Index: doc/theses/thierry_delisle_PhD/thesis/Makefile
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/Makefile	(revision bfd551277c04ecd8721e431ae84ab67dfad0fd2b)
+++ doc/theses/thierry_delisle_PhD/thesis/Makefile	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -37,4 +37,8 @@
 	emptytree \
 	fairness \
+	idle \
+	idle1 \
+	idle2 \
+	idle_state \
 	io_uring \
 	pivot_ring \
@@ -42,4 +46,19 @@
 	cycle \
 	result.cycle.jax.ops \
+	result.yield.jax.ops \
+	result.churn.jax.ops \
+	result.cycle.jax.ns \
+	result.yield.jax.ns \
+	result.churn.jax.ns \
+	result.cycle.low.jax.ops \
+	result.yield.low.jax.ops \
+	result.churn.low.jax.ops \
+	result.cycle.low.jax.ns \
+	result.yield.low.jax.ns \
+	result.churn.low.jax.ns \
+	result.memcd.updt.qps \
+	result.memcd.updt.lat \
+	result.memcd.rate.qps \
+	result.memcd.rate.99th \
 }
 
@@ -116,9 +135,31 @@
 	python3 $< $@
 
-build/result.%.ns.svg : data/% | ${Build}
-	../../../../benchmark/plot.py -f $< -o $@ -y "ns per ops"
+cycle_jax_ops_FLAGS = --MaxY=120000000
+cycle_low_jax_ops_FLAGS = --MaxY=120000000
+cycle_jax_ns_FLAGS = --MaxY=2000
+cycle_low_jax_ns_FLAGS = --MaxY=2000
 
-build/result.%.ops.svg : data/% | ${Build}
-	../../../../benchmark/plot.py -f $< -o $@ -y "Ops per second"
+yield_jax_ops_FLAGS = --MaxY=150000000
+yield_low_jax_ops_FLAGS = --MaxY=150000000
+yield_jax_ns_FLAGS = --MaxY=1500
+yield_low_jax_ns_FLAGS = --MaxY=1500
+
+build/result.%.ns.svg : data/% Makefile | ${Build}
+	../../../../benchmark/plot.py -f $< -o $@ -y "ns per ops/procs" $($(subst .,_,$*)_ns_FLAGS)
+
+build/result.%.ops.svg : data/% Makefile | ${Build}
+	../../../../benchmark/plot.py -f $< -o $@ -y "Ops per second" $($(subst .,_,$*)_ops_FLAGS)
+
+build/result.memcd.updt.qps.svg : data/memcd.updt Makefile | ${Build}
+	../../../../benchmark/plot.py -f $< -o $@ -y "Actual QPS" -x "Update Ratio"
+
+build/result.memcd.updt.lat.svg : data/memcd.updt Makefile | ${Build}
+	../../../../benchmark/plot.py -f $< -o $@ -y "Average Read Latency" -x "Update Ratio"
+
+build/result.memcd.rate.qps.svg : data/memcd.rate Makefile | ${Build}
+	../../../../benchmark/plot.py -f $< -o $@ -y "Actual QPS" -x "Target QPS"
+
+build/result.memcd.rate.99th.svg : data/memcd.rate Makefile | ${Build}
+	../../../../benchmark/plot.py -f $< -o $@ -y "Tail Read Latency" -x "Target QPS"
 
 ## pstex with inverted colors
Index: doc/theses/thierry_delisle_PhD/thesis/data/churn.jax
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/churn.jax	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
+++ doc/theses/thierry_delisle_PhD/thesis/data/churn.jax	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -0,0 +1,1 @@
+[["rdq-churn-cfa", "./rdq-churn-cfa -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10016.628354, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 103371393.0, "Total blocks": 42643001.0, "Ops per second": 10319978.87, "ns per ops": 96.9, "Ops per threads": 1033713.0, "Ops per procs": 103371393.0, "Ops/sec/procs": 10319978.87, "ns per ops/procs": 96.9}],["rdq-churn-tokio", "./rdq-churn-tokio -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10100.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 6.0, "Total Operations(ops)": 256473373.0, "Ops per second": 25647337.0, "ns per ops": 39.0, "Ops per threads": 320591.0, "Ops per procs": 32059171.0, "Ops/sec/procs": 3205917.0, "ns per ops/procs": 315.0}],["rdq-churn-fibre", "./rdq-churn-fibre -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10017.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 76683227.0, "Total blocks": 27590624.0, "Ops per second": 7655096.57, "ns per ops": 130.63, "Ops per threads": 47927.0, "Ops per procs": 4792701.0, "Ops/sec/procs": 478443.54, "ns per ops/procs": 2090.11}],["rdq-churn-go", "./rdq-churn-go -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 190480943.0, "Ops per second": 19045032.48, "ns per ops": 52.51, "Ops per threads": 79367.0, "Ops per procs": 7936705.0, "Ops/sec/procs": 793543.02, "ns per ops/procs": 1260.17}],["rdq-churn-fibre", "./rdq-churn-fibre -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10016.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 76380433.0, "Total blocks": 27484490.0, "Ops per second": 7625307.92, "ns per ops": 131.14, "Ops per threads": 47737.0, "Ops per procs": 4773777.0, "Ops/sec/procs": 476581.75, "ns per ops/procs": 
2098.28}],["rdq-churn-tokio", "./rdq-churn-tokio -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 6.0, "Total Operations(ops)": 73903563.0, "Ops per second": 7390356.0, "ns per ops": 136.0, "Ops per threads": 739035.0, "Ops per procs": 73903563.0, "Ops/sec/procs": 7390356.0, "ns per ops/procs": 136.0}],["rdq-churn-tokio", "./rdq-churn-tokio -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 6.0, "Total Operations(ops)": 469714305.0, "Ops per second": 46971430.0, "ns per ops": 21.0, "Ops per threads": 293571.0, "Ops per procs": 29357144.0, "Ops/sec/procs": 2935714.0, "ns per ops/procs": 340.0}],["rdq-churn-tokio", "./rdq-churn-tokio -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 6.0, "Total Operations(ops)": 653514397.0, "Ops per second": 65351439.0, "ns per ops": 15.0, "Ops per threads": 272297.0, "Ops per procs": 27229766.0, "Ops/sec/procs": 2722976.0, "ns per ops/procs": 367.0}],["rdq-churn-go", "./rdq-churn-go -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 213476978.0, "Ops per second": 21344951.67, "ns per ops": 46.85, "Ops per threads": 133423.0, "Ops per procs": 13342311.0, "Ops/sec/procs": 1334059.48, "ns per ops/procs": 749.59}],["rdq-churn-go", "./rdq-churn-go -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 214624132.0, "Ops per second": 21458252.32, "ns per ops": 46.6, "Ops per threads": 134140.0, "Ops per procs": 13414008.0, "Ops/sec/procs": 1341140.77, "ns per ops/procs": 745.63}],["rdq-churn-cfa", "./rdq-churn-cfa -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 
10025.783632, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 550743553.0, "Total blocks": 240452132.0, "Ops per second": 54932718.8, "ns per ops": 18.2, "Ops per threads": 344214.0, "Ops per procs": 34421472.0, "Ops/sec/procs": 3433294.92, "ns per ops/procs": 291.27}],["rdq-churn-fibre", "./rdq-churn-fibre -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10026.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 75322787.0, "Total blocks": 28388443.0, "Ops per second": 7512321.69, "ns per ops": 133.11, "Ops per threads": 31384.0, "Ops per procs": 3138449.0, "Ops/sec/procs": 313013.4, "ns per ops/procs": 3194.75}],["rdq-churn-fibre", "./rdq-churn-fibre -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10008.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 79715530.0, "Total blocks": 24912815.0, "Ops per second": 7964937.17, "ns per ops": 125.55, "Ops per threads": 797155.0, "Ops per procs": 79715530.0, "Ops/sec/procs": 7964937.17, "ns per ops/procs": 125.55}],["rdq-churn-fibre", "./rdq-churn-fibre -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 79751618.0, "Total blocks": 24924094.0, "Ops per second": 7968946.78, "ns per ops": 125.49, "Ops per threads": 797516.0, "Ops per procs": 79751618.0, "Ops/sec/procs": 7968946.78, "ns per ops/procs": 125.49}],["rdq-churn-cfa", "./rdq-churn-cfa -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10016.627702, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 100444522.0, "Total blocks": 42678977.0, "Ops per second": 10027778.31, "ns per ops": 99.72, "Ops per threads": 1004445.0, "Ops per procs": 100444522.0, "Ops/sec/procs": 10027778.31, "ns per ops/procs": 99.72}],["rdq-churn-cfa", 
"./rdq-churn-cfa -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10006.863438, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 761548918.0, "Total blocks": 327474630.0, "Ops per second": 76102659.21, "ns per ops": 13.14, "Ops per threads": 317312.0, "Ops per procs": 31731204.0, "Ops/sec/procs": 3170944.13, "ns per ops/procs": 315.36}],["rdq-churn-cfa", "./rdq-churn-cfa -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10024.630415, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 549456394.0, "Total blocks": 238577198.0, "Ops per second": 54810638.52, "ns per ops": 18.24, "Ops per threads": 343410.0, "Ops per procs": 34341024.0, "Ops/sec/procs": 3425664.91, "ns per ops/procs": 291.91}],["rdq-churn-tokio", "./rdq-churn-tokio -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10000.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 6.0, "Total Operations(ops)": 653669226.0, "Ops per second": 65366922.0, "ns per ops": 15.0, "Ops per threads": 272362.0, "Ops per procs": 27236217.0, "Ops/sec/procs": 2723621.0, "ns per ops/procs": 367.0}],["rdq-churn-tokio", "./rdq-churn-tokio -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 6.0, "Total Operations(ops)": 478747005.0, "Ops per second": 47874700.0, "ns per ops": 20.0, "Ops per threads": 299216.0, "Ops per procs": 29921687.0, "Ops/sec/procs": 2992168.0, "ns per ops/procs": 334.0}],["rdq-churn-tokio", "./rdq-churn-tokio -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10100.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 6.0, "Total Operations(ops)": 259926863.0, "Ops per second": 25992686.0, "ns per ops": 38.0, "Ops per threads": 324908.0, "Ops per procs": 32490857.0, "Ops/sec/procs": 3249085.0, "ns per ops/procs": 310.0}],["rdq-churn-go", "./rdq-churn-go -p 24 -d 
10 -t 2400 -s 1920", {"Duration (ms)": 10002.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 186159297.0, "Ops per second": 18611771.03, "ns per ops": 53.73, "Ops per threads": 77566.0, "Ops per procs": 7756637.0, "Ops/sec/procs": 775490.46, "ns per ops/procs": 1289.51}],["rdq-churn-fibre", "./rdq-churn-fibre -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 79811057.0, "Total blocks": 24942609.0, "Ops per second": 7974829.0, "ns per ops": 125.39, "Ops per threads": 798110.0, "Ops per procs": 79811057.0, "Ops/sec/procs": 7974829.0, "ns per ops/procs": 125.39}],["rdq-churn-fibre", "./rdq-churn-fibre -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10041.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 75138224.0, "Total blocks": 28316320.0, "Ops per second": 7483121.08, "ns per ops": 133.63, "Ops per threads": 31307.0, "Ops per procs": 3130759.0, "Ops/sec/procs": 311796.71, "ns per ops/procs": 3207.22}],["rdq-churn-fibre", "./rdq-churn-fibre -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10024.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 76515053.0, "Total blocks": 27532672.0, "Ops per second": 7632511.21, "ns per ops": 131.02, "Ops per threads": 47821.0, "Ops per procs": 4782190.0, "Ops/sec/procs": 477031.95, "ns per ops/procs": 2096.3}],["rdq-churn-fibre", "./rdq-churn-fibre -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10026.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 75360901.0, "Total blocks": 28401609.0, "Ops per second": 7515905.66, "ns per ops": 133.05, "Ops per threads": 31400.0, "Ops per procs": 3140037.0, "Ops/sec/procs": 313162.74, "ns per ops/procs": 3193.23}],["rdq-churn-go", 
"./rdq-churn-go -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 151314962.0, "Ops per second": 15129984.81, "ns per ops": 66.09, "Ops per threads": 1513149.0, "Ops per procs": 151314962.0, "Ops/sec/procs": 15129984.81, "ns per ops/procs": 66.09}],["rdq-churn-fibre", "./rdq-churn-fibre -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10011.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 79141078.0, "Total blocks": 29863613.0, "Ops per second": 7904875.43, "ns per ops": 126.5, "Ops per threads": 98926.0, "Ops per procs": 9892634.0, "Ops/sec/procs": 988109.43, "ns per ops/procs": 1012.03}],["rdq-churn-go", "./rdq-churn-go -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 188556624.0, "Ops per second": 18852533.83, "ns per ops": 53.04, "Ops per threads": 78565.0, "Ops per procs": 7856526.0, "Ops/sec/procs": 785522.24, "ns per ops/procs": 1273.04}],["rdq-churn-go", "./rdq-churn-go -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10001.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 228229792.0, "Ops per second": 22820542.67, "ns per ops": 43.82, "Ops per threads": 285287.0, "Ops per procs": 28528724.0, "Ops/sec/procs": 2852567.83, "ns per ops/procs": 350.56}],["rdq-churn-cfa", "./rdq-churn-cfa -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10020.121849, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 302307497.0, "Total blocks": 128429554.0, "Ops per second": 30170042.0, "ns per ops": 33.15, "Ops per threads": 377884.0, "Ops per procs": 37788437.0, "Ops/sec/procs": 3771255.25, "ns per ops/procs": 265.16}],["rdq-churn-go", "./rdq-churn-go -p 8 -d 10 -t 800 -s 640", 
{"Duration (ms)": 10001.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 226161389.0, "Ops per second": 22613570.5, "ns per ops": 44.22, "Ops per threads": 282701.0, "Ops per procs": 28270173.0, "Ops/sec/procs": 2826696.31, "ns per ops/procs": 353.77}],["rdq-churn-fibre", "./rdq-churn-fibre -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10017.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 78799007.0, "Total blocks": 29733108.0, "Ops per second": 7865960.45, "ns per ops": 127.13, "Ops per threads": 98498.0, "Ops per procs": 9849875.0, "Ops/sec/procs": 983245.06, "ns per ops/procs": 1017.04}],["rdq-churn-go", "./rdq-churn-go -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10005.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 226114608.0, "Ops per second": 22598888.16, "ns per ops": 44.25, "Ops per threads": 282643.0, "Ops per procs": 28264326.0, "Ops/sec/procs": 2824861.02, "ns per ops/procs": 354.0}],["rdq-churn-cfa", "./rdq-churn-cfa -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10019.343306, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 291933237.0, "Total blocks": 129498687.0, "Ops per second": 29136963.18, "ns per ops": 34.32, "Ops per threads": 364916.0, "Ops per procs": 36491654.0, "Ops/sec/procs": 3642120.4, "ns per ops/procs": 274.57}],["rdq-churn-tokio", "./rdq-churn-tokio -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10100.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 6.0, "Total Operations(ops)": 260279998.0, "Ops per second": 26027999.0, "ns per ops": 38.0, "Ops per threads": 325349.0, "Ops per procs": 32534999.0, "Ops/sec/procs": 3253499.0, "ns per ops/procs": 310.0}],["rdq-churn-cfa", "./rdq-churn-cfa -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10007.059222, "Number of 
processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 765662737.0, "Total blocks": 325286764.0, "Ops per second": 76512262.0, "ns per ops": 13.07, "Ops per threads": 319026.0, "Ops per procs": 31902614.0, "Ops/sec/procs": 3188010.92, "ns per ops/procs": 313.68}],["rdq-churn-cfa", "./rdq-churn-cfa -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10016.849943, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 96124027.0, "Total blocks": 44464161.0, "Ops per second": 9596233.1, "ns per ops": 104.21, "Ops per threads": 961240.0, "Ops per procs": 96124027.0, "Ops/sec/procs": 9596233.1, "ns per ops/procs": 104.21}],["rdq-churn-tokio", "./rdq-churn-tokio -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 6.0, "Total Operations(ops)": 74842673.0, "Ops per second": 7484267.0, "ns per ops": 134.0, "Ops per threads": 748426.0, "Ops per procs": 74842673.0, "Ops/sec/procs": 7484267.0, "ns per ops/procs": 134.0}],["rdq-churn-fibre", "./rdq-churn-fibre -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10016.0, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total Operations(ops)": 80963737.0, "Total blocks": 30553569.0, "Ops per second": 8082674.95, "ns per ops": 123.72, "Ops per threads": 101204.0, "Ops per procs": 10120467.0, "Ops/sec/procs": 1010334.37, "ns per ops/procs": 989.77}],["rdq-churn-tokio", "./rdq-churn-tokio -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 6.0, "Total Operations(ops)": 73702204.0, "Ops per second": 7370220.0, "ns per ops": 137.0, "Ops per threads": 737022.0, "Ops per procs": 73702204.0, "Ops/sec/procs": 7370220.0, "ns per ops/procs": 137.0}],["rdq-churn-go", "./rdq-churn-go -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10001.0, "Number of processors": 1.0, "Number of 
threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 151011982.0, "Ops per second": 15099599.79, "ns per ops": 66.23, "Ops per threads": 1510119.0, "Ops per procs": 151011982.0, "Ops/sec/procs": 15099599.79, "ns per ops/procs": 66.23}],["rdq-churn-go", "./rdq-churn-go -p 1 -d 10 -t 100 -s 80", {"Duration (ms)": 10001.0, "Number of processors": 1.0, "Number of threads": 100.0, "Number of spots": 80.0, "Total Operations(ops)": 151419332.0, "Ops per second": 15140359.92, "ns per ops": 66.05, "Ops per threads": 1514193.0, "Ops per procs": 151419332.0, "Ops/sec/procs": 15140359.92, "ns per ops/procs": 66.05}],["rdq-churn-tokio", "./rdq-churn-tokio -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 6.0, "Total Operations(ops)": 648186982.0, "Ops per second": 64818698.0, "ns per ops": 15.0, "Ops per threads": 270077.0, "Ops per procs": 27007790.0, "Ops/sec/procs": 2700779.0, "ns per ops/procs": 370.0}],["rdq-churn-go", "./rdq-churn-go -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 213886424.0, "Ops per second": 21385699.62, "ns per ops": 46.76, "Ops per threads": 133679.0, "Ops per procs": 13367901.0, "Ops/sec/procs": 1336606.23, "ns per ops/procs": 748.16}],["rdq-churn-cfa", "./rdq-churn-cfa -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10025.525505, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 1280.0, "Total Operations(ops)": 552853400.0, "Total blocks": 239647709.0, "Ops per second": 55144580.67, "ns per ops": 18.13, "Ops per threads": 345533.0, "Ops per procs": 34553337.0, "Ops/sec/procs": 3446536.29, "ns per ops/procs": 290.15}],["rdq-churn-cfa", "./rdq-churn-cfa -p 8 -d 10 -t 800 -s 640", {"Duration (ms)": 10020.252098, "Number of processors": 8.0, "Number of threads": 800.0, "Number of spots": 640.0, "Total 
Operations(ops)": 295438407.0, "Total blocks": 128292778.0, "Ops per second": 29484129.15, "ns per ops": 33.92, "Ops per threads": 369298.0, "Ops per procs": 36929800.0, "Ops/sec/procs": 3685516.14, "ns per ops/procs": 271.33}],["rdq-churn-tokio", "./rdq-churn-tokio -p 16 -d 10 -t 1600 -s 1280", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Number of spots": 6.0, "Total Operations(ops)": 476585040.0, "Ops per second": 47658504.0, "ns per ops": 20.0, "Ops per threads": 297865.0, "Ops per procs": 29786565.0, "Ops/sec/procs": 2978656.0, "ns per ops/procs": 335.0}],["rdq-churn-cfa", "./rdq-churn-cfa -p 24 -d 10 -t 2400 -s 1920", {"Duration (ms)": 10007.127025, "Number of processors": 24.0, "Number of threads": 2400.0, "Number of spots": 1920.0, "Total Operations(ops)": 777392421.0, "Total blocks": 323387255.0, "Ops per second": 77683876.61, "ns per ops": 12.87, "Ops per threads": 323913.0, "Ops per procs": 32391350.0, "Ops/sec/procs": 3236828.19, "ns per ops/procs": 308.94}]]
Index: doc/theses/thierry_delisle_PhD/thesis/data/churn.low.jax
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/churn.low.jax	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
+++ doc/theses/thierry_delisle_PhD/thesis/data/churn.low.jax	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -0,0 +1,1 @@
+[["rdq-churn-fibre", "./rdq-churn-fibre -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10007.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 45886505.0, "Total blocks": 23581519.0, "Ops per second": 4584996.42, "ns per ops": 218.1, "Ops per threads": 2867906.0, "Ops per procs": 5735813.0, "Ops/sec/procs": 573124.55, "ns per ops/procs": 1744.82}],["rdq-churn-go", "./rdq-churn-go -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10031.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 52948646.0, "Ops per second": 5278451.52, "ns per ops": 189.45, "Ops per threads": 1654645.0, "Ops per procs": 3309290.0, "Ops/sec/procs": 329903.22, "ns per ops/procs": 3031.19}],["rdq-churn-fibre", "./rdq-churn-fibre -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10008.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 45571308.0, "Total blocks": 23418912.0, "Ops per second": 4553347.51, "ns per ops": 219.62, "Ops per threads": 2848206.0, "Ops per procs": 5696413.0, "Ops/sec/procs": 569168.44, "ns per ops/procs": 1756.95}],["rdq-churn-go", "./rdq-churn-go -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10020.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 57549768.0, "Ops per second": 5743275.64, "ns per ops": 174.12, "Ops per threads": 3596860.0, "Ops per procs": 7193721.0, "Ops/sec/procs": 717909.45, "ns per ops/procs": 1392.93}],["rdq-churn-go", "./rdq-churn-go -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10025.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 52431553.0, "Ops per second": 5229696.44, "ns per ops": 191.22, "Ops per threads": 1638486.0, "Ops per procs": 3276972.0, "Ops/sec/procs": 326856.03, "ns per ops/procs": 3059.45}],["rdq-churn-fibre", "./rdq-churn-fibre -p 16 -d 10 -t 32 -s 16", {"Duration 
(ms)": 10009.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 67810265.0, "Total blocks": 32862605.0, "Ops per second": 6774910.95, "ns per ops": 147.6, "Ops per threads": 2119070.0, "Ops per procs": 4238141.0, "Ops/sec/procs": 423431.93, "ns per ops/procs": 2361.65}],["rdq-churn-go", "./rdq-churn-go -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10013.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 57847054.0, "Ops per second": 5777194.35, "ns per ops": 173.09, "Ops per threads": 3615440.0, "Ops per procs": 7230881.0, "Ops/sec/procs": 722149.29, "ns per ops/procs": 1384.76}],["rdq-churn-go", "./rdq-churn-go -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10065.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 75010542.0, "Ops per second": 7452234.24, "ns per ops": 134.19, "Ops per threads": 37505271.0, "Ops per procs": 75010542.0, "Ops/sec/procs": 7452234.24, "ns per ops/procs": 134.19}],["rdq-churn-go", "./rdq-churn-go -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10018.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 75353303.0, "Ops per second": 7521765.77, "ns per ops": 132.95, "Ops per threads": 37676651.0, "Ops per procs": 75353303.0, "Ops/sec/procs": 7521765.77, "ns per ops/procs": 132.95}],["rdq-churn-go", "./rdq-churn-go -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10038.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 77580756.0, "Ops per second": 7728213.1, "ns per ops": 129.4, "Ops per threads": 1616265.0, "Ops per procs": 3232531.0, "Ops/sec/procs": 322008.88, "ns per ops/procs": 3105.5}],["rdq-churn-tokio", "./rdq-churn-tokio -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10100.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 6.0, "Total 
Operations(ops)": 55704847.0, "Ops per second": 5570484.0, "ns per ops": 181.0, "Ops per threads": 1740776.0, "Ops per procs": 3481552.0, "Ops/sec/procs": 348155.0, "ns per ops/procs": 2901.0}],["rdq-churn-fibre", "./rdq-churn-fibre -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 75768750.0, "Total blocks": 39198331.0, "Ops per second": 7575582.46, "ns per ops": 132.0, "Ops per threads": 1578515.0, "Ops per procs": 3157031.0, "Ops/sec/procs": 315649.27, "ns per ops/procs": 3168.07}],["rdq-churn-fibre", "./rdq-churn-fibre -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10009.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 68082972.0, "Total blocks": 32947596.0, "Ops per second": 6801956.28, "ns per ops": 147.02, "Ops per threads": 2127592.0, "Ops per procs": 4255185.0, "Ops/sec/procs": 425122.27, "ns per ops/procs": 2352.26}],["rdq-churn-tokio", "./rdq-churn-tokio -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 6.0, "Total Operations(ops)": 53235539.0, "Ops per second": 5323553.0, "ns per ops": 189.0, "Ops per threads": 26617769.0, "Ops per procs": 53235539.0, "Ops/sec/procs": 5323553.0, "ns per ops/procs": 189.0}],["rdq-churn-go", "./rdq-churn-go -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10029.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 51995117.0, "Ops per second": 5184034.28, "ns per ops": 192.9, "Ops per threads": 1624847.0, "Ops per procs": 3249694.0, "Ops/sec/procs": 324002.14, "ns per ops/procs": 3086.4}],["rdq-churn-tokio", "./rdq-churn-tokio -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 6.0, "Total Operations(ops)": 75855247.0, "Ops per second": 7585524.0, "ns per 
ops": 133.0, "Ops per threads": 1580317.0, "Ops per procs": 3160635.0, "Ops/sec/procs": 316063.0, "ns per ops/procs": 3195.0}],["rdq-churn-cfa", "./rdq-churn-cfa -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10016.677107, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 82722736.0, "Total blocks": 51097615.0, "Ops per second": 8258500.81, "ns per ops": 121.09, "Ops per threads": 41361368.0, "Ops per procs": 82722736.0, "Ops/sec/procs": 8258500.81, "ns per ops/procs": 121.09}],["rdq-churn-tokio", "./rdq-churn-tokio -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 6.0, "Total Operations(ops)": 82039759.0, "Ops per second": 8203975.0, "ns per ops": 123.0, "Ops per threads": 1709161.0, "Ops per procs": 3418323.0, "Ops/sec/procs": 341832.0, "ns per ops/procs": 2954.0}],["rdq-churn-cfa", "./rdq-churn-cfa -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10019.859008, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 60823669.0, "Total blocks": 20963529.0, "Ops per second": 6070311.86, "ns per ops": 164.74, "Ops per threads": 3801479.0, "Ops per procs": 7602958.0, "Ops/sec/procs": 758788.98, "ns per ops/procs": 1317.89}],["rdq-churn-cfa", "./rdq-churn-cfa -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10026.064514, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 84419895.0, "Total blocks": 31896077.0, "Ops per second": 8420043.07, "ns per ops": 118.76, "Ops per threads": 2638121.0, "Ops per procs": 5276243.0, "Ops/sec/procs": 526252.69, "ns per ops/procs": 1900.23}],["rdq-churn-go", "./rdq-churn-go -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10066.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 86259431.0, "Ops per second": 8569319.99, "ns per ops": 116.7, "Ops per threads": 43129715.0, 
"Ops per procs": 86259431.0, "Ops/sec/procs": 8569319.99, "ns per ops/procs": 116.7}],["rdq-churn-cfa", "./rdq-churn-cfa -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10020.476753, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 61723297.0, "Total blocks": 27893419.0, "Ops per second": 6159716.6, "ns per ops": 162.35, "Ops per threads": 3857706.0, "Ops per procs": 7715412.0, "Ops/sec/procs": 769964.58, "ns per ops/procs": 1298.76}],["rdq-churn-tokio", "./rdq-churn-tokio -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 6.0, "Total Operations(ops)": 39895438.0, "Ops per second": 3989543.0, "ns per ops": 253.0, "Ops per threads": 2493464.0, "Ops per procs": 4986929.0, "Ops/sec/procs": 498692.0, "ns per ops/procs": 2025.0}],["rdq-churn-go", "./rdq-churn-go -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10044.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 78307639.0, "Ops per second": 7795946.42, "ns per ops": 128.27, "Ops per threads": 1631409.0, "Ops per procs": 3262818.0, "Ops/sec/procs": 324831.1, "ns per ops/procs": 3078.52}],["rdq-churn-fibre", "./rdq-churn-fibre -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10008.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 45589496.0, "Total blocks": 23479172.0, "Ops per second": 4555270.66, "ns per ops": 219.53, "Ops per threads": 2849343.0, "Ops per procs": 5698687.0, "Ops/sec/procs": 569408.83, "ns per ops/procs": 1756.21}],["rdq-churn-tokio", "./rdq-churn-tokio -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 6.0, "Total Operations(ops)": 52936021.0, "Ops per second": 5293602.0, "ns per ops": 190.0, "Ops per threads": 26468010.0, "Ops per procs": 52936021.0, "Ops/sec/procs": 5293602.0, "ns per ops/procs": 
190.0}],["rdq-churn-fibre", "./rdq-churn-fibre -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 74937686.0, "Total blocks": 38772422.0, "Ops per second": 7492506.92, "ns per ops": 133.47, "Ops per threads": 1561201.0, "Ops per procs": 3122403.0, "Ops/sec/procs": 312187.79, "ns per ops/procs": 3203.2}],["rdq-churn-cfa", "./rdq-churn-cfa -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10019.966204, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 66694419.0, "Total blocks": 28038485.0, "Ops per second": 6656152.09, "ns per ops": 150.24, "Ops per threads": 4168401.0, "Ops per procs": 8336802.0, "Ops/sec/procs": 832019.01, "ns per ops/procs": 1201.9}],["rdq-churn-tokio", "./rdq-churn-tokio -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10099.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 6.0, "Total Operations(ops)": 55692916.0, "Ops per second": 5569291.0, "ns per ops": 181.0, "Ops per threads": 1740403.0, "Ops per procs": 3480807.0, "Ops/sec/procs": 348080.0, "ns per ops/procs": 2901.0}],["rdq-churn-cfa", "./rdq-churn-cfa -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10007.064432, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 112046278.0, "Total blocks": 39631405.0, "Ops per second": 11196717.95, "ns per ops": 89.31, "Ops per threads": 2334297.0, "Ops per procs": 4668594.0, "Ops/sec/procs": 466529.91, "ns per ops/procs": 2143.49}],["rdq-churn-tokio", "./rdq-churn-tokio -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 6.0, "Total Operations(ops)": 34267306.0, "Ops per second": 3426730.0, "ns per ops": 294.0, "Ops per threads": 2141706.0, "Ops per procs": 4283413.0, "Ops/sec/procs": 428341.0, "ns per ops/procs": 2357.0}],["rdq-churn-cfa", 
"./rdq-churn-cfa -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10016.937779, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 77026352.0, "Total blocks": 52134150.0, "Ops per second": 7689610.71, "ns per ops": 130.05, "Ops per threads": 38513176.0, "Ops per procs": 77026352.0, "Ops/sec/procs": 7689610.71, "ns per ops/procs": 130.05}],["rdq-churn-fibre", "./rdq-churn-fibre -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 59271050.0, "Total blocks": 0.0, "Ops per second": 5922868.5, "ns per ops": 168.84, "Ops per threads": 29635525.0, "Ops per procs": 59271050.0, "Ops/sec/procs": 5922868.5, "ns per ops/procs": 168.84}],["rdq-churn-fibre", "./rdq-churn-fibre -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 59229442.0, "Total blocks": 0.0, "Ops per second": 5918658.48, "ns per ops": 168.96, "Ops per threads": 29614721.0, "Ops per procs": 59229442.0, "Ops/sec/procs": 5918658.48, "ns per ops/procs": 168.96}],["rdq-churn-tokio", "./rdq-churn-tokio -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 6.0, "Total Operations(ops)": 76525587.0, "Ops per second": 7652558.0, "ns per ops": 131.0, "Ops per threads": 1594283.0, "Ops per procs": 3188566.0, "Ops/sec/procs": 318856.0, "ns per ops/procs": 3167.0}],["rdq-churn-tokio", "./rdq-churn-tokio -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 6.0, "Total Operations(ops)": 35399271.0, "Ops per second": 3539927.0, "ns per ops": 285.0, "Ops per threads": 2212454.0, "Ops per procs": 4424908.0, "Ops/sec/procs": 442490.0, "ns per ops/procs": 2282.0}],["rdq-churn-tokio", "./rdq-churn-tokio -p 1 -d 10 -t 2 -s 1", {"Duration 
(ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 6.0, "Total Operations(ops)": 52944602.0, "Ops per second": 5294460.0, "ns per ops": 190.0, "Ops per threads": 26472301.0, "Ops per procs": 52944602.0, "Ops/sec/procs": 5294460.0, "ns per ops/procs": 190.0}],["rdq-churn-fibre", "./rdq-churn-fibre -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 59246475.0, "Total blocks": 0.0, "Ops per second": 5920233.28, "ns per ops": 168.91, "Ops per threads": 29623237.0, "Ops per procs": 59246475.0, "Ops/sec/procs": 5920233.28, "ns per ops/procs": 168.91}],["rdq-churn-cfa", "./rdq-churn-cfa -p 1 -d 10 -t 2 -s 1", {"Duration (ms)": 10017.056033, "Number of processors": 1.0, "Number of threads": 2.0, "Number of spots": 1.0, "Total Operations(ops)": 78139970.0, "Total blocks": 50626382.0, "Ops per second": 7800692.11, "ns per ops": 128.19, "Ops per threads": 39069985.0, "Ops per procs": 78139970.0, "Ops/sec/procs": 7800692.11, "ns per ops/procs": 128.19}],["rdq-churn-cfa", "./rdq-churn-cfa -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10024.66772, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 91752654.0, "Total blocks": 25309024.0, "Ops per second": 9152687.81, "ns per ops": 109.26, "Ops per threads": 2867270.0, "Ops per procs": 5734540.0, "Ops/sec/procs": 572042.99, "ns per ops/procs": 1748.12}],["rdq-churn-cfa", "./rdq-churn-cfa -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10007.111246, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 103630541.0, "Total blocks": 37194166.0, "Ops per second": 10355689.91, "ns per ops": 96.57, "Ops per threads": 2158969.0, "Ops per procs": 4317939.0, "Ops/sec/procs": 431487.08, "ns per ops/procs": 2317.57}],["rdq-churn-fibre", "./rdq-churn-fibre -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 
10001.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 75844225.0, "Total blocks": 39206982.0, "Ops per second": 7583184.77, "ns per ops": 131.87, "Ops per threads": 1580088.0, "Ops per procs": 3160176.0, "Ops/sec/procs": 315966.03, "ns per ops/procs": 3164.9}],["rdq-churn-go", "./rdq-churn-go -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10039.0, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 78988905.0, "Ops per second": 7867923.65, "ns per ops": 127.1, "Ops per threads": 1645602.0, "Ops per procs": 3291204.0, "Ops/sec/procs": 327830.15, "ns per ops/procs": 3050.36}],["rdq-churn-cfa", "./rdq-churn-cfa -p 24 -d 10 -t 48 -s 24", {"Duration (ms)": 10007.049884, "Number of processors": 24.0, "Number of threads": 48.0, "Number of spots": 24.0, "Total Operations(ops)": 103737120.0, "Total blocks": 40651067.0, "Ops per second": 10366403.81, "ns per ops": 96.47, "Ops per threads": 2161190.0, "Ops per procs": 4322380.0, "Ops/sec/procs": 431933.49, "ns per ops/procs": 2315.17}],["rdq-churn-tokio", "./rdq-churn-tokio -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10100.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 6.0, "Total Operations(ops)": 46882118.0, "Ops per second": 4688211.0, "ns per ops": 215.0, "Ops per threads": 1465066.0, "Ops per procs": 2930132.0, "Ops/sec/procs": 293013.0, "ns per ops/procs": 3446.0}],["rdq-churn-go", "./rdq-churn-go -p 8 -d 10 -t 16 -s 8", {"Duration (ms)": 10018.0, "Number of processors": 8.0, "Number of threads": 16.0, "Number of spots": 8.0, "Total Operations(ops)": 57617091.0, "Ops per second": 5751024.83, "ns per ops": 173.88, "Ops per threads": 3601068.0, "Ops per procs": 7202136.0, "Ops/sec/procs": 718878.1, "ns per ops/procs": 1391.06}],["rdq-churn-cfa", "./rdq-churn-cfa -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10025.5689, "Number of processors": 16.0, "Number of threads": 32.0, 
"Number of spots": 16.0, "Total Operations(ops)": 93174864.0, "Total blocks": 36631659.0, "Ops per second": 9293723.37, "ns per ops": 107.6, "Ops per threads": 2911714.0, "Ops per procs": 5823429.0, "Ops/sec/procs": 580857.71, "ns per ops/procs": 1721.59}],["rdq-churn-fibre", "./rdq-churn-fibre -p 16 -d 10 -t 32 -s 16", {"Duration (ms)": 10008.0, "Number of processors": 16.0, "Number of threads": 32.0, "Number of spots": 16.0, "Total Operations(ops)": 68440910.0, "Total blocks": 33142661.0, "Ops per second": 6838025.5, "ns per ops": 146.24, "Ops per threads": 2138778.0, "Ops per procs": 4277556.0, "Ops/sec/procs": 427376.59, "ns per ops/procs": 2339.86}]]
Index: doc/theses/thierry_delisle_PhD/thesis/data/cycle.jax
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/cycle.jax	(revision bfd551277c04ecd8721e431ae84ab67dfad0fd2b)
+++ doc/theses/thierry_delisle_PhD/thesis/data/cycle.jax	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -1,1 +1,1 @@
-[["rdq-cycle-go", "./rdq-cycle-go -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 43606897.0, "Ops per second": 8720908.73, "ns per ops": 114.67, "Ops per threads": 2180344.0, "Ops per procs": 10901724.0, "Ops/sec/procs": 2180227.18, "ns per ops/procs": 458.67}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5010.922033, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 93993568.0, "Total blocks": 93993209.0, "Ops per second": 18757739.07, "ns per ops": 53.31, "Ops per threads": 1174919.0, "Ops per procs": 5874598.0, "Ops/sec/procs": 1172358.69, "ns per ops/procs": 852.98}],["rdq-cycle-go", "./rdq-cycle-go -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 136763517.0, "Ops per second": 27351079.35, "ns per ops": 36.56, "Ops per threads": 1709543.0, "Ops per procs": 8547719.0, "Ops/sec/procs": 1709442.46, "ns per ops/procs": 584.99}],["rdq-cycle-go", "./rdq-cycle-go -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 27778961.0, "Ops per second": 5555545.09, "ns per ops": 180.0, "Ops per threads": 5555792.0, "Ops per procs": 27778961.0, "Ops/sec/procs": 5555545.09, "ns per ops/procs": 180.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5009.290878, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 43976310.0, "Total blocks": 43976217.0, "Ops per second": 8778949.17, "ns per ops": 113.91, "Ops per threads": 2198815.0, "Ops per procs": 10994077.0, "Ops/sec/procs": 2194737.29, "ns per ops/procs": 455.64}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 4 -p 4 -d 5 -r 5", 
{"Duration (ms)": 5009.151542, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 44132300.0, "Total blocks": 44132201.0, "Ops per second": 8810334.37, "ns per ops": 113.5, "Ops per threads": 2206615.0, "Ops per procs": 11033075.0, "Ops/sec/procs": 2202583.59, "ns per ops/procs": 454.01}],["rdq-cycle-go", "./rdq-cycle-go -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 46353896.0, "Ops per second": 9270294.11, "ns per ops": 107.87, "Ops per threads": 2317694.0, "Ops per procs": 11588474.0, "Ops/sec/procs": 2317573.53, "ns per ops/procs": 431.49}],["rdq-cycle-go", "./rdq-cycle-go -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 27894379.0, "Ops per second": 5578591.58, "ns per ops": 179.26, "Ops per threads": 5578875.0, "Ops per procs": 27894379.0, "Ops/sec/procs": 5578591.58, "ns per ops/procs": 179.26}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5008.743463, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 32825528.0, "Total blocks": 32825527.0, "Ops per second": 6553645.29, "ns per ops": 152.59, "Ops per threads": 6565105.0, "Ops per procs": 32825528.0, "Ops/sec/procs": 6553645.29, "ns per ops/procs": 152.59}],["rdq-cycle-go", "./rdq-cycle-go -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 138213098.0, "Ops per second": 27640977.5, "ns per ops": 36.18, "Ops per threads": 1727663.0, "Ops per procs": 8638318.0, "Ops/sec/procs": 1727561.09, "ns per ops/procs": 578.85}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5007.914168, "Number of processors": 4.0, "Number of 
threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 44109513.0, "Total blocks": 44109419.0, "Ops per second": 8807961.06, "ns per ops": 113.53, "Ops per threads": 2205475.0, "Ops per procs": 11027378.0, "Ops/sec/procs": 2201990.27, "ns per ops/procs": 454.13}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5012.121876, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 94130673.0, "Total blocks": 94130291.0, "Ops per second": 18780603.37, "ns per ops": 53.25, "Ops per threads": 1176633.0, "Ops per procs": 5883167.0, "Ops/sec/procs": 1173787.71, "ns per ops/procs": 851.94}],["rdq-cycle-go", "./rdq-cycle-go -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 140936367.0, "Ops per second": 28185668.38, "ns per ops": 35.48, "Ops per threads": 1761704.0, "Ops per procs": 8808522.0, "Ops/sec/procs": 1761604.27, "ns per ops/procs": 567.66}],["rdq-cycle-go", "./rdq-cycle-go -t 4 -p 4 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 4.0, "Number of threads": 20.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 44279585.0, "Ops per second": 8855475.01, "ns per ops": 112.92, "Ops per threads": 2213979.0, "Ops per procs": 11069896.0, "Ops/sec/procs": 2213868.75, "ns per ops/procs": 451.7}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5008.37392, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 32227534.0, "Total blocks": 32227533.0, "Ops per second": 6434730.02, "ns per ops": 155.41, "Ops per threads": 6445506.0, "Ops per procs": 32227534.0, "Ops/sec/procs": 6434730.02, "ns per ops/procs": 155.41}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 16 -p 16 -d 5 -r 5", {"Duration (ms)": 5011.019789, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size 
(# thrds)": 5.0, "Total Operations(ops)": 90600569.0, "Total blocks": 90600173.0, "Ops per second": 18080265.66, "ns per ops": 55.31, "Ops per threads": 1132507.0, "Ops per procs": 5662535.0, "Ops/sec/procs": 1130016.6, "ns per ops/procs": 884.94}],["rdq-cycle-cfa", "./rdq-cycle-cfa -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5008.52474, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 32861776.0, "Total blocks": 32861775.0, "Ops per second": 6561168.75, "ns per ops": 152.41, "Ops per threads": 6572355.0, "Ops per procs": 32861776.0, "Ops/sec/procs": 6561168.75, "ns per ops/procs": 152.41}],["rdq-cycle-go", "./rdq-cycle-go -t 1 -p 1 -d 5 -r 5", {"Duration (ms)": 5000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 28097680.0, "Ops per second": 5619274.9, "ns per ops": 177.96, "Ops per threads": 5619536.0, "Ops per procs": 28097680.0, "Ops/sec/procs": 5619274.9, "ns per ops/procs": 177.96}]]
+[["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 1138076440.0, "Ops per second": 113792094.48, "ns per ops": 8.79, "Ops per threads": 94839.0, "Ops per procs": 47419851.0, "Ops/sec/procs": 4741337.27, "ns per ops/procs": 210.91}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 200285.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 17638575791.0, "Ops per second": 88067238.72, "ns per ops": 11.35, "Ops per threads": 2204821.0, "Ops per procs": 1102410986.0, "Ops/sec/procs": 5504202.42, "ns per ops/procs": 181.68}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54856916.0, "Ops per second": 5485691.0, "ns per ops": 184.0, "Ops per threads": 109713.0, "Ops per procs": 54856916.0, "Ops/sec/procs": 5485691.0, "ns per ops/procs": 184.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10025.449006, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 558836360.0, "Total blocks": 558836360.0, "Ops per second": 55741778.71, "ns per ops": 17.94, "Ops per threads": 69854.0, "Ops per procs": 34927272.0, "Ops/sec/procs": 3483861.17, "ns per ops/procs": 287.04}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10038.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 58647049.0, "Total blocks": 58647049.0, "Ops per second": 5842287.68, "ns per ops": 171.17, "Ops per threads": 7330.0, "Ops per procs": 3665440.0, "Ops/sec/procs": 365142.98, "ns per ops/procs": 2738.65}],["rdq-cycle-cfa", 
"./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10003.489711, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 728096996.0, "Total blocks": 728096996.0, "Ops per second": 72784299.98, "ns per ops": 13.74, "Ops per threads": 60674.0, "Ops per procs": 30337374.0, "Ops/sec/procs": 3032679.17, "ns per ops/procs": 329.74}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10021.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 63157049.0, "Total blocks": 63157049.0, "Ops per second": 6302255.13, "ns per ops": 158.67, "Ops per threads": 15789.0, "Ops per procs": 7894631.0, "Ops/sec/procs": 787781.89, "ns per ops/procs": 1269.39}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10009.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62412200.0, "Total blocks": 62411700.0, "Ops per second": 6235572.31, "ns per ops": 160.37, "Ops per threads": 124824.0, "Ops per procs": 62412200.0, "Ops/sec/procs": 6235572.31, "ns per ops/procs": 160.37}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 464608617.0, "Ops per second": 46457191.42, "ns per ops": 21.53, "Ops per threads": 116152.0, "Ops per procs": 58076077.0, "Ops/sec/procs": 5807148.93, "ns per ops/procs": 172.2}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 391521066.0, "Ops per second": 39152106.0, "ns per ops": 25.0, "Ops per threads": 97880.0, "Ops per procs": 48940133.0, "Ops/sec/procs": 4894013.0, "ns per ops/procs": 206.0}],["rdq-cycle-tokio", 
"./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 963549550.0, "Ops per second": 96354955.0, "ns per ops": 10.0, "Ops per threads": 80295.0, "Ops per procs": 40147897.0, "Ops/sec/procs": 4014789.0, "ns per ops/procs": 251.0}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 867718190.0, "Ops per second": 86761170.55, "ns per ops": 11.53, "Ops per threads": 108464.0, "Ops per procs": 54232386.0, "Ops/sec/procs": 5422573.16, "ns per ops/procs": 184.41}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10100.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 962016289.0, "Ops per second": 96201628.0, "ns per ops": 10.0, "Ops per threads": 80168.0, "Ops per procs": 40084012.0, "Ops/sec/procs": 4008401.0, "ns per ops/procs": 251.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10016.837824, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54738237.0, "Total blocks": 54737741.0, "Ops per second": 5464622.46, "ns per ops": 183.0, "Ops per threads": 109476.0, "Ops per procs": 54738237.0, "Ops/sec/procs": 5464622.46, "ns per ops/procs": 183.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10099.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 731309408.0, "Ops per second": 73130940.0, "ns per ops": 13.0, "Ops per threads": 91413.0, "Ops per procs": 45706838.0, "Ops/sec/procs": 4570683.0, "ns per ops/procs": 220.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 1600", {"Duration 
(ms)": 10100.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 739772688.0, "Ops per second": 73977268.0, "ns per ops": 13.0, "Ops per threads": 92471.0, "Ops per procs": 46235793.0, "Ops/sec/procs": 4623579.0, "ns per ops/procs": 218.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10100.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 391449785.0, "Ops per second": 39144978.0, "ns per ops": 25.0, "Ops per threads": 97862.0, "Ops per procs": 48931223.0, "Ops/sec/procs": 4893122.0, "ns per ops/procs": 206.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10048.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 57239183.0, "Total blocks": 57239183.0, "Ops per second": 5696211.13, "ns per ops": 175.56, "Ops per threads": 4769.0, "Ops per procs": 2384965.0, "Ops/sec/procs": 237342.13, "ns per ops/procs": 4213.33}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55248375.0, "Ops per second": 5524562.87, "ns per ops": 181.01, "Ops per threads": 110496.0, "Ops per procs": 55248375.0, "Ops/sec/procs": 5524562.87, "ns per ops/procs": 181.01}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10021.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 61553053.0, "Total blocks": 61553053.0, "Ops per second": 6142186.88, "ns per ops": 162.81, "Ops per threads": 15388.0, "Ops per procs": 7694131.0, "Ops/sec/procs": 767773.36, "ns per ops/procs": 1302.47}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10008.0, "Number of processors": 
1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62811642.0, "Total blocks": 62811142.0, "Ops per second": 6275517.47, "ns per ops": 159.35, "Ops per threads": 125623.0, "Ops per procs": 62811642.0, "Ops/sec/procs": 6275517.47, "ns per ops/procs": 159.35}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10018.820873, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 260866706.0, "Total blocks": 260862710.0, "Ops per second": 26037665.44, "ns per ops": 38.41, "Ops per threads": 65216.0, "Ops per procs": 32608338.0, "Ops/sec/procs": 3254708.18, "ns per ops/procs": 307.25}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 874581175.0, "Ops per second": 87449851.2, "ns per ops": 11.44, "Ops per threads": 109322.0, "Ops per procs": 54661323.0, "Ops/sec/procs": 5465615.7, "ns per ops/procs": 182.96}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55228782.0, "Ops per second": 5522878.0, "ns per ops": 182.0, "Ops per threads": 110457.0, "Ops per procs": 55228782.0, "Ops/sec/procs": 5522878.0, "ns per ops/procs": 182.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10009.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62564955.0, "Total blocks": 62564455.0, "Ops per second": 6250797.96, "ns per ops": 159.98, "Ops per threads": 125129.0, "Ops per procs": 62564955.0, "Ops/sec/procs": 6250797.96, "ns per ops/procs": 159.98}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10100.0, "Number of processors": 16.0, 
"Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 738848909.0, "Ops per second": 73884890.0, "ns per ops": 13.0, "Ops per threads": 92356.0, "Ops per procs": 46178056.0, "Ops/sec/procs": 4617805.0, "ns per ops/procs": 218.0}],["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 1131221613.0, "Ops per second": 113108175.94, "ns per ops": 8.84, "Ops per threads": 94268.0, "Ops per procs": 47134233.0, "Ops/sec/procs": 4712840.66, "ns per ops/procs": 212.19}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10008.209159, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 729328104.0, "Total blocks": 729328099.0, "Ops per second": 72872987.81, "ns per ops": 13.72, "Ops per threads": 60777.0, "Ops per procs": 30388671.0, "Ops/sec/procs": 3036374.49, "ns per ops/procs": 329.34}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 961002611.0, "Ops per second": 96100261.0, "ns per ops": 10.0, "Ops per threads": 80083.0, "Ops per procs": 40041775.0, "Ops/sec/procs": 4004177.0, "ns per ops/procs": 252.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 390098231.0, "Ops per second": 39009823.0, "ns per ops": 25.0, "Ops per threads": 97524.0, "Ops per procs": 48762278.0, "Ops/sec/procs": 4876227.0, "ns per ops/procs": 207.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, 
"Total Operations(ops)": 55237591.0, "Ops per second": 5523759.0, "ns per ops": 182.0, "Ops per threads": 110475.0, "Ops per procs": 55237591.0, "Ops/sec/procs": 5523759.0, "ns per ops/procs": 182.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10016.576699, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54510321.0, "Total blocks": 54509820.0, "Ops per second": 5442011.04, "ns per ops": 183.76, "Ops per threads": 109020.0, "Ops per procs": 54510321.0, "Ops/sec/procs": 5442011.04, "ns per ops/procs": 183.76}],["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 1135730371.0, "Ops per second": 113558509.97, "ns per ops": 8.81, "Ops per threads": 94644.0, "Ops per procs": 47322098.0, "Ops/sec/procs": 4731604.58, "ns per ops/procs": 211.34}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10039.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 61004037.0, "Total blocks": 61004037.0, "Ops per second": 6076255.04, "ns per ops": 164.58, "Ops per threads": 7625.0, "Ops per procs": 3812752.0, "Ops/sec/procs": 379765.94, "ns per ops/procs": 2633.2}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10004.891999, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 747946345.0, "Total blocks": 747934349.0, "Ops per second": 74758062.86, "ns per ops": 13.38, "Ops per threads": 62328.0, "Ops per procs": 31164431.0, "Ops/sec/procs": 3114919.29, "ns per ops/procs": 321.04}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total 
Operations(ops)": 466424792.0, "Ops per second": 46638931.23, "ns per ops": 21.44, "Ops per threads": 116606.0, "Ops per procs": 58303099.0, "Ops/sec/procs": 5829866.4, "ns per ops/procs": 171.53}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10086.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 57343570.0, "Total blocks": 57343570.0, "Ops per second": 5685308.81, "ns per ops": 175.89, "Ops per threads": 4778.0, "Ops per procs": 2389315.0, "Ops/sec/procs": 236887.87, "ns per ops/procs": 4221.41}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10020.39533, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 263517289.0, "Total blocks": 263513293.0, "Ops per second": 26298093.07, "ns per ops": 38.03, "Ops per threads": 65879.0, "Ops per procs": 32939661.0, "Ops/sec/procs": 3287261.63, "ns per ops/procs": 304.2}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10025.357431, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 551670395.0, "Total blocks": 551662399.0, "Ops per second": 55027503.89, "ns per ops": 18.17, "Ops per threads": 68958.0, "Ops per procs": 34479399.0, "Ops/sec/procs": 3439218.99, "ns per ops/procs": 290.76}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 2400", {"Duration (ms)": 10050.0, "Number of processors": 24.0, "Number of threads": 12000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 56162695.0, "Total blocks": 56162695.0, "Ops per second": 5588033.65, "ns per ops": 178.95, "Ops per threads": 4680.0, "Ops per procs": 2340112.0, "Ops/sec/procs": 232834.74, "ns per ops/procs": 4294.89}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10019.690183, "Number of processors": 8.0, "Number of threads": 4000.0, 
"Cycle size (# thrds)": 5.0, "Total Operations(ops)": 271866976.0, "Total blocks": 271862980.0, "Ops per second": 27133271.69, "ns per ops": 36.86, "Ops per threads": 67966.0, "Ops per procs": 33983372.0, "Ops/sec/procs": 3391658.96, "ns per ops/procs": 294.84}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10057.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 62105022.0, "Total blocks": 62105022.0, "Ops per second": 6175186.04, "ns per ops": 161.94, "Ops per threads": 15526.0, "Ops per procs": 7763127.0, "Ops/sec/procs": 771898.25, "ns per ops/procs": 1295.51}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10025.81217, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 537080117.0, "Total blocks": 537072121.0, "Ops per second": 53569736.59, "ns per ops": 18.67, "Ops per threads": 67135.0, "Ops per procs": 33567507.0, "Ops/sec/procs": 3348108.54, "ns per ops/procs": 298.68}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55967030.0, "Ops per second": 5596438.25, "ns per ops": 178.69, "Ops per threads": 111934.0, "Ops per procs": 55967030.0, "Ops/sec/procs": 5596438.25, "ns per ops/procs": 178.69}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 55703320.0, "Ops per second": 5570084.72, "ns per ops": 179.53, "Ops per threads": 111406.0, "Ops per procs": 55703320.0, "Ops/sec/procs": 5570084.72, "ns per ops/procs": 179.53}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 800", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 4000.0, "Cycle size (# 
thrds)": 5.0, "Total Operations(ops)": 469211793.0, "Ops per second": 46918327.16, "ns per ops": 21.31, "Ops per threads": 117302.0, "Ops per procs": 58651474.0, "Ops/sec/procs": 5864790.9, "ns per ops/procs": 170.51}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 100", {"Duration (ms)": 10016.545208, "Number of processors": 1.0, "Number of threads": 500.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54925472.0, "Total blocks": 54924976.0, "Ops per second": 5483474.68, "ns per ops": 182.37, "Ops per threads": 109850.0, "Ops per procs": 54925472.0, "Ops/sec/procs": 5483474.68, "ns per ops/procs": 182.37}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 1600", {"Duration (ms)": 10037.0, "Number of processors": 16.0, "Number of threads": 8000.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 60770550.0, "Total blocks": 60770550.0, "Ops per second": 6054474.7, "ns per ops": 165.17, "Ops per threads": 7596.0, "Ops per procs": 3798159.0, "Ops/sec/procs": 378404.67, "ns per ops/procs": 2642.67}]]
Index: doc/theses/thierry_delisle_PhD/thesis/data/cycle.low.jax
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/cycle.low.jax	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
+++ doc/theses/thierry_delisle_PhD/thesis/data/cycle.low.jax	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -0,0 +1,1 @@
+[["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10012.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 76310077.0, "Ops per second": 7631007.0, "ns per ops": 131.0, "Ops per threads": 1907751.0, "Ops per procs": 9538759.0, "Ops/sec/procs": 953875.0, "ns per ops/procs": 1049.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10010.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 60971759.0, "Total blocks": 60971758.0, "Ops per second": 6090773.1, "ns per ops": 164.18, "Ops per threads": 762146.0, "Ops per procs": 3810734.0, "Ops/sec/procs": 380673.32, "ns per ops/procs": 2626.92}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10025.310277, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 181018643.0, "Total blocks": 181017892.0, "Ops per second": 18056163.65, "ns per ops": 55.38, "Ops per threads": 2262733.0, "Ops per procs": 11313665.0, "Ops/sec/procs": 1128510.23, "ns per ops/procs": 886.12}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10026.598882, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 194131436.0, "Total blocks": 194130800.0, "Ops per second": 19361643.79, "ns per ops": 51.65, "Ops per threads": 2426642.0, "Ops per procs": 12133214.0, "Ops/sec/procs": 1210102.74, "ns per ops/procs": 826.38}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10006.334698, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 227505184.0, "Total blocks": 227503833.0, "Ops per second": 22736115.76, "ns per ops": 43.98, "Ops per threads": 1895876.0, "Ops per procs": 9479382.0, "Ops/sec/procs": 947338.16, 
"ns per ops/procs": 1055.59}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10016.990169, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 66752749.0, "Total blocks": 66752748.0, "Ops per second": 6663952.73, "ns per ops": 150.06, "Ops per threads": 13350549.0, "Ops per procs": 66752749.0, "Ops/sec/procs": 6663952.73, "ns per ops/procs": 150.06}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 279630079.0, "Ops per second": 27961432.66, "ns per ops": 35.76, "Ops per threads": 3495375.0, "Ops per procs": 17476879.0, "Ops/sec/procs": 1747589.54, "ns per ops/procs": 572.22}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10007.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 59771852.0, "Total blocks": 59771852.0, "Ops per second": 5972444.0, "ns per ops": 167.44, "Ops per threads": 1494296.0, "Ops per procs": 7471481.0, "Ops/sec/procs": 746555.5, "ns per ops/procs": 1339.49}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 232398538.0, "Ops per second": 23239853.0, "ns per ops": 43.0, "Ops per threads": 1936654.0, "Ops per procs": 9683272.0, "Ops/sec/procs": 968327.0, "ns per ops/procs": 1043.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10020.460683, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 123722971.0, "Total blocks": 123722720.0, "Ops per second": 12347034.22, "ns per ops": 80.99, "Ops per threads": 3093074.0, "Ops per procs": 15465371.0, "Ops/sec/procs": 1543379.28, "ns per 
ops/procs": 647.93}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 56469367.0, "Ops per second": 5646680.63, "ns per ops": 177.1, "Ops per threads": 11293873.0, "Ops per procs": 56469367.0, "Ops/sec/procs": 5646680.63, "ns per ops/procs": 177.1}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10016.913984, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 66648794.0, "Total blocks": 66648790.0, "Ops per second": 6653625.47, "ns per ops": 150.29, "Ops per threads": 13329758.0, "Ops per procs": 66648794.0, "Ops/sec/procs": 6653625.47, "ns per ops/procs": 150.29}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 178174877.0, "Ops per second": 17816504.68, "ns per ops": 56.13, "Ops per threads": 4454371.0, "Ops per procs": 22271859.0, "Ops/sec/procs": 2227063.08, "ns per ops/procs": 449.02}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10004.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 59200307.0, "Total blocks": 59200307.0, "Ops per second": 5917304.82, "ns per ops": 169.0, "Ops per threads": 493335.0, "Ops per procs": 2466679.0, "Ops/sec/procs": 246554.37, "ns per ops/procs": 4055.9}],["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10000.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 489892922.0, "Ops per second": 48986097.69, "ns per ops": 20.41, "Ops per threads": 4082441.0, "Ops per procs": 20412205.0, "Ops/sec/procs": 2041087.4, "ns per ops/procs": 489.93}],["rdq-cycle-tokio", 
"./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10100.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 267579722.0, "Ops per second": 26757972.0, "ns per ops": 37.0, "Ops per threads": 2229831.0, "Ops per procs": 11149155.0, "Ops/sec/procs": 1114915.0, "ns per ops/procs": 905.0}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10002.567137, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 221454282.0, "Total blocks": 221452348.0, "Ops per second": 22139744.62, "ns per ops": 45.17, "Ops per threads": 1845452.0, "Ops per procs": 9227261.0, "Ops/sec/procs": 922489.36, "ns per ops/procs": 1084.02}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10020.640204, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 119212534.0, "Total blocks": 119212291.0, "Ops per second": 11896698.37, "ns per ops": 84.06, "Ops per threads": 2980313.0, "Ops per procs": 14901566.0, "Ops/sec/procs": 1487087.3, "ns per ops/procs": 672.46}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10015.706272, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 66731723.0, "Total blocks": 66731719.0, "Ops per second": 6662707.67, "ns per ops": 150.09, "Ops per threads": 13346344.0, "Ops per procs": 66731723.0, "Ops/sec/procs": 6662707.67, "ns per ops/procs": 150.09}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 152568234.0, "Ops per second": 15255771.25, "ns per ops": 65.55, "Ops per threads": 3814205.0, "Ops per procs": 19071029.0, "Ops/sec/procs": 1906971.41, "ns per ops/procs": 524.39}],["rdq-cycle-go", 
"./rdq-cycle-go -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 289322235.0, "Ops per second": 28930586.92, "ns per ops": 34.57, "Ops per threads": 3616527.0, "Ops per procs": 18082639.0, "Ops/sec/procs": 1808161.68, "ns per ops/procs": 553.05}],["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10000.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 487856302.0, "Ops per second": 48781608.3, "ns per ops": 20.5, "Ops per threads": 4065469.0, "Ops per procs": 20327345.0, "Ops/sec/procs": 2032567.01, "ns per ops/procs": 491.99}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10009.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 76846283.0, "Ops per second": 7684628.0, "ns per ops": 130.0, "Ops per threads": 1921157.0, "Ops per procs": 9605785.0, "Ops/sec/procs": 960578.0, "ns per ops/procs": 1042.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10002.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 57552105.0, "Total blocks": 57552103.0, "Ops per second": 5753926.0, "ns per ops": 173.79, "Ops per threads": 479600.0, "Ops per procs": 2398004.0, "Ops/sec/procs": 239746.92, "ns per ops/procs": 4171.07}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10016.759028, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 123970604.0, "Total blocks": 123970351.0, "Ops per second": 12376318.89, "ns per ops": 80.8, "Ops per threads": 3099265.0, "Ops per procs": 15496325.0, "Ops/sec/procs": 1547039.86, "ns per ops/procs": 646.4}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 8", 
{"Duration (ms)": 10007.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 61716938.0, "Total blocks": 61716938.0, "Ops per second": 6166945.93, "ns per ops": 162.15, "Ops per threads": 1542923.0, "Ops per procs": 7714617.0, "Ops/sec/procs": 770868.24, "ns per ops/procs": 1297.24}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 70872904.0, "Total blocks": 70872899.0, "Ops per second": 7081970.19, "ns per ops": 141.2, "Ops per threads": 14174580.0, "Ops per procs": 70872904.0, "Ops/sec/procs": 7081970.19, "ns per ops/procs": 141.2}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 156397978.0, "Ops per second": 15639797.0, "ns per ops": 63.0, "Ops per threads": 1954974.0, "Ops per procs": 9774873.0, "Ops/sec/procs": 977487.0, "ns per ops/procs": 1023.0}],["rdq-cycle-go", "./rdq-cycle-go -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10000.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 150292371.0, "Ops per second": 15028431.57, "ns per ops": 66.54, "Ops per threads": 3757309.0, "Ops per procs": 18786546.0, "Ops/sec/procs": 1878553.95, "ns per ops/procs": 532.32}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10009.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 59322364.0, "Total blocks": 59322364.0, "Ops per second": 5926827.38, "ns per ops": 168.72, "Ops per threads": 741529.0, "Ops per procs": 3707647.0, "Ops/sec/procs": 370426.71, "ns per ops/procs": 2699.59}],["rdq-cycle-go", "./rdq-cycle-go -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10000.0, 
"Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 484392787.0, "Ops per second": 48436133.63, "ns per ops": 20.65, "Ops per threads": 4036606.0, "Ops per procs": 20183032.0, "Ops/sec/procs": 2018172.23, "ns per ops/procs": 495.5}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 71150883.0, "Total blocks": 71150878.0, "Ops per second": 7109813.4, "ns per ops": 140.65, "Ops per threads": 14230176.0, "Ops per procs": 71150883.0, "Ops/sec/procs": 7109813.4, "ns per ops/procs": 140.65}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10008.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 80740751.0, "Ops per second": 8074075.0, "ns per ops": 123.0, "Ops per threads": 2018518.0, "Ops per procs": 10092593.0, "Ops/sec/procs": 1009259.0, "ns per ops/procs": 991.0}],["rdq-cycle-go", "./rdq-cycle-go -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 288121315.0, "Ops per second": 28809957.03, "ns per ops": 34.71, "Ops per threads": 3601516.0, "Ops per procs": 18007582.0, "Ops/sec/procs": 1800622.31, "ns per ops/procs": 555.36}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10100.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 153491548.0, "Ops per second": 15349154.0, "ns per ops": 65.0, "Ops per threads": 1918644.0, "Ops per procs": 9593221.0, "Ops/sec/procs": 959322.0, "ns per ops/procs": 1052.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10100.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# 
thrds)": 5.0, "Total Operations(ops)": 265150851.0, "Ops per second": 26515085.0, "ns per ops": 38.0, "Ops per threads": 2209590.0, "Ops per procs": 11047952.0, "Ops/sec/procs": 1104795.0, "ns per ops/procs": 914.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54873960.0, "Ops per second": 5487396.0, "ns per ops": 184.0, "Ops per threads": 10974792.0, "Ops per procs": 54873960.0, "Ops/sec/procs": 5487396.0, "ns per ops/procs": 184.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 164735691.0, "Ops per second": 16473569.0, "ns per ops": 60.0, "Ops per threads": 2059196.0, "Ops per procs": 10295980.0, "Ops/sec/procs": 1029598.0, "ns per ops/procs": 971.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 71038106.0, "Total blocks": 71038101.0, "Ops per second": 7098555.38, "ns per ops": 140.87, "Ops per threads": 14207621.0, "Ops per procs": 71038106.0, "Ops/sec/procs": 7098555.38, "ns per ops/procs": 140.87}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10007.037227, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 233110848.0, "Total blocks": 233109436.0, "Ops per second": 23294691.8, "ns per ops": 42.93, "Ops per threads": 1942590.0, "Ops per procs": 9712952.0, "Ops/sec/procs": 970612.16, "ns per ops/procs": 1030.28}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10012.0, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 
60897008.0, "Total blocks": 60897007.0, "Ops per second": 6082228.89, "ns per ops": 164.41, "Ops per threads": 761212.0, "Ops per procs": 3806063.0, "Ops/sec/procs": 380139.31, "ns per ops/procs": 2630.61}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 56664861.0, "Ops per second": 5666229.66, "ns per ops": 176.48, "Ops per threads": 11332972.0, "Ops per procs": 56664861.0, "Ops/sec/procs": 5666229.66, "ns per ops/procs": 176.48}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54918505.0, "Ops per second": 5491850.0, "ns per ops": 183.0, "Ops per threads": 10983701.0, "Ops per procs": 54918505.0, "Ops/sec/procs": 5491850.0, "ns per ops/procs": 183.0}],["rdq-cycle-tokio", "./rdq-cycle-tokio -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 54930921.0, "Ops per second": 5493092.0, "ns per ops": 183.0, "Ops per threads": 10986184.0, "Ops per procs": 54930921.0, "Ops/sec/procs": 5493092.0, "ns per ops/procs": 183.0}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 24 -d 10 -r 5 -t 24", {"Duration (ms)": 10003.0, "Number of processors": 24.0, "Number of threads": 120.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 56683908.0, "Total blocks": 56683908.0, "Ops per second": 5666230.51, "ns per ops": 176.48, "Ops per threads": 472365.0, "Ops per procs": 2361829.0, "Ops/sec/procs": 236092.94, "ns per ops/procs": 4235.62}],["rdq-cycle-fibre", "./rdq-cycle-fibre -p 8 -d 10 -r 5 -t 8", {"Duration (ms)": 10007.0, "Number of processors": 8.0, "Number of threads": 40.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 63241275.0, "Total blocks": 63241275.0, "Ops per second": 
6319199.64, "ns per ops": 158.25, "Ops per threads": 1581031.0, "Ops per procs": 7905159.0, "Ops/sec/procs": 789899.95, "ns per ops/procs": 1265.98}],["rdq-cycle-go", "./rdq-cycle-go -p 1 -d 10 -r 5 -t 1", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 5.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 56846729.0, "Ops per second": 5684418.62, "ns per ops": 175.92, "Ops per threads": 11369345.0, "Ops per procs": 56846729.0, "Ops/sec/procs": 5684418.62, "ns per ops/procs": 175.92}],["rdq-cycle-cfa", "./rdq-cycle-cfa -p 16 -d 10 -r 5 -t 16", {"Duration (ms)": 10024.602154, "Number of processors": 16.0, "Number of threads": 80.0, "Cycle size (# thrds)": 5.0, "Total Operations(ops)": 189976405.0, "Total blocks": 189975754.0, "Ops per second": 18951016.92, "ns per ops": 52.77, "Ops per threads": 2374705.0, "Ops per procs": 11873525.0, "Ops/sec/procs": 1184438.56, "ns per ops/procs": 844.28}]]
Index: doc/theses/thierry_delisle_PhD/thesis/data/memcd.rate
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/memcd.rate	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
+++ doc/theses/thierry_delisle_PhD/thesis/data/memcd.rate	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -0,0 +1,1 @@
+[["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 498755.2, "Median Read Latency": 3190.7, "Tail Read Latency": 225397.5, "Median Update Latency": 2830.3, "Tail Update Latency": 226163.8}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 199980.9, "Median Read Latency": 95.5, "Tail Read Latency": 170.9, "Median Update Latency": 100.7, "Tail Update Latency": 176.2}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 399978.4, "Median Read Latency": 117.8, "Tail Read Latency": 1207.1, "Median Update Latency": 121.9, "Tail Update Latency": 1336.8}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 612437.4, "Median Read Latency": 15884.0, "Tail Read Latency": 248252.4, "Median Update Latency": 15749.0, "Tail Update Latency": 247485.6}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 399937.0, "Median Read Latency": 111.9, "Tail Read Latency": 869.0, "Median Update Latency": 117.5, "Tail Update Latency": 880.8}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100019.3, "Median Read Latency": 85.8, "Tail Read Latency": 161.8, "Median Update Latency": 92.2, "Tail Update Latency": 169.1}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499962.9, "Median Read Latency": 185.6, "Tail Read Latency": 12305.1, "Median Update Latency": 250.1, "Tail Update Latency": 12317.4}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100007.8, "Median Read Latency": 85.9, "Tail Read Latency": 161.0, "Median Update Latency": 92.3, "Tail Update Latency": 168.9}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 557475.3, "Median Read Latency": 18115.8, "Tail Read Latency": 251927.5, "Median Update Latency": 18299.3, "Tail Update Latency": 250306.8}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200004.6, "Median Read Latency": 95.3, "Tail Read Latency": 173.3, "Median Update Latency": 99.9, "Tail 
Update Latency": 180.6}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 597071.5, "Median Read Latency": 13212.4, "Tail Read Latency": 62539.9, "Median Update Latency": 13231.2, "Tail Update Latency": 69653.7}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 199978.5, "Median Read Latency": 95.5, "Tail Read Latency": 177.6, "Median Update Latency": 100.3, "Tail Update Latency": 186.7}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 614246.9, "Median Read Latency": 16225.4, "Tail Read Latency": 250527.9, "Median Update Latency": 16499.8, "Tail Update Latency": 256449.6}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300008.6, "Median Read Latency": 87.5, "Tail Read Latency": 183.5, "Median Update Latency": 93.1, "Tail Update Latency": 190.9}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100006.9, "Median Read Latency": 85.4, "Tail Read Latency": 177.6, "Median Update Latency": 92.0, "Tail Update Latency": 194.1}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200005.2, "Median Read Latency": 95.3, "Tail Read Latency": 173.8, "Median Update Latency": 99.8, "Tail Update Latency": 183.7}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 572663.5, "Median Read Latency": 371.4, "Tail Read Latency": 227972.8, "Median Update Latency": 1399.1, "Tail Update Latency": 226684.5}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 622639.2, "Median Read Latency": 5780.5, "Tail Read Latency": 230039.3, "Median Update Latency": 7841.8, "Tail Update Latency": 229186.6}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 592141.1, "Median Read Latency": 13408.6, "Tail Read Latency": 48231.9, "Median Update Latency": 13507.0, "Tail Update Latency": 49970.4}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 299997.1, "Median Read Latency": 91.3, "Tail Read Latency": 346.2, 
"Median Update Latency": 97.8, "Tail Update Latency": 359.3}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 299972.9, "Median Read Latency": 100.3, "Tail Read Latency": 454.5, "Median Update Latency": 106.7, "Tail Update Latency": 436.9}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 598258.7, "Median Read Latency": 13440.2, "Tail Read Latency": 50537.1, "Median Update Latency": 13527.6, "Tail Update Latency": 47965.6}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 299962.2, "Median Read Latency": 88.0, "Tail Read Latency": 181.4, "Median Update Latency": 93.3, "Tail Update Latency": 187.2}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 602219.4, "Median Read Latency": 25297.0, "Tail Read Latency": 250896.6, "Median Update Latency": 25038.5, "Tail Update Latency": 251507.9}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 564998.0, "Median Read Latency": 20010.7, "Tail Read Latency": 250571.5, "Median Update Latency": 20091.0, "Tail Update Latency": 250161.2}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499891.1, "Median Read Latency": 7175.0, "Tail Read Latency": 220926.7, "Median Update Latency": 7105.9, "Tail Update Latency": 221994.0}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499974.2, "Median Read Latency": 211.4, "Tail Read Latency": 11680.1, "Median Update Latency": 829.2, "Tail Update Latency": 11990.8}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300012.7, "Median Read Latency": 97.1, "Tail Read Latency": 222.8, "Median Update Latency": 101.7, "Tail Update Latency": 238.1}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300070.5, "Median Read Latency": 96.8, "Tail Read Latency": 220.3, "Median Update Latency": 101.7, "Tail Update Latency": 237.8}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100008.7, "Median Read Latency": 85.6, 
"Tail Read Latency": 163.0, "Median Update Latency": 92.3, "Tail Update Latency": 175.3}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400028.9, "Median Read Latency": 114.8, "Tail Read Latency": 956.1, "Median Update Latency": 120.3, "Tail Update Latency": 1350.3}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300055.6, "Median Read Latency": 86.4, "Tail Read Latency": 175.9, "Median Update Latency": 91.2, "Tail Update Latency": 180.7}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 673707.6, "Median Read Latency": 6827.8, "Tail Read Latency": 231197.0, "Median Update Latency": 7140.8, "Tail Update Latency": 231287.8}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 601228.5, "Median Read Latency": 12871.0, "Tail Read Latency": 219146.6, "Median Update Latency": 12852.8, "Tail Update Latency": 56501.0}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300015.0, "Median Read Latency": 100.1, "Tail Read Latency": 481.1, "Median Update Latency": 105.7, "Tail Update Latency": 488.7}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 99997.9, "Median Read Latency": 85.4, "Tail Read Latency": 167.7, "Median Update Latency": 92.4, "Tail Update Latency": 173.4}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 200021.1, "Median Read Latency": 88.1, "Tail Read Latency": 244.9, "Median Update Latency": 95.0, "Tail Update Latency": 253.9}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400017.0, "Median Read Latency": 113.1, "Tail Read Latency": 849.1, "Median Update Latency": 119.1, "Tail Update Latency": 882.8}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 562604.7, "Median Read Latency": 19819.4, "Tail Read Latency": 249845.8, "Median Update Latency": 19871.1, "Tail Update Latency": 249555.6}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 572312.9, "Median 
Read Latency": 6524.3, "Tail Read Latency": 228864.5, "Median Update Latency": 2602.5, "Tail Update Latency": 228616.6}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 621196.7, "Median Read Latency": 2295.2, "Tail Read Latency": 231836.0, "Median Update Latency": 1441.1, "Tail Update Latency": 230955.6}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 595177.6, "Median Read Latency": 13372.4, "Tail Read Latency": 46883.0, "Median Update Latency": 13426.8, "Tail Update Latency": 64667.1}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400026.7, "Median Read Latency": 115.7, "Tail Read Latency": 881.2, "Median Update Latency": 121.1, "Tail Update Latency": 929.1}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 498000.8, "Median Read Latency": 4060.3, "Tail Read Latency": 226676.1, "Median Update Latency": 2978.6, "Tail Update Latency": 225731.4}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 579656.7, "Median Read Latency": 12225.7, "Tail Read Latency": 219760.8, "Median Update Latency": 12238.9, "Tail Update Latency": 203698.4}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 200001.8, "Median Read Latency": 88.1, "Tail Read Latency": 240.2, "Median Update Latency": 94.6, "Tail Update Latency": 251.4}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 594850.0, "Median Read Latency": 13333.6, "Tail Read Latency": 52682.0, "Median Update Latency": 13351.9, "Tail Update Latency": 56204.4}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499921.3, "Median Read Latency": 6662.5, "Tail Read Latency": 219673.4, "Median Update Latency": 6837.7, "Tail Update Latency": 211568.1}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 572487.8, "Median Read Latency": 10378.4, "Tail Read Latency": 226976.4, "Median Update Latency": 10403.8, "Tail Update Latency": 227094.8}], ["forall", "memcached 
forall", {"Target QPS": 800000, "Actual QPS": 594974.2, "Median Read Latency": 13359.6, "Tail Read Latency": 41942.6, "Median Update Latency": 13334.3, "Tail Update Latency": 44809.8}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 199997.8, "Median Read Latency": 78.3, "Tail Read Latency": 128.0, "Median Update Latency": 82.8, "Tail Update Latency": 134.8}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400037.0, "Median Read Latency": 115.5, "Tail Read Latency": 996.4, "Median Update Latency": 120.1, "Tail Update Latency": 1664.7}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 500010.9, "Median Read Latency": 165.0, "Tail Read Latency": 12206.4, "Median Update Latency": 179.4, "Tail Update Latency": 12184.2}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200000.1, "Median Read Latency": 95.5, "Tail Read Latency": 180.0, "Median Update Latency": 99.7, "Tail Update Latency": 182.7}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 99978.3, "Median Read Latency": 77.9, "Tail Read Latency": 118.1, "Median Update Latency": 83.2, "Tail Update Latency": 128.9}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 399987.9, "Median Read Latency": 108.4, "Tail Read Latency": 539.7, "Median Update Latency": 113.9, "Tail Update Latency": 539.2}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499820.4, "Median Read Latency": 5658.7, "Tail Read Latency": 212053.1, "Median Update Latency": 5741.1, "Tail Update Latency": 50321.8}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400070.7, "Median Read Latency": 111.9, "Tail Read Latency": 890.4, "Median Update Latency": 117.0, "Tail Update Latency": 828.8}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 601775.8, "Median Read Latency": 23117.3, "Tail Read Latency": 250232.9, "Median Update Latency": 23116.7, "Tail Update Latency": 
246551.8}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100001.3, "Median Read Latency": 92.8, "Tail Read Latency": 140.9, "Median Update Latency": 98.0, "Tail Update Latency": 152.9}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 574754.4, "Median Read Latency": 357.0, "Tail Read Latency": 231151.6, "Median Update Latency": 557.4, "Tail Update Latency": 229444.1}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 573363.3, "Median Read Latency": 9889.6, "Tail Read Latency": 229927.1, "Median Update Latency": 9906.8, "Tail Update Latency": 231207.8}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 679908.4, "Median Read Latency": 5451.8, "Tail Read Latency": 230889.9, "Median Update Latency": 5681.1, "Tail Update Latency": 232091.7}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 609366.0, "Median Read Latency": 16768.6, "Tail Read Latency": 247631.3, "Median Update Latency": 17033.4, "Tail Update Latency": 253910.6}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200039.8, "Median Read Latency": 95.6, "Tail Read Latency": 174.6, "Median Update Latency": 100.5, "Tail Update Latency": 180.7}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 596702.7, "Median Read Latency": 13428.9, "Tail Read Latency": 51857.9, "Median Update Latency": 13435.2, "Tail Update Latency": 83668.0}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 576584.3, "Median Read Latency": 10177.2, "Tail Read Latency": 228562.7, "Median Update Latency": 10194.1, "Tail Update Latency": 227658.5}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499992.8, "Median Read Latency": 162.7, "Tail Read Latency": 11374.3, "Median Update Latency": 167.4, "Tail Update Latency": 11372.9}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 595017.0, "Median Read Latency": 17998.2, "Tail Read Latency": 
243922.3, "Median Update Latency": 18344.5, "Tail Update Latency": 239502.7}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 199994.0, "Median Read Latency": 88.5, "Tail Read Latency": 239.3, "Median Update Latency": 95.4, "Tail Update Latency": 248.8}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 299957.9, "Median Read Latency": 105.1, "Tail Read Latency": 274.5, "Median Update Latency": 109.5, "Tail Update Latency": 287.3}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200018.7, "Median Read Latency": 95.2, "Tail Read Latency": 172.9, "Median Update Latency": 100.4, "Tail Update Latency": 174.6}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 500038.3, "Median Read Latency": 211.6, "Tail Read Latency": 11382.7, "Median Update Latency": 206.1, "Tail Update Latency": 11382.7}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 597722.5, "Median Read Latency": 13397.7, "Tail Read Latency": 58411.2, "Median Update Latency": 13387.9, "Tail Update Latency": 68941.3}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100030.7, "Median Read Latency": 85.7, "Tail Read Latency": 163.9, "Median Update Latency": 92.0, "Tail Update Latency": 171.4}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 558818.6, "Median Read Latency": 13363.8, "Tail Read Latency": 248229.0, "Median Update Latency": 13350.2, "Tail Update Latency": 249960.2}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 621757.3, "Median Read Latency": 5239.7, "Tail Read Latency": 234406.2, "Median Update Latency": 6894.0, "Tail Update Latency": 234114.4}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 679182.4, "Median Read Latency": 5967.0, "Tail Read Latency": 228719.6, "Median Update Latency": 6772.4, "Tail Update Latency": 228625.2}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200045.5, "Median 
Read Latency": 79.2, "Tail Read Latency": 128.4, "Median Update Latency": 84.2, "Tail Update Latency": 137.4}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 99987.1, "Median Read Latency": 93.3, "Tail Read Latency": 141.6, "Median Update Latency": 98.5, "Tail Update Latency": 152.4}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300010.7, "Median Read Latency": 92.5, "Tail Read Latency": 349.6, "Median Update Latency": 99.4, "Tail Update Latency": 379.9}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400028.0, "Median Read Latency": 113.6, "Tail Read Latency": 841.4, "Median Update Latency": 120.0, "Tail Update Latency": 882.9}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 593569.4, "Median Read Latency": 13403.1, "Tail Read Latency": 39646.1, "Median Update Latency": 13399.0, "Tail Update Latency": 37874.8}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400020.1, "Median Read Latency": 107.8, "Tail Read Latency": 564.8, "Median Update Latency": 112.1, "Tail Update Latency": 551.0}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 597114.9, "Median Read Latency": 18109.7, "Tail Read Latency": 246701.4, "Median Update Latency": 17982.1, "Tail Update Latency": 247698.7}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 592504.7, "Median Read Latency": 13426.0, "Tail Read Latency": 44154.2, "Median Update Latency": 13401.8, "Tail Update Latency": 46925.7}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300004.6, "Median Read Latency": 87.4, "Tail Read Latency": 181.7, "Median Update Latency": 92.3, "Tail Update Latency": 184.6}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 199991.6, "Median Read Latency": 87.7, "Tail Read Latency": 238.7, "Median Update Latency": 94.3, "Tail Update Latency": 241.1}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 
300039.2, "Median Read Latency": 100.0, "Tail Read Latency": 465.5, "Median Update Latency": 105.5, "Tail Update Latency": 437.3}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 555859.2, "Median Read Latency": 14324.7, "Tail Read Latency": 252224.7, "Median Update Latency": 14183.0, "Tail Update Latency": 253064.7}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 591884.8, "Median Read Latency": 18852.4, "Tail Read Latency": 245546.0, "Median Update Latency": 18781.5, "Tail Update Latency": 251330.1}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 560411.7, "Median Read Latency": 16212.0, "Tail Read Latency": 249414.2, "Median Update Latency": 16315.3, "Tail Update Latency": 252118.3}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499875.4, "Median Read Latency": 7148.7, "Tail Read Latency": 221728.4, "Median Update Latency": 6854.7, "Tail Update Latency": 223478.5}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 603884.3, "Median Read Latency": 12495.1, "Tail Read Latency": 220449.1, "Median Update Latency": 12489.2, "Tail Update Latency": 220650.4}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 686509.4, "Median Read Latency": 6617.1, "Tail Read Latency": 231746.5, "Median Update Latency": 6934.3, "Tail Update Latency": 232363.0}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 623145.4, "Median Read Latency": 5661.8, "Tail Read Latency": 229372.2, "Median Update Latency": 6085.7, "Tail Update Latency": 230691.9}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100019.7, "Median Read Latency": 93.3, "Tail Read Latency": 142.6, "Median Update Latency": 98.3, "Tail Update Latency": 152.2}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499889.0, "Median Read Latency": 155.9, "Tail Read Latency": 12059.8, "Median Update Latency": 169.9, "Tail Update Latency": 12072.0}], 
["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100018.5, "Median Read Latency": 86.6, "Tail Read Latency": 164.3, "Median Update Latency": 92.9, "Tail Update Latency": 179.2}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100008.8, "Median Read Latency": 85.5, "Tail Read Latency": 162.2, "Median Update Latency": 92.2, "Tail Update Latency": 169.4}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 559071.8, "Median Read Latency": 13053.0, "Tail Read Latency": 247054.3, "Median Update Latency": 13181.4, "Tail Update Latency": 246805.2}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100016.0, "Median Read Latency": 78.2, "Tail Read Latency": 119.3, "Median Update Latency": 84.1, "Tail Update Latency": 130.7}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400025.2, "Median Read Latency": 104.6, "Tail Read Latency": 513.5, "Median Update Latency": 108.7, "Tail Update Latency": 505.0}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100009.2, "Median Read Latency": 93.0, "Tail Read Latency": 143.2, "Median Update Latency": 98.0, "Tail Update Latency": 154.2}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 399978.2, "Median Read Latency": 104.6, "Tail Read Latency": 482.3, "Median Update Latency": 109.5, "Tail Update Latency": 481.6}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 399992.4, "Median Read Latency": 115.3, "Tail Read Latency": 1018.2, "Median Update Latency": 121.2, "Tail Update Latency": 1420.5}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 200039.6, "Median Read Latency": 88.3, "Tail Read Latency": 235.4, "Median Update Latency": 94.9, "Tail Update Latency": 242.1}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 498725.8, "Median Read Latency": 3893.4, "Tail Read Latency": 227347.3, "Median Update Latency": 3733.9, "Tail Update Latency": 
227619.0}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200028.7, "Median Read Latency": 79.2, "Tail Read Latency": 129.6, "Median Update Latency": 84.3, "Tail Update Latency": 138.4}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 620433.1, "Median Read Latency": 21839.7, "Tail Read Latency": 249643.8, "Median Update Latency": 22022.9, "Tail Update Latency": 249993.9}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 569787.5, "Median Read Latency": 3043.0, "Tail Read Latency": 226727.4, "Median Update Latency": 6870.7, "Tail Update Latency": 227823.6}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 597059.4, "Median Read Latency": 13321.6, "Tail Read Latency": 45190.1, "Median Update Latency": 13316.7, "Tail Update Latency": 44028.0}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300053.7, "Median Read Latency": 86.8, "Tail Read Latency": 184.7, "Median Update Latency": 92.1, "Tail Update Latency": 192.5}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100014.4, "Median Read Latency": 93.1, "Tail Read Latency": 141.1, "Median Update Latency": 98.9, "Tail Update Latency": 150.9}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 618846.7, "Median Read Latency": 902.7, "Tail Read Latency": 227141.2, "Median Update Latency": 637.8, "Tail Update Latency": 224313.9}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 595532.7, "Median Read Latency": 16978.5, "Tail Read Latency": 248593.5, "Median Update Latency": 16895.7, "Tail Update Latency": 250405.5}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 620493.4, "Median Read Latency": 19597.9, "Tail Read Latency": 247174.2, "Median Update Latency": 19579.3, "Tail Update Latency": 250788.1}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 399994.3, "Median Read Latency": 103.5, "Tail Read Latency": 456.3, 
"Median Update Latency": 108.6, "Tail Update Latency": 464.3}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 572443.4, "Median Read Latency": 5819.9, "Tail Read Latency": 229576.2, "Median Update Latency": 5419.3, "Tail Update Latency": 230185.9}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200006.2, "Median Read Latency": 95.2, "Tail Read Latency": 174.1, "Median Update Latency": 99.8, "Tail Update Latency": 183.5}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 675884.4, "Median Read Latency": 5168.9, "Tail Read Latency": 232226.3, "Median Update Latency": 5221.0, "Tail Update Latency": 234665.5}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300048.0, "Median Read Latency": 104.7, "Tail Read Latency": 274.3, "Median Update Latency": 110.1, "Tail Update Latency": 270.9}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 593973.5, "Median Read Latency": 13469.9, "Tail Read Latency": 48997.9, "Median Update Latency": 13481.7, "Tail Update Latency": 43736.0}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400014.9, "Median Read Latency": 114.7, "Tail Read Latency": 985.2, "Median Update Latency": 117.9, "Tail Update Latency": 836.7}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100023.3, "Median Read Latency": 92.3, "Tail Read Latency": 140.0, "Median Update Latency": 97.5, "Tail Update Latency": 150.4}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200024.8, "Median Read Latency": 78.3, "Tail Read Latency": 127.6, "Median Update Latency": 82.7, "Tail Update Latency": 133.8}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 299955.1, "Median Read Latency": 92.6, "Tail Read Latency": 352.0, "Median Update Latency": 99.0, "Tail Update Latency": 340.3}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499993.2, "Median Read Latency": 496.8, "Tail Read 
Latency": 11875.1, "Median Update Latency": 347.0, "Tail Update Latency": 11798.5}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400080.4, "Median Read Latency": 105.7, "Tail Read Latency": 515.6, "Median Update Latency": 110.8, "Tail Update Latency": 518.0}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 621476.0, "Median Read Latency": 4652.6, "Tail Read Latency": 228156.0, "Median Update Latency": 4843.4, "Tail Update Latency": 226596.6}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 618279.2, "Median Read Latency": 4429.6, "Tail Read Latency": 228611.1, "Median Update Latency": 4362.4, "Tail Update Latency": 226378.2}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 593594.2, "Median Read Latency": 13393.0, "Tail Read Latency": 57221.3, "Median Update Latency": 13422.7, "Tail Update Latency": 80690.8}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200022.4, "Median Read Latency": 95.8, "Tail Read Latency": 172.8, "Median Update Latency": 100.5, "Tail Update Latency": 185.4}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 560918.1, "Median Read Latency": 16358.1, "Tail Read Latency": 253540.6, "Median Update Latency": 16212.4, "Tail Update Latency": 251232.8}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 669430.7, "Median Read Latency": 5583.0, "Tail Read Latency": 231084.5, "Median Update Latency": 6019.7, "Tail Update Latency": 232056.7}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 399991.3, "Median Read Latency": 106.0, "Tail Read Latency": 530.9, "Median Update Latency": 111.6, "Tail Update Latency": 541.9}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 399954.1, "Median Read Latency": 115.9, "Tail Read Latency": 1160.1, "Median Update Latency": 121.1, "Tail Update Latency": 1101.6}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 
606298.3, "Median Read Latency": 12500.1, "Tail Read Latency": 221340.8, "Median Update Latency": 12479.4, "Tail Update Latency": 220126.4}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 677365.7, "Median Read Latency": 5572.2, "Tail Read Latency": 232186.6, "Median Update Latency": 5377.3, "Tail Update Latency": 234817.9}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 560278.3, "Median Read Latency": 15552.2, "Tail Read Latency": 251034.1, "Median Update Latency": 15592.0, "Tail Update Latency": 246436.6}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 595753.0, "Median Read Latency": 23720.5, "Tail Read Latency": 249422.3, "Median Update Latency": 23512.1, "Tail Update Latency": 249583.9}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400063.9, "Median Read Latency": 113.4, "Tail Read Latency": 862.6, "Median Update Latency": 120.0, "Tail Update Latency": 817.9}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499823.1, "Median Read Latency": 7675.7, "Tail Read Latency": 221134.1, "Median Update Latency": 7669.5, "Tail Update Latency": 221961.7}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 399924.2, "Median Read Latency": 113.2, "Tail Read Latency": 860.6, "Median Update Latency": 119.8, "Tail Update Latency": 955.3}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300058.2, "Median Read Latency": 87.1, "Tail Read Latency": 181.8, "Median Update Latency": 91.9, "Tail Update Latency": 186.4}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100027.5, "Median Read Latency": 79.1, "Tail Read Latency": 123.2, "Median Update Latency": 84.7, "Tail Update Latency": 132.5}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 598396.1, "Median Read Latency": 15300.7, "Tail Read Latency": 244675.0, "Median Update Latency": 15021.1, "Tail Update Latency": 241147.1}], ["vanilla", 
"memcached vanilla", {"Target QPS": 600000, "Actual QPS": 572232.9, "Median Read Latency": 5540.0, "Tail Read Latency": 226307.6, "Median Update Latency": 6565.7, "Tail Update Latency": 228217.0}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100026.0, "Median Read Latency": 79.4, "Tail Read Latency": 119.9, "Median Update Latency": 84.8, "Tail Update Latency": 127.7}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 612789.2, "Median Read Latency": 20433.4, "Tail Read Latency": 248317.0, "Median Update Latency": 19920.1, "Tail Update Latency": 248162.6}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 633366.9, "Median Read Latency": 27763.5, "Tail Read Latency": 261832.4, "Median Update Latency": 27589.3, "Tail Update Latency": 260902.8}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300076.6, "Median Read Latency": 91.9, "Tail Read Latency": 355.3, "Median Update Latency": 97.9, "Tail Update Latency": 346.9}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400067.4, "Median Read Latency": 104.2, "Tail Read Latency": 490.8, "Median Update Latency": 109.1, "Tail Update Latency": 489.3}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100018.2, "Median Read Latency": 79.5, "Tail Read Latency": 124.0, "Median Update Latency": 85.6, "Tail Update Latency": 130.7}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 673606.9, "Median Read Latency": 6399.4, "Tail Read Latency": 232519.9, "Median Update Latency": 7309.5, "Tail Update Latency": 233818.8}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 618746.2, "Median Read Latency": 5440.4, "Tail Read Latency": 229943.5, "Median Update Latency": 6759.6, "Tail Update Latency": 229563.8}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300022.9, "Median Read Latency": 87.6, "Tail Read Latency": 182.6, "Median Update Latency": 
92.8, "Tail Update Latency": 187.9}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400008.0, "Median Read Latency": 111.5, "Tail Read Latency": 841.2, "Median Update Latency": 118.5, "Tail Update Latency": 843.9}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300049.4, "Median Read Latency": 88.0, "Tail Read Latency": 183.8, "Median Update Latency": 94.3, "Tail Update Latency": 187.9}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 299973.6, "Median Read Latency": 103.9, "Tail Read Latency": 264.6, "Median Update Latency": 108.6, "Tail Update Latency": 264.8}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 634051.9, "Median Read Latency": 29679.0, "Tail Read Latency": 276428.2, "Median Update Latency": 29553.8, "Tail Update Latency": 275365.1}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 595601.7, "Median Read Latency": 16592.4, "Tail Read Latency": 248858.5, "Median Update Latency": 16643.3, "Tail Update Latency": 249676.4}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 299996.7, "Median Read Latency": 105.2, "Tail Read Latency": 265.0, "Median Update Latency": 110.0, "Tail Update Latency": 291.7}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 601384.7, "Median Read Latency": 21991.8, "Tail Read Latency": 251918.5, "Median Update Latency": 22250.9, "Tail Update Latency": 254757.7}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 672509.3, "Median Read Latency": 6597.4, "Tail Read Latency": 231210.5, "Median Update Latency": 6618.7, "Tail Update Latency": 230082.2}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 618813.0, "Median Read Latency": 2470.6, "Tail Read Latency": 229954.7, "Median Update Latency": 448.3, "Tail Update Latency": 230106.4}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 399964.2, "Median Read Latency": 104.2, "Tail Read 
Latency": 500.7, "Median Update Latency": 109.3, "Tail Update Latency": 512.1}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 200027.6, "Median Read Latency": 88.4, "Tail Read Latency": 242.7, "Median Update Latency": 95.4, "Tail Update Latency": 259.1}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200074.5, "Median Read Latency": 95.6, "Tail Read Latency": 174.1, "Median Update Latency": 100.6, "Tail Update Latency": 180.8}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 590638.3, "Median Read Latency": 20591.5, "Tail Read Latency": 245328.4, "Median Update Latency": 20026.6, "Tail Update Latency": 247076.2}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499880.6, "Median Read Latency": 6449.0, "Tail Read Latency": 220474.9, "Median Update Latency": 6244.1, "Tail Update Latency": 221755.8}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499651.8, "Median Read Latency": 3100.6, "Tail Read Latency": 224805.6, "Median Update Latency": 3231.5, "Tail Update Latency": 220308.2}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 679918.1, "Median Read Latency": 6117.7, "Tail Read Latency": 235370.8, "Median Update Latency": 5461.9, "Tail Update Latency": 235881.3}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 562603.6, "Median Read Latency": 17687.5, "Tail Read Latency": 249811.6, "Median Update Latency": 17850.1, "Tail Update Latency": 248409.0}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 498800.0, "Median Read Latency": 4824.4, "Tail Read Latency": 226001.1, "Median Update Latency": 3904.7, "Tail Update Latency": 225754.6}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300001.7, "Median Read Latency": 87.0, "Tail Read Latency": 177.7, "Median Update Latency": 91.8, "Tail Update Latency": 186.5}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 
200017.1, "Median Read Latency": 79.3, "Tail Read Latency": 128.7, "Median Update Latency": 84.2, "Tail Update Latency": 141.4}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400017.2, "Median Read Latency": 115.5, "Tail Read Latency": 1159.7, "Median Update Latency": 120.1, "Tail Update Latency": 989.5}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 99991.2, "Median Read Latency": 78.9, "Tail Read Latency": 121.9, "Median Update Latency": 84.3, "Tail Update Latency": 129.4}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200019.4, "Median Read Latency": 78.7, "Tail Read Latency": 127.4, "Median Update Latency": 83.4, "Tail Update Latency": 138.3}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 574746.3, "Median Read Latency": 10094.5, "Tail Read Latency": 229286.1, "Median Update Latency": 10163.4, "Tail Update Latency": 228392.7}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 666919.6, "Median Read Latency": 5602.6, "Tail Read Latency": 231151.9, "Median Update Latency": 7137.9, "Tail Update Latency": 230738.8}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300001.0, "Median Read Latency": 87.0, "Tail Read Latency": 178.4, "Median Update Latency": 92.1, "Tail Update Latency": 184.3}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400016.4, "Median Read Latency": 112.3, "Tail Read Latency": 861.1, "Median Update Latency": 119.0, "Tail Update Latency": 826.2}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 554192.0, "Median Read Latency": 14565.8, "Tail Read Latency": 251281.6, "Median Update Latency": 14621.9, "Tail Update Latency": 248503.4}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400004.9, "Median Read Latency": 112.6, "Tail Read Latency": 847.6, "Median Update Latency": 119.8, "Tail Update Latency": 780.5}], ["vanilla", "memcached vanilla", {"Target 
QPS": 500000, "Actual QPS": 499926.2, "Median Read Latency": 202.8, "Tail Read Latency": 11364.8, "Median Update Latency": 225.1, "Tail Update Latency": 11373.2}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499827.3, "Median Read Latency": 6236.1, "Tail Read Latency": 219172.4, "Median Update Latency": 6311.9, "Tail Update Latency": 218648.6}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 299983.9, "Median Read Latency": 105.0, "Tail Read Latency": 266.1, "Median Update Latency": 110.0, "Tail Update Latency": 270.9}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 572297.0, "Median Read Latency": 7121.5, "Tail Read Latency": 229699.2, "Median Update Latency": 6446.6, "Tail Update Latency": 229921.5}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300049.7, "Median Read Latency": 100.0, "Tail Read Latency": 462.1, "Median Update Latency": 106.0, "Tail Update Latency": 495.3}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300036.2, "Median Read Latency": 97.4, "Tail Read Latency": 215.2, "Median Update Latency": 101.7, "Tail Update Latency": 222.3}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 592848.3, "Median Read Latency": 13499.3, "Tail Read Latency": 54109.4, "Median Update Latency": 13537.0, "Tail Update Latency": 67041.7}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 599841.2, "Median Read Latency": 13013.2, "Tail Read Latency": 219173.4, "Median Update Latency": 12913.4, "Tail Update Latency": 78396.6}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499623.8, "Median Read Latency": 3350.4, "Tail Read Latency": 224439.8, "Median Update Latency": 2713.4, "Tail Update Latency": 227667.6}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400044.7, "Median Read Latency": 114.6, "Tail Read Latency": 904.6, "Median Update Latency": 121.3, "Tail Update Latency": 889.9}], 
["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 99961.3, "Median Read Latency": 78.9, "Tail Read Latency": 121.4, "Median Update Latency": 84.1, "Tail Update Latency": 129.1}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 399956.7, "Median Read Latency": 111.9, "Tail Read Latency": 913.7, "Median Update Latency": 117.9, "Tail Update Latency": 812.8}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 199987.8, "Median Read Latency": 88.7, "Tail Read Latency": 240.9, "Median Update Latency": 94.8, "Tail Update Latency": 263.6}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 578039.8, "Median Read Latency": 12007.4, "Tail Read Latency": 222983.8, "Median Update Latency": 12072.4, "Tail Update Latency": 223117.5}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 599235.9, "Median Read Latency": 12980.0, "Tail Read Latency": 89029.3, "Median Update Latency": 13117.6, "Tail Update Latency": 84983.3}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400003.6, "Median Read Latency": 104.1, "Tail Read Latency": 506.4, "Median Update Latency": 108.0, "Tail Update Latency": 499.3}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 667063.6, "Median Read Latency": 7072.8, "Tail Read Latency": 228047.4, "Median Update Latency": 6478.2, "Tail Update Latency": 226000.1}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 591587.5, "Median Read Latency": 13359.7, "Tail Read Latency": 50039.7, "Median Update Latency": 13361.1, "Tail Update Latency": 49096.8}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100046.8, "Median Read Latency": 93.6, "Tail Read Latency": 141.1, "Median Update Latency": 98.7, "Tail Update Latency": 149.6}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100035.3, "Median Read Latency": 93.2, "Tail Read Latency": 140.9, "Median Update Latency": 98.0, 
"Tail Update Latency": 149.2}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 579557.0, "Median Read Latency": 11146.3, "Tail Read Latency": 225592.5, "Median Update Latency": 11213.4, "Tail Update Latency": 227281.7}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499854.0, "Median Read Latency": 6874.0, "Tail Read Latency": 221957.8, "Median Update Latency": 6881.0, "Tail Update Latency": 223218.6}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 199990.1, "Median Read Latency": 88.2, "Tail Read Latency": 238.6, "Median Update Latency": 95.8, "Tail Update Latency": 247.6}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 617227.7, "Median Read Latency": 17171.0, "Tail Read Latency": 249354.5, "Median Update Latency": 16888.9, "Tail Update Latency": 253622.6}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 616791.3, "Median Read Latency": 7783.5, "Tail Read Latency": 231012.6, "Median Update Latency": 7816.4, "Tail Update Latency": 229123.0}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100038.9, "Median Read Latency": 78.3, "Tail Read Latency": 119.7, "Median Update Latency": 83.5, "Tail Update Latency": 130.2}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 600648.1, "Median Read Latency": 19930.5, "Tail Read Latency": 252949.2, "Median Update Latency": 19774.3, "Tail Update Latency": 254769.5}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400077.3, "Median Read Latency": 115.5, "Tail Read Latency": 1407.8, "Median Update Latency": 119.4, "Tail Update Latency": 1102.4}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100003.5, "Median Read Latency": 85.9, "Tail Read Latency": 164.6, "Median Update Latency": 92.1, "Tail Update Latency": 171.8}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 99984.3, "Median Read Latency": 78.3, "Tail Read Latency": 
120.5, "Median Update Latency": 83.7, "Tail Update Latency": 128.5}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 596507.7, "Median Read Latency": 13251.2, "Tail Read Latency": 73339.7, "Median Update Latency": 13352.3, "Tail Update Latency": 112626.7}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200030.3, "Median Read Latency": 94.9, "Tail Read Latency": 171.9, "Median Update Latency": 99.8, "Tail Update Latency": 185.1}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499890.3, "Median Read Latency": 6051.4, "Tail Read Latency": 220881.9, "Median Update Latency": 5816.4, "Tail Update Latency": 220025.4}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 617601.3, "Median Read Latency": 6576.4, "Tail Read Latency": 228343.0, "Median Update Latency": 6669.8, "Tail Update Latency": 229107.3}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 614289.3, "Median Read Latency": 27798.8, "Tail Read Latency": 249319.1, "Median Update Latency": 28052.1, "Tail Update Latency": 255922.6}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400085.5, "Median Read Latency": 108.2, "Tail Read Latency": 596.3, "Median Update Latency": 112.3, "Tail Update Latency": 580.3}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499937.2, "Median Read Latency": 246.8, "Tail Read Latency": 12044.7, "Median Update Latency": 370.5, "Tail Update Latency": 11969.0}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300032.5, "Median Read Latency": 99.9, "Tail Read Latency": 450.5, "Median Update Latency": 106.5, "Tail Update Latency": 506.6}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200065.4, "Median Read Latency": 78.6, "Tail Read Latency": 127.6, "Median Update Latency": 83.6, "Tail Update Latency": 136.7}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200023.6, "Median Read 
Latency": 95.8, "Tail Read Latency": 175.4, "Median Update Latency": 100.8, "Tail Update Latency": 181.3}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 200018.8, "Median Read Latency": 88.0, "Tail Read Latency": 237.2, "Median Update Latency": 95.0, "Tail Update Latency": 241.3}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100003.9, "Median Read Latency": 92.9, "Tail Read Latency": 141.7, "Median Update Latency": 98.0, "Tail Update Latency": 149.9}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200027.7, "Median Read Latency": 78.8, "Tail Read Latency": 128.1, "Median Update Latency": 84.1, "Tail Update Latency": 138.5}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 573103.5, "Median Read Latency": 6813.3, "Tail Read Latency": 228646.3, "Median Update Latency": 6506.7, "Tail Update Latency": 228655.2}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 500002.4, "Median Read Latency": 165.8, "Tail Read Latency": 12247.6, "Median Update Latency": 153.3, "Tail Update Latency": 12160.1}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499939.4, "Median Read Latency": 236.9, "Tail Read Latency": 11686.2, "Median Update Latency": 1735.2, "Tail Update Latency": 11889.0}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100010.1, "Median Read Latency": 92.7, "Tail Read Latency": 140.3, "Median Update Latency": 98.4, "Tail Update Latency": 150.4}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200049.3, "Median Read Latency": 79.0, "Tail Read Latency": 128.4, "Median Update Latency": 83.8, "Tail Update Latency": 135.6}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499781.9, "Median Read Latency": 6814.0, "Tail Read Latency": 221736.9, "Median Update Latency": 6911.9, "Tail Update Latency": 208931.6}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 
570152.2, "Median Read Latency": 5421.5, "Tail Read Latency": 227907.2, "Median Update Latency": 370.5, "Tail Update Latency": 226747.8}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 577102.5, "Median Read Latency": 10328.5, "Tail Read Latency": 229337.1, "Median Update Latency": 10259.6, "Tail Update Latency": 228080.1}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300011.4, "Median Read Latency": 104.6, "Tail Read Latency": 269.3, "Median Update Latency": 108.7, "Tail Update Latency": 273.6}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 679919.6, "Median Read Latency": 6524.6, "Tail Read Latency": 232330.6, "Median Update Latency": 6061.6, "Tail Update Latency": 230619.7}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 573394.8, "Median Read Latency": 847.7, "Tail Read Latency": 229024.9, "Median Update Latency": 383.2, "Tail Update Latency": 229457.9}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 200085.1, "Median Read Latency": 88.4, "Tail Read Latency": 241.4, "Median Update Latency": 95.2, "Tail Update Latency": 234.6}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200047.0, "Median Read Latency": 94.9, "Tail Read Latency": 171.3, "Median Update Latency": 99.5, "Tail Update Latency": 172.3}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 559301.4, "Median Read Latency": 14825.1, "Tail Read Latency": 254187.0, "Median Update Latency": 14664.1, "Tail Update Latency": 254937.9}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100010.6, "Median Read Latency": 85.3, "Tail Read Latency": 162.3, "Median Update Latency": 91.4, "Tail Update Latency": 173.7}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 578074.7, "Median Read Latency": 11791.1, "Tail Read Latency": 223980.5, "Median Update Latency": 11864.7, "Tail Update Latency": 226155.9}], ["forall", "memcached 
forall", {"Target QPS": 100000, "Actual QPS": 100001.6, "Median Read Latency": 85.8, "Tail Read Latency": 162.2, "Median Update Latency": 92.7, "Tail Update Latency": 171.3}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400002.4, "Median Read Latency": 116.0, "Tail Read Latency": 1275.3, "Median Update Latency": 120.4, "Tail Update Latency": 1950.4}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 99993.8, "Median Read Latency": 85.5, "Tail Read Latency": 165.2, "Median Update Latency": 91.9, "Tail Update Latency": 174.4}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300001.0, "Median Read Latency": 104.6, "Tail Read Latency": 264.9, "Median Update Latency": 109.1, "Tail Update Latency": 267.0}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300047.2, "Median Read Latency": 104.8, "Tail Read Latency": 270.7, "Median Update Latency": 108.8, "Tail Update Latency": 291.1}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 99984.4, "Median Read Latency": 92.5, "Tail Read Latency": 140.9, "Median Update Latency": 97.6, "Tail Update Latency": 151.5}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499211.4, "Median Read Latency": 4829.0, "Tail Read Latency": 227460.6, "Median Update Latency": 5820.2, "Tail Update Latency": 228629.5}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 399973.0, "Median Read Latency": 116.6, "Tail Read Latency": 1517.8, "Median Update Latency": 122.0, "Tail Update Latency": 2040.5}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 604492.9, "Median Read Latency": 22608.3, "Tail Read Latency": 250393.4, "Median Update Latency": 23071.2, "Tail Update Latency": 256668.7}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 611693.3, "Median Read Latency": 14027.6, "Tail Read Latency": 243356.0, "Median Update Latency": 14022.5, "Tail Update Latency": 250001.8}], ["forall", 
"memcached forall", {"Target QPS": 200000, "Actual QPS": 200042.2, "Median Read Latency": 88.5, "Tail Read Latency": 243.2, "Median Update Latency": 94.9, "Tail Update Latency": 236.6}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 559704.6, "Median Read Latency": 14430.7, "Tail Read Latency": 250332.9, "Median Update Latency": 14418.1, "Tail Update Latency": 253148.7}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499223.6, "Median Read Latency": 4367.8, "Tail Read Latency": 226883.4, "Median Update Latency": 5244.0, "Tail Update Latency": 221016.3}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 568544.1, "Median Read Latency": 2365.9, "Tail Read Latency": 228962.5, "Median Update Latency": 4500.7, "Tail Update Latency": 230563.3}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100000.3, "Median Read Latency": 78.8, "Tail Read Latency": 119.8, "Median Update Latency": 84.1, "Tail Update Latency": 127.7}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499122.9, "Median Read Latency": 3607.2, "Tail Read Latency": 224649.4, "Median Update Latency": 3444.9, "Tail Update Latency": 225614.4}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400014.4, "Median Read Latency": 116.2, "Tail Read Latency": 1195.9, "Median Update Latency": 120.5, "Tail Update Latency": 1364.5}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 199998.7, "Median Read Latency": 88.4, "Tail Read Latency": 243.1, "Median Update Latency": 94.8, "Tail Update Latency": 250.8}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 99995.7, "Median Read Latency": 86.0, "Tail Read Latency": 169.2, "Median Update Latency": 92.9, "Tail Update Latency": 184.7}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100033.6, "Median Read Latency": 92.2, "Tail Read Latency": 140.4, "Median Update Latency": 97.9, "Tail Update 
Latency": 150.8}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400016.4, "Median Read Latency": 107.1, "Tail Read Latency": 600.3, "Median Update Latency": 113.1, "Tail Update Latency": 614.8}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 596136.4, "Median Read Latency": 13397.2, "Tail Read Latency": 56097.2, "Median Update Latency": 13382.5, "Tail Update Latency": 52032.8}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 99991.6, "Median Read Latency": 78.8, "Tail Read Latency": 123.0, "Median Update Latency": 84.3, "Tail Update Latency": 129.0}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 577576.0, "Median Read Latency": 11304.9, "Tail Read Latency": 223373.9, "Median Update Latency": 11351.8, "Tail Update Latency": 222493.1}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 621059.2, "Median Read Latency": 836.3, "Tail Read Latency": 229758.4, "Median Update Latency": 5313.0, "Tail Update Latency": 229136.4}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200039.4, "Median Read Latency": 79.0, "Tail Read Latency": 128.4, "Median Update Latency": 83.6, "Tail Update Latency": 137.9}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 563457.7, "Median Read Latency": 16701.6, "Tail Read Latency": 249200.5, "Median Update Latency": 16861.1, "Tail Update Latency": 248788.2}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 399982.2, "Median Read Latency": 113.1, "Tail Read Latency": 881.3, "Median Update Latency": 119.4, "Tail Update Latency": 900.6}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 595465.4, "Median Read Latency": 13157.2, "Tail Read Latency": 75250.3, "Median Update Latency": 13247.2, "Tail Update Latency": 80003.1}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 499959.8, "Median Read Latency": 262.9, "Tail Read Latency": 
12395.7, "Median Update Latency": 295.8, "Tail Update Latency": 12350.6}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 589612.6, "Median Read Latency": 13433.9, "Tail Read Latency": 47674.3, "Median Update Latency": 13410.5, "Tail Update Latency": 57342.6}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 199957.2, "Median Read Latency": 80.1, "Tail Read Latency": 138.1, "Median Update Latency": 85.2, "Tail Update Latency": 143.2}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200034.1, "Median Read Latency": 94.9, "Tail Read Latency": 171.1, "Median Update Latency": 99.7, "Tail Update Latency": 180.0}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 500007.9, "Median Read Latency": 178.2, "Tail Read Latency": 12033.8, "Median Update Latency": 168.2, "Tail Update Latency": 12001.3}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 497856.1, "Median Read Latency": 4145.0, "Tail Read Latency": 226546.5, "Median Update Latency": 3441.9, "Tail Update Latency": 227252.1}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300033.4, "Median Read Latency": 104.2, "Tail Read Latency": 268.6, "Median Update Latency": 109.9, "Tail Update Latency": 279.2}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 568855.7, "Median Read Latency": 9605.0, "Tail Read Latency": 232225.5, "Median Update Latency": 9612.0, "Tail Update Latency": 232598.3}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 200009.7, "Median Read Latency": 88.0, "Tail Read Latency": 239.6, "Median Update Latency": 94.3, "Tail Update Latency": 241.1}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 570228.2, "Median Read Latency": 6570.1, "Tail Read Latency": 225938.3, "Median Update Latency": 6319.2, "Tail Update Latency": 221840.9}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 299969.9, "Median Read 
Latency": 90.6, "Tail Read Latency": 349.4, "Median Update Latency": 96.7, "Tail Update Latency": 363.3}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 620657.5, "Median Read Latency": 5477.7, "Tail Read Latency": 231316.9, "Median Update Latency": 3529.9, "Tail Update Latency": 232065.4}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 99976.6, "Median Read Latency": 92.6, "Tail Read Latency": 140.5, "Median Update Latency": 97.5, "Tail Update Latency": 149.4}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400042.8, "Median Read Latency": 117.3, "Tail Read Latency": 1777.2, "Median Update Latency": 123.8, "Tail Update Latency": 1476.7}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100038.9, "Median Read Latency": 79.2, "Tail Read Latency": 122.3, "Median Update Latency": 85.3, "Tail Update Latency": 130.3}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 680848.2, "Median Read Latency": 6171.2, "Tail Read Latency": 227595.7, "Median Update Latency": 6211.2, "Tail Update Latency": 226364.4}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400014.0, "Median Read Latency": 113.7, "Tail Read Latency": 861.3, "Median Update Latency": 120.8, "Tail Update Latency": 910.6}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 299995.0, "Median Read Latency": 99.8, "Tail Read Latency": 460.1, "Median Update Latency": 105.9, "Tail Update Latency": 467.5}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499364.1, "Median Read Latency": 2224.1, "Tail Read Latency": 226096.6, "Median Update Latency": 3068.3, "Tail Update Latency": 225018.1}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 500025.6, "Median Read Latency": 313.0, "Tail Read Latency": 11828.0, "Median Update Latency": 321.6, "Tail Update Latency": 11853.5}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual 
QPS": 620927.4, "Median Read Latency": 24535.1, "Tail Read Latency": 257784.9, "Median Update Latency": 24537.7, "Tail Update Latency": 255864.2}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 602632.4, "Median Read Latency": 13072.1, "Tail Read Latency": 61232.2, "Median Update Latency": 13168.5, "Tail Update Latency": 92245.5}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 199927.4, "Median Read Latency": 89.4, "Tail Read Latency": 245.8, "Median Update Latency": 95.9, "Tail Update Latency": 254.7}], ["fibre", "memcached fibre", {"Target QPS": 700000, "Actual QPS": 595090.7, "Median Read Latency": 16183.6, "Tail Read Latency": 247979.2, "Median Update Latency": 16252.1, "Tail Update Latency": 250173.3}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 618956.2, "Median Read Latency": 6020.9, "Tail Read Latency": 229495.0, "Median Update Latency": 415.4, "Tail Update Latency": 230616.9}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 399956.4, "Median Read Latency": 104.6, "Tail Read Latency": 502.8, "Median Update Latency": 109.0, "Tail Update Latency": 496.5}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 299923.6, "Median Read Latency": 88.0, "Tail Read Latency": 180.5, "Median Update Latency": 93.2, "Tail Update Latency": 186.6}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300010.4, "Median Read Latency": 99.9, "Tail Read Latency": 441.2, "Median Update Latency": 105.4, "Tail Update Latency": 423.4}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300014.0, "Median Read Latency": 97.4, "Tail Read Latency": 219.0, "Median Update Latency": 101.9, "Tail Update Latency": 236.6}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499459.5, "Median Read Latency": 4253.1, "Tail Read Latency": 224248.7, "Median Update Latency": 3402.1, "Tail Update Latency": 224262.8}], ["forall", "memcached 
forall", {"Target QPS": 300000, "Actual QPS": 300062.5, "Median Read Latency": 91.6, "Tail Read Latency": 357.4, "Median Update Latency": 98.9, "Tail Update Latency": 364.4}], ["vanilla", "memcached vanilla", {"Target QPS": 800000, "Actual QPS": 678611.1, "Median Read Latency": 6160.8, "Tail Read Latency": 229544.6, "Median Update Latency": 5851.0, "Tail Update Latency": 229411.7}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 578256.9, "Median Read Latency": 11180.9, "Tail Read Latency": 225596.4, "Median Update Latency": 11147.2, "Tail Update Latency": 222763.9}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 400003.0, "Median Read Latency": 116.8, "Tail Read Latency": 1089.4, "Median Update Latency": 122.7, "Tail Update Latency": 831.8}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300032.2, "Median Read Latency": 90.3, "Tail Read Latency": 348.7, "Median Update Latency": 96.4, "Tail Update Latency": 350.9}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200064.6, "Median Read Latency": 78.7, "Tail Read Latency": 128.0, "Median Update Latency": 84.5, "Tail Update Latency": 138.0}], ["fibre", "memcached fibre", {"Target QPS": 400000, "Actual QPS": 399974.1, "Median Read Latency": 115.0, "Tail Read Latency": 1410.0, "Median Update Latency": 120.0, "Tail Update Latency": 1261.5}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100010.9, "Median Read Latency": 85.4, "Tail Read Latency": 160.3, "Median Update Latency": 91.7, "Tail Update Latency": 173.0}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400024.5, "Median Read Latency": 106.4, "Tail Read Latency": 505.2, "Median Update Latency": 111.2, "Tail Update Latency": 491.0}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300011.7, "Median Read Latency": 87.3, "Tail Read Latency": 178.7, "Median Update Latency": 92.3, "Tail Update Latency": 184.7}], 
["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 99988.9, "Median Read Latency": 93.3, "Tail Read Latency": 140.6, "Median Update Latency": 98.0, "Tail Update Latency": 150.8}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 299993.4, "Median Read Latency": 87.5, "Tail Read Latency": 181.0, "Median Update Latency": 93.2, "Tail Update Latency": 182.8}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 617912.5, "Median Read Latency": 25810.6, "Tail Read Latency": 252464.1, "Median Update Latency": 25674.3, "Tail Update Latency": 251736.1}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 625701.7, "Median Read Latency": 22503.9, "Tail Read Latency": 259542.7, "Median Update Latency": 22468.4, "Tail Update Latency": 267154.9}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499785.2, "Median Read Latency": 6496.3, "Tail Read Latency": 222659.0, "Median Update Latency": 6234.7, "Tail Update Latency": 222262.0}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 572021.8, "Median Read Latency": 9548.6, "Tail Read Latency": 233766.8, "Median Update Latency": 9524.3, "Tail Update Latency": 236551.7}], ["fibre", "memcached fibre", {"Target QPS": 200000, "Actual QPS": 200009.0, "Median Read Latency": 95.4, "Tail Read Latency": 175.8, "Median Update Latency": 100.7, "Tail Update Latency": 190.9}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200027.1, "Median Read Latency": 78.6, "Tail Read Latency": 126.6, "Median Update Latency": 83.1, "Tail Update Latency": 129.1}], ["fibre", "memcached fibre", {"Target QPS": 600000, "Actual QPS": 557369.8, "Median Read Latency": 16483.1, "Tail Read Latency": 253742.7, "Median Update Latency": 16400.4, "Tail Update Latency": 255241.9}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499805.4, "Median Read Latency": 6508.7, "Tail Read Latency": 221536.4, "Median Update Latency": 
6509.6, "Tail Update Latency": 223234.9}], ["forall", "memcached forall", {"Target QPS": 300000, "Actual QPS": 300035.4, "Median Read Latency": 100.2, "Tail Read Latency": 457.3, "Median Update Latency": 106.8, "Tail Update Latency": 525.7}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300050.2, "Median Read Latency": 96.4, "Tail Read Latency": 213.9, "Median Update Latency": 100.7, "Tail Update Latency": 220.0}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499885.9, "Median Read Latency": 7063.8, "Tail Read Latency": 214747.3, "Median Update Latency": 6995.4, "Tail Update Latency": 219631.2}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 609226.7, "Median Read Latency": 14494.9, "Tail Read Latency": 237210.7, "Median Update Latency": 14428.8, "Tail Update Latency": 235805.5}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 569466.3, "Median Read Latency": 511.4, "Tail Read Latency": 226153.5, "Median Update Latency": 364.6, "Tail Update Latency": 224980.5}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499908.5, "Median Read Latency": 5780.3, "Tail Read Latency": 221614.4, "Median Update Latency": 5583.2, "Tail Update Latency": 223730.2}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 564939.5, "Median Read Latency": 9668.2, "Tail Read Latency": 232075.5, "Median Update Latency": 9684.1, "Tail Update Latency": 233548.1}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100060.1, "Median Read Latency": 78.8, "Tail Read Latency": 121.0, "Median Update Latency": 84.6, "Tail Update Latency": 130.7}], ["forall", "memcached forall", {"Target QPS": 200000, "Actual QPS": 199988.8, "Median Read Latency": 88.0, "Tail Read Latency": 242.2, "Median Update Latency": 95.1, "Tail Update Latency": 263.1}], ["vanilla", "memcached vanilla", {"Target QPS": 700000, "Actual QPS": 619177.1, "Median Read Latency": 5783.8, "Tail Read 
Latency": 230993.8, "Median Update Latency": 5783.0, "Tail Update Latency": 228857.0}], ["forall", "memcached forall", {"Target QPS": 700000, "Actual QPS": 591662.2, "Median Read Latency": 13386.9, "Tail Read Latency": 50085.4, "Median Update Latency": 13368.7, "Tail Update Latency": 44908.7}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100031.6, "Median Read Latency": 78.6, "Tail Read Latency": 120.5, "Median Update Latency": 84.0, "Tail Update Latency": 128.7}], ["forall", "memcached forall", {"Target QPS": 600000, "Actual QPS": 578665.8, "Median Read Latency": 12495.4, "Tail Read Latency": 71938.0, "Median Update Latency": 12460.8, "Tail Update Latency": 134007.8}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 593825.5, "Median Read Latency": 13381.8, "Tail Read Latency": 42648.3, "Median Update Latency": 13428.1, "Tail Update Latency": 52034.5}], ["vanilla", "memcached vanilla", {"Target QPS": 400000, "Actual QPS": 400058.1, "Median Read Latency": 107.0, "Tail Read Latency": 542.8, "Median Update Latency": 112.4, "Tail Update Latency": 534.6}], ["forall", "memcached forall", {"Target QPS": 100000, "Actual QPS": 100023.5, "Median Read Latency": 86.3, "Tail Read Latency": 167.1, "Median Update Latency": 93.4, "Tail Update Latency": 177.1}], ["vanilla", "memcached vanilla", {"Target QPS": 500000, "Actual QPS": 500013.8, "Median Read Latency": 427.0, "Tail Read Latency": 11978.9, "Median Update Latency": 1271.9, "Tail Update Latency": 12021.7}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200028.8, "Median Read Latency": 79.0, "Tail Read Latency": 128.9, "Median Update Latency": 84.0, "Tail Update Latency": 135.6}], ["fibre", "memcached fibre", {"Target QPS": 800000, "Actual QPS": 622974.3, "Median Read Latency": 25949.6, "Tail Read Latency": 253985.8, "Median Update Latency": 26016.2, "Tail Update Latency": 256999.9}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 
498679.6, "Median Read Latency": 2696.6, "Tail Read Latency": 224780.0, "Median Update Latency": 3725.6, "Tail Update Latency": 225606.8}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300005.0, "Median Read Latency": 87.2, "Tail Read Latency": 181.0, "Median Update Latency": 91.9, "Tail Update Latency": 190.0}], ["vanilla", "memcached vanilla", {"Target QPS": 100000, "Actual QPS": 100054.4, "Median Read Latency": 78.9, "Tail Read Latency": 121.5, "Median Update Latency": 84.5, "Tail Update Latency": 128.5}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 594765.9, "Median Read Latency": 13457.0, "Tail Read Latency": 57014.3, "Median Update Latency": 13535.6, "Tail Update Latency": 62271.0}], ["fibre", "memcached fibre", {"Target QPS": 100000, "Actual QPS": 100045.0, "Median Read Latency": 94.0, "Tail Read Latency": 147.8, "Median Update Latency": 98.6, "Tail Update Latency": 155.2}], ["vanilla", "memcached vanilla", {"Target QPS": 600000, "Actual QPS": 570755.1, "Median Read Latency": 1747.1, "Tail Read Latency": 229720.5, "Median Update Latency": 376.1, "Tail Update Latency": 230143.6}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400028.9, "Median Read Latency": 113.5, "Tail Read Latency": 883.0, "Median Update Latency": 119.6, "Tail Update Latency": 930.9}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499161.2, "Median Read Latency": 4035.8, "Tail Read Latency": 226041.1, "Median Update Latency": 4145.7, "Tail Update Latency": 227144.8}], ["vanilla", "memcached vanilla", {"Target QPS": 200000, "Actual QPS": 200004.9, "Median Read Latency": 79.6, "Tail Read Latency": 128.8, "Median Update Latency": 84.8, "Tail Update Latency": 138.1}], ["fibre", "memcached fibre", {"Target QPS": 500000, "Actual QPS": 499892.3, "Median Read Latency": 6412.0, "Tail Read Latency": 220894.5, "Median Update Latency": 6356.1, "Tail Update Latency": 222058.1}], ["vanilla", "memcached vanilla", 
{"Target QPS": 600000, "Actual QPS": 569172.7, "Median Read Latency": 1014.0, "Tail Read Latency": 227912.8, "Median Update Latency": 299.3, "Tail Update Latency": 226796.7}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 592497.5, "Median Read Latency": 13366.1, "Tail Read Latency": 50141.0, "Median Update Latency": 13458.6, "Tail Update Latency": 64470.4}], ["forall", "memcached forall", {"Target QPS": 400000, "Actual QPS": 400035.6, "Median Read Latency": 114.4, "Tail Read Latency": 884.8, "Median Update Latency": 121.0, "Tail Update Latency": 883.7}], ["forall", "memcached forall", {"Target QPS": 500000, "Actual QPS": 499062.3, "Median Read Latency": 2907.3, "Tail Read Latency": 226259.2, "Median Update Latency": 3329.0, "Tail Update Latency": 226354.8}], ["fibre", "memcached fibre", {"Target QPS": 300000, "Actual QPS": 300127.5, "Median Read Latency": 104.7, "Tail Read Latency": 266.2, "Median Update Latency": 109.1, "Tail Update Latency": 269.6}], ["vanilla", "memcached vanilla", {"Target QPS": 300000, "Actual QPS": 300046.9, "Median Read Latency": 88.4, "Tail Read Latency": 182.9, "Median Update Latency": 93.4, "Tail Update Latency": 188.3}], ["forall", "memcached forall", {"Target QPS": 800000, "Actual QPS": 585332.5, "Median Read Latency": 13581.7, "Tail Read Latency": 41485.9, "Median Update Latency": 13562.4, "Tail Update Latency": 42725.2}]]
Index: doc/theses/thierry_delisle_PhD/thesis/data/memcd.updt
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/memcd.updt	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
+++ doc/theses/thierry_delisle_PhD/thesis/data/memcd.updt	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -0,0 +1,1 @@
+[["fibre", "memcached fibre", {"Update Ratio": 30, "Actual QPS": 629656.2, "Average Read Latency": 27319.8, "Median Read Latency": 22952.7, "Tail Read Latency": 246885.9, "Average Update Latency": 27991.9, "Median Update Latency": 23033.7, "Tail Update Latency": 252283.9}], ["fibre", "memcached fibre", {"Update Ratio": 20, "Actual QPS": 648887.0, "Average Read Latency": 30926.5, "Median Read Latency": 26547.5, "Tail Read Latency": 245170.2, "Average Update Latency": 31078.3, "Median Update Latency": 26466.1, "Tail Update Latency": 245702.3}], ["fibre", "memcached fibre", {"Update Ratio": 20, "Actual QPS": 659773.8, "Average Read Latency": 25337.6, "Median Read Latency": 20743.6, "Tail Read Latency": 250895.8, "Average Update Latency": 25691.3, "Median Update Latency": 20786.0, "Tail Update Latency": 252716.3}], ["forall", "memcached forall", {"Update Ratio": 20, "Actual QPS": 593242.6, "Average Read Latency": 16959.6, "Median Read Latency": 13191.5, "Tail Read Latency": 35809.5, "Average Update Latency": 17359.4, "Median Update Latency": 13176.7, "Tail Update Latency": 39772.7}], ["fibre", "memcached fibre", {"Update Ratio": 30, "Actual QPS": 632062.8, "Average Read Latency": 32964.0, "Median Read Latency": 28637.6, "Tail Read Latency": 239904.1, "Average Update Latency": 33330.3, "Median Update Latency": 28653.8, "Tail Update Latency": 245537.7}], ["vanilla", "memcached vanilla", {"Update Ratio": 30, "Actual QPS": 739300.3, "Average Read Latency": 24611.8, "Median Read Latency": 15550.2, "Tail Read Latency": 250448.4, "Average Update Latency": 24234.4, "Median Update Latency": 15504.0, "Tail Update Latency": 249174.1}], ["fibre", "memcached fibre", {"Update Ratio": 30, "Actual QPS": 635540.7, "Average Read Latency": 26437.0, "Median Read Latency": 21871.7, "Tail Read Latency": 249677.1, "Average Update Latency": 27007.6, "Median Update Latency": 21919.5, "Tail Update Latency": 254080.7}], ["fibre", "memcached fibre", {"Update Ratio": 20, "Actual QPS": 662431.5, 
"Average Read Latency": 28397.9, "Median Read Latency": 22980.7, "Tail Read Latency": 252279.7, "Average Update Latency": 28848.1, "Median Update Latency": 22954.0, "Tail Update Latency": 256070.8}], ["vanilla", "memcached vanilla", {"Update Ratio": 5, "Actual QPS": 771468.6, "Average Read Latency": 21535.3, "Median Read Latency": 11447.2, "Tail Read Latency": 239536.3, "Average Update Latency": 21420.0, "Median Update Latency": 11457.2, "Tail Update Latency": 239594.8}], ["fibre", "memcached fibre", {"Update Ratio": 10, "Actual QPS": 671082.4, "Average Read Latency": 30220.6, "Median Read Latency": 24900.7, "Tail Read Latency": 254034.5, "Average Update Latency": 30377.6, "Median Update Latency": 24913.7, "Tail Update Latency": 255033.5}], ["fibre", "memcached fibre", {"Update Ratio": 10, "Actual QPS": 672847.0, "Average Read Latency": 31129.7, "Median Read Latency": 27301.0, "Tail Read Latency": 92314.4, "Average Update Latency": 31493.5, "Median Update Latency": 27378.8, "Tail Update Latency": 222712.9}], ["fibre", "memcached fibre", {"Update Ratio": 5, "Actual QPS": 684419.5, "Average Read Latency": 34114.6, "Median Read Latency": 29765.8, "Tail Read Latency": 257535.6, "Average Update Latency": 34848.1, "Median Update Latency": 29566.2, "Tail Update Latency": 264376.6}], ["vanilla", "memcached vanilla", {"Update Ratio": 10, "Actual QPS": 764493.6, "Average Read Latency": 22398.2, "Median Read Latency": 12207.4, "Tail Read Latency": 242402.3, "Average Update Latency": 22038.2, "Median Update Latency": 12234.9, "Tail Update Latency": 240284.4}], ["fibre", "memcached fibre", {"Update Ratio": 40, "Actual QPS": 608626.8, "Average Read Latency": 30286.6, "Median Read Latency": 25633.0, "Tail Read Latency": 240653.7, "Average Update Latency": 30652.8, "Median Update Latency": 25603.9, "Tail Update Latency": 246478.7}], ["vanilla", "memcached vanilla", {"Update Ratio": 50, "Actual QPS": 686252.6, "Average Read Latency": 31374.4, "Median Read Latency": 30501.6, "Tail 
Read Latency": 63738.2, "Average Update Latency": 31347.0, "Median Update Latency": 30428.1, "Tail Update Latency": 63812.1}], ["forall", "memcached forall", {"Update Ratio": 10, "Actual QPS": 595821.8, "Average Read Latency": 16941.1, "Median Read Latency": 13280.7, "Tail Read Latency": 36355.4, "Average Update Latency": 17445.3, "Median Update Latency": 13307.1, "Tail Update Latency": 39244.2}], ["forall", "memcached forall", {"Update Ratio": 15, "Actual QPS": 590278.9, "Average Read Latency": 16989.3, "Median Read Latency": 13312.9, "Tail Read Latency": 37996.6, "Average Update Latency": 17547.9, "Median Update Latency": 13344.8, "Tail Update Latency": 39578.1}], ["fibre", "memcached fibre", {"Update Ratio": 40, "Actual QPS": 619433.4, "Average Read Latency": 30159.8, "Median Read Latency": 25357.0, "Tail Read Latency": 251306.7, "Average Update Latency": 30411.0, "Median Update Latency": 25361.7, "Tail Update Latency": 254805.8}], ["forall", "memcached forall", {"Update Ratio": 5, "Actual QPS": 593761.3, "Average Read Latency": 16959.6, "Median Read Latency": 13400.4, "Tail Read Latency": 47335.2, "Average Update Latency": 17053.7, "Median Update Latency": 13376.7, "Tail Update Latency": 42239.3}], ["forall", "memcached forall", {"Update Ratio": 10, "Actual QPS": 597040.0, "Average Read Latency": 17352.0, "Median Read Latency": 12940.7, "Tail Read Latency": 65486.0, "Average Update Latency": 17545.6, "Median Update Latency": 12969.6, "Tail Update Latency": 92147.2}], ["forall", "memcached forall", {"Update Ratio": 40, "Actual QPS": 587327.3, "Average Read Latency": 14420.7, "Median Read Latency": 13053.8, "Tail Read Latency": 28889.7, "Average Update Latency": 14766.7, "Median Update Latency": 13061.8, "Tail Update Latency": 29130.7}], ["fibre", "memcached fibre", {"Update Ratio": 50, "Actual QPS": 619688.5, "Average Read Latency": 26716.2, "Median Read Latency": 22403.5, "Tail Read Latency": 245740.0, "Average Update Latency": 27090.3, "Median Update Latency": 
22366.9, "Tail Update Latency": 248135.7}], ["vanilla", "memcached vanilla", {"Update Ratio": 5, "Actual QPS": 765840.6, "Average Read Latency": 22396.6, "Median Read Latency": 12223.6, "Tail Read Latency": 242722.6, "Average Update Latency": 23164.3, "Median Update Latency": 12122.3, "Tail Update Latency": 246732.7}], ["vanilla", "memcached vanilla", {"Update Ratio": 15, "Actual QPS": 754242.6, "Average Read Latency": 22345.6, "Median Read Latency": 13000.6, "Tail Read Latency": 242559.2, "Average Update Latency": 21803.8, "Median Update Latency": 13027.6, "Tail Update Latency": 240360.2}], ["vanilla", "memcached vanilla", {"Update Ratio": 20, "Actual QPS": 750259.1, "Average Read Latency": 22937.1, "Median Read Latency": 13719.7, "Tail Read Latency": 246024.2, "Average Update Latency": 23195.7, "Median Update Latency": 13716.5, "Tail Update Latency": 247084.9}], ["vanilla", "memcached vanilla", {"Update Ratio": 10, "Actual QPS": 761986.1, "Average Read Latency": 21767.5, "Median Read Latency": 12522.9, "Tail Read Latency": 240214.6, "Average Update Latency": 21621.3, "Median Update Latency": 12541.2, "Tail Update Latency": 239749.6}], ["forall", "memcached forall", {"Update Ratio": 50, "Actual QPS": 566851.8, "Average Read Latency": 14265.5, "Median Read Latency": 13544.1, "Tail Read Latency": 27746.8, "Average Update Latency": 14507.3, "Median Update Latency": 13553.5, "Tail Update Latency": 28471.6}], ["vanilla", "memcached vanilla", {"Update Ratio": 50, "Actual QPS": 708028.0, "Average Read Latency": 29145.3, "Median Read Latency": 23983.1, "Tail Read Latency": 237401.9, "Average Update Latency": 29124.0, "Median Update Latency": 24027.6, "Tail Update Latency": 235432.4}], ["forall", "memcached forall", {"Update Ratio": 5, "Actual QPS": 597227.5, "Average Read Latency": 17511.6, "Median Read Latency": 13228.9, "Tail Read Latency": 58767.4, "Average Update Latency": 18215.1, "Median Update Latency": 13249.5, "Tail Update Latency": 81599.3}], ["fibre", 
"memcached fibre", {"Update Ratio": 50, "Actual QPS": 599839.9, "Average Read Latency": 26555.1, "Median Read Latency": 21691.3, "Tail Read Latency": 245066.2, "Average Update Latency": 27096.6, "Median Update Latency": 21758.1, "Tail Update Latency": 248899.9}], ["forall", "memcached forall", {"Update Ratio": 15, "Actual QPS": 594852.2, "Average Read Latency": 17337.0, "Median Read Latency": 13137.9, "Tail Read Latency": 37979.0, "Average Update Latency": 17635.4, "Median Update Latency": 13176.4, "Tail Update Latency": 48130.7}], ["vanilla", "memcached vanilla", {"Update Ratio": 40, "Actual QPS": 718833.1, "Average Read Latency": 27863.0, "Median Read Latency": 22526.4, "Tail Read Latency": 245434.6, "Average Update Latency": 27699.5, "Median Update Latency": 22539.8, "Tail Update Latency": 243109.8}], ["fibre", "memcached fibre", {"Update Ratio": 40, "Actual QPS": 628507.6, "Average Read Latency": 28670.7, "Median Read Latency": 24369.4, "Tail Read Latency": 249240.9, "Average Update Latency": 28858.2, "Median Update Latency": 24429.6, "Tail Update Latency": 249891.7}], ["fibre", "memcached fibre", {"Update Ratio": 10, "Actual QPS": 666251.9, "Average Read Latency": 28297.4, "Median Read Latency": 24552.7, "Tail Read Latency": 76372.4, "Average Update Latency": 28566.0, "Median Update Latency": 24466.9, "Tail Update Latency": 90321.1}], ["vanilla", "memcached vanilla", {"Update Ratio": 20, "Actual QPS": 752354.4, "Average Read Latency": 23536.8, "Median Read Latency": 13936.7, "Tail Read Latency": 248639.8, "Average Update Latency": 23798.2, "Median Update Latency": 14040.6, "Tail Update Latency": 247961.7}], ["forall", "memcached forall", {"Update Ratio": 50, "Actual QPS": 565950.3, "Average Read Latency": 14232.6, "Median Read Latency": 13373.6, "Tail Read Latency": 26848.2, "Average Update Latency": 14466.4, "Median Update Latency": 13384.9, "Tail Update Latency": 27958.6}], ["fibre", "memcached fibre", {"Update Ratio": 15, "Actual QPS": 674044.3, "Average 
Read Latency": 29547.1, "Median Read Latency": 23376.7, "Tail Read Latency": 257169.3, "Average Update Latency": 30137.2, "Median Update Latency": 23423.1, "Tail Update Latency": 262729.5}], ["forall", "memcached forall", {"Update Ratio": 10, "Actual QPS": 589565.9, "Average Read Latency": 16941.2, "Median Read Latency": 13278.3, "Tail Read Latency": 40040.0, "Average Update Latency": 17308.7, "Median Update Latency": 13285.2, "Tail Update Latency": 44209.1}], ["fibre", "memcached fibre", {"Update Ratio": 50, "Actual QPS": 609521.3, "Average Read Latency": 31260.8, "Median Read Latency": 26017.5, "Tail Read Latency": 255078.0, "Average Update Latency": 31635.9, "Median Update Latency": 26086.2, "Tail Update Latency": 256862.8}], ["vanilla", "memcached vanilla", {"Update Ratio": 5, "Actual QPS": 767111.6, "Average Read Latency": 21590.9, "Median Read Latency": 11566.7, "Tail Read Latency": 239628.7, "Average Update Latency": 21399.7, "Median Update Latency": 11585.0, "Tail Update Latency": 239823.6}], ["forall", "memcached forall", {"Update Ratio": 5, "Actual QPS": 599244.3, "Average Read Latency": 17045.5, "Median Read Latency": 13243.6, "Tail Read Latency": 48650.1, "Average Update Latency": 16998.4, "Median Update Latency": 13280.5, "Tail Update Latency": 49466.4}], ["forall", "memcached forall", {"Update Ratio": 15, "Actual QPS": 592856.6, "Average Read Latency": 16172.7, "Median Read Latency": 13278.0, "Tail Read Latency": 31872.0, "Average Update Latency": 16571.3, "Median Update Latency": 13296.0, "Tail Update Latency": 33983.7}], ["fibre", "memcached fibre", {"Update Ratio": 15, "Actual QPS": 662998.4, "Average Read Latency": 26400.5, "Median Read Latency": 22183.5, "Tail Read Latency": 247443.9, "Average Update Latency": 26822.0, "Median Update Latency": 22267.6, "Tail Update Latency": 250068.2}], ["vanilla", "memcached vanilla", {"Update Ratio": 20, "Actual QPS": 748345.8, "Average Read Latency": 23985.2, "Median Read Latency": 14548.6, "Tail Read 
Latency": 249111.8, "Average Update Latency": 24053.4, "Median Update Latency": 14609.1, "Tail Update Latency": 249310.7}], ["fibre", "memcached fibre", {"Update Ratio": 5, "Actual QPS": 685611.1, "Average Read Latency": 33820.8, "Median Read Latency": 29843.9, "Tail Read Latency": 249003.5, "Average Update Latency": 34612.2, "Median Update Latency": 29937.2, "Tail Update Latency": 265505.6}], ["fibre", "memcached fibre", {"Update Ratio": 5, "Actual QPS": 685539.2, "Average Read Latency": 37016.3, "Median Read Latency": 32225.9, "Tail Read Latency": 263388.6, "Average Update Latency": 37284.7, "Median Update Latency": 32289.9, "Tail Update Latency": 262337.2}], ["forall", "memcached forall", {"Update Ratio": 30, "Actual QPS": 591380.5, "Average Read Latency": 17979.5, "Median Read Latency": 13499.7, "Tail Read Latency": 33487.1, "Average Update Latency": 18303.9, "Median Update Latency": 13523.5, "Tail Update Latency": 38051.2}], ["forall", "memcached forall", {"Update Ratio": 20, "Actual QPS": 593734.3, "Average Read Latency": 16688.8, "Median Read Latency": 13365.9, "Tail Read Latency": 30994.1, "Average Update Latency": 16892.2, "Median Update Latency": 13374.8, "Tail Update Latency": 32244.0}], ["vanilla", "memcached vanilla", {"Update Ratio": 10, "Actual QPS": 761087.3, "Average Read Latency": 22618.8, "Median Read Latency": 12952.0, "Tail Read Latency": 244425.0, "Average Update Latency": 22239.2, "Median Update Latency": 12910.4, "Tail Update Latency": 241797.4}], ["vanilla", "memcached vanilla", {"Update Ratio": 50, "Actual QPS": 689123.1, "Average Read Latency": 31148.1, "Median Read Latency": 29977.4, "Tail Read Latency": 65004.9, "Average Update Latency": 31122.1, "Median Update Latency": 30043.8, "Tail Update Latency": 64488.0}], ["forall", "memcached forall", {"Update Ratio": 20, "Actual QPS": 586549.0, "Average Read Latency": 15286.4, "Median Read Latency": 13246.3, "Tail Read Latency": 30564.7, "Average Update Latency": 16016.0, "Median Update 
Latency": 13256.5, "Tail Update Latency": 32001.1}], ["vanilla", "memcached vanilla", {"Update Ratio": 15, "Actual QPS": 759348.9, "Average Read Latency": 22876.0, "Median Read Latency": 12931.3, "Tail Read Latency": 244584.8, "Average Update Latency": 22772.0, "Median Update Latency": 12963.1, "Tail Update Latency": 244540.3}], ["forall", "memcached forall", {"Update Ratio": 30, "Actual QPS": 590171.0, "Average Read Latency": 17542.8, "Median Read Latency": 13058.2, "Tail Read Latency": 37919.8, "Average Update Latency": 17986.1, "Median Update Latency": 13075.7, "Tail Update Latency": 47181.7}], ["vanilla", "memcached vanilla", {"Update Ratio": 30, "Actual QPS": 736755.8, "Average Read Latency": 25724.9, "Median Read Latency": 17214.0, "Tail Read Latency": 251561.3, "Average Update Latency": 25647.4, "Median Update Latency": 17123.3, "Tail Update Latency": 251594.7}], ["forall", "memcached forall", {"Update Ratio": 40, "Actual QPS": 594613.0, "Average Read Latency": 15627.5, "Median Read Latency": 13012.1, "Tail Read Latency": 29321.6, "Average Update Latency": 15866.2, "Median Update Latency": 13034.3, "Tail Update Latency": 29997.2}], ["vanilla", "memcached vanilla", {"Update Ratio": 15, "Actual QPS": 753799.1, "Average Read Latency": 22983.2, "Median Read Latency": 13361.2, "Tail Read Latency": 245494.2, "Average Update Latency": 23014.0, "Median Update Latency": 13407.2, "Tail Update Latency": 245242.0}], ["fibre", "memcached fibre", {"Update Ratio": 15, "Actual QPS": 659243.2, "Average Read Latency": 27600.4, "Median Read Latency": 23642.4, "Tail Read Latency": 75626.1, "Average Update Latency": 28038.3, "Median Update Latency": 23443.4, "Tail Update Latency": 135871.1}], ["forall", "memcached forall", {"Update Ratio": 50, "Actual QPS": 583513.8, "Average Read Latency": 15354.0, "Median Read Latency": 13050.8, "Tail Read Latency": 29195.2, "Average Update Latency": 15693.2, "Median Update Latency": 13051.2, "Tail Update Latency": 29477.5}], ["forall", 
"memcached forall", {"Update Ratio": 40, "Actual QPS": 588410.8, "Average Read Latency": 17002.9, "Median Read Latency": 13316.4, "Tail Read Latency": 29604.6, "Average Update Latency": 17357.7, "Median Update Latency": 13320.0, "Tail Update Latency": 31441.1}], ["vanilla", "memcached vanilla", {"Update Ratio": 40, "Actual QPS": 716262.2, "Average Read Latency": 28975.9, "Median Read Latency": 25327.0, "Tail Read Latency": 228397.2, "Average Update Latency": 29063.3, "Median Update Latency": 25431.2, "Tail Update Latency": 228666.6}], ["vanilla", "memcached vanilla", {"Update Ratio": 30, "Actual QPS": 742447.9, "Average Read Latency": 24479.8, "Median Read Latency": 14438.4, "Tail Read Latency": 251570.6, "Average Update Latency": 24354.2, "Median Update Latency": 14496.9, "Tail Update Latency": 250835.7}], ["forall", "memcached forall", {"Update Ratio": 30, "Actual QPS": 583473.1, "Average Read Latency": 15821.5, "Median Read Latency": 13189.8, "Tail Read Latency": 30907.0, "Average Update Latency": 16668.2, "Median Update Latency": 13208.3, "Tail Update Latency": 33617.6}], ["vanilla", "memcached vanilla", {"Update Ratio": 40, "Actual QPS": 735416.8, "Average Read Latency": 24244.3, "Median Read Latency": 13281.7, "Tail Read Latency": 254833.8, "Average Update Latency": 24481.9, "Median Update Latency": 13318.5, "Tail Update Latency": 254867.7}]]
Index: doc/theses/thierry_delisle_PhD/thesis/data/yield.jax
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/yield.jax	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
+++ doc/theses/thierry_delisle_PhD/thesis/data/yield.jax	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -0,0 +1,1 @@
+[["rdq-yield-tokio", "./rdq-yield-tokio -p 24 -d 10 -t 2400", {"Duration (ms)": 10030.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 415071979.0, "Ops per second": 41507197.0, "ns per ops": 24.0, "Ops per threads": 172946.0, "Ops per procs": 17294665.0, "Ops/sec/procs": 1729466.0, "ns per ops/procs": 579.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 24 -d 10 -t 2400", {"Duration (ms)": 0.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 868181095.0, "Ops per second": 86818109.5, "ns per ops": 11.53, "Ops per threads": 361742.0, "Ops per procs": 36174212.0, "Ops/sec/procs": 3617421.23, "ns per ops/procs": 276.64}],["rdq-yield-tokio", "./rdq-yield-tokio -p 1 -d 10 -t 100", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 90502272.0, "Ops per second": 9050227.0, "ns per ops": 111.0, "Ops per threads": 905022.0, "Ops per procs": 90502272.0, "Ops/sec/procs": 9050227.0, "ns per ops/procs": 111.0}],["rdq-yield-fibre", "./rdq-yield-fibre -p 1 -d 10 -t 100", {"Duration (ms)": 10008.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 100521509.0, "Ops per second": 10043710.22, "ns per ops": 99.56, "Ops per threads": 1005215.0, "Ops per procs": 100521509.0, "Ops/sec/procs": 10043710.22, "ns per ops/procs": 99.56}],["rdq-yield-cfa", "./rdq-yield-cfa -p 8 -d 10 -t 800", {"Duration (ms)": 0.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 335861578.0, "Ops per second": 33586157.8, "ns per ops": 29.84, "Ops per threads": 419826.0, "Ops per procs": 41982697.0, "Ops/sec/procs": 4198269.72, "ns per ops/procs": 238.69}],["rdq-yield-tokio", "./rdq-yield-tokio -p 8 -d 10 -t 800", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 336458246.0, "Ops per second": 33645824.0, "ns per ops": 30.0, "Ops per threads": 420572.0, "Ops per 
procs": 42057280.0, "Ops/sec/procs": 4205728.0, "ns per ops/procs": 240.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 24 -d 10 -t 2400", {"Duration (ms)": 0.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 863132609.0, "Ops per second": 86313260.9, "ns per ops": 11.59, "Ops per threads": 359638.0, "Ops per procs": 35963858.0, "Ops/sec/procs": 3596385.87, "ns per ops/procs": 278.27}],["rdq-yield-cfa", "./rdq-yield-cfa -p 16 -d 10 -t 1600", {"Duration (ms)": 0.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 644981830.0, "Ops per second": 64498183.0, "ns per ops": 15.54, "Ops per threads": 403113.0, "Ops per procs": 40311364.0, "Ops/sec/procs": 4031136.44, "ns per ops/procs": 248.71}],["rdq-yield-fibre", "./rdq-yield-fibre -p 16 -d 10 -t 1600", {"Duration (ms)": 10022.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 1001843527.0, "Ops per second": 99961538.2, "ns per ops": 10.0, "Ops per threads": 626152.0, "Ops per procs": 62615220.0, "Ops/sec/procs": 6247596.14, "ns per ops/procs": 160.06}],["rdq-yield-fibre", "./rdq-yield-fibre -p 24 -d 10 -t 2400", {"Duration (ms)": 10021.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 1448950383.0, "Ops per second": 144584592.02, "ns per ops": 6.92, "Ops per threads": 603729.0, "Ops per procs": 60372932.0, "Ops/sec/procs": 6024358.0, "ns per ops/procs": 165.99}],["rdq-yield-cfa", "./rdq-yield-cfa -p 1 -d 10 -t 100", {"Duration (ms)": 0.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 67367876.0, "Ops per second": 6736787.6, "ns per ops": 148.69, "Ops per threads": 673678.0, "Ops per procs": 67367876.0, "Ops/sec/procs": 6736787.6, "ns per ops/procs": 148.69}],["rdq-yield-cfa", "./rdq-yield-cfa -p 16 -d 10 -t 1600", {"Duration (ms)": 0.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 635515991.0, "Ops per 
second": 63551599.1, "ns per ops": 15.77, "Ops per threads": 397197.0, "Ops per procs": 39719749.0, "Ops/sec/procs": 3971974.94, "ns per ops/procs": 252.34}],["rdq-yield-tokio", "./rdq-yield-tokio -p 16 -d 10 -t 1600", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 647292406.0, "Ops per second": 64729240.0, "ns per ops": 15.0, "Ops per threads": 404557.0, "Ops per procs": 40455775.0, "Ops/sec/procs": 4045577.0, "ns per ops/procs": 247.0}],["rdq-yield-go", "./rdq-yield-go -p 8 -d 10 -t 800", {"Duration (ms)": 10002.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 32040808.0, "Ops per second": 3203363.64, "ns per ops": 312.17, "Ops per threads": 40051.0, "Ops per procs": 4005101.0, "Ops/sec/procs": 400420.45, "ns per ops/procs": 2497.37}],["rdq-yield-go", "./rdq-yield-go -p 24 -d 10 -t 2400", {"Duration (ms)": 10003.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 23453819.0, "Ops per second": 2344620.16, "ns per ops": 426.51, "Ops per threads": 9772.0, "Ops per procs": 977242.0, "Ops/sec/procs": 97692.51, "ns per ops/procs": 10236.2}],["rdq-yield-cfa", "./rdq-yield-cfa -p 1 -d 10 -t 100", {"Duration (ms)": 0.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 67463939.0, "Ops per second": 6746393.9, "ns per ops": 148.47, "Ops per threads": 674639.0, "Ops per procs": 67463939.0, "Ops/sec/procs": 6746393.9, "ns per ops/procs": 148.47}],["rdq-yield-fibre", "./rdq-yield-fibre -p 8 -d 10 -t 800", {"Duration (ms)": 10009.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 447365602.0, "Ops per second": 44693347.97, "ns per ops": 22.37, "Ops per threads": 559207.0, "Ops per procs": 55920700.0, "Ops/sec/procs": 5586668.5, "ns per ops/procs": 179.0}],["rdq-yield-fibre", "./rdq-yield-fibre -p 16 -d 10 -t 1600", {"Duration (ms)": 10022.0, "Number of processors": 16.0, "Number 
of threads": 1600.0, "Total Operations(ops)": 1004323666.0, "Ops per second": 100208984.11, "ns per ops": 9.98, "Ops per threads": 627702.0, "Ops per procs": 62770229.0, "Ops/sec/procs": 6263061.51, "ns per ops/procs": 159.67}],["rdq-yield-fibre", "./rdq-yield-fibre -p 24 -d 10 -t 2400", {"Duration (ms)": 10022.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 1438830223.0, "Ops per second": 143560196.99, "ns per ops": 6.97, "Ops per threads": 599512.0, "Ops per procs": 59951259.0, "Ops/sec/procs": 5981674.87, "ns per ops/procs": 167.18}],["rdq-yield-go", "./rdq-yield-go -p 16 -d 10 -t 1600", {"Duration (ms)": 10002.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 26637182.0, "Ops per second": 2663052.8, "ns per ops": 375.51, "Ops per threads": 16648.0, "Ops per procs": 1664823.0, "Ops/sec/procs": 166440.8, "ns per ops/procs": 6008.14}],["rdq-yield-cfa", "./rdq-yield-cfa -p 16 -d 10 -t 1600", {"Duration (ms)": 0.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 638755972.0, "Ops per second": 63875597.2, "ns per ops": 15.7, "Ops per threads": 399222.0, "Ops per procs": 39922248.0, "Ops/sec/procs": 3992224.83, "ns per ops/procs": 251.13}],["rdq-yield-tokio", "./rdq-yield-tokio -p 24 -d 10 -t 2400", {"Duration (ms)": 10005.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 430182652.0, "Ops per second": 43018265.0, "ns per ops": 23.0, "Ops per threads": 179242.0, "Ops per procs": 17924277.0, "Ops/sec/procs": 1792427.0, "ns per ops/procs": 558.0}],["rdq-yield-fibre", "./rdq-yield-fibre -p 1 -d 10 -t 100", {"Duration (ms)": 10008.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 100472124.0, "Ops per second": 10038792.53, "ns per ops": 99.61, "Ops per threads": 1004721.0, "Ops per procs": 100472124.0, "Ops/sec/procs": 10038792.53, "ns per ops/procs": 99.61}],["rdq-yield-go", "./rdq-yield-go 
-p 1 -d 10 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 117008332.0, "Ops per second": 11700304.5, "ns per ops": 85.47, "Ops per threads": 1170083.0, "Ops per procs": 117008332.0, "Ops/sec/procs": 11700304.5, "ns per ops/procs": 85.47}],["rdq-yield-go", "./rdq-yield-go -p 24 -d 10 -t 2400", {"Duration (ms)": 10003.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 23810780.0, "Ops per second": 2380314.39, "ns per ops": 420.11, "Ops per threads": 9921.0, "Ops per procs": 992115.0, "Ops/sec/procs": 99179.77, "ns per ops/procs": 10082.7}],["rdq-yield-tokio", "./rdq-yield-tokio -p 16 -d 10 -t 1600", {"Duration (ms)": 10000.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 675450930.0, "Ops per second": 67545093.0, "ns per ops": 14.0, "Ops per threads": 422156.0, "Ops per procs": 42215683.0, "Ops/sec/procs": 4221568.0, "ns per ops/procs": 236.0}],["rdq-yield-fibre", "./rdq-yield-fibre -p 1 -d 10 -t 100", {"Duration (ms)": 10008.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 100428805.0, "Ops per second": 10034445.55, "ns per ops": 99.66, "Ops per threads": 1004288.0, "Ops per procs": 100428805.0, "Ops/sec/procs": 10034445.55, "ns per ops/procs": 99.66}],["rdq-yield-tokio", "./rdq-yield-tokio -p 1 -d 10 -t 100", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 90311107.0, "Ops per second": 9031110.0, "ns per ops": 111.0, "Ops per threads": 903111.0, "Ops per procs": 90311107.0, "Ops/sec/procs": 9031110.0, "ns per ops/procs": 111.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 8 -d 10 -t 800", {"Duration (ms)": 0.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 333502891.0, "Ops per second": 33350289.1, "ns per ops": 30.05, "Ops per threads": 416878.0, "Ops per procs": 41687861.0, "Ops/sec/procs": 
4168786.14, "ns per ops/procs": 240.38}],["rdq-yield-go", "./rdq-yield-go -p 8 -d 10 -t 800", {"Duration (ms)": 10002.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 32147984.0, "Ops per second": 3214143.7, "ns per ops": 311.12, "Ops per threads": 40184.0, "Ops per procs": 4018498.0, "Ops/sec/procs": 401767.96, "ns per ops/procs": 2489.0}],["rdq-yield-tokio", "./rdq-yield-tokio -p 16 -d 10 -t 1600", {"Duration (ms)": 10003.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 689066173.0, "Ops per second": 68906617.0, "ns per ops": 14.0, "Ops per threads": 430666.0, "Ops per procs": 43066635.0, "Ops/sec/procs": 4306663.0, "ns per ops/procs": 232.0}],["rdq-yield-fibre", "./rdq-yield-fibre -p 8 -d 10 -t 800", {"Duration (ms)": 10019.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 441418651.0, "Ops per second": 44056605.57, "ns per ops": 22.7, "Ops per threads": 551773.0, "Ops per procs": 55177331.0, "Ops/sec/procs": 5507075.7, "ns per ops/procs": 181.58}],["rdq-yield-fibre", "./rdq-yield-fibre -p 16 -d 10 -t 1600", {"Duration (ms)": 10028.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 999658036.0, "Ops per second": 99685104.34, "ns per ops": 10.03, "Ops per threads": 624786.0, "Ops per procs": 62478627.0, "Ops/sec/procs": 6230319.02, "ns per ops/procs": 160.51}],["rdq-yield-go", "./rdq-yield-go -p 16 -d 10 -t 1600", {"Duration (ms)": 10002.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 26737174.0, "Ops per second": 2672964.85, "ns per ops": 374.12, "Ops per threads": 16710.0, "Ops per procs": 1671073.0, "Ops/sec/procs": 167060.3, "ns per ops/procs": 5985.86}],["rdq-yield-tokio", "./rdq-yield-tokio -p 1 -d 10 -t 100", {"Duration (ms)": 10099.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 90695862.0, "Ops per second": 9069586.0, "ns per ops": 
111.0, "Ops per threads": 906958.0, "Ops per procs": 90695862.0, "Ops/sec/procs": 9069586.0, "ns per ops/procs": 111.0}],["rdq-yield-fibre", "./rdq-yield-fibre -p 24 -d 10 -t 2400", {"Duration (ms)": 10004.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 1440698903.0, "Ops per second": 144010330.78, "ns per ops": 6.94, "Ops per threads": 600291.0, "Ops per procs": 60029120.0, "Ops/sec/procs": 6000430.45, "ns per ops/procs": 166.65}],["rdq-yield-go", "./rdq-yield-go -p 24 -d 10 -t 2400", {"Duration (ms)": 10003.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 23253629.0, "Ops per second": 2324527.32, "ns per ops": 430.19, "Ops per threads": 9689.0, "Ops per procs": 968901.0, "Ops/sec/procs": 96855.3, "ns per ops/procs": 10324.68}],["rdq-yield-go", "./rdq-yield-go -p 1 -d 10 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 116842959.0, "Ops per second": 11683781.88, "ns per ops": 85.59, "Ops per threads": 1168429.0, "Ops per procs": 116842959.0, "Ops/sec/procs": 11683781.88, "ns per ops/procs": 85.59}],["rdq-yield-cfa", "./rdq-yield-cfa -p 1 -d 10 -t 100", {"Duration (ms)": 0.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 67109099.0, "Ops per second": 6710909.9, "ns per ops": 149.26, "Ops per threads": 671090.0, "Ops per procs": 67109099.0, "Ops/sec/procs": 6710909.9, "ns per ops/procs": 149.26}],["rdq-yield-go", "./rdq-yield-go -p 1 -d 10 -t 100", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 100.0, "Total Operations(ops)": 116630659.0, "Ops per second": 11662547.92, "ns per ops": 85.74, "Ops per threads": 1166306.0, "Ops per procs": 116630659.0, "Ops/sec/procs": 11662547.92, "ns per ops/procs": 85.74}],["rdq-yield-fibre", "./rdq-yield-fibre -p 8 -d 10 -t 800", {"Duration (ms)": 10009.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total 
Operations(ops)": 438143011.0, "Ops per second": 43771074.82, "ns per ops": 22.85, "Ops per threads": 547678.0, "Ops per procs": 54767876.0, "Ops/sec/procs": 5471384.35, "ns per ops/procs": 182.77}],["rdq-yield-go", "./rdq-yield-go -p 16 -d 10 -t 1600", {"Duration (ms)": 10002.0, "Number of processors": 16.0, "Number of threads": 1600.0, "Total Operations(ops)": 26832358.0, "Ops per second": 2682510.65, "ns per ops": 372.79, "Ops per threads": 16770.0, "Ops per procs": 1677022.0, "Ops/sec/procs": 167656.92, "ns per ops/procs": 5964.56}],["rdq-yield-tokio", "./rdq-yield-tokio -p 8 -d 10 -t 800", {"Duration (ms)": 10100.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 357826049.0, "Ops per second": 35782604.0, "ns per ops": 28.0, "Ops per threads": 447282.0, "Ops per procs": 44728256.0, "Ops/sec/procs": 4472825.0, "ns per ops/procs": 225.0}],["rdq-yield-tokio", "./rdq-yield-tokio -p 8 -d 10 -t 800", {"Duration (ms)": 10100.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 331894279.0, "Ops per second": 33189427.0, "ns per ops": 30.0, "Ops per threads": 414867.0, "Ops per procs": 41486784.0, "Ops/sec/procs": 4148678.0, "ns per ops/procs": 243.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 8 -d 10 -t 800", {"Duration (ms)": 0.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 335906841.0, "Ops per second": 33590684.1, "ns per ops": 29.83, "Ops per threads": 419883.0, "Ops per procs": 41988355.0, "Ops/sec/procs": 4198835.51, "ns per ops/procs": 238.65}],["rdq-yield-go", "./rdq-yield-go -p 8 -d 10 -t 800", {"Duration (ms)": 10002.0, "Number of processors": 8.0, "Number of threads": 800.0, "Total Operations(ops)": 31669651.0, "Ops per second": 3166283.11, "ns per ops": 315.83, "Ops per threads": 39587.0, "Ops per procs": 3958706.0, "Ops/sec/procs": 395785.39, "ns per ops/procs": 2526.62}],["rdq-yield-tokio", "./rdq-yield-tokio -p 24 -d 10 -t 2400", {"Duration (ms)": 
10005.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 431374888.0, "Ops per second": 43137488.0, "ns per ops": 23.0, "Ops per threads": 179739.0, "Ops per procs": 17973953.0, "Ops/sec/procs": 1797395.0, "ns per ops/procs": 556.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 24 -d 10 -t 2400", {"Duration (ms)": 0.0, "Number of processors": 24.0, "Number of threads": 2400.0, "Total Operations(ops)": 859381490.0, "Ops per second": 85938149.0, "ns per ops": 11.64, "Ops per threads": 358075.0, "Ops per procs": 35807562.0, "Ops/sec/procs": 3580756.21, "ns per ops/procs": 279.37}]]
Index: doc/theses/thierry_delisle_PhD/thesis/data/yield.low.jax
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/data/yield.low.jax	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
+++ doc/theses/thierry_delisle_PhD/thesis/data/yield.low.jax	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -0,0 +1,1 @@
+[["rdq-yield-cfa", "./rdq-yield-cfa -p 24 -d 10 -t 24", {"Duration (ms)": 0.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 198228606.0, "Ops per second": 19822860.6, "ns per ops": 50.49, "Ops per threads": 8259525.0, "Ops per procs": 8259525.0, "Ops/sec/procs": 825952.53, "ns per ops/procs": 1211.87}],["rdq-yield-tokio", "./rdq-yield-tokio -p 1 -d 10 -t 1", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 91771391.0, "Ops per second": 9177139.0, "ns per ops": 110.0, "Ops per threads": 91771391.0, "Ops per procs": 91771391.0, "Ops/sec/procs": 9177139.0, "ns per ops/procs": 110.0}],["rdq-yield-tokio", "./rdq-yield-tokio -p 16 -d 10 -t 16", {"Duration (ms)": 10099.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 1155463759.0, "Ops per second": 115546375.0, "ns per ops": 8.0, "Ops per threads": 72216484.0, "Ops per procs": 72216484.0, "Ops/sec/procs": 7221648.0, "ns per ops/procs": 139.0}],["rdq-yield-go", "./rdq-yield-go -p 16 -d 10 -t 16", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 17081410.0, "Ops per second": 1707820.12, "ns per ops": 585.54, "Ops per threads": 1067588.0, "Ops per procs": 1067588.0, "Ops/sec/procs": 106738.76, "ns per ops/procs": 9368.67}],["rdq-yield-fibre", "./rdq-yield-fibre -p 8 -d 10 -t 8", {"Duration (ms)": 10007.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 2151927291.0, "Ops per second": 215027860.62, "ns per ops": 4.65, "Ops per threads": 268990911.0, "Ops per procs": 268990911.0, "Ops/sec/procs": 26878482.58, "ns per ops/procs": 37.2}],["rdq-yield-tokio", "./rdq-yield-tokio -p 24 -d 10 -t 24", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 1281636563.0, "Ops per second": 128163656.0, "ns per ops": 7.0, "Ops per threads": 53401523.0, "Ops per 
procs": 53401523.0, "Ops/sec/procs": 5340152.0, "ns per ops/procs": 189.0}],["rdq-yield-fibre", "./rdq-yield-fibre -p 24 -d 10 -t 24", {"Duration (ms)": 10004.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 5317671032.0, "Ops per second": 531537767.14, "ns per ops": 1.88, "Ops per threads": 221569626.0, "Ops per procs": 221569626.0, "Ops/sec/procs": 22147406.96, "ns per ops/procs": 45.15}],["rdq-yield-cfa", "./rdq-yield-cfa -p 8 -d 10 -t 8", {"Duration (ms)": 0.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 158323555.0, "Ops per second": 15832355.5, "ns per ops": 63.29, "Ops per threads": 19790444.0, "Ops per procs": 19790444.0, "Ops/sec/procs": 1979044.44, "ns per ops/procs": 506.3}],["rdq-yield-go", "./rdq-yield-go -p 8 -d 10 -t 8", {"Duration (ms)": 10001.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 27128459.0, "Ops per second": 2712537.66, "ns per ops": 368.66, "Ops per threads": 3391057.0, "Ops per procs": 3391057.0, "Ops/sec/procs": 339067.21, "ns per ops/procs": 2949.27}],["rdq-yield-go", "./rdq-yield-go -p 8 -d 10 -t 8", {"Duration (ms)": 10001.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 27155837.0, "Ops per second": 2715305.28, "ns per ops": 368.28, "Ops per threads": 3394479.0, "Ops per procs": 3394479.0, "Ops/sec/procs": 339413.16, "ns per ops/procs": 2946.26}],["rdq-yield-cfa", "./rdq-yield-cfa -p 16 -d 10 -t 16", {"Duration (ms)": 0.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 211249329.0, "Ops per second": 21124932.9, "ns per ops": 47.45, "Ops per threads": 13203083.0, "Ops per procs": 13203083.0, "Ops/sec/procs": 1320308.31, "ns per ops/procs": 759.17}],["rdq-yield-go", "./rdq-yield-go -p 1 -d 10 -t 1", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 85555889.0, "Ops per second": 8555216.69, "ns per ops": 
116.89, "Ops per threads": 85555889.0, "Ops per procs": 85555889.0, "Ops/sec/procs": 8555216.69, "ns per ops/procs": 116.89}],["rdq-yield-tokio", "./rdq-yield-tokio -p 8 -d 10 -t 8", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 390829055.0, "Ops per second": 39082905.0, "ns per ops": 25.0, "Ops per threads": 48853631.0, "Ops per procs": 48853631.0, "Ops/sec/procs": 4885363.0, "ns per ops/procs": 206.0}],["rdq-yield-go", "./rdq-yield-go -p 24 -d 10 -t 24", {"Duration (ms)": 10002.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 13250872.0, "Ops per second": 1324722.09, "ns per ops": 754.88, "Ops per threads": 552119.0, "Ops per procs": 552119.0, "Ops/sec/procs": 55196.75, "ns per ops/procs": 18117.01}],["rdq-yield-tokio", "./rdq-yield-tokio -p 16 -d 10 -t 16", {"Duration (ms)": 10099.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 1000850284.0, "Ops per second": 100085028.0, "ns per ops": 10.0, "Ops per threads": 62553142.0, "Ops per procs": 62553142.0, "Ops/sec/procs": 6255314.0, "ns per ops/procs": 161.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 24 -d 10 -t 24", {"Duration (ms)": 0.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 184091475.0, "Ops per second": 18409147.5, "ns per ops": 54.36, "Ops per threads": 7670478.0, "Ops per procs": 7670478.0, "Ops/sec/procs": 767047.81, "ns per ops/procs": 1304.54}],["rdq-yield-fibre", "./rdq-yield-fibre -p 1 -d 10 -t 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 292489865.0, "Ops per second": 29227283.49, "ns per ops": 34.21, "Ops per threads": 292489865.0, "Ops per procs": 292489865.0, "Ops/sec/procs": 29227283.49, "ns per ops/procs": 34.21}],["rdq-yield-go", "./rdq-yield-go -p 24 -d 10 -t 24", {"Duration (ms)": 10002.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total 
Operations(ops)": 14063200.0, "Ops per second": 1406005.33, "ns per ops": 711.23, "Ops per threads": 585966.0, "Ops per procs": 585966.0, "Ops/sec/procs": 58583.56, "ns per ops/procs": 17069.64}],["rdq-yield-cfa", "./rdq-yield-cfa -p 8 -d 10 -t 8", {"Duration (ms)": 0.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 162528481.0, "Ops per second": 16252848.1, "ns per ops": 61.66, "Ops per threads": 20316060.0, "Ops per procs": 20316060.0, "Ops/sec/procs": 2031606.01, "ns per ops/procs": 493.26}],["rdq-yield-fibre", "./rdq-yield-fibre -p 16 -d 10 -t 16", {"Duration (ms)": 10008.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 3965938528.0, "Ops per second": 396248257.59, "ns per ops": 2.52, "Ops per threads": 247871158.0, "Ops per procs": 247871158.0, "Ops/sec/procs": 24765516.1, "ns per ops/procs": 40.38}],["rdq-yield-fibre", "./rdq-yield-fibre -p 16 -d 10 -t 16", {"Duration (ms)": 10009.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 4048953063.0, "Ops per second": 404492768.97, "ns per ops": 2.47, "Ops per threads": 253059566.0, "Ops per procs": 253059566.0, "Ops/sec/procs": 25280798.06, "ns per ops/procs": 39.56}],["rdq-yield-fibre", "./rdq-yield-fibre -p 24 -d 10 -t 24", {"Duration (ms)": 10004.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 5311054327.0, "Ops per second": 530869875.38, "ns per ops": 1.88, "Ops per threads": 221293930.0, "Ops per procs": 221293930.0, "Ops/sec/procs": 22119578.14, "ns per ops/procs": 45.21}],["rdq-yield-cfa", "./rdq-yield-cfa -p 16 -d 10 -t 16", {"Duration (ms)": 0.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 193683806.0, "Ops per second": 19368380.6, "ns per ops": 51.76, "Ops per threads": 12105237.0, "Ops per procs": 12105237.0, "Ops/sec/procs": 1210523.79, "ns per ops/procs": 828.15}],["rdq-yield-go", "./rdq-yield-go -p 16 -d 10 -t 16", 
{"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 16763469.0, "Ops per second": 1676051.66, "ns per ops": 596.64, "Ops per threads": 1047716.0, "Ops per procs": 1047716.0, "Ops/sec/procs": 104753.23, "ns per ops/procs": 9546.25}],["rdq-yield-tokio", "./rdq-yield-tokio -p 24 -d 10 -t 24", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 1250273189.0, "Ops per second": 125027318.0, "ns per ops": 8.0, "Ops per threads": 52094716.0, "Ops per procs": 52094716.0, "Ops/sec/procs": 5209471.0, "ns per ops/procs": 193.0}],["rdq-yield-go", "./rdq-yield-go -p 8 -d 10 -t 8", {"Duration (ms)": 10001.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 26753255.0, "Ops per second": 2675042.69, "ns per ops": 373.83, "Ops per threads": 3344156.0, "Ops per procs": 3344156.0, "Ops/sec/procs": 334380.34, "ns per ops/procs": 2990.61}],["rdq-yield-tokio", "./rdq-yield-tokio -p 16 -d 10 -t 16", {"Duration (ms)": 10099.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 1139181136.0, "Ops per second": 113918113.0, "ns per ops": 8.0, "Ops per threads": 71198821.0, "Ops per procs": 71198821.0, "Ops/sec/procs": 7119882.0, "ns per ops/procs": 141.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 8 -d 10 -t 8", {"Duration (ms)": 0.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 148345422.0, "Ops per second": 14834542.2, "ns per ops": 67.54, "Ops per threads": 18543177.0, "Ops per procs": 18543177.0, "Ops/sec/procs": 1854317.77, "ns per ops/procs": 540.32}],["rdq-yield-cfa", "./rdq-yield-cfa -p 1 -d 10 -t 1", {"Duration (ms)": 0.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 76054104.0, "Ops per second": 7605410.4, "ns per ops": 131.71, "Ops per threads": 76054104.0, "Ops per procs": 76054104.0, "Ops/sec/procs": 7605410.4, "ns per ops/procs": 
131.71}],["rdq-yield-fibre", "./rdq-yield-fibre -p 1 -d 10 -t 1", {"Duration (ms)": 10007.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 292635742.0, "Ops per second": 29241803.8, "ns per ops": 34.2, "Ops per threads": 292635742.0, "Ops per procs": 292635742.0, "Ops/sec/procs": 29241803.8, "ns per ops/procs": 34.2}],["rdq-yield-fibre", "./rdq-yield-fibre -p 24 -d 10 -t 24", {"Duration (ms)": 10001.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 5304170687.0, "Ops per second": 530361478.44, "ns per ops": 1.89, "Ops per threads": 221007111.0, "Ops per procs": 221007111.0, "Ops/sec/procs": 22098394.93, "ns per ops/procs": 45.25}],["rdq-yield-fibre", "./rdq-yield-fibre -p 16 -d 10 -t 16", {"Duration (ms)": 10009.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 4047689095.0, "Ops per second": 404367842.39, "ns per ops": 2.47, "Ops per threads": 252980568.0, "Ops per procs": 252980568.0, "Ops/sec/procs": 25272990.15, "ns per ops/procs": 39.57}],["rdq-yield-go", "./rdq-yield-go -p 1 -d 10 -t 1", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 85548399.0, "Ops per second": 8554476.24, "ns per ops": 116.9, "Ops per threads": 85548399.0, "Ops per procs": 85548399.0, "Ops/sec/procs": 8554476.24, "ns per ops/procs": 116.9}],["rdq-yield-tokio", "./rdq-yield-tokio -p 1 -d 10 -t 1", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 91474395.0, "Ops per second": 9147439.0, "ns per ops": 110.0, "Ops per threads": 91474395.0, "Ops per procs": 91474395.0, "Ops/sec/procs": 9147439.0, "ns per ops/procs": 110.0}],["rdq-yield-tokio", "./rdq-yield-tokio -p 24 -d 10 -t 24", {"Duration (ms)": 10099.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 1289349577.0, "Ops per second": 128934957.0, "ns per ops": 7.0, "Ops per threads": 
53722899.0, "Ops per procs": 53722899.0, "Ops/sec/procs": 5372289.0, "ns per ops/procs": 187.0}],["rdq-yield-cfa", "./rdq-yield-cfa -p 24 -d 10 -t 24", {"Duration (ms)": 0.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 227319382.0, "Ops per second": 22731938.2, "ns per ops": 44.02, "Ops per threads": 9471640.0, "Ops per procs": 9471640.0, "Ops/sec/procs": 947164.09, "ns per ops/procs": 1056.48}],["rdq-yield-go", "./rdq-yield-go -p 16 -d 10 -t 16", {"Duration (ms)": 10001.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 16769801.0, "Ops per second": 1676661.26, "ns per ops": 596.42, "Ops per threads": 1048112.0, "Ops per procs": 1048112.0, "Ops/sec/procs": 104791.33, "ns per ops/procs": 9542.77}],["rdq-yield-fibre", "./rdq-yield-fibre -p 8 -d 10 -t 8", {"Duration (ms)": 10007.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 2148355210.0, "Ops per second": 214670482.89, "ns per ops": 4.66, "Ops per threads": 268544401.0, "Ops per procs": 268544401.0, "Ops/sec/procs": 26833810.36, "ns per ops/procs": 37.27}],["rdq-yield-fibre", "./rdq-yield-fibre -p 8 -d 10 -t 8", {"Duration (ms)": 10007.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 2151532871.0, "Ops per second": 214988947.76, "ns per ops": 4.65, "Ops per threads": 268941608.0, "Ops per procs": 268941608.0, "Ops/sec/procs": 26873618.47, "ns per ops/procs": 37.21}],["rdq-yield-fibre", "./rdq-yield-fibre -p 1 -d 10 -t 1", {"Duration (ms)": 10009.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 292690252.0, "Ops per second": 29242303.95, "ns per ops": 34.2, "Ops per threads": 292690252.0, "Ops per procs": 292690252.0, "Ops/sec/procs": 29242303.95, "ns per ops/procs": 34.2}],["rdq-yield-cfa", "./rdq-yield-cfa -p 1 -d 10 -t 1", {"Duration (ms)": 0.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 75801333.0, "Ops 
per second": 7580133.3, "ns per ops": 132.15, "Ops per threads": 75801333.0, "Ops per procs": 75801333.0, "Ops/sec/procs": 7580133.3, "ns per ops/procs": 132.15}],["rdq-yield-cfa", "./rdq-yield-cfa -p 16 -d 10 -t 16", {"Duration (ms)": 0.0, "Number of processors": 16.0, "Number of threads": 16.0, "Total Operations(ops)": 221926675.0, "Ops per second": 22192667.5, "ns per ops": 45.18, "Ops per threads": 13870417.0, "Ops per procs": 13870417.0, "Ops/sec/procs": 1387041.72, "ns per ops/procs": 722.81}],["rdq-yield-go", "./rdq-yield-go -p 1 -d 10 -t 1", {"Duration (ms)": 10000.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 84740180.0, "Ops per second": 8473654.06, "ns per ops": 118.01, "Ops per threads": 84740180.0, "Ops per procs": 84740180.0, "Ops/sec/procs": 8473654.06, "ns per ops/procs": 118.01}],["rdq-yield-cfa", "./rdq-yield-cfa -p 1 -d 10 -t 1", {"Duration (ms)": 0.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 73281534.0, "Ops per second": 7328153.4, "ns per ops": 136.67, "Ops per threads": 73281534.0, "Ops per procs": 73281534.0, "Ops/sec/procs": 7328153.4, "ns per ops/procs": 136.67}],["rdq-yield-tokio", "./rdq-yield-tokio -p 8 -d 10 -t 8", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 8.0, "Total Operations(ops)": 388708305.0, "Ops per second": 38870830.0, "ns per ops": 25.0, "Ops per threads": 48588538.0, "Ops per procs": 48588538.0, "Ops/sec/procs": 4858853.0, "ns per ops/procs": 207.0}],["rdq-yield-go", "./rdq-yield-go -p 24 -d 10 -t 24", {"Duration (ms)": 10002.0, "Number of processors": 24.0, "Number of threads": 24.0, "Total Operations(ops)": 13787158.0, "Ops per second": 1378423.2, "ns per ops": 725.47, "Ops per threads": 574464.0, "Ops per procs": 574464.0, "Ops/sec/procs": 57434.3, "ns per ops/procs": 17411.2}],["rdq-yield-tokio", "./rdq-yield-tokio -p 8 -d 10 -t 8", {"Duration (ms)": 10099.0, "Number of processors": 8.0, "Number of threads": 8.0, 
"Total Operations(ops)": 398849975.0, "Ops per second": 39884997.0, "ns per ops": 25.0, "Ops per threads": 49856246.0, "Ops per procs": 49856246.0, "Ops/sec/procs": 4985624.0, "ns per ops/procs": 202.0}],["rdq-yield-tokio", "./rdq-yield-tokio -p 1 -d 10 -t 1", {"Duration (ms)": 10100.0, "Number of processors": 1.0, "Number of threads": 1.0, "Total Operations(ops)": 91905024.0, "Ops per second": 9190502.0, "ns per ops": 109.0, "Ops per threads": 91905024.0, "Ops per procs": 91905024.0, "Ops/sec/procs": 9190502.0, "ns per ops/procs": 109.0}]]
Index: doc/theses/thierry_delisle_PhD/thesis/fig/SAVE.fig
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/fig/SAVE.fig	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
+++ doc/theses/thierry_delisle_PhD/thesis/fig/SAVE.fig	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -0,0 +1,81 @@
+#FIG 3.2  Produced by xfig version 3.2.7b
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 7650 5400 8700 6318
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 7650 5854 7913 6308 8438 6308 8700 5854 8438 5400 7913 5400
+	 7650 5854
+-6
+6 9675 5400 10725 6318
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 9675 5854 9938 6308 10463 6308 10725 5854 10463 5400 9938 5400
+	 9675 5854
+-6
+6 8175 6675 8608 7050
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 8234 6734 8175 7050
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 8589 6734 8569 6853
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 8214 6853 8332 6813 8470 6872 8569 6853
+	 0.000 -0.500 -0.500 0.000
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 8236 6729 8354 6690 8492 6749 8590 6729
+	 0.000 -0.500 -0.500 0.000
+-6
+6 8325 6900 8700 7400
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 8325 7025 8450 6900 8700 6900 8700 7400 8325 7400 8325 7025
+	 8450 7025 8450 6900
+-6
+6 5694 5250 6150 5775
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 5922.000 5409.011 5877 5410 5922 5364 5967 5410
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 5922.000 5410.000 5785 5410 5922 5273 6059 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 5785 5410 5785 5501 5694 5501 5694 5775 6150 5775 6150 5501
+	 6059 5501 6059 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 4
+	 5877 5410 5877 5501 5967 5501 5967 5410
+-6
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 5625 5250 6825 5250 6825 6600 5625 6600 5625 5250
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	1 1 1.00 60.00 120.00
+	 7050 5850 7725 5850
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	1 1 1.00 60.00 120.00
+	 9150 5850 9750 5850
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	1 1 1.00 60.00 120.00
+	 8175 6525 8175 6900
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 5625 6150 6825 6150
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 1 2
+	7 1 1.00 60.00 120.00
+	 6150 5850 7200 5850
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 1 2
+	7 1 1.00 60.00 120.00
+	 8175 5850 9150 5850
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 1 2
+	7 0 1.00 60.00 120.00
+	 8175 6150 8175 6600
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 1 5
+	7 0 1.00 60.00 120.00
+	 6150 6375 6225 6900 6525 7200 6900 7350 7425 7350
+	 0.000 -0.500 -0.500 -0.500 0.000
+3 2 0 1 0 7 50 -1 -1 0.000 0 1 0 6
+	1 1 1.00 60.00 120.00
+	 6225 6900 6300 7050 6525 7200 6900 7350 7425 7350 7950 7200
+	 0.000 -0.500 -0.500 -0.500 -0.500 0.000
+4 0 0 50 -1 0 11 0.0000 2 135 1260 7875 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 165 810 7725 6975 Benaphore\001
+4 0 0 50 -1 0 11 0.0000 2 135 1440 8325 7500 Private Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 810 5925 5175 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 810 5250 5550 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 360 5400 5700 Lock\001
Index: doc/theses/thierry_delisle_PhD/thesis/fig/idle.fig
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/fig/idle.fig	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
+++ doc/theses/thierry_delisle_PhD/thesis/fig/idle.fig	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -0,0 +1,133 @@
+#FIG 3.2  Produced by xfig version 3.2.7b
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 5919 5250 6375 5775
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5409.011 6102 5410 6147 5364 6192 5410
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5410.000 6010 5410 6147 5273 6284 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 6010 5410 6010 5501 5919 5501 5919 5775 6375 5775 6375 5501
+	 6284 5501 6284 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 4
+	 6102 5410 6102 5501 6192 5501 6192 5410
+-6
+6 7442 6525 7875 6900
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 7501 6584 7442 6900
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 7856 6584 7836 6703
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 7481 6703 7599 6663 7737 6722 7836 6703
+	 0.000 -0.500 -0.500 0.000
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 7503 6579 7621 6540 7759 6599 7857 6579
+	 0.000 -0.500 -0.500 0.000
+-6
+6 7575 6825 7950 7325
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 7575 6950 7700 6825 7950 6825 7950 7325 7575 7325 7575 6950
+	 7700 6950 7700 6825
+-6
+6 9092 6525 9525 6900
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 9151 6584 9092 6900
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 9506 6584 9486 6703
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 9131 6703 9249 6663 9387 6722 9486 6703
+	 0.000 -0.500 -0.500 0.000
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 9153 6579 9271 6540 9409 6599 9507 6579
+	 0.000 -0.500 -0.500 0.000
+-6
+6 9225 6825 9600 7325
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 9225 6950 9350 6825 9600 6825 9600 7325 9225 7325 9225 6950
+	 9350 6950 9350 6825
+-6
+6 10742 6525 11175 6900
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 10801 6584 10742 6900
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 11156 6584 11136 6703
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 10781 6703 10899 6663 11037 6722 11136 6703
+	 0.000 -0.500 -0.500 0.000
+3 2 0 1 0 7 50 -1 -1 0.000 0 0 0 4
+	 10803 6579 10921 6540 11059 6599 11157 6579
+	 0.000 -0.500 -0.500 0.000
+-6
+6 10875 6825 11250 7325
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 10875 6950 11000 6825 11250 6825 11250 7325 10875 7325 10875 6950
+	 11000 6950 11000 6825
+-6
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 5850 6150 6675 6150
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 5850 5250 6675 5250 6675 6600 5850 6600 5850 5250
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 7725 6150 7725 6525
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 9375 6150 9375 6525
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 11025 6150 11025 6525
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 10500 5854 10763 6308 11288 6308 11550 5854 11288 5400 10763 5400
+	 10500 5854
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 8850 5854 9113 6308 9638 6308 9900 5854 9638 5400 9113 5400
+	 8850 5854
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 7200 5854 7463 6308 7988 6308 8250 5854 7988 5400 7463 5400
+	 7200 5854
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 6450 5925 7275 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 8025 5925 8925 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 9675 5925 10575 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 10725 5775 9825 5775
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 9075 5775 8175 5775
+3 2 0 1 0 7 50 -1 -1 0.000 0 1 1 4
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 6300 6375 6375 6825 6750 7050 7350 6975
+	 0.000 -0.500 -0.500 0.000
+4 0 0 50 -1 0 11 0.0000 2 135 810 5925 5175 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 810 5175 5550 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 360 5325 5700 Lock\001
+4 0 0 50 -1 0 11 0.0000 2 135 540 5775 6900 Atomic\001
+4 0 0 50 -1 0 11 0.0000 2 135 630 5775 7125 Pointer\001
+4 0 0 50 -1 0 11 0.0000 2 165 810 7950 6675 Benaphore\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 8025 7125 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 7275 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 165 810 9600 6675 Benaphore\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 9675 7125 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 8925 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 165 810 11250 6675 Benaphore\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 11325 7125 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 10575 5325 Idle Processor\001
Index: doc/theses/thierry_delisle_PhD/thesis/fig/idle1.fig
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/fig/idle1.fig	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
+++ doc/theses/thierry_delisle_PhD/thesis/fig/idle1.fig	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -0,0 +1,85 @@
+#FIG 3.2  Produced by xfig version 3.2.7b
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 5919 5250 6375 5775
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5409.011 6102 5410 6147 5364 6192 5410
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5410.000 6010 5410 6147 5273 6284 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 6010 5410 6010 5501 5919 5501 5919 5775 6375 5775 6375 5501
+	 6284 5501 6284 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 4
+	 6102 5410 6102 5501 6192 5501 6192 5410
+-6
+6 7575 6525 7950 7025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 7575 6650 7700 6525 7950 6525 7950 7025 7575 7025 7575 6650
+	 7700 6650 7700 6525
+-6
+6 9225 6525 9600 7025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 9225 6650 9350 6525 9600 6525 9600 7025 9225 7025 9225 6650
+	 9350 6650 9350 6525
+-6
+6 10875 6525 11250 7025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 10875 6650 11000 6525 11250 6525 11250 7025 10875 7025 10875 6650
+	 11000 6650 11000 6525
+-6
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 7725 6150 7725 6525
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 9375 6150 9375 6525
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 11025 6150 11025 6525
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 10500 5854 10763 6308 11288 6308 11550 5854 11288 5400 10763 5400
+	 10500 5854
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 8850 5854 9113 6308 9638 6308 9900 5854 9638 5400 9113 5400
+	 8850 5854
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 7200 5854 7463 6308 7988 6308 8250 5854 7988 5400 7463 5400
+	 7200 5854
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 6450 5925 7275 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 8025 5925 8925 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 9675 5925 10575 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 10725 5775 9825 5775
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 9075 5775 8175 5775
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 5850 5250 6675 5250 6675 6075 5850 6075 5850 5250
+4 0 0 50 -1 0 11 0.0000 2 135 810 5925 5175 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 810 5175 5550 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 360 5325 5700 Lock\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 7275 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 8925 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 10575 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 8025 6825 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 9675 6825 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 11325 6825 Event FD\001
Index: doc/theses/thierry_delisle_PhD/thesis/fig/idle2.fig
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/fig/idle2.fig	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
+++ doc/theses/thierry_delisle_PhD/thesis/fig/idle2.fig	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -0,0 +1,94 @@
+#FIG 3.2  Produced by xfig version 3.2.7b
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+6 5919 5250 6375 5775
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5409.011 6102 5410 6147 5364 6192 5410
+5 1 0 1 0 7 50 -1 -1 0.000 0 0 0 0 6147.000 5410.000 6010 5410 6147 5273 6284 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 6010 5410 6010 5501 5919 5501 5919 5775 6375 5775 6375 5501
+	 6284 5501 6284 5410
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 4
+	 6102 5410 6102 5501 6192 5501 6192 5410
+-6
+6 7575 6525 7950 7025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 7575 6650 7700 6525 7950 6525 7950 7025 7575 7025 7575 6650
+	 7700 6650 7700 6525
+-6
+6 9225 6525 9600 7025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 9225 6650 9350 6525 9600 6525 9600 7025 9225 7025 9225 6650
+	 9350 6650 9350 6525
+-6
+6 10875 6525 11250 7025
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+	 10875 6650 11000 6525 11250 6525 11250 7025 10875 7025 10875 6650
+	 11000 6650 11000 6525
+-6
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+	 5850 6150 6675 6150
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+	 5850 5250 6675 5250 6675 6600 5850 6600 5850 5250
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 7725 6150 7725 6525
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 9375 6150 9375 6525
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 0 1.00 60.00 60.00
+	 11025 6150 11025 6525
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 10500 5854 10763 6308 11288 6308 11550 5854 11288 5400 10763 5400
+	 10500 5854
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 8850 5854 9113 6308 9638 6308 9900 5854 9638 5400 9113 5400
+	 8850 5854
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 0 0 0 7
+	 7200 5854 7463 6308 7988 6308 8250 5854 7988 5400 7463 5400
+	 7200 5854
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 6450 5925 7275 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 8025 5925 8925 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 9675 5925 10575 5925
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 10725 5775 9825 5775
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 1 2
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 9075 5775 8175 5775
+3 2 0 1 0 7 50 -1 -1 0.000 0 1 1 4
+	1 1 1.00 60.00 120.00
+	7 1 1.00 60.00 60.00
+	 6300 6375 6375 6825 6900 6975 7500 6750
+	 0.000 -0.500 -0.500 0.000
+4 0 0 50 -1 0 11 0.0000 2 135 810 5925 5175 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 810 5175 5550 Idle List\001
+4 0 0 50 -1 0 11 0.0000 2 135 360 5325 5700 Lock\001
+4 0 0 50 -1 0 11 0.0000 2 135 540 5775 6900 Atomic\001
+4 0 0 50 -1 0 11 0.0000 2 135 630 5775 7125 Pointer\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 7275 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 8925 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 135 1260 10575 5325 Idle Processor\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 8025 6825 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 9675 6825 Event FD\001
+4 0 0 50 -1 0 11 0.0000 2 135 720 11325 6825 Event FD\001
Index: doc/theses/thierry_delisle_PhD/thesis/fig/idle_state.fig
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/fig/idle_state.fig	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
+++ doc/theses/thierry_delisle_PhD/thesis/fig/idle_state.fig	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -0,0 +1,27 @@
+#FIG 3.2  Produced by xfig version 3.2.7b
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-2
+1200 2
+1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 3900 3600 571 571 3900 3600 3375 3375
+1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 6300 3600 605 605 6300 3600 5775 3300
+1 3 0 1 0 7 50 -1 -1 0.000 1 0.0000 5100 5400 600 600 5100 5400 4500 5400
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	0 0 1.00 60.00 120.00
+	 4200 4125 4725 4950
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	0 0 1.00 60.00 120.00
+	 4500 3600 5700 3600
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+	0 0 1.00 60.00 120.00
+	 5923 4125 5475 4875
+4 1 0 50 -1 0 11 0.0000 2 135 450 5100 5475 AWAKE\001
+4 1 0 50 -1 0 11 0.0000 2 135 450 6300 3675 SLEEP\001
+4 1 0 50 -1 0 11 0.0000 2 135 540 3900 3675 SEARCH\001
+4 0 0 50 -1 0 11 0.0000 2 135 360 5775 4650 WAKE\001
+4 2 0 50 -1 0 11 0.0000 2 135 540 4350 4650 CANCEL\001
+4 1 0 50 -1 0 11 0.0000 2 135 630 5025 3450 CONFIRM\001
Index: doc/theses/thierry_delisle_PhD/thesis/local.bib
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/local.bib	(revision bfd551277c04ecd8721e431ae84ab67dfad0fd2b)
+++ doc/theses/thierry_delisle_PhD/thesis/local.bib	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -701,2 +701,22 @@
   note = "[Online; accessed 12-April-2022]"
 }
+
+% RMR notes :
+% [05/04, 12:36] Trevor Brown
+%     i don't know where rmr complexity was first introduced, but there are many many many papers that use the term and define it
+% ​[05/04, 12:37] Trevor Brown
+%     here's one paper that uses the term a lot and links to many others that use it... might trace it to something useful there https://drops.dagstuhl.de/opus/volltexte/2021/14832/pdf/LIPIcs-DISC-2021-30.pdf
+% ​[05/04, 12:37] Trevor Brown
+%     another option might be to cite a textbook
+% ​[05/04, 12:42] Trevor Brown
+%     but i checked two textbooks in the area i'm aware of and i don't see a definition of rmr complexity in either
+% ​[05/04, 12:42] Trevor Brown
+%     this one has a nice statement about the prevelance of rmr complexity, as well as some rough definition
+% ​[05/04, 12:42] Trevor Brown
+%     https://dl.acm.org/doi/pdf/10.1145/3465084.3467938
+
+% Race to idle notes :
+% [13/04, 16:56] Martin Karsten
+%       I don't have a citation. Google brings up this one, which might be good:
+%
+% https://doi.org/10.1137/1.9781611973099.100
Index: doc/theses/thierry_delisle_PhD/thesis/test.svg
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/test.svg	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
+++ doc/theses/thierry_delisle_PhD/thesis/test.svg	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -0,0 +1,492 @@
+<?xml version="1.0" encoding="utf-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
+  "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Created with matplotlib (https://matplotlib.org/) -->
+<svg height="345.6pt" version="1.1" viewBox="0 0 460.8 345.6" width="460.8pt" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+ <defs>
+  <style type="text/css">
+*{stroke-linecap:butt;stroke-linejoin:round;}
+  </style>
+ </defs>
+ <g id="figure_1">
+  <g id="patch_1">
+   <path d="M 0 345.6 
+L 460.8 345.6 
+L 460.8 0 
+L 0 0 
+z
+" style="fill:#ffffff;"/>
+  </g>
+  <g id="axes_1">
+   <g id="patch_2">
+    <path d="M 57.6 307.584 
+L 414.72 307.584 
+L 414.72 41.472 
+L 57.6 41.472 
+z
+" style="fill:#ffffff;"/>
+   </g>
+   <g id="PathCollection_1">
+    <defs>
+     <path d="M -3 3 
+L 3 -3 
+M -3 -3 
+L 3 3 
+" id="mb6fe696cd4" style="stroke:#0095e3;stroke-width:1.5;"/>
+    </defs>
+    <g clip-path="url(#pd400051835)">
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="127.853115" xlink:href="#mb6fe696cd4" y="238.969387"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="408.865574" xlink:href="#mb6fe696cd4" y="92.390403"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="57.6" xlink:href="#mb6fe696cd4" y="263.873925"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="127.853115" xlink:href="#mb6fe696cd4" y="234.646917"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="57.6" xlink:href="#mb6fe696cd4" y="263.692599"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="408.865574" xlink:href="#mb6fe696cd4" y="90.109534"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="408.865574" xlink:href="#mb6fe696cd4" y="85.824"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="127.853115" xlink:href="#mb6fe696cd4" y="237.910643"/>
+     <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="57.6" xlink:href="#mb6fe696cd4" y="263.37251"/>
+    </g>
+   </g>
+   <g id="PathCollection_2">
+    <defs>
+     <path d="M -3 3 
+L 3 -3 
+M -3 -3 
+L 3 3 
+" id="mb7a392378d" style="stroke:#006cb4;stroke-width:1.5;"/>
+    </defs>
+    <g clip-path="url(#pd400051835)">
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="408.865574" xlink:href="#mb7a392378d" y="160.001329"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="127.853115" xlink:href="#mb7a392378d" y="238.512735"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="127.853115" xlink:href="#mb7a392378d" y="238.265802"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="57.6" xlink:href="#mb7a392378d" y="256.021044"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="127.853115" xlink:href="#mb7a392378d" y="238.284474"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="408.865574" xlink:href="#mb7a392378d" y="159.821437"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="57.6" xlink:href="#mb7a392378d" y="256.956649"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="408.865574" xlink:href="#mb7a392378d" y="165.331574"/>
+     <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="57.6" xlink:href="#mb7a392378d" y="255.961851"/>
+    </g>
+   </g>
+   <g id="matplotlib.axis_1">
+    <g id="xtick_1">
+     <g id="line2d_1">
+      <path clip-path="url(#pd400051835)" d="M 57.6 307.584 
+L 57.6 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_2">
+      <defs>
+       <path d="M 0 0 
+L 0 3.5 
+" id="m9bee3d39da" style="stroke:#000000;stroke-width:0.8;"/>
+      </defs>
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_1">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 57.6, 322.182437)" x="57.6" y="322.182437">1</text>
+     </g>
+    </g>
+    <g id="xtick_2">
+     <g id="line2d_3">
+      <path clip-path="url(#pd400051835)" d="M 81.017705 307.584 
+L 81.017705 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_4">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="81.017705" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_2">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 81.017705, 322.182437)" x="81.017705" y="322.182437">2</text>
+     </g>
+    </g>
+    <g id="xtick_3">
+     <g id="line2d_5">
+      <path clip-path="url(#pd400051835)" d="M 104.43541 307.584 
+L 104.43541 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_6">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="104.43541" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_3">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 104.43541, 322.182437)" x="104.43541" y="322.182437">3</text>
+     </g>
+    </g>
+    <g id="xtick_4">
+     <g id="line2d_7">
+      <path clip-path="url(#pd400051835)" d="M 127.853115 307.584 
+L 127.853115 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_8">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="127.853115" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_4">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 127.853115, 322.182437)" x="127.853115" y="322.182437">4</text>
+     </g>
+    </g>
+    <g id="xtick_5">
+     <g id="line2d_9">
+      <path clip-path="url(#pd400051835)" d="M 151.27082 307.584 
+L 151.27082 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_10">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="151.27082" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_5">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 151.27082, 322.182437)" x="151.27082" y="322.182437">5</text>
+     </g>
+    </g>
+    <g id="xtick_6">
+     <g id="line2d_11">
+      <path clip-path="url(#pd400051835)" d="M 174.688525 307.584 
+L 174.688525 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_12">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="174.688525" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_6">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 174.688525, 322.182437)" x="174.688525" y="322.182437">6</text>
+     </g>
+    </g>
+    <g id="xtick_7">
+     <g id="line2d_13">
+      <path clip-path="url(#pd400051835)" d="M 198.10623 307.584 
+L 198.10623 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_14">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="198.10623" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_7">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 198.10623, 322.182437)" x="198.10623" y="322.182437">7</text>
+     </g>
+    </g>
+    <g id="xtick_8">
+     <g id="line2d_15">
+      <path clip-path="url(#pd400051835)" d="M 221.523934 307.584 
+L 221.523934 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_16">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="221.523934" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_8">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 221.523934, 322.182437)" x="221.523934" y="322.182437">8</text>
+     </g>
+    </g>
+    <g id="xtick_9">
+     <g id="line2d_17">
+      <path clip-path="url(#pd400051835)" d="M 244.941639 307.584 
+L 244.941639 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_18">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="244.941639" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_9">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 244.941639, 322.182437)" x="244.941639" y="322.182437">9</text>
+     </g>
+    </g>
+    <g id="xtick_10">
+     <g id="line2d_19">
+      <path clip-path="url(#pd400051835)" d="M 268.359344 307.584 
+L 268.359344 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_20">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="268.359344" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_10">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 268.359344, 322.182437)" x="268.359344" y="322.182437">10</text>
+     </g>
+    </g>
+    <g id="xtick_11">
+     <g id="line2d_21">
+      <path clip-path="url(#pd400051835)" d="M 291.777049 307.584 
+L 291.777049 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_22">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="291.777049" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_11">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 291.777049, 322.182437)" x="291.777049" y="322.182437">11</text>
+     </g>
+    </g>
+    <g id="xtick_12">
+     <g id="line2d_23">
+      <path clip-path="url(#pd400051835)" d="M 315.194754 307.584 
+L 315.194754 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_24">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="315.194754" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_12">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 315.194754, 322.182437)" x="315.194754" y="322.182437">12</text>
+     </g>
+    </g>
+    <g id="xtick_13">
+     <g id="line2d_25">
+      <path clip-path="url(#pd400051835)" d="M 338.612459 307.584 
+L 338.612459 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_26">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="338.612459" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_13">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 338.612459, 322.182437)" x="338.612459" y="322.182437">13</text>
+     </g>
+    </g>
+    <g id="xtick_14">
+     <g id="line2d_27">
+      <path clip-path="url(#pd400051835)" d="M 362.030164 307.584 
+L 362.030164 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_28">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="362.030164" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_14">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 362.030164, 322.182437)" x="362.030164" y="322.182437">14</text>
+     </g>
+    </g>
+    <g id="xtick_15">
+     <g id="line2d_29">
+      <path clip-path="url(#pd400051835)" d="M 385.447869 307.584 
+L 385.447869 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_30">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="385.447869" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_15">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 385.447869, 322.182437)" x="385.447869" y="322.182437">15</text>
+     </g>
+    </g>
+    <g id="xtick_16">
+     <g id="line2d_31">
+      <path clip-path="url(#pd400051835)" d="M 408.865574 307.584 
+L 408.865574 41.472 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_32">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="408.865574" xlink:href="#m9bee3d39da" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_16">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 408.865574, 322.182437)" x="408.865574" y="322.182437">16</text>
+     </g>
+    </g>
+    <g id="text_17">
+     <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-0, 236.16, 335.860562)" x="236.16" y="335.860562">Number of processors</text>
+    </g>
+   </g>
+   <g id="matplotlib.axis_2">
+    <g id="ytick_1">
+     <g id="line2d_33">
+      <path clip-path="url(#pd400051835)" d="M 57.6 307.584 
+L 414.72 307.584 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_34">
+      <defs>
+       <path d="M 0 0 
+L -3.5 0 
+" id="m082b2e4a56" style="stroke:#000000;stroke-width:0.8;"/>
+      </defs>
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m082b2e4a56" y="307.584"/>
+      </g>
+     </g>
+     <g id="text_18">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:end;" transform="rotate(-0, 50.6, 311.383219)" x="50.6" y="311.383219">0 Ops</text>
+     </g>
+    </g>
+    <g id="ytick_2">
+     <g id="line2d_35">
+      <path clip-path="url(#pd400051835)" d="M 57.6 268.244858 
+L 414.72 268.244858 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_36">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m082b2e4a56" y="268.244858"/>
+      </g>
+     </g>
+     <g id="text_19">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:end;" transform="rotate(-0, 50.6, 272.044077)" x="50.6" y="272.044077">5 MOps</text>
+     </g>
+    </g>
+    <g id="ytick_3">
+     <g id="line2d_37">
+      <path clip-path="url(#pd400051835)" d="M 57.6 228.905717 
+L 414.72 228.905717 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_38">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m082b2e4a56" y="228.905717"/>
+      </g>
+     </g>
+     <g id="text_20">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:end;" transform="rotate(-0, 50.6, 232.704936)" x="50.6" y="232.704936">10 MOps</text>
+     </g>
+    </g>
+    <g id="ytick_4">
+     <g id="line2d_39">
+      <path clip-path="url(#pd400051835)" d="M 57.6 189.566575 
+L 414.72 189.566575 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_40">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m082b2e4a56" y="189.566575"/>
+      </g>
+     </g>
+     <g id="text_21">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:end;" transform="rotate(-0, 50.6, 193.365794)" x="50.6" y="193.365794">15 MOps</text>
+     </g>
+    </g>
+    <g id="ytick_5">
+     <g id="line2d_41">
+      <path clip-path="url(#pd400051835)" d="M 57.6 150.227434 
+L 414.72 150.227434 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_42">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m082b2e4a56" y="150.227434"/>
+      </g>
+     </g>
+     <g id="text_22">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:end;" transform="rotate(-0, 50.6, 154.026652)" x="50.6" y="154.026652">20 MOps</text>
+     </g>
+    </g>
+    <g id="ytick_6">
+     <g id="line2d_43">
+      <path clip-path="url(#pd400051835)" d="M 57.6 110.888292 
+L 414.72 110.888292 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_44">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m082b2e4a56" y="110.888292"/>
+      </g>
+     </g>
+     <g id="text_23">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:end;" transform="rotate(-0, 50.6, 114.687511)" x="50.6" y="114.687511">25 MOps</text>
+     </g>
+    </g>
+    <g id="ytick_7">
+     <g id="line2d_45">
+      <path clip-path="url(#pd400051835)" d="M 57.6 71.54915 
+L 414.72 71.54915 
+" style="fill:none;stroke:#b0b0b0;stroke-linecap:square;stroke-width:0.8;"/>
+     </g>
+     <g id="line2d_46">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="57.6" xlink:href="#m082b2e4a56" y="71.54915"/>
+      </g>
+     </g>
+     <g id="text_24">
+      <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:end;" transform="rotate(-0, 50.6, 75.348369)" x="50.6" y="75.348369">30 MOps</text>
+     </g>
+    </g>
+    <g id="text_25">
+     <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:middle;" transform="rotate(-90, 0.559375, 174.528)" x="0.559375" y="174.528">Ops per second</text>
+    </g>
+   </g>
+   <g id="patch_3">
+    <path d="M 57.6 307.584 
+L 57.6 41.472 
+" style="fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;"/>
+   </g>
+   <g id="patch_4">
+    <path d="M 414.72 307.584 
+L 414.72 41.472 
+" style="fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;"/>
+   </g>
+   <g id="patch_5">
+    <path d="M 57.6 307.584 
+L 414.72 307.584 
+" style="fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;"/>
+   </g>
+   <g id="patch_6">
+    <path d="M 57.6 41.472 
+L 414.72 41.472 
+" style="fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;"/>
+   </g>
+   <g id="legend_1">
+    <g id="patch_7">
+     <path d="M 64.6 78.82825 
+L 161.615625 78.82825 
+Q 163.615625 78.82825 163.615625 76.82825 
+L 163.615625 48.472 
+Q 163.615625 46.472 161.615625 46.472 
+L 64.6 46.472 
+Q 62.6 46.472 62.6 48.472 
+L 62.6 76.82825 
+Q 62.6 78.82825 64.6 78.82825 
+z
+" style="fill:#ffffff;opacity:0.8;stroke:#cccccc;stroke-linejoin:miter;"/>
+    </g>
+    <g id="PathCollection_3">
+     <g>
+      <use style="fill:#0095e3;stroke:#0095e3;stroke-width:1.5;" x="76.6" xlink:href="#mb6fe696cd4" y="55.445437"/>
+     </g>
+    </g>
+    <g id="text_26">
+     <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:start;" transform="rotate(-0, 94.6, 58.070437)" x="94.6" y="58.070437">rdq-cycle-go</text>
+    </g>
+    <g id="PathCollection_4">
+     <g>
+      <use style="fill:#006cb4;stroke:#006cb4;stroke-width:1.5;" x="76.6" xlink:href="#mb7a392378d" y="70.123562"/>
+     </g>
+    </g>
+    <g id="text_27">
+     <text style="font-family:DejaVu Sans;font-size:10px;font-style:normal;font-weight:normal;text-anchor:start;" transform="rotate(-0, 94.6, 72.748562)" x="94.6" y="72.748562">rdq-cycle-cfa</text>
+    </g>
+   </g>
+  </g>
+ </g>
+ <defs>
+  <clipPath id="pd400051835">
+   <rect height="266.112" width="357.12" x="57.6" y="41.472"/>
+  </clipPath>
+ </defs>
+</svg>
Index: doc/theses/thierry_delisle_PhD/thesis/text/eval_macro.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/eval_macro.tex	(revision bfd551277c04ecd8721e431ae84ab67dfad0fd2b)
+++ doc/theses/thierry_delisle_PhD/thesis/text/eval_macro.tex	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -7,7 +7,46 @@
 Networked ZIPF
 
+Nginx : 5Gb still good, 4Gb starts to suffer
+
+Cforall : 10Gb too high, 4 Gb too low
+
 \section{Memcached}
 
-In Memory
+\subsection{Benchmark Environment}
+These experiments are run on a cluster of homogeneous Supermicro SYS-6017R-TDF compute nodes with the following characteristics:
+The server runs Ubuntu 20.04.3 LTS on top of Linux Kernel 5.11.0-34.
+Each node has 2 Intel(R) Xeon(R) CPU E5-2620 v2 running at 2.10GHz.
+These CPUs have 6 cores per CPU and 2 \glspl{hthrd} per core, for a total of 24 \glspl{hthrd}.
+The CPUs each have 384 KB, 3 MB and 30 MB of L1, L2 and L3 caches respectively.
+Each node is connected to the network through a Mellanox 10 Gigabit Ethernet port.
+The network route uses 1 Mellanox SX1012 10/40 Gigabit Ethernet cluster switch.
 
-Networked
+
+
+\begin{figure}
+	\centering
+	\input{result.memcd.updt.qps.pstex_t}
+	\caption[Memcached Benchmark : Update Throughput on Intel]{Memcached Benchmark : Update Throughput on Intel\smallskip\newline Description}
+	\label{fig:memcd:updt:qps}
+\end{figure}
+
+\begin{figure}
+	\centering
+	\input{result.memcd.updt.lat.pstex_t}
+	\caption[Memcached Benchmark : Update Latency on Intel]{Memcached Benchmark : Update Latency on Intel\smallskip\newline Description}
+	\label{fig:memcd:updt:lat}
+\end{figure}
+
+\begin{figure}
+	\centering
+	\input{result.memcd.rate.qps.pstex_t}
+	\caption[Memcached Benchmark : Rate Throughput on Intel]{Memcached Benchmark : Rate Throughput on Intel\smallskip\newline Description}
+	\label{fig:memcd:rate:qps}
+\end{figure}
+
+\begin{figure}
+	\centering
+	\input{result.memcd.rate.99th.pstex_t}
+	\caption[Memcached Benchmark : Rate 99th Percentile Latency on Intel]{Memcached Benchmark : Rate 99th Percentile Latency on Intel\smallskip\newline Description}
+	\label{fig:memcd:rate:tail}
+\end{figure}
Index: doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex	(revision bfd551277c04ecd8721e431ae84ab67dfad0fd2b)
+++ doc/theses/thierry_delisle_PhD/thesis/text/eval_micro.tex	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -6,4 +6,9 @@
 \section{Benchmark Environment}
 All of these benchmarks are run on two distinct hardware environment, an AMD and an INTEL machine.
+
+For all benchmarks, \texttt{taskset} is used to limit the experiment to 1 NUMA Node with no hyperthreading.
+If more \glspl{hthrd} are needed, then 1 NUMA Node with hyperthreading is used.
+If still more \glspl{hthrd} are needed then the experiment is limited to as few NUMA Nodes as needed.
+
 
 \paragraph{AMD} The AMD machine is a server with two AMD EPYC 7662 CPUs and 256GB of DDR4 RAM.
@@ -23,4 +28,10 @@
 
 \section{Cycling latency}
+\begin{figure}
+	\centering
+	\input{cycle.pstex_t}
+	\caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each \gls{at} unparks the next \gls{at} in the cycle before parking itself.}
+	\label{fig:cycle}
+\end{figure}
 The most basic evaluation of any ready queue is to evaluate the latency needed to push and pop one element from the ready-queue.
 Since these two operation also describe a \texttt{yield} operation, many systems use this as the most basic benchmark.
@@ -42,11 +53,4 @@
 Note that this problem is only present on SMP machines and is significantly mitigated by the fact that there are multiple rings in the system.
 
-\begin{figure}
-	\centering
-	\input{cycle.pstex_t}
-	\caption[Cycle benchmark]{Cycle benchmark\smallskip\newline Each \gls{at} unparks the next \gls{at} in the cycle before parking itself.}
-	\label{fig:cycle}
-\end{figure}
-
 To avoid this benchmark from being dominated by the idle sleep handling, the number of rings is kept at least as high as the number of \glspl{proc} available.
 Beyond this point, adding more rings serves to mitigate even more the idle sleep handling.
@@ -54,24 +58,59 @@
 
 The actual benchmark is more complicated to handle termination, but that simply requires using a binary semphore or a channel instead of raw \texttt{park}/\texttt{unpark} and carefully picking the order of the \texttt{P} and \texttt{V} with respect to the loop condition.
-
-\begin{lstlisting}
-	Thread.main() {
-		count := 0
-		for {
-			wait()
-			this.next.wake()
-			count ++
-			if must_stop() { break }
-		}
-		global.count += count
-	}
-\end{lstlisting}
-
-\begin{figure}
-	\centering
-	\input{result.cycle.jax.ops.pstex_t}
-	\vspace*{-10pt}
-	\label{fig:cycle:ns:jax}
-\end{figure}
+Figure~\ref{fig:cycle:code} shows pseudo code for this benchmark.
+
+\begin{figure}
+	\begin{lstlisting}
+		Thread.main() {
+			count := 0
+			for {
+				wait()
+				this.next.wake()
+				count ++
+				if must_stop() { break }
+			}
+			global.count += count
+		}
+	\end{lstlisting}
+	\caption[Cycle Benchmark : Pseudo Code]{Cycle Benchmark : Pseudo Code}
+	\label{fig:cycle:code}
+\end{figure}
+
+
+
+\subsection{Results}
+\begin{figure}
+	\subfloat[][Throughput, 100 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.cycle.jax.ops.pstex_t}
+		}
+		\label{fig:cycle:jax:ops}
+	}
+	\subfloat[][Throughput, 1 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.cycle.low.jax.ops.pstex_t}
+		}
+		\label{fig:cycle:jax:low:ops}
+	}
+
+	\subfloat[][Latency, 100 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.cycle.jax.ns.pstex_t}
+		}
+
+	}
+	\subfloat[][Latency, 1 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.cycle.low.jax.ns.pstex_t}
+		}
+		\label{fig:cycle:jax:low:ns}
+	}
+	\caption[Cycle Benchmark on Intel]{Cycle Benchmark on Intel\smallskip\newline Throughput as a function of \proc count, using 100 cycles per \proc, 5 \ats per cycle.}
+	\label{fig:cycle:jax}
+\end{figure}
+Figure~\ref{fig:cycle:jax} shows the throughput as a function of \proc count, with the following constants:
+Each run uses 100 cycles per \proc, 5 \ats per cycle.
+
+\todo{results discussion}
 
 \section{Yield}
@@ -81,17 +120,56 @@
 Its only interesting variable is the number of \glspl{at} per \glspl{proc}, where ratios close to 1 means the ready queue(s) could be empty.
 This sometimes puts more strain on the idle sleep handling, compared to scenarios where there is clearly plenty of work to be done.
-
-\todo{code, setup, results}
-
-\begin{lstlisting}
-	Thread.main() {
-		count := 0
-		while !stop {
-			yield()
-			count ++
-		}
-		global.count += count
-	}
-\end{lstlisting}
+Figure~\ref{fig:yield:code} shows pseudo code for this benchmark, the ``wait/wake-next'' is simply replaced by a yield.
+
+\begin{figure}
+	\begin{lstlisting}
+		Thread.main() {
+			count := 0
+			for {
+				yield()
+				count ++
+				if must_stop() { break }
+			}
+			global.count += count
+		}
+	\end{lstlisting}
+	\caption[Yield Benchmark : Pseudo Code]{Yield Benchmark : Pseudo Code}
+	\label{fig:yield:code}
+\end{figure}
+
+\subsection{Results}
+\begin{figure}
+	\subfloat[][Throughput, 100 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.yield.jax.ops.pstex_t}
+		}
+		\label{fig:yield:jax:ops}
+	}
+	\subfloat[][Throughput, 1 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+		\input{result.yield.low.jax.ops.pstex_t}
+		}
+		\label{fig:yield:jax:low:ops}
+	}
+
+	\subfloat[][Latency, 100 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+		\input{result.yield.jax.ns.pstex_t}
+		}
+		\label{fig:yield:jax:ns}
+	}
+	\subfloat[][Latency, 1 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+		\input{result.yield.low.jax.ns.pstex_t}
+		}
+		\label{fig:yield:jax:low:ns}
+	}
+	\caption[Yield Benchmark on Intel]{Yield Benchmark on Intel\smallskip\newline Throughput as a function of \proc count, using 100 \ats per \proc.}
+	\label{fig:yield:jax}
+\end{figure}
+Figure~\ref{fig:yield:jax} shows the throughput as a function of \proc count, with the following constants:
+Each run uses 100 \ats per \proc.
+
+\todo{results discussion}
 
 
@@ -105,8 +183,9 @@
 In either case, this benchmark aims to highlight how each scheduler handles these cases, since both cases can lead to performance degradation if they are not handled correctly.
 
-To achieve this the benchmark uses a fixed size array of \newterm{chair}s, where a chair is a data structure that holds a single blocked \gls{at}.
-When a \gls{at} attempts to block on the chair, it must first unblocked the \gls{at} currently blocked on said chair, if any.
-This creates a flow where \glspl{at} push each other out of the chairs before being pushed out themselves.
-For this benchmark to work however, the number of \glspl{at} must be equal or greater to the number of chairs plus the number of \glspl{proc}.
+To achieve this the benchmark uses a fixed size array of semaphores.
+Each \gls{at} picks a random semaphore, \texttt{V}s it to unblock a \at waiting and then \texttt{P}s on the semaphore.
+This creates a flow where \glspl{at} push each other out of the semaphores before being pushed out themselves.
+For this benchmark to work however, the number of \glspl{at} must be equal or greater to the number of semaphores plus the number of \glspl{proc}.
+Note that the nature of these semaphores means the counter can go beyond 1, which could lead to calls to \texttt{P} not blocking.
 
 \todo{code, setup, results}
@@ -116,7 +195,6 @@
 		for {
 			r := random() % len(spots)
-			next := xchg(spots[r], this)
-			if next { next.wake() }
-			wait()
+			spots[r].V()
+			spots[r].P()
 			count ++
 			if must_stop() { break }
@@ -125,4 +203,34 @@
 	}
 \end{lstlisting}
+
+\begin{figure}
+	\subfloat[][Throughput, 100 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.churn.jax.ops.pstex_t}
+		}
+		\label{fig:churn:jax:ops}
+	}
+	\subfloat[][Throughput, 1 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.churn.low.jax.ops.pstex_t}
+		}
+		\label{fig:churn:jax:low:ops}
+	}
+
+	\subfloat[][Latency, 100 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.churn.jax.ns.pstex_t}
+		}
+
+	}
+	\subfloat[][Latency, 1 \ats per \proc]{
+		\resizebox{0.5\linewidth}{!}{
+			\input{result.churn.low.jax.ns.pstex_t}
+		}
+		\label{fig:churn:jax:low:ns}
+	}
+	\caption[Churn Benchmark on Intel]{\centering Churn Benchmark on Intel\smallskip\newline Throughput and latency of the Churn benchmark on the Intel machine. Throughput is the total operations per second across all cores. Latency is the duration of each operation.}
+	\label{fig:churn:jax}
+\end{figure}
 
 \section{Locality}
Index: doc/theses/thierry_delisle_PhD/thesis/text/practice.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/text/practice.tex	(revision bfd551277c04ecd8721e431ae84ab67dfad0fd2b)
+++ doc/theses/thierry_delisle_PhD/thesis/text/practice.tex	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -7,16 +7,19 @@
 More precise \CFA supports adding \procs using the RAII object @processor@.
 These objects can be created at any time and can be destroyed at any time.
-They are normally create as automatic stack variables, but this is not a requirement.
+They are normally created as automatic stack variables, but this is not a requirement.
 
 The consequence is that the scheduler and \io subsystems must support \procs comming in and out of existence.
 
 \section{Manual Resizing}
-The consequence of dynamically changing the number of \procs is that all internal arrays that are sized based on the number of \procs neede to be \texttt{realloc}ed.
-This also means that any references into these arrays, pointers or indexes, may need to be fixed when shrinking\footnote{Indexes may still need fixing because there is no guarantee the \proc causing the shrink had the highest index. Therefore indexes need to be reassigned to preserve contiguous indexes.}.
-
-There are no performance requirements, within reason, for resizing since this is usually considered as part of setup and teardown.
+Manual resizing is expected to be a rare operation.
+Programmers are mostly expected to resize clusters on startup or teardown.
+Therefore dynamically changing the number of \procs is an appropriate moment to allocate or free resources to match the new state.
+As such all internal arrays that are sized based on the number of \procs need to be \texttt{realloc}ed.
+This also means that any references into these arrays, pointers or indexes, may need to be fixed when shrinking\footnote{Indexes may still need fixing when shrinking because some indexes are expected to refer to dense contiguous resources and there is no guarantee the resource being removed has the highest index.}.
+
+There are no performance requirements, within reason, for resizing since it is expected to be rare.
 However, this operation has strict correctness requirements since shrinking and idle sleep can easily lead to deadlocks.
 It should also avoid as much as possible any effect on performance when the number of \procs remain constant.
-This later requirement prehibits simple solutions, like simply adding a global lock to these arrays.
+This later requirement prohibits naive solutions, like simply adding a global lock to the ready-queue arrays.
 
 \subsection{Read-Copy-Update}
@@ -24,11 +27,11 @@
 In this pattern, resizing is done by creating a copy of the internal data strucures, updating the copy with the desired changes, and then attempt an Idiana Jones Switch to replace the original witht the copy.
 This approach potentially has the advantage that it may not need any synchronization to do the switch.
-The switch definitely implies a race where \procs could still use the previous, original, data structure after the copy was switched in.
-The important question then becomes whether or not this race can be recovered from.
-If the changes that arrived late can be transferred from the original to the copy then this solution works.
-
-For linked-lists, dequeing is somewhat of a problem.
+However, there is a race where \procs could still use the previous, original, data structure after the copy was switched in.
+This race not only requires some added memory reclamation scheme, it also requires that operations made on the stale original version be eventually moved to the copy.
+
+For linked-lists, enqueuing is only somewhat problematic, \ats enqueued to the original queues need to be transferred to the new, which might not preserve ordering.
+Dequeuing is more challenging.
 Dequeing from the original will not necessarily update the copy which could lead to multiple \procs dequeing the same \at.
-Fixing this requires making the array contain pointers to subqueues rather than the subqueues themselves.
+Fixing this requires more synchronization or more indirection on the queues.
 
 Another challenge is that the original must be kept until all \procs have witnessed the change.
@@ -97,6 +100,6 @@
 In addition to users manually changing the number of \procs, it is desireable to support ``removing'' \procs when there is not enough \ats for all the \procs to be useful.
 While manual resizing is expected to be rare, the number of \ats is expected to vary much more which means \procs may need to be ``removed'' for only short periods of time.
-Furthermore, race conditions that spuriously lead to the impression no \ats are ready are actually common in practice.
-Therefore \procs should not be actually \emph{removed} but simply put into an idle state where the \gls{kthrd} is blocked until more \ats become ready.
+Furthermore, race conditions that spuriously lead to the impression that no \ats are ready are actually common in practice.
+Therefore resources associated with \procs should not be freed but \procs simply put into an idle state where the \gls{kthrd} is blocked until more \ats become ready.
 This state is referred to as \newterm{Idle-Sleep}.
 
@@ -110,18 +113,111 @@
 The \CFA scheduler simply follows the ``Race-to-Idle'\cit{https://doi.org/10.1137/1.9781611973099.100}' approach where a sleeping \proc is woken any time an \at becomes ready and \procs go to idle sleep anytime they run out of work.
 
+\section{Sleeping}
+As usual, the corner-stone of any feature related to the kernel is the choice of system call.
+In terms of blocking a \gls{kthrd} until some event occurs the linux kernel has many available options:
+
+\paragraph{\texttt{pthread\_mutex}/\texttt{pthread\_cond}}
+The most classic option is to use some combination of \texttt{pthread\_mutex} and \texttt{pthread\_cond}.
+These serve as straight forward mutual exclusion and synchronization tools and allow a \gls{kthrd} to wait on a \texttt{pthread\_cond} until signalled.
+While this approach is generally perfectly appropriate for \glspl{kthrd} waiting on each other, \io operations do not signal \texttt{pthread\_cond}s.
+For \io results to wake a \proc waiting on a \texttt{pthread\_cond} means that a different \glspl{kthrd} must be woken up first, and then the \proc can be signalled.
+
+\subsection{\texttt{io\_uring} and Epoll}
+An alternative is to flip the problem on its head and block waiting for \io, using \texttt{io\_uring} or even \texttt{epoll}.
+This creates the inverse situation, where \io operations directly wake sleeping \procs but waking \proc from a running \gls{kthrd} must use an indirect scheme.
+This generally takes the form of creating a file descriptor, \eg, a dummy file, a pipe or an event fd, and using that file descriptor when \procs need to wake each other.
+This leads to additional complexity because there can be a race between these artificial \io operations and genuine \io operations.
+If not handled correctly, this can lead to the artificial files going out of sync.
+
+\subsection{Event FDs}
+Another interesting approach is to use an event file descriptor\cit{eventfd}.
+This is a Linux feature that is a file descriptor that behaves like \io, \ie, uses \texttt{read} and \texttt{write}, but also behaves like a semaphore.
+Indeed, all reads and writes must use 64-bit values\footnote{On 64-bit Linux; a 32-bit Linux would use 32-bit values.}.
+Writes add their values to the buffer, that is arithmetic addition and not buffer append, and reads zero out the buffer and return the buffer values so far\footnote{This is without the \texttt{EFD\_SEMAPHORE} flag. This flags changes the behavior of \texttt{read} but is not needed for this work.}.
+If a read is made while the buffer is already 0, the read blocks until a non-0 value is added.
+What makes this feature particularly interesting is that \texttt{io\_uring} supports the \texttt{IORING\_REGISTER\_EVENTFD} command, to register an event fd to a particular instance.
+Once that instance is registered, any \io completion will result in \texttt{io\_uring} writing to the event FD.
+This means that a \proc waiting on the event FD can be \emph{directly} woken up by either other \procs or incoming \io.
+
+\begin{figure}
+	\centering
+	\input{idle1.pstex_t}
+	\caption[Basic Idle Sleep Data Structure]{Basic Idle Sleep Data Structure \smallskip\newline Each idle \proc is put onto a doubly-linked stack protected by a lock.
+	Each \proc has a private event FD.}
+	\label{fig:idle1}
+\end{figure}
+
 
 \section{Tracking Sleepers}
 Tracking which \procs are in idle sleep requires a data structure holding all the sleeping \procs, but more importantly it requires a concurrent \emph{handshake} so that no \at is stranded on a ready-queue with no active \proc.
 The classic challenge is when a \at is made ready while a \proc is going to sleep, there is a race where the new \at may not see the sleeping \proc and the sleeping \proc may not see the ready \at.
-
-Furthermore, the ``Race-to-Idle'' approach means that there is some
-
-\section{Sleeping}
-
-\subsection{Event FDs}
-
-\subsection{Epoll}
-
-\subsection{\texttt{io\_uring}}
-
-\section{Reducing Latency}
+Since \ats can be made ready by timers, \io operations or other events outside a cluster, this race can occur even if the \proc going to sleep is the only \proc awake.
+As a result, improper handling of this race can lead to all \procs going to sleep and the system deadlocking.
+
+Furthermore, the ``Race-to-Idle'' approach means that there may be contention on the data structure tracking sleepers.
+Contention slowing down \procs attempting to sleep or wake-up can be tolerated.
+These \procs are not doing useful work and therefore not contributing to overall performance.
+However, notifying, checking if a \proc must be woken-up and doing so if needed, can significantly affect overall performance and must be low cost.
+
+\subsection{Sleepers List}
+Each cluster maintains a list of idle \procs, organized as a stack.
+This ordering hopefully allows the \proc at the tail to stay in idle sleep for extended periods of time.
+Because of these unbalanced performance requirements, the algorithm tracking sleepers is designed to have idle \proc handle as much of the work as possible.
+The idle \procs maintain the list of sleepers among themselves and notifying a sleeping \proc takes as little work as possible.
+This approach means that maintaining the list is fairly straightforward.
+The list can simply use a single lock per cluster and only \procs that are getting in and out of idle state will contend for that lock.
+
+This approach also simplifies notification.
+Indeed, \procs need to be notified when a new \at is readied, but they also must be notified during resizing, so the \gls{kthrd} can be joined.
+This means that whichever entity removes idle \procs from the sleeper list must be able to do so in any order.
+Using a simple lock over this data structure makes the removal much simpler than using a lock-free data structure.
+The notification process then simply needs to wake-up the desired idle \proc, using \texttt{pthread\_cond\_signal}, \texttt{write} on an fd, etc., and the \proc will handle the rest.
+
+\subsection{Reducing Latency}
+As mentioned in this section, \procs going idle for extremely short periods of time is likely in certain common scenarios.
+Therefore, the latency of doing a system call to read from and writing to the event fd can actually negatively affect overall performance in a notable way.
+It is important to reduce latency and contention of the notification as much as possible.
+Figure~\ref{fig:idle1} shows the basic idle sleep data structure.
+For the notifiers, this data structure can cause contention on the lock and the event fd syscall can cause notable latency.
+
+\begin{figure}
+	\centering
+	\input{idle2.pstex_t}
+	\caption[Improved Idle Sleep Data Structure]{Improved Idle Sleep Data Structure \smallskip\newline An atomic pointer is added to the list, pointing to the Event FD of the first \proc on the list.}
+	\label{fig:idle2}
+\end{figure}
+
+The contention is mostly due to the lock on the list needing to be held to get to the head \proc.
+That lock can be contended by \procs attempting to go to sleep, \procs waking or notification attempts.
+The contention from the \procs attempting to go to sleep can be mitigated slightly by using \texttt{try\_acquire} instead, so the \procs simply continue searching for \ats if the lock is held.
+This trick cannot be used for waking \procs since they are not in a state where they can run \ats.
+However, it is worth noting that notification does not strictly require accessing the list or the head \proc.
+Therefore, contention can be reduced notably by having notifiers avoid the lock entirely and adding a pointer to the event fd of the first idle \proc, as in Figure~\ref{fig:idle2}.
+To avoid contention between the notifiers, instead of simply reading the atomic pointer, notifiers atomically exchange it to \texttt{null} so only one notifier will contend on the system call.
+
+\begin{figure}
+	\centering
+	\input{idle_state.pstex_t}
+	\caption[Idle-Sleep State Data Structure]{Idle-Sleep State Data Structure \smallskip\newline A three-state flag is added beside each event FD to avoid unnecessary system calls.}
+	\label{fig:idle:state}
+\end{figure}
+
+The next optimization that can be done is to avoid the latency of the event fd when possible.
+This can be done by adding what is effectively a benaphore\cit{benaphore} in front of the event fd.
+A simple three state flag is added beside the event fd to avoid unnecessary system calls, as shown in Figure~\ref{fig:idle:state}.
+The flag starts in state \texttt{SEARCH}, while the \proc is searching for \ats to run.
+The \proc then confirms the sleep by atomically swapping the state to \texttt{SLEEP}.
+If the previous state was still \texttt{SEARCH}, then the \proc does read the event fd.
+Meanwhile, notifiers atomically exchange the state to \texttt{AWAKE} state.
+If the previous state was \texttt{SLEEP}, then the notifier must write to the event fd.
+However, if the notify arrives almost immediately after the \proc marks itself idle, then both reads and writes on the event fd can be omitted, which reduces latency notably.
+This leads to the final data structure shown in Figure~\ref{fig:idle}.
+
+\begin{figure}
+	\centering
+	\input{idle.pstex_t}
+	\caption[Low-latency Idle Sleep Data Structure]{Low-latency Idle Sleep Data Structure \smallskip\newline Each idle \proc is put onto a doubly-linked stack protected by a lock.
+	Each \proc has a private event FD with a benaphore in front of it.
+	The list also has an atomic pointer to the event fd and benaphore of the first \proc on the list.}
+	\label{fig:idle}
+\end{figure}
Index: doc/theses/thierry_delisle_PhD/thesis/thesis.tex
===================================================================
--- doc/theses/thierry_delisle_PhD/thesis/thesis.tex	(revision bfd551277c04ecd8721e431ae84ab67dfad0fd2b)
+++ doc/theses/thierry_delisle_PhD/thesis/thesis.tex	(revision 622a3588907d3e5b451979e7c3b54926a5d59386)
@@ -82,4 +82,5 @@
 \usepackage{xcolor}
 \usepackage{graphicx} % For including graphics
+\usepackage{subfig}
 
 % Hyperlinks make it very easy to navigate an electronic document.
@@ -204,4 +205,5 @@
 \newcommand\at{\gls{at}\xspace}%
 \newcommand\ats{\glspl{at}\xspace}%
+\newcommand\Proc{\Gls{proc}\xspace}%
 \newcommand\proc{\gls{proc}\xspace}%
 \newcommand\procs{\glspl{proc}\xspace}%
