Index: benchmark/plot.py
===================================================================
--- benchmark/plot.py	(revision ed49dbd8bfe367b6f266f0906501f3ebe5951dd0)
+++ benchmark/plot.py	(revision e9c5db2e1f3fc3a42c60d50ab5f6d54ca846005d)
@@ -22,21 +22,28 @@
 
 class Field:
-	def __init__(self, unit, _min):
+	def __init__(self, unit, _min, _log):
 		self.unit = unit
 		self.min  = _min
+		self.log  = _log
 
 field_names = {
-	"ns per ops"           : Field('ns'    , 0),
-	"Number of processors" : Field(''      , 1),
-	"Ops per procs"        : Field('Ops'   , 0),
-	"Ops per threads"      : Field('Ops'   , 0),
-	"ns per ops/procs"     : Field('ns'    , 0),
-	"Number of threads"    : Field('thrd'  , 1),
-	"Total Operations(ops)": Field('Ops'   , 0),
-	"Ops/sec/procs"        : Field('Ops'   , 0),
-	"Total blocks"         : Field('Blocks', 0),
-	"Ops per second"       : Field('Ops'   , 0),
-	"Cycle size (# thrds)" : Field('thrd'  , 1),
-	"Duration (ms)"        : Field('ms'    , 0),
+	"ns per ops"            : Field('ns'    , 0, False),
+	"Number of processors"  : Field(''      , 1, False),
+	"Ops per procs"         : Field('Ops'   , 0, False),
+	"Ops per threads"       : Field('Ops'   , 0, False),
+	"ns per ops/procs"      : Field('ns'    , 0, False),
+	"Number of threads"     : Field('thrd'  , 1, False),
+	"Total Operations(ops)" : Field('Ops'   , 0, False),
+	"Ops/sec/procs"         : Field('Ops'   , 0, False),
+	"Total blocks"          : Field('Blocks', 0, False),
+	"Ops per second"        : Field('Ops'   , 0, False),
+	"Cycle size (# thrds)"  : Field('thrd'  , 1, False),
+	"Duration (ms)"         : Field('ms'    , 0, False),
+	"Target QPS"            : Field('QPS'   , 0, False),
+	"Actual QPS"            : Field('QPS'   , 0, False),
+	"Median Read Latency"   : Field('us'    , 0, True),
+	"Tail Read Latency"     : Field('us'    , 0, True),
+	"Median Update Latency" : Field('us'    , 0, True),
+	"Tail Update Latency"   : Field('us'    , 0, True),
 }
 
@@ -46,4 +53,7 @@
 	series = {} # scatter data for each individual data point
 	groups = {} # data points for x value
+
+	print("Preparing Data")
+
 	for entry in in_data:
 		name = entry[0]
@@ -65,4 +75,6 @@
 			groups[name][xval].append(yval)
 
+	print("Preparing Lines")
+
 	lines = {} # lines from groups with min, max, median, etc.
 	for name, data in groups.items():
@@ -78,4 +90,6 @@
 			lines[name]['avg'].append(statistics.mean(ys))
 
+	print("Making Plots")
+
 	for name, data in series.items():
 		_col = next(colors)
@@ -85,16 +99,30 @@
 		plt.plot(lines[name]['x'], lines[name]['med'], '-', color=_col)
 
+	print("Calculating Extremums")
+
 	mx = max([max(s['x']) for s in series.values()])
 	my = max([max(s['y']) for s in series.values()])
 
+	print("Finishing Plots")
+
 	plt.ylabel(y)
-	plt.xlim(field_names[x].min, mx + 0.25)
-	plt.xticks(range(1, math.ceil(mx) + 1))
+	# plt.xticks(range(1, math.ceil(mx) + 1))
 	plt.xlabel(x)
-	plt.ylim(field_names[y].min, my*1.2)
 	plt.grid(b = True)
 	ax.xaxis.set_major_formatter( EngFormatter(unit=field_names[x].unit) )
+	if field_names[x].log:
+		ax.set_xscale('log')
+	else:
+		plt.xlim(field_names[x].min, mx + 0.25)
+
 	ax.yaxis.set_major_formatter( EngFormatter(unit=field_names[y].unit) )
+	if field_names[y].log:
+		ax.set_yscale('log')
+	else:
+		plt.ylim(field_names[y].min, my*1.2)
+
 	plt.legend(loc='upper left')
+
+	print("Results Ready")
 	if out:
 		plt.savefig(out)
@@ -106,15 +134,11 @@
 	# ================================================================================
 	# parse command line arguments
-	parser = parser = argparse.ArgumentParser(description='Python Script to draw R.M.I.T. results')
-	parser.add_argument('-f', '--file', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
-	parser.add_argument('-o', '--out', nargs='?', type=str, default=None)
-	parser.add_argument('-y', nargs='?', type=str, default="")
+	parser = argparse.ArgumentParser(description='Python Script to draw R.M.I.T. results')
+	parser.add_argument('-f', '--file', nargs='?', type=argparse.FileType('r'), default=sys.stdin, help="Input file")
+	parser.add_argument('-o', '--out', nargs='?', type=str, default=None, help="Output file")
+	parser.add_argument('-y', nargs='?', type=str, default="", help="Which field to use as the Y axis")
+	parser.add_argument('-x', nargs='?', type=str, default="", help="Which field to use as the X axis")
 
-	try:
-		options =  parser.parse_args()
-	except:
-		print('ERROR: invalid arguments', file=sys.stderr)
-		parser.print_help(sys.stderr)
-		sys.exit(1)
+	options =  parser.parse_args()
 
 	# ================================================================================
@@ -140,12 +164,21 @@
 	if not options.out :
 		print(series)
-		print("fields")
-		for f in fields:
-			print("{}".format(f))
+		print("fields: ", ' '.join(fields))
 
-	if options.y and options.y in field_names.keys():
-		plot(data, "Number of processors", options.y, options.out)
-	else:
-		if options.y:
-			print("Could not find key '{}', defaulting to 'ns per ops'".format(options.y))
-		plot(data, "Number of processors", "ns per ops", options.out)
+	wantx = "Number of processors"
+	wanty = "ns per ops"
+
+	if options.x:
+		if options.x in field_names.keys():
+			wantx = options.x
+		else:
+			print("Could not find X key '{}', defaulting to '{}'".format(options.x, wantx))
+
+	if options.y:
+		if options.y in field_names.keys():
+			wanty = options.y
+		else:
+			print("Could not find Y key '{}', defaulting to '{}'".format(options.y, wanty))
+
+
+	plot(data, wantx, wanty, options.out)
Index: benchmark/process-mutilate.py
===================================================================
--- benchmark/process-mutilate.py	(revision e9c5db2e1f3fc3a42c60d50ab5f6d54ca846005d)
+++ benchmark/process-mutilate.py	(revision e9c5db2e1f3fc3a42c60d50ab5f6d54ca846005d)
@@ -0,0 +1,124 @@
+#!/usr/bin/python3
+"""
+Python Script to convert output from mutilate to rmit like output
+"""
+import argparse
+import json
+import locale
+import os
+import re
+import sys
+
+locale.setlocale( locale.LC_ALL, 'en_US.UTF-8' )
+
+parser = argparse.ArgumentParser(description='Python Script to convert output from mutilate to rmit like output')
+parser.add_argument('--out', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
+try:
+	options =  parser.parse_args()
+except:
+	print('ERROR: invalid arguments', file=sys.stderr)
+	parser.print_help(sys.stderr)
+	sys.exit(1)
+
+thisdir = os.getcwd()
+dirs = os.listdir( thisdir )
+
+names = ['fibre', 'forall', 'vanilla']
+names_re = '|'.join(names)
+
+def precentile(line):
+	fields = line.split()
+
+	try:
+		lat50s = fields[6]
+		lat99s = fields[9]
+	except IndexError:
+		raise Warning("Warning: \"{}\"! insufficient fields".format(line))
+
+	try:
+		lat50 = locale.atof(lat50s)
+		lat99 = locale.atof(lat99s)
+	except ValueError:
+		raise Warning("Warning: \"{}\" \"{}\"! can't convert to float".format(lat50s, lat99s))
+
+	return lat50, lat99
+
+def want0(line):
+	line = line.strip()
+	if not line.endswith("= 0 (0.0%)"):
+		raise Warning("Warning: \"{}\"! should be 0".format(line))
+
+def extract(filename, out):
+	with open(filename, "r") as file:
+		lines = file.readlines()
+
+	warns = []
+
+	for line in lines:
+		try:
+			if   line.startswith("read"):
+				rlat50, rlat99 = precentile(line)
+
+			elif line.startswith("update"):
+				ulat50, ulat99 = precentile(line)
+
+			elif line.startswith("Total QPS"):
+				match = re.search(r"Total QPS = ([0-9,\.]+)", line)
+				if match:
+					try:
+						qps = locale.atof(match[1])
+					except ValueError:
+						raise Warning("Warning: \"{}\" can't convert qps to float".format(match[1]))
+				else:
+					raise Warning("Warning: \"{}\" line unreadable".format(line))
+
+			if line.startswith("Misses") or line.startswith("Skipped TXs"):
+				want0(line)
+		except Warning as w:
+			warns.append(str(w))
+
+	try:
+		out['Actual QPS'] = qps
+	except NameError:
+		warns.append("Warning: No total QPS")
+
+	try:
+		out['Median Read Latency'] = rlat50
+		out['Tail Read Latency'] = rlat99
+	except NameError:
+		warns.append("Warning: no read latencies")
+
+	try:
+		out['Median Update Latency'] = ulat50
+		out['Tail Update Latency'] = ulat99
+	except NameError:
+		warns.append("Warning: no update latencies")
+
+	return warns
+
+
+data = []
+
+for filename in dirs:
+	f = os.path.join( thisdir, filename )
+	# checking if it is a file
+	if os.path.isfile(f):
+		match = re.search(r"({})\.([0-9]+)\.([0-9]+)".format(names_re), filename)
+		try:
+			series = match[1]
+			rate = match[2]
+			rep = match[3]
+		except TypeError:
+			continue
+
+		d = { 'Target QPS': int(rate) }
+
+		w = extract( f, d )
+
+		data.append([series, "memcached {}".format(series), d])
+		if w:
+			print("{} {} {}\n{}\n".format(series, rate, rep, '\n'.join(w)))
+
+options.out.write(json.dumps(data))
+options.out.flush()
+options.out.write("\n")
Index: benchmark/rmit.py
===================================================================
--- benchmark/rmit.py	(revision ed49dbd8bfe367b6f266f0906501f3ebe5951dd0)
+++ benchmark/rmit.py	(revision e9c5db2e1f3fc3a42c60d50ab5f6d54ca846005d)
@@ -281,3 +281,3 @@
 
 	if options.file != sys.stdout:
-		print("Done                                                                                ")
+		print("Done")
