Index: benchmark/plot.py
===================================================================
--- benchmark/plot.py	(revision ce1d72168b2e1590d531c1c3b7cbc9f32c4033e7)
+++ benchmark/plot.py	(revision c0458be3eeb2ce674325617ec1c812bb8e6af257)
@@ -14,4 +14,5 @@
 import math
 import numpy
+import os
 import re
 import statistics
@@ -22,9 +23,10 @@
 
 class Field:
-	def __init__(self, unit, _min, _log, _name=None):
+	def __init__(self, unit, _min, _log, _name=None, _factor=1.0):
 		self.unit = unit
 		self.min  = _min
 		self.log  = _log
 		self.name = _name
+		self.factor = _factor
 
 field_names = {
@@ -33,26 +35,28 @@
 	"Ops per procs"         : Field('Ops'   , 0, False),
 	"Ops per threads"       : Field('Ops'   , 0, False),
-	"ns per ops/procs"      : Field(''    , 0, False, _name = "Latency (ns $/$ (Processor $\\times$ Operation))" ),
+	"ns per ops/procs"      : Field(''      , 0, False, _name = "Latency (ns $/$ (Processor $\\times$ Operation))" ),
 	"Number of threads"     : Field(''      , 1, False),
 	"Total Operations(ops)" : Field('Ops'   , 0, False),
 	"Ops/sec/procs"         : Field('Ops'   , 0, False),
 	"Total blocks"          : Field('Blocks', 0, False),
-	"Ops per second"        : Field(''   , 0, False),
+	"Ops per second"        : Field(''      , 0, False),
 	"Cycle size (# thrds)"  : Field('thrd'  , 1, False),
 	"Duration (ms)"         : Field('ms'    , 0, False),
 	"Target QPS"            : Field(''      , 0, False),
 	"Actual QPS"            : Field(''      , 0, False),
-	"Average Read Latency"  : Field('us'    , 0, True),
-	"Median Read Latency"   : Field('us'    , 0, True),
-	"Tail Read Latency"     : Field('us'    , 0, True),
-	"Average Update Latency": Field('us'    , 0, True),
-	"Median Update Latency" : Field('us'    , 0, True),
-	"Tail Update Latency"   : Field('us'    , 0, True),
+	"Average Read Latency"  : Field('s'     , 0, True, _factor = 0.000001),
+	"Median Read Latency"   : Field('s'     , 0, True, _factor = 0.000001),
+	"Tail Read Latency"     : Field('s'     , 0, True, _factor = 0.000001),
+	"Average Update Latency": Field('s'     , 0, True, _factor = 0.000001),
+	"Median Update Latency" : Field('s'     , 0, True, _factor = 0.000001),
+	"Tail Update Latency"   : Field('s'     , 0, True, _factor = 0.000001),
 	"Update Ratio"          : Field('\%'    , 0, False),
+	"Request Rate"          : Field('req/s' , 0, False),
+	"Data Rate"             : Field('b/s'   , 0, False, _factor = 1000 * 1000, _name = "Response Throughput"),
 }
 
-def plot(in_data, x, y, options):
+def plot(in_data, x, y, options, prefix):
 	fig, ax = plt.subplots()
-	colors = itertools.cycle(['#0095e3','#006cb4','#69df00','#0aa000','#fb0300','#e30002','#fd8f00','#ff7f00','#8f00d6','#4b009a','#ffff00','#b13f00'])
+	colors = itertools.cycle(['#006cb4','#0aa000','#ff6600','#8510a1','#0095e3','#fd8f00','#e30002','#8f00d6','#4b009a','#ffff00','#69df00','#fb0300','#b13f00'])
 	series = {} # scatter data for each individual data point
 	groups = {} # data points for x value
@@ -70,5 +74,5 @@
 		if x in entry[2] and y in entry[2]:
 			xval = entry[2][x]
-			yval = entry[2][y]
+			yval = entry[2][y] * field_names[y].factor
 			series[name]['x'].append(xval)
 			series[name]['y'].append(yval)
@@ -98,5 +102,5 @@
 	for name, data in sorted(series.items()):
 		_col = next(colors)
-		plt.scatter(data['x'], data['y'], color=_col, label=name, marker='x')
+		plt.scatter(data['x'], data['y'], color=_col, label=name[len(prefix):], marker='x')
 		plt.plot(lines[name]['x'], lines[name]['min'], '--', color=_col)
 		plt.plot(lines[name]['x'], lines[name]['max'], '--', color=_col)
@@ -122,5 +126,4 @@
 		plt.xlim(field_names[x].min, mx + 0.25)
 
-	ax.yaxis.set_major_formatter( EngFormatter(unit=field_names[y].unit) )
 	if options.logy:
 		ax.set_yscale('log')
@@ -129,4 +132,6 @@
 	else:
 		plt.ylim(field_names[y].min, options.MaxY if options.MaxY else my*1.2)
+
+	ax.yaxis.set_major_formatter( EngFormatter(unit=field_names[y].unit) )
 
 	plt.legend(loc='upper left')
@@ -173,4 +178,7 @@
 			fields.add(label)
 
+	# find the common prefix on series for removal
+	prefix = os.path.commonprefix(list(series))
+
 	if not options.out :
 		print(series)
@@ -193,3 +201,3 @@
 
 
-	plot(data, wantx, wanty, options)
+	plot(data, wantx, wanty, options, prefix)
Index: benchmark/process-trun.py
===================================================================
--- benchmark/process-trun.py	(revision c0458be3eeb2ce674325617ec1c812bb8e6af257)
+++ benchmark/process-trun.py	(revision c0458be3eeb2ce674325617ec1c812bb8e6af257)
@@ -0,0 +1,111 @@
+#!/usr/bin/python3
+
+"""
+Python Script to convert output from trun to rmit like output
+"""
+import argparse
+import json
+import locale
+import os
+import re
+import sys
+
+locale.setlocale( locale.LC_ALL, 'en_US.UTF-8' )
+
+parser = argparse.ArgumentParser(description='Python Script to convert output from trun to rmit like output')
+parser.add_argument('--out', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
+parser.add_argument('files', nargs='+')
+options = parser.parse_args()
+
+def extract(file):
+	raw = []
+	with open(file, "r") as f:
+		for line in f:
+			if line.startswith("Combined"):
+				break
+
+		h = next(f)
+		next(f)
+
+		columns = h.strip().split()
+		if not len(columns) == 20:
+			print("{}: Failed to split header row '{}', unexpected number of columns.".format(file, h), file=sys.stderr)
+			raise ValueError
+
+		for line in f:
+			line = line.strip()
+			if not line:
+				break
+
+			raw.append(line)
+
+	results = []
+	for l in raw:
+		vals = l.split()
+		if not len(vals) == 21:
+			print("{}: Failed to split result row '{}', unexpected number of columns ({}).".format(file, l, len(vals)), file=sys.stderr)
+			raise ValueError
+		d = {
+			'Request Rate': float(vals[0]),
+			'MinReplies':   float(vals[1]),
+			'MeanReplies':  float(vals[2]),
+			'MaxReplies':   float(vals[3]),
+			'StdReplies':   float(vals[4]),
+			'Conn':         float(vals[5]),
+			'Resp':         float(vals[6]),
+			'Xfer':         float(vals[7]),
+			'Total':        float(vals[8]),
+			'Std':          float(vals[9]),
+			'Med':          float(vals[10]),
+			'Min':          float(vals[11]),
+			'Max':          float(vals[12]),
+			'Data Rate':    float(vals[13]),
+			'Errors':       float(vals[14]),
+			'Reqs':         float(vals[15]),
+			'ActRateC':     float(vals[16]),
+			'ActRate':      float(vals[17]),
+			'ErrRate':      float(vals[18]),
+			'SamplesT':     float(vals[19]),
+			'SamplesR':     float(vals[20])
+		}
+		results.append(d)
+
+	return results
+
+
+
+
+data = []
+
+print(options.files)
+
+for file in options.files:
+	# checking if it is a file
+	if os.path.isfile(file):
+		filename = os.path.basename(file)
+		match = re.search(r"swbsrv\.([0-9]+)gb\.(cfa|nginx)", filename)
+		try:
+			series = match[2]
+			memory = match[1]
+		except TypeError:
+			print("Can't parse filename '{}' from File '{}'.".format(filename, file), file=sys.stderr)
+			continue
+
+		try:
+			results = extract(file)
+		except OSError:
+			print("Cannot open File '{}'.".format(file), file=sys.stderr)
+			continue
+		except ValueError:
+			continue
+
+		for result in results:
+			d = [series, memory, result]
+			data.append(d)
+
+	else:
+		print("File '{}' does not exist.".format(file), file=sys.stderr)
+
+options.out.write(json.dumps(data))
+options.out.flush()
+options.out.write("\n")
