source: doc/theses/mike_brooks_MMath/benchmarks/list/detail-plots.py

Last change on this file was bb5b866, checked in by Michael Brooks <mlbrooks@…>, 5 months ago

LL perf: adjust configurations

Simplify doing a manual one-off run. Disable harness's iterators on "zoomout" plots. Remove cfa-strip and add lq-list for "zoomout" plots. Add more exploratory analyses.

  • Property mode set to 100644
File size: 3.4 KB
Line 
1# Read csv given on cmdline, e.g results-sizing-c.csv
2# In each op dimension (movement, polarity, accessor)
3# that has several values showing up in the input
4# extend it with an 'all' member.
5# Each resulting op combination defines an output file, so named, e.g.
6# results-sizing-c-stack-insfirst-allhead.dat
7# results-sizing-c-queue-insfirst-allhead.dat
8# results-sizing-c-all-insfirst-allhead.dat
9# For each output file
10# considering the subset of the input data that qualifies,
11# proceed as in crunch1, i.e. [following steps], putting the output in that file
12# Split "series" groups of fx
13# Group by all remaining classifiers except "repeat number"
14# output:
15# x y-mean y-stdev y-min y-max
16# where x is size, y is duration
17# in chunks, each headed by fx
18
19import pandas as pd
20import numpy as np
21import sys
22import os
23from contextlib import redirect_stdout
24
25plotsdir = os.path.dirname(__file__) + "/../../plots"
26sys.path.insert(0, plotsdir)
27from ListCommon import *
28
# First command-line argument: the input CSV, e.g. results-sizing-c.csv.
infile = sys.argv[1]

# Optional second argument overrides the output directory.
outdir = sys.argv[2] if len(sys.argv) >= 3 else 'detail-plots'
os.makedirs(outdir, exist_ok=True)

# Load the benchmark timings (helper from ListCommon).
timings = getDataset(infile)
37
## Inventory the op dimensions present in the input.

movements = timings['movement'].unique()
polarities = timings['polarity'].unique()
accessors = timings['accessor'].unique()
interleaves = timings['InterleaveFrac'].unique()

# Any dimension with more than one distinct value also gets a synthetic
# 'all' member, so an aggregate output is produced alongside the
# per-value ones.  InterleaveFrac deliberately gets no 'all' member.
movements, polarities, accessors = (
    np.append(vals, 'all') if vals.size > 1 else vals
    for vals in (movements, polarities, accessors)
)

# Cartesian product of the dimension values: one row per output file,
# each row being [movement, polarity, accessor, interleave].
ops = np.stack(np.meshgrid(movements, polarities, accessors, interleaves), -1).reshape(-1, 4)
61
for [movement, polarity, accessor, interleave] in ops: # output-file grain

    # One output file per op combination, e.g.
    # detail-plots/results-sizing-c-stack-insfirst-allhead-0.0.dat
    tgtOp = '{}-{}-{}-{}'.format(movement, polarity, accessor, interleave)
    # Strip the input's extension robustly (was infile[:-4], which
    # assumed a 4-character ".csv" suffix).
    outfile = '{}/{}-{}.dat'.format(outdir, os.path.splitext(infile)[0], tgtOp)
    print ("=== ", outfile, " ===")

    ## re-shape: restrict to the rows matching this combination;
    ## 'all' leaves the corresponding dimension unfiltered.
    # Boolean masks instead of groupby().get_group(): a value that is
    # absent from an already-filtered subset yields an empty frame and
    # the "skip" path below, where get_group would raise KeyError.
    timingsFiltered = timings
    if movement != 'all':
        timingsFiltered = timingsFiltered[ timingsFiltered['movement'] == movement ]
    if polarity != 'all':
        timingsFiltered = timingsFiltered[ timingsFiltered['polarity'] == polarity ]
    if accessor != 'all':
        timingsFiltered = timingsFiltered[ timingsFiltered['accessor'] == accessor ]
    if interleave != 'all':
        # meshgrid coerced every dimension to strings; convert back to
        # float for comparison against the numeric column.
        timingsFiltered = timingsFiltered[ timingsFiltered['InterleaveFrac'] == float(interleave) ]

    rows = timingsFiltered.shape[0]
    if rows == 0:
        print("skip")
    else:
        print("got", rows)

        with open(outfile, 'w') as f:
            with redirect_stdout(f):

                # Output in chunks, one per fx (series), each headed by
                # the fx name in quotes.
                groupedFx = timingsFiltered.groupby('fx')

                for fx, fgroup in groupedFx:
                    # Aggregate the repeats: x is NumNodes (size),
                    # y is mean_op_dur_ns (duration).
                    groupedRun = fgroup.groupby('NumNodes')
                    aggregated = groupedRun['mean_op_dur_ns'].agg(["mean", "std", "min", "max", "count"])

                    print('"{header}"'.format(header=fx))
                    text = aggregated.to_csv(header=False, index=True, sep='\t')
                    print(text)
                    print()
                    print()
Note: See TracBrowser for help on using the repository browser.