# Read csv given on cmdline, e.g results-sizing-c.csv
# In each op dimension (movement, polarity, accessor)
# that has several values showing up in the input
# extend it with an 'all' member.
# Each resulting op combination defines an output file, so named, e.g.
#   results-sizing-c-stack-insfirst-allhead.dat
#   results-sizing-c-queue-insfirst-allhead.dat
#   results-sizing-c-all-insfirst-allhead.dat
# For each output file
#   considering the subset of the input data that qualifies,
#   proceed as in crunch1, i.e. [following steps], putting the output in that file
#     Split "series" groups of fx
#     Group by all remaining classifiers except "repeat number"
#     output:
#       x y-mean y-stdev y-min y-max
#     where x is size, y is duration
#     in chunks, each headed by fx

import pandas as pd
import numpy as np
import sys
import os
from contextlib import redirect_stdout

# Make the shared plotting helpers (getDataset, ...) importable.
# os.path.join handles the case where dirname(__file__) is '' (script run
# from its own directory); naive '+ "/../../plots"' would produce the
# absolute path "/../../plots" in that case.
plotsdir = os.path.join(os.path.dirname(__file__), "..", "..", "plots")
sys.path.insert(0, plotsdir)
from ListCommon import *

infile = sys.argv[1]

# Optional second argument overrides the output directory.
outdir = 'detail-plots'
if len(sys.argv) >= 3:
    outdir = sys.argv[2]
os.makedirs(outdir, exist_ok=True)

timings = getDataset(infile)

## inventory the op dimensions

movements = timings['movement'].unique()
polarities = timings['polarity'].unique()
accessors = timings['accessor'].unique()
interleaves = timings['InterleaveFrac'].unique()

# A dimension with several distinct values also gets an 'all' pseudo-value,
# meaning "do not filter on this dimension".
if movements.size > 1:
    movements = np.append(movements, 'all')
if polarities.size > 1:
    polarities = np.append(polarities, 'all')
if accessors.size > 1:
    accessors = np.append(accessors, 'all')
# if interleaves.size > 1:
#     interleaves = np.append(interleaves, 'all')

# print(movements)
# print(polarities)
# print(accessors)
# print(interleaves)

# Cartesian product of the four dimensions.  np.meshgrid promotes all inputs
# to a common dtype, so with 'all' strings present every entry (including the
# interleave fractions) comes back as a string.
ops = np.stack(np.meshgrid(movements, polarities, accessors, interleaves), -1).reshape(-1, 4)
# print(ops)

for [movement, polarity, accessor, interleave] in ops:  # output-file grain

    tgtOp = '{}-{}-{}-{}'.format(movement, polarity, accessor, interleave)
    outfile = '{}/{}-{}.dat'.format(outdir, infile[:-4], tgtOp)
    # print()
    print ("=== ", outfile, " ===")
    # print()

    ## re-shape

    # Filter with boolean masks rather than groupby().get_group(): once the
    # frame is narrowed by one dimension, a later dimension's value may be
    # absent from the subset, and get_group would raise KeyError there.  A
    # mask just yields an empty frame, which the rows == 0 check below skips.
    timingsFiltered = timings

    if movement != 'all':
        timingsFiltered = timingsFiltered[timingsFiltered['movement'] == movement]
    if polarity != 'all':
        timingsFiltered = timingsFiltered[timingsFiltered['polarity'] == polarity]
    if accessor != 'all':
        timingsFiltered = timingsFiltered[timingsFiltered['accessor'] == accessor]
    if interleave != 'all':
        # interleave arrives as a string (see meshgrid note above); convert
        # back to float to compare against the numeric InterleaveFrac column.
        timingsFiltered = timingsFiltered[timingsFiltered['InterleaveFrac'] == float(interleave)]

    rows = timingsFiltered.shape[0]
    if rows == 0:
        print("skip")
    else:
        print("got", rows)

        with open(outfile, 'w') as f:
            with redirect_stdout(f):

                groupedFx = timingsFiltered.groupby('fx')

                # One chunk per fx series: a quoted header line, then one row
                # per size with mean/std/min/max/count of the duration.
                for fx, fgroup in groupedFx:
                    # print(fgroup.head())
                    groupedRun = fgroup.groupby(['NumNodes'])  # , 'fx', 'op'
                    aggregated = groupedRun['mean_op_dur_ns'].agg(["mean", "std", "min", "max", "count"])
                    #print(aggregated.head())

                    print('"{header}"'.format(header=fx))
                    text = aggregated.to_csv(header=False, index=True, sep='\t')
                    print(text)
                    print()
                    print()