# Read thesis-append-pbv.csv
# Output for string-graph-peq-sharing.dat

# Project details
# Filter operation=peq
# Split "series" groups of sut; only those in the "pretty" list
# Assert one row per string-length
# output:
#   string-len op-duration
# in chunks, each headed by pretty(sut)

| 12 | import pandas as pd
 | 
|---|
| 13 | import numpy as np
 | 
|---|
| 14 | import os
 | 
|---|
| 15 | import sys
 | 
|---|
| 16 | 
 | 
|---|
| 17 | sys.path.insert(0, os.path.dirname(__file__))
 | 
|---|
| 18 | from common import *
 | 
|---|
| 19 | 
 | 
|---|
| 20 | prettyFieldNames = {
 | 
|---|
| 21 |     "cfa-ll-share-fresh": "{/Helvetica=15 C{/Symbol \\42} +=} share fresh",
 | 
|---|
| 22 |     "cfa-ll-share-reuse": "{/Helvetica=15 C{/Symbol \\42} +=} share reuse",
 | 
|---|
| 23 |     "stl-na-na-fresh": "STL {/Helvetica=15 +=} fresh",
 | 
|---|
| 24 |     "stl-na-na-reuse": "STL {/Helvetica=15 +=} reuse",
 | 
|---|
| 25 | }
 | 
|---|
| 26 | 
 | 
|---|
| 27 | timings = loadParseTimingData('result-append-pbv.csv')
 | 
|---|
| 28 | 
 | 
|---|
| 29 | 
 | 
|---|
| 30 | # Filter operation=peq, corpus=100-*-*+*+t0
 | 
|---|
| 31 | 
 | 
|---|
| 32 | timings = timings.groupby('operation').get_group('peq')
 | 
|---|
| 33 | timings = timings.groupby('corpus-nstrs-tgt').get_group(100)
 | 
|---|
| 34 | timings = timings.groupby('corpus-offset-instr').get_group('t0')
 | 
|---|
| 35 | 
 | 
|---|
| 36 | 
 | 
|---|
| 37 | # Emit in groups
 | 
|---|
| 38 | 
 | 
|---|
| 39 | groupedSut = timings.groupby('sut')
 | 
|---|
| 40 | 
 | 
|---|
| 41 | for sut, sgroup in groupedSut:
 | 
|---|
| 42 | 
 | 
|---|
| 43 |     if sut in prettyFieldNames:
 | 
|---|
| 44 | 
 | 
|---|
| 45 |         sgroup_sorted = sgroup.sort_values(by='corpusMeanLenCharsAct')
 | 
|---|
| 46 | 
 | 
|---|
| 47 |         print('"{header}"'.format(header=prettyFieldNames[sut]))
 | 
|---|
| 48 |         text = sgroup_sorted[['corpusMeanLenCharsAct', 'op-duration-ns']].to_csv(header=False, index=False, sep='\t')
 | 
|---|
| 49 |         print(text)
 | 
|---|
| 50 |         print()
 | 
|---|