# Read result-append-pbv.csv
# Output for string-graph-pta-sharing.dat

# Project details
# Filter operation=peq
# Split "series" groups of sut; only those in the "pretty" list
# Assert one row per string-length
# output:
# string-len op-duration
# in chunks, each headed by pretty(sut)

import pandas as pd
import numpy as np
import os
import sys

sys.path.insert(0, os.path.dirname(__file__))
from common import *

# Maps operation -> (sut -> chunk-header label).  Only suts listed here are
# emitted to the .dat file; the labels use gnuplot "enhanced text" markup:
# {/Helvetica=15 ...} selects font/size, and {/Symbol \42} is octal 0x22,
# which in the Symbol encoding renders as the forall glyph — presumably
# producing "C\u2200" (Cforall); verify against the gnuplot terminal in use.
prettyFieldNames = {
    "peq": {
        "cfa-ll-share-fresh": "{/Helvetica=15 C{/Symbol \\42} x += y} share fresh",
        "stl-na-na-fresh": "STL {/Helvetica=15 x += y} fresh",
    },
    "pta": {
        "cfa-ll-share-fresh": "{/Helvetica=15 C{/Symbol \\42} x = x + y} share fresh",
        "stl-na-na-fresh": "STL {/Helvetica=15  x = x + y} fresh",
    }
}

# Load the raw timing rows, then narrow to the corpus slice this graph
# plots: a 100-string target and the 't0' offset instruction.
timings = loadParseTimingData('result-append-pbv.csv')

# get_group (rather than a boolean mask) deliberately raises KeyError if
# the expected corpus slice is absent from the input.
for _filterColumn, _wantedValue in (
    ('corpus-nstrs-tgt', 100),
    ('corpus-offset-instr', 't0'),
):
    timings = timings.groupby(_filterColumn).get_group(_wantedValue)

# Emit one chunk per (sut, operation) pair that has a pretty label, in
# gnuplot "index" format: a quoted header line, then tab-separated
#   string-len <TAB> op-duration
# rows ordered by mean string length, then a blank line between chunks.

for sut, sutRows in timings.groupby('sut'):
    groupedOp = sutRows.groupby('operation')
    for op, prettyBySut in prettyFieldNames.items():
        if op not in groupedOp.groups:
            continue  # this sut was never measured for this operation
        if sut not in prettyBySut:
            continue  # sut not in the "pretty" list -> excluded from this graph

        opTimings = groupedOp.get_group(op).sort_values(by='corpusMeanLenCharsAct')

        # Enforce the header's "Assert one row per string-length": duplicate
        # lengths would emit several y-values for one x and silently corrupt
        # the plot.  Explicit raise (not `assert`) so it survives `-O`.
        if opTimings['corpusMeanLenCharsAct'].duplicated().any():
            raise ValueError(
                'duplicate string-length rows for sut={!r} operation={!r}'.format(sut, op))

        print('"{header}"'.format(header=prettyBySut[sut]))
        text = opTimings[['corpusMeanLenCharsAct', 'op-duration-ns']].to_csv(header=False, index=False, sep='\t')
        print(text)
        print()
