# Read result-allocate-speed-{cfa,stl}.csv and result-allocate-attrib-{cfa,stl}.ssv
# Output a tab-separated table on stdout

# Project details
# Merge per-operation timings with per-category attribution fractions
# Convert each category's fraction into an absolute duration (cat-duration-ns)
# Pivot so that attribution categories become columns
# Keep only the corpus mean lengths listed in sizes_i_want
# output:
# corpus-meanlen sut-platform ctor-dtor gc malloc-free text-import harness-leaf other
# one row per (corpus-meanlen, sut-platform)

import pandas as pd
import numpy as np
import os
import sys

sys.path.insert(0, os.path.dirname(__file__))
from common import *
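# common (the shared module alongside this script) provides
# loadParseTimingData and loadParseAttribData, used below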

# Corpus mean lengths (string sizes) to keep in the output
sizes_i_want = [50, 200] # [20, 50, 100, 200]

# Assume the CFA threshold was only run at its default value

# Timing and attribution data from the CFA runs
cfatimings = loadParseTimingData('result-allocate-speed-cfa.csv',
                xClasNames=['expansion'], xClasDtypes={'expansion':'Float64'},
                xFactNames=['topIters'], xFactDtypes={'topIters':np.int64})

cfaattribs = loadParseAttribData('result-allocate-attrib-cfa.ssv')

# Timing and attribution data from the STL runs
stltimings = loadParseTimingData('result-allocate-speed-stl.csv',
                xClasNames=['expansion'], xClasDtypes={'expansion':'Float64'},
                xFactNames=['topIters'], xFactDtypes={'topIters':np.int64})

stlattribs = loadParseAttribData('result-allocate-attrib-stl.ssv')

# Stack the CFA and STL rows into single timing and attribution frames
timings = pd.concat([cfatimings, stltimings])
attribs = pd.concat([cfaattribs, stlattribs])

combined = pd.merge(
    left=timings[['sut-platform', 'corpus-meanlen', 'expansion', 'op-duration-ns']],
    right=attribs[['sut-platform', 'corpus-meanlen', 'expansion', 'category', 'fraction']],
    on=['sut-platform', 'corpus-meanlen', 'expansion']
)
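# The inner join should yield one row per
# (sut-platform, corpus-meanlen, expansion, category) combination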

# Convert each category's fraction of the run into an absolute duration,
# then drop the columns that are no longer needed
combined['cat-duration-ns'] = combined['op-duration-ns'] * combined['fraction']
combined.drop(columns=['expansion', 'op-duration-ns', 'fraction'], inplace=True)

pvt = combined.pivot(columns='category', values='cat-duration-ns',
                     index=['corpus-meanlen', 'sut-platform'])
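# pivot raises ValueError if any (corpus-meanlen, sut-platform, category)
# combination is duplicated, which doubles as the one-row-per-key assertion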

# Present the category columns in a fixed order
desired_dcol_order = ["ctor-dtor", "gc", "malloc-free", "text-import", "harness-leaf", "other"]
pvt = pvt[desired_dcol_order]

# Keep only the corpus mean lengths selected in sizes_i_want
filtered = pvt.loc[pvt.index.get_level_values('corpus-meanlen').isin(sizes_i_want)]

# Emit a tab-separated table on stdout; categories with no attribution row appear as 0
print(filtered.to_csv(header=True, index=True, sep='\t', na_rep="0"))

