source: doc/theses/mike_brooks_MMath/plots/string-allocn.py@ 6c58850

Last change on this file since 6c58850 was e0350e0, checked in by Michael Brooks <mlbrooks@…>, 4 months ago

Recent rework of string benchmarks

  • Property mode set to 100644
File size: 4.6 KB
Line 
1# Read thesis-append-pbv.csv
2# Output for string-graph-peq-sharing.dat
3
4# Project details
5# Filter operation=peq
# NOTE(review): the header above looks stale — the code below reads
# result-allocate-{speed,space}-{cfa,stl}.{csv,ssv} and applies no
# operation=peq filter; confirm the intended inputs/outputs.
6# Split "series" groups of sut; only those in the "pretty" list
7# Assert one row per string-length
8# output:
9# string-len op-duration
10# in chunks, each headed by pretty(sut)
11
12import pandas as pd
13import numpy as np
14import os
15import sys
16
17sys.path.insert(0, os.path.dirname(__file__))
18from common import *
19
# re: apparent cherrypicking
# The system's response to the liveness threshold is not smooth.
# The system only uses the threshold to decide whether it will double the text heap again or not.
# The system's speed for a given string size in a given amount of memory is not affected by the specific value of the liveness threshold.
# Goals with this selection are
# - showing one speed result per <string size, memory usage amount>
# - cropping diminishing or negative returns for large memory sizes
#   - diminishing is obvious, already shown past chosen sweet spot in this selection
#   - negative caused by overflowing llc, not relevant to string impl

# Hand-picked expansion (liveness-threshold) values to plot, keyed by
# target mean string length.
favSizes = {
    20:  [-1.0, 0.05, 0.1, 0.2, 0.5, 0.9],
    50:  [-1.0, 0.05, 0.1, 0.2, 0.5, 0.9],
    100: [-1.0, 0.1, 0.2, 0.5, 0.9],
    200: [-1.0, 0.1, 0.2, 0.5, 0.9],
    500: [-1.0, 0.2, 0.4, 0.9, 0.98],
}

# Expansion values flagged as "is-default" for the STL comparison below.
defaultExpansions = [-1, 0.2]
36
# Load the timing (speed, CSV) and sizing (space, SSV) measurements for each
# string implementation under test.  Every run is classified by its
# "expansion" (liveness-threshold) setting; timing rows additionally carry a
# 'topIters' iteration-count factor.  The cfa and stl loads were previously
# duplicated verbatim; they differ only in the filename tag, so factor the
# load into one helper.
_expansionClas = {'xClasNames': ['expansion'],
                  'xClasDtypes': {'expansion': 'Float64'}}

def _loadSuite(impl):
    """Return (timings, sizings) DataFrames for one implementation tag ('cfa' or 'stl')."""
    t = loadParseTimingData('result-allocate-speed-{}.csv'.format(impl),
                            xFactNames=['topIters'],
                            xFactDtypes={'topIters': np.int64},
                            **_expansionClas)
    s = loadParseSizingData('result-allocate-space-{}.ssv'.format(impl),
                            **_expansionClas)
    return t, s

# Keep the original per-suite names for any external readers of this module.
cfatimings, cfasizings = _loadSuite('cfa')
stltimings, stlsizings = _loadSuite('stl')

timings = pd.concat([cfatimings, stltimings])
sizings = pd.concat([cfasizings, stlsizings])
51
# Join each timing observation with the memory footprint measured for the
# same <sut, corpus, expansion> run.
joinKeys = ['sut', 'corpus', 'expansion']
combined = timings.merge(
    sizings[joinKeys + ['hw_cur_req_mem(B)']],
    on=joinKeys,
)

# Collapse repeated observations to one row per <string length, platform,
# expansion>, carrying mean/min/max of duration and memory, then flatten the
# resulting (aggfunc, value) MultiIndex columns into plain tuples.
combined = combined.pivot_table(
    values=['op-duration-ns', 'hw_cur_req_mem(B)'],
    index=['corpus-meanlen-tgt', 'sut-platform', 'expansion'],
    aggfunc=['mean', 'min', 'max'],
).reset_index()
combined.columns = combined.columns.to_flat_index()

# Flag rows whose expansion setting is one of the defaults: 1 if so, else 0.
combined['is-default'] = np.isin(combined[('expansion', '')], defaultExpansions).astype(int)
67
68
69
70# print ('!!')
71# print(combined)
72
73
# Emit

# First, the CFA curves: one chunk per favoured string length, rows sorted by
# expansion and restricted to the hand-picked expansion values in favSizes.
# Each chunk is headed by a quoted title, e.g.  "cfa, len=20".
sut = "cfa"
sutGroup = combined.groupby(('sut-platform', '')).get_group(sut)

for sz, szgroup in sutGroup.groupby(('corpus-meanlen-tgt', '')):
    # Guard clause replaces the nested `if sz in favSizes.keys()` body.
    if sz not in favSizes:
        continue

    print('"{sut}, len={len}"'.format(sut=sut, len=sz))

    szgroup_sorted = szgroup.sort_values(by=('expansion', ''))
    favoured = szgroup_sorted.loc[szgroup_sorted[('expansion', '')].isin(favSizes[sz])]
    text = favoured[[('expansion', ''),
                     ('mean', 'op-duration-ns'),
                     ('min', 'op-duration-ns'),
                     ('max', 'op-duration-ns'),
                     ('mean', 'hw_cur_req_mem(B)'),
                     ('min', 'hw_cur_req_mem(B)'),
                     ('max', 'hw_cur_req_mem(B)'),
                     'is-default']].to_csv(header=False, index=False, sep='\t')
    print(text)
    print()
103
# Again, for the STL comparisons: default expansion only.  One chunk per
# favoured string length, headed by the bare length, with the platform name
# as the trailing column so both implementations can be told apart.

atDefaults = combined.groupby('is-default').get_group(1)

for sz, szgroup in atDefaults.groupby(('corpus-meanlen-tgt', '')):
    # Guard clause replaces the nested `if sz in favSizes.keys()` body.
    if sz not in favSizes:
        continue

    print(sz)
    text = szgroup[[('expansion', ''),
                    ('mean', 'op-duration-ns'),
                    ('min', 'op-duration-ns'),
                    ('max', 'op-duration-ns'),
                    ('mean', 'hw_cur_req_mem(B)'),
                    ('min', 'hw_cur_req_mem(B)'),
                    ('max', 'hw_cur_req_mem(B)'),
                    ('sut-platform', '')]].to_csv(header=False, index=False, sep='\t')
    print(text)
    print()
Note: See TracBrowser for help on using the repository browser.