# customize.py (from a fork of mlcommons/cm4mlperf-results)
import cmind
import os
from pathlib import Path


def preprocess(i):
    """Post-process CM experiment entries that hold MLPerf results."""

    env = i['env']

    # Base query: find all entries handled by the CM 'experiment' automation
    ii = {'action': 'find',
          'automation': 'experiment,a0a2d123ef064bcb'}

    # Optional filter by a concrete experiment artifact name
    experiment_name = i.get('input', {}).get('experiment_name', '')
    if experiment_name != '':
        ii['artifact'] = experiment_name

    # Optional filter by tags; fall back to CM_MLPERF_RESULT_TYPE,
    # then to the default 'mlperf-inference' tag
    experiment_tags = i.get('input', {}).get('experiment_tags', '')
    if experiment_tags == '':
        experiment_tags = env.get('CM_MLPERF_RESULT_TYPE', '')

    if experiment_tags != '':
        ii['tags'] = experiment_tags
    else:
        ii['tags'] = 'mlperf-inference'

    # Query CM to get experiment entries with MLPerf results
    r = cmind.access(ii)
    if r['return'] > 0:
        return r

    lst = r['list']

    for experiment in lst:
        print('Processing experiment: {}'.format(experiment.path))

        # Normalize tags: add a short alias next to the full benchmark
        # or division tag if it is not present yet
        meta = experiment.meta
        tags = meta.get('tags', [])

        updated = False
        for x in [('mlperf-inference', 'inference'),
                  ('mlperf-tiny', 'tiny'),
                  ('mlperf-training', 'training'),
                  ('closed-power', 'power'),
                  ('open-power', 'power'),
                  ('closed-network', 'network')]:
            if x[0] in tags and x[1] not in tags:
                tags.append(x[1])
                updated = True

        if updated:
            # Write the extended tag list back to the experiment entry
            ii['tags'] = tags
            ii['meta'] = meta
            ii['action'] = 'update'
            ii['replace'] = True
            ii['artifact'] = meta['uid']

            r = cmind.access(ii)
            if r['return'] > 0:
                return r

            print('  - Tags updated!')

        # Walk all result files inside the experiment entry
        for path in Path(experiment.path).rglob('cm-result.json'):
            print(path)

            r = cmind.utils.load_json(path)
            if r['return'] > 0:
                return r

            results = r['meta']

            updated = False
            for result in results:
                # Derive energy efficiency (inferences per joule) for
                # MLPerf inference results that carry power measurements
                if 'mlperf-inference' in tags and result.get('has_power', False):
                    result_power = result.get('Result_Power', '')
                    scenario = result.get('Scenario', '').lower()

                    if scenario == 'offline':
                        # Offline: Result is throughput (samples/s) and
                        # Result_Power is average power (W) -> samples/J
                        infs_per_second = result.get('Result')
                        if not infs_per_second or not result_power:
                            continue
                        inference_per_joule = infs_per_second / result_power

                    elif scenario in ['singlestream', 'multistream']:
                        # Single/MultiStream: Result_Power is energy per query
                        # in millijoules; a MultiStream query carries 8 samples,
                        # hence the 8000 factor
                        latency_per_inference = result.get('Result')
                        result_power_units = result.get('Result_Power_Units')
                        if not latency_per_inference or not result_power or not result_power_units:
                            continue
                        if result_power_units == 'millijoules':
                            inference_per_joule = 1000 / result_power if scenario == 'singlestream' \
                                else 8000 / result_power
                        else:
                            continue

                    else:
                        continue

                    result['Inference_per_Joule'] = inference_per_joule
                    updated = True
                    print('    Result updated')

            if updated:
                r = cmind.utils.save_json(path, results)
                if r['return'] > 0:
                    return r

    return {'return': 0}
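

# ---------------------------------------------------------------------------
# Minimal sketch of how this hook could be exercised on its own, outside the
# CM script runner. The exact structure of the dictionary that CM passes to
# preprocess() is an assumption here; only the 'env' and 'input' keys read
# above are relied upon, and 'experiment_tags' is an input the function
# already understands.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    sample_i = {
        'env': {},                                         # no CM_MLPERF_RESULT_TYPE override
        'input': {'experiment_tags': 'mlperf-inference'}   # narrow the query by tags
    }

    out = preprocess(sample_i)
    if out['return'] > 0:
        print('preprocess failed: {}'.format(out))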