runci.py
#!/usr/bin/env python3
"""Module to run a smaller/quicker version of the benchmarking suite"""
import argparse
import json
import sys
import time
from decimal import Decimal

import yapsy.PluginFileLocator as pfl
import yapsy.PluginManager as pm

from benchmark_suite.benchmarkessentials import BenchmarkPlugin
from benchmark_suite.benchmarks.multithreaded import MultiThread
from benchmark_suite.resultsreturn import ResultsReturn
from benchmark_suite.suite import Suite
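
# Make the repository's setup/ directory importable; assumed to hold helper
# modules that the suite or its plugins import at runtime.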
sys.path.insert(1, f"{sys.path[0]}/setup/")


def get_args():
    """Get command line arguments to run the benchmarks."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-v", "--verbose", help="""Increase output verbosity""", action="store_true"
    )
    parser.add_argument(
        "-c",
        "--clear_cache_bin",
        type=str,
        help="""Executable to drop test host cache""",
    )
    parser.add_argument(
        "-n", "--name", type=str, help="""Name of program being tested""", required=True
    )
    parser.add_argument(
        "-t", "--tag", type=str, help="""Git tag of program being tested"""
    )
    parser.add_argument(
        "-r",
        "--revision",
        type=str,
        help="""Git revision hash of program being tested""",
    )
    parser.add_argument(
        "--datetime", type=str, help="""Commit date and time of program being tested"""
    )
    parser.add_argument(
        "-e", "--executable", type=str, help="""Executable to test""", required=True
    )
    parser.add_argument(
        "-a",
        "--arguments",
        type=str,
        help="""Arguments to executable being tested""",
        default="",
    )
    parser.add_argument(
        "-w", "--working", type=str, help="""Working dir""", required=True
    )
    parser.add_argument(
        "--override_power",
        type=Decimal,
        help="""Override detected power consumption in watts""",
    )
    parser.add_argument(
        "--tco", type=Decimal, help="""Total cost of ownership (TCO) of the machine in £"""
    )
    parser.add_argument(
        "--return_results",
        help="""Automatically upload results to Sanger""",
        default=False,
        action="store_true",
    )
    parser.add_argument(
        "--repeats",
        help="""Number of times to repeat the experiment""",
        type=int,
        default=3,
    )
    return parser.parse_args()


args = get_args()

# Set up the benchmark suite
benchsuite = Suite(
    clear_cache_bin=args.clear_cache_bin,
    nickname=args.name,
    override_power=args.override_power,
    tco=args.tco,
)

# Discover benchmark plugins, then create the benchmark to run and add it to the suite
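# The analyzer regex below accepts every .py file except __init__.py; note that
# PluginFileAnalyzerMathingRegex ("Mathing", sic) is yapsy's actual class name.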
pluginmanager = pm.PluginManager(
    categories_filter={"Benchmarks": BenchmarkPlugin},
    plugin_locator=pfl.PluginFileLocator(
        analyzers=(pfl.PluginFileAnalyzerMathingRegex("", r"(?!^__init__.py$).*\.py$"),)
    ),
)
pluginmanager.setPluginPlaces([sys.path[0] + "/benchmark_suite/benchmarks"])
pluginmanager.collectPlugins()
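# collectPlugins() imports every matched plugin module; only the MultiThread
# benchmark (imported directly at the top) is added to the suite explicitly.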
benchsuite.add_benchmark(
    MultiThread(
        suite=benchsuite,
        # strip() avoids a trailing space when --arguments is empty
        command=f"{args.executable} {args.arguments}".strip(),
        install_path=args.executable,
        result_dir=args.working,
        repeats=args.repeats,
    )
)

# Run the benchmarks (Suite.run() executes here) and assemble the JSON output
output = {
    "name": args.name,
    "tag": args.tag,
    "revision": args.revision,
    "datetime": args.datetime,
    "executable": args.executable,
    "arguments": args.arguments,
    "results": benchsuite.run(),
}

if args.return_results:
    r = ResultsReturn("https://it_randd.cog.sanger.ac.uk/post_signed_url_ci.json")
    result_filename = time.strftime("%Y-%m-%d-%H%M%S") + "_" + args.name + ".json"
    r.post_results(
        result_filename, json.dumps(output, indent=2, sort_keys=True), args.verbose
    )
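
# Always echo the full results to stdout as compact JSON, whether or not
# they were uploaded.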
print(json.dumps(output))