run.py
#!/usr/bin/env python3
"""
@author: Zsolt Kovari
"""
import argparse
import logging
import os
import shutil
import subprocess
import sys
from subprocess import CalledProcessError, TimeoutExpired

import logger
import util
from loader import Loader
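# Note: logger, util, and loader appear to be project-local helper modules
# (presumably logger.py, util.py, and loader.py next to this script) rather
# than standard-library or third-party packages.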


def flatten(lst):
    return sum(([x] if not isinstance(x, list) else flatten(x) for x in lst), [])
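
# For illustration, flatten collapses nested argument lists into one flat list, e.g.
#   flatten(["mvn", ["-Xms2g", "-Xmx4g"], "clean"]) == ["mvn", "-Xms2g", "-Xmx4g", "clean"]
# which is presumably why conf.vmargs may be supplied either as a single string
# or as a list of JVM options.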


def build(conf, skip_tests):
    """Builds the project.
    """
    util.set_working_directory("../")
    args = flatten(["mvn", conf.vmargs, "clean", "install", "--fail-at-end"])
    if skip_tests:
        args.append("-DskipTests")
    subprocess.check_call(args)
    util.set_working_directory()
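
# For reference, build() above runs roughly the following shell command,
# presumably from the directory one level above this script:
#   mvn <conf.vmargs> clean install --fail-at-end [-DskipTests]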


def generate(conf):
    """Generates the models.
    """
    target = util.get_generator_jar()
    for size in conf.sizes:
        subprocess.check_call(flatten(["java", conf.vmargs, "-Xmx" + conf.xmx, "-jar", target, "-size", str(size)]))
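
# Each generator call above is roughly equivalent to:
#   java <conf.vmargs> -Xmx<conf.xmx> -jar <generator jar> -size <size>
# i.e. one generator run per model size listed in the configuration.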


def benchmark(conf):
    """Runs measurements.
    """
    header = "../output/header.tsv"
    result_file = "../output/output.tsv"
    if os.path.exists(result_file):
        os.remove(result_file)
    shutil.copy(header, result_file)
    for change_set in conf.change_sets:
        for tool in conf.tools:
            for query in conf.queries:
                for args in conf.optional_arguments:
                    for size in conf.sizes:
                        target = util.get_tool_jar(tool)
                        print("Running benchmark: tool = " + tool + ", change set = " + change_set +
                              ", query = " + query + ", size = " + str(size) + ", extra arguments = " + str(args))
                        try:
                            command = ["java", conf.vmargs,
                                       "-jar", target,
                                       "-runs", str(conf.runs),
                                       "-size", str(size),
                                       "-query", query,
                                       "-changeSet", change_set,
                                       "-iterationCount", str(conf.iterations)]
                            command += args
                            command = flatten(command)
                            output = subprocess.check_output(command, timeout=conf.timeout)
                            # append this measurement's TSV rows to the collected results
                            with open(result_file, "ab") as file:
                                file.write(output)
                        except TimeoutExpired:
                            print("Timed out after", conf.timeout, "s, skipping the remaining sizes of this configuration.")
                            break
                        except CalledProcessError as error:
                            print("Benchmark exited with error code", error.returncode)
                            break
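
# For reference, each measurement assembled in benchmark() above launches the
# selected tool's jar roughly as:
#   java <conf.vmargs> -jar <tool jar> -runs <runs> -size <size> -query <query> \
#        -changeSet <change set> -iterationCount <iterations> <optional arguments>
# and appends the tool's TSV output to ../output/output.tsv.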


def clean_dir(path):
    if os.path.exists(path):
        shutil.rmtree(path)
    os.mkdir(path)


def visualize():
    """Visualizes the benchmark results.
    """
    clean_dir("../diagrams")
    util.set_working_directory("../reporting")
    subprocess.call(["Rscript", "visualize.R", "../config/reporting-1.json"])
    # subprocess.call(["Rscript", "visualize.R", "../config/reporting-2.json"])


def extract_results():
    """Extracts the benchmark results.
    """
    clean_dir("../results")
    util.set_working_directory("../reporting")
    subprocess.call(["Rscript", "extract_results.R"])


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--build",
                        help="build the project",
                        action="store_true")
    parser.add_argument("-g", "--generate",
                        help="generate models",
                        action="store_true")
    parser.add_argument("-m", "--measure",
                        help="run the benchmark",
                        action="store_true")
    parser.add_argument("-s", "--skip-tests",
                        help="skip JUnit tests",
                        action="store_true")
    parser.add_argument("-v", "--visualize",
                        help="create visualizations",
                        action="store_true")
    parser.add_argument("-e", "--extract",
                        help="extract results",
                        action="store_true")
    parser.add_argument("-t", "--test",
                        help="run the test sequence",
                        action="store_true")
    args = parser.parse_args()

    if args.skip_tests and not args.build:
        raise ValueError("--skip-tests provided without the --build argument")

    util.set_working_directory()
    logger.init()
    loader = Loader()
    config = loader.load()

    # if no arguments were given, execute the full sequence:
    # test build, model generation, benchmark, visualization and result extraction
    no_args = all(not val for val in vars(args).values())
    if no_args:
        args.test = True
        args.visualize = True
        args.extract = True

    if args.build:
        build(config, args.skip_tests)
    if args.generate:
        generate(config)
    if args.measure:
        benchmark(config)
    if args.test:
        build(config, True)
        generate(config)
        build(config, False)
        benchmark(config)
    if args.visualize:
        visualize()
    if args.extract:
        extract_results()
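
# Typical invocations, based on the flags defined above:
#   ./run.py              # no arguments: build, generate, benchmark, visualize, extract
#   ./run.py -b -s        # build only, skipping the JUnit tests
#   ./run.py -g -m        # generate the models, then run the benchmark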