"""output_checker.py: checks submitted pyhailing evaluation result files and summarizes the results."""

import glob
import json
import logging
import sys
import time
from typing import Dict, List, Optional

import numpy as np
import pandas as pd
from pyhailing import RidehailEnv


def get_eval_files(the_dir: str) -> List[str]:
    """Provides a list of all evaluation files in the directory."""
    if the_dir is None:
        the_dir = "./"
    if the_dir[-1] != '/':
        the_dir = the_dir + "/"
    return glob.glob(the_dir + "pyhailing_eval_results*.json")


def load_result_file(eval_file: str) -> Optional[Dict]:
    """Loads the results data from a submitted file.

    If the file cannot be read, returns None.
    """
    try:
        with open(eval_file, 'r') as f:
            result = json.load(f)
        return result
    except Exception:
        return None
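

# Illustrative sketch of a results file, inferred from the fields this script
# reads below (not an authoritative schema; the real files are produced by a
# pyhailing evaluation run):
#
#   {
#       "config": {"nickname": "...", ...},               # must match one instance category's config
#       "episodes": [
#           {"final_reward": 123.4, "assignments": ...},  # one record per evaluation episode
#           ...
#       ]
#   }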


def get_instance_category(entry: Dict) -> Optional[str]:
    """Determines the instance category for which the entry was submitted.

    If it does not match the config of any instance category, returns None.
    """
    instance_categories = RidehailEnv.DIMACS_CONFIGS.ALL_CONFIGS
    entry_config = entry["config"]

    keys_to_check = list(entry_config.keys())
    try:
        keys_to_check.remove("nickname")
    except ValueError:
        # A valid entry config must include a nickname.
        return None

    for category_name, category_config in instance_categories.items():
        if all(entry_config[key] == category_config[key] for key in keys_to_check):
            return category_name

    return None


def get_rwd_upperbound(env: RidehailEnv) -> float:
    """Provides an upper bound on the reward for the env's current episode."""
    # TODO Compute a real (finite) bound; np.inf makes this check a no-op for now.
    ub = np.inf
    return ub
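

# One way get_rwd_upperbound could be tightened (assumption only, not implemented):
# sum the best-case reward over the requests that can appear in the episode.
# `per_request_best_rewards` below is a hypothetical iterable that would have to be
# derived from the env's episode data; it is not an actual pyhailing attribute.
#
#     ub = float(sum(max(0.0, r) for r in per_request_best_rewards))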


def check_vehicle_sequences(assignments, env: RidehailEnv) -> bool:
    """Checks that assignments are plausible."""
    # TODO Implement a real feasibility check; always passes for now.
    return True
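

# Sketch of the "easy" time-feasibility check that check_vehicle_sequences could
# perform, as described where it is called below (assumption only; the structure of
# `assignments` and field names such as "pickup_time" are hypothetical, not the
# actual pyhailing format):
#
#     for vehicle_seq in assignments:
#         times = [req["pickup_time"] for req in vehicle_seq]
#         if any(t_next < t_prev for t_prev, t_next in zip(times, times[1:])):
#             return False  # a vehicle would have to travel back in time
#     return True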


def get_entry_performance(entry: Dict) -> Optional[float]:
    """Checks the performance of an entry and returns its mean episode reward."""
    eps_results = entry["episodes"]

    # Make sure the proper number of episodes were performed
    expected_num_eps = RidehailEnv.DIMACS_NUM_EVAL_EPISODES
    if len(eps_results) != expected_num_eps:
        logging.warning(
            f"Invalid entry. Expected results for {expected_num_eps} episodes "
            f"but got results for {len(eps_results)}.")
        return None

    # Create an environment to check the solution
    config = entry["config"]
    config["for_evaluation"] = False
    env = RidehailEnv(config)

    # For each episode...
    for eps_result in eps_results:
        env.reset()

        # Make sure the claimed reward does not exceed a computable upper bound
        rwd_upperbound = get_rwd_upperbound(env)
        if eps_result['final_reward'] > rwd_upperbound:
            logging.warning("Invalid reward achieved.")
            return None

        # Get the sequence of requests for each vehicle and make sure it is
        # time-feasible (with an easy check).
        seqs_feasible = check_vehicle_sequences(eps_result['assignments'], env)
        if not seqs_feasible:
            logging.warning("Invalid vehicle-to-request assignments.")
            return None

    # All good. Return the mean episode reward.
    mean_reward = (
        sum(eps_result["final_reward"] for eps_result in eps_results)
        / expected_num_eps
    )
    return mean_reward


def get_competition_results(eval_files: List[str]) -> pd.DataFrame:
    """Checks every entry file and returns one results record per file."""
    # Initialize the results of the competition
    competition_results = []

    for eval_file in eval_files:

        # Initialize a results record for this entry
        entry_results = {
            "filename": eval_file,
            "instance_category": None,
            "mean_reward": None,
        }

        # Load the file
        entry = load_result_file(eval_file)
        if entry is None:
            logging.warning(f"Entry file could not be read: {eval_file}")
            competition_results.append(entry_results)
            continue

        # Determine the entry's instance category
        instance_category = get_instance_category(entry)
        if instance_category is None:
            logging.warning(
                f"Entry file's config does not match the config of any instance category: {eval_file}"
            )
            competition_results.append(entry_results)
            continue
        entry_results["instance_category"] = instance_category

        # Get the entry's mean episode reward (None if any check failed)
        entry_results["mean_reward"] = get_entry_performance(entry)

        # Done.
        competition_results.append(entry_results)

    # Combine all results into a DataFrame and return it
    return pd.DataFrame(competition_results)
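

# Illustrative helper (not part of the original script): one way to turn the raw
# per-file results into a per-category leaderboard. It uses only standard pandas
# calls; the ranking rule (higher mean reward is better) is an assumption.
def summarize_by_category(results: pd.DataFrame) -> pd.DataFrame:
    """Returns valid entries sorted by mean reward within each instance category."""
    valid = results.dropna(subset=["instance_category", "mean_reward"])
    return valid.sort_values(
        ["instance_category", "mean_reward"], ascending=[True, False]
    )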


if __name__ == "__main__":

    now = time.strftime("%Y%m%d%H%M%S")

    args = sys.argv
    if len(args) != 2:
        logging.warning("No directory specified; grabbing eval files from the current directory.")
        eval_files_dir = "./"
    else:
        eval_files_dir = args[1]

    eval_files = get_eval_files(eval_files_dir)
    eval_results = get_competition_results(eval_files)
    eval_results.to_csv(f"competition_results_{now}.csv", index=False)
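
# Illustrative usage (inferred from the argument handling above; the directory name
# is only an example):
#
#     python output_checker.py ./submissions
#
# This scans the directory for pyhailing_eval_results*.json files and writes
# competition_results_<timestamp>.csv to the current working directory.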