eval.py (forked from facebookresearch/pytorch_GAN_zoo)
import importlib
import argparse
import sys

if __name__ == "__main__":

    # Build the top-level parser without -h so the evaluation module can
    # append its own arguments before help is printed.
    parser = argparse.ArgumentParser(description='Testing script',
                                     add_help=False)
    parser.add_argument('evaluation_name', type=str,
                        help='Name of the evaluation method to launch. To get \
                        the arguments specific to an evaluation method please \
                        use: eval.py evaluation_name -h')
    parser.add_argument('--no_vis', help='Disable visualization',
                        action='store_true')
    parser.add_argument('--np_vis', help='Replace visdom by a numpy based \
                        visualizer (SLURM)',
                        action='store_true')
    parser.add_argument('-m', '--module', help="Module to evaluate, available \
                        modules: PGAN, PPGAN, DCGAN",
                        type=str, dest="module")
    parser.add_argument('-n', '--name', help="Model's name",
                        type=str, dest="name")
    parser.add_argument('-d', '--dir', help='Output directory',
                        type=str, dest="dir", default="output_networks")
    parser.add_argument('-i', '--iter', help='Iteration to evaluate',
                        type=int, dest="iter")
    parser.add_argument('-s', '--scale', help='Scale to evaluate',
                        type=int, dest="scale")
    parser.add_argument('-c', '--config', help='Training configuration',
                        type=str, dest="config")
    parser.add_argument('-v', '--partitionValue', help="Partition's value",
                        type=str, dest="partition_value")
    parser.add_argument("-A", "--statsFile", dest="statsFile",
                        type=str, help="Path to the statistics file")

    # A bare -h/--help at this point can only describe the top-level options,
    # since no evaluation module has been loaded yet.
    if len(sys.argv) > 1 and sys.argv[1] in ['-h', '--help']:
        parser.print_help()
        sys.exit()

    args, unknown = parser.parse_known_args()

    # Select the visualization backend: numpy-based (SLURM), none, or visdom.
    vis_module = None
    if args.np_vis:
        vis_module = importlib.import_module("visualization.np_visualizer")
    elif args.no_vis:
        print("Visualization disabled")
    else:
        vis_module = importlib.import_module("visualization.visualizer")

    # Load the requested evaluation module and let it parse its own arguments.
    module = importlib.import_module("models.eval." + args.evaluation_name)
    print("Running " + args.evaluation_name)

    # Re-enable -h/--help so the evaluation module's arguments appear in it.
    parser.add_argument('-h', '--help', action='help')
    out = module.test(parser, visualisation=vis_module)

    if out is not None and not out:
        print("...FAIL")
    else:
        print("...OK")