main_infer.py · 135 lines (107 loc) · 4.31 KB
"""
DDP training for Linear Probing
"""
from __future__ import print_function

import json
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "2,3"

import torch
import torch.nn as nn
import pandas as pd
import torch.multiprocessing as mp

from options.test_options import TestOptions
from learning.linear_trainer import LinearTrainer
from networks.build_backbone import build_model
from networks.build_linear import build_linear, build_linear_head
from datasets.util import build_linear_loader, build_test_loader
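
# Example launch (hypothetical invocation; the actual flag names and defaults
# are defined in options/test_options.py and may differ):
#   python main_infer.py --multiprocessing_distributed --world_size 1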


def main():
    args = TestOptions().parse()

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()

    if args.multiprocessing_distributed:
        args.world_size = ngpus_per_node * args.world_size
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        raise NotImplementedError('Currently only DDP training')


def acc(cf):
    """Mean of the diagonal of confusion matrix `cf` (average per-class
    accuracy when each row of `cf` is normalized)."""
    out = 0
    for i in range(cf.shape[0]):
        out += cf[i][i]
    return out / cf.shape[0]


def main_worker(gpu, ngpus_per_node, args):
    # initialize trainer and ddp environment
    trainer = LinearTrainer(args)
    trainer.init_ddp_environment(gpu, ngpus_per_node)

    # build encoder and classifier
    model, _ = build_model(args)
    classifier = build_linear(args)

    # build dataset
    train_loader, val_loader, train_sampler = \
        build_test_loader(args, ngpus_per_node)

    # build criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # load pre-trained ckpt for encoder and classifier
    model = trainer.load_encoder_weights(model)
    classifier = trainer.load_classifier_weights(classifier)

    # wrap up models
    model, classifier = trainer.wrap_up(model, classifier)

    # check and resume a classifier
    # start_epoch = trainer.resume_model(classifier, optimizer)

    # init tensorboard logger
    trainer.init_tensorboard_logger()

    print(args.rank, ngpus_per_node)
    # only the local rank-0 process on each node runs evaluation and logging
    if args.rank % ngpus_per_node == 0:
        top1_avg, losses_avg, output_stat, processing_time = trainer.test(
            0, val_loader, model, classifier, criterion)
        outs = [top1_avg, losses_avg]
        trainer.logging(0, outs, train=False)
        for k in output_stat.keys():
            print(k, [output_stat[k]])

        # accuracy recomputed from the raw confusion matrix; the hard-coded 9
        # assumes a 9-class setup, and the two off-diagonal entries are counted
        # as correct (presumably class pairs treated as interchangeable)
        conf_mat = output_stat['conf_mat']
        o = 0
        for i in range(9):
            o += conf_mat[i][i]
        o += conf_mat[2, 4]
        o += conf_mat[7, 5]
        overall_acc = o / conf_mat.sum()
        print(overall_acc)
        log_file = f'{args.model_folder}/stat.json'
        json_data = {}
        # json stat log file, update and overwrite
        if not os.path.isfile(log_file):
            with open(log_file, 'w') as json_file:
                json.dump({}, json_file)  # create empty file
        # with open(log_file) as json_file:
        #     json_data = json.load(json_file)
        # current_epoch_dict = {net_name: out}
        # json_data.update(current_epoch_dict)
        json_data = output_stat
        # if ('crc' in args.model_name) or ('crc' in args.dataset_name):
        #     output_stat['conf_mat'] = output_stat['conf_mat'][:5, :5]
        #     output_stat['acc'] = acc(output_stat['acc'])
        json_data = {'acc': output_stat['acc'],
                     'processing_time_second': processing_time,
                     'cf': pd.Series({'conf_mat': output_stat['conf_mat']}).to_json(orient='records')}
        with open(log_file, 'w') as json_file:
            json.dump(json_data, json_file)

    # training routine (commented out; this script only runs evaluation)
    # for epoch in range(start_epoch, args.epochs + 1):
    #     train_sampler.set_epoch(epoch)
    #     trainer.adjust_learning_rate(optimizer, epoch)
    #
    #     outs = trainer.train(epoch, train_loader, model, classifier,
    #                          criterion, optimizer)
    #
    #     # log to tensorboard
    #     trainer.logging(epoch, outs, optimizer.param_groups[0]['lr'], train=True)
    #
    #     # evaluation and logging
    #     print(args.rank, ngpus_per_node)
    #     if args.rank % ngpus_per_node == 0:
    #         outs = trainer.validate(epoch, val_loader, model,
    #                                 classifier, criterion)
    #         trainer.logging(epoch, outs, train=False)
    #
    #     # saving model
    #     trainer.save(classifier, optimizer, epoch)


if __name__ == '__main__':
    main()