-
Notifications
You must be signed in to change notification settings - Fork 21
/
nic_train_network_batch.py
executable file
·158 lines (126 loc) · 5.37 KB
/
nic_train_network_batch.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
# ---------------------------------------------------------------------------------------------
# MS lesion segmentation pipeline
# ---------------------------------
# - incorporates:
# - MRI identification
# - registration
# - skull stripping
# - MS lesion segmentation training and testing using the CNN approach
# of Valverde et al (NI2017)
#
# Sergi Valverde 2017
# ---------------------------------------------------------------------------------------------
import os
import sys
import platform
import time
import argparse
import ConfigParser
from utils.preprocess import preprocess_scan
from utils.load_options import load_options, print_options
os.system('cls' if platform.system() == 'Windows' else 'clear')
print "##################################################"
print "# ------------ #"
print "# nicMSlesions #"
print "# ------------ #"
print "# MS WM lesion segmentation #"
print "# #"
print "# ------------------------------- #"
print "# (c) Sergi Valverde 2019 #"
print "# Neuroimage Computing Group #"
print "# ------------------------------- #"
print "##################################################\n"
# Command-line interface: the single flag --docker marks that we are
# running inside a container, where data folders are mounted under /data.
parser = argparse.ArgumentParser()
parser.add_argument('--docker',
                    dest='docker',
                    action='store_true',
                    default=False)
args = parser.parse_args()
container = args.docker
# link related libraries
CURRENT_PATH = CURRENT_PATH = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(os.path.join(CURRENT_PATH, 'libs'))
# load default options and update them with user information
default_config = ConfigParser.SafeConfigParser()
default_config.read(os.path.join(CURRENT_PATH, 'config', 'default.cfg'))
user_config = ConfigParser.RawConfigParser()
user_config.read(os.path.join(CURRENT_PATH, 'config', 'configuration.cfg'))
# read user's configuration file
options = load_options(default_config, user_config)
options['tmp_folder'] = CURRENT_PATH + '/tmp'
if options['debug']:
print_options(options)
# set paths taking into account the host OS
host_os = platform.system()
if host_os == 'Linux':
options['niftyreg_path'] = CURRENT_PATH + '/libs/linux/niftyreg'
options['robex_path'] = CURRENT_PATH + '/libs/linux/ROBEX/runROBEX.sh'
options['test_slices'] = 256
elif host_os == 'Windows':
options['niftyreg_path'] = os.path.normpath(
os.path.join(CURRENT_PATH, 'libs', 'win', 'niftyreg'))
options['robex_path'] = os.path.normpath(
os.path.join(CURRENT_PATH, 'libs', 'win', 'ROBEX', 'runROBEX.bat'))
options['test_slices'] = 256
else:
print "The OS system", host_os, "is not currently supported."
exit()
# set GPU mode from the configuration file. Trying to update
# the backend automatically from here in order to use either theano
# or tensorflow backends
# tensorflow backend
device = str(options['gpu_number'])
print "DEBUG: ", device
os.environ['KERAS_BACKEND'] = 'tensorflow'
os.environ["CUDA_VISIBLE_DEVICES"] = device
from CNN.base import train_cascaded_model
from CNN.build_model import cascade_model
if container:
options['train_folder'] = os.path.normpath(
'/data' + options['train_folder'])
else:
options['train_folder'] = os.path.normpath(options['train_folder'])
# set task to train
options['task'] = 'training'
# list scan
scan_list = os.listdir(options['train_folder'])
scan_list.sort()
for scan in scan_list:
total_time = time.time()
preprocess_time = time.time()
# --------------------------------------------------
# move things to a tmp folder before starting
# --------------------------------------------------
options['tmp_scan'] = scan
current_folder = os.path.join(options['train_folder'], scan)
options['tmp_folder'] = os.path.normpath(
os.path.join(current_folder, 'tmp'))
# preprocess scan
preprocess_scan(current_folder, options)
# --------------------------------------------------
# WM MS lesion training
# - configure net and train
# --------------------------------------------------
seg_time = time.time()
print "> CNN: Starting training session"
# select training scans
train_x_data = {f: {m: os.path.join(options['train_folder'], f, 'tmp', n)
for m, n in zip(options['modalities'], options['x_names'])}
for f in scan_list}
train_y_data = {f: os.path.join(options['train_folder'], f, 'tmp',
'lesion.nii.gz')
for f in scan_list}
options['weight_paths'] = os.path.join(CURRENT_PATH, 'nets')
options['load_weights'] = False
# train the model for the current scan
print "> CNN: training net with %d subjects" % (len(train_x_data.keys()))
# --------------------------------------------------
# initialize the CNN and train the classifier
# --------------------------------------------------
model = cascade_model(options)
model = train_cascaded_model(model, train_x_data, train_y_data, options)
print "> INFO: training time:", round(time.time() - seg_time), "sec"
print "> INFO: total pipeline time: ", round(time.time() - total_time), "sec"
print "> INFO: All processes have been finished. Have a good day!"