# dense_utils.py
import tensorflow as tf


def _dense_block(inputs, layers, growth_rate=12, bottleneck=False,
                 training=False, seed=None, name=None, activation="relu"):
    """Dense block: every layer receives the concatenation of all previous
    layer outputs, so the block emits layers * growth_rate feature maps."""
    with tf.name_scope('block_' + name):
        # first two layers seed the list of feature maps to concatenate
        layer1 = _dense_layer(inputs, growth_rate, training=training,
                              seed=seed, name=name + "_layer1",
                              activation=activation)
        layer2 = _dense_layer(layer1, growth_rate, training=training,
                              seed=seed, name=name + "_layer2",
                              activation=activation)
        concat_inputs = [layer1, layer2]
        for i in range(2, layers):
            # concatenate all previous outputs along the channel axis
            layer_inputs = tf.concat(concat_inputs, axis=3,
                                     name="concat_" + name + "_" + str(i))
            # optional bottleneck: 1x1 conv caps the 3x3 conv's input width
            if bottleneck:
                layer_inputs = _bottleneck(layer_inputs, growth_rate,
                                           training=training, seed=seed,
                                           name=name + "_bottleneck_" + str(i))
            new_layer = _dense_layer(layer_inputs, growth_rate,
                                     training=training, seed=seed,
                                     name=name + "_layer_" + str(i),
                                     activation=activation)
            concat_inputs.append(new_layer)
        output = tf.concat(concat_inputs, axis=3,
                           name="concat_" + name + "_final")
    return output


def _dense_layer(inputs, filters, stride=(1, 1), training=False, epsilon=1e-8,
                 padding="SAME", seed=None, lambd=0.0, name=None,
                 activation="relu"):
    """Composite layer: batch norm -> activation -> 3x3 convolution."""
    with tf.name_scope('dense_' + name):
        # batch norm
        layer = tf.layers.batch_normalization(
            inputs,
            axis=-1,
            momentum=0.99,
            epsilon=epsilon,
            center=True,
            scale=True,
            beta_initializer=tf.zeros_initializer(),
            gamma_initializer=tf.ones_initializer(),
            moving_mean_initializer=tf.zeros_initializer(),
            moving_variance_initializer=tf.ones_initializer(),
            training=training,
            name='bn_' + name
        )
        # activation
        if activation == "relu":
            layer = tf.nn.relu(layer, name='relu_' + name)
        elif activation == "elu":
            layer = tf.nn.elu(layer, name="elu_" + name)
        # 3x3 convolution producing `filters` feature maps (the growth rate)
        layer = tf.layers.conv2d(
            layer,
            filters=filters,
            kernel_size=(3, 3),
            strides=stride,
            padding=padding,
            activation=None,
            kernel_initializer=tf.truncated_normal_initializer(stddev=5e-2, seed=seed),
            kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=lambd),
            name='conv_' + name
        )
    return layer


def _transition(inputs, filters, training=False, epsilon=1e-8, padding="SAME",
                seed=None, lambd=0.0, name=None, activation="relu"):
    """Transition between dense blocks: batch norm -> activation -> 1x1
    convolution -> 2x2 average pooling, halving the spatial resolution."""
    with tf.name_scope('transition_' + name):
        # batch norm
        layer = tf.layers.batch_normalization(
            inputs,
            axis=-1,
            momentum=0.99,
            epsilon=epsilon,
            center=True,
            scale=True,
            beta_initializer=tf.zeros_initializer(),
            gamma_initializer=tf.ones_initializer(),
            moving_mean_initializer=tf.zeros_initializer(),
            moving_variance_initializer=tf.ones_initializer(),
            training=training,
            name='tn_bn_' + name
        )
        # activation
        if activation == "relu":
            layer = tf.nn.relu(layer, name='tn_relu_' + name)
        elif activation == "elu":
            layer = tf.nn.elu(layer, name='tn_elu_' + name)
        # 1x1 conv to compress the channel count to `filters`
        layer = tf.layers.conv2d(
            layer,
            filters=filters,
            kernel_size=(1, 1),
            strides=(1, 1),
            padding=padding,
            activation=None,
            kernel_initializer=tf.truncated_normal_initializer(stddev=5e-2, seed=seed),
            kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=lambd),
            name='1x1conv_' + name
        )
        # 2x2 average pooling to downsample spatially
        layer = tf.layers.average_pooling2d(
            layer,
            pool_size=(2, 2),
            strides=(2, 2),
            padding='SAME',
            name='pool_' + name
        )
    return layer


def _bottleneck(inputs, growth_rate, training=False, epsilon=1e-8,
                padding="SAME", seed=None, lambd=0.0, name=None,
                activation="relu"):
    """Bottleneck layer: batch norm -> activation -> 1x1 convolution that
    caps the feature maps at 4 * growth_rate, as in DenseNet-B."""
    with tf.name_scope('bottleneck_' + name):
        # batch norm
        layer = tf.layers.batch_normalization(
            inputs,
            axis=-1,
            momentum=0.99,
            epsilon=epsilon,
            center=True,
            scale=True,
            beta_initializer=tf.zeros_initializer(),
            gamma_initializer=tf.ones_initializer(),
            moving_mean_initializer=tf.zeros_initializer(),
            moving_variance_initializer=tf.ones_initializer(),
            training=training,
            name='bottleneck_bn_' + name
        )
        # activation
        if activation == "relu":
            layer = tf.nn.relu(layer, name='bottleneck_relu_' + name)
        elif activation == "elu":
            layer = tf.nn.elu(layer, name="bottleneck_elu_" + name)
        # 1x1 convolution producing 4 * growth_rate feature maps
        layer = tf.layers.conv2d(
            layer,
            filters=growth_rate * 4,
            kernel_size=(1, 1),
            strides=(1, 1),
            padding=padding,
            activation=None,
            kernel_initializer=tf.truncated_normal_initializer(stddev=5e-2, seed=seed),
            kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=lambd),
            name='bottleneck_' + name
        )
    return layer
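

# A minimal usage sketch: wiring the helpers above into a tiny DenseNet
# trunk. The input shape, block sizes, and 10-class head below are
# illustrative assumptions, not values taken from this module.
if __name__ == "__main__":
    images = tf.placeholder(tf.float32, shape=(None, 32, 32, 3), name="images")
    is_training = tf.placeholder(tf.bool, name="is_training")

    # stem: plain 3x3 conv before the first dense block
    net = tf.layers.conv2d(images, filters=16, kernel_size=(3, 3),
                           padding="SAME", name="stem")

    # two dense blocks joined by a transition; each block emits
    # layers * growth_rate = 4 * 12 = 48 feature maps, and the
    # transition compresses to 24 channels at half the resolution
    net = _dense_block(net, layers=4, growth_rate=12,
                       training=is_training, name="block1")
    net = _transition(net, filters=24, training=is_training, name="trans1")
    net = _dense_block(net, layers=4, growth_rate=12,
                       training=is_training, name="block2")

    # global average pooling and a linear classifier head
    net = tf.reduce_mean(net, axis=[1, 2], name="global_avg_pool")
    logits = tf.layers.dense(net, units=10, name="logits")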