
Commit

update
deeppomf committed Feb 11, 2018
1 parent 485d76e commit 704ff80
Showing 10 changed files with 638 additions and 0 deletions.
95 changes: 95 additions & 0 deletions decensor.py
@@ -0,0 +1,95 @@
import numpy as np
import tensorflow as tf
from PIL import Image
import tqdm
import os
import matplotlib.pyplot as plt
import sys
sys.path.append('..')
from model import Model

IMAGE_SIZE = 128
LOCAL_SIZE = 64
HOLE_MIN = 24
HOLE_MAX = 48
BATCH_SIZE = 16

image_path = './lfw.npy'

def test():
    x = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3])
    mask = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 1])
    local_x = tf.placeholder(tf.float32, [BATCH_SIZE, LOCAL_SIZE, LOCAL_SIZE, 3])
    global_completion = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3])
    local_completion = tf.placeholder(tf.float32, [BATCH_SIZE, LOCAL_SIZE, LOCAL_SIZE, 3])
    is_training = tf.placeholder(tf.bool, [])

    model = Model(x, mask, local_x, global_completion, local_completion, is_training, batch_size=BATCH_SIZE)
    sess = tf.Session()
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    saver = tf.train.Saver()
    saver.restore(sess, './models/latest')

    x_test = np.load(image_path)
    np.random.shuffle(x_test)
    # Scale pixel values from [0, 255] to [-1, 1] to match the generator's tanh output.
    x_test = np.array([a / 127.5 - 1 for a in x_test])

    step_num = int(len(x_test) / BATCH_SIZE)

    cnt = 0
    for i in tqdm.tqdm(range(step_num)):
        x_batch = x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
        _, mask_batch = get_points()
        completion = sess.run(model.completion, feed_dict={x: x_batch, mask: mask_batch, is_training: False})
        for j in range(BATCH_SIZE):
            cnt += 1
            raw = x_batch[j]
            raw = np.array((raw + 1) * 127.5, dtype=np.uint8)
            masked = raw * (1 - mask_batch[j]) + np.ones_like(raw) * mask_batch[j] * 255
            img = completion[j]
            img = np.array((img + 1) * 127.5, dtype=np.uint8)
            dst = './output/{:06d}.jpg'.format(cnt)
            output_image([['Input', masked], ['Output', img], ['Ground Truth', raw]], dst)


def get_points():
    points = []
    mask = []
    for i in range(BATCH_SIZE):
        # Random LOCAL_SIZE x LOCAL_SIZE crop for the local discriminator.
        x1, y1 = np.random.randint(0, IMAGE_SIZE - LOCAL_SIZE + 1, 2)
        x2, y2 = np.array([x1, y1]) + LOCAL_SIZE
        points.append([x1, y1, x2, y2])

        # Random w x h hole placed entirely inside that crop.
        w, h = np.random.randint(HOLE_MIN, HOLE_MAX + 1, 2)
        p1 = x1 + np.random.randint(0, LOCAL_SIZE - w)
        q1 = y1 + np.random.randint(0, LOCAL_SIZE - h)
        p2 = p1 + w
        q2 = q1 + h

        m = np.zeros((IMAGE_SIZE, IMAGE_SIZE, 1), dtype=np.uint8)
        m[q1:q2 + 1, p1:p2 + 1] = 1
        mask.append(m)

    return np.array(points), np.array(mask)


def output_image(images, dst):
    fig = plt.figure()
    for i, image in enumerate(images):
        text, img = image
        fig.add_subplot(1, 3, i + 1)
        plt.imshow(img)
        plt.tick_params(labelbottom='off')
        plt.tick_params(labelleft='off')
        plt.gca().get_xaxis().set_ticks_position('none')
        plt.gca().get_yaxis().set_ticks_position('none')
        plt.xlabel(text)
    plt.savefig(dst)
    plt.close()


if __name__ == '__main__':
    test()
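
Note: decensor.py imports PIL but never uses it in this revision. A minimal sketch of saving a single completed image with PIL instead of the matplotlib figure above, reusing the scaling convention from test() (the output filename is illustrative):

# Sketch: assumes `completion` is the batch returned by sess.run(model.completion, ...).
single = np.array((completion[0] + 1) * 127.5, dtype=np.uint8)  # [-1, 1] back to [0, 255]
Image.fromarray(single).save('./output/sample.png')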

95 changes: 95 additions & 0 deletions layer.py
@@ -0,0 +1,95 @@
import tensorflow as tf

def conv_layer(x, filter_shape, stride):
    filters = tf.get_variable(
        name='weight',
        shape=filter_shape,
        dtype=tf.float32,
        initializer=tf.contrib.layers.xavier_initializer(),
        trainable=True)
    return tf.nn.conv2d(x, filters, [1, stride, stride, 1], padding='SAME')


def dilated_conv_layer(x, filter_shape, dilation):
    filters = tf.get_variable(
        name='weight',
        shape=filter_shape,
        dtype=tf.float32,
        initializer=tf.contrib.layers.xavier_initializer(),
        trainable=True)
    return tf.nn.atrous_conv2d(x, filters, dilation, padding='SAME')


def deconv_layer(x, filter_shape, output_shape, stride):
    filters = tf.get_variable(
        name='weight',
        shape=filter_shape,
        dtype=tf.float32,
        initializer=tf.contrib.layers.xavier_initializer(),
        trainable=True)
    return tf.nn.conv2d_transpose(x, filters, output_shape, [1, stride, stride, 1])


def batch_normalize(x, is_training, decay=0.99, epsilon=0.001):
    # bn_train and bn_inference close over beta, scale, pop_mean, and pop_var,
    # which are defined below; the closures are only invoked inside tf.cond.
    def bn_train():
        batch_mean, batch_var = tf.nn.moments(x, axes=[0, 1, 2])
        # Update the population statistics with an exponential moving average.
        train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_var = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))
        with tf.control_dependencies([train_mean, train_var]):
            return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, scale, epsilon)

    def bn_inference():
        return tf.nn.batch_normalization(x, pop_mean, pop_var, beta, scale, epsilon)

    dim = x.get_shape().as_list()[-1]
    beta = tf.get_variable(
        name='beta',
        shape=[dim],
        dtype=tf.float32,
        initializer=tf.truncated_normal_initializer(stddev=0.0),
        trainable=True)
    scale = tf.get_variable(
        name='scale',
        shape=[dim],
        dtype=tf.float32,
        initializer=tf.truncated_normal_initializer(stddev=0.1),
        trainable=True)
    pop_mean = tf.get_variable(
        name='pop_mean',
        shape=[dim],
        dtype=tf.float32,
        initializer=tf.constant_initializer(0.0),
        trainable=False)
    pop_var = tf.get_variable(
        name='pop_var',
        shape=[dim],
        dtype=tf.float32,
        initializer=tf.constant_initializer(1.0),
        trainable=False)

    return tf.cond(is_training, bn_train, bn_inference)


def flatten_layer(x):
    input_shape = x.get_shape().as_list()
    dim = input_shape[1] * input_shape[2] * input_shape[3]
    transposed = tf.transpose(x, (0, 3, 1, 2))
    return tf.reshape(transposed, [-1, dim])


def full_connection_layer(x, out_dim):
    in_dim = x.get_shape().as_list()[-1]
    W = tf.get_variable(
        name='weight',
        shape=[in_dim, out_dim],
        dtype=tf.float32,
        initializer=tf.truncated_normal_initializer(stddev=0.1),
        trainable=True)
    b = tf.get_variable(
        name='bias',
        shape=[out_dim],
        dtype=tf.float32,
        initializer=tf.constant_initializer(0.0),
        trainable=True)
    return tf.add(tf.matmul(x, W), b)
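
Every function above creates its variables via tf.get_variable with fixed names ('weight', 'bias', ...), so each call must sit in its own variable scope or the names collide. A minimal sketch of the intended usage pattern (function and scope names are illustrative, following how model.py below composes these layers):

import tensorflow as tf
from layer import conv_layer, batch_normalize

def tiny_encoder(x, is_training):
    # Each block gets its own scope so tf.get_variable('weight', ...) is unique.
    with tf.variable_scope('block1'):
        x = conv_layer(x, [3, 3, 3, 32], 1)
        x = batch_normalize(x, is_training)
        x = tf.nn.relu(x)
    with tf.variable_scope('block2'):
        x = conv_layer(x, [3, 3, 32, 64], 2)
        x = batch_normalize(x, is_training)
        x = tf.nn.relu(x)
    return x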

14 changes: 14 additions & 0 deletions load.py
@@ -0,0 +1,14 @@
import os
import numpy as np

def load(dir_='./training_data/npy'):
    x_train = np.load(os.path.join(dir_, 'x_train.npy'))
    x_test = np.load(os.path.join(dir_, 'x_test.npy'))
    return x_train, x_test


if __name__ == '__main__':
    x_train, x_test = load()
    print(x_train.shape)
    print(x_test.shape)
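
load() assumes the two .npy files already exist. A minimal sketch of building them from a directory of images; the ./training_data/raw path and the 9:1 split are assumptions, not part of this commit:

import os
import numpy as np
from PIL import Image

IMAGE_SIZE = 128  # matches decensor.py
raw_dir = './training_data/raw'  # hypothetical source directory
images = [np.asarray(Image.open(os.path.join(raw_dir, name)).convert('RGB')
                     .resize((IMAGE_SIZE, IMAGE_SIZE)), dtype=np.uint8)
          for name in sorted(os.listdir(raw_dir))]
data = np.array(images)
split = int(len(data) * 0.9)  # assumed 9:1 train/test split
np.save('./training_data/npy/x_train.npy', data[:split])
np.save('./training_data/npy/x_test.npy', data[split:])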

162 changes: 162 additions & 0 deletions model.py
@@ -0,0 +1,162 @@
from layer import *

class Model:
    def __init__(self, x, mask, local_x, global_completion, local_completion, is_training, batch_size):
        self.batch_size = batch_size
        # The generator only sees the image with the masked region zeroed out;
        # the completion keeps the original pixels everywhere outside the mask.
        self.imitation = self.generator(x * (1 - mask), is_training)
        self.completion = self.imitation * mask + x * (1 - mask)
        self.real = self.discriminator(x, local_x, reuse=False)
        self.fake = self.discriminator(global_completion, local_completion, reuse=True)
        self.g_loss = self.calc_g_loss(x, self.completion)
        self.d_loss = self.calc_d_loss(self.real, self.fake)
        self.g_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
        self.d_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')


    def generator(self, x, is_training):
        with tf.variable_scope('generator'):
            with tf.variable_scope('conv1'):
                x = conv_layer(x, [5, 5, 3, 64], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv2'):
                x = conv_layer(x, [3, 3, 64, 128], 2)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv3'):
                x = conv_layer(x, [3, 3, 128, 128], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv4'):
                x = conv_layer(x, [3, 3, 128, 256], 2)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv5'):
                x = conv_layer(x, [3, 3, 256, 256], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv6'):
                x = conv_layer(x, [3, 3, 256, 256], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('dilated1'):
                x = dilated_conv_layer(x, [3, 3, 256, 256], 2)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('dilated2'):
                x = dilated_conv_layer(x, [3, 3, 256, 256], 4)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('dilated3'):
                x = dilated_conv_layer(x, [3, 3, 256, 256], 8)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('dilated4'):
                x = dilated_conv_layer(x, [3, 3, 256, 256], 16)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv7'):
                x = conv_layer(x, [3, 3, 256, 256], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv8'):
                x = conv_layer(x, [3, 3, 256, 256], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('deconv1'):
                x = deconv_layer(x, [4, 4, 128, 256], [self.batch_size, 64, 64, 128], 2)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv9'):
                x = conv_layer(x, [3, 3, 128, 128], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('deconv2'):
                x = deconv_layer(x, [4, 4, 64, 128], [self.batch_size, 128, 128, 64], 2)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv10'):
                x = conv_layer(x, [3, 3, 64, 32], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv11'):
                x = conv_layer(x, [3, 3, 32, 3], 1)
                x = tf.nn.tanh(x)

        return x


    def discriminator(self, global_x, local_x, reuse):
        def global_discriminator(x):
            is_training = tf.constant(True)
            with tf.variable_scope('global'):
                with tf.variable_scope('conv1'):
                    x = conv_layer(x, [5, 5, 3, 64], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('conv2'):
                    x = conv_layer(x, [5, 5, 64, 128], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('conv3'):
                    x = conv_layer(x, [5, 5, 128, 256], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('conv4'):
                    x = conv_layer(x, [5, 5, 256, 512], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('conv5'):
                    x = conv_layer(x, [5, 5, 512, 512], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('fc'):
                    x = flatten_layer(x)
                    x = full_connection_layer(x, 1024)
            return x

        def local_discriminator(x):
            is_training = tf.constant(True)
            with tf.variable_scope('local'):
                with tf.variable_scope('conv1'):
                    x = conv_layer(x, [5, 5, 3, 64], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('conv2'):
                    x = conv_layer(x, [5, 5, 64, 128], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('conv3'):
                    x = conv_layer(x, [5, 5, 128, 256], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('conv4'):
                    x = conv_layer(x, [5, 5, 256, 512], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('fc'):
                    x = flatten_layer(x)
                    x = full_connection_layer(x, 1024)
            return x

        with tf.variable_scope('discriminator', reuse=reuse):
            global_output = global_discriminator(global_x)
            local_output = local_discriminator(local_x)
            with tf.variable_scope('concatenation'):
                output = tf.concat((global_output, local_output), 1)
                output = full_connection_layer(output, 1)

        return output


    def calc_g_loss(self, x, completion):
        # L2 reconstruction loss between the original image and the completion.
        loss = tf.nn.l2_loss(x - completion)
        return tf.reduce_mean(loss)


    def calc_d_loss(self, real, fake):
        # alpha weights the adversarial loss relative to the reconstruction loss.
        alpha = 4e-4
        d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=real, labels=tf.ones_like(real)))
        d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=fake, labels=tf.zeros_like(fake)))
        return tf.add(d_loss_real, d_loss_fake) * alpha
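
This commit includes no training script, but Model exposes what one would need: g_loss and d_loss plus per-network variable collections. A minimal sketch of wiring them to separate optimizers; Adam and the learning rate are assumptions, and model, sess, and the placeholders are assumed to be built as in decensor.py:

# Sketch: alternating generator/discriminator updates over Model's collections.
g_train_op = tf.train.AdamOptimizer(1e-3).minimize(model.g_loss, var_list=model.g_variables)
d_train_op = tf.train.AdamOptimizer(1e-3).minimize(model.d_loss, var_list=model.d_variables)

# One generator step; a d_train_op step would additionally need local_x,
# global_completion, and local_completion fed in.
sess.run(g_train_op, feed_dict={x: x_batch, mask: mask_batch, is_training: True})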

Empty file added: models/.gitkeep
