From 76ed8a8a40597d16703823ccbfdb7894ce0b73ad Mon Sep 17 00:00:00 2001
From: deeppomf
Date: Fri, 9 Feb 2018 22:55:29 -0500
Subject: [PATCH] update

---
 README.md       |  5 +++--
 src/decensor.py |  2 +-
 src/test.py     |  4 ++--
 src/train.py    | 12 ++++++------
 4 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/README.md b/README.md
index 58846b6..6068872 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 # DeepMindBreak
 Decensoring Hentai with Deep Neural Networks
 
-
+This project applies an implementation of [Globally and Locally Consistent Image Completion](http://hi.cs.waseda.ac.jp/%7Eiizuka/projects/completion/data/completion_sig2017.pdf) to the problem of hentai decensorship. Using a deep fully convolutional neural network, DeepMindBreak can replace censored artwork in hentai with plausible reconstructions. The user only needs to specify the censored regions for the algorithm to run.
 
 # Limitations
 
@@ -15,9 +15,10 @@ It does NOT work with:
 - Real life porn
 - Mosaic censorship
 - Censorship of nipples
-- Complete censorship
 - Animated gifs/videos
 
+In particular, if a vagina or penis is completely censored out, THERE IS NO HOPE OF RECOVERY.
+
 # Dependencies
 
 - Python 2/3
diff --git a/src/decensor.py b/src/decensor.py
index 8393a55..f7a9b64 100644
--- a/src/decensor.py
+++ b/src/decensor.py
@@ -30,7 +30,7 @@ def test():
         sess.run(init_op)
 
         saver = tf.train.Saver()
-        saver.restore(sess, '../saved_models/latest')
+        saver.restore(sess, '/saved_models/latest')
 
         x_test = np.load(test_npy)
         np.random.shuffle(x_test)
diff --git a/src/test.py b/src/test.py
index 8393a55..f6fb519 100644
--- a/src/test.py
+++ b/src/test.py
@@ -30,7 +30,7 @@ def test():
         sess.run(init_op)
 
         saver = tf.train.Saver()
-        saver.restore(sess, '../saved_models/latest')
+        saver.restore(sess, '/saved_models/latest')
 
         x_test = np.load(test_npy)
         np.random.shuffle(x_test)
@@ -50,7 +50,7 @@ def test():
             masked = raw * (1 - mask_batch[i]) + np.ones_like(raw) * mask_batch[i] * 255
             img = completion[i]
             img = np.array((img + 1) * 127.5, dtype=np.uint8)
-            dst = './output/{}.jpg'.format("{0:06d}".format(cnt))
+            dst = '/output/{}.jpg'.format("{0:06d}".format(cnt))
             output_image([['Input', masked], ['Output', img], ['Ground Truth', raw]], dst)
 
 
diff --git a/src/train.py b/src/train.py
index 5f14497..06145e1 100644
--- a/src/train.py
+++ b/src/train.py
@@ -33,9 +33,9 @@ def train():
         init_op = tf.global_variables_initializer()
         sess.run(init_op)
 
-        if tf.train.get_checkpoint_state('./saved_model'):
+        if tf.train.get_checkpoint_state('/saved_model'):
             saver = tf.train.Saver()
-            saver.restore(sess, './saved_model/latest')
+            saver.restore(sess, '/saved_model/latest')
 
         x_train, x_test = load.load()
         x_train = np.array([a / 127.5 - 1 for a in x_train])
@@ -69,9 +69,9 @@
 
 
                 saver = tf.train.Saver()
-                saver.save(sess, './saved_model/latest', write_meta_graph=False)
+                saver.save(sess, '/saved_model/latest', write_meta_graph=False)
                 if sess.run(epoch) == PRETRAIN_EPOCH:
-                    saver.save(sess, './saved_model/pretrained', write_meta_graph=False)
+                    saver.save(sess, '/saved_model/pretrained', write_meta_graph=False)
 
 
             # Discrimitation
@@ -106,10 +106,10 @@ def train():
                 x_batch = x_test[:BATCH_SIZE]
                 completion = sess.run(model.completion, feed_dict={x: x_batch, mask: mask_batch, is_training: False})
                 sample = np.array((completion[0] + 1) * 127.5, dtype=np.uint8)
-                cv2.imwrite('./output/{}.jpg'.format("{0:06d}".format(sess.run(epoch))), cv2.cvtColor(sample, cv2.COLOR_RGB2BGR))
+                cv2.imwrite('/output/{}.jpg'.format("{0:06d}".format(sess.run(epoch))), cv2.cvtColor(sample, cv2.COLOR_RGB2BGR))
 
                 saver = tf.train.Saver()
-                saver.save(sess, './saved_model/latest', write_meta_graph=False)
+                saver.save(sess, '/saved_model/latest', write_meta_graph=False)
 
 
 def get_points():
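
Note on the pixel conventions visible in the context lines above: train.py scales 8-bit images into the network's tanh range with a / 127.5 - 1, test.py maps completions back with (img + 1) * 127.5, and the composite raw * (1 - mask) + np.ones_like(raw) * mask * 255 paints the censored region white. A minimal sketch of the same arithmetic, assuming 8-bit RGB numpy arrays and a {0, 1} mask; the helper names are illustrative and not part of this patch:

    import numpy as np

    # Illustrative helpers, not part of the patch: the value-range
    # conventions used by train.py and test.py above.

    def to_model_range(img):
        # uint8 [0, 255] -> float [-1, 1], as in `a / 127.5 - 1`
        return img.astype(np.float32) / 127.5 - 1.0

    def to_image_range(img):
        # float [-1, 1] -> uint8 [0, 255], as in `(img + 1) * 127.5`
        return np.array((img + 1.0) * 127.5, dtype=np.uint8)

    def whiteout(raw, mask):
        # Keep pixels where mask == 0; paint masked (censored) pixels
        # white, matching the `masked = ...` line in test.py.
        return raw * (1 - mask) + np.ones_like(raw) * mask * 255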
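
On the checkpoint handling itself: tf.train.get_checkpoint_state() takes a directory and Saver.save()/Saver.restore() take a checkpoint prefix, and this patch replaces working-directory-relative paths ('./saved_model', '../saved_models') with absolute ones rooted at '/'. If the goal is independence from the current working directory, one alternative is to resolve the directory relative to the script file. A sketch only, assuming the TF 1.x API; MODEL_DIR and maybe_restore are hypothetical names, not part of this patch:

    import os
    import tensorflow as tf

    # Hypothetical alternative, not what this patch does: anchor the
    # checkpoint directory to this file's location instead of hard-coding
    # an absolute '/saved_model' or a cwd-relative './saved_model'.
    MODEL_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'saved_model')

    def maybe_restore(sess, saver):
        # Restore the latest checkpoint if one exists; otherwise train
        # from scratch.
        if tf.train.get_checkpoint_state(MODEL_DIR):
            saver.restore(sess, os.path.join(MODEL_DIR, 'latest'))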