From 569a764a65b4d94c810ff2409a748956866428f1 Mon Sep 17 00:00:00 2001 From: ReDeiPirati Date: Sun, 10 Sep 2017 21:43:47 +0200 Subject: [PATCH] Update args, fix typo --- README.md | 17 ++++++++++++----- dcgan.py | 4 ++-- generate.py | 10 +++++++++- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index f078a8c..8823407 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,10 @@ # Deep Convolution Generative Adversarial Networks -This project implements the paper [Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks](http://arxiv.org/abs/1511.06434) and is a porting from [pytorch/examples/dcgan](https://github.com/pytorch/examples/tree/master/dcgan) to be usable on [FloydHub](https://www.floydhub.com/). +This project implements the paper [Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks](http://arxiv.org/abs/1511.06434) from a porting of [pytorch/examples/dcgan](https://github.com/pytorch/examples/tree/master/dcgan) making it usable on [FloydHub](https://www.floydhub.com/). -The implementation is very close to the Torch implementation [dcgan.torch](https://github.com/soumith/dcgan.torch) +The implementation is very close to the Torch implementation [dcgan.torch](https://github.com/soumith/dcgan.torch). + +Before starting, the `fixed_noise.pth` (serialized Z vector used for generating images during training) is saved in the outf folder. After every 100 training iterations, the files `real_samples.png` and `fake_samples.png` are written to disk with the samples from the generative model. 
@@ -50,13 +52,16 @@ optional arguments: Generating script: ```bash -usage: generate.py [-h] --netG NETG [--outf OUTF] [--Zvector ZVECTOR] +usage: generate.py [-h] --netG NETG [--outf OUTF] [--Zvector ZVECTOR] [--cuda] + [--ngpu NGPU] optional arguments: -h, --help show this help message and exit --netG NETG path to netG (for generating images) --outf OUTF folder to output images - --Zvector ZVECTOR Path to Serialized Z vector + --Zvector ZVECTOR path to Serialized Z vector + --cuda enables cuda + --ngpu NGPU number of GPUs to use ``` @@ -106,8 +111,10 @@ floyd run --gpu --env pytorch -data "python gener ### Try our pre-trained model +We have provided a pre-trained model trained on the LFW dataset for about 300 epochs. + ```bash -floyd run --gpu --env pytorch -data floydhub/dcgan/1/output:/model "python generator.py --netG /model/netG_epoch_99.pth" +floyd run --gpu --env pytorch -data floydhub/dcgan/1/output:/model "python generate.py --netG /model/netG_epoch_299.pth --ngpu 1 --cuda" ``` ### Serve model through REST API diff --git a/dcgan.py b/dcgan.py index aa0d205..ef37ee0 100644 --- a/dcgan.py +++ b/dcgan.py @@ -77,8 +77,8 @@ def __init__(self, imageSize=64, nz=100, ngf=64, - cuda=False, - ngpu=1, + cuda=None, + ngpu=None, outf="/output"): """ DCGAN - netG Builder diff --git a/generate.py b/generate.py index d4ada28..c931ab5 100644 --- a/generate.py +++ b/generate.py @@ -8,6 +8,8 @@ parser.add_argument('--netG', required=True, default='', help="path to netG (for generating images)") parser.add_argument('--outf', default='/output', help='folder to output images') parser.add_argument('--Zvector', help="path to Serialized Z vector") +parser.add_argument('--cuda', action='store_true', help='enables cuda') +parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use') opt = parser.parse_args() print(opt) @@ -22,7 +24,13 @@ if opt.outf: outf = opt.outf +# GPU and CUDA +cuda = None +if opt.cuda: + cuda = opt.cuda +ngpu = int(opt.ngpu) + # 
Generate An Image from input json or default parameters -Generator = DCGAN(netG=opt.netG, zvector=zvector, batchSize=batchSize, outf=outf) +Generator = DCGAN(netG=opt.netG, zvector=zvector, batchSize=batchSize, outf=outf, cuda=cuda, ngpu=ngpu) Generator.build_model() Generator.generate()