runner.py
import tensorflow as tf
from os.path import join
import getopt
import sys

import constants as c
from LSTMModel import LSTMModel
from data_reader import DataReader
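
# runner.py trains an LSTM lyric-generation model on a single artist's lyrics, or,
# in test mode, restores a saved checkpoint and prints a generated sample primed
# with the given text. It expects constants.py to define BATCH_SIZE, SEQ_LEN,
# CELL_SIZE, NUM_LAYERS, MODEL_SAVE_FREQ, MODEL_SAVE_DIR and set_save_name(),
# all of which are referenced below.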


class LyricGenRunner:
    def __init__(self, model_load_path, artist_name, test, prime_text):
        self.sess = tf.Session()
        self.artist_name = artist_name

        print 'Process data...'
        self.data_reader = DataReader(self.artist_name)
        self.vocab = self.data_reader.get_vocab()

        print 'Init model...'
        self.model = LSTMModel(self.sess,
                               self.vocab,
                               c.BATCH_SIZE,
                               c.SEQ_LEN,
                               c.CELL_SIZE,
                               c.NUM_LAYERS,
                               test=test)

        print 'Init variables...'
        self.saver = tf.train.Saver(max_to_keep=None)
        self.sess.run(tf.global_variables_initializer())

        # if load path specified, load a saved model
        if model_load_path is not None:
            self.saver.restore(self.sess, model_load_path)
            print 'Model restored from ' + model_load_path

        if test:
            self.test(prime_text)
        else:
            self.train()

    def train(self):
        """
        Runs the training loop on the model, saving a checkpoint every
        c.MODEL_SAVE_FREQ steps. Loops until the process is interrupted.
        """
        while True:
            inputs, targets = self.data_reader.get_train_batch(c.BATCH_SIZE, c.SEQ_LEN)

            print 'Training model...'
            feed_dict = {self.model.inputs: inputs, self.model.targets: targets}
            global_step, loss, _ = self.sess.run([self.model.global_step,
                                                  self.model.loss,
                                                  self.model.train_op],
                                                 feed_dict=feed_dict)

            print 'Step: %d | loss: %f' % (global_step, loss)

            if global_step % c.MODEL_SAVE_FREQ == 0:
                print 'Saving model...'
                self.saver.save(self.sess, join(c.MODEL_SAVE_DIR, self.artist_name + '.ckpt'),
                                global_step=global_step)

    def test(self, prime_text):
        """
        Generates a text sequence primed with prime_text and prints it.
        """
        # generate a sample sequence and print it
        sample = self.model.generate(prime=prime_text)

        print sample
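
# Command-line options parsed below:
#   -l / --load_path    path to a saved checkpoint to restore
#   -m / --model_name   name passed to c.set_save_name()
#   -a / --artist_name  artist whose lyrics to train on (default 'kanye_west')
#   -p / --prime        priming text for generation in test mode
#   -s / --seq_len      training sequence length (overrides c.SEQ_LEN)
#   -t / --test         generate a sample instead of training
#        --save_freq    steps between checkpoint saves (overrides c.MODEL_SAVE_FREQ)
# Example invocation (checkpoint path is illustrative only):
#   python runner.py -t -a kanye_west -p "I am" -l <path/to/checkpoint>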

def main():
    load_path = None
    artist_name = 'kanye_west'
    test = False
    prime_text = None

    try:
        opts, _ = getopt.getopt(sys.argv[1:], 'l:m:a:p:s:t', ['load_path=', 'model_name=',
                                                              'artist_name=', 'prime=', 'seq_len=',
                                                              'test', 'save_freq='])
    except getopt.GetoptError:
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-l', '--load_path'):
            load_path = arg
        if opt in ('-m', '--model_name'):
            c.set_save_name(arg)
        if opt in ('-a', '--artist_name'):
            artist_name = arg
        if opt in ('-p', '--prime'):
            prime_text = arg
        if opt in ('-s', '--seq_len'):
            c.SEQ_LEN = int(arg)  # cast to int so the sequence length is numeric
        if opt in ('-t', '--test'):
            test = True
        if opt == '--save_freq':
            c.MODEL_SAVE_FREQ = int(arg)

    LyricGenRunner(load_path, artist_name, test, prime_text)


if __name__ == '__main__':
    main()