# rnn_sample.py -- forked from llSourcell/AI_Composer
# Generates a MIDI file sample from a trained RNN model.
import os, sys
import argparse
import time
import itertools
import cPickle
import numpy as np
import tensorflow as tf
import util
import nottingham_util
from model import Model, NottinghamModel
from rnn import DefaultConfig
if __name__ == '__main__':
np.random.seed()
parser = argparse.ArgumentParser(description='Script to generated a MIDI file sample from a trained model.')
parser.add_argument('--config_file', type=str, required=True)
parser.add_argument('--sample_melody', action='store_true', default=False)
parser.add_argument('--sample_harmony', action='store_true', default=False)
parser.add_argument('--sample_seq', type=str, default='random',
choices = ['random', 'chords'])
parser.add_argument('--conditioning', type=int, default=-1)
parser.add_argument('--sample_length', type=int, default=512)
args = parser.parse_args()
with open(args.config_file, 'r') as f:
config = cPickle.load(f)
if config.dataset == 'softmax':
config.time_batch_len = 1
config.max_time_batches = -1
model_class = NottinghamModel
with open(nottingham_util.PICKLE_LOC, 'r') as f:
pickle = cPickle.load(f)
chord_to_idx = pickle['chord_to_idx']
time_step = 120
resolution = 480
# use time batch len of 1 so that every target is covered
test_data = util.batch_data(pickle['test'], time_batch_len = 1,
max_time_batches = -1, softmax = True)
else:
raise Exception("Other datasets not yet implemented")
print config
with tf.Graph().as_default(), tf.Session() as session:
with tf.variable_scope("model", reuse=None):
sampling_model = model_class(config)
saver = tf.train.Saver(tf.all_variables())
model_path = os.path.join(os.path.dirname(args.config_file),
config.model_name)
saver.restore(session, model_path)
state = sampling_model.get_cell_zero_state(session, 1)
if args.sample_seq == 'chords':
# 16 - one measure, 64 - chord progression
repeats = args.sample_length / 64
sample_seq = nottingham_util.i_vi_iv_v(chord_to_idx, repeats, config.input_dim)
print 'Sampling melody using a I, VI, IV, V progression'
elif args.sample_seq == 'random':
sample_index = np.random.choice(np.arange(len(pickle['test'])))
sample_seq = [ pickle['test'][sample_index][i, :]
for i in range(pickle['test'][sample_index].shape[0]) ]
chord = sample_seq[0]
seq = [chord]
if args.conditioning > 0:
for i in range(1, args.conditioning):
seq_input = np.reshape(chord, [1, 1, config.input_dim])
feed = {
sampling_model.seq_input: seq_input,
sampling_model.initial_state: state,
}
state = session.run(sampling_model.final_state, feed_dict=feed)
chord = sample_seq[i]
seq.append(chord)
if config.dataset == 'softmax':
writer = nottingham_util.NottinghamMidiWriter(chord_to_idx, verbose=False)
sampler = nottingham_util.NottinghamSampler(chord_to_idx, verbose=False)
else:
# writer = midi_util.MidiWriter()
# sampler = sampling.Sampler(verbose=False)
raise Exception("Other datasets not yet implemented")
for i in range(max(args.sample_length - len(seq), 0)):
seq_input = np.reshape(chord, [1, 1, config.input_dim])
feed = {
sampling_model.seq_input: seq_input,
sampling_model.initial_state: state,
}
[probs, state] = session.run(
[sampling_model.probs, sampling_model.final_state],
feed_dict=feed)
probs = np.reshape(probs, [config.input_dim])
chord = sampler.sample_notes(probs)
if config.dataset == 'softmax':
r = nottingham_util.NOTTINGHAM_MELODY_RANGE
if args.sample_melody:
chord[r:] = 0
chord[r:] = sample_seq[i][r:]
elif args.sample_harmony:
chord[:r] = 0
chord[:r] = sample_seq[i][:r]
seq.append(chord)
writer.dump_sequence_to_midi(seq, "best.midi",
time_step=time_step, resolution=resolution)