text_generation_project.py
# -*- coding: utf-8 -*-
"""text_generation_project.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1nqWkIxgrY_wMfWAkMZox6i84d9VSOWOK
"""
import numpy
import sys
import nltk
nltk.download('stopwords')
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.utils import to_categorical  # np_utils was removed from newer Keras releases
from keras.callbacks import ModelCheckpoint
# load the corpus; an explicit encoding avoids platform-dependent decode errors
file = open("frankenstein.txt", encoding="utf-8").read()
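# frankenstein.txt is assumed to sit next to this script; the Project Gutenberg
# plain-text edition of the novel is a common choice for this exercise.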
# tokenization / standardization: lowercase the text, strip punctuation,
# and drop English stop words
def tokenize_words(input):
    input = input.lower()
    tokenizer = RegexpTokenizer(r'\w+')
    tokens = tokenizer.tokenize(input)
    filtered = filter(lambda token: token not in stopwords.words('english'), tokens)
    return " ".join(filtered)  # join with spaces so word boundaries survive
processed_inputs=tokenize_words(file)
#chars to numbers
chars=sorted(list(set(processed_inputs)))
char_to_num=dict((c,i) for i, c in enumerate(chars))
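# char_to_num now maps each surviving character to an integer index, e.g.
# {' ': 0, 'a': 1, 'b': 2, ...}; the exact values depend on the corpus.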
#check if words to chars or chars to num has worked
input_len=len(processed_inputs)
vocab_len=len(chars)
print('Total number of characters:', input_len)
print('Total vocab:', vocab_len)
#seq length
seq_length=100
x_data=[]
y_data=[]
# slide a window of seq_length characters across the text; the character that
# follows each window becomes the training label
for i in range(0, input_len - seq_length, 1):
    in_seq = processed_inputs[i:i + seq_length]
    out_seq = processed_inputs[i + seq_length]
    x_data.append([char_to_num[char] for char in in_seq])
    y_data.append(char_to_num[out_seq])
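# For instance, with seq_length=3 the text "abcd" would yield one pattern:
# x = the codes for ["a", "b", "c"] and y = the code for "d".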
n_patterns=len(x_data)
print('Total Patterns:',n_patterns)
#convert input sequence to np array and so on
x=numpy.reshape(x_data,(n_patterns,seq_length,1))
x=x/float(vocab_len)
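# Dividing by vocab_len rescales the integer codes into [0, 1), a small numeric
# range that is friendlier to the LSTM than raw indices.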
#one-hot encoding
y = to_categorical(y_data)
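# y has shape (n_patterns, vocab_len): one row per pattern, one column per
# character class.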
#creating the model
model=Sequential()
model.add(LSTM(256, input_shape=(x.shape[1], x.shape[2]), return_sequences=True))
model.add(Dropout(0.2))  # the original Dropout(0,2) passed rate=0; 0.2 was intended
model.add(LSTM(256, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(128))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
#compile the model
model.compile(loss='categorical_crossentropy',optimizer='adam')
# saving weights
filepath='model_weights_saved.hdf5'
checkpoint=ModelCheckpoint(filepath,monitor='loss',verbose=1,save_best_only=True,mode='min')
desired_callbacks=[checkpoint]
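# With save_best_only=True and mode='min', the checkpoint file always holds the
# weights from the epoch with the lowest training loss seen so far.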
#fit model and let it train
model.fit(x,y,epochs=4,batch_size=256,callbacks=desired_callbacks)
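# 4 epochs is enough for a quick demo; character-level models usually need far
# more training before the output resembles English.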
# reload the checkpointed weights and recompile the model
filename = "model_weights_saved.hdf5"  # must match the checkpoint filepath above
model.load_weights(filename)
model.compile(loss="categorical_crossentropy", optimizer='adam')
#output of the model back into characters
num_to_char=dict((i,c) for i,c in enumerate(chars))
#random seed to help generate
start=numpy.random.randint(0,len(x_data)-1)
pattern=x_data[start]
print('Random Seed: ')
print("\"",''.join([num_to_char[value] for value in pattern]),"\"")
# generate 100 characters, feeding each predicted character back in as input
for i in range(100):
    x = numpy.reshape(pattern, (1, len(pattern), 1))
    x = x / float(vocab_len)
    prediction = model.predict(x, verbose=0)
    index = numpy.argmax(prediction)
    result = num_to_char[index]
    sys.stdout.write(result)
    pattern.append(index)
    pattern = pattern[1:len(pattern)]  # slide the window forward by one character
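# a final newline (not in the original script) keeps the generated text from
# running into the shell prompt
sys.stdout.write("\n")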