diff --git a/lm_1b/lm_1b_eval.py b/lm_1b/lm_1b_eval.py
index 65c48aa4a5..150ab6ca4a 100644
--- a/lm_1b/lm_1b_eval.py
+++ b/lm_1b/lm_1b_eval.py
@@ -17,6 +17,7 @@
 """
 import os
 import sys
+import six
 
 import numpy as np
 import tensorflow as tf
@@ -83,7 +84,7 @@ def _LoadModel(gd_file, ckpt_file):
   with tf.Graph().as_default():
     sys.stderr.write('Recovering graph.\n')
     with tf.gfile.FastGFile(gd_file, 'r') as f:
-      s = f.read()
+      s = f.read().decode()
       gd = tf.GraphDef()
       text_format.Merge(s, gd)
 
@@ -177,7 +178,7 @@ def _SampleModel(prefix_words, vocab):
   prefix = [vocab.word_to_id(w) for w in prefix_words.split()]
   prefix_char_ids = [vocab.word_to_char_ids(w) for w in prefix_words.split()]
 
-  for _ in xrange(FLAGS.num_samples):
+  for _ in six.moves.range(FLAGS.num_samples):
     inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32)
     char_ids_inputs = np.zeros(
         [BATCH_SIZE, NUM_TIMESTEPS, vocab.max_word_length], np.int32)
@@ -230,7 +231,7 @@ def _DumpEmb(vocab):
   sys.stderr.write('Finished softmax weights\n')
 
   all_embs = np.zeros([vocab.size, 1024])
-  for i in range(vocab.size):
+  for i in six.moves.range(vocab.size):
     input_dict = {t['inputs_in']: inputs,
                   t['targets_in']: targets,
                   t['target_weights_in']: weights}
@@ -269,7 +270,7 @@ def _DumpSentenceEmbedding(sentence, vocab):
   inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32)
   char_ids_inputs = np.zeros(
       [BATCH_SIZE, NUM_TIMESTEPS, vocab.max_word_length], np.int32)
-  for i in xrange(len(word_ids)):
+  for i in six.moves.range(len(word_ids)):
     inputs[0, 0] = word_ids[i]
     char_ids_inputs[0, 0, :] = char_ids[i]