From 0d5f3137200b382a5e1fcba338f9482858524b4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edouard=20Fouch=C3=A9?= Date: Fri, 28 Apr 2017 19:36:40 +0200 Subject: [PATCH 1/2] Update lm_1b_eval.py Fix Python 3 compatibility: - replace xrange with range - add .decode() after f.read() to get strings instead of bytes when reading a text file --- lm_1b/lm_1b_eval.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lm_1b/lm_1b_eval.py b/lm_1b/lm_1b_eval.py index 65c48aa4a54..4d1a7c20bd1 100644 --- a/lm_1b/lm_1b_eval.py +++ b/lm_1b/lm_1b_eval.py @@ -83,7 +83,7 @@ def _LoadModel(gd_file, ckpt_file): with tf.Graph().as_default(): sys.stderr.write('Recovering graph.\n') with tf.gfile.FastGFile(gd_file, 'r') as f: - s = f.read() + s = f.read().decode() gd = tf.GraphDef() text_format.Merge(s, gd) @@ -177,7 +177,7 @@ def _SampleModel(prefix_words, vocab): prefix = [vocab.word_to_id(w) for w in prefix_words.split()] prefix_char_ids = [vocab.word_to_char_ids(w) for w in prefix_words.split()] - for _ in xrange(FLAGS.num_samples): + for _ in range(FLAGS.num_samples): inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32) char_ids_inputs = np.zeros( [BATCH_SIZE, NUM_TIMESTEPS, vocab.max_word_length], np.int32) @@ -269,7 +269,7 @@ def _DumpSentenceEmbedding(sentence, vocab): inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32) char_ids_inputs = np.zeros( [BATCH_SIZE, NUM_TIMESTEPS, vocab.max_word_length], np.int32) - for i in xrange(len(word_ids)): + for i in range(len(word_ids)): inputs[0, 0] = word_ids[i] char_ids_inputs[0, 0, :] = char_ids[i] From b2fc63b3fcd1cd7732b732d07b108cec64bf75de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Edouard=20Fouch=C3=A9?= Date: Sat, 6 May 2017 17:26:55 +0200 Subject: [PATCH 2/2] use six.moves.range instead of range --- lm_1b/lm_1b_eval.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lm_1b/lm_1b_eval.py b/lm_1b/lm_1b_eval.py index 4d1a7c20bd1..150ab6ca4af 100644 --- a/lm_1b/lm_1b_eval.py +++ 
b/lm_1b/lm_1b_eval.py @@ -17,6 +17,7 @@ """ import os import sys +import six import numpy as np import tensorflow as tf @@ -177,7 +178,7 @@ def _SampleModel(prefix_words, vocab): prefix = [vocab.word_to_id(w) for w in prefix_words.split()] prefix_char_ids = [vocab.word_to_char_ids(w) for w in prefix_words.split()] - for _ in range(FLAGS.num_samples): + for _ in six.moves.range(FLAGS.num_samples): inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32) char_ids_inputs = np.zeros( [BATCH_SIZE, NUM_TIMESTEPS, vocab.max_word_length], np.int32) @@ -230,7 +231,7 @@ def _DumpEmb(vocab): sys.stderr.write('Finished softmax weights\n') all_embs = np.zeros([vocab.size, 1024]) - for i in range(vocab.size): + for i in six.moves.range(vocab.size): input_dict = {t['inputs_in']: inputs, t['targets_in']: targets, t['target_weights_in']: weights} @@ -269,7 +270,7 @@ def _DumpSentenceEmbedding(sentence, vocab): inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32) char_ids_inputs = np.zeros( [BATCH_SIZE, NUM_TIMESTEPS, vocab.max_word_length], np.int32) - for i in range(len(word_ids)): + for i in six.moves.range(len(word_ids)): inputs[0, 0] = word_ids[i] char_ids_inputs[0, 0, :] = char_ids[i]