Commit

Merge pull request tensorflow#1461 from EvanKepner/master
update sequence loss to contrib from legacy
lukaszkaiser authored Jun 16, 2017
2 parents c9f2ae1 + 983b7d0 · commit 1514b37
Showing 1 changed file with 15 additions and 5 deletions.
tutorials/rnn/ptb/ptb_word_lm.py (20 changes: 15 additions & 5 deletions)
@@ -162,11 +162,21 @@ def attn_cell():
         "softmax_w", [size, vocab_size], dtype=data_type())
     softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
     logits = tf.matmul(output, softmax_w) + softmax_b
-    loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
-        [logits],
-        [tf.reshape(input_.targets, [-1])],
-        [tf.ones([batch_size * num_steps], dtype=data_type())])
-    self._cost = cost = tf.reduce_sum(loss) / batch_size
+
+    # Reshape logits to be 3-D tensor for sequence loss
+    logits = tf.reshape(logits, [batch_size, num_steps, vocab_size])
+
+    # use the contrib sequence loss and average over the batches
+    loss = tf.contrib.seq2seq.sequence_loss(
+        logits,
+        input_.targets,
+        tf.ones([batch_size, num_steps], dtype=data_type()),
+        average_across_timesteps=False,
+        average_across_batch=True
+    )
+
+    # update the cost variables
+    self._cost = cost = tf.reduce_sum(loss)
     self._final_state = state
 
     if not is_training:
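
For context, here is a minimal, self-contained sketch (not part of the commit; it assumes TensorFlow 1.x, where tf.contrib is still available, and uses toy shapes and variable names of my choosing) showing why the migration is behavior-preserving: tf.contrib.seq2seq.sequence_loss with average_across_timesteps=False and average_across_batch=True, summed over timesteps, matches the legacy cost tf.reduce_sum(loss) / batch_size.

import numpy as np
import tensorflow as tf

batch_size, num_steps, vocab_size = 2, 3, 5  # toy dimensions
np.random.seed(0)

# Flat logits and 2-D targets, shaped the way the PTB model produces them.
logits_flat = tf.constant(
    np.random.rand(batch_size * num_steps, vocab_size), dtype=tf.float32)
targets = tf.constant(
    np.random.randint(vocab_size, size=(batch_size, num_steps)), dtype=tf.int32)

# Old path: single-element lists of 2-D logits and 1-D targets/weights,
# yielding one cross-entropy value per (batch, step) element.
legacy_loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
    [logits_flat],
    [tf.reshape(targets, [-1])],
    [tf.ones([batch_size * num_steps])])
legacy_cost = tf.reduce_sum(legacy_loss) / batch_size

# New path: 3-D logits and 2-D targets/weights; averaging across the batch
# (but not across timesteps) yields a [num_steps] vector, summed into the cost.
contrib_loss = tf.contrib.seq2seq.sequence_loss(
    tf.reshape(logits_flat, [batch_size, num_steps, vocab_size]),
    targets,
    tf.ones([batch_size, num_steps]),
    average_across_timesteps=False,
    average_across_batch=True)
contrib_cost = tf.reduce_sum(contrib_loss)

with tf.Session() as sess:
    old, new = sess.run([legacy_cost, contrib_cost])
    print(old, new)  # the two costs should agree up to float tolerance

The batch average that the legacy code applied by hand (dividing the summed loss by batch_size) now happens inside sequence_loss via average_across_batch=True, which is why the new cost line drops the division.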
