
Commit 9e8fd6d

Fixed a typo and multi-GPU towers processing the same batch on each GPU
tfboyd committed Jun 8, 2017
1 parent: c3e2ae5
Showing 2 changed files with 6 additions and 2 deletions.
tutorials/image/cifar10/cifar10_multi_gpu_train.py — 6 changes: 5 additions & 1 deletion
@@ -138,6 +138,7 @@ def average_gradients(tower_grads):


 def train():
+  print(FLAGS.batch_size)
   """Train CIFAR-10 for a number of steps."""
   with tf.Graph().as_default(), tf.device('/cpu:0'):
     # Create a variable to count the number of train() calls. This equals the
@@ -163,13 +164,16 @@ def train():

     # Get images and labels for CIFAR-10.
     images, labels = cifar10.distorted_inputs()
-
+    batch_queue = tf.contrib.slim.prefetch_queue.prefetch_queue(
+        [images, labels], capacity=2 * FLAGS.num_gpus)
     # Calculate the gradients for each model tower.
     tower_grads = []
     with tf.variable_scope(tf.get_variable_scope()):
       for i in xrange(FLAGS.num_gpus):
         with tf.device('/gpu:%d' % i):
           with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:
+            # Dequeues one batch for the GPU
+            images, labels = batch_queue.dequeue()
             # Calculate the loss for one tower of the CIFAR model. This function
             # constructs the entire CIFAR model but shares the variables across
             # all towers.
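Before this change, every tower built its loss from the same `images` and `labels` tensors, so each training step fed an identical batch to all GPUs. The fix buffers batches in a prefetch queue and has each tower dequeue its own. Below is a minimal, self-contained sketch of that pattern using the same TF 1.x APIs as the tutorial; the random tensors stand in for cifar10.distorted_inputs(), and num_gpus/batch_size mirror the tutorial's FLAGS:

import tensorflow as tf  # TF 1.x; tf.contrib was removed in TF 2.x

num_gpus = 2      # stands in for FLAGS.num_gpus
batch_size = 128  # stands in for FLAGS.batch_size

with tf.Graph().as_default(), tf.device('/cpu:0'):
  # Stand-in input pipeline; the tutorial uses cifar10.distorted_inputs().
  images = tf.random_uniform([batch_size, 24, 24, 3])
  labels = tf.random_uniform([batch_size], maxval=10, dtype=tf.int32)

  # Buffer up to 2 * num_gpus batches so the towers never starve for input.
  batch_queue = tf.contrib.slim.prefetch_queue.prefetch_queue(
      [images, labels], capacity=2 * num_gpus)

  for i in range(num_gpus):
    with tf.device('/gpu:%d' % i):
      # Each dequeue() pulls a distinct batch off the queue, so the towers
      # no longer all train on the same data in a given step.
      tower_images, tower_labels = batch_queue.dequeue()
      # ... build this tower's loss from tower_images/tower_labels ...

Note that prefetch_queue registers a QueueRunner, so a real training loop also needs the queue runners started inside a session (e.g. tf.train.start_queue_runners), as the full tutorial does.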
tutorials/image/cifar10/cifar10_train.py — 2 changes: 1 addition & 1 deletion
@@ -64,7 +64,7 @@ def train():
   # Get images and labels for CIFAR-10.
   # Force input pipeline to CPU:0 to avoid operations sometimes ending up on
   # GPU and resulting in a slow down.
-  with tf.device('/CPU:0'):
+  with tf.device('/cpu:0'):
     images, labels = cifar10.distorted_inputs()
 
   # Build a Graph that computes the logits predictions from the
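The comment preserved above states the intent: input ops must stay on the CPU, and the lowercase '/cpu:0' matches the device string used elsewhere in these tutorials. A quick, generic TF 1.x way to verify where ops actually land (not part of this commit) is to enable device-placement logging:

import tensorflow as tf  # TF 1.x

with tf.device('/cpu:0'):
  # Stand-in for cifar10.distorted_inputs(); any input op works here.
  images = tf.random_uniform([128, 24, 24, 3], name='input_images')

# log_device_placement prints the device assigned to every op when the
# session starts, which makes misplaced input ops easy to spot.
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
  sess.run(images)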
