Converted the models repo to TF 1.0 using the upgrade script
nealwu committed Mar 14, 2017
1 parent f21c427 commit 052e5e8
Showing 71 changed files with 314 additions and 314 deletions.
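Most of the hunks below are mechanical renames produced by the upgrade script. A minimal sketch of the main patterns, written against TensorFlow 1.0 only (the tensors and names here are made up, not taken from any file in this commit):

import tensorflow as tf

a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])
diff = tf.subtract(a, b)       # was tf.sub(a, b)
prod = tf.multiply(a, b)       # was tf.mul(a, b)
stacked = tf.stack([a, b])     # was tf.pack([a, b])
joined = tf.concat([a, b], 0)  # was tf.concat(0, [a, b]); values now come first
tf.summary.scalar('prod_sum', tf.reduce_sum(prod))  # was tf.scalar_summary(...)
init = tf.global_variables_initializer()            # was tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    print(sess.run([diff, prod, stacked, joined]))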
2 changes: 1 addition & 1 deletion autoencoder/autoencoder_models/Autoencoder.py
@@ -18,7 +18,7 @@ def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimize
self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

# cost
- self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
+ self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
self.optimizer = optimizer.minimize(self.cost)

init = tf.global_variables_initializer()
4 changes: 2 additions & 2 deletions autoencoder/autoencoder_models/DenoisingAutoencoder.py
@@ -22,7 +22,7 @@ def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimi
self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

# cost
- self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
+ self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
self.optimizer = optimizer.minimize(self.cost)

init = tf.global_variables_initializer()
@@ -89,7 +89,7 @@ def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimi
self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

# cost
- self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
+ self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
self.optimizer = optimizer.minimize(self.cost)

init = tf.global_variables_initializer()
6 changes: 3 additions & 3 deletions autoencoder/autoencoder_models/VariationalAutoencoder.py
@@ -17,13 +17,13 @@ def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimizer()):
self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']), self.weights['log_sigma_b1'])

# sample from gaussian distribution
- eps = tf.random_normal(tf.pack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32)
- self.z = tf.add(self.z_mean, tf.mul(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))
+ eps = tf.random_normal(tf.stack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32)
+ self.z = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))

self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']), self.weights['b2'])

# cost
- reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
+ reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq
- tf.square(self.z_mean)
- tf.exp(self.z_log_sigma_sq), 1)
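The sampling step above is the usual reparameterization trick, z = mean + sqrt(exp(log_sigma_sq)) * eps with eps drawn from N(0, 1). A minimal standalone sketch in TF 1.0 names (tf.stack, tf.multiply), with made-up layer sizes:

import tensorflow as tf

n_input, n_hidden = 4, 3
x = tf.placeholder(tf.float32, [None, n_input])
w_mean = tf.Variable(tf.zeros([n_input, n_hidden]))
w_log_sigma = tf.Variable(tf.zeros([n_input, n_hidden]))
z_mean = tf.matmul(x, w_mean)
z_log_sigma_sq = tf.matmul(x, w_log_sigma)
eps = tf.random_normal(tf.stack([tf.shape(x)[0], n_hidden]), 0, 1, dtype=tf.float32)
z = tf.add(z_mean, tf.multiply(tf.sqrt(tf.exp(z_log_sigma_sq)), eps))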
Empty file modified: compression/decoder.py (100755 → 100644)
Empty file modified: compression/encoder.py (100755 → 100644)
Empty file modified: compression/msssim.py (100755 → 100644)
4 changes: 2 additions & 2 deletions differential_privacy/dp_sgd/dp_mnist/dp_mnist.py
@@ -273,7 +273,7 @@ def Train(mnist_train_file, mnist_test_file, network_parameters, num_steps,
images, network_parameters)

cost = tf.nn.softmax_cross_entropy_with_logits(
- logits, tf.one_hot(labels, 10))
+ logits=logits, labels=tf.one_hot(labels, 10))

# The actual cost is the average across the examples.
cost = tf.reduce_sum(cost, [0]) / batch_size
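In TF 1.0, tf.nn.softmax_cross_entropy_with_logits requires its arguments to be passed as keywords, which is what the converted call above does. A minimal sketch with made-up logits and labels:

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1]])
labels = tf.one_hot([0], 3)
xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
with tf.Session() as sess:
    print(sess.run(xent))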
@@ -343,7 +343,7 @@ def Train(mnist_train_file, mnist_test_file, network_parameters, num_steps,

# We need to maintain the initialization sequence.
for v in tf.trainable_variables():
- sess.run(tf.initialize_variables([v]))
+ sess.run(tf.variables_initializer([v]))
sess.run(tf.global_variables_initializer())
sess.run(init_ops)
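tf.initialize_variables becomes tf.variables_initializer in TF 1.0 (and tf.initialize_all_variables becomes tf.global_variables_initializer). A minimal sketch of initializing one variable at a time, with a made-up variable:

import tensorflow as tf

v = tf.Variable(tf.zeros([2]), name='v')
with tf.Session() as sess:
    sess.run(tf.variables_initializer([v]))
    sess.run(tf.global_variables_initializer())
    print(sess.run(v))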

4 changes: 2 additions & 2 deletions differential_privacy/dp_sgd/dp_optimizer/utils.py
@@ -236,7 +236,7 @@ def BatchClipByL2norm(t, upper_bound, name=None):
with tf.op_scope([t, upper_bound], name, "batch_clip_by_l2norm") as name:
saved_shape = tf.shape(t)
batch_size = tf.slice(saved_shape, [0], [1])
- t2 = tf.reshape(t, tf.concat(0, [batch_size, [-1]]))
+ t2 = tf.reshape(t, tf.concat(axis=0, values=[batch_size, [-1]]))
upper_bound_inv = tf.fill(tf.slice(saved_shape, [0], [1]),
tf.constant(1.0/upper_bound))
# Add a small number to avoid divide by 0
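In TF 1.0 tf.concat takes the tensor list first and the axis second, so the upgrade script rewrites old positional calls into the keyword form seen above. A minimal sketch of the per-example flattening reshape, with a made-up tensor:

import tensorflow as tf

t = tf.ones([4, 3, 2])
saved_shape = tf.shape(t)
batch_size = tf.slice(saved_shape, [0], [1])
t2 = tf.reshape(t, tf.concat(axis=0, values=[batch_size, [-1]]))  # flatten each example
with tf.Session() as sess:
    print(sess.run(tf.shape(t2)))  # [4 6]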
@@ -266,7 +266,7 @@ def SoftThreshold(t, threshold_ratio, name=None):
assert threshold_ratio >= 0
with tf.op_scope([t, threshold_ratio], name, "soft_thresholding") as name:
saved_shape = tf.shape(t)
- t2 = tf.reshape(t, tf.concat(0, [tf.slice(saved_shape, [0], [1]), -1]))
+ t2 = tf.reshape(t, tf.concat(axis=0, values=[tf.slice(saved_shape, [0], [1]), -1]))
t_abs = tf.abs(t2)
t_x = tf.sign(t2) * tf.nn.relu(t_abs -
(tf.reduce_mean(t_abs, [0],
@@ -189,7 +189,7 @@ def __call__(self, x, z_grads):
z_grads, = z_grads
x_expanded = tf.expand_dims(x, 2)
z_grads_expanded = tf.expand_dims(z_grads, 1)
- return tf.mul(x_expanded, z_grads_expanded)
+ return tf.multiply(x_expanded, z_grads_expanded)


pxg_registry.Register("MatMul", MatMulPXG)
@@ -245,7 +245,7 @@ def _PxConv2DBuilder(self, input_, w, strides, padding):
num_x = int(conv_x.get_shape()[0])
assert num_x == 1, num_x
assert len(conv_px) == batch_size
- conv = tf.concat(0, conv_px)
+ conv = tf.concat(axis=0, values=conv_px)
assert int(conv.get_shape()[0]) == batch_size
return conv, w_px

@@ -274,7 +274,7 @@ def __call__(self, w, z_grads):
self.colocate_gradients_with_ops,
gate_gradients=self.gate_gradients)

- return tf.pack(gradients_list)
+ return tf.stack(gradients_list)

pxg_registry.Register("Conv2D", Conv2DPXG)

8 changes: 4 additions & 4 deletions differential_privacy/multiple_teachers/deep_cnn.py
@@ -75,7 +75,7 @@ def _variable_with_weight_decay(name, shape, stddev, wd):
var = _variable_on_cpu(name, shape,
tf.truncated_normal_initializer(stddev=stddev))
if wd is not None:
- weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
+ weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
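The renamed tf.multiply above implements the usual L2 weight decay term wd * l2_loss(var), accumulated in a 'losses' collection. A minimal sketch with a made-up variable and decay factor:

import tensorflow as tf

var = tf.Variable(tf.truncated_normal([3, 3], stddev=0.1), name='weights')
wd = 0.004
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
total_regularization = tf.add_n(tf.get_collection('losses'))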

@@ -398,7 +398,7 @@ def train_op_fun(total_loss, global_step):
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
- tf.scalar_summary('learning_rate', lr)
+ tf.summary.scalar('learning_rate', lr)

# Generate moving averages of all losses and associated summaries.
loss_averages_op = moving_av(total_loss)
@@ -413,7 +413,7 @@ def train_op_fun(total_loss, global_step):

# Add histograms for trainable variables.
for var in tf.trainable_variables():
- tf.histogram_summary(var.op.name, var)
+ tf.summary.histogram(var.op.name, var)

# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
@@ -485,7 +485,7 @@ def train(images, labels, ckpt_path, dropout=False):
train_op = train_op_fun(loss, global_step)

# Create a saver.
- saver = tf.train.Saver(tf.all_variables())
+ saver = tf.train.Saver(tf.global_variables())

print("Graph constructed and saver created")

4 changes: 2 additions & 2 deletions differential_privacy/privacy_accountant/tf/accountant.py
@@ -361,12 +361,12 @@ def _differential_moments(self, sigma, s, t):
exponents = tf.constant([j * (j + 1.0 - 2.0 * s) / (2.0 * sigma * sigma)
for j in range(t + 1)], dtype=tf.float64)
# x[i, j] = binomial[i, j] * signs[i, j] = (i choose j) * (-1)^{i-j}
- x = tf.mul(binomial, signs)
+ x = tf.multiply(binomial, signs)
# y[i, j] = x[i, j] * exp(exponents[j])
# = (i choose j) * (-1)^{i-j} * exp(j(j-1)/(2 sigma^2))
# Note: this computation is done by broadcasting pointwise multiplication
# between [t+1, t+1] tensor and [t+1] tensor.
- y = tf.mul(x, tf.exp(exponents))
+ y = tf.multiply(x, tf.exp(exponents))
# z[i] = sum_j y[i, j]
# = sum_j (i choose j) * (-1)^{i-j} * exp(j(j-1)/(2 sigma^2))
z = tf.reduce_sum(y, 1)
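The comment above describes broadcasting: multiplying a [t+1, t+1] matrix by a [t+1] vector scales each column j by exp(exponents[j]). A minimal sketch with t = 2 and made-up values:

import tensorflow as tf

x = tf.constant([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0],
                 [7.0, 8.0, 9.0]], dtype=tf.float64)
exponents = tf.constant([0.0, 1.0, 2.0], dtype=tf.float64)
y = tf.multiply(x, tf.exp(exponents))  # column j scaled by exp(exponents[j])
z = tf.reduce_sum(y, 1)
with tf.Session() as sess:
    print(sess.run(z))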
6 changes: 3 additions & 3 deletions im2txt/im2txt/show_and_tell_model.py
@@ -264,7 +264,7 @@ def build_model(self):
if self.mode == "inference":
# In inference mode, use concatenated states for convenient feeding and
# fetching.
- tf.concat(initial_state, 1, name="initial_state")
+ tf.concat(axis=initial_state, values=1, name="initial_state")

# Placeholder for feeding a batch of concatenated states.
state_feed = tf.placeholder(dtype=tf.float32,
@@ -274,11 +274,11 @@

# Run a single LSTM step.
lstm_outputs, state_tuple = lstm_cell(
- inputs=tf.squeeze(self.seq_embeddings, squeeze_dims=[1]),
+ inputs=tf.squeeze(self.seq_embeddings, axis=[1]),
state=state_tuple)

# Concatentate the resulting state.
- tf.concat(state_tuple, 1, name="state")
+ tf.concat(axis=state_tuple, values=1, name="state")
else:
# Run the batch of sequence embeddings through the LSTM.
sequence_length = tf.reduce_sum(self.input_mask, 1)
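For reference, TF 1.0's tf.concat signature is tf.concat(values, axis, name=None): values= should receive the list of state tensors and axis= the integer dimension, so the converted calls above (axis=initial_state, values=1) are worth double-checking against that signature. A minimal sketch of concatenating an LSTM state tuple along dimension 1, with made-up shapes:

import tensorflow as tf

c = tf.zeros([2, 5])  # made-up cell state
h = tf.ones([2, 5])   # made-up hidden state
initial_state = (c, h)
packed_state = tf.concat(values=initial_state, axis=1, name="initial_state")  # shape [2, 10]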
Empty file modified: inception/inception/data/preprocess_imagenet_validation_data.py (100755 → 100644)
Empty file modified: inception/inception/data/process_bounding_boxes.py (100755 → 100644)
16 changes: 8 additions & 8 deletions inception/inception/image_processing.py
@@ -221,7 +221,7 @@ def distort_image(image, height, width, bbox, thread_id=0, scope=None):
if not thread_id:
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
- tf.image_summary('image_with_bounding_boxes', image_with_box)
+ tf.summary.image('image_with_bounding_boxes', image_with_box)

# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
@@ -242,7 +242,7 @@ def distort_image(image, height, width, bbox, thread_id=0, scope=None):
if not thread_id:
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distort_bbox)
- tf.image_summary('images_with_distorted_bounding_box',
+ tf.summary.image('images_with_distorted_bounding_box',
image_with_distorted_box)

# Crop the image to the specified bounding box.
@@ -259,7 +259,7 @@ def distort_image(image, height, width, bbox, thread_id=0, scope=None):
# the third dimension.
distorted_image.set_shape([height, width, 3])
if not thread_id:
- tf.image_summary('cropped_resized_image',
+ tf.summary.image('cropped_resized_image',
tf.expand_dims(distorted_image, 0))

# Randomly flip the image horizontally.
@@ -269,7 +269,7 @@ def distort_image(image, height, width, bbox, thread_id=0, scope=None):
distorted_image = distort_color(distorted_image, thread_id)

if not thread_id:
- tf.image_summary('final_distorted_image',
+ tf.summary.image('final_distorted_image',
tf.expand_dims(distorted_image, 0))
return distorted_image

@@ -328,8 +328,8 @@ def image_preprocessing(image_buffer, bbox, train, thread_id=0):
image = eval_image(image, height, width)

# Finally, rescale to [-1,1] instead of [0, 1)
- image = tf.sub(image, 0.5)
- image = tf.mul(image, 2.0)
+ image = tf.subtract(image, 0.5)
+ image = tf.multiply(image, 2.0)
return image


@@ -394,7 +394,7 @@ def parse_example_proto(example_serialized):
ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)

# Note that we impose an ordering of (y, x) just to make life difficult.
- bbox = tf.concat(0, [ymin, xmin, ymax, xmax])
+ bbox = tf.concat(axis=0, values=[ymin, xmin, ymax, xmax])

# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
@@ -505,6 +505,6 @@ def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None,
images = tf.reshape(images, shape=[batch_size, height, width, depth])

# Display the training images in the visualizer.
- tf.image_summary('images', images)
+ tf.summary.image('images', images)

return images, tf.reshape(label_index_batch, [batch_size])
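Image summaries follow the same rename, tf.image_summary to tf.summary.image; a minimal sketch with a made-up batch of images:

import tensorflow as tf

images = tf.zeros([8, 32, 32, 3])  # made-up NHWC batch
tf.summary.image('images', images)
summary_op = tf.summary.merge_all()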
12 changes: 6 additions & 6 deletions inception/inception/inception_distributed_train.py
@@ -133,7 +133,7 @@ def train(target, dataset, cluster_spec):
FLAGS.learning_rate_decay_factor,
staircase=True)
# Add a summary to track the learning rate.
- tf.scalar_summary('learning_rate', lr)
+ tf.summary.scalar('learning_rate', lr)

# Create an optimizer that performs gradient descent.
opt = tf.train.RMSPropOptimizer(lr,
@@ -171,8 +171,8 @@ def train(target, dataset, cluster_spec):
loss_name = l.op.name
# Name each loss as '(raw)' and name the moving average version of the
# loss as the original loss name.
- tf.scalar_summary(loss_name + ' (raw)', l)
- tf.scalar_summary(loss_name, loss_averages.average(l))
+ tf.summary.scalar(loss_name + ' (raw)', l)
+ tf.summary.scalar(loss_name, loss_averages.average(l))

# Add dependency to compute loss_averages.
with tf.control_dependencies([loss_averages_op]):
@@ -191,7 +191,7 @@ def train(target, dataset, cluster_spec):

# Add histograms for model variables.
for var in variables_to_average:
- tf.histogram_summary(var.op.name, var)
+ tf.summary.histogram(var.op.name, var)

# Create synchronous replica optimizer.
opt = tf.train.SyncReplicasOptimizer(
@@ -215,7 +215,7 @@ def train(target, dataset, cluster_spec):
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
- tf.histogram_summary(var.op.name + '/gradients', grad)
+ tf.summary.histogram(var.op.name + '/gradients', grad)

apply_gradients_op = opt.apply_gradients(grads, global_step=global_step)

@@ -233,7 +233,7 @@ def train(target, dataset, cluster_spec):
saver = tf.train.Saver()

# Build the summary operation based on the TF collection of Summaries.
- summary_op = tf.merge_all_summaries()
+ summary_op = tf.summary.merge_all()

# Build an initialization operation to run below.
init_op = tf.global_variables_initializer()
4 changes: 2 additions & 2 deletions inception/inception/inception_eval.py
@@ -158,10 +158,10 @@ def evaluate(dataset):
saver = tf.train.Saver(variables_to_restore)

# Build the summary operation based on the TF collection of Summaries.
- summary_op = tf.merge_all_summaries()
+ summary_op = tf.summary.merge_all()

graph_def = tf.get_default_graph().as_graph_def()
- summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
+ summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
graph_def=graph_def)

while True:
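tf.merge_all_summaries and tf.train.SummaryWriter become tf.summary.merge_all and tf.summary.FileWriter in TF 1.0; a minimal sketch, assuming a writable log directory such as /tmp/eval_dir:

import tensorflow as tf

x = tf.constant(3.0)
tf.summary.scalar('x', x)
summary_op = tf.summary.merge_all()
writer = tf.summary.FileWriter('/tmp/eval_dir', graph=tf.get_default_graph())
with tf.Session() as sess:
    writer.add_summary(sess.run(summary_op), global_step=0)
    writer.close()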
6 changes: 3 additions & 3 deletions inception/inception/inception_model.py
@@ -115,7 +115,7 @@ def loss(logits, labels, batch_size=None):
# shape [FLAGS.batch_size, num_classes].
sparse_labels = tf.reshape(labels, [batch_size, 1])
indices = tf.reshape(tf.range(batch_size), [batch_size, 1])
- concated = tf.concat(1, [indices, sparse_labels])
+ concated = tf.concat(axis=1, values=[indices, sparse_labels])
num_classes = logits[0].get_shape()[-1].value
dense_labels = tf.sparse_to_dense(concated,
[batch_size, num_classes],
@@ -147,8 +147,8 @@ def _activation_summary(x):
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
- tf.contrib.deprecated.histogram_summary(tensor_name + '/activations', x)
- tf.contrib.deprecated.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
+ tf.summary.histogram(tensor_name + '/activations', x)
+ tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))


def _activation_summaries(endpoints):
Large diff truncated: the remaining changed files are not shown above.
