From 9d837ff5afdc12f4f9ee2ebc524a45a8951c447b Mon Sep 17 00:00:00 2001
From: Dustin Tran
Date: Thu, 27 Apr 2017 06:55:01 +0200
Subject: [PATCH] Update Edward to version 1.3.0 (#623)

* version 1.3.0

* remove call to _ref now that tf.gradients uses as_ref=True in implementation

* make miscellaneous revisions
---
 docs/tex/getting-started.tex                       |  5 +++++
 docs/tex/iclr2017.tex                              |  6 +++---
 edward/inferences/conjugacy/conjugate_log_probs.py |  2 +-
 edward/inferences/klpq.py                          |  2 +-
 edward/inferences/klqp.py                          | 12 ++++++------
 edward/inferences/map.py                           |  2 +-
 edward/version.py                                  |  2 +-
 notebooks/iclr2017.ipynb                           |  6 +++---
 8 files changed, 21 insertions(+), 16 deletions(-)

diff --git a/docs/tex/getting-started.tex b/docs/tex/getting-started.tex
index 83fc196b0..5bd2a0e7b 100644
--- a/docs/tex/getting-started.tex
+++ b/docs/tex/getting-started.tex
@@ -5,6 +5,11 @@ \subsection{Getting Started}
 
 \subsubsection{Installation}
 
+\textbf{Note:
+TensorFlow 1.1.0 made breaking API changes.
+Edward's latest stable version requires at least TensorFlow 1.1.0.
+}
+
 To install the latest stable version, run
 
 \begin{lstlisting}[language=Java]
diff --git a/docs/tex/iclr2017.tex b/docs/tex/iclr2017.tex
index b35cc57ee..d6d1dcc0c 100644
--- a/docs/tex/iclr2017.tex
+++ b/docs/tex/iclr2017.tex
@@ -12,9 +12,9 @@ \subsection{Deep Probabilistic Programming}
 
 The code snippets assume the following versions.
 \begin{lstlisting}[language=bash]
-pip install -e "git+https://github.com/blei-lab/edward.git#egg=edward"
-pip install tensorflow==1.0.0 # alternatively, tensorflow-gpu==1.0.0
-pip install keras==1.0.0
+pip install edward==1.3.0
+pip install tensorflow==1.1.0 # alternatively, tensorflow-gpu==1.1.0
+pip install keras==2.0.0
 \end{lstlisting}
 
 \subsubsection{Section 3. Compositional Representations for Probabilistic Models}
diff --git a/edward/inferences/conjugacy/conjugate_log_probs.py b/edward/inferences/conjugacy/conjugate_log_probs.py
index bbe9a3c11..b22f610bf 100644
--- a/edward/inferences/conjugacy/conjugate_log_probs.py
+++ b/edward/inferences/conjugacy/conjugate_log_probs.py
@@ -29,7 +29,7 @@ def beta_log_prob(self, val):
   conc0 = self.parameters['concentration0']
   conc1 = self.parameters['concentration1']
   result = (conc1 - 1.0) * tf.log(val)
-  result += (conc0 - 1.0) * tf.log(tf.constant(1.0) - val)
+  result += (conc0 - 1.0) * tf.log(1.0 - val)
   result += -tf.lgamma(conc1) - tf.lgamma(conc0) + tf.lgamma(conc1 + conc0)
   return result
 
diff --git a/edward/inferences/klpq.py b/edward/inferences/klpq.py
index 373d07c70..216116a27 100644
--- a/edward/inferences/klpq.py
+++ b/edward/inferences/klpq.py
@@ -136,6 +136,6 @@ def build_loss_and_gradients(self, var_list):
   loss = tf.reduce_mean(w_norm * log_w)
   grads = tf.gradients(
       -tf.reduce_mean(q_log_prob * tf.stop_gradient(w_norm)),
-      [v._ref() for v in var_list])
+      var_list)
   grads_and_vars = list(zip(grads, var_list))
   return loss, grads_and_vars
diff --git a/edward/inferences/klqp.py b/edward/inferences/klqp.py
index 644318b27..eb150bb5a 100644
--- a/edward/inferences/klqp.py
+++ b/edward/inferences/klqp.py
@@ -392,7 +392,7 @@ def build_reparam_loss_and_gradients(inference, var_list):
   q_log_prob = tf.stack(q_log_prob)
 
   loss = -tf.reduce_mean(p_log_prob - q_log_prob)
 
-  grads = tf.gradients(loss, [v._ref() for v in var_list])
+  grads = tf.gradients(loss, var_list)
   grads_and_vars = list(zip(grads, var_list))
   return loss, grads_and_vars
@@ -446,7 +446,7 @@ def build_reparam_kl_loss_and_gradients(inference, var_list):
 
   loss = -(tf.reduce_mean(p_log_lik) - kl)
 
-  grads = tf.gradients(loss, [v._ref() for v in var_list])
+  grads = tf.gradients(loss, var_list)
   grads_and_vars = list(zip(grads, var_list))
   return loss, grads_and_vars
@@ -504,7 +504,7 @@ def build_reparam_entropy_loss_and_gradients(inference, var_list):
 
   loss = -(tf.reduce_mean(p_log_prob) + q_entropy)
 
-  grads = tf.gradients(loss, [v._ref() for v in var_list])
+  grads = tf.gradients(loss, var_list)
   grads_and_vars = list(zip(grads, var_list))
   return loss, grads_and_vars
@@ -558,7 +558,7 @@ def build_score_loss_and_gradients(inference, var_list):
 
   grads = tf.gradients(
       -tf.reduce_mean(q_log_prob * tf.stop_gradient(losses)),
-      [v._ref() for v in var_list])
+      var_list)
   grads_and_vars = list(zip(grads, var_list))
   return loss, grads_and_vars
@@ -611,7 +611,7 @@ def build_score_kl_loss_and_gradients(inference, var_list):
 
   loss = -(tf.reduce_mean(p_log_lik) - kl)
   grads = tf.gradients(
       -(tf.reduce_mean(q_log_prob * tf.stop_gradient(p_log_lik)) - kl),
-      [v._ref() for v in var_list])
+      var_list)
   grads_and_vars = list(zip(grads, var_list))
   return loss, grads_and_vars
@@ -669,6 +669,6 @@ def build_score_entropy_loss_and_gradients(inference, var_list):
 
   grads = tf.gradients(
       -(tf.reduce_mean(q_log_prob * tf.stop_gradient(p_log_prob)) + q_entropy),
-      [v._ref() for v in var_list])
+      var_list)
   grads_and_vars = list(zip(grads, var_list))
   return loss, grads_and_vars
diff --git a/edward/inferences/map.py b/edward/inferences/map.py
index 33f0eab35..9cc33b374 100644
--- a/edward/inferences/map.py
+++ b/edward/inferences/map.py
@@ -122,6 +122,6 @@ def build_loss_and_gradients(self, var_list):
 
   loss = -p_log_prob
 
-  grads = tf.gradients(loss, [v._ref() for v in var_list])
+  grads = tf.gradients(loss, var_list)
   grads_and_vars = list(zip(grads, var_list))
   return loss, grads_and_vars
diff --git a/edward/version.py b/edward/version.py
index daab838ba..19b4f1d60 100644
--- a/edward/version.py
+++ b/edward/version.py
@@ -1 +1 @@
-__version__ = '1.2.4'
+__version__ = '1.3.0'
diff --git a/notebooks/iclr2017.ipynb b/notebooks/iclr2017.ipynb
index ba8b0c0a9..21bee742f 100644
--- a/notebooks/iclr2017.ipynb
+++ b/notebooks/iclr2017.ipynb
@@ -8,9 +8,9 @@
     "\n",
     "The code snippets assume the following versions.\n",
     "```bash\n",
-    "pip install -e \"git+https://github.com/blei-lab/edward.git#egg=edward\"\n",
-    "pip install tensorflow==1.0.0 # alternatively, tensorflow-gpu==1.0.0\n",
-    "pip install keras==1.0.0\n",
+    "pip install edward==1.3.0\n",
+    "pip install tensorflow==1.1.0 # alternatively, tensorflow-gpu==1.1.0\n",
+    "pip install keras==2.0.0\n",
     "```"
   ]
 },
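The recurring change across `klpq.py`, `klqp.py`, and `map.py` drops the `[v._ref() for v in var_list]` workaround: per the commit message, `tf.gradients` now converts variables with `as_ref=True` internally, so the variable list can be passed directly. A minimal sketch, assuming TensorFlow 1.x graph mode; the variable `x` and quadratic loss are illustrative, not Edward code:

```python
import tensorflow as tf

x = tf.Variable(2.0)
loss = tf.square(x)

# Pass variables straight to tf.gradients; no v._ref() needed.
grads = tf.gradients(loss, [x])
grads_and_vars = list(zip(grads, [x]))  # the same pairing the patch builds

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(grads))  # [4.0], since d(x^2)/dx = 2x = 4 at x = 2
```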
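The `conjugate_log_probs.py` change is a pure simplification: a Python float broadcasts against a tensor on its own, so wrapping `1.0` in `tf.constant` was redundant. A standalone sketch of the resulting Beta log-density, with a hypothetical free function standing in for Edward's method (which reads `concentration0`/`concentration1` from the random variable's parameters):

```python
import tensorflow as tf

def beta_log_prob(val, conc1, conc0):
  # Hypothetical standalone version of the patched beta_log_prob.
  result = (conc1 - 1.0) * tf.log(val)
  result += (conc0 - 1.0) * tf.log(1.0 - val)  # 1.0 broadcasts; no tf.constant
  result += -tf.lgamma(conc1) - tf.lgamma(conc0) + tf.lgamma(conc1 + conc0)
  return result

with tf.Session() as sess:
  # Beta(2, 2) has density 6 * x * (1 - x); at x = 0.5 that is 1.5.
  print(sess.run(beta_log_prob(0.5, 2.0, 2.0)))  # ~0.405 = log(1.5)
```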
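The score-function variants in `klqp.py` all differentiate `q_log_prob * tf.stop_gradient(...)`: freezing the second factor makes `tf.gradients` flow only through the log-density, which is the REINFORCE/score-function estimator. A toy sketch under assumed names; `theta`, `q_log_prob`, and `losses` are stand-ins, not Edward's internal tensors:

```python
import tensorflow as tf

theta = tf.Variable(0.5)
q_log_prob = tf.log(theta)  # stand-in for the variational log-density
losses = 3.0 * theta        # stand-in per-sample losses; must stay frozen

# Only q_log_prob is differentiated; losses acts as a constant weight.
surrogate = -tf.reduce_mean(q_log_prob * tf.stop_gradient(losses))
grads = tf.gradients(surrogate, [theta])

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # -(1 / theta) * losses = -(1 / 0.5) * 1.5 = -3.0
  print(sess.run(grads))  # [-3.0]
```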