
Update Edward to version 1.3.0 (#623)
* version 1.3.0

* remove call to _ref now that tf.gradients uses as_ref=True in implementation

* make miscellaneous revisions
dustinvtran authored Apr 27, 2017
1 parent ec45bad commit 9d837ff
Showing 8 changed files with 21 additions and 16 deletions.
5 changes: 5 additions & 0 deletions docs/tex/getting-started.tex
@@ -5,6 +5,11 @@ \subsection{Getting Started}
 
 \subsubsection{Installation}
 
+\textbf{Note:
+TensorFlow 1.1.0 made breaking API changes.
+Edward's latest stable version requires at least TensorFlow 1.1.0.
+}
+
 To install the latest stable version, run
 
 \begin{lstlisting}[language=Java]
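Since the new note pins a minimum TensorFlow version, a quick post-install sanity check looks like this (a minimal sketch; it assumes edward re-exports __version__ from edward/version.py, whose diff appears below):

```python
import edward as ed
import tensorflow as tf

# Edward 1.3.0 requires TensorFlow >= 1.1.0 (1.1.0 made breaking API changes).
print(ed.__version__)  # expect '1.3.0'
print(tf.__version__)  # expect '1.1.0' or later
```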
6 changes: 3 additions & 3 deletions docs/tex/iclr2017.tex
@@ -12,9 +12,9 @@ \subsection{Deep Probabilistic Programming}
 The code snippets assume the following versions.
 
 \begin{lstlisting}[language=bash]
-pip install -e "git+https://github.com/blei-lab/edward.git#egg=edward"
-pip install tensorflow==1.0.0 # alternatively, tensorflow-gpu==1.0.0
-pip install keras==1.0.0
+pip install edward==1.3.0
+pip install tensorflow==1.1.0 # alternatively, tensorflow-gpu==1.1.0
+pip install keras==2.0.0
 \end{lstlisting}
 
 \subsubsection{Section 3. Compositional Representations for Probabilistic Models}
2 changes: 1 addition & 1 deletion edward/inferences/conjugacy/conjugate_log_probs.py
@@ -29,7 +29,7 @@ def beta_log_prob(self, val):
   conc0 = self.parameters['concentration0']
   conc1 = self.parameters['concentration1']
   result = (conc1 - 1.0) * tf.log(val)
-  result += (conc0 - 1.0) * tf.log(tf.constant(1.0) - val)
+  result += (conc0 - 1.0) * tf.log(1.0 - val)
   result += -tf.lgamma(conc1) - tf.lgamma(conc0) + tf.lgamma(conc1 + conc0)
   return result
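For reference, beta_log_prob accumulates the Beta log density term by term; writing c1 for concentration1 and c0 for concentration0, the lines above compute

```latex
\log p(x) = (c_1 - 1)\log x + (c_0 - 1)\log(1 - x)
            - \log\Gamma(c_1) - \log\Gamma(c_0) + \log\Gamma(c_1 + c_0).
```

The hunk itself only drops a redundant wrapper: TensorFlow converts the Python float in 1.0 - val to a tensor automatically, so tf.constant(1.0) adds nothing.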
2 changes: 1 addition & 1 deletion edward/inferences/klpq.py
@@ -136,6 +136,6 @@ def build_loss_and_gradients(self, var_list):
     loss = tf.reduce_mean(w_norm * log_w)
     grads = tf.gradients(
         -tf.reduce_mean(q_log_prob * tf.stop_gradient(w_norm)),
-        [v._ref() for v in var_list])
+        var_list)
     grads_and_vars = list(zip(grads, var_list))
     return loss, grads_and_vars
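Every _ref() hunk in this commit follows the pattern above: as the commit message notes, tf.gradients in TensorFlow >= 1.1 converts Variables with as_ref=True internally, so Edward can pass var_list directly. A minimal before/after sketch (illustrative TF 1.x code, not from the repo):

```python
import tensorflow as tf

x = tf.Variable(2.0)
loss = tf.square(x)

# Before (Edward <= 1.2.4 worked around older tf.gradients):
#   grads = tf.gradients(loss, [v._ref() for v in [x]])
# After (TF >= 1.1 handles the ref conversion itself):
grads = tf.gradients(loss, [x])

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(grads))  # [4.0], since d(x^2)/dx = 2x at x = 2.0
```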
12 changes: 6 additions & 6 deletions edward/inferences/klqp.py
@@ -392,7 +392,7 @@ def build_reparam_loss_and_gradients(inference, var_list):
   q_log_prob = tf.stack(q_log_prob)
   loss = -tf.reduce_mean(p_log_prob - q_log_prob)
 
-  grads = tf.gradients(loss, [v._ref() for v in var_list])
+  grads = tf.gradients(loss, var_list)
   grads_and_vars = list(zip(grads, var_list))
   return loss, grads_and_vars

@@ -446,7 +446,7 @@ def build_reparam_kl_loss_and_gradients(inference, var_list):
 
   loss = -(tf.reduce_mean(p_log_lik) - kl)
 
-  grads = tf.gradients(loss, [v._ref() for v in var_list])
+  grads = tf.gradients(loss, var_list)
   grads_and_vars = list(zip(grads, var_list))
   return loss, grads_and_vars

@@ -504,7 +504,7 @@ def build_reparam_entropy_loss_and_gradients(inference, var_list):
 
   loss = -(tf.reduce_mean(p_log_prob) + q_entropy)
 
-  grads = tf.gradients(loss, [v._ref() for v in var_list])
+  grads = tf.gradients(loss, var_list)
   grads_and_vars = list(zip(grads, var_list))
   return loss, grads_and_vars

@@ -558,7 +558,7 @@ def build_score_loss_and_gradients(inference, var_list):
 
   grads = tf.gradients(
       -tf.reduce_mean(q_log_prob * tf.stop_gradient(losses)),
-      [v._ref() for v in var_list])
+      var_list)
   grads_and_vars = list(zip(grads, var_list))
   return loss, grads_and_vars

@@ -611,7 +611,7 @@ def build_score_kl_loss_and_gradients(inference, var_list):
   loss = -(tf.reduce_mean(p_log_lik) - kl)
   grads = tf.gradients(
       -(tf.reduce_mean(q_log_prob * tf.stop_gradient(p_log_lik)) - kl),
-      [v._ref() for v in var_list])
+      var_list)
   grads_and_vars = list(zip(grads, var_list))
   return loss, grads_and_vars

@@ -669,6 +669,6 @@ def build_score_entropy_loss_and_gradients(inference, var_list):
   grads = tf.gradients(
       -(tf.reduce_mean(q_log_prob * tf.stop_gradient(p_log_prob)) +
         q_entropy),
-      [v._ref() for v in var_list])
+      var_list)
   grads_and_vars = list(zip(grads, var_list))
   return loss, grads_and_vars
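The score-function hunks above all share one idiom: tf.stop_gradient keeps the learning signal (losses, p_log_lik, or p_log_prob) out of the differentiation path, so gradients flow only through q_log_prob. A toy sketch of that estimator with hypothetical names (not Edward's API):

```python
import tensorflow as tf

mu = tf.Variable(0.0)  # variational parameter
eps = tf.random_normal([1000])
samples = tf.stop_gradient(mu + eps)  # draws from q, treated as constants

# log q(samples; mu) up to an additive constant (unit-variance Gaussian)
q_log_prob = -0.5 * tf.square(samples - mu)
losses = tf.square(samples - 3.0)  # per-sample signal, held fixed

# Score-function estimator: grad E_q[loss] ~= mean(loss * grad log q)
grads = tf.gradients(
    tf.reduce_mean(q_log_prob * tf.stop_gradient(losses)), [mu])
```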
2 changes: 1 addition & 1 deletion edward/inferences/map.py
@@ -122,6 +122,6 @@ def build_loss_and_gradients(self, var_list):
 
     loss = -p_log_prob
 
-    grads = tf.gradients(loss, [v._ref() for v in var_list])
+    grads = tf.gradients(loss, var_list)
     grads_and_vars = list(zip(grads, var_list))
     return loss, grads_and_vars
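For context, map.py's loss is MAP estimation phrased as minimization: maximizing the joint log-density is the same as minimizing its negation,

```latex
\hat{z} = \arg\max_z \log p(x, z) = \arg\min_z \, [-\log p(x, z)],
```

hence loss = -p_log_prob in the context lines above.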
2 changes: 1 addition & 1 deletion edward/version.py
@@ -1 +1 @@
-__version__ = '1.2.4'
+__version__ = '1.3.0'
6 changes: 3 additions & 3 deletions notebooks/iclr2017.ipynb
@@ -8,9 +8,9 @@
     "\n",
     "The code snippets assume the following versions.\n",
     "```bash\n",
-    "pip install -e \"git+https://github.com/blei-lab/edward.git#egg=edward\"\n",
-    "pip install tensorflow==1.0.0 # alternatively, tensorflow-gpu==1.0.0\n",
-    "pip install keras==1.0.0\n",
+    "pip install edward==1.3.0\n",
+    "pip install tensorflow==1.1.0 # alternatively, tensorflow-gpu==1.1.0\n",
+    "pip install keras==2.0.0\n",
     "```"
    ]
   },
