diff --git a/docs/misc/changelog.rst b/docs/misc/changelog.rst
index 897726a42b..26fa36f504 100644
--- a/docs/misc/changelog.rst
+++ b/docs/misc/changelog.rst
@@ -65,7 +65,7 @@ Bug Fixes:
 - Fixed a bug in CloudPickleWrapper's (used by VecEnvs) ``__setstate___`` where loading was incorrectly using ``pickle.loads`` (@shwang).
 - Fixed a bug in ``SAC`` and ``TD3`` where the log timesteps was not correct(@YangRui2015)
 - Fixed a bug where the environment was reset twice when using ``evaluate_policy``
-
+- Fixed a bug where ``SAC`` used the wrong step to log to tensorboard after multiple calls to ``SAC.learn(..., reset_num_timesteps=True)``
 
 Deprecations:
 ^^^^^^^^^^^^^
diff --git a/stable_baselines/sac/sac.py b/stable_baselines/sac/sac.py
index f466af76d3..e303dc7150 100644
--- a/stable_baselines/sac/sac.py
+++ b/stable_baselines/sac/sac.py
@@ -314,6 +314,7 @@ def setup_model(self):
                 self.summary = tf.summary.merge_all()
 
     def _train_step(self, step, writer, learning_rate):
+        del step  # unused: summaries are now logged against self.num_timesteps
         # Sample a batch from the replay buffer
         batch = self.replay_buffer.sample(self.batch_size, env=self._vec_normalize_env)
         batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones = batch
@@ -336,7 +337,7 @@ def _train_step(self, step, writer, learning_rate):
         if writer is not None:
             out = self.sess.run([self.summary] + self.step_ops, feed_dict)
             summary = out.pop(0)
-            writer.add_summary(summary, step)
+            writer.add_summary(summary, self.num_timesteps)
         else:
             out = self.sess.run(self.step_ops, feed_dict)
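
For context, the scenario the changelog entry describes can be sketched as below. This is a minimal illustration, not part of the patch: it assumes stable-baselines (the TF1 release) and gym are installed, and ``Pendulum-v0``, the log directory, and the run name are illustrative choices rather than values taken from the diff.

```python
# Minimal sketch of the logging scenario the patch addresses. Assumptions:
# stable-baselines (TF1) and gym installed; "Pendulum-v0", "./sac_tb/" and
# "run" are hypothetical example values.
from stable_baselines import SAC

model = SAC("MlpPolicy", "Pendulum-v0", tensorboard_log="./sac_tb/", verbose=0)

# First call: the local training-step counter passed to _train_step and the
# global model.num_timesteps counter start out in agreement.
model.learn(total_timesteps=1000, tb_log_name="run", reset_num_timesteps=True)

# Subsequent call: before this patch, _train_step tagged its tensorboard
# summaries with its local `step` argument, which can drift from the global
# model.num_timesteps counter used by the rest of the logging; after the
# patch, both use model.num_timesteps.
model.learn(total_timesteps=1000, tb_log_name="run", reset_num_timesteps=True)
```

Using ``self.num_timesteps`` (already maintained by ``BaseRLModel`` and used elsewhere for episode-reward logging) keeps all tensorboard series on a single, consistent x-axis, which is why the local ``step`` argument becomes dead and is explicitly discarded with ``del step``.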