Remove step in SAC._train_step arglist, fixes issue of redundant wrapping of HER replay_buffer
krishpop committed Sep 21, 2020
1 parent b7e0f40 commit aad197b
Showing 3 changed files with 8 additions and 5 deletions.
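For context, a minimal usage sketch of the scenario this commit fixes: calling ``HER.learn`` more than once on the same model. The environment and hyperparameters below are illustrative only and are not taken from this commit.

import gym
from stable_baselines import HER, SAC

# Any goal-based environment works; FetchReach-v1 is only an example here.
env = gym.make("FetchReach-v1")
model = HER("MlpPolicy", env, SAC, n_sampled_goal=4,
            goal_selection_strategy="future", verbose=1)

# Before this commit, every call to HER.learn handed self.replay_wrapper down to
# SAC.learn, so the replay buffer got wrapped in the HER wrapper again each time.
# With the wrapped_buffer flag introduced below, only the first call wraps it.
model.learn(total_timesteps=1000)
model.learn(total_timesteps=1000, reset_num_timesteps=False)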
3 changes: 2 additions & 1 deletion docs/misc/changelog.rst
@@ -65,7 +65,8 @@ Bug Fixes:
- Fixed a bug in CloudPickleWrapper's (used by VecEnvs) ``__setstate___`` where loading was incorrectly using ``pickle.loads`` (@shwang).
- Fixed a bug in ``SAC`` and ``TD3`` where the log timesteps was not correct(@YangRui2015)
- Fixed a bug where the environment was reset twice when using ``evaluate_policy``
-- Fixed a bug where ``SAC`` uses wrong step to log to tensorboard after multiple calls to ``SAC.learn(..., reset_num_timesteps=True)``
+- Fixed a bug where ``SAC`` uses wrong step to log to tensorboard after multiple calls to ``SAC.learn(..., reset_num_timesteps=True)`` (@krishpop)
+- Fixed issue where HER replay buffer wrapper is used multiple times after multiple calls to ``HER.learn`` (@krishpop)

Deprecations:
^^^^^^^^^^^^^
5 changes: 4 additions & 1 deletion stable_baselines/her/her.py
@@ -70,6 +70,7 @@ def _create_replay_wrapper(self, env):
n_sampled_goal=self.n_sampled_goal,
goal_selection_strategy=self.goal_selection_strategy,
wrapped_env=self.env)
+self.wrapped_buffer = False

def set_env(self, env):
assert not isinstance(env, VecEnvWrapper), "HER does not support VecEnvWrapper"
@@ -108,9 +109,11 @@ def setup_model(self):

def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="HER",
reset_num_timesteps=True):
+replay_wrapper = self.replay_wrapper if not self.wrapped_buffer else None
+self.wrapped_buffer = True
return self.model.learn(total_timesteps, callback=callback, log_interval=log_interval,
tb_log_name=tb_log_name, reset_num_timesteps=reset_num_timesteps,
-replay_wrapper=self.replay_wrapper)
+replay_wrapper=replay_wrapper)

def _check_obs(self, observation):
if isinstance(observation, dict):
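A minimal standalone sketch of the wrap-once guard added above, using hypothetical names (``OuterAlgo``, ``_inner_learn``) rather than the real stable-baselines classes:

class OuterAlgo:
    def __init__(self, replay_wrapper):
        self.replay_wrapper = replay_wrapper
        self.wrapped_buffer = False  # flips to True after the first learn() call

    def learn(self, total_timesteps):
        # Hand the wrapper to the inner learner only on the first call; later
        # calls reuse the buffer that was already wrapped.
        replay_wrapper = self.replay_wrapper if not self.wrapped_buffer else None
        self.wrapped_buffer = True
        return self._inner_learn(total_timesteps, replay_wrapper=replay_wrapper)

    def _inner_learn(self, total_timesteps, replay_wrapper=None):
        # Stand-in for SAC.learn: wrap the replay buffer only when a wrapper
        # is actually supplied.
        if replay_wrapper is not None:
            self.replay_buffer = replay_wrapper(list())
        return self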
5 changes: 2 additions & 3 deletions stable_baselines/sac/sac.py
@@ -313,8 +313,7 @@ def setup_model(self):

self.summary = tf.summary.merge_all()

-def _train_step(self, step, writer, learning_rate):
-del step
+def _train_step(self, writer, learning_rate):
# Sample a batch from the replay buffer
batch = self.replay_buffer.sample(self.batch_size, env=self._vec_normalize_env)
batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones = batch
@@ -461,7 +460,7 @@ def learn(self, total_timesteps, callback=None,
frac = 1.0 - step / total_timesteps
current_lr = self.learning_rate(frac)
# Update policy and critics (q functions)
-mb_infos_vals.append(self._train_step(step, writer, current_lr))
+mb_infos_vals.append(self._train_step(writer, current_lr))
# Update target network
if (step + grad_step) % self.target_update_interval == 0:
# Update target network
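Because ``_train_step`` no longer takes a ``step`` argument, any downstream code overriding it must match the new two-argument signature. A hypothetical sketch (``LoggingSAC`` is not part of this commit); the running timestep count remains available as ``self.num_timesteps``:

from stable_baselines import SAC

class LoggingSAC(SAC):
    def _train_step(self, writer, learning_rate):
        # Overrides now take only writer and learning_rate.
        values = super()._train_step(writer, learning_rate)
        # The unused step argument is gone; num_timesteps still tracks progress.
        print("trained at timestep", self.num_timesteps, "lr", learning_rate)
        return values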
