| python_code (stringlengths 0–780k) | repo_name (stringlengths 7–38) | file_path (stringlengths 5–103) |
|---|---|---|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for cartpole."""
from typing import Optional, Sequence
from bsuite.experiments.cartpole import sweep
from bsuite.utils import plotting
import numpy as np
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
BASE_REGRET = 1000
GOOD_EPISODE = 500
TAGS = sweep.TAGS
def score(df: pd.DataFrame) -> float:
  """Output a single score for cartpole = 50% regret, 50% has a good run."""
  cp_df = cartpole_preprocess(df_in=df)
  regret_score = plotting.ave_regret_score(
      cp_df, baseline_regret=BASE_REGRET, episode=NUM_EPISODES)
  # Give 50% of score if your "best" episode > GOOD_EPISODE threshold.
  solve_score = np.mean(
      cp_df.groupby('bsuite_id')['best_episode'].max() > GOOD_EPISODE)
  return 0.5 * (regret_score + solve_score)


def cartpole_preprocess(df_in: pd.DataFrame) -> pd.DataFrame:
  """Preprocess cartpole data for use with regret metrics."""
  df = df_in.copy()
  df = df[df.episode <= NUM_EPISODES]
  df['total_regret'] = (BASE_REGRET * df.episode) - df.raw_return
  return df


def plot_learning(df: pd.DataFrame,
                  sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  """Simple learning curves for cartpole."""
  df = cartpole_preprocess(df)
  p = plotting.plot_regret_learning(
      df, sweep_vars=sweep_vars, max_episode=NUM_EPISODES)
  p += gg.geom_hline(gg.aes(yintercept=BASE_REGRET),
                     linetype='dashed', alpha=0.4, size=1.75)
  return p


def plot_seeds(df_in: pd.DataFrame,
               sweep_vars: Optional[Sequence[str]] = None,
               colour_var: Optional[str] = None) -> gg.ggplot:
  """Plot the returns through time individually by run."""
  df = df_in.copy()
  df['average_return'] = df.raw_return.diff() / df.episode.diff()
  p = plotting.plot_individual_returns(
      df_in=df,
      max_episode=NUM_EPISODES,
      return_column='average_return',
      colour_var=colour_var,
      yintercept=BASE_REGRET,
      sweep_vars=sweep_vars,
  )
  return p + gg.ylab('average episodic return')
|
bsuite-master
|
bsuite/experiments/cartpole/analysis.py
|
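A rough, self-contained sketch of the combined cartpole score defined in the analysis module above. The regret rescaling below is only a guess at what `plotting.ave_regret_score` computes (that helper is not included in this dump); the toy numbers and column names simply mirror the fields the analysis code reads (`bsuite_id`, `episode`, `raw_return`, `best_episode`).

```python
import numpy as np
import pandas as pd

BASE_REGRET, NUM_EPISODES, GOOD_EPISODE = 1000, 1000, 500

# Toy results: one row per run at its final episode, columns as read by the code above.
toy = pd.DataFrame({
    'bsuite_id': ['cartpole/0', 'cartpole/1'],
    'episode': [1000, 1000],
    'raw_return': [900_000.0, 200_000.0],  # cumulative return over all episodes
    'best_episode': [800.0, 300.0],        # best single-episode return seen
})

# Regret half: average per-episode regret against the 1000-per-episode ceiling,
# rescaled so zero regret scores 1 and baseline regret scores 0 (an assumption
# about what plotting.ave_regret_score does).
ave_regret = ((BASE_REGRET * toy.episode - toy.raw_return) / toy.episode).mean()
regret_score = np.clip(1. - ave_regret / BASE_REGRET, 0, 1)

# "Good run" half: fraction of runs whose best episode beat the threshold.
solve_score = (toy.groupby('bsuite_id')['best_episode'].max() > GOOD_EPISODE).mean()

print(0.5 * (regret_score + solve_score))  # 0.525 for these toy numbers
```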
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/cartpole/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Cartpole reinforcement learning environment."""
from bsuite.environments import cartpole
load = cartpole.Cartpole
|
bsuite-master
|
bsuite/experiments/cartpole/cartpole.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for a balancing experiment in Cartpole."""
NUM_EPISODES = 1000
SETTINGS = tuple({'seed': None} for _ in range(20))
TAGS = ('basic', 'credit_assignment', 'generalization')
|
bsuite-master
|
bsuite/experiments/cartpole/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for discounting_chain."""
from typing import Optional, Sequence
from bsuite.experiments.discounting_chain import sweep
from bsuite.utils import plotting
import numpy as np
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
BASE_REGRET = 0.08
TAGS = sweep.TAGS
_HORIZONS = np.array([1, 3, 10, 30, 100])
def score(df: pd.DataFrame) -> float:
  """Output a single score for discounting_chain."""
  n_eps = np.minimum(df.episode.max(), sweep.NUM_EPISODES)
  ave_return = df.loc[df.episode == n_eps, 'total_return'].mean() / n_eps
  raw_score = 1. - 10. * (1.1 - ave_return)
  return np.clip(raw_score, 0, 1)


def _mapping_seed_compatibility(df: pd.DataFrame) -> pd.DataFrame:
  """Utility function to maintain compatibility with old bsuite runs."""
  # Discounting chain kwarg "seed" was renamed to "mapping_seed".
  if 'mapping_seed' in df.columns:
    nan_seeds = df.mapping_seed.isna()
    if np.any(nan_seeds):
      df.loc[nan_seeds, 'mapping_seed'] = df.loc[nan_seeds, 'seed']
      print('WARNING: seed renamed to "mapping_seed" for compatibility.')
  else:
    if 'seed' in df.columns:
      print('WARNING: seed renamed to "mapping_seed" for compatibility.')
      df['mapping_seed'] = df.seed
    else:
      print('ERROR: outdated bsuite run, please relaunch.')
  return df


def dc_preprocess(df_in: pd.DataFrame) -> pd.DataFrame:
  """Preprocess discounting chain data for use with regret metrics."""
  df = df_in.copy()
  df = _mapping_seed_compatibility(df)
  df['optimal_horizon'] = _HORIZONS[
      (df.mapping_seed % len(_HORIZONS)).astype(int)]
  df['total_regret'] = 1.1 * df.episode - df.total_return
  df['optimal_horizon'] = df.optimal_horizon.astype('category')
  return df


def plot_learning(df: pd.DataFrame,
                  sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  """Plots the average regret through time by optimal_horizon."""
  df = dc_preprocess(df_in=df)
  p = plotting.plot_regret_learning(
      df_in=df,
      group_col='optimal_horizon',
      sweep_vars=sweep_vars,
      max_episode=sweep.NUM_EPISODES
  )
  p += gg.geom_hline(gg.aes(yintercept=BASE_REGRET),
                     linetype='dashed', alpha=0.4, size=1.75)
  p += gg.coord_cartesian(ylim=(0, 0.1))
  return p


def plot_average(df: pd.DataFrame,
                 sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  """Plots the average regret at 1k episodes by optimal_horizon."""
  df = dc_preprocess(df_in=df)
  p = plotting.plot_regret_average(
      df_in=df,
      group_col='optimal_horizon',
      episode=sweep.NUM_EPISODES,
      sweep_vars=sweep_vars
  )
  p += gg.geom_hline(gg.aes(yintercept=BASE_REGRET),
                     linetype='dashed', alpha=0.4, size=1.75)
  return p


def plot_seeds(df_in: pd.DataFrame,
               sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  """Plot the returns through time individually by run."""
  df = dc_preprocess(df_in)
  df['average_return'] = 1.1 - (df.total_regret.diff() / df.episode.diff())
  p = plotting.plot_individual_returns(
      df_in=df,
      max_episode=NUM_EPISODES,
      return_column='average_return',
      colour_var='optimal_horizon',
      yintercept=1.1,
      sweep_vars=sweep_vars,
  )
  return p + gg.ylab('average episodic return')
|
bsuite-master
|
bsuite/experiments/discounting_chain/analysis.py
|
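The `score` above maps average per-episode return onto [0, 1] via `1 - 10 * (1.1 - ave_return)`: the bonus chain makes 1.1 the best achievable per-episode return, so a fully exploiting agent scores 1 while one that settles for the plain +1 reward scores 0. A small worked check of just that formula, rewritten as a standalone helper for illustration:

```python
import numpy as np

def dc_score_from_ave_return(ave_return: float) -> float:
  # Same arithmetic as score() above; 1.1 is the optimal per-episode return.
  return float(np.clip(1. - 10. * (1.1 - ave_return), 0, 1))

print(dc_score_from_ave_return(1.1))   # 1.0 -- always on the 10% bonus chain
print(dc_score_from_ave_return(1.05))  # 0.5
print(dc_score_from_ave_return(1.0))   # 0.0 -- never finds the bonus
```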
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/discounting_chain/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simple diagnostic discounting challenge.
Observation is two pixels: (context, time_to_live)
Context will only be -1 in the first step, then equal to the action selected in
the first step. For all future decisions the agent is in a "chain" for that
action. Reward of +1 come at one of: 1, 3, 10, 30, 100
However, depending on the seed, one of these chains has a 10% bonus.
"""
from bsuite.environments import discounting_chain
load = discounting_chain.DiscountingChain
|
bsuite-master
|
bsuite/experiments/discounting_chain/discounting_chain.py
|
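A minimal sketch of running this environment through the standard dm_env loop. It assumes only what is visible above: `load` is `DiscountingChain`, the sweep passes a `mapping_seed` kwarg, and the environment follows the usual `dm_env` reset/step interface; the random policy is purely illustrative.

```python
import numpy as np
from bsuite.experiments.discounting_chain import discounting_chain

env = discounting_chain.load(mapping_seed=0)  # load = DiscountingChain
rng = np.random.RandomState(0)
num_actions = env.action_spec().num_values

timestep = env.reset()
episode_return = 0.
while not timestep.last():
  action = rng.randint(num_actions)
  timestep = env.step(action)
  episode_return += timestep.reward
print(episode_return)  # at most 1.1 per episode, per the analysis above
```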
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for discounting_chain experiment."""
NUM_EPISODES = 1000
SETTINGS = tuple({'mapping_seed': n} for n in range(20))
TAGS = ('credit_assignment',)
|
bsuite-master
|
bsuite/experiments/discounting_chain/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for bandit_scale environments."""
from typing import Optional, Sequence
from bsuite.experiments.bandit import analysis as bandit_analysis
from bsuite.experiments.bandit_noise import analysis as bandit_noise_analysis
from bsuite.experiments.bandit_scale import sweep
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
def score(df: pd.DataFrame) -> float:
  return bandit_noise_analysis.score(df, scaling_var='reward_scale')


def plot_learning(df: pd.DataFrame,
                  sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  return bandit_noise_analysis.plot_learning(df, sweep_vars, 'reward_scale')


def plot_average(df: pd.DataFrame,
                 sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  return bandit_noise_analysis.plot_average(df, sweep_vars, 'reward_scale')


def plot_seeds(df: pd.DataFrame,
               sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  """Plot the performance by individual work unit."""
  return bandit_analysis.plot_seeds(
      df_in=df,
      sweep_vars=sweep_vars,
      colour_var='reward_scale'
  ) + gg.ylab('average episodic return (after rescaling)')
|
bsuite-master
|
bsuite/experiments/bandit_scale/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/bandit_scale/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for bandit_scale experiment."""
from bsuite.experiments.bandit import sweep as bandit_sweep
NUM_EPISODES = bandit_sweep.NUM_EPISODES
_settings = []
for scale in [0.001, 0.03, 1.0, 30., 1000.]:
  for n in range(4):
    _settings.append({'reward_scale': scale, 'seed': None, 'mapping_seed': n})
SETTINGS = tuple(_settings)
TAGS = ('scale',)
|
bsuite-master
|
bsuite/experiments/bandit_scale/sweep.py
|
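The nested loop above yields 5 reward scales x 4 mapping seeds = 20 settings. A quick check of that grid, assuming the import path shown in the file_path column:

```python
from bsuite.experiments.bandit_scale import sweep

print(len(sweep.SETTINGS))  # 20
print(sweep.SETTINGS[0])    # {'reward_scale': 0.001, 'seed': None, 'mapping_seed': 0}
print(sweep.NUM_EPISODES)   # inherited from the underlying bandit sweep
```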
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simple diagnostic bandit_scale challenge.
Observation is a single pixel of 0 - this is an indep arm bandit problem!
Rewards are np.linspace(0, 1, 11) with no noise, but rescaled.
"""
from bsuite.environments import bandit
from bsuite.experiments.bandit import sweep
from bsuite.utils import wrappers
def load(reward_scale, seed, mapping_seed):
  """Load a bandit_scale experiment with the prescribed settings."""
  env = wrappers.RewardScale(
      env=bandit.SimpleBandit(mapping_seed=mapping_seed),
      reward_scale=reward_scale,
      seed=seed)
  env.bsuite_num_episodes = sweep.NUM_EPISODES
  return env
|
bsuite-master
|
bsuite/experiments/bandit_scale/bandit_scale.py
|
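For illustration, the wrapped environment above can be driven like any other dm_env environment. The argument values mirror the interface test further down and are otherwise arbitrary; that the `RewardScale` wrapper forwards `action_spec` is an assumption.

```python
import numpy as np
from bsuite.experiments.bandit_scale import bandit_scale

env = bandit_scale.load(reward_scale=30., seed=42, mapping_seed=0)
rng = np.random.RandomState(0)
num_actions = env.action_spec().num_values  # 11 arms, per the test below

timestep = env.reset()
while not timestep.last():
  timestep = env.step(rng.randint(num_actions))
print(timestep.reward)  # the underlying bandit reward, multiplied by reward_scale
```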
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.bandit_scale."""
from absl.testing import absltest
from bsuite.experiments.bandit_scale import bandit_scale
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):

  def make_object_under_test(self):
    return bandit_scale.load(10, 42, 42)

  def make_action_sequence(self):
    valid_actions = range(11)
    rng = np.random.RandomState(42)
    for _ in range(100):
      yield rng.choice(valid_actions)


if __name__ == '__main__':
  absltest.main()
|
bsuite-master
|
bsuite/experiments/bandit_scale/bandit_scale_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for mnist scale environments."""
from typing import Optional, Sequence
from bsuite.experiments.mnist import analysis as mnist_analysis
from bsuite.experiments.mnist_noise import analysis as mnist_noise_analysis
from bsuite.experiments.mnist_scale import sweep
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
def score(df: pd.DataFrame) -> float:
  return mnist_noise_analysis.score(df, scaling_var='reward_scale')


def plot_learning(df: pd.DataFrame,
                  sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  return mnist_noise_analysis.plot_learning(df, sweep_vars, 'reward_scale')


def plot_average(df: pd.DataFrame,
                 sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  return mnist_noise_analysis.plot_average(df, sweep_vars, 'reward_scale')


def plot_seeds(df: pd.DataFrame,
               sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  """Plot the performance by individual work unit."""
  return mnist_analysis.plot_seeds(
      df_in=df,
      sweep_vars=sweep_vars,
      colour_var='reward_scale'
  )
|
bsuite-master
|
bsuite/experiments/mnist_scale/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MNIST bandit with reward scaling."""
from bsuite.environments import mnist
from bsuite.experiments.mnist_scale import sweep
from bsuite.utils import wrappers
def load(reward_scale, seed):
  """Load an mnist_scale experiment with the prescribed settings."""
  env = wrappers.RewardScale(
      env=mnist.MNISTBandit(seed=seed),
      reward_scale=reward_scale,
      seed=seed)
  env.bsuite_num_episodes = sweep.NUM_EPISODES
  return env
|
bsuite-master
|
bsuite/experiments/mnist_scale/mnist_scale.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.mnist."""
from absl.testing import absltest
from bsuite.experiments.mnist_scale import mnist_scale
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):

  def make_object_under_test(self):
    return mnist_scale.load(reward_scale=2.0, seed=101)

  def make_action_sequence(self):
    num_actions = self.environment.action_spec().num_values
    rng = np.random.RandomState(42)
    for _ in range(100):
      yield rng.randint(num_actions)


if __name__ == '__main__':
  absltest.main()
|
bsuite-master
|
bsuite/experiments/mnist_scale/mnist_scale_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/mnist_scale/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for mnist_scale experiment."""
from bsuite.experiments.mnist import sweep as mnist_sweep
NUM_EPISODES = mnist_sweep.NUM_EPISODES
_settings = []
for scale in [0.001, 0.03, 1.0, 30., 1000.]:
  for seed in range(4):
    _settings.append({'reward_scale': scale, 'seed': None})
SETTINGS = tuple(_settings)
TAGS = ('scale', 'generalization')
|
bsuite-master
|
bsuite/experiments/mnist_scale/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for catch."""
from typing import Optional, Sequence
from bsuite.experiments.catch import sweep
from bsuite.utils import plotting
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
BASE_REGRET = 1.6
TAGS = sweep.TAGS
def score(df: pd.DataFrame) -> float:
  """Output a single score for catch."""
  return plotting.ave_regret_score(
      df, baseline_regret=BASE_REGRET, episode=sweep.NUM_EPISODES)


def plot_learning(df: pd.DataFrame,
                  sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  """Simple learning curves for catch."""
  p = plotting.plot_regret_learning(
      df, sweep_vars=sweep_vars, max_episode=sweep.NUM_EPISODES)
  p += gg.geom_hline(
      gg.aes(yintercept=BASE_REGRET), linetype='dashed', alpha=0.4, size=1.75)
  return p


def plot_seeds(df_in: pd.DataFrame,
               sweep_vars: Optional[Sequence[str]] = None,
               colour_var: Optional[str] = None) -> gg.ggplot:
  """Plot the returns through time individually by run."""
  df = df_in.copy()
  df['average_return'] = 1.0 - (df.total_regret.diff() / df.episode.diff())
  p = plotting.plot_individual_returns(
      df_in=df,
      max_episode=NUM_EPISODES,
      return_column='average_return',
      colour_var=colour_var,
      yintercept=1.,
      sweep_vars=sweep_vars,
  )
  return p + gg.ylab('average episodic return')
|
bsuite-master
|
bsuite/experiments/catch/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/catch/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for catch experiment."""
NUM_EPISODES = 10000
SETTINGS = tuple({'seed': None} for _ in range(20))
TAGS = ('basic', 'credit_assignment')
|
bsuite-master
|
bsuite/experiments/catch/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Catch reinforcement learning environment."""
from bsuite.environments import catch
load = catch.Catch
|
bsuite-master
|
bsuite/experiments/catch/catch.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for mountain_car_noise."""
from typing import Optional, Sequence
from bsuite.experiments.mountain_car import analysis as mountain_car_analysis
from bsuite.experiments.mountain_car_noise import sweep
from bsuite.utils import plotting
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
def score(df: pd.DataFrame, scaling_var='noise_scale') -> float:
  """Output a single score for experiment = mean - std over scaling_var."""
  return plotting.score_by_scaling(
      df=df,
      score_fn=mountain_car_analysis.score,
      scaling_var=scaling_var,
  )


def plot_learning(df: pd.DataFrame,
                  sweep_vars: Optional[Sequence[str]] = None,
                  group_col: str = 'noise_scale') -> gg.ggplot:
  """Plots the average regret through time."""
  df = mountain_car_analysis.mountain_car_preprocess(df)
  p = plotting.plot_regret_learning(
      df_in=df, group_col=group_col, sweep_vars=sweep_vars,
      max_episode=sweep.NUM_EPISODES)
  p += gg.geom_hline(gg.aes(yintercept=mountain_car_analysis.BASE_REGRET),
                     linetype='dashed', alpha=0.4, size=1.75)
  return p


def plot_average(df: pd.DataFrame,
                 sweep_vars: Optional[Sequence[str]] = None,
                 group_col: str = 'noise_scale') -> gg.ggplot:
  """Plots the average regret through time by noise_scale."""
  df = mountain_car_analysis.mountain_car_preprocess(df)
  p = plotting.plot_regret_average(
      df_in=df,
      group_col=group_col,
      episode=sweep.NUM_EPISODES,
      sweep_vars=sweep_vars
  )
  p += gg.geom_hline(gg.aes(yintercept=mountain_car_analysis.BASE_REGRET),
                     linetype='dashed', alpha=0.4, size=1.75)
  return p


def plot_seeds(df: pd.DataFrame,
               sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  """Plot the performance by individual work unit."""
  return mountain_car_analysis.plot_seeds(
      df_in=df,
      sweep_vars=sweep_vars,
      colour_var='noise_scale'
  ) + gg.ylab('average episodic return (removing noise)')
|
bsuite-master
|
bsuite/experiments/mountain_car_noise/analysis.py
|
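`plotting.score_by_scaling` is not included in this dump; the docstring above describes it only as "mean - std over scaling_var". A self-contained paraphrase of that aggregation, with hypothetical per-noise-scale scores, would look roughly like:

```python
import numpy as np

def mean_minus_std(scores_by_scale: dict) -> float:
  """Aggregate per-scale scores as mean minus standard deviation (a paraphrase)."""
  values = np.array(list(scores_by_scale.values()))
  return float(values.mean() - values.std())

# Hypothetical scores for one agent at each noise_scale in the sweep below.
print(mean_minus_std({0.1: 0.9, 0.3: 0.8, 1.0: 0.6, 3.0: 0.3, 10.0: 0.1}))
```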
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.mountain_car_noise."""
from absl.testing import absltest
from bsuite.experiments.mountain_car_noise import mountain_car_noise
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):

  def make_object_under_test(self):
    return mountain_car_noise.load(1., 22)

  def make_action_sequence(self):
    valid_actions = [0, 1, 2]
    rng = np.random.RandomState(42)
    for _ in range(100):
      yield rng.choice(valid_actions)


if __name__ == '__main__':
  absltest.main()
|
bsuite-master
|
bsuite/experiments/mountain_car_noise/mountain_car_noise_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/mountain_car_noise/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Mountain car environment with noisy rewards."""
from bsuite.environments import mountain_car
from bsuite.experiments.mountain_car_noise import sweep
from bsuite.utils import wrappers
def load(noise_scale, seed):
  """Load a mountain_car experiment with the prescribed settings."""
  env = wrappers.RewardNoise(
      env=mountain_car.MountainCar(seed=seed),
      noise_scale=noise_scale,
      seed=seed)
  env.bsuite_num_episodes = sweep.NUM_EPISODES
  return env
|
bsuite-master
|
bsuite/experiments/mountain_car_noise/mountain_car_noise.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for mountain car noise experiment."""
from bsuite.experiments.mountain_car import sweep as mountain_car_sweep
NUM_EPISODES = mountain_car_sweep.NUM_EPISODES
_settings = []
for scale in [0.1, 0.3, 1.0, 3., 10.]:
  for seed in range(4):
    _settings.append({'noise_scale': scale, 'seed': None})
SETTINGS = tuple(_settings)
TAGS = ('noise', 'generalization')
|
bsuite-master
|
bsuite/experiments/mountain_car_noise/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for cartpole_noise."""
from typing import Optional, Sequence
from bsuite.experiments.cartpole import analysis as cartpole_analysis
from bsuite.experiments.cartpole_noise import sweep
from bsuite.utils import plotting
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
def score(df: pd.DataFrame, scaling_var='noise_scale') -> float:
  """Output a single score for experiment = mean - std over scaling_var."""
  return plotting.score_by_scaling(
      df=df,
      score_fn=cartpole_analysis.score,
      scaling_var=scaling_var,
  )


def plot_learning(df: pd.DataFrame,
                  sweep_vars: Optional[Sequence[str]] = None,
                  group_col: str = 'noise_scale') -> gg.ggplot:
  """Plots the average regret through time."""
  df = cartpole_analysis.cartpole_preprocess(df)
  p = plotting.plot_regret_learning(
      df_in=df, group_col=group_col, sweep_vars=sweep_vars,
      max_episode=sweep.NUM_EPISODES)
  p += gg.geom_hline(gg.aes(yintercept=cartpole_analysis.BASE_REGRET),
                     linetype='dashed', alpha=0.4, size=1.75)
  return p


def plot_average(df: pd.DataFrame,
                 sweep_vars: Optional[Sequence[str]] = None,
                 group_col: str = 'noise_scale') -> gg.ggplot:
  """Plots the average regret through time by noise_scale."""
  df = cartpole_analysis.cartpole_preprocess(df)
  p = plotting.plot_regret_average(
      df_in=df,
      group_col=group_col,
      episode=sweep.NUM_EPISODES,
      sweep_vars=sweep_vars
  )
  p += gg.geom_hline(gg.aes(yintercept=cartpole_analysis.BASE_REGRET),
                     linetype='dashed', alpha=0.4, size=1.75)
  return p


def plot_seeds(df: pd.DataFrame,
               sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  """Plot the performance by individual work unit."""
  return cartpole_analysis.plot_seeds(
      df_in=df,
      sweep_vars=sweep_vars,
      colour_var='noise_scale'
  ) + gg.ylab('average episodic return (removing noise)')
|
bsuite-master
|
bsuite/experiments/cartpole_noise/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/cartpole_noise/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.cartpole_noise."""
from absl.testing import absltest
from bsuite.experiments.cartpole_noise import cartpole_noise
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):

  def make_object_under_test(self):
    return cartpole_noise.load(1., 22)

  def make_action_sequence(self):
    valid_actions = [0, 1, 2]
    rng = np.random.RandomState(42)
    for _ in range(100):
      yield rng.choice(valid_actions)


if __name__ == '__main__':
  absltest.main()
|
bsuite-master
|
bsuite/experiments/cartpole_noise/cartpole_noise_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for cartpole_noise experiment."""
from bsuite.experiments.cartpole import sweep as cartpole_sweep
NUM_EPISODES = cartpole_sweep.NUM_EPISODES
_settings = []
for scale in [0.1, 0.3, 1.0, 3., 10.]:
  for seed in range(4):
    _settings.append({'noise_scale': scale, 'seed': None})
SETTINGS = tuple(_settings)
TAGS = ('noise', 'generalization')
|
bsuite-master
|
bsuite/experiments/cartpole_noise/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Cartpole environment with noisy rewards."""
from bsuite.environments import cartpole
from bsuite.experiments.cartpole_noise import sweep
from bsuite.utils import wrappers
def load(noise_scale, seed):
  """Load a cartpole experiment with the prescribed settings."""
  env = wrappers.RewardNoise(
      env=cartpole.Cartpole(seed=seed),
      noise_scale=noise_scale,
      seed=seed)
  env.bsuite_num_episodes = sweep.NUM_EPISODES
  return env
|
bsuite-master
|
bsuite/experiments/cartpole_noise/cartpole_noise.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for cartpole swingup."""
from typing import Optional, Sequence
from bsuite.experiments.cartpole_swingup import sweep
from bsuite.utils import plotting
import numpy as np
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
BASE_REGRET = 700
GOOD_EPISODE = 100
TAGS = sweep.TAGS
def cp_swingup_preprocess(df_in: pd.DataFrame) -> pd.DataFrame:
  """Preprocess data for cartpole swingup."""
  df = df_in.copy()
  df = df[df.episode <= NUM_EPISODES]
  df['perfection_regret'] = df.episode * BASE_REGRET - df.total_return
  return df


def score(df: pd.DataFrame) -> float:
  """Output a single score for swingup = 50% regret, 50% does a swingup."""
  df = cp_swingup_preprocess(df_in=df)
  scores = []
  for _, sub_df in df.groupby('height_threshold'):
    regret_score = plotting.ave_regret_score(
        sub_df,
        baseline_regret=BASE_REGRET,
        episode=NUM_EPISODES,
        regret_column='perfection_regret'
    )
    swingup_score = np.mean(
        sub_df.groupby('bsuite_id')['best_episode'].max() > GOOD_EPISODE)
    scores.append(0.5 * (regret_score + swingup_score))
  return np.mean(scores)


def plot_learning(df: pd.DataFrame,
                  sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  """Plots the average return through time by cartpole swingup."""
  df = cp_swingup_preprocess(df_in=df)
  p = plotting.plot_regret_group_nosmooth(
      df_in=df,
      group_col='height_threshold',
      sweep_vars=sweep_vars,
      regret_col='perfection_regret',
      max_episode=sweep.NUM_EPISODES,
  )
  return p


def plot_scale(df: pd.DataFrame,
               sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  """Plots the best episode observed by height_threshold."""
  df = cp_swingup_preprocess(df_in=df)
  group_vars = ['height_threshold']
  if sweep_vars:
    group_vars += sweep_vars
  plt_df = df.groupby(group_vars)['best_episode'].max().reset_index()
  p = (gg.ggplot(plt_df)
       + gg.aes(x='factor(height_threshold)', y='best_episode',
                colour='best_episode > {}'.format(GOOD_EPISODE))
       + gg.geom_point(size=5, alpha=0.8)
       + gg.scale_colour_manual(values=['#d73027', '#313695'])
       + gg.geom_hline(gg.aes(yintercept=0.0), alpha=0)  # axis hack
       + gg.scale_x_discrete(breaks=[0, 0.25, 0.5, 0.75, 1.0])
       + gg.ylab('best return in first {} episodes'.format(NUM_EPISODES))
       + gg.xlab('height threshold')
      )
  return plotting.facet_sweep_plot(p, sweep_vars)


def plot_seeds(df_in: pd.DataFrame,
               sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
  """Plot the returns through time individually by run."""
  df = df_in.copy()
  df['average_return'] = df.raw_return.diff() / df.episode.diff()
  p = plotting.plot_individual_returns(
      df_in=df[df.episode > 1],
      max_episode=NUM_EPISODES,
      return_column='average_return',
      colour_var='height_threshold',
      sweep_vars=sweep_vars,
  )
  return p + gg.ylab('average episodic return')
|
bsuite-master
|
bsuite/experiments/cartpole_swingup/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.experiments.cartpole_swingup."""
from absl.testing import absltest
from bsuite.experiments.cartpole_swingup import cartpole_swingup
from dm_env import test_utils
import numpy as np
class InterfaceTest(test_utils.EnvironmentTestMixin, absltest.TestCase):

  def make_object_under_test(self):
    return cartpole_swingup.CartpoleSwingup(seed=42, height_threshold=0.8)

  def make_action_sequence(self):
    valid_actions = [0, 1, 2]
    rng = np.random.RandomState(42)
    for _ in range(100):
      yield rng.choice(valid_actions)


if __name__ == '__main__':
  absltest.main()
|
bsuite-master
|
bsuite/experiments/cartpole_swingup/cartpole_swingup_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/cartpole_swingup/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for a swing up experiment in Cartpole."""
from bsuite.experiments.cartpole import sweep as cartpole_sweep
NUM_EPISODES = cartpole_sweep.NUM_EPISODES
SETTINGS = tuple({'height_threshold': n / 20, 'x_reward_threshold': 1 - n / 20}
                 for n in range(20))
TAGS = ('exploration', 'generalization')
|
bsuite-master
|
bsuite/experiments/cartpole_swingup/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A swing up experiment in Cartpole."""
from typing import Optional
from bsuite.environments import base
from bsuite.environments import cartpole
from bsuite.experiments.cartpole_swingup import sweep
import dm_env
from dm_env import specs
import numpy as np
class CartpoleSwingup(base.Environment):
  """A difficult 'swing up' version of the classic Cart Pole task.

  In this version of the problem the pole begins downwards, and the agent must
  swing the pole up in order to see reward. Unlike the typical cartpole task
  the agent must pay a cost for moving, which aggravates the explore-exploit
  tradeoff. Algorithms without 'deep exploration' will simply remain still.
  """
  def __init__(self,
               height_threshold: float = 0.5,
               theta_dot_threshold: float = 1.,
               x_reward_threshold: float = 1.,
               move_cost: float = 0.1,
               x_threshold: float = 3.,
               timescale: float = 0.01,
               max_time: float = 10.,
               init_range: float = 0.05,
               seed: Optional[int] = None):
    # Setup.
    self._state = cartpole.CartpoleState(0, 0, 0, 0, 0)
    super().__init__()
    self._rng = np.random.RandomState(seed)
    self._init_fn = lambda: self._rng.uniform(low=-init_range, high=init_range)

    # Logging info.
    self._raw_return = 0.
    self._total_upright = 0.
    self._best_episode = 0.
    self._episode_return = 0.

    # Reward/episode logic.
    self._height_threshold = height_threshold
    self._theta_dot_threshold = theta_dot_threshold
    self._x_reward_threshold = x_reward_threshold
    self._move_cost = move_cost
    self._x_threshold = x_threshold
    self._timescale = timescale
    self._max_time = max_time

    # Problem config.
    self._cartpole_config = cartpole.CartpoleConfig(
        mass_cart=1.,
        mass_pole=0.1,
        length=0.5,
        force_mag=10.,
        gravity=9.8,
    )

    # Public attributes.
    self.bsuite_num_episodes = sweep.NUM_EPISODES

  def reset(self):
    self._reset_next_step = False
    self._state = cartpole.CartpoleState(
        x=self._init_fn(),
        x_dot=self._init_fn(),
        theta=np.pi + self._init_fn(),
        theta_dot=self._init_fn(),
        time_elapsed=0.,
    )
    self._episode_return = 0.
    return dm_env.restart(self.observation)

  def step(self, action):
    if self._reset_next_step:
      return self.reset()
    self._state = cartpole.step_cartpole(
        action=action,
        timescale=self._timescale,
        state=self._state,
        config=self._cartpole_config,
    )

    # Rewards only when the pole is central and balanced.
    is_upright = (np.cos(self._state.theta) > self._height_threshold
                  and np.abs(self._state.theta_dot) < self._theta_dot_threshold
                  and np.abs(self._state.x) < self._x_reward_threshold)
    reward = -1. * np.abs(action - 1) * self._move_cost
    if is_upright:
      reward += 1.
      self._total_upright += 1
    self._raw_return += reward
    self._episode_return += reward

    is_end_of_episode = (self._state.time_elapsed > self._max_time
                         or np.abs(self._state.x) > self._x_threshold)
    if is_end_of_episode:
      self._best_episode = max(self._episode_return, self._best_episode)
      self._reset_next_step = True
      return dm_env.termination(reward=reward, observation=self.observation)
    else:  # Continuing transition.
      return dm_env.transition(reward=reward, observation=self.observation)

  def _step(self, action: int) -> dm_env.TimeStep:
    raise NotImplementedError('This environment implements its own auto-reset.')

  def _reset(self) -> dm_env.TimeStep:
    raise NotImplementedError('This environment implements its own auto-reset.')

  def action_spec(self):
    return specs.DiscreteArray(dtype=int, num_values=3, name='action')

  def observation_spec(self):
    return specs.Array(shape=(1, 8), dtype=np.float32, name='state')

  @property
  def observation(self) -> np.ndarray:
    """Approximately normalize output."""
    obs = np.zeros((1, 8), dtype=np.float32)
    obs[0, 0] = self._state.x / self._x_threshold
    obs[0, 1] = self._state.x_dot / self._x_threshold
    obs[0, 2] = np.sin(self._state.theta)
    obs[0, 3] = np.cos(self._state.theta)
    obs[0, 4] = self._state.theta_dot
    obs[0, 5] = self._state.time_elapsed / self._max_time
    obs[0, 6] = 1. if np.abs(self._state.x) < self._x_reward_threshold else -1.
    theta_dot = self._state.theta_dot
    obs[0, 7] = 1. if np.abs(theta_dot) < self._theta_dot_threshold else -1.
    return obs

  def bsuite_info(self):
    return dict(raw_return=self._raw_return,
                total_upright=self._total_upright,
                best_episode=self._best_episode)
|
bsuite-master
|
bsuite/experiments/cartpole_swingup/cartpole_swingup.py
|
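A minimal random-agent loop against the environment above, mainly to show the interface and the `bsuite_info` logging fields; the constructor arguments come from `__init__` above and the policy is purely illustrative.

```python
import numpy as np
from bsuite.experiments.cartpole_swingup import cartpole_swingup

env = cartpole_swingup.CartpoleSwingup(seed=0, height_threshold=0.5)
rng = np.random.RandomState(0)
num_actions = env.action_spec().num_values  # 3 discrete actions; action 1 incurs no move cost

timestep = env.reset()
while not timestep.last():
  timestep = env.step(rng.randint(num_actions))

# Accumulated by the class above across all steps taken so far.
print(env.bsuite_info())  # raw_return, total_upright, best_episode
```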
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Analysis for memory_len."""
from typing import Optional, Sequence
from bsuite.experiments.memory_len import sweep
from bsuite.utils import plotting
import numpy as np
import pandas as pd
import plotnine as gg
NUM_EPISODES = sweep.NUM_EPISODES
TAGS = sweep.TAGS
LEARNING_THRESH = 0.75
def memory_preprocess(df_in: pd.DataFrame) -> pd.DataFrame:
"""Preprocess data for memory environments = regret relative to random."""
df = df_in.copy()
df['perfection_regret'] = df.episode - df.total_perfect
  # A random agent has a 50% chance of a perfect episode, independent of the
  # memory length and number of bits.
df['base_rate'] = 0.5
df['regret_ratio'] = df.perfection_regret / df.base_rate
return df
def score(df: pd.DataFrame, group_col: str = 'memory_length') -> float:
"""Output a single score for memory_len."""
df = memory_preprocess(df_in=df)
regret_list = [] # Loop to handle partially-finished runs.
for _, sub_df in df.groupby(group_col):
max_eps = np.minimum(sub_df.episode.max(), sweep.NUM_EPISODES)
ave_perfection = (
sub_df.loc[sub_df.episode == max_eps, 'regret_ratio'].mean() / max_eps)
regret_list.append(ave_perfection)
return np.mean(np.array(regret_list) < LEARNING_THRESH)
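# --- Illustrative note (added for exposition; not part of the original file) ---
# Worked example of the score above: a uniformly random agent gets roughly half
# of all episodes perfectly correct, so perfection_regret ~= 0.5 * episode and
# regret_ratio / episode ~= 1.0 > LEARNING_THRESH, contributing 0 to the score.
# An agent that solves a given memory_length has regret_ratio / episode close
# to 0, which falls below LEARNING_THRESH and counts that length as solved.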
def plot_learning(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
group_col: str = 'memory_length') -> gg.ggplot:
"""Plots the average return through time by memory_length."""
df = memory_preprocess(df_in=df)
p = plotting.plot_regret_group_nosmooth(
df_in=df,
group_col=group_col,
sweep_vars=sweep_vars,
regret_col='regret_ratio',
max_episode=sweep.NUM_EPISODES,
)
return p + gg.ylab('average % of correct episodes compared to random.')
def plot_scale(df: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
group_col: str = 'memory_length') -> gg.ggplot:
"""Plots the regret_ratio through time by memory_length."""
df = memory_preprocess(df_in=df)
p = plotting.plot_regret_ave_scaling(
df_in=df,
group_col=group_col,
episode=sweep.NUM_EPISODES,
regret_thresh=LEARNING_THRESH,
sweep_vars=sweep_vars,
regret_col='regret_ratio'
)
return p + gg.ylab('% correct episodes after\n{} episodes compared to random'
.format(sweep.NUM_EPISODES))
def plot_seeds(df_in: pd.DataFrame,
sweep_vars: Optional[Sequence[str]] = None,
colour_var: str = 'memory_length') -> gg.ggplot:
"""Plot the returns through time individually by run."""
df = df_in.copy()
df['average_return'] = df.total_return.diff() / df.episode.diff()
p = plotting.plot_individual_returns(
df_in=df[df.episode > 10],
max_episode=NUM_EPISODES,
return_column='average_return',
colour_var=colour_var,
sweep_vars=sweep_vars,
)
return p + gg.ylab('average episodic return')
|
bsuite-master
|
bsuite/experiments/memory_len/analysis.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/experiments/memory_len/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simple diagnostic memory challenge.
Observation is given by n+1 pixels: (context, time_to_live).
Context will only be nonzero in the first step, when it will be +1 or -1 iid
by component. Actions have no effect until time_to_live=0; then the agent
must repeat the observations that it saw, bit by bit.
"""
from typing import Optional
from bsuite.environments import memory_chain
from bsuite.experiments.memory_len import sweep
def load(memory_length: int, seed: Optional[int] = 0):
"""Memory Chain environment, with variable delay between cue and decision."""
env = memory_chain.MemoryChain(
memory_length=memory_length,
num_bits=1,
seed=seed,
)
env.bsuite_num_episodes = sweep.NUM_EPISODES
return env
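# --- Illustrative usage sketch (added for exposition; not part of the file) ---
# A minimal interaction loop via the standard dm_env interface:
#   env = load(memory_length=10, seed=0)
#   timestep = env.reset()
#   while not timestep.last():
#     timestep = env.step(0)  # A real agent would act on the remembered cue.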
|
bsuite-master
|
bsuite/experiments/memory_len/memory_len.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sweep definition for memory_len experiment."""
NUM_EPISODES = 10000
_log_spaced = []
_log_spaced.extend(range(1, 11))
_log_spaced.extend([12, 14, 17, 20, 25])
_log_spaced.extend(range(30, 105, 10))
SETTINGS = tuple({'memory_length': n} for n in _log_spaced)
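# For reference, the resulting memory lengths are:
#   1, 2, ..., 10, 12, 14, 17, 20, 25, 30, 40, 50, 60, 70, 80, 90, 100
# i.e. 23 roughly log-spaced settings in total.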
TAGS = ('memory',)
|
bsuite-master
|
bsuite/experiments/memory_len/sweep.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.sweep."""
from absl.testing import absltest
from absl.testing import parameterized
from bsuite import sweep
class SweepTest(parameterized.TestCase):
def test_access_sweep(self):
self.assertNotEmpty(sweep.SETTINGS)
def test_access_experiment_constants(self):
self.assertNotEmpty(sweep.DEEP_SEA)
@parameterized.parameters(*sweep.SETTINGS)
def test_sweep_name_format(self, bsuite_id):
self.assertIn(sweep.SEPARATOR, bsuite_id)
split = bsuite_id.split(sweep.SEPARATOR)
    self.assertLen(split, 2)
self.assertNotEmpty(split[0])
self.assertNotEmpty(split[1])
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/tests/sweep_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/tests/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests that we can load all settings in sweep.py with bsuite.load."""
from absl.testing import absltest
from absl.testing import parameterized
from bsuite import bsuite
from bsuite import sweep
def _reduced_names_and_kwargs():
"""Returns a subset of sweep.SETTINGS that covers all environment types."""
result = []
last_name = None
last_keywords = None
for bsuite_id, kwargs in sweep.SETTINGS.items():
name = bsuite_id.split(sweep.SEPARATOR)[0]
keywords = set(kwargs)
if name != last_name or keywords != last_keywords:
if 'mnist' not in name:
result.append((name, kwargs))
last_name = name
last_keywords = keywords
return result
class EnvironmentsTest(parameterized.TestCase):
@parameterized.parameters(*_reduced_names_and_kwargs())
def test_environment(self, name, settings):
env = bsuite.load(name, settings)
self.assertGreater(env.action_spec().num_values, 0)
self.assertGreater(env.bsuite_num_episodes, 0)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/tests/environments_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Common plotting and analysis code.
This code is based around plotnine, a Python implementation of ggplot.
Typically, these plots will be imported and used within experiment analysis.
"""
from typing import Callable, Optional, Sequence
from bsuite.utils import smoothers
from matplotlib import style
import numpy as np
import pandas as pd
import plotnine as gg
# Updates the theme to preferred default settings
gg.theme_set(gg.theme_bw(base_size=18, base_family='serif'))
gg.theme_update(figure_size=(12, 8), panel_spacing_x=0.01, panel_spacing_y=0.01)
style.use('seaborn-poster')
style.use('ggplot')
FIVE_COLOURS = [
'#313695', # DARK BLUE
'#74add1', # LIGHT BLUE
'#4daf4a', # GREEN
'#f46d43', # ORANGE
'#d73027', # RED
] * 10 # Hack to allow internal code to use functions without error
CATEGORICAL_COLOURS = ([
'#313695', # DARK BLUE
'#74add1', # LIGHT BLUE
'#4daf4a', # GREEN
'#f46d43', # ORANGE
'#d73027', # RED
'#984ea3', # PURPLE
'#f781bf', # PINK
'#ffc832', # YELLOW
'#000000', # BLACK
]) * 100 # For very large sweeps the colours will just have to repeat.
def ave_regret_score(df: pd.DataFrame,
baseline_regret: float,
episode: int,
regret_column: str = 'total_regret') -> float:
"""Score performance by average regret, normalized to [0,1] by baseline."""
n_eps = np.minimum(df.episode.max(), episode)
mean_regret = df.loc[df.episode == n_eps, regret_column].mean() / n_eps
unclipped_score = (baseline_regret - mean_regret) / baseline_regret
return np.clip(unclipped_score, 0, 1)
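# --- Illustrative note (added for exposition; not part of the original file) ---
# Example with hypothetical numbers: with baseline_regret=1000 and a run whose
# mean per-episode regret at the final episode is 250, the score is
# clip((1000 - 250) / 1000, 0, 1) = 0.75. A run worse than the baseline clips
# to 0, and a zero-regret run scores 1.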
def score_by_scaling(df: pd.DataFrame,
score_fn: Callable[[pd.DataFrame], float],
scaling_var: str) -> float:
"""Apply scoring function based on mean and std."""
scores = []
for _, sub_df in df.groupby(scaling_var):
scores.append(score_fn(sub_df))
mean_score = np.clip(np.mean(scores), 0, 1)
lcb_score = np.clip(np.mean(scores) - np.std(scores), 0, 1)
return 0.5 * (mean_score + lcb_score)
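# --- Illustrative note (added for exposition; not part of the original file) ---
# Example with hypothetical per-scale scores [1.0, 0.5]: mean = 0.75,
# std = 0.25, so the lower confidence bound is 0.5 and the combined score is
# 0.5 * (0.75 + 0.5) = 0.625.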
def facet_sweep_plot(base_plot: gg.ggplot,
sweep_vars: Optional[Sequence[str]] = None,
tall_plot: bool = False) -> gg.ggplot:
"""Add a facet_wrap to the plot based on sweep_vars."""
df = base_plot.data.copy()
if sweep_vars:
# Work out what size the plot should be based on the hypers + add facet.
n_hypers = df[sweep_vars].drop_duplicates().shape[0]
base_plot += gg.facet_wrap(sweep_vars, labeller='label_both')
else:
n_hypers = 1
if n_hypers == 1:
fig_size = (7, 5)
elif n_hypers == 2:
fig_size = (13, 5)
elif n_hypers == 4:
fig_size = (13, 8)
elif n_hypers <= 12:
fig_size = (15, 4 * np.divide(n_hypers, 3) + 1)
else:
print('WARNING - comparing {} agents at once is more than recommended.'
.format(n_hypers))
fig_size = (15, 12)
if tall_plot:
fig_size = (fig_size[0], fig_size[1] * 1.25)
theme_settings = gg.theme_bw(base_size=18, base_family='serif')
theme_settings += gg.theme(
figure_size=fig_size, panel_spacing_x=0.01, panel_spacing_y=0.01,)
return base_plot + theme_settings
def plot_regret_learning(df_in: pd.DataFrame,
group_col: Optional[str] = None,
sweep_vars: Optional[Sequence[str]] = None,
regret_col: str = 'total_regret',
max_episode: Optional[int] = None) -> gg.ggplot:
"""Plots the average regret through time, grouped by group_var."""
df = df_in.copy()
df['average_regret'] = df[regret_col] / df.episode
df = df[df.episode <= (max_episode or np.inf)]
if group_col is None:
p = _plot_regret_single(df)
else:
p = _plot_regret_group(df, group_col)
p += gg.geom_hline(gg.aes(yintercept=0.0), alpha=0) # axis hack
p += gg.ylab('average regret per timestep')
p += gg.coord_cartesian(xlim=(0, max_episode))
return facet_sweep_plot(p, sweep_vars, tall_plot=True)
def _plot_regret_single(df: pd.DataFrame) -> gg.ggplot:
"""Plots the average regret through time for single variable."""
p = (gg.ggplot(df)
+ gg.aes(x='episode', y='average_regret')
+ gg.geom_smooth(method=smoothers.mean, span=0.1, size=1.75, alpha=0.1,
colour='#313695', fill='#313695'))
return p
def _plot_regret_group(df: pd.DataFrame, group_col: str) -> gg.ggplot:
"""Plots the average regret through time when grouped."""
group_name = group_col.replace('_', ' ')
df[group_name] = df[group_col].astype('category')
p = (gg.ggplot(df)
+ gg.aes(x='episode', y='average_regret',
group=group_name, colour=group_name, fill=group_name)
+ gg.geom_smooth(method=smoothers.mean, span=0.1, size=1.75, alpha=0.1)
+ gg.scale_colour_manual(values=FIVE_COLOURS)
+ gg.scale_fill_manual(values=FIVE_COLOURS))
return p
def plot_regret_group_nosmooth(df_in: pd.DataFrame,
group_col: str,
sweep_vars: Optional[Sequence[str]] = None,
regret_col: str = 'total_regret',
max_episode: Optional[int] = None) -> gg.ggplot:
"""Plots the average regret through time without smoothing."""
df = df_in.copy()
df['average_regret'] = df[regret_col] / df.episode
  df = df[df.episode <= (max_episode or np.inf)]
group_name = group_col.replace('_', ' ')
df[group_name] = df[group_col]
p = (gg.ggplot(df)
+ gg.aes(x='episode', y='average_regret',
group=group_name, colour=group_name)
+ gg.geom_line(size=2, alpha=0.75)
+ gg.geom_hline(gg.aes(yintercept=0.0), alpha=0) # axis hack
)
p += gg.coord_cartesian(xlim=(0, max_episode))
return facet_sweep_plot(p, sweep_vars, tall_plot=True)
def _preprocess_ave_regret(df_in: pd.DataFrame,
group_col: str,
episode: int,
sweep_vars: Optional[Sequence[str]] = None,
regret_col: str = 'total_regret') -> pd.DataFrame:
"""Preprocess the data at episode for average regret calculations."""
df = df_in.copy()
group_vars = (sweep_vars or []) + [group_col]
plt_df = (df[df.episode == episode]
.groupby(group_vars)[regret_col].mean().reset_index())
if len(plt_df) == 0: # pylint:disable=g-explicit-length-test
raise ValueError('Your experiment has not yet run the necessary {} episodes'
.format(episode))
group_name = group_col.replace('_', ' ')
plt_df[group_name] = plt_df[group_col].astype('category')
plt_df['average_regret'] = plt_df[regret_col] / episode
return plt_df
def plot_regret_average(df_in: pd.DataFrame,
group_col: str,
episode: int,
sweep_vars: Optional[Sequence[str]] = None,
regret_col: str = 'total_regret') -> gg.ggplot:
"""Bar plot the average regret at end of learning."""
df = _preprocess_ave_regret(df_in, group_col, episode, sweep_vars, regret_col)
group_name = group_col.replace('_', ' ')
p = (gg.ggplot(df)
+ gg.aes(x=group_name, y='average_regret', fill=group_name)
+ gg.geom_bar(stat='identity')
+ gg.scale_fill_manual(values=FIVE_COLOURS)
+ gg.ylab('average regret after {} episodes'.format(episode))
)
return facet_sweep_plot(p, sweep_vars)
def plot_regret_ave_scaling(df_in: pd.DataFrame,
group_col: str,
episode: int,
regret_thresh: float,
sweep_vars: Optional[Sequence[str]] = None,
regret_col: str = 'total_regret') -> gg.ggplot:
"""Point plot of average regret investigating scaling to threshold."""
df = _preprocess_ave_regret(df_in, group_col, episode, sweep_vars, regret_col)
group_name = group_col.replace('_', ' ')
p = (gg.ggplot(df)
+ gg.aes(x=group_name, y='average_regret',
colour='average_regret < {}'.format(regret_thresh))
+ gg.geom_point(size=5, alpha=0.8)
+ gg.scale_x_log10(breaks=[1, 3, 10, 30, 100])
+ gg.scale_colour_manual(values=['#d73027', '#313695'])
+ gg.ylab('average regret at {} episodes'.format(episode))
+ gg.geom_hline(gg.aes(yintercept=0.0), alpha=0) # axis hack
)
return facet_sweep_plot(p, sweep_vars)
def _make_unique_group_col(
df: pd.DataFrame,
    sweep_vars: Optional[Sequence[str]] = None) -> pd.Series:
  """Returns a unique_group column computed from sweep_vars + bsuite_id."""
unique_vars = ['bsuite_id']
if sweep_vars:
unique_vars += sweep_vars
unique_group = (df[unique_vars].astype(str)
.apply(lambda x: x.name + '=' + x, axis=0)
.apply(lambda x: '\n'.join(x), axis=1) # pylint:disable=unnecessary-lambda
)
return unique_group
def plot_individual_returns(
df_in: pd.DataFrame,
max_episode: int,
return_column: str = 'episode_return',
colour_var: Optional[str] = None,
yintercept: Optional[float] = None,
sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot:
"""Plot individual learning curves: one curve per sweep setting."""
df = df_in.copy()
df['unique_group'] = _make_unique_group_col(df, sweep_vars)
p = (gg.ggplot(df)
+ gg.aes(x='episode', y=return_column, group='unique_group')
+ gg.coord_cartesian(xlim=(0, max_episode))
)
if colour_var:
p += gg.geom_line(gg.aes(colour=colour_var), size=1.1, alpha=0.75)
if len(df[colour_var].unique()) <= 5:
df[colour_var] = df[colour_var].astype('category')
p += gg.scale_colour_manual(values=FIVE_COLOURS)
else:
p += gg.geom_line(size=1.1, alpha=0.75, colour='#313695')
if yintercept:
p += gg.geom_hline(
yintercept=yintercept, alpha=0.5, size=2, linetype='dashed')
return facet_sweep_plot(p, sweep_vars, tall_plot=True)
|
bsuite-master
|
bsuite/utils/plotting.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.utils.wrapper."""
from absl.testing import absltest
from absl.testing import parameterized
from bsuite import environments
from bsuite.environments import catch
from bsuite.utils import wrappers
import dm_env
from dm_env import specs
from dm_env import test_utils
import mock
import numpy as np
class FakeEnvironment(environments.Environment):
"""An environment that returns pre-determined rewards and observations."""
def __init__(self, time_steps):
"""Initializes a new FakeEnvironment.
Args:
time_steps: A sequence of time step namedtuples. This could represent
one episode, or several. This class just repeatedly plays through the
sequence and doesn't inspect the contents.
"""
super().__init__()
self.bsuite_num_episodes = 1000
self._time_steps = time_steps
obs = np.asarray(self._time_steps[0].observation)
self._observation_spec = specs.Array(shape=obs.shape, dtype=obs.dtype)
self._step_index = 0
self._reset_next_step = True
def reset(self):
self._reset_next_step = False
self._step_index = 0
return self._time_steps[0]
def step(self, action):
del action
if self._reset_next_step:
return self.reset()
self._step_index += 1
self._step_index %= len(self._time_steps)
return self._time_steps[self._step_index]
def _reset(self):
raise NotImplementedError
def _step(self, action: int):
raise NotImplementedError
def observation_spec(self):
return self._observation_spec
def action_spec(self):
return specs.Array(shape=(), dtype=np.int32)
def bsuite_info(self):
return {}
class WrapperTest(absltest.TestCase):
def test_wrapper(self):
"""Tests that the wrapper computes and logs the correct data."""
mock_logger = mock.MagicMock()
mock_logger.write = mock.MagicMock()
# Make a fake environment that cycles through these time steps.
timesteps = [
dm_env.restart([]),
dm_env.transition(1, []),
dm_env.transition(2, []),
dm_env.termination(3, []),
]
expected_episode_return = 6
fake_env = FakeEnvironment(timesteps)
env = wrappers.Logging(env=fake_env, logger=mock_logger, log_every=True) # pytype: disable=wrong-arg-types
num_episodes = 5
for _ in range(num_episodes):
timestep = env.reset()
while not timestep.last():
timestep = env.step(action=0)
# We count the number of transitions, hence the -1.
expected_episode_length = len(timesteps) - 1
expected_calls = []
for i in range(1, num_episodes + 1):
expected_calls.append(
mock.call(dict(
steps=expected_episode_length * i,
episode=i,
total_return=expected_episode_return * i,
episode_len=expected_episode_length,
episode_return=expected_episode_return,
))
)
mock_logger.write.assert_has_calls(expected_calls)
def test_unwrap(self):
raw_env = FakeEnvironment([dm_env.restart([])])
scale_env = wrappers.RewardScale(raw_env, reward_scale=1.)
noise_env = wrappers.RewardNoise(scale_env, noise_scale=1.)
logging_env = wrappers.Logging(noise_env, logger=None) # pytype: disable=wrong-arg-types
unwrapped = logging_env.raw_env
self.assertEqual(id(raw_env), id(unwrapped))
class ImageObservationTest(parameterized.TestCase):
@parameterized.parameters(
((84, 84, 4), np.array([1, 2])),
((70, 90), np.array([[1, 0, 2, 3]])),
)
def test_to_image(self, shape, observation):
image = wrappers.to_image(shape, observation)
self.assertEqual(image.shape, shape)
self.assertCountEqual(np.unique(image), np.unique(observation))
class ImageWrapperCatchTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
env = catch.Catch()
return wrappers.ImageObservation(env, (84, 84, 4))
def make_action_sequence(self):
actions = [0, 1, 2]
rng = np.random.RandomState(42)
for _ in range(100):
yield rng.choice(actions)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/utils/wrappers_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Downloads and loads the MNIST dataset.
Adapted from https://github.com/google/jax/blob/master/examples/datasets.py
"""
import array
import gzip
import os
from os import path
import struct
from absl import logging
import numpy as np
from six.moves.urllib.request import urlretrieve
def _download(url, filename, directory="/tmp/mnist"):
"""Download a url to a file in the given directory."""
if not path.exists(directory):
os.makedirs(directory)
out_file = path.join(directory, filename)
if not path.isfile(out_file):
urlretrieve(url, out_file)
logging.info("Downloaded %s to %s", url, directory)
def load_mnist(directory="/tmp/mnist"):
"""Download and parse the raw MNIST dataset."""
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
base_url = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def parse_labels(filename):
with gzip.open(filename, "rb") as fh:
_ = struct.unpack(">II", fh.read(8))
return np.array(array.array("B", fh.read()), dtype=np.uint8)
def parse_images(filename):
with gzip.open(filename, "rb") as fh:
_, num_data, rows, cols = struct.unpack(">IIII", fh.read(16))
      return np.array(array.array("B", fh.read()),
                      dtype=np.uint8).reshape((num_data, rows, cols))
for filename in ["train-images-idx3-ubyte.gz", "train-labels-idx1-ubyte.gz",
"t10k-images-idx3-ubyte.gz", "t10k-labels-idx1-ubyte.gz"]:
_download(base_url + filename, filename, directory)
train_images = parse_images(
path.join(directory, "train-images-idx3-ubyte.gz"))
train_labels = parse_labels(
path.join(directory, "train-labels-idx1-ubyte.gz"))
test_images = parse_images(path.join(directory, "t10k-images-idx3-ubyte.gz"))
test_labels = parse_labels(path.join(directory, "t10k-labels-idx1-ubyte.gz"))
return (train_images, train_labels), (test_images, test_labels)
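# --- Illustrative usage sketch (added for exposition; not part of the file) ---
# For the standard MNIST files, the returned arrays have shapes:
#   (train_images, train_labels), (test_images, test_labels) = load_mnist()
#   train_images.shape == (60000, 28, 28); test_images.shape == (10000, 28, 28)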
|
bsuite-master
|
bsuite/utils/datasets.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/utils/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.utils.gym_wrapper."""
from absl.testing import absltest
from bsuite.utils import gym_wrapper
from dm_env import specs
import gym
import numpy as np
class DMEnvFromGymTest(absltest.TestCase):
def test_gym_cartpole(self):
env = gym_wrapper.DMEnvFromGym(gym.make('CartPole-v0'))
# Test converted observation spec.
observation_spec = env.observation_spec()
self.assertEqual(type(observation_spec), specs.BoundedArray)
self.assertEqual(observation_spec.shape, (4,))
self.assertEqual(observation_spec.minimum.shape, (4,))
self.assertEqual(observation_spec.maximum.shape, (4,))
self.assertEqual(observation_spec.dtype, np.dtype('float32'))
# Test converted action spec.
action_spec = env.action_spec()
self.assertEqual(type(action_spec), specs.DiscreteArray)
self.assertEqual(action_spec.shape, ())
self.assertEqual(action_spec.minimum, 0)
self.assertEqual(action_spec.maximum, 1)
self.assertEqual(action_spec.num_values, 2)
self.assertEqual(action_spec.dtype, np.dtype('int64'))
# Test step.
timestep = env.reset()
self.assertTrue(timestep.first())
timestep = env.step(1)
self.assertEqual(timestep.reward, 1.0)
self.assertEqual(timestep.observation.shape, (4,))
env.close()
def test_episode_truncation(self):
# Pendulum has no early termination condition.
gym_env = gym.make('Pendulum-v0')
env = gym_wrapper.DMEnvFromGym(gym_env)
ts = env.reset()
while not ts.last():
ts = env.step(env.action_spec().generate_value())
self.assertEqual(ts.discount, 1.0)
env.close()
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/utils/gym_wrapper_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""bsuite logging and image observation wrappers."""
from typing import Any, Dict, Optional, Sequence
from bsuite import environments
from bsuite.logging import base
import dm_env
from dm_env import specs
import numpy as np
from skimage import transform
# Keys that are present for all experiments. These are computed from within
# the `Logging` wrapper.
STANDARD_KEYS = frozenset(
['steps', 'episode', 'total_return', 'episode_len', 'episode_return'])
class Logging(dm_env.Environment):
"""Environment wrapper to track and log bsuite stats."""
def __init__(self,
env: environments.Environment,
logger: base.Logger,
log_by_step: bool = False,
log_every: bool = False):
"""Initializes the logging wrapper.
Args:
env: Environment to wrap.
logger: An object that records a row of data. This must have a `write`
method that accepts a dictionary mapping from column name to value.
log_by_step: Whether to log based on step or episode count (default).
log_every: Forces logging at each step or episode, e.g. for debugging.
"""
self._env = env
self._logger = logger
self._log_by_step = log_by_step
self._log_every = log_every
# Accumulating throughout experiment.
self._steps = 0
self._episode = 0
self._total_return = 0.0
# Most-recent-episode.
self._episode_len = 0
self._episode_return = 0.0
def flush(self):
if hasattr(self._logger, 'flush'):
self._logger.flush()
def reset(self):
timestep = self._env.reset()
self._track(timestep)
return timestep
def step(self, action):
timestep = self._env.step(action)
self._track(timestep)
return timestep
def action_spec(self):
return self._env.action_spec()
def observation_spec(self):
return self._env.observation_spec()
def _track(self, timestep: dm_env.TimeStep):
# Count transitions only.
if not timestep.first():
self._steps += 1
self._episode_len += 1
if timestep.last():
self._episode += 1
self._episode_return += timestep.reward or 0.0
self._total_return += timestep.reward or 0.0
# Log statistics periodically, either by step or by episode.
if self._log_by_step:
if _logarithmic_logging(self._steps) or self._log_every:
self._log_bsuite_data()
elif timestep.last():
if _logarithmic_logging(self._episode) or self._log_every:
self._log_bsuite_data()
# Perform bookkeeping at the end of episodes.
if timestep.last():
self._episode_len = 0
self._episode_return = 0.0
if self._episode == self._env.bsuite_num_episodes:
self.flush()
def _log_bsuite_data(self):
"""Log summary data for bsuite."""
data = dict(
# Accumulated data.
steps=self._steps,
episode=self._episode,
total_return=self._total_return,
# Most-recent-episode data.
episode_len=self._episode_len,
episode_return=self._episode_return,
)
# Environment-specific metadata used for scoring.
data.update(self._env.bsuite_info())
self._logger.write(data)
@property
def raw_env(self):
# Recursively unwrap until we reach the true 'raw' env.
wrapped = self._env
if hasattr(wrapped, 'raw_env'):
return wrapped.raw_env
return wrapped
def __getattr__(self, attr):
"""Delegate attribute access to underlying environment."""
return getattr(self._env, attr)
def _logarithmic_logging(episode: int,
ratios: Optional[Sequence[float]] = None) -> bool:
"""Returns `True` only at specific ratios of 10**exponent."""
if ratios is None:
ratios = [1., 1.2, 1.4, 1.7, 2., 2.5, 3., 4., 5., 6., 7., 8., 9., 10.]
exponent = np.floor(np.log10(np.maximum(1, episode)))
special_vals = [10**exponent * ratio for ratio in ratios]
return any(episode == val for val in special_vals)
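# --- Illustrative note (added for exposition; not part of the original file) ---
# With the default ratios, logging fires at counts 1-10, then 12, 14, 17, 20,
# 25, 30, 40, ..., 100, then 120, 140, 170, 200, 250, ... i.e. roughly
# logarithmically spaced checkpoints throughout the run.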
class ImageObservation(dm_env.Environment):
"""Environment wrapper to convert observations to an image-like format."""
def __init__(self, env: dm_env.Environment, shape: Sequence[int]):
self._env = env
self._shape = shape
def observation_spec(self):
spec = self._env.observation_spec()
return specs.Array(shape=self._shape, dtype=spec.dtype, name=spec.name)
def action_spec(self):
return self._env.action_spec()
def reset(self):
timestep = self._env.reset()
return timestep._replace(
observation=to_image(self._shape, timestep.observation))
def step(self, action):
timestep = self._env.step(action)
return timestep._replace(
observation=to_image(self._shape, timestep.observation))
def __getattr__(self, attr):
"""Delegate attribute access to underlying environment."""
return getattr(self._env, attr)
def _small_state_to_image(shape: Sequence[int],
observation: np.ndarray) -> np.ndarray:
"""Converts a small state into an image-like format."""
result = np.empty(shape=shape, dtype=observation.dtype)
size = observation.size
flattened = observation.ravel()
# Explicitly handle small observation dimensions separately
if size == 1:
result[:] = flattened[0]
elif size == 2:
result[:, :shape[1] // 2] = flattened[0]
result[:, shape[1] // 2:] = flattened[1]
elif size == 3 or size == 4:
    # Top-left quadrant (rows are the first axis).
    result[:shape[0] // 2, :shape[1] // 2] = flattened[0]
    # Bottom-left quadrant.
    result[shape[0] // 2:, :shape[1] // 2] = flattened[1]
    # Top-right quadrant.
    result[:shape[0] // 2, shape[1] // 2:] = flattened[2]
    # Bottom-right quadrant.
    result[shape[0] // 2:, shape[1] // 2:] = flattened[-1]
else:
raise ValueError('Hand-crafted rule only for small state observation.')
return result
def _interpolate_to_image(shape: Sequence[int],
observation: np.ndarray) -> np.ndarray:
"""Converts observation to desired shape using an interpolation."""
result = np.empty(shape=shape, dtype=observation.dtype)
if len(observation.shape) == 1:
observation = np.expand_dims(observation, 0)
# Interpolate the image and broadcast over all trailing channels.
plane_image = transform.resize(observation, shape[:2], preserve_range=True)
while plane_image.ndim < len(shape):
plane_image = np.expand_dims(plane_image, -1)
result[:, :] = plane_image
return result
def to_image(shape: Sequence[int], observation: np.ndarray) -> np.ndarray:
"""Converts a bsuite observation into an image-like format.
Example usage, converting a 3-element array into a stacked Atari-like format:
observation = to_image((84, 84, 4), np.array([1, 2, 0]))
Args:
shape: A sequence containing the desired output shape (length >= 2).
observation: A numpy array containing the observation data.
Returns:
A numpy array with shape `shape` and dtype matching the dtype of
`observation`. The entries in this array are tiled from `observation`'s
entries.
"""
assert len(shape) >= 2
if observation.size <= 4:
return _small_state_to_image(shape, observation)
elif len(observation.shape) <= 2:
return _interpolate_to_image(shape, observation)
else:
raise ValueError(
'Cannot convert observation shape {} to desired shape {}'.format(
observation.shape, shape))
class RewardNoise(environments.Environment):
"""Reward Noise environment wrapper."""
def __init__(self,
env: environments.Environment,
noise_scale: float,
seed: Optional[int] = None):
"""Builds the Reward Noise environment wrapper.
Args:
env: An environment whose rewards to perturb.
noise_scale: Standard deviation of gaussian noise on rewards.
seed: Optional seed for numpy's random number generator (RNG).
"""
super(RewardNoise, self).__init__()
self._env = env
self._noise_scale = noise_scale
self._rng = np.random.RandomState(seed)
def reset(self):
return self._env.reset()
def step(self, action):
return self._add_reward_noise(self._env.step(action))
def _add_reward_noise(self, timestep: dm_env.TimeStep):
if timestep.first():
return timestep
reward = timestep.reward + self._noise_scale * self._rng.randn()
return dm_env.TimeStep(
step_type=timestep.step_type,
reward=reward,
discount=timestep.discount,
observation=timestep.observation)
def observation_spec(self):
return self._env.observation_spec()
def action_spec(self):
return self._env.action_spec()
@property
def raw_env(self):
# Recursively unwrap until we reach the true 'raw' env.
wrapped = self._env
if hasattr(wrapped, 'raw_env'):
return wrapped.raw_env
return wrapped
def _step(self, action: int) -> dm_env.TimeStep:
raise NotImplementedError('Please call step() instead of _step().')
def _reset(self) -> dm_env.TimeStep:
raise NotImplementedError('Please call reset() instead of _reset().')
def bsuite_info(self) -> Dict[str, Any]:
return self._env.bsuite_info()
def __getattr__(self, attr):
"""Delegate attribute access to underlying environment."""
return getattr(self._env, attr)
class RewardScale(environments.Environment):
"""Reward Scale environment wrapper."""
def __init__(self,
env: environments.Environment,
reward_scale: float,
seed: Optional[int] = None):
"""Builds the Reward Scale environment wrapper.
Args:
env: Environment whose rewards to rescale.
reward_scale: Rescaling for rewards.
seed: Optional seed for numpy's random number generator (RNG).
"""
super(RewardScale, self).__init__()
self._env = env
self._reward_scale = reward_scale
self._rng = np.random.RandomState(seed)
def reset(self):
return self._env.reset()
def step(self, action):
return self._rescale_rewards(self._env.step(action))
def _rescale_rewards(self, timestep: dm_env.TimeStep):
if timestep.first():
return timestep
reward = timestep.reward * self._reward_scale
return dm_env.TimeStep(
step_type=timestep.step_type,
reward=reward,
discount=timestep.discount,
observation=timestep.observation)
def observation_spec(self):
return self._env.observation_spec()
def action_spec(self):
return self._env.action_spec()
def _step(self, action: int) -> dm_env.TimeStep:
raise NotImplementedError('Please call step() instead of _step().')
def _reset(self) -> dm_env.TimeStep:
raise NotImplementedError('Please call reset() instead of _reset().')
@property
def raw_env(self):
# Recursively unwrap until we reach the true 'raw' env.
wrapped = self._env
if hasattr(wrapped, 'raw_env'):
return wrapped.raw_env
return wrapped
def bsuite_info(self) -> Dict[str, Any]:
return self._env.bsuite_info()
def __getattr__(self, attr):
"""Delegate attribute access to underlying environment."""
return getattr(self._env, attr)
|
bsuite-master
|
bsuite/utils/wrappers.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""bsuite adapter for OpenAI gym run-loops."""
from typing import Any, Dict, Optional, Tuple, Union
import dm_env
from dm_env import specs
import gym
from gym import spaces
import numpy as np
# OpenAI gym step format = obs, reward, is_finished, other_info
_GymTimestep = Tuple[np.ndarray, float, bool, Dict[str, Any]]
class GymFromDMEnv(gym.Env):
"""A wrapper that converts a dm_env.Environment to an OpenAI gym.Env."""
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self, env: dm_env.Environment):
self._env = env # type: dm_env.Environment
self._last_observation = None # type: Optional[np.ndarray]
self.viewer = None
self.game_over = False # Needed for Dopamine agents.
def step(self, action: int) -> _GymTimestep:
timestep = self._env.step(action)
self._last_observation = timestep.observation
reward = timestep.reward or 0.
if timestep.last():
self.game_over = True
return timestep.observation, reward, timestep.last(), {}
def reset(self) -> np.ndarray:
self.game_over = False
timestep = self._env.reset()
self._last_observation = timestep.observation
return timestep.observation
def render(self, mode: str = 'rgb_array') -> Union[np.ndarray, bool]:
if self._last_observation is None:
raise ValueError('Environment not ready to render. Call reset() first.')
if mode == 'rgb_array':
return self._last_observation
if mode == 'human':
if self.viewer is None:
# pylint: disable=import-outside-toplevel
# pylint: disable=g-import-not-at-top
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(self._last_observation)
return self.viewer.isopen
@property
def action_space(self) -> spaces.Discrete:
action_spec = self._env.action_spec() # type: specs.DiscreteArray
return spaces.Discrete(action_spec.num_values)
@property
def observation_space(self) -> spaces.Box:
obs_spec = self._env.observation_spec() # type: specs.Array
if isinstance(obs_spec, specs.BoundedArray):
return spaces.Box(
low=float(obs_spec.minimum),
high=float(obs_spec.maximum),
shape=obs_spec.shape,
dtype=obs_spec.dtype)
return spaces.Box(
low=-float('inf'),
high=float('inf'),
shape=obs_spec.shape,
dtype=obs_spec.dtype)
@property
def reward_range(self) -> Tuple[float, float]:
reward_spec = self._env.reward_spec()
if isinstance(reward_spec, specs.BoundedArray):
return reward_spec.minimum, reward_spec.maximum
return -float('inf'), float('inf')
def __getattr__(self, attr):
"""Delegate attribute access to underlying environment."""
return getattr(self._env, attr)
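# --- Illustrative usage sketch (added for exposition; not part of the file) ---
# Given any dm_env.Environment `dm_environment` with a DiscreteArray action
# spec, the wrapper behaves like a standard gym.Env:
#   gym_env = GymFromDMEnv(dm_environment)
#   obs = gym_env.reset()
#   obs, reward, done, info = gym_env.step(gym_env.action_space.sample())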
def space2spec(space: gym.Space, name: Optional[str] = None):
"""Converts an OpenAI Gym space to a dm_env spec or nested structure of specs.
Box, MultiBinary and MultiDiscrete Gym spaces are converted to BoundedArray
specs. Discrete OpenAI spaces are converted to DiscreteArray specs. Tuple and
Dict spaces are recursively converted to tuples and dictionaries of specs.
Args:
space: The Gym space to convert.
name: Optional name to apply to all return spec(s).
Returns:
A dm_env spec or nested structure of specs, corresponding to the input
space.
"""
if isinstance(space, spaces.Discrete):
return specs.DiscreteArray(num_values=space.n, dtype=space.dtype, name=name)
elif isinstance(space, spaces.Box):
return specs.BoundedArray(shape=space.shape, dtype=space.dtype,
minimum=space.low, maximum=space.high, name=name)
elif isinstance(space, spaces.MultiBinary):
return specs.BoundedArray(shape=space.shape, dtype=space.dtype, minimum=0.0,
maximum=1.0, name=name)
elif isinstance(space, spaces.MultiDiscrete):
return specs.BoundedArray(shape=space.shape, dtype=space.dtype,
minimum=np.zeros(space.shape),
maximum=space.nvec, name=name)
elif isinstance(space, spaces.Tuple):
return tuple(space2spec(s, name) for s in space.spaces)
elif isinstance(space, spaces.Dict):
return {key: space2spec(value, name) for key, value in space.spaces.items()}
else:
raise ValueError('Unexpected gym space: {}'.format(space))
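# --- Illustrative note (added for exposition; not part of the original file) ---
# For example:
#   space2spec(spaces.Discrete(3))  # -> DiscreteArray with num_values=3
#   space2spec(spaces.Box(low=-1., high=1., shape=(4,)))
#   # -> BoundedArray with shape=(4,), minimum=-1., maximum=1.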
class DMEnvFromGym(dm_env.Environment):
"""A wrapper to convert an OpenAI Gym environment to a dm_env.Environment."""
def __init__(self, gym_env: gym.Env):
self.gym_env = gym_env
# Convert gym action and observation spaces to dm_env specs.
self._observation_spec = space2spec(self.gym_env.observation_space,
name='observations')
self._action_spec = space2spec(self.gym_env.action_space, name='actions')
self._reset_next_step = True
def reset(self) -> dm_env.TimeStep:
self._reset_next_step = False
observation = self.gym_env.reset()
return dm_env.restart(observation)
def step(self, action: int) -> dm_env.TimeStep:
if self._reset_next_step:
return self.reset()
# Convert the gym step result to a dm_env TimeStep.
observation, reward, done, info = self.gym_env.step(action)
self._reset_next_step = done
if done:
is_truncated = info.get('TimeLimit.truncated', False)
if is_truncated:
return dm_env.truncation(reward, observation)
else:
return dm_env.termination(reward, observation)
else:
return dm_env.transition(reward, observation)
def close(self):
self.gym_env.close()
def observation_spec(self):
return self._observation_spec
def action_spec(self):
return self._action_spec
|
bsuite-master
|
bsuite/utils/gym_wrapper.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Collection of smoothers designed for plotnine ggplot."""
from typing import Any, Dict, Optional
import numpy as np
import pandas as pd
import scipy.stats as stats
def _mean(data: pd.DataFrame, span: float, edge_tolerance: float = 0.):
"""Compute rolling mean of data via histogram, smooth endpoints.
Args:
data: pandas dataframe including columns ['x', 'y'] sorted by 'x'
span: float in (0, 1) proportion of data to include.
edge_tolerance: float of how much forgiveness to give to points that are
close to the histogram boundary (in proportion of bin width).
Returns:
output_data: pandas dataframe with 'x', 'y' and 'stderr'
"""
num_bins = np.ceil(1. / span).astype(np.int32)
count, edges = np.histogram(data.x, bins=num_bins)
# Include points that may be slightly on wrong side of histogram bin.
tol = edge_tolerance * (edges[1] - edges[0])
x_list = []
y_list = []
stderr_list = []
for i, num_obs in enumerate(count):
if num_obs > 0:
sub_df = data.loc[(data.x > edges[i] - tol)
& (data.x < edges[i + 1] + tol)]
x_list.append(sub_df.x.mean())
y_list.append(sub_df.y.mean())
stderr_list.append(sub_df.y.std() / np.sqrt(len(sub_df)))
return pd.DataFrame(dict(x=x_list, y=y_list, stderr=stderr_list))
def mean(data: pd.DataFrame,
xseq,
span: float = 0.1,
se: bool = True,
level: float = 0.95,
method_args: Optional[Dict[str, Any]] = None,
**params) -> pd.DataFrame:
"""Computes the rolling mean over a portion of the data.
Confidence intervals are given by approx Gaussian standard error bars.
Unused/strangely named arguments are kept here for consistency with the rest
of the plotnine package.
Args:
data: pandas dataframe passed to the smoother
xseq: sequence of x at which to output prediction (unused)
span: proportion of the data to use in lowess smoother.
se: boolean for whether to show confidence interval.
level: level in (0,1) for confidence standard errorbars
method_args: other parameters that get passed through plotnine to method
(edge_tolerance=0.05, num_boot=20)
**params: dictionary other parameters passed to smoother (unused)
Returns:
output_data: pd Dataframe with x, y, ymin, ymax for confidence smooth.
"""
del xseq # Unused.
del params # Unused.
if method_args is None:
method_args = {}
edge_tolerance = method_args.get('edge_tolerance', 0.05)
output_data = _mean(data, span, edge_tolerance)
if not se:
return output_data
num_std = stats.norm.interval(level)[1] # Gaussian approx to CIs
if 'group_smooth' in data.columns:
# Perform bootstrapping over whole line/timeseries at once. Each unique
# element of 'group_smooth' is treated as an atomic unit for bootstrap.
data = data.set_index('group_smooth')
num_boot = method_args.get('num_boot', 20)
unique_ids = data.index.unique()
boot_preds = np.ones([len(output_data), num_boot]) * np.nan
for n in range(num_boot):
boot_inds = np.random.choice(unique_ids, len(unique_ids))
boot_data = data.loc[boot_inds].copy()
boot_data = boot_data.sort_values('x')
boot_out = _mean(boot_data, span, edge_tolerance)
boot_preds[:, n] = np.interp(output_data.x, boot_out.x, boot_out.y)
stddev = np.std(boot_preds, axis=1, ddof=2)
output_data['ymin'] = output_data.y - num_std * stddev
output_data['ymax'] = output_data.y + num_std * stddev
else:
# Just use the "estimated stderr" from each bin 1 / sqrt(n)
output_data['ymin'] = output_data.y - num_std * output_data.stderr
output_data['ymax'] = output_data.y + num_std * output_data.stderr
return output_data
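# --- Illustrative usage note (added for exposition; not part of the file) ---
# This smoother is designed to be passed as the `method` of plotnine's
# geom_smooth, e.g. gg.geom_smooth(method=mean, span=0.1), which is how the
# plotting utilities above consume it (as `smoothers.mean`).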
|
bsuite-master
|
bsuite/utils/smoothers.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/baselines/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple agent-environment training loop."""
from bsuite.baselines import base
from bsuite.logging import terminal_logging
import dm_env
def run(agent: base.Agent,
environment: dm_env.Environment,
num_episodes: int,
verbose: bool = False) -> None:
"""Runs an agent on an environment.
Note that for bsuite environments, logging is handled internally.
Args:
agent: The agent to train and evaluate.
environment: The environment to train on.
num_episodes: Number of episodes to train for.
verbose: Whether to also log to terminal.
"""
if verbose:
environment = terminal_logging.wrap_environment(
environment, log_every=True) # pytype: disable=wrong-arg-types
for _ in range(num_episodes):
# Run an episode.
timestep = environment.reset()
while not timestep.last():
# Generate an action from the agent's policy.
action = agent.select_action(timestep)
# Step the environment.
new_timestep = environment.step(action)
# Tell the agent about what just happened.
agent.update(timestep, action, new_timestep)
# Book-keeping.
timestep = new_timestep
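# --- Illustrative usage sketch (added for exposition; not part of the file) ---
# Given a bsuite environment `env` (which logs its own results) and any object
# `agent` implementing base.Agent:
#   run(agent=agent, environment=env, num_episodes=env.bsuite_num_episodes)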
|
bsuite-master
|
bsuite/baselines/experiment.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple agent interface."""
import abc
import dm_env
Action = int # Only discrete-action agents for now.
class Agent(abc.ABC):
"""An agent consists of an action-selection mechanism and an update rule."""
@abc.abstractmethod
def select_action(self, timestep: dm_env.TimeStep) -> Action:
"""Takes in a timestep, samples from agent's policy, returns an action."""
@abc.abstractmethod
def update(
self,
timestep: dm_env.TimeStep,
action: Action,
new_timestep: dm_env.TimeStep,
) -> None:
"""Updates the agent given a transition."""
|
bsuite-master
|
bsuite/baselines/base.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sequence buffer."""
from absl.testing import absltest
from bsuite.baselines.utils import sequence
import dm_env
from dm_env import specs
import numpy as np
class BufferTest(absltest.TestCase):
"""Tests for the sequence buffer class."""
def test_buffer(self):
# Given a buffer and some dummy data...
max_sequence_length = 10
obs_shape = (3, 3)
buffer = sequence.Buffer(
obs_spec=specs.Array(obs_shape, dtype=float),
action_spec=specs.Array((), dtype=int),
max_sequence_length=max_sequence_length)
dummy_step = dm_env.transition(observation=np.zeros(obs_shape), reward=0.)
# If we add `max_sequence_length` items to the buffer...
for _ in range(max_sequence_length):
buffer.append(dummy_step, 0, dummy_step)
# Then the buffer should now be full.
self.assertTrue(buffer.full())
# Any further appends should throw an error.
with self.assertRaises(ValueError):
buffer.append(dummy_step, 0, dummy_step)
# If we now drain this trajectory from the buffer...
trajectory = buffer.drain()
# The `observations` sequence should have length `T + 1`.
self.assertLen(trajectory.observations, max_sequence_length + 1)
# All other sequences should have length `T`.
self.assertLen(trajectory.actions, max_sequence_length)
self.assertLen(trajectory.rewards, max_sequence_length)
self.assertLen(trajectory.discounts, max_sequence_length)
# The buffer should now be empty.
self.assertTrue(buffer.empty())
# A second call to drain() should throw an error, since the buffer is empty.
with self.assertRaises(ValueError):
buffer.drain()
# If we now append another transition...
buffer.append(dummy_step, 0, dummy_step)
# And immediately drain the buffer...
trajectory = buffer.drain()
# We should have a valid partial trajectory of length T=1.
self.assertLen(trajectory.observations, 2)
self.assertLen(trajectory.actions, 1)
self.assertLen(trajectory.rewards, 1)
self.assertLen(trajectory.discounts, 1)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/baselines/utils/sequence_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple windowed buffer for accumulating sequences."""
from typing import NamedTuple
from bsuite.baselines import base
import dm_env
from dm_env import specs
import numpy as np
class Trajectory(NamedTuple):
"""A trajectory is a sequence of observations, actions, rewards, discounts.
Note: `observations` should be of length T+1 to make up the final transition.
"""
# TODO(b/152889430): Make this generic once it is supported by Pytype.
observations: np.ndarray # [T + 1, ...]
actions: np.ndarray # [T]
rewards: np.ndarray # [T]
discounts: np.ndarray # [T]
class Buffer:
"""A simple buffer for accumulating trajectories."""
_observations: np.ndarray
_actions: np.ndarray
_rewards: np.ndarray
_discounts: np.ndarray
_max_sequence_length: int
_needs_reset: bool = True
_t: int = 0
def __init__(
self,
obs_spec: specs.Array,
action_spec: specs.Array,
max_sequence_length: int,
):
"""Pre-allocates buffers of numpy arrays to hold the sequences."""
self._observations = np.zeros(
shape=(max_sequence_length + 1, *obs_spec.shape), dtype=obs_spec.dtype)
self._actions = np.zeros(
shape=(max_sequence_length, *action_spec.shape),
dtype=action_spec.dtype)
self._rewards = np.zeros(max_sequence_length, dtype=np.float32)
self._discounts = np.zeros(max_sequence_length, dtype=np.float32)
self._max_sequence_length = max_sequence_length
def append(
self,
timestep: dm_env.TimeStep,
action: base.Action,
new_timestep: dm_env.TimeStep,
):
"""Appends an observation, action, reward, and discount to the buffer."""
if self.full():
raise ValueError('Cannot append; sequence buffer is full.')
# Start a new sequence with an initial observation, if required.
if self._needs_reset:
self._t = 0
self._observations[self._t] = timestep.observation
self._needs_reset = False
# Append (o, a, r, d) to the sequence buffer.
self._observations[self._t + 1] = new_timestep.observation
self._actions[self._t] = action
self._rewards[self._t] = new_timestep.reward
self._discounts[self._t] = new_timestep.discount
self._t += 1
# Don't accumulate sequences that cross episode boundaries.
# It is up to the caller to drain the buffer in this case.
if new_timestep.last():
self._needs_reset = True
def drain(self) -> Trajectory:
"""Empties the buffer and returns the (possibly partial) trajectory."""
if self.empty():
raise ValueError('Cannot drain; sequence buffer is empty.')
trajectory = Trajectory(
self._observations[:self._t + 1],
self._actions[:self._t],
self._rewards[:self._t],
self._discounts[:self._t],
)
self._t = 0 # Mark sequences as consumed.
self._needs_reset = True
return trajectory
def empty(self) -> bool:
"""Returns whether or not the trajectory buffer is empty."""
return self._t == 0
def full(self) -> bool:
"""Returns whether or not the trajectory buffer is full."""
return self._t == self._max_sequence_length
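# Illustrative usage sketch only (not part of the library): fill a small
# buffer with dummy transitions and drain the resulting trajectory, mirroring
# the unit test above. The specs and the dummy timestep are placeholders.
if __name__ == '__main__':
  example_buffer = Buffer(
      obs_spec=specs.Array((4,), dtype=float),
      action_spec=specs.Array((), dtype=int),
      max_sequence_length=5)
  dummy_step = dm_env.transition(observation=np.zeros(4), reward=1.0)
  for _ in range(3):
    example_buffer.append(dummy_step, 0, dummy_step)
  trajectory = example_buffer.drain()
  # `observations` has length T + 1 = 4; actions/rewards/discounts have length 3.
  print(trajectory.observations.shape, trajectory.actions.shape)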
|
bsuite-master
|
bsuite/baselines/utils/sequence.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/baselines/utils/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.baselines.replay."""
from absl.testing import absltest
from bsuite.baselines.utils import replay as replay_lib
import numpy as np
class BasicReplayTest(absltest.TestCase):
def test_end_to_end(self):
shapes = (10, 10, 3), ()
capacity = 5
def generate_sample():
return [np.random.randint(0, 256, size=(10, 10, 3), dtype=np.uint8),
np.random.uniform(size=())]
replay = replay_lib.Replay(capacity=capacity)
# Does it crash if we sample when there's barely any data?
sample = generate_sample()
replay.add(sample)
samples = replay.sample(size=2)
for sample, shape in zip(samples, shapes):
self.assertEqual(sample.shape, (2,) + shape)
# Fill to capacity.
for _ in range(capacity - 1):
replay.add(generate_sample())
samples = replay.sample(size=3)
for sample, shape in zip(samples, shapes):
self.assertEqual(sample.shape, (3,) + shape)
replay.add(generate_sample())
samples = replay.sample(size=capacity)
for sample, shape in zip(samples, shapes):
self.assertEqual(sample.shape, (capacity,) + shape)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/baselines/utils/replay_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple, uniformly sampled replay buffer."""
from typing import Any, Optional, Sequence
import numpy as np
class Replay:
"""Uniform replay buffer. Allocates all required memory at initialization."""
_data: Optional[Sequence[np.ndarray]]
_capacity: int
_num_added: int
def __init__(self, capacity: int):
"""Initializes a new `Replay`.
Args:
capacity: The maximum number of items allowed in the replay. Adding
items to a replay that is at maximum capacity will overwrite the oldest
items.
"""
self._data = None
self._capacity = capacity
self._num_added = 0
def add(self, items: Sequence[Any]):
"""Adds a single sequence of items to the replay.
Args:
items: Sequence of items to add. Does not handle batched or nested items.
"""
if self._data is None:
self._preallocate(items)
for slot, item in zip(self._data, items):
slot[self._num_added % self._capacity] = item
self._num_added += 1
def sample(self, size: int) -> Sequence[np.ndarray]:
"""Returns a transposed/stacked minibatch. Each array has shape [B, ...]."""
indices = np.random.randint(self.size, size=size)
return [slot[indices] for slot in self._data]
  def reset(self):
    """Resets the replay, discarding any stored items."""
    self._data = None
    self._num_added = 0
@property
def size(self) -> int:
return min(self._capacity, self._num_added)
@property
def fraction_filled(self) -> float:
return self.size / self._capacity
def _preallocate(self, items: Sequence[Any]):
"""Assume flat structure of items."""
as_array = []
for item in items:
if item is None:
raise ValueError('Cannot store `None` objects in replay.')
as_array.append(np.asarray(item))
self._data = [np.zeros(dtype=x.dtype, shape=(self._capacity,) + x.shape)
for x in as_array]
def __repr__(self):
return 'Replay: size={}, capacity={}, num_added={}'.format(
self.size, self._capacity, self._num_added)
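# Illustrative usage sketch only (not part of the library): add a few
# (observation, action) pairs and sample a stacked minibatch.
if __name__ == '__main__':
  example_replay = Replay(capacity=100)
  for t in range(10):
    example_replay.add([np.full((4,), t, dtype=np.float32), t % 2])
  observations, actions = example_replay.sample(size=8)
  print(observations.shape, actions.shape)  # (8, 4) and (8,).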
|
bsuite-master
|
bsuite/baselines/utils/replay.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example of generating a full set of bsuite results using multiprocessing."""
from concurrent import futures
import multiprocessing
from typing import Callable, Optional, Sequence
import termcolor
import tqdm
BsuiteId = str
def map_mpi(
run_fn: Callable[[BsuiteId], BsuiteId],
bsuite_ids: Sequence[BsuiteId],
num_processes: Optional[int] = None,
):
"""Maps `run_fn` over `bsuite_ids`, using `num_processes` in parallel."""
num_processes = num_processes or multiprocessing.cpu_count()
num_experiments = len(bsuite_ids)
message = """
Experiment info
---------------
Num experiments: {num_experiments}
Num worker processes: {num_processes}
""".format(
num_processes=num_processes, num_experiments=num_experiments)
termcolor.cprint(message, color='blue', attrs=['bold'])
# Create a pool of processes, dispatch the experiments to them, show progress.
pool = futures.ProcessPoolExecutor(num_processes)
progress_bar = tqdm.tqdm(total=num_experiments)
for bsuite_id in pool.map(run_fn, bsuite_ids):
description = '[Last finished: {}]'.format(bsuite_id)
progress_bar.set_description(termcolor.colored(description, color='green'))
progress_bar.update()
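# Illustrative usage sketch only (not part of the module): map a trivial run
# function over a few bsuite_ids using two worker processes. A real caller
# would pass a function that trains an agent and returns the bsuite_id.
def _noop_run(bsuite_id: BsuiteId) -> BsuiteId:
  """Placeholder stand-in for a real training function."""
  return bsuite_id

if __name__ == '__main__':
  map_mpi(_noop_run, ['catch/0', 'catch/1', 'catch/2'], num_processes=2)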
|
bsuite-master
|
bsuite/baselines/utils/pool.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/baselines/third_party/__init__.py
|
# pytype: skip-file
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Trains an OpenAI Baselines DQN agent on bsuite.
Note that OpenAI Gym is not installed with bsuite by default.
See also github.com/openai/baselines for more information.
"""
from absl import app
from absl import flags
from baselines import deepq
import bsuite
from bsuite import sweep
from bsuite.baselines.utils import pool
from bsuite.logging import terminal_logging
from bsuite.utils import gym_wrapper
# Internal imports.
# Experiment flags.
flags.DEFINE_string(
'bsuite_id', 'catch/0', 'BSuite identifier. '
'This global flag can be used to control which environment is loaded.')
flags.DEFINE_string('save_path', '/tmp/bsuite', 'where to save bsuite results')
flags.DEFINE_enum('logging_mode', 'csv', ['csv', 'sqlite', 'terminal'],
'which form of logging to use for bsuite results')
flags.DEFINE_boolean('overwrite', False, 'overwrite csv logging if found')
flags.DEFINE_boolean('verbose', True, 'whether to log to std output')
flags.DEFINE_integer('num_hidden_layers', 2, 'number of hidden layers')
flags.DEFINE_integer('num_units', 50, 'number of units per hidden layer')
flags.DEFINE_integer('batch_size', 32, 'size of batches sampled from replay')
flags.DEFINE_float('agent_discount', .99, 'discounting on the agent side')
flags.DEFINE_integer('replay_capacity', 100000, 'size of the replay buffer')
flags.DEFINE_integer('min_replay_size', 128, 'min replay size before training.')
flags.DEFINE_integer('sgd_period', 1, 'steps between online net updates')
flags.DEFINE_integer('target_update_period', 4,
'steps between target net updates')
flags.DEFINE_float('learning_rate', 1e-3, 'learning rate for optimizer')
flags.DEFINE_float('epsilon', 0.05, 'fraction of exploratory random actions')
flags.DEFINE_integer('seed', 42, 'seed for random number generation')
flags.DEFINE_integer('num_episodes', None, 'Number of episodes to run for.')
flags.DEFINE_integer('total_timesteps', 10_000_000,
'maximum steps if not caught by bsuite_num_episodes')
FLAGS = flags.FLAGS
def run(bsuite_id: str) -> str:
"""Runs a DQN agent on a given bsuite environment, logging to CSV."""
raw_env = bsuite.load_and_record(
bsuite_id=bsuite_id,
save_path=FLAGS.save_path,
logging_mode=FLAGS.logging_mode,
overwrite=FLAGS.overwrite,
)
if FLAGS.verbose:
raw_env = terminal_logging.wrap_environment(raw_env, log_every=True) # pytype: disable=wrong-arg-types
env = gym_wrapper.GymFromDMEnv(raw_env)
num_episodes = FLAGS.num_episodes or getattr(raw_env, 'bsuite_num_episodes')
def callback(lcl, unused_glb):
# Terminate after `num_episodes`.
try:
return lcl['num_episodes'] > num_episodes
except KeyError:
return False
# Note: we should never run for this many steps as we end after `num_episodes`
total_timesteps = FLAGS.total_timesteps
deepq.learn(
env=env,
network='mlp',
hiddens=[FLAGS.num_units] * FLAGS.num_hidden_layers,
batch_size=FLAGS.batch_size,
lr=FLAGS.learning_rate,
total_timesteps=total_timesteps,
buffer_size=FLAGS.replay_capacity,
exploration_fraction=1./total_timesteps, # i.e. immediately anneal.
exploration_final_eps=FLAGS.epsilon, # constant epsilon.
print_freq=None, # pylint: disable=wrong-arg-types
learning_starts=FLAGS.min_replay_size,
target_network_update_freq=FLAGS.target_update_period,
callback=callback, # pytype: disable=wrong-arg-types
gamma=FLAGS.agent_discount,
checkpoint_freq=None,
)
return bsuite_id
def main(argv):
# Parses whether to run a single bsuite_id, or multiprocess sweep.
del argv # Unused.
bsuite_id = FLAGS.bsuite_id
if bsuite_id in sweep.SWEEP:
print(f'Running single experiment: bsuite_id={bsuite_id}.')
run(bsuite_id)
elif hasattr(sweep, bsuite_id):
bsuite_sweep = getattr(sweep, bsuite_id)
print(f'Running sweep over bsuite_id in sweep.{bsuite_sweep}')
FLAGS.verbose = False
pool.map_mpi(run, bsuite_sweep)
else:
raise ValueError(f'Invalid flag: bsuite_id={bsuite_id}.')
if __name__ == '__main__':
app.run(main)
|
bsuite-master
|
bsuite/baselines/third_party/openai_dqn/run.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/baselines/third_party/openai_dqn/__init__.py
|
# pytype: skip-file
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Trains an OpenAI Baselines PPO agent on bsuite.
Note that OpenAI Gym is not installed with bsuite by default.
See also github.com/openai/baselines for more information.
"""
from absl import app
from absl import flags
from baselines.common.vec_env import dummy_vec_env
from baselines.ppo2 import ppo2
import bsuite
from bsuite import sweep
from bsuite.baselines.utils import pool
from bsuite.logging import terminal_logging
from bsuite.utils import gym_wrapper
# Internal imports.
# Experiment flags.
flags.DEFINE_string(
'bsuite_id', 'catch/0', 'BSuite identifier. '
'This global flag can be used to control which environment is loaded.')
flags.DEFINE_string('save_path', '/tmp/bsuite', 'where to save bsuite results')
flags.DEFINE_enum('logging_mode', 'csv', ['csv', 'sqlite', 'terminal'],
'which form of logging to use for bsuite results')
flags.DEFINE_boolean('overwrite', False, 'overwrite csv logging if found')
flags.DEFINE_boolean('verbose', True, 'whether to log to std output')
flags.DEFINE_string('network', 'mlp', 'name of network architecture')
flags.DEFINE_float('agent_discount', .99, 'discounting on the agent side')
flags.DEFINE_integer('nsteps', 100, 'number of steps per ppo rollout')
flags.DEFINE_integer('total_timesteps', 1_000_000, 'total steps for experiment')
flags.DEFINE_float('learning_rate', 1e-3, 'learning rate for optimizer')
FLAGS = flags.FLAGS
def run(bsuite_id: str) -> str:
"""Runs a PPO agent on a given bsuite environment, logging to CSV."""
def _load_env():
raw_env = bsuite.load_and_record(
bsuite_id=bsuite_id,
save_path=FLAGS.save_path,
logging_mode=FLAGS.logging_mode,
overwrite=FLAGS.overwrite,
)
if FLAGS.verbose:
raw_env = terminal_logging.wrap_environment(raw_env, log_every=True) # pytype: disable=wrong-arg-types
return gym_wrapper.GymFromDMEnv(raw_env)
env = dummy_vec_env.DummyVecEnv([_load_env])
ppo2.learn(
env=env,
network=FLAGS.network,
lr=FLAGS.learning_rate,
total_timesteps=FLAGS.total_timesteps, # make sure to run enough steps
nsteps=FLAGS.nsteps,
gamma=FLAGS.agent_discount,
)
return bsuite_id
def main(argv):
# Parses whether to run a single bsuite_id, or multiprocess sweep.
del argv # Unused.
bsuite_id = FLAGS.bsuite_id
if bsuite_id in sweep.SWEEP:
print(f'Running single experiment: bsuite_id={bsuite_id}.')
run(bsuite_id)
elif hasattr(sweep, bsuite_id):
bsuite_sweep = getattr(sweep, bsuite_id)
print(f'Running sweep over bsuite_id in sweep.{bsuite_sweep}')
FLAGS.verbose = False
pool.map_mpi(run, bsuite_sweep)
else:
raise ValueError(f'Invalid flag: bsuite_id={bsuite_id}.')
if __name__ == '__main__':
app.run(main)
|
bsuite-master
|
bsuite/baselines/third_party/openai_ppo/run.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/baselines/third_party/openai_ppo/__init__.py
|
# pytype: skip-file
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Trains an Dopamine DQN agent on bsuite.
Note that Dopamine is not installed with bsuite by default.
See also github.com/google/dopamine for more information.
"""
from absl import app
from absl import flags
import bsuite
from bsuite import sweep
from bsuite.baselines.utils import pool
from bsuite.logging import terminal_logging
from bsuite.utils import gym_wrapper
from bsuite.utils import wrappers
from dopamine.agents.dqn import dqn_agent
from dopamine.discrete_domains import atari_lib
from dopamine.discrete_domains import run_experiment
import gym
import tensorflow.compat.v1 as tf
# Internal imports.
# Experiment flags.
flags.DEFINE_string(
'bsuite_id', 'catch/0', 'BSuite identifier. '
'This global flag can be used to control which environment is loaded.')
flags.DEFINE_string('save_path', '/tmp/bsuite', 'where to save bsuite results')
flags.DEFINE_enum('logging_mode', 'csv', ['csv', 'sqlite', 'terminal'],
'which form of logging to use for bsuite results')
flags.DEFINE_boolean('overwrite', False, 'overwrite csv logging if found')
# algorithm
flags.DEFINE_integer('num_episodes', None, 'number of episodes to run')
flags.DEFINE_integer('num_hidden_layers', 2, 'number of hidden layers')
flags.DEFINE_integer('num_units', 50, 'number of units per hidden layer')
flags.DEFINE_float('agent_discount', .99, 'discounting on the agent side')
flags.DEFINE_integer('replay_capacity', 100000, 'size of the replay buffer')
flags.DEFINE_integer('min_replay_size', 128, 'min replay size before training.')
flags.DEFINE_integer('sgd_period', 1, 'steps between online net updates')
flags.DEFINE_integer('target_update_period', 4,
'steps between target net updates')
flags.DEFINE_float('learning_rate', 1e-3, 'learning rate for optimizer')
flags.DEFINE_float('epsilon', 0.05, 'fraction of exploratory random actions')
flags.DEFINE_float('epsilon_decay_period', 1000,
'number of steps to anneal epsilon')
flags.DEFINE_integer('seed', 42, 'seed for random number generation')
flags.DEFINE_boolean('verbose', True, 'whether to log to std output')
flags.DEFINE_string('base_dir', '/tmp/dopamine', 'directory for dopamine logs')
FLAGS = flags.FLAGS
OBSERVATION_SHAPE = (20, 20)
def run(bsuite_id: str) -> str:
"""Runs Dopamine DQN on a given bsuite environment, logging to CSV."""
raw_env = bsuite.load_and_record(
bsuite_id=bsuite_id,
save_path=FLAGS.save_path,
logging_mode=FLAGS.logging_mode,
overwrite=FLAGS.overwrite,
)
class Network(tf.keras.Model):
"""Build deep network compatible with dopamine/discrete_domains/gym_lib."""
def __init__(self, num_actions: int, name='Network'):
super(Network, self).__init__(name=name)
self.forward_fn = tf.keras.Sequential(
[tf.keras.layers.Flatten()] +
[tf.keras.layers.Dense(FLAGS.num_units,
activation=tf.keras.activations.relu)
for _ in range(FLAGS.num_hidden_layers)] +
[tf.keras.layers.Dense(num_actions, activation=None)])
def call(self, state):
"""Creates the output tensor/op given the state tensor as input."""
x = tf.cast(state, tf.float32)
x = self.forward_fn(x)
return atari_lib.DQNNetworkType(x)
def create_agent(sess: tf.Session, environment: gym.Env, summary_writer=None):
"""Factory method for agent initialization in Dopmamine."""
del summary_writer
return dqn_agent.DQNAgent(
sess=sess,
num_actions=environment.action_space.n,
observation_shape=OBSERVATION_SHAPE,
observation_dtype=tf.float32,
stack_size=1,
network=Network,
gamma=FLAGS.agent_discount,
update_horizon=1,
min_replay_history=FLAGS.min_replay_size,
update_period=FLAGS.sgd_period,
target_update_period=FLAGS.target_update_period,
epsilon_decay_period=FLAGS.epsilon_decay_period,
epsilon_train=FLAGS.epsilon,
optimizer=tf.train.AdamOptimizer(FLAGS.learning_rate),
)
def create_environment() -> gym.Env:
"""Factory method for environment initialization in Dopmamine."""
env = wrappers.ImageObservation(raw_env, OBSERVATION_SHAPE)
if FLAGS.verbose:
env = terminal_logging.wrap_environment(env, log_every=True) # pytype: disable=wrong-arg-types
env = gym_wrapper.GymFromDMEnv(env)
env.game_over = False # Dopamine looks for this
return env
runner = run_experiment.Runner(
base_dir=FLAGS.base_dir,
create_agent_fn=create_agent,
create_environment_fn=create_environment,
)
num_episodes = FLAGS.num_episodes or getattr(raw_env, 'bsuite_num_episodes')
for _ in range(num_episodes):
runner._run_one_episode() # pylint: disable=protected-access
return bsuite_id
def main(argv):
# Parses whether to run a single bsuite_id, or multiprocess sweep.
del argv # Unused.
bsuite_id = FLAGS.bsuite_id
if bsuite_id in sweep.SWEEP:
print(f'Running single experiment: bsuite_id={bsuite_id}.')
run(bsuite_id)
elif hasattr(sweep, bsuite_id):
bsuite_sweep = getattr(sweep, bsuite_id)
print(f'Running sweep over bsuite_id in sweep.{bsuite_sweep}')
FLAGS.verbose = False
pool.map_mpi(run, bsuite_sweep)
else:
raise ValueError(f'Invalid flag: bsuite_id={bsuite_id}.')
if __name__ == '__main__':
app.run(main)
|
bsuite-master
|
bsuite/baselines/third_party/dopamine_dqn/run.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/baselines/third_party/dopamine_dqn/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Runs a random agent on a bsuite experiment."""
from absl import app
from absl import flags
import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines import random
from bsuite.baselines.utils import pool
# Internal imports.
# Experiment flags.
flags.DEFINE_string(
'bsuite_id', 'catch/0', 'BSuite identifier. '
'This global flag can be used to control which environment is loaded.')
flags.DEFINE_string('save_path', '/tmp/bsuite', 'where to save bsuite results')
flags.DEFINE_enum('logging_mode', 'csv', ['csv', 'sqlite', 'terminal'],
'which form of logging to use for bsuite results')
flags.DEFINE_boolean('overwrite', False, 'overwrite csv logging if found')
flags.DEFINE_integer('num_episodes', None, 'Overrides number of training eps.')
flags.DEFINE_integer('seed', 42, 'seed for random number generation')
flags.DEFINE_boolean('verbose', True, 'whether to log to std output')
FLAGS = flags.FLAGS
def run(bsuite_id: str) -> str:
"""Runs a random agent on a given bsuite environment, logging to CSV."""
env = bsuite.load_and_record(
bsuite_id=bsuite_id,
save_path=FLAGS.save_path,
logging_mode=FLAGS.logging_mode,
overwrite=FLAGS.overwrite,
)
agent = random.default_agent(obs_spec=env.observation_spec(),
action_spec=env.action_spec(),
seed=FLAGS.seed)
experiment.run(
agent=agent,
environment=env,
num_episodes=FLAGS.num_episodes or env.bsuite_num_episodes, # pytype: disable=attribute-error
verbose=FLAGS.verbose)
return bsuite_id
def main(argv):
# Parses whether to run a single bsuite_id, or multiprocess sweep.
del argv # Unused.
bsuite_id = FLAGS.bsuite_id
if bsuite_id in sweep.SWEEP:
print(f'Running single experiment: bsuite_id={bsuite_id}.')
run(bsuite_id)
elif hasattr(sweep, bsuite_id):
bsuite_sweep = getattr(sweep, bsuite_id)
print(f'Running sweep over bsuite_id in sweep.{bsuite_sweep}')
FLAGS.verbose = False
pool.map_mpi(run, bsuite_sweep)
else:
raise ValueError(f'Invalid flag: bsuite_id={bsuite_id}.')
if __name__ == '__main__':
app.run(main)
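# Hypothetical command-line usage (paths are placeholders; the flags are the
# ones defined above). Passing a sweep name such as SWEEP triggers the
# multiprocessing branch in main():
#   python3 run.py --bsuite_id=catch/0 --save_path=/tmp/bsuite --logging_mode=csv
#   python3 run.py --bsuite_id=SWEEP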
|
bsuite-master
|
bsuite/baselines/random/run.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""An agent that takes uniformly random actions."""
from bsuite.baselines.random.agent import default_agent
from bsuite.baselines.random.agent import Random
|
bsuite-master
|
bsuite/baselines/random/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""An agent that takes uniformly random actions."""
from typing import Optional
from bsuite.baselines import base
import dm_env
from dm_env import specs
import numpy as np
class Random(base.Agent):
"""A random agent."""
def __init__(self,
action_spec: specs.DiscreteArray,
seed: Optional[int] = None):
self._num_actions = action_spec.num_values
self._rng = np.random.RandomState(seed)
def select_action(self, timestep: dm_env.TimeStep) -> base.Action:
del timestep
return self._rng.randint(self._num_actions)
def update(self,
timestep: dm_env.TimeStep,
action: base.Action,
new_timestep: dm_env.TimeStep) -> None:
del timestep
del action
del new_timestep
def default_agent(obs_spec: specs.Array, action_spec: specs.DiscreteArray,
**kwargs) -> Random:
del obs_spec # for compatibility
return Random(action_spec=action_spec, **kwargs)
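# Illustrative usage sketch only (not part of the library): a Random agent
# selecting an action for a dummy first timestep of a 3-action task.
if __name__ == '__main__':
  example_agent = default_agent(
      obs_spec=specs.Array((4,), dtype=float),
      action_spec=specs.DiscreteArray(num_values=3),
      seed=0)
  first_step = dm_env.restart(observation=np.zeros(4))
  print(example_agent.select_action(first_step))  # An integer in {0, 1, 2}.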
|
bsuite-master
|
bsuite/baselines/random/agent.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic test coverage for agent training."""
from absl.testing import absltest
from absl.testing import parameterized
from bsuite import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines import random
class RunTest(parameterized.TestCase):
@parameterized.parameters(*sweep.TESTING)
def test_run(self, bsuite_id: str):
env = bsuite.load_from_id(bsuite_id)
agent = random.default_agent(
env.observation_spec(), env.action_spec())
experiment.run(
agent=agent,
environment=env,
num_episodes=5)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/baselines/random/run_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/baselines/jax/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Run a Dqn agent instance (using JAX) on a bsuite experiment."""
from absl import app
from absl import flags
import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.jax import dqn
from bsuite.baselines.utils import pool
# Internal imports.
# Experiment flags.
flags.DEFINE_string(
'bsuite_id', 'catch/0', 'BSuite identifier. '
'This global flag can be used to control which environment is loaded.')
flags.DEFINE_string('save_path', '/tmp/bsuite', 'where to save bsuite results')
flags.DEFINE_enum('logging_mode', 'csv', ['csv', 'sqlite', 'terminal'],
'which form of logging to use for bsuite results')
flags.DEFINE_boolean('overwrite', False, 'overwrite csv logging if found')
flags.DEFINE_integer('num_episodes', None, 'Number of episodes to run for.')
flags.DEFINE_boolean('verbose', True, 'whether to log to std output')
FLAGS = flags.FLAGS
def run(bsuite_id: str) -> str:
"""Runs a DQN agent on a given bsuite environment, logging to CSV."""
env = bsuite.load_and_record(
bsuite_id=bsuite_id,
save_path=FLAGS.save_path,
logging_mode=FLAGS.logging_mode,
overwrite=FLAGS.overwrite,
)
agent = dqn.default_agent(env.observation_spec(), env.action_spec())
num_episodes = FLAGS.num_episodes or getattr(env, 'bsuite_num_episodes')
experiment.run(
agent=agent,
environment=env,
num_episodes=num_episodes,
verbose=FLAGS.verbose)
return bsuite_id
def main(_):
# Parses whether to run a single bsuite_id, or multiprocess sweep.
bsuite_id = FLAGS.bsuite_id
if bsuite_id in sweep.SWEEP:
print(f'Running single experiment: bsuite_id={bsuite_id}.')
run(bsuite_id)
elif hasattr(sweep, bsuite_id):
bsuite_sweep = getattr(sweep, bsuite_id)
print(f'Running sweep over bsuite_id in sweep.{bsuite_sweep}')
FLAGS.verbose = False
pool.map_mpi(run, bsuite_sweep)
else:
raise ValueError(f'Invalid flag: bsuite_id={bsuite_id}.')
if __name__ == '__main__':
app.run(main)
|
bsuite-master
|
bsuite/baselines/jax/dqn/run.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple DQN agent implemented in JAX."""
from bsuite.baselines.jax.dqn.agent import default_agent
from bsuite.baselines.jax.dqn.agent import DQN
|
bsuite-master
|
bsuite/baselines/jax/dqn/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple JAX-based DQN implementation.
Reference: "Playing atari with deep reinforcement learning" (Mnih et al, 2015).
Link: https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf.
"""
from typing import Any, Callable, NamedTuple, Sequence
from bsuite.baselines import base
from bsuite.baselines.utils import replay
import dm_env
from dm_env import specs
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
class TrainingState(NamedTuple):
"""Holds the agent's training state."""
params: hk.Params
target_params: hk.Params
opt_state: Any
step: int
class DQN(base.Agent):
"""A simple DQN agent using JAX."""
def __init__(
self,
obs_spec: specs.Array,
action_spec: specs.DiscreteArray,
network: Callable[[jnp.ndarray], jnp.ndarray],
optimizer: optax.GradientTransformation,
batch_size: int,
epsilon: float,
rng: hk.PRNGSequence,
discount: float,
replay_capacity: int,
min_replay_size: int,
sgd_period: int,
target_update_period: int,
):
# Transform the (impure) network into a pure function.
network = hk.without_apply_rng(hk.transform(network))
# Define loss function.
def loss(params: hk.Params,
target_params: hk.Params,
transitions: Sequence[jnp.ndarray]) -> jnp.ndarray:
"""Computes the standard TD(0) Q-learning loss on batch of transitions."""
o_tm1, a_tm1, r_t, d_t, o_t = transitions
q_tm1 = network.apply(params, o_tm1)
q_t = network.apply(target_params, o_t)
batch_q_learning = jax.vmap(rlax.q_learning)
td_error = batch_q_learning(q_tm1, a_tm1, r_t, discount * d_t, q_t)
return jnp.mean(td_error**2)
# Define update function.
@jax.jit
def sgd_step(state: TrainingState,
transitions: Sequence[jnp.ndarray]) -> TrainingState:
"""Performs an SGD step on a batch of transitions."""
gradients = jax.grad(loss)(state.params, state.target_params, transitions)
updates, new_opt_state = optimizer.update(gradients, state.opt_state)
new_params = optax.apply_updates(state.params, updates)
return TrainingState(
params=new_params,
target_params=state.target_params,
opt_state=new_opt_state,
step=state.step + 1)
# Initialize the networks and optimizer.
dummy_observation = np.zeros((1, *obs_spec.shape), jnp.float32)
initial_params = network.init(next(rng), dummy_observation)
initial_target_params = network.init(next(rng), dummy_observation)
initial_opt_state = optimizer.init(initial_params)
# This carries the agent state relevant to training.
self._state = TrainingState(
params=initial_params,
target_params=initial_target_params,
opt_state=initial_opt_state,
step=0)
self._sgd_step = sgd_step
self._forward = jax.jit(network.apply)
self._replay = replay.Replay(capacity=replay_capacity)
# Store hyperparameters.
self._num_actions = action_spec.num_values
self._batch_size = batch_size
self._sgd_period = sgd_period
self._target_update_period = target_update_period
self._epsilon = epsilon
self._total_steps = 0
self._min_replay_size = min_replay_size
def select_action(self, timestep: dm_env.TimeStep) -> base.Action:
"""Selects actions according to an epsilon-greedy policy."""
if np.random.rand() < self._epsilon:
return np.random.randint(self._num_actions)
# Greedy policy, breaking ties uniformly at random.
observation = timestep.observation[None, ...]
q_values = self._forward(self._state.params, observation)
action = np.random.choice(np.flatnonzero(q_values == q_values.max()))
return int(action)
def update(
self,
timestep: dm_env.TimeStep,
action: base.Action,
new_timestep: dm_env.TimeStep,
):
"""Adds transition to replay and periodically does SGD."""
# Add this transition to replay.
self._replay.add([
timestep.observation,
action,
new_timestep.reward,
new_timestep.discount,
new_timestep.observation,
])
self._total_steps += 1
if self._total_steps % self._sgd_period != 0:
return
if self._replay.size < self._min_replay_size:
return
# Do a batch of SGD.
transitions = self._replay.sample(self._batch_size)
self._state = self._sgd_step(self._state, transitions)
# Periodically update target parameters.
if self._state.step % self._target_update_period == 0:
self._state = self._state._replace(target_params=self._state.params)
def default_agent(obs_spec: specs.Array,
action_spec: specs.DiscreteArray,
seed: int = 0) -> base.Agent:
"""Initialize a DQN agent with default parameters."""
def network(inputs: jnp.ndarray) -> jnp.ndarray:
flat_inputs = hk.Flatten()(inputs)
mlp = hk.nets.MLP([64, 64, action_spec.num_values])
action_values = mlp(flat_inputs)
return action_values
return DQN(
obs_spec=obs_spec,
action_spec=action_spec,
network=network,
optimizer=optax.adam(1e-3),
batch_size=32,
discount=0.99,
replay_capacity=10000,
min_replay_size=100,
sgd_period=1,
target_update_period=4,
epsilon=0.05,
rng=hk.PRNGSequence(seed),
)
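# Illustrative end-to-end sketch only (mirrors the accompanying run script):
# build the default DQN agent for a bsuite environment and train briefly.
if __name__ == '__main__':
  import bsuite
  from bsuite.baselines import experiment
  example_env = bsuite.load_from_id('catch/0')
  example_agent = default_agent(
      example_env.observation_spec(), example_env.action_spec(), seed=0)
  experiment.run(agent=example_agent, environment=example_env, num_episodes=5)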
|
bsuite-master
|
bsuite/baselines/jax/dqn/agent.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic test coverage for agent training."""
from absl.testing import absltest
from absl.testing import parameterized
from bsuite import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.jax import dqn
class RunTest(parameterized.TestCase):
@parameterized.parameters(*sweep.TESTING)
def test_run(self, bsuite_id: str):
env = bsuite.load_from_id(bsuite_id)
agent = dqn.default_agent(
env.observation_spec(), env.action_spec())
experiment.run(
agent=agent,
environment=env,
num_episodes=5)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/baselines/jax/dqn/run_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Run an actor-critic agent instance on a bsuite experiment."""
from absl import app
from absl import flags
import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.jax import actor_critic
from bsuite.baselines.utils import pool
# Internal imports.
# Experiment flags.
flags.DEFINE_string(
'bsuite_id', 'catch/0', 'BSuite identifier. '
'This global flag can be used to control which environment is loaded.')
flags.DEFINE_string('save_path', '/tmp/bsuite', 'where to save bsuite results')
flags.DEFINE_enum('logging_mode', 'csv', ['csv', 'sqlite', 'terminal'],
'which form of logging to use for bsuite results')
flags.DEFINE_boolean('overwrite', False, 'overwrite csv logging if found')
flags.DEFINE_integer('num_episodes', None, 'Overrides number of training eps.')
flags.DEFINE_boolean('verbose', True, 'whether to log to std output')
FLAGS = flags.FLAGS
def run(bsuite_id: str) -> str:
"""Runs an A2C agent on a given bsuite environment, logging to CSV."""
env = bsuite.load_and_record(
bsuite_id=bsuite_id,
save_path=FLAGS.save_path,
logging_mode=FLAGS.logging_mode,
overwrite=FLAGS.overwrite,
)
agent = actor_critic.default_agent(
env.observation_spec(), env.action_spec())
num_episodes = FLAGS.num_episodes or getattr(env, 'bsuite_num_episodes')
experiment.run(
agent=agent,
environment=env,
num_episodes=num_episodes,
verbose=FLAGS.verbose)
return bsuite_id
def main(_):
# Parses whether to run a single bsuite_id, or multiprocess sweep.
bsuite_id = FLAGS.bsuite_id
if bsuite_id in sweep.SWEEP:
print(f'Running single experiment: bsuite_id={bsuite_id}.')
run(bsuite_id)
elif hasattr(sweep, bsuite_id):
bsuite_sweep = getattr(sweep, bsuite_id)
print(f'Running sweep over bsuite_id in sweep.{bsuite_sweep}')
FLAGS.verbose = False
pool.map_mpi(run, bsuite_sweep)
else:
raise ValueError(f'Invalid flag: bsuite_id={bsuite_id}.')
if __name__ == '__main__':
app.run(main)
|
bsuite-master
|
bsuite/baselines/jax/actor_critic/run.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple actor-critic implementation in JAX."""
from bsuite.baselines.jax.actor_critic.agent import ActorCritic
from bsuite.baselines.jax.actor_critic.agent import default_agent
|
bsuite-master
|
bsuite/baselines/jax/actor_critic/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple actor-critic agent implemented in JAX + Haiku."""
from typing import Any, Callable, NamedTuple, Tuple
from bsuite.baselines import base
from bsuite.baselines.utils import sequence
import dm_env
from dm_env import specs
import haiku as hk
import jax
import jax.numpy as jnp
import optax
import rlax
Logits = jnp.ndarray
Value = jnp.ndarray
PolicyValueNet = Callable[[jnp.ndarray], Tuple[Logits, Value]]
class TrainingState(NamedTuple):
params: hk.Params
opt_state: Any
class ActorCritic(base.Agent):
"""Feed-forward actor-critic agent."""
def __init__(
self,
obs_spec: specs.Array,
action_spec: specs.DiscreteArray,
network: PolicyValueNet,
optimizer: optax.GradientTransformation,
rng: hk.PRNGSequence,
sequence_length: int,
discount: float,
td_lambda: float,
):
# Define loss function.
def loss(trajectory: sequence.Trajectory) -> jnp.ndarray:
""""Actor-critic loss."""
logits, values = network(trajectory.observations) # pytype: disable=wrong-arg-types # jax-ndarray
td_errors = rlax.td_lambda(
v_tm1=values[:-1],
r_t=trajectory.rewards,
discount_t=trajectory.discounts * discount,
v_t=values[1:],
lambda_=jnp.array(td_lambda),
)
critic_loss = jnp.mean(td_errors**2)
actor_loss = rlax.policy_gradient_loss(
logits_t=logits[:-1],
a_t=trajectory.actions,
adv_t=td_errors,
w_t=jnp.ones_like(td_errors))
return actor_loss + critic_loss
# Transform the loss into a pure function.
loss_fn = hk.without_apply_rng(hk.transform(loss)).apply
# Define update function.
@jax.jit
def sgd_step(state: TrainingState,
trajectory: sequence.Trajectory) -> TrainingState:
"""Does a step of SGD over a trajectory."""
gradients = jax.grad(loss_fn)(state.params, trajectory)
updates, new_opt_state = optimizer.update(gradients, state.opt_state)
new_params = optax.apply_updates(state.params, updates)
return TrainingState(params=new_params, opt_state=new_opt_state)
# Initialize network parameters and optimiser state.
init, forward = hk.without_apply_rng(hk.transform(network))
dummy_observation = jnp.zeros((1, *obs_spec.shape), dtype=jnp.float32)
initial_params = init(next(rng), dummy_observation)
initial_opt_state = optimizer.init(initial_params)
# Internalize state.
self._state = TrainingState(initial_params, initial_opt_state)
self._forward = jax.jit(forward)
self._buffer = sequence.Buffer(obs_spec, action_spec, sequence_length)
self._sgd_step = sgd_step
self._rng = rng
def select_action(self, timestep: dm_env.TimeStep) -> base.Action:
"""Selects actions according to a softmax policy."""
key = next(self._rng)
observation = timestep.observation[None, ...]
logits, _ = self._forward(self._state.params, observation)
action = jax.random.categorical(key, logits).squeeze()
return int(action)
def update(
self,
timestep: dm_env.TimeStep,
action: base.Action,
new_timestep: dm_env.TimeStep,
):
"""Adds a transition to the trajectory buffer and periodically does SGD."""
self._buffer.append(timestep, action, new_timestep)
if self._buffer.full() or new_timestep.last():
trajectory = self._buffer.drain()
self._state = self._sgd_step(self._state, trajectory)
def default_agent(obs_spec: specs.Array,
action_spec: specs.DiscreteArray,
seed: int = 0) -> base.Agent:
"""Creates an actor-critic agent with default hyperparameters."""
def network(inputs: jnp.ndarray) -> Tuple[Logits, Value]:
flat_inputs = hk.Flatten()(inputs)
torso = hk.nets.MLP([64, 64])
policy_head = hk.Linear(action_spec.num_values)
value_head = hk.Linear(1)
embedding = torso(flat_inputs)
logits = policy_head(embedding)
value = value_head(embedding)
return logits, jnp.squeeze(value, axis=-1)
return ActorCritic(
obs_spec=obs_spec,
action_spec=action_spec,
network=network,
optimizer=optax.adam(3e-3),
rng=hk.PRNGSequence(seed),
sequence_length=32,
discount=0.99,
td_lambda=0.9,
)
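# Illustrative single-episode sketch only (not part of the library): the
# standard select_action / step / update loop body on a bsuite task.
if __name__ == '__main__':
  import bsuite
  example_env = bsuite.load_from_id('catch/0')
  example_agent = default_agent(
      example_env.observation_spec(), example_env.action_spec())
  timestep = example_env.reset()
  while not timestep.last():
    action = example_agent.select_action(timestep)
    new_timestep = example_env.step(action)
    example_agent.update(timestep, action, new_timestep)
    timestep = new_timestep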
|
bsuite-master
|
bsuite/baselines/jax/actor_critic/agent.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic test coverage for agent training."""
from absl.testing import absltest
from absl.testing import parameterized
from bsuite import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.jax import actor_critic
class RunTest(parameterized.TestCase):
@parameterized.parameters(*sweep.TESTING)
def test_run(self, bsuite_id: str):
env = bsuite.load_from_id(bsuite_id)
agent = actor_critic.default_agent(
env.observation_spec(), env.action_spec())
experiment.run(
agent=agent,
environment=env,
num_episodes=5)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/baselines/jax/actor_critic/run_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Run a Dqn agent instance (using JAX) on a bsuite experiment."""
from absl import app
from absl import flags
import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.jax import boot_dqn
from bsuite.baselines.utils import pool
import haiku as hk
from jax import lax
import jax.numpy as jnp
import optax
# Internal imports.
flags.DEFINE_integer('num_ensemble', 1, 'Size of ensemble.')
# Experiment flags.
flags.DEFINE_string(
'bsuite_id', 'catch/0', 'BSuite identifier. '
'This global flag can be used to control which environment is loaded.')
flags.DEFINE_string('save_path', '/tmp/bsuite', 'where to save bsuite results')
flags.DEFINE_enum('logging_mode', 'csv', ['csv', 'sqlite', 'terminal'],
'which form of logging to use for bsuite results')
flags.DEFINE_boolean('overwrite', False, 'overwrite csv logging if found')
flags.DEFINE_integer('num_episodes', None, 'Number of episodes to run for.')
flags.DEFINE_boolean('verbose', True, 'whether to log to std output')
FLAGS = flags.FLAGS
def run(bsuite_id: str) -> str:
"""Runs a DQN agent on a given bsuite environment, logging to CSV."""
env = bsuite.load_and_record(
bsuite_id=bsuite_id,
save_path=FLAGS.save_path,
logging_mode=FLAGS.logging_mode,
overwrite=FLAGS.overwrite,
)
action_spec = env.action_spec()
# Define network.
prior_scale = 5.
hidden_sizes = [50, 50]
def network(inputs: jnp.ndarray) -> jnp.ndarray:
"""Simple Q-network with randomized prior function."""
net = hk.nets.MLP([*hidden_sizes, action_spec.num_values])
prior_net = hk.nets.MLP([*hidden_sizes, action_spec.num_values])
x = hk.Flatten()(inputs)
return net(x) + prior_scale * lax.stop_gradient(prior_net(x))
optimizer = optax.adam(learning_rate=1e-3)
agent = boot_dqn.BootstrappedDqn(
obs_spec=env.observation_spec(),
action_spec=action_spec,
network=network,
optimizer=optimizer,
num_ensemble=FLAGS.num_ensemble,
batch_size=128,
discount=.99,
replay_capacity=10000,
min_replay_size=128,
sgd_period=1,
target_update_period=4,
mask_prob=1.0,
noise_scale=0.,
)
num_episodes = FLAGS.num_episodes or getattr(env, 'bsuite_num_episodes')
experiment.run(
agent=agent,
environment=env,
num_episodes=num_episodes,
verbose=FLAGS.verbose)
return bsuite_id
def main(_):
# Parses whether to run a single bsuite_id, or multiprocess sweep.
bsuite_id = FLAGS.bsuite_id
if bsuite_id in sweep.SWEEP:
print(f'Running single experiment: bsuite_id={bsuite_id}.')
run(bsuite_id)
elif hasattr(sweep, bsuite_id):
bsuite_sweep = getattr(sweep, bsuite_id)
    print(f'Running sweep over bsuite_id in sweep.{bsuite_id}')
FLAGS.verbose = False
pool.map_mpi(run, bsuite_sweep)
else:
raise ValueError(f'Invalid flag: bsuite_id={bsuite_id}.')
if __name__ == '__main__':
app.run(main)
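# Example invocations (a sketch; the paths and values below are illustrative):
#   python run.py --bsuite_id=catch/0 --num_ensemble=3 --save_path=/tmp/bsuite
#   python run.py --bsuite_id=DEEP_SEA   # sweep, assuming bsuite.sweep defines
#                                         # a DEEP_SEA constant
# Any --bsuite_id naming an attribute of bsuite.sweep (rather than a single id
# in sweep.SWEEP) is dispatched to the multiprocessing branch of main() above.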
|
bsuite-master
|
bsuite/baselines/jax/boot_dqn/run.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple DQN agent implemented in JAX."""
from bsuite.baselines.jax.boot_dqn.agent import BootstrappedDqn
from bsuite.baselines.jax.boot_dqn.agent import default_agent
|
bsuite-master
|
bsuite/baselines/jax/boot_dqn/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple implementation of Bootstrapped DQN with prior networks.
References:
1. "Deep Exploration via Bootstrapped DQN" (Osband et al., 2016)
2. "Deep Exploration via Randomized Value Functions" (Osband et al., 2017)
3. "Randomized Prior Functions for Deep RL" (Osband et al, 2018)
Links:
1. https://arxiv.org/abs/1602.04621
2. https://arxiv.org/abs/1703.07608
3. https://arxiv.org/abs/1806.03335
Notes:
- This agent is implemented with JAX, Haiku, and Optax. For installation
  instructions for these libraries, see the README.md in the parent folder.
- This implementation is potentially inefficient, as it does not parallelise
computation across the ensemble for simplicity and readability.
"""
from typing import Any, Callable, NamedTuple, Sequence
from bsuite.baselines import base
from bsuite.baselines.utils import replay
import dm_env
from dm_env import specs
import haiku as hk
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
class TrainingState(NamedTuple):
params: hk.Params
target_params: hk.Params
opt_state: Any
step: int
class BootstrappedDqn(base.Agent):
"""Bootstrapped DQN with randomized prior functions."""
def __init__(
self,
obs_spec: specs.Array,
action_spec: specs.DiscreteArray,
network: Callable[[jnp.ndarray], jnp.ndarray],
num_ensemble: int,
batch_size: int,
discount: float,
replay_capacity: int,
min_replay_size: int,
sgd_period: int,
target_update_period: int,
optimizer: optax.GradientTransformation,
mask_prob: float,
noise_scale: float,
epsilon_fn: Callable[[int], float] = lambda _: 0.,
seed: int = 1,
):
# Transform the (impure) network into a pure function.
network = hk.without_apply_rng(hk.transform(network))
# Define loss function, including bootstrap mask `m_t` & reward noise `z_t`.
def loss(params: hk.Params, target_params: hk.Params,
transitions: Sequence[jnp.ndarray]) -> jnp.ndarray:
"""Q-learning loss with added reward noise + half-in bootstrap."""
o_tm1, a_tm1, r_t, d_t, o_t, m_t, z_t = transitions
q_tm1 = network.apply(params, o_tm1)
q_t = network.apply(target_params, o_t)
r_t += noise_scale * z_t
batch_q_learning = jax.vmap(rlax.q_learning)
td_error = batch_q_learning(q_tm1, a_tm1, r_t, discount * d_t, q_t)
return jnp.mean(m_t * td_error**2)
    # Define the SGD update applied to each member of the ensemble.
@jax.jit
def sgd_step(state: TrainingState,
transitions: Sequence[jnp.ndarray]) -> TrainingState:
"""Does a step of SGD for the whole ensemble over `transitions`."""
gradients = jax.grad(loss)(state.params, state.target_params, transitions)
updates, new_opt_state = optimizer.update(gradients, state.opt_state)
new_params = optax.apply_updates(state.params, updates)
return TrainingState(
params=new_params,
target_params=state.target_params,
opt_state=new_opt_state,
step=state.step + 1)
# Initialize parameters and optimizer state for an ensemble of Q-networks.
rng = hk.PRNGSequence(seed)
dummy_obs = np.zeros((1, *obs_spec.shape), jnp.float32)
initial_params = [
network.init(next(rng), dummy_obs) for _ in range(num_ensemble)
]
initial_target_params = [
network.init(next(rng), dummy_obs) for _ in range(num_ensemble)
]
initial_opt_state = [optimizer.init(p) for p in initial_params]
# Internalize state.
self._ensemble = [
TrainingState(p, tp, o, step=0) for p, tp, o in zip(
initial_params, initial_target_params, initial_opt_state)
]
self._forward = jax.jit(network.apply)
self._sgd_step = sgd_step
self._num_ensemble = num_ensemble
self._optimizer = optimizer
self._replay = replay.Replay(capacity=replay_capacity)
# Agent hyperparameters.
self._num_actions = action_spec.num_values
self._batch_size = batch_size
self._sgd_period = sgd_period
self._target_update_period = target_update_period
self._min_replay_size = min_replay_size
self._epsilon_fn = epsilon_fn
self._mask_prob = mask_prob
# Agent state.
self._active_head = self._ensemble[0]
self._total_steps = 0
def select_action(self, timestep: dm_env.TimeStep) -> base.Action:
"""Select values via Thompson sampling, then use epsilon-greedy policy."""
self._total_steps += 1
if np.random.rand() < self._epsilon_fn(self._total_steps):
return np.random.randint(self._num_actions)
# Greedy policy, breaking ties uniformly at random.
batched_obs = timestep.observation[None, ...]
q_values = self._forward(self._active_head.params, batched_obs)
action = np.random.choice(np.flatnonzero(q_values == q_values.max()))
return int(action)
def update(
self,
timestep: dm_env.TimeStep,
action: base.Action,
new_timestep: dm_env.TimeStep,
):
"""Update the agent: add transition to replay and periodically do SGD."""
# Thompson sampling: every episode pick a new Q-network as the policy.
if new_timestep.last():
k = np.random.randint(self._num_ensemble)
self._active_head = self._ensemble[k]
# Generate bootstrapping mask & reward noise.
mask = np.random.binomial(1, self._mask_prob, self._num_ensemble)
noise = np.random.randn(self._num_ensemble)
# Make transition and add to replay.
transition = [
timestep.observation,
action,
np.float32(new_timestep.reward),
np.float32(new_timestep.discount),
new_timestep.observation,
mask,
noise,
]
self._replay.add(transition)
if self._replay.size < self._min_replay_size:
return
# Periodically sample from replay and do SGD for the whole ensemble.
if self._total_steps % self._sgd_period == 0:
transitions = self._replay.sample(self._batch_size)
o_tm1, a_tm1, r_t, d_t, o_t, m_t, z_t = transitions
for k, state in enumerate(self._ensemble):
transitions = [o_tm1, a_tm1, r_t, d_t, o_t, m_t[:, k], z_t[:, k]]
self._ensemble[k] = self._sgd_step(state, transitions)
# Periodically update target parameters.
for k, state in enumerate(self._ensemble):
if state.step % self._target_update_period == 0:
self._ensemble[k] = state._replace(target_params=state.params)
def default_agent(
obs_spec: specs.Array,
action_spec: specs.DiscreteArray,
seed: int = 0,
num_ensemble: int = 20,
) -> BootstrappedDqn:
"""Initialize a Bootstrapped DQN agent with default parameters."""
# Define network.
prior_scale = 5.
hidden_sizes = [50, 50]
def network(inputs: jnp.ndarray) -> jnp.ndarray:
"""Simple Q-network with randomized prior function."""
net = hk.nets.MLP([*hidden_sizes, action_spec.num_values])
prior_net = hk.nets.MLP([*hidden_sizes, action_spec.num_values])
x = hk.Flatten()(inputs)
return net(x) + prior_scale * lax.stop_gradient(prior_net(x))
optimizer = optax.adam(learning_rate=1e-3)
return BootstrappedDqn(
obs_spec=obs_spec,
action_spec=action_spec,
network=network,
batch_size=128,
discount=.99,
num_ensemble=num_ensemble,
replay_capacity=10000,
min_replay_size=128,
sgd_period=1,
target_update_period=4,
optimizer=optimizer,
mask_prob=1.,
noise_scale=0.,
epsilon_fn=lambda _: 0.,
seed=seed,
)
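if __name__ == '__main__':
  # A minimal smoke-test sketch: build the default agent on toy specs and
  # sample one action. The 4-dimensional observation and the three discrete
  # actions are illustrative values only, not taken from any bsuite task.
  toy_obs_spec = specs.Array(shape=(4,), dtype=np.float32)
  toy_action_spec = specs.DiscreteArray(num_values=3)
  demo_agent = default_agent(toy_obs_spec, toy_action_spec, num_ensemble=2)
  first_step = dm_env.restart(np.zeros((4,), dtype=np.float32))
  print('Sampled action:', demo_agent.select_action(first_step))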
|
bsuite-master
|
bsuite/baselines/jax/boot_dqn/agent.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic test coverage for agent training."""
from absl.testing import absltest
from absl.testing import parameterized
from bsuite import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.jax import boot_dqn
class RunTest(parameterized.TestCase):
@parameterized.parameters(*sweep.TESTING)
def test_run(self, bsuite_id: str):
env = bsuite.load_from_id(bsuite_id)
agent = boot_dqn.default_agent(
env.observation_spec(), env.action_spec(), num_ensemble=2)
experiment.run(
agent=agent,
environment=env,
num_episodes=5)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/baselines/jax/boot_dqn/run_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Run an actor-critic agent instance on a bsuite experiment."""
from absl import app
from absl import flags
import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.jax import actor_critic_rnn
from bsuite.baselines.utils import pool
# Internal imports.
# Experiment flags.
flags.DEFINE_string(
'bsuite_id', 'catch/0', 'BSuite identifier. '
'This global flag can be used to control which environment is loaded.')
flags.DEFINE_string('save_path', '/tmp/bsuite', 'where to save bsuite results')
flags.DEFINE_enum('logging_mode', 'csv', ['csv', 'sqlite', 'terminal'],
'which form of logging to use for bsuite results')
flags.DEFINE_boolean('overwrite', False, 'overwrite csv logging if found')
flags.DEFINE_integer('num_episodes', None, 'Overrides number of training eps.')
flags.DEFINE_boolean('verbose', True, 'whether to log to std output')
FLAGS = flags.FLAGS
def run(bsuite_id: str) -> str:
"""Runs an A2C agent on a given bsuite environment, logging to CSV."""
env = bsuite.load_and_record(
bsuite_id=bsuite_id,
save_path=FLAGS.save_path,
logging_mode=FLAGS.logging_mode,
overwrite=FLAGS.overwrite,
)
agent = actor_critic_rnn.default_agent(
env.observation_spec(), env.action_spec())
num_episodes = FLAGS.num_episodes or getattr(env, 'bsuite_num_episodes')
experiment.run(
agent=agent,
environment=env,
num_episodes=num_episodes,
verbose=FLAGS.verbose)
return bsuite_id
def main(_):
# Parses whether to run a single bsuite_id, or multiprocess sweep.
bsuite_id = FLAGS.bsuite_id
if bsuite_id in sweep.SWEEP:
print(f'Running single experiment: bsuite_id={bsuite_id}.')
run(bsuite_id)
elif hasattr(sweep, bsuite_id):
bsuite_sweep = getattr(sweep, bsuite_id)
    print(f'Running sweep over bsuite_id in sweep.{bsuite_id}')
FLAGS.verbose = False
pool.map_mpi(run, bsuite_sweep)
else:
raise ValueError(f'Invalid flag: bsuite_id={bsuite_id}.')
if __name__ == '__main__':
app.run(main)
|
bsuite-master
|
bsuite/baselines/jax/actor_critic_rnn/run.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple actor-critic implementation in JAX."""
from bsuite.baselines.jax.actor_critic_rnn.agent import ActorCriticRNN
from bsuite.baselines.jax.actor_critic_rnn.agent import default_agent
|
bsuite-master
|
bsuite/baselines/jax/actor_critic_rnn/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple recurrent actor-critic agent implemented in JAX + Haiku."""
from typing import Any, Callable, NamedTuple, Tuple
from bsuite.baselines import base
from bsuite.baselines.utils import sequence
import dm_env
from dm_env import specs
import haiku as hk
import jax
import jax.numpy as jnp
import optax
import rlax
Logits = jnp.ndarray
Value = jnp.ndarray
LSTMState = Any
RecurrentPolicyValueNet = Callable[[jnp.ndarray, LSTMState],
Tuple[Tuple[Logits, Value], LSTMState]]
class AgentState(NamedTuple):
"""Holds the network parameters, optimizer state, and RNN state."""
params: hk.Params
opt_state: Any
rnn_state: LSTMState
rnn_unroll_state: LSTMState
class ActorCriticRNN(base.Agent):
"""Recurrent actor-critic agent."""
def __init__(
self,
obs_spec: specs.Array,
action_spec: specs.DiscreteArray,
network: RecurrentPolicyValueNet,
initial_rnn_state: LSTMState,
optimizer: optax.GradientTransformation,
rng: hk.PRNGSequence,
sequence_length: int,
discount: float,
td_lambda: float,
entropy_cost: float = 0.,
):
# Define loss function.
def loss(trajectory: sequence.Trajectory, rnn_unroll_state: LSTMState):
""""Actor-critic loss."""
(logits, values), new_rnn_unroll_state = hk.dynamic_unroll(
network, trajectory.observations[:, None, ...], rnn_unroll_state)
seq_len = trajectory.actions.shape[0]
td_errors = rlax.td_lambda(
v_tm1=values[:-1, 0],
r_t=trajectory.rewards,
discount_t=trajectory.discounts * discount,
v_t=values[1:, 0],
lambda_=jnp.array(td_lambda),
)
critic_loss = jnp.mean(td_errors**2)
actor_loss = rlax.policy_gradient_loss(
logits_t=logits[:-1, 0],
a_t=trajectory.actions,
adv_t=td_errors,
w_t=jnp.ones(seq_len))
entropy_loss = jnp.mean(
rlax.entropy_loss(logits[:-1, 0], jnp.ones(seq_len)))
combined_loss = actor_loss + critic_loss + entropy_cost * entropy_loss
return combined_loss, new_rnn_unroll_state
# Transform the loss into a pure function.
loss_fn = hk.without_apply_rng(hk.transform(loss)).apply
# Define update function.
@jax.jit
def sgd_step(state: AgentState,
trajectory: sequence.Trajectory) -> AgentState:
"""Does a step of SGD over a trajectory."""
gradients, new_rnn_state = jax.grad(
loss_fn, has_aux=True)(state.params, trajectory,
state.rnn_unroll_state)
updates, new_opt_state = optimizer.update(gradients, state.opt_state)
new_params = optax.apply_updates(state.params, updates)
return state._replace(
params=new_params,
opt_state=new_opt_state,
rnn_unroll_state=new_rnn_state)
# Initialize network parameters and optimiser state.
init, forward = hk.without_apply_rng(hk.transform(network))
dummy_observation = jnp.zeros((1, *obs_spec.shape), dtype=obs_spec.dtype)
initial_params = init(next(rng), dummy_observation, initial_rnn_state)
initial_opt_state = optimizer.init(initial_params)
# Internalize state.
self._state = AgentState(initial_params, initial_opt_state,
initial_rnn_state, initial_rnn_state)
self._forward = jax.jit(forward)
self._buffer = sequence.Buffer(obs_spec, action_spec, sequence_length)
self._sgd_step = sgd_step
self._rng = rng
self._initial_rnn_state = initial_rnn_state
def select_action(self, timestep: dm_env.TimeStep) -> base.Action:
"""Selects actions according to a softmax policy."""
key = next(self._rng)
observation = timestep.observation[None, ...]
(logits, _), rnn_state = self._forward(self._state.params, observation,
self._state.rnn_state)
self._state = self._state._replace(rnn_state=rnn_state)
action = jax.random.categorical(key, logits).squeeze()
return int(action)
def update(
self,
timestep: dm_env.TimeStep,
action: base.Action,
new_timestep: dm_env.TimeStep,
):
"""Adds a transition to the trajectory buffer and periodically does SGD."""
if new_timestep.last():
self._state = self._state._replace(rnn_state=self._initial_rnn_state)
self._buffer.append(timestep, action, new_timestep)
if self._buffer.full() or new_timestep.last():
trajectory = self._buffer.drain()
self._state = self._sgd_step(self._state, trajectory)
def default_agent(obs_spec: specs.Array,
action_spec: specs.DiscreteArray,
seed: int = 0) -> base.Agent:
"""Creates an actor-critic agent with default hyperparameters."""
hidden_size = 256
initial_rnn_state = hk.LSTMState(
hidden=jnp.zeros((1, hidden_size), dtype=jnp.float32),
cell=jnp.zeros((1, hidden_size), dtype=jnp.float32))
def network(inputs: jnp.ndarray,
state) -> Tuple[Tuple[Logits, Value], LSTMState]:
flat_inputs = hk.Flatten()(inputs)
torso = hk.nets.MLP([hidden_size, hidden_size])
lstm = hk.LSTM(hidden_size)
policy_head = hk.Linear(action_spec.num_values)
value_head = hk.Linear(1)
embedding = torso(flat_inputs)
embedding, state = lstm(embedding, state)
logits = policy_head(embedding)
value = value_head(embedding)
return (logits, jnp.squeeze(value, axis=-1)), state
return ActorCriticRNN(
obs_spec=obs_spec,
action_spec=action_spec,
network=network,
initial_rnn_state=initial_rnn_state,
optimizer=optax.adam(3e-3),
rng=hk.PRNGSequence(seed),
sequence_length=32,
discount=0.99,
td_lambda=0.9,
)
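if __name__ == '__main__':
  # A minimal smoke-test sketch for the recurrent agent on made-up specs; the
  # 4-dimensional observation and three actions are illustrative values only.
  toy_obs_spec = specs.Array(shape=(4,), dtype=jnp.float32)
  toy_action_spec = specs.DiscreteArray(num_values=3)
  demo_agent = default_agent(toy_obs_spec, toy_action_spec)
  first_step = dm_env.restart(jnp.zeros((4,), dtype=jnp.float32))
  print('Sampled action:', demo_agent.select_action(first_step))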
|
bsuite-master
|
bsuite/baselines/jax/actor_critic_rnn/agent.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic test coverage for agent training."""
from absl.testing import absltest
from absl.testing import parameterized
from bsuite import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.jax import actor_critic_rnn
class RunTest(parameterized.TestCase):
@parameterized.parameters(*sweep.TESTING)
def test_run(self, bsuite_id: str):
env = bsuite.load_from_id(bsuite_id)
agent = actor_critic_rnn.default_agent(
env.observation_spec(), env.action_spec())
experiment.run(
agent=agent,
environment=env,
num_episodes=5)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/baselines/jax/actor_critic_rnn/run_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
|
bsuite-master
|
bsuite/baselines/tf/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Run a Dqn agent instance (using TensorFlow 2) on a bsuite experiment."""
from absl import app
from absl import flags
import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.tf import dqn
from bsuite.baselines.utils import pool
import sonnet as snt
# Internal imports.
# Experiment flags.
flags.DEFINE_string(
'bsuite_id', 'catch/0', 'BSuite identifier. '
'This global flag can be used to control which environment is loaded.')
flags.DEFINE_string('save_path', '/tmp/bsuite', 'where to save bsuite results')
flags.DEFINE_enum('logging_mode', 'csv', ['csv', 'sqlite', 'terminal'],
'which form of logging to use for bsuite results')
flags.DEFINE_boolean('overwrite', False, 'overwrite csv logging if found')
flags.DEFINE_integer('num_episodes', None, 'Overrides number of training eps.')
flags.DEFINE_integer('num_hidden_layers', 2, 'number of hidden layers')
flags.DEFINE_integer('num_units', 50, 'number of units per hidden layer')
flags.DEFINE_integer('batch_size', 32, 'size of batches sampled from replay')
flags.DEFINE_float('discount', .99, 'discounting on the agent side')
flags.DEFINE_integer('replay_capacity', 100000, 'size of the replay buffer')
flags.DEFINE_integer('min_replay_size', 128, 'min replay size before training.')
flags.DEFINE_integer('sgd_period', 1, 'steps between online net updates')
flags.DEFINE_integer('target_update_period', 4,
'steps between target net updates')
flags.DEFINE_float('learning_rate', 1e-3, 'learning rate for optimizer')
flags.DEFINE_float('epsilon', 0.05, 'fraction of exploratory random actions')
flags.DEFINE_integer('seed', 42, 'seed for random number generation')
flags.DEFINE_boolean('verbose', True, 'whether to log to std output')
FLAGS = flags.FLAGS
def run(bsuite_id: str) -> str:
"""Runs a DQN agent on a given bsuite environment, logging to CSV."""
env = bsuite.load_and_record(
bsuite_id=bsuite_id,
save_path=FLAGS.save_path,
logging_mode=FLAGS.logging_mode,
overwrite=FLAGS.overwrite,
)
# Making the networks.
hidden_units = [FLAGS.num_units] * FLAGS.num_hidden_layers
network = snt.Sequential([
snt.Flatten(),
snt.nets.MLP(hidden_units + [env.action_spec().num_values]),
])
optimizer = snt.optimizers.Adam(learning_rate=FLAGS.learning_rate)
agent = dqn.DQN(
action_spec=env.action_spec(),
network=network,
batch_size=FLAGS.batch_size,
discount=FLAGS.discount,
replay_capacity=FLAGS.replay_capacity,
min_replay_size=FLAGS.min_replay_size,
sgd_period=FLAGS.sgd_period,
target_update_period=FLAGS.target_update_period,
optimizer=optimizer,
epsilon=FLAGS.epsilon,
seed=FLAGS.seed,
)
num_episodes = FLAGS.num_episodes or getattr(env, 'bsuite_num_episodes')
experiment.run(
agent=agent,
environment=env,
num_episodes=num_episodes,
verbose=FLAGS.verbose)
return bsuite_id
def main(argv):
# Parses whether to run a single bsuite_id, or multiprocess sweep.
del argv # Unused.
bsuite_id = FLAGS.bsuite_id
if bsuite_id in sweep.SWEEP:
print(f'Running single experiment: bsuite_id={bsuite_id}.')
run(bsuite_id)
elif hasattr(sweep, bsuite_id):
bsuite_sweep = getattr(sweep, bsuite_id)
    print(f'Running sweep over bsuite_id in sweep.{bsuite_id}')
FLAGS.verbose = False
pool.map_mpi(run, bsuite_sweep)
else:
raise ValueError(f'Invalid flag: bsuite_id={bsuite_id}.')
if __name__ == '__main__':
app.run(main)
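# Example invocation overriding a few hyperparameters (values are illustrative):
#   python run.py --bsuite_id=catch/0 --num_units=64 --batch_size=64 \
#     --learning_rate=3e-4 --save_path=/tmp/bsuite_dqn --overwrite=True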
|
bsuite-master
|
bsuite/baselines/tf/dqn/run.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple TensorFlow 2-based DQN implementation."""
from bsuite.baselines.tf.dqn.agent import default_agent
from bsuite.baselines.tf.dqn.agent import DQN
|
bsuite-master
|
bsuite/baselines/tf/dqn/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple TensorFlow 2-based DQN implementation.
Reference: "Playing atari with deep reinforcement learning" (Mnih et al, 2015).
Link: https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf.
"""
import copy
from typing import Optional, Sequence
from bsuite.baselines import base
from bsuite.baselines.utils import replay
import dm_env
from dm_env import specs
import numpy as np
import sonnet as snt
import tensorflow as tf
class DQN(base.Agent):
"""A simple DQN agent using TF2."""
def __init__(
self,
action_spec: specs.DiscreteArray,
network: snt.Module,
batch_size: int,
discount: float,
replay_capacity: int,
min_replay_size: int,
sgd_period: int,
target_update_period: int,
optimizer: snt.Optimizer,
epsilon: float,
seed: Optional[int] = None,
):
# Internalise hyperparameters.
self._num_actions = action_spec.num_values
self._discount = discount
self._batch_size = batch_size
self._sgd_period = sgd_period
self._target_update_period = target_update_period
self._epsilon = epsilon
self._min_replay_size = min_replay_size
# Seed the RNG.
tf.random.set_seed(seed)
self._rng = np.random.RandomState(seed)
# Internalise the components (networks, optimizer, replay buffer).
self._optimizer = optimizer
self._replay = replay.Replay(capacity=replay_capacity)
self._online_network = network
self._target_network = copy.deepcopy(network)
self._forward = tf.function(network)
self._total_steps = tf.Variable(0)
def select_action(self, timestep: dm_env.TimeStep) -> base.Action:
# Epsilon-greedy policy.
if self._rng.rand() < self._epsilon:
return self._rng.randint(self._num_actions)
observation = tf.convert_to_tensor(timestep.observation[None, ...])
# Greedy policy, breaking ties uniformly at random.
q_values = self._forward(observation).numpy()
action = self._rng.choice(np.flatnonzero(q_values == q_values.max()))
return int(action)
def update(
self,
timestep: dm_env.TimeStep,
action: base.Action,
new_timestep: dm_env.TimeStep,
):
# Add this transition to replay.
self._replay.add([
timestep.observation,
action,
new_timestep.reward,
new_timestep.discount,
new_timestep.observation,
])
self._total_steps.assign_add(1)
if tf.math.mod(self._total_steps, self._sgd_period) != 0:
return
if self._replay.size < self._min_replay_size:
return
# Do a batch of SGD.
transitions = self._replay.sample(self._batch_size)
self._training_step(transitions)
@tf.function
def _training_step(self, transitions: Sequence[tf.Tensor]) -> tf.Tensor:
"""Does a step of SGD on a batch of transitions."""
o_tm1, a_tm1, r_t, d_t, o_t = transitions
r_t = tf.cast(r_t, tf.float32) # [B]
d_t = tf.cast(d_t, tf.float32) # [B]
o_tm1 = tf.convert_to_tensor(o_tm1)
o_t = tf.convert_to_tensor(o_t)
with tf.GradientTape() as tape:
q_tm1 = self._online_network(o_tm1) # [B, A]
q_t = self._target_network(o_t) # [B, A]
onehot_actions = tf.one_hot(a_tm1, depth=self._num_actions) # [B, A]
qa_tm1 = tf.reduce_sum(q_tm1 * onehot_actions, axis=-1) # [B]
qa_t = tf.reduce_max(q_t, axis=-1) # [B]
# One-step Q-learning loss.
target = r_t + d_t * self._discount * qa_t
td_error = qa_tm1 - target
loss = 0.5 * tf.reduce_mean(td_error**2) # []
# Update the online network via SGD.
variables = self._online_network.trainable_variables
gradients = tape.gradient(loss, variables)
self._optimizer.apply(gradients, variables)
# Periodically copy online -> target network variables.
if tf.math.mod(self._total_steps, self._target_update_period) == 0:
for target, param in zip(self._target_network.trainable_variables,
self._online_network.trainable_variables):
target.assign(param)
return loss
def default_agent(obs_spec: specs.Array,
action_spec: specs.DiscreteArray):
"""Initialize a DQN agent with default parameters."""
del obs_spec # Unused.
network = snt.Sequential([
snt.Flatten(),
snt.nets.MLP([50, 50, action_spec.num_values]),
])
optimizer = snt.optimizers.Adam(learning_rate=1e-3)
return DQN(
action_spec=action_spec,
network=network,
batch_size=32,
discount=0.99,
replay_capacity=10000,
min_replay_size=100,
sgd_period=1,
target_update_period=4,
optimizer=optimizer,
epsilon=0.05,
seed=42)
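if __name__ == '__main__':
  # A minimal smoke-test sketch: build the default agent on toy specs and
  # sample one action. The shapes and action count are illustrative only.
  toy_obs_spec = specs.Array(shape=(4,), dtype=np.float32)
  toy_action_spec = specs.DiscreteArray(num_values=3)
  demo_agent = default_agent(toy_obs_spec, toy_action_spec)
  first_step = dm_env.restart(np.zeros((4,), dtype=np.float32))
  print('Sampled action:', demo_agent.select_action(first_step))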
|
bsuite-master
|
bsuite/baselines/tf/dqn/agent.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic test coverage for agent training."""
from absl.testing import absltest
from absl.testing import parameterized
from bsuite import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.tf import dqn
class RunTest(parameterized.TestCase):
@parameterized.parameters(*sweep.TESTING)
def test_run(self, bsuite_id: str):
env = bsuite.load_from_id(bsuite_id)
agent = dqn.default_agent(
env.observation_spec(), env.action_spec())
experiment.run(
agent=agent,
environment=env,
num_episodes=5)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/baselines/tf/dqn/run_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Run an actor-critic agent instance on a bsuite experiment."""
from absl import app
from absl import flags
import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.tf import actor_critic
from bsuite.baselines.utils import pool
import sonnet as snt
# Internal imports.
# Experiment flags.
flags.DEFINE_string(
'bsuite_id', 'catch/0', 'BSuite identifier. '
'This global flag can be used to control which environment is loaded.')
flags.DEFINE_string('save_path', '/tmp/bsuite', 'where to save bsuite results')
flags.DEFINE_enum('logging_mode', 'csv', ['csv', 'sqlite', 'terminal'],
'which form of logging to use for bsuite results')
flags.DEFINE_boolean('overwrite', False, 'overwrite csv logging if found')
flags.DEFINE_integer('num_episodes', None, 'Overrides number of training eps.')
# algorithm
flags.DEFINE_integer('seed', 42, 'seed for random number generation')
flags.DEFINE_integer('num_hidden_layers', 2, 'number of hidden layers')
flags.DEFINE_integer('num_units', 64, 'number of units per hidden layer')
flags.DEFINE_float('learning_rate', 1e-2, 'the learning rate')
flags.DEFINE_integer('sequence_length', 32, 'number of transitions to batch')
flags.DEFINE_float('td_lambda', 0.9, 'mixing parameter for bootstrapping')
flags.DEFINE_float('discount', .99, 'discounting on the agent side')
flags.DEFINE_boolean('verbose', True, 'whether to log to std output')
FLAGS = flags.FLAGS
def run(bsuite_id: str) -> str:
"""Runs A2C agent on a single bsuite environment, logging to CSV."""
env = bsuite.load_and_record(
bsuite_id=bsuite_id,
save_path=FLAGS.save_path,
logging_mode=FLAGS.logging_mode,
overwrite=FLAGS.overwrite,
)
hidden_sizes = [FLAGS.num_units] * FLAGS.num_hidden_layers
network = actor_critic.PolicyValueNet(
hidden_sizes=hidden_sizes,
action_spec=env.action_spec(),
)
agent = actor_critic.ActorCritic(
obs_spec=env.observation_spec(),
action_spec=env.action_spec(),
network=network,
optimizer=snt.optimizers.Adam(learning_rate=FLAGS.learning_rate),
max_sequence_length=FLAGS.sequence_length,
td_lambda=FLAGS.td_lambda,
discount=FLAGS.discount,
seed=FLAGS.seed,
)
num_episodes = FLAGS.num_episodes or getattr(env, 'bsuite_num_episodes')
experiment.run(
agent=agent,
environment=env,
num_episodes=num_episodes,
verbose=FLAGS.verbose)
return bsuite_id
def main(_):
# Parses whether to run a single bsuite_id, or multiprocess sweep.
bsuite_id = FLAGS.bsuite_id
if bsuite_id in sweep.SWEEP:
print(f'Running single experiment: bsuite_id={bsuite_id}.')
run(bsuite_id)
elif hasattr(sweep, bsuite_id):
bsuite_sweep = getattr(sweep, bsuite_id)
    print(f'Running sweep over bsuite_id in sweep.{bsuite_id}')
FLAGS.verbose = False
pool.map_mpi(run, bsuite_sweep)
else:
raise ValueError(f'Invalid flag: bsuite_id={bsuite_id}.')
if __name__ == '__main__':
app.run(main)
|
bsuite-master
|
bsuite/baselines/tf/actor_critic/run.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple TensorFlow 2-based implementation of the actor-critic algorithm."""
from bsuite.baselines.tf.actor_critic.agent import ActorCritic
from bsuite.baselines.tf.actor_critic.agent import default_agent
from bsuite.baselines.tf.actor_critic.agent import PolicyValueNet
|
bsuite-master
|
bsuite/baselines/tf/actor_critic/__init__.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple TensorFlow-based implementation of the actor-critic algorithm.
Reference: "Simple Statistical Gradient-Following Algorithms for Connectionist
Reinforcement Learning" (Williams, 1992).
Link: http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf.
"""
from typing import Sequence, Tuple
from bsuite.baselines import base
from bsuite.baselines.utils import sequence
import dm_env
from dm_env import specs
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
import tree
import trfl
tfd = tfp.distributions
class ActorCritic(base.Agent):
"""A simple TensorFlow-based feedforward actor-critic implementation."""
def __init__(
self,
obs_spec: specs.Array,
action_spec: specs.Array,
network: 'PolicyValueNet',
optimizer: snt.Optimizer,
max_sequence_length: int,
td_lambda: float,
discount: float,
seed: int,
):
"""A simple actor-critic agent."""
# Internalise hyperparameters.
tf.random.set_seed(seed)
self._td_lambda = td_lambda
self._discount = discount
# Internalise network and optimizer.
self._network = network
self._optimizer = optimizer
# Create windowed buffer for learning from trajectories.
self._buffer = sequence.Buffer(obs_spec, action_spec, max_sequence_length)
@tf.function
def _sample_policy(self, inputs: tf.Tensor) -> tf.Tensor:
policy, _ = self._network(inputs)
action = policy.sample()
return tf.squeeze(action)
@tf.function
def _step(self, trajectory: sequence.Trajectory):
"""Do a batch of SGD on the actor + critic loss."""
observations, actions, rewards, discounts = trajectory
# Add dummy batch dimensions.
rewards = tf.expand_dims(rewards, axis=-1) # [T, 1]
discounts = tf.expand_dims(discounts, axis=-1) # [T, 1]
observations = tf.expand_dims(observations, axis=1) # [T+1, 1, ...]
# Extract final observation for bootstrapping.
observations, final_observation = observations[:-1], observations[-1]
with tf.GradientTape() as tape:
# Build actor and critic losses.
policies, values = snt.BatchApply(self._network)(observations)
_, bootstrap_value = self._network(final_observation)
critic_loss, (advantages, _) = trfl.td_lambda(
state_values=values,
rewards=rewards,
pcontinues=self._discount * discounts,
bootstrap_value=bootstrap_value,
lambda_=self._td_lambda)
advantages = tf.squeeze(advantages, axis=-1) # [T]
actor_loss = -policies.log_prob(actions) * tf.stop_gradient(advantages)
loss = tf.reduce_sum(actor_loss) + critic_loss
gradients = tape.gradient(loss, self._network.trainable_variables)
self._optimizer.apply(gradients, self._network.trainable_variables)
def select_action(self, timestep: dm_env.TimeStep) -> base.Action:
"""Selects actions according to the latest softmax policy."""
observation = tf.expand_dims(timestep.observation, axis=0)
action = self._sample_policy(observation)
return action.numpy()
def update(
self,
timestep: dm_env.TimeStep,
action: base.Action,
new_timestep: dm_env.TimeStep,
):
"""Receives a transition and performs a learning update."""
self._buffer.append(timestep, action, new_timestep)
# When the batch is full, do a step of SGD.
if self._buffer.full() or new_timestep.last():
trajectory = self._buffer.drain()
trajectory = tree.map_structure(tf.convert_to_tensor, trajectory)
self._step(trajectory)
class PolicyValueNet(snt.Module):
"""A simple multilayer perceptron with a value and a policy head."""
def __init__(self, hidden_sizes: Sequence[int],
action_spec: specs.DiscreteArray):
super().__init__(name='policy_value_net')
self._torso = snt.Sequential([
snt.Flatten(),
snt.nets.MLP(hidden_sizes, activate_final=True),
])
self._policy_head = snt.Linear(action_spec.num_values)
self._value_head = snt.Linear(1)
self._action_dtype = action_spec.dtype
def __call__(self, inputs: tf.Tensor) -> Tuple[tfd.Distribution, tf.Tensor]:
"""Returns a (policy, value) pair: (pi(.|s), V(s))."""
embedding = self._torso(inputs)
logits = self._policy_head(embedding) # [B, A]
value = tf.squeeze(self._value_head(embedding), axis=-1) # [B]
policy = tfd.Categorical(logits, dtype=self._action_dtype)
return policy, value
def default_agent(obs_spec: specs.Array,
action_spec: specs.DiscreteArray) -> base.Agent:
"""Initialize a DQN agent with default parameters."""
network = PolicyValueNet(
hidden_sizes=[64, 64],
action_spec=action_spec,
)
return ActorCritic(
obs_spec=obs_spec,
action_spec=action_spec,
network=network,
optimizer=snt.optimizers.Adam(learning_rate=3e-3),
max_sequence_length=32,
td_lambda=0.9,
discount=0.99,
seed=42,
)
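if __name__ == '__main__':
  # A minimal smoke-test sketch on made-up specs; the 4-dimensional
  # observation and three actions are illustrative values only.
  toy_obs_spec = specs.Array(shape=(4,), dtype='float32')
  toy_action_spec = specs.DiscreteArray(num_values=3)
  demo_agent = default_agent(toy_obs_spec, toy_action_spec)
  first_step = dm_env.restart(tf.zeros((4,), dtype=tf.float32))
  print('Sampled action:', demo_agent.select_action(first_step))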
|
bsuite-master
|
bsuite/baselines/tf/actor_critic/agent.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic test coverage for agent training."""
from absl.testing import absltest
from absl.testing import parameterized
from bsuite import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.tf import actor_critic
class RunTest(parameterized.TestCase):
@parameterized.parameters(*sweep.TESTING)
def test_run(self, bsuite_id: str):
env = bsuite.load_from_id(bsuite_id)
agent = actor_critic.default_agent(
env.observation_spec(), env.action_spec())
experiment.run(
agent=agent,
environment=env,
num_episodes=5)
if __name__ == '__main__':
absltest.main()
|
bsuite-master
|
bsuite/baselines/tf/actor_critic/run_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Run agent on a bsuite experiment."""
from absl import app
from absl import flags
import bsuite
from bsuite import sweep
from bsuite.baselines import experiment
from bsuite.baselines.tf import boot_dqn
from bsuite.baselines.utils import pool
import sonnet as snt
# Internal imports.
# Experiment flags.
flags.DEFINE_string(
'bsuite_id', 'catch/0', 'BSuite identifier. '
'This global flag can be used to control which environment is loaded.')
flags.DEFINE_string('save_path', '/tmp/bsuite', 'where to save bsuite results')
flags.DEFINE_enum('logging_mode', 'csv', ['csv', 'sqlite', 'terminal'],
'which form of logging to use for bsuite results')
flags.DEFINE_boolean('overwrite', False, 'overwrite csv logging if found')
flags.DEFINE_integer('num_episodes', None, 'Overrides number of training eps.')
# Network options
flags.DEFINE_integer('num_ensemble', 20, 'number of ensemble networks')
flags.DEFINE_integer('num_hidden_layers', 2, 'number of hidden layers')
flags.DEFINE_integer('num_units', 50, 'number of units per hidden layer')
flags.DEFINE_float('prior_scale', 3., 'scale for additive prior network')
# Core DQN options
flags.DEFINE_integer('batch_size', 128, 'size of batches sampled from replay')
flags.DEFINE_float('discount', .99, 'discounting on the agent side')
flags.DEFINE_integer('replay_capacity', 100000, 'size of the replay buffer')
flags.DEFINE_integer('min_replay_size', 128, 'min transitions for sampling')
flags.DEFINE_integer('sgd_period', 1, 'steps between online net updates')
flags.DEFINE_integer('target_update_period', 4,
'steps between target net updates')
flags.DEFINE_float('mask_prob', 0.5, 'probability for bootstrap mask')
flags.DEFINE_float('noise_scale', 0.0, 'std of additive target noise')
flags.DEFINE_float('learning_rate', 1e-3, 'learning rate for optimizer')
flags.DEFINE_integer('seed', 42, 'seed for random number generation')
flags.DEFINE_float('epsilon', 0.0, 'fraction of exploratory random actions')
flags.DEFINE_boolean('verbose', True, 'whether to log to std output')
FLAGS = flags.FLAGS
def run(bsuite_id: str) -> str:
"""Runs a BDQN agent on a given bsuite environment, logging to CSV."""
env = bsuite.load_and_record(
bsuite_id=bsuite_id,
save_path=FLAGS.save_path,
logging_mode=FLAGS.logging_mode,
overwrite=FLAGS.overwrite,
)
ensemble = boot_dqn.make_ensemble(
num_actions=env.action_spec().num_values,
num_ensemble=FLAGS.num_ensemble,
num_hidden_layers=FLAGS.num_hidden_layers,
num_units=FLAGS.num_units,
prior_scale=FLAGS.prior_scale)
agent = boot_dqn.BootstrappedDqn(
obs_spec=env.observation_spec(),
action_spec=env.action_spec(),
ensemble=ensemble,
batch_size=FLAGS.batch_size,
discount=FLAGS.discount,
replay_capacity=FLAGS.replay_capacity,
min_replay_size=FLAGS.min_replay_size,
sgd_period=FLAGS.sgd_period,
target_update_period=FLAGS.target_update_period,
optimizer=snt.optimizers.Adam(learning_rate=FLAGS.learning_rate),
mask_prob=FLAGS.mask_prob,
noise_scale=FLAGS.noise_scale,
epsilon_fn=lambda x: FLAGS.epsilon,
seed=FLAGS.seed)
num_episodes = FLAGS.num_episodes or getattr(env, 'bsuite_num_episodes')
experiment.run(
agent=agent,
environment=env,
num_episodes=num_episodes,
verbose=FLAGS.verbose)
return bsuite_id
def main(argv):
# Parses whether to run a single bsuite_id, or multiprocess sweep.
del argv # Unused.
bsuite_id = FLAGS.bsuite_id
if bsuite_id in sweep.SWEEP:
print(f'Running single experiment: bsuite_id={bsuite_id}.')
run(bsuite_id)
elif hasattr(sweep, bsuite_id):
bsuite_sweep = getattr(sweep, bsuite_id)
    print(f'Running sweep over bsuite_id in sweep.{bsuite_id}')
FLAGS.verbose = False
pool.map_mpi(run, bsuite_sweep)
else:
raise ValueError(f'Invalid flag: bsuite_id={bsuite_id}.')
if __name__ == '__main__':
app.run(main)
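# Example invocation exercising the bootstrap-specific flags (values are
# illustrative only):
#   python run.py --bsuite_id=catch/0 --num_ensemble=10 --prior_scale=3.0 \
#     --mask_prob=0.5 --noise_scale=0.0 --save_path=/tmp/bsuite_boot_dqn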
|
bsuite-master
|
bsuite/baselines/tf/boot_dqn/run.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple implementation of Bootstrapped DQN with prior networks."""
from bsuite.baselines.tf.boot_dqn.agent import BootstrappedDqn
from bsuite.baselines.tf.boot_dqn.agent import default_agent
from bsuite.baselines.tf.boot_dqn.agent import make_ensemble
|
bsuite-master
|
bsuite/baselines/tf/boot_dqn/__init__.py
|