repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
rllab | rllab-master/sandbox/rocky/tf/misc/__init__.py | 1 | 0 | 0 | py | |
rllab | rllab-master/sandbox/rocky/tf/samplers/batch_sampler.py | from rllab.sampler.base import BaseSampler
from rllab.sampler import parallel_sampler
from rllab.sampler.stateful_pool import singleton_pool
import tensorflow as tf
def worker_init_tf(G):
G.sess = tf.Session()
G.sess.__enter__()
def worker_init_tf_vars(G):
G.sess.run(tf.global_variables_initializer())
class BatchSampler(BaseSampler):
def start_worker(self):
if singleton_pool.n_parallel > 1:
singleton_pool.run_each(worker_init_tf)
parallel_sampler.populate_task(self.algo.env, self.algo.policy)
if singleton_pool.n_parallel > 1:
singleton_pool.run_each(worker_init_tf_vars)
def shutdown_worker(self):
parallel_sampler.terminate_task(scope=self.algo.scope)
def obtain_samples(self, itr):
cur_policy_params = self.algo.policy.get_param_values()
cur_env_params = self.algo.env.get_param_values()
paths = parallel_sampler.sample_paths(
policy_params=cur_policy_params,
env_params=cur_env_params,
max_samples=self.algo.batch_size,
max_path_length=self.algo.max_path_length,
scope=self.algo.scope,
)
if self.algo.whole_paths:
return paths
else:
paths_truncated = parallel_sampler.truncate_paths(paths, self.algo.batch_size)
return paths_truncated
| 1,376 | 31.785714 | 90 | py |
rllab | rllab-master/sandbox/rocky/tf/samplers/__init__.py | 1 | 0 | 0 | py | |
rllab | rllab-master/sandbox/rocky/tf/samplers/vectorized_sampler.py | import pickle
import tensorflow as tf
from rllab.sampler.base import BaseSampler
from sandbox.rocky.tf.envs.parallel_vec_env_executor import ParallelVecEnvExecutor
from sandbox.rocky.tf.envs.vec_env_executor import VecEnvExecutor
from rllab.misc import tensor_utils
import numpy as np
from rllab.sampler.stateful_pool import ProgBarCounter
import rllab.misc.logger as logger
import itertools
class VectorizedSampler(BaseSampler):
def __init__(self, algo, n_envs=None):
super(VectorizedSampler, self).__init__(algo)
self.n_envs = n_envs
def start_worker(self):
n_envs = self.n_envs
if n_envs is None:
n_envs = int(self.algo.batch_size / self.algo.max_path_length)
n_envs = max(1, min(n_envs, 100))
if getattr(self.algo.env, 'vectorized', False):
self.vec_env = self.algo.env.vec_env_executor(n_envs=n_envs, max_path_length=self.algo.max_path_length)
else:
envs = [pickle.loads(pickle.dumps(self.algo.env)) for _ in range(n_envs)]
self.vec_env = VecEnvExecutor(
envs=envs,
max_path_length=self.algo.max_path_length
)
self.env_spec = self.algo.env.spec
def shutdown_worker(self):
self.vec_env.terminate()
def obtain_samples(self, itr):
logger.log("Obtaining samples for iteration %d..." % itr)
paths = []
n_samples = 0
obses = self.vec_env.reset()
dones = np.asarray([True] * self.vec_env.num_envs)
running_paths = [None] * self.vec_env.num_envs
pbar = ProgBarCounter(self.algo.batch_size)
policy_time = 0
env_time = 0
process_time = 0
policy = self.algo.policy
import time
while n_samples < self.algo.batch_size:
t = time.time()
policy.reset(dones)
actions, agent_infos = policy.get_actions(obses)
policy_time += time.time() - t
t = time.time()
next_obses, rewards, dones, env_infos = self.vec_env.step(actions)
env_time += time.time() - t
t = time.time()
agent_infos = tensor_utils.split_tensor_dict_list(agent_infos)
env_infos = tensor_utils.split_tensor_dict_list(env_infos)
if env_infos is None:
env_infos = [dict() for _ in range(self.vec_env.num_envs)]
if agent_infos is None:
agent_infos = [dict() for _ in range(self.vec_env.num_envs)]
for idx, observation, action, reward, env_info, agent_info, done in zip(itertools.count(), obses, actions,
rewards, env_infos, agent_infos,
dones):
if running_paths[idx] is None:
running_paths[idx] = dict(
observations=[],
actions=[],
rewards=[],
env_infos=[],
agent_infos=[],
)
running_paths[idx]["observations"].append(observation)
running_paths[idx]["actions"].append(action)
running_paths[idx]["rewards"].append(reward)
running_paths[idx]["env_infos"].append(env_info)
running_paths[idx]["agent_infos"].append(agent_info)
if done:
paths.append(dict(
observations=self.env_spec.observation_space.flatten_n(running_paths[idx]["observations"]),
actions=self.env_spec.action_space.flatten_n(running_paths[idx]["actions"]),
rewards=tensor_utils.stack_tensor_list(running_paths[idx]["rewards"]),
env_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx]["env_infos"]),
agent_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx]["agent_infos"]),
))
n_samples += len(running_paths[idx]["rewards"])
running_paths[idx] = None
process_time += time.time() - t
pbar.inc(len(obses))
obses = next_obses
pbar.stop()
logger.record_tabular("PolicyExecTime", policy_time)
logger.record_tabular("EnvExecTime", env_time)
logger.record_tabular("ProcessExecTime", process_time)
return paths
| 4,537 | 40.633028 | 118 | py |
rllab | rllab-master/sandbox/rocky/tf/regressors/gaussian_mlp_regressor.py | import numpy as np
import sandbox.rocky.tf.core.layers as L
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.core.network import MLP
from sandbox.rocky.tf.misc import tensor_utils
from sandbox.rocky.tf.optimizers.lbfgs_optimizer import LbfgsOptimizer
from sandbox.rocky.tf.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
from sandbox.rocky.tf.distributions.diagonal_gaussian import DiagonalGaussian
from rllab.core.serializable import Serializable
from rllab.misc import logger
import tensorflow as tf
class GaussianMLPRegressor(LayersPowered, Serializable):
"""
A class for performing regression by fitting a Gaussian distribution to the outputs.
"""
def __init__(
self,
name,
input_shape,
output_dim,
mean_network=None,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
optimizer=None,
use_trust_region=True,
step_size=0.01,
learn_std=True,
init_std=1.0,
adaptive_std=False,
std_share_network=False,
std_hidden_sizes=(32, 32),
std_nonlinearity=None,
normalize_inputs=True,
normalize_outputs=True,
subsample_factor=1.0
):
"""
:param input_shape: Shape of the input data.
:param output_dim: Dimension of output.
:param hidden_sizes: Number of hidden units of each layer of the mean network.
:param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
:param optimizer: Optimizer for minimizing the negative log-likelihood.
:param use_trust_region: Whether to use trust region constraint.
:param step_size: KL divergence constraint for each iteration
:param learn_std: Whether to learn the standard deviations. Only effective if adaptive_std is False. If
adaptive_std is True, this parameter is ignored, and the weights for the std network are always learned.
:param adaptive_std: Whether to make the std a function of the states.
:param std_share_network: Whether to use the same network as the mean.
:param std_hidden_sizes: Number of hidden units of each layer of the std network. Only used if
`std_share_network` is False. It defaults to the same architecture as the mean.
:param std_nonlinearity: Non-linearity used for each layer of the std network. Only used if `std_share_network`
is False. It defaults to the same non-linearity as the mean.
"""
Serializable.quick_init(self, locals())
with tf.variable_scope(name):
if optimizer is None:
if use_trust_region:
optimizer = PenaltyLbfgsOptimizer("optimizer")
else:
optimizer = LbfgsOptimizer("optimizer")
self._optimizer = optimizer
self._subsample_factor = subsample_factor
if mean_network is None:
mean_network = MLP(
name="mean_network",
input_shape=input_shape,
output_dim=output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=None,
)
l_mean = mean_network.output_layer
if adaptive_std:
l_log_std = MLP(
name="log_std_network",
input_shape=input_shape,
input_var=mean_network.input_layer.input_var,
output_dim=output_dim,
hidden_sizes=std_hidden_sizes,
hidden_nonlinearity=std_nonlinearity,
output_nonlinearity=None,
).output_layer
else:
l_log_std = L.ParamLayer(
mean_network.input_layer,
num_units=output_dim,
param=tf.constant_initializer(np.log(init_std)),
name="output_log_std",
trainable=learn_std,
)
LayersPowered.__init__(self, [l_mean, l_log_std])
xs_var = mean_network.input_layer.input_var
ys_var = tf.placeholder(dtype=tf.float32, name="ys", shape=(None, output_dim))
old_means_var = tf.placeholder(dtype=tf.float32, name="ys", shape=(None, output_dim))
old_log_stds_var = tf.placeholder(dtype=tf.float32, name="old_log_stds", shape=(None, output_dim))
x_mean_var = tf.Variable(
np.zeros((1,) + input_shape, dtype=np.float32),
name="x_mean",
)
x_std_var = tf.Variable(
np.ones((1,) + input_shape, dtype=np.float32),
name="x_std",
)
y_mean_var = tf.Variable(
np.zeros((1, output_dim), dtype=np.float32),
name="y_mean",
)
y_std_var = tf.Variable(
np.ones((1, output_dim), dtype=np.float32),
name="y_std",
)
normalized_xs_var = (xs_var - x_mean_var) / x_std_var
normalized_ys_var = (ys_var - y_mean_var) / y_std_var
normalized_means_var = L.get_output(l_mean, {mean_network.input_layer: normalized_xs_var})
normalized_log_stds_var = L.get_output(l_log_std, {mean_network.input_layer: normalized_xs_var})
means_var = normalized_means_var * y_std_var + y_mean_var
log_stds_var = normalized_log_stds_var + tf.log(y_std_var)
normalized_old_means_var = (old_means_var - y_mean_var) / y_std_var
normalized_old_log_stds_var = old_log_stds_var - tf.log(y_std_var)
dist = self._dist = DiagonalGaussian(output_dim)
normalized_dist_info_vars = dict(mean=normalized_means_var, log_std=normalized_log_stds_var)
mean_kl = tf.reduce_mean(dist.kl_sym(
dict(mean=normalized_old_means_var, log_std=normalized_old_log_stds_var),
normalized_dist_info_vars,
))
loss = - tf.reduce_mean(dist.log_likelihood_sym(normalized_ys_var, normalized_dist_info_vars))
self._f_predict = tensor_utils.compile_function([xs_var], means_var)
self._f_pdists = tensor_utils.compile_function([xs_var], [means_var, log_stds_var])
self._l_mean = l_mean
self._l_log_std = l_log_std
optimizer_args = dict(
loss=loss,
target=self,
network_outputs=[normalized_means_var, normalized_log_stds_var],
)
if use_trust_region:
optimizer_args["leq_constraint"] = (mean_kl, step_size)
optimizer_args["inputs"] = [xs_var, ys_var, old_means_var, old_log_stds_var]
else:
optimizer_args["inputs"] = [xs_var, ys_var]
self._optimizer.update_opt(**optimizer_args)
self._use_trust_region = use_trust_region
self._name = name
self._normalize_inputs = normalize_inputs
self._normalize_outputs = normalize_outputs
self._mean_network = mean_network
self._x_mean_var = x_mean_var
self._x_std_var = x_std_var
self._y_mean_var = y_mean_var
self._y_std_var = y_std_var
def fit(self, xs, ys):
if self._subsample_factor < 1:
num_samples_tot = xs.shape[0]
idx = np.random.randint(0, num_samples_tot, int(num_samples_tot * self._subsample_factor))
xs, ys = xs[idx], ys[idx]
sess = tf.get_default_session()
if self._normalize_inputs:
# recompute normalizing constants for inputs
sess.run([
tf.assign(self._x_mean_var, np.mean(xs, axis=0, keepdims=True)),
tf.assign(self._x_std_var, np.std(xs, axis=0, keepdims=True) + 1e-8),
])
if self._normalize_outputs:
# recompute normalizing constants for outputs
sess.run([
tf.assign(self._y_mean_var, np.mean(ys, axis=0, keepdims=True)),
tf.assign(self._y_std_var, np.std(ys, axis=0, keepdims=True) + 1e-8),
])
if self._use_trust_region:
old_means, old_log_stds = self._f_pdists(xs)
inputs = [xs, ys, old_means, old_log_stds]
else:
inputs = [xs, ys]
loss_before = self._optimizer.loss(inputs)
if self._name:
prefix = self._name + "_"
else:
prefix = ""
logger.record_tabular(prefix + 'LossBefore', loss_before)
self._optimizer.optimize(inputs)
loss_after = self._optimizer.loss(inputs)
logger.record_tabular(prefix + 'LossAfter', loss_after)
if self._use_trust_region:
logger.record_tabular(prefix + 'MeanKL', self._optimizer.constraint_val(inputs))
logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)
def predict(self, xs):
"""
Return the maximum likelihood estimate of the predicted y.
:param xs:
:return:
"""
return self._f_predict(xs)
def sample_predict(self, xs):
"""
Sample one possible output from the prediction distribution.
:param xs:
:return:
"""
means, log_stds = self._f_pdists(xs)
return self._dist.sample(dict(mean=means, log_std=log_stds))
def predict_log_likelihood(self, xs, ys):
means, log_stds = self._f_pdists(xs)
return self._dist.log_likelihood(ys, dict(mean=means, log_std=log_stds))
def log_likelihood_sym(self, x_var, y_var):
normalized_xs_var = (x_var - self._x_mean_var) / self._x_std_var
normalized_means_var, normalized_log_stds_var = \
L.get_output([self._l_mean, self._l_log_std], {self._mean_network.input_layer: normalized_xs_var})
means_var = normalized_means_var * self._y_std_var + self._y_mean_var
log_stds_var = normalized_log_stds_var + TT.log(self._y_std_var)
return self._dist.log_likelihood_sym(y_var, dict(mean=means_var, log_std=log_stds_var))
def get_param_values(self, **tags):
return LayersPowered.get_param_values(self, **tags)
def set_param_values(self, flattened_params, **tags):
LayersPowered.set_param_values(self, flattened_params, **tags)
| 10,577 | 40.810277 | 119 | py |
rllab | rllab-master/sandbox/rocky/tf/regressors/categorical_mlp_regressor.py |
import numpy as np
import tensorflow as tf
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.core.network import MLP
from sandbox.rocky.tf.misc import tensor_utils
from sandbox.rocky.tf.distributions.categorical import Categorical
from sandbox.rocky.tf.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
from sandbox.rocky.tf.optimizers.lbfgs_optimizer import LbfgsOptimizer
from sandbox.rocky.tf.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer
import sandbox.rocky.tf.core.layers as L
from rllab.core.serializable import Serializable
from rllab.misc import ext
from rllab.misc import logger
NONE = list()
class CategoricalMLPRegressor(LayersPowered, Serializable):
"""
A class for performing regression (or classification, really) by fitting a categorical distribution to the outputs.
Assumes that the outputs will be always a one hot vector.
"""
def __init__(
self,
name,
input_shape,
output_dim,
prob_network=None,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
optimizer=None,
tr_optimizer=None,
use_trust_region=True,
step_size=0.01,
normalize_inputs=True,
no_initial_trust_region=True,
):
"""
:param input_shape: Shape of the input data.
:param output_dim: Dimension of output.
:param hidden_sizes: Number of hidden units of each layer of the mean network.
:param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
:param optimizer: Optimizer for minimizing the negative log-likelihood.
:param use_trust_region: Whether to use trust region constraint.
:param step_size: KL divergence constraint for each iteration
"""
Serializable.quick_init(self, locals())
with tf.variable_scope(name):
if optimizer is None:
optimizer = LbfgsOptimizer(name="optimizer")
if tr_optimizer is None:
tr_optimizer = ConjugateGradientOptimizer()
self.output_dim = output_dim
self.optimizer = optimizer
self.tr_optimizer = tr_optimizer
if prob_network is None:
prob_network = MLP(
input_shape=input_shape,
output_dim=output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=tf.nn.softmax,
name="prob_network"
)
l_prob = prob_network.output_layer
LayersPowered.__init__(self, [l_prob])
xs_var = prob_network.input_layer.input_var
ys_var = tf.placeholder(dtype=tf.float32, shape=[None, output_dim], name="ys")
old_prob_var = tf.placeholder(dtype=tf.float32, shape=[None, output_dim], name="old_prob")
x_mean_var = tf.get_variable(
name="x_mean",
shape=(1,) + input_shape,
initializer=tf.constant_initializer(0., dtype=tf.float32)
)
x_std_var = tf.get_variable(
name="x_std",
shape=(1,) + input_shape,
initializer=tf.constant_initializer(1., dtype=tf.float32)
)
normalized_xs_var = (xs_var - x_mean_var) / x_std_var
prob_var = L.get_output(l_prob, {prob_network.input_layer: normalized_xs_var})
old_info_vars = dict(prob=old_prob_var)
info_vars = dict(prob=prob_var)
dist = self._dist = Categorical(output_dim)
mean_kl = tf.reduce_mean(dist.kl_sym(old_info_vars, info_vars))
loss = - tf.reduce_mean(dist.log_likelihood_sym(ys_var, info_vars))
predicted = tensor_utils.to_onehot_sym(tf.argmax(prob_var, axis=1), output_dim)
self.prob_network = prob_network
self.f_predict = tensor_utils.compile_function([xs_var], predicted)
self.f_prob = tensor_utils.compile_function([xs_var], prob_var)
self.l_prob = l_prob
self.optimizer.update_opt(loss=loss, target=self, network_outputs=[prob_var], inputs=[xs_var, ys_var])
self.tr_optimizer.update_opt(loss=loss, target=self, network_outputs=[prob_var],
inputs=[xs_var, ys_var, old_prob_var],
leq_constraint=(mean_kl, step_size)
)
self.use_trust_region = use_trust_region
self.name = name
self.normalize_inputs = normalize_inputs
self.x_mean_var = x_mean_var
self.x_std_var = x_std_var
self.first_optimized = not no_initial_trust_region
def fit(self, xs, ys):
if self.normalize_inputs:
# recompute normalizing constants for inputs
new_mean = np.mean(xs, axis=0, keepdims=True)
new_std = np.std(xs, axis=0, keepdims=True) + 1e-8
tf.get_default_session().run(tf.group(
tf.assign(self.x_mean_var, new_mean),
tf.assign(self.x_std_var, new_std),
))
if self.use_trust_region and self.first_optimized:
old_prob = self.f_prob(xs)
inputs = [xs, ys, old_prob]
optimizer = self.tr_optimizer
else:
inputs = [xs, ys]
optimizer = self.optimizer
loss_before = optimizer.loss(inputs)
if self.name:
prefix = self.name + "_"
else:
prefix = ""
logger.record_tabular(prefix + 'LossBefore', loss_before)
optimizer.optimize(inputs)
loss_after = optimizer.loss(inputs)
logger.record_tabular(prefix + 'LossAfter', loss_after)
logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)
self.first_optimized = True
def predict(self, xs):
return self.f_predict(np.asarray(xs))
def predict_log_likelihood(self, xs, ys):
prob = self.f_prob(np.asarray(xs))
return self._dist.log_likelihood(np.asarray(ys), dict(prob=prob))
def dist_info_sym(self, x_var):
normalized_xs_var = (x_var - self.x_mean_var) / self.x_std_var
prob = L.get_output(self.l_prob, {self.prob_network.input_layer: normalized_xs_var})
return dict(prob=prob)
def log_likelihood_sym(self, x_var, y_var):
normalized_xs_var = (x_var - self.x_mean_var) / self.x_std_var
prob = L.get_output(self.l_prob, {self.prob_network.input_layer: normalized_xs_var})
return self._dist.log_likelihood_sym(y_var, dict(prob=prob))
def get_param_values(self, **tags):
return LayersPowered.get_param_values(self, **tags)
def set_param_values(self, flattened_params, **tags):
return LayersPowered.set_param_values(self, flattened_params, **tags)
| 7,049 | 38.830508 | 119 | py |
rllab | rllab-master/sandbox/rocky/tf/regressors/bernoulli_mlp_regressor.py |
import sandbox.rocky.tf.core.layers as L
import numpy as np
import tensorflow as tf
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.core.network import MLP
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.distributions.bernoulli import Bernoulli
from sandbox.rocky.tf.misc import tensor_utils
from rllab.misc import logger
from sandbox.rocky.tf.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer
from sandbox.rocky.tf.optimizers.lbfgs_optimizer import LbfgsOptimizer
class BernoulliMLPRegressor(LayersPowered, Serializable):
"""
A class for performing regression (or classification, really) by fitting a bernoulli distribution to each of the
output units.
"""
def __init__(
self,
input_shape,
output_dim,
name,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.relu,
optimizer=None,
tr_optimizer=None,
use_trust_region=True,
step_size=0.01,
normalize_inputs=True,
no_initial_trust_region=True,
):
"""
:param input_shape: Shape of the input data.
:param output_dim: Dimension of output.
:param hidden_sizes: Number of hidden units of each layer of the mean network.
:param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
:param optimizer: Optimizer for minimizing the negative log-likelihood.
:param use_trust_region: Whether to use trust region constraint.
:param step_size: KL divergence constraint for each iteration
"""
Serializable.quick_init(self, locals())
with tf.variable_scope(name):
if optimizer is None:
optimizer = LbfgsOptimizer(name="optimizer")
if tr_optimizer is None:
tr_optimizer = ConjugateGradientOptimizer()
self.output_dim = output_dim
self.optimizer = optimizer
self.tr_optimizer = tr_optimizer
p_network = MLP(
input_shape=input_shape,
output_dim=output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=tf.nn.sigmoid,
name="p_network"
)
l_p = p_network.output_layer
LayersPowered.__init__(self, [l_p])
xs_var = p_network.input_layer.input_var
ys_var = tf.placeholder(dtype=tf.float32, shape=(None, output_dim), name="ys")
old_p_var = tf.placeholder(dtype=tf.float32, shape=(None, output_dim), name="old_p")
x_mean_var = tf.get_variable(name="x_mean", initializer=tf.zeros_initializer(), shape=(1,) + input_shape)
x_std_var = tf.get_variable(name="x_std", initializer=tf.ones_initializer(), shape=(1,) + input_shape)
normalized_xs_var = (xs_var - x_mean_var) / x_std_var
p_var = L.get_output(l_p, {p_network.input_layer: normalized_xs_var})
old_info_vars = dict(p=old_p_var)
info_vars = dict(p=p_var)
dist = self._dist = Bernoulli(output_dim)
mean_kl = tf.reduce_mean(dist.kl_sym(old_info_vars, info_vars))
loss = - tf.reduce_mean(dist.log_likelihood_sym(ys_var, info_vars))
predicted = p_var >= 0.5
self.f_predict = tensor_utils.compile_function([xs_var], predicted)
self.f_p = tensor_utils.compile_function([xs_var], p_var)
self.l_p = l_p
self.optimizer.update_opt(loss=loss, target=self, network_outputs=[p_var], inputs=[xs_var, ys_var])
self.tr_optimizer.update_opt(loss=loss, target=self, network_outputs=[p_var],
inputs=[xs_var, ys_var, old_p_var],
leq_constraint=(mean_kl, step_size)
)
self.use_trust_region = use_trust_region
self.name = name
self.normalize_inputs = normalize_inputs
self.x_mean_var = x_mean_var
self.x_std_var = x_std_var
self.first_optimized = not no_initial_trust_region
def fit(self, xs, ys):
if self.normalize_inputs:
# recompute normalizing constants for inputs
new_mean = np.mean(xs, axis=0, keepdims=True)
new_std = np.std(xs, axis=0, keepdims=True) + 1e-8
tf.get_default_session().run(tf.group(
tf.assign(self.x_mean_var, new_mean),
tf.assign(self.x_std_var, new_std),
))
# self._x_mean_var.set_value(np.mean(xs, axis=0, keepdims=True))
# self._x_std_var.set_value(np.std(xs, axis=0, keepdims=True) + 1e-8)
if self.use_trust_region and self.first_optimized:
old_p = self.f_p(xs)
inputs = [xs, ys, old_p]
optimizer = self.tr_optimizer
else:
inputs = [xs, ys]
optimizer = self.optimizer
loss_before = optimizer.loss(inputs)
if self.name:
prefix = self.name + "_"
else:
prefix = ""
logger.record_tabular(prefix + 'LossBefore', loss_before)
optimizer.optimize(inputs)
loss_after = optimizer.loss(inputs)
logger.record_tabular(prefix + 'LossAfter', loss_after)
logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)
self.first_optimized = True
def predict(self, xs):
return self.f_predict(np.asarray(xs))
def sample_predict(self, xs):
p = self.f_p(xs)
return self._dist.sample(dict(p=p))
def predict_log_likelihood(self, xs, ys):
p = self.f_p(np.asarray(xs))
return self._dist.log_likelihood(np.asarray(ys), dict(p=p))
def get_param_values(self, **tags):
return LayersPowered.get_param_values(self, **tags)
def set_param_values(self, flattened_params, **tags):
return LayersPowered.set_param_values(self, flattened_params, **tags)
| 6,158 | 37.735849 | 117 | py |
rllab | rllab-master/sandbox/rocky/tf/regressors/__init__.py | 1 | 0 | 0 | py | |
rllab | rllab-master/sandbox/rocky/tf/regressors/deterministic_mlp_regressor.py |
import numpy as np
import tensorflow as tf
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.core.network import MLP
from sandbox.rocky.tf.misc import tensor_utils
from sandbox.rocky.tf.distributions.categorical import Categorical
from sandbox.rocky.tf.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
from sandbox.rocky.tf.optimizers.lbfgs_optimizer import LbfgsOptimizer
import sandbox.rocky.tf.core.layers as L
from rllab.core.serializable import Serializable
from rllab.misc import ext
from rllab.misc import logger
NONE = list()
class DeterministicMLPRegressor(LayersPowered, Serializable):
"""
A class for performing nonlinear regression.
"""
def __init__(
self,
name,
input_shape,
output_dim,
network=None,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
output_nonlinearity=None,
optimizer=None,
normalize_inputs=True,
):
"""
:param input_shape: Shape of the input data.
:param output_dim: Dimension of output.
:param hidden_sizes: Number of hidden units of each layer of the mean network.
:param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
:param optimizer: Optimizer for minimizing the negative log-likelihood.
"""
Serializable.quick_init(self, locals())
with tf.variable_scope(name):
if optimizer is None:
optimizer = LbfgsOptimizer(name="optimizer")
self.output_dim = output_dim
self.optimizer = optimizer
if network is None:
network = MLP(
input_shape=input_shape,
output_dim=output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=output_nonlinearity,
name="network"
)
l_out = network.output_layer
LayersPowered.__init__(self, [l_out])
xs_var = network.input_layer.input_var
ys_var = tf.placeholder(dtype=tf.float32, shape=[None, output_dim], name="ys")
x_mean_var = tf.get_variable(
name="x_mean",
shape=(1,) + input_shape,
initializer=tf.constant_initializer(0., dtype=tf.float32)
)
x_std_var = tf.get_variable(
name="x_std",
shape=(1,) + input_shape,
initializer=tf.constant_initializer(1., dtype=tf.float32)
)
normalized_xs_var = (xs_var - x_mean_var) / x_std_var
fit_ys_var = L.get_output(l_out, {network.input_layer: normalized_xs_var})
loss = - tf.reduce_mean(tf.square(fit_ys_var - ys_var))
self.f_predict = tensor_utils.compile_function([xs_var], fit_ys_var)
optimizer_args = dict(
loss=loss,
target=self,
network_outputs=[fit_ys_var],
)
optimizer_args["inputs"] = [xs_var, ys_var]
self.optimizer.update_opt(**optimizer_args)
self.name = name
self.l_out = l_out
self.normalize_inputs = normalize_inputs
self.x_mean_var = x_mean_var
self.x_std_var = x_std_var
def predict_sym(self, xs):
return L.get_output(self.l_out, xs)
# def fit(self, xs, ys):
# if self._normalize_inputs:
# # recompute normalizing constants for inputs
# new_mean = np.mean(xs, axis=0, keepdims=True)
# new_std = np.std(xs, axis=0, keepdims=True) + 1e-8
# tf.get_default_session().run(tf.group(
# tf.assign(self._x_mean_var, new_mean),
# tf.assign(self._x_std_var, new_std),
# ))
# inputs = [xs, ys]
# loss_before = self._optimizer.loss(inputs)
# if self._name:
# prefix = self._name + "_"
# else:
# prefix = ""
# logger.record_tabular(prefix + 'LossBefore', loss_before)
# self._optimizer.optimize(inputs)
# loss_after = self._optimizer.loss(inputs)
# logger.record_tabular(prefix + 'LossAfter', loss_after)
# logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)
def predict(self, xs):
return self.f_predict(np.asarray(xs))
def get_param_values(self, **tags):
return LayersPowered.get_param_values(self, **tags)
def set_param_values(self, flattened_params, **tags):
return LayersPowered.set_param_values(self, flattened_params, **tags)
| 4,785 | 32.468531 | 90 | py |
rllab | rllab-master/sandbox/rocky/tf/q_functions/base.py | from sandbox.rocky.tf.core.parameterized import Parameterized
class QFunction(Parameterized):
pass
| 104 | 20 | 61 | py |
rllab | rllab-master/sandbox/rocky/tf/q_functions/continuous_mlp_q_function.py | from sandbox.rocky.tf.q_functions.base import QFunction
from rllab.core.serializable import Serializable
from rllab.misc import ext
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.core.network import MLP
from sandbox.rocky.tf.core.layers import batch_norm
from sandbox.rocky.tf.distributions.categorical import Categorical
from sandbox.rocky.tf.policies.base import StochasticPolicy
from sandbox.rocky.tf.misc import tensor_utils
import tensorflow as tf
import sandbox.rocky.tf.core.layers as L
class ContinuousMLPQFunction(QFunction, LayersPowered, Serializable):
def __init__(
self,
env_spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.relu,
action_merge_layer=-2,
output_nonlinearity=None,
bn=False):
Serializable.quick_init(self, locals())
l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim), name="actions")
n_layers = len(hidden_sizes) + 1
if n_layers > 1:
action_merge_layer = \
(action_merge_layer % n_layers + n_layers) % n_layers
else:
action_merge_layer = 1
l_hidden = l_obs
for idx, size in enumerate(hidden_sizes):
if bn:
l_hidden = batch_norm(l_hidden)
if idx == action_merge_layer:
l_hidden = L.ConcatLayer([l_hidden, l_action])
l_hidden = L.DenseLayer(
l_hidden,
num_units=size,
nonlinearity=hidden_nonlinearity,
name="h%d" % (idx + 1)
)
if action_merge_layer == n_layers:
l_hidden = L.ConcatLayer([l_hidden, l_action])
l_output = L.DenseLayer(
l_hidden,
num_units=1,
nonlinearity=output_nonlinearity,
name="output"
)
output_var = L.get_output(l_output, deterministic=True)
self._f_qval = tensor_utils.compile_function([l_obs.input_var, l_action.input_var], output_var)
self._output_layer = l_output
self._obs_layer = l_obs
self._action_layer = l_action
self._output_nonlinearity = output_nonlinearity
LayersPowered.__init__(self, [l_output])
def get_qval(self, observations, actions):
return self._f_qval(observations, actions)
def get_qval_sym(self, obs_var, action_var, **kwargs):
qvals = L.get_output(
self._output_layer,
{self._obs_layer: obs_var, self._action_layer: action_var},
**kwargs
)
return tf.reshape(qvals, (-1,))
| 2,759 | 31.857143 | 103 | py |
rllab | rllab-master/sandbox/rocky/tf/q_functions/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/sandbox/rocky/tf/optimizers/first_order_optimizer.py |
from rllab.misc import ext
from rllab.misc import logger
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.misc import tensor_utils
# from rllab.algo.first_order_method import parse_update_method
from rllab.optimizers.minibatch_dataset import BatchDataset
from collections import OrderedDict
import tensorflow as tf
import time
from functools import partial
import pyprind
class FirstOrderOptimizer(Serializable):
    """
    Performs (stochastic) gradient descent, possibly using fancier methods like adam etc.

    The loss is minimized over mini-batches for up to ``max_epochs`` passes,
    stopping early once the epoch-to-epoch loss improvement falls below
    ``tolerance``.
    """
    def __init__(
            self,
            tf_optimizer_cls=None,
            tf_optimizer_args=None,
            # learning_rate=1e-3,
            max_epochs=1000,
            tolerance=1e-6,
            batch_size=32,
            callback=None,
            verbose=False,
            **kwargs):
        """
        :param tf_optimizer_cls: TensorFlow optimizer class; defaults to tf.train.AdamOptimizer.
        :param tf_optimizer_args: kwargs for the optimizer constructor; defaults to dict(learning_rate=1e-3).
        :param max_epochs: maximum number of passes over the dataset.
        :param tolerance: early-stopping threshold on the absolute loss improvement between epochs.
        :param batch_size: None or an integer. If None the whole dataset will be used.
        :param callback: optional callable invoked once per epoch with a dict of
            diagnostics (loss, params, itr, elapsed).
        :param verbose: if True, log per-epoch losses and show a progress bar.
        :param kwargs: ignored; accepted for interface compatibility.
        :return:
        """
        Serializable.quick_init(self, locals())
        self._opt_fun = None
        self._target = None
        self._callback = callback
        if tf_optimizer_cls is None:
            tf_optimizer_cls = tf.train.AdamOptimizer
        if tf_optimizer_args is None:
            tf_optimizer_args = dict(learning_rate=1e-3)
        self._tf_optimizer = tf_optimizer_cls(**tf_optimizer_args)
        self._max_epochs = max_epochs
        self._tolerance = tolerance
        self._batch_size = batch_size
        self._verbose = verbose
        self._input_vars = None
        self._train_op = None

    def update_opt(self, loss, target, inputs, extra_inputs=None, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :param extra_inputs: optional extra symbolic variables, appended after
            ``inputs`` when compiling/feeding the loss function.
        :return: No return value.
        """
        self._target = target
        # The train op updates the target's trainable parameters in place.
        self._train_op = self._tf_optimizer.minimize(loss, var_list=target.get_params(trainable=True))
        # updates = OrderedDict([(k, v.astype(k.dtype)) for k, v in updates.iteritems()])
        if extra_inputs is None:
            extra_inputs = list()
        self._input_vars = inputs + extra_inputs
        # lazydict defers compiling the TF function until first use.
        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(inputs + extra_inputs, loss),
        )

    def loss(self, inputs, extra_inputs=None):
        """Evaluate the compiled loss at the given numeric inputs."""
        if extra_inputs is None:
            extra_inputs = tuple()
        return self._opt_fun["f_loss"](*(tuple(inputs) + extra_inputs))

    def optimize(self, inputs, extra_inputs=None, callback=None):
        """Run mini-batch gradient descent over ``inputs``.

        :param inputs: tuple/list of numeric arrays; first axis is the sample axis.
        :param extra_inputs: values appended unchanged to every mini-batch.
        :param callback: per-call callback, invoked each epoch with keyword
            arguments (loss, params, itr, elapsed).
        """
        if len(inputs) == 0:
            # Assumes that we should always sample mini-batches
            raise NotImplementedError
        f_loss = self._opt_fun["f_loss"]
        if extra_inputs is None:
            extra_inputs = tuple()
        last_loss = f_loss(*(tuple(inputs) + extra_inputs))
        start_time = time.time()
        # BatchDataset shuffles and yields mini-batches of `inputs`.
        dataset = BatchDataset(inputs, self._batch_size, extra_inputs=extra_inputs)
        sess = tf.get_default_session()
        for epoch in range(self._max_epochs):
            if self._verbose:
                logger.log("Epoch %d" % (epoch))
                progbar = pyprind.ProgBar(len(inputs[0]))
            for batch in dataset.iterate(update=True):
                sess.run(self._train_op, dict(list(zip(self._input_vars, batch))))
                if self._verbose:
                    progbar.update(len(batch[0]))
            if self._verbose:
                if progbar.active:
                    progbar.stop()
            new_loss = f_loss(*(tuple(inputs) + extra_inputs))
            if self._verbose:
                logger.log("Epoch: %d | Loss: %f" % (epoch, new_loss))
            if self._callback or callback:
                elapsed = time.time() - start_time
                callback_args = dict(
                    loss=new_loss,
                    params=self._target.get_param_values(trainable=True) if self._target else None,
                    itr=epoch,
                    elapsed=elapsed,
                )
                # NOTE(review): the stored callback receives the dict as one
                # positional argument, while the per-call callback receives it
                # as keyword arguments -- confirm callers before unifying.
                if self._callback:
                    self._callback(callback_args)
                if callback:
                    callback(**callback_args)
            # Early stop once the loss has plateaued.
            if abs(last_loss - new_loss) < self._tolerance:
                break
            last_loss = new_loss
| 4,742 | 32.878571 | 112 | py |
rllab | rllab-master/sandbox/rocky/tf/optimizers/penalty_lbfgs_optimizer.py | from sandbox.rocky.tf.misc import tensor_utils
from rllab.misc import logger
from rllab.misc import ext
from rllab.core.serializable import Serializable
import tensorflow as tf
import numpy as np
import scipy.optimize
class PenaltyLbfgsOptimizer(Serializable):
    """
    Performs constrained optimization via penalized L-BFGS. The penalty term is adaptively adjusted to make sure that
    the constraint is satisfied.
    """
    def __init__(
            self,
            name,
            max_opt_itr=20,
            initial_penalty=1.0,
            min_penalty=1e-2,
            max_penalty=1e6,
            increase_penalty_factor=2,
            decrease_penalty_factor=0.5,
            max_penalty_itr=10,
            adapt_penalty=True):
        """
        :param name: TF variable scope name for the penalty placeholder.
        :param max_opt_itr: max L-BFGS iterations per penalty trial.
        :param initial_penalty: starting penalty coefficient.
        :param min_penalty: lower clip bound for the penalty.
        :param max_penalty: upper clip bound for the penalty.
        :param increase_penalty_factor: multiplier applied when the constraint is violated.
        :param decrease_penalty_factor: multiplier applied when the constraint is satisfied.
        :param max_penalty_itr: max number of penalty adjustment trials.
        :param adapt_penalty: if False, a single trial at the current penalty is used.
        """
        Serializable.quick_init(self, locals())
        self._name = name
        self._max_opt_itr = max_opt_itr
        self._penalty = initial_penalty
        self._initial_penalty = initial_penalty
        self._min_penalty = min_penalty
        self._max_penalty = max_penalty
        self._increase_penalty_factor = increase_penalty_factor
        self._decrease_penalty_factor = decrease_penalty_factor
        self._max_penalty_itr = max_penalty_itr
        self._adapt_penalty = adapt_penalty
        # Populated by update_opt().
        self._opt_fun = None
        self._target = None
        self._max_constraint_val = None
        self._constraint_name = None

    def update_opt(self, loss, target, leq_constraint, inputs, constraint_name="constraint", *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :param constraint_name: label used in log messages.
        :return: No return value.
        """
        constraint_term, constraint_value = leq_constraint
        with tf.variable_scope(self._name):
            penalty_var = tf.placeholder(tf.float32, tuple(), name="penalty")
        # The scalarized objective: loss + penalty * constraint.
        penalized_loss = loss + penalty_var * constraint_term
        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        def get_opt_output():
            # scipy's L-BFGS requires float64 scalars/gradients.
            params = target.get_params(trainable=True)
            grads = tf.gradients(penalized_loss, params)
            for idx, (grad, param) in enumerate(zip(grads, params)):
                if grad is None:
                    # Parameters not reachable from the loss get zero gradients.
                    grads[idx] = tf.zeros_like(param)
            flat_grad = tensor_utils.flatten_tensor_variables(grads)
            return [
                tf.cast(penalized_loss, tf.float64),
                tf.cast(flat_grad, tf.float64),
            ]

        # lazydict defers compiling each TF function until first use.
        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(inputs, loss, log_name="f_loss"),
            f_constraint=lambda: tensor_utils.compile_function(inputs, constraint_term, log_name="f_constraint"),
            f_penalized_loss=lambda: tensor_utils.compile_function(
                inputs=inputs + [penalty_var],
                outputs=[penalized_loss, loss, constraint_term],
                log_name="f_penalized_loss",
            ),
            f_opt=lambda: tensor_utils.compile_function(
                inputs=inputs + [penalty_var],
                outputs=get_opt_output(),
            )
        )

    def loss(self, inputs):
        """Evaluate the unpenalized loss at the given numeric inputs."""
        return self._opt_fun["f_loss"](*inputs)

    def constraint_val(self, inputs):
        """Evaluate the constraint term at the given numeric inputs."""
        return self._opt_fun["f_constraint"](*inputs)

    def optimize(self, inputs):
        """Minimize the penalized loss, adapting the penalty until the
        constraint is satisfied (or budgets are exhausted)."""
        inputs = tuple(inputs)
        try_penalty = np.clip(
            self._penalty, self._min_penalty, self._max_penalty)
        penalty_scale_factor = None
        f_opt = self._opt_fun["f_opt"]
        f_penalized_loss = self._opt_fun["f_penalized_loss"]

        def gen_f_opt(penalty):
            # Returns the (loss, grad) callable L-BFGS expects, for a fixed penalty.
            def f(flat_params):
                self._target.set_param_values(flat_params, trainable=True)
                return f_opt(*(inputs + (penalty,)))

            return f

        cur_params = self._target.get_param_values(trainable=True).astype('float64')
        opt_params = cur_params
        for penalty_itr in range(self._max_penalty_itr):
            logger.log('trying penalty=%.3f...' % try_penalty)
            itr_opt_params, _, _ = scipy.optimize.fmin_l_bfgs_b(
                func=gen_f_opt(try_penalty), x0=cur_params,
                maxiter=self._max_opt_itr
            )
            _, try_loss, try_constraint_val = f_penalized_loss(*(inputs + (try_penalty,)))
            logger.log('penalty %f => loss %f, %s %f' %
                       (try_penalty, try_loss, self._constraint_name, try_constraint_val))
            # Either constraint satisfied, or we are at the last iteration already and no alternative parameter
            # satisfies the constraint
            if try_constraint_val < self._max_constraint_val or \
                    (penalty_itr == self._max_penalty_itr - 1 and opt_params is None):
                opt_params = itr_opt_params
            if not self._adapt_penalty:
                break
            # Decide scale factor on the first iteration, or if constraint violation yields numerical error
            if penalty_scale_factor is None or np.isnan(try_constraint_val):
                # Increase penalty if constraint violated, or if constraint term is NAN
                if try_constraint_val > self._max_constraint_val or np.isnan(try_constraint_val):
                    penalty_scale_factor = self._increase_penalty_factor
                else:
                    # Otherwise (i.e. constraint satisfied), shrink penalty
                    penalty_scale_factor = self._decrease_penalty_factor
                    opt_params = itr_opt_params
            else:
                # Stop once crossing the feasibility boundary in the scaling direction.
                if penalty_scale_factor > 1 and \
                        try_constraint_val <= self._max_constraint_val:
                    break
                elif penalty_scale_factor < 1 and \
                        try_constraint_val >= self._max_constraint_val:
                    break
            try_penalty *= penalty_scale_factor
            try_penalty = np.clip(try_penalty, self._min_penalty, self._max_penalty)
            # Remember the adapted penalty as the starting point for the next call.
            self._penalty = try_penalty
        self._target.set_param_values(opt_params, trainable=True)
| 6,481 | 40.025316 | 117 | py |
rllab | rllab-master/sandbox/rocky/tf/optimizers/conjugate_gradient_optimizer.py | from rllab.misc import ext
from rllab.misc import krylov
from rllab.misc import logger
from rllab.core.serializable import Serializable
# from rllab.misc.ext import flatten_tensor_variables
import itertools
import numpy as np
import tensorflow as tf
from sandbox.rocky.tf.misc import tensor_utils
from rllab.misc.ext import sliced_fun
class PerlmutterHvp(object):
    """Exact Hessian-vector products via Pearlmutter's trick:
    H x = grad( grad(f) . x ), built symbolically with tf.gradients."""

    def __init__(self, num_slices=1):
        # target/reg_coeff/opt_fun are populated by update_opt().
        self.target = None
        self.reg_coeff = None
        self.opt_fun = None
        # Number of slices used by sliced_fun to bound per-call memory.
        self._num_slices = num_slices

    def update_opt(self, f, target, inputs, reg_coeff):
        """Build the symbolic Hessian-vector product of ``f`` w.r.t.
        ``target``'s trainable parameters.

        :param f: symbolic scalar (typically the constraint, e.g. mean KL).
        :param target: parameterized object providing get_params / flat_to_params.
        :param inputs: symbolic input variables of ``f``.
        :param reg_coeff: damping coefficient added as reg_coeff * x in eval.
        """
        self.target = target
        self.reg_coeff = reg_coeff
        params = target.get_params(trainable=True)
        constraint_grads = tf.gradients(f, xs=params)
        for idx, (grad, param) in enumerate(zip(constraint_grads, params)):
            if grad is None:
                # Parameters not reachable from f get zero gradients.
                constraint_grads[idx] = tf.zeros_like(param)
        # One placeholder per parameter tensor, holding the vector x.
        xs = tuple([tensor_utils.new_tensor_like(p.name.split(":")[0], p) for p in params])

        def Hx_plain():
            # grad( sum_i <g_i, x_i> ) w.r.t. params == H x (Pearlmutter).
            Hx_plain_splits = tf.gradients(
                tf.reduce_sum(
                    tf.stack([tf.reduce_sum(g * x) for g, x in zip(constraint_grads, xs)])
                ),
                params
            )
            for idx, (Hx, param) in enumerate(zip(Hx_plain_splits, params)):
                if Hx is None:
                    Hx_plain_splits[idx] = tf.zeros_like(param)
            return tensor_utils.flatten_tensor_variables(Hx_plain_splits)

        self.opt_fun = ext.lazydict(
            f_Hx_plain=lambda: tensor_utils.compile_function(
                inputs=inputs + xs,
                outputs=Hx_plain(),
                log_name="f_Hx_plain",
            ),
        )

    def build_eval(self, inputs):
        """Return a numeric callable x -> (H + reg_coeff * I) x, with
        ``inputs`` (the data) closed over."""
        def eval(x):
            # Unflatten x to per-parameter shapes before feeding placeholders.
            xs = tuple(self.target.flat_to_params(x, trainable=True))
            ret = sliced_fun(self.opt_fun["f_Hx_plain"], self._num_slices)(inputs, xs) + self.reg_coeff * x
            return ret

        return eval
class FiniteDifferenceHvp(object):
    """Approximate Hessian-vector products by finite differences of the
    gradient: H x ~ (g(theta + eps*x) - g(theta - eps*x)) / (2*eps)."""

    def __init__(self, base_eps=1e-8, symmetric=True, grad_clip=None, num_slices=1):
        # base_eps is scaled by 1/||theta|| at evaluation time.
        self.base_eps = base_eps
        # symmetric=True uses the central difference; False the forward difference.
        self.symmetric = symmetric
        # NOTE(review): grad_clip is stored but not applied anywhere in this
        # class -- confirm whether clipping was intended.
        self.grad_clip = grad_clip
        self._num_slices = num_slices

    def update_opt(self, f, target, inputs, reg_coeff):
        """Compile the gradient of ``f`` and define the finite-difference
        Hx evaluator.

        :param f: symbolic scalar (typically the constraint, e.g. mean KL).
        :param target: parameterized object providing get/set_param_values.
        :param inputs: symbolic input variables of ``f``.
        :param reg_coeff: damping coefficient added as reg_coeff * x in eval.
        """
        self.target = target
        self.reg_coeff = reg_coeff
        params = target.get_params(trainable=True)
        constraint_grads = tf.gradients(f, xs=params)
        for idx, (grad, param) in enumerate(zip(constraint_grads, params)):
            if grad is None:
                # Parameters not reachable from f get zero gradients.
                constraint_grads[idx] = tf.zeros_like(param)
        flat_grad = tensor_utils.flatten_tensor_variables(constraint_grads)

        def f_Hx_plain(*args):
            # args = numeric inputs followed by the per-parameter vector pieces.
            inputs_ = args[:len(inputs)]
            xs = args[len(inputs):]
            flat_xs = np.concatenate([np.reshape(x, (-1,)) for x in xs])
            param_val = self.target.get_param_values(trainable=True)
            # Scale the step so the relative perturbation stays small.
            eps = np.cast['float32'](self.base_eps / (np.linalg.norm(param_val) + 1e-8))
            self.target.set_param_values(param_val + eps * flat_xs, trainable=True)
            flat_grad_dvplus = self.opt_fun["f_grad"](*inputs_)
            # Restore the original parameters before (possibly) perturbing again.
            self.target.set_param_values(param_val, trainable=True)
            if self.symmetric:
                self.target.set_param_values(param_val - eps * flat_xs, trainable=True)
                flat_grad_dvminus = self.opt_fun["f_grad"](*inputs_)
                hx = (flat_grad_dvplus - flat_grad_dvminus) / (2 * eps)
                self.target.set_param_values(param_val, trainable=True)
            else:
                # Forward difference against the gradient at the unperturbed point.
                # (This local name shadows the symbolic flat_grad above, which is
                # only used when compiling f_grad, so the shadowing is harmless.)
                flat_grad = self.opt_fun["f_grad"](*inputs_)
                hx = (flat_grad_dvplus - flat_grad) / eps
            return hx

        self.opt_fun = ext.lazydict(
            f_grad=lambda: tensor_utils.compile_function(
                inputs=inputs,
                outputs=flat_grad,
                log_name="f_grad",
            ),
            f_Hx_plain=lambda: f_Hx_plain,
        )

    def build_eval(self, inputs):
        """Return a numeric callable x -> (H + reg_coeff * I) x, with
        ``inputs`` (the data) closed over."""
        def eval(x):
            xs = tuple(self.target.flat_to_params(x, trainable=True))
            ret = sliced_fun(self.opt_fun["f_Hx_plain"], self._num_slices)(inputs,xs) + self.reg_coeff * x
            return ret

        return eval
class ConjugateGradientOptimizer(Serializable):
    """
    Performs constrained optimization via line search. The search direction is computed using a conjugate gradient
    algorithm, which gives x = A^{-1}g, where A is a second order approximation of the constraint and g is the gradient
    of the loss function.
    """
    def __init__(
            self,
            cg_iters=10,
            reg_coeff=1e-5,
            subsample_factor=1.,
            backtrack_ratio=0.8,
            max_backtracks=15,
            debug_nan=False,
            accept_violation=False,
            hvp_approach=None,
            num_slices=1):
        """
        :param cg_iters: The number of CG iterations used to calculate A^-1 g
        :param reg_coeff: A small value so that A -> A + reg*I
        :param subsample_factor: Subsampling factor to reduce samples when using conjugate gradient. Since the
        computation time for the descent direction dominates, this can greatly reduce the overall computation time.
        :param backtrack_ratio: multiplicative step-size decay per backtracking iteration.
        :param max_backtracks: maximum number of backtracking line-search steps.
        :param debug_nan: if set to True, NanGuard will be added to the compilation, and ipdb will be invoked when
        nan is detected
        :param accept_violation: whether to accept the descent step if it violates the line search condition after
        exhausting all backtracking budgets
        :param hvp_approach: Hessian-vector-product strategy; defaults to PerlmutterHvp.
        :param num_slices: number of slices used by sliced_fun to bound memory.
        :return:
        """
        Serializable.quick_init(self, locals())
        self._cg_iters = cg_iters
        self._reg_coeff = reg_coeff
        self._subsample_factor = subsample_factor
        self._backtrack_ratio = backtrack_ratio
        self._max_backtracks = max_backtracks
        self._num_slices = num_slices
        # Populated by update_opt().
        self._opt_fun = None
        self._target = None
        self._max_constraint_val = None
        self._constraint_name = None
        self._debug_nan = debug_nan
        self._accept_violation = accept_violation
        if hvp_approach is None:
            hvp_approach = PerlmutterHvp(num_slices)
        self._hvp_approach = hvp_approach

    def update_opt(self, loss, target, leq_constraint, inputs, extra_inputs=None, constraint_name="constraint", *args,
                   **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs, which could be subsampled if needed. It is assumed
        that the first dimension of these inputs should correspond to the number of data points
        :param extra_inputs: A list of symbolic variables as extra inputs which should not be subsampled
        :return: No return value.
        """
        inputs = tuple(inputs)
        if extra_inputs is None:
            extra_inputs = tuple()
        else:
            extra_inputs = tuple(extra_inputs)
        constraint_term, constraint_value = leq_constraint
        params = target.get_params(trainable=True)
        grads = tf.gradients(loss, xs=params)
        for idx, (grad, param) in enumerate(zip(grads, params)):
            if grad is None:
                # Parameters not reachable from the loss get zero gradients.
                grads[idx] = tf.zeros_like(param)
        flat_grad = tensor_utils.flatten_tensor_variables(grads)
        # The HVP approach builds (damped) Hessian-vector products of the constraint.
        self._hvp_approach.update_opt(f=constraint_term, target=target, inputs=inputs + extra_inputs,
                                      reg_coeff=self._reg_coeff)
        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name
        # lazydict defers compiling each TF function until first use.
        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(
                inputs=inputs + extra_inputs,
                outputs=loss,
                log_name="f_loss",
            ),
            f_grad=lambda: tensor_utils.compile_function(
                inputs=inputs + extra_inputs,
                outputs=flat_grad,
                log_name="f_grad",
            ),
            f_constraint=lambda: tensor_utils.compile_function(
                inputs=inputs + extra_inputs,
                outputs=constraint_term,
                log_name="constraint",
            ),
            f_loss_constraint=lambda: tensor_utils.compile_function(
                inputs=inputs + extra_inputs,
                outputs=[loss, constraint_term],
                log_name="f_loss_constraint",
            ),
        )

    def loss(self, inputs, extra_inputs=None):
        """Evaluate the loss, slicing the inputs to bound memory."""
        inputs = tuple(inputs)
        if extra_inputs is None:
            extra_inputs = tuple()
        return sliced_fun(self._opt_fun["f_loss"], self._num_slices)(inputs, extra_inputs)

    def constraint_val(self, inputs, extra_inputs=None):
        """Evaluate the constraint term, slicing the inputs to bound memory."""
        inputs = tuple(inputs)
        if extra_inputs is None:
            extra_inputs = tuple()
        return sliced_fun(self._opt_fun["f_constraint"], self._num_slices)(inputs, extra_inputs)

    def optimize(self, inputs, extra_inputs=None, subsample_grouped_inputs=None):
        """Take one constrained step: CG for the search direction, then a
        backtracking line search enforcing improvement and the constraint.

        :param inputs: numeric inputs (first axis = data points); may be subsampled
            for the Hessian-vector products.
        :param extra_inputs: numeric inputs that are never subsampled.
        :param subsample_grouped_inputs: optional grouping of inputs so each group
            is subsampled independently.
        """
        prev_param = np.copy(self._target.get_param_values(trainable=True))
        inputs = tuple(inputs)
        if extra_inputs is None:
            extra_inputs = tuple()
        if self._subsample_factor < 1:
            if subsample_grouped_inputs is None:
                subsample_grouped_inputs = [inputs]
            subsample_inputs = tuple()
            for inputs_grouped in subsample_grouped_inputs:
                n_samples = len(inputs_grouped[0])
                inds = np.random.choice(
                    n_samples, int(n_samples * self._subsample_factor), replace=False)
                subsample_inputs += tuple([x[inds] for x in inputs_grouped])
        else:
            subsample_inputs = inputs
        logger.log("Start CG optimization: #parameters: %d, #inputs: %d, #subsample_inputs: %d"%(len(prev_param),len(inputs[0]), len(subsample_inputs[0])))
        logger.log("computing loss before")
        loss_before = sliced_fun(self._opt_fun["f_loss"], self._num_slices)(inputs, extra_inputs)
        logger.log("performing update")
        logger.log("computing gradient")
        flat_g = sliced_fun(self._opt_fun["f_grad"], self._num_slices)(inputs, extra_inputs)
        logger.log("gradient computed")
        logger.log("computing descent direction")
        # Solve (H + reg*I) d = g with conjugate gradient; d approximates H^-1 g.
        Hx = self._hvp_approach.build_eval(subsample_inputs + extra_inputs)
        descent_direction = krylov.cg(Hx, flat_g, cg_iters=self._cg_iters)
        # Largest step satisfying the quadratic constraint model:
        # sqrt(2 * epsilon / (d^T H d)); the 1e-8 guards against division by zero.
        initial_step_size = np.sqrt(
            2.0 * self._max_constraint_val * (1. / (descent_direction.dot(Hx(descent_direction)) + 1e-8))
        )
        if np.isnan(initial_step_size):
            initial_step_size = 1.
        flat_descent_step = initial_step_size * descent_direction
        logger.log("descent direction computed")
        n_iter = 0
        # Backtracking line search: shrink the step until both the loss improves
        # and the constraint is satisfied.
        for n_iter, ratio in enumerate(self._backtrack_ratio ** np.arange(self._max_backtracks)):
            cur_step = ratio * flat_descent_step
            cur_param = prev_param - cur_step
            self._target.set_param_values(cur_param, trainable=True)
            loss, constraint_val = sliced_fun(self._opt_fun["f_loss_constraint"], self._num_slices)(inputs,
                                                                                                    extra_inputs)
            if self._debug_nan and np.isnan(constraint_val):
                import ipdb;
                ipdb.set_trace()
            if loss < loss_before and constraint_val <= self._max_constraint_val:
                break
        # Reject the step (restore previous parameters) if the search failed,
        # unless violations are explicitly accepted.
        if (np.isnan(loss) or np.isnan(constraint_val) or loss >= loss_before or constraint_val >=
            self._max_constraint_val) and not self._accept_violation:
            logger.log("Line search condition violated. Rejecting the step!")
            if np.isnan(loss):
                logger.log("Violated because loss is NaN")
            if np.isnan(constraint_val):
                logger.log("Violated because constraint %s is NaN" % self._constraint_name)
            if loss >= loss_before:
                logger.log("Violated because loss not improving")
            if constraint_val >= self._max_constraint_val:
                logger.log("Violated because constraint %s is violated" % self._constraint_name)
            self._target.set_param_values(prev_param, trainable=True)
        logger.log("backtrack iters: %d" % n_iter)
        logger.log("computing loss after")
        logger.log("optimization finished")
| 12,905 | 41.314754 | 155 | py |
rllab | rllab-master/sandbox/rocky/tf/optimizers/__init__.py | 1 | 0 | 0 | py | |
rllab | rllab-master/sandbox/rocky/tf/optimizers/lbfgs_optimizer.py |
from rllab.misc import ext
from sandbox.rocky.tf.misc import tensor_utils
from rllab.core.serializable import Serializable
import tensorflow as tf
import scipy.optimize
import time
class LbfgsOptimizer(Serializable):
    """
    Performs unconstrained optimization via L-BFGS (scipy's fmin_l_bfgs_b).
    """

    def __init__(self, name, max_opt_itr=20, callback=None):
        """
        :param name: identifier kept for interface parity with the other optimizers.
        :param max_opt_itr: maximum number of L-BFGS iterations per optimize() call.
        :param callback: optional function invoked after each L-BFGS iteration with
            a dict of diagnostics (loss, params, itr, elapsed).
        """
        Serializable.quick_init(self, locals())
        self._name = name
        self._max_opt_itr = max_opt_itr
        self._opt_fun = None
        self._target = None
        self._callback = callback

    def update_opt(self, loss, target, inputs, extra_inputs=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :param extra_inputs: optional extra symbolic variables, appended after
            ``inputs`` when compiling the loss/gradient functions.
        :return: No return value.
        """
        self._target = target

        def get_opt_output():
            # scipy's L-BFGS requires a float64 loss and a flat float64 gradient.
            flat_grad = tensor_utils.flatten_tensor_variables(tf.gradients(loss, target.get_params(trainable=True)))
            return [tf.cast(loss, tf.float64), tf.cast(flat_grad, tf.float64)]

        if extra_inputs is None:
            extra_inputs = list()
        # lazydict defers compiling each TF function until first use.
        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: tensor_utils.compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(),
            )
        )

    def loss(self, inputs, extra_inputs=None):
        """Evaluate the compiled loss at the given numeric inputs."""
        if extra_inputs is None:
            extra_inputs = list()
        return self._opt_fun["f_loss"](*(list(inputs) + list(extra_inputs)))

    def optimize(self, inputs, extra_inputs=None):
        """Run L-BFGS on the target's trainable parameters.

        :param inputs: numeric values for the symbolic ``inputs``.
        :param extra_inputs: numeric values for the symbolic ``extra_inputs``.
        """
        f_opt = self._opt_fun["f_opt"]
        if extra_inputs is None:
            extra_inputs = list()

        def f_opt_wrapper(flat_params):
            # Write the candidate parameters into the target, then evaluate the
            # loss and gradient at that point.
            self._target.set_param_values(flat_params, trainable=True)
            # BUG FIX: f_opt is compiled over inputs + extra_inputs, but was
            # previously called with *inputs only, which broke (or silently
            # misbehaved) whenever extra_inputs was non-empty.
            return f_opt(*(list(inputs) + list(extra_inputs)))

        itr = [0]
        start_time = time.time()

        if self._callback:
            def opt_callback(params):
                # list() conversions make this robust to tuple/list mixtures
                # (a bare `inputs + extra_inputs` raised TypeError for those).
                loss = self._opt_fun["f_loss"](*(list(inputs) + list(extra_inputs)))
                elapsed = time.time() - start_time
                self._callback(dict(
                    loss=loss,
                    params=params,
                    itr=itr[0],
                    elapsed=elapsed,
                ))
                itr[0] += 1
        else:
            opt_callback = None

        scipy.optimize.fmin_l_bfgs_b(
            func=f_opt_wrapper, x0=self._target.get_param_values(trainable=True),
            maxiter=self._max_opt_itr, callback=opt_callback,
        )
| 2,938 | 32.397727 | 116 | py |
rllab | rllab-master/scripts/sync_s3.py | import sys
sys.path.append('.')
from rllab import config
import os
import argparse
import ast
if __name__ == "__main__":
    # Pull experiment results down from S3 into the local log directory.
    parser = argparse.ArgumentParser()
    # Optional sub-folder under the configured S3 path / local mirror.
    parser.add_argument('folder', type=str, default=None, nargs='?')
    # --dry prints the aws command instead of running it.
    parser.add_argument('--dry', action='store_true', default=False)
    # --bare syncs only csv/json result files (skips everything else).
    parser.add_argument('--bare', action='store_true', default=False)
    args = parser.parse_args()
    remote_dir = config.AWS_S3_PATH
    local_dir = os.path.join(config.LOG_DIR, "s3")
    if args.folder:
        remote_dir = os.path.join(remote_dir, args.folder)
        local_dir = os.path.join(local_dir, args.folder)
    # NOTE(review): --content-type usually takes a MIME type (e.g. text/csv),
    # not a charset like "UTF-8" -- confirm intent before changing.
    if args.bare:
        command = ("""
        aws s3 sync {remote_dir} {local_dir} --exclude '*' --include '*.csv' --include '*.json' --content-type "UTF-8"
        """.format(local_dir=local_dir, remote_dir=remote_dir))
    else:
        command = ("""
        aws s3 sync {remote_dir} {local_dir} --exclude '*stdout.log' --exclude '*stdouterr.log' --content-type "UTF-8"
        """.format(local_dir=local_dir, remote_dir=remote_dir))
    if args.dry:
        print(command)
    else:
        os.system(command)
rllab | rllab-master/scripts/sim_env.py | import argparse
import sys
import time
import numpy as np
import pygame
from rllab.envs.base import Env
# from rllab.env.base import MDP
from rllab.misc.resolve import load_class
def sample_action(lb, ub):
    """Draw a uniformly random action from the box [lb, ub].

    :param lb: per-dimension lower bounds (array-like).
    :param ub: per-dimension upper bounds (array-like).
    :return: numpy array of shape (len(lb),) sampled uniformly in the box.
    :raises ValueError: if any bound is infinite (unbounded boxes cannot be sampled).
    """
    dim = len(lb)
    if np.any(np.isinf(lb)) or np.any(np.isinf(ub)):
        raise ValueError('Cannot sample unbounded actions')
    unit_sample = np.random.rand(dim)
    return lb + (ub - lb) * unit_sample
def to_onehot(ind, dim):
    """Return a length-``dim`` float vector that is 1 at index ``ind`` and 0 elsewhere.

    :param ind: index to set (any valid numpy index, including negative).
    :param dim: length of the returned vector.
    """
    onehot = np.zeros(dim)
    onehot[ind] = 1
    return onehot
def visualize_env(env, mode, max_steps=sys.maxsize, speedup=1):
    """Render an environment in one of several modes.

    :param env: environment instance to visualize.
    :param mode: 'noop' (render only), 'random' (random actions),
        'static' (reset then render forever), or 'human' (keyboard control).
    :param max_steps: maximum number of rendered steps (where applicable).
    :param speedup: divides the sleep between frames, speeding up playback.
    :raises ValueError: for an unsupported mode.
    """
    # Nominal seconds between rendered frames (scaled down by speedup).
    timestep = 0.05
    # step ahead with all-zero action
    if mode == 'noop':
        for _ in range(max_steps):
            env.render()
            time.sleep(timestep / speedup)
    elif mode == 'random':
        env.reset()
        env.render()
        for i in range(max_steps):
            action = env.action_space.sample()
            _, _, done, _ = env.step(action)
            # if i % 10 == 0:
            env.render()
            # import time as ttime
            time.sleep(timestep / speedup)
            if done:
                env.reset()
    elif mode == 'static':
        env.reset()
        while True:
            env.render()
            time.sleep(timestep / speedup)
    elif mode == 'human':
        # Prefer an env-provided interactive loop when available.
        if hasattr(env, 'start_interactive'):
            env.start_interactive()
        else:
            env.reset()
            env.render()
            # tr accumulates the total reward of the current episode.
            tr = 0.
            # Imported lazily so non-Box2D setups don't need pybox2d installed.
            from rllab.envs.box2d.box2d_env import Box2DEnv
            if isinstance(env, Box2DEnv):
                for _ in range(max_steps):
                    # Poll pygame for currently-held keys and map them to an action.
                    pygame.event.pump()
                    keys = pygame.key.get_pressed()
                    action = env.action_from_keys(keys)
                    ob, r, done, _ = env.step(action)
                    tr += r
                    env.render()
                    time.sleep(timestep / speedup)
                    if done:
                        tr = 0.
                        env.reset()
                return
            # Imported lazily so non-MuJoCo setups don't need mujoco installed.
            from rllab.envs.mujoco.mujoco_env import MujocoEnv
            from rllab.envs.mujoco.maze.maze_env import MazeEnv
            if isinstance(env, (MujocoEnv, MazeEnv)):
                # Single-element lists let the glfw callback mutate state in place.
                trs = [tr]
                actions = [np.zeros(2)]
                from rllab.mujoco_py import glfw

                def cb(window, key, scancode, action, mods):
                    actions[0] = env.action_from_key(key)

                glfw.set_key_callback(env.viewer.window, cb)
                while True:
                    try:
                        # Reset to a zero action; key presses overwrite it via cb.
                        actions[0] = np.zeros(2)
                        glfw.poll_events()
                        # if np.linalg.norm(actions[0]) > 0:
                        ob, r, done, info = env.step(actions[0])
                        trs[0] += r
                        env.render()
                        # time.sleep(env.timestep / speedup)
                        time.sleep(env.timestep / speedup)
                        if done:
                            trs[0] = 0.
                            env.reset()
                    except Exception as e:
                        # Keep the interactive loop alive on per-step errors.
                        print(e)
                return
            assert hasattr(env, "start_interactive"), "The environment must implement method start_interactive"
            env.start_interactive()
        # Assume using matplotlib
        # TODO - make this logic more legit
        # env.render()
        # import matplotlib.pyplot as plt
        # def handle_key_pressed(event):
        #     action = env.action_from_key(event.key)
        #     if action is not None:
        #         _, _, done, _ = env.step(action)
        #         if done:
        #             plt.close()
        #             return
        #         env.render()
        #
        # env.matplotlib_figure.canvas.mpl_connect('key_press_event', handle_key_pressed)
        # plt.ioff()
        # plt.show()
    else:
        raise ValueError('Unsupported mode: %s' % mode)
    # env.stop_viewer()
if __name__ == "__main__":
    # CLI entry: load an environment class by module path and visualize it.
    parser = argparse.ArgumentParser()
    parser.add_argument('env', type=str,
                        help='module path to the env class')
    # BUG FIX: the --mode help text was a copy-paste of the positional
    # argument's help ('module path to the env class'); it now describes
    # the actual visualization modes.
    parser.add_argument('--mode', type=str, default='static',
                        choices=['noop', 'random', 'static', 'human'],
                        help='visualization mode: noop (all-zero actions), '
                             'random (random actions), static (render only), '
                             'or human (interactive keyboard control)')
    parser.add_argument('--speedup', type=float, default=1, help='speedup')
    parser.add_argument('--max_steps', type=int,
                        default=sys.maxsize, help='max steps')
    args = parser.parse_args()
    # Resolve the class from the module path and instantiate it.
    env = load_class(args.env, Env, ["rllab", "envs"])()
    visualize_env(env, mode=args.mode, max_steps=args.max_steps,
                  speedup=args.speedup)
| 4,738 | 32.85 | 111 | py |
rllab | rllab-master/scripts/resume_training.py |
from rllab.sampler.utils import rollout
from rllab.algos.batch_polopt import BatchPolopt
import argparse
import joblib
import uuid
import os
import random
import numpy as np
import json
import subprocess
from rllab.misc import logger
from rllab.misc.instrument import to_local_command
# NOTE(review): this module-level value does not appear to be used anywhere
# in this script -- confirm before removing.
filename = str(uuid.uuid4())
if __name__ == "__main__":
    # Resume a previously-run experiment from a snapshot pickle. If the
    # snapshot directory contains params.json, re-launch via
    # run_experiment_lite with resume_from set; otherwise load the pickle
    # directly and continue training in-process.
    parser = argparse.ArgumentParser()
    parser.add_argument('file', type=str,
                        help='path to the snapshot file')
    parser.add_argument('--log_dir', type=str, default=None,
                        help='path to the new log directory')
    # Look for params.json file
    args = parser.parse_args()
    parent_dir = os.path.dirname(os.path.realpath(args.file))
    json_file_path = os.path.join(parent_dir, "params.json")
    logger.log("Looking for params.json at %s..." % json_file_path)
    try:
        with open(json_file_path, "r") as f:
            params = json.load(f)
        # exclude certain parameters
        excluded = ['json_args']
        for k in excluded:
            if k in params:
                del params[k]
        # Drop null parameters so they don't become CLI flags.
        for k, v in list(params.items()):
            if v is None:
                del params[k]
        if args.log_dir is not None:
            params['log_dir'] = args.log_dir
        params['resume_from'] = args.file
        # Rebuild the original launch command with resume_from injected.
        command = to_local_command(params, script='scripts/run_experiment_lite.py')
        print(command)
        try:
            subprocess.call(command, shell=True, env=os.environ)
        except Exception as e:
            print(e)
            # Let Ctrl-C propagate to actually stop the run.
            if isinstance(e, KeyboardInterrupt):
                raise
    except IOError as e:
        # No params.json: load the snapshot directly and keep training here.
        logger.log("Failed to find json file. Continuing in non-stub mode...")
        data = joblib.load(args.file)
        assert 'algo' in data
        algo = data['algo']
        assert isinstance(algo, BatchPolopt)
        algo.train()
| 1,902 | 30.716667 | 83 | py |
rllab | rllab-master/scripts/sim_policy.py | import argparse
import joblib
import tensorflow as tf
from rllab.misc.console import query_yes_no
from rllab.sampler.utils import rollout
if __name__ == "__main__":
    # Load a snapshot (policy + env) and repeatedly roll it out with rendering.
    parser = argparse.ArgumentParser()
    parser.add_argument('file', type=str,
                        help='path to the snapshot file')
    parser.add_argument('--max_path_length', type=int, default=1000,
                        help='Max length of rollout')
    parser.add_argument('--speedup', type=float, default=1,
                        help='Speedup')
    args = parser.parse_args()
    # If the snapshot file use tensorflow, do:
    # import tensorflow as tf
    # with tf.Session():
    #     [rest of the code]
    # A session must be open while unpickling TF-based policies.
    with tf.Session() as sess:
        data = joblib.load(args.file)
        policy = data['policy']
        env = data['env']
        while True:
            # One animated rollout, then ask whether to continue.
            path = rollout(env, policy, max_path_length=args.max_path_length,
                           animated=True, speedup=args.speedup)
            if not query_yes_no('Continue simulation?'):
                break
| 1,062 | 31.212121 | 77 | py |
rllab | rllab-master/scripts/run_experiment_lite.py | import sys
sys.path.append(".")
from rllab.misc.ext import is_iterable, set_seed
from rllab.misc.instrument import concretize
from rllab import config
import rllab.misc.logger as logger
import argparse
import os.path as osp
import datetime
import dateutil.tz
import ast
import uuid
import pickle as pickle
import base64
import joblib
import logging
def run_experiment(argv):
    """Entry point used by experiment launchers.

    Parses launcher CLI flags, configures logging/snapshotting, then either
    resumes an experiment from a pickle, or unpickles and executes stubbed
    (or cloudpickled) experiment code passed via --args_data. Logger state is
    restored before returning.

    :param argv: full argv list; argv[0] is ignored.
    """
    default_log_dir = config.LOG_DIR
    now = datetime.datetime.now(dateutil.tz.tzlocal())
    # avoid name clashes when running distributed jobs
    rand_id = str(uuid.uuid4())[:5]
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S_%f_%Z')
    default_exp_name = 'experiment_%s_%s' % (timestamp, rand_id)
    parser = argparse.ArgumentParser()
    parser.add_argument('--n_parallel', type=int, default=1,
                        help='Number of parallel workers to perform rollouts. 0 => don\'t start any workers')
    parser.add_argument(
        '--exp_name', type=str, default=default_exp_name, help='Name of the experiment.')
    parser.add_argument('--log_dir', type=str, default=None,
                        help='Path to save the log and iteration snapshot.')
    parser.add_argument('--snapshot_mode', type=str, default='all',
                        help='Mode to save the snapshot. Can be either "all" '
                             '(all iterations will be saved), "last" (only '
                             'the last iteration will be saved), "gap" (every'
                             '`snapshot_gap` iterations are saved), or "none" '
                             '(do not save snapshots)')
    parser.add_argument('--snapshot_gap', type=int, default=1,
                        help='Gap between snapshot iterations.')
    parser.add_argument('--tabular_log_file', type=str, default='progress.csv',
                        help='Name of the tabular log file (in csv).')
    parser.add_argument('--text_log_file', type=str, default='debug.log',
                        help='Name of the text log file (in pure text).')
    parser.add_argument('--params_log_file', type=str, default='params.json',
                        help='Name of the parameter log file (in json).')
    parser.add_argument('--variant_log_file', type=str, default='variant.json',
                        help='Name of the variant log file (in json).')
    parser.add_argument('--resume_from', type=str, default=None,
                        help='Name of the pickle file to resume experiment from.')
    parser.add_argument('--plot', type=ast.literal_eval, default=False,
                        help='Whether to plot the iteration results')
    parser.add_argument('--log_tabular_only', type=ast.literal_eval, default=False,
                        help='Whether to only print the tabular log information (in a horizontal format)')
    parser.add_argument('--seed', type=int,
                        help='Random seed for numpy')
    parser.add_argument('--args_data', type=str,
                        help='Pickled data for stub objects')
    parser.add_argument('--variant_data', type=str,
                        help='Pickled data for variant configuration')
    parser.add_argument('--use_cloudpickle', type=ast.literal_eval, default=False)
    args = parser.parse_args(argv[1:])
    if args.seed is not None:
        set_seed(args.seed)
    if args.n_parallel > 0:
        from rllab.sampler import parallel_sampler
        parallel_sampler.initialize(n_parallel=args.n_parallel)
        if args.seed is not None:
            # Seed the workers as well, not just this process.
            parallel_sampler.set_seed(args.seed)
    if args.plot:
        from rllab.plotter import plotter
        plotter.init_worker()
    if args.log_dir is None:
        log_dir = osp.join(default_log_dir, args.exp_name)
    else:
        log_dir = args.log_dir
    tabular_log_file = osp.join(log_dir, args.tabular_log_file)
    text_log_file = osp.join(log_dir, args.text_log_file)
    params_log_file = osp.join(log_dir, args.params_log_file)
    if args.variant_data is not None:
        # Variant config arrives base64-encoded and pickled on the CLI.
        variant_data = pickle.loads(base64.b64decode(args.variant_data))
        variant_log_file = osp.join(log_dir, args.variant_log_file)
        logger.log_variant(variant_log_file, variant_data)
    else:
        variant_data = None
    if not args.use_cloudpickle:
        logger.log_parameters_lite(params_log_file, args)
    logger.add_text_output(text_log_file)
    logger.add_tabular_output(tabular_log_file)
    # Save previous logger state so it can be restored after the run.
    prev_snapshot_dir = logger.get_snapshot_dir()
    prev_mode = logger.get_snapshot_mode()
    logger.set_snapshot_dir(log_dir)
    logger.set_snapshot_mode(args.snapshot_mode)
    logger.set_snapshot_gap(args.snapshot_gap)
    logger.set_log_tabular_only(args.log_tabular_only)
    logger.push_prefix("[%s] " % args.exp_name)
    if args.resume_from is not None:
        # Resume training directly from a snapshot pickle containing the algo.
        data = joblib.load(args.resume_from)
        assert 'algo' in data
        algo = data['algo']
        algo.train()
    else:
        # read from stdin
        if args.use_cloudpickle:
            import cloudpickle
            # The payload is a callable taking the variant dict.
            method_call = cloudpickle.loads(base64.b64decode(args.args_data))
            method_call(variant_data)
        else:
            # Stub-mode payload: concretize the stubbed call graph and run it.
            data = pickle.loads(base64.b64decode(args.args_data))
            maybe_iter = concretize(data)
            if is_iterable(maybe_iter):
                for _ in maybe_iter:
                    pass
    # Restore logger state for any subsequent in-process experiments.
    logger.set_snapshot_mode(prev_mode)
    logger.set_snapshot_dir(prev_snapshot_dir)
    logger.remove_tabular_output(tabular_log_file)
    logger.remove_text_output(text_log_file)
    logger.pop_prefix()
if __name__ == "__main__":
    # Script entry point: forward the raw argv to the launcher.
    run_experiment(sys.argv)
| 5,603 | 39.608696 | 109 | py |
rllab | rllab-master/scripts/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/scripts/submit_gym.py |
import argparse
import os
import os.path as osp
import gym
from rllab.viskit.core import load_params
if __name__ == "__main__":
    # Uploading to the Gym scoreboard requires an API key; fail fast with an
    # actionable message instead of letting gym.upload error out later.
    # (A commented-out hard-coded API key was removed here: credentials must
    # never be committed, even commented out.)
    if 'OPENAI_GYM_API_KEY' not in os.environ:
        raise ValueError("OpenAi Gym API key not configured. Please register an account on https://gym.openai.com and"
                         " set the OPENAI_GYM_API_KEY environment variable, and try the script again.")
    parser = argparse.ArgumentParser()
    parser.add_argument('log_dir', type=str,
                        help='path to the logging directory')
    parser.add_argument('--algorithm_id', type=str, default=None, help='Algorithm ID')
    args = parser.parse_args()
    # Upload the recorded monitor logs under log_dir for scoring.
    # (Previously this also computed snapshot_dir/params_file_path, but those
    # values were never used -- removed as dead code.)
    gym.upload(args.log_dir, algorithm_id=args.algorithm_id)
| 907 | 38.478261 | 118 | py |
rllab | rllab-master/scripts/setup_ec2_for_rllab.py | import boto3
import re
import sys
import json
import botocore
import os
from rllab.misc import console
from rllab import config
from string import Template
ACCESS_KEY = os.environ["AWS_ACCESS_KEY"]
ACCESS_SECRET = os.environ["AWS_ACCESS_SECRET"]
S3_BUCKET_NAME = os.environ["RLLAB_S3_BUCKET"]
ALL_REGION_AWS_SECURITY_GROUP_IDS = {}
ALL_REGION_AWS_KEY_NAMES = {}
CONFIG_TEMPLATE = Template("""
import os.path as osp
import os
USE_GPU = False
USE_TF = True
AWS_REGION_NAME = "us-west-1"
if USE_GPU:
DOCKER_IMAGE = "dementrock/rllab3-shared-gpu"
else:
DOCKER_IMAGE = "dementrock/rllab3-shared"
DOCKER_LOG_DIR = "/tmp/expt"
AWS_S3_PATH = "s3://$s3_bucket_name/rllab/experiments"
AWS_CODE_SYNC_S3_PATH = "s3://$s3_bucket_name/rllab/code"
ALL_REGION_AWS_IMAGE_IDS = {
"ap-northeast-1": "ami-002f0167",
"ap-northeast-2": "ami-590bd937",
"ap-south-1": "ami-77314318",
"ap-southeast-1": "ami-1610a975",
"ap-southeast-2": "ami-9dd4ddfe",
"eu-central-1": "ami-63af720c",
"eu-west-1": "ami-41484f27",
"sa-east-1": "ami-b7234edb",
"us-east-1": "ami-83f26195",
"us-east-2": "ami-66614603",
"us-west-1": "ami-576f4b37",
"us-west-2": "ami-b8b62bd8"
}
AWS_IMAGE_ID = ALL_REGION_AWS_IMAGE_IDS[AWS_REGION_NAME]
if USE_GPU:
AWS_INSTANCE_TYPE = "g2.2xlarge"
else:
AWS_INSTANCE_TYPE = "c4.2xlarge"
ALL_REGION_AWS_KEY_NAMES = $all_region_aws_key_names
AWS_KEY_NAME = ALL_REGION_AWS_KEY_NAMES[AWS_REGION_NAME]
AWS_SPOT = True
AWS_SPOT_PRICE = '0.5'
AWS_ACCESS_KEY = os.environ.get("AWS_ACCESS_KEY", None)
AWS_ACCESS_SECRET = os.environ.get("AWS_ACCESS_SECRET", None)
AWS_IAM_INSTANCE_PROFILE_NAME = "rllab"
AWS_SECURITY_GROUPS = ["rllab-sg"]
ALL_REGION_AWS_SECURITY_GROUP_IDS = $all_region_aws_security_group_ids
AWS_SECURITY_GROUP_IDS = ALL_REGION_AWS_SECURITY_GROUP_IDS[AWS_REGION_NAME]
FAST_CODE_SYNC_IGNORES = [
".git",
"data/local",
"data/s3",
"data/video",
"src",
".idea",
".pods",
"tests",
"examples",
"docs",
".idea",
".DS_Store",
".ipynb_checkpoints",
"blackbox",
"blackbox.zip",
"*.pyc",
"*.ipynb",
"scratch-notebooks",
"conopt_root",
"private/key_pairs",
]
FAST_CODE_SYNC = True
""")
def setup_iam():
    """Create the 'rllab' IAM role and instance profile from scratch.

    If a role named 'rllab' already exists, the user is asked for consent and
    the role is fully torn down first. Order matters: instance profiles,
    inline policies and attached managed policies must all be removed before
    the role itself can be deleted.
    """
    iam_client = boto3.client(
        "iam",
        aws_access_key_id=ACCESS_KEY,
        aws_secret_access_key=ACCESS_SECRET,
    )
    iam = boto3.resource('iam', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=ACCESS_SECRET)

    # delete existing role if it exists
    try:
        existing_role = iam.Role('rllab')
        existing_role.load()
        # if role exists, delete and recreate
        if not query_yes_no(
                "There is an existing role named rllab. Proceed to delete everything rllab-related and recreate?",
                default="no"):
            sys.exit()
        print("Listing instance profiles...")
        inst_profiles = existing_role.instance_profiles.all()
        for prof in inst_profiles:
            # Detach all roles from the profile before the profile can go.
            for role in prof.roles:
                print("Removing role %s from instance profile %s" % (role.name, prof.name))
                prof.remove_role(RoleName=role.name)
            print("Deleting instance profile %s" % prof.name)
            prof.delete()
        for policy in existing_role.policies.all():
            print("Deleting inline policy %s" % policy.name)
            policy.delete()
        for policy in existing_role.attached_policies.all():
            # Managed policies are shared objects: detach, never delete.
            print("Detaching policy %s" % policy.arn)
            existing_role.detach_policy(PolicyArn=policy.arn)
        print("Deleting role")
        existing_role.delete()
    except botocore.exceptions.ClientError as e:
        # NoSuchEntity just means there was no pre-existing role to clean up.
        if e.response['Error']['Code'] == 'NoSuchEntity':
            pass
        else:
            raise e

    print("Creating role rllab")
    # Trust policy allows EC2 instances to assume this role.
    iam_client.create_role(
        Path='/',
        RoleName='rllab',
        AssumeRolePolicyDocument=json.dumps({'Version': '2012-10-17', 'Statement': [
            {'Action': 'sts:AssumeRole', 'Effect': 'Allow', 'Principal': {'Service': 'ec2.amazonaws.com'}}]})
    )

    role = iam.Role('rllab')
    print("Attaching policies")
    role.attach_policy(PolicyArn='arn:aws:iam::aws:policy/AmazonS3FullAccess')
    role.attach_policy(PolicyArn='arn:aws:iam::aws:policy/ResourceGroupsandTagEditorFullAccess')

    print("Creating inline policies")
    # Experiments tag their own instances and terminate themselves when done.
    iam_client.put_role_policy(
        RoleName=role.name,
        PolicyName='CreateTags',
        PolicyDocument=json.dumps({
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": ["ec2:CreateTags"],
                    "Resource": ["*"]
                }
            ]
        })
    )
    iam_client.put_role_policy(
        RoleName=role.name,
        PolicyName='TerminateInstances',
        PolicyDocument=json.dumps({
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Sid": "Stmt1458019101000",
                    "Effect": "Allow",
                    "Action": [
                        "ec2:TerminateInstances"
                    ],
                    "Resource": [
                        "*"
                    ]
                }
            ]
        })
    )

    print("Creating instance profile rllab")
    iam_client.create_instance_profile(
        InstanceProfileName='rllab',
        Path='/'
    )
    print("Adding role rllab to instance profile rllab")
    iam_client.add_role_to_instance_profile(
        InstanceProfileName='rllab',
        RoleName='rllab'
    )
def setup_s3():
    """Create the private S3 bucket used for rllab experiment data.

    A bucket already owned by this account is tolerated; a name taken by a
    different account raises a ValueError asking for a different bucket name.
    """
    print("Creating S3 bucket at s3://%s" % S3_BUCKET_NAME)
    client = boto3.client(
        "s3",
        aws_access_key_id=ACCESS_KEY,
        aws_secret_access_key=ACCESS_SECRET,
    )
    try:
        client.create_bucket(ACL='private', Bucket=S3_BUCKET_NAME)
    except botocore.exceptions.ClientError as e:
        code = e.response['Error']['Code']
        if code == 'BucketAlreadyExists':
            # Bucket names are global; someone else owns this one.
            raise ValueError("Bucket %s already exists. Please reconfigure S3_BUCKET_NAME" % S3_BUCKET_NAME) from e
        if code != 'BucketAlreadyOwnedByYou':
            raise e
        print("Bucket already created by you")
    print("S3 bucket created")
def setup_ec2():
    """Per-region EC2 provisioning: 'rllab-sg' security group and a key pair.

    For each region this opens SSH (port 22) on the default VPC's security
    group, creates a 'rllab-<region>' key pair, stores the private key under
    private/key_pairs with 0600 permissions, and registers it with ssh-agent.

    NOTE(review): only three US regions are provisioned here, while the
    generated config template lists AMIs for many more -- confirm intended.
    """
    for region in ["us-east-1", "us-west-1", "us-west-2"]:
        print("Setting up region %s" % region)
        ec2 = boto3.resource(
            "ec2",
            region_name=region,
            aws_access_key_id=ACCESS_KEY,
            aws_secret_access_key=ACCESS_SECRET,
        )
        ec2_client = boto3.client(
            "ec2",
            region_name=region,
            aws_access_key_id=ACCESS_KEY,
            aws_secret_access_key=ACCESS_SECRET,
        )
        # Reuse the first (default) VPC in the region.
        existing_vpcs = list(ec2.vpcs.all())
        assert len(existing_vpcs) >= 1
        vpc = existing_vpcs[0]
        print("Creating security group in VPC %s" % str(vpc.id))
        try:
            security_group = vpc.create_security_group(
                GroupName='rllab-sg', Description='Security group for rllab'
            )
        except botocore.exceptions.ClientError as e:
            # Group already exists: look it up instead of failing.
            if e.response['Error']['Code'] == 'InvalidGroup.Duplicate':
                sgs = list(vpc.security_groups.filter(GroupNames=['rllab-sg']))
                security_group = sgs[0]
            else:
                raise e
        ALL_REGION_AWS_SECURITY_GROUP_IDS[region] = [security_group.id]
        ec2_client.create_tags(Resources=[security_group.id], Tags=[{'Key': 'Name', 'Value': 'rllab-sg'}])
        try:
            # Open SSH to the world; an already-present rule is tolerated.
            security_group.authorize_ingress(FromPort=22, ToPort=22, IpProtocol='tcp', CidrIp='0.0.0.0/0')
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == 'InvalidPermission.Duplicate':
                pass
            else:
                raise e
        print("Security group created with id %s" % str(security_group.id))

        key_name = 'rllab-%s' % region
        try:
            print("Trying to create key pair with name %s" % key_name)
            key_pair = ec2_client.create_key_pair(KeyName=key_name)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == 'InvalidKeyPair.Duplicate':
                # AWS never re-exposes private key material, so the only way
                # to obtain it again is to delete and recreate the pair.
                if not query_yes_no("Key pair with name %s exists. Proceed to delete and recreate?" % key_name, "no"):
                    sys.exit()
                print("Deleting existing key pair with name %s" % key_name)
                ec2_client.delete_key_pair(KeyName=key_name)
                print("Recreating key pair with name %s" % key_name)
                key_pair = ec2_client.create_key_pair(KeyName=key_name)
            else:
                raise e
        key_pair_folder_path = os.path.join(config.PROJECT_PATH, "private", "key_pairs")
        file_name = os.path.join(key_pair_folder_path, "%s.pem" % key_name)
        print("Saving keypair file")
        console.mkdir_p(key_pair_folder_path)
        # Write the .pem with owner-only permissions (0600), as ssh requires.
        with os.fdopen(os.open(file_name, os.O_WRONLY | os.O_CREAT, 0o600), 'w') as handle:
            handle.write(key_pair['KeyMaterial'] + '\n')
        # adding pem file to ssh
        os.system("ssh-add %s" % file_name)
        ALL_REGION_AWS_KEY_NAMES[region] = key_name
def write_config():
    """Render rllab/config_personal.py from CONFIG_TEMPLATE.

    Substitutes the per-region key names and security group ids collected by
    setup_ec2(), plus the S3 bucket name. Refuses to overwrite an existing
    personal config unless the user explicitly agrees.
    """
    print("Writing config file...")
    rendered = CONFIG_TEMPLATE.substitute(
        all_region_aws_key_names=json.dumps(ALL_REGION_AWS_KEY_NAMES, indent=4),
        all_region_aws_security_group_ids=json.dumps(ALL_REGION_AWS_SECURITY_GROUP_IDS, indent=4),
        s3_bucket_name=S3_BUCKET_NAME,
    )
    target = os.path.join(config.PROJECT_PATH, "rllab/config_personal.py")
    if os.path.exists(target) and not query_yes_no("rllab/config_personal.py exists. Override?", "no"):
        sys.exit()
    with open(target, "wb") as f:
        f.write(rendered.encode("utf-8"))
def setup():
    """Run the full provisioning sequence: S3 bucket, IAM role/profile,
    per-region EC2 resources, then write rllab/config_personal.py."""
    setup_s3()
    setup_iam()
    setup_ec2()
    write_config()
def query_yes_no(question, default="yes"):
    """Ask a yes/no question on stdout and return the answer as a bool.

    "question" is the text shown to the user. "default" is the presumed
    answer when the user just hits <Enter>: "yes" (the default), "no", or
    None to force an explicit answer. Re-prompts until a valid answer
    ("y"/"ye"/"yes"/"n"/"no", case-insensitive) is given.

    Raises ValueError for any other value of "default".
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    prompts = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}
    if default not in prompts:
        raise ValueError("invalid default answer: '%s'" % default)
    prompt = prompts[default]

    while True:
        sys.stdout.write(question + prompt)
        answer = input().lower()
        if default is not None and answer == '':
            return valid[default]
        if answer in valid:
            return valid[answer]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
if __name__ == "__main__":
    # Provision all AWS resources and write the personal config file.
    setup()
| 11,283 | 30.431755 | 118 | py |
rllab | rllab-master/tests/test_spaces.py |
from rllab.spaces import Product, Discrete, Box
import numpy as np
def test_product_space():
    """Product accepts both a list of spaces and spaces as varargs, and a
    sample drawn from the space is contained in it."""
    _ = Product([Discrete(3), Discrete(2)])
    space = Product(Discrete(3), Discrete(2))
    point = space.sample()
    assert space.contains(point)
def test_product_space_unflatten_n():
    """flatten/unflatten must agree with their batched *_n counterparts."""
    space = Product([Discrete(3), Discrete(3)])
    point = (2, 2)
    # Batched flatten of a single point equals the scalar flatten.
    np.testing.assert_array_equal(space.flatten(point), space.flatten_n([point])[0])
    # Round-tripping through flatten/unflatten matches the batched round-trip.
    np.testing.assert_array_equal(
        space.unflatten(space.flatten(point)),
        space.unflatten_n(space.flatten_n([point]))[0]
    )
def test_box():
    """Box flattening is row-major, and unflatten is its inverse, both in
    single-sample and batched form."""
    box = Box(low=-1, high=1, shape=(2, 2))
    sample = [[1, 2], [3, 4]]
    np.testing.assert_array_equal(box.flatten(sample), [1, 2, 3, 4])
    np.testing.assert_array_equal(box.flatten_n([sample]), [[1, 2, 3, 4]])
    np.testing.assert_array_equal(box.unflatten([1, 2, 3, 4]), sample)
    np.testing.assert_array_equal(box.unflatten_n([[1, 2, 3, 4]]), [sample])
| 996 | 34.607143 | 88 | py |
rllab | rllab-master/tests/test_serializable.py | import tensorflow as tf
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.core.parameterized import Parameterized, suppress_params_loading
class Simple(Parameterized, Serializable):
    """Minimal Parameterized fixture owning a single TF variable."""

    def __init__(self, name):
        # quick_init captures locals() (the constructor args) so the object
        # can be reconstructed later; it must run before other setup.
        Serializable.quick_init(self, locals())
        with tf.variable_scope(name):
            # `name` becomes the scope prefix of the variable, e.g. "obj/w".
            self.w = tf.get_variable("w", [10, 10])

    def get_params_internal(self, **tags):
        # The only parameter is w; tag filters are ignored here.
        return [self.w]
class AllArgs(Serializable):
    """Fixture exercising serialization of positional, *args and **kwargs."""

    def __init__(self, vararg, *args, **kwargs):
        # Capture constructor args for clone/serialization support.
        Serializable.quick_init(self, locals())
        self.vararg = vararg
        self.args = args
        self.kwargs = kwargs
def test_serializable():
    # NOTE(review): suppress_params_loading presumably skips copying TF
    # parameter values during clone -- confirm against Parameterized.
    with suppress_params_loading():
        obj = Simple(name="obj")
        # clone() rebuilds from the captured constructor args, overriding
        # name, so the clone's variables land in a fresh "obj1/" scope.
        obj1 = Serializable.clone(obj, name="obj1")
        assert obj.w.name.startswith('obj/')
        assert obj1.w.name.startswith('obj1/')

        obj2 = AllArgs(0, *(1,), **{'kwarg': 2})
        obj3 = Serializable.clone(obj2)
        # Positional, *args and **kwargs must all survive the round trip.
        assert obj3.vararg == 0
        assert len(obj3.args) == 1 and obj3.args[0] == 1
        assert len(obj3.kwargs) == 1 and obj3.kwargs['kwarg'] == 2
if __name__ == "__main__":
    # Allow running this test file directly as a script.
    test_serializable()
| 1,192 | 28.097561 | 86 | py |
rllab | rllab-master/tests/test_instrument.py |
from rllab.misc import instrument
from nose2.tools import such
class TestClass(object):
    """Plain fixture with read-only properties, used to exercise stubbing."""

    @property
    def arr(self):
        # Simple list-valued property.
        return [1, 2, 3]

    @property
    def compound_arr(self):
        # A list containing a dict, to test nested/chained access.
        return [{"a": 1}]
# nose2 `such` DSL: tests defined in this block are registered on `it` and
# materialized into real test cases by it.createTests(globals()) below.
with such.A("instrument") as it:
    @it.should
    def test_concretize():
        # Plain containers pass through concretize unchanged.
        it.assertEqual(instrument.concretize([5]), [5])
        it.assertEqual(instrument.concretize((5,)), (5,))
        # stub() replaces classes in a globals dict with StubClass wrappers,
        # whose instances are StubObjects...
        fake_globals = dict(TestClass=TestClass)
        instrument.stub(fake_globals)
        modified = fake_globals["TestClass"]
        it.assertIsInstance(modified, instrument.StubClass)
        it.assertIsInstance(modified(), instrument.StubObject)
        it.assertEqual(instrument.concretize((5,)), (5,))
        # ...and concretize turns a stub object back into a real instance.
        it.assertIsInstance(instrument.concretize(modified()), TestClass)

    @it.should
    def test_chained_call():
        fake_globals = dict(TestClass=TestClass)
        instrument.stub(fake_globals)
        modified = fake_globals["TestClass"]
        # Attribute/index access on a stub yields lazy StubMethodCalls.
        it.assertIsInstance(modified().arr[0], instrument.StubMethodCall)
        it.assertIsInstance(modified().compound_arr[0]["a"], instrument.StubMethodCall)
        # Concretizing the lazy chain evaluates it on a real instance.
        it.assertEqual(instrument.concretize(modified().arr[0]), 1)

    @it.should
    def test_variant_generator():
        # Imperative API: key3 depends on key2 (1 choice when True, 2 when
        # False), so total variants = 3 * (1 + 2) = 9.
        vg = instrument.VariantGenerator()
        vg.add("key1", [1, 2, 3])
        vg.add("key2", [True, False])
        vg.add("key3", lambda key2: [1] if key2 else [1, 2])
        it.assertEqual(len(vg.variants()), 9)

        # Declarative API: the same variant structure via @variant methods.
        class VG(instrument.VariantGenerator):
            @instrument.variant
            def key1(self):
                return [1, 2, 3]

            @instrument.variant
            def key2(self):
                yield True
                yield False

            @instrument.variant
            def key3(self, key2):
                if key2:
                    yield 1
                else:
                    yield 1
                    yield 2

        it.assertEqual(len(VG().variants()), 9)

it.createTests(globals())
| 2,013 | 26.589041 | 87 | py |
def test_gru_network():
    """Smoke-test GRUNetwork: the output keeps the leading (batch, time)
    axes while mapping each per-step observation to output_dim units."""
    from rllab.core.network import GRUNetwork
    import lasagne.layers as L
    from rllab.misc import ext
    import numpy as np
    net = GRUNetwork(
        input_shape=(2, 3),
        output_dim=5,
        hidden_dim=4,
    )
    forward = ext.compile_function(
        inputs=[net.input_layer.input_var],
        outputs=L.get_output(net.output_layer),
    )
    # 6 sequences of 8 steps with (2, 3)-shaped observations -> (6, 8, 5).
    assert forward(np.zeros((6, 8, 2, 3))).shape == (6, 8, 5)
| 464 | 28.0625 | 62 | py |
rllab | rllab-master/tests/test_algos.py | import os
from rllab.algos.cem import CEM
from rllab.algos.cma_es import CMAES
from rllab.algos.erwr import ERWR
os.environ['THEANO_FLAGS'] = 'device=cpu,mode=FAST_COMPILE,optimizer=None'
from rllab.algos.vpg import VPG
from rllab.algos.tnpg import TNPG
from rllab.algos.ppo import PPO
from rllab.algos.trpo import TRPO
from rllab.algos.reps import REPS
from rllab.algos.ddpg import DDPG
from rllab.envs.grid_world_env import GridWorldEnv
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.policies.categorical_mlp_policy import CategoricalMLPPolicy
from rllab.policies.categorical_gru_policy import CategoricalGRUPolicy
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from rllab.policies.gaussian_gru_policy import GaussianGRUPolicy
from rllab.policies.deterministic_mlp_policy import DeterministicMLPPolicy
from rllab.q_functions.continuous_mlp_q_function import ContinuousMLPQFunction
from rllab.exploration_strategies.ou_strategy import OUStrategy
from rllab.baselines.zero_baseline import ZeroBaseline
from nose2 import tools
import numpy as np
# Arguments shared by all batch policy-optimization algorithms; one short
# iteration keeps each test a fast smoke test.
common_batch_algo_args = dict(
    n_itr=1,
    batch_size=1000,
    max_path_length=100,
)

# Per-algorithm constructor overrides; optimizer iteration counts are set to
# the minimum so tests exercise the code path without really training.
algo_args = {
    VPG: common_batch_algo_args,
    TNPG: dict(common_batch_algo_args,
               optimizer_args=dict(
                   cg_iters=1,
               ),
               ),
    TRPO: dict(common_batch_algo_args,
               optimizer_args=dict(
                   cg_iters=1,
               ),
               ),
    PPO: dict(common_batch_algo_args,
              optimizer_args=dict(
                  max_penalty_itr=1,
                  max_opt_itr=1
              ),
              ),
    REPS: dict(common_batch_algo_args,
               max_opt_itr=1,
               ),
    # DDPG uses an epoch/replay-pool interface instead of batch iterations.
    DDPG: dict(
        n_epochs=1,
        epoch_length=100,
        batch_size=32,
        min_pool_size=50,
        replay_pool_size=1000,
        eval_samples=100,
    ),
    CEM: dict(
        n_itr=1,
        max_path_length=100,
        n_samples=5,
    ),
    CMAES: dict(
        n_itr=1,
        max_path_length=100,
        batch_size=1000,
    ),
    ERWR: common_batch_algo_args,
}
# Cartesian product of algorithms with (env, policy) pairings that cover
# discrete and continuous action spaces, feedforward and recurrent policies.
polopt_cases = [
    (algo, env_cls, policy_cls)
    for algo in (VPG, TNPG, PPO, TRPO, CEM, CMAES, ERWR, REPS)
    for env_cls, policy_cls in (
        (GridWorldEnv, CategoricalMLPPolicy),
        (CartpoleEnv, GaussianMLPPolicy),
        (GridWorldEnv, CategoricalGRUPolicy),
        (CartpoleEnv, GaussianGRUPolicy),
    )
]
@tools.params(*polopt_cases)
def test_polopt_algo(algo_cls, env_cls, policy_cls):
    """Train each (algo, env, policy) combination for one iteration and
    check that the resulting policy parameters contain no NaNs."""
    print("Testing %s, %s, %s" % (algo_cls.__name__, env_cls.__name__, policy_cls.__name__))
    env = env_cls()
    policy = policy_cls(env_spec=env.spec, )
    baseline = ZeroBaseline(env_spec=env.spec)
    extra_args = algo_args.get(algo_cls, dict())
    algo = algo_cls(env=env, policy=policy, baseline=baseline, **extra_args)
    algo.train()
    assert not np.any(np.isnan(policy.get_param_values()))
def test_ddpg():
    """Run one tiny DDPG epoch on cartpole as an end-to-end smoke test."""
    cartpole = CartpoleEnv()
    ddpg = DDPG(
        env=cartpole,
        policy=DeterministicMLPPolicy(cartpole.spec),
        qf=ContinuousMLPQFunction(cartpole.spec),
        es=OUStrategy(cartpole.spec),
        n_epochs=1,
        epoch_length=100,
        batch_size=32,
        min_pool_size=50,
        replay_pool_size=1000,
        eval_samples=100,
    )
    ddpg.train()
| 3,227 | 27.821429 | 99 | py |
rllab | rllab-master/tests/test_sampler.py |
import numpy as np
def test_truncate_paths():
    """truncate_paths must trim the last path to hit the sample budget while
    leaving the input list unmodified."""
    from rllab.sampler.parallel_sampler import truncate_paths

    def make_path(length):
        # Minimal path dict with the fields truncate_paths operates on.
        return dict(
            observations=np.zeros((length, 1)),
            actions=np.zeros((length, 1)),
            rewards=np.zeros(length),
            env_infos=dict(),
            agent_infos=dict(lala=np.zeros(length)),
        )

    paths = [make_path(100), make_path(50)]
    truncated = truncate_paths(paths, 130)
    assert len(truncated) == 2
    # 100 + 50 samples truncated to 130: the second path is cut to 30.
    assert len(truncated[-1]["observations"]) == 30
    assert len(truncated[0]["observations"]) == 100
    # make sure not to change the original one
    assert len(paths) == 2
    assert len(paths[-1]["observations"]) == 50
| 880 | 25.69697 | 61 | py |
rllab | rllab-master/tests/test_stateful_pool.py |
def _worker_collect_once(_):
return 'a', 1
def test_stateful_pool():
    """Collecting 3 samples over a 3-worker pool yields exactly three 'a's."""
    from rllab.sampler import stateful_pool
    stateful_pool.singleton_pool.initialize(n_parallel=3)
    collected = stateful_pool.singleton_pool.run_collect(
        _worker_collect_once, 3, show_prog_bar=False)
    assert tuple(collected) == ('a', 'a', 'a')
def test_stateful_pool_over_capacity():
    """With more workers (4) than requested samples (3), at least 3 results
    must come back -- extra in-flight workers may contribute extras."""
    from rllab.sampler import stateful_pool
    stateful_pool.singleton_pool.initialize(n_parallel=4)
    collected = stateful_pool.singleton_pool.run_collect(
        _worker_collect_once, 3, show_prog_bar=False)
    assert len(collected) >= 3
| 601 | 27.666667 | 100 | py |
rllab | rllab-master/tests/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/tests/test_baselines.py | import os
os.environ['THEANO_FLAGS'] = 'mode=FAST_COMPILE,optimizer=None'
from rllab.algos.vpg import VPG
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.baselines.zero_baseline import ZeroBaseline
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from nose2 import tools
# Every baseline implementation that should work as a drop-in with VPG.
baselines = [ZeroBaseline, LinearFeatureBaseline, GaussianMLPBaseline]
@tools.params(*baselines)
def test_baseline(baseline_cls):
    """One short VPG iteration on cartpole with each baseline implementation."""
    env = CartpoleEnv()
    vpg = VPG(
        env=env,
        policy=GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(6,)),
        baseline=baseline_cls(env_spec=env.spec),
        n_itr=1, batch_size=1000, max_path_length=100,
    )
    vpg.train()
| 868 | 31.185185 | 73 | py |
rllab | rllab-master/tests/envs/test_envs.py | import numpy as np
from nose2 import tools
from rllab.envs.box2d.car_parking_env import CarParkingEnv
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.box2d.cartpole_swingup_env import CartpoleSwingupEnv
from rllab.envs.box2d.double_pendulum_env import DoublePendulumEnv
from rllab.envs.box2d.mountain_car_env import MountainCarEnv
from rllab.envs.grid_world_env import GridWorldEnv
from rllab.envs.identification_env import IdentificationEnv
import os
MUJOCO_ENABLED = True
try:
import rllab.mujoco_py
from rllab.envs.mujoco.half_cheetah_env import HalfCheetahEnv
from rllab.envs.mujoco.hopper_env import HopperEnv
from rllab.envs.mujoco.inverted_double_pendulum_env import InvertedDoublePendulumEnv
from rllab.envs.mujoco.point_env import PointEnv
from rllab.envs.mujoco.simple_humanoid_env import SimpleHumanoidEnv
from rllab.envs.mujoco.swimmer_env import SwimmerEnv
from rllab.envs.mujoco.walker2d_env import Walker2DEnv
from rllab.envs.mujoco.gather.point_gather_env import PointGatherEnv
from rllab.envs.mujoco.gather.swimmer_gather_env import SwimmerGatherEnv
from rllab.envs.mujoco.gather.ant_gather_env import AntGatherEnv
from rllab.envs.mujoco.maze.point_maze_env import PointMazeEnv
from rllab.envs.mujoco.maze.swimmer_maze_env import SwimmerMazeEnv
from rllab.envs.mujoco.maze.ant_maze_env import AntMazeEnv
except OSError:
print("Warning: Mujoco not installed. Skipping mujoco-related tests")
MUJOCO_ENABLED = False
from rllab.envs.noisy_env import NoisyObservationEnv, DelayedActionEnv
from rllab.envs.normalized_env import NormalizedEnv
from rllab.envs.proxy_env import ProxyEnv
from rllab.envs.gym_env import GymEnv
# Environments constructible with a bare no-argument constructor.
simple_env_classes = [
    GridWorldEnv,
    CartpoleEnv,
    CarParkingEnv,
    CartpoleSwingupEnv,
    DoublePendulumEnv,
    MountainCarEnv,
]

if MUJOCO_ENABLED:
    simple_env_classes += [
        PointEnv,
        Walker2DEnv,
        SwimmerEnv,
        SimpleHumanoidEnv,
        InvertedDoublePendulumEnv,
        HopperEnv,
        HalfCheetahEnv,
        PointGatherEnv,
        SwimmerGatherEnv,
        AntGatherEnv,
        PointMazeEnv,
        SwimmerMazeEnv,
        AntMazeEnv,
    ]

envs = [cls() for cls in simple_env_classes]
# Wrappers and environments that need constructor arguments.
envs += [
    ProxyEnv(envs[0]),
    IdentificationEnv(CartpoleEnv, {}),
    NoisyObservationEnv(CartpoleEnv()),
    DelayedActionEnv(CartpoleEnv()),
    NormalizedEnv(CartpoleEnv()),
    GymEnv('CartPole-v0'),
]
@tools.params(*envs)
def test_env(env):
    """Generic smoke test: space membership, reset/step contract, rendering."""
    print("Testing", env.__class__)
    obs_space = env.observation_space
    action_space = env.action_space
    obs = env.reset()
    assert obs_space.contains(obs)
    action = action_space.sample()
    assert action_space.contains(action)
    step = env.step(action)
    assert obs_space.contains(step.observation)
    assert np.isscalar(step.reward)
    # Rendering needs a display, which the CI machines lack.
    if 'CIRCLECI' in os.environ:
        print("Skipping rendering test")
    else:
        env.render()
    env.terminate()
| 3,052 | 28.640777 | 88 | py |
rllab | rllab-master/tests/envs/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/tests/envs/test_maze_env.py | import math
from rllab.envs.mujoco.maze.maze_env_utils import line_intersect, ray_segment_intersect
def test_line_intersect():
    """Sanity-check the maze geometry helpers on axis-aligned cases."""
    # Perpendicular lines through the origin meet at (0, 0).
    assert line_intersect((0, 0), (0, 1), (0, 0), (1, 0))[:2] == (0, 0)
    # Coincident lines: third returned element is 0 (presumably a
    # validity/determinant flag -- TODO confirm against maze_env_utils).
    assert line_intersect((0, 0), (0, 1), (0, 0), (0, 1))[2] == 0
    # A ray along +x from the origin hits the vertical segment at x=1.
    assert ray_segment_intersect(ray=((0, 0), 0), segment=((1, -1), (1, 1))) == (1, 0)
    # A ray pointing the opposite way (angle pi) misses the segment.
    assert ray_segment_intersect(ray=((0, 0), math.pi), segment=((1, -1), (1, 1))) is None
| 446 | 39.636364 | 90 | py |
rllab | rllab-master/tests/algos/test_trpo.py |
from rllab.envs.base import Env, Step
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from rllab.baselines.zero_baseline import ZeroBaseline
from rllab.algos.trpo import TRPO
from rllab.spaces.box import Box
import lasagne.nonlinearities
import numpy as np
import theano.tensor as TT
class DummyEnv(Env):
    """Trivial one-step environment: zero observations, random reward.

    Drives TRPO without real dynamics so that numerical problems in the
    policy/optimizer surface immediately.
    """

    @property
    def observation_space(self):
        # Unbounded 1-D observations (always zero in practice).
        return Box(low=-np.inf, high=np.inf, shape=(1,))

    @property
    def action_space(self):
        # 1-D actions bounded to [-5, 5].
        return Box(low=-5.0, high=5.0, shape=(1,))

    def reset(self):
        return np.zeros(1)

    def step(self, action):
        # Episode ends after a single step; reward ~ N(0, 1) regardless of
        # the action taken.
        return Step(observation=np.zeros(1), reward=np.random.normal(), done=True)
def naive_relu(x):
    # NOTE(review): TT.max(x, 0) passes 0 as the *axis* argument (a reduction
    # over axis 0), not an elementwise maximum with zero (that would be
    # TT.maximum(x, 0)). Kept as-is because this regression test exercises
    # exactly this "naive" formulation -- confirm intent before changing.
    return TT.max(x, 0)
def test_trpo_relu_nan():
    """Regression test: TRPO with the naive relu policy on DummyEnv must not
    produce NaN parameters after one iteration (step_size kept very small)."""
    env = DummyEnv()
    policy = GaussianMLPPolicy(
        env_spec=env.spec,
        hidden_nonlinearity=naive_relu,
        hidden_sizes=(1,))
    baseline = ZeroBaseline(env_spec=env.spec)
    algo = TRPO(
        env=env, policy=policy, baseline=baseline, n_itr=1, batch_size=1000, max_path_length=100,
        step_size=0.001
    )
    algo.train()
    # NaN anywhere in the parameters would propagate into the sum.
    assert not np.isnan(np.sum(policy.get_param_values()))
def test_trpo_deterministic_nan():
    """Regression test: with the policy's log-std forced to log(1e-8) (an
    almost deterministic Gaussian), 10 TRPO iterations must stay finite."""
    env = DummyEnv()
    policy = GaussianMLPPolicy(
        env_spec=env.spec,
        hidden_sizes=(1,))
    # Force an extremely small std so the Gaussian is near-deterministic,
    # which historically triggered NaNs in the KL/likelihood computations.
    policy._l_log_std.param.set_value([np.float32(np.log(1e-8))])
    baseline = ZeroBaseline(env_spec=env.spec)
    algo = TRPO(
        env=env, policy=policy, baseline=baseline, n_itr=10, batch_size=1000, max_path_length=100,
        step_size=0.01
    )
    algo.train()
    # NaN anywhere in the parameters would propagate into the sum.
    assert not np.isnan(np.sum(policy.get_param_values()))
| 1,651 | 26.081967 | 98 | py |
rllab | rllab-master/tests/algos/__init__.py | 1 | 0 | 0 | py | |
rllab | rllab-master/tests/regression_tests/test_issue_3.py |
from nose2.tools import such
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from rllab.algos.trpo import TRPO
from rllab.baselines.zero_baseline import ZeroBaseline
# nose2 `such` DSL block; the test is materialized by it.createTests below.
with such.A("Issue #3") as it:
    @it.should("be fixed")
    def test_issue_3():
        """
        As reported in https://github.com/rllab/rllab/issues/3, the adaptive_std parameter was not functioning properly
        """
        env = CartpoleEnv()
        # NOTE(review): env_spec is given the env itself rather than env.spec
        # (unlike the baseline below) -- confirm GaussianMLPPolicy accepts both.
        policy = GaussianMLPPolicy(
            env_spec=env,
            adaptive_std=True
        )
        baseline = ZeroBaseline(env_spec=env.spec)
        # One tiny TRPO iteration is enough to exercise the adaptive-std path.
        algo = TRPO(
            env=env,
            policy=policy,
            baseline=baseline,
            batch_size=100,
            n_itr=1
        )
        algo.train()

it.createTests(globals())
| 854 | 25.71875 | 119 | py |
rllab | rllab-master/tests/regression_tests/__init__.py | 1 | 0 | 0 | py | |
rllab | rllab-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# rllab documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 15 20:07:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'rllab'
copyright = '2016, rllab contributors'
author = 'rllab contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'rllabdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'rllab.tex', 'rllab Documentation',
'rllab contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'rllab', 'rllab Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'rllab', 'rllab Documentation',
author, 'rllab', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 9,550 | 31.050336 | 79 | py |
rllab | rllab-master/rllab/config_personal_template.py | import os
# Personal configuration template for running rllab experiments remotely
# (EC2 / Kubernetes / Docker). config.py copies this file to
# config_personal.py on first run; fill in the "<insert ...>" placeholders
# before launching remote experiments.
USE_GPU = False

DOCKER_IMAGE = "dementrock/rllab3-shared"

KUBE_PREFIX = "template_"

DOCKER_LOG_DIR = "/tmp/expt"

AWS_IMAGE_ID = "ami-67c5d00d"

# GPU experiments need a GPU instance type; otherwise use a CPU instance.
if USE_GPU:
    AWS_INSTANCE_TYPE = "g2.2xlarge"
else:
    AWS_INSTANCE_TYPE = "c4.2xlarge"

AWS_KEY_NAME = "research_virginia"

AWS_SPOT = True

AWS_SPOT_PRICE = '10.0'

AWS_IAM_INSTANCE_PROFILE_NAME = "rllab"

AWS_SECURITY_GROUPS = ["rllab"]

AWS_REGION_NAME = "us-west-2"

# Fixed: the placeholder previously had a stray trailing "e" outside the
# angle brackets ("<insert aws s3 bucket url for code>e").
AWS_CODE_SYNC_S3_PATH = "<insert aws s3 bucket url for code>"

# Glob patterns excluded when syncing the code directory to the cloud.
CODE_SYNC_IGNORES = ["*.git/*", "*data/*", "*src/*",
                     "*.pods/*", "*tests/*", "*examples/*", "docs/*"]

LOCAL_CODE_DIR = "<insert local code dir>"

AWS_S3_PATH = "<insert aws s3 bucket url>"

LABEL = "template"

DOCKER_CODE_DIR = "/root/code/rllab"

# Credentials come from the environment when available; the placeholders are
# only fallbacks for the user to replace.
AWS_ACCESS_KEY = os.environ.get("AWS_ACCESS_KEY", "<insert aws key>")
AWS_ACCESS_SECRET = os.environ.get("AWS_ACCESS_SECRET", "<insert aws secret>")
| 919 | 19 | 78 | py |
rllab | rllab-master/rllab/config.py | import os.path as osp
import os
# -- Local paths ------------------------------------------------------------
PROJECT_PATH = osp.abspath(osp.join(osp.dirname(__file__), '..'))
LOG_DIR = PROJECT_PATH + "/data"
USE_TF = False
# -- Docker / Kubernetes ----------------------------------------------------
DOCKER_IMAGE = "DOCKER_IMAGE"
DOCKERFILE_PATH = "/path/to/Dockerfile"
KUBE_PREFIX = "rllab_"
DOCKER_LOG_DIR = "/tmp/expt"
POD_DIR = PROJECT_PATH + "/.pods"
# -- AWS defaults (overridable in config_personal.py) -----------------------
AWS_S3_PATH = None
AWS_IMAGE_ID = None
AWS_INSTANCE_TYPE = "m4.xlarge"
AWS_KEY_NAME = "AWS_KEY_NAME"
AWS_SPOT = True
AWS_SPOT_PRICE = '1.0'
AWS_ACCESS_KEY = os.environ.get("AWS_ACCESS_KEY", None)
AWS_ACCESS_SECRET = os.environ.get("AWS_ACCESS_SECRET", None)
AWS_IAM_INSTANCE_PROFILE_NAME = "rllab"
AWS_SECURITY_GROUPS = ["rllab"]
AWS_SECURITY_GROUP_IDS = []
AWS_NETWORK_INTERFACES = []
AWS_EXTRA_CONFIGS = dict()
AWS_REGION_NAME = "us-east-1"
CODE_SYNC_IGNORES = ["*.git/*", "*data/*", "*.pod/*"]
DOCKER_CODE_DIR = "/root/code/rllab"
AWS_CODE_SYNC_S3_PATH = "s3://to/be/overriden/in/personal"
# whether to use fast code sync
FAST_CODE_SYNC = True
FAST_CODE_SYNC_IGNORES = [".git", "data", ".pods"]
KUBE_DEFAULT_RESOURCES = {
    "requests": {
        "cpu": 0.8,
    }
}
KUBE_DEFAULT_NODE_SELECTOR = {
    "aws/type": "m4.xlarge",
}
MUJOCO_KEY_PATH = osp.expanduser("~/.mujoco")
# MUJOCO_KEY_PATH = osp.join(osp.dirname(__file__), "../vendor/mujoco")
ENV = {}
EBS_OPTIMIZED = True
# On first run there is no config_personal.py yet: create it from the
# template, import it, and (outside CI) exit so the user can edit it before
# any experiment runs with placeholder values.
if osp.exists(osp.join(osp.dirname(__file__), "config_personal.py")):
    from .config_personal import *
else:
    print("Creating your personal config from template...")
    from shutil import copy
    copy(osp.join(PROJECT_PATH, "rllab/config_personal_template.py"), osp.join(PROJECT_PATH, "rllab/config_personal.py"))
    from .config_personal import *
    print("Personal config created, but you should probably edit it before further experiments " \
          "are run")
    if 'CIRCLECI' not in os.environ:
        print("Exiting.")
        import sys; sys.exit(0)
LABEL = ""
| 1,899 | 20.111111 | 121 | py |
rllab | rllab-master/rllab/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/sampler/base.py |
import numpy as np
from rllab.misc import special
from rllab.misc import tensor_utils
from rllab.algos import util
import rllab.misc.logger as logger
class Sampler(object):
    """Abstract interface for trajectory collection used by batch RL algorithms.

    A concrete sampler manages any worker processes it needs, gathers
    trajectories ("paths") for each training iteration, and converts the raw
    paths into processed training data.
    """

    def start_worker(self):
        """Set up the sampler, e.g. launch parallel workers if necessary."""
        raise NotImplementedError

    def obtain_samples(self, itr):
        """Collect trajectories for iteration ``itr``.

        :param itr: iteration number.
        :return: a list of collected paths.
        """
        raise NotImplementedError

    def process_samples(self, itr, paths):
        """Turn collected ``paths`` into processed sample data, typically a
        dictionary of concatenated tensors.

        :param itr: iteration number.
        :param paths: list of collected paths.
        :return: processed sample data.
        """
        raise NotImplementedError

    def shutdown_worker(self):
        """Tear down any workers started by :meth:`start_worker`."""
        raise NotImplementedError
class BaseSampler(Sampler):
    """Shared sample-processing logic: computes GAE advantages and discounted
    returns, fits the baseline, and logs per-iteration statistics."""

    def __init__(self, algo):
        """
        :param algo: the owning algorithm; provides ``baseline``, ``policy``,
            ``discount``, ``gae_lambda``, ``center_adv`` and ``positive_adv``.
        :type algo: BatchPolopt
        """
        self.algo = algo

    def process_samples(self, itr, paths):
        """Annotate each path with advantages/returns, build the training
        batch (flat for feed-forward policies, padded for recurrent ones),
        fit the baseline on the paths, and log statistics.

        :param itr: iteration number (logged as 'Iteration').
        :param paths: list of path dicts with at least "observations",
            "actions", "rewards", "env_infos" and "agent_infos".
        :return: dict of processed sample data.
        """
        baselines = []
        returns = []

        # Baseline value predictions, one array per path; use the batched
        # predict_n when the baseline provides it.
        if hasattr(self.algo.baseline, "predict_n"):
            all_path_baselines = self.algo.baseline.predict_n(paths)
        else:
            all_path_baselines = [self.algo.baseline.predict(path) for path in paths]

        for idx, path in enumerate(paths):
            # Append 0 as the value after the final state, so that
            # path_baselines[1:] lines up with V(s_{t+1}).
            path_baselines = np.append(all_path_baselines[idx], 0)
            # TD residuals: delta_t = r_t + discount * V(s_{t+1}) - V(s_t).
            deltas = path["rewards"] + \
                     self.algo.discount * path_baselines[1:] - \
                     path_baselines[:-1]
            # GAE advantage: discounted cumsum of deltas with discount * lambda.
            path["advantages"] = special.discount_cumsum(
                deltas, self.algo.discount * self.algo.gae_lambda)
            path["returns"] = special.discount_cumsum(path["rewards"], self.algo.discount)
            baselines.append(path_baselines[:-1])
            returns.append(path["returns"])

        # How well the baseline explains the empirical returns (for logging).
        ev = special.explained_variance_1d(
            np.concatenate(baselines),
            np.concatenate(returns)
        )

        if not self.algo.policy.recurrent:
            # Feed-forward policy: concatenate all paths into flat tensors.
            observations = tensor_utils.concat_tensor_list([path["observations"] for path in paths])
            actions = tensor_utils.concat_tensor_list([path["actions"] for path in paths])
            rewards = tensor_utils.concat_tensor_list([path["rewards"] for path in paths])
            returns = tensor_utils.concat_tensor_list([path["returns"] for path in paths])
            advantages = tensor_utils.concat_tensor_list([path["advantages"] for path in paths])
            env_infos = tensor_utils.concat_tensor_dict_list([path["env_infos"] for path in paths])
            agent_infos = tensor_utils.concat_tensor_dict_list([path["agent_infos"] for path in paths])

            if self.algo.center_adv:
                advantages = util.center_advantages(advantages)

            if self.algo.positive_adv:
                advantages = util.shift_advantages_to_positive(advantages)

            average_discounted_return = \
                np.mean([path["returns"][0] for path in paths])

            undiscounted_returns = [sum(path["rewards"]) for path in paths]

            ent = np.mean(self.algo.policy.distribution.entropy(agent_infos))

            samples_data = dict(
                observations=observations,
                actions=actions,
                rewards=rewards,
                returns=returns,
                advantages=advantages,
                env_infos=env_infos,
                agent_infos=agent_infos,
                paths=paths,
            )
        else:
            # Recurrent policy: pad every path to the same length and carry a
            # `valids` mask marking real (unpadded) timesteps.
            max_path_length = max([len(path["advantages"]) for path in paths])

            # make all paths the same length (pad extra advantages with 0)
            obs = [path["observations"] for path in paths]
            obs = tensor_utils.pad_tensor_n(obs, max_path_length)

            if self.algo.center_adv:
                # Normalize advantages over real timesteps before padding.
                raw_adv = np.concatenate([path["advantages"] for path in paths])
                adv_mean = np.mean(raw_adv)
                adv_std = np.std(raw_adv) + 1e-8
                adv = [(path["advantages"] - adv_mean) / adv_std for path in paths]
            else:
                adv = [path["advantages"] for path in paths]

            adv = np.asarray([tensor_utils.pad_tensor(a, max_path_length) for a in adv])

            actions = [path["actions"] for path in paths]
            actions = tensor_utils.pad_tensor_n(actions, max_path_length)

            rewards = [path["rewards"] for path in paths]
            rewards = tensor_utils.pad_tensor_n(rewards, max_path_length)

            returns = [path["returns"] for path in paths]
            returns = tensor_utils.pad_tensor_n(returns, max_path_length)

            agent_infos = [path["agent_infos"] for path in paths]
            agent_infos = tensor_utils.stack_tensor_dict_list(
                [tensor_utils.pad_tensor_dict(p, max_path_length) for p in agent_infos]
            )

            env_infos = [path["env_infos"] for path in paths]
            env_infos = tensor_utils.stack_tensor_dict_list(
                [tensor_utils.pad_tensor_dict(p, max_path_length) for p in env_infos]
            )

            valids = [np.ones_like(path["returns"]) for path in paths]
            valids = tensor_utils.pad_tensor_n(valids, max_path_length)

            average_discounted_return = \
                np.mean([path["returns"][0] for path in paths])

            undiscounted_returns = [sum(path["rewards"]) for path in paths]

            # Masked mean entropy over real timesteps only.
            ent = np.sum(self.algo.policy.distribution.entropy(agent_infos) * valids) / np.sum(valids)

            samples_data = dict(
                observations=obs,
                actions=actions,
                advantages=adv,
                rewards=rewards,
                returns=returns,
                valids=valids,
                agent_infos=agent_infos,
                env_infos=env_infos,
                paths=paths,
            )

        logger.log("fitting baseline...")
        if hasattr(self.algo.baseline, 'fit_with_samples'):
            self.algo.baseline.fit_with_samples(paths, samples_data)
        else:
            self.algo.baseline.fit(paths)
        logger.log("fitted")

        logger.record_tabular('Iteration', itr)
        logger.record_tabular('AverageDiscountedReturn',
                              average_discounted_return)
        logger.record_tabular('AverageReturn', np.mean(undiscounted_returns))
        logger.record_tabular('ExplainedVariance', ev)
        logger.record_tabular('NumTrajs', len(paths))
        logger.record_tabular('Entropy', ent)
        logger.record_tabular('Perplexity', np.exp(ent))
        logger.record_tabular('StdReturn', np.std(undiscounted_returns))
        logger.record_tabular('MaxReturn', np.max(undiscounted_returns))
        logger.record_tabular('MinReturn', np.min(undiscounted_returns))

        return samples_data
| 7,078 | 37.68306 | 115 | py |
rllab | rllab-master/rllab/sampler/utils.py | import numpy as np
from rllab.misc import tensor_utils
import time
def rollout(env, agent, max_path_length=np.inf, animated=False, speedup=1,
            always_return_paths=False):
    """Roll out one trajectory of ``agent`` in ``env``.

    :param env: environment; must provide reset/step and flattenable
        observation/action spaces.
    :param agent: policy; must provide reset() and get_action(obs).
    :param max_path_length: stop after this many steps even if not done.
    :param animated: render the environment while rolling out.
    :param speedup: divide the 0.05s per-frame sleep by this when rendering.
    :param always_return_paths: when animating, still return the path dict.
    :return: dict of stacked observations/actions/rewards/agent_infos/env_infos,
        or None when animating without ``always_return_paths``.
    """
    obs_list = []
    action_list = []
    reward_list = []
    agent_info_list = []
    env_info_list = []

    obs = env.reset()
    agent.reset()
    if animated:
        env.render()

    steps = 0
    while steps < max_path_length:
        action, agent_info = agent.get_action(obs)
        next_obs, reward, done, env_info = env.step(action)
        obs_list.append(env.observation_space.flatten(obs))
        reward_list.append(reward)
        action_list.append(env.action_space.flatten(action))
        agent_info_list.append(agent_info)
        env_info_list.append(env_info)
        steps += 1
        if done:
            break
        obs = next_obs
        if animated:
            env.render()
            time.sleep(0.05 / speedup)

    if animated and not always_return_paths:
        return

    return dict(
        observations=tensor_utils.stack_tensor_list(obs_list),
        actions=tensor_utils.stack_tensor_list(action_list),
        rewards=tensor_utils.stack_tensor_list(reward_list),
        agent_infos=tensor_utils.stack_tensor_dict_list(agent_info_list),
        env_infos=tensor_utils.stack_tensor_dict_list(env_info_list),
    )
| 1,314 | 28.886364 | 74 | py |
rllab | rllab-master/rllab/sampler/parallel_sampler.py | from rllab.sampler.utils import rollout
from rllab.sampler.stateful_pool import singleton_pool, SharedGlobal
from rllab.misc import ext
from rllab.misc import logger
from rllab.misc import tensor_utils
import pickle
import numpy as np
def _worker_init(G, id):
    """Per-worker setup: record the worker id on the shared state object.

    With more than one worker, force Theano onto the CPU and hide all GPUs
    from the worker process (NOTE(review): for THEANO_FLAGS to take effect
    this presumably runs before theano is imported in the worker — confirm).
    """
    if singleton_pool.n_parallel > 1:
        import os
        os.environ['THEANO_FLAGS'] = 'device=cpu'
        os.environ['CUDA_VISIBLE_DEVICES'] = ""
    G.worker_id = id


def initialize(n_parallel):
    """Start the singleton worker pool with ``n_parallel`` workers and tag
    each worker with its id via :func:`_worker_init`."""
    singleton_pool.initialize(n_parallel)
    singleton_pool.run_each(_worker_init, [(id,) for id in range(singleton_pool.n_parallel)])
def _get_scoped_G(G, scope):
    """Return the shared-state object for ``scope``.

    ``scope is None`` means the default scope: ``G`` itself is returned.
    Otherwise a per-scope SharedGlobal is created lazily under ``G.scopes``
    (inheriting the worker id) and returned.
    """
    if scope is None:
        return G
    if not hasattr(G, "scopes"):
        G.scopes = {}
    if scope not in G.scopes:
        scoped = SharedGlobal()
        scoped.worker_id = G.worker_id
        G.scopes[scope] = scoped
    return G.scopes[scope]
def _worker_populate_task(G, env, policy, scope=None):
    """Worker side of populate_task: unpickle env/policy into scoped state."""
    G = _get_scoped_G(G, scope)
    G.env = pickle.loads(env)
    G.policy = pickle.loads(policy)


def _worker_terminate_task(G, scope=None):
    """Worker side of terminate_task: terminate and drop env/policy if set."""
    G = _get_scoped_G(G, scope)
    if getattr(G, "env", None):
        G.env.terminate()
        G.env = None
    if getattr(G, "policy", None):
        G.policy.terminate()
        G.policy = None


def populate_task(env, policy, scope=None):
    """Install ``env`` and ``policy`` on every worker (pickled copies when
    running in parallel; the objects themselves in the single-process case)."""
    logger.log("Populating workers...")
    if singleton_pool.n_parallel > 1:
        singleton_pool.run_each(
            _worker_populate_task,
            [(pickle.dumps(env), pickle.dumps(policy), scope)] * singleton_pool.n_parallel
        )
    else:
        # avoid unnecessary copying
        G = _get_scoped_G(singleton_pool.G, scope)
        G.env = env
        G.policy = policy
    logger.log("Populated")


def terminate_task(scope=None):
    """Terminate the env/policy installed under ``scope`` on every worker."""
    singleton_pool.run_each(
        _worker_terminate_task,
        [(scope,)] * singleton_pool.n_parallel
    )
def _worker_set_seed(_, seed):
    """Seed one worker's RNGs (the first argument, shared state G, is unused)."""
    logger.log("Setting seed to %d" % seed)
    ext.set_seed(seed)


def set_seed(seed):
    """Seed every worker; worker ``i`` receives ``seed + i`` so that their
    sample streams differ."""
    seed_args = [(seed + rank,) for rank in range(singleton_pool.n_parallel)]
    singleton_pool.run_each(_worker_set_seed, seed_args)
def _worker_set_policy_params(G, params, scope=None):
    """Load new policy parameters into this worker's (scoped) policy."""
    scoped = _get_scoped_G(G, scope)
    scoped.policy.set_param_values(params)


def _worker_set_env_params(G, params, scope=None):
    """Load new environment parameters into this worker's (scoped) env."""
    scoped = _get_scoped_G(G, scope)
    scoped.env.set_param_values(params)


def _worker_collect_one_path(G, max_path_length, scope=None):
    """Roll out one trajectory and return ``(path, number_of_samples)``."""
    scoped = _get_scoped_G(G, scope)
    path = rollout(scoped.env, scoped.policy, max_path_length)
    return path, len(path["rewards"])
def sample_paths(
        policy_params,
        max_samples,
        max_path_length=np.inf,
        env_params=None,
        scope=None):
    """
    :param policy_params: parameters for the policy. This will be updated on each worker process
    :param max_samples: desired maximum number of samples to be collected. The actual number of collected samples
    might be greater since all trajectories will be rolled out either until termination or until max_path_length is
    reached
    :param max_path_length: horizon / maximum length of a single trajectory
    :param env_params: optional parameters for the environment; pushed to each worker when given
    :param scope: optional scope name selecting which per-worker env/policy pair to use
    :return: a list of collected paths
    """
    singleton_pool.run_each(
        _worker_set_policy_params,
        [(policy_params, scope)] * singleton_pool.n_parallel
    )
    if env_params is not None:
        singleton_pool.run_each(
            _worker_set_env_params,
            [(env_params, scope)] * singleton_pool.n_parallel
        )
    # Keep collecting one path at a time until at least max_samples steps
    # have been gathered across all workers.
    return singleton_pool.run_collect(
        _worker_collect_one_path,
        threshold=max_samples,
        args=(max_path_length, scope),
        show_prog_bar=True
    )
def truncate_paths(paths, max_samples):
"""
Truncate the list of paths so that the total number of samples is exactly equal to max_samples. This is done by
removing extra paths at the end of the list, and make the last path shorter if necessary
:param paths: a list of paths
:param max_samples: the absolute maximum number of samples
:return: a list of paths, truncated so that the number of samples adds up to max-samples
"""
# chop samples collected by extra paths
# make a copy
paths = list(paths)
total_n_samples = sum(len(path["rewards"]) for path in paths)
while len(paths) > 0 and total_n_samples - len(paths[-1]["rewards"]) >= max_samples:
total_n_samples -= len(paths.pop(-1)["rewards"])
if len(paths) > 0:
last_path = paths.pop(-1)
truncated_last_path = dict()
truncated_len = len(last_path["rewards"]) - (total_n_samples - max_samples)
for k, v in last_path.items():
if k in ["observations", "actions", "rewards"]:
truncated_last_path[k] = tensor_utils.truncate_tensor_list(v, truncated_len)
elif k in ["env_infos", "agent_infos"]:
truncated_last_path[k] = tensor_utils.truncate_tensor_dict(v, truncated_len)
else:
raise NotImplementedError
paths.append(truncated_last_path)
return paths
| 5,045 | 31.346154 | 115 | py |
rllab | rllab-master/rllab/sampler/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/sampler/stateful_pool.py |
from joblib.pool import MemmapingPool
import multiprocessing as mp
from rllab.misc import logger
import pyprind
import time
import traceback
import sys
class ProgBarCounter(object):
    """Progress bar over an arbitrary total, rescaled to a fixed resolution
    of 1,000,000 ticks. Disabled when the logger is in tabular-only mode."""

    def __init__(self, total_count):
        # total_count: number of raw increments that corresponds to 100%.
        self.total_count = total_count
        self.max_progress = 1000000
        self.cur_progress = 0
        self.cur_count = 0
        if not logger.get_log_tabular_only():
            self.pbar = pyprind.ProgBar(self.max_progress)
        else:
            self.pbar = None

    def inc(self, increment):
        """Advance the bar by ``increment`` raw counts."""
        if not logger.get_log_tabular_only():
            self.cur_count += increment
            # Rescale the raw count into the fixed 1e6-tick range.
            # NOTE(review): under Python 3 this is float division, so
            # pbar.update receives a float delta — presumably accepted by
            # pyprind; confirm.
            new_progress = self.cur_count * self.max_progress / self.total_count
            if new_progress < self.max_progress:
                self.pbar.update(new_progress - self.cur_progress)
                self.cur_progress = new_progress

    def stop(self):
        """Finish the bar if it was created and is still active."""
        if self.pbar is not None and self.pbar.active:
            self.pbar.stop()
class SharedGlobal(object):
    """Empty namespace object holding per-process shared state (the "G"
    passed to worker functions); attributes are attached dynamically."""
    pass
class StatefulPool(object):
    """A process pool whose workers each carry persistent shared state ``G``.

    With ``n_parallel == 1`` everything runs in-process against ``self.G``;
    otherwise a joblib MemmapingPool plus two mp.Queues (used as a start
    barrier in :meth:`run_each`) are created by :meth:`initialize`.
    """

    def __init__(self):
        self.n_parallel = 1
        self.pool = None
        self.queue = None
        self.worker_queue = None
        self.G = SharedGlobal()

    def initialize(self, n_parallel):
        """(Re)create the pool with ``n_parallel`` workers, tearing down any
        existing pool and resetting the shared state first."""
        self.n_parallel = n_parallel
        if self.pool is not None:
            print("Warning: terminating existing pool")
            self.pool.terminate()
            self.queue.close()
            self.worker_queue.close()
            self.G = SharedGlobal()
        if n_parallel > 1:
            self.queue = mp.Queue()
            self.worker_queue = mp.Queue()
            self.pool = MemmapingPool(
                self.n_parallel,
                temp_folder="/tmp",
            )

    def run_each(self, runner, args_list=None):
        """
        Run the method on each worker process, and collect the result of execution.

        The runner method will receive 'G' as its first argument, followed by the arguments
        in the args_list, if any

        :return: list of results, one per worker.
        """
        if args_list is None:
            args_list = [tuple()] * self.n_parallel
        assert len(args_list) == self.n_parallel
        if self.n_parallel > 1:
            results = self.pool.map_async(
                _worker_run_each, [(runner, args) for args in args_list]
            )
            # Barrier: wait until every worker has picked up a task, then
            # release them all at once so each worker runs exactly one task.
            for i in range(self.n_parallel):
                self.worker_queue.get()
            for i in range(self.n_parallel):
                self.queue.put(None)
            return results.get()
        return [runner(self.G, *args_list[0])]

    def run_map(self, runner, args_list):
        """Map ``runner(G, *args)`` over ``args_list``, in parallel if possible."""
        if self.n_parallel > 1:
            return self.pool.map(_worker_run_map, [(runner, args) for args in args_list])
        else:
            ret = []
            for args in args_list:
                ret.append(runner(self.G, *args))
            return ret

    def run_imap_unordered(self, runner, args_list):
        """Like :meth:`run_map` but yields results lazily, in arbitrary order."""
        if self.n_parallel > 1:
            for x in self.pool.imap_unordered(_worker_run_map, [(runner, args) for args in args_list]):
                yield x
        else:
            for args in args_list:
                yield runner(self.G, *args)

    def run_collect(self, collect_once, threshold, args=None, show_prog_bar=True):
        """
        Run the collector method using the worker pool. The collect_once method will receive 'G' as
        its first argument, followed by the provided args, if any. The method should return a pair of values.
        The first should be the object to be collected, and the second is the increment to be added.
        This will continue until the total increment reaches or exceeds the given threshold.

        Sample script:

        def collect_once(G):
            return 'a', 1

        stateful_pool.run_collect(collect_once, threshold=3) # => ['a', 'a', 'a']

        :param collect_once: collector callable returning (item, increment).
        :param threshold: stop once the summed increments reach this value.
        :param args: extra positional arguments passed to ``collect_once``.
        :param show_prog_bar: display a progress bar while collecting.
        :return: list of all collected items.
        """
        if args is None:
            args = tuple()
        if self.pool:
            # Workers share a manager-backed counter/lock; the master polls
            # it to drive the progress bar and detect completion.
            manager = mp.Manager()
            counter = manager.Value('i', 0)
            lock = manager.RLock()
            results = self.pool.map_async(
                _worker_run_collect,
                [(collect_once, counter, lock, threshold, args)] * self.n_parallel
            )
            if show_prog_bar:
                pbar = ProgBarCounter(threshold)
            last_value = 0
            while True:
                time.sleep(0.1)
                with lock:
                    if counter.value >= threshold:
                        if show_prog_bar:
                            pbar.stop()
                        break
                    if show_prog_bar:
                        pbar.inc(counter.value - last_value)
                    last_value = counter.value
            # Each worker returns a list; flatten them into one list.
            return sum(results.get(), [])
        else:
            count = 0
            results = []
            if show_prog_bar:
                pbar = ProgBarCounter(threshold)
            while count < threshold:
                result, inc = collect_once(self.G, *args)
                results.append(result)
                count += inc
                if show_prog_bar:
                    pbar.inc(inc)
            if show_prog_bar:
                pbar.stop()
            return results
# Process-wide singleton pool used by the samplers in this package.
singleton_pool = StatefulPool()


def _worker_run_each(all_args):
    """Worker-side driver for StatefulPool.run_each: handshake with the
    master, then run the task against the worker's shared state."""
    try:
        runner, args = all_args
        # signals to the master that this task is up and running
        singleton_pool.worker_queue.put(None)
        # wait for the master to signal continuation
        singleton_pool.queue.get()
        return runner(singleton_pool.G, *args)
    except Exception:
        # Re-raise with the full formatted traceback embedded in the message,
        # so it survives being sent back across the process boundary.
        raise Exception("".join(traceback.format_exception(*sys.exc_info())))


def _worker_run_collect(all_args):
    """Worker-side driver for StatefulPool.run_collect: keep collecting
    until the shared counter reaches the threshold."""
    try:
        collect_once, counter, lock, threshold, args = all_args
        collected = []
        while True:
            with lock:
                if counter.value >= threshold:
                    return collected
            # Collect outside the lock so workers can run concurrently.
            result, inc = collect_once(singleton_pool.G, *args)
            collected.append(result)
            with lock:
                counter.value += inc
                if counter.value >= threshold:
                    return collected
    except Exception:
        raise Exception("".join(traceback.format_exception(*sys.exc_info())))


def _worker_run_map(all_args):
    """Worker-side driver for StatefulPool.run_map / run_imap_unordered."""
    try:
        runner, args = all_args
        return runner(singleton_pool.G, *args)
    except Exception:
        raise Exception("".join(traceback.format_exception(*sys.exc_info())))
| 6,578 | 32.060302 | 109 | py |
rllab | rllab-master/rllab/core/network.py |
import lasagne.layers as L
import lasagne.nonlinearities as LN
import lasagne.init as LI
import theano.tensor as TT
import theano
from rllab.misc import ext
from rllab.core.lasagne_layers import OpLayer
from rllab.core.lasagne_powered import LasagnePowered
from rllab.core.serializable import Serializable
import numpy as np
def wrapped_conv(*args, **kwargs):
    """Convolution implementation that tries cuDNN on GPU and falls back to
    Theano's default conv2d.

    The positional unpack below shows the expected call signature:
    ``(input, W, input_shape, get_W_shape)`` plus keyword options — the shape
    of Lasagne's ``convolution=`` hook (presumably; confirm against the
    Lasagne Conv2DLayer version in use).
    """
    copy = dict(kwargs)
    # dnn_conv does not accept these keyword arguments; strip them from the
    # copy that is forwarded to it.
    copy.pop("image_shape", None)
    copy.pop("filter_shape", None)
    # Require the caller to have passed a truthy filter_flip (the assert
    # fails when it is absent or falsy).
    assert copy.pop("filter_flip", False)
    input, W, input_shape, get_W_shape = args
    if theano.config.device == 'cpu':
        return theano.tensor.nnet.conv2d(*args, **kwargs)
    try:
        return theano.sandbox.cuda.dnn.dnn_conv(
            input.astype('float32'),
            W.astype('float32'),
            **copy
        )
    except Exception as e:
        # Any cuDNN failure (unavailable, unsupported config) falls back.
        print("falling back to default conv2d")
        return theano.tensor.nnet.conv2d(*args, **kwargs)
class MLP(LasagnePowered, Serializable):
    """Feed-forward multi-layer perceptron built from Lasagne DenseLayers.

    Builds hidden layers of the given sizes (optionally with batch norm),
    followed by one output layer, and exposes the input/output layers and
    the symbolic output expression.
    """

    def __init__(self, output_dim, hidden_sizes, hidden_nonlinearity,
                 output_nonlinearity, hidden_W_init=LI.GlorotUniform(), hidden_b_init=LI.Constant(0.),
                 output_W_init=LI.GlorotUniform(), output_b_init=LI.Constant(0.),
                 name=None, input_var=None, input_layer=None, input_shape=None, batch_norm=False):

        Serializable.quick_init(self, locals())

        # Layer names are prefixed with the network name, if given.
        if name is None:
            prefix = ""
        else:
            prefix = name + "_"

        # Either reuse a provided input layer or create one from input_shape.
        if input_layer is None:
            l_in = L.InputLayer(shape=(None,) + input_shape, input_var=input_var)
        else:
            l_in = input_layer
        self._layers = [l_in]
        l_hid = l_in
        for idx, hidden_size in enumerate(hidden_sizes):
            l_hid = L.DenseLayer(
                l_hid,
                num_units=hidden_size,
                nonlinearity=hidden_nonlinearity,
                name="%shidden_%d" % (prefix, idx),
                W=hidden_W_init,
                b=hidden_b_init,
            )
            if batch_norm:
                l_hid = L.batch_norm(l_hid)
            self._layers.append(l_hid)

        l_out = L.DenseLayer(
            l_hid,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            name="%soutput" % (prefix,),
            W=output_W_init,
            b=output_b_init,
        )
        self._layers.append(l_out)
        self._l_in = l_in
        self._l_out = l_out
        # self._input_var = l_in.input_var
        self._output = L.get_output(l_out)
        LasagnePowered.__init__(self, [l_out])

    @property
    def input_layer(self):
        return self._l_in

    @property
    def output_layer(self):
        return self._l_out

    # @property
    # def input_var(self):
    #     return self._l_in.input_var

    @property
    def layers(self):
        return self._layers

    @property
    def output(self):
        # Symbolic output expression of the network.
        return self._output
class GRULayer(L.Layer):
    """
    A gated recurrent unit implements the following update mechanism:
    Reset gate:        r(t) = f_r(x(t) @ W_xr + h(t-1) @ W_hr + b_r)
    Update gate:       u(t) = f_u(x(t) @ W_xu + h(t-1) @ W_hu + b_u)
    Cell gate:         c(t) = f_c(x(t) @ W_xc + r(t) * (h(t-1) @ W_hc) + b_c)
    New hidden state:  h(t) = (1 - u(t)) * h(t-1) + u_t * c(t)
    Note that the reset, update, and cell vectors must have the same dimension as the hidden state.

    Input is expected as (batch, time, *feature_dims); extra feature
    dimensions are flattened before the recurrence.
    """

    def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        # None nonlinearities mean "linear".
        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        # Drop the (batch, time) dims; flatten the rest into the input dim.
        input_shape = self.input_shape[2:]
        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity

    def step(self, x, hprev):
        """One GRU step: map (x(t), h(t-1)) -> h(t), per the class docstring."""
        r = self.gate_nonlinearity(x.dot(self.W_xr) + hprev.dot(self.W_hr) + self.b_r)
        u = self.gate_nonlinearity(x.dot(self.W_xu) + hprev.dot(self.W_hu) + self.b_u)
        c = self.nonlinearity(x.dot(self.W_xc) + r * (hprev.dot(self.W_hc)) + self.b_c)
        h = (1 - u) * hprev + u * c
        return h.astype(theano.config.floatX)

    def get_step_layer(self, l_in, l_prev_hidden):
        """Return a layer that applies a single GRU step, sharing this
        layer's parameters (used for step-by-step rollout)."""
        return GRUStepLayer(incomings=[l_in, l_prev_hidden], gru_layer=self)

    def get_output_shape_for(self, input_shape):
        n_batch, n_steps = input_shape[:2]
        return n_batch, n_steps, self.num_units

    def get_output_for(self, input, **kwargs):
        n_batches = input.shape[0]
        n_steps = input.shape[1]
        input = TT.reshape(input, (n_batches, n_steps, -1))
        # Broadcast the learned initial hidden state over the batch.
        h0s = TT.tile(TT.reshape(self.h0, (1, self.num_units)), (n_batches, 1))
        # flatten extra dimensions
        shuffled_input = input.dimshuffle(1, 0, 2)
        # scan iterates over the leading (time) axis.
        hs, _ = theano.scan(fn=self.step, sequences=[shuffled_input], outputs_info=h0s)
        # Shuffle back to (batch, time, units).
        shuffled_hs = hs.dimshuffle(1, 0, 2)
        return shuffled_hs
class GRUStepLayer(L.MergeLayer):
    """Single-timestep view of a :class:`GRULayer`.

    Merges two incomings -- the current input ``x`` and the previous hidden
    state ``h(t-1)`` -- and applies the parent GRU's ``step`` once, sharing
    the parent's parameters (used for one-step-at-a-time rollout).
    """

    def __init__(self, incomings, gru_layer, name=None):
        super(GRUStepLayer, self).__init__(incomings, name)
        self._gru_layer = gru_layer

    def get_params(self, **tags):
        # Delegate to the parent GRU so parameters are shared, not duplicated.
        return self._gru_layer.get_params(**tags)

    def get_output_shape_for(self, input_shapes):
        # ``input_shapes`` is a list of shape tuples, one per incoming layer
        # (Lasagne MergeLayer contract). The previous code used
        # ``input_shapes[0]`` -- the whole first shape tuple -- as the batch
        # size, returning a malformed nested shape; the batch dimension is
        # ``input_shapes[0][0]``.
        n_batch = input_shapes[0][0]
        return n_batch, self._gru_layer.num_units

    def get_output_for(self, inputs, **kwargs):
        x, hprev = inputs
        n_batch = x.shape[0]
        # Flatten any extra feature dimensions before the GRU step.
        x = x.reshape((n_batch, -1))
        return self._gru_layer.step(x, hprev)
class GRUNetwork(object):
    """Recurrent network: GRU over sequences plus a dense output layer.

    Builds both a full-sequence graph (input -> GRU -> dense output) and a
    parallel single-step graph (step_input + prev_hidden -> step_hidden ->
    step_output) whose dense layer shares the W/b of the sequence output
    layer; the step graph supports incremental rollout.
    """

    def __init__(self, input_shape, output_dim, hidden_dim, hidden_nonlinearity=LN.rectify,
                 output_nonlinearity=None, name=None, input_var=None, input_layer=None):
        if input_layer is None:
            # (batch, time) + input_shape
            l_in = L.InputLayer(shape=(None, None) + input_shape, input_var=input_var, name="input")
        else:
            l_in = input_layer
        # Inputs for the single-step graph: one observation + prev hidden.
        l_step_input = L.InputLayer(shape=(None,) + input_shape)
        l_step_prev_hidden = L.InputLayer(shape=(None, hidden_dim))
        l_gru = GRULayer(l_in, num_units=hidden_dim, hidden_nonlinearity=hidden_nonlinearity,
                         hidden_init_trainable=False)
        # Collapse (batch, time) so a plain DenseLayer can map every step.
        l_gru_flat = L.ReshapeLayer(
            l_gru, shape=(-1, hidden_dim)
        )
        l_output_flat = L.DenseLayer(
            l_gru_flat,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
        )
        # Reshape the flat output back to (batch, time, output_dim), taking
        # the batch/time sizes from the input layer at run time.
        l_output = OpLayer(
            l_output_flat,
            op=lambda flat_output, l_input:
            flat_output.reshape((l_input.shape[0], l_input.shape[1], -1)),
            shape_op=lambda flat_output_shape, l_input_shape:
            (l_input_shape[0], l_input_shape[1], flat_output_shape[-1]),
            extras=[l_in]
        )
        l_step_hidden = l_gru.get_step_layer(l_step_input, l_step_prev_hidden)
        # Step output shares parameters with the sequence output layer.
        l_step_output = L.DenseLayer(
            l_step_hidden,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            W=l_output_flat.W,
            b=l_output_flat.b,
        )

        self._l_in = l_in
        self._hid_init_param = l_gru.h0
        self._l_gru = l_gru
        self._l_out = l_output
        self._l_step_input = l_step_input
        self._l_step_prev_hidden = l_step_prev_hidden
        self._l_step_hidden = l_step_hidden
        self._l_step_output = l_step_output

    @property
    def input_layer(self):
        return self._l_in

    @property
    def input_var(self):
        return self._l_in.input_var

    @property
    def output_layer(self):
        return self._l_out

    @property
    def step_input_layer(self):
        return self._l_step_input

    @property
    def step_prev_hidden_layer(self):
        return self._l_step_prev_hidden

    @property
    def step_hidden_layer(self):
        return self._l_step_hidden

    @property
    def step_output_layer(self):
        return self._l_step_output

    @property
    def hid_init_param(self):
        # Learnable (here: non-trainable) initial hidden state parameter.
        return self._hid_init_param
class ConvNetwork(object):
    """Convolutional network: conv stack, then dense hidden layers, then a
    dense output layer. Accepts flattened input for 2-D/3-D input shapes and
    reshapes it internally before the conv layers."""

    def __init__(self, input_shape, output_dim, hidden_sizes,
                 conv_filters, conv_filter_sizes, conv_strides, conv_pads,
                 hidden_W_init=LI.GlorotUniform(), hidden_b_init=LI.Constant(0.),
                 output_W_init=LI.GlorotUniform(), output_b_init=LI.Constant(0.),
                 # conv_W_init=LI.GlorotUniform(), conv_b_init=LI.Constant(0.),
                 hidden_nonlinearity=LN.rectify,
                 output_nonlinearity=LN.softmax,
                 name=None, input_var=None):
        if name is None:
            prefix = ""
        else:
            prefix = name + "_"

        # The input layer takes a flat vector; reshape it to (batch,) +
        # input_shape for the conv stack. A 2-D shape gets a singleton
        # channel dimension prepended.
        if len(input_shape) == 3:
            l_in = L.InputLayer(shape=(None, np.prod(input_shape)), input_var=input_var)
            l_hid = L.reshape(l_in, ([0],) + input_shape)
        elif len(input_shape) == 2:
            l_in = L.InputLayer(shape=(None, np.prod(input_shape)), input_var=input_var)
            input_shape = (1,) + input_shape
            l_hid = L.reshape(l_in, ([0],) + input_shape)
        else:
            l_in = L.InputLayer(shape=(None,) + input_shape, input_var=input_var)
            l_hid = l_in
        for idx, conv_filter, filter_size, stride, pad in zip(
                range(len(conv_filters)),
                conv_filters,
                conv_filter_sizes,
                conv_strides,
                conv_pads,
        ):
            l_hid = L.Conv2DLayer(
                l_hid,
                num_filters=conv_filter,
                filter_size=filter_size,
                stride=(stride, stride),
                pad=pad,
                nonlinearity=hidden_nonlinearity,
                name="%sconv_hidden_%d" % (prefix, idx),
                convolution=wrapped_conv,
            )
        for idx, hidden_size in enumerate(hidden_sizes):
            l_hid = L.DenseLayer(
                l_hid,
                num_units=hidden_size,
                nonlinearity=hidden_nonlinearity,
                name="%shidden_%d" % (prefix, idx),
                W=hidden_W_init,
                b=hidden_b_init,
            )
        l_out = L.DenseLayer(
            l_hid,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            name="%soutput" % (prefix,),
            W=output_W_init,
            b=output_b_init,
        )
        self._l_in = l_in
        self._l_out = l_out
        self._input_var = l_in.input_var

    @property
    def input_layer(self):
        return self._l_in

    @property
    def output_layer(self):
        return self._l_out

    @property
    def input_var(self):
        return self._l_in.input_var
| 12,163 | 34.054755 | 103 | py |
rllab | rllab-master/rllab/core/lasagne_powered.py | from rllab.core.parameterized import Parameterized
from rllab.misc.overrides import overrides
import lasagne.layers as L
class LasagnePowered(Parameterized):
    """Parameterized mixin for objects built from Lasagne layers: parameters
    are collected by walking the graph below the stored output layers."""

    def __init__(self, output_layers):
        self._output_layers = output_layers
        super(LasagnePowered, self).__init__()

    @property
    def output_layers(self):
        return self._output_layers

    @overrides
    def get_params_internal(self, **tags):  # this gives ALL the vars (not the params values)
        return L.get_all_params(  # this lasagne function also returns all var below the passed layers
            L.concat(self._output_layers),
            **tags
        )
| 654 | 30.190476 | 102 | py |
rllab | rllab-master/rllab/core/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/core/serializable.py | import inspect
import sys
class Serializable(object):
    """Pickle support based on remembering constructor arguments.

    Instead of pickling an object's full ``__dict__``, a Serializable
    records the positional and keyword arguments its ``__init__`` received
    and reconstructs itself by calling the constructor again on unpickling.
    Subclasses typically call ``Serializable.quick_init(self, locals())`` as
    the first statement of their ``__init__``.
    """

    def __init__(self, *args, **kwargs):
        self.__args = args
        self.__kwargs = kwargs

    def quick_init(self, locals_):
        """Capture the caller's constructor arguments from its ``locals()``.

        Idempotent: a second call on the same instance is a no-op, so
        cooperative ``__init__`` chains only record the outermost call.
        """
        if getattr(self, "_serializable_initialized", False):
            return
        py3 = sys.version_info >= (3, 0)
        # getargspec was removed in newer Python 3; pick the right API.
        spec = (inspect.getfullargspec if py3 else inspect.getargspec)(self.__init__)
        varkw_name = spec.varkw if py3 else spec.keywords
        kwargs = locals_[varkw_name] if varkw_name else dict()
        varargs = locals_[spec.varargs] if spec.varargs else tuple()
        # spec.args[0] is "self"; the rest are the named positional params.
        positional = tuple(locals_[name] for name in spec.args[1:])
        self.__args = positional + varargs
        self.__kwargs = kwargs
        setattr(self, "_serializable_initialized", True)

    def __getstate__(self):
        return {"__args": self.__args, "__kwargs": self.__kwargs}

    def __setstate__(self, d):
        # Rebuild via the constructor, then adopt the fresh instance's state.
        fresh = type(self)(*d["__args"], **d["__kwargs"])
        self.__dict__.update(fresh.__dict__)

    @classmethod
    def clone(cls, obj, **kwargs):
        """Return a copy of ``obj`` with selected constructor args replaced.

        Keys in ``kwargs`` that match named positional parameters of
        ``obj.__init__`` override the recorded positional values; all other
        keys go into the recorded keyword arguments.
        """
        assert isinstance(obj, Serializable)
        state = obj.__getstate__()
        if sys.version_info >= (3, 0):
            spec = inspect.getfullargspec(obj.__init__)
        else:
            spec = inspect.getargspec(obj.__init__)
        positional_names = spec.args[1:]
        new_args = list(state["__args"])
        for name, value in kwargs.items():
            if name in positional_names:
                new_args[positional_names.index(name)] = value
            else:
                state["__kwargs"][name] = value
        state["__args"] = new_args
        clone_obj = type(obj).__new__(type(obj))
        clone_obj.__setstate__(state)
        return clone_obj
| 2,077 | 30.484848 | 78 | py |
rllab | rllab-master/rllab/core/lasagne_layers.py | # encoding: utf-8
import lasagne.layers as L
import lasagne
import theano
import theano.tensor as TT
class ParamLayer(L.Layer):
    """Layer that ignores its input's values and emits a learned parameter
    vector of length ``num_units``, tiled to match the input's leading
    dimensions (so the output shape is ``input_shape[:-1] + (num_units,)``).
    """

    def __init__(self, incoming, num_units, param=lasagne.init.Constant(0.),
                 trainable=True, **kwargs):
        super(ParamLayer, self).__init__(incoming, **kwargs)
        self.num_units = num_units
        self.param = self.add_param(
            param,
            (num_units,),
            name="param",
            trainable=trainable
        )

    def get_output_shape_for(self, input_shape):
        # Keep the leading dims; replace the last dim with num_units.
        return input_shape[:-1] + (self.num_units,)

    def get_output_for(self, input, **kwargs):
        ndim = input.ndim
        # Reshape param to (1, ..., 1, num_units) so it can be tiled.
        reshaped_param = TT.reshape(self.param, (1,) * (ndim - 1) + (self.num_units,))
        # Tile across every leading dimension of the input, once along the last.
        tile_arg = TT.concatenate([input.shape[:-1], [1]])
        tiled = TT.tile(reshaped_param, tile_arg, ndim=ndim)
        return tiled
class OpLayer(L.MergeLayer):
    """Layer that applies an arbitrary symbolic function ``op`` to its
    incoming layer(s).

    ``op`` receives the outputs of ``incoming`` followed by each layer in
    ``extras``; ``shape_op`` maps the corresponding input shapes to the
    output shape (defaults to identity, i.e. shape-preserving ops).
    """

    def __init__(self, incoming, op,
                 shape_op=lambda x: x, extras=None, **kwargs):
        if extras is None:
            extras = []
        incomings = [incoming] + extras
        super(OpLayer, self).__init__(incomings, **kwargs)
        self.op = op
        self.shape_op = shape_op
        self.incomings = incomings

    def get_output_shape_for(self, input_shapes):
        return self.shape_op(*input_shapes)

    def get_output_for(self, inputs, **kwargs):
        return self.op(*inputs)
class BatchNormLayer(L.Layer):
    """
    lasagne.layers.BatchNormLayer(incoming, axes='auto', epsilon=1e-4,
    alpha=0.1, mode='low_mem',
    beta=lasagne.init.Constant(0), gamma=lasagne.init.Constant(1),
    mean=lasagne.init.Constant(0), std=lasagne.init.Constant(1), **kwargs)
    Batch Normalization
    This layer implements batch normalization of its inputs, following [1]_:
    .. math::
        y = \\frac{x - \\mu}{\\sqrt{\\sigma^2 + \\epsilon}} \\gamma + \\beta
    That is, the input is normalized to zero mean and unit variance, and then
    linearly transformed. The crucial part is that the mean and variance are
    computed across the batch dimension, i.e., over examples, not per example.
    During training, :math:`\\mu` and :math:`\\sigma^2` are defined to be the
    mean and variance of the current input mini-batch :math:`x`, and during
    testing, they are replaced with average statistics over the training
    data. Consequently, this layer has four stored parameters: :math:`\\beta`,
    :math:`\\gamma`, and the averages :math:`\\mu` and :math:`\\sigma^2`
    (nota bene: instead of :math:`\\sigma^2`, the layer actually stores
    :math:`1 / \\sqrt{\\sigma^2 + \\epsilon}`, for compatibility to cuDNN).
    By default, this layer learns the average statistics as exponential moving
    averages computed during training, so it can be plugged into an existing
    network without any changes of the training procedure (see Notes).
    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape
    axes : 'auto', int or tuple of int
        The axis or axes to normalize over. If ``'auto'`` (the default),
        normalize over all axes except for the second: this will normalize over
        the minibatch dimension for dense layers, and additionally over all
        spatial dimensions for convolutional layers.
    epsilon : scalar
        Small constant :math:`\\epsilon` added to the variance before taking
        the square root and dividing by it, to avoid numerical problems
    alpha : scalar
        Coefficient for the exponential moving average of batch-wise means and
        standard deviations computed during training; the closer to one, the
        more it will depend on the last batches seen
    beta : Theano shared variable, expression, numpy array, callable or None
        Initial value, expression or initializer for :math:`\\beta`. Must match
        the incoming shape, skipping all axes in `axes`. Set to ``None`` to fix
        it to 0.0 instead of learning it.
        See :func:`lasagne.utils.create_param` for more information.
    gamma : Theano shared variable, expression, numpy array, callable or None
        Initial value, expression or initializer for :math:`\\gamma`. Must
        match the incoming shape, skipping all axes in `axes`. Set to ``None``
        to fix it to 1.0 instead of learning it.
        See :func:`lasagne.utils.create_param` for more information.
    mean : Theano shared variable, expression, numpy array, or callable
        Initial value, expression or initializer for :math:`\\mu`. Must match
        the incoming shape, skipping all axes in `axes`.
        See :func:`lasagne.utils.create_param` for more information.
    std : Theano shared variable, expression, numpy array, or callable
        Initial value, expression or initializer for :math:`1 / \\sqrt{
        \\sigma^2 + \\epsilon}`. Must match the incoming shape, skipping all
        axes in `axes`.
        See :func:`lasagne.utils.create_param` for more information.
    **kwargs
        Any additional keyword arguments are passed to the :class:`Layer`
        superclass.
    Notes
    -----
    This layer should be inserted between a linear transformation (such as a
    :class:`DenseLayer`, or :class:`Conv2DLayer`) and its nonlinearity. The
    convenience function :func:`batch_norm` modifies an existing layer to
    insert batch normalization in front of its nonlinearity.
    The behavior can be controlled by passing keyword arguments to
    :func:`lasagne.layers.get_output()` when building the output expression
    of any network containing this layer.
    During training, [1]_ normalize each input mini-batch by its statistics
    and update an exponential moving average of the statistics to be used for
    validation. This can be achieved by passing ``deterministic=False``.
    For validation, [1]_ normalize each input mini-batch by the stored
    statistics. This can be achieved by passing ``deterministic=True``.
    For more fine-grained control, ``batch_norm_update_averages`` can be passed
    to update the exponential moving averages (``True``) or not (``False``),
    and ``batch_norm_use_averages`` can be passed to use the exponential moving
    averages for normalization (``True``) or normalize each mini-batch by its
    own statistics (``False``). These settings override ``deterministic``.
    Note that for testing a model after training, [1]_ replace the stored
    exponential moving average statistics by fixing all network weights and
    re-computing average statistics over the training data in a layerwise
    fashion. This is not part of the layer implementation.
    In case you set `axes` to not include the batch dimension (the first axis,
    usually), normalization is done per example, not across examples. This does
    not require any averages, so you can pass ``batch_norm_update_averages``
    and ``batch_norm_use_averages`` as ``False`` in this case.
    See also
    --------
    batch_norm : Convenience function to apply batch normalization to a layer
    References
    ----------
    .. [1] Ioffe, Sergey and Szegedy, Christian (2015):
           Batch Normalization: Accelerating Deep Network Training by Reducing
           Internal Covariate Shift. http://arxiv.org/abs/1502.03167.
    """
    def __init__(self, incoming, axes='auto', epsilon=1e-4, alpha=0.1,
                 mode='low_mem', beta=lasagne.init.Constant(0), gamma=lasagne.init.Constant(1),
                 mean=lasagne.init.Constant(0), std=lasagne.init.Constant(1), **kwargs):
        super(BatchNormLayer, self).__init__(incoming, **kwargs)
        if axes == 'auto':
            # default: normalize over all but the second axis
            axes = (0,) + tuple(range(2, len(self.input_shape)))
        elif isinstance(axes, int):
            axes = (axes,)
        self.axes = axes
        self.epsilon = epsilon
        self.alpha = alpha
        self.mode = mode
        # create parameters, ignoring all dimensions in axes
        shape = [size for axis, size in enumerate(self.input_shape)
                 if axis not in self.axes]
        if any(size is None for size in shape):
            raise ValueError("BatchNormLayer needs specified input sizes for "
                             "all axes not normalized over.")
        if beta is None:
            self.beta = None
        else:
            self.beta = self.add_param(beta, shape, 'beta',
                                       trainable=True, regularizable=False)
        if gamma is None:
            self.gamma = None
        else:
            self.gamma = self.add_param(gamma, shape, 'gamma',
                                        trainable=True, regularizable=False)
        # mean/std are tracked statistics, not trained parameters
        self.mean = self.add_param(mean, shape, 'mean',
                                   trainable=False, regularizable=False)
        self.std = self.add_param(std, shape, 'std',
                                  trainable=False, regularizable=False)

    def get_output_for(self, input, deterministic=False, **kwargs):
        input_mean = input.mean(self.axes)
        input_std = TT.sqrt(input.var(self.axes) + self.epsilon)

        # Decide whether to use the stored averages or mini-batch statistics
        use_averages = kwargs.get('batch_norm_use_averages',
                                  deterministic)
        if use_averages:
            mean = self.mean
            std = self.std
        else:
            mean = input_mean
            std = input_std

        # Decide whether to update the stored averages
        update_averages = kwargs.get('batch_norm_update_averages',
                                     not deterministic)
        if update_averages:
            # Trick: To update the stored statistics, we create memory-aliased
            # clones of the stored statistics:
            running_mean = theano.clone(self.mean, share_inputs=False)
            running_std = theano.clone(self.std, share_inputs=False)
            # set a default update for them:
            running_mean.default_update = ((1 - self.alpha) * running_mean +
                                           self.alpha * input_mean)
            running_std.default_update = ((1 - self.alpha) *
                                          running_std +
                                          self.alpha * input_std)
            # and make sure they end up in the graph without participating in
            # the computation (this way their default_update will be collected
            # and applied, but the computation will be optimized away):
            mean += 0 * running_mean
            std += 0 * running_std

        # prepare dimshuffle pattern inserting broadcastable axes as needed
        param_axes = iter(list(range(input.ndim - len(self.axes))))
        pattern = ['x' if input_axis in self.axes
                   else next(param_axes)
                   for input_axis in range(input.ndim)]

        # apply dimshuffle pattern to all parameters
        beta = 0 if self.beta is None else self.beta.dimshuffle(pattern)
        gamma = 1 if self.gamma is None else self.gamma.dimshuffle(pattern)
        mean = mean.dimshuffle(pattern)
        std = std.dimshuffle(pattern)

        # normalize
        normalized = (input - mean) * (gamma * TT.inv(std)) + beta
        return normalized
def batch_norm(layer, **kwargs):
    """
    Apply batch normalization to an existing layer. This is a convenience
    function modifying an existing layer to include batch normalization: It
    will steal the layer's nonlinearity if there is one (effectively
    introducing the normalization right before the nonlinearity), remove
    the layer's bias if there is one (because it would be redundant), and add
    a :class:`BatchNormLayer` and :class:`NonlinearityLayer` on top.
    Parameters
    ----------
    layer : A :class:`Layer` instance
        The layer to apply the normalization to; note that it will be
        irreversibly modified as specified above
    **kwargs
        Any additional keyword arguments are passed on to the
        :class:`BatchNormLayer` constructor.
    Returns
    -------
    BatchNormLayer or NonlinearityLayer instance
        A batch normalization layer stacked on the given modified `layer`, or
        a nonlinearity layer stacked on top of both if `layer` was nonlinear.
    Examples
    --------
    Just wrap any layer into a :func:`batch_norm` call on creating it:
    >>> from lasagne.layers import InputLayer, DenseLayer, batch_norm
    >>> from lasagne.nonlinearities import tanh
    >>> l1 = InputLayer((64, 768))
    >>> l2 = batch_norm(DenseLayer(l1, num_units=500, nonlinearity=tanh))
    This introduces batch normalization right before its nonlinearity:
    >>> from lasagne.layers import get_all_layers
    >>> [l.__class__.__name__ for l in get_all_layers(l2)]
    ['InputLayer', 'DenseLayer', 'BatchNormLayer', 'NonlinearityLayer']
    """
    # Steal the nonlinearity so normalization can be inserted right before it.
    nonlinearity = getattr(layer, 'nonlinearity', None)
    if nonlinearity is not None:
        layer.nonlinearity = lasagne.nonlinearities.identity
    # Drop the bias: BatchNormLayer's beta parameter makes it redundant.
    if hasattr(layer, 'b') and layer.b is not None:
        del layer.params[layer.b]
        layer.b = None
    layer = BatchNormLayer(layer, **kwargs)
    # Re-apply the stolen nonlinearity on top of the normalized output.
    if nonlinearity is not None:
        layer = L.NonlinearityLayer(layer, nonlinearity)
    return layer
| 13,436 | 43.939799 | 95 | py |
rllab | rllab-master/rllab/core/parameterized.py | from contextlib import contextmanager
from rllab.core.serializable import Serializable
from rllab.misc.tensor_utils import flatten_tensors, unflatten_tensors
# Module-level switch consulted by Parameterized.__setstate__ to decide
# whether pickled parameter values should be restored on deserialization.
load_params = True


@contextmanager
def suppress_params_loading():
    """Context manager that temporarily disables parameter loading.

    While the ``with`` block is active, ``load_params`` is False, so
    Parameterized objects unpickled inside it skip restoring their stored
    parameter values.

    Bug fix: the flag is now restored in a ``finally`` clause, so an
    exception raised inside the ``with`` block can no longer leave
    parameter loading permanently disabled (previously the reset after
    ``yield`` was skipped on exceptions).
    """
    global load_params
    load_params = False
    try:
        yield
    finally:
        load_params = True
class Parameterized(Serializable):
    """Mixin for objects exposing a flat view of their trainable parameters.

    Subclasses implement ``get_params_internal``; this class adds per-tag
    caching of the parameter list, their dtypes and shapes, plus helpers to
    read/write all parameter values as one flattened vector.  Pickling stores
    the flattened values alongside the Serializable constructor args.
    """

    def __init__(self):
        # One cache per derived quantity, keyed by a canonical tag tuple.
        self._cached_params = {}
        self._cached_param_dtypes = {}
        self._cached_param_shapes = {}

    @staticmethod
    def _tag_key(tags):
        # Canonical, hashable representation of a tag dict.
        return tuple(sorted(tags.items(), key=lambda item: item[0]))

    def get_params_internal(self, **tags):
        """
        Internal method to be implemented which does not perform caching
        """
        raise NotImplementedError

    def get_params(self, **tags):
        """
        Get the list of parameters, filtered by the provided tags.
        Some common tags include 'regularizable' and 'trainable'
        """
        key = self._tag_key(tags)
        if key not in self._cached_params:
            self._cached_params[key] = self.get_params_internal(**tags)
        return self._cached_params[key]

    def get_param_dtypes(self, **tags):
        key = self._tag_key(tags)
        if key not in self._cached_param_dtypes:
            dtypes = [p.get_value(borrow=True).dtype
                      for p in self.get_params(**tags)]
            self._cached_param_dtypes[key] = dtypes
        return self._cached_param_dtypes[key]

    def get_param_shapes(self, **tags):
        key = self._tag_key(tags)
        if key not in self._cached_param_shapes:
            shapes = [p.get_value(borrow=True).shape
                      for p in self.get_params(**tags)]
            self._cached_param_shapes[key] = shapes
        return self._cached_param_shapes[key]

    def get_param_values(self, **tags):
        # Single flat vector containing every matching parameter's values.
        values = [p.get_value(borrow=True) for p in self.get_params(**tags)]
        return flatten_tensors(values)

    def set_param_values(self, flattened_params, **tags):
        debug = tags.pop("debug", False)
        values = unflatten_tensors(
            flattened_params, self.get_param_shapes(**tags))
        for param, dtype, value in zip(
                self.get_params(**tags),
                self.get_param_dtypes(**tags),
                values):
            param.set_value(value.astype(dtype))
            if debug:
                print("setting value of %s" % param.name)

    def flat_to_params(self, flattened_params, **tags):
        # Split a flat vector into per-parameter arrays without assigning.
        return unflatten_tensors(flattened_params, self.get_param_shapes(**tags))

    def __getstate__(self):
        d = Serializable.__getstate__(self)
        d["params"] = self.get_param_values()
        return d

    def __setstate__(self, d):
        Serializable.__setstate__(self, d)
        global load_params
        # Honor suppress_params_loading(): skip restoring values if disabled.
        if load_params:
            self.set_param_values(d["params"])
| 3,042 | 34.383721 | 103 | py |
rllab | rllab-master/rllab/core/lasagne_helpers.py | from lasagne.layers import get_all_layers
from lasagne import utils
def get_full_output(layer_or_layers, inputs=None, **kwargs):
    """
    Computes the output of the network at one or more given layers.
    Optionally, you can define the input(s) to propagate through the network
    instead of using the input variable(s) associated with the network's
    input layer(s).
    Parameters
    ----------
    layer_or_layers : Layer or list
        the :class:`Layer` instance for which to compute the output
        expressions, or a list of :class:`Layer` instances.
    inputs : None, Theano expression, numpy array, or dict
        If None, uses the input variables associated with the
        :class:`InputLayer` instances.
        If a Theano expression, this defines the input for a single
        :class:`InputLayer` instance. Will throw a ValueError if there
        are multiple :class:`InputLayer` instances.
        If a numpy array, this will be wrapped as a Theano constant
        and used just like a Theano expression.
        If a dictionary, any :class:`Layer` instance (including the
        input layers) can be mapped to a Theano expression or numpy
        array to use instead of its regular output.
    Returns
    -------
    output : Theano expression or list
        the output of the given layer(s) for the given network input;
        returned together with a dict of per-layer extra outputs collected
        from layers that implement ``get_full_output_for``.
    Notes
    -----
    Depending on your network architecture, `get_output([l1, l2])` may
    be crucially different from `[get_output(l1), get_output(l2)]`. Only
    the former ensures that the output expressions depend on the same
    intermediate expressions. For example, when `l1` and `l2` depend on
    a common dropout layer, the former will use the same dropout mask for
    both, while the latter will use two different dropout masks.
    """
    from lasagne.layers.input import InputLayer
    from lasagne.layers.base import MergeLayer
    # obtain topological ordering of all layers the output layer(s) depend on
    treat_as_input = list(inputs.keys()) if isinstance(inputs, dict) else []
    all_layers = get_all_layers(layer_or_layers, treat_as_input)
    # initialize layer-to-expression mapping from all input layers
    all_outputs = dict((layer, layer.input_var)
                       for layer in all_layers
                       if isinstance(layer, InputLayer) and
                       layer not in treat_as_input)
    # extra (auxiliary) outputs reported by layers implementing
    # get_full_output_for, keyed by layer
    extra_outputs = dict()
    # update layer-to-expression mapping from given input(s), if any
    if isinstance(inputs, dict):
        all_outputs.update((layer, utils.as_theano_expression(expr))
                           for layer, expr in list(inputs.items()))
    elif inputs is not None:
        # if len(all_outputs) > 1:
        #     raise ValueError("get_output() was called with a single input "
        #                      "expression on a network with multiple input "
        #                      "layers. Please call it with a dictionary of "
        #                      "input expressions instead.")
        # NOTE(review): unlike upstream Lasagne, a single input expression is
        # applied to ALL input layers here (the multi-input guard above is
        # deliberately disabled).
        for input_layer in all_outputs:
            all_outputs[input_layer] = utils.as_theano_expression(inputs)
    # update layer-to-expression mapping by propagating the inputs
    for layer in all_layers:
        if layer not in all_outputs:
            try:
                if isinstance(layer, MergeLayer):
                    layer_inputs = [all_outputs[input_layer]
                                    for input_layer in layer.input_layers]
                else:
                    layer_inputs = all_outputs[layer.input_layer]
            except KeyError:
                # one of the input_layer attributes must have been `None`
                raise ValueError("get_output() was called without giving an "
                                 "input expression for the free-floating "
                                 "layer %r. Please call it with a dictionary "
                                 "mapping this layer to an input expression."
                                 % layer)
            if hasattr(layer, "get_full_output_for"):
                # Layer provides (output, extra); record the extra separately.
                output, extra = layer.get_full_output_for(layer_inputs, **kwargs)
                all_outputs[layer] = output
                extra_outputs[layer] = extra
            else:
                all_outputs[layer] = layer.get_output_for(
                    layer_inputs, **kwargs)
    # return the output(s) of the requested layer(s) only
    try:
        return [all_outputs[layer] for layer in layer_or_layers], extra_outputs
    except TypeError:
        # layer_or_layers was a single layer, not an iterable
        return all_outputs[layer_or_layers], extra_outputs
def get_output(layer_or_layers, inputs=None, **kwargs):
    """Lasagne-compatible wrapper around :func:`get_full_output` that
    returns only the layer outputs, discarding the extra outputs dict."""
    outputs, _extras = get_full_output(layer_or_layers, inputs, **kwargs)
    return outputs
rllab | rllab-master/rllab/envs/base.py | from .env_spec import EnvSpec
import collections
from cached_property import cached_property
class Env(object):
    """Abstract base class for rllab environments.

    Subclasses implement ``step``, ``reset``, ``action_space`` and
    ``observation_space``; the remaining methods are optional hooks with
    no-op defaults.
    """

    def step(self, action):
        """
        Run one timestep of the environment's dynamics. When end of episode
        is reached, reset() should be called to reset the environment's internal state.
        Input
        -----
        action : an action provided by the environment
        Outputs
        -------
        (observation, reward, done, info)
        observation : agent's observation of the current environment
        reward [Float] : amount of reward due to the previous action
        done : a boolean, indicating whether the episode has ended
        info : a dictionary containing other diagnostic information from the previous action
        """
        raise NotImplementedError

    def reset(self):
        """
        Resets the state of the environment, returning an initial observation.
        Outputs
        -------
        observation : the initial observation of the space. (Initial reward is assumed to be 0.)
        """
        raise NotImplementedError

    @property
    def action_space(self):
        """
        Returns a Space object
        :rtype: rllab.spaces.base.Space
        """
        raise NotImplementedError

    @property
    def observation_space(self):
        """
        Returns a Space object
        :rtype: rllab.spaces.base.Space
        """
        raise NotImplementedError

    # Helpers that derive from Spaces
    @property
    def action_dim(self):
        # Flattened dimensionality of a single action.
        return self.action_space.flat_dim

    def render(self):
        # Optional visualization hook; no-op by default.
        pass

    def log_diagnostics(self, paths):
        """
        Log extra information per iteration based on the collected paths
        """
        pass

    @cached_property
    def spec(self):
        # Computed once and cached; bundles the two spaces for consumers
        # (e.g. policies) that only need the interface, not the env itself.
        return EnvSpec(
            observation_space=self.observation_space,
            action_space=self.action_space,
        )

    @property
    def horizon(self):
        """
        Horizon of the environment, if it has one
        """
        raise NotImplementedError

    def terminate(self):
        """
        Clean up operation,
        """
        pass

    def get_param_values(self):
        # Environments with internal parameters may override; None means
        # "nothing to snapshot".
        return None

    def set_param_values(self, params):
        pass
# Underlying record type: (observation, reward, done, info).
_Step = collections.namedtuple("Step", ["observation", "reward", "done", "info"])


def Step(observation, reward, done, **kwargs):
    """Build the namedtuple returned by ``Env.step``.

    Any extra keyword arguments are collected into the ``info`` dict,
    which carries per-step diagnostic data.
    """
    return _Step(observation=observation, reward=reward, done=done, info=kwargs)
rllab | rllab-master/rllab/envs/grid_world_env.py | import numpy as np
from .base import Env
from rllab.spaces import Discrete
from rllab.envs.base import Step
from rllab.core.serializable import Serializable
# Built-in map layouts, keyed by name.  Cell legend (see GridWorldEnv):
# 'S' start, 'F' free, 'H' hole (terminal, no reward), 'W' wall, 'G' goal.
MAPS = {
    "chain": [
        "GFFFFFFFFFFFFFSFFFFFFFFFFFFFG"
    ],
    "4x4_safe": [
        "SFFF",
        "FWFW",
        "FFFW",
        "WFFG"
    ],
    "4x4": [
        "SFFF",
        "FHFH",
        "FFFH",
        "HFFG"
    ],
    "8x8": [
        "SFFFFFFF",
        "FFFFFFFF",
        "FFFHFFFF",
        "FFFFFHFF",
        "FFFHFFFF",
        "FHHFFFHF",
        "FHFFHFHF",
        "FFFHFFFG"
    ],
}
class GridWorldEnv(Env, Serializable):
    """
    'S' : starting point
    'F' or '.': free space
    'W' or 'x': wall
    'H' or 'o': hole (terminates episode)
    'G' : goal

    States are integers indexing cells in row-major order
    (``state = row * n_col + col``); actions are integers 0-3
    (left/down/right/up).
    """

    def __init__(self, desc='4x4'):
        Serializable.quick_init(self, locals())
        # `desc` may be the name of a built-in map or an explicit list of rows.
        if isinstance(desc, str):
            desc = MAPS[desc]
        desc = np.array(list(map(list, desc)))
        # Normalize alternative cell symbols to the canonical legend.
        desc[desc == '.'] = 'F'
        desc[desc == 'o'] = 'H'
        desc[desc == 'x'] = 'W'
        self.desc = desc
        self.n_row, self.n_col = desc.shape
        # Exactly one 'S' cell is expected; unpacking enforces that.
        (start_x,), (start_y,) = np.nonzero(desc == 'S')
        self.start_state = start_x * self.n_col + start_y
        self.state = None
        self.domain_fig = None

    def reset(self):
        self.state = self.start_state
        return self.state

    @staticmethod
    def action_from_direction(d):
        """
        Return the action corresponding to the given direction. This is a helper method for debugging and testing
        purposes.
        :return: the action index corresponding to the given direction
        """
        return dict(
            left=0,
            down=1,
            right=2,
            up=3
        )[d]

    def step(self, action):
        """
        action map:
        0: left
        1: down
        2: right
        3: up
        :param action: an integer action index in {0, 1, 2, 3}
        :return: a Step namedtuple (observation, reward, done, info)
        """
        # Transitions are deterministic here, but sampled through the
        # distribution interface so stochastic variants can reuse this code.
        possible_next_states = self.get_possible_next_states(self.state, action)
        probs = [x[1] for x in possible_next_states]
        next_state_idx = np.random.choice(len(probs), p=probs)
        next_state = possible_next_states[next_state_idx][0]
        next_x = next_state // self.n_col
        next_y = next_state % self.n_col
        next_state_type = self.desc[next_x, next_y]
        if next_state_type == 'H':
            done = True
            reward = 0
        elif next_state_type in ['F', 'S']:
            done = False
            reward = 0
        elif next_state_type == 'G':
            done = True
            reward = 1
        else:
            raise NotImplementedError
        self.state = next_state
        return Step(observation=self.state, reward=reward, done=done)

    def get_possible_next_states(self, state, action):
        """
        Given the state and action, return a list of possible next states and their probabilities. Only next states
        with nonzero probabilities will be returned
        :param state: start state
        :param action: action
        :return: a list of pairs (s', p(s'|s,a))
        """
        # assert self.observation_space.contains(state)
        # assert self.action_space.contains(action)
        x = state // self.n_col
        y = state % self.n_col
        coords = np.array([x, y])
        # Row/col deltas for actions 0-3 (left, down, right, up).
        increments = np.array([[0, -1], [1, 0], [0, 1], [-1, 0]])
        # Clip so moves off the edge of the grid keep the agent in place.
        next_coords = np.clip(
            coords + increments[action],
            [0, 0],
            [self.n_row - 1, self.n_col - 1]
        )
        next_state = next_coords[0] * self.n_col + next_coords[1]
        state_type = self.desc[x, y]
        next_state_type = self.desc[next_coords[0], next_coords[1]]
        # Walls block movement; terminal cells (hole/goal) are absorbing.
        if next_state_type == 'W' or state_type == 'H' or state_type == 'G':
            return [(state, 1.)]
        else:
            return [(next_state, 1.)]

    @property
    def action_space(self):
        return Discrete(4)

    @property
    def observation_space(self):
        return Discrete(self.n_row * self.n_col)
| 4,094 | 26.119205 | 115 | py |
rllab | rllab-master/rllab/envs/normalized_env.py | import numpy as np
from rllab import spaces
from rllab.core.serializable import Serializable
from rllab.envs.proxy_env import ProxyEnv
from rllab.spaces.box import Box
from rllab.misc.overrides import overrides
from rllab.envs.base import Step
class NormalizedEnv(ProxyEnv, Serializable):
    """Wrapper that rescales actions from [-1, 1] to the wrapped env's
    action bounds, and can optionally standardize observations and rewards
    using exponential-moving-average statistics, and scale rewards by a
    constant factor.

    :param env: the environment to wrap
    :param scale_reward: constant multiplier applied to every reward
    :param normalize_obs: if True, standardize observations by running
        mean/variance estimates
    :param normalize_reward: if True, divide rewards by a running std estimate
    :param obs_alpha: EMA coefficient for the observation statistics
    :param reward_alpha: EMA coefficient for the reward statistics
    """

    def __init__(
            self,
            env,
            scale_reward=1.,
            normalize_obs=False,
            normalize_reward=False,
            obs_alpha=0.001,
            reward_alpha=0.001,
    ):
        Serializable.quick_init(self, locals())
        ProxyEnv.__init__(self, env)
        self._scale_reward = scale_reward
        self._normalize_obs = normalize_obs
        self._normalize_reward = normalize_reward
        self._obs_alpha = obs_alpha
        self._obs_mean = np.zeros(env.observation_space.flat_dim)
        self._obs_var = np.ones(env.observation_space.flat_dim)
        self._reward_alpha = reward_alpha
        self._reward_mean = 0.
        self._reward_var = 1.

    def _update_obs_estimate(self, obs):
        # Exponential moving averages of the flattened observation's
        # mean and variance.
        flat_obs = self.wrapped_env.observation_space.flatten(obs)
        self._obs_mean = (1 - self._obs_alpha) * self._obs_mean + self._obs_alpha * flat_obs
        self._obs_var = (1 - self._obs_alpha) * self._obs_var + self._obs_alpha * np.square(flat_obs - self._obs_mean)

    def _update_reward_estimate(self, reward):
        self._reward_mean = (1 - self._reward_alpha) * self._reward_mean + self._reward_alpha * reward
        self._reward_var = (1 - self._reward_alpha) * self._reward_var + self._reward_alpha * np.square(reward -
                                                                                                       self._reward_mean)

    def _apply_normalize_obs(self, obs):
        self._update_obs_estimate(obs)
        return (obs - self._obs_mean) / (np.sqrt(self._obs_var) + 1e-8)

    def _apply_normalize_reward(self, reward):
        self._update_reward_estimate(reward)
        return reward / (np.sqrt(self._reward_var) + 1e-8)

    def reset(self):
        ret = self._wrapped_env.reset()
        if self._normalize_obs:
            return self._apply_normalize_obs(ret)
        else:
            return ret

    def __getstate__(self):
        d = Serializable.__getstate__(self)
        d["_obs_mean"] = self._obs_mean
        d["_obs_var"] = self._obs_var
        # Bug fix: the reward running statistics were previously not
        # serialized, so they silently reset on unpickling while the
        # observation statistics survived.  Persist them as well.
        d["_reward_mean"] = self._reward_mean
        d["_reward_var"] = self._reward_var
        return d

    def __setstate__(self, d):
        Serializable.__setstate__(self, d)
        self._obs_mean = d["_obs_mean"]
        self._obs_var = d["_obs_var"]
        # Fall back to the freshly-initialized values for pickles created
        # before the reward statistics were serialized.
        self._reward_mean = d.get("_reward_mean", self._reward_mean)
        self._reward_var = d.get("_reward_var", self._reward_var)

    @property
    @overrides
    def action_space(self):
        if isinstance(self._wrapped_env.action_space, Box):
            # Present a unit box to the agent regardless of the true bounds.
            ub = np.ones(self._wrapped_env.action_space.shape)
            return spaces.Box(-1 * ub, ub)
        return self._wrapped_env.action_space

    @overrides
    def step(self, action):
        if isinstance(self._wrapped_env.action_space, Box):
            # rescale the action from [-1, 1] to [lb, ub], then clip to bounds
            lb, ub = self._wrapped_env.action_space.bounds
            scaled_action = lb + (action + 1.) * 0.5 * (ub - lb)
            scaled_action = np.clip(scaled_action, lb, ub)
        else:
            scaled_action = action
        wrapped_step = self._wrapped_env.step(scaled_action)
        next_obs, reward, done, info = wrapped_step
        if self._normalize_obs:
            next_obs = self._apply_normalize_obs(next_obs)
        if self._normalize_reward:
            reward = self._apply_normalize_reward(reward)
        return Step(next_obs, reward * self._scale_reward, done, **info)

    def __str__(self):
        return "Normalized: %s" % self._wrapped_env
# Lowercase alias so callers can wrap environments as `normalize(env)`.
normalize = NormalizedEnv
| 3,871 | 36.230769 | 122 | py |
rllab | rllab-master/rllab/envs/proxy_env.py | from rllab.core.serializable import Serializable
from .base import Env
class ProxyEnv(Env, Serializable):
    """Base class for environment wrappers: forwards the entire Env
    interface to ``wrapped_env``.  Subclasses override the methods whose
    behavior they want to modify."""

    def __init__(self, wrapped_env):
        Serializable.quick_init(self, locals())
        self._wrapped_env = wrapped_env

    @property
    def wrapped_env(self):
        return self._wrapped_env

    def reset(self, **kwargs):
        return self._wrapped_env.reset(**kwargs)

    @property
    def action_space(self):
        return self._wrapped_env.action_space

    @property
    def observation_space(self):
        return self._wrapped_env.observation_space

    def step(self, action):
        return self._wrapped_env.step(action)

    def render(self, *args, **kwargs):
        return self._wrapped_env.render(*args, **kwargs)

    def log_diagnostics(self, paths, *args, **kwargs):
        self._wrapped_env.log_diagnostics(paths, *args, **kwargs)

    @property
    def horizon(self):
        return self._wrapped_env.horizon

    def terminate(self):
        self._wrapped_env.terminate()

    def get_param_values(self):
        return self._wrapped_env.get_param_values()

    def set_param_values(self, params):
        self._wrapped_env.set_param_values(params)
| 1,191 | 24.913043 | 65 | py |
rllab | rllab-master/rllab/envs/noisy_env.py | import numpy as np
from rllab.core.serializable import Serializable
from rllab.envs.base import Step
from rllab.envs.proxy_env import ProxyEnv
from rllab.misc import autoargs
from rllab.misc.overrides import overrides
class NoisyObservationEnv(ProxyEnv, Serializable):
    """Wrapper that corrupts every observation with entry-wise Gaussian
    noise, making the problem partially observable (non-Markovian)."""

    @autoargs.arg('obs_noise', type=float,
                  help='Noise added to the observations (note: this makes the '
                       'problem non-Markovian!)')
    def __init__(self,
                 env,
                 obs_noise=1e-1,
                 ):
        super(NoisyObservationEnv, self).__init__(env)
        Serializable.quick_init(self, locals())
        self.obs_noise = obs_noise

    def get_obs_noise_scale_factor(self, obs):
        # Uniform (state-independent) scaling; magnitude-proportional noise
        # (np.abs(obs)) is an alternative.
        return np.ones_like(obs)

    def inject_obs_noise(self, obs):
        """
        Inject entry-wise noise to the observation. This should not change
        the dimension of the observation.
        """
        scale = self.get_obs_noise_scale_factor(obs)
        perturbation = scale * self.obs_noise * np.random.normal(size=obs.shape)
        return obs + perturbation

    def get_current_obs(self):
        clean_obs = self._wrapped_env.get_current_obs()
        return self.inject_obs_noise(clean_obs)

    @overrides
    def reset(self):
        clean_obs = self._wrapped_env.reset()
        return self.inject_obs_noise(clean_obs)

    @overrides
    def step(self, action):
        next_obs, reward, done, info = self._wrapped_env.step(action)
        noisy_obs = self.inject_obs_noise(next_obs)
        return Step(noisy_obs, reward, done, **info)
class DelayedActionEnv(ProxyEnv, Serializable):
    """Wrapper that delays every action by ``action_delay`` timesteps via a
    FIFO queue of flattened actions (initialized to zeros at reset)."""

    @autoargs.arg('action_delay', type=int,
                  help='Time steps before action is realized')
    def __init__(self,
                 env,
                 action_delay=3,
                 ):
        assert action_delay > 0, "Should not use this env transformer"
        super(DelayedActionEnv, self).__init__(env)
        Serializable.quick_init(self, locals())
        self.action_delay = action_delay
        self._queued_actions = None

    @overrides
    def reset(self):
        obs = self._wrapped_env.reset()
        # Queue holds action_delay flat actions back-to-back; starts as zeros.
        queue_size = self.action_delay * self.action_dim
        self._queued_actions = np.zeros(queue_size)
        return obs

    @overrides
    def step(self, action):
        dim = self.action_dim
        # Execute the oldest queued action; enqueue the new one at the tail.
        ready_action = self._queued_actions[:dim]
        next_obs, reward, done, info = self._wrapped_env.step(ready_action)
        self._queued_actions = np.concatenate([
            self._queued_actions[dim:],
            action
        ])
        return Step(next_obs, reward, done, **info)
| 2,575 | 31.2 | 79 | py |
rllab | rllab-master/rllab/envs/gym_env.py | import gym
import gym.wrappers
import gym.envs
import gym.spaces
import traceback
import logging
try:
from gym.wrappers.monitoring import logger as monitor_logger
monitor_logger.setLevel(logging.WARNING)
except Exception as e:
traceback.print_exc()
import os
import os.path as osp
from rllab.envs.base import Env, Step
from rllab.core.serializable import Serializable
from rllab.spaces.box import Box
from rllab.spaces.discrete import Discrete
from rllab.spaces.product import Product
from rllab.misc import logger
def convert_gym_space(space):
    """Translate a ``gym`` space into the equivalent rllab space.

    Supports Box, Discrete and Tuple (converted recursively to Product);
    raises NotImplementedError for any other space type.
    """
    if isinstance(space, gym.spaces.Box):
        return Box(low=space.low, high=space.high)
    if isinstance(space, gym.spaces.Discrete):
        return Discrete(n=space.n)
    if isinstance(space, gym.spaces.Tuple):
        converted = [convert_gym_space(sub) for sub in space.spaces]
        return Product(converted)
    raise NotImplementedError
class CappedCubicVideoSchedule(object):
    """Record at perfect cubes until episode 1000, then every 1000 episodes.

    Copied from gym, since this method is frequently moved around.
    """

    def __call__(self, count):
        if count >= 1000:
            return count % 1000 == 0
        root = int(round(count ** (1. / 3)))
        return root ** 3 == count
class FixedIntervalVideoSchedule(object):
    """Record a video every ``interval`` episodes (including episode 0)."""

    def __init__(self, interval):
        self.interval = interval

    def __call__(self, count):
        return (count % self.interval) == 0
class NoVideoSchedule(object):
    """Video schedule that never records, regardless of episode count."""
    def __call__(self, count):
        return False
class GymEnv(Env, Serializable):
    """Adapter exposing an OpenAI Gym environment through the rllab Env API.

    Optionally wraps the gym env in a ``Monitor`` that logs statistics (and,
    depending on ``video_schedule``, videos) under ``log_dir``.
    """

    def __init__(self, env_name, record_video=True, video_schedule=None, log_dir=None, record_log=True,
                 force_reset=False):
        """
        :param env_name: gym environment id, e.g. "CartPole-v0"
        :param record_video: whether the monitor should record videos
        :param video_schedule: callable(episode_count) -> bool; defaults to
            CappedCubicVideoSchedule when recording is enabled
        :param log_dir: monitor output directory; defaults to
            <snapshot_dir>/gym_log when a snapshot dir is configured
        :param record_log: whether to attach the monitor at all
        :param force_reset: if True, reset() marks any in-progress monitored
            episode as done before resetting
        """
        if log_dir is None:
            if logger.get_snapshot_dir() is None:
                logger.log("Warning: skipping Gym environment monitoring since snapshot_dir not configured.")
            else:
                log_dir = os.path.join(logger.get_snapshot_dir(), "gym_log")
        Serializable.quick_init(self, locals())
        env = gym.envs.make(env_name)
        self.env = env
        self.env_id = env.spec.id
        # Recording video without the stats log is not supported by Monitor.
        assert not (not record_log and record_video)
        if log_dir is None or record_log is False:
            self.monitoring = False
        else:
            if not record_video:
                video_schedule = NoVideoSchedule()
            else:
                if video_schedule is None:
                    video_schedule = CappedCubicVideoSchedule()
            self.env = gym.wrappers.Monitor(self.env, log_dir, video_callable=video_schedule, force=True)
            self.monitoring = True
        self._observation_space = convert_gym_space(env.observation_space)
        logger.log("observation space: {}".format(self._observation_space))
        self._action_space = convert_gym_space(env.action_space)
        logger.log("action space: {}".format(self._action_space))
        self._horizon = env.spec.tags['wrapper_config.TimeLimit.max_episode_steps']
        self._log_dir = log_dir
        self._force_reset = force_reset
    @property
    def observation_space(self):
        # rllab space converted from the gym observation space.
        return self._observation_space
    @property
    def action_space(self):
        # rllab space converted from the gym action space.
        return self._action_space
    @property
    def horizon(self):
        # Max episode length taken from the gym spec's TimeLimit tag.
        return self._horizon
    def reset(self):
        if self._force_reset and self.monitoring:
            from gym.wrappers.monitoring import Monitor
            assert isinstance(self.env, Monitor)
            # Mark the current monitored episode as finished so the monitor
            # does not complain about resetting mid-episode.
            recorder = self.env.stats_recorder
            if recorder is not None:
                recorder.done = True
        return self.env.reset()
    def step(self, action):
        """Forward the action to gym and repackage the transition as a Step."""
        next_obs, reward, done, info = self.env.step(action)
        return Step(next_obs, reward, done, **info)
    def render(self):
        self.env.render()
    def terminate(self):
        """Close the monitor (if any) and print upload instructions."""
        if self.monitoring:
            self.env._close()
            if self._log_dir is not None:
                print("""
***************************
Training finished! You can upload results to OpenAI Gym by running the following command:
python scripts/submit_gym.py %s
***************************
""" % self._log_dir)
| 4,134 | 29.858209 | 109 | py |
rllab | rllab-master/rllab/envs/occlusion_env.py | import numpy as np
from cached_property import cached_property
from rllab import spaces
from rllab.core.serializable import Serializable
from rllab.envs.proxy_env import ProxyEnv
from rllab.misc.overrides import overrides
from rllab.envs.base import Step
from rllab.envs.mujoco.mujoco_env import MujocoEnv
BIG = 1e6
class OcclusionEnv(ProxyEnv, Serializable):
    ''' Occludes part of the observation.'''

    def __init__(self, env, sensor_idx):
        '''
        :param sensor_idx: list or ndarray of indices to be shown. Other indices will be occluded. Can be either list of
        integer indices or boolean mask.
        '''
        Serializable.quick_init(self, locals())
        self._set_sensor_mask(env, sensor_idx)
        super(OcclusionEnv, self).__init__(env)
        self._dt = 1
        # For MuJoCo envs expose the effective control timestep; otherwise 1.
        if isinstance(env, MujocoEnv):
            self._dt = env.model.opt.timestep * env.frame_skip

    def _set_sensor_mask(self, env, sensor_idx):
        """Validate ``sensor_idx`` and cache it as a boolean mask over the flat observation.

        Accepts either a full-length 0/1 boolean mask or a list of unique
        integer indices; raises ValueError otherwise.
        """
        obsdim = env.observation_space.flat_dim
        if len(sensor_idx) > obsdim:
            raise ValueError("Length of sensor mask ({0}) cannot be greater than observation dim ({1})".format(len(sensor_idx), obsdim))
        if len(sensor_idx) == obsdim and not np.any(np.array(sensor_idx) > 1):
            # Full-length vector of 0/1 entries: interpret as a boolean mask.
            # FIX: use builtin `bool`; the `np.bool` alias was removed in NumPy 1.24.
            sensor_mask = np.array(sensor_idx, dtype=bool)
        elif np.any(np.unique(sensor_idx, return_counts=True)[1] > 1):
            raise ValueError("Double entries or boolean mask with dim ({0}) < observation dim ({1})".format(len(sensor_idx), obsdim))
        else:
            # Integer index list: scatter into a boolean mask.
            sensor_mask = np.zeros((obsdim,), dtype=bool)
            sensor_mask[sensor_idx] = 1
        self._sensor_mask = sensor_mask

    def occlude(self, obs):
        """Return only the unmasked components of ``obs``."""
        return obs[self._sensor_mask]

    def get_current_obs(self):
        return self.occlude(self._wrapped_env.get_current_obs())

    @cached_property
    @overrides
    def observation_space(self):
        # Unbounded box with the shape of the occluded observation.
        shp = self.get_current_obs().shape
        ub = BIG * np.ones(shp)
        return spaces.Box(ub * -1, ub)

    @overrides
    def reset(self):
        obs = self._wrapped_env.reset()
        return self.occlude(obs)

    @overrides
    def step(self, action):
        next_obs, reward, done, info = self._wrapped_env.step(action)
        return Step(self.occlude(next_obs), reward, done, **info)

    @property
    def dt(self):
        """Effective simulation timestep (timestep * frame_skip for MuJoCo envs)."""
        return self._dt

    @overrides
    def log_diagnostics(self, paths):
        pass  # the wrapped env will be expecting its own observations in paths, but they're not
| 2,572 | 33.77027 | 136 | py |
rllab | rllab-master/rllab/envs/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/envs/sliding_mem_env.py | import numpy as np
from rllab.core.serializable import Serializable
from rllab.envs.base import Step
from rllab.envs.proxy_env import ProxyEnv
from rllab.misc import autoargs
from rllab.misc.overrides import overrides
from rllab.spaces import Box
class SlidingMemEnv(ProxyEnv, Serializable):
    """Env wrapper that stacks the last ``n_steps`` observations along axis 0.

    NOTE(review): only axis == 0 is implemented (asserted in the buffer
    helpers). The buffer is filled/shifted via NumPy broadcasting, which
    presumes each wrapped observation occupies a size-1 leading slice of the
    buffer (e.g. frame-like observations) — verify against the wrapped env.
    """
    def __init__(
            self,
            env,
            n_steps=4,
            axis=0,
    ):
        super().__init__(env)
        Serializable.quick_init(self, locals())
        self.n_steps = n_steps  # number of stacked observations
        self.axis = axis        # stacking axis; only 0 is supported
        self.buffer = None      # allocated lazily on reset
    def reset_buffer(self, new_):
        # Allocate the stacked buffer and fill every slot with the initial
        # observation (broadcast over axis 0).
        assert self.axis == 0
        self.buffer = np.zeros(self.observation_space.shape, dtype=np.float32)
        self.buffer[0:] = new_
    def add_to_buffer(self, new_):
        # Shift old entries back by one slot and place the newest at the front.
        assert self.axis == 0
        self.buffer[1:] = self.buffer[:-1]
        self.buffer[:1] = new_
    @property
    def observation_space(self):
        # Wrapped bounds repeated n_steps times along the stacking axis.
        origin = self._wrapped_env.observation_space
        return Box(
            *[
                np.repeat(b, self.n_steps, axis=self.axis)
                for b in origin.bounds
            ]
        )
    @overrides
    def reset(self):
        obs = self._wrapped_env.reset()
        self.reset_buffer(obs)
        return self.buffer
    @overrides
    def step(self, action):
        next_obs, reward, done, info = self._wrapped_env.step(action)
        self.add_to_buffer(next_obs)
        return Step(self.buffer, reward, done, **info)
| 1,475 | 24.894737 | 78 | py |
rllab | rllab-master/rllab/envs/identification_env.py | from rllab.core.serializable import Serializable
from rllab.envs.proxy_env import ProxyEnv
from rllab.misc.overrides import overrides
class IdentificationEnv(ProxyEnv, Serializable):
    """Wrapper that draws a fresh MDP instance from ``mdp_cls(**mdp_args)``
    (with noisy template args) at the start of every episode."""

    def __init__(self, mdp_cls, mdp_args):
        Serializable.quick_init(self, locals())
        self.mdp_cls = mdp_cls
        self.mdp_args = dict(mdp_args)
        # Force noisy model parameters when rendering the model template.
        self.mdp_args["template_args"] = dict(noise=True)
        mdp = self.gen_mdp()
        super(IdentificationEnv, self).__init__(mdp)

    def gen_mdp(self):
        """Instantiate a new wrapped MDP from the stored class and args."""
        return self.mdp_cls(**self.mdp_args)

    @overrides
    def reset(self):
        # FIX: ProxyEnv stores the wrapped env as `_wrapped_env`; the original
        # guarded on the stale attribute name `_mdp`, which never exists, so
        # the MDP was never actually regenerated between episodes.
        if getattr(self, "_wrapped_env", None):
            if hasattr(self._wrapped_env, "release"):
                # Free native (MuJoCo) resources of the old instance.
                self._wrapped_env.release()
            self._wrapped_env = self.gen_mdp()
        return super(IdentificationEnv, self).reset()
| 829 | 29.740741 | 57 | py |
rllab | rllab-master/rllab/envs/env_spec.py | from rllab.core.serializable import Serializable
from rllab.spaces.base import Space
class EnvSpec(Serializable):
    """Lightweight, serializable bundle of an env's observation and action spaces."""

    def __init__(self, observation_space, action_space):
        """
        :type observation_space: Space
        :type action_space: Space
        """
        Serializable.quick_init(self, locals())
        self._observation_space = observation_space
        self._action_space = action_space

    @property
    def observation_space(self):
        """Space describing observations."""
        return self._observation_space

    @property
    def action_space(self):
        """Space describing actions."""
        return self._action_space
| 614 | 22.653846 | 51 | py |
rllab | rllab-master/rllab/envs/mujoco/simple_humanoid_env.py | from rllab.envs.base import Step
from .mujoco_env import MujocoEnv
import numpy as np
from rllab.core.serializable import Serializable
from rllab.misc.overrides import overrides
from rllab.misc import logger
from rllab.misc import autoargs
class SimpleHumanoidEnv(MujocoEnv, Serializable):
    """Simplified humanoid locomotion task.

    Reward = forward COM velocity + alive bonus - control cost
    - contact impact cost - velocity-deviation cost.
    """

    FILE = 'simple_humanoid.xml'
    @autoargs.arg('vel_deviation_cost_coeff', type=float,
                  help='cost coefficient for velocity deviation')
    @autoargs.arg('alive_bonus', type=float,
                  help='bonus reward for being alive')
    @autoargs.arg('ctrl_cost_coeff', type=float,
                  help='cost coefficient for control inputs')
    @autoargs.arg('impact_cost_coeff', type=float,
                  help='cost coefficient for impact')
    def __init__(
            self,
            vel_deviation_cost_coeff=1e-2,
            alive_bonus=0.2,
            ctrl_cost_coeff=1e-3,
            impact_cost_coeff=1e-5,
            *args, **kwargs):
        self.vel_deviation_cost_coeff = vel_deviation_cost_coeff
        self.alive_bonus = alive_bonus
        self.ctrl_cost_coeff = ctrl_cost_coeff
        self.impact_cost_coeff = impact_cost_coeff
        super(SimpleHumanoidEnv, self).__init__(*args, **kwargs)
        Serializable.quick_init(self, locals())
    def get_current_obs(self):
        """Flat observation: joint positions, velocities, clipped external
        contact forces and the torso center of mass."""
        data = self.model.data
        return np.concatenate([
            data.qpos.flat,
            data.qvel.flat,
            np.clip(data.cfrc_ext, -1, 1).flat,
            self.get_body_com("torso").flat,
        ])
    def _get_com(self):
        # Mass-weighted mean of body positions; [0] picks the first
        # (presumably forward/x) coordinate — TODO confirm against model.
        data = self.model.data
        mass = self.model.body_mass
        xpos = data.xipos
        return (np.sum(mass * xpos, 0) / np.sum(mass))[0]
    def step(self, action):
        self.forward_dynamics(action)
        next_obs = self.get_current_obs()
        alive_bonus = self.alive_bonus
        data = self.model.data
        comvel = self.get_body_comvel("torso")
        lin_vel_reward = comvel[0]
        lb, ub = self.action_bounds
        # Normalize controls by half the action range before costing.
        scaling = (ub - lb) * 0.5
        ctrl_cost = .5 * self.ctrl_cost_coeff * np.sum(
            np.square(action / scaling))
        impact_cost = .5 * self.impact_cost_coeff * np.sum(
            np.square(np.clip(data.cfrc_ext, -1, 1)))
        # Penalize non-forward (lateral/vertical) COM velocity.
        vel_deviation_cost = 0.5 * self.vel_deviation_cost_coeff * np.sum(
            np.square(comvel[1:]))
        reward = lin_vel_reward + alive_bonus - ctrl_cost - \
            impact_cost - vel_deviation_cost
        # Episode ends when qpos[2] (root height, presumably) leaves [0.8, 2.0].
        done = data.qpos[2] < 0.8 or data.qpos[2] > 2.0
        return Step(next_obs, reward, done)
    @overrides
    def log_diagnostics(self, paths):
        # Forward progress: change of the third-from-last observation
        # component (torso COM x) over each trajectory.
        progs = [
            path["observations"][-1][-3] - path["observations"][0][-3]
            for path in paths
        ]
        logger.record_tabular('AverageForwardProgress', np.mean(progs))
        logger.record_tabular('MaxForwardProgress', np.max(progs))
        logger.record_tabular('MinForwardProgress', np.min(progs))
        logger.record_tabular('StdForwardProgress', np.std(progs))
| 3,040 | 34.776471 | 74 | py |
rllab | rllab-master/rllab/envs/mujoco/ant_env.py | from rllab.envs.mujoco.mujoco_env import MujocoEnv
from rllab.core.serializable import Serializable
from rllab.envs.base import Step
from rllab.misc.overrides import overrides
from rllab.misc import logger
from rllab.envs.mujoco.mujoco_env import q_mult, q_inv
import numpy as np
import math
class AntEnv(MujocoEnv, Serializable):
    """MuJoCo ant locomotion task: run forward while staying upright."""

    FILE = 'ant.xml'
    ORI_IND = 3  # index in qpos of the first component of the root quaternion

    def __init__(self, *args, **kwargs):
        super(AntEnv, self).__init__(*args, **kwargs)
        Serializable.__init__(self, *args, **kwargs)

    def get_current_obs(self):
        """Flat observation: joint positions/velocities, clipped contact
        forces, torso rotation matrix and torso COM."""
        return np.concatenate([
            self.model.data.qpos.flat,
            self.model.data.qvel.flat,
            np.clip(self.model.data.cfrc_ext, -1, 1).flat,
            self.get_body_xmat("torso").flat,
            self.get_body_com("torso"),
        ]).reshape(-1)

    def step(self, action):
        self.forward_dynamics(action)
        comvel = self.get_body_comvel("torso")
        forward_reward = comvel[0]
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
        # FIX: the original line ended with a stray trailing comma, which made
        # contact_cost a 1-tuple and silently promoted the reward to an ndarray.
        contact_cost = 0.5 * 1e-3 * np.sum(
            np.square(np.clip(self.model.data.cfrc_ext, -1, 1)))
        survive_reward = 0.05
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        state = self._state
        # Healthy iff the state is finite and the torso height stays in [0.2, 1.0].
        notdone = np.isfinite(state).all() \
            and state[2] >= 0.2 and state[2] <= 1.0
        done = not notdone
        ob = self.get_current_obs()
        return Step(ob, float(reward), done)

    @overrides
    def get_ori(self):
        """Heading angle of the torso in the x-y plane, from the root quaternion."""
        ori = [0, 1, 0, 0]
        rot = self.model.data.qpos[self.__class__.ORI_IND:self.__class__.ORI_IND + 4]  # take the quaternion
        ori = q_mult(q_mult(rot, ori), q_inv(rot))[1:3]  # project onto x-y plane
        ori = math.atan2(ori[1], ori[0])
        return ori

    @overrides
    def log_diagnostics(self, paths):
        # Forward progress: change of the third-from-last observation component.
        progs = [
            path["observations"][-1][-3] - path["observations"][0][-3]
            for path in paths
        ]
        logger.record_tabular('AverageForwardProgress', np.mean(progs))
        logger.record_tabular('MaxForwardProgress', np.max(progs))
        logger.record_tabular('MinForwardProgress', np.min(progs))
        logger.record_tabular('StdForwardProgress', np.std(progs))
| 2,342 | 33.970149 | 108 | py |
rllab | rllab-master/rllab/envs/mujoco/inverted_double_pendulum_env.py | import numpy as np
from rllab.core.serializable import Serializable
from rllab.envs.base import Step
from rllab.envs.mujoco.mujoco_env import MujocoEnv
from rllab.misc import autoargs
from rllab.misc.overrides import overrides
class InvertedDoublePendulumEnv(MujocoEnv, Serializable):
    """Cart with a two-link pendulum that must be balanced upright."""

    FILE = 'inverted_double_pendulum.xml.mako'
    @autoargs.arg("random_start", type=bool,
                  help="Randomized starting position by adjusting the angles"
                       "When this is false, the double pendulum started out"
                       "in balanced position")
    def __init__(
            self,
            *args, **kwargs):
        # NOTE(review): random_start is read with kwargs.get and left inside
        # kwargs, which is then forwarded to MujocoEnv.__init__ — presumably
        # callers never pass it positionally/explicitly; verify.
        self.random_start = kwargs.get("random_start", True)
        super(InvertedDoublePendulumEnv, self).__init__(*args, **kwargs)
        Serializable.quick_init(self, locals())
    @overrides
    def get_current_obs(self):
        """Observation: cart position, sin/cos of link angles, clipped
        velocities and constraint forces."""
        return np.concatenate([
            self.model.data.qpos[:1],  # cart x pos
            np.sin(self.model.data.qpos[1:]),  # link angles
            np.cos(self.model.data.qpos[1:]),
            np.clip(self.model.data.qvel, -10, 10),
            np.clip(self.model.data.qfrc_constraint, -10, 10)
        ]).reshape(-1)
    @overrides
    def step(self, action):
        self.forward_dynamics(action)
        next_obs = self.get_current_obs()
        # Tip site position; penalize its distance from the upright target (0, 2).
        x, _, y = self.model.data.site_xpos[0]
        dist_penalty = 0.01 * x ** 2 + (y - 2) ** 2
        v1, v2 = self.model.data.qvel[1:3]
        vel_penalty = 1e-3 * v1 ** 2 + 5e-3 * v2 ** 2
        alive_bonus = 10
        r = float(alive_bonus - dist_penalty - vel_penalty)
        # Episode ends once the tip drops to height <= 1.
        done = y <= 1
        return Step(next_obs, r, done)
    @overrides
    def reset_mujoco(self, init_state=None):
        assert init_state is None
        qpos = np.copy(self.init_qpos)
        if self.random_start:
            # Perturb the first link angle by up to +/- 20 degrees.
            qpos[1] = (np.random.rand() - 0.5) * 40 / 180. * np.pi
        self.model.data.qpos = qpos
        self.model.data.qvel = self.init_qvel
        self.model.data.qacc = self.init_qacc
        self.model.data.ctrl = self.init_ctrl
| 2,077 | 35.45614 | 77 | py |
rllab | rllab-master/rllab/envs/mujoco/mujoco_env.py | import numpy as np
import os.path as osp
from cached_property import cached_property
from rllab import spaces
from rllab.envs.base import Env
from rllab.misc.overrides import overrides
from rllab.mujoco_py import MjModel, MjViewer
from rllab.misc import autoargs
from rllab.misc import logger
import theano
import tempfile
import os
import mako.template
import mako.lookup
MODEL_DIR = osp.abspath(
osp.join(
osp.dirname(__file__),
'../../../vendor/mujoco_models'
)
)
BIG = 1e6
def q_inv(a):
    """Conjugate of quaternion ``a = [w, x, y, z]`` (the inverse for unit quaternions)."""
    return [comp * sign for comp, sign in zip(a, (1, -1, -1, -1))]
def q_mult(a, b):
    """Hamilton product of two quaternions, each given as ``[w, x, y, z]``."""
    aw, ax, ay, az = a[0], a[1], a[2], a[3]
    bw, bx, by, bz = b[0], b[1], b[2], b[3]
    w = aw * bw - ax * bx - ay * by - az * bz
    i = aw * bx + ax * bw + ay * bz - az * by
    j = aw * by - ax * bz + ay * bw + az * bx
    k = aw * bz + ax * by - ay * bx + az * bw
    return [w, i, j, k]
class MujocoEnv(Env):
    """Base class for MuJoCo-simulated environments.

    Handles model loading (including mako-templated model files), initial
    state bookkeeping, action-noise injection, frame skipping and viewer
    management. Subclasses set FILE to their model filename and typically
    override get_current_obs() and step().
    """

    FILE = None

    @autoargs.arg('action_noise', type=float,
                  help='Noise added to the controls, which will be '
                       'proportional to the action bounds')
    def __init__(self, action_noise=0.0, file_path=None, template_args=None):
        # compile template
        if file_path is None:
            if self.__class__.FILE is None:
                # FIX: the original raised a plain string, which is a
                # TypeError in Python 3 (exceptions must derive from
                # BaseException).
                raise NotImplementedError("Mujoco file not specified")
            file_path = osp.join(MODEL_DIR, self.__class__.FILE)
        if file_path.endswith(".mako"):
            # Render the mako template to a temporary XML file first.
            lookup = mako.lookup.TemplateLookup(directories=[MODEL_DIR])
            with open(file_path) as template_file:
                template = mako.template.Template(
                    template_file.read(), lookup=lookup)
            content = template.render(
                opts=template_args if template_args is not None else {},
            )
            tmp_f, file_path = tempfile.mkstemp(text=True)
            with open(file_path, 'w') as f:
                f.write(content)
            self.model = MjModel(file_path)
            os.close(tmp_f)
        else:
            self.model = MjModel(file_path)
        self.data = self.model.data
        self.viewer = None
        # Remember the model's initial state so reset() can restore it.
        self.init_qpos = self.model.data.qpos
        self.init_qvel = self.model.data.qvel
        self.init_qacc = self.model.data.qacc
        self.init_ctrl = self.model.data.ctrl
        self.qpos_dim = self.init_qpos.size
        self.qvel_dim = self.init_qvel.size
        self.ctrl_dim = self.init_ctrl.size
        self.action_noise = action_noise
        # Optional per-model overrides stored as custom numeric fields in the XML.
        if "frame_skip" in self.model.numeric_names:
            frame_skip_id = self.model.numeric_names.index("frame_skip")
            addr = self.model.numeric_adr.flat[frame_skip_id]
            self.frame_skip = int(self.model.numeric_data.flat[addr])
        else:
            self.frame_skip = 1
        if "init_qpos" in self.model.numeric_names:
            init_qpos_id = self.model.numeric_names.index("init_qpos")
            addr = self.model.numeric_adr.flat[init_qpos_id]
            size = self.model.numeric_size.flat[init_qpos_id]
            init_qpos = self.model.numeric_data.flat[addr:addr + size]
            self.init_qpos = init_qpos
        self.dcom = None
        self.current_com = None
        self.reset()
        super(MujocoEnv, self).__init__()

    @cached_property
    @overrides
    def action_space(self):
        """Box bounded by the model's actuator control ranges."""
        bounds = self.model.actuator_ctrlrange
        lb = bounds[:, 0]
        ub = bounds[:, 1]
        return spaces.Box(lb, ub)

    @cached_property
    @overrides
    def observation_space(self):
        """Unbounded box shaped like the current observation."""
        shp = self.get_current_obs().shape
        ub = BIG * np.ones(shp)
        return spaces.Box(ub * -1, ub)

    @property
    def action_bounds(self):
        return self.action_space.bounds

    def reset_mujoco(self, init_state=None):
        """Restore the initial state, slightly perturbed, or set ``init_state``.

        ``init_state``, when given, is the concatenation [qpos, qvel, qacc, ctrl].
        """
        if init_state is None:
            self.model.data.qpos = self.init_qpos + \
                                   np.random.normal(size=self.init_qpos.shape) * 0.01
            self.model.data.qvel = self.init_qvel + \
                                   np.random.normal(size=self.init_qvel.shape) * 0.1
            self.model.data.qacc = self.init_qacc
            self.model.data.ctrl = self.init_ctrl
        else:
            start = 0
            for datum_name in ["qpos", "qvel", "qacc", "ctrl"]:
                datum = getattr(self.model.data, datum_name)
                datum_dim = datum.shape[0]
                datum = init_state[start: start + datum_dim]
                setattr(self.model.data, datum_name, datum)
                start += datum_dim

    @overrides
    def reset(self, init_state=None):
        self.reset_mujoco(init_state)
        self.model.forward()
        # Track the root subtree COM so dcom (per-step displacement) is available.
        self.current_com = self.model.data.com_subtree[0]
        self.dcom = np.zeros_like(self.current_com)
        return self.get_current_obs()

    def get_current_obs(self):
        return self._get_full_obs()

    def _get_full_obs(self):
        """Kitchen-sink observation with positions, velocities, inertias,
        actuator/constraint forces, contact distances and COM displacement."""
        data = self.model.data
        cdists = np.copy(self.model.geom_margin).flat
        for c in self.model.data.contact:
            cdists[c.geom2] = min(cdists[c.geom2], c.dist)
        obs = np.concatenate([
            data.qpos.flat,
            data.qvel.flat,
            # data.cdof.flat,
            data.cinert.flat,
            data.cvel.flat,
            # data.cacc.flat,
            data.qfrc_actuator.flat,
            data.cfrc_ext.flat,
            data.qfrc_constraint.flat,
            cdists,
            # data.qfrc_bias.flat,
            # data.qfrc_passive.flat,
            self.dcom.flat,
        ])
        return obs

    @property
    def _state(self):
        # Compact state: positions and velocities only.
        return np.concatenate([
            self.model.data.qpos.flat,
            self.model.data.qvel.flat
        ])

    @property
    def _full_state(self):
        # Full state in the layout expected by reset_mujoco(init_state=...).
        return np.concatenate([
            self.model.data.qpos,
            self.model.data.qvel,
            self.model.data.qacc,
            self.model.data.ctrl,
        ]).ravel()

    def inject_action_noise(self, action):
        # generate action noise
        noise = self.action_noise * \
                np.random.normal(size=action.shape)
        # rescale the noise to make it proportional to the action bounds
        lb, ub = self.action_bounds
        noise = 0.5 * (ub - lb) * noise
        return action + noise

    def forward_dynamics(self, action):
        """Apply (noisy) controls and advance the simulation frame_skip steps."""
        self.model.data.ctrl = self.inject_action_noise(action)
        for _ in range(self.frame_skip):
            self.model.step()
        self.model.forward()
        new_com = self.model.data.com_subtree[0]
        self.dcom = new_com - self.current_com
        self.current_com = new_com

    def get_viewer(self):
        """Lazily create, start and return the MjViewer."""
        if self.viewer is None:
            self.viewer = MjViewer()
            self.viewer.start()
            self.viewer.set_model(self.model)
        return self.viewer

    def render(self, close=False, mode='human'):
        """Render one frame; mode 'rgb_array' also returns it as (H, W, 3) uint8."""
        if mode == 'human':
            viewer = self.get_viewer()
            viewer.loop_once()
        elif mode == 'rgb_array':
            viewer = self.get_viewer()
            viewer.loop_once()
            # self.get_viewer(config=config).render()
            data, width, height = self.get_viewer().get_image()
            # FIX: np.fromstring is deprecated for binary data; frombuffer is
            # the supported zero-copy equivalent. The image is flipped
            # vertically to the conventional top-down row order.
            return np.frombuffer(data, dtype='uint8').reshape(height, width, 3)[::-1, :, :]
        if close:
            self.stop_viewer()

    def start_viewer(self):
        viewer = self.get_viewer()
        if not viewer.running:
            viewer.start()

    def stop_viewer(self):
        if self.viewer:
            self.viewer.finish()

    def release(self):
        # temporarily alleviate the issue (but still some leak)
        from rllab.mujoco_py.mjlib import mjlib
        mjlib.mj_deleteModel(self.model._wrapped)
        mjlib.mj_deleteData(self.data._wrapped)

    def get_body_xmat(self, body_name):
        """3x3 rotation matrix of the named body."""
        idx = self.model.body_names.index(body_name)
        return self.model.data.xmat[idx].reshape((3, 3))

    def get_body_com(self, body_name):
        """Center of mass of the subtree rooted at the named body."""
        idx = self.model.body_names.index(body_name)
        return self.model.data.com_subtree[idx]

    def get_body_comvel(self, body_name):
        """COM velocity of the named body."""
        idx = self.model.body_names.index(body_name)
        return self.model.body_comvels[idx]

    def print_stats(self):
        super(MujocoEnv, self).print_stats()
        print("qpos dim:\t%d" % len(self.model.data.qpos))

    def action_from_key(self, key):
        raise NotImplementedError
| 8,368 | 33.020325 | 89 | py |
rllab | rllab-master/rllab/envs/mujoco/swimmer3d_env.py | from .swimmer_env import SwimmerEnv
class Swimmer3DEnv(SwimmerEnv):
    """Swimmer variant that differs from SwimmerEnv only in its MuJoCo model file."""
    FILE = 'swimmer3d.xml'
rllab | rllab-master/rllab/envs/mujoco/half_cheetah_env.py | import numpy as np
from rllab.core.serializable import Serializable
from rllab.envs.base import Step
from rllab.envs.mujoco.mujoco_env import MujocoEnv
from rllab.misc import logger
from rllab.misc.overrides import overrides
def smooth_abs(x, param):
    """Smooth (soft) absolute value: sqrt(x**2 + param**2) - param.

    Differentiable everywhere; approaches |x| as param -> 0.
    """
    return np.sqrt(np.square(x) + param * param) - param
class HalfCheetahEnv(MujocoEnv, Serializable):
    """Half-cheetah locomotion task: run forward with minimal control effort."""

    FILE = 'half_cheetah.xml'
    def __init__(self, *args, **kwargs):
        super(HalfCheetahEnv, self).__init__(*args, **kwargs)
        Serializable.__init__(self, *args, **kwargs)
    def get_current_obs(self):
        # Observation drops qpos[0] (presumably the forward position, making
        # the policy translation-invariant — verify against the model).
        return np.concatenate([
            self.model.data.qpos.flatten()[1:],
            self.model.data.qvel.flat,
            self.get_body_com("torso").flat,
        ])
    def get_body_xmat(self, body_name):
        # NOTE(review): duplicates the MujocoEnv base-class helper.
        idx = self.model.body_names.index(body_name)
        return self.model.data.xmat[idx].reshape((3, 3))
    def get_body_com(self, body_name):
        # NOTE(review): duplicates the MujocoEnv base-class helper.
        idx = self.model.body_names.index(body_name)
        return self.model.data.com_subtree[idx]
    def step(self, action):
        self.forward_dynamics(action)
        next_obs = self.get_current_obs()
        action = np.clip(action, *self.action_bounds)
        ctrl_cost = 1e-1 * 0.5 * np.sum(np.square(action))
        # Negative forward torso velocity, so minimizing cost = running forward.
        run_cost = -1 * self.get_body_comvel("torso")[0]
        cost = ctrl_cost + run_cost
        reward = -cost
        done = False
        return Step(next_obs, reward, done)
    @overrides
    def log_diagnostics(self, paths):
        # Forward progress: change of the third-from-last observation component.
        progs = [
            path["observations"][-1][-3] - path["observations"][0][-3]
            for path in paths
        ]
        logger.record_tabular('AverageForwardProgress', np.mean(progs))
        logger.record_tabular('MaxForwardProgress', np.max(progs))
        logger.record_tabular('MinForwardProgress', np.min(progs))
        logger.record_tabular('StdForwardProgress', np.std(progs))
| 1,909 | 31.931034 | 71 | py |
rllab | rllab-master/rllab/envs/mujoco/swimmer_env.py | from rllab.envs.base import Step
from rllab.misc.overrides import overrides
from .mujoco_env import MujocoEnv
import numpy as np
from rllab.core.serializable import Serializable
from rllab.misc import logger
from rllab.misc import autoargs
class SwimmerEnv(MujocoEnv, Serializable):
    """Swimmer locomotion task: maximize forward COM velocity minus a
    scaled control cost. Episodes never terminate early."""

    FILE = 'swimmer.xml'
    ORI_IND = 2  # index of the heading angle in qpos

    @autoargs.arg('ctrl_cost_coeff', type=float,
                  help='cost coefficient for controls')
    def __init__(
            self,
            ctrl_cost_coeff=1e-2,
            *args, **kwargs):
        self.ctrl_cost_coeff = ctrl_cost_coeff
        super(SwimmerEnv, self).__init__(*args, **kwargs)
        Serializable.quick_init(self, locals())
    def get_current_obs(self):
        """Flat observation: joint positions, velocities and torso COM."""
        return np.concatenate([
            self.model.data.qpos.flat,
            self.model.data.qvel.flat,
            self.get_body_com("torso").flat,
        ]).reshape(-1)
    def get_ori(self):
        # Heading angle read directly out of qpos.
        return self.model.data.qpos[self.__class__.ORI_IND]
    def step(self, action):
        self.forward_dynamics(action)
        next_obs = self.get_current_obs()
        lb, ub = self.action_bounds
        # Controls are normalized by half the action range before costing.
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * self.ctrl_cost_coeff * np.sum(
            np.square(action / scaling))
        forward_reward = self.get_body_comvel("torso")[0]
        reward = forward_reward - ctrl_cost
        done = False
        return Step(next_obs, reward, done)
    @overrides
    def log_diagnostics(self, paths):
        # Forward progress: change of the third-from-last observation
        # component (torso COM x) over each trajectory.
        if len(paths) > 0:
            progs = [
                path["observations"][-1][-3] - path["observations"][0][-3]
                for path in paths
            ]
            logger.record_tabular('AverageForwardProgress', np.mean(progs))
            logger.record_tabular('MaxForwardProgress', np.max(progs))
            logger.record_tabular('MinForwardProgress', np.min(progs))
            logger.record_tabular('StdForwardProgress', np.std(progs))
        else:
            logger.record_tabular('AverageForwardProgress', np.nan)
            logger.record_tabular('MaxForwardProgress', np.nan)
            logger.record_tabular('MinForwardProgress', np.nan)
            logger.record_tabular('StdForwardProgress', np.nan)
| 2,213 | 34.142857 | 75 | py |
rllab | rllab-master/rllab/envs/mujoco/hopper_env.py | import numpy as np
from rllab.core.serializable import Serializable
from rllab.envs.base import Step
from rllab.envs.mujoco.mujoco_env import MujocoEnv
from rllab.misc import autoargs
from rllab.misc import logger
from rllab.misc.overrides import overrides
# states: [
# 0: z-coord,
# 1: x-coord (forward distance),
# 2: forward pitch along y-axis,
# 6: z-vel (up = +),
# 7: xvel (forward = +)
class HopperEnv(MujocoEnv, Serializable):
    """One-legged hopper locomotion task.

    Reward = forward torso velocity + alive bonus - scaled control cost;
    the episode ends when the state leaves the healthy region.
    """

    FILE = 'hopper.xml'

    @autoargs.arg('alive_coeff', type=float,
                  help='reward coefficient for being alive')
    @autoargs.arg('ctrl_cost_coeff', type=float,
                  help='cost coefficient for controls')
    def __init__(
            self,
            alive_coeff=1,
            ctrl_cost_coeff=0.01,
            *args, **kwargs):
        self.alive_coeff = alive_coeff
        self.ctrl_cost_coeff = ctrl_cost_coeff
        super(HopperEnv, self).__init__(*args, **kwargs)
        Serializable.quick_init(self, locals())
    @overrides
    def get_current_obs(self):
        # Skips qpos[1] (the forward x-coordinate, per the module header
        # comment), making the observation translation-invariant.
        return np.concatenate([
            self.model.data.qpos[0:1].flat,
            self.model.data.qpos[2:].flat,
            np.clip(self.model.data.qvel, -10, 10).flat,
            np.clip(self.model.data.qfrc_constraint, -10, 10).flat,
            self.get_body_com("torso").flat,
        ])
    @overrides
    def step(self, action):
        self.forward_dynamics(action)
        next_obs = self.get_current_obs()
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        vel = self.get_body_comvel("torso")[0]
        reward = vel + self.alive_coeff - \
            0.5 * self.ctrl_cost_coeff * np.sum(np.square(action / scaling))
        state = self._state
        # Healthy iff: finite state, bounded velocities, height (state[0],
        # z per header comment) > 0.7, and pitch (state[2]) within 0.2 rad.
        notdone = np.isfinite(state).all() and \
            (np.abs(state[3:]) < 100).all() and (state[0] > .7) and \
            (abs(state[2]) < .2)
        done = not notdone
        return Step(next_obs, reward, done)
    @overrides
    def log_diagnostics(self, paths):
        # Forward progress: change of the third-from-last observation component.
        progs = [
            path["observations"][-1][-3] - path["observations"][0][-3]
            for path in paths
        ]
        logger.record_tabular('AverageForwardProgress', np.mean(progs))
        logger.record_tabular('MaxForwardProgress', np.max(progs))
        logger.record_tabular('MinForwardProgress', np.min(progs))
        logger.record_tabular('StdForwardProgress', np.std(progs))
| 2,412 | 32.054795 | 76 | py |
rllab | rllab-master/rllab/envs/mujoco/point_env.py | from rllab.envs.base import Step
from .mujoco_env import MujocoEnv
from rllab.core.serializable import Serializable
from rllab.misc.overrides import overrides
import numpy as np
import math
from rllab.mujoco_py import glfw
class PointEnv(MujocoEnv, Serializable):
    """
    2D point robot driven kinematically by (forward speed, steering) actions.

    Use Left, Right, Up, Down, A (steer left), D (steer right)
    """
    FILE = 'point.xml'
    def __init__(self, *args, **kwargs):
        super(PointEnv, self).__init__(*args, **kwargs)
        Serializable.quick_init(self, locals())
    def step(self, action):
        """Kinematic update: action[1] changes the heading, action[0] moves
        the point along the heading. Reward is 0 and the episode never ends."""
        qpos = np.copy(self.model.data.qpos)
        qpos[2, 0] += action[1]  # heading angle
        ori = qpos[2, 0]
        # compute increment in each direction
        dx = math.cos(ori) * action[0]
        dy = math.sin(ori) * action[0]
        # ensure that the robot is within reasonable range
        qpos[0, 0] = np.clip(qpos[0, 0] + dx, -7, 7)
        qpos[1, 0] = np.clip(qpos[1, 0] + dy, -7, 7)
        self.model.data.qpos = qpos
        self.model.forward()
        next_obs = self.get_current_obs()
        return Step(next_obs, 0, False)
    def get_xy(self):
        """Return the current (x, y) position."""
        qpos = self.model.data.qpos
        return qpos[0, 0], qpos[1, 0]
    def set_xy(self, xy):
        """Teleport the point to ``xy`` without changing its heading."""
        qpos = np.copy(self.model.data.qpos)
        qpos[0, 0] = xy[0]
        qpos[1, 0] = xy[1]
        self.model.data.qpos = qpos
        self.model.forward()
    @overrides
    def action_from_key(self, key):
        """Map a keyboard key to an action: up/down drive, left/right steer."""
        lb, ub = self.action_bounds
        if key == glfw.KEY_LEFT:
            return np.array([0, ub[0]*0.3])
        elif key == glfw.KEY_RIGHT:
            return np.array([0, lb[0]*0.3])
        elif key == glfw.KEY_UP:
            return np.array([ub[1], 0])
        elif key == glfw.KEY_DOWN:
            return np.array([lb[1], 0])
        else:
            return np.array([0, 0])
| 1,815 | 28.290323 | 62 | py |
rllab | rllab-master/rllab/envs/mujoco/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/envs/mujoco/humanoid_env.py | from .simple_humanoid_env import SimpleHumanoidEnv
# Taken from Wojciech's code
class HumanoidEnv(SimpleHumanoidEnv):
    """Full humanoid variant; differs from SimpleHumanoidEnv only in the model file."""
    FILE = 'humanoid.xml'
| 147 | 17.5 | 50 | py |
rllab | rllab-master/rllab/envs/mujoco/walker2d_env.py | import numpy as np
from rllab.core.serializable import Serializable
from rllab.envs.base import Step
from rllab.envs.mujoco.mujoco_env import MujocoEnv
from rllab.misc import autoargs
from rllab.misc import logger
from rllab.misc.overrides import overrides
def smooth_abs(x, param):
    """Smooth (soft) absolute value: sqrt(x**2 + param**2) - param.

    np.hypot computes sqrt(x**2 + param**2) in one call.
    """
    return np.hypot(x, param) - param
class Walker2DEnv(MujocoEnv, Serializable):
    """Planar biped walker: run forward while staying upright."""

    FILE = 'walker2d.xml'

    @autoargs.arg('ctrl_cost_coeff', type=float,
                  help='cost coefficient for controls')
    def __init__(
            self,
            ctrl_cost_coeff=1e-2,
            *args, **kwargs):
        self.ctrl_cost_coeff = ctrl_cost_coeff
        super(Walker2DEnv, self).__init__(*args, **kwargs)
        Serializable.quick_init(self, locals())
    def get_current_obs(self):
        """Flat observation: joint positions, velocities and torso COM."""
        return np.concatenate([
            self.model.data.qpos.flat,
            self.model.data.qvel.flat,
            self.get_body_com("torso").flat,
        ])
    def step(self, action):
        self.forward_dynamics(action)
        next_obs = self.get_current_obs()
        action = np.clip(action, *self.action_bounds)
        lb, ub = self.action_bounds
        # Controls normalized by half the action range before costing.
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * self.ctrl_cost_coeff * \
            np.sum(np.square(action / scaling))
        forward_reward = self.get_body_comvel("torso")[0]
        reward = forward_reward - ctrl_cost
        qpos = self.model.data.qpos
        # Terminate when qpos[0]/qpos[2] leave their healthy ranges —
        # presumably torso height and pitch; verify against the model.
        done = not (qpos[0] > 0.8 and qpos[0] < 2.0
                    and qpos[2] > -1.0 and qpos[2] < 1.0)
        return Step(next_obs, reward, done)
    @overrides
    def log_diagnostics(self, paths):
        # Forward progress: change of the third-from-last observation component.
        progs = [
            path["observations"][-1][-3] - path["observations"][0][-3]
            for path in paths
        ]
        logger.record_tabular('AverageForwardProgress', np.mean(progs))
        logger.record_tabular('MaxForwardProgress', np.max(progs))
        logger.record_tabular('MinForwardProgress', np.min(progs))
        logger.record_tabular('StdForwardProgress', np.std(progs))
| 2,058 | 32.209677 | 71 | py |
rllab | rllab-master/rllab/envs/mujoco/gather/point_gather_env.py | from rllab.envs.mujoco.gather.gather_env import GatherEnv
from rllab.envs.mujoco.point_env import PointEnv
class PointGatherEnv(GatherEnv):
    """GatherEnv instantiated with the PointEnv robot."""
    MODEL_CLASS = PointEnv
    ORI_IND = 2  # heading angle index in qpos (matches PointEnv)
| 186 | 19.777778 | 57 | py |
rllab | rllab-master/rllab/envs/mujoco/gather/swimmer_gather_env.py | from rllab.envs.mujoco.gather.gather_env import GatherEnv
from rllab.envs.mujoco.swimmer_env import SwimmerEnv
class SwimmerGatherEnv(GatherEnv):
    """GatherEnv instantiated with the SwimmerEnv robot."""
    MODEL_CLASS = SwimmerEnv
    ORI_IND = 2  # heading angle index in qpos (matches SwimmerEnv.ORI_IND)
| 194 | 20.666667 | 57 | py |
rllab | rllab-master/rllab/envs/mujoco/gather/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/envs/mujoco/gather/gather_env.py | import math
import os.path as osp
import tempfile
import xml.etree.ElementTree as ET
from ctypes import byref
import numpy as np
from rllab.misc import logger
from rllab import spaces
from rllab.core.serializable import Serializable
from rllab.envs.proxy_env import ProxyEnv
from rllab.envs.base import Step
from rllab.envs.mujoco.gather.embedded_viewer import EmbeddedViewer
from rllab.envs.mujoco.mujoco_env import MODEL_DIR, BIG
from rllab.misc import autoargs
from rllab.misc.overrides import overrides
from rllab.mujoco_py import MjViewer, MjModel, mjcore, mjlib, \
mjextra, glfw
APPLE = 0
BOMB = 1
class GatherViewer(MjViewer):
    def __init__(self, env):
        """Viewer for the gather task: the main MuJoCo viewer plus two
        embedded renderers that draw the collectible green/red balls."""
        self.env = env
        super(GatherViewer, self).__init__()
        # Auxiliary model + embedded renderer for the green (apple) balls.
        green_ball_model = MjModel(osp.abspath(
            osp.join(
                MODEL_DIR, 'green_ball.xml'
            )
        ))
        self.green_ball_renderer = EmbeddedViewer()
        self.green_ball_model = green_ball_model
        self.green_ball_renderer.set_model(green_ball_model)
        # Auxiliary model + embedded renderer for the red (bomb) balls.
        red_ball_model = MjModel(osp.abspath(
            osp.join(
                MODEL_DIR, 'red_ball.xml'
            )
        ))
        self.red_ball_renderer = EmbeddedViewer()
        self.red_ball_model = red_ball_model
        self.red_ball_renderer.set_model(red_ball_model)
def start(self):
super(GatherViewer, self).start()
self.green_ball_renderer.start(self.window)
self.red_ball_renderer.start(self.window)
def handle_mouse_move(self, window, xpos, ypos):
super(GatherViewer, self).handle_mouse_move(window, xpos, ypos)
self.green_ball_renderer.handle_mouse_move(window, xpos, ypos)
self.red_ball_renderer.handle_mouse_move(window, xpos, ypos)
def handle_scroll(self, window, x_offset, y_offset):
super(GatherViewer, self).handle_scroll(window, x_offset, y_offset)
self.green_ball_renderer.handle_scroll(window, x_offset, y_offset)
self.red_ball_renderer.handle_scroll(window, x_offset, y_offset)
def render(self):
super(GatherViewer, self).render()
tmpobjects = mjcore.MJVOBJECTS()
mjlib.mjlib.mjv_makeObjects(byref(tmpobjects), 1000)
for obj in self.env.objects:
x, y, typ = obj
# print x, y
qpos = np.zeros_like(self.green_ball_model.data.qpos)
qpos[0, 0] = x
qpos[1, 0] = y
if typ == APPLE:
self.green_ball_model.data.qpos = qpos
self.green_ball_model.forward()
self.green_ball_renderer.render()
mjextra.append_objects(
tmpobjects, self.green_ball_renderer.objects)
else:
self.red_ball_model.data.qpos = qpos
self.red_ball_model.forward()
self.red_ball_renderer.render()
mjextra.append_objects(
tmpobjects, self.red_ball_renderer.objects)
mjextra.append_objects(tmpobjects, self.objects)
mjlib.mjlib.mjv_makeLights(
self.model.ptr, self.data.ptr, byref(tmpobjects))
mjlib.mjlib.mjr_render(0, self.get_rect(), byref(tmpobjects), byref(
self.ropt), byref(self.cam.pose), byref(self.con))
try:
import OpenGL.GL as GL
except:
return
def draw_rect(x, y, width, height):
# start drawing a rectangle
GL.glBegin(GL.GL_QUADS)
# bottom left point
GL.glVertex2f(x, y)
# bottom right point
GL.glVertex2f(x + width, y)
# top right point
GL.glVertex2f(x + width, y + height)
# top left point
GL.glVertex2f(x, y + height)
GL.glEnd()
def refresh2d(width, height):
GL.glViewport(0, 0, width, height)
GL.glMatrixMode(GL.GL_PROJECTION)
GL.glLoadIdentity()
GL.glOrtho(0.0, width, 0.0, height, 0.0, 1.0)
GL.glMatrixMode(GL.GL_MODELVIEW)
GL.glLoadIdentity()
GL.glLoadIdentity()
width, height = glfw.get_framebuffer_size(self.window)
refresh2d(width, height)
GL.glDisable(GL.GL_LIGHTING)
GL.glEnable(GL.GL_BLEND)
GL.glColor4f(0.0, 0.0, 0.0, 0.8)
draw_rect(10, 10, 300, 100)
apple_readings, bomb_readings = self.env.get_readings()
for idx, reading in enumerate(apple_readings):
if reading > 0:
GL.glColor4f(0.0, 1.0, 0.0, reading)
draw_rect(20 * (idx + 1), 10, 5, 50)
for idx, reading in enumerate(bomb_readings):
if reading > 0:
GL.glColor4f(1.0, 0.0, 0.0, reading)
draw_rect(20 * (idx + 1), 60, 5, 50)
class GatherEnv(ProxyEnv, Serializable):
    """Wrap a mujoco robot env in a 2D "gather" task.

    Apples and bombs are scattered inside a square walled arena; collecting
    an apple yields +1 reward and a bomb -1, on top of ``coef_inner_rew``
    times the wrapped robot's own reward.
    """

    MODEL_CLASS = None  # subclasses must set the robot env class to instantiate
    ORI_IND = None  # qpos index used as an orientation fallback in get_ori

    @autoargs.arg('n_apples', type=int,
                  help='Number of apples in each episode')
    @autoargs.arg('n_bombs', type=int,
                  help='Number of bombs in each episode')
    @autoargs.arg('activity_range', type=float,
                  help='The span for generating objects '
                       '(x, y in [-range, range])')
    @autoargs.arg('robot_object_spacing', type=float,
                  help='Number of objects in each episode')
    @autoargs.arg('catch_range', type=float,
                  help='Minimum distance range to catch an object')
    @autoargs.arg('n_bins', type=float,
                  help='Number of objects in each episode')
    @autoargs.arg('sensor_range', type=float,
                  help='Maximum sensor range (how far it can go)')
    @autoargs.arg('sensor_span', type=float,
                  help='Maximum sensor span (how wide it can span), in '
                       'radians')
    def __init__(
            self,
            n_apples=8,
            n_bombs=8,
            activity_range=6.,
            robot_object_spacing=2.,
            catch_range=1.,
            n_bins=10,
            sensor_range=6.,
            sensor_span=math.pi,
            coef_inner_rew=0.,
            dying_cost=-10,
            *args, **kwargs
    ):
        """Build the arena XML (robot model plus four boundary walls) and
        instantiate the wrapped robot env from it.

        Raises NotImplementedError if the subclass did not set MODEL_CLASS.
        """
        Serializable.quick_init(self, locals())
        self.n_apples = n_apples
        self.n_bombs = n_bombs
        self.activity_range = activity_range
        self.robot_object_spacing = robot_object_spacing
        self.catch_range = catch_range
        self.n_bins = n_bins
        self.sensor_range = sensor_range
        self.sensor_span = sensor_span
        self.coef_inner_rew = coef_inner_rew
        self.dying_cost = dying_cost
        self.objects = []
        self.viewer = None
        # super(GatherEnv, self).__init__(*args, **kwargs)
        model_cls = self.__class__.MODEL_CLASS
        if model_cls is None:
            # BUG FIX: raising a plain string is a TypeError in Python 3;
            # raise a real exception instead.
            raise NotImplementedError("MODEL_CLASS unspecified!")
        xml_path = osp.join(MODEL_DIR, model_cls.FILE)
        tree = ET.parse(xml_path)
        worldbody = tree.find(".//worldbody")
        # Shared attributes for the four boundary-wall geoms.
        attrs = dict(
            type="box", conaffinity="1", rgba="0.8 0.9 0.8 1", condim="3"
        )
        walldist = self.activity_range + 1
        ET.SubElement(
            worldbody, "geom", dict(
                attrs,
                name="wall1",
                pos="0 -%d 0" % walldist,
                size="%d.5 0.5 1" % walldist))
        ET.SubElement(
            worldbody, "geom", dict(
                attrs,
                name="wall2",
                pos="0 %d 0" % walldist,
                size="%d.5 0.5 1" % walldist))
        ET.SubElement(
            worldbody, "geom", dict(
                attrs,
                name="wall3",
                pos="-%d 0 0" % walldist,
                size="0.5 %d.5 1" % walldist))
        ET.SubElement(
            worldbody, "geom", dict(
                attrs,
                name="wall4",
                pos="%d 0 0" % walldist,
                size="0.5 %d.5 1" % walldist))
        # Write the augmented model to a temp file and build the robot from it.
        _, file_path = tempfile.mkstemp(text=True)
        tree.write(file_path)
        # pylint: disable=not-callable
        inner_env = model_cls(*args, file_path=file_path, **kwargs)
        # pylint: enable=not-callable
        ProxyEnv.__init__(self, inner_env)  # to access the inner env, do self.wrapped_env
    def reset(self, also_wrapped=True):
        """Re-sample apple and bomb positions on even grid points, rejecting
        spots too close to the robot spawn and duplicates; optionally reset
        the wrapped robot env too. Returns the initial observation."""
        self.objects = []
        existing = set()
        # First place the apples...
        while len(self.objects) < self.n_apples:
            x = np.random.randint(-self.activity_range / 2,
                                  self.activity_range / 2) * 2
            y = np.random.randint(-self.activity_range / 2,
                                  self.activity_range / 2) * 2
            # regenerate, since it is too close to the robot's initial position
            if x ** 2 + y ** 2 < self.robot_object_spacing ** 2:
                continue
            if (x, y) in existing:
                continue
            typ = APPLE
            self.objects.append((x, y, typ))
            existing.add((x, y))
        # ...then the bombs, with the same rejection rules.
        while len(self.objects) < self.n_apples + self.n_bombs:
            x = np.random.randint(-self.activity_range / 2,
                                  self.activity_range / 2) * 2
            y = np.random.randint(-self.activity_range / 2,
                                  self.activity_range / 2) * 2
            # regenerate, since it is too close to the robot's initial position
            if x ** 2 + y ** 2 < self.robot_object_spacing ** 2:
                continue
            if (x, y) in existing:
                continue
            typ = BOMB
            self.objects.append((x, y, typ))
            existing.add((x, y))
        if also_wrapped:
            self.wrapped_env.reset()
        return self.get_current_obs()
def step(self, action):
_, inner_rew, done, info = self.wrapped_env.step(action)
info['inner_rew'] = inner_rew
info['outer_rew'] = 0
if done:
return Step(self.get_current_obs(), self.dying_cost, done, **info) # give a -10 rew if the robot dies
com = self.wrapped_env.get_body_com("torso")
x, y = com[:2]
reward = self.coef_inner_rew * inner_rew
new_objs = []
for obj in self.objects:
ox, oy, typ = obj
# object within zone!
if (ox - x) ** 2 + (oy - y) ** 2 < self.catch_range ** 2:
if typ == APPLE:
reward = reward + 1
info['outer_rew'] = 1
else:
reward = reward - 1
info['outer_rew'] = -1
else:
new_objs.append(obj)
self.objects = new_objs
done = len(self.objects) == 0
return Step(self.get_current_obs(), reward, done, **info)
    def get_readings(self):  # equivalent to get_current_maze_obs in maze_env.py
        """Return (apple_readings, bomb_readings): two length-n_bins arrays of
        sensor intensities in [0, 1] (1 at zero distance, 0 at sensor_range),
        binned by relative bearing within sensor_span."""
        # compute sensor readings
        # first, obtain current orientation
        apple_readings = np.zeros(self.n_bins)
        bomb_readings = np.zeros(self.n_bins)
        robot_x, robot_y = self.wrapped_env.get_body_com("torso")[:2]
        # sort objects by distance to the robot, so that farther objects'
        # signals will be occluded by the closer ones'
        sorted_objects = sorted(
            self.objects, key=lambda o:
            (o[0] - robot_x) ** 2 + (o[1] - robot_y) ** 2)[::-1]
        # fill the readings
        bin_res = self.sensor_span / self.n_bins
        ori = self.get_ori()  # overwrite this for Ant!
        for ox, oy, typ in sorted_objects:
            # compute distance between object and robot
            dist = ((oy - robot_y) ** 2 + (ox - robot_x) ** 2) ** 0.5
            # only include readings for objects within range
            if dist > self.sensor_range:
                continue
            # bearing of the object relative to the robot's heading
            angle = math.atan2(oy - robot_y, ox - robot_x) - ori
            if math.isnan(angle):
                # NOTE(review): leftover interactive debugger trap; consider
                # raising an exception here instead.
                import ipdb; ipdb.set_trace()
            # normalize the angle into (-pi, pi]
            angle = angle % (2 * math.pi)
            if angle > math.pi:
                angle = angle - 2 * math.pi
            if angle < -math.pi:
                angle = angle + 2 * math.pi
            # outside of sensor span - skip this
            half_span = self.sensor_span * 0.5
            if abs(angle) > half_span:
                continue
            bin_number = int((angle + half_span) / bin_res)
            # intensity decays linearly with distance
            intensity = 1.0 - dist / self.sensor_range
            if typ == APPLE:
                apple_readings[bin_number] = intensity
            else:
                bomb_readings[bin_number] = intensity
        return apple_readings, bomb_readings
    def get_current_robot_obs(self):
        """Return only the wrapped robot's own observation (no sensor data)."""
        return self.wrapped_env.get_current_obs()
    def get_current_obs(self):
        # return sensor data along with data about itself
        """Full observation: robot obs followed by apple and bomb readings."""
        self_obs = self.wrapped_env.get_current_obs()
        apple_readings, bomb_readings = self.get_readings()
        return np.concatenate([self_obs, apple_readings, bomb_readings])
    @property
    @overrides
    def observation_space(self):
        """Box spanning the full (robot + sensor) observation vector."""
        shp = self.get_current_obs().shape
        ub = BIG * np.ones(shp)
        return spaces.Box(ub * -1, ub)
    # space of only the robot observations (they go first in the get current obs)
    @property
    def robot_observation_space(self):
        """Box spanning only the wrapped robot's observation vector."""
        shp = self.get_current_robot_obs().shape
        ub = BIG * np.ones(shp)
        return spaces.Box(ub * -1, ub)
    @property
    def maze_observation_space(self):
        """Box spanning only the sensor readings (apple + bomb bins)."""
        shp = np.concatenate(self.get_readings()).shape
        ub = BIG * np.ones(shp)
        return spaces.Box(ub * -1, ub)
    @property
    @overrides
    def action_space(self):
        """Delegate to the wrapped robot env's action space."""
        return self.wrapped_env.action_space
    @property
    def action_bounds(self):
        """Delegate to the wrapped robot env's action bounds."""
        return self.wrapped_env.action_bounds
# @property
# def viewer(self):
# return self.wrapped_env.viewer
    def action_from_key(self, key):
        """Delegate keyboard-to-action mapping to the wrapped robot env."""
        return self.wrapped_env.action_from_key(key)
    def get_viewer(self):
        """Lazily create and start a GatherViewer on the wrapped env."""
        if self.wrapped_env.viewer is None:
            self.wrapped_env.viewer = GatherViewer(self)
            self.wrapped_env.viewer.start()
            self.wrapped_env.viewer.set_model(self.wrapped_env.model)
        return self.wrapped_env.viewer
    def stop_viewer(self):
        """Shut down the viewer if one was created."""
        if self.wrapped_env.viewer:
            self.wrapped_env.viewer.finish()
def render(self, mode='human', close=False):
if mode == 'rgb_array':
self.get_viewer().render()
data, width, height = self.get_viewer().get_image()
return np.fromstring(data, dtype='uint8').reshape(height, width, 3)[::-1,:,:]
elif mode == 'human':
self.get_viewer()
self.wrapped_env.render()
if close:
self.stop_viewer()
    def get_ori(self):
        """
        First it tries to use a get_ori from the wrapped env. If not successfull, falls
        back to the default based on the ORI_IND specified in Maze (not accurate for quaternions)
        """
        obj = self.wrapped_env
        # Walk down through nested wrappers until one exposes get_ori.
        while not hasattr(obj, 'get_ori') and hasattr(obj, 'wrapped_env'):
            obj = obj.wrapped_env
        try:
            return obj.get_ori()
        except (NotImplementedError, AttributeError) as e:
            pass
        # Fallback: read the raw qpos entry at ORI_IND.
        return self.wrapped_env.model.data.qpos[self.__class__.ORI_IND]
    @overrides
    def log_diagnostics(self, paths, log_prefix='Gather', *args, **kwargs):
        # we call here any logging related to the gather, strip the maze obs and call log_diag with the stripped paths
        # we need to log the purely gather reward!!
        """Log gather-level return stats, then strip the sensor observations
        from each path and forward them to the wrapped env's diagnostics."""
        with logger.tabular_prefix(log_prefix + '_'):
            gather_undiscounted_returns = [sum(path['env_infos']['outer_rew']) for path in paths]
            logger.record_tabular_misc_stat('Return', gather_undiscounted_returns, placement='front')
        stripped_paths = []
        for path in paths:
            stripped_path = {}
            for k, v in path.items():
                stripped_path[k] = v
            # Keep only the robot's own observation columns (they come first).
            stripped_path['observations'] = \
                stripped_path['observations'][:, :self.wrapped_env.observation_space.flat_dim]
            # this breaks if the obs of the robot are d>1 dimensional (not a vector)
            stripped_paths.append(stripped_path)
        with logger.tabular_prefix('wrapped_'):
            if 'env_infos' in paths[0].keys() and 'inner_rew' in paths[0]['env_infos'].keys():
                wrapped_undiscounted_return = np.mean([np.sum(path['env_infos']['inner_rew']) for path in paths])
                logger.record_tabular('AverageReturn', wrapped_undiscounted_return)
        self.wrapped_env.log_diagnostics(stripped_paths)  # see swimmer_env.py for a scketch of the maze plotting!
| 16,731 | 37.731481 | 118 | py |
rllab | rllab-master/rllab/envs/mujoco/gather/embedded_viewer.py | from rllab.mujoco_py import glfw, mjcore
import rllab.mujoco_py.mjconstants as C
from rllab.mujoco_py.mjlib import mjlib
from ctypes import byref
import ctypes
from threading import Lock
mjCAT_ALL = 7
class EmbeddedViewer(object):
    """A mujoco renderer that draws into an existing glfw window instead of
    owning one, used by GatherViewer to overlay extra models (the apple and
    bomb balls) on the main scene."""

    def __init__(self):
        self.last_render_time = 0
        # mujoco visualization state (abstract objects, camera, options, context)
        self.objects = mjcore.MJVOBJECTS()
        self.cam = mjcore.MJVCAMERA()
        self.vopt = mjcore.MJVOPTION()
        self.ropt = mjcore.MJROPTION()
        self.con = mjcore.MJRCONTEXT()
        self.running = False
        self.speedtype = 1
        self.window = None
        self.model = None
        # serializes GL/camera mutations between event handlers and render
        self.gui_lock = Lock()
        self.last_button = 0
        self.last_click_time = 0
        self.button_left_pressed = False
        self.button_middle_pressed = False
        self.button_right_pressed = False
        self.last_mouse_x = 0
        self.last_mouse_y = 0
        self.frames = []

    def set_model(self, model):
        """Attach (or detach, with None) a MjModel; rebuilds the render
        context and autoscales the camera if already running."""
        self.model = model
        if model:
            self.data = model.data
        else:
            self.data = None
        if self.running:
            if model:
                mjlib.mjr_makeContext(model.ptr, byref(self.con), 150)
            else:
                mjlib.mjr_makeContext(None, byref(self.con), 150)
            self.render()
        if model:
            self.autoscale()

    def autoscale(self):
        """Point the free camera at the model's center, scaled to its extent."""
        self.cam.lookat[0] = self.model.stat.center[0]
        self.cam.lookat[1] = self.model.stat.center[1]
        self.cam.lookat[2] = self.model.stat.center[2]
        self.cam.distance = 1.0 * self.model.stat.extent
        self.cam.camid = -1
        self.cam.trackbodyid = -1
        if self.window:
            width, height = glfw.get_framebuffer_size(self.window)
            mjlib.mjv_updateCameraPose(byref(self.cam), width * 1.0 / height)

    def get_rect(self):
        """Viewport rectangle covering the whole framebuffer."""
        rect = mjcore.MJRRECT(0, 0, 0, 0)
        rect.width, rect.height = glfw.get_framebuffer_size(self.window)
        return rect

    def record_frame(self, **kwargs):
        # Store the current qpos plus arbitrary extras for later playback.
        self.frames.append({'pos': self.model.data.qpos, 'extra': kwargs})

    def clear_frames(self):
        self.frames = []

    def render(self):
        """Build the geom list from the current model state and render it."""
        rect = self.get_rect()
        arr = (ctypes.c_double * 3)(0, 0, 0)
        mjlib.mjv_makeGeoms(
            self.model.ptr, self.data.ptr, byref(self.objects),
            byref(self.vopt), mjCAT_ALL, 0, None, None,
            ctypes.cast(arr, ctypes.POINTER(ctypes.c_double)))
        mjlib.mjv_setCamera(self.model.ptr, self.data.ptr, byref(self.cam))
        mjlib.mjv_updateCameraPose(
            byref(self.cam), rect.width * 1.0 / rect.height)
        mjlib.mjr_render(0, rect, byref(self.objects), byref(
            self.ropt), byref(self.cam.pose), byref(self.con))

    def render_internal(self):
        """Render under the gui lock; no-op when no model data is attached."""
        if not self.data:
            return
        self.gui_lock.acquire()
        self.render()
        self.gui_lock.release()

    def start(self, window):
        """Bind to an existing glfw window and initialize mujoco render state."""
        self.running = True
        width, height = glfw.get_framebuffer_size(window)
        width1, height = glfw.get_window_size(window)
        # framebuffer-to-window scale (e.g. 2.0 on HiDPI displays)
        self.scale = width * 1.0 / width1
        self.window = window
        mjlib.mjv_makeObjects(byref(self.objects), 1000)
        mjlib.mjv_defaultCamera(byref(self.cam))
        mjlib.mjv_defaultOption(byref(self.vopt))
        mjlib.mjr_defaultOption(byref(self.ropt))
        mjlib.mjr_defaultContext(byref(self.con))
        if self.model:
            mjlib.mjr_makeContext(self.model.ptr, byref(self.con), 150)
            self.autoscale()
        else:
            mjlib.mjr_makeContext(None, byref(self.con), 150)

    def handle_mouse_move(self, window, xpos, ypos):
        """Translate mouse drags into camera moves (rotate/translate/zoom)."""
        # no buttons down: nothing to do
        if not self.button_left_pressed \
                and not self.button_middle_pressed \
                and not self.button_right_pressed:
            return
        # compute mouse displacement, save
        dx = int(self.scale * xpos) - self.last_mouse_x
        dy = int(self.scale * ypos) - self.last_mouse_y
        self.last_mouse_x = int(self.scale * xpos)
        self.last_mouse_y = int(self.scale * ypos)
        # require model
        if not self.model:
            return
        # get current window size
        width, height = glfw.get_framebuffer_size(self.window)
        # get shift key state
        mod_shift = glfw.get_key(window, glfw.KEY_LEFT_SHIFT) == glfw.PRESS \
            or glfw.get_key(window, glfw.KEY_RIGHT_SHIFT) == glfw.PRESS
        # determine action based on mouse button
        action = None
        if self.button_right_pressed:
            action = C.MOUSE_MOVE_H if mod_shift else C.MOUSE_MOVE_V
        elif self.button_left_pressed:
            action = C.MOUSE_ROTATE_H if mod_shift else C.MOUSE_ROTATE_V
        else:
            action = C.MOUSE_ZOOM
        self.gui_lock.acquire()
        mjlib.mjv_moveCamera(action, dx, dy, byref(self.cam), width, height)
        self.gui_lock.release()

    def handle_mouse_button(self, window, button, act, mods):
        """Track press state and position of all three mouse buttons."""
        # update button state
        self.button_left_pressed = \
            glfw.get_mouse_button(window, glfw.MOUSE_BUTTON_LEFT) == glfw.PRESS
        self.button_middle_pressed = \
            glfw.get_mouse_button(
                window, glfw.MOUSE_BUTTON_MIDDLE) == glfw.PRESS
        self.button_right_pressed = \
            glfw.get_mouse_button(
                window, glfw.MOUSE_BUTTON_RIGHT) == glfw.PRESS
        # update mouse position
        x, y = glfw.get_cursor_pos(window)
        self.last_mouse_x = int(self.scale * x)
        self.last_mouse_y = int(self.scale * y)
        if not self.model:
            return
        self.gui_lock.acquire()
        # save info
        if act == glfw.PRESS:
            self.last_button = button
            self.last_click_time = glfw.get_time()
        self.gui_lock.release()

    def handle_scroll(self, window, x_offset, y_offset):
        """Translate scroll events into camera zoom."""
        # require model
        if not self.model:
            return
        # get current window size
        width, height = glfw.get_framebuffer_size(window)
        # scroll
        self.gui_lock.acquire()
        mjlib.mjv_moveCamera(C.MOUSE_ZOOM, 0, (-20 * y_offset),
                             byref(self.cam), width, height)
        self.gui_lock.release()

    def should_stop(self):
        """True once the underlying window has been asked to close."""
        return glfw.window_should_close(self.window)

    def loop_once(self):
        """Render one frame, swap buffers, and pump window events."""
        self.render()
        # Swap front and back buffers
        glfw.swap_buffers(self.window)
        # Poll for and process events
        glfw.poll_events()

    def finish(self):
        """Tear down glfw and release all mujoco render resources."""
        glfw.terminate()
        mjlib.mjr_freeContext(byref(self.con))
        mjlib.mjv_freeObjects(byref(self.objects))
        self.running = False
| 6,754 | 30.713615 | 79 | py |
rllab | rllab-master/rllab/envs/mujoco/gather/ant_gather_env.py | from rllab.envs.mujoco.gather.gather_env import GatherEnv
from rllab.envs.mujoco.ant_env import AntEnv
class AntGatherEnv(GatherEnv):
    """Gather task (collect apples, avoid bombs) driven by the ant robot."""
    # Robot env class instantiated by GatherEnv.__init__.
    MODEL_CLASS = AntEnv
    # qpos index of the orientation entry (fallback used by GatherEnv.get_ori).
    ORI_IND = 6
| 178 | 18.888889 | 57 | py |
rllab | rllab-master/rllab/envs/mujoco/maze/point_maze_env.py | from rllab.envs.mujoco.maze.maze_env import MazeEnv
from rllab.envs.mujoco.point_env import PointEnv
class PointMazeEnv(MazeEnv):
    """Maze navigation task driven by the point robot."""
    # Robot env class instantiated by MazeEnv.__init__.
    MODEL_CLASS = PointEnv
    # qpos index of the orientation entry (fallback used by MazeEnv.get_ori).
    ORI_IND = 2
    # Wall height and cell size for the generated maze geometry.
    MAZE_HEIGHT = 2
    MAZE_SIZE_SCALING = 3.0
    # Undo steps that end inside a wall instead of relying on mujoco contacts.
    MANUAL_COLLISION = True
| 254 | 17.214286 | 51 | py |
rllab | rllab-master/rllab/envs/mujoco/maze/maze_env.py | import os.path as osp
import tempfile
import xml.etree.ElementTree as ET
import math
import numpy as np
from rllab import spaces
from rllab.envs.base import Step
from rllab.envs.proxy_env import ProxyEnv
from rllab.envs.mujoco.maze.maze_env_utils import construct_maze
from rllab.envs.mujoco.mujoco_env import MODEL_DIR, BIG
from rllab.envs.mujoco.maze.maze_env_utils import ray_segment_intersect, point_distance
from rllab.core.serializable import Serializable
from rllab.misc.overrides import overrides
from rllab.misc import logger
class MazeEnv(ProxyEnv, Serializable):
    """Wrap a mujoco robot env inside a maze of wall blocks with a goal cell.

    The observation is the robot's own observation concatenated with ray
    sensor readings for walls and the goal; reaching the goal cell yields
    ``goal_rew`` and terminates the episode.
    """

    MODEL_CLASS = None  # subclasses must set the robot env class to instantiate
    ORI_IND = None  # qpos index used as an orientation fallback in get_ori

    MAZE_HEIGHT = None
    MAZE_SIZE_SCALING = None
    MAZE_MAKE_CONTACTS = False
    # Default layout: 1 = wall, 0 = free, 'r' = robot start, 'g' = goal.
    # Overwritten in __init__ by construct_maze(maze_id).
    MAZE_STRUCTURE = [
        [1, 1, 1, 1, 1],
        [1, 'r', 0, 0, 1],
        [1, 1, 1, 0, 1],
        [1, 'g', 0, 0, 1],
        [1, 1, 1, 1, 1],
    ]

    MANUAL_COLLISION = False  # if True, step() undoes moves that end in a wall

    def __init__(
            self,
            n_bins=20,
            sensor_range=10.,
            sensor_span=math.pi,
            maze_id=0,
            length=1,
            maze_height=0.5,
            maze_size_scaling=2,
            coef_inner_rew=0.,  # a coef of 0 gives no reward to the maze from the wrapped env.
            goal_rew=1.,  # reward obtained when reaching the goal
            *args,
            **kwargs):
        """Build the maze XML (robot model plus wall blocks) and instantiate
        the wrapped robot env from it.

        Raises NotImplementedError if the subclass did not set MODEL_CLASS.
        """
        # FIX: quick_init was called twice; once is enough.
        Serializable.quick_init(self, locals())
        self._n_bins = n_bins
        self._sensor_range = sensor_range
        self._sensor_span = sensor_span
        self._maze_id = maze_id
        self.length = length
        self.coef_inner_rew = coef_inner_rew
        self.goal_rew = goal_rew
        model_cls = self.__class__.MODEL_CLASS
        if model_cls is None:
            # BUG FIX: raising a plain string is a TypeError in Python 3;
            # raise a real exception instead.
            raise NotImplementedError("MODEL_CLASS unspecified!")
        xml_path = osp.join(MODEL_DIR, model_cls.FILE)
        tree = ET.parse(xml_path)
        worldbody = tree.find(".//worldbody")
        self.MAZE_HEIGHT = height = maze_height
        self.MAZE_SIZE_SCALING = size_scaling = maze_size_scaling
        self.MAZE_STRUCTURE = structure = construct_maze(maze_id=self._maze_id, length=self.length)
        torso_x, torso_y = self._find_robot()
        self._init_torso_x = torso_x
        self._init_torso_y = torso_y
        # Emit one box geom per wall cell of the maze grid.
        for i in range(len(structure)):
            for j in range(len(structure[0])):
                if str(structure[i][j]) == '1':
                    # offset all coordinates so that robot starts at the origin
                    ET.SubElement(
                        worldbody, "geom",
                        name="block_%d_%d" % (i, j),
                        pos="%f %f %f" % (j * size_scaling - torso_x,
                                          i * size_scaling - torso_y,
                                          height / 2 * size_scaling),
                        size="%f %f %f" % (0.5 * size_scaling,
                                           0.5 * size_scaling,
                                           height / 2 * size_scaling),
                        type="box",
                        material="",
                        contype="1",
                        conaffinity="1",
                        rgba="0.4 0.4 0.4 1"
                    )
        torso = tree.find(".//body[@name='torso']")
        geoms = torso.findall(".//geom")
        for geom in geoms:
            if 'name' not in geom.attrib:
                raise Exception("Every geom of the torso must have a name "
                                "defined")
        # Optionally declare explicit contact pairs between torso geoms
        # and every wall block.
        if self.__class__.MAZE_MAKE_CONTACTS:
            contact = ET.SubElement(
                tree.find("."), "contact"
            )
            for i in range(len(structure)):
                for j in range(len(structure[0])):
                    if str(structure[i][j]) == '1':
                        for geom in geoms:
                            ET.SubElement(
                                contact, "pair",
                                geom1=geom.attrib["name"],
                                geom2="block_%d_%d" % (i, j)
                            )
        _, file_path = tempfile.mkstemp(text=True)
        tree.write(file_path)  # here we write a temporal file with the robot specifications. Why not the original one??
        self._goal_range = self._find_goal_range()
        self._cached_segments = None
        inner_env = model_cls(*args, file_path=file_path, **kwargs)  # file to the robot specifications
        ProxyEnv.__init__(self, inner_env)  # here is where the robot env will be initialized
    def get_current_maze_obs(self):
        # The observation would include both information about the robot itself as well as the sensors around its
        # environment
        """Cast n_bins rays across sensor_span and return the concatenated
        wall and goal intensity readings (each in [0, 1], closer = higher)."""
        robot_x, robot_y = self.wrapped_env.get_body_com("torso")[:2]
        ori = self.get_ori()
        structure = self.MAZE_STRUCTURE
        size_scaling = self.MAZE_SIZE_SCALING
        segments = []
        # compute the distance of all segments
        # Get all line segments of the goal and the obstacles
        for i in range(len(structure)):
            for j in range(len(structure[0])):
                if structure[i][j] == 1 or structure[i][j] == 'g':
                    # Four edges of the (wall or goal) cell, in world coords.
                    cx = j * size_scaling - self._init_torso_x
                    cy = i * size_scaling - self._init_torso_y
                    x1 = cx - 0.5 * size_scaling
                    x2 = cx + 0.5 * size_scaling
                    y1 = cy - 0.5 * size_scaling
                    y2 = cy + 0.5 * size_scaling
                    struct_segments = [
                        ((x1, y1), (x2, y1)),
                        ((x2, y1), (x2, y2)),
                        ((x2, y2), (x1, y2)),
                        ((x1, y2), (x1, y1)),
                    ]
                    for seg in struct_segments:
                        segments.append(dict(
                            segment=seg,
                            type=structure[i][j],
                        ))
        wall_readings = np.zeros(self._n_bins)
        goal_readings = np.zeros(self._n_bins)
        for ray_idx in range(self._n_bins):
            # Center each ray in its angular bin.
            ray_ori = ori - self._sensor_span * 0.5 + 1.0 * (2 * ray_idx + 1) / (2 * self._n_bins) * self._sensor_span
            ray_segments = []
            for seg in segments:
                p = ray_segment_intersect(ray=((robot_x, robot_y), ray_ori), segment=seg["segment"])
                if p is not None:
                    ray_segments.append(dict(
                        segment=seg["segment"],
                        type=seg["type"],
                        ray_ori=ray_ori,
                        distance=point_distance(p, (robot_x, robot_y)),
                    ))
            if len(ray_segments) > 0:
                # Only the nearest hit counts; farther segments are occluded.
                first_seg = sorted(ray_segments, key=lambda x: x["distance"])[0]
                # print first_seg
                if first_seg["type"] == 1:
                    # Wall -> add to wall readings
                    if first_seg["distance"] <= self._sensor_range:
                        wall_readings[ray_idx] = (self._sensor_range - first_seg["distance"]) / self._sensor_range
                elif first_seg["type"] == 'g':
                    # Goal -> add to goal readings
                    if first_seg["distance"] <= self._sensor_range:
                        goal_readings[ray_idx] = (self._sensor_range - first_seg["distance"]) / self._sensor_range
                else:
                    assert False
        obs = np.concatenate([
            wall_readings,
            goal_readings
        ])
        return obs
    def get_current_robot_obs(self):
        """Return only the wrapped robot's own observation (no maze sensors)."""
        return self.wrapped_env.get_current_obs()
    def get_current_obs(self):
        """Full observation: robot obs followed by maze sensor readings."""
        return np.concatenate([self.wrapped_env.get_current_obs(),
                               self.get_current_maze_obs()
                               ])
    def get_ori(self):
        """
        First it tries to use a get_ori from the wrapped env. If not successfull, falls
        back to the default based on the ORI_IND specified in Maze (not accurate for quaternions)
        """
        obj = self.wrapped_env
        # Walk down through nested wrappers until one exposes get_ori.
        while not hasattr(obj, 'get_ori') and hasattr(obj, 'wrapped_env'):
            obj = obj.wrapped_env
        try:
            return obj.get_ori()
        except (NotImplementedError, AttributeError) as e:
            pass
        # Fallback: read the raw qpos entry at ORI_IND.
        return self.wrapped_env.model.data.qpos[self.__class__.ORI_IND]
    def reset(self):
        """Reset the wrapped robot and return the initial full observation."""
        self.wrapped_env.reset()
        return self.get_current_obs()
    @property
    def viewer(self):
        """Delegate to the wrapped env's viewer."""
        return self.wrapped_env.viewer
    @property
    @overrides
    def observation_space(self):
        """Box spanning the full (robot + maze sensor) observation vector."""
        shp = self.get_current_obs().shape
        ub = BIG * np.ones(shp)
        return spaces.Box(ub * -1, ub)
    # space of only the robot observations (they go first in the get current obs) THIS COULD GO IN PROXYENV
    @property
    def robot_observation_space(self):
        """Box spanning only the wrapped robot's observation vector."""
        shp = self.get_current_robot_obs().shape
        ub = BIG * np.ones(shp)
        return spaces.Box(ub * -1, ub)
    @property
    def maze_observation_space(self):
        """Box spanning only the maze sensor readings (wall + goal bins)."""
        shp = self.get_current_maze_obs().shape
        ub = BIG * np.ones(shp)
        return spaces.Box(ub * -1, ub)
def _find_robot(self):
structure = self.MAZE_STRUCTURE
size_scaling = self.MAZE_SIZE_SCALING
for i in range(len(structure)):
for j in range(len(structure[0])):
if structure[i][j] == 'r':
return j * size_scaling, i * size_scaling
assert False
    def _find_goal_range(self):  # this only finds one goal!
        """Return (minx, maxx, miny, maxy) of the first 'g' cell in world
        coordinates (robot spawn at the origin).

        NOTE(review): implicitly returns None when the structure has no goal
        cell — callers unpack four values, so that would raise later.
        """
        structure = self.MAZE_STRUCTURE
        size_scaling = self.MAZE_SIZE_SCALING
        for i in range(len(structure)):
            for j in range(len(structure[0])):
                if structure[i][j] == 'g':
                    minx = j * size_scaling - size_scaling * 0.5 - self._init_torso_x
                    maxx = j * size_scaling + size_scaling * 0.5 - self._init_torso_x
                    miny = i * size_scaling - size_scaling * 0.5 - self._init_torso_y
                    maxy = i * size_scaling + size_scaling * 0.5 - self._init_torso_y
                    return minx, maxx, miny, maxy
def _is_in_collision(self, pos):
x, y = pos
structure = self.MAZE_STRUCTURE
size_scaling = self.MAZE_SIZE_SCALING
for i in range(len(structure)):
for j in range(len(structure[0])):
if structure[i][j] == 1:
minx = j * size_scaling - size_scaling * 0.5 - self._init_torso_x
maxx = j * size_scaling + size_scaling * 0.5 - self._init_torso_x
miny = i * size_scaling - size_scaling * 0.5 - self._init_torso_y
maxy = i * size_scaling + size_scaling * 0.5 - self._init_torso_y
if minx <= x <= maxx and miny <= y <= maxy:
return True
return False
    def step(self, action):
        """Step the wrapped robot; optionally undo wall collisions, scale the
        inner reward by coef_inner_rew, and grant goal_rew (terminating) when
        the torso enters the goal cell."""
        if self.MANUAL_COLLISION:
            old_pos = self.wrapped_env.get_xy()
            inner_next_obs, inner_rew, done, info = self.wrapped_env.step(action)
            new_pos = self.wrapped_env.get_xy()
            if self._is_in_collision(new_pos):
                # Undo the move and keep the episode alive.
                self.wrapped_env.set_xy(old_pos)
                done = False
        else:
            inner_next_obs, inner_rew, done, info = self.wrapped_env.step(action)
        next_obs = self.get_current_obs()
        x, y = self.wrapped_env.get_body_com("torso")[:2]
        # ref_x = x + self._init_torso_x
        # ref_y = y + self._init_torso_y
        info['outer_rew'] = 0
        info['inner_rew'] = inner_rew
        reward = self.coef_inner_rew * inner_rew
        minx, maxx, miny, maxy = self._goal_range
        if minx <= x <= maxx and miny <= y <= maxy:
            done = True
            reward += self.goal_rew  # we keep here the original one, so that the AvgReturn is directly the freq of success
            info['rew_rew'] = 1
        return Step(next_obs, reward, done, **info)
    def action_from_key(self, key):
        """Delegate keyboard-to-action mapping to the wrapped robot env."""
        return self.wrapped_env.action_from_key(key)
    @overrides
    def log_diagnostics(self, paths, *args, **kwargs):
        # we call here any logging related to the maze, strip the maze obs and call log_diag with the stripped paths
        # we need to log the purely gather reward!!
        """Log maze-level return stats, then strip the maze sensor columns
        from each path and forward them to the wrapped env's diagnostics."""
        with logger.tabular_prefix('Maze_'):
            gather_undiscounted_returns = [sum(path['env_infos']['outer_rew']) for path in paths]
            logger.record_tabular_misc_stat('Return', gather_undiscounted_returns, placement='front')
        stripped_paths = []
        for path in paths:
            stripped_path = {}
            for k, v in path.items():
                stripped_path[k] = v
            # Keep only the robot's own observation columns (they come first).
            stripped_path['observations'] = \
                stripped_path['observations'][:, :self.wrapped_env.observation_space.flat_dim]
            # this breaks if the obs of the robot are d>1 dimensional (not a vector)
            stripped_paths.append(stripped_path)
        with logger.tabular_prefix('wrapped_'):
            wrapped_undiscounted_return = np.mean([np.sum(path['env_infos']['inner_rew']) for path in paths])
            logger.record_tabular('AverageReturn', wrapped_undiscounted_return)
        self.wrapped_env.log_diagnostics(stripped_paths, *args, **kwargs)
| 13,398 | 39.975535 | 120 | py |
rllab | rllab-master/rllab/envs/mujoco/maze/maze_env_utils.py | from rllab.misc import logger
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import os.path as osp
import numpy as np
import math
def line_intersect(pt1, pt2, ptA, ptB):
    """Intersect Line(pt1, pt2) with Line(ptA, ptB).

    (Adapted from https://www.cs.hmc.edu/ACM/lectures/intersections.html)

    Returns a tuple (xi, yi, valid, r, s) where (xi, yi) is the intersection
    point, r and s are the scalar parameters along each line such that
    (xi, yi) = pt1 + r*(pt2 - pt1) = ptA + s*(ptB - ptA), and valid is 1 for
    a unique intersection or 0 when the lines are (near-)parallel.
    """
    DET_TOLERANCE = 0.00000001

    # First line: pt1 + r * (pt2 - pt1), in components.
    p1x, p1y = pt1
    p2x, p2y = pt2
    d1x = p2x - p1x
    d1y = p2y - p1y

    # Second line: ptA + s * (ptB - ptA), in components.
    pAx, pAy = ptA
    pBx, pBy = ptB
    d2x = pBx - pAx
    d2y = pBy - pAy

    # Solve the 2x2 linear system
    #   [ d1x  -d2x ] [ r ]   [ pAx - p1x ]
    #   [ d1y  -d2y ] [ s ] = [ pAy - p1y ]
    # whose determinant is (-d1x * d2y + d1y * d2x). A tiny determinant
    # means the lines are parallel (0 or infinitely many intersections).
    det = (-d1x * d2y + d1y * d2x)
    if math.fabs(det) < DET_TOLERANCE:
        return (0, 0, 0, 0, 0)
    inv_det = 1.0 / det

    # Scalar parameter along each line.
    r = inv_det * (-d2y * (pAx - p1x) + d2x * (pAy - p1y))
    s = inv_det * (-d1y * (pAx - p1x) + d1x * (pAy - p1y))

    # Average the two parameterizations of the intersection point for
    # numerical symmetry.
    xi = (p1x + r * d1x + pAx + s * d2x) / 2.0
    yi = (p1y + r * d1y + pAy + s * d2y) / 2.0
    return (xi, yi, 1, r, s)
def ray_segment_intersect(ray, segment):
    """
    Check if the ray originated from (x, y) with direction theta intersects the line segment (x1, y1) -- (x2, y2),
    and return the intersection point if there is one, else None.
    """
    (x, y), theta = ray
    # Pick any second point along the ray to define its supporting line.
    pt1 = (x, y)
    # FIX: was named `len`, which shadowed the builtin.
    step_len = 1
    pt2 = (x + step_len * math.cos(theta), y + step_len * math.sin(theta))
    xo, yo, valid, r, s = line_intersect(pt1, pt2, *segment)
    # r >= 0 keeps only the forward half-line; 0 <= s <= 1 keeps the segment.
    if valid and r >= 0 and 0 <= s <= 1:
        return (xo, yo)
    return None
def point_distance(p1, p2):
    """Euclidean distance between 2D points p1 and p2."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return (dx ** 2 + dy ** 2) ** 0.5
def construct_maze(maze_id=0, length=1):
    """Return a maze layout for the given maze_id.

    The layout is a list of rows where 1 is a wall, 0 is free space, 'r' is
    the robot start and 'g' a goal cell. ``length`` scales the maze for the
    ids that support it.

    Raises NotImplementedError for an unrecognized maze_id (or an
    unsupported length).
    """
    # define the maze to use
    if maze_id == 0:
        if length != 1:
            raise NotImplementedError("Maze_id 0 only has length 1!")
        structure = [
            [1, 1, 1, 1, 1],
            [1, 'r', 0, 0, 1],
            [1, 1, 1, 0, 1],
            [1, 'g', 0, 0, 1],
            [1, 1, 1, 1, 1],
        ]
    elif maze_id == 1:  # donuts maze: can reach the single goal by 2 equal paths
        c = length + 4
        M = np.ones((c, c))
        M[1:c - 1, (1, c - 2)] = 0
        M[(1, c - 2), 1:c - 1] = 0
        M = M.astype(int).tolist()
        M[1][c // 2] = 'r'
        M[c - 2][c // 2] = 'g'
        structure = M
    elif maze_id == 2:  # spiral maze: need to use all the keys (only makes sense for length >=3)
        c = length + 4
        M = np.ones((c, c))
        M[1:c - 1, (1, c - 2)] = 0
        M[(1, c - 2), 1:c - 1] = 0
        M = M.astype(int).tolist()
        M[1][c // 2] = 'r'
        # now block one of the ways and put the goal on the other side
        M[1][c // 2 - 1] = 1
        M[1][c // 2 - 2] = 'g'
        structure = M
    elif maze_id == 3:  # corridor with goals at the 2 extremes
        structure = [
            [1] * (2 * length + 5),
            [1, 'g'] + [0] * length + ['r'] + [0] * length + ['g', 1],
            [1] * (2 * length + 5),
        ]
    elif 4 <= maze_id <= 7:  # cross corridor, goal in
        c = 2 * length + 5
        M = np.ones((c, c))
        M = M - np.diag(np.ones(c))
        M = M - np.diag(np.ones(c - 1), 1) - np.diag(np.ones(c - 1), -1)
        i = np.arange(c)
        j = i[::-1]
        # carve the anti-diagonal band as well, forming an X of corridors
        M[i, j] = 0
        M[i[:-1], j[1:]] = 0
        M[i[1:], j[:-1]] = 0
        M[np.array([0, c - 1]), :] = 1
        M[:, np.array([0, c - 1])] = 1
        M = M.astype(int).tolist()
        M[c // 2][c // 2] = 'r'
        # maze_id selects which corner of the X holds the goal
        if maze_id == 4:
            M[1][1] = 'g'
        if maze_id == 5:
            M[1][c - 2] = 'g'
        if maze_id == 6:
            M[c - 2][1] = 'g'
        if maze_id == 7:
            M[c - 2][c - 2] = 'g'
        structure = M
    elif maze_id == 8:  # reflexion of benchmark maze
        structure = [
            [1, 1, 1, 1, 1],
            [1, 'g', 0, 0, 1],
            [1, 1, 1, 0, 1],
            [1, 'r', 0, 0, 1],
            [1, 1, 1, 1, 1],
        ]
    elif maze_id == 9:  # sym benchmark maze
        structure = [
            [1, 1, 1, 1, 1],
            [1, 0, 0, 'r', 1],
            [1, 0, 1, 1, 1],
            [1, 0, 0, 'g', 1],
            [1, 1, 1, 1, 1],
        ]
    elif maze_id == 10:  # reflexion of sym of benchmark maze
        structure = [
            [1, 1, 1, 1, 1],
            [1, 0, 0, 'g', 1],
            [1, 0, 1, 1, 1],
            [1, 0, 0, 'r', 1],
            [1, 1, 1, 1, 1],
        ]
    else:
        # BUG FIX: previously `structure` was left undefined here, so the
        # trailing `if structure:` check raised NameError instead of the
        # intended error.
        raise NotImplementedError("The provided MazeId is not recognized")
    return structure
def plot_ray(self, reading, ray_idx, color='r'):
    """Debug-plot a single sensor ray on top of a 2D rendering of the maze.

    The maze structure is rasterized into a doubled-resolution grid (border
    cells take one plot cell, interior cells take two) and drawn with pcolor;
    the robot position and one ray segment are overlaid.

    :param reading: sensor reading for this ray; the drawn segment length is
        ``sensor_range * (1 - reading)``, and a falsy reading is drawn with a
        near-zero length (1e-6). Presumably a normalized value in [0, 1] as
        produced by get_current_maze_obs -- TODO confirm.
    :param ray_idx: index of the ray within the fan spanning
        [ori - span/2, ori + span/2] across ``self._n_bins`` rays
    :param color: matplotlib color spec for the ray segment
    """
    structure = self.MAZE_STRUCTURE
    size_scaling = self.MAZE_SIZE_SCALING
    # duplicate cells to plot the maze
    structure_plot = np.zeros(((len(structure) - 1) * 2, (len(structure[0]) - 1) * 2))
    for i in range(len(structure)):
        for j in range(len(structure[0])):
            cell = structure[i][j]
            if type(cell) is not int:
                # 'r' (robot start) and 'g' (goal) get distinct gray levels
                cell = 0.3 if cell == 'r' else 0.7
            # border rows/cols map to a single plot cell, interior to two
            if i == 0:
                if j == 0:
                    structure_plot[i, j] = cell
                elif j == len(structure[0]) - 1:
                    structure_plot[i, 2 * j - 1] = cell
                else:
                    structure_plot[i, 2 * j - 1:2 * j + 1] = cell
            elif i == len(structure) - 1:
                if j == 0:
                    structure_plot[2 * i - 1, j] = cell
                elif j == len(structure[0]) - 1:
                    structure_plot[2 * i - 1, 2 * j - 1] = cell
                else:
                    structure_plot[2 * i - 1, 2 * j - 1:2 * j + 1] = cell
            else:
                if j == 0:
                    structure_plot[2 * i - 1:2 * i + 1, j] = cell
                elif j == len(structure[0]) - 1:
                    structure_plot[2 * i - 1:2 * i + 1, 2 * j - 1] = cell
                else:
                    structure_plot[2 * i - 1:2 * i + 1, 2 * j - 1:2 * j + 1] = cell

    fig, ax = plt.subplots()
    # negated so that walls (1) come out dark and free cells (0) light
    im = ax.pcolor(-np.array(structure_plot), cmap='gray', edgecolor='black', linestyle=':', lw=1)
    x_labels = list(range(len(structure[0])))
    y_labels = list(range(len(structure)))
    ax.grid(True)  # remove this to avoid inner grid lines
    ax.xaxis.set(ticks=2 * np.arange(len(x_labels)), ticklabels=x_labels)
    ax.yaxis.set(ticks=2 * np.arange(len(y_labels)), ticklabels=y_labels)

    robot_xy = np.array(self.wrapped_env.get_body_com("torso")[:2])  # the coordinates of this are wrt the init!!
    ori = self.get_ori()  # for Ant this is computed with atan2, which gives [-pi, pi]
    # compute origin cell i_o, j_o coordinates and center of it x_o, y_o (with 0,0 in the top-right corner of struc)
    o_xy = np.array(self._find_robot())  # this is self.init_torso_x, self.init_torso_y !!: center of the cell xy!
    o_ij = (o_xy / size_scaling).astype(int)  # this is the position in the grid (check if correct..)
    o_xy_plot = o_xy / size_scaling * 2
    robot_xy_plot = o_xy_plot + robot_xy / size_scaling * 2
    plt.scatter(*robot_xy_plot)

    # for ray_idx in range(self._n_bins):
    length_wall = self._sensor_range - reading * self._sensor_range if reading else 1e-6
    ray_ori = ori - self._sensor_span * 0.5 + ray_idx / (self._n_bins - 1) * self._sensor_span
    # wrap the ray orientation back into [-pi, pi]
    if ray_ori > math.pi:
        ray_ori -= 2 * math.pi
    elif ray_ori < - math.pi:
        ray_ori += 2 * math.pi
    # find the end point wall
    end_xy = (robot_xy + length_wall * np.array([math.cos(ray_ori), math.sin(ray_ori)]))
    end_xy_plot = (o_ij + end_xy / size_scaling) * 2
    plt.plot([robot_xy_plot[0], end_xy_plot[0]], [robot_xy_plot[1], end_xy_plot[1]], color)

    ax.set_title('sensors debug')
    print('plotting now, close the window')
    # plt.show(fig)
    # plt.close()
def plot_state(self, name='sensors', state=None):
    """Render the maze, the robot, and every wall/goal sensor ray, then save
    the figure into the logger's snapshot directory.

    Wall rays are drawn in red, goal rays in green; each ray's length is
    ``sensor_range * (1 - obs[i])`` with a near-zero fallback (1e-6) for a
    falsy reading, where ``obs`` comes from get_current_maze_obs (first
    ``_n_bins`` entries: walls, next ``_n_bins``: goals).

    :param name: tag used in the plot title and output file name
    :param state: optional state; when truthy, the wrapped env is reset to it
        before reading observations
    """
    if state:
        self.wrapped_env.reset(state)

    structure = self.__class__.MAZE_STRUCTURE
    size_scaling = self.__class__.MAZE_SIZE_SCALING
    # duplicate cells to plot the maze
    structure_plot = np.zeros(((len(structure) - 1) * 2, (len(structure[0]) - 1) * 2))
    for i in range(len(structure)):
        for j in range(len(structure[0])):
            cell = structure[i][j]
            if type(cell) is not int:
                # 'r' (robot start) and 'g' (goal) get distinct gray levels
                cell = 0.3 if cell == 'r' else 0.7
            # border rows/cols map to a single plot cell, interior to two
            if i == 0:
                if j == 0:
                    structure_plot[i, j] = cell
                elif j == len(structure[0]) - 1:
                    structure_plot[i, 2 * j - 1] = cell
                else:
                    structure_plot[i, 2 * j - 1:2 * j + 1] = cell
            elif i == len(structure) - 1:
                if j == 0:
                    structure_plot[2 * i - 1, j] = cell
                elif j == len(structure[0]) - 1:
                    structure_plot[2 * i - 1, 2 * j - 1] = cell
                else:
                    structure_plot[2 * i - 1, 2 * j - 1:2 * j + 1] = cell
            else:
                if j == 0:
                    structure_plot[2 * i - 1:2 * i + 1, j] = cell
                elif j == len(structure[0]) - 1:
                    structure_plot[2 * i - 1:2 * i + 1, 2 * j - 1] = cell
                else:
                    structure_plot[2 * i - 1:2 * i + 1, 2 * j - 1:2 * j + 1] = cell

    fig, ax = plt.subplots()
    # negated so that walls (1) come out dark and free cells (0) light
    im = ax.pcolor(-np.array(structure_plot), cmap='gray', edgecolor='black', linestyle=':', lw=1)
    x_labels = list(range(len(structure[0])))
    y_labels = list(range(len(structure)))
    ax.grid(True)  # remove this to avoid inner grid lines
    ax.xaxis.set(ticks=2 * np.arange(len(x_labels)), ticklabels=x_labels)
    ax.yaxis.set(ticks=2 * np.arange(len(y_labels)), ticklabels=y_labels)

    obs = self.get_current_maze_obs()

    robot_xy = np.array(self.wrapped_env.get_body_com("torso")[:2])  # the coordinates of this are wrt the init
    ori = self.get_ori()  # for Ant this is computed with atan2, which gives [-pi, pi]
    # compute origin cell i_o, j_o coordinates and center of it x_o, y_o (with 0,0 in the top-right corner of struc)
    o_xy = np.array(self._find_robot())  # this is self.init_torso_x, self.init_torso_y: center of the cell xy!
    o_ij = (o_xy / size_scaling).astype(int)  # this is the position in the grid
    o_xy_plot = o_xy / size_scaling * 2
    robot_xy_plot = o_xy_plot + robot_xy / size_scaling * 2
    plt.scatter(*robot_xy_plot)

    for ray_idx in range(self._n_bins):
        # wall reading for this ray: first _n_bins entries of the maze obs
        length_wall = self._sensor_range - obs[ray_idx] * self._sensor_range if obs[ray_idx] else 1e-6
        ray_ori = ori - self._sensor_span * 0.5 + ray_idx / (self._n_bins - 1) * self._sensor_span
        if ray_ori > math.pi:
            ray_ori -= 2 * math.pi
        elif ray_ori < - math.pi:
            ray_ori += 2 * math.pi
        # find the end point wall
        end_xy = (robot_xy + length_wall * np.array([math.cos(ray_ori), math.sin(ray_ori)]))
        end_xy_plot = (o_ij + end_xy / size_scaling) * 2
        plt.plot([robot_xy_plot[0], end_xy_plot[0]], [robot_xy_plot[1], end_xy_plot[1]], 'r')

        # goal reading for this ray: second _n_bins entries of the maze obs
        length_goal = self._sensor_range - obs[ray_idx + self._n_bins] * self._sensor_range if obs[
            ray_idx + self._n_bins] else 1e-6
        # NOTE(review): unlike the wall ray above, ray_ori is recomputed here
        # without the [-pi, pi] wrap; cos/sin are periodic, so the drawn
        # segment is unaffected.
        ray_ori = ori - self._sensor_span * 0.5 + ray_idx / (self._n_bins - 1) * self._sensor_span
        # find the end point goal
        end_xy = (robot_xy + length_goal * np.array([math.cos(ray_ori), math.sin(ray_ori)]))
        end_xy_plot = (o_ij + end_xy / size_scaling) * 2
        plt.plot([robot_xy_plot[0], end_xy_plot[0]], [robot_xy_plot[1], end_xy_plot[1]], 'g')

    log_dir = logger.get_snapshot_dir()
    ax.set_title('sensors: ' + name)
    # '_sesors' typo kept on purpose: changing it would change the output filename
    plt.savefig(osp.join(log_dir, name + '_sesors.png'))  # this saves the current figure, here f
    plt.close()
| 12,779 | 35.618911 | 116 | py |
rllab | rllab-master/rllab/envs/mujoco/maze/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/envs/mujoco/maze/swimmer_maze_env.py | from rllab.envs.mujoco.maze.maze_env import MazeEnv
from rllab.envs.mujoco.swimmer_env import SwimmerEnv
class SwimmerMazeEnv(MazeEnv):
    """Maze navigation task whose agent is the MuJoCo swimmer robot."""

    MODEL_CLASS = SwimmerEnv  # robot env that MazeEnv embeds inside the maze
    ORI_IND = 2  # index of the heading coordinate used by MazeEnv.get_ori -- presumably into qpos; confirm in MazeEnv

    MAZE_HEIGHT = 0.5  # wall height passed to the maze model
    MAZE_SIZE_SCALING = 4  # world units per maze grid cell

    MAZE_MAKE_CONTACTS = True  # enable contact pairs between robot and maze geoms -- presumably; see MazeEnv
| 264 | 17.928571 | 52 | py |
rllab | rllab-master/rllab/envs/mujoco/maze/ant_maze_env.py | from rllab.envs.mujoco.maze.maze_env import MazeEnv
from rllab.envs.mujoco.ant_env import AntEnv
class AntMazeEnv(MazeEnv):
    """Maze navigation task whose agent is the MuJoCo ant robot."""

    MODEL_CLASS = AntEnv  # robot env that MazeEnv embeds inside the maze
    ORI_IND = 6  # index of the heading coordinate used by MazeEnv.get_ori -- presumably into qpos; confirm in MazeEnv

    MAZE_HEIGHT = 2  # wall height passed to the maze model
    MAZE_SIZE_SCALING = 3.0  # world units per maze grid cell
| 218 | 15.846154 | 51 | py |
rllab | rllab-master/rllab/envs/mujoco/hill/walker2d_hill_env.py | import numpy as np
from rllab.envs.mujoco.hill.hill_env import HillEnv
from rllab.envs.mujoco.walker2d_env import Walker2DEnv
from rllab.misc.overrides import overrides
import rllab.envs.mujoco.hill.terrain as terrain
from rllab.spaces import Box
class Walker2DHillEnv(HillEnv):
    """Walker2D robot locomoting over procedurally generated hilly terrain."""

    MODEL_CLASS = Walker2DEnv  # robot env wrapped by HillEnv

    @overrides
    def _mod_hfield(self, hfield):
        # clear a flat patch for the robot to start off from
        return terrain.clear_patch(hfield, Box(np.array([-2.0, -2.0]), np.array([-0.5, -0.5])))
rllab | rllab-master/rllab/envs/mujoco/hill/swimmer3d_hill_env.py | import numpy as np
from rllab.envs.mujoco.hill.hill_env import HillEnv
from rllab.envs.mujoco.swimmer3d_env import Swimmer3DEnv
from rllab.misc.overrides import overrides
import rllab.envs.mujoco.hill.terrain as terrain
from rllab.spaces import Box
class Swimmer3DHillEnv(HillEnv):
    """3D swimmer robot locomoting over procedurally generated hilly terrain."""

    MODEL_CLASS = Swimmer3DEnv  # robot env wrapped by HillEnv

    @overrides
    def _mod_hfield(self, hfield):
        # clear a flat patch for the robot to start off from
        return terrain.clear_patch(hfield, Box(np.array([-3.0, -1.5]), np.array([0.0, -0.5])))
rllab | rllab-master/rllab/envs/mujoco/hill/hopper_hill_env.py | import numpy as np
from rllab.envs.mujoco.hill.hill_env import HillEnv
from rllab.envs.mujoco.hopper_env import HopperEnv
from rllab.misc.overrides import overrides
import rllab.envs.mujoco.hill.terrain as terrain
from rllab.spaces import Box
class HopperHillEnv(HillEnv):
    """Hopper robot locomoting over procedurally generated hilly terrain."""

    MODEL_CLASS = HopperEnv  # robot env wrapped by HillEnv

    @overrides
    def _mod_hfield(self, hfield):
        # clear a flat patch for the robot to start off from
        return terrain.clear_patch(hfield, Box(np.array([-1.0, -1.0]), np.array([-0.5, -0.5])))
rllab | rllab-master/rllab/envs/mujoco/hill/terrain.py | from scipy.stats import multivariate_normal
from scipy.signal import convolve2d
import matplotlib
try:
    # Probe whether a working interactive backend is available; fall back to
    # the headless Agg backend otherwise (e.g. on display-less workers).
    # NOTE(review): `import matplotlib` alone does not import the
    # `matplotlib.pyplot` submodule, so unless pyplot was already imported
    # elsewhere this probe raises AttributeError and the fallback is taken.
    matplotlib.pyplot.figure()
    matplotlib.pyplot.close()
except Exception:
    matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os

# the colormap should assign light colors to low values
TERRAIN_CMAP = 'Greens'
# default output directory for generated heightfield/texture images
DEFAULT_PATH = '/tmp/mujoco_terrains'
# grid spacing of the generated height field (world units per cell)
STEP = 0.1
def generate_hills(width, height, nhills):
    '''
    Generate a random hilly height field as a mixture of Gaussian bumps.

    @param width float, terrain width
    @param height float, terrain height
    @param nhills int, #hills to gen. #hills actually generated is sqrt(nhills)^2
    @return (x, y, hfield): coordinate grids (spacing STEP, roughly
        width/STEP by height/STEP points) and the height at each grid point
    '''
    # setup coordinate grid
    xmin, xmax = -width/2.0, width/2.0
    ymin, ymax = -height/2.0, height/2.0
    x, y = np.mgrid[xmin:xmax:STEP, ymin:ymax:STEP]
    pos = np.empty(x.shape + (2,))
    pos[:, :, 0] = x; pos[:, :, 1] = y

    # generate hilltops: start from a regular grid of means, then jitter each
    # mean by sampling around it with (scalar) covariance `sigma`
    xm, ym = np.mgrid[xmin:xmax:width/np.sqrt(nhills), ymin:ymax:height/np.sqrt(nhills)]
    mu = np.c_[xm.flat, ym.flat]
    sigma = float(width*height)/(nhills*8)
    for i in range(mu.shape[0]):
        mu[i] = multivariate_normal.rvs(mean=mu[i], cov=sigma)

    # generate hills: give each hill a random covariance in [sigma, 2*sigma);
    # the field value at each point is the MAX over hill densities, not the sum
    sigma = sigma + sigma*np.random.rand(mu.shape[0])
    rvs = [ multivariate_normal(mu[i,:], cov=sigma[i]) for i in range(mu.shape[0]) ]
    hfield = np.max([ rv.pdf(pos) for rv in rvs ], axis=0)

    return x, y, hfield
def clear_patch(hfield, box):
    ''' Clears a patch shaped like box, assuming robot is placed in center of hfield.

    The patch is zeroed in place and its surroundings are smoothed with a
    10x10 box filter so cut-off hills do not leave sharp cliffs.

    @param hfield: 2D numpy array, modified in place (and also returned)
    @param box: rllab.spaces.Box-like with `flat_dim`, `low`, `high`; extents
        are interpreted in world units and converted to cells via STEP
    '''
    if box.flat_dim > 2:
        raise ValueError("Provide 2dim box")

    # clear patch
    # NOTE(review): h_center is derived from shape[0] but used for the column
    # ("col") indices below, while w_center (shape[1]) is used for rows. This
    # is harmless for the square fields generated here, but looks transposed
    # for non-square fields -- confirm before reusing with non-square terrain.
    h_center = int(0.5 * hfield.shape[0])
    w_center = int(0.5 * hfield.shape[1])
    fromrow, torow = w_center + int(box.low[0]/STEP), w_center + int(box.high[0] / STEP)
    fromcol, tocol = h_center + int(box.low[1]/STEP), h_center + int(box.high[1] / STEP)
    hfield[fromrow:torow, fromcol:tocol] = 0.0

    # convolve to smoothen edges somewhat, in case hills were cut off
    K = np.ones((10,10)) / 100.0
    s = convolve2d(hfield[fromrow-9:torow+9, fromcol-9:tocol+9], K, mode='same', boundary='symm')
    hfield[fromrow-9:torow+9, fromcol-9:tocol+9] = s

    return hfield
def _checkpath(path_):
if path_ is None:
path_ = DEFAULT_PATH
if not os.path.exists(path_):
os.makedirs(path_)
return path_
def save_heightfield(x, y, hfield, fname, path=None):
    '''
    Save the height field as an image usable as a MuJoCo hfield asset.

    @param x, y: coordinate grids as returned by generate_hills
    @param hfield: 2D height array
    @param fname: output file name
    @param path, str (optional). If not provided, DEFAULT_PATH is used. Make sure the path + fname match the <file> attribute
    of the <asset> element in the env XML where the height field is defined
    '''
    path = _checkpath(path)
    plt.figure()
    # hfield is negated: with the Greens colormap this gives hilltops light colors
    plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP)  # terrain_cmap is necessary to make sure tops get light color
    plt.savefig(os.path.join(path, fname), bbox_inches='tight')
    plt.close()
def save_texture(x, y, hfield, fname, path=None):
    '''
    Save a texture image of the terrain with manually drawn gridlines.

    @param x, y: coordinate grids as returned by generate_hills
    @param hfield: 2D height array
    @param fname: output file name
    @param path, str (optional). If not provided, DEFAULT_PATH is used. Make sure this matches the <texturedir> of the
    <compiler> element in the env XML
    '''
    path = _checkpath(path)
    plt.figure()
    plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP)
    xmin, xmax = x.min(), x.max()
    ymin, ymax = y.min(), y.max()
    # for some reason plt.grid does not work here, so generate gridlines manually
    for i in np.arange(xmin,xmax,0.5):
        plt.plot([i,i], [ymin,ymax], 'k', linewidth=0.1)
    for i in np.arange(ymin,ymax,0.5):
        plt.plot([xmin,xmax],[i,i], 'k', linewidth=0.1)
    plt.savefig(os.path.join(path, fname), bbox_inches='tight')
    plt.close()
rllab | rllab-master/rllab/envs/mujoco/hill/half_cheetah_hill_env.py | import numpy as np
from rllab.envs.mujoco.hill.hill_env import HillEnv
from rllab.envs.mujoco.half_cheetah_env import HalfCheetahEnv
from rllab.misc.overrides import overrides
import rllab.envs.mujoco.hill.terrain as terrain
from rllab.spaces import Box
class HalfCheetahHillEnv(HillEnv):
    """Half-cheetah robot locomoting over procedurally generated hilly terrain."""

    MODEL_CLASS = HalfCheetahEnv  # robot env wrapped by HillEnv

    @overrides
    def _mod_hfield(self, hfield):
        # clear a flat patch for the robot to start off from
        return terrain.clear_patch(hfield, Box(np.array([-3.0, -1.5]), np.array([0.0, -0.5])))
rllab | rllab-master/rllab/envs/mujoco/hill/hill_env.py | import tempfile
import os
import time
import mako.template
import mako.lookup
from rllab.envs.proxy_env import ProxyEnv
from rllab.core.serializable import Serializable
import rllab.envs.mujoco.mujoco_env as mujoco_env
import rllab.envs.mujoco.hill.terrain as terrain
from rllab.misc import logger
MODEL_DIR = mujoco_env.MODEL_DIR
class HillEnv(ProxyEnv, Serializable):
    """Base class for 'hill' tasks: wraps a MuJoCo robot env (MODEL_CLASS)
    and places it on a procedurally generated hilly height field.

    The height field and texture images are generated once and shared by
    parallel workers: a lock file guards generation, and other workers wait
    for it to disappear. The robot's XML model is rendered from a mako
    template that references the generated height-field image.
    """

    HFIELD_FNAME = 'hills.png'  # height-field image referenced by the model XML
    TEXTURE_FNAME = 'hills_texture.png'  # terrain texture image
    MIN_DIFFICULTY = 0.05  # lower bound applied to the difficulty parameter

    def __init__(self,
                 difficulty=1.0,
                 texturedir='/tmp/mujoco_textures',
                 hfield_dir='/tmp/mujoco_terrains',
                 regen_terrain=True,
                 *args, **kwargs):
        """
        :param difficulty: terrain difficulty, clipped below at MIN_DIFFICULTY
        :param texturedir: directory for the texture image (must match the
            <texturedir> of the <compiler> element in the env XML)
        :param hfield_dir: directory for the height-field image
        :param regen_terrain: if True, the terrain is regenerated even if one
            already exists on disk
        Remaining args/kwargs are forwarded to MODEL_CLASS.
        """
        Serializable.quick_init(self, locals())

        self.difficulty = max(difficulty, self.MIN_DIFFICULTY)
        self.texturedir = texturedir
        self.hfield_dir = hfield_dir

        model_cls = self.__class__.MODEL_CLASS
        if model_cls is None:
            # was `raise "MODEL_CLASS unspecified!"`: raising a plain string is a
            # TypeError in Python 3 and would mask the real problem
            raise NotImplementedError("MODEL_CLASS unspecified!")
        template_file_name = 'hill_' + model_cls.__module__.split('.')[-1] + '.xml.mako'
        template_options = dict(
            difficulty=self.difficulty,
            texturedir=self.texturedir,
            hfield_file=os.path.join(self.hfield_dir, self.HFIELD_FNAME))
        file_path = os.path.join(MODEL_DIR, template_file_name)
        lookup = mako.lookup.TemplateLookup(directories=[MODEL_DIR])
        with open(file_path) as template_file:
            template = mako.template.Template(
                template_file.read(), lookup=lookup)
        content = template.render(opts=template_options)

        # write the rendered model XML to a temp file handed to the inner env
        tmp_f, file_path = tempfile.mkstemp(text=True)
        with open(file_path, 'w') as f:
            f.write(content)

        if self._iam_terrain_generator(regen_terrain):
            self._gen_terrain(regen_terrain)
            # generation done: release the lock so waiting workers proceed
            os.remove(self._get_lock_path())

        inner_env = model_cls(*args, file_path=file_path, **kwargs)  # file to the robot specifications
        ProxyEnv.__init__(self, inner_env)  # here is where the robot env will be initialized
        os.close(tmp_f)

    def _get_lock_path(self):
        """Path of the lock file that marks terrain generation in progress."""
        return os.path.join(self.hfield_dir, '.lock')

    def _iam_terrain_generator(self, regen):
        ''' When parallel processing, don't want each worker to generate its own terrain. This method ensures that
        one worker generates the terrain, which is then used by other workers.

        It's still possible to have each worker use their own terrain by passing each worker a different hfield and
        texture dir.

        :return: True iff this process acquired the lock and must generate the
            terrain (the caller is then responsible for removing the lock file).
        :raises RuntimeError: if waiting for another worker's generation times
            out (or a stale lock file is left behind).
        '''
        if not os.path.exists(self.hfield_dir):
            os.makedirs(self.hfield_dir)
        terrain_path = os.path.join(self.hfield_dir, self.HFIELD_FNAME)
        lock_path = self._get_lock_path()
        # (simplified from `regen or (not regen and not exists)` -- equivalent)
        if regen or not os.path.exists(terrain_path):
            # use a simple lock file to prevent different workers overwriting the file,
            # and/or running their own unique terrains
            if not os.path.exists(lock_path):
                with open(lock_path, 'w') as f:
                    f.write(str(os.getpid()))
                return True
            else:
                # wait for the worker that's generating the terrain to finish
                total = 0
                logger.log("Process {0} waiting for terrain generation...".format(os.getpid()))
                while os.path.exists(lock_path) and total < 120:
                    time.sleep(5)
                    total += 5
                if os.path.exists(lock_path):
                    # was a bare string raise (TypeError in Python 3)
                    raise RuntimeError(
                        "Process {0} timed out waiting for terrain generation, or stale lock file".format(
                            os.getpid()))
                logger.log("Done.")
        return False

    def _gen_terrain(self, regen=True):
        """Generate the height-field and texture images used by the model XML."""
        logger.log("Process {0} generating terrain...".format(os.getpid()))
        x, y, hfield = terrain.generate_hills(40, 40, 500)
        hfield = self._mod_hfield(hfield)
        terrain.save_heightfield(x, y, hfield, self.HFIELD_FNAME, path=self.hfield_dir)
        terrain.save_texture(x, y, hfield, self.TEXTURE_FNAME, path=self.texturedir)
        logger.log("Generated.")

    def _mod_hfield(self, hfield):
        '''Subclasses can override this to modify hfield (e.g. clear a flat start patch).'''
        return hfield

    def get_current_obs(self):
        """Delegate observation to the wrapped robot env."""
        return self._wrapped_env.get_current_obs()
| 4,488 | 39.809091 | 128 | py |
rllab | rllab-master/rllab/envs/mujoco/hill/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/envs/mujoco/hill/ant_hill_env.py | import numpy as np
from rllab.envs.mujoco.hill.hill_env import HillEnv
from rllab.envs.mujoco.ant_env import AntEnv
from rllab.misc.overrides import overrides
import rllab.envs.mujoco.hill.terrain as terrain
from rllab.spaces import Box
class AntHillEnv(HillEnv):
    """Ant robot locomoting over procedurally generated hilly terrain."""

    MODEL_CLASS = AntEnv  # robot env wrapped by HillEnv

    @overrides
    def _mod_hfield(self, hfield):
        # clear a flat patch for the robot to start off from
        return terrain.clear_patch(hfield, Box(np.array([-2.0, -2.0]), np.array([0.0, 0.0])))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.