Dataset columns:

| column | type | values |
|---|---|---|
| id | int32 | 0 - 252k |
| repo | string | lengths 7 - 55 |
| path | string | lengths 4 - 127 |
| func_name | string | lengths 1 - 88 |
| original_string | string | lengths 75 - 19.8k |
| language | string | 1 class |
| code | string | lengths 75 - 19.8k |
| code_tokens | list | tokenized `code` |
| docstring | string | lengths 3 - 17.3k |
| docstring_tokens | list | tokenized `docstring` |
| sha | string | length 40 |
| url | string | lengths 87 - 242 |

6,800 | google-research/batch-ppo | agents/scripts/utility.py | define_saver | python

def define_saver(exclude=None):
"""Create a saver for the variables we want to checkpoint.
Args:
exclude: List of regexes to match variable names to exclude.
Returns:
Saver object.
"""
variables = []
exclude = exclude or []
exclude = [re.compile(regex) for regex in exclude]
for variable in tf.global_variables():
if any(regex.match(variable.name) for regex in exclude):
continue
variables.append(variable)
saver = tf.train.Saver(variables, keep_checkpoint_every_n_hours=5)
return saver

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L80-L97

6,801 | google-research/batch-ppo | agents/scripts/utility.py | initialize_variables | python

def initialize_variables(sess, saver, logdir, checkpoint=None, resume=None):
"""Initialize or restore variables from a checkpoint if available.
Args:
sess: Session to initialize variables in.
saver: Saver to restore variables.
logdir: Directory to search for checkpoints.
checkpoint: Specify what checkpoint name to use; defaults to most recent.
resume: Whether to expect recovering a checkpoint or starting a new run.
Raises:
ValueError: If resume expected but no log directory specified.
RuntimeError: If no resume expected but a checkpoint was found.
"""
sess.run(tf.group(
tf.local_variables_initializer(),
tf.global_variables_initializer()))
if resume and not (logdir or checkpoint):
raise ValueError('Need to specify logdir to resume a checkpoint.')
if logdir:
state = tf.train.get_checkpoint_state(logdir)
if checkpoint:
checkpoint = os.path.join(logdir, checkpoint)
if not checkpoint and state and state.model_checkpoint_path:
checkpoint = state.model_checkpoint_path
if checkpoint and resume is False:
message = 'Found unexpected checkpoint when starting a new run.'
raise RuntimeError(message)
if checkpoint:
saver.restore(sess, checkpoint)

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L100-L129
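
The two checkpoint utilities above are meant to be used together when starting or restoring a run. A minimal usage sketch, assuming TensorFlow 1.x, an already-built graph, and the import path from the `path` column above; the exclude pattern and `./logdir` are illustrative:

```python
import tensorflow as tf
from agents.scripts.utility import define_saver, initialize_variables

# ... build the model graph here so tf.global_variables() is populated ...

# Exclude throwaway variables from checkpointing (pattern is illustrative).
saver = define_saver(exclude=(r'.*_temporary.*',))
with tf.Session() as sess:
  # Runs the initializers, then restores the most recent checkpoint found in
  # ./logdir if there is one; resume=None means "restore only if available".
  initialize_variables(sess, saver, './logdir')
```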

6,802 | google-research/batch-ppo | agents/scripts/utility.py | save_config | python

def save_config(config, logdir=None):
"""Save a new configuration by name.
  If a logging directory is specified, it will be created and the configuration
will be stored there. Otherwise, a log message will be printed.
Args:
config: Configuration object.
logdir: Location for writing summaries and checkpoints if specified.
Returns:
Configuration object.
"""
if logdir:
with config.unlocked:
config.logdir = logdir
message = 'Start a new run and write summaries and checkpoints to {}.'
tf.logging.info(message.format(config.logdir))
tf.gfile.MakeDirs(config.logdir)
config_path = os.path.join(config.logdir, 'config.yaml')
with tf.gfile.FastGFile(config_path, 'w') as file_:
yaml.dump(config, file_, default_flow_style=False)
else:
message = (
'Start a new run without storing summaries and checkpoints since no '
'logging directory was specified.')
tf.logging.info(message)
return config

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L132-L159

6,803 | google-research/batch-ppo | agents/scripts/utility.py | load_config | python

def load_config(logdir):
# pylint: disable=missing-raises-doc
"""Load a configuration from the log directory.
Args:
logdir: The logging directory containing the configuration file.
Raises:
IOError: The logging directory does not contain a configuration file.
Returns:
Configuration object.
"""
config_path = logdir and os.path.join(logdir, 'config.yaml')
if not config_path or not tf.gfile.Exists(config_path):
message = (
'Cannot resume an existing run since the logging directory does not '
'contain a configuration file.')
raise IOError(message)
with tf.gfile.FastGFile(config_path, 'r') as file_:
config = yaml.load(file_, Loader=yaml.Loader)
message = 'Resume run and write summaries and checkpoints to {}.'
tf.logging.info(message.format(config.logdir))
return config

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L162-L185
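
`save_config` and `load_config` expect a config object with attribute access and an `unlocked` context manager; the repo provides such a container, but it is not shown in this file. A hedged round-trip sketch using a stand-in class so the required interface is explicit; everything here is illustrative, not the repo's actual config type:

```python
import contextlib
from agents.scripts.utility import save_config, load_config

class _Config(object):
  """Stand-in for the repo's config container: plain attributes plus the
  `unlocked` context manager that save_config() enters before assigning."""

  def __init__(self, **kwargs):
    self.__dict__.update(kwargs)

  @property
  def unlocked(self):
    return contextlib.nullcontext()

config = save_config(_Config(env='Pendulum-v0', max_length=200), '/tmp/run')
restored = load_config('/tmp/run')  # parses /tmp/run/config.yaml back
print(restored.logdir)              # /tmp/run
```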

6,804 | google-research/batch-ppo | agents/scripts/utility.py | set_up_logging | python

def set_up_logging():
"""Configure the TensorFlow logger."""
tf.logging.set_verbosity(tf.logging.INFO)
logging.getLogger('tensorflow').propagate = False

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/utility.py#L188-L191

6,805 | google-research/batch-ppo | agents/scripts/visualize.py | _define_loop | python

def _define_loop(graph, eval_steps):
"""Create and configure an evaluation loop.
Args:
graph: Object providing graph elements via attributes.
eval_steps: Number of evaluation steps per epoch.
Returns:
Loop object.
"""
loop = tools.Loop(
None, graph.step, graph.should_log, graph.do_report, graph.force_reset)
loop.add_phase(
'eval', graph.done, graph.score, graph.summary, eval_steps,
report_every=eval_steps,
log_every=None,
checkpoint_every=None,
feed={graph.is_training: False})
return loop

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/visualize.py#L74-L92

6,806 | google-research/batch-ppo | agents/scripts/visualize.py | visualize | python

def visualize(
logdir, outdir, num_agents, num_episodes, checkpoint=None,
env_processes=True):
"""Recover checkpoint and render videos from it.
Args:
logdir: Logging directory of the trained algorithm.
outdir: Directory to store rendered videos in.
num_agents: Number of environments to simulate in parallel.
num_episodes: Total number of episodes to simulate.
checkpoint: Checkpoint name to load; defaults to most recent.
env_processes: Whether to step environments in separate processes.
"""
config = utility.load_config(logdir)
with tf.device('/cpu:0'):
batch_env = utility.define_batch_env(
lambda: _create_environment(config, outdir),
num_agents, env_processes)
graph = utility.define_simulation_graph(
batch_env, config.algorithm, config)
total_steps = num_episodes * config.max_length
loop = _define_loop(graph, total_steps)
saver = utility.define_saver(
exclude=(r'.*_temporary.*', r'global_step'))
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
utility.initialize_variables(
sess, saver, config.logdir, checkpoint, resume=True)
for unused_score in loop.run(sess, saver, total_steps):
pass
batch_env.close()

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/visualize.py#L95-L126

6,807 | google-research/batch-ppo | agents/scripts/visualize.py | main | python

def main(_):
"""Load a trained algorithm and render videos."""
utility.set_up_logging()
if not FLAGS.logdir or not FLAGS.outdir:
raise KeyError('You must specify logging and outdirs directories.')
FLAGS.logdir = os.path.expanduser(FLAGS.logdir)
FLAGS.outdir = os.path.expanduser(FLAGS.outdir)
visualize(
FLAGS.logdir, FLAGS.outdir, FLAGS.num_agents, FLAGS.num_episodes,
FLAGS.checkpoint, FLAGS.env_processes)

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/visualize.py#L129-L138

6,808 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | reinit_nested_vars | python

def reinit_nested_vars(variables, indices=None):
"""Reset all variables in a nested tuple to zeros.
Args:
variables: Nested tuple or list of variables.
indices: Batch indices to reset, defaults to all.
Returns:
Operation.
"""
if isinstance(variables, (tuple, list)):
return tf.group(*[
reinit_nested_vars(variable, indices) for variable in variables])
if indices is None:
return variables.assign(tf.zeros_like(variables))
else:
zeros = tf.zeros([tf.shape(indices)[0]] + variables.shape[1:].as_list())
return tf.scatter_update(variables, indices, zeros)

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L28-L45

6,809 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | assign_nested_vars | python

def assign_nested_vars(variables, tensors, indices=None):
"""Assign tensors to matching nested tuple of variables.
Args:
variables: Nested tuple or list of variables to update.
tensors: Nested tuple or list of tensors to assign.
indices: Batch indices to assign to; default to all.
Returns:
Operation.
"""
if isinstance(variables, (tuple, list)):
return tf.group(*[
assign_nested_vars(variable, tensor)
for variable, tensor in zip(variables, tensors)])
if indices is None:
return variables.assign(tensors)
else:
return tf.scatter_update(variables, indices, tensors)

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L48-L66
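
Both nested-variable helpers exist to manage recurrent state (for example an LSTM `(c, h)` pair) stored as one variable row per environment in the batch. A hedged TensorFlow 1.x sketch; the shapes, names, and import path (taken from the `path` column) are illustrative:

```python
import tensorflow as tf
from agents.algorithms.ppo.utility import assign_nested_vars, reinit_nested_vars

# One (c, h) state pair for a batch of 4 environments with 8 units each.
state = (tf.Variable(tf.zeros([4, 8])), tf.Variable(tf.zeros([4, 8])))

# Zero the state rows of environments 1 and 3, e.g. episodes that just ended.
reset_done = reinit_nested_vars(state, indices=tf.constant([1, 3]))

# Overwrite the whole nested state with freshly computed tensors.
new_state = (tf.ones([4, 8]), tf.ones([4, 8]))
assign_all = assign_nested_vars(state, new_state)
```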

6,810 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | discounted_return | python

def discounted_return(reward, length, discount):
"""Discounted Monte-Carlo returns."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
return_ = tf.reverse(tf.transpose(tf.scan(
lambda agg, cur: cur + discount * agg,
tf.transpose(tf.reverse(mask * reward, [1]), [1, 0]),
tf.zeros_like(reward[:, -1]), 1, False), [1, 0]), [1])
return tf.check_numerics(tf.stop_gradient(return_), 'return')

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L69-L77
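
The reversed `tf.scan` above implements the backward recursion R[t] = reward[t] + discount * R[t+1] over the valid (masked) part of each sequence. A plain NumPy reference for a single episode, handy for sanity-checking the TensorFlow version; this sketch is illustrative and not part of the repo:

```python
import numpy as np

def discounted_return_np(reward, discount):
  """R[t] = reward[t] + discount * R[t + 1], computed back to front."""
  return_ = np.zeros(len(reward))
  running = 0.0
  for t in reversed(range(len(reward))):
    running = reward[t] + discount * running
    return_[t] = running
  return return_

print(discounted_return_np(np.array([1.0, 1.0, 1.0]), 0.9))  # [2.71 1.9  1.  ]
```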

6,811 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | fixed_step_return | python

def fixed_step_return(reward, value, length, discount, window):
"""N-step discounted return."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
return_ = tf.zeros_like(reward)
for _ in range(window):
return_ += reward
reward = discount * tf.concat(
[reward[:, 1:], tf.zeros_like(reward[:, -1:])], 1)
return_ += discount ** window * tf.concat(
[value[:, window:], tf.zeros_like(value[:, -window:])], 1)
return tf.check_numerics(tf.stop_gradient(mask * return_), 'return')

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L80-L91

6,812 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | lambda_return | python

def lambda_return(reward, value, length, discount, lambda_):
"""TD-lambda returns."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
sequence = mask * reward + discount * value * (1 - lambda_)
discount = mask * discount * lambda_
sequence = tf.stack([sequence, discount], 2)
return_ = tf.reverse(tf.transpose(tf.scan(
lambda agg, cur: cur[0] + cur[1] * agg,
tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]),
tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1])
return tf.check_numerics(tf.stop_gradient(return_), 'return')

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L94-L105
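
Written out, the recursion that this scan evaluates is R[t] = reward[t] + discount * (1 - lambda) * value[t] + discount * lambda * R[t+1]; with lambda = 1 the value term drops out and it reduces to the plain discounted return above. A NumPy reference for one full-length episode (illustrative, not part of the repo):

```python
import numpy as np

def lambda_return_np(reward, value, discount, lambda_):
  """Mirror of the tf.scan recursion in lambda_return for a single episode."""
  return_ = np.zeros(len(reward))
  running = 0.0
  for t in reversed(range(len(reward))):
    running = (reward[t] + discount * (1 - lambda_) * value[t]
               + discount * lambda_ * running)
    return_[t] = running
  return return_
```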

6,813 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | lambda_advantage | python

def lambda_advantage(reward, value, length, discount, gae_lambda):
"""Generalized Advantage Estimation."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
next_value = tf.concat([value[:, 1:], tf.zeros_like(value[:, -1:])], 1)
delta = reward + discount * next_value - value
advantage = tf.reverse(tf.transpose(tf.scan(
lambda agg, cur: cur + gae_lambda * discount * agg,
tf.transpose(tf.reverse(mask * delta, [1]), [1, 0]),
tf.zeros_like(delta[:, -1]), 1, False), [1, 0]), [1])
return tf.check_numerics(tf.stop_gradient(advantage), 'advantage')

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L108-L118
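
`lambda_advantage` is Generalized Advantage Estimation: delta[t] = reward[t] + discount * value[t+1] - value[t], with the value past the end of the sequence treated as zero, and A[t] = delta[t] + discount * gae_lambda * A[t+1]. A NumPy reference for one episode (illustrative, not part of the repo):

```python
import numpy as np

def gae_np(reward, value, discount, gae_lambda):
  """A[t] = delta[t] + discount * gae_lambda * A[t + 1] for a single episode."""
  next_value = np.append(value[1:], 0.0)  # value[T] := 0 past the end
  delta = reward + discount * next_value - value
  advantage = np.zeros(len(reward))
  running = 0.0
  for t in reversed(range(len(reward))):
    running = delta[t] + discount * gae_lambda * running
    advantage[t] = running
  return advantage
```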

6,814 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | available_gpus | python

def available_gpus():
"""List of GPU device names detected by TensorFlow."""
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L121-L124

6,815 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | gradient_summaries | python

def gradient_summaries(grad_vars, groups=None, scope='gradients'):
"""Create histogram summaries of the gradient.
Summaries can be grouped via regexes matching variables names.
Args:
grad_vars: List of (gradient, variable) tuples as returned by optimizers.
groups: Mapping of name to regex for grouping summaries.
scope: Name scope for this operation.
Returns:
Summary tensor.
"""
groups = groups or {r'all': r'.*'}
grouped = collections.defaultdict(list)
for grad, var in grad_vars:
if grad is None:
continue
for name, pattern in groups.items():
if re.match(pattern, var.name):
name = re.sub(pattern, name, var.name)
grouped[name].append(grad)
for name in groups:
if name not in grouped:
tf.logging.warn("No variables matching '{}' group.".format(name))
summaries = []
for name, grads in grouped.items():
grads = [tf.reshape(grad, [-1]) for grad in grads]
grads = tf.concat(grads, 0)
summaries.append(tf.summary.histogram(scope + '/' + name, grads))
return tf.summary.merge(summaries)

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L127-L157

6,816 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | variable_summaries | python

def variable_summaries(vars_, groups=None, scope='weights'):
"""Create histogram summaries for the provided variables.
Summaries can be grouped via regexes matching variables names.
Args:
vars_: List of variables to summarize.
groups: Mapping of name to regex for grouping summaries.
scope: Name scope for this operation.
Returns:
Summary tensor.
"""
groups = groups or {r'all': r'.*'}
grouped = collections.defaultdict(list)
for var in vars_:
for name, pattern in groups.items():
if re.match(pattern, var.name):
name = re.sub(pattern, name, var.name)
grouped[name].append(var)
for name in groups:
if name not in grouped:
tf.logging.warn("No variables matching '{}' group.".format(name))
summaries = []
# pylint: disable=redefined-argument-from-local
for name, vars_ in grouped.items():
vars_ = [tf.reshape(var, [-1]) for var in vars_]
vars_ = tf.concat(vars_, 0)
summaries.append(tf.summary.histogram(scope + '/' + name, vars_))
return tf.summary.merge(summaries)

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L160-L189
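
Both summary helpers take the same `groups` mapping from summary name to a regex over variable names; the `weight_summaries` dict in `configs.default()` further down is exactly such a mapping. A hedged usage sketch, with scope names and patterns chosen for illustration:

```python
import tensorflow as tf
from agents.algorithms.ppo.utility import variable_summaries

# Group weight histograms by sub-network. Variables matching no pattern are
# simply not summarized, and a group with no matches only triggers a warning.
summary_op = variable_summaries(
    tf.trainable_variables(),
    groups=dict(policy=r'.*/policy/.*', value=r'.*/value/.*'),
    scope='weights')
```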

6,817 | google-research/batch-ppo | agents/algorithms/ppo/utility.py | set_dimension | python

def set_dimension(tensor, axis, value):
"""Set the length of a tensor along the specified dimension.
Args:
tensor: Tensor to define shape of.
axis: Dimension to set the static shape for.
value: Integer holding the length.
Raises:
ValueError: When the tensor already has a different length specified.
"""
shape = tensor.shape.as_list()
if shape[axis] not in (value, None):
message = 'Cannot set dimension {} of tensor {} to {}; is already {}.'
raise ValueError(message.format(axis, tensor.name, value, shape[axis]))
shape[axis] = value
tensor.set_shape(shape)

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L192-L208
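
A short illustration of when `set_dimension` fills in an unknown length and when it raises; the placeholder shape is illustrative:

```python
import tensorflow as tf
from agents.algorithms.ppo.utility import set_dimension

tensor = tf.placeholder(tf.float32, [None, None, 3])
set_dimension(tensor, 1, 100)  # pins the unknown second dimension to 100
set_dimension(tensor, 2, 3)    # already 3, so this is a no-op
set_dimension(tensor, 2, 4)    # raises ValueError: length is already 3
```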

6,818 | google-research/batch-ppo | agents/scripts/configs.py | default | python

def default():
"""Default configuration for PPO."""
# General
algorithm = algorithms.PPO
num_agents = 30
eval_episodes = 30
use_gpu = False
# Environment
normalize_ranges = True
# Network
network = networks.feed_forward_gaussian
weight_summaries = dict(
all=r'.*', policy=r'.*/policy/.*', value=r'.*/value/.*')
policy_layers = 200, 100
value_layers = 200, 100
init_output_factor = 0.1
init_std = 0.35
# Optimization
update_every = 30
update_epochs = 25
optimizer = tf.train.AdamOptimizer
learning_rate = 1e-4
# Losses
discount = 0.995
kl_target = 1e-2
kl_cutoff_factor = 2
kl_cutoff_coef = 1000
kl_init_penalty = 1
return locals()

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/configs.py#L29-L57

6,819 | google-research/batch-ppo | agents/scripts/configs.py | pendulum | python

def pendulum():
"""Configuration for the pendulum classic control task."""
locals().update(default())
# Environment
env = 'Pendulum-v0'
max_length = 200
steps = 1e6 # 1M
# Optimization
batch_size = 20
chunk_length = 50
return locals()

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/configs.py#L60-L70
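
These configuration functions lean on a CPython detail: the dict returned by `locals()` is the frame's cached locals mapping, so the keys merged in by `locals().update(default())` survive until the final `return locals()`, while assignments made inside the function overwrite same-named defaults. The result is a plain dict. A hedged sketch; mutating `locals()` this way is a CPython implementation detail:

```python
from agents.scripts.configs import pendulum

config = pendulum()          # plain dict: defaults merged with overrides
print(config['env'])         # 'Pendulum-v0'  (set in pendulum())
print(config['discount'])    # 0.995          (inherited from default())
print(config['num_agents'])  # 30             (inherited from default())
```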

6,820 | google-research/batch-ppo | agents/scripts/configs.py | cartpole | python

def cartpole():
"""Configuration for the cart pole classic control task."""
locals().update(default())
# Environment
env = 'CartPole-v1'
max_length = 500
steps = 2e5 # 200k
normalize_ranges = False # The env reports wrong ranges.
# Network
network = networks.feed_forward_categorical
return locals()

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/configs.py#L73-L83

6,821 | google-research/batch-ppo | agents/scripts/configs.py | reacher | python

def reacher():
"""Configuration for MuJoCo's reacher task."""
locals().update(default())
# Environment
env = 'Reacher-v2'
max_length = 1000
steps = 5e6 # 5M
discount = 0.985
update_every = 60
return locals()

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/configs.py#L86-L95

6,822 | google-research/batch-ppo | agents/scripts/configs.py | bullet_ant | python

def bullet_ant():
"""Configuration for PyBullet's ant task."""
locals().update(default())
# Environment
import pybullet_envs # noqa pylint: disable=unused-import
env = 'AntBulletEnv-v0'
max_length = 1000
steps = 3e7 # 30M
update_every = 60
return locals()

3d09705977bae4e7c3eb20339a3b384d2a5531e4 | https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/configs.py#L151-L160

6,823 | google-research/batch-ppo | agents/tools/batch_env.py | BatchEnv.step | python

def step(self, actions):
"""Forward a batch of actions to the wrapped environments.
Args:
actions: Batched action to apply to the environment.
Raises:
ValueError: Invalid actions.
Returns:
Batch of observations, rewards, and done flags.
"""
for index, (env, action) in enumerate(zip(self._envs, actions)):
if not env.action_space.contains(action):
message = 'Invalid action at index {}: {}'
raise ValueError(message.format(index, action))
if self._blocking:
transitions = [
env.step(action)
for env, action in zip(self._envs, actions)]
else:
transitions = [
env.step(action, blocking=False)
for env, action in zip(self._envs, actions)]
transitions = [transition() for transition in transitions]
observs, rewards, dones, infos = zip(*transitions)
observ = np.stack(observs)
reward = np.stack(rewards)
done = np.stack(dones)
info = tuple(infos)
return observ, reward, done, info
|
python
|
def step(self, actions):
"""Forward a batch of actions to the wrapped environments.
Args:
actions: Batched action to apply to the environment.
Raises:
ValueError: Invalid actions.
Returns:
Batch of observations, rewards, and done flags.
"""
for index, (env, action) in enumerate(zip(self._envs, actions)):
if not env.action_space.contains(action):
message = 'Invalid action at index {}: {}'
raise ValueError(message.format(index, action))
if self._blocking:
transitions = [
env.step(action)
for env, action in zip(self._envs, actions)]
else:
transitions = [
env.step(action, blocking=False)
for env, action in zip(self._envs, actions)]
transitions = [transition() for transition in transitions]
observs, rewards, dones, infos = zip(*transitions)
observ = np.stack(observs)
reward = np.stack(rewards)
done = np.stack(dones)
info = tuple(infos)
return observ, reward, done, info
|
[
"def",
"step",
"(",
"self",
",",
"actions",
")",
":",
"for",
"index",
",",
"(",
"env",
",",
"action",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"self",
".",
"_envs",
",",
"actions",
")",
")",
":",
"if",
"not",
"env",
".",
"action_space",
".",
"contains",
"(",
"action",
")",
":",
"message",
"=",
"'Invalid action at index {}: {}'",
"raise",
"ValueError",
"(",
"message",
".",
"format",
"(",
"index",
",",
"action",
")",
")",
"if",
"self",
".",
"_blocking",
":",
"transitions",
"=",
"[",
"env",
".",
"step",
"(",
"action",
")",
"for",
"env",
",",
"action",
"in",
"zip",
"(",
"self",
".",
"_envs",
",",
"actions",
")",
"]",
"else",
":",
"transitions",
"=",
"[",
"env",
".",
"step",
"(",
"action",
",",
"blocking",
"=",
"False",
")",
"for",
"env",
",",
"action",
"in",
"zip",
"(",
"self",
".",
"_envs",
",",
"actions",
")",
"]",
"transitions",
"=",
"[",
"transition",
"(",
")",
"for",
"transition",
"in",
"transitions",
"]",
"observs",
",",
"rewards",
",",
"dones",
",",
"infos",
"=",
"zip",
"(",
"*",
"transitions",
")",
"observ",
"=",
"np",
".",
"stack",
"(",
"observs",
")",
"reward",
"=",
"np",
".",
"stack",
"(",
"rewards",
")",
"done",
"=",
"np",
".",
"stack",
"(",
"dones",
")",
"info",
"=",
"tuple",
"(",
"infos",
")",
"return",
"observ",
",",
"reward",
",",
"done",
",",
"info"
] |
Forward a batch of actions to the wrapped environments.
Args:
actions: Batched action to apply to the environment.
Raises:
ValueError: Invalid actions.
Returns:
Batch of observations, rewards, and done flags.
|
[
"Forward",
"a",
"batch",
"of",
"actions",
"to",
"the",
"wrapped",
"environments",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/batch_env.py#L69-L99
|
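The interesting part of `BatchEnv.step` is the two-phase non-blocking pattern: first every environment is asked to step without waiting, then the returned promises are resolved and stacked. The toy `_PromiseEnv` below is hypothetical and only mirrors that shape:

import numpy as np

class _PromiseEnv(object):
  """Toy stand-in whose step() can return a promise instead of a result."""

  def step(self, action, blocking=True):
    def _resolve():
      observ = np.zeros(3, dtype=np.float32)
      return observ, 1.0, False, {}
    return _resolve() if blocking else _resolve

envs = [_PromiseEnv(), _PromiseEnv()]
# Phase 1: send all actions without waiting for any single environment.
promises = [env.step(action, blocking=False) for env, action in zip(envs, [0, 1])]
# Phase 2: resolve the promises and batch the transitions.
transitions = [promise() for promise in promises]
observs, rewards, dones, infos = zip(*transitions)
observ = np.stack(observs)  # shape (2, 3)
reward = np.stack(rewards)  # shape (2,)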
6,824
|
google-research/batch-ppo
|
agents/tools/wrappers.py
|
ExternalProcess.call
|
def call(self, name, *args, **kwargs):
"""Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
Promise object that blocks and provides the return value when called.
"""
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
return self._receive
|
python
|
def call(self, name, *args, **kwargs):
"""Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
Promise object that blocks and provides the return value when called.
"""
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
return self._receive
|
[
"def",
"call",
"(",
"self",
",",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"payload",
"=",
"name",
",",
"args",
",",
"kwargs",
"self",
".",
"_conn",
".",
"send",
"(",
"(",
"self",
".",
"_CALL",
",",
"payload",
")",
")",
"return",
"self",
".",
"_receive"
] |
Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
Promise object that blocks and provides the return value when called.
|
[
"Asynchronously",
"call",
"a",
"method",
"of",
"the",
"external",
"environment",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L363-L376
|
6,825
|
google-research/batch-ppo
|
agents/tools/wrappers.py
|
ExternalProcess.close
|
def close(self):
"""Send a close message to the external process and join it."""
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
self._process.join()
|
python
|
def close(self):
"""Send a close message to the external process and join it."""
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
self._process.join()
|
[
"def",
"close",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_conn",
".",
"send",
"(",
"(",
"self",
".",
"_CLOSE",
",",
"None",
")",
")",
"self",
".",
"_conn",
".",
"close",
"(",
")",
"except",
"IOError",
":",
"# The connection was already closed.",
"pass",
"self",
".",
"_process",
".",
"join",
"(",
")"
] |
Send a close message to the external process and join it.
|
[
"Send",
"a",
"close",
"message",
"to",
"the",
"external",
"process",
"and",
"join",
"it",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L378-L386
|
6,826
|
google-research/batch-ppo
|
agents/tools/wrappers.py
|
ExternalProcess.step
|
def step(self, action, blocking=True):
"""Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
Transition tuple when blocking, otherwise callable that returns the
transition tuple.
"""
promise = self.call('step', action)
if blocking:
return promise()
else:
return promise
|
python
|
def step(self, action, blocking=True):
"""Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
Transition tuple when blocking, otherwise callable that returns the
transition tuple.
"""
promise = self.call('step', action)
if blocking:
return promise()
else:
return promise
|
[
"def",
"step",
"(",
"self",
",",
"action",
",",
"blocking",
"=",
"True",
")",
":",
"promise",
"=",
"self",
".",
"call",
"(",
"'step'",
",",
"action",
")",
"if",
"blocking",
":",
"return",
"promise",
"(",
")",
"else",
":",
"return",
"promise"
] |
Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
Transition tuple when blocking, otherwise callable that returns the
transition tuple.
|
[
"Step",
"the",
"environment",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L388-L403
|
6,827
|
google-research/batch-ppo
|
agents/tools/wrappers.py
|
ExternalProcess._receive
|
def _receive(self):
"""Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
KeyError: The received message is of an unknown type.
Returns:
Payload object of the message.
"""
message, payload = self._conn.recv()
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._RESULT:
return payload
raise KeyError('Received message of unexpected type {}'.format(message))
|
python
|
def _receive(self):
"""Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
KeyError: The received message is of an unknown type.
Returns:
Payload object of the message.
"""
message, payload = self._conn.recv()
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._RESULT:
return payload
raise KeyError('Received message of unexpected type {}'.format(message))
|
[
"def",
"_receive",
"(",
"self",
")",
":",
"message",
",",
"payload",
"=",
"self",
".",
"_conn",
".",
"recv",
"(",
")",
"# Re-raise exceptions in the main process.",
"if",
"message",
"==",
"self",
".",
"_EXCEPTION",
":",
"stacktrace",
"=",
"payload",
"raise",
"Exception",
"(",
"stacktrace",
")",
"if",
"message",
"==",
"self",
".",
"_RESULT",
":",
"return",
"payload",
"raise",
"KeyError",
"(",
"'Received message of unexpected type {}'",
".",
"format",
"(",
"message",
")",
")"
] |
Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
KeyError: The received message is of an unknown type.
Returns:
Payload object of the message.
|
[
"Wait",
"for",
"a",
"message",
"from",
"the",
"worker",
"process",
"and",
"return",
"its",
"payload",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L421-L438
|
6,828
|
google-research/batch-ppo
|
agents/tools/wrappers.py
|
ExternalProcess._worker
|
def _worker(self, constructor, conn):
"""The process waits for actions and sends back environment results.
Args:
constructor: Constructor for the OpenAI Gym environment.
conn: Connection for communication to the main process.
Raises:
KeyError: When receiving a message of unknown type.
"""
try:
env = constructor()
while True:
try:
# Only block for short times to have keyboard exceptions be raised.
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if message == self._CALL:
name, args, kwargs = payload
result = getattr(env, name)(*args, **kwargs)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
assert payload is None
break
raise KeyError('Received message of unknown type {}'.format(message))
except Exception: # pylint: disable=broad-except
stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
tf.logging.error('Error in environment process: {}'.format(stacktrace))
conn.send((self._EXCEPTION, stacktrace))
conn.close()
|
python
|
def _worker(self, constructor, conn):
"""The process waits for actions and sends back environment results.
Args:
constructor: Constructor for the OpenAI Gym environment.
conn: Connection for communication to the main process.
Raises:
KeyError: When receiving a message of unknown type.
"""
try:
env = constructor()
while True:
try:
# Only block for short times to have keyboard exceptions be raised.
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if message == self._CALL:
name, args, kwargs = payload
result = getattr(env, name)(*args, **kwargs)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
assert payload is None
break
raise KeyError('Received message of unknown type {}'.format(message))
except Exception: # pylint: disable=broad-except
stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
tf.logging.error('Error in environment process: {}'.format(stacktrace))
conn.send((self._EXCEPTION, stacktrace))
conn.close()
|
[
"def",
"_worker",
"(",
"self",
",",
"constructor",
",",
"conn",
")",
":",
"try",
":",
"env",
"=",
"constructor",
"(",
")",
"while",
"True",
":",
"try",
":",
"# Only block for short times to have keyboard exceptions be raised.",
"if",
"not",
"conn",
".",
"poll",
"(",
"0.1",
")",
":",
"continue",
"message",
",",
"payload",
"=",
"conn",
".",
"recv",
"(",
")",
"except",
"(",
"EOFError",
",",
"KeyboardInterrupt",
")",
":",
"break",
"if",
"message",
"==",
"self",
".",
"_ACCESS",
":",
"name",
"=",
"payload",
"result",
"=",
"getattr",
"(",
"env",
",",
"name",
")",
"conn",
".",
"send",
"(",
"(",
"self",
".",
"_RESULT",
",",
"result",
")",
")",
"continue",
"if",
"message",
"==",
"self",
".",
"_CALL",
":",
"name",
",",
"args",
",",
"kwargs",
"=",
"payload",
"result",
"=",
"getattr",
"(",
"env",
",",
"name",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"conn",
".",
"send",
"(",
"(",
"self",
".",
"_RESULT",
",",
"result",
")",
")",
"continue",
"if",
"message",
"==",
"self",
".",
"_CLOSE",
":",
"assert",
"payload",
"is",
"None",
"break",
"raise",
"KeyError",
"(",
"'Received message of unknown type {}'",
".",
"format",
"(",
"message",
")",
")",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"stacktrace",
"=",
"''",
".",
"join",
"(",
"traceback",
".",
"format_exception",
"(",
"*",
"sys",
".",
"exc_info",
"(",
")",
")",
")",
"tf",
".",
"logging",
".",
"error",
"(",
"'Error in environment process: {}'",
".",
"format",
"(",
"stacktrace",
")",
")",
"conn",
".",
"send",
"(",
"(",
"self",
".",
"_EXCEPTION",
",",
"stacktrace",
")",
")",
"conn",
".",
"close",
"(",
")"
] |
The process waits for actions and sends back environment results.
Args:
constructor: Constructor for the OpenAI Gym environment.
conn: Connection for communication to the main process.
Raises:
KeyError: When receiving a message of unknown type.
|
[
"The",
"process",
"waits",
"for",
"actions",
"and",
"sends",
"back",
"environment",
"results",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L440-L478
|
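Taken together, `call`, `_receive`, and `_worker` implement a small request/response protocol over a `multiprocessing.Pipe`: the parent sends (message, payload) tuples, the worker answers with a result or a formatted traceback. A self-contained sketch of that protocol; the message constants and `ToyEnv` are placeholders, not the library's actual values:

import multiprocessing
import sys
import traceback

_CALL, _RESULT, _EXCEPTION, _CLOSE = 1, 2, 3, 4

class ToyEnv(object):
  def step(self, action):
    return [0.0, 0.0], 1.0, False, {}

def worker(conn):
  """Answer (_CALL, payload) messages until a (_CLOSE, None) arrives."""
  env = ToyEnv()
  try:
    while True:
      message, payload = conn.recv()
      if message == _CALL:
        name, args, kwargs = payload
        conn.send((_RESULT, getattr(env, name)(*args, **kwargs)))
      elif message == _CLOSE:
        break
  except Exception:  # pylint: disable=broad-except
    conn.send((_EXCEPTION, ''.join(traceback.format_exception(*sys.exc_info()))))
  conn.close()

if __name__ == '__main__':
  parent, child = multiprocessing.Pipe()
  process = multiprocessing.Process(target=worker, args=(child,))
  process.start()
  parent.send((_CALL, ('step', (0,), {})))  # asynchronous request ...
  message, payload = parent.recv()          # ... blocking receive of the result
  assert message == _RESULT
  parent.send((_CLOSE, None))
  process.join()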
6,829
|
google-research/batch-ppo
|
agents/tools/wrappers.py
|
ConvertTo32Bit.step
|
def step(self, action):
"""Forward action to the wrapped environment.
Args:
action: Action to apply to the environment.
Raises:
ValueError: Invalid action.
Returns:
Converted observation, converted reward, done flag, and info object.
"""
observ, reward, done, info = self._env.step(action)
observ = self._convert_observ(observ)
reward = self._convert_reward(reward)
return observ, reward, done, info
|
python
|
def step(self, action):
"""Forward action to the wrapped environment.
Args:
action: Action to apply to the environment.
Raises:
ValueError: Invalid action.
Returns:
Converted observation, converted reward, done flag, and info object.
"""
observ, reward, done, info = self._env.step(action)
observ = self._convert_observ(observ)
reward = self._convert_reward(reward)
return observ, reward, done, info
|
[
"def",
"step",
"(",
"self",
",",
"action",
")",
":",
"observ",
",",
"reward",
",",
"done",
",",
"info",
"=",
"self",
".",
"_env",
".",
"step",
"(",
"action",
")",
"observ",
"=",
"self",
".",
"_convert_observ",
"(",
"observ",
")",
"reward",
"=",
"self",
".",
"_convert_reward",
"(",
"reward",
")",
"return",
"observ",
",",
"reward",
",",
"done",
",",
"info"
] |
Forward action to the wrapped environment.
Args:
action: Action to apply to the environment.
Raises:
ValueError: Invalid action.
Returns:
Converted observation, converted reward, done flag, and info object.
|
[
"Forward",
"action",
"to",
"the",
"wrapped",
"environment",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L503-L518
|
6,830
|
google-research/batch-ppo
|
agents/tools/wrappers.py
|
ConvertTo32Bit._convert_observ
|
def _convert_observ(self, observ):
"""Convert the observation to 32 bits.
Args:
observ: Numpy observation.
Raises:
ValueError: Observation contains infinite values.
Returns:
Numpy observation with 32-bit data type.
"""
if not np.isfinite(observ).all():
raise ValueError('Infinite observation encountered.')
if observ.dtype == np.float64:
return observ.astype(np.float32)
if observ.dtype == np.int64:
return observ.astype(np.int32)
return observ
|
python
|
def _convert_observ(self, observ):
"""Convert the observation to 32 bits.
Args:
observ: Numpy observation.
Raises:
ValueError: Observation contains infinite values.
Returns:
Numpy observation with 32-bit data type.
"""
if not np.isfinite(observ).all():
raise ValueError('Infinite observation encountered.')
if observ.dtype == np.float64:
return observ.astype(np.float32)
if observ.dtype == np.int64:
return observ.astype(np.int32)
return observ
|
[
"def",
"_convert_observ",
"(",
"self",
",",
"observ",
")",
":",
"if",
"not",
"np",
".",
"isfinite",
"(",
"observ",
")",
".",
"all",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'Infinite observation encountered.'",
")",
"if",
"observ",
".",
"dtype",
"==",
"np",
".",
"float64",
":",
"return",
"observ",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"if",
"observ",
".",
"dtype",
"==",
"np",
".",
"int64",
":",
"return",
"observ",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"return",
"observ"
] |
Convert the observation to 32 bits.
Args:
observ: Numpy observation.
Raises:
ValueError: Observation contains infinite values.
Returns:
Numpy observation with 32-bit data type.
|
[
"Convert",
"the",
"observation",
"to",
"32",
"bits",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L530-L548
|
6,831
|
google-research/batch-ppo
|
agents/tools/wrappers.py
|
ConvertTo32Bit._convert_reward
|
def _convert_reward(self, reward):
"""Convert the reward to 32 bits.
Args:
reward: Numpy reward.
Raises:
ValueError: Rewards contain infinite values.
Returns:
Numpy reward with 32-bit data type.
"""
if not np.isfinite(reward).all():
raise ValueError('Infinite reward encountered.')
return np.array(reward, dtype=np.float32)
|
python
|
def _convert_reward(self, reward):
"""Convert the reward to 32 bits.
Args:
reward: Numpy reward.
Raises:
ValueError: Rewards contain infinite values.
Returns:
Numpy reward with 32-bit data type.
"""
if not np.isfinite(reward).all():
raise ValueError('Infinite reward encountered.')
return np.array(reward, dtype=np.float32)
|
[
"def",
"_convert_reward",
"(",
"self",
",",
"reward",
")",
":",
"if",
"not",
"np",
".",
"isfinite",
"(",
"reward",
")",
".",
"all",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'Infinite reward encountered.'",
")",
"return",
"np",
".",
"array",
"(",
"reward",
",",
"dtype",
"=",
"np",
".",
"float32",
")"
] |
Convert the reward to 32 bits.
Args:
reward: Numpy reward.
Raises:
ValueError: Rewards contain infinite values.
Returns:
Numpy reward with 32-bit data type.
|
[
"Convert",
"the",
"reward",
"to",
"32",
"bits",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/wrappers.py#L550-L564
|
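The conversion rules in `_convert_observ` and `_convert_reward` are simple dtype mappings; a quick standalone check of the same mappings with plain NumPy:

import numpy as np

observ = np.arange(4, dtype=np.float64)
assert observ.astype(np.float32).dtype == np.float32                     # float64 -> float32
assert np.arange(4, dtype=np.int64).astype(np.int32).dtype == np.int32   # int64 -> int32
reward = np.array(1.5, dtype=np.float32)                                 # rewards become float32 arrays
assert np.isfinite(observ).all() and np.isfinite(reward).all()           # non-finite values would raise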
6,832
|
google-research/batch-ppo
|
agents/tools/streaming_mean.py
|
StreamingMean.value
|
def value(self):
"""The current value of the mean."""
return self._sum / tf.cast(self._count, self._dtype)
|
python
|
def value(self):
"""The current value of the mean."""
return self._sum / tf.cast(self._count, self._dtype)
|
[
"def",
"value",
"(",
"self",
")",
":",
"return",
"self",
".",
"_sum",
"/",
"tf",
".",
"cast",
"(",
"self",
".",
"_count",
",",
"self",
".",
"_dtype",
")"
] |
The current value of the mean.
|
[
"The",
"current",
"value",
"of",
"the",
"mean",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/streaming_mean.py#L42-L44
|
6,833
|
google-research/batch-ppo
|
agents/tools/streaming_mean.py
|
StreamingMean.submit
|
def submit(self, value):
"""Submit a single or batch tensor to refine the streaming mean."""
# Add a batch dimension if necessary.
if value.shape.ndims == self._sum.shape.ndims:
value = value[None, ...]
return tf.group(
self._sum.assign_add(tf.reduce_sum(value, 0)),
self._count.assign_add(tf.shape(value)[0]))
|
python
|
def submit(self, value):
"""Submit a single or batch tensor to refine the streaming mean."""
# Add a batch dimension if necessary.
if value.shape.ndims == self._sum.shape.ndims:
value = value[None, ...]
return tf.group(
self._sum.assign_add(tf.reduce_sum(value, 0)),
self._count.assign_add(tf.shape(value)[0]))
|
[
"def",
"submit",
"(",
"self",
",",
"value",
")",
":",
"# Add a batch dimension if necessary.",
"if",
"value",
".",
"shape",
".",
"ndims",
"==",
"self",
".",
"_sum",
".",
"shape",
".",
"ndims",
":",
"value",
"=",
"value",
"[",
"None",
",",
"...",
"]",
"return",
"tf",
".",
"group",
"(",
"self",
".",
"_sum",
".",
"assign_add",
"(",
"tf",
".",
"reduce_sum",
"(",
"value",
",",
"0",
")",
")",
",",
"self",
".",
"_count",
".",
"assign_add",
"(",
"tf",
".",
"shape",
"(",
"value",
")",
"[",
"0",
"]",
")",
")"
] |
Submit a single or batch tensor to refine the streaming mean.
|
[
"Submit",
"a",
"single",
"or",
"batch",
"tensor",
"to",
"refine",
"the",
"streaming",
"mean",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/streaming_mean.py#L51-L58
|
6,834
|
google-research/batch-ppo
|
agents/tools/streaming_mean.py
|
StreamingMean.clear
|
def clear(self):
"""Return the mean estimate and reset the streaming statistics."""
value = self._sum / tf.cast(self._count, self._dtype)
with tf.control_dependencies([value]):
reset_value = self._sum.assign(tf.zeros_like(self._sum))
reset_count = self._count.assign(0)
with tf.control_dependencies([reset_value, reset_count]):
return tf.identity(value)
|
python
|
def clear(self):
"""Return the mean estimate and reset the streaming statistics."""
value = self._sum / tf.cast(self._count, self._dtype)
with tf.control_dependencies([value]):
reset_value = self._sum.assign(tf.zeros_like(self._sum))
reset_count = self._count.assign(0)
with tf.control_dependencies([reset_value, reset_count]):
return tf.identity(value)
|
[
"def",
"clear",
"(",
"self",
")",
":",
"value",
"=",
"self",
".",
"_sum",
"/",
"tf",
".",
"cast",
"(",
"self",
".",
"_count",
",",
"self",
".",
"_dtype",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"value",
"]",
")",
":",
"reset_value",
"=",
"self",
".",
"_sum",
".",
"assign",
"(",
"tf",
".",
"zeros_like",
"(",
"self",
".",
"_sum",
")",
")",
"reset_count",
"=",
"self",
".",
"_count",
".",
"assign",
"(",
"0",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"reset_value",
",",
"reset_count",
"]",
")",
":",
"return",
"tf",
".",
"identity",
"(",
"value",
")"
] |
Return the mean estimate and reset the streaming statistics.
|
[
"Return",
"the",
"mean",
"estimate",
"and",
"reset",
"the",
"streaming",
"statistics",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/streaming_mean.py#L60-L67
|
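The three `StreamingMean` methods above only show the sum/count bookkeeping; the constructor is not part of these records, so the sketch below reproduces the same logic in NumPy rather than guessing the TensorFlow signature:

import numpy as np

class StreamingMeanSketch(object):
  def __init__(self, shape, dtype=np.float32):
    self._sum = np.zeros(shape, dtype)
    self._count = 0

  def submit(self, value):
    value = np.asarray(value, dtype=self._sum.dtype)
    if value.ndim == self._sum.ndim:  # add a batch dimension if necessary
      value = value[None, ...]
    self._sum += value.sum(0)
    self._count += value.shape[0]

  def clear(self):
    value = self._sum / self._count
    self._sum[...] = 0
    self._count = 0
    return value

mean = StreamingMeanSketch(())
mean.submit(2.0)
mean.submit(np.array([4.0, 6.0]))  # batched submission
assert mean.clear() == 4.0         # (2 + 4 + 6) / 3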
6,835
|
google-research/batch-ppo
|
agents/tools/nested.py
|
zip_
|
def zip_(*structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc
"""Combine corresponding elements in multiple nested structure to tuples.
The nested structures can consist of any combination of lists, tuples, and
dicts. All provided structures must have the same nesting.
Args:
*structures: Nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure.
"""
# Named keyword arguments are not allowed after *args in Python 2.
flatten = kwargs.pop('flatten', False)
assert not kwargs, 'zip() got unexpected keyword arguments.'
return map(
lambda *x: x if len(x) > 1 else x[0],
*structures,
flatten=flatten)
|
python
|
def zip_(*structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc
"""Combine corresponding elements in multiple nested structure to tuples.
The nested structures can consist of any combination of lists, tuples, and
dicts. All provided structures must have the same nesting.
Args:
*structures: Nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure.
"""
# Named keyword arguments are not allowed after *args in Python 2.
flatten = kwargs.pop('flatten', False)
assert not kwargs, 'zip() got unexpected keyword arguments.'
return map(
lambda *x: x if len(x) > 1 else x[0],
*structures,
flatten=flatten)
|
[
"def",
"zip_",
"(",
"*",
"structures",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=differing-param-doc,missing-param-doc",
"# Named keyword arguments are not allowed after *args in Python 2.",
"flatten",
"=",
"kwargs",
".",
"pop",
"(",
"'flatten'",
",",
"False",
")",
"assert",
"not",
"kwargs",
",",
"'zip() got unexpected keyword arguments.'",
"return",
"map",
"(",
"lambda",
"*",
"x",
":",
"x",
"if",
"len",
"(",
"x",
")",
">",
"1",
"else",
"x",
"[",
"0",
"]",
",",
"*",
"structures",
",",
"flatten",
"=",
"flatten",
")"
] |
Combine corresponding elements in multiple nested structures to tuples.
The nested structures can consist of any combination of lists, tuples, and
dicts. All provided structures must have the same nesting.
Args:
*structures: Nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure.
|
[
"Combine",
"corresponding",
"elements",
"in",
"multiple",
"nested",
"structure",
"to",
"tuples",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/nested.py#L29-L50
|
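A usage sketch of `zip_`, assuming the module is importable as `agents.tools.nested` and exposes the trailing-underscore name; corresponding leaves are combined into tuples:

from agents.tools import nested

left = {'a': 1, 'b': (2, 3)}
right = {'a': 10, 'b': (20, 30)}
print(nested.zip_(left, right))
# {'a': (1, 10), 'b': ((2, 20), (3, 30))}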
6,836
|
google-research/batch-ppo
|
agents/tools/nested.py
|
map_
|
def map_(function, *structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc
"""Apply a function to every element in a nested structure.
If multiple structures are provided as input, their structure must match and
the function will be applied to corresponding groups of elements. The nested
structure can consist of any combination of lists, tuples, and dicts.
Args:
function: The function to apply to the elements of the structure. Receives
one argument for every structure that is provided.
*structures: One or more nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure.
"""
# Named keyword arguments are not allowed after *args in Python 2.
flatten = kwargs.pop('flatten', False)
assert not kwargs, 'map() got unexpected keyword arguments.'
def impl(function, *structures):
if len(structures) == 0: # pylint: disable=len-as-condition
return structures
if all(isinstance(s, (tuple, list)) for s in structures):
if len(set(len(x) for x in structures)) > 1:
raise ValueError('Cannot merge tuples or lists of different length.')
args = tuple((impl(function, *x) for x in _builtin_zip(*structures)))
if hasattr(structures[0], '_fields'): # namedtuple
return type(structures[0])(*args)
else: # tuple, list
return type(structures[0])(args)
if all(isinstance(s, dict) for s in structures):
if len(set(frozenset(x.keys()) for x in structures)) > 1:
raise ValueError('Cannot merge dicts with different keys.')
merged = {
k: impl(function, *(s[k] for s in structures))
for k in structures[0]}
return type(structures[0])(merged)
return function(*structures)
result = impl(function, *structures)
if flatten:
result = flatten_(result)
return result
|
python
|
def map_(function, *structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc
"""Apply a function to every element in a nested structure.
If multiple structures are provided as input, their structure must match and
the function will be applied to corresponding groups of elements. The nested
structure can consist of any combination of lists, tuples, and dicts.
Args:
function: The function to apply to the elements of the structure. Receives
one argument for every structure that is provided.
*structures: One or more nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure.
"""
# Named keyword arguments are not allowed after *args in Python 2.
flatten = kwargs.pop('flatten', False)
assert not kwargs, 'map() got unexpected keyword arguments.'
def impl(function, *structures):
if len(structures) == 0: # pylint: disable=len-as-condition
return structures
if all(isinstance(s, (tuple, list)) for s in structures):
if len(set(len(x) for x in structures)) > 1:
raise ValueError('Cannot merge tuples or lists of different length.')
args = tuple((impl(function, *x) for x in _builtin_zip(*structures)))
if hasattr(structures[0], '_fields'): # namedtuple
return type(structures[0])(*args)
else: # tuple, list
return type(structures[0])(args)
if all(isinstance(s, dict) for s in structures):
if len(set(frozenset(x.keys()) for x in structures)) > 1:
raise ValueError('Cannot merge dicts with different keys.')
merged = {
k: impl(function, *(s[k] for s in structures))
for k in structures[0]}
return type(structures[0])(merged)
return function(*structures)
result = impl(function, *structures)
if flatten:
result = flatten_(result)
return result
|
[
"def",
"map_",
"(",
"function",
",",
"*",
"structures",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=differing-param-doc,missing-param-doc",
"# Named keyword arguments are not allowed after *args in Python 2.",
"flatten",
"=",
"kwargs",
".",
"pop",
"(",
"'flatten'",
",",
"False",
")",
"assert",
"not",
"kwargs",
",",
"'map() got unexpected keyword arguments.'",
"def",
"impl",
"(",
"function",
",",
"*",
"structures",
")",
":",
"if",
"len",
"(",
"structures",
")",
"==",
"0",
":",
"# pylint: disable=len-as-condition",
"return",
"structures",
"if",
"all",
"(",
"isinstance",
"(",
"s",
",",
"(",
"tuple",
",",
"list",
")",
")",
"for",
"s",
"in",
"structures",
")",
":",
"if",
"len",
"(",
"set",
"(",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"structures",
")",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Cannot merge tuples or lists of different length.'",
")",
"args",
"=",
"tuple",
"(",
"(",
"impl",
"(",
"function",
",",
"*",
"x",
")",
"for",
"x",
"in",
"_builtin_zip",
"(",
"*",
"structures",
")",
")",
")",
"if",
"hasattr",
"(",
"structures",
"[",
"0",
"]",
",",
"'_fields'",
")",
":",
"# namedtuple",
"return",
"type",
"(",
"structures",
"[",
"0",
"]",
")",
"(",
"*",
"args",
")",
"else",
":",
"# tuple, list",
"return",
"type",
"(",
"structures",
"[",
"0",
"]",
")",
"(",
"args",
")",
"if",
"all",
"(",
"isinstance",
"(",
"s",
",",
"dict",
")",
"for",
"s",
"in",
"structures",
")",
":",
"if",
"len",
"(",
"set",
"(",
"frozenset",
"(",
"x",
".",
"keys",
"(",
")",
")",
"for",
"x",
"in",
"structures",
")",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Cannot merge dicts with different keys.'",
")",
"merged",
"=",
"{",
"k",
":",
"impl",
"(",
"function",
",",
"*",
"(",
"s",
"[",
"k",
"]",
"for",
"s",
"in",
"structures",
")",
")",
"for",
"k",
"in",
"structures",
"[",
"0",
"]",
"}",
"return",
"type",
"(",
"structures",
"[",
"0",
"]",
")",
"(",
"merged",
")",
"return",
"function",
"(",
"*",
"structures",
")",
"result",
"=",
"impl",
"(",
"function",
",",
"*",
"structures",
")",
"if",
"flatten",
":",
"result",
"=",
"flatten_",
"(",
"result",
")",
"return",
"result"
] |
Apply a function to every element in a nested structure.
If multiple structures are provided as input, their structure must match and
the function will be applied to corresponding groups of elements. The nested
structure can consist of any combination of lists, tuples, and dicts.
Args:
function: The function to apply to the elements of the structure. Receives
one argument for every structure that is provided.
*structures: One or more nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure.
|
[
"Apply",
"a",
"function",
"to",
"every",
"element",
"in",
"a",
"nested",
"structure",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/nested.py#L53-L98
|
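A usage sketch of `map_` under the same import assumption; with one structure the function is applied per leaf, with several structures it receives corresponding leaves together:

from agents.tools import nested

print(nested.map_(lambda x: x * 10, {'a': 1, 'b': (2, 3)}))
# {'a': 10, 'b': (20, 30)}
print(nested.map_(lambda x, y: x + y, (1, 2), (10, 20)))
# (11, 22)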
6,837
|
google-research/batch-ppo
|
agents/tools/nested.py
|
flatten_
|
def flatten_(structure):
"""Combine all leaves of a nested structure into a tuple.
The nested structure can consist of any combination of tuples, lists, and
dicts. Dictionary keys will be discarded but values will be ordered by the
sorting of the keys.
Args:
structure: Nested structure.
Returns:
Flat tuple.
"""
if isinstance(structure, dict):
if structure:
structure = zip(*sorted(structure.items(), key=lambda x: x[0]))[1]
else:
# Zip doesn't work on the items of an empty dictionary.
structure = ()
if isinstance(structure, (tuple, list)):
result = []
for element in structure:
result += flatten_(element)
return tuple(result)
return (structure,)
|
python
|
def flatten_(structure):
"""Combine all leaves of a nested structure into a tuple.
The nested structure can consist of any combination of tuples, lists, and
dicts. Dictionary keys will be discarded but values will be ordered by the
sorting of the keys.
Args:
structure: Nested structure.
Returns:
Flat tuple.
"""
if isinstance(structure, dict):
if structure:
structure = zip(*sorted(structure.items(), key=lambda x: x[0]))[1]
else:
# Zip doesn't work on the items of an empty dictionary.
structure = ()
if isinstance(structure, (tuple, list)):
result = []
for element in structure:
result += flatten_(element)
return tuple(result)
return (structure,)
|
[
"def",
"flatten_",
"(",
"structure",
")",
":",
"if",
"isinstance",
"(",
"structure",
",",
"dict",
")",
":",
"if",
"structure",
":",
"structure",
"=",
"zip",
"(",
"*",
"sorted",
"(",
"structure",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
")",
"[",
"1",
"]",
"else",
":",
"# Zip doesn't work on an the items of an empty dictionary.",
"structure",
"=",
"(",
")",
"if",
"isinstance",
"(",
"structure",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"result",
"=",
"[",
"]",
"for",
"element",
"in",
"structure",
":",
"result",
"+=",
"flatten_",
"(",
"element",
")",
"return",
"tuple",
"(",
"result",
")",
"return",
"(",
"structure",
",",
")"
] |
Combine all leaves of a nested structure into a tuple.
The nested structure can consist of any combination of tuples, lists, and
dicts. Dictionary keys will be discarded but values will ordered by the
sorting of the keys.
Args:
structure: Nested structure.
Returns:
Flat tuple.
|
[
"Combine",
"all",
"leaves",
"of",
"a",
"nested",
"structure",
"into",
"a",
"tuple",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/nested.py#L101-L125
|
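A usage sketch of `flatten_` under the same import assumption; nested tuples and lists collapse into one flat tuple of leaves (dict values would be ordered by their sorted keys):

from agents.tools import nested

print(nested.flatten_((1, [2, 3], (4,))))
# (1, 2, 3, 4)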
6,838
|
google-research/batch-ppo
|
agents/tools/nested.py
|
filter_
|
def filter_(predicate, *structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc, too-many-branches
"""Select elements of a nested structure based on a predicate function.
If multiple structures are provided as input, their structure must match and
the function will be applied to corresponding groups of elements. The nested
structure can consist of any combination of lists, tuples, and dicts.
Args:
predicate: The function to determine whether an element should be kept.
Receives one argument for every structure that is provided.
*structures: One or more nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure.
"""
# Named keyword arguments are not allowed after *args in Python 2.
flatten = kwargs.pop('flatten', False)
assert not kwargs, 'filter() got unexpected keyword arguments.'
def impl(predicate, *structures):
if len(structures) == 0: # pylint: disable=len-as-condition
return structures
if all(isinstance(s, (tuple, list)) for s in structures):
if len(set(len(x) for x in structures)) > 1:
raise ValueError('Cannot merge tuples or lists of different length.')
# Only wrap in tuples if more than one structure provided.
if len(structures) > 1:
filtered = (impl(predicate, *x) for x in _builtin_zip(*structures))
else:
filtered = (impl(predicate, x) for x in structures[0])
# Remove empty containers and construct result structure.
if hasattr(structures[0], '_fields'): # namedtuple
filtered = (x if x != () else None for x in filtered)
return type(structures[0])(*filtered)
else: # tuple, list
filtered = (
x for x in filtered if not isinstance(x, (tuple, list, dict)) or x)
return type(structures[0])(filtered)
if all(isinstance(s, dict) for s in structures):
if len(set(frozenset(x.keys()) for x in structures)) > 1:
raise ValueError('Cannot merge dicts with different keys.')
# Only wrap in tuples if more than one structure provided.
if len(structures) > 1:
filtered = {
k: impl(predicate, *(s[k] for s in structures))
for k in structures[0]}
else:
filtered = {k: impl(predicate, v) for k, v in structures[0].items()}
# Remove empty containers and construct result structure.
filtered = {
k: v for k, v in filtered.items()
if not isinstance(v, (tuple, list, dict)) or v}
return type(structures[0])(filtered)
if len(structures) > 1:
return structures if predicate(*structures) else ()
else:
return structures[0] if predicate(structures[0]) else ()
result = impl(predicate, *structures)
if flatten:
result = flatten_(result)
return result
|
python
|
def filter_(predicate, *structures, **kwargs):
# pylint: disable=differing-param-doc,missing-param-doc, too-many-branches
"""Select elements of a nested structure based on a predicate function.
If multiple structures are provided as input, their structure must match and
the function will be applied to corresponding groups of elements. The nested
structure can consist of any combination of lists, tuples, and dicts.
Args:
predicate: The function to determine whether an element should be kept.
Receives one argument for every structure that is provided.
*structures: One or more nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure.
"""
# Named keyword arguments are not allowed after *args in Python 2.
flatten = kwargs.pop('flatten', False)
assert not kwargs, 'filter() got unexpected keyword arguments.'
def impl(predicate, *structures):
if len(structures) == 0: # pylint: disable=len-as-condition
return structures
if all(isinstance(s, (tuple, list)) for s in structures):
if len(set(len(x) for x in structures)) > 1:
raise ValueError('Cannot merge tuples or lists of different length.')
# Only wrap in tuples if more than one structure provided.
if len(structures) > 1:
filtered = (impl(predicate, *x) for x in _builtin_zip(*structures))
else:
filtered = (impl(predicate, x) for x in structures[0])
# Remove empty containers and construct result structure.
if hasattr(structures[0], '_fields'): # namedtuple
filtered = (x if x != () else None for x in filtered)
return type(structures[0])(*filtered)
else: # tuple, list
filtered = (
x for x in filtered if not isinstance(x, (tuple, list, dict)) or x)
return type(structures[0])(filtered)
if all(isinstance(s, dict) for s in structures):
if len(set(frozenset(x.keys()) for x in structures)) > 1:
raise ValueError('Cannot merge dicts with different keys.')
# Only wrap in tuples if more than one structure provided.
if len(structures) > 1:
filtered = {
k: impl(predicate, *(s[k] for s in structures))
for k in structures[0]}
else:
filtered = {k: impl(predicate, v) for k, v in structures[0].items()}
# Remove empty containers and construct result structure.
filtered = {
k: v for k, v in filtered.items()
if not isinstance(v, (tuple, list, dict)) or v}
return type(structures[0])(filtered)
if len(structures) > 1:
return structures if predicate(*structures) else ()
else:
return structures[0] if predicate(structures[0]) else ()
result = impl(predicate, *structures)
if flatten:
result = flatten_(result)
return result
|
[
"def",
"filter_",
"(",
"predicate",
",",
"*",
"structures",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=differing-param-doc,missing-param-doc, too-many-branches",
"# Named keyword arguments are not allowed after *args in Python 2.",
"flatten",
"=",
"kwargs",
".",
"pop",
"(",
"'flatten'",
",",
"False",
")",
"assert",
"not",
"kwargs",
",",
"'filter() got unexpected keyword arguments.'",
"def",
"impl",
"(",
"predicate",
",",
"*",
"structures",
")",
":",
"if",
"len",
"(",
"structures",
")",
"==",
"0",
":",
"# pylint: disable=len-as-condition",
"return",
"structures",
"if",
"all",
"(",
"isinstance",
"(",
"s",
",",
"(",
"tuple",
",",
"list",
")",
")",
"for",
"s",
"in",
"structures",
")",
":",
"if",
"len",
"(",
"set",
"(",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"structures",
")",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Cannot merge tuples or lists of different length.'",
")",
"# Only wrap in tuples if more than one structure provided.",
"if",
"len",
"(",
"structures",
")",
">",
"1",
":",
"filtered",
"=",
"(",
"impl",
"(",
"predicate",
",",
"*",
"x",
")",
"for",
"x",
"in",
"_builtin_zip",
"(",
"*",
"structures",
")",
")",
"else",
":",
"filtered",
"=",
"(",
"impl",
"(",
"predicate",
",",
"x",
")",
"for",
"x",
"in",
"structures",
"[",
"0",
"]",
")",
"# Remove empty containers and construct result structure.",
"if",
"hasattr",
"(",
"structures",
"[",
"0",
"]",
",",
"'_fields'",
")",
":",
"# namedtuple",
"filtered",
"=",
"(",
"x",
"if",
"x",
"!=",
"(",
")",
"else",
"None",
"for",
"x",
"in",
"filtered",
")",
"return",
"type",
"(",
"structures",
"[",
"0",
"]",
")",
"(",
"*",
"filtered",
")",
"else",
":",
"# tuple, list",
"filtered",
"=",
"(",
"x",
"for",
"x",
"in",
"filtered",
"if",
"not",
"isinstance",
"(",
"x",
",",
"(",
"tuple",
",",
"list",
",",
"dict",
")",
")",
"or",
"x",
")",
"return",
"type",
"(",
"structures",
"[",
"0",
"]",
")",
"(",
"filtered",
")",
"if",
"all",
"(",
"isinstance",
"(",
"s",
",",
"dict",
")",
"for",
"s",
"in",
"structures",
")",
":",
"if",
"len",
"(",
"set",
"(",
"frozenset",
"(",
"x",
".",
"keys",
"(",
")",
")",
"for",
"x",
"in",
"structures",
")",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Cannot merge dicts with different keys.'",
")",
"# Only wrap in tuples if more than one structure provided.",
"if",
"len",
"(",
"structures",
")",
">",
"1",
":",
"filtered",
"=",
"{",
"k",
":",
"impl",
"(",
"predicate",
",",
"*",
"(",
"s",
"[",
"k",
"]",
"for",
"s",
"in",
"structures",
")",
")",
"for",
"k",
"in",
"structures",
"[",
"0",
"]",
"}",
"else",
":",
"filtered",
"=",
"{",
"k",
":",
"impl",
"(",
"predicate",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"structures",
"[",
"0",
"]",
".",
"items",
"(",
")",
"}",
"# Remove empty containers and construct result structure.",
"filtered",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"filtered",
".",
"items",
"(",
")",
"if",
"not",
"isinstance",
"(",
"v",
",",
"(",
"tuple",
",",
"list",
",",
"dict",
")",
")",
"or",
"v",
"}",
"return",
"type",
"(",
"structures",
"[",
"0",
"]",
")",
"(",
"filtered",
")",
"if",
"len",
"(",
"structures",
")",
">",
"1",
":",
"return",
"structures",
"if",
"predicate",
"(",
"*",
"structures",
")",
"else",
"(",
")",
"else",
":",
"return",
"structures",
"[",
"0",
"]",
"if",
"predicate",
"(",
"structures",
"[",
"0",
"]",
")",
"else",
"(",
")",
"result",
"=",
"impl",
"(",
"predicate",
",",
"*",
"structures",
")",
"if",
"flatten",
":",
"result",
"=",
"flatten_",
"(",
"result",
")",
"return",
"result"
] |
Select elements of a nested structure based on a predicate function.
If multiple structures are provided as input, their structure must match and
the function will be applied to corresponding groups of elements. The nested
structure can consist of any combination of lists, tuples, and dicts.
Args:
predicate: The function to determine whether an element should be kept.
Receives one argument for every structure that is provided.
*structures: One or more nested structures.
flatten: Whether to flatten the resulting structure into a tuple. Keys of
dictionaries will be discarded.
Returns:
Nested structure.
|
[
"Select",
"elements",
"of",
"a",
"nested",
"structure",
"based",
"on",
"a",
"predicate",
"function",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/nested.py#L128-L192
|
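A usage sketch of `filter_` under the same import assumption; leaves failing the predicate are dropped and containers left empty are removed from the result:

from agents.tools import nested

structure = {'a': 1, 'b': (2, 3), 'c': ()}
print(nested.filter_(lambda x: x % 2 == 1, structure))
# {'a': 1, 'b': (3,)}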
6,839
|
google-research/batch-ppo
|
agents/tools/loop.py
|
Loop.add_phase
|
def add_phase(
self, name, done, score, summary, steps,
report_every=None, log_every=None, checkpoint_every=None, feed=None):
"""Add a phase to the loop protocol.
If the model breaks long computation into multiple steps, the done tensor
indicates whether the current score should be added to the mean counter.
For example, in reinforcement learning we only have a valid score at the
end of the episode.
Score and done tensors can either be scalars or vectors, to support
single and batched computations.
Args:
name: Name for the phase, used for the summary writer.
done: Tensor indicating whether current score can be used.
score: Tensor holding the current, possibly intermediate, score.
summary: Tensor holding summary string to write if not an empty string.
steps: Duration of the phase in steps.
report_every: Yield mean score every this number of steps.
log_every: Request summaries via `log` tensor every this number of steps.
checkpoint_every: Write checkpoint every this number of steps.
feed: Additional feed dictionary for the session run call.
Raises:
ValueError: Unknown rank for done or score tensors.
"""
done = tf.convert_to_tensor(done, tf.bool)
score = tf.convert_to_tensor(score, tf.float32)
summary = tf.convert_to_tensor(summary, tf.string)
feed = feed or {}
if done.shape.ndims is None or score.shape.ndims is None:
raise ValueError("Rank of 'done' and 'score' tensors must be known.")
writer = self._logdir and tf.summary.FileWriter(
os.path.join(self._logdir, name), tf.get_default_graph(),
flush_secs=60)
op = self._define_step(done, score, summary)
batch = 1 if score.shape.ndims == 0 else score.shape[0].value
self._phases.append(_Phase(
name, writer, op, batch, int(steps), feed, report_every,
log_every, checkpoint_every))
|
python
|
def add_phase(
self, name, done, score, summary, steps,
report_every=None, log_every=None, checkpoint_every=None, feed=None):
"""Add a phase to the loop protocol.
If the model breaks long computation into multiple steps, the done tensor
indicates whether the current score should be added to the mean counter.
For example, in reinforcement learning we only have a valid score at the
end of the episode.
Score and done tensors can either be scalars or vectors, to support
single and batched computations.
Args:
name: Name for the phase, used for the summary writer.
done: Tensor indicating whether current score can be used.
score: Tensor holding the current, possibly intermediate, score.
summary: Tensor holding summary string to write if not an empty string.
steps: Duration of the phase in steps.
report_every: Yield mean score every this number of steps.
log_every: Request summaries via `log` tensor every this number of steps.
checkpoint_every: Write checkpoint every this number of steps.
feed: Additional feed dictionary for the session run call.
Raises:
ValueError: Unknown rank for done or score tensors.
"""
done = tf.convert_to_tensor(done, tf.bool)
score = tf.convert_to_tensor(score, tf.float32)
summary = tf.convert_to_tensor(summary, tf.string)
feed = feed or {}
if done.shape.ndims is None or score.shape.ndims is None:
raise ValueError("Rank of 'done' and 'score' tensors must be known.")
writer = self._logdir and tf.summary.FileWriter(
os.path.join(self._logdir, name), tf.get_default_graph(),
flush_secs=60)
op = self._define_step(done, score, summary)
batch = 1 if score.shape.ndims == 0 else score.shape[0].value
self._phases.append(_Phase(
name, writer, op, batch, int(steps), feed, report_every,
log_every, checkpoint_every))
|
[
"def",
"add_phase",
"(",
"self",
",",
"name",
",",
"done",
",",
"score",
",",
"summary",
",",
"steps",
",",
"report_every",
"=",
"None",
",",
"log_every",
"=",
"None",
",",
"checkpoint_every",
"=",
"None",
",",
"feed",
"=",
"None",
")",
":",
"done",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"done",
",",
"tf",
".",
"bool",
")",
"score",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"score",
",",
"tf",
".",
"float32",
")",
"summary",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"summary",
",",
"tf",
".",
"string",
")",
"feed",
"=",
"feed",
"or",
"{",
"}",
"if",
"done",
".",
"shape",
".",
"ndims",
"is",
"None",
"or",
"score",
".",
"shape",
".",
"ndims",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Rank of 'done' and 'score' tensors must be known.\"",
")",
"writer",
"=",
"self",
".",
"_logdir",
"and",
"tf",
".",
"summary",
".",
"FileWriter",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_logdir",
",",
"name",
")",
",",
"tf",
".",
"get_default_graph",
"(",
")",
",",
"flush_secs",
"=",
"60",
")",
"op",
"=",
"self",
".",
"_define_step",
"(",
"done",
",",
"score",
",",
"summary",
")",
"batch",
"=",
"1",
"if",
"score",
".",
"shape",
".",
"ndims",
"==",
"0",
"else",
"score",
".",
"shape",
"[",
"0",
"]",
".",
"value",
"self",
".",
"_phases",
".",
"append",
"(",
"_Phase",
"(",
"name",
",",
"writer",
",",
"op",
",",
"batch",
",",
"int",
"(",
"steps",
")",
",",
"feed",
",",
"report_every",
",",
"log_every",
",",
"checkpoint_every",
")",
")"
] |
Add a phase to the loop protocol.
If the model breaks long computation into multiple steps, the done tensor
indicates whether the current score should be added to the mean counter.
For example, in reinforcement learning we only have a valid score at the
end of the episode.
Score and done tensors can either be scalars or vectors, to support
single and batched computations.
Args:
name: Name for the phase, used for the summary writer.
done: Tensor indicating whether current score can be used.
score: Tensor holding the current, possibly intermediate, score.
summary: Tensor holding summary string to write if not an empty string.
steps: Duration of the phase in steps.
report_every: Yield mean score every this number of steps.
log_every: Request summaries via `log` tensor every this number of steps.
checkpoint_every: Write checkpoint every this number of steps.
feed: Additional feed dictionary for the session run call.
Raises:
ValueError: Unknown rank for done or score tensors.
|
[
"Add",
"a",
"phase",
"to",
"the",
"loop",
"protocol",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L66-L106
|
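A sketch of registering phases; the `Loop` constructor is not part of this record, so `loop` below is assumed to be an already constructed `tools.Loop` instance and the tensors are illustrative placeholders:

import tensorflow as tf

score = tf.placeholder(tf.float32, [None])  # batched, possibly intermediate score
done = tf.placeholder(tf.bool, [None])      # whether the score is final and countable
summary = tf.constant('')                   # empty string means nothing to write

loop.add_phase(
    'train', done, score, summary, steps=1000,
    report_every=100, log_every=500, checkpoint_every=500)
loop.add_phase(
    'eval', done, score, summary, steps=100,
    report_every=100)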
6,840
|
google-research/batch-ppo
|
agents/tools/loop.py
|
Loop.run
|
def run(self, sess, saver, max_step=None):
"""Run the loop schedule for a specified number of steps.
Call the operation of the current phase until the global step reaches the
specified maximum step. Phases are repeated over and over in the order they
were added.
Args:
sess: Session to use to run the phase operation.
saver: Saver used for checkpointing.
max_step: Run the operations until the step reaches this limit.
Yields:
Reported mean scores.
"""
global_step = sess.run(self._step)
steps_made = 1
while True:
if max_step and global_step >= max_step:
break
phase, epoch, steps_in = self._find_current_phase(global_step)
phase_step = epoch * phase.steps + steps_in
if steps_in % phase.steps < steps_made:
message = '\n' + ('-' * 50) + '\n'
message += 'Phase {} (phase step {}, global step {}).'
tf.logging.info(message.format(phase.name, phase_step, global_step))
# Populate book keeping tensors.
phase.feed[self._reset] = (steps_in < steps_made)
phase.feed[self._log] = (
phase.writer and
self._is_every_steps(phase_step, phase.batch, phase.log_every))
phase.feed[self._report] = (
self._is_every_steps(phase_step, phase.batch, phase.report_every))
summary, mean_score, global_step, steps_made = sess.run(
phase.op, phase.feed)
if self._is_every_steps(phase_step, phase.batch, phase.checkpoint_every):
self._store_checkpoint(sess, saver, global_step)
if self._is_every_steps(phase_step, phase.batch, phase.report_every):
yield mean_score
if summary and phase.writer:
# We want smaller phases to catch up at the beginning of each epoch so
# that their graphs are aligned.
longest_phase = max(phase.steps for phase in self._phases)
summary_step = epoch * longest_phase + steps_in
phase.writer.add_summary(summary, summary_step)
|
python
|
def run(self, sess, saver, max_step=None):
"""Run the loop schedule for a specified number of steps.
Call the operation of the current phase until the global step reaches the
specified maximum step. Phases are repeated over and over in the order they
were added.
Args:
sess: Session to use to run the phase operation.
saver: Saver used for checkpointing.
max_step: Run the operations until the step reaches this limit.
Yields:
Reported mean scores.
"""
global_step = sess.run(self._step)
steps_made = 1
while True:
if max_step and global_step >= max_step:
break
phase, epoch, steps_in = self._find_current_phase(global_step)
phase_step = epoch * phase.steps + steps_in
if steps_in % phase.steps < steps_made:
message = '\n' + ('-' * 50) + '\n'
message += 'Phase {} (phase step {}, global step {}).'
tf.logging.info(message.format(phase.name, phase_step, global_step))
# Populate book keeping tensors.
phase.feed[self._reset] = (steps_in < steps_made)
phase.feed[self._log] = (
phase.writer and
self._is_every_steps(phase_step, phase.batch, phase.log_every))
phase.feed[self._report] = (
self._is_every_steps(phase_step, phase.batch, phase.report_every))
summary, mean_score, global_step, steps_made = sess.run(
phase.op, phase.feed)
if self._is_every_steps(phase_step, phase.batch, phase.checkpoint_every):
self._store_checkpoint(sess, saver, global_step)
if self._is_every_steps(phase_step, phase.batch, phase.report_every):
yield mean_score
if summary and phase.writer:
# We want smaller phases to catch up at the beginning of each epoch so
# that their graphs are aligned.
longest_phase = max(phase.steps for phase in self._phases)
summary_step = epoch * longest_phase + steps_in
phase.writer.add_summary(summary, summary_step)
|
[
"def",
"run",
"(",
"self",
",",
"sess",
",",
"saver",
",",
"max_step",
"=",
"None",
")",
":",
"global_step",
"=",
"sess",
".",
"run",
"(",
"self",
".",
"_step",
")",
"steps_made",
"=",
"1",
"while",
"True",
":",
"if",
"max_step",
"and",
"global_step",
">=",
"max_step",
":",
"break",
"phase",
",",
"epoch",
",",
"steps_in",
"=",
"self",
".",
"_find_current_phase",
"(",
"global_step",
")",
"phase_step",
"=",
"epoch",
"*",
"phase",
".",
"steps",
"+",
"steps_in",
"if",
"steps_in",
"%",
"phase",
".",
"steps",
"<",
"steps_made",
":",
"message",
"=",
"'\\n'",
"+",
"(",
"'-'",
"*",
"50",
")",
"+",
"'\\n'",
"message",
"+=",
"'Phase {} (phase step {}, global step {}).'",
"tf",
".",
"logging",
".",
"info",
"(",
"message",
".",
"format",
"(",
"phase",
".",
"name",
",",
"phase_step",
",",
"global_step",
")",
")",
"# Populate book keeping tensors.",
"phase",
".",
"feed",
"[",
"self",
".",
"_reset",
"]",
"=",
"(",
"steps_in",
"<",
"steps_made",
")",
"phase",
".",
"feed",
"[",
"self",
".",
"_log",
"]",
"=",
"(",
"phase",
".",
"writer",
"and",
"self",
".",
"_is_every_steps",
"(",
"phase_step",
",",
"phase",
".",
"batch",
",",
"phase",
".",
"log_every",
")",
")",
"phase",
".",
"feed",
"[",
"self",
".",
"_report",
"]",
"=",
"(",
"self",
".",
"_is_every_steps",
"(",
"phase_step",
",",
"phase",
".",
"batch",
",",
"phase",
".",
"report_every",
")",
")",
"summary",
",",
"mean_score",
",",
"global_step",
",",
"steps_made",
"=",
"sess",
".",
"run",
"(",
"phase",
".",
"op",
",",
"phase",
".",
"feed",
")",
"if",
"self",
".",
"_is_every_steps",
"(",
"phase_step",
",",
"phase",
".",
"batch",
",",
"phase",
".",
"checkpoint_every",
")",
":",
"self",
".",
"_store_checkpoint",
"(",
"sess",
",",
"saver",
",",
"global_step",
")",
"if",
"self",
".",
"_is_every_steps",
"(",
"phase_step",
",",
"phase",
".",
"batch",
",",
"phase",
".",
"report_every",
")",
":",
"yield",
"mean_score",
"if",
"summary",
"and",
"phase",
".",
"writer",
":",
"# We want smaller phases to catch up at the beginnig of each epoch so",
"# that their graphs are aligned.",
"longest_phase",
"=",
"max",
"(",
"phase",
".",
"steps",
"for",
"phase",
"in",
"self",
".",
"_phases",
")",
"summary_step",
"=",
"epoch",
"*",
"longest_phase",
"+",
"steps_in",
"phase",
".",
"writer",
".",
"add_summary",
"(",
"summary",
",",
"summary_step",
")"
] |
Run the loop schedule for a specified number of steps.
Call the operation of the current phase until the global step reaches the
specified maximum step. Phases are repeated over and over in the order they
were added.
Args:
sess: Session to use to run the phase operation.
saver: Saver used for checkpointing.
max_step: Run the operations until the step reaches this limit.
Yields:
Reported mean scores.
|
[
"Run",
"the",
"loop",
"schedule",
"for",
"a",
"specified",
"number",
"of",
"steps",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L108-L152
|
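The Loop.run record above describes a generator that yields a mean score at every report step. A minimal sketch of a caller, assuming a loop built as in the train() record further below together with an open session and saver; the names and the total_steps value are illustrative only.

def drive_loop(loop, sess, saver, total_steps):
  # Consume Loop.run as a generator; each yielded value is the mean score
  # over the episodes that finished since the previous report step.
  scores = []
  for mean_score in loop.run(sess, saver, max_step=total_steps):
    scores.append(mean_score)
  return scores
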
6,841
|
google-research/batch-ppo
|
agents/tools/loop.py
|
Loop._is_every_steps
|
def _is_every_steps(self, phase_step, batch, every):
"""Determine whether a periodic event should happen at this step.
Args:
phase_step: The incrementing step.
batch: The number of steps progressed at once.
every: The interval of the period.
Returns:
Boolean of whether the event should happen.
"""
if not every:
return False
covered_steps = range(phase_step, phase_step + batch)
return any((step + 1) % every == 0 for step in covered_steps)
|
python
|
def _is_every_steps(self, phase_step, batch, every):
"""Determine whether a periodic event should happen at this step.
Args:
phase_step: The incrementing step.
batch: The number of steps progressed at once.
every: The interval of the period.
Returns:
Boolean of whether the event should happen.
"""
if not every:
return False
covered_steps = range(phase_step, phase_step + batch)
return any((step + 1) % every == 0 for step in covered_steps)
|
[
"def",
"_is_every_steps",
"(",
"self",
",",
"phase_step",
",",
"batch",
",",
"every",
")",
":",
"if",
"not",
"every",
":",
"return",
"False",
"covered_steps",
"=",
"range",
"(",
"phase_step",
",",
"phase_step",
"+",
"batch",
")",
"return",
"any",
"(",
"(",
"step",
"+",
"1",
")",
"%",
"every",
"==",
"0",
"for",
"step",
"in",
"covered_steps",
")"
] |
Determine whether a periodic event should happen at this step.
Args:
phase_step: The incrementing step.
batch: The number of steps progressed at once.
every: The interval of the period.
Returns:
Boolean of whether the event should happen.
|
[
"Determine",
"whether",
"a",
"periodic",
"event",
"should",
"happen",
"at",
"this",
"step",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L154-L168
|
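The helper above fires when any step covered by the current batch lands on a multiple of `every`. A standalone plain-Python restatement of that logic, with made-up numbers, to make the boundary behaviour concrete:

def is_every_steps(phase_step, batch, every):
  # Same logic as Loop._is_every_steps: the batch covers steps
  # [phase_step, phase_step + batch) and the check is 1-based.
  if not every:
    return False
  return any((step + 1) % every == 0 for step in range(phase_step, phase_step + batch))

# With 25 steps progressing at once and a report interval of 100,
# the batch starting at step 75 covers step 100 and triggers the event.
assert is_every_steps(75, 25, 100)
assert not is_every_steps(100, 25, 100)
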
6,842
|
google-research/batch-ppo
|
agents/tools/loop.py
|
Loop._find_current_phase
|
def _find_current_phase(self, global_step):
"""Determine the current phase based on the global step.
  This ensures continuing the correct phase after restoring checkpoints.
Args:
global_step: The global number of steps performed across all phases.
Returns:
Tuple of phase object, epoch number, and phase steps within the epoch.
"""
epoch_size = sum(phase.steps for phase in self._phases)
epoch = int(global_step // epoch_size)
steps_in = global_step % epoch_size
for phase in self._phases:
if steps_in < phase.steps:
return phase, epoch, steps_in
steps_in -= phase.steps
|
python
|
def _find_current_phase(self, global_step):
"""Determine the current phase based on the global step.
  This ensures continuing the correct phase after restoring checkpoints.
Args:
global_step: The global number of steps performed across all phases.
Returns:
Tuple of phase object, epoch number, and phase steps within the epoch.
"""
epoch_size = sum(phase.steps for phase in self._phases)
epoch = int(global_step // epoch_size)
steps_in = global_step % epoch_size
for phase in self._phases:
if steps_in < phase.steps:
return phase, epoch, steps_in
steps_in -= phase.steps
|
[
"def",
"_find_current_phase",
"(",
"self",
",",
"global_step",
")",
":",
"epoch_size",
"=",
"sum",
"(",
"phase",
".",
"steps",
"for",
"phase",
"in",
"self",
".",
"_phases",
")",
"epoch",
"=",
"int",
"(",
"global_step",
"//",
"epoch_size",
")",
"steps_in",
"=",
"global_step",
"%",
"epoch_size",
"for",
"phase",
"in",
"self",
".",
"_phases",
":",
"if",
"steps_in",
"<",
"phase",
".",
"steps",
":",
"return",
"phase",
",",
"epoch",
",",
"steps_in",
"steps_in",
"-=",
"phase",
".",
"steps"
] |
Determine the current phase based on the global step.
This ensures continuing the correct phase after restoring checkpoints.
Args:
global_step: The global number of steps performed across all phases.
Returns:
Tuple of phase object, epoch number, and phase steps within the epoch.
|
[
"Determine",
"the",
"current",
"phase",
"based",
"on",
"the",
"global",
"step",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L170-L187
|
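A quick numeric check of the phase lookup above, using two hypothetical phases of 3000 training steps and 750 evaluation steps per epoch (the shape of schedule that _define_loop later in this dump produces):

# Plain-Python analogue of Loop._find_current_phase for two phases.
phases = [('train', 3000), ('eval', 750)]  # hypothetical sizes

def find_current_phase(global_step):
  epoch_size = sum(steps for _, steps in phases)
  epoch = global_step // epoch_size
  steps_in = global_step % epoch_size
  for name, steps in phases:
    if steps_in < steps:
      return name, epoch, steps_in
    steps_in -= steps

assert find_current_phase(3100) == ('eval', 0, 100)   # 100 steps into eval
assert find_current_phase(4000) == ('train', 1, 250)  # second epoch
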
6,843
|
google-research/batch-ppo
|
agents/tools/loop.py
|
Loop._define_step
|
def _define_step(self, done, score, summary):
"""Combine operations of a phase.
Keeps track of the mean score and when to report it.
Args:
done: Tensor indicating whether current score can be used.
score: Tensor holding the current, possibly intermediate, score.
summary: Tensor holding summary string to write if not an empty string.
Returns:
Tuple of summary tensor, mean score, and new global step. The mean score
is zero for non reporting steps.
"""
if done.shape.ndims == 0:
done = done[None]
if score.shape.ndims == 0:
score = score[None]
score_mean = streaming_mean.StreamingMean((), tf.float32)
with tf.control_dependencies([done, score, summary]):
done_score = tf.gather(score, tf.where(done)[:, 0])
submit_score = tf.cond(
tf.reduce_any(done), lambda: score_mean.submit(done_score), tf.no_op)
with tf.control_dependencies([submit_score]):
mean_score = tf.cond(self._report, score_mean.clear, float)
steps_made = tf.shape(score)[0]
next_step = self._step.assign_add(steps_made)
with tf.control_dependencies([mean_score, next_step]):
return tf.identity(summary), mean_score, next_step, steps_made
|
python
|
def _define_step(self, done, score, summary):
"""Combine operations of a phase.
Keeps track of the mean score and when to report it.
Args:
done: Tensor indicating whether current score can be used.
score: Tensor holding the current, possibly intermediate, score.
summary: Tensor holding summary string to write if not an empty string.
Returns:
Tuple of summary tensor, mean score, and new global step. The mean score
is zero for non reporting steps.
"""
if done.shape.ndims == 0:
done = done[None]
if score.shape.ndims == 0:
score = score[None]
score_mean = streaming_mean.StreamingMean((), tf.float32)
with tf.control_dependencies([done, score, summary]):
done_score = tf.gather(score, tf.where(done)[:, 0])
submit_score = tf.cond(
tf.reduce_any(done), lambda: score_mean.submit(done_score), tf.no_op)
with tf.control_dependencies([submit_score]):
mean_score = tf.cond(self._report, score_mean.clear, float)
steps_made = tf.shape(score)[0]
next_step = self._step.assign_add(steps_made)
with tf.control_dependencies([mean_score, next_step]):
return tf.identity(summary), mean_score, next_step, steps_made
|
[
"def",
"_define_step",
"(",
"self",
",",
"done",
",",
"score",
",",
"summary",
")",
":",
"if",
"done",
".",
"shape",
".",
"ndims",
"==",
"0",
":",
"done",
"=",
"done",
"[",
"None",
"]",
"if",
"score",
".",
"shape",
".",
"ndims",
"==",
"0",
":",
"score",
"=",
"score",
"[",
"None",
"]",
"score_mean",
"=",
"streaming_mean",
".",
"StreamingMean",
"(",
"(",
")",
",",
"tf",
".",
"float32",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"done",
",",
"score",
",",
"summary",
"]",
")",
":",
"done_score",
"=",
"tf",
".",
"gather",
"(",
"score",
",",
"tf",
".",
"where",
"(",
"done",
")",
"[",
":",
",",
"0",
"]",
")",
"submit_score",
"=",
"tf",
".",
"cond",
"(",
"tf",
".",
"reduce_any",
"(",
"done",
")",
",",
"lambda",
":",
"score_mean",
".",
"submit",
"(",
"done_score",
")",
",",
"tf",
".",
"no_op",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"submit_score",
"]",
")",
":",
"mean_score",
"=",
"tf",
".",
"cond",
"(",
"self",
".",
"_report",
",",
"score_mean",
".",
"clear",
",",
"float",
")",
"steps_made",
"=",
"tf",
".",
"shape",
"(",
"score",
")",
"[",
"0",
"]",
"next_step",
"=",
"self",
".",
"_step",
".",
"assign_add",
"(",
"steps_made",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"mean_score",
",",
"next_step",
"]",
")",
":",
"return",
"tf",
".",
"identity",
"(",
"summary",
")",
",",
"mean_score",
",",
"next_step",
",",
"steps_made"
] |
Combine operations of a phase.
Keeps track of the mean score and when to report it.
Args:
done: Tensor indicating whether current score can be used.
score: Tensor holding the current, possibly intermediate, score.
summary: Tensor holding summary string to write if not an empty string.
Returns:
Tuple of summary tensor, mean score, and new global step. The mean score
is zero for non reporting steps.
|
[
"Combine",
"operations",
"of",
"a",
"phase",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L189-L217
|
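_define_step only submits scores of environments whose done flag is set, so the reported mean covers finished episodes only. A plain-Python analogue of that masking, with made-up batch values:

# One simulation step for a batch of four environments (values invented).
scores = [12.0, -3.0, 7.5, 0.0]
done = [True, False, True, False]

# Mirrors tf.gather(score, tf.where(done)) followed by the streaming mean.
finished = [s for s, d in zip(scores, done) if d]
score_sum, score_count = sum(finished), len(finished)

mean_score = score_sum / score_count if score_count else 0.0
assert mean_score == 9.75  # only the two finished episodes contribute
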
6,844
|
google-research/batch-ppo
|
agents/tools/loop.py
|
Loop._store_checkpoint
|
def _store_checkpoint(self, sess, saver, global_step):
"""Store a checkpoint if a log directory was provided to the constructor.
The directory will be created if needed.
Args:
sess: Session containing variables to store.
saver: Saver used for checkpointing.
global_step: Step number of the checkpoint name.
"""
if not self._logdir or not saver:
return
tf.gfile.MakeDirs(self._logdir)
filename = os.path.join(self._logdir, 'model.ckpt')
saver.save(sess, filename, global_step)
|
python
|
def _store_checkpoint(self, sess, saver, global_step):
"""Store a checkpoint if a log directory was provided to the constructor.
The directory will be created if needed.
Args:
sess: Session containing variables to store.
saver: Saver used for checkpointing.
global_step: Step number of the checkpoint name.
"""
if not self._logdir or not saver:
return
tf.gfile.MakeDirs(self._logdir)
filename = os.path.join(self._logdir, 'model.ckpt')
saver.save(sess, filename, global_step)
|
[
"def",
"_store_checkpoint",
"(",
"self",
",",
"sess",
",",
"saver",
",",
"global_step",
")",
":",
"if",
"not",
"self",
".",
"_logdir",
"or",
"not",
"saver",
":",
"return",
"tf",
".",
"gfile",
".",
"MakeDirs",
"(",
"self",
".",
"_logdir",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_logdir",
",",
"'model.ckpt'",
")",
"saver",
".",
"save",
"(",
"sess",
",",
"filename",
",",
"global_step",
")"
] |
Store a checkpoint if a log directory was provided to the constructor.
The directory will be created if needed.
Args:
sess: Session containing variables to store.
saver: Saver used for checkpointing.
global_step: Step number of the checkpoint name.
|
[
"Store",
"a",
"checkpoint",
"if",
"a",
"log",
"directory",
"was",
"provided",
"to",
"the",
"constructor",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/loop.py#L219-L233
|
6,845
|
google-research/batch-ppo
|
agents/scripts/train.py
|
_define_loop
|
def _define_loop(graph, logdir, train_steps, eval_steps):
"""Create and configure a training loop with training and evaluation phases.
Args:
graph: Object providing graph elements via attributes.
logdir: Log directory for storing checkpoints and summaries.
train_steps: Number of training steps per epoch.
eval_steps: Number of evaluation steps per epoch.
Returns:
Loop object.
"""
loop = tools.Loop(
logdir, graph.step, graph.should_log, graph.do_report,
graph.force_reset)
loop.add_phase(
'train', graph.done, graph.score, graph.summary, train_steps,
report_every=train_steps,
log_every=train_steps // 2,
checkpoint_every=None,
feed={graph.is_training: True})
loop.add_phase(
'eval', graph.done, graph.score, graph.summary, eval_steps,
report_every=eval_steps,
log_every=eval_steps // 2,
checkpoint_every=10 * eval_steps,
feed={graph.is_training: False})
return loop
|
python
|
def _define_loop(graph, logdir, train_steps, eval_steps):
"""Create and configure a training loop with training and evaluation phases.
Args:
graph: Object providing graph elements via attributes.
logdir: Log directory for storing checkpoints and summaries.
train_steps: Number of training steps per epoch.
eval_steps: Number of evaluation steps per epoch.
Returns:
Loop object.
"""
loop = tools.Loop(
logdir, graph.step, graph.should_log, graph.do_report,
graph.force_reset)
loop.add_phase(
'train', graph.done, graph.score, graph.summary, train_steps,
report_every=train_steps,
log_every=train_steps // 2,
checkpoint_every=None,
feed={graph.is_training: True})
loop.add_phase(
'eval', graph.done, graph.score, graph.summary, eval_steps,
report_every=eval_steps,
log_every=eval_steps // 2,
checkpoint_every=10 * eval_steps,
feed={graph.is_training: False})
return loop
|
[
"def",
"_define_loop",
"(",
"graph",
",",
"logdir",
",",
"train_steps",
",",
"eval_steps",
")",
":",
"loop",
"=",
"tools",
".",
"Loop",
"(",
"logdir",
",",
"graph",
".",
"step",
",",
"graph",
".",
"should_log",
",",
"graph",
".",
"do_report",
",",
"graph",
".",
"force_reset",
")",
"loop",
".",
"add_phase",
"(",
"'train'",
",",
"graph",
".",
"done",
",",
"graph",
".",
"score",
",",
"graph",
".",
"summary",
",",
"train_steps",
",",
"report_every",
"=",
"train_steps",
",",
"log_every",
"=",
"train_steps",
"//",
"2",
",",
"checkpoint_every",
"=",
"None",
",",
"feed",
"=",
"{",
"graph",
".",
"is_training",
":",
"True",
"}",
")",
"loop",
".",
"add_phase",
"(",
"'eval'",
",",
"graph",
".",
"done",
",",
"graph",
".",
"score",
",",
"graph",
".",
"summary",
",",
"eval_steps",
",",
"report_every",
"=",
"eval_steps",
",",
"log_every",
"=",
"eval_steps",
"//",
"2",
",",
"checkpoint_every",
"=",
"10",
"*",
"eval_steps",
",",
"feed",
"=",
"{",
"graph",
".",
"is_training",
":",
"False",
"}",
")",
"return",
"loop"
] |
Create and configure a training loop with training and evaluation phases.
Args:
graph: Object providing graph elements via attributes.
logdir: Log directory for storing checkpoints and summaries.
train_steps: Number of training steps per epoch.
eval_steps: Number of evaluation steps per epoch.
Returns:
Loop object.
|
[
"Create",
"and",
"configure",
"a",
"training",
"loop",
"with",
"training",
"and",
"evaluation",
"phases",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/train.py#L70-L97
|
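With the settings above each phase reports once and logs twice per pass, and only the evaluation phase stores checkpoints. A quick arithmetic check using hypothetical phase sizes:

# Hypothetical sizes: 30 update episodes and 25 eval episodes of up to
# 200 steps each, i.e. the train_steps/eval_steps passed to _define_loop.
train_steps = 30 * 200   # 6000 simulator steps per training pass
eval_steps = 25 * 200    # 5000 simulator steps per evaluation pass

train_phase = dict(report_every=train_steps, log_every=train_steps // 2,
                   checkpoint_every=None)
eval_phase = dict(report_every=eval_steps, log_every=eval_steps // 2,
                  checkpoint_every=10 * eval_steps)

assert eval_phase['log_every'] == 2500          # two summary writes per pass
assert eval_phase['checkpoint_every'] == 50000  # one checkpoint per 10 passes
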
6,846
|
google-research/batch-ppo
|
agents/scripts/train.py
|
train
|
def train(config, env_processes):
"""Training and evaluation entry point yielding scores.
Resolves some configuration attributes, creates environments, graph, and
training loop. By default, assigns all operations to the CPU.
Args:
config: Object providing configurations via attributes.
env_processes: Whether to step environments in separate processes.
Yields:
Evaluation scores.
"""
tf.reset_default_graph()
if config.update_every % config.num_agents:
tf.logging.warn('Number of agents should divide episodes per update.')
with tf.device('/cpu:0'):
batch_env = utility.define_batch_env(
lambda: _create_environment(config),
config.num_agents, env_processes)
graph = utility.define_simulation_graph(
batch_env, config.algorithm, config)
loop = _define_loop(
graph, config.logdir,
config.update_every * config.max_length,
config.eval_episodes * config.max_length)
total_steps = int(
config.steps / config.update_every *
(config.update_every + config.eval_episodes))
# Exclude episode related variables since the Python state of environments is
# not checkpointed and thus new episodes start after resuming.
saver = utility.define_saver(exclude=(r'.*_temporary.*',))
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
utility.initialize_variables(sess, saver, config.logdir)
for score in loop.run(sess, saver, total_steps):
yield score
batch_env.close()
|
python
|
def train(config, env_processes):
"""Training and evaluation entry point yielding scores.
Resolves some configuration attributes, creates environments, graph, and
training loop. By default, assigns all operations to the CPU.
Args:
config: Object providing configurations via attributes.
env_processes: Whether to step environments in separate processes.
Yields:
Evaluation scores.
"""
tf.reset_default_graph()
if config.update_every % config.num_agents:
tf.logging.warn('Number of agents should divide episodes per update.')
with tf.device('/cpu:0'):
batch_env = utility.define_batch_env(
lambda: _create_environment(config),
config.num_agents, env_processes)
graph = utility.define_simulation_graph(
batch_env, config.algorithm, config)
loop = _define_loop(
graph, config.logdir,
config.update_every * config.max_length,
config.eval_episodes * config.max_length)
total_steps = int(
config.steps / config.update_every *
(config.update_every + config.eval_episodes))
# Exclude episode related variables since the Python state of environments is
# not checkpointed and thus new episodes start after resuming.
saver = utility.define_saver(exclude=(r'.*_temporary.*',))
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
utility.initialize_variables(sess, saver, config.logdir)
for score in loop.run(sess, saver, total_steps):
yield score
batch_env.close()
|
[
"def",
"train",
"(",
"config",
",",
"env_processes",
")",
":",
"tf",
".",
"reset_default_graph",
"(",
")",
"if",
"config",
".",
"update_every",
"%",
"config",
".",
"num_agents",
":",
"tf",
".",
"logging",
".",
"warn",
"(",
"'Number of agents should divide episodes per update.'",
")",
"with",
"tf",
".",
"device",
"(",
"'/cpu:0'",
")",
":",
"batch_env",
"=",
"utility",
".",
"define_batch_env",
"(",
"lambda",
":",
"_create_environment",
"(",
"config",
")",
",",
"config",
".",
"num_agents",
",",
"env_processes",
")",
"graph",
"=",
"utility",
".",
"define_simulation_graph",
"(",
"batch_env",
",",
"config",
".",
"algorithm",
",",
"config",
")",
"loop",
"=",
"_define_loop",
"(",
"graph",
",",
"config",
".",
"logdir",
",",
"config",
".",
"update_every",
"*",
"config",
".",
"max_length",
",",
"config",
".",
"eval_episodes",
"*",
"config",
".",
"max_length",
")",
"total_steps",
"=",
"int",
"(",
"config",
".",
"steps",
"/",
"config",
".",
"update_every",
"*",
"(",
"config",
".",
"update_every",
"+",
"config",
".",
"eval_episodes",
")",
")",
"# Exclude episode related variables since the Python state of environments is",
"# not checkpointed and thus new episodes start after resuming.",
"saver",
"=",
"utility",
".",
"define_saver",
"(",
"exclude",
"=",
"(",
"r'.*_temporary.*'",
",",
")",
")",
"sess_config",
"=",
"tf",
".",
"ConfigProto",
"(",
"allow_soft_placement",
"=",
"True",
")",
"sess_config",
".",
"gpu_options",
".",
"allow_growth",
"=",
"True",
"with",
"tf",
".",
"Session",
"(",
"config",
"=",
"sess_config",
")",
"as",
"sess",
":",
"utility",
".",
"initialize_variables",
"(",
"sess",
",",
"saver",
",",
"config",
".",
"logdir",
")",
"for",
"score",
"in",
"loop",
".",
"run",
"(",
"sess",
",",
"saver",
",",
"total_steps",
")",
":",
"yield",
"score",
"batch_env",
".",
"close",
"(",
")"
] |
Training and evaluation entry point yielding scores.
Resolves some configuration attributes, creates environments, graph, and
training loop. By default, assigns all operations to the CPU.
Args:
config: Object providing configurations via attributes.
env_processes: Whether to step environments in separate processes.
Yields:
Evaluation scores.
|
[
"Training",
"and",
"evaluation",
"entry",
"point",
"yielding",
"scores",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/train.py#L100-L138
|
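The total_steps computation above stretches the configured number of policy steps to account for the interleaved evaluation passes. With hypothetical config values:

# Hypothetical values standing in for config.steps, config.update_every,
# and config.eval_episodes.
steps = 1000000
update_every = 30
eval_episodes = 25

# Every train pass of `update_every` episodes is followed by an eval pass of
# `eval_episodes` episodes, so the loop budget grows by their ratio.
total_steps = int(steps / update_every * (update_every + eval_episodes))
assert total_steps == 1833333
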
6,847
|
google-research/batch-ppo
|
agents/scripts/train.py
|
main
|
def main(_):
"""Create or load configuration and launch the trainer."""
utility.set_up_logging()
if not FLAGS.config:
raise KeyError('You must specify a configuration.')
logdir = FLAGS.logdir and os.path.expanduser(os.path.join(
FLAGS.logdir, '{}-{}'.format(FLAGS.timestamp, FLAGS.config)))
try:
config = utility.load_config(logdir)
except IOError:
config = tools.AttrDict(getattr(configs, FLAGS.config)())
config = utility.save_config(config, logdir)
for score in train(config, FLAGS.env_processes):
tf.logging.info('Score {}.'.format(score))
|
python
|
def main(_):
"""Create or load configuration and launch the trainer."""
utility.set_up_logging()
if not FLAGS.config:
raise KeyError('You must specify a configuration.')
logdir = FLAGS.logdir and os.path.expanduser(os.path.join(
FLAGS.logdir, '{}-{}'.format(FLAGS.timestamp, FLAGS.config)))
try:
config = utility.load_config(logdir)
except IOError:
config = tools.AttrDict(getattr(configs, FLAGS.config)())
config = utility.save_config(config, logdir)
for score in train(config, FLAGS.env_processes):
tf.logging.info('Score {}.'.format(score))
|
[
"def",
"main",
"(",
"_",
")",
":",
"utility",
".",
"set_up_logging",
"(",
")",
"if",
"not",
"FLAGS",
".",
"config",
":",
"raise",
"KeyError",
"(",
"'You must specify a configuration.'",
")",
"logdir",
"=",
"FLAGS",
".",
"logdir",
"and",
"os",
".",
"path",
".",
"expanduser",
"(",
"os",
".",
"path",
".",
"join",
"(",
"FLAGS",
".",
"logdir",
",",
"'{}-{}'",
".",
"format",
"(",
"FLAGS",
".",
"timestamp",
",",
"FLAGS",
".",
"config",
")",
")",
")",
"try",
":",
"config",
"=",
"utility",
".",
"load_config",
"(",
"logdir",
")",
"except",
"IOError",
":",
"config",
"=",
"tools",
".",
"AttrDict",
"(",
"getattr",
"(",
"configs",
",",
"FLAGS",
".",
"config",
")",
"(",
")",
")",
"config",
"=",
"utility",
".",
"save_config",
"(",
"config",
",",
"logdir",
")",
"for",
"score",
"in",
"train",
"(",
"config",
",",
"FLAGS",
".",
"env_processes",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"'Score {}.'",
".",
"format",
"(",
"score",
")",
")"
] |
Create or load configuration and launch the trainer.
|
[
"Create",
"or",
"load",
"configuration",
"and",
"launch",
"the",
"trainer",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/scripts/train.py#L141-L154
|
6,848
|
google-research/batch-ppo
|
agents/parts/iterate_sequences.py
|
iterate_sequences
|
def iterate_sequences(
consumer_fn, output_template, sequences, length, chunk_length=None,
batch_size=None, num_epochs=1, padding_value=0):
"""Iterate over batches of chunks of sequences for multiple epochs.
The batch dimension of the length tensor must be set because it is used to
infer buffer sizes.
Args:
consumer_fn: Function creating the operation to process the data.
output_template: Nested tensors of same shape and dtype as outputs.
sequences: Nested collection of tensors with batch and time dimension.
length: Tensor containing the length for each sequence.
chunk_length: Split sequences into chunks of this size; optional.
batch_size: Split epochs into batches of this size; optional.
num_epochs: How many times to repeat over the data.
padding_value: Value used for padding the last chunk after the sequence.
Raises:
ValueError: Unknown batch size of the length tensor.
Returns:
Concatenated nested tensors returned by the consumer.
"""
if not length.shape[0].value:
raise ValueError('Batch size of length tensor must be set.')
num_sequences = length.shape[0].value
sequences = dict(sequence=sequences, length=length)
dataset = tf.data.Dataset.from_tensor_slices(sequences)
dataset = dataset.repeat(num_epochs)
if chunk_length:
dataset = dataset.map(remove_padding).flat_map(
# pylint: disable=g-long-lambda
lambda x: tf.data.Dataset.from_tensor_slices(
chunk_sequence(x, chunk_length, padding_value)))
num_chunks = tf.reduce_sum((length - 1) // chunk_length + 1)
else:
num_chunks = num_sequences
if batch_size:
dataset = dataset.shuffle(num_sequences // 2)
dataset = dataset.batch(batch_size or num_sequences)
dataset = dataset.prefetch(num_epochs)
iterator = dataset.make_initializable_iterator()
with tf.control_dependencies([iterator.initializer]):
num_batches = num_epochs * num_chunks // (batch_size or num_sequences)
return tf.scan(
# pylint: disable=g-long-lambda
lambda _1, index: consumer_fn(iterator.get_next()),
tf.range(num_batches), output_template, parallel_iterations=1)
|
python
|
def iterate_sequences(
consumer_fn, output_template, sequences, length, chunk_length=None,
batch_size=None, num_epochs=1, padding_value=0):
"""Iterate over batches of chunks of sequences for multiple epochs.
The batch dimension of the length tensor must be set because it is used to
infer buffer sizes.
Args:
consumer_fn: Function creating the operation to process the data.
output_template: Nested tensors of same shape and dtype as outputs.
sequences: Nested collection of tensors with batch and time dimension.
length: Tensor containing the length for each sequence.
chunk_length: Split sequences into chunks of this size; optional.
batch_size: Split epochs into batches of this size; optional.
num_epochs: How many times to repeat over the data.
padding_value: Value used for padding the last chunk after the sequence.
Raises:
ValueError: Unknown batch size of the length tensor.
Returns:
Concatenated nested tensors returned by the consumer.
"""
if not length.shape[0].value:
raise ValueError('Batch size of length tensor must be set.')
num_sequences = length.shape[0].value
sequences = dict(sequence=sequences, length=length)
dataset = tf.data.Dataset.from_tensor_slices(sequences)
dataset = dataset.repeat(num_epochs)
if chunk_length:
dataset = dataset.map(remove_padding).flat_map(
# pylint: disable=g-long-lambda
lambda x: tf.data.Dataset.from_tensor_slices(
chunk_sequence(x, chunk_length, padding_value)))
num_chunks = tf.reduce_sum((length - 1) // chunk_length + 1)
else:
num_chunks = num_sequences
if batch_size:
dataset = dataset.shuffle(num_sequences // 2)
dataset = dataset.batch(batch_size or num_sequences)
dataset = dataset.prefetch(num_epochs)
iterator = dataset.make_initializable_iterator()
with tf.control_dependencies([iterator.initializer]):
num_batches = num_epochs * num_chunks // (batch_size or num_sequences)
return tf.scan(
# pylint: disable=g-long-lambda
lambda _1, index: consumer_fn(iterator.get_next()),
tf.range(num_batches), output_template, parallel_iterations=1)
|
[
"def",
"iterate_sequences",
"(",
"consumer_fn",
",",
"output_template",
",",
"sequences",
",",
"length",
",",
"chunk_length",
"=",
"None",
",",
"batch_size",
"=",
"None",
",",
"num_epochs",
"=",
"1",
",",
"padding_value",
"=",
"0",
")",
":",
"if",
"not",
"length",
".",
"shape",
"[",
"0",
"]",
".",
"value",
":",
"raise",
"ValueError",
"(",
"'Batch size of length tensor must be set.'",
")",
"num_sequences",
"=",
"length",
".",
"shape",
"[",
"0",
"]",
".",
"value",
"sequences",
"=",
"dict",
"(",
"sequence",
"=",
"sequences",
",",
"length",
"=",
"length",
")",
"dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"sequences",
")",
"dataset",
"=",
"dataset",
".",
"repeat",
"(",
"num_epochs",
")",
"if",
"chunk_length",
":",
"dataset",
"=",
"dataset",
".",
"map",
"(",
"remove_padding",
")",
".",
"flat_map",
"(",
"# pylint: disable=g-long-lambda",
"lambda",
"x",
":",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"chunk_sequence",
"(",
"x",
",",
"chunk_length",
",",
"padding_value",
")",
")",
")",
"num_chunks",
"=",
"tf",
".",
"reduce_sum",
"(",
"(",
"length",
"-",
"1",
")",
"//",
"chunk_length",
"+",
"1",
")",
"else",
":",
"num_chunks",
"=",
"num_sequences",
"if",
"batch_size",
":",
"dataset",
"=",
"dataset",
".",
"shuffle",
"(",
"num_sequences",
"//",
"2",
")",
"dataset",
"=",
"dataset",
".",
"batch",
"(",
"batch_size",
"or",
"num_sequences",
")",
"dataset",
"=",
"dataset",
".",
"prefetch",
"(",
"num_epochs",
")",
"iterator",
"=",
"dataset",
".",
"make_initializable_iterator",
"(",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"iterator",
".",
"initializer",
"]",
")",
":",
"num_batches",
"=",
"num_epochs",
"*",
"num_chunks",
"//",
"(",
"batch_size",
"or",
"num_sequences",
")",
"return",
"tf",
".",
"scan",
"(",
"# pylint: disable=g-long-lambda",
"lambda",
"_1",
",",
"index",
":",
"consumer_fn",
"(",
"iterator",
".",
"get_next",
"(",
")",
")",
",",
"tf",
".",
"range",
"(",
"num_batches",
")",
",",
"output_template",
",",
"parallel_iterations",
"=",
"1",
")"
] |
Iterate over batches of chunks of sequences for multiple epochs.
The batch dimension of the length tensor must be set because it is used to
infer buffer sizes.
Args:
consumer_fn: Function creating the operation to process the data.
output_template: Nested tensors of same shape and dtype as outputs.
sequences: Nested collection of tensors with batch and time dimension.
length: Tensor containing the length for each sequence.
chunk_length: Split sequences into chunks of this size; optional.
batch_size: Split epochs into batches of this size; optional.
num_epochs: How many times to repeat over the data.
padding_value: Value used for padding the last chunk after the sequence.
Raises:
ValueError: Unknown batch size of the length tensor.
Returns:
Concatenated nested tensors returned by the consumer.
|
[
"Iterate",
"over",
"batches",
"of",
"chunks",
"of",
"sequences",
"for",
"multiple",
"epochs",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/iterate_sequences.py#L26-L74
|
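The number of tf.scan iterations above follows from the sequence lengths, chunk length, batch size, and epoch count. The same bookkeeping in plain Python with made-up lengths:

lengths = [120, 50, 200, 75]   # hypothetical per-sequence lengths
chunk_length = 50
batch_size = 2
num_epochs = 3

# Ceiling division per sequence: 3 + 1 + 4 + 2 chunks.
num_chunks = sum((length - 1) // chunk_length + 1 for length in lengths)
assert num_chunks == 10

# iterate_sequences runs the consumer once per batch of chunks per epoch.
num_batches = num_epochs * num_chunks // batch_size
assert num_batches == 15
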
6,849
|
google-research/batch-ppo
|
agents/parts/iterate_sequences.py
|
chunk_sequence
|
def chunk_sequence(sequence, chunk_length=200, padding_value=0):
"""Split a nested dict of sequence tensors into a batch of chunks.
This function does not expect a batch of sequences, but a single sequence. A
`length` key is added if it did not exist already.
Args:
sequence: Nested dict of tensors with time dimension.
chunk_length: Size of chunks the sequence will be split into.
padding_value: Value used for padding the last chunk after the sequence.
Returns:
Nested dict of sequence tensors with chunk dimension.
"""
if 'length' in sequence:
length = sequence.pop('length')
else:
length = tf.shape(tools.nested.flatten(sequence)[0])[0]
num_chunks = (length - 1) // chunk_length + 1
padding_length = chunk_length * num_chunks - length
padded = tools.nested.map(
# pylint: disable=g-long-lambda
lambda tensor: tf.concat([
tensor, 0 * tensor[:padding_length] + padding_value], 0),
sequence)
chunks = tools.nested.map(
# pylint: disable=g-long-lambda
lambda tensor: tf.reshape(
tensor, [num_chunks, chunk_length] + tensor.shape[1:].as_list()),
padded)
chunks['length'] = tf.concat([
chunk_length * tf.ones((num_chunks - 1,), dtype=tf.int32),
[chunk_length - padding_length]], 0)
return chunks
|
python
|
def chunk_sequence(sequence, chunk_length=200, padding_value=0):
"""Split a nested dict of sequence tensors into a batch of chunks.
This function does not expect a batch of sequences, but a single sequence. A
`length` key is added if it did not exist already.
Args:
sequence: Nested dict of tensors with time dimension.
chunk_length: Size of chunks the sequence will be split into.
padding_value: Value used for padding the last chunk after the sequence.
Returns:
Nested dict of sequence tensors with chunk dimension.
"""
if 'length' in sequence:
length = sequence.pop('length')
else:
length = tf.shape(tools.nested.flatten(sequence)[0])[0]
num_chunks = (length - 1) // chunk_length + 1
padding_length = chunk_length * num_chunks - length
padded = tools.nested.map(
# pylint: disable=g-long-lambda
lambda tensor: tf.concat([
tensor, 0 * tensor[:padding_length] + padding_value], 0),
sequence)
chunks = tools.nested.map(
# pylint: disable=g-long-lambda
lambda tensor: tf.reshape(
tensor, [num_chunks, chunk_length] + tensor.shape[1:].as_list()),
padded)
chunks['length'] = tf.concat([
chunk_length * tf.ones((num_chunks - 1,), dtype=tf.int32),
[chunk_length - padding_length]], 0)
return chunks
|
[
"def",
"chunk_sequence",
"(",
"sequence",
",",
"chunk_length",
"=",
"200",
",",
"padding_value",
"=",
"0",
")",
":",
"if",
"'length'",
"in",
"sequence",
":",
"length",
"=",
"sequence",
".",
"pop",
"(",
"'length'",
")",
"else",
":",
"length",
"=",
"tf",
".",
"shape",
"(",
"tools",
".",
"nested",
".",
"flatten",
"(",
"sequence",
")",
"[",
"0",
"]",
")",
"[",
"0",
"]",
"num_chunks",
"=",
"(",
"length",
"-",
"1",
")",
"//",
"chunk_length",
"+",
"1",
"padding_length",
"=",
"chunk_length",
"*",
"num_chunks",
"-",
"length",
"padded",
"=",
"tools",
".",
"nested",
".",
"map",
"(",
"# pylint: disable=g-long-lambda",
"lambda",
"tensor",
":",
"tf",
".",
"concat",
"(",
"[",
"tensor",
",",
"0",
"*",
"tensor",
"[",
":",
"padding_length",
"]",
"+",
"padding_value",
"]",
",",
"0",
")",
",",
"sequence",
")",
"chunks",
"=",
"tools",
".",
"nested",
".",
"map",
"(",
"# pylint: disable=g-long-lambda",
"lambda",
"tensor",
":",
"tf",
".",
"reshape",
"(",
"tensor",
",",
"[",
"num_chunks",
",",
"chunk_length",
"]",
"+",
"tensor",
".",
"shape",
"[",
"1",
":",
"]",
".",
"as_list",
"(",
")",
")",
",",
"padded",
")",
"chunks",
"[",
"'length'",
"]",
"=",
"tf",
".",
"concat",
"(",
"[",
"chunk_length",
"*",
"tf",
".",
"ones",
"(",
"(",
"num_chunks",
"-",
"1",
",",
")",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"[",
"chunk_length",
"-",
"padding_length",
"]",
"]",
",",
"0",
")",
"return",
"chunks"
] |
Split a nested dict of sequence tensors into a batch of chunks.
This function does not expect a batch of sequences, but a single sequence. A
`length` key is added if it did not exist already.
Args:
sequence: Nested dict of tensors with time dimension.
chunk_length: Size of chunks the sequence will be split into.
padding_value: Value used for padding the last chunk after the sequence.
Returns:
Nested dict of sequence tensors with chunk dimension.
|
[
"Split",
"a",
"nested",
"dict",
"of",
"sequence",
"tensors",
"into",
"a",
"batch",
"of",
"chunks",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/iterate_sequences.py#L77-L110
|
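The padding arithmetic in chunk_sequence determines both the number of chunks and the length entry of the final, partially filled chunk. A worked example with a hypothetical 130-frame sequence and 50-frame chunks:

length = 130        # hypothetical sequence length
chunk_length = 50

num_chunks = (length - 1) // chunk_length + 1        # 3 chunks
padding_length = chunk_length * num_chunks - length  # 20 padded frames
chunk_lengths = [chunk_length] * (num_chunks - 1) + [chunk_length - padding_length]

assert num_chunks == 3
assert padding_length == 20
assert chunk_lengths == [50, 50, 30]  # matches the returned 'length' entry
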
6,850
|
google-research/batch-ppo
|
agents/parts/iterate_sequences.py
|
remove_padding
|
def remove_padding(sequence):
"""Selects the used frames of a sequence, up to its length.
This function does not expect a batch of sequences, but a single sequence.
  The sequence must be a dict with `length` key, which will be removed from the
result.
Args:
sequence: Nested dict of tensors with time dimension.
Returns:
Nested dict of tensors with padding elements and `length` key removed.
"""
length = sequence.pop('length')
sequence = tools.nested.map(lambda tensor: tensor[:length], sequence)
return sequence
|
python
|
def remove_padding(sequence):
"""Selects the used frames of a sequence, up to its length.
This function does not expect a batch of sequences, but a single sequence.
  The sequence must be a dict with `length` key, which will be removed from the
result.
Args:
sequence: Nested dict of tensors with time dimension.
Returns:
Nested dict of tensors with padding elements and `length` key removed.
"""
length = sequence.pop('length')
sequence = tools.nested.map(lambda tensor: tensor[:length], sequence)
return sequence
|
[
"def",
"remove_padding",
"(",
"sequence",
")",
":",
"length",
"=",
"sequence",
".",
"pop",
"(",
"'length'",
")",
"sequence",
"=",
"tools",
".",
"nested",
".",
"map",
"(",
"lambda",
"tensor",
":",
"tensor",
"[",
":",
"length",
"]",
",",
"sequence",
")",
"return",
"sequence"
] |
Selects the used frames of a sequence, up to its length.
This function does not expect a batch of sequences, but a single sequence.
The sequence must be a dict with `length` key, which will be removed from the
result.
Args:
sequence: Nested dict of tensors with time dimension.
Returns:
Nested dict of tensors with padding elements and `length` key removed.
|
[
"Selects",
"the",
"used",
"frames",
"of",
"a",
"sequence",
"up",
"to",
"its",
"length",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/iterate_sequences.py#L113-L128
|
6,851
|
google-research/batch-ppo
|
agents/parts/normalize.py
|
StreamingNormalize.transform
|
def transform(self, value):
"""Normalize a single or batch tensor.
Applies the activated transformations in the constructor using current
estimates of mean and variance.
Args:
value: Batch or single value tensor.
Returns:
Normalized batch or single value tensor.
"""
with tf.name_scope(self._name + '/transform'):
no_batch_dim = value.shape.ndims == self._mean.shape.ndims
if no_batch_dim:
# Add a batch dimension if necessary.
value = value[None, ...]
if self._center:
value -= self._mean[None, ...]
if self._scale:
# We cannot scale before seeing at least two samples.
value /= tf.cond(
self._count > 1, lambda: self._std() + 1e-8,
lambda: tf.ones_like(self._var_sum))[None]
if self._clip:
value = tf.clip_by_value(value, -self._clip, self._clip)
# Remove batch dimension if necessary.
if no_batch_dim:
value = value[0]
return tf.check_numerics(value, 'value')
|
python
|
def transform(self, value):
"""Normalize a single or batch tensor.
Applies the activated transformations in the constructor using current
estimates of mean and variance.
Args:
value: Batch or single value tensor.
Returns:
Normalized batch or single value tensor.
"""
with tf.name_scope(self._name + '/transform'):
no_batch_dim = value.shape.ndims == self._mean.shape.ndims
if no_batch_dim:
# Add a batch dimension if necessary.
value = value[None, ...]
if self._center:
value -= self._mean[None, ...]
if self._scale:
# We cannot scale before seeing at least two samples.
value /= tf.cond(
self._count > 1, lambda: self._std() + 1e-8,
lambda: tf.ones_like(self._var_sum))[None]
if self._clip:
value = tf.clip_by_value(value, -self._clip, self._clip)
# Remove batch dimension if necessary.
if no_batch_dim:
value = value[0]
return tf.check_numerics(value, 'value')
|
[
"def",
"transform",
"(",
"self",
",",
"value",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"self",
".",
"_name",
"+",
"'/transform'",
")",
":",
"no_batch_dim",
"=",
"value",
".",
"shape",
".",
"ndims",
"==",
"self",
".",
"_mean",
".",
"shape",
".",
"ndims",
"if",
"no_batch_dim",
":",
"# Add a batch dimension if necessary.",
"value",
"=",
"value",
"[",
"None",
",",
"...",
"]",
"if",
"self",
".",
"_center",
":",
"value",
"-=",
"self",
".",
"_mean",
"[",
"None",
",",
"...",
"]",
"if",
"self",
".",
"_scale",
":",
"# We cannot scale before seeing at least two samples.",
"value",
"/=",
"tf",
".",
"cond",
"(",
"self",
".",
"_count",
">",
"1",
",",
"lambda",
":",
"self",
".",
"_std",
"(",
")",
"+",
"1e-8",
",",
"lambda",
":",
"tf",
".",
"ones_like",
"(",
"self",
".",
"_var_sum",
")",
")",
"[",
"None",
"]",
"if",
"self",
".",
"_clip",
":",
"value",
"=",
"tf",
".",
"clip_by_value",
"(",
"value",
",",
"-",
"self",
".",
"_clip",
",",
"self",
".",
"_clip",
")",
"# Remove batch dimension if necessary.",
"if",
"no_batch_dim",
":",
"value",
"=",
"value",
"[",
"0",
"]",
"return",
"tf",
".",
"check_numerics",
"(",
"value",
",",
"'value'",
")"
] |
Normalize a single or batch tensor.
Applies the activated transformations in the constructor using current
estimates of mean and variance.
Args:
value: Batch or single value tensor.
Returns:
Normalized batch or single value tensor.
|
[
"Normalize",
"a",
"single",
"or",
"batch",
"tensor",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L50-L79
|
6,852
|
google-research/batch-ppo
|
agents/parts/normalize.py
|
StreamingNormalize.update
|
def update(self, value):
"""Update the mean and variance estimates.
Args:
value: Batch or single value tensor.
Returns:
Summary tensor.
"""
with tf.name_scope(self._name + '/update'):
if value.shape.ndims == self._mean.shape.ndims:
# Add a batch dimension if necessary.
value = value[None, ...]
count = tf.shape(value)[0]
with tf.control_dependencies([self._count.assign_add(count)]):
step = tf.cast(self._count, tf.float32)
mean_delta = tf.reduce_sum(value - self._mean[None, ...], 0)
new_mean = self._mean + mean_delta / step
new_mean = tf.cond(self._count > 1, lambda: new_mean, lambda: value[0])
var_delta = (
value - self._mean[None, ...]) * (value - new_mean[None, ...])
new_var_sum = self._var_sum + tf.reduce_sum(var_delta, 0)
with tf.control_dependencies([new_mean, new_var_sum]):
update = self._mean.assign(new_mean), self._var_sum.assign(new_var_sum)
with tf.control_dependencies(update):
if value.shape.ndims == 1:
value = tf.reduce_mean(value)
return self._summary('value', tf.reduce_mean(value))
|
python
|
def update(self, value):
"""Update the mean and variance estimates.
Args:
value: Batch or single value tensor.
Returns:
Summary tensor.
"""
with tf.name_scope(self._name + '/update'):
if value.shape.ndims == self._mean.shape.ndims:
# Add a batch dimension if necessary.
value = value[None, ...]
count = tf.shape(value)[0]
with tf.control_dependencies([self._count.assign_add(count)]):
step = tf.cast(self._count, tf.float32)
mean_delta = tf.reduce_sum(value - self._mean[None, ...], 0)
new_mean = self._mean + mean_delta / step
new_mean = tf.cond(self._count > 1, lambda: new_mean, lambda: value[0])
var_delta = (
value - self._mean[None, ...]) * (value - new_mean[None, ...])
new_var_sum = self._var_sum + tf.reduce_sum(var_delta, 0)
with tf.control_dependencies([new_mean, new_var_sum]):
update = self._mean.assign(new_mean), self._var_sum.assign(new_var_sum)
with tf.control_dependencies(update):
if value.shape.ndims == 1:
value = tf.reduce_mean(value)
return self._summary('value', tf.reduce_mean(value))
|
[
"def",
"update",
"(",
"self",
",",
"value",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"self",
".",
"_name",
"+",
"'/update'",
")",
":",
"if",
"value",
".",
"shape",
".",
"ndims",
"==",
"self",
".",
"_mean",
".",
"shape",
".",
"ndims",
":",
"# Add a batch dimension if necessary.",
"value",
"=",
"value",
"[",
"None",
",",
"...",
"]",
"count",
"=",
"tf",
".",
"shape",
"(",
"value",
")",
"[",
"0",
"]",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"self",
".",
"_count",
".",
"assign_add",
"(",
"count",
")",
"]",
")",
":",
"step",
"=",
"tf",
".",
"cast",
"(",
"self",
".",
"_count",
",",
"tf",
".",
"float32",
")",
"mean_delta",
"=",
"tf",
".",
"reduce_sum",
"(",
"value",
"-",
"self",
".",
"_mean",
"[",
"None",
",",
"...",
"]",
",",
"0",
")",
"new_mean",
"=",
"self",
".",
"_mean",
"+",
"mean_delta",
"/",
"step",
"new_mean",
"=",
"tf",
".",
"cond",
"(",
"self",
".",
"_count",
">",
"1",
",",
"lambda",
":",
"new_mean",
",",
"lambda",
":",
"value",
"[",
"0",
"]",
")",
"var_delta",
"=",
"(",
"value",
"-",
"self",
".",
"_mean",
"[",
"None",
",",
"...",
"]",
")",
"*",
"(",
"value",
"-",
"new_mean",
"[",
"None",
",",
"...",
"]",
")",
"new_var_sum",
"=",
"self",
".",
"_var_sum",
"+",
"tf",
".",
"reduce_sum",
"(",
"var_delta",
",",
"0",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"new_mean",
",",
"new_var_sum",
"]",
")",
":",
"update",
"=",
"self",
".",
"_mean",
".",
"assign",
"(",
"new_mean",
")",
",",
"self",
".",
"_var_sum",
".",
"assign",
"(",
"new_var_sum",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"update",
")",
":",
"if",
"value",
".",
"shape",
".",
"ndims",
"==",
"1",
":",
"value",
"=",
"tf",
".",
"reduce_mean",
"(",
"value",
")",
"return",
"self",
".",
"_summary",
"(",
"'value'",
",",
"tf",
".",
"reduce_mean",
"(",
"value",
")",
")"
] |
Update the mean and variance estimates.
Args:
value: Batch or single value tensor.
Returns:
Summary tensor.
|
[
"Update",
"the",
"mean",
"and",
"variance",
"estimates",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L81-L108
|
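update maintains a running count, mean, and sum of squared deviations, in the spirit of Welford's online algorithm extended to batches. A scalar plain-Python analogue of the same arithmetic; the state triple stands in for self._count, self._mean, and self._var_sum, and the batch values are invented:

def streaming_update(count, mean, var_sum, batch):
  # One batch update mirroring the tensor ops in StreamingNormalize.update.
  count += len(batch)
  mean_delta = sum(v - mean for v in batch)
  new_mean = (mean + mean_delta / count) if count > 1 else batch[0]
  var_sum += sum((v - mean) * (v - new_mean) for v in batch)
  return count, new_mean, var_sum

state = (0, 0.0, 0.0)
state = streaming_update(*state, batch=[1.0, 3.0])
state = streaming_update(*state, batch=[5.0])
count, mean, var_sum = state
assert mean == 3.0
assert var_sum / (count - 1) == 4.0  # sample variance of 1, 3, 5
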
6,853
|
google-research/batch-ppo
|
agents/parts/normalize.py
|
StreamingNormalize.reset
|
def reset(self):
"""Reset the estimates of mean and variance.
Resets the full state of this class.
Returns:
Operation.
"""
with tf.name_scope(self._name + '/reset'):
return tf.group(
self._count.assign(0),
self._mean.assign(tf.zeros_like(self._mean)),
self._var_sum.assign(tf.zeros_like(self._var_sum)))
|
python
|
def reset(self):
"""Reset the estimates of mean and variance.
Resets the full state of this class.
Returns:
Operation.
"""
with tf.name_scope(self._name + '/reset'):
return tf.group(
self._count.assign(0),
self._mean.assign(tf.zeros_like(self._mean)),
self._var_sum.assign(tf.zeros_like(self._var_sum)))
|
[
"def",
"reset",
"(",
"self",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"self",
".",
"_name",
"+",
"'/reset'",
")",
":",
"return",
"tf",
".",
"group",
"(",
"self",
".",
"_count",
".",
"assign",
"(",
"0",
")",
",",
"self",
".",
"_mean",
".",
"assign",
"(",
"tf",
".",
"zeros_like",
"(",
"self",
".",
"_mean",
")",
")",
",",
"self",
".",
"_var_sum",
".",
"assign",
"(",
"tf",
".",
"zeros_like",
"(",
"self",
".",
"_var_sum",
")",
")",
")"
] |
Reset the estimates of mean and variance.
Resets the full state of this class.
Returns:
Operation.
|
[
"Reset",
"the",
"estimates",
"of",
"mean",
"and",
"variance",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L110-L122
|
6,854
|
google-research/batch-ppo
|
agents/parts/normalize.py
|
StreamingNormalize.summary
|
def summary(self):
"""Summary string of mean and standard deviation.
Returns:
Summary tensor.
"""
with tf.name_scope(self._name + '/summary'):
mean_summary = tf.cond(
self._count > 0, lambda: self._summary('mean', self._mean), str)
std_summary = tf.cond(
self._count > 1, lambda: self._summary('stddev', self._std()), str)
return tf.summary.merge([mean_summary, std_summary])
|
python
|
def summary(self):
"""Summary string of mean and standard deviation.
Returns:
Summary tensor.
"""
with tf.name_scope(self._name + '/summary'):
mean_summary = tf.cond(
self._count > 0, lambda: self._summary('mean', self._mean), str)
std_summary = tf.cond(
self._count > 1, lambda: self._summary('stddev', self._std()), str)
return tf.summary.merge([mean_summary, std_summary])
|
[
"def",
"summary",
"(",
"self",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"self",
".",
"_name",
"+",
"'/summary'",
")",
":",
"mean_summary",
"=",
"tf",
".",
"cond",
"(",
"self",
".",
"_count",
">",
"0",
",",
"lambda",
":",
"self",
".",
"_summary",
"(",
"'mean'",
",",
"self",
".",
"_mean",
")",
",",
"str",
")",
"std_summary",
"=",
"tf",
".",
"cond",
"(",
"self",
".",
"_count",
">",
"1",
",",
"lambda",
":",
"self",
".",
"_summary",
"(",
"'stddev'",
",",
"self",
".",
"_std",
"(",
")",
")",
",",
"str",
")",
"return",
"tf",
".",
"summary",
".",
"merge",
"(",
"[",
"mean_summary",
",",
"std_summary",
"]",
")"
] |
Summary string of mean and standard deviation.
Returns:
Summary tensor.
|
[
"Summary",
"string",
"of",
"mean",
"and",
"standard",
"deviation",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L124-L135
|
6,855
|
google-research/batch-ppo
|
agents/parts/normalize.py
|
StreamingNormalize._std
|
def _std(self):
"""Computes the current estimate of the standard deviation.
Note that the standard deviation is not defined until at least two samples
were seen.
Returns:
    Tensor of the current standard deviation.
"""
variance = tf.cond(
self._count > 1,
lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
lambda: tf.ones_like(self._var_sum) * float('nan'))
# The epsilon corrects for small negative variance values caused by
# the algorithm. It was empirically chosen to work with all environments
# tested.
return tf.sqrt(variance + 1e-4)
|
python
|
def _std(self):
"""Computes the current estimate of the standard deviation.
Note that the standard deviation is not defined until at least two samples
were seen.
Returns:
    Tensor of the current standard deviation.
"""
variance = tf.cond(
self._count > 1,
lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
lambda: tf.ones_like(self._var_sum) * float('nan'))
# The epsilon corrects for small negative variance values caused by
# the algorithm. It was empirically chosen to work with all environments
# tested.
return tf.sqrt(variance + 1e-4)
|
[
"def",
"_std",
"(",
"self",
")",
":",
"variance",
"=",
"tf",
".",
"cond",
"(",
"self",
".",
"_count",
">",
"1",
",",
"lambda",
":",
"self",
".",
"_var_sum",
"/",
"tf",
".",
"cast",
"(",
"self",
".",
"_count",
"-",
"1",
",",
"tf",
".",
"float32",
")",
",",
"lambda",
":",
"tf",
".",
"ones_like",
"(",
"self",
".",
"_var_sum",
")",
"*",
"float",
"(",
"'nan'",
")",
")",
"# The epsilon corrects for small negative variance values caused by",
"# the algorithm. It was empirically chosen to work with all environments",
"# tested.",
"return",
"tf",
".",
"sqrt",
"(",
"variance",
"+",
"1e-4",
")"
] |
Computes the current estimate of the standard deviation.
Note that the standard deviation is not defined until at least two samples
were seen.
Returns:
    Tensor of the current standard deviation.
|
[
"Computes",
"the",
"current",
"estimate",
"of",
"the",
"standard",
"deviation",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L137-L153
|
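Continuing the scalar analogue sketched after the update record, the estimate above divides the accumulated squared deviations by count - 1 and adds a small epsilon before the square root:

import math

count, var_sum = 3, 8.0            # state after the updates sketched above
variance = var_sum / (count - 1)   # 4.0, the sample variance
std = math.sqrt(variance + 1e-4)   # epsilon guards against tiny negatives
assert abs(std - 2.0) < 1e-4
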
6,856
|
google-research/batch-ppo
|
agents/parts/normalize.py
|
StreamingNormalize._summary
|
def _summary(self, name, tensor):
"""Create a scalar or histogram summary matching the rank of the tensor.
Args:
name: Name for the summary.
tensor: Tensor to summarize.
Returns:
Summary tensor.
"""
if tensor.shape.ndims == 0:
return tf.summary.scalar(name, tensor)
else:
return tf.summary.histogram(name, tensor)
|
python
|
def _summary(self, name, tensor):
"""Create a scalar or histogram summary matching the rank of the tensor.
Args:
name: Name for the summary.
tensor: Tensor to summarize.
Returns:
Summary tensor.
"""
if tensor.shape.ndims == 0:
return tf.summary.scalar(name, tensor)
else:
return tf.summary.histogram(name, tensor)
|
[
"def",
"_summary",
"(",
"self",
",",
"name",
",",
"tensor",
")",
":",
"if",
"tensor",
".",
"shape",
".",
"ndims",
"==",
"0",
":",
"return",
"tf",
".",
"summary",
".",
"scalar",
"(",
"name",
",",
"tensor",
")",
"else",
":",
"return",
"tf",
".",
"summary",
".",
"histogram",
"(",
"name",
",",
"tensor",
")"
] |
Create a scalar or histogram summary matching the rank of the tensor.
Args:
name: Name for the summary.
tensor: Tensor to summarize.
Returns:
Summary tensor.
|
[
"Create",
"a",
"scalar",
"or",
"histogram",
"summary",
"matching",
"the",
"rank",
"of",
"the",
"tensor",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/normalize.py#L155-L168
|
6,857
|
google-research/batch-ppo
|
agents/parts/memory.py
|
EpisodeMemory.length
|
def length(self, rows=None):
"""Tensor holding the current length of episodes.
Args:
rows: Episodes to select length from, defaults to all.
Returns:
Batch tensor of sequence lengths.
"""
rows = tf.range(self._capacity) if rows is None else rows
return tf.gather(self._length, rows)
|
python
|
def length(self, rows=None):
"""Tensor holding the current length of episodes.
Args:
rows: Episodes to select length from, defaults to all.
Returns:
Batch tensor of sequence lengths.
"""
rows = tf.range(self._capacity) if rows is None else rows
return tf.gather(self._length, rows)
|
[
"def",
"length",
"(",
"self",
",",
"rows",
"=",
"None",
")",
":",
"rows",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"_capacity",
")",
"if",
"rows",
"is",
"None",
"else",
"rows",
"return",
"tf",
".",
"gather",
"(",
"self",
".",
"_length",
",",
"rows",
")"
] |
Tensor holding the current length of episodes.
Args:
rows: Episodes to select length from, defaults to all.
Returns:
Batch tensor of sequence lengths.
|
[
"Tensor",
"holding",
"the",
"current",
"length",
"of",
"episodes",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/memory.py#L52-L62
|
6,858
|
google-research/batch-ppo
|
agents/parts/memory.py
|
EpisodeMemory.append
|
def append(self, transitions, rows=None):
"""Append a batch of transitions to rows of the memory.
Args:
transitions: Tuple of transition quantities with batch dimension.
rows: Episodes to append to, defaults to all.
Returns:
Operation.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
assert_capacity = tf.assert_less(
rows, self._capacity,
message='capacity exceeded')
with tf.control_dependencies([assert_capacity]):
assert_max_length = tf.assert_less(
tf.gather(self._length, rows), self._max_length,
message='max length exceeded')
with tf.control_dependencies([assert_max_length]):
timestep = tf.gather(self._length, rows)
indices = tf.stack([rows, timestep], 1)
append_ops = tools.nested.map(
lambda var, val: tf.scatter_nd_update(var, indices, val),
self._buffers, transitions, flatten=True)
with tf.control_dependencies(append_ops):
episode_mask = tf.reduce_sum(tf.one_hot(
rows, self._capacity, dtype=tf.int32), 0)
return self._length.assign_add(episode_mask)
|
python
|
def append(self, transitions, rows=None):
"""Append a batch of transitions to rows of the memory.
Args:
transitions: Tuple of transition quantities with batch dimension.
rows: Episodes to append to, defaults to all.
Returns:
Operation.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
assert_capacity = tf.assert_less(
rows, self._capacity,
message='capacity exceeded')
with tf.control_dependencies([assert_capacity]):
assert_max_length = tf.assert_less(
tf.gather(self._length, rows), self._max_length,
message='max length exceeded')
with tf.control_dependencies([assert_max_length]):
timestep = tf.gather(self._length, rows)
indices = tf.stack([rows, timestep], 1)
append_ops = tools.nested.map(
lambda var, val: tf.scatter_nd_update(var, indices, val),
self._buffers, transitions, flatten=True)
with tf.control_dependencies(append_ops):
episode_mask = tf.reduce_sum(tf.one_hot(
rows, self._capacity, dtype=tf.int32), 0)
return self._length.assign_add(episode_mask)
|
[
"def",
"append",
"(",
"self",
",",
"transitions",
",",
"rows",
"=",
"None",
")",
":",
"rows",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"_capacity",
")",
"if",
"rows",
"is",
"None",
"else",
"rows",
"assert",
"rows",
".",
"shape",
".",
"ndims",
"==",
"1",
"assert_capacity",
"=",
"tf",
".",
"assert_less",
"(",
"rows",
",",
"self",
".",
"_capacity",
",",
"message",
"=",
"'capacity exceeded'",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"assert_capacity",
"]",
")",
":",
"assert_max_length",
"=",
"tf",
".",
"assert_less",
"(",
"tf",
".",
"gather",
"(",
"self",
".",
"_length",
",",
"rows",
")",
",",
"self",
".",
"_max_length",
",",
"message",
"=",
"'max length exceeded'",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"assert_max_length",
"]",
")",
":",
"timestep",
"=",
"tf",
".",
"gather",
"(",
"self",
".",
"_length",
",",
"rows",
")",
"indices",
"=",
"tf",
".",
"stack",
"(",
"[",
"rows",
",",
"timestep",
"]",
",",
"1",
")",
"append_ops",
"=",
"tools",
".",
"nested",
".",
"map",
"(",
"lambda",
"var",
",",
"val",
":",
"tf",
".",
"scatter_nd_update",
"(",
"var",
",",
"indices",
",",
"val",
")",
",",
"self",
".",
"_buffers",
",",
"transitions",
",",
"flatten",
"=",
"True",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"append_ops",
")",
":",
"episode_mask",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"one_hot",
"(",
"rows",
",",
"self",
".",
"_capacity",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"0",
")",
"return",
"self",
".",
"_length",
".",
"assign_add",
"(",
"episode_mask",
")"
] |
Append a batch of transitions to rows of the memory.
Args:
transitions: Tuple of transition quantities with batch dimension.
rows: Episodes to append to, defaults to all.
Returns:
Operation.
|
[
"Append",
"a",
"batch",
"of",
"transitions",
"to",
"rows",
"of",
"the",
"memory",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/memory.py#L64-L92
|
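append writes one transition per selected row at that row's current length and then increments the lengths of exactly those rows. A compact Python analogue with lists standing in for the tensor buffers; the capacity, max_length, and values are invented:

capacity, max_length = 3, 4
buffers = {'reward': [[0.0] * max_length for _ in range(capacity)]}
length = [0] * capacity

def append(transitions, rows):
  # Mirrors the scatter_nd_update at (row, length[row]) plus the
  # one-hot length increment in EpisodeMemory.append.
  for row, value in zip(rows, transitions['reward']):
    assert row < capacity and length[row] < max_length
    buffers['reward'][row][length[row]] = value
  for row in rows:
    length[row] += 1

append({'reward': [1.0, 2.0]}, rows=[0, 2])
append({'reward': [3.0]}, rows=[0])
assert length == [2, 0, 1]
assert buffers['reward'][0][:2] == [1.0, 3.0]
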
6,859
|
google-research/batch-ppo
|
agents/parts/memory.py
|
EpisodeMemory.replace
|
def replace(self, episodes, length, rows=None):
"""Replace full episodes.
Args:
episodes: Tuple of transition quantities with batch and time dimensions.
length: Batch of sequence lengths.
rows: Episodes to replace, defaults to all.
Returns:
Operation.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
assert_capacity = tf.assert_less(
rows, self._capacity, message='capacity exceeded')
with tf.control_dependencies([assert_capacity]):
assert_max_length = tf.assert_less_equal(
length, self._max_length, message='max length exceeded')
with tf.control_dependencies([assert_max_length]):
replace_ops = tools.nested.map(
lambda var, val: tf.scatter_update(var, rows, val),
self._buffers, episodes, flatten=True)
with tf.control_dependencies(replace_ops):
return tf.scatter_update(self._length, rows, length)
|
python
|
def replace(self, episodes, length, rows=None):
"""Replace full episodes.
Args:
episodes: Tuple of transition quantities with batch and time dimensions.
length: Batch of sequence lengths.
rows: Episodes to replace, defaults to all.
Returns:
Operation.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
assert_capacity = tf.assert_less(
rows, self._capacity, message='capacity exceeded')
with tf.control_dependencies([assert_capacity]):
assert_max_length = tf.assert_less_equal(
length, self._max_length, message='max length exceeded')
with tf.control_dependencies([assert_max_length]):
replace_ops = tools.nested.map(
lambda var, val: tf.scatter_update(var, rows, val),
self._buffers, episodes, flatten=True)
with tf.control_dependencies(replace_ops):
return tf.scatter_update(self._length, rows, length)
|
[
"def",
"replace",
"(",
"self",
",",
"episodes",
",",
"length",
",",
"rows",
"=",
"None",
")",
":",
"rows",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"_capacity",
")",
"if",
"rows",
"is",
"None",
"else",
"rows",
"assert",
"rows",
".",
"shape",
".",
"ndims",
"==",
"1",
"assert_capacity",
"=",
"tf",
".",
"assert_less",
"(",
"rows",
",",
"self",
".",
"_capacity",
",",
"message",
"=",
"'capacity exceeded'",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"assert_capacity",
"]",
")",
":",
"assert_max_length",
"=",
"tf",
".",
"assert_less_equal",
"(",
"length",
",",
"self",
".",
"_max_length",
",",
"message",
"=",
"'max length exceeded'",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"assert_max_length",
"]",
")",
":",
"replace_ops",
"=",
"tools",
".",
"nested",
".",
"map",
"(",
"lambda",
"var",
",",
"val",
":",
"tf",
".",
"scatter_update",
"(",
"var",
",",
"rows",
",",
"val",
")",
",",
"self",
".",
"_buffers",
",",
"episodes",
",",
"flatten",
"=",
"True",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"replace_ops",
")",
":",
"return",
"tf",
".",
"scatter_update",
"(",
"self",
".",
"_length",
",",
"rows",
",",
"length",
")"
] |
Replace full episodes.
Args:
episodes: Tuple of transition quantities with batch and time dimensions.
length: Batch of sequence lengths.
rows: Episodes to replace, defaults to all.
Returns:
Operation.
|
[
"Replace",
"full",
"episodes",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/memory.py#L94-L117
|
6,860
|
google-research/batch-ppo
|
agents/parts/memory.py
|
EpisodeMemory.data
|
def data(self, rows=None):
"""Access a batch of episodes from the memory.
Padding elements after the length of each episode are unspecified and might
contain old data.
Args:
rows: Episodes to select, defaults to all.
Returns:
Tuple containing a tuple of transition quantities with batch and time
dimensions, and a batch of sequence lengths.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
episode = tools.nested.map(lambda var: tf.gather(var, rows), self._buffers)
length = tf.gather(self._length, rows)
return episode, length
|
python
|
def data(self, rows=None):
"""Access a batch of episodes from the memory.
Padding elements after the length of each episode are unspecified and might
contain old data.
Args:
rows: Episodes to select, defaults to all.
Returns:
Tuple containing a tuple of transition quantities with batch and time
dimensions, and a batch of sequence lengths.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
episode = tools.nested.map(lambda var: tf.gather(var, rows), self._buffers)
length = tf.gather(self._length, rows)
return episode, length
|
[
"def",
"data",
"(",
"self",
",",
"rows",
"=",
"None",
")",
":",
"rows",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"_capacity",
")",
"if",
"rows",
"is",
"None",
"else",
"rows",
"assert",
"rows",
".",
"shape",
".",
"ndims",
"==",
"1",
"episode",
"=",
"tools",
".",
"nested",
".",
"map",
"(",
"lambda",
"var",
":",
"tf",
".",
"gather",
"(",
"var",
",",
"rows",
")",
",",
"self",
".",
"_buffers",
")",
"length",
"=",
"tf",
".",
"gather",
"(",
"self",
".",
"_length",
",",
"rows",
")",
"return",
"episode",
",",
"length"
] |
Access a batch of episodes from the memory.
Padding elements after the length of each episode are unspecified and might
contain old data.
Args:
rows: Episodes to select, defaults to all.
Returns:
Tuple containing a tuple of transition quantities with batch and time
dimensions, and a batch of sequence lengths.
|
[
"Access",
"a",
"batch",
"of",
"episodes",
"from",
"the",
"memory",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/memory.py#L119-L136
|
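Because `data()` hands back fixed-size buffers plus per-row lengths, padding past each episode's length must be masked before any reduction. A small NumPy illustration of consuming such an (episode, length) pair; the numbers are made up for the example.

import numpy as np

# A toy (episode, length) pair as data() would return: rewards padded to max_length.
reward = np.array([[1., 2., 9., 9.],   # episode 0 has length 2; the 9s are stale padding
                   [3., 4., 5., 9.]])  # episode 1 has length 3
length = np.array([2, 3])

# Build a [batch, time] mask that is 1 inside each episode and 0 on padding.
mask = np.arange(reward.shape[1])[None, :] < length[:, None]

episode_return = (reward * mask).sum(axis=1)  # padding contributes nothing
print(episode_return)  # [ 3. 12.]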
6,861
|
google-research/batch-ppo
|
agents/parts/memory.py
|
EpisodeMemory.clear
|
def clear(self, rows=None):
"""Reset episodes in the memory.
Internally, this only sets their lengths to zero. The memory entries will
be overridden by future calls to append() or replace().
Args:
rows: Episodes to clear, defaults to all.
Returns:
Operation.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
return tf.scatter_update(self._length, rows, tf.zeros_like(rows))
|
python
|
def clear(self, rows=None):
"""Reset episodes in the memory.
Internally, this only sets their lengths to zero. The memory entries will
be overridden by future calls to append() or replace().
Args:
rows: Episodes to clear, defaults to all.
Returns:
Operation.
"""
rows = tf.range(self._capacity) if rows is None else rows
assert rows.shape.ndims == 1
return tf.scatter_update(self._length, rows, tf.zeros_like(rows))
|
[
"def",
"clear",
"(",
"self",
",",
"rows",
"=",
"None",
")",
":",
"rows",
"=",
"tf",
".",
"range",
"(",
"self",
".",
"_capacity",
")",
"if",
"rows",
"is",
"None",
"else",
"rows",
"assert",
"rows",
".",
"shape",
".",
"ndims",
"==",
"1",
"return",
"tf",
".",
"scatter_update",
"(",
"self",
".",
"_length",
",",
"rows",
",",
"tf",
".",
"zeros_like",
"(",
"rows",
")",
")"
] |
Reset episodes in the memory.
Internally, this only sets their lengths to zero. The memory entries will
be overridden by future calls to append() or replace().
Args:
rows: Episodes to clear, defaults to all.
Returns:
Operation.
|
[
"Reset",
"episodes",
"in",
"the",
"memory",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/parts/memory.py#L138-L152
|
6,862
|
google-research/batch-ppo
|
agents/tools/in_graph_env.py
|
InGraphEnv._parse_shape
|
def _parse_shape(self, space):
"""Get a tensor shape from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
Shape tuple.
"""
if isinstance(space, gym.spaces.Discrete):
return ()
if isinstance(space, gym.spaces.Box):
return space.shape
raise NotImplementedError()
|
python
|
def _parse_shape(self, space):
"""Get a tensor shape from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
Shape tuple.
"""
if isinstance(space, gym.spaces.Discrete):
return ()
if isinstance(space, gym.spaces.Box):
return space.shape
raise NotImplementedError()
|
[
"def",
"_parse_shape",
"(",
"self",
",",
"space",
")",
":",
"if",
"isinstance",
"(",
"space",
",",
"gym",
".",
"spaces",
".",
"Discrete",
")",
":",
"return",
"(",
")",
"if",
"isinstance",
"(",
"space",
",",
"gym",
".",
"spaces",
".",
"Box",
")",
":",
"return",
"space",
".",
"shape",
"raise",
"NotImplementedError",
"(",
")"
] |
Get a tensor shape from an OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
Shape tuple.
|
[
"Get",
"a",
"tensor",
"shape",
"from",
"a",
"OpenAI",
"Gym",
"space",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/in_graph_env.py#L134-L150
|
6,863
|
google-research/batch-ppo
|
agents/tools/in_graph_env.py
|
InGraphEnv._parse_dtype
|
def _parse_dtype(self, space):
"""Get a tensor dtype from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
TensorFlow data type.
"""
if isinstance(space, gym.spaces.Discrete):
return tf.int32
if isinstance(space, gym.spaces.Box):
return tf.float32
raise NotImplementedError()
|
python
|
def _parse_dtype(self, space):
"""Get a tensor dtype from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
TensorFlow data type.
"""
if isinstance(space, gym.spaces.Discrete):
return tf.int32
if isinstance(space, gym.spaces.Box):
return tf.float32
raise NotImplementedError()
|
[
"def",
"_parse_dtype",
"(",
"self",
",",
"space",
")",
":",
"if",
"isinstance",
"(",
"space",
",",
"gym",
".",
"spaces",
".",
"Discrete",
")",
":",
"return",
"tf",
".",
"int32",
"if",
"isinstance",
"(",
"space",
",",
"gym",
".",
"spaces",
".",
"Box",
")",
":",
"return",
"tf",
".",
"float32",
"raise",
"NotImplementedError",
"(",
")"
] |
Get a tensor dtype from an OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
TensorFlow data type.
|
[
"Get",
"a",
"tensor",
"dtype",
"from",
"a",
"OpenAI",
"Gym",
"space",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/in_graph_env.py#L152-L168
|
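The two `_parse_*` helpers above map Gym spaces onto tensor shapes and dtypes: scalar integer actions for `Discrete`, float tensors shaped like the box for `Box`. Here is a standalone sketch of the same dispatch using stand-in space classes so it runs without Gym installed; the class names mirror `gym.spaces` but are defined here only for illustration.

import numpy as np
from dataclasses import dataclass

@dataclass
class Discrete:          # stand-in for gym.spaces.Discrete
    n: int

@dataclass
class Box:               # stand-in for gym.spaces.Box
    shape: tuple

def parse_shape(space):
    """Discrete spaces become scalars; Box spaces keep their shape."""
    if isinstance(space, Discrete):
        return ()
    if isinstance(space, Box):
        return space.shape
    raise NotImplementedError(type(space))

def parse_dtype(space):
    """Discrete spaces use integers; Box spaces use floats."""
    if isinstance(space, Discrete):
        return np.int32
    if isinstance(space, Box):
        return np.float32
    raise NotImplementedError(type(space))

print(parse_shape(Discrete(4)), parse_dtype(Discrete(4)))   # scalar shape, integer dtype
print(parse_shape(Box((3,))), parse_dtype(Box((3,))))       # (3,) shape, float dtype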
6,864
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO.begin_episode
|
def begin_episode(self, agent_indices):
"""Reset the recurrent states and stored episode.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
"""
with tf.name_scope('begin_episode/'):
if self._last_state is None:
reset_state = tf.no_op()
else:
reset_state = utility.reinit_nested_vars(
self._last_state, agent_indices)
reset_buffer = self._current_episodes.clear(agent_indices)
with tf.control_dependencies([reset_state, reset_buffer]):
return tf.constant('')
|
python
|
def begin_episode(self, agent_indices):
"""Reset the recurrent states and stored episode.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
"""
with tf.name_scope('begin_episode/'):
if self._last_state is None:
reset_state = tf.no_op()
else:
reset_state = utility.reinit_nested_vars(
self._last_state, agent_indices)
reset_buffer = self._current_episodes.clear(agent_indices)
with tf.control_dependencies([reset_state, reset_buffer]):
return tf.constant('')
|
[
"def",
"begin_episode",
"(",
"self",
",",
"agent_indices",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'begin_episode/'",
")",
":",
"if",
"self",
".",
"_last_state",
"is",
"None",
":",
"reset_state",
"=",
"tf",
".",
"no_op",
"(",
")",
"else",
":",
"reset_state",
"=",
"utility",
".",
"reinit_nested_vars",
"(",
"self",
".",
"_last_state",
",",
"agent_indices",
")",
"reset_buffer",
"=",
"self",
".",
"_current_episodes",
".",
"clear",
"(",
"agent_indices",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"reset_state",
",",
"reset_buffer",
"]",
")",
":",
"return",
"tf",
".",
"constant",
"(",
"''",
")"
] |
Reset the recurrent states and stored episode.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
|
[
"Reset",
"the",
"recurrent",
"states",
"and",
"stored",
"episode",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L81-L98
|
6,865
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO.perform
|
def perform(self, agent_indices, observ):
"""Compute batch of actions and a summary for a batch of observation.
Args:
agent_indices: Tensor containing current batch indices.
observ: Tensor of a batch of observations for all agents.
Returns:
Tuple of action batch tensor and summary tensor.
"""
with tf.name_scope('perform/'):
observ = self._observ_filter.transform(observ)
if self._last_state is None:
state = None
else:
state = tools.nested.map(
lambda x: tf.gather(x, agent_indices), self._last_state)
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
output = self._network(
observ[:, None], tf.ones(observ.shape[0]), state)
action = tf.cond(
self._is_training, output.policy.sample, output.policy.mode)
logprob = output.policy.log_prob(action)[:, 0]
# pylint: disable=g-long-lambda
summary = tf.cond(self._should_log, lambda: tf.summary.merge([
tf.summary.histogram('mode', output.policy.mode()[:, 0]),
tf.summary.histogram('action', action[:, 0]),
tf.summary.histogram('logprob', logprob)]), str)
# Remember current policy to append to memory in the experience callback.
if self._last_state is None:
assign_state = tf.no_op()
else:
assign_state = utility.assign_nested_vars(
self._last_state, output.state, agent_indices)
remember_last_action = tf.scatter_update(
self._last_action, agent_indices, action[:, 0])
policy_params = tools.nested.filter(
lambda x: isinstance(x, tf.Tensor), output.policy.parameters)
assert policy_params, 'Policy has no parameters to store.'
remember_last_policy = tools.nested.map(
lambda var, val: tf.scatter_update(var, agent_indices, val[:, 0]),
self._last_policy, policy_params, flatten=True)
with tf.control_dependencies((
assign_state, remember_last_action) + remember_last_policy):
return action[:, 0], tf.identity(summary)
|
python
|
def perform(self, agent_indices, observ):
"""Compute batch of actions and a summary for a batch of observation.
Args:
agent_indices: Tensor containing current batch indices.
observ: Tensor of a batch of observations for all agents.
Returns:
Tuple of action batch tensor and summary tensor.
"""
with tf.name_scope('perform/'):
observ = self._observ_filter.transform(observ)
if self._last_state is None:
state = None
else:
state = tools.nested.map(
lambda x: tf.gather(x, agent_indices), self._last_state)
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
output = self._network(
observ[:, None], tf.ones(observ.shape[0]), state)
action = tf.cond(
self._is_training, output.policy.sample, output.policy.mode)
logprob = output.policy.log_prob(action)[:, 0]
# pylint: disable=g-long-lambda
summary = tf.cond(self._should_log, lambda: tf.summary.merge([
tf.summary.histogram('mode', output.policy.mode()[:, 0]),
tf.summary.histogram('action', action[:, 0]),
tf.summary.histogram('logprob', logprob)]), str)
# Remember current policy to append to memory in the experience callback.
if self._last_state is None:
assign_state = tf.no_op()
else:
assign_state = utility.assign_nested_vars(
self._last_state, output.state, agent_indices)
remember_last_action = tf.scatter_update(
self._last_action, agent_indices, action[:, 0])
policy_params = tools.nested.filter(
lambda x: isinstance(x, tf.Tensor), output.policy.parameters)
assert policy_params, 'Policy has no parameters to store.'
remember_last_policy = tools.nested.map(
lambda var, val: tf.scatter_update(var, agent_indices, val[:, 0]),
self._last_policy, policy_params, flatten=True)
with tf.control_dependencies((
assign_state, remember_last_action) + remember_last_policy):
return action[:, 0], tf.identity(summary)
|
[
"def",
"perform",
"(",
"self",
",",
"agent_indices",
",",
"observ",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'perform/'",
")",
":",
"observ",
"=",
"self",
".",
"_observ_filter",
".",
"transform",
"(",
"observ",
")",
"if",
"self",
".",
"_last_state",
"is",
"None",
":",
"state",
"=",
"None",
"else",
":",
"state",
"=",
"tools",
".",
"nested",
".",
"map",
"(",
"lambda",
"x",
":",
"tf",
".",
"gather",
"(",
"x",
",",
"agent_indices",
")",
",",
"self",
".",
"_last_state",
")",
"with",
"tf",
".",
"device",
"(",
"'/gpu:0'",
"if",
"self",
".",
"_use_gpu",
"else",
"'/cpu:0'",
")",
":",
"output",
"=",
"self",
".",
"_network",
"(",
"observ",
"[",
":",
",",
"None",
"]",
",",
"tf",
".",
"ones",
"(",
"observ",
".",
"shape",
"[",
"0",
"]",
")",
",",
"state",
")",
"action",
"=",
"tf",
".",
"cond",
"(",
"self",
".",
"_is_training",
",",
"output",
".",
"policy",
".",
"sample",
",",
"output",
".",
"policy",
".",
"mode",
")",
"logprob",
"=",
"output",
".",
"policy",
".",
"log_prob",
"(",
"action",
")",
"[",
":",
",",
"0",
"]",
"# pylint: disable=g-long-lambda",
"summary",
"=",
"tf",
".",
"cond",
"(",
"self",
".",
"_should_log",
",",
"lambda",
":",
"tf",
".",
"summary",
".",
"merge",
"(",
"[",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'mode'",
",",
"output",
".",
"policy",
".",
"mode",
"(",
")",
"[",
":",
",",
"0",
"]",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'action'",
",",
"action",
"[",
":",
",",
"0",
"]",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'logprob'",
",",
"logprob",
")",
"]",
")",
",",
"str",
")",
"# Remember current policy to append to memory in the experience callback.",
"if",
"self",
".",
"_last_state",
"is",
"None",
":",
"assign_state",
"=",
"tf",
".",
"no_op",
"(",
")",
"else",
":",
"assign_state",
"=",
"utility",
".",
"assign_nested_vars",
"(",
"self",
".",
"_last_state",
",",
"output",
".",
"state",
",",
"agent_indices",
")",
"remember_last_action",
"=",
"tf",
".",
"scatter_update",
"(",
"self",
".",
"_last_action",
",",
"agent_indices",
",",
"action",
"[",
":",
",",
"0",
"]",
")",
"policy_params",
"=",
"tools",
".",
"nested",
".",
"filter",
"(",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"tf",
".",
"Tensor",
")",
",",
"output",
".",
"policy",
".",
"parameters",
")",
"assert",
"policy_params",
",",
"'Policy has no parameters to store.'",
"remember_last_policy",
"=",
"tools",
".",
"nested",
".",
"map",
"(",
"lambda",
"var",
",",
"val",
":",
"tf",
".",
"scatter_update",
"(",
"var",
",",
"agent_indices",
",",
"val",
"[",
":",
",",
"0",
"]",
")",
",",
"self",
".",
"_last_policy",
",",
"policy_params",
",",
"flatten",
"=",
"True",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"(",
"assign_state",
",",
"remember_last_action",
")",
"+",
"remember_last_policy",
")",
":",
"return",
"action",
"[",
":",
",",
"0",
"]",
",",
"tf",
".",
"identity",
"(",
"summary",
")"
] |
Compute batch of actions and a summary for a batch of observations.
Args:
agent_indices: Tensor containing current batch indices.
observ: Tensor of a batch of observations for all agents.
Returns:
Tuple of action batch tensor and summary tensor.
|
[
"Compute",
"batch",
"of",
"actions",
"and",
"a",
"summary",
"for",
"a",
"batch",
"of",
"observation",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L100-L144
|
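`perform()` draws a stochastic action while training and falls back to the distribution's mode during evaluation, then caches the action and policy parameters for the experience callback. The following NumPy sketch shows just the sample-vs-mode choice for a diagonal Gaussian policy; the Gaussian parameterization is an assumption here, since the repository works with whatever distribution the network emits.

import numpy as np

def act(mean, std, is_training, rng=np.random.default_rng(0)):
    """Sample from N(mean, std) when training, otherwise return the mode (the mean)."""
    if is_training:
        return mean + std * rng.standard_normal(mean.shape)
    return mean

mean = np.zeros((2, 3))          # batch of 2 agents, 3-dimensional actions
std = 0.1 * np.ones((2, 3))
print(act(mean, std, is_training=True))   # exploratory actions during data collection
print(act(mean, std, is_training=False))  # deterministic mode for evaluation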
6,866
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO.experience
|
def experience(
self, agent_indices, observ, action, reward, unused_done, unused_nextob):
"""Process the transition tuple of the current step.
When training, add the current transition tuple to the memory and update
the streaming statistics for observations and rewards. A summary string is
returned if requested at this step.
Args:
agent_indices: Tensor containing current batch indices.
observ: Batch tensor of observations.
action: Batch tensor of actions.
reward: Batch tensor of rewards.
unused_done: Batch tensor of done flags.
unused_nextob: Batch tensor of successor observations.
Returns:
Summary tensor.
"""
with tf.name_scope('experience/'):
return tf.cond(
self._is_training,
# pylint: disable=g-long-lambda
lambda: self._define_experience(
agent_indices, observ, action, reward), str)
|
python
|
def experience(
self, agent_indices, observ, action, reward, unused_done, unused_nextob):
"""Process the transition tuple of the current step.
When training, add the current transition tuple to the memory and update
the streaming statistics for observations and rewards. A summary string is
returned if requested at this step.
Args:
agent_indices: Tensor containing current batch indices.
observ: Batch tensor of observations.
action: Batch tensor of actions.
reward: Batch tensor of rewards.
unused_done: Batch tensor of done flags.
unused_nextob: Batch tensor of successor observations.
Returns:
Summary tensor.
"""
with tf.name_scope('experience/'):
return tf.cond(
self._is_training,
# pylint: disable=g-long-lambda
lambda: self._define_experience(
agent_indices, observ, action, reward), str)
|
[
"def",
"experience",
"(",
"self",
",",
"agent_indices",
",",
"observ",
",",
"action",
",",
"reward",
",",
"unused_done",
",",
"unused_nextob",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'experience/'",
")",
":",
"return",
"tf",
".",
"cond",
"(",
"self",
".",
"_is_training",
",",
"# pylint: disable=g-long-lambda",
"lambda",
":",
"self",
".",
"_define_experience",
"(",
"agent_indices",
",",
"observ",
",",
"action",
",",
"reward",
")",
",",
"str",
")"
] |
Process the transition tuple of the current step.
When training, add the current transition tuple to the memory and update
the streaming statistics for observations and rewards. A summary string is
returned if requested at this step.
Args:
agent_indices: Tensor containing current batch indices.
observ: Batch tensor of observations.
action: Batch tensor of actions.
reward: Batch tensor of rewards.
unused_done: Batch tensor of done flags.
unused_nextob: Batch tensor of successor observations.
Returns:
Summary tensor.
|
[
"Process",
"the",
"transition",
"tuple",
"of",
"the",
"current",
"step",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L146-L170
|
6,867
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO.end_episode
|
def end_episode(self, agent_indices):
"""Add episodes to the memory and perform update steps if memory is full.
During training, add the collected episodes of the batch indices that
finished their episode to the memory. If the memory is full, train on it,
and then clear the memory. A summary string is returned if requested at
this step.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
"""
with tf.name_scope('end_episode/'):
return tf.cond(
self._is_training,
lambda: self._define_end_episode(agent_indices), str)
|
python
|
def end_episode(self, agent_indices):
"""Add episodes to the memory and perform update steps if memory is full.
During training, add the collected episodes of the batch indices that
finished their episode to the memory. If the memory is full, train on it,
and then clear the memory. A summary string is returned if requested at
this step.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
"""
with tf.name_scope('end_episode/'):
return tf.cond(
self._is_training,
lambda: self._define_end_episode(agent_indices), str)
|
[
"def",
"end_episode",
"(",
"self",
",",
"agent_indices",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'end_episode/'",
")",
":",
"return",
"tf",
".",
"cond",
"(",
"self",
".",
"_is_training",
",",
"lambda",
":",
"self",
".",
"_define_end_episode",
"(",
"agent_indices",
")",
",",
"str",
")"
] |
Add episodes to the memory and perform update steps if memory is full.
During training, add the collected episodes of the batch indices that
finished their episode to the memory. If the memory is full, train on it,
and then clear the memory. A summary string is returned if requested at
this step.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
|
[
"Add",
"episodes",
"to",
"the",
"memory",
"and",
"perform",
"update",
"steps",
"if",
"memory",
"is",
"full",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L199-L216
|
6,868
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._initialize_policy
|
def _initialize_policy(self):
"""Initialize the policy.
Run the policy network on dummy data to initialize its parameters for later
reuse and to analyze the policy distribution. Initializes the attributes
`self._network` and `self._policy_type`.
Raises:
ValueError: Invalid policy distribution.
Returns:
Parameters of the policy distribution and policy state.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
network = functools.partial(
self._config.network, self._config, self._batch_env.action_space)
self._network = tf.make_template('network', network)
output = self._network(
tf.zeros_like(self._batch_env.observ)[:, None],
tf.ones(len(self._batch_env)))
if output.policy.event_shape != self._batch_env.action.shape[1:]:
message = 'Policy event shape {} does not match action shape {}.'
message = message.format(
output.policy.event_shape, self._batch_env.action.shape[1:])
raise ValueError(message)
self._policy_type = type(output.policy)
is_tensor = lambda x: isinstance(x, tf.Tensor)
policy_params = tools.nested.filter(is_tensor, output.policy.parameters)
set_batch_dim = lambda x: utility.set_dimension(x, 0, len(self._batch_env))
tools.nested.map(set_batch_dim, policy_params)
if output.state is not None:
tools.nested.map(set_batch_dim, output.state)
return policy_params, output.state
|
python
|
def _initialize_policy(self):
"""Initialize the policy.
Run the policy network on dummy data to initialize its parameters for later
reuse and to analyze the policy distribution. Initializes the attributes
`self._network` and `self._policy_type`.
Raises:
ValueError: Invalid policy distribution.
Returns:
Parameters of the policy distribution and policy state.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
network = functools.partial(
self._config.network, self._config, self._batch_env.action_space)
self._network = tf.make_template('network', network)
output = self._network(
tf.zeros_like(self._batch_env.observ)[:, None],
tf.ones(len(self._batch_env)))
if output.policy.event_shape != self._batch_env.action.shape[1:]:
message = 'Policy event shape {} does not match action shape {}.'
message = message.format(
output.policy.event_shape, self._batch_env.action.shape[1:])
raise ValueError(message)
self._policy_type = type(output.policy)
is_tensor = lambda x: isinstance(x, tf.Tensor)
policy_params = tools.nested.filter(is_tensor, output.policy.parameters)
set_batch_dim = lambda x: utility.set_dimension(x, 0, len(self._batch_env))
tools.nested.map(set_batch_dim, policy_params)
if output.state is not None:
tools.nested.map(set_batch_dim, output.state)
return policy_params, output.state
|
[
"def",
"_initialize_policy",
"(",
"self",
")",
":",
"with",
"tf",
".",
"device",
"(",
"'/gpu:0'",
"if",
"self",
".",
"_use_gpu",
"else",
"'/cpu:0'",
")",
":",
"network",
"=",
"functools",
".",
"partial",
"(",
"self",
".",
"_config",
".",
"network",
",",
"self",
".",
"_config",
",",
"self",
".",
"_batch_env",
".",
"action_space",
")",
"self",
".",
"_network",
"=",
"tf",
".",
"make_template",
"(",
"'network'",
",",
"network",
")",
"output",
"=",
"self",
".",
"_network",
"(",
"tf",
".",
"zeros_like",
"(",
"self",
".",
"_batch_env",
".",
"observ",
")",
"[",
":",
",",
"None",
"]",
",",
"tf",
".",
"ones",
"(",
"len",
"(",
"self",
".",
"_batch_env",
")",
")",
")",
"if",
"output",
".",
"policy",
".",
"event_shape",
"!=",
"self",
".",
"_batch_env",
".",
"action",
".",
"shape",
"[",
"1",
":",
"]",
":",
"message",
"=",
"'Policy event shape {} does not match action shape {}.'",
"message",
"=",
"message",
".",
"format",
"(",
"output",
".",
"policy",
".",
"event_shape",
",",
"self",
".",
"_batch_env",
".",
"action",
".",
"shape",
"[",
"1",
":",
"]",
")",
"raise",
"ValueError",
"(",
"message",
")",
"self",
".",
"_policy_type",
"=",
"type",
"(",
"output",
".",
"policy",
")",
"is_tensor",
"=",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"tf",
".",
"Tensor",
")",
"policy_params",
"=",
"tools",
".",
"nested",
".",
"filter",
"(",
"is_tensor",
",",
"output",
".",
"policy",
".",
"parameters",
")",
"set_batch_dim",
"=",
"lambda",
"x",
":",
"utility",
".",
"set_dimension",
"(",
"x",
",",
"0",
",",
"len",
"(",
"self",
".",
"_batch_env",
")",
")",
"tools",
".",
"nested",
".",
"map",
"(",
"set_batch_dim",
",",
"policy_params",
")",
"if",
"output",
".",
"state",
"is",
"not",
"None",
":",
"tools",
".",
"nested",
".",
"map",
"(",
"set_batch_dim",
",",
"output",
".",
"state",
")",
"return",
"policy_params",
",",
"output",
".",
"state"
] |
Initialize the policy.
Run the policy network on dummy data to initialize its parameters for later
reuse and to analyze the policy distribution. Initializes the attributes
`self._network` and `self._policy_type`.
Raises:
ValueError: Invalid policy distribution.
Returns:
Parameters of the policy distribution and policy state.
|
[
"Initialize",
"the",
"policy",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L218-L250
|
6,869
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._initialize_memory
|
def _initialize_memory(self, policy_params):
"""Initialize temporary and permanent memory.
Args:
policy_params: Nested tuple of policy parameters with all dimensions set.
Initializes the attributes `self._current_episodes`,
`self._finished_episodes`, and `self._num_finished_episodes`. The episodes
memory serves to collect multiple episodes in parallel. Finished episodes
are copied into the next free slot of the second memory. The memory index
points to the next free slot.
"""
# We store observation, action, policy parameters, and reward.
template = (
self._batch_env.observ[0],
self._batch_env.action[0],
tools.nested.map(lambda x: x[0, 0], policy_params),
self._batch_env.reward[0])
with tf.variable_scope('ppo_temporary'):
self._current_episodes = parts.EpisodeMemory(
template, len(self._batch_env), self._config.max_length, 'episodes')
self._finished_episodes = parts.EpisodeMemory(
template, self._config.update_every, self._config.max_length, 'memory')
self._num_finished_episodes = tf.Variable(0, False)
|
python
|
def _initialize_memory(self, policy_params):
"""Initialize temporary and permanent memory.
Args:
policy_params: Nested tuple of policy parameters with all dimensions set.
Initializes the attributes `self._current_episodes`,
`self._finished_episodes`, and `self._num_finished_episodes`. The episodes
memory serves to collect multiple episodes in parallel. Finished episodes
are copied into the next free slot of the second memory. The memory index
points to the next free slot.
"""
# We store observation, action, policy parameters, and reward.
template = (
self._batch_env.observ[0],
self._batch_env.action[0],
tools.nested.map(lambda x: x[0, 0], policy_params),
self._batch_env.reward[0])
with tf.variable_scope('ppo_temporary'):
self._current_episodes = parts.EpisodeMemory(
template, len(self._batch_env), self._config.max_length, 'episodes')
self._finished_episodes = parts.EpisodeMemory(
template, self._config.update_every, self._config.max_length, 'memory')
self._num_finished_episodes = tf.Variable(0, False)
|
[
"def",
"_initialize_memory",
"(",
"self",
",",
"policy_params",
")",
":",
"# We store observation, action, policy parameters, and reward.",
"template",
"=",
"(",
"self",
".",
"_batch_env",
".",
"observ",
"[",
"0",
"]",
",",
"self",
".",
"_batch_env",
".",
"action",
"[",
"0",
"]",
",",
"tools",
".",
"nested",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"[",
"0",
",",
"0",
"]",
",",
"policy_params",
")",
",",
"self",
".",
"_batch_env",
".",
"reward",
"[",
"0",
"]",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"'ppo_temporary'",
")",
":",
"self",
".",
"_current_episodes",
"=",
"parts",
".",
"EpisodeMemory",
"(",
"template",
",",
"len",
"(",
"self",
".",
"_batch_env",
")",
",",
"self",
".",
"_config",
".",
"max_length",
",",
"'episodes'",
")",
"self",
".",
"_finished_episodes",
"=",
"parts",
".",
"EpisodeMemory",
"(",
"template",
",",
"self",
".",
"_config",
".",
"update_every",
",",
"self",
".",
"_config",
".",
"max_length",
",",
"'memory'",
")",
"self",
".",
"_num_finished_episodes",
"=",
"tf",
".",
"Variable",
"(",
"0",
",",
"False",
")"
] |
Initialize temporary and permanent memory.
Args:
policy_params: Nested tuple of policy parameters with all dimensions set.
Initializes the attributes `self._current_episodes`,
`self._finished_episodes`, and `self._num_finished_episodes`. The episodes
memory serves to collect multiple episodes in parallel. Finished episodes
are copied into the next free slot of the second memory. The memory index
points to the next free slot.
|
[
"Initialize",
"temporary",
"and",
"permanent",
"memory",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L252-L275
|
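`_initialize_memory()` builds a per-timestep template (one observation, one action, one set of policy parameters, one reward) and allocates two episode memories from it. A NumPy sketch of deriving such a template and pre-allocating `[capacity, max_length, ...]` buffers; the sizes and field names are illustrative only.

import numpy as np

def allocate_buffers(template, capacity, max_length):
    """Create one [capacity, max_length, ...] buffer per entry of a per-timestep template."""
    return {
        name: np.zeros((capacity, max_length) + np.shape(value),
                       dtype=np.asarray(value).dtype)
        for name, value in template.items()}

# Per-timestep template, analogous to (observ[0], action[0], policy_params[0, 0], reward[0]).
template = {'observ': np.zeros(3), 'action': np.zeros(2),
            'policy_mean': np.zeros(2), 'reward': 0.0}
current = allocate_buffers(template, capacity=8, max_length=100)    # one row per parallel env
finished = allocate_buffers(template, capacity=32, max_length=100)  # holds update_every episodes
print(current['observ'].shape, finished['reward'].shape)  # (8, 100, 3) (32, 100)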
6,870
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._training
|
def _training(self):
"""Perform multiple training iterations of both policy and value baseline.
Trains on the episodes collected in the memory and resets the memory
afterwards. Always returns a summary string.
Returns:
Summary tensor.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
with tf.name_scope('training'):
assert_full = tf.assert_equal(
self._num_finished_episodes, self._config.update_every)
with tf.control_dependencies([assert_full]):
data = self._finished_episodes.data()
(observ, action, old_policy_params, reward), length = data
# We set padding frames of the parameters to ones to prevent Gaussians
# with zero variance. This would result in an infinite KL divergence,
# which, even if masked out, would result in NaN gradients.
old_policy_params = tools.nested.map(
lambda param: self._mask(param, length, 1), old_policy_params)
with tf.control_dependencies([tf.assert_greater(length, 0)]):
length = tf.identity(length)
observ = self._observ_filter.transform(observ)
reward = self._reward_filter.transform(reward)
update_summary = self._perform_update_steps(
observ, action, old_policy_params, reward, length)
with tf.control_dependencies([update_summary]):
penalty_summary = self._adjust_penalty(
observ, old_policy_params, length)
with tf.control_dependencies([penalty_summary]):
clear_memory = tf.group(
self._finished_episodes.clear(),
self._num_finished_episodes.assign(0))
with tf.control_dependencies([clear_memory]):
weight_summary = utility.variable_summaries(
tf.trainable_variables(), self._config.weight_summaries)
return tf.summary.merge([
update_summary, penalty_summary, weight_summary])
|
python
|
def _training(self):
"""Perform multiple training iterations of both policy and value baseline.
Trains on the episodes collected in the memory and resets the memory
afterwards. Always returns a summary string.
Returns:
Summary tensor.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
with tf.name_scope('training'):
assert_full = tf.assert_equal(
self._num_finished_episodes, self._config.update_every)
with tf.control_dependencies([assert_full]):
data = self._finished_episodes.data()
(observ, action, old_policy_params, reward), length = data
# We set padding frames of the parameters to ones to prevent Gaussians
# with zero variance. This would result in an infinite KL divergence,
# which, even if masked out, would result in NaN gradients.
old_policy_params = tools.nested.map(
lambda param: self._mask(param, length, 1), old_policy_params)
with tf.control_dependencies([tf.assert_greater(length, 0)]):
length = tf.identity(length)
observ = self._observ_filter.transform(observ)
reward = self._reward_filter.transform(reward)
update_summary = self._perform_update_steps(
observ, action, old_policy_params, reward, length)
with tf.control_dependencies([update_summary]):
penalty_summary = self._adjust_penalty(
observ, old_policy_params, length)
with tf.control_dependencies([penalty_summary]):
clear_memory = tf.group(
self._finished_episodes.clear(),
self._num_finished_episodes.assign(0))
with tf.control_dependencies([clear_memory]):
weight_summary = utility.variable_summaries(
tf.trainable_variables(), self._config.weight_summaries)
return tf.summary.merge([
update_summary, penalty_summary, weight_summary])
|
[
"def",
"_training",
"(",
"self",
")",
":",
"with",
"tf",
".",
"device",
"(",
"'/gpu:0'",
"if",
"self",
".",
"_use_gpu",
"else",
"'/cpu:0'",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'training'",
")",
":",
"assert_full",
"=",
"tf",
".",
"assert_equal",
"(",
"self",
".",
"_num_finished_episodes",
",",
"self",
".",
"_config",
".",
"update_every",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"assert_full",
"]",
")",
":",
"data",
"=",
"self",
".",
"_finished_episodes",
".",
"data",
"(",
")",
"(",
"observ",
",",
"action",
",",
"old_policy_params",
",",
"reward",
")",
",",
"length",
"=",
"data",
"# We set padding frames of the parameters to ones to prevent Gaussians",
"# with zero variance. This would result in an infinite KL divergence,",
"# which, even if masked out, would result in NaN gradients.",
"old_policy_params",
"=",
"tools",
".",
"nested",
".",
"map",
"(",
"lambda",
"param",
":",
"self",
".",
"_mask",
"(",
"param",
",",
"length",
",",
"1",
")",
",",
"old_policy_params",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"tf",
".",
"assert_greater",
"(",
"length",
",",
"0",
")",
"]",
")",
":",
"length",
"=",
"tf",
".",
"identity",
"(",
"length",
")",
"observ",
"=",
"self",
".",
"_observ_filter",
".",
"transform",
"(",
"observ",
")",
"reward",
"=",
"self",
".",
"_reward_filter",
".",
"transform",
"(",
"reward",
")",
"update_summary",
"=",
"self",
".",
"_perform_update_steps",
"(",
"observ",
",",
"action",
",",
"old_policy_params",
",",
"reward",
",",
"length",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"update_summary",
"]",
")",
":",
"penalty_summary",
"=",
"self",
".",
"_adjust_penalty",
"(",
"observ",
",",
"old_policy_params",
",",
"length",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"penalty_summary",
"]",
")",
":",
"clear_memory",
"=",
"tf",
".",
"group",
"(",
"self",
".",
"_finished_episodes",
".",
"clear",
"(",
")",
",",
"self",
".",
"_num_finished_episodes",
".",
"assign",
"(",
"0",
")",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"clear_memory",
"]",
")",
":",
"weight_summary",
"=",
"utility",
".",
"variable_summaries",
"(",
"tf",
".",
"trainable_variables",
"(",
")",
",",
"self",
".",
"_config",
".",
"weight_summaries",
")",
"return",
"tf",
".",
"summary",
".",
"merge",
"(",
"[",
"update_summary",
",",
"penalty_summary",
",",
"weight_summary",
"]",
")"
] |
Perform multiple training iterations of both policy and value baseline.
Trains on the episodes collected in the memory and resets the memory
afterwards. Always returns a summary string.
Returns:
Summary tensor.
|
[
"Perform",
"multiple",
"training",
"iterations",
"of",
"both",
"policy",
"and",
"value",
"baseline",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L294-L332
|
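One detail worth calling out in the `_training()` record above: padded frames of the stored policy parameters are overwritten with ones before computing KL terms, because a padded standard deviation of zero would make the Gaussian KL infinite and poison the gradients even when masked out later. Below is a NumPy sketch of that masking step, a plausible reading of what `self._mask(param, length, 1)` does; the helper itself is not shown in this excerpt.

import numpy as np

def mask_to_value(param, length, value):
    """Keep entries inside each sequence, replace padding past `length` with `value`."""
    steps = np.arange(param.shape[1])[None, :]            # [1, time]
    inside = steps < length[:, None]                      # [batch, time] validity mask
    return np.where(inside[..., None] if param.ndim == 3 else inside, param, value)

std = np.array([[0.5, 0.4, 0.0, 0.0],    # zeros are padding frames of a stored std
                [0.3, 0.0, 0.0, 0.0]])
length = np.array([2, 1])
print(mask_to_value(std, length, 1.0))
# [[0.5 0.4 1.  1. ]
#  [0.3 1.  1.  1. ]]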
6,871
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._perform_update_steps
|
def _perform_update_steps(
self, observ, action, old_policy_params, reward, length):
"""Perform multiple update steps of value function and policy.
The advantage is computed once at the beginning and shared across
iterations. Only one iteration's summary can be returned, so we choose the
one from halfway through the iterations.
Args:
observ: Sequences of observations.
action: Sequences of actions.
old_policy_params: Parameters of the behavioral policy.
reward: Sequences of rewards.
length: Batch of sequence lengths.
Returns:
Summary tensor.
"""
return_ = utility.discounted_return(
reward, length, self._config.discount)
value = self._network(observ, length).value
if self._config.gae_lambda:
advantage = utility.lambda_advantage(
reward, value, length, self._config.discount,
self._config.gae_lambda)
else:
advantage = return_ - value
mean, variance = tf.nn.moments(advantage, axes=[0, 1], keep_dims=True)
advantage = (advantage - mean) / (tf.sqrt(variance) + 1e-8)
advantage = tf.Print(
advantage, [tf.reduce_mean(return_), tf.reduce_mean(value)],
'return and value: ')
advantage = tf.Print(
advantage, [tf.reduce_mean(advantage)],
'normalized advantage: ')
episodes = (observ, action, old_policy_params, reward, advantage)
value_loss, policy_loss, summary = parts.iterate_sequences(
self._update_step, [0., 0., ''], episodes, length,
self._config.chunk_length,
self._config.batch_size,
self._config.update_epochs,
padding_value=1)
print_losses = tf.group(
tf.Print(0, [tf.reduce_mean(value_loss)], 'value loss: '),
tf.Print(0, [tf.reduce_mean(policy_loss)], 'policy loss: '))
with tf.control_dependencies([value_loss, policy_loss, print_losses]):
return summary[self._config.update_epochs // 2]
|
python
|
def _perform_update_steps(
self, observ, action, old_policy_params, reward, length):
"""Perform multiple update steps of value function and policy.
The advantage is computed once at the beginning and shared across
iterations. Only one iteration's summary can be returned, so we choose the
one from halfway through the iterations.
Args:
observ: Sequences of observations.
action: Sequences of actions.
old_policy_params: Parameters of the behavioral policy.
reward: Sequences of rewards.
length: Batch of sequence lengths.
Returns:
Summary tensor.
"""
return_ = utility.discounted_return(
reward, length, self._config.discount)
value = self._network(observ, length).value
if self._config.gae_lambda:
advantage = utility.lambda_advantage(
reward, value, length, self._config.discount,
self._config.gae_lambda)
else:
advantage = return_ - value
mean, variance = tf.nn.moments(advantage, axes=[0, 1], keep_dims=True)
advantage = (advantage - mean) / (tf.sqrt(variance) + 1e-8)
advantage = tf.Print(
advantage, [tf.reduce_mean(return_), tf.reduce_mean(value)],
'return and value: ')
advantage = tf.Print(
advantage, [tf.reduce_mean(advantage)],
'normalized advantage: ')
episodes = (observ, action, old_policy_params, reward, advantage)
value_loss, policy_loss, summary = parts.iterate_sequences(
self._update_step, [0., 0., ''], episodes, length,
self._config.chunk_length,
self._config.batch_size,
self._config.update_epochs,
padding_value=1)
print_losses = tf.group(
tf.Print(0, [tf.reduce_mean(value_loss)], 'value loss: '),
tf.Print(0, [tf.reduce_mean(policy_loss)], 'policy loss: '))
with tf.control_dependencies([value_loss, policy_loss, print_losses]):
return summary[self._config.update_epochs // 2]
|
[
"def",
"_perform_update_steps",
"(",
"self",
",",
"observ",
",",
"action",
",",
"old_policy_params",
",",
"reward",
",",
"length",
")",
":",
"return_",
"=",
"utility",
".",
"discounted_return",
"(",
"reward",
",",
"length",
",",
"self",
".",
"_config",
".",
"discount",
")",
"value",
"=",
"self",
".",
"_network",
"(",
"observ",
",",
"length",
")",
".",
"value",
"if",
"self",
".",
"_config",
".",
"gae_lambda",
":",
"advantage",
"=",
"utility",
".",
"lambda_advantage",
"(",
"reward",
",",
"value",
",",
"length",
",",
"self",
".",
"_config",
".",
"discount",
",",
"self",
".",
"_config",
".",
"gae_lambda",
")",
"else",
":",
"advantage",
"=",
"return_",
"-",
"value",
"mean",
",",
"variance",
"=",
"tf",
".",
"nn",
".",
"moments",
"(",
"advantage",
",",
"axes",
"=",
"[",
"0",
",",
"1",
"]",
",",
"keep_dims",
"=",
"True",
")",
"advantage",
"=",
"(",
"advantage",
"-",
"mean",
")",
"/",
"(",
"tf",
".",
"sqrt",
"(",
"variance",
")",
"+",
"1e-8",
")",
"advantage",
"=",
"tf",
".",
"Print",
"(",
"advantage",
",",
"[",
"tf",
".",
"reduce_mean",
"(",
"return_",
")",
",",
"tf",
".",
"reduce_mean",
"(",
"value",
")",
"]",
",",
"'return and value: '",
")",
"advantage",
"=",
"tf",
".",
"Print",
"(",
"advantage",
",",
"[",
"tf",
".",
"reduce_mean",
"(",
"advantage",
")",
"]",
",",
"'normalized advantage: '",
")",
"episodes",
"=",
"(",
"observ",
",",
"action",
",",
"old_policy_params",
",",
"reward",
",",
"advantage",
")",
"value_loss",
",",
"policy_loss",
",",
"summary",
"=",
"parts",
".",
"iterate_sequences",
"(",
"self",
".",
"_update_step",
",",
"[",
"0.",
",",
"0.",
",",
"''",
"]",
",",
"episodes",
",",
"length",
",",
"self",
".",
"_config",
".",
"chunk_length",
",",
"self",
".",
"_config",
".",
"batch_size",
",",
"self",
".",
"_config",
".",
"update_epochs",
",",
"padding_value",
"=",
"1",
")",
"print_losses",
"=",
"tf",
".",
"group",
"(",
"tf",
".",
"Print",
"(",
"0",
",",
"[",
"tf",
".",
"reduce_mean",
"(",
"value_loss",
")",
"]",
",",
"'value loss: '",
")",
",",
"tf",
".",
"Print",
"(",
"0",
",",
"[",
"tf",
".",
"reduce_mean",
"(",
"policy_loss",
")",
"]",
",",
"'policy loss: '",
")",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"value_loss",
",",
"policy_loss",
",",
"print_losses",
"]",
")",
":",
"return",
"summary",
"[",
"self",
".",
"_config",
".",
"update_epochs",
"//",
"2",
"]"
] |
Perform multiple update steps of value function and policy.
The advantage is computed once at the beginning and shared across
iterations. Only one iteration's summary can be returned, so we choose the
one from halfway through the iterations.
Args:
observ: Sequences of observations.
action: Sequences of actions.
old_policy_params: Parameters of the behavioral policy.
reward: Sequences of rewards.
length: Batch of sequence lengths.
Returns:
Summary tensor.
|
[
"Perform",
"multiple",
"update",
"steps",
"of",
"value",
"function",
"and",
"policy",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L334-L380
|
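`_perform_update_steps()` computes either plain `return - value` advantages or GAE, then normalizes them to zero mean and unit variance before running the PPO epochs. A NumPy sketch of generalized advantage estimation and that normalization for a single, fully-valid sequence; padding handling is omitted and a terminal bootstrap value of zero is assumed for brevity.

import numpy as np

def gae(rewards, values, discount=0.99, lam=0.95):
    """Generalized advantage estimation for one episode (terminal value assumed zero)."""
    values = np.append(values, 0.0)                        # bootstrap value after the episode
    deltas = rewards + discount * values[1:] - values[:-1]
    advantages = np.zeros_like(rewards)
    running = 0.0
    for t in reversed(range(len(rewards))):                # backward recursion over time
        running = deltas[t] + discount * lam * running
        advantages[t] = running
    return advantages

rewards = np.array([1.0, 1.0, 1.0, 0.0])
values = np.array([0.8, 0.7, 0.5, 0.1])
adv = gae(rewards, values)
adv = (adv - adv.mean()) / (adv.std() + 1e-8)              # same normalization as above
print(adv)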
6,872
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._update_step
|
def _update_step(self, sequence):
"""Compute the current combined loss and perform a gradient update step.
The sequences must be a dict containing the keys `length` and `sequence`,
where the latter is a tuple containing observations, actions, parameters of
the behavioral policy, rewards, and advantages.
Args:
sequence: Sequences of episodes or chunks of episodes.
Returns:
Tuple of value loss, policy loss, and summary tensor.
"""
observ, action, old_policy_params, reward, advantage = sequence['sequence']
length = sequence['length']
old_policy = self._policy_type(**old_policy_params)
value_loss, value_summary = self._value_loss(observ, reward, length)
network = self._network(observ, length)
policy_loss, policy_summary = self._policy_loss(
old_policy, network.policy, action, advantage, length)
network_loss = network.get('loss', 0.0)
loss = policy_loss + value_loss + tf.reduce_mean(network_loss)
gradients, variables = (
zip(*self._optimizer.compute_gradients(loss)))
optimize = self._optimizer.apply_gradients(
zip(gradients, variables))
summary = tf.summary.merge([
value_summary, policy_summary,
tf.summary.histogram('network_loss', network_loss),
tf.summary.scalar('avg_network_loss', tf.reduce_mean(network_loss)),
tf.summary.scalar('gradient_norm', tf.global_norm(gradients)),
utility.gradient_summaries(zip(gradients, variables))])
with tf.control_dependencies([optimize]):
return [tf.identity(x) for x in (value_loss, policy_loss, summary)]
|
python
|
def _update_step(self, sequence):
"""Compute the current combined loss and perform a gradient update step.
The sequences must be a dict containing the keys `length` and `sequence`,
where the latter is a tuple containing observations, actions, parameters of
the behavioral policy, rewards, and advantages.
Args:
sequence: Sequences of episodes or chunks of episodes.
Returns:
Tuple of value loss, policy loss, and summary tensor.
"""
observ, action, old_policy_params, reward, advantage = sequence['sequence']
length = sequence['length']
old_policy = self._policy_type(**old_policy_params)
value_loss, value_summary = self._value_loss(observ, reward, length)
network = self._network(observ, length)
policy_loss, policy_summary = self._policy_loss(
old_policy, network.policy, action, advantage, length)
network_loss = network.get('loss', 0.0)
loss = policy_loss + value_loss + tf.reduce_mean(network_loss)
gradients, variables = (
zip(*self._optimizer.compute_gradients(loss)))
optimize = self._optimizer.apply_gradients(
zip(gradients, variables))
summary = tf.summary.merge([
value_summary, policy_summary,
tf.summary.histogram('network_loss', network_loss),
tf.summary.scalar('avg_network_loss', tf.reduce_mean(network_loss)),
tf.summary.scalar('gradient_norm', tf.global_norm(gradients)),
utility.gradient_summaries(zip(gradients, variables))])
with tf.control_dependencies([optimize]):
return [tf.identity(x) for x in (value_loss, policy_loss, summary)]
|
[
"def",
"_update_step",
"(",
"self",
",",
"sequence",
")",
":",
"observ",
",",
"action",
",",
"old_policy_params",
",",
"reward",
",",
"advantage",
"=",
"sequence",
"[",
"'sequence'",
"]",
"length",
"=",
"sequence",
"[",
"'length'",
"]",
"old_policy",
"=",
"self",
".",
"_policy_type",
"(",
"*",
"*",
"old_policy_params",
")",
"value_loss",
",",
"value_summary",
"=",
"self",
".",
"_value_loss",
"(",
"observ",
",",
"reward",
",",
"length",
")",
"network",
"=",
"self",
".",
"_network",
"(",
"observ",
",",
"length",
")",
"policy_loss",
",",
"policy_summary",
"=",
"self",
".",
"_policy_loss",
"(",
"old_policy",
",",
"network",
".",
"policy",
",",
"action",
",",
"advantage",
",",
"length",
")",
"network_loss",
"=",
"network",
".",
"get",
"(",
"'loss'",
",",
"0.0",
")",
"loss",
"=",
"policy_loss",
"+",
"value_loss",
"+",
"tf",
".",
"reduce_mean",
"(",
"network_loss",
")",
"gradients",
",",
"variables",
"=",
"(",
"zip",
"(",
"*",
"self",
".",
"_optimizer",
".",
"compute_gradients",
"(",
"loss",
")",
")",
")",
"optimize",
"=",
"self",
".",
"_optimizer",
".",
"apply_gradients",
"(",
"zip",
"(",
"gradients",
",",
"variables",
")",
")",
"summary",
"=",
"tf",
".",
"summary",
".",
"merge",
"(",
"[",
"value_summary",
",",
"policy_summary",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'network_loss'",
",",
"network_loss",
")",
",",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'avg_network_loss'",
",",
"tf",
".",
"reduce_mean",
"(",
"network_loss",
")",
")",
",",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'gradient_norm'",
",",
"tf",
".",
"global_norm",
"(",
"gradients",
")",
")",
",",
"utility",
".",
"gradient_summaries",
"(",
"zip",
"(",
"gradients",
",",
"variables",
")",
")",
"]",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"optimize",
"]",
")",
":",
"return",
"[",
"tf",
".",
"identity",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"value_loss",
",",
"policy_loss",
",",
"summary",
")",
"]"
] |
Compute the current combined loss and perform a gradient update step.
The sequences must be a dict containing the keys `length` and `sequence`,
where the latter is a tuple containing observations, actions, parameters of
the behavioral policy, rewards, and advantages.
Args:
sequence: Sequences of episodes or chunks of episodes.
Returns:
Tuple of value loss, policy loss, and summary tensor.
|
[
"Compute",
"the",
"current",
"combined",
"loss",
"and",
"perform",
"a",
"gradient",
"update",
"step",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L382-L415
|
6,873
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._value_loss
|
def _value_loss(self, observ, reward, length):
"""Compute the loss function for the value baseline.
The value loss is the difference between empirical and approximated returns
over the collected episodes. Returns the loss tensor and a summary string.
Args:
observ: Sequences of observations.
reward: Sequences of reward.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
"""
with tf.name_scope('value_loss'):
value = self._network(observ, length).value
return_ = utility.discounted_return(
reward, length, self._config.discount)
advantage = return_ - value
value_loss = 0.5 * self._mask(advantage ** 2, length)
summary = tf.summary.merge([
tf.summary.histogram('value_loss', value_loss),
tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))])
value_loss = tf.reduce_mean(value_loss)
return tf.check_numerics(value_loss, 'value_loss'), summary
|
python
|
def _value_loss(self, observ, reward, length):
"""Compute the loss function for the value baseline.
The value loss is the difference between empirical and approximated returns
over the collected episodes. Returns the loss tensor and a summary string.
Args:
observ: Sequences of observations.
reward: Sequences of reward.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
"""
with tf.name_scope('value_loss'):
value = self._network(observ, length).value
return_ = utility.discounted_return(
reward, length, self._config.discount)
advantage = return_ - value
value_loss = 0.5 * self._mask(advantage ** 2, length)
summary = tf.summary.merge([
tf.summary.histogram('value_loss', value_loss),
tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))])
value_loss = tf.reduce_mean(value_loss)
return tf.check_numerics(value_loss, 'value_loss'), summary
|
[
"def",
"_value_loss",
"(",
"self",
",",
"observ",
",",
"reward",
",",
"length",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'value_loss'",
")",
":",
"value",
"=",
"self",
".",
"_network",
"(",
"observ",
",",
"length",
")",
".",
"value",
"return_",
"=",
"utility",
".",
"discounted_return",
"(",
"reward",
",",
"length",
",",
"self",
".",
"_config",
".",
"discount",
")",
"advantage",
"=",
"return_",
"-",
"value",
"value_loss",
"=",
"0.5",
"*",
"self",
".",
"_mask",
"(",
"advantage",
"**",
"2",
",",
"length",
")",
"summary",
"=",
"tf",
".",
"summary",
".",
"merge",
"(",
"[",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'value_loss'",
",",
"value_loss",
")",
",",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'avg_value_loss'",
",",
"tf",
".",
"reduce_mean",
"(",
"value_loss",
")",
")",
"]",
")",
"value_loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"value_loss",
")",
"return",
"tf",
".",
"check_numerics",
"(",
"value_loss",
",",
"'value_loss'",
")",
",",
"summary"
] |
Compute the loss function for the value baseline.
The value loss is the difference between empirical and approximated returns
over the collected episodes. Returns the loss tensor and a summary string.
Args:
observ: Sequences of observations.
reward: Sequences of reward.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
|
[
"Compute",
"the",
"loss",
"function",
"for",
"the",
"value",
"baseline",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L417-L441
|
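The value loss above is half the squared error between the empirical discounted return and the predicted value, averaged over valid timesteps. A NumPy sketch including the discounted return and the mask; these are simplified stand-ins for `utility.discounted_return` and `self._mask`, not the repository's code.

import numpy as np

def discounted_return(reward, discount):
    """Per-timestep discounted return for one episode: R_t = r_t + discount * R_{t+1}."""
    out = np.zeros_like(reward)
    running = 0.0
    for t in reversed(range(len(reward))):
        running = reward[t] + discount * running
        out[t] = running
    return out

reward = np.array([1.0, 1.0, 0.0, 0.0])       # last two steps are padding
value = np.array([1.5, 0.9, 0.2, 0.3])
length = 2
mask = np.arange(len(reward)) < length

return_ = discounted_return(reward, discount=0.99)
value_loss = 0.5 * ((return_ - value) ** 2 * mask).sum() / mask.sum()  # valid steps only
print(value_loss)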
6,874
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._policy_loss
|
def _policy_loss(
self, old_policy, policy, action, advantage, length):
"""Compute the policy loss composed of multiple components.
1. The policy gradient loss is importance sampled from the data-collecting
policy at the beginning of training.
2. The second term is a KL penalty between the policy at the beginning of
training and the current policy.
3. Additionally, if this KL already changed more than twice the target
amount, we activate a strong penalty discouraging further divergence.
Args:
old_policy: Action distribution of the behavioral policy.
policy: Sequences of distribution params of the current policy.
action: Sequences of actions.
advantage: Sequences of advantages.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
"""
with tf.name_scope('policy_loss'):
kl = tf.contrib.distributions.kl_divergence(old_policy, policy)
# Infinite values in the KL, even for padding frames that we mask out,
# cause NaN gradients since TensorFlow computes gradients with respect to
# the whole input tensor.
kl = tf.check_numerics(kl, 'kl')
kl = tf.reduce_mean(self._mask(kl, length), 1)
policy_gradient = tf.exp(
policy.log_prob(action) - old_policy.log_prob(action))
surrogate_loss = -tf.reduce_mean(self._mask(
policy_gradient * tf.stop_gradient(advantage), length), 1)
surrogate_loss = tf.check_numerics(surrogate_loss, 'surrogate_loss')
kl_penalty = self._penalty * kl
cutoff_threshold = self._config.kl_target * self._config.kl_cutoff_factor
cutoff_count = tf.reduce_sum(
tf.cast(kl > cutoff_threshold, tf.int32))
with tf.control_dependencies([tf.cond(
cutoff_count > 0,
lambda: tf.Print(0, [cutoff_count], 'kl cutoff! '), int)]):
kl_cutoff = (
self._config.kl_cutoff_coef *
tf.cast(kl > cutoff_threshold, tf.float32) *
(kl - cutoff_threshold) ** 2)
policy_loss = surrogate_loss + kl_penalty + kl_cutoff
entropy = tf.reduce_mean(policy.entropy(), axis=1)
if self._config.entropy_regularization:
policy_loss -= self._config.entropy_regularization * entropy
summary = tf.summary.merge([
tf.summary.histogram('entropy', entropy),
tf.summary.histogram('kl', kl),
tf.summary.histogram('surrogate_loss', surrogate_loss),
tf.summary.histogram('kl_penalty', kl_penalty),
tf.summary.histogram('kl_cutoff', kl_cutoff),
tf.summary.histogram('kl_penalty_combined', kl_penalty + kl_cutoff),
tf.summary.histogram('policy_loss', policy_loss),
tf.summary.scalar('avg_surr_loss', tf.reduce_mean(surrogate_loss)),
tf.summary.scalar('avg_kl_penalty', tf.reduce_mean(kl_penalty)),
tf.summary.scalar('avg_policy_loss', tf.reduce_mean(policy_loss))])
policy_loss = tf.reduce_mean(policy_loss, 0)
return tf.check_numerics(policy_loss, 'policy_loss'), summary
|
python
|
def _policy_loss(
self, old_policy, policy, action, advantage, length):
"""Compute the policy loss composed of multiple components.
1. The policy gradient loss is importance sampled from the data-collecting
policy at the beginning of training.
2. The second term is a KL penalty between the policy at the beginning of
training and the current policy.
3. Additionally, if this KL already changed more than twice the target
amount, we activate a strong penalty discouraging further divergence.
Args:
old_policy: Action distribution of the behavioral policy.
policy: Sequences of distribution params of the current policy.
action: Sequences of actions.
advantage: Sequences of advantages.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
"""
with tf.name_scope('policy_loss'):
kl = tf.contrib.distributions.kl_divergence(old_policy, policy)
# Infinite values in the KL, even for padding frames that we mask out,
# cause NaN gradients since TensorFlow computes gradients with respect to
# the whole input tensor.
kl = tf.check_numerics(kl, 'kl')
kl = tf.reduce_mean(self._mask(kl, length), 1)
policy_gradient = tf.exp(
policy.log_prob(action) - old_policy.log_prob(action))
surrogate_loss = -tf.reduce_mean(self._mask(
policy_gradient * tf.stop_gradient(advantage), length), 1)
surrogate_loss = tf.check_numerics(surrogate_loss, 'surrogate_loss')
kl_penalty = self._penalty * kl
cutoff_threshold = self._config.kl_target * self._config.kl_cutoff_factor
cutoff_count = tf.reduce_sum(
tf.cast(kl > cutoff_threshold, tf.int32))
with tf.control_dependencies([tf.cond(
cutoff_count > 0,
lambda: tf.Print(0, [cutoff_count], 'kl cutoff! '), int)]):
kl_cutoff = (
self._config.kl_cutoff_coef *
tf.cast(kl > cutoff_threshold, tf.float32) *
(kl - cutoff_threshold) ** 2)
policy_loss = surrogate_loss + kl_penalty + kl_cutoff
entropy = tf.reduce_mean(policy.entropy(), axis=1)
if self._config.entropy_regularization:
policy_loss -= self._config.entropy_regularization * entropy
summary = tf.summary.merge([
tf.summary.histogram('entropy', entropy),
tf.summary.histogram('kl', kl),
tf.summary.histogram('surrogate_loss', surrogate_loss),
tf.summary.histogram('kl_penalty', kl_penalty),
tf.summary.histogram('kl_cutoff', kl_cutoff),
tf.summary.histogram('kl_penalty_combined', kl_penalty + kl_cutoff),
tf.summary.histogram('policy_loss', policy_loss),
tf.summary.scalar('avg_surr_loss', tf.reduce_mean(surrogate_loss)),
tf.summary.scalar('avg_kl_penalty', tf.reduce_mean(kl_penalty)),
tf.summary.scalar('avg_policy_loss', tf.reduce_mean(policy_loss))])
policy_loss = tf.reduce_mean(policy_loss, 0)
return tf.check_numerics(policy_loss, 'policy_loss'), summary
|
[
"def",
"_policy_loss",
"(",
"self",
",",
"old_policy",
",",
"policy",
",",
"action",
",",
"advantage",
",",
"length",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'policy_loss'",
")",
":",
"kl",
"=",
"tf",
".",
"contrib",
".",
"distributions",
".",
"kl_divergence",
"(",
"old_policy",
",",
"policy",
")",
"# Infinite values in the KL, even for padding frames that we mask out,",
"# cause NaN gradients since TensorFlow computes gradients with respect to",
"# the whole input tensor.",
"kl",
"=",
"tf",
".",
"check_numerics",
"(",
"kl",
",",
"'kl'",
")",
"kl",
"=",
"tf",
".",
"reduce_mean",
"(",
"self",
".",
"_mask",
"(",
"kl",
",",
"length",
")",
",",
"1",
")",
"policy_gradient",
"=",
"tf",
".",
"exp",
"(",
"policy",
".",
"log_prob",
"(",
"action",
")",
"-",
"old_policy",
".",
"log_prob",
"(",
"action",
")",
")",
"surrogate_loss",
"=",
"-",
"tf",
".",
"reduce_mean",
"(",
"self",
".",
"_mask",
"(",
"policy_gradient",
"*",
"tf",
".",
"stop_gradient",
"(",
"advantage",
")",
",",
"length",
")",
",",
"1",
")",
"surrogate_loss",
"=",
"tf",
".",
"check_numerics",
"(",
"surrogate_loss",
",",
"'surrogate_loss'",
")",
"kl_penalty",
"=",
"self",
".",
"_penalty",
"*",
"kl",
"cutoff_threshold",
"=",
"self",
".",
"_config",
".",
"kl_target",
"*",
"self",
".",
"_config",
".",
"kl_cutoff_factor",
"cutoff_count",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"cast",
"(",
"kl",
">",
"cutoff_threshold",
",",
"tf",
".",
"int32",
")",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"tf",
".",
"cond",
"(",
"cutoff_count",
">",
"0",
",",
"lambda",
":",
"tf",
".",
"Print",
"(",
"0",
",",
"[",
"cutoff_count",
"]",
",",
"'kl cutoff! '",
")",
",",
"int",
")",
"]",
")",
":",
"kl_cutoff",
"=",
"(",
"self",
".",
"_config",
".",
"kl_cutoff_coef",
"*",
"tf",
".",
"cast",
"(",
"kl",
">",
"cutoff_threshold",
",",
"tf",
".",
"float32",
")",
"*",
"(",
"kl",
"-",
"cutoff_threshold",
")",
"**",
"2",
")",
"policy_loss",
"=",
"surrogate_loss",
"+",
"kl_penalty",
"+",
"kl_cutoff",
"entropy",
"=",
"tf",
".",
"reduce_mean",
"(",
"policy",
".",
"entropy",
"(",
")",
",",
"axis",
"=",
"1",
")",
"if",
"self",
".",
"_config",
".",
"entropy_regularization",
":",
"policy_loss",
"-=",
"self",
".",
"_config",
".",
"entropy_regularization",
"*",
"entropy",
"summary",
"=",
"tf",
".",
"summary",
".",
"merge",
"(",
"[",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'entropy'",
",",
"entropy",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'kl'",
",",
"kl",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'surrogate_loss'",
",",
"surrogate_loss",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'kl_penalty'",
",",
"kl_penalty",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'kl_cutoff'",
",",
"kl_cutoff",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'kl_penalty_combined'",
",",
"kl_penalty",
"+",
"kl_cutoff",
")",
",",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'policy_loss'",
",",
"policy_loss",
")",
",",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'avg_surr_loss'",
",",
"tf",
".",
"reduce_mean",
"(",
"surrogate_loss",
")",
")",
",",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'avg_kl_penalty'",
",",
"tf",
".",
"reduce_mean",
"(",
"kl_penalty",
")",
")",
",",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'avg_policy_loss'",
",",
"tf",
".",
"reduce_mean",
"(",
"policy_loss",
")",
")",
"]",
")",
"policy_loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"policy_loss",
",",
"0",
")",
"return",
"tf",
".",
"check_numerics",
"(",
"policy_loss",
",",
"'policy_loss'",
")",
",",
"summary"
] |
Compute the policy loss composed of multiple components.
1. The policy gradient loss is importance sampled from the data-collecting
policy at the beginning of training.
2. The second term is a KL penalty between the policy at the beginning of
training and the current policy.
3. Additionally, if this KL already changed more than twice the target
amount, we activate a strong penalty discouraging further divergence.
Args:
old_policy: Action distribution of the behavioral policy.
policy: Sequences of distribution params of the current policy.
action: Sequences of actions.
advantage: Sequences of advantages.
length: Batch of sequence lengths.
Returns:
Tuple of loss tensor and summary tensor.
|
[
"Compute",
"the",
"policy",
"loss",
"composed",
"of",
"multiple",
"components",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L443-L503
|
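A minimal NumPy sketch of the importance-sampled surrogate term in the record above may help when skimming it; it ignores the KL penalty, the cutoff term, and the sequence masking that the full loss adds, and all array names are illustrative rather than taken from the repository.

import numpy as np

def surrogate_loss(logp_new, logp_old, advantage):
    """Importance-sampled policy-gradient surrogate, averaged over a batch.

    logp_new:  log-probabilities of the taken actions under the current policy.
    logp_old:  log-probabilities under the data-collecting (behavioral) policy.
    advantage: advantage estimates, treated as constants (no gradient), which is
               what tf.stop_gradient enforces in the TensorFlow version above.
    """
    ratio = np.exp(logp_new - logp_old)   # importance weights
    return -np.mean(ratio * advantage)    # negated so that minimizing improves the policy

# Toy check: identical policies give ratio == 1, so the loss is just -mean(advantage).
logp = np.log(np.array([0.2, 0.5, 0.3]))
adv = np.array([1.0, -0.5, 0.25])
print(surrogate_loss(logp, logp, adv))    # -0.25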
6,875
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._adjust_penalty
|
def _adjust_penalty(self, observ, old_policy_params, length):
"""Adjust the KL policy between the behavioral and current policy.
Compute how much the policy actually changed during the multiple
update steps. Adjust the penalty strength for the next training phase if we
overshot or undershot the target divergence too much.
Args:
observ: Sequences of observations.
old_policy_params: Parameters of the behavioral policy.
length: Batch of sequence lengths.
Returns:
Summary tensor.
"""
old_policy = self._policy_type(**old_policy_params)
with tf.name_scope('adjust_penalty'):
network = self._network(observ, length)
print_penalty = tf.Print(0, [self._penalty], 'current penalty: ')
with tf.control_dependencies([print_penalty]):
kl_change = tf.reduce_mean(self._mask(
tf.contrib.distributions.kl_divergence(old_policy, network.policy),
length))
kl_change = tf.Print(kl_change, [kl_change], 'kl change: ')
maybe_increase = tf.cond(
kl_change > 1.3 * self._config.kl_target,
# pylint: disable=g-long-lambda
lambda: tf.Print(self._penalty.assign(
self._penalty * 1.5), [0], 'increase penalty '),
float)
maybe_decrease = tf.cond(
kl_change < 0.7 * self._config.kl_target,
# pylint: disable=g-long-lambda
lambda: tf.Print(self._penalty.assign(
self._penalty / 1.5), [0], 'decrease penalty '),
float)
with tf.control_dependencies([maybe_increase, maybe_decrease]):
return tf.summary.merge([
tf.summary.scalar('kl_change', kl_change),
tf.summary.scalar('penalty', self._penalty)])
|
python
|
def _adjust_penalty(self, observ, old_policy_params, length):
"""Adjust the KL policy between the behavioral and current policy.
Compute how much the policy actually changed during the multiple
update steps. Adjust the penalty strength for the next training phase if we
overshot or undershot the target divergence too much.
Args:
observ: Sequences of observations.
old_policy_params: Parameters of the behavioral policy.
length: Batch of sequence lengths.
Returns:
Summary tensor.
"""
old_policy = self._policy_type(**old_policy_params)
with tf.name_scope('adjust_penalty'):
network = self._network(observ, length)
print_penalty = tf.Print(0, [self._penalty], 'current penalty: ')
with tf.control_dependencies([print_penalty]):
kl_change = tf.reduce_mean(self._mask(
tf.contrib.distributions.kl_divergence(old_policy, network.policy),
length))
kl_change = tf.Print(kl_change, [kl_change], 'kl change: ')
maybe_increase = tf.cond(
kl_change > 1.3 * self._config.kl_target,
# pylint: disable=g-long-lambda
lambda: tf.Print(self._penalty.assign(
self._penalty * 1.5), [0], 'increase penalty '),
float)
maybe_decrease = tf.cond(
kl_change < 0.7 * self._config.kl_target,
# pylint: disable=g-long-lambda
lambda: tf.Print(self._penalty.assign(
self._penalty / 1.5), [0], 'decrease penalty '),
float)
with tf.control_dependencies([maybe_increase, maybe_decrease]):
return tf.summary.merge([
tf.summary.scalar('kl_change', kl_change),
tf.summary.scalar('penalty', self._penalty)])
|
[
"def",
"_adjust_penalty",
"(",
"self",
",",
"observ",
",",
"old_policy_params",
",",
"length",
")",
":",
"old_policy",
"=",
"self",
".",
"_policy_type",
"(",
"*",
"*",
"old_policy_params",
")",
"with",
"tf",
".",
"name_scope",
"(",
"'adjust_penalty'",
")",
":",
"network",
"=",
"self",
".",
"_network",
"(",
"observ",
",",
"length",
")",
"print_penalty",
"=",
"tf",
".",
"Print",
"(",
"0",
",",
"[",
"self",
".",
"_penalty",
"]",
",",
"'current penalty: '",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"print_penalty",
"]",
")",
":",
"kl_change",
"=",
"tf",
".",
"reduce_mean",
"(",
"self",
".",
"_mask",
"(",
"tf",
".",
"contrib",
".",
"distributions",
".",
"kl_divergence",
"(",
"old_policy",
",",
"network",
".",
"policy",
")",
",",
"length",
")",
")",
"kl_change",
"=",
"tf",
".",
"Print",
"(",
"kl_change",
",",
"[",
"kl_change",
"]",
",",
"'kl change: '",
")",
"maybe_increase",
"=",
"tf",
".",
"cond",
"(",
"kl_change",
">",
"1.3",
"*",
"self",
".",
"_config",
".",
"kl_target",
",",
"# pylint: disable=g-long-lambda",
"lambda",
":",
"tf",
".",
"Print",
"(",
"self",
".",
"_penalty",
".",
"assign",
"(",
"self",
".",
"_penalty",
"*",
"1.5",
")",
",",
"[",
"0",
"]",
",",
"'increase penalty '",
")",
",",
"float",
")",
"maybe_decrease",
"=",
"tf",
".",
"cond",
"(",
"kl_change",
"<",
"0.7",
"*",
"self",
".",
"_config",
".",
"kl_target",
",",
"# pylint: disable=g-long-lambda",
"lambda",
":",
"tf",
".",
"Print",
"(",
"self",
".",
"_penalty",
".",
"assign",
"(",
"self",
".",
"_penalty",
"/",
"1.5",
")",
",",
"[",
"0",
"]",
",",
"'decrease penalty '",
")",
",",
"float",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"maybe_increase",
",",
"maybe_decrease",
"]",
")",
":",
"return",
"tf",
".",
"summary",
".",
"merge",
"(",
"[",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'kl_change'",
",",
"kl_change",
")",
",",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'penalty'",
",",
"self",
".",
"_penalty",
")",
"]",
")"
] |
Adjust the KL penalty between the behavioral and current policy.
Compute how much the policy actually changed during the multiple
update steps. Adjust the penalty strength for the next training phase if we
overshot or undershot the target divergence too much.
Args:
observ: Sequences of observations.
old_policy_params: Parameters of the behavioral policy.
length: Batch of sequence lengths.
Returns:
Summary tensor.
|
[
"Adjust",
"the",
"KL",
"policy",
"between",
"the",
"behavioral",
"and",
"current",
"policy",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L505-L544
|
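The branching in the record above reduces to a small adaptive rule. The plain-Python sketch below keeps only the 1.3/0.7 thresholds and the 1.5 factor from the code; everything else is illustrative.

def adjust_penalty(penalty, kl_change, kl_target):
    """Return the updated KL penalty coefficient after one training phase."""
    if kl_change > 1.3 * kl_target:   # policy moved too far -> penalize harder
        return penalty * 1.5
    if kl_change < 0.7 * kl_target:   # policy barely moved -> relax the penalty
        return penalty / 1.5
    return penalty                    # within the tolerance band -> keep as is

print(adjust_penalty(1.0, kl_change=0.05, kl_target=0.01))    # 1.5
print(adjust_penalty(1.0, kl_change=0.005, kl_target=0.01))   # ~0.667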
6,876
|
google-research/batch-ppo
|
agents/algorithms/ppo/ppo.py
|
PPO._mask
|
def _mask(self, tensor, length, padding_value=0):
"""Set padding elements of a batch of sequences to a constant.
Useful for setting padding elements to zero before summing along the time
dimension, or for preventing infinite results in padding elements.
Args:
tensor: Tensor of sequences.
length: Batch of sequence lengths.
padding_value: Value to write into padding elements.
Returns:
Masked sequences.
"""
with tf.name_scope('mask'):
range_ = tf.range(tensor.shape[1].value)
mask = range_[None, :] < length[:, None]
if tensor.shape.ndims > 2:
for _ in range(tensor.shape.ndims - 2):
mask = mask[..., None]
mask = tf.tile(mask, [1, 1] + tensor.shape[2:].as_list())
masked = tf.where(mask, tensor, padding_value * tf.ones_like(tensor))
return tf.check_numerics(masked, 'masked')
|
python
|
def _mask(self, tensor, length, padding_value=0):
"""Set padding elements of a batch of sequences to a constant.
Useful for setting padding elements to zero before summing along the time
dimension, or for preventing infinite results in padding elements.
Args:
tensor: Tensor of sequences.
length: Batch of sequence lengths.
padding_value: Value to write into padding elements.
Returns:
Masked sequences.
"""
with tf.name_scope('mask'):
range_ = tf.range(tensor.shape[1].value)
mask = range_[None, :] < length[:, None]
if tensor.shape.ndims > 2:
for _ in range(tensor.shape.ndims - 2):
mask = mask[..., None]
mask = tf.tile(mask, [1, 1] + tensor.shape[2:].as_list())
masked = tf.where(mask, tensor, padding_value * tf.ones_like(tensor))
return tf.check_numerics(masked, 'masked')
|
[
"def",
"_mask",
"(",
"self",
",",
"tensor",
",",
"length",
",",
"padding_value",
"=",
"0",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'mask'",
")",
":",
"range_",
"=",
"tf",
".",
"range",
"(",
"tensor",
".",
"shape",
"[",
"1",
"]",
".",
"value",
")",
"mask",
"=",
"range_",
"[",
"None",
",",
":",
"]",
"<",
"length",
"[",
":",
",",
"None",
"]",
"if",
"tensor",
".",
"shape",
".",
"ndims",
">",
"2",
":",
"for",
"_",
"in",
"range",
"(",
"tensor",
".",
"shape",
".",
"ndims",
"-",
"2",
")",
":",
"mask",
"=",
"mask",
"[",
"...",
",",
"None",
"]",
"mask",
"=",
"tf",
".",
"tile",
"(",
"mask",
",",
"[",
"1",
",",
"1",
"]",
"+",
"tensor",
".",
"shape",
"[",
"2",
":",
"]",
".",
"as_list",
"(",
")",
")",
"masked",
"=",
"tf",
".",
"where",
"(",
"mask",
",",
"tensor",
",",
"padding_value",
"*",
"tf",
".",
"ones_like",
"(",
"tensor",
")",
")",
"return",
"tf",
".",
"check_numerics",
"(",
"masked",
",",
"'masked'",
")"
] |
Set padding elements of a batch of sequences to a constant.
Useful for setting padding elements to zero before summing along the time
dimension, or for preventing infinite results in padding elements.
Args:
tensor: Tensor of sequences.
length: Batch of sequence lengths.
padding_value: Value to write into padding elements.
Returns:
Masked sequences.
|
[
"Set",
"padding",
"elements",
"of",
"a",
"batch",
"of",
"sequences",
"to",
"a",
"constant",
"."
] |
3d09705977bae4e7c3eb20339a3b384d2a5531e4
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L546-L568
|
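The masking trick in this record is easiest to see without the TensorFlow graph machinery. Below is a small NumPy sketch of the same idea, with illustrative names and no gradient handling.

import numpy as np

def mask(tensor, length, padding_value=0):
    """Set elements beyond each sequence's valid length to a constant.

    tensor: array of shape [batch, time].
    length: integer array of shape [batch] with the valid length per sequence.
    """
    time_steps = np.arange(tensor.shape[1])
    valid = time_steps[None, :] < length[:, None]   # [batch, time] boolean mask
    return np.where(valid, tensor, padding_value)

batch = np.array([[1., 2., 3.], [4., 5., 6.]])
print(mask(batch, np.array([2, 1])))
# [[1. 2. 0.]
#  [4. 0. 0.]]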
6,877
|
celery/cell
|
cell/workflow/entities.py
|
Server.main
|
def main(self, *args, **kwargs):
"""Implement the actor main loop by waiting forever for messages."""
self.start(*args, **kwargs)
try:
while 1:
body, message = yield self.receive()
handler = self.get_handler(message)
handler(body, message)
finally:
self.stop(*args, **kwargs)
|
python
|
def main(self, *args, **kwargs):
"""Implement the actor main loop by waiting forever for messages."""
self.start(*args, **kwargs)
try:
while 1:
body, message = yield self.receive()
handler = self.get_handler(message)
handler(body, message)
finally:
self.stop(*args, **kwargs)
|
[
"def",
"main",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"start",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"while",
"1",
":",
"body",
",",
"message",
"=",
"yield",
"self",
".",
"receive",
"(",
")",
"handler",
"=",
"self",
".",
"get_handler",
"(",
"message",
")",
"handler",
"(",
"body",
",",
"message",
")",
"finally",
":",
"self",
".",
"stop",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Implement the actor main loop by waiting forever for messages.
|
[
"Implement",
"the",
"actor",
"main",
"loop",
"by",
"waiting",
"forever",
"for",
"messages",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/workflow/entities.py#L73-L82
|
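The `yield self.receive()` loop above depends on cell's event loop driving the generator. The sketch below reproduces only the receive-dispatch-loop shape with a plain Python generator driven by `send()`; none of its names belong to cell's API.

def message_loop(handlers):
    """Generator-based receive loop: resumes each time a message is sent in."""
    try:
        while True:
            body, message = yield       # "receive": wait for the driver to send a message
            handlers[message](body)     # dispatch to the registered handler
    finally:
        print('loop stopped')           # mirrors the stop() cleanup in the real actor

handlers = {'print': print}
loop = message_loop(handlers)
next(loop)                              # prime the generator up to the first yield
loop.send(('hello actors', 'print'))    # prints: hello actors
loop.close()                            # triggers the finally block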
6,878
|
celery/cell
|
cell/actors.py
|
Actor.send
|
def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
if to is None:
to = self.routing_key
r = self.call_or_cast(method, args, routing_key=to,
nowait=nowait, **kwargs)
if not nowait:
return r.get()
|
python
|
def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
if to is None:
to = self.routing_key
r = self.call_or_cast(method, args, routing_key=to,
nowait=nowait, **kwargs)
if not nowait:
return r.get()
|
[
"def",
"send",
"(",
"self",
",",
"method",
",",
"args",
"=",
"{",
"}",
",",
"to",
"=",
"None",
",",
"nowait",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"to",
"is",
"None",
":",
"to",
"=",
"self",
".",
"routing_key",
"r",
"=",
"self",
".",
"call_or_cast",
"(",
"method",
",",
"args",
",",
"routing_key",
"=",
"to",
",",
"nowait",
"=",
"nowait",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"nowait",
":",
"return",
"r",
".",
"get",
"(",
")"
] |
Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
|
[
"Call",
"method",
"on",
"agent",
"listening",
"to",
"routing_key",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L259-L275
|
6,879
|
celery/cell
|
cell/actors.py
|
Actor.throw
|
def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
nowait=nowait, **kwargs)
if not nowait:
return r
|
python
|
def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
nowait=nowait, **kwargs)
if not nowait:
return r
|
[
"def",
"throw",
"(",
"self",
",",
"method",
",",
"args",
"=",
"{",
"}",
",",
"nowait",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"r",
"=",
"self",
".",
"call_or_cast",
"(",
"method",
",",
"args",
",",
"type",
"=",
"ACTOR_TYPE",
".",
"RR",
",",
"nowait",
"=",
"nowait",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"nowait",
":",
"return",
"r"
] |
Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
|
[
"Call",
"method",
"on",
"one",
"of",
"the",
"agents",
"in",
"round",
"robin",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L277-L290
|
6,880
|
celery/cell
|
cell/actors.py
|
Actor.scatter
|
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
"""
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs)
|
python
|
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
"""
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs)
|
[
"def",
"scatter",
"(",
"self",
",",
"method",
",",
"args",
"=",
"{",
"}",
",",
"nowait",
"=",
"False",
",",
"timeout",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"timeout",
"=",
"timeout",
"if",
"timeout",
"is",
"not",
"None",
"else",
"self",
".",
"default_timeout",
"r",
"=",
"self",
".",
"call_or_cast",
"(",
"method",
",",
"args",
",",
"type",
"=",
"ACTOR_TYPE",
".",
"SCATTER",
",",
"nowait",
"=",
"nowait",
",",
"timeout",
"=",
"timeout",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"nowait",
":",
"return",
"r",
".",
"gather",
"(",
"timeout",
"=",
"timeout",
",",
"*",
"*",
"kwargs",
")"
] |
Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
|
[
"Broadcast",
"method",
"to",
"all",
"agents",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L292-L320
|
6,881
|
celery/cell
|
cell/actors.py
|
Actor.call_or_cast
|
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attribute.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
"""
return (nowait and self.cast or self.call)(method, args, **kwargs)
|
python
|
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attribute.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
"""
return (nowait and self.cast or self.call)(method, args, **kwargs)
|
[
"def",
"call_or_cast",
"(",
"self",
",",
"method",
",",
"args",
"=",
"{",
"}",
",",
"nowait",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"(",
"nowait",
"and",
"self",
".",
"cast",
"or",
"self",
".",
"call",
")",
"(",
"method",
",",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attribute.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
|
[
"Apply",
"remote",
"method",
"asynchronously",
"or",
"synchronously",
"depending",
"on",
"the",
"value",
"of",
"nowait",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L322-L347
|
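The return statement in this record uses the pre-ternary `and/or` selection idiom. The stand-alone sketch below shows an equivalent conditional-expression form (illustrative only, not a proposed change to the library); the two forms only differ when the selected callable could be falsy, which a bound method never is.

# Equivalent dispatch written with a conditional expression instead of `and/or`:
#     return (self.cast if nowait else self.call)(method, args, **kwargs)
def call_or_cast(call, cast, nowait, *args, **kwargs):
    """Stand-alone illustration: pick the non-blocking path when nowait is true."""
    return (cast if nowait else call)(*args, **kwargs)

print(call_or_cast(lambda x: f'call({x})', lambda x: f'cast({x})', True, 'ping'))   # cast(ping)
print(call_or_cast(lambda x: f'call({x})', lambda x: f'cast({x})', False, 'ping'))  # call(ping)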
6,882
|
celery/cell
|
cell/actors.py
|
Actor.cast
|
def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type and type not in self.types:
raise ValueError('Unsupported type: {0}'.format(type))
elif not type:
type = ACTOR_TYPE.DIRECT
props.setdefault('routing_key', self.routing_key)
props.setdefault('serializer', self.serializer)
exchange = exchange or self.type_to_exchange[type]()
declare = (maybe_list(declare) or []) + [exchange]
with producers[self._connection].acquire(block=True) as producer:
return producer.publish(body, exchange=exchange, declare=declare,
retry=retry, retry_policy=retry_policy,
**props)
|
python
|
def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type and type not in self.types:
raise ValueError('Unsupported type: {0}'.format(type))
elif not type:
type = ACTOR_TYPE.DIRECT
props.setdefault('routing_key', self.routing_key)
props.setdefault('serializer', self.serializer)
exchange = exchange or self.type_to_exchange[type]()
declare = (maybe_list(declare) or []) + [exchange]
with producers[self._connection].acquire(block=True) as producer:
return producer.publish(body, exchange=exchange, declare=declare,
retry=retry, retry_policy=retry_policy,
**props)
|
[
"def",
"cast",
"(",
"self",
",",
"method",
",",
"args",
"=",
"{",
"}",
",",
"declare",
"=",
"None",
",",
"retry",
"=",
"None",
",",
"retry_policy",
"=",
"None",
",",
"type",
"=",
"None",
",",
"exchange",
"=",
"None",
",",
"*",
"*",
"props",
")",
":",
"retry",
"=",
"self",
".",
"retry",
"if",
"retry",
"is",
"None",
"else",
"retry",
"body",
"=",
"{",
"'class'",
":",
"self",
".",
"name",
",",
"'method'",
":",
"method",
",",
"'args'",
":",
"args",
"}",
"_retry_policy",
"=",
"self",
".",
"retry_policy",
"if",
"retry_policy",
":",
"# merge default and custom policies.",
"_retry_policy",
"=",
"dict",
"(",
"_retry_policy",
",",
"*",
"*",
"retry_policy",
")",
"if",
"type",
"and",
"type",
"not",
"in",
"self",
".",
"types",
":",
"raise",
"ValueError",
"(",
"'Unsupported type: {0}'",
".",
"format",
"(",
"type",
")",
")",
"elif",
"not",
"type",
":",
"type",
"=",
"ACTOR_TYPE",
".",
"DIRECT",
"props",
".",
"setdefault",
"(",
"'routing_key'",
",",
"self",
".",
"routing_key",
")",
"props",
".",
"setdefault",
"(",
"'serializer'",
",",
"self",
".",
"serializer",
")",
"exchange",
"=",
"exchange",
"or",
"self",
".",
"type_to_exchange",
"[",
"type",
"]",
"(",
")",
"declare",
"=",
"(",
"maybe_list",
"(",
"declare",
")",
"or",
"[",
"]",
")",
"+",
"[",
"exchange",
"]",
"with",
"producers",
"[",
"self",
".",
"_connection",
"]",
".",
"acquire",
"(",
"block",
"=",
"True",
")",
"as",
"producer",
":",
"return",
"producer",
".",
"publish",
"(",
"body",
",",
"exchange",
"=",
"exchange",
",",
"declare",
"=",
"declare",
",",
"retry",
"=",
"retry",
",",
"retry_policy",
"=",
"retry_policy",
",",
"*",
"*",
"props",
")"
] |
Send message to actor. Discarding replies.
|
[
"Send",
"message",
"to",
"actor",
".",
"Discarding",
"replies",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L398-L420
|
6,883
|
celery/cell
|
cell/actors.py
|
Actor.handle_call
|
def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r)
|
python
|
def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r)
|
[
"def",
"handle_call",
"(",
"self",
",",
"body",
",",
"message",
")",
":",
"try",
":",
"r",
"=",
"self",
".",
"_DISPATCH",
"(",
"body",
",",
"ticket",
"=",
"message",
".",
"properties",
"[",
"'reply_to'",
"]",
")",
"except",
"self",
".",
"Next",
":",
"# don't reply, delegate to other agents.",
"pass",
"else",
":",
"self",
".",
"reply",
"(",
"message",
",",
"r",
")"
] |
Handle call message.
|
[
"Handle",
"call",
"message",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L434-L442
|
6,884
|
celery/cell
|
cell/actors.py
|
Actor._on_message
|
def _on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
"""
if message.properties.get('reply_to'):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle()
|
python
|
def _on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
"""
if message.properties.get('reply_to'):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle()
|
[
"def",
"_on_message",
"(",
"self",
",",
"body",
",",
"message",
")",
":",
"if",
"message",
".",
"properties",
".",
"get",
"(",
"'reply_to'",
")",
":",
"handler",
"=",
"self",
".",
"handle_call",
"else",
":",
"handler",
"=",
"self",
".",
"handle_cast",
"def",
"handle",
"(",
")",
":",
"# Do not ack the message if an exceptional error occurs,",
"# but do ack the message if SystemExit or KeyboardInterrupt",
"# is raised, as this is probably intended.",
"try",
":",
"handler",
"(",
"body",
",",
"message",
")",
"except",
"Exception",
":",
"raise",
"except",
"BaseException",
":",
"message",
".",
"ack",
"(",
")",
"raise",
"else",
":",
"message",
".",
"ack",
"(",
")",
"handle",
"(",
")"
] |
What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
|
[
"What",
"to",
"do",
"when",
"a",
"message",
"is",
"received",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L460-L489
|
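The nested try/except in `handle()` encodes a deliberate acking policy: leave the message un-acked on ordinary exceptions, but ack on success and on SystemExit/KeyboardInterrupt before re-raising. Here is a self-contained sketch of that control flow with a dummy stand-in for the kombu message.

class DummyMessage:
    """Stands in for a kombu message; just records whether ack() was called."""
    def __init__(self):
        self.acked = False
    def ack(self):
        self.acked = True

def handle(handler, body, message):
    try:
        handler(body)
    except Exception:
        raise             # ordinary failure: leave the message un-acked for redelivery
    except BaseException:
        message.ack()     # SystemExit/KeyboardInterrupt: ack, then let it propagate
        raise
    else:
        message.ack()     # success: ack normally

msg = DummyMessage()
handle(lambda body: None, 'payload', msg)
print(msg.acked)          # True

msg = DummyMessage()
try:
    handle(lambda body: 1 / 0, 'payload', msg)
except ZeroDivisionError:
    pass
print(msg.acked)          # False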
6,885
|
celery/cell
|
cell/bin/base.py
|
Command.parse_options
|
def parse_options(self, prog_name, arguments):
"""Parse the available options."""
# Don't want to load configuration to just print the version,
# so we handle --version manually here.
if '--version' in arguments:
self.exit_status(self.version, fh=sys.stdout)
parser = self.create_parser(prog_name)
options, args = parser.parse_args(arguments)
return options, args
|
python
|
def parse_options(self, prog_name, arguments):
"""Parse the available options."""
# Don't want to load configuration to just print the version,
# so we handle --version manually here.
if '--version' in arguments:
self.exit_status(self.version, fh=sys.stdout)
parser = self.create_parser(prog_name)
options, args = parser.parse_args(arguments)
return options, args
|
[
"def",
"parse_options",
"(",
"self",
",",
"prog_name",
",",
"arguments",
")",
":",
"# Don't want to load configuration to just print the version,",
"# so we handle --version manually here.",
"if",
"'--version'",
"in",
"arguments",
":",
"self",
".",
"exit_status",
"(",
"self",
".",
"version",
",",
"fh",
"=",
"sys",
".",
"stdout",
")",
"parser",
"=",
"self",
".",
"create_parser",
"(",
"prog_name",
")",
"options",
",",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"arguments",
")",
"return",
"options",
",",
"args"
] |
Parse the available options.
|
[
"Parse",
"the",
"available",
"options",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/bin/base.py#L67-L75
|
6,886
|
celery/cell
|
cell/results.py
|
AsyncResult.get
|
def get(self, **kwargs):
"What kind of arguments should be pass here"
kwargs.setdefault('limit', 1)
return self._first(self.gather(**kwargs))
|
python
|
def get(self, **kwargs):
"What kind of arguments should be pass here"
kwargs.setdefault('limit', 1)
return self._first(self.gather(**kwargs))
|
[
"def",
"get",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'limit'",
",",
"1",
")",
"return",
"self",
".",
"_first",
"(",
"self",
".",
"gather",
"(",
"*",
"*",
"kwargs",
")",
")"
] |
What kind of arguments should be passed here
|
[
"What",
"kind",
"of",
"arguments",
"should",
"be",
"pass",
"here"
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/results.py#L30-L33
|
6,887
|
celery/cell
|
cell/results.py
|
AsyncResult._gather
|
def _gather(self, *args, **kwargs):
"""Generator over the results
"""
propagate = kwargs.pop('propagate', True)
return (self.to_python(reply, propagate=propagate)
for reply in self.actor._collect_replies(*args, **kwargs))
|
python
|
def _gather(self, *args, **kwargs):
"""Generator over the results
"""
propagate = kwargs.pop('propagate', True)
return (self.to_python(reply, propagate=propagate)
for reply in self.actor._collect_replies(*args, **kwargs))
|
[
"def",
"_gather",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"propagate",
"=",
"kwargs",
".",
"pop",
"(",
"'propagate'",
",",
"True",
")",
"return",
"(",
"self",
".",
"to_python",
"(",
"reply",
",",
"propagate",
"=",
"propagate",
")",
"for",
"reply",
"in",
"self",
".",
"actor",
".",
"_collect_replies",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] |
Generator over the results
|
[
"Generator",
"over",
"the",
"results"
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/results.py#L47-L52
|
6,888
|
celery/cell
|
cell/results.py
|
AsyncResult.to_python
|
def to_python(self, reply, propagate=True):
"""Extracts the value out of the reply message.
:param reply: In the case of a successful call the reply message
will be::
{'ok': return_value, **default_fields}
Therefore the method returns: return_value, **default_fields
If the method raises an exception the reply message
will be::
{'nok': [repr exc, str traceback], **default_fields}
:keyword propagate - Propagate exceptions raised instead of returning
a result representation of the error.
"""
try:
return reply['ok']
except KeyError:
error = self.Error(*reply.get('nok') or ())
if propagate:
raise error
return error
|
python
|
def to_python(self, reply, propagate=True):
"""Extracts the value out of the reply message.
:param reply: In the case of a successful call the reply message
will be::
{'ok': return_value, **default_fields}
Therefore the method returns: return_value, **default_fields
If the method raises an exception the reply message
will be::
{'nok': [repr exc, str traceback], **default_fields}
:keyword propagate - Propagate exceptions raised instead of returning
a result representation of the error.
"""
try:
return reply['ok']
except KeyError:
error = self.Error(*reply.get('nok') or ())
if propagate:
raise error
return error
|
[
"def",
"to_python",
"(",
"self",
",",
"reply",
",",
"propagate",
"=",
"True",
")",
":",
"try",
":",
"return",
"reply",
"[",
"'ok'",
"]",
"except",
"KeyError",
":",
"error",
"=",
"self",
".",
"Error",
"(",
"*",
"reply",
".",
"get",
"(",
"'nok'",
")",
"or",
"(",
")",
")",
"if",
"propagate",
":",
"raise",
"error",
"return",
"error"
] |
Extracts the value out of the reply message.
:param reply: In the case of a successful call the reply message
will be::
{'ok': return_value, **default_fields}
Therefore the method returns: return_value, **default_fields
If the method raises an exception the reply message
will be::
{'nok': [repr exc, str traceback], **default_fields}
:keyword propagate - Propagate exceptions raised instead of returning
a result representation of the error.
|
[
"Extracts",
"the",
"value",
"out",
"of",
"the",
"reply",
"message",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/results.py#L54-L79
|
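The `{'ok': ...}` / `{'nok': ...}` reply convention can be exercised on its own. The sketch below mirrors `to_python`, with `RemoteError` standing in for the library's real error type.

class RemoteError(Exception):
    """Stand-in error type; takes (repr_of_remote_exception, traceback_text)."""

def to_python(reply, propagate=True):
    try:
        return reply['ok']                            # successful call: unwrap the value
    except KeyError:
        error = RemoteError(*(reply.get('nok') or ()))
        if propagate:
            raise error                               # surface the remote failure locally
        return error                                  # or hand back an error object

print(to_python({'ok': 42}))                                          # 42
print(repr(to_python({'nok': ["ValueError('boom')", '<tb>']},
                     propagate=False)))                               # RemoteError(...)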
6,889
|
celery/cell
|
cell/agents.py
|
dAgent.spawn
|
def spawn(self, cls, kwargs={}, nowait=False):
"""Spawn a new actor on a celery worker by sending
a remote command to the worker.
:param cls: the name of the :class:`~.cell.actors.Actor` class or its
derivative.
:keyword kwargs: The keyword arguments to pass on to
actor __init__ (a :class:`dict`)
:keyword nowait: If set to True (default) the call waits for the
result of spawning the actor. if False, the spawning
is asynchronous.
:returns :class:`~.cell.actors.ActorProxy`:,
holding the id of the spawned actor.
"""
actor_id = uuid()
if str(qualname(cls)) == '__builtin__.unicode':
name = cls
else:
name = qualname(cls)
res = self.call('spawn', {'cls': name, 'id': actor_id,
'kwargs': kwargs},
type=ACTOR_TYPE.RR, nowait=nowait)
return ActorProxy(name, actor_id, res, agent=self,
connection=self.connection, **kwargs)
|
python
|
def spawn(self, cls, kwargs={}, nowait=False):
"""Spawn a new actor on a celery worker by sending
a remote command to the worker.
:param cls: the name of the :class:`~.cell.actors.Actor` class or its
derivative.
:keyword kwargs: The keyword arguments to pass on to
actor __init__ (a :class:`dict`)
:keyword nowait: If set to True (default) the call waits for the
result of spawning the actor. if False, the spawning
is asynchronous.
:returns :class:`~.cell.actors.ActorProxy`:,
holding the id of the spawned actor.
"""
actor_id = uuid()
if str(qualname(cls)) == '__builtin__.unicode':
name = cls
else:
name = qualname(cls)
res = self.call('spawn', {'cls': name, 'id': actor_id,
'kwargs': kwargs},
type=ACTOR_TYPE.RR, nowait=nowait)
return ActorProxy(name, actor_id, res, agent=self,
connection=self.connection, **kwargs)
|
[
"def",
"spawn",
"(",
"self",
",",
"cls",
",",
"kwargs",
"=",
"{",
"}",
",",
"nowait",
"=",
"False",
")",
":",
"actor_id",
"=",
"uuid",
"(",
")",
"if",
"str",
"(",
"qualname",
"(",
"cls",
")",
")",
"==",
"'__builtin__.unicode'",
":",
"name",
"=",
"cls",
"else",
":",
"name",
"=",
"qualname",
"(",
"cls",
")",
"res",
"=",
"self",
".",
"call",
"(",
"'spawn'",
",",
"{",
"'cls'",
":",
"name",
",",
"'id'",
":",
"actor_id",
",",
"'kwargs'",
":",
"kwargs",
"}",
",",
"type",
"=",
"ACTOR_TYPE",
".",
"RR",
",",
"nowait",
"=",
"nowait",
")",
"return",
"ActorProxy",
"(",
"name",
",",
"actor_id",
",",
"res",
",",
"agent",
"=",
"self",
",",
"connection",
"=",
"self",
".",
"connection",
",",
"*",
"*",
"kwargs",
")"
] |
Spawn a new actor on a celery worker by sending
a remote command to the worker.
:param cls: the name of the :class:`~.cell.actors.Actor` class or its
derivative.
:keyword kwargs: The keyword arguments to pass on to
actor __init__ (a :class:`dict`)
:keyword nowait: If set to True (default) the call waits for the
result of spawning the actor. if False, the spawning
is asynchronous.
:returns :class:`~.cell.actors.ActorProxy`:,
holding the id of the spawned actor.
|
[
"Spawn",
"a",
"new",
"actor",
"on",
"a",
"celery",
"worker",
"by",
"sending",
"a",
"remote",
"command",
"to",
"the",
"worker",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/agents.py#L99-L128
|
6,890
|
celery/cell
|
cell/agents.py
|
dAgent.select
|
def select(self, cls, **kwargs):
"""Get the id of already spawned actor
:keyword actor: the name of the :class:`Actor` class
"""
name = qualname(cls)
id = first_reply(
self.scatter('select', {'cls': name}, limit=1), cls)
return ActorProxy(name, id, agent=self,
connection=self.connection, **kwargs)
|
python
|
def select(self, cls, **kwargs):
"""Get the id of already spawned actor
:keyword actor: the name of the :class:`Actor` class
"""
name = qualname(cls)
id = first_reply(
self.scatter('select', {'cls': name}, limit=1), cls)
return ActorProxy(name, id, agent=self,
connection=self.connection, **kwargs)
|
[
"def",
"select",
"(",
"self",
",",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
"=",
"qualname",
"(",
"cls",
")",
"id",
"=",
"first_reply",
"(",
"self",
".",
"scatter",
"(",
"'select'",
",",
"{",
"'cls'",
":",
"name",
"}",
",",
"limit",
"=",
"1",
")",
",",
"cls",
")",
"return",
"ActorProxy",
"(",
"name",
",",
"id",
",",
"agent",
"=",
"self",
",",
"connection",
"=",
"self",
".",
"connection",
",",
"*",
"*",
"kwargs",
")"
] |
Get the id of already spawned actor
:keyword actor: the name of the :class:`Actor` class
|
[
"Get",
"the",
"id",
"of",
"already",
"spawned",
"actor"
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/agents.py#L130-L139
|
6,891
|
celery/cell
|
cell/agents.py
|
dAgent.process_message
|
def process_message(self, actor, body, message):
"""Process actor message depending depending on the the worker settings.
If greenlets are enabled in the worker, the actor message is processed
in a greenlet from the greenlet pool,
Otherwise, the message is processed by the same thread.
The method is invoked from the callback `cell.actors.Actor.on_message`
upon receiving of a message.
:keyword actor: instance of :class:`Actor` or its derivative.
The actor instance to process the message.
For the full list of arguments see
:meth:`cell.actors.Actor._on_message`.
"""
if actor is not self and self.is_green():
self.pool.spawn_n(actor._on_message, body, message)
else:
if not self.is_green() and message.properties.get('reply_to'):
warn('Starting a blocking call (%s) on actor (%s) '
'when greenlets are disabled.',
itemgetter('method')(body), actor.__class__)
actor._on_message(body, message)
|
python
|
def process_message(self, actor, body, message):
"""Process actor message depending depending on the the worker settings.
If greenlets are enabled in the worker, the actor message is processed
in a greenlet from the greenlet pool,
Otherwise, the message is processed by the same thread.
The method is invoked from the callback `cell.actors.Actor.on_message`
upon receiving of a message.
:keyword actor: instance of :class:`Actor` or its derivative.
The actor instance to process the message.
For the full list of arguments see
:meth:`cell.actors.Actor._on_message`.
"""
if actor is not self and self.is_green():
self.pool.spawn_n(actor._on_message, body, message)
else:
if not self.is_green() and message.properties.get('reply_to'):
warn('Starting a blocking call (%s) on actor (%s) '
'when greenlets are disabled.',
itemgetter('method')(body), actor.__class__)
actor._on_message(body, message)
|
[
"def",
"process_message",
"(",
"self",
",",
"actor",
",",
"body",
",",
"message",
")",
":",
"if",
"actor",
"is",
"not",
"self",
"and",
"self",
".",
"is_green",
"(",
")",
":",
"self",
".",
"pool",
".",
"spawn_n",
"(",
"actor",
".",
"_on_message",
",",
"body",
",",
"message",
")",
"else",
":",
"if",
"not",
"self",
".",
"is_green",
"(",
")",
"and",
"message",
".",
"properties",
".",
"get",
"(",
"'reply_to'",
")",
":",
"warn",
"(",
"'Starting a blocking call (%s) on actor (%s) '",
"'when greenlets are disabled.'",
",",
"itemgetter",
"(",
"'method'",
")",
"(",
"body",
")",
",",
"actor",
".",
"__class__",
")",
"actor",
".",
"_on_message",
"(",
"body",
",",
"message",
")"
] |
Process actor message depending on the worker settings.
If greenlets are enabled in the worker, the actor message is processed
in a greenlet from the greenlet pool,
Otherwise, the message is processed by the same thread.
The method is invoked from the callback `cell.actors.Actor.on_message`
upon receiving of a message.
:keyword actor: instance of :class:`Actor` or its derivative.
The actor instance to process the message.
For the full list of arguments see
:meth:`cell.actors.Actor._on_message`.
|
[
"Process",
"actor",
"message",
"depending",
"depending",
"on",
"the",
"the",
"worker",
"settings",
"."
] |
c7f9b3a0c11ae3429eacb4114279cf2614e94a48
|
https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/agents.py#L164-L187
|
6,892
|
yhat/pandasql
|
pandasql/sqldf.py
|
get_outer_frame_variables
|
def get_outer_frame_variables():
""" Get a dict of local and global variables of the first outer frame from another file. """
cur_filename = inspect.getframeinfo(inspect.currentframe()).filename
outer_frame = next(f
for f in inspect.getouterframes(inspect.currentframe())
if f.filename != cur_filename)
variables = {}
variables.update(outer_frame.frame.f_globals)
variables.update(outer_frame.frame.f_locals)
return variables
|
python
|
def get_outer_frame_variables():
""" Get a dict of local and global variables of the first outer frame from another file. """
cur_filename = inspect.getframeinfo(inspect.currentframe()).filename
outer_frame = next(f
for f in inspect.getouterframes(inspect.currentframe())
if f.filename != cur_filename)
variables = {}
variables.update(outer_frame.frame.f_globals)
variables.update(outer_frame.frame.f_locals)
return variables
|
[
"def",
"get_outer_frame_variables",
"(",
")",
":",
"cur_filename",
"=",
"inspect",
".",
"getframeinfo",
"(",
"inspect",
".",
"currentframe",
"(",
")",
")",
".",
"filename",
"outer_frame",
"=",
"next",
"(",
"f",
"for",
"f",
"in",
"inspect",
".",
"getouterframes",
"(",
"inspect",
".",
"currentframe",
"(",
")",
")",
"if",
"f",
".",
"filename",
"!=",
"cur_filename",
")",
"variables",
"=",
"{",
"}",
"variables",
".",
"update",
"(",
"outer_frame",
".",
"frame",
".",
"f_globals",
")",
"variables",
".",
"update",
"(",
"outer_frame",
".",
"frame",
".",
"f_locals",
")",
"return",
"variables"
] |
Get a dict of local and global variables of the first outer frame from another file.
|
[
"Get",
"a",
"dict",
"of",
"local",
"and",
"global",
"variables",
"of",
"the",
"first",
"outer",
"frame",
"from",
"another",
"file",
"."
] |
e799c6f53be9653e8998a25adb5e2f1643442699
|
https://github.com/yhat/pandasql/blob/e799c6f53be9653e8998a25adb5e2f1643442699/pandasql/sqldf.py#L98-L107
|
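This frame inspection is what lets callers refer to DataFrames by the plain variable names used at the call site. The sketch below demonstrates the same lookup in a simplified form that only walks one frame up; the real helper additionally skips frames from its own file, which matters when the lookup happens through internal layers. All names here are illustrative.

import inspect

def lookup_by_name(name):
    """Find `name` in the caller's namespace, locals taking precedence over globals."""
    frame = inspect.currentframe().f_back   # the frame that called lookup_by_name
    env = {}
    env.update(frame.f_globals)
    env.update(frame.f_locals)              # locals win, matching get_outer_frame_variables
    return env[name]

def demo():
    prices = [1.0, 2.5, 4.0]                # pretend this is a DataFrame named in a query
    return lookup_by_name('prices')

print(demo())   # [1.0, 2.5, 4.0]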
6,893
|
yhat/pandasql
|
pandasql/sqldf.py
|
extract_table_names
|
def extract_table_names(query):
""" Extract table names from an SQL query. """
# a good old fashioned regex. turns out this worked better than actually parsing the code
tables_blocks = re.findall(r'(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)', query, re.IGNORECASE)
tables = [tbl
for block in tables_blocks
for tbl in re.findall(r'\w+', block)]
return set(tables)
|
python
|
def extract_table_names(query):
""" Extract table names from an SQL query. """
# a good old fashioned regex. turns out this worked better than actually parsing the code
tables_blocks = re.findall(r'(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)', query, re.IGNORECASE)
tables = [tbl
for block in tables_blocks
for tbl in re.findall(r'\w+', block)]
return set(tables)
|
[
"def",
"extract_table_names",
"(",
"query",
")",
":",
"# a good old fashioned regex. turns out this worked better than actually parsing the code",
"tables_blocks",
"=",
"re",
".",
"findall",
"(",
"r'(?:FROM|JOIN)\\s+(\\w+(?:\\s*,\\s*\\w+)*)'",
",",
"query",
",",
"re",
".",
"IGNORECASE",
")",
"tables",
"=",
"[",
"tbl",
"for",
"block",
"in",
"tables_blocks",
"for",
"tbl",
"in",
"re",
".",
"findall",
"(",
"r'\\w+'",
",",
"block",
")",
"]",
"return",
"set",
"(",
"tables",
")"
] |
Extract table names from an SQL query.
|
[
"Extract",
"table",
"names",
"from",
"an",
"SQL",
"query",
"."
] |
e799c6f53be9653e8998a25adb5e2f1643442699
|
https://github.com/yhat/pandasql/blob/e799c6f53be9653e8998a25adb5e2f1643442699/pandasql/sqldf.py#L110-L117
|
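The regex in this record is easiest to understand by example. The sketch below reproduces it verbatim and shows which names it pulls from a query containing both a comma-separated FROM list and a JOIN.

import re

def extract_table_names(query):
    # Same pattern as above: capture the word list after each FROM/JOIN keyword.
    tables_blocks = re.findall(r'(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)',
                               query, re.IGNORECASE)
    return {tbl for block in tables_blocks for tbl in re.findall(r'\w+', block)}

query = "SELECT * FROM df1, df2 JOIN df3 ON df1.id = df3.id"
print(extract_table_names(query))   # {'df1', 'df2', 'df3'} (set order may vary)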
6,894
|
yhat/pandasql
|
pandasql/sqldf.py
|
write_table
|
def write_table(df, tablename, conn):
""" Write a dataframe to the database. """
with catch_warnings():
filterwarnings('ignore',
message='The provided table name \'%s\' is not found exactly as such in the database' % tablename)
to_sql(df, name=tablename, con=conn,
index=not any(name is None for name in df.index.names))
|
python
|
def write_table(df, tablename, conn):
""" Write a dataframe to the database. """
with catch_warnings():
filterwarnings('ignore',
message='The provided table name \'%s\' is not found exactly as such in the database' % tablename)
to_sql(df, name=tablename, con=conn,
index=not any(name is None for name in df.index.names))
|
[
"def",
"write_table",
"(",
"df",
",",
"tablename",
",",
"conn",
")",
":",
"with",
"catch_warnings",
"(",
")",
":",
"filterwarnings",
"(",
"'ignore'",
",",
"message",
"=",
"'The provided table name \\'%s\\' is not found exactly as such in the database'",
"%",
"tablename",
")",
"to_sql",
"(",
"df",
",",
"name",
"=",
"tablename",
",",
"con",
"=",
"conn",
",",
"index",
"=",
"not",
"any",
"(",
"name",
"is",
"None",
"for",
"name",
"in",
"df",
".",
"index",
".",
"names",
")",
")"
] |
Write a dataframe to the database.
|
[
"Write",
"a",
"dataframe",
"to",
"the",
"database",
"."
] |
e799c6f53be9653e8998a25adb5e2f1643442699
|
https://github.com/yhat/pandasql/blob/e799c6f53be9653e8998a25adb5e2f1643442699/pandasql/sqldf.py#L120-L126
|
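`write_table` is a thin wrapper over pandas' `to_sql`. The sketch below performs the same round trip against an in-memory SQLite database using only pandas and sqlite3 (pandasql itself is not involved), keeping the same index-skipping condition.

import sqlite3
import pandas as pd

conn = sqlite3.connect(':memory:')
df = pd.DataFrame({'x': [1, 2, 3], 'y': ['a', 'b', 'c']})

# Mirrors write_table: only persist the index when it carries named levels.
df.to_sql('demo', conn, index=not any(name is None for name in df.index.names))

print(pd.read_sql('SELECT x, y FROM demo WHERE x > 1', conn))
#    x  y
# 0  2  b
# 1  3  c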
6,895
|
bsmurphy/PyKrige
|
benchmarks/kriging_benchmarks.py
|
make_benchark
|
def make_benchark(n_train, n_test, n_dim=2):
""" Compute the benchmarks for Ordianry Kriging
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
Returns
-------
res : dict
a dictionary with the timing results
"""
X_train = np.random.rand(n_train, n_dim)
y_train = np.random.rand(n_train)
X_test = np.random.rand(n_test, n_dim)
res = {}
for variogram_model in VARIOGRAM_MODELS:
tic = time()
OK = OrdinaryKriging(X_train[:, 0], X_train[:, 1], y_train,
variogram_model='linear',
verbose=False, enable_plotting=False)
res['t_train_{}'.format(variogram_model)] = time() - tic
# All the following tests are performed with the linear variogram model
for backend in BACKENDS:
for n_closest_points in N_MOVING_WINDOW:
if backend == 'vectorized' and n_closest_points is not None:
continue # this is not supported
tic = time()
OK.execute('points', X_test[:, 0], X_test[:, 1],
backend=backend,
n_closest_points=n_closest_points)
res['t_test_{}_{}'.format(backend, n_closest_points)] = time() - tic
return res
|
python
|
def make_benchark(n_train, n_test, n_dim=2):
""" Compute the benchmarks for Ordianry Kriging
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
Returns
-------
res : dict
a dictionary with the timing results
"""
X_train = np.random.rand(n_train, n_dim)
y_train = np.random.rand(n_train)
X_test = np.random.rand(n_test, n_dim)
res = {}
for variogram_model in VARIOGRAM_MODELS:
tic = time()
OK = OrdinaryKriging(X_train[:, 0], X_train[:, 1], y_train,
variogram_model='linear',
verbose=False, enable_plotting=False)
res['t_train_{}'.format(variogram_model)] = time() - tic
# All the following tests are performed with the linear variogram model
for backend in BACKENDS:
for n_closest_points in N_MOVING_WINDOW:
if backend == 'vectorized' and n_closest_points is not None:
continue # this is not supported
tic = time()
OK.execute('points', X_test[:, 0], X_test[:, 1],
backend=backend,
n_closest_points=n_closest_points)
res['t_test_{}_{}'.format(backend, n_closest_points)] = time() - tic
return res
|
[
"def",
"make_benchark",
"(",
"n_train",
",",
"n_test",
",",
"n_dim",
"=",
"2",
")",
":",
"X_train",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"n_train",
",",
"n_dim",
")",
"y_train",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"n_train",
")",
"X_test",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"n_test",
",",
"n_dim",
")",
"res",
"=",
"{",
"}",
"for",
"variogram_model",
"in",
"VARIOGRAM_MODELS",
":",
"tic",
"=",
"time",
"(",
")",
"OK",
"=",
"OrdinaryKriging",
"(",
"X_train",
"[",
":",
",",
"0",
"]",
",",
"X_train",
"[",
":",
",",
"1",
"]",
",",
"y_train",
",",
"variogram_model",
"=",
"'linear'",
",",
"verbose",
"=",
"False",
",",
"enable_plotting",
"=",
"False",
")",
"res",
"[",
"'t_train_{}'",
".",
"format",
"(",
"variogram_model",
")",
"]",
"=",
"time",
"(",
")",
"-",
"tic",
"# All the following tests are performed with the linear variogram model",
"for",
"backend",
"in",
"BACKENDS",
":",
"for",
"n_closest_points",
"in",
"N_MOVING_WINDOW",
":",
"if",
"backend",
"==",
"'vectorized'",
"and",
"n_closest_points",
"is",
"not",
"None",
":",
"continue",
"# this is not supported",
"tic",
"=",
"time",
"(",
")",
"OK",
".",
"execute",
"(",
"'points'",
",",
"X_test",
"[",
":",
",",
"0",
"]",
",",
"X_test",
"[",
":",
",",
"1",
"]",
",",
"backend",
"=",
"backend",
",",
"n_closest_points",
"=",
"n_closest_points",
")",
"res",
"[",
"'t_test_{}_{}'",
".",
"format",
"(",
"backend",
",",
"n_closest_points",
")",
"]",
"=",
"time",
"(",
")",
"-",
"tic",
"return",
"res"
] |
Compute the benchmarks for Ordianry Kriging
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
Returns
-------
res : dict
a dictionary with the timing results
|
[
"Compute",
"the",
"benchmarks",
"for",
"Ordianry",
"Kriging"
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/benchmarks/kriging_benchmarks.py#L14-L57
|
6,896
|
bsmurphy/PyKrige
|
benchmarks/kriging_benchmarks.py
|
print_benchmark
|
def print_benchmark(n_train, n_test, n_dim, res):
""" Print the benchmarks
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
res : dict
a dictionary with the timing results
"""
print('='*80)
print(' '*10, 'N_dim={}, N_train={}, N_test={}'.format(n_dim,
n_train, n_test))
print('='*80)
print('\n', '# Training the model', '\n')
print('|'.join(['{:>11} '.format(el) for el in ['t_train (s)'] +
VARIOGRAM_MODELS]))
print('-' * (11 + 2) * (len(VARIOGRAM_MODELS) + 1))
print('|'.join(['{:>11} '.format('Training')] +
['{:>11.2} '.format(el) for el in
[res['t_train_{}'.format(mod)]
for mod in VARIOGRAM_MODELS]]))
print('\n', '# Predicting kriging points', '\n')
print('|'.join(['{:>11} '.format(el) for el in ['t_test (s)'] + BACKENDS]))
print('-' * (11 + 2) * (len(BACKENDS) + 1))
for n_closest_points in N_MOVING_WINDOW:
timing_results = [res.get(
't_test_{}_{}'.format(mod, n_closest_points), '')
for mod in BACKENDS]
print('|'.join(['{:>11} '.format('N_nn=' + str(n_closest_points))] +
['{:>11.2} '.format(el) for el in timing_results]))
|
python
|
def print_benchmark(n_train, n_test, n_dim, res):
""" Print the benchmarks
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
res : dict
a dictionary with the timing results
"""
print('='*80)
print(' '*10, 'N_dim={}, N_train={}, N_test={}'.format(n_dim,
n_train, n_test))
print('='*80)
print('\n', '# Training the model', '\n')
print('|'.join(['{:>11} '.format(el) for el in ['t_train (s)'] +
VARIOGRAM_MODELS]))
print('-' * (11 + 2) * (len(VARIOGRAM_MODELS) + 1))
print('|'.join(['{:>11} '.format('Training')] +
['{:>11.2} '.format(el) for el in
[res['t_train_{}'.format(mod)]
for mod in VARIOGRAM_MODELS]]))
print('\n', '# Predicting kriging points', '\n')
print('|'.join(['{:>11} '.format(el) for el in ['t_test (s)'] + BACKENDS]))
print('-' * (11 + 2) * (len(BACKENDS) + 1))
for n_closest_points in N_MOVING_WINDOW:
timing_results = [res.get(
't_test_{}_{}'.format(mod, n_closest_points), '')
for mod in BACKENDS]
print('|'.join(['{:>11} '.format('N_nn=' + str(n_closest_points))] +
['{:>11.2} '.format(el) for el in timing_results]))
|
[
"def",
"print_benchmark",
"(",
"n_train",
",",
"n_test",
",",
"n_dim",
",",
"res",
")",
":",
"print",
"(",
"'='",
"*",
"80",
")",
"print",
"(",
"' '",
"*",
"10",
",",
"'N_dim={}, N_train={}, N_test={}'",
".",
"format",
"(",
"n_dim",
",",
"n_train",
",",
"n_test",
")",
")",
"print",
"(",
"'='",
"*",
"80",
")",
"print",
"(",
"'\\n'",
",",
"'# Training the model'",
",",
"'\\n'",
")",
"print",
"(",
"'|'",
".",
"join",
"(",
"[",
"'{:>11} '",
".",
"format",
"(",
"el",
")",
"for",
"el",
"in",
"[",
"'t_train (s)'",
"]",
"+",
"VARIOGRAM_MODELS",
"]",
")",
")",
"print",
"(",
"'-'",
"*",
"(",
"11",
"+",
"2",
")",
"*",
"(",
"len",
"(",
"VARIOGRAM_MODELS",
")",
"+",
"1",
")",
")",
"print",
"(",
"'|'",
".",
"join",
"(",
"[",
"'{:>11} '",
".",
"format",
"(",
"'Training'",
")",
"]",
"+",
"[",
"'{:>11.2} '",
".",
"format",
"(",
"el",
")",
"for",
"el",
"in",
"[",
"res",
"[",
"'t_train_{}'",
".",
"format",
"(",
"mod",
")",
"]",
"for",
"mod",
"in",
"VARIOGRAM_MODELS",
"]",
"]",
")",
")",
"print",
"(",
"'\\n'",
",",
"'# Predicting kriging points'",
",",
"'\\n'",
")",
"print",
"(",
"'|'",
".",
"join",
"(",
"[",
"'{:>11} '",
".",
"format",
"(",
"el",
")",
"for",
"el",
"in",
"[",
"'t_test (s)'",
"]",
"+",
"BACKENDS",
"]",
")",
")",
"print",
"(",
"'-'",
"*",
"(",
"11",
"+",
"2",
")",
"*",
"(",
"len",
"(",
"BACKENDS",
")",
"+",
"1",
")",
")",
"for",
"n_closest_points",
"in",
"N_MOVING_WINDOW",
":",
"timing_results",
"=",
"[",
"res",
".",
"get",
"(",
"'t_test_{}_{}'",
".",
"format",
"(",
"mod",
",",
"n_closest_points",
")",
",",
"''",
")",
"for",
"mod",
"in",
"BACKENDS",
"]",
"print",
"(",
"'|'",
".",
"join",
"(",
"[",
"'{:>11} '",
".",
"format",
"(",
"'N_nn='",
"+",
"str",
"(",
"n_closest_points",
")",
")",
"]",
"+",
"[",
"'{:>11.2} '",
".",
"format",
"(",
"el",
")",
"for",
"el",
"in",
"timing_results",
"]",
")",
")"
] |
Print the benchmarks
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
res : dict
a dictionary with the timing results
|
[
"Print",
"the",
"benchmarks"
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/benchmarks/kriging_benchmarks.py#L60-L96
|
6,897
|
bsmurphy/PyKrige
|
pykrige/uk.py
|
UniversalKriging.display_variogram_model
|
def display_variogram_model(self):
"""Displays variogram model with the actual binned data."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, 'r*')
ax.plot(self.lags,
self.variogram_function(self.variogram_model_parameters,
self.lags), 'k-')
plt.show()
|
python
|
def display_variogram_model(self):
"""Displays variogram model with the actual binned data."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, 'r*')
ax.plot(self.lags,
self.variogram_function(self.variogram_model_parameters,
self.lags), 'k-')
plt.show()
|
[
"def",
"display_variogram_model",
"(",
"self",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"ax",
".",
"plot",
"(",
"self",
".",
"lags",
",",
"self",
".",
"semivariance",
",",
"'r*'",
")",
"ax",
".",
"plot",
"(",
"self",
".",
"lags",
",",
"self",
".",
"variogram_function",
"(",
"self",
".",
"variogram_model_parameters",
",",
"self",
".",
"lags",
")",
",",
"'k-'",
")",
"plt",
".",
"show",
"(",
")"
] |
Displays variogram model with the actual binned data.
|
[
"Displays",
"variogram",
"model",
"with",
"the",
"actual",
"binned",
"data",
"."
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/uk.py#L608-L616
|
6,898
|
bsmurphy/PyKrige
|
pykrige/uk.py
|
UniversalKriging.plot_epsilon_residuals
|
def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c='k', marker='*')
ax.axhline(y=0.0)
plt.show()
|
python
|
def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c='k', marker='*')
ax.axhline(y=0.0)
plt.show()
|
[
"def",
"plot_epsilon_residuals",
"(",
"self",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"ax",
".",
"scatter",
"(",
"range",
"(",
"self",
".",
"epsilon",
".",
"size",
")",
",",
"self",
".",
"epsilon",
",",
"c",
"=",
"'k'",
",",
"marker",
"=",
"'*'",
")",
"ax",
".",
"axhline",
"(",
"y",
"=",
"0.0",
")",
"plt",
".",
"show",
"(",
")"
] |
Plots the epsilon residuals for the variogram fit.
|
[
"Plots",
"the",
"epsilon",
"residuals",
"for",
"the",
"variogram",
"fit",
"."
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/uk.py#L647-L653
|
6,899
|
bsmurphy/PyKrige
|
pykrige/uk.py
|
UniversalKriging.print_statistics
|
def print_statistics(self):
"""Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible.
"""
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR)
|
python
|
def print_statistics(self):
"""Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible.
"""
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR)
|
[
"def",
"print_statistics",
"(",
"self",
")",
":",
"print",
"(",
"\"Q1 =\"",
",",
"self",
".",
"Q1",
")",
"print",
"(",
"\"Q2 =\"",
",",
"self",
".",
"Q2",
")",
"print",
"(",
"\"cR =\"",
",",
"self",
".",
"cR",
")"
] |
Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible.
|
[
"Prints",
"out",
"the",
"Q1",
"Q2",
"and",
"cR",
"statistics",
"for",
"the",
"variogram",
"fit",
".",
"NOTE",
"that",
"ideally",
"Q1",
"is",
"close",
"to",
"zero",
"Q2",
"is",
"close",
"to",
"1",
"and",
"cR",
"is",
"as",
"small",
"as",
"possible",
"."
] |
a4db3003b0b5688658c12faeb95a5a8b2b14b433
|
https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/uk.py#L661-L668
|