def from_spec(spec, kwargs):
    """
    Creates an agent from a specification dict.
    """
    agent = util.get_object(
        obj=spec,
        predefined_objects=tensorforce.agents.agents,
        kwargs=kwargs
    )
    assert isinstance(agent, Agent)
    return agent

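# A hedged usage sketch of the factory above: the spec values here are
# illustrative assumptions, not taken from this file. `from_spec` looks up the
# 'type' key in tensorforce.agents.agents and forwards kwargs to the resolved
# constructor.
from tensorforce.agents import Agent

agent = Agent.from_spec(
    spec=dict(type='ppo_agent'),
    kwargs=dict(
        states=dict(type='float', shape=(4,)),
        actions=dict(type='int', num_actions=2),
        network=[dict(type='dense', size=32)]
    )
)
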
def get_named_tensor(self, name):
    """
    Returns a named tensor if available.

    Returns:
        valid: True if named tensor found, False otherwise
        tensor: If valid, will be a tensor, otherwise None
    """
    if name in self.named_tensors:
        return True, self.named_tensors[name]
    else:
        return False, None

def from_spec(spec, kwargs=None):
    """
    Creates a network from a specification dict.
    """
    network = util.get_object(
        obj=spec,
        default_object=LayeredNetwork,
        kwargs=kwargs
    )
    assert isinstance(network, Network)
    return network

def put(self, item, priority=None):
    """
    Stores a transition in replay memory.

    If the memory is full, the oldest entry is replaced.
    """
    if not self._isfull():
        self._memory.append(None)
    position = self._next_position_then_increment()
    old_priority = 0 if self._memory[position] is None \
        else (self._memory[position].priority or 0)
    row = _SumRow(item, priority)
    self._memory[position] = row
    self._update_internal_nodes(
        position, (row.priority or 0) - old_priority)

def move(self, external_index, new_priority):
    """
    Change the priority of a leaf node.
    """
    index = external_index + (self._capacity - 1)
    return self._move(index, new_priority)

def _move(self, index, new_priority):
    """
    Change the priority of a leaf node.
    """
    item, old_priority = self._memory[index]
    old_priority = old_priority or 0
    self._memory[index] = _SumRow(item, new_priority)
    self._update_internal_nodes(index, new_priority - old_priority)

def _update_internal_nodes(self, index, delta):
    """
    Update internal priority sums when leaf priority has been changed.

    Args:
        index: leaf node index
        delta: change in priority
    """
    # Move up tree, increasing position, updating sum
    while index > 0:
        index = (index - 1) // 2
        self._memory[index] += delta

def _next_position_then_increment(self):
    """
    Similar to position++.
    """
    start = self._capacity - 1
    position = start + self._position
    self._position = (self._position + 1) % self._capacity
    return position

def _sample_with_priority(self, p):
    """
    Sample random element with priority greater than p.
    """
    parent = 0
    while True:
        left = 2 * parent + 1
        if left >= len(self._memory):
            # parent points to a leaf node already.
            return parent

        left_p = self._memory[left] if left < self._capacity - 1 \
            else (self._memory[left].priority or 0)
        if p <= left_p:
            parent = left
        else:
            if left + 1 >= len(self._memory):
                raise RuntimeError('Right child is expected to exist.')
            p -= left_p
            parent = left + 1

def sample_minibatch(self, batch_size):
    """
    Sample minibatch of size batch_size.
    """
    pool_size = len(self)
    if pool_size == 0:
        return []

    delta_p = self._memory[0] / batch_size
    chosen_idx = []
    # If all priorities sum to ~0, choose uniformly at random; otherwise,
    # draw one stratified sample per equal slice of the total priority mass.
    if abs(self._memory[0]) < util.epsilon:
        chosen_idx = np.random.randint(
            self._capacity - 1, self._capacity - 1 + len(self), size=batch_size).tolist()
    else:
        for i in xrange(batch_size):
            lower = max(i * delta_p, 0)
            upper = min((i + 1) * delta_p, self._memory[0])
            p = random.uniform(lower, upper)
            chosen_idx.append(self._sample_with_priority(p))
    return [(i, self._memory[i]) for i in chosen_idx]

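# Self-contained sketch of the sum-tree idea behind the methods above
# (illustrative and independent of the class; all names here are hypothetical).
# Leaves store priorities starting at offset capacity - 1; internal node i
# holds the sum of its children 2i+1 and 2i+2, so tree[0] is the total mass.
import random

capacity = 4
tree = [0.0] * (2 * capacity - 1)

def set_priority(leaf, priority):
    index = leaf + capacity - 1
    delta = priority - tree[index]
    tree[index] = priority
    while index > 0:  # Propagate the change up to the root.
        index = (index - 1) // 2
        tree[index] += delta

def sample(p):
    parent = 0
    while 2 * parent + 1 < len(tree):  # Descend until a leaf is reached.
        left = 2 * parent + 1
        if p <= tree[left]:
            parent = left
        else:
            p -= tree[left]
            parent = left + 1
    return parent - (capacity - 1)

for leaf, priority in enumerate([1.0, 2.0, 3.0, 4.0]):
    set_priority(leaf, priority)

# Stratified sampling as in sample_minibatch above: one uniform draw per
# equal slice of the total mass tree[0].
batch = [sample(random.uniform(i * tree[0] / 2, (i + 1) * tree[0] / 2)) for i in range(2)]
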
def get_batch(self, batch_size, next_states=False):
    """
    Samples a batch of the specified size according to priority.

    Args:
        batch_size: The batch size
        next_states: A boolean flag indicating whether 'next_states' values should be included

    Returns:
        A dict containing states, actions, rewards, terminals, internal states (and next states)
    """
    if batch_size > len(self.observations):
        raise TensorForceError(
            "Requested batch size is larger than observations in memory: increase config.first_update."
        )

    # Init empty states
    states = {
        name: np.zeros((batch_size,) + tuple(state['shape']), dtype=util.np_dtype(state['type']))
        for name, state in self.states_spec.items()
    }
    internals = [np.zeros((batch_size,) + shape, dtype) for shape, dtype in self.internals_spec]
    actions = {
        name: np.zeros((batch_size,) + tuple(action['shape']), dtype=util.np_dtype(action['type']))
        for name, action in self.actions_spec.items()
    }
    terminal = np.zeros((batch_size,), dtype=util.np_dtype('bool'))
    reward = np.zeros((batch_size,), dtype=util.np_dtype('float'))
    if next_states:
        next_states = {
            name: np.zeros((batch_size,) + tuple(state['shape']), dtype=util.np_dtype(state['type']))
            for name, state in self.states_spec.items()
        }
        next_internals = [np.zeros((batch_size,) + shape, dtype) for shape, dtype in self.internals_spec]

    # Start with unseen observations
    unseen_indices = list(xrange(
        self.none_priority_index + self.observations._capacity - 1,
        len(self.observations) + self.observations._capacity - 1
    ))
    self.batch_indices = unseen_indices[:batch_size]

    # Get remaining observations using weighted sampling
    remaining = batch_size - len(self.batch_indices)
    if remaining:
        samples = self.observations.sample_minibatch(remaining)
        sample_indices = [i for i, o in samples]
        self.batch_indices += sample_indices

    # Shuffle
    np.random.shuffle(self.batch_indices)

    # Collect observations
    for n, index in enumerate(self.batch_indices):
        observation, _ = self.observations._memory[index]

        for name, state in states.items():
            state[n] = observation[0][name]
        for k, internal in enumerate(internals):
            internal[n] = observation[1][k]
        for name, action in actions.items():
            action[n] = observation[2][name]
        terminal[n] = observation[3]
        reward[n] = observation[4]

        if next_states:
            for name, next_state in next_states.items():
                next_state[n] = observation[5][name]
            for k, next_internal in enumerate(next_internals):
                next_internal[n] = observation[6][k]

    if next_states:
        return dict(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward,
            next_states=next_states,
            next_internals=next_internals
        )
    else:
        return dict(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward
        )

def update_batch(self, loss_per_instance):
    """
    Computes priorities according to loss.

    Args:
        loss_per_instance: Per-instance loss values from the last update.
    """
    if self.batch_indices is None:
        raise TensorForceError("Need to call get_batch before each update_batch call.")
    # if len(loss_per_instance) != len(self.batch_indices):
    #     raise TensorForceError("For all instances a loss value has to be provided.")

    for index, loss in zip(self.batch_indices, loss_per_instance):
        # Sampling priority is proportional to the largest absolute temporal difference error.
        new_priority = (np.abs(loss) + self.prioritization_constant) ** self.prioritization_weight
        self.observations._move(index, new_priority)
        self.none_priority_index += 1

def import_experience(self, experiences):
    """
    Imports experiences.

    Args:
        experiences: Experience dict, or list of per-timestep experience dicts.
    """
    if isinstance(experiences, dict):
        if self.unique_state:
            experiences['states'] = dict(state=experiences['states'])
        if self.unique_action:
            experiences['actions'] = dict(action=experiences['actions'])

        self.model.import_experience(**experiences)
    else:
        if self.unique_state:
            states = dict(state=list())
        else:
            states = {name: list() for name in experiences[0]['states']}
        internals = [list() for _ in experiences[0]['internals']]
        if self.unique_action:
            actions = dict(action=list())
        else:
            actions = {name: list() for name in experiences[0]['actions']}
        terminal = list()
        reward = list()

        for experience in experiences:
            if self.unique_state:
                states['state'].append(experience['states'])
            else:
                for name in sorted(states):
                    states[name].append(experience['states'][name])
            for n, internal in enumerate(internals):
                internal.append(experience['internals'][n])
            if self.unique_action:
                actions['action'].append(experience['actions'])
            else:
                for name in sorted(actions):
                    actions[name].append(experience['actions'][name])
            terminal.append(experience['terminal'])
            reward.append(experience['reward'])

        self.model.import_experience(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward
        )

def connect(self, timeout=600):
    """
    Starts the server tcp connection on the given host:port.

    Args:
        timeout (int): The time (in seconds) for which we will attempt a connection to the remote
            (retrying every 5sec). After that (or if timeout is None or 0), an error is raised.
    """
    # If we are already connected, raise an error.
    if self.socket:
        raise TensorForceError(
            "Already connected to {}:{}. Only one connection allowed at a time. "
            "Close first by calling `close`!".format(self.host, self.port)
        )
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    # Check for None before comparing (avoids a TypeError under Python 3).
    if timeout is None or timeout < 5:
        timeout = 5

    err = 0
    start_time = time.time()
    while time.time() - start_time < timeout:
        self.socket.settimeout(5)
        err = self.socket.connect_ex((self.host, self.port))
        if err == 0:
            break
        time.sleep(1)
    if err != 0:
        raise TensorForceError(
            "Error when trying to connect to {}:{}: errno={} errcode='{}' '{}'".format(
                self.host, self.port, err, errno.errorcode[err], os.strerror(err)
            )
        )

def disconnect(self):
    """
    Ends our server tcp connection.
    """
    # If we are not connected, return error.
    if not self.socket:
        logging.warning("No active socket to close!")
        return
    # Close our socket.
    self.socket.close()
    self.socket = None

def send(self, message, socket_):
    """
    Sends a message (dict) to the socket. Message consists of an 8-byte len header followed by
    a msgpack-numpy encoded dict.

    Args:
        message: The message dict (e.g. {"cmd": "reset"})
        socket_: The python socket object to use.
    """
    if not socket_:
        raise TensorForceError("No socket given in call to `send`!")
    elif not isinstance(message, dict):
        raise TensorForceError("Message to be sent must be a dict!")
    message = msgpack.packb(message)
    len_ = len(message)
    # Prepend an 8-byte len field to all our messages.
    socket_.send(bytes("{:08d}".format(len_), encoding="ascii") + message)

def recv(self, socket_, encoding=None):
    """
    Receives a message as msgpack-numpy encoded byte-string from the given socket object.
    Blocks until something was received.

    Args:
        socket_: The python socket object to use.
        encoding (str): The encoding to use for unpacking messages from the socket.

    Returns:
        The decoded (as dict) message received.
    """
    unpacker = msgpack.Unpacker(encoding=encoding)

    # Wait for an immediate response.
    response = socket_.recv(8)  # Get the length of the message.
    if response == b"":
        raise TensorForceError(
            "No data received by socket.recv in call to method `recv` "
            "(listener possibly closed)!"
        )
    orig_len = int(response)
    received_len = 0
    while True:
        data = socket_.recv(min(orig_len - received_len, self.max_msg_len))
        # There must be a response.
        if not data:
            raise TensorForceError(
                "No data of len {} received by socket.recv in call to method `recv`!".format(
                    orig_len - received_len
                )
            )
        data_len = len(data)
        received_len += data_len
        unpacker.feed(data)
        if received_len == orig_len:
            break

    # Get the data.
    for message in unpacker:
        sts = message.get("status", message.get(b"status"))
        if sts:
            if sts == "ok" or sts == b"ok":
                return message
            else:
                raise TensorForceError(
                    "RemoteEnvironment server error: {}".format(
                        message.get("message", "not specified")
                    )
                )
        else:
            raise TensorForceError("Message without field 'status' received!")
    raise TensorForceError(
        "No message encoded in data stream (data stream had len={})".format(orig_len)
    )

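# Round-trip sketch of the length-prefixed framing used by send/recv above,
# without sockets. The helpers frame/unframe are hypothetical illustrations;
# msgpack-numpy would additionally be needed once numpy arrays appear in the
# payload.
import msgpack

def frame(message):
    """Pack a dict and prepend the 8-byte ASCII length header."""
    payload = msgpack.packb(message)
    return bytes("{:08d}".format(len(payload)), encoding="ascii") + payload

def unframe(data):
    """Read the 8-byte length header, then unpack exactly that many bytes."""
    length = int(data[:8])
    return msgpack.unpackb(data[8:8 + length], raw=False)

framed = frame({"status": "ok", "cmd": "reset"})
assert unframe(framed) == {"status": "ok", "cmd": "reset"}
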
def is_action_available(self, action):
    """Determines whether action is available.

    That is, executing it would change the state.
    """
    temp_state = np.rot90(self._state, action)
    return self._is_action_available_left(temp_state)

def _is_action_available_left(self, state):
    """Determines whether action 'Left' is available."""
    # True if any field is 0 (empty) on the left of a tile or two tiles can
    # be merged.
    for row in range(4):
        has_empty = False
        for col in range(4):
            has_empty |= state[row, col] == 0
            if state[row, col] != 0 and has_empty:
                return True
            if (state[row, col] != 0 and col > 0 and
                    state[row, col] == state[row, col - 1]):
                return True
    return False

def do_action(self, action):
    """Execute action, add a new tile, update the score & return the reward."""
    temp_state = np.rot90(self._state, action)
    reward = self._do_action_left(temp_state)
    self._state = np.rot90(temp_state, -action)
    self._score += reward

    self.add_random_tile()

    return reward

def _do_action_left(self, state):
    """Executes action 'Left'."""
    reward = 0

    for row in range(4):
        # Always the rightmost tile in the current row that was already moved
        merge_candidate = -1
        merged = np.zeros((4,), dtype=bool)

        for col in range(4):
            if state[row, col] == 0:
                continue

            if (merge_candidate != -1 and
                    not merged[merge_candidate] and
                    state[row, merge_candidate] == state[row, col]):
                # Merge tile with merge_candidate
                state[row, col] = 0
                merged[merge_candidate] = True
                state[row, merge_candidate] += 1
                reward += 2 ** state[row, merge_candidate]
            else:
                # Move tile to the left
                merge_candidate += 1
                if col != merge_candidate:
                    state[row, merge_candidate] = state[row, col]
                    state[row, col] = 0

    return reward

def add_random_tile(self):
    """Adds a random tile to the grid. Assumes that it has empty fields."""
    x_pos, y_pos = np.where(self._state == 0)
    assert len(x_pos) != 0
    empty_index = np.random.choice(len(x_pos))
    value = np.random.choice([1, 2], p=[0.9, 0.1])
    self._state[x_pos[empty_index], y_pos[empty_index]] = value

def print_state(self):
    """Prints the current state."""

    def tile_string(value):
        """Convert value to string."""
        if value > 0:
            return '% 5d' % (2 ** value,)
        return "     "

    separator_line = '-' * 25
    print(separator_line)
    for row in range(4):
        print("|" + "|".join([tile_string(v) for v in self._state[row, :]]) + "|")
    print(separator_line)

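# To illustrate the merge rule implemented by _do_action_left above: tiles are
# stored as exponents (a value v renders as 2 ** v; 0 is empty). The
# single-row helper below is a hypothetical re-implementation for
# demonstration only, not part of the environment.
import numpy as np

def move_left(row):
    result, reward, candidate, merged = np.zeros_like(row), 0, -1, set()
    for value in row[row != 0]:
        if candidate != -1 and candidate not in merged and result[candidate] == value:
            merged.add(candidate)
            result[candidate] += 1  # Exponents increase by 1 on merge.
            reward += 2 ** result[candidate]
        else:
            candidate += 1
            result[candidate] = value
    return result, reward

# Two 1-tiles (i.e. 2 and 2) merge into one 2-tile (i.e. 4) for a reward of 4.
row, reward = move_left(np.array([1, 0, 1, 2]))
assert list(row) == [2, 2, 0, 0] and reward == 4
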
def setup(self):
    """
    Sets up the TensorFlow model graph, starts the servers (distributed mode), creates summarizers
    and savers, initializes (and enters) the TensorFlow session.
    """
    # Create/get our graph, setup local model/global model links, set scope and device.
    graph_default_context = self.setup_graph()

    # Start a tf Server (in case of distributed setup). Only start once.
    if self.execution_type == "distributed" and self.server is None and self.is_local_model:
        self.start_server()

    # Build the graph.
    with tf.device(device_name_or_function=self.device):
        with tf.variable_scope(name_or_scope=self.scope, reuse=False):
            # Variables and summaries
            self.variables = dict()
            self.all_variables = dict()
            self.registered_variables = set()

            # Build the graph's placeholders, tf_functions, etc.
            self.setup_placeholders()
            # Create model's "external" components.
            # Create tensorflow functions from "tf_"-methods.
            self.setup_components_and_tf_funcs()

            # Create core variables (timestep, episode counters, buffers for states/actions/internals).
            self.fn_initialize()

            if self.summarizer_spec is not None:
                with tf.name_scope(name='summarizer'):
                    self.summarizer = tf.contrib.summary.create_file_writer(
                        logdir=self.summarizer_spec['directory'],
                        max_queue=None,
                        flush_millis=(self.summarizer_spec.get('flush', 10) * 1000),
                        filename_suffix=None,
                        name=None
                    )
                    default_summarizer = self.summarizer.as_default()
                    # Problem: not all parts of the graph are called on every step.
                    assert 'steps' not in self.summarizer_spec
                    # if 'steps' in self.summarizer_spec:
                    #     record_summaries = tf.contrib.summary.record_summaries_every_n_global_steps(
                    #         n=self.summarizer_spec['steps'],
                    #         global_step=self.global_timestep
                    #     )
                    # else:
                    record_summaries = tf.contrib.summary.always_record_summaries()

                default_summarizer.__enter__()
                record_summaries.__enter__()

            # Input tensors
            states = util.map_tensors(fn=tf.identity, tensors=self.states_input)
            internals = util.map_tensors(fn=tf.identity, tensors=self.internals_input)
            actions = util.map_tensors(fn=tf.identity, tensors=self.actions_input)
            terminal = tf.identity(input=self.terminal_input)
            reward = tf.identity(input=self.reward_input)
            # Probably both deterministic and independent should be the same at some point.
            deterministic = tf.identity(input=self.deterministic_input)
            independent = tf.identity(input=self.independent_input)
            episode_index = tf.identity(input=self.episode_index_input)

            states, actions, reward = self.fn_preprocess(states=states, actions=actions, reward=reward)

            self.create_operations(
                states=states,
                internals=internals,
                actions=actions,
                terminal=terminal,
                reward=reward,
                deterministic=deterministic,
                independent=independent,
                index=episode_index
            )

            # Add all summaries specified in summary_labels
            if 'inputs' in self.summary_labels or 'states' in self.summary_labels:
                for name in sorted(states):
                    tf.contrib.summary.histogram(name=('states-' + name), tensor=states[name])
            if 'inputs' in self.summary_labels or 'actions' in self.summary_labels:
                for name in sorted(actions):
                    tf.contrib.summary.histogram(name=('actions-' + name), tensor=actions[name])
            if 'inputs' in self.summary_labels or 'reward' in self.summary_labels:
                tf.contrib.summary.histogram(name='reward', tensor=reward)

            if 'graph' in self.summary_labels:
                with tf.name_scope(name='summarizer'):
                    graph_def = self.graph.as_graph_def()
                    graph_str = tf.constant(
                        value=graph_def.SerializeToString(),
                        dtype=tf.string,
                        shape=()
                    )
                    self.graph_summary = tf.contrib.summary.graph(
                        param=graph_str,
                        step=self.global_timestep
                    )
                    if 'meta_param_recorder_class' in self.summarizer_spec:
                        self.graph_summary = tf.group(
                            self.graph_summary,
                            *self.summarizer_spec['meta_param_recorder_class'].build_metagraph_list()
                        )

            if self.summarizer_spec is not None:
                record_summaries.__exit__(None, None, None)
                default_summarizer.__exit__(None, None, None)
                with tf.name_scope(name='summarizer'):
                    self.flush_summarizer = tf.contrib.summary.flush()
                    self.summarizer_init_op = tf.contrib.summary.summary_writer_initializer_op()
                    assert len(self.summarizer_init_op) == 1
                    self.summarizer_init_op = self.summarizer_init_op[0]

    # If we are a global model -> return here.
    # Saving, syncing, finalizing graph, session is done by local replica model.
    if self.execution_type == "distributed" and not self.is_local_model:
        return

    # Saver/Summary -> Scaffold.
    self.setup_saver()
    self.setup_scaffold()

    # Create necessary hooks for the upcoming session.
    hooks = self.setup_hooks()

    # We are done constructing: Finalize our graph, create and enter the session.
    self.setup_session(self.server, hooks, graph_default_context)

def setup_graph(self):
    """
    Creates our Graph and figures out which shared/global model to hook up to.
    If we are in a global-model's setup procedure, we do not create a new graph (return None as
    the context). We will instead use the already existing local replica graph of the model.

    Returns:
        None or the graph's as_default()-context.
    """
    graph_default_context = None

    # Single (non-distributed) mode.
    if self.execution_type == "single":
        self.graph = tf.Graph()
        graph_default_context = self.graph.as_default()
        graph_default_context.__enter__()
        self.global_model = None

    # Distributed tf
    elif self.execution_type == "distributed":
        # Parameter-server -> Do not build any graph.
        if self.distributed_spec["job"] == "ps":
            return None

        # Worker -> construct the global (main) model; the one hosted on the ps.
        elif self.distributed_spec["job"] == "worker":
            # The local replica model.
            if self.is_local_model:
                graph = tf.Graph()
                graph_default_context = graph.as_default()
                graph_default_context.__enter__()

                # Now that the graph is created and entered -> deepcopy ourselves, set up the
                # global model first, then continue.
                self.global_model = deepcopy(self)
                # Switch on global construction/setup-mode for the pass to setup().
                self.global_model.is_local_model = False
                self.global_model.setup()

                self.graph = graph
                self.as_local_model()
                self.scope += '-worker' + str(self.distributed_spec["task_index"])
            # The global_model (whose Variables are hosted by the ps).
            else:
                self.graph = tf.get_default_graph()  # lives in the same graph as the local model
                self.global_model = None
                self.device = tf.train.replica_device_setter(
                    # Place its Variables on the parameter server(s) (round robin).
                    # ps_device="/job:ps",  # default
                    # Train-ops for the global_model are hosted locally (on this worker's node).
                    worker_device=self.device,
                    cluster=self.distributed_spec["cluster_spec"]
                )
        else:
            raise TensorForceError("Unsupported job type: {}!".format(self.distributed_spec["job"]))
    else:
        raise TensorForceError("Unsupported execution type: {}!".format(self.execution_type))

    return graph_default_context

def start_server(self):
    """
    Creates and stores a tf server (and optionally joins it if we are a parameter-server).
    Only relevant if we are running in distributed mode.
    """
    self.server = tf.train.Server(
        server_or_cluster_def=self.distributed_spec["cluster_spec"],
        job_name=self.distributed_spec["job"],
        task_index=self.distributed_spec["task_index"],
        protocol=self.distributed_spec.get("protocol"),
        config=self.distributed_spec.get("session_config"),
        start=True
    )
    if self.distributed_spec["job"] == "ps":
        self.server.join()
        # This is unreachable?
        quit()

def setup_placeholders(self):
    """
    Creates the TensorFlow placeholders, variables, ops and functions for this model.
    NOTE: Does not add the internal state placeholders and initialization values to the model yet
    as that requires the model's Network (if any) to be generated first.
    """
    # States
    for name in sorted(self.states_spec):
        self.states_input[name] = tf.placeholder(
            dtype=util.tf_dtype(self.states_spec[name]['type']),
            shape=(None,) + tuple(self.states_spec[name]['shape']),
            name=('state-' + name)
        )

    # States preprocessing
    if self.states_preprocessing_spec is None:
        for name in sorted(self.states_spec):
            self.states_spec[name]['unprocessed_shape'] = self.states_spec[name]['shape']

    # Preprocessing spec given per state component.
    elif not isinstance(self.states_preprocessing_spec, list) and \
            all(name in self.states_spec for name in self.states_preprocessing_spec):
        for name in sorted(self.states_spec):
            if name in self.states_preprocessing_spec:
                preprocessing = PreprocessorStack.from_spec(
                    spec=self.states_preprocessing_spec[name],
                    kwargs=dict(shape=self.states_spec[name]['shape'])
                )
                self.states_spec[name]['unprocessed_shape'] = self.states_spec[name]['shape']
                self.states_spec[name]['shape'] = preprocessing.processed_shape(
                    shape=self.states_spec[name]['unprocessed_shape'])
                self.states_preprocessing[name] = preprocessing
            else:
                self.states_spec[name]['unprocessed_shape'] = self.states_spec[name]['shape']

    # Single preprocessor spec applied to all components of our state space.
    # (Constructed per component inside the loop, since each component may
    # have a different shape; the original flattened code referenced `name`
    # before the loop.)
    elif "type" in self.states_preprocessing_spec:
        for name in sorted(self.states_spec):
            preprocessing = PreprocessorStack.from_spec(
                spec=self.states_preprocessing_spec,
                kwargs=dict(shape=self.states_spec[name]['shape'])
            )
            self.states_spec[name]['unprocessed_shape'] = self.states_spec[name]['shape']
            self.states_spec[name]['shape'] = preprocessing.processed_shape(
                shape=self.states_spec[name]['unprocessed_shape'])
            self.states_preprocessing[name] = preprocessing

    else:
        for name in sorted(self.states_spec):
            preprocessing = PreprocessorStack.from_spec(
                spec=self.states_preprocessing_spec,
                kwargs=dict(shape=self.states_spec[name]['shape'])
            )
            self.states_spec[name]['unprocessed_shape'] = self.states_spec[name]['shape']
            self.states_spec[name]['shape'] = preprocessing.processed_shape(
                shape=self.states_spec[name]['unprocessed_shape'])
            self.states_preprocessing[name] = preprocessing

    # Actions
    for name in sorted(self.actions_spec):
        self.actions_input[name] = tf.placeholder(
            dtype=util.tf_dtype(self.actions_spec[name]['type']),
            shape=(None,) + tuple(self.actions_spec[name]['shape']),
            name=('action-' + name)
        )

    # Actions exploration
    if self.actions_exploration_spec is None:
        pass
    elif all(name in self.actions_spec for name in self.actions_exploration_spec):
        for name in sorted(self.actions_spec):
            if name in self.actions_exploration_spec:
                self.actions_exploration[name] = Exploration.from_spec(
                    spec=self.actions_exploration_spec[name])
    else:
        for name in sorted(self.actions_spec):
            self.actions_exploration[name] = Exploration.from_spec(spec=self.actions_exploration_spec)

    # Terminal
    self.terminal_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(None,), name='terminal')

    # Reward
    self.reward_input = tf.placeholder(dtype=util.tf_dtype('float'), shape=(None,), name='reward')

    # Reward preprocessing
    if self.reward_preprocessing_spec is not None:
        self.reward_preprocessing = PreprocessorStack.from_spec(
            spec=self.reward_preprocessing_spec,
            # TODO this can eventually have more complex shapes?
            kwargs=dict(shape=())
        )
        if self.reward_preprocessing.processed_shape(shape=()) != ():
            raise TensorForceError("Invalid reward preprocessing!")

    # Deterministic/independent action flag (should probably be the same)
    self.deterministic_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(), name='deterministic')
    self.independent_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(), name='independent')

def setup_components_and_tf_funcs(self, custom_getter=None):
    """
    Allows child models to create model's component objects, such as optimizer(s), memory(s), etc..
    Creates all tensorflow functions via tf.make_template calls on all the class' "tf_"-methods.

    Args:
        custom_getter: The `custom_getter_` object to use for `tf.make_template` when creating
            TensorFlow functions. If None, use a default custom_getter_.

    Returns:
        The custom_getter passed in (or a default one if custom_getter was None).
    """
    if custom_getter is None:
        def custom_getter(getter, name, registered=False, **kwargs):
            """
            To be passed to tf.make_template() as 'custom_getter_'.
            """
            if registered:
                self.registered_variables.add(name)
            elif name in self.registered_variables:
                registered = True
            # Top-level, hence no 'registered' argument.
            variable = getter(name=name, **kwargs)
            if registered:
                pass
            elif name in self.all_variables:
                assert variable is self.all_variables[name]
                if kwargs.get('trainable', True):
                    assert variable is self.variables[name]
                    if 'variables' in self.summary_labels:
                        tf.contrib.summary.histogram(name=name, tensor=variable)
            else:
                self.all_variables[name] = variable
                if kwargs.get('trainable', True):
                    self.variables[name] = variable
                    if 'variables' in self.summary_labels:
                        tf.contrib.summary.histogram(name=name, tensor=variable)
            return variable

    self.fn_initialize = tf.make_template(
        name_='initialize',
        func_=self.tf_initialize,
        custom_getter_=custom_getter
    )
    self.fn_preprocess = tf.make_template(
        name_='preprocess',
        func_=self.tf_preprocess,
        custom_getter_=custom_getter
    )
    self.fn_actions_and_internals = tf.make_template(
        name_='actions-and-internals',
        func_=self.tf_actions_and_internals,
        custom_getter_=custom_getter
    )
    self.fn_observe_timestep = tf.make_template(
        name_='observe-timestep',
        func_=self.tf_observe_timestep,
        custom_getter_=custom_getter
    )
    self.fn_action_exploration = tf.make_template(
        name_='action-exploration',
        func_=self.tf_action_exploration,
        custom_getter_=custom_getter
    )

    return custom_getter

def setup_saver(self):
    """
    Creates the tf.train.Saver object and stores it in self.saver.
    """
    if self.execution_type == "single":
        global_variables = self.get_variables(include_submodules=True, include_nontrainable=True)
    else:
        global_variables = self.global_model.get_variables(include_submodules=True, include_nontrainable=True)

    # global_variables += [self.global_episode, self.global_timestep]

    for c in self.get_savable_components():
        c.register_saver_ops()

    # TensorFlow saver object
    # TODO potentially make other options configurable via saver spec.
    self.saver = tf.train.Saver(
        var_list=global_variables,  # should be given?
        reshape=False,
        sharded=False,
        max_to_keep=5,
        keep_checkpoint_every_n_hours=10000.0,
        name=None,
        restore_sequentially=False,
        saver_def=None,
        builder=None,
        defer_build=False,
        allow_empty=True,
        write_version=tf.train.SaverDef.V2,
        pad_step_number=False,
        save_relative_paths=True
        # filename=None
    )

def setup_scaffold(self):
    """
    Creates the tf.train.Scaffold object and assigns it to self.scaffold.
    Other fields of the Scaffold are generated automatically.
    """
    if self.execution_type == "single":
        global_variables = self.get_variables(include_submodules=True, include_nontrainable=True)
        # global_variables += [self.global_episode, self.global_timestep]
        init_op = tf.variables_initializer(var_list=global_variables)
        if self.summarizer_init_op is not None:
            init_op = tf.group(init_op, self.summarizer_init_op)
        if self.graph_summary is None:
            ready_op = tf.report_uninitialized_variables(var_list=global_variables)
            ready_for_local_init_op = None
            local_init_op = None
        else:
            ready_op = None
            ready_for_local_init_op = tf.report_uninitialized_variables(var_list=global_variables)
            local_init_op = self.graph_summary

    else:
        # Global and local variable initializers.
        global_variables = self.global_model.get_variables(include_submodules=True, include_nontrainable=True)
        # global_variables += [self.global_episode, self.global_timestep]
        local_variables = self.get_variables(include_submodules=True, include_nontrainable=True)
        init_op = tf.variables_initializer(var_list=global_variables)
        if self.summarizer_init_op is not None:
            init_op = tf.group(init_op, self.summarizer_init_op)
        ready_op = tf.report_uninitialized_variables(var_list=(global_variables + local_variables))
        ready_for_local_init_op = tf.report_uninitialized_variables(var_list=global_variables)
        if self.graph_summary is None:
            local_init_op = tf.group(
                tf.variables_initializer(var_list=local_variables),
                # Synchronize values of trainable variables.
                *(tf.assign(ref=local_var, value=global_var) for local_var, global_var in zip(
                    self.get_variables(include_submodules=True),
                    self.global_model.get_variables(include_submodules=True)
                ))
            )
        else:
            local_init_op = tf.group(
                tf.variables_initializer(var_list=local_variables),
                self.graph_summary,
                # Synchronize values of trainable variables.
                *(tf.assign(ref=local_var, value=global_var) for local_var, global_var in zip(
                    self.get_variables(include_submodules=True),
                    self.global_model.get_variables(include_submodules=True)
                ))
            )

    def init_fn(scaffold, session):
        if self.saver_spec is not None and self.saver_spec.get('load', True):
            directory = self.saver_spec['directory']
            file = self.saver_spec.get('file')
            if file is None:
                file = tf.train.latest_checkpoint(
                    checkpoint_dir=directory,
                    latest_filename=None  # Corresponds to argument of saver.save() in Model.save().
                )
            elif not os.path.isfile(file):
                file = os.path.join(directory, file)
            if file is not None:
                try:
                    scaffold.saver.restore(sess=session, save_path=file)
                    session.run(fetches=self.list_buffer_index_reset_op)
                except tf.errors.NotFoundError:
                    raise TensorForceError(
                        "Error: Existing checkpoint could not be loaded! Set \"load\" to false in saver_spec."
                    )

    # TensorFlow scaffold object
    # TODO explain what it does.
    self.scaffold = tf.train.Scaffold(
        init_op=init_op,
        init_feed_dict=None,
        init_fn=init_fn,
        ready_op=ready_op,
        ready_for_local_init_op=ready_for_local_init_op,
        local_init_op=local_init_op,
        summary_op=None,
        saver=self.saver,
        copy_from_scaffold=None
    )

def setup_hooks(self):
    """
    Creates and returns a list of hooks to use in a session. Populates self.saver_directory.

    Returns: List of hooks to use in a session.
    """
    hooks = list()

    # Checkpoint saver hook
    if self.saver_spec is not None and (self.execution_type == 'single' or self.distributed_spec['task_index'] == 0):
        self.saver_directory = self.saver_spec['directory']
        hooks.append(tf.train.CheckpointSaverHook(
            checkpoint_dir=self.saver_directory,
            save_secs=self.saver_spec.get('seconds', None if 'steps' in self.saver_spec else 600),
            save_steps=self.saver_spec.get('steps'),  # Either one or the other has to be set.
            saver=None,  # None since given via 'scaffold' argument.
            checkpoint_basename=self.saver_spec.get('basename', 'model.ckpt'),
            scaffold=self.scaffold,
            listeners=None
        ))
    else:
        self.saver_directory = None

    # Stop at step hook
    # hooks.append(tf.train.StopAtStepHook(
    #     num_steps=???,  # This makes more sense, if load and continue training.
    #     last_step=None  # Either one or the other has to be set.
    # ))

    # # Step counter hook
    # hooks.append(tf.train.StepCounterHook(
    #     every_n_steps=counter_config.get('steps', 100),  # Either one or the other has to be set.
    #     every_n_secs=counter_config.get('secs'),  # Either one or the other has to be set.
    #     output_dir=None,  # None since given via 'summary_writer' argument.
    #     summary_writer=summary_writer
    # ))

    # Other available hooks:
    # tf.train.FinalOpsHook(final_ops, final_ops_feed_dict=None)
    # tf.train.GlobalStepWaiterHook(wait_until_step)
    # tf.train.LoggingTensorHook(tensors, every_n_iter=None, every_n_secs=None)
    # tf.train.NanTensorHook(loss_tensor, fail_on_nan_loss=True)
    # tf.train.ProfilerHook(save_steps=None, save_secs=None, output_dir='', show_dataflow=True, show_memory=False)

    return hooks

def setup_session(self, server, hooks, graph_default_context):
    """
    Creates and then enters the session for this model (finalizes the graph).

    Args:
        server (tf.train.Server): The tf.train.Server object to connect to (None for single execution).
        hooks (list): A list of (saver, summary, etc..) hooks to be passed to the session.
        graph_default_context: The graph as_default() context that we are currently in.
    """
    if self.execution_type == "distributed":
        # if self.distributed_spec['task_index'] == 0:
        # TensorFlow chief session creator object
        session_creator = tf.train.ChiefSessionCreator(
            scaffold=self.scaffold,
            master=server.target,
            config=self.session_config,
            checkpoint_dir=None,
            checkpoint_filename_with_path=None
        )
        # else:
        #     # TensorFlow worker session creator object
        #     session_creator = tf.train.WorkerSessionCreator(
        #         scaffold=self.scaffold,
        #         master=server.target,
        #         config=self.execution_spec.get('session_config'),
        #     )

        # TensorFlow monitored session object
        self.monitored_session = tf.train.MonitoredSession(
            session_creator=session_creator,
            hooks=hooks,
            stop_grace_period_secs=120  # Default value.
        )
        # Add debug session.run dumping?
        if self.tf_session_dump_dir != "":
            self.monitored_session = DumpingDebugWrapperSession(self.monitored_session, self.tf_session_dump_dir)
    else:
        # TensorFlow non-distributed monitored session object
        self.monitored_session = tf.train.SingularMonitoredSession(
            hooks=hooks,
            scaffold=self.scaffold,
            master='',  # Default value.
            config=self.session_config,  # self.execution_spec.get('session_config'),
            checkpoint_dir=None
        )

    if graph_default_context:
        graph_default_context.__exit__(None, None, None)
    self.graph.finalize()

    # Enter the session to be ready for acting/learning.
    self.monitored_session.__enter__()
    self.session = self.monitored_session._tf_sess()

def close(self):
    """
    Saves the model (if a saver directory is given) and closes the session.
    """
    if self.flush_summarizer is not None:
        self.monitored_session.run(fetches=self.flush_summarizer)
    if self.saver_directory is not None:
        self.save(append_timestep=True)
    self.monitored_session.__exit__(None, None, None)

def tf_initialize(self):
    """
    Creates tf Variables for the local state/internals/action-buffers and for the local and
    global counters for timestep and episode.
    """
    # Timesteps/Episodes
    # Global: (force on global device; local and global model point to the same (global) data).
    with tf.device(device_name_or_function=(self.global_model.device if self.global_model else self.device)):
        # Global timestep
        collection = self.graph.get_collection(name='global-timestep')
        if len(collection) == 0:
            self.global_timestep = tf.get_variable(
                name='global-timestep',
                shape=(),
                dtype=tf.int64,
                trainable=False,
                initializer=tf.constant_initializer(value=0, dtype=tf.int64),
                collections=['global-timestep', tf.GraphKeys.GLOBAL_STEP]
            )
        else:
            assert len(collection) == 1
            self.global_timestep = collection[0]

        # Global episode
        collection = self.graph.get_collection(name='global-episode')
        if len(collection) == 0:
            self.global_episode = tf.get_variable(
                name='global-episode',
                shape=(),
                dtype=tf.int64,
                trainable=False,
                initializer=tf.constant_initializer(value=0, dtype=tf.int64),
                collections=['global-episode']
            )
        else:
            assert len(collection) == 1
            self.global_episode = collection[0]

    # Local counters: local device
    self.timestep = tf.get_variable(
        name='timestep',
        shape=(),
        dtype=tf.int64,
        initializer=tf.constant_initializer(value=0, dtype=tf.int64),
        trainable=False
    )
    self.episode = tf.get_variable(
        name='episode',
        shape=(),
        dtype=tf.int64,
        initializer=tf.constant_initializer(value=0, dtype=tf.int64),
        trainable=False
    )

    self.episode_index_input = tf.placeholder(
        name='episode_index',
        shape=(),
        dtype=tf.int32,
    )

    # States buffer variable
    for name in sorted(self.states_spec):
        self.list_states_buffer[name] = tf.get_variable(
            name=('state-{}'.format(name)),
            shape=((self.num_parallel, self.batching_capacity,) + tuple(self.states_spec[name]['shape'])),
            dtype=util.tf_dtype(self.states_spec[name]['type']),
            trainable=False
        )

    # Internals buffer variable
    for name in sorted(self.internals_spec):
        self.list_internals_buffer[name] = tf.get_variable(
            name=('internal-{}'.format(name)),
            shape=((self.num_parallel, self.batching_capacity,) + tuple(self.internals_spec[name]['shape'])),
            dtype=util.tf_dtype(self.internals_spec[name]['type']),
            trainable=False
        )

    # Actions buffer variable
    for name in sorted(self.actions_spec):
        self.list_actions_buffer[name] = tf.get_variable(
            name=('action-{}'.format(name)),
            shape=((self.num_parallel, self.batching_capacity,) + tuple(self.actions_spec[name]['shape'])),
            dtype=util.tf_dtype(self.actions_spec[name]['type']),
            trainable=False
        )

    # Buffer index
    # for index in range(self.num_parallel):
    self.list_buffer_index = tf.get_variable(
        name='buffer-index',
        shape=(self.num_parallel,),
        dtype=util.tf_dtype('int'),
        trainable=False
    )

def tf_preprocess(self, states, actions, reward):
    """
    Applies preprocessing ops to the raw states/action/reward inputs.

    Args:
        states (dict): Dict of raw state tensors.
        actions (dict): Dict of raw action tensors.
        reward: 1D (float) raw rewards tensor.

    Returns:
        The preprocessed versions of the input tensors.
    """
    # States preprocessing
    for name in sorted(self.states_preprocessing):
        states[name] = self.states_preprocessing[name].process(tensor=states[name])

    # Reward preprocessing
    if self.reward_preprocessing is not None:
        reward = self.reward_preprocessing.process(tensor=reward)

    return states, actions, reward

def tf_action_exploration(self, action, exploration, action_spec):
    """
    Applies optional exploration to the action (post-processor for action outputs).

    Args:
        action (tf.Tensor): The original output action tensor (to be post-processed).
        exploration (Exploration): The Exploration object to use.
        action_spec (dict): Dict specifying the action space.

    Returns:
        The post-processed action output tensor.
    """
    action_shape = tf.shape(input=action)
    exploration_value = exploration.tf_explore(
        episode=self.global_episode,
        timestep=self.global_timestep,
        shape=action_spec['shape']
    )
    exploration_value = tf.expand_dims(input=exploration_value, axis=0)

    if action_spec['type'] == 'bool':
        action = tf.where(
            condition=(tf.random_uniform(shape=action_shape) < exploration_value),
            x=(tf.random_uniform(shape=action_shape) < 0.5),
            y=action
        )
    elif action_spec['type'] == 'int':
        action = tf.where(
            condition=(tf.random_uniform(shape=action_shape) < exploration_value),
            x=tf.random_uniform(shape=action_shape, maxval=action_spec['num_actions'], dtype=util.tf_dtype('int')),
            y=action
        )
    elif action_spec['type'] == 'float':
        noise = tf.random_normal(shape=action_shape, dtype=util.tf_dtype('float'))
        action += noise * exploration_value
        if 'min_value' in action_spec:
            action = tf.clip_by_value(
                t=action,
                clip_value_min=action_spec['min_value'],
                clip_value_max=action_spec['max_value']
            )

    return action

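# Minimal numpy sketch (an assumption for illustration, not the TF graph code
# above) of the 'float' branch: add Gaussian noise scaled by the exploration
# value, then clip to the action bounds when given.
import numpy as np

def explore_float(action, exploration_value, min_value=None, max_value=None):
    noise = np.random.normal(size=action.shape)
    action = action + noise * exploration_value
    if min_value is not None:
        action = np.clip(action, min_value, max_value)  # Mirrors tf.clip_by_value above.
    return action

explored = explore_float(np.zeros(3), exploration_value=0.1, min_value=-1.0, max_value=1.0)
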
def create_act_operations(self, states, internals, deterministic, independent, index):
    """
    Creates and stores tf operations that are fetched when calling act(): actions_output,
    internals_output and timestep_output.

    Args:
        states (dict): Dict of state tensors (each key represents one state space component).
        internals (dict): Dict of prior internal state tensors (each key represents one internal state component).
        deterministic: 0D (bool) tensor (whether to not use action exploration).
        independent (bool): 0D (bool) tensor (whether to store states/internals/action in local buffer).
    """
    # Optional variable noise
    operations = list()
    if self.variable_noise is not None and self.variable_noise > 0.0:
        # Initialize variables
        self.fn_actions_and_internals(
            states=states,
            internals=internals,
            deterministic=deterministic
        )

        noise_deltas = list()
        for variable in self.get_variables():
            noise_delta = tf.random_normal(shape=util.shape(variable), mean=0.0, stddev=self.variable_noise)
            noise_deltas.append(noise_delta)
            operations.append(variable.assign_add(delta=noise_delta))

    # Retrieve actions and internals
    with tf.control_dependencies(control_inputs=operations):
        self.actions_output, self.internals_output = self.fn_actions_and_internals(
            states=states,
            internals=internals,
            deterministic=deterministic
        )

    # Subtract variable noise
    # TODO this is an untested/incomplete feature and maybe should be removed for now.
    with tf.control_dependencies(control_inputs=[self.actions_output[name] for name in sorted(self.actions_output)]):
        operations = list()
        if self.variable_noise is not None and self.variable_noise > 0.0:
            for variable, noise_delta in zip(self.get_variables(), noise_deltas):
                operations.append(variable.assign_sub(delta=noise_delta))

    # Actions exploration
    with tf.control_dependencies(control_inputs=operations):
        for name in sorted(self.actions_exploration):
            self.actions_output[name] = tf.cond(
                pred=self.deterministic_input,
                true_fn=(lambda: self.actions_output[name]),
                false_fn=(lambda: self.fn_action_exploration(
                    action=self.actions_output[name],
                    exploration=self.actions_exploration[name],
                    action_spec=self.actions_spec[name]
                ))
            )

    # Independent act not followed by observe.
    def independent_act():
        """
        Does not store state, action, internal in buffer. Hence, does not have any influence
        on learning. Does not increase timesteps.
        """
        return self.global_timestep

    # Normal act followed by observe, with additional operations.
    def normal_act():
        """
        Stores current states, internals and actions in buffer. Increases timesteps.
        """
        operations = list()
        batch_size = tf.shape(input=states[next(iter(sorted(states)))])[0]
        for name in sorted(states):
            operations.append(tf.assign(
                ref=self.list_states_buffer[name][
                    index, self.list_buffer_index[index]: self.list_buffer_index[index] + batch_size],
                value=states[name]
            ))
        for name in sorted(internals):
            operations.append(tf.assign(
                ref=self.list_internals_buffer[name][
                    index, self.list_buffer_index[index]: self.list_buffer_index[index] + batch_size],
                value=internals[name]
            ))
        for name in sorted(self.actions_output):
            operations.append(tf.assign(
                ref=self.list_actions_buffer[name][
                    index, self.list_buffer_index[index]: self.list_buffer_index[index] + batch_size],
                value=self.actions_output[name]
            ))

        with tf.control_dependencies(control_inputs=operations):
            operations = list()
            operations.append(tf.assign(
                ref=self.list_buffer_index[index: index + 1],
                value=tf.add(self.list_buffer_index[index: index + 1], tf.constant([1]))
            ))

            # Increment timestep
            operations.append(tf.assign_add(
                ref=self.timestep,
                value=tf.to_int64(x=batch_size)
            ))
            operations.append(tf.assign_add(
                ref=self.global_timestep,
                value=tf.to_int64(x=batch_size)
            ))

        with tf.control_dependencies(control_inputs=operations):
            # Trivial operation to enforce control dependency.
            # TODO why not return no-op?
            return self.global_timestep + 0

    # Only increment timestep and update buffer if act is not independent.
    self.timestep_output = tf.cond(
        pred=independent,
        true_fn=independent_act,
        false_fn=normal_act
    )

def create_observe_operations(self, terminal, reward, index):
    """
    Returns the tf op to fetch when an observation batch is passed in (e.g. an episode's rewards
    and terminals). Uses the filled tf buffers for states, actions and internals to run the
    tf_observe_timestep (model-dependent), resets buffer index and increases counters
    (episodes, timesteps).

    Args:
        terminal: The 1D tensor (bool) of terminal signals to process (more than one True within
            that list is ok).
        reward: The 1D tensor (float) of rewards to process.

    Returns:
        Tf op to fetch when `observe()` is called.
    """
    # Increment episode
    num_episodes = tf.count_nonzero(input_tensor=terminal, dtype=util.tf_dtype('int'))
    increment_episode = tf.assign_add(ref=self.episode, value=tf.to_int64(x=num_episodes))
    increment_global_episode = tf.assign_add(ref=self.global_episode, value=tf.to_int64(x=num_episodes))

    with tf.control_dependencies(control_inputs=(increment_episode, increment_global_episode)):
        # Stop gradients
        fn = (lambda x: tf.stop_gradient(input=x[:self.list_buffer_index[index]]))
        states = util.map_tensors(fn=fn, tensors=self.list_states_buffer, index=index)
        internals = util.map_tensors(fn=fn, tensors=self.list_internals_buffer, index=index)
        actions = util.map_tensors(fn=fn, tensors=self.list_actions_buffer, index=index)
        terminal = tf.stop_gradient(input=terminal)
        reward = tf.stop_gradient(input=reward)

        # Observation
        observation = self.fn_observe_timestep(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward
        )

    with tf.control_dependencies(control_inputs=(observation,)):
        # Reset buffer index.
        reset_index = tf.assign(ref=self.list_buffer_index[index], value=0)

    with tf.control_dependencies(control_inputs=(reset_index,)):
        # Trivial operation to enforce control dependency.
        self.episode_output = self.global_episode + 0

    self.list_buffer_index_reset_op = tf.group(
        *(tf.assign(ref=self.list_buffer_index[n], value=0) for n in range(self.num_parallel))
    )

def create_atomic_observe_operations(self, states, actions, internals, terminal, reward, index):
    """
    Returns the tf op to fetch when unbuffered observations are passed in.

    Args:
        states (any): One state (usually a value tuple) or dict of states if multiple states are expected.
        actions (any): One action (usually a value tuple) or dict of actions if multiple actions are expected.
        internals (any): Internal list.
        terminal (bool): boolean indicating if the episode terminated after the observation.
        reward (float): scalar reward that resulted from executing the action.

    Returns:
        Tf op to fetch when `observe()` is called.
    """
    # Increment episode
    num_episodes = tf.count_nonzero(input_tensor=terminal, dtype=util.tf_dtype('int'))
    increment_episode = tf.assign_add(ref=self.episode, value=tf.to_int64(x=num_episodes))
    increment_global_episode = tf.assign_add(ref=self.global_episode, value=tf.to_int64(x=num_episodes))

    with tf.control_dependencies(control_inputs=(increment_episode, increment_global_episode)):
        # Stop gradients. Not using buffers here.
        states = util.map_tensors(fn=tf.stop_gradient, tensors=states)
        internals = util.map_tensors(fn=tf.stop_gradient, tensors=internals)
        actions = util.map_tensors(fn=tf.stop_gradient, tensors=actions)
        terminal = tf.stop_gradient(input=terminal)
        reward = tf.stop_gradient(input=reward)

        # Observation
        observation = self.fn_observe_timestep(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward
        )

    with tf.control_dependencies(control_inputs=(observation,)):
        # Trivial operation to enforce control dependency.
        self.unbuffered_episode_output = self.global_episode + 0

def create_operations(self, states, internals, actions, terminal, reward, deterministic, independent, index):
    """
    Creates and stores tf operations for when `act()` and `observe()` are called.
    """
    self.create_act_operations(
        states=states,
        internals=internals,
        deterministic=deterministic,
        independent=independent,
        index=index
    )
    self.create_observe_operations(
        reward=reward,
        terminal=terminal,
        index=index
    )
    self.create_atomic_observe_operations(
        states=states,
        actions=actions,
        internals=internals,
        reward=reward,
        terminal=terminal,
        index=index
    )

def get_variables(self, include_submodules=False, include_nontrainable=False):
    """
    Returns the TensorFlow variables used by the model.

    Args:
        include_submodules: Includes variables of submodules (e.g. baseline, target network)
            if true.
        include_nontrainable: Includes non-trainable variables if true.

    Returns:
        List of variables.
    """
    if include_nontrainable:
        model_variables = [self.all_variables[key] for key in sorted(self.all_variables)]

        states_preprocessing_variables = [
            variable for name in sorted(self.states_preprocessing)
            for variable in self.states_preprocessing[name].get_variables()
        ]
        model_variables += states_preprocessing_variables

        actions_exploration_variables = [
            variable for name in sorted(self.actions_exploration)
            for variable in self.actions_exploration[name].get_variables()
        ]
        model_variables += actions_exploration_variables

        if self.reward_preprocessing is not None:
            reward_preprocessing_variables = self.reward_preprocessing.get_variables()
            model_variables += reward_preprocessing_variables
    else:
        model_variables = [self.variables[key] for key in sorted(self.variables)]

    return model_variables

def reset(self):
    """
    Resets the model to its initial state on episode start. This should also reset all
    preprocessor(s).

    Returns:
        tuple: Current episode, timestep counter and the shallow-copied list of internal state
            initialization Tensors.
    """
    fetches = [self.global_episode, self.global_timestep]

    # Loop through all preprocessors and reset them as well.
    for name in sorted(self.states_preprocessing):
        fetch = self.states_preprocessing[name].reset()
        if fetch is not None:
            fetches.extend(fetch)

    if self.flush_summarizer is not None:
        fetches.append(self.flush_summarizer)

    # Get the updated episode and timestep counts.
    fetch_list = self.monitored_session.run(fetches=fetches)
    episode, timestep = fetch_list[:2]

    return episode, timestep, self.internals_init

def get_feed_dict(
    self,
    states=None,
    internals=None,
    actions=None,
    terminal=None,
    reward=None,
    deterministic=None,
    independent=None,
    index=None
):
    """
    Returns the feed-dict for the model's acting and observing tf fetches.

    Args:
        states (dict): Dict of state values (each key represents one state space component).
        internals (dict): Dict of internal state values (each key represents one internal state
            component).
        actions (dict): Dict of actions (each key represents one action space component).
        terminal (List[bool]): List of is-terminal signals.
        reward (List[float]): List of reward signals.
        deterministic (bool): Whether actions should be picked without exploration.
        independent (bool): Whether we are doing an independent act (not followed by call to
            observe; not to be stored in model's buffer).

    Returns:
        The feed dict to use for the fetch.
    """
    feed_dict = dict()
    batched = None

    if states is not None:
        if batched is None:
            name = next(iter(states))
            state = np.asarray(states[name])
            batched = (state.ndim != len(self.states_spec[name]['unprocessed_shape']))
        if batched:
            feed_dict.update({self.states_input[name]: states[name] for name in sorted(self.states_input)})
        else:
            feed_dict.update({self.states_input[name]: (states[name],) for name in sorted(self.states_input)})

    if internals is not None:
        if batched is None:
            name = next(iter(internals))
            internal = np.asarray(internals[name])
            batched = (internal.ndim != len(self.internals_spec[name]['shape']))
        if batched:
            feed_dict.update({self.internals_input[name]: internals[name] for name in sorted(self.internals_input)})
        else:
            feed_dict.update({self.internals_input[name]: (internals[name],) for name in sorted(self.internals_input)})

    if actions is not None:
        if batched is None:
            name = next(iter(actions))
            action = np.asarray(actions[name])
            batched = (action.ndim != len(self.actions_spec[name]['shape']))
        if batched:
            feed_dict.update({self.actions_input[name]: actions[name] for name in sorted(self.actions_input)})
        else:
            feed_dict.update({self.actions_input[name]: (actions[name],) for name in sorted(self.actions_input)})

    if terminal is not None:
        if batched is None:
            terminal = np.asarray(terminal)
            batched = (terminal.ndim == 1)
        if batched:
            feed_dict[self.terminal_input] = terminal
        else:
            feed_dict[self.terminal_input] = (terminal,)

    if reward is not None:
        if batched is None:
            reward = np.asarray(reward)
            batched = (reward.ndim == 1)
        if batched:
            feed_dict[self.reward_input] = reward
        else:
            feed_dict[self.reward_input] = (reward,)

    if deterministic is not None:
        feed_dict[self.deterministic_input] = deterministic
    if independent is not None:
        feed_dict[self.independent_input] = independent

    feed_dict[self.episode_index_input] = index

    return feed_dict

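# Sketch of the batched-input detection used by get_feed_dict above: an input
# counts as batched when its ndim exceeds the rank of the per-instance spec
# shape. The spec shape below is an illustrative assumption.
import numpy as np

spec_shape = (4,)                       # hypothetical per-state shape
single = np.zeros(spec_shape)           # ndim == 1 -> wrapped as a 1-element batch
batch = np.zeros((32,) + spec_shape)    # ndim == 2 -> fed as-is
assert single.ndim == len(spec_shape)
assert batch.ndim != len(spec_shape)
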
def act(self, states, internals, deterministic=False, independent=False, fetch_tensors=None, index=0):
    """
    Does a forward pass through the model to retrieve action (outputs) given inputs for state
    (and internal state, if applicable (e.g. RNNs)).

    Args:
        states (dict): Dict of state values (each key represents one state space component).
        internals (dict): Dict of internal state values (each key represents one internal state component).
        deterministic (bool): If True, will not apply exploration after actions are calculated.
        independent (bool): If true, action is not followed by observe (and hence not included in updates).
        fetch_tensors (list): List of names of additional tensors (from the model's network) to fetch (and return).
        index (int): Index of the episode we want to produce the next action for.

    Returns:
        tuple:
            - Actual action-outputs (batched if state input is a batch).
            - Actual values of internal states (if applicable) (batched if state input is a batch).
            - The timestep (int) after calculating the (batch of) action(s).
    """
    name = next(iter(states))
    state = np.asarray(states[name])
    batched = (state.ndim != len(self.states_spec[name]['unprocessed_shape']))
    if batched:
        assert state.shape[0] <= self.batching_capacity

    fetches = [self.actions_output, self.internals_output, self.timestep_output]
    if self.network is not None and fetch_tensors is not None:
        for name in fetch_tensors:
            valid, tensor = self.network.get_named_tensor(name)
            if valid:
                fetches.append(tensor)
            else:
                keys = self.network.get_list_of_named_tensor()
                raise TensorForceError('Cannot fetch named tensor "{}", Available {}.'.format(name, keys))

    # feed_dict[self.deterministic_input] = deterministic
    feed_dict = self.get_feed_dict(
        states=states,
        internals=internals,
        deterministic=deterministic,
        independent=independent,
        index=index
    )

    fetch_list = self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)
    actions, internals, timestep = fetch_list[0:3]

    # Extract the first (and only) action/internal from the batch to make return values non-batched.
    if not batched:
        actions = {name: actions[name][0] for name in sorted(actions)}
        internals = {name: internals[name][0] for name in sorted(internals)}

    if self.network is not None and fetch_tensors is not None:
        fetch_dict = dict()
        for index_, tensor in enumerate(fetch_list[3:]):
            name = fetch_tensors[index_]
            fetch_dict[name] = tensor
        return actions, internals, timestep, fetch_dict
    else:
        return actions, internals, timestep

def observe(self, terminal, reward, index=0):
    """
    Adds an observation (reward and is-terminal) to the model without updating its trainable
    variables.

    Args:
        terminal (List[bool]): List of is-terminal signals.
        reward (List[float]): List of reward signals.
        index (int): Parallel episode you want to observe.

    Returns:
        The value of the model-internal episode counter.
    """
    fetches = self.episode_output
    feed_dict = self.get_feed_dict(terminal=terminal, reward=reward, index=index)

    episode = self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)

    return episode

def save(self, directory=None, append_timestep=True):
    """
    Save TensorFlow model. If no checkpoint directory is given, the model's default saver
    directory is used. Optionally appends the current timestep to prevent overwriting previous
    checkpoint files. Turn that off to be able to load the model from the same path as given
    here.

    Args:
        directory: Optional checkpoint directory.
        append_timestep: Appends the current timestep to the checkpoint file if true.

    Returns:
        Checkpoint path where the model was saved.
    """
    if self.flush_summarizer is not None:
        self.monitored_session.run(fetches=self.flush_summarizer)

    return self.saver.save(
        sess=self.session,
        save_path=(self.saver_directory if directory is None else directory),
        global_step=(self.global_timestep if append_timestep else None),
        # latest_filename=None,  # Defaults to 'checkpoint'.
        meta_graph_suffix='meta',
        write_meta_graph=True,
        write_state=True
    )

def restore(self, directory=None, file=None):
    """
    Restore TensorFlow model. If no checkpoint file is given, the latest checkpoint is restored.
    If no checkpoint directory is given, the model's default saver directory is used (unless
    file specifies the entire path).

    Args:
        directory: Optional checkpoint directory.
        file: Optional checkpoint file, or path if directory not given.
    """
    if file is None:
        file = tf.train.latest_checkpoint(
            checkpoint_dir=(self.saver_directory if directory is None else directory),
            # latest_filename=None  # Corresponds to argument of saver.save() in Model.save().
        )
    elif directory is None:
        file = os.path.join(self.saver_directory, file)
    elif not os.path.isfile(file):
        file = os.path.join(directory, file)

    # if not os.path.isfile(file):
    #     raise TensorForceError("Invalid model directory/file.")

    self.saver.restore(sess=self.session, save_path=file)
    self.session.run(fetches=self.list_buffer_index_reset_op)

def get_savable_components(self):
    """
    Returns all of the components this model consists of that can be individually saved and
    restored. For instance the network or distribution.

    Returns:
        Set of util.SavableComponent
    """
    components = self.get_components()
    components = [components[name] for name in sorted(components)]
    return set(filter(lambda x: isinstance(x, util.SavableComponent), components))

def save_component(self, component_name, save_path): """ Saves a component of this model to the designated location. Args: component_name: The component to save. save_path: The location to save to. Returns: Checkpoint path where the component was saved. """ component = self.get_component(component_name=component_name) self._validate_savable(component=component, component_name=component_name) return component.save(sess=self.session, save_path=save_path)
def restore_component(self, component_name, save_path): """ Restores a component's parameters from a save location. Args: component_name: The component to restore. save_path: The save location. """ component = self.get_component(component_name=component_name) self._validate_savable(component=component, component_name=component_name) component.restore(sess=self.session, save_path=save_path)
def get_component(self, component_name): """ Looks up a component by its name. Args: component_name: The name of the component to look up. Returns: The component for the provided name or None if there is no such component. """ mapping = self.get_components() return mapping[component_name] if component_name in mapping else None
def import_demonstrations(self, demonstrations):
    """
    Imports demonstrations, i.e. expert observations. Note that for large numbers of
    observations, set_demonstrations is more appropriate, which directly sets memory contents
    to an array and expects a different layout.

    Args:
        demonstrations: List of observation dicts.
    """
    if isinstance(demonstrations, dict):
        if self.unique_state:
            demonstrations['states'] = dict(state=demonstrations['states'])
        if self.unique_action:
            demonstrations['actions'] = dict(action=demonstrations['actions'])

        self.model.import_demo_experience(**demonstrations)
    else:
        if self.unique_state:
            states = dict(state=list())
        else:
            states = {name: list() for name in demonstrations[0]['states']}

        internals = {name: list() for name in demonstrations[0]['internals']}

        if self.unique_action:
            actions = dict(action=list())
        else:
            actions = {name: list() for name in demonstrations[0]['actions']}

        terminal = list()
        reward = list()

        for demonstration in demonstrations:
            if self.unique_state:
                states['state'].append(demonstration['states'])
            else:
                for name, state in states.items():
                    state.append(demonstration['states'][name])

            for name, internal in internals.items():
                internal.append(demonstration['internals'][name])

            if self.unique_action:
                actions['action'].append(demonstration['actions'])
            else:
                for name, action in actions.items():
                    action.append(demonstration['actions'][name])

            terminal.append(demonstration['terminal'])
            reward.append(demonstration['reward'])

        self.model.import_demo_experience(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward
        )
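# Expected list layout for the loop above, traced from the code (field values
# are illustrative; each demonstration is one observation dict):
#
#     demonstrations = [
#         dict(states=s0, internals=dict(), actions=a0, terminal=False, reward=1.0),
#         dict(states=s1, internals=dict(), actions=a1, terminal=True, reward=0.0),
#     ]
#     agent.import_demonstrations(demonstrations)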
def seed(self, seed): # pylint: disable=E0202 """ Sets the random seed of the environment to the given value (current time, if seed=None). Naturally deterministic Environments (e.g. ALE or some gym Envs) don't have to implement this method. Args: seed (int): The seed to use for initializing the pseudo-random number generator (default=epoch time in sec). Returns: The actual seed (int) used OR None if Environment did not override this method (no seeding supported). """ if seed is None: self.env.seed = round(time.time()) else: self.env.seed = seed return self.env.seed
def execute(self, action):
    """
    Executes action, observes next state and reward.

    Args:
        action: Action to execute.

    Returns:
        (Dict of) next state(s), boolean indicating terminal, and reward signal.
    """
    if self.env.game_over():
        return self.env.getScreenRGB(), True, 0

    action_space = self.env.getActionSet()
    reward = self.env.act(action_space[action])
    new_state = self.env.getScreenRGB()
    done = self.env.game_over()
    return new_state, done, reward
def states(self): """ Return the state space. Might include subdicts if multiple states are available simultaneously. Returns: dict of state properties (shape and type). """ screen = self.env.getScreenRGB() return dict(shape=screen.shape, type='int')
def sanity_check_states(states_spec): """ Sanity checks a states dict, used to define the state space for an MDP. Throws an error or warns if mismatches are found. Args: states_spec (Union[None,dict]): The spec-dict to check (or None). Returns: Tuple of 1) the state space desc and 2) whether there is only one component in the state space. """ # Leave incoming states dict intact. states = copy.deepcopy(states_spec) # Unique state shortform. is_unique = ('shape' in states) if is_unique: states = dict(state=states) # Normalize states. for name, state in states.items(): # Convert int to unary tuple. if isinstance(state['shape'], int): state['shape'] = (state['shape'],) # Set default type to float. if 'type' not in state: state['type'] = 'float' return states, is_unique
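# Examples of the normalization above, traced from the code:
#
#     sanity_check_states(dict(shape=8))
#     # -> ({'state': {'shape': (8,), 'type': 'float'}}, True)
#
#     sanity_check_states(dict(screen=dict(shape=(84, 84, 3), type='int')))
#     # -> ({'screen': {'shape': (84, 84, 3), 'type': 'int'}}, False)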
def sanity_check_actions(actions_spec): """ Sanity checks an actions dict, used to define the action space for an MDP. Throws an error or warns if mismatches are found. Args: actions_spec (Union[None,dict]): The spec-dict to check (or None). Returns: Tuple of 1) the action space desc and 2) whether there is only one component in the action space. """ # Leave incoming spec-dict intact. actions = copy.deepcopy(actions_spec) # Unique action shortform. is_unique = ('type' in actions) if is_unique: actions = dict(action=actions) # Normalize actions. for name, action in actions.items(): # Set default type to int if 'type' not in action: action['type'] = 'int' # Check required values if action['type'] == 'int': if 'num_actions' not in action: raise TensorForceError("Action requires value 'num_actions' set!") elif action['type'] == 'float': if ('min_value' in action) != ('max_value' in action): raise TensorForceError("Action requires both values 'min_value' and 'max_value' set!") # Set default shape to empty tuple (single-int, discrete action space) if 'shape' not in action: action['shape'] = () # Convert int to unary tuple if isinstance(action['shape'], int): action['shape'] = (action['shape'],) return actions, is_unique
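# Examples of the normalization above, traced from the code:
#
#     sanity_check_actions(dict(type='int', num_actions=4))
#     # -> ({'action': {'type': 'int', 'num_actions': 4, 'shape': ()}}, True)
#
#     sanity_check_actions(dict(steer=dict(type='float', min_value=-1.0, max_value=1.0)))
#     # -> ({'steer': {'type': 'float', 'min_value': -1.0, 'max_value': 1.0, 'shape': ()}}, False)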
def sanity_check_execution_spec(execution_spec):
    """
    Sanity checks an execution_spec dict, used to define execution logic (distributed vs single,
    shared memories, etc.) and distributed learning behavior of agents/models.
    Throws an error or warns if mismatches are found.

    Args:
        execution_spec (Union[None,dict]): The spec-dict to check (or None). Dict needs to have the following keys:
            - type: "single", "distributed"
            - distributed_spec: The distributed_spec dict with the following fields:
                - cluster_spec: TensorFlow ClusterSpec object (required).
                - job: The tf-job name.
                - task_index: integer (required).
                - protocol: communication protocol (default: none, i.e. 'grpc').
            - session_config: dict with options for a TensorFlow ConfigProto object (default: None).

    Returns:
        A cleaned-up (in-place) version of the given execution-spec.
    """
    # Default spec: single mode.
    def_ = dict(type="single", distributed_spec=None, session_config=None)

    if execution_spec is None:
        return def_

    assert isinstance(execution_spec, dict), \
        "ERROR: execution-spec needs to be of type dict (but is of type {})!".format(type(execution_spec).__name__)

    type_ = execution_spec.get("type")

    # TODO: Figure out what exactly we need for options and what types we should support.
    if type_ == "distributed":
        def_ = dict(job="ps", task_index=0, cluster_spec={
            "ps": ["localhost:22222"],
            "worker": ["localhost:22223"]
        })
        def_.update(execution_spec.get("distributed_spec", {}))
        execution_spec["distributed_spec"] = def_
        execution_spec["session_config"] = execution_spec.get("session_config")
        return execution_spec
    elif type_ == "multi-threaded":
        return execution_spec
    elif type_ == "single":
        return execution_spec

    if execution_spec.get('num_parallel') is not None:
        assert isinstance(execution_spec['num_parallel'], int), \
            "ERROR: num_parallel needs to be of type int but is of type {}!".format(
                type(execution_spec['num_parallel']).__name__)
        assert execution_spec['num_parallel'] > 0, \
            "ERROR: num_parallel needs to be > 0 but is equal to {}".format(execution_spec['num_parallel'])
        return execution_spec

    raise TensorForceError("Unsupported execution type specified ({})!".format(type_))
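# Example defaults, traced from the code:
#
#     sanity_check_execution_spec(None)
#     # -> {'type': 'single', 'distributed_spec': None, 'session_config': None}
#
#     sanity_check_execution_spec(dict(type='distributed', distributed_spec=dict(task_index=1)))
#     # -> type 'distributed' with job 'ps', task_index 1 and the default localhost cluster_spec filled in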
def make_game(): """Builds and returns an Extraterrestrial Marauders game.""" return ascii_art.ascii_art_to_game( GAME_ART, what_lies_beneath=' ', sprites=dict( [('P', PlayerSprite)] + [(c, UpwardLaserBoltSprite) for c in UPWARD_BOLT_CHARS] + [(c, DownwardLaserBoltSprite) for c in DOWNWARD_BOLT_CHARS]), drapes=dict(X=MarauderDrape, B=BunkerDrape), update_schedule=['P', 'B', 'X'] + list(_ALL_BOLT_CHARS))
def _fly(self, board, layers, things, the_plot): """Handles the behaviour of visible bolts flying toward Marauders.""" # Disappear if we've hit a Marauder or a bunker. if (self.character in the_plot['bunker_hitters'] or self.character in the_plot['marauder_hitters']): return self._teleport((-1, -1)) # Otherwise, northward! self._north(board, the_plot)
def _fire(self, layers, things, the_plot): """Launches a new bolt from the player.""" # We don't fire if the player fired another bolt just now. if the_plot.get('last_player_shot') == the_plot.frame: return the_plot['last_player_shot'] = the_plot.frame # We start just above the player. row, col = things['P'].position self._teleport((row-1, col))
def _fly(self, board, layers, things, the_plot): """Handles the behaviour of visible bolts flying toward the player.""" # Disappear if we've hit a bunker. if self.character in the_plot['bunker_hitters']: return self._teleport((-1, -1)) # End the game if we've hit the player. if self.position == things['P'].position: the_plot.terminate_episode() self._south(board, the_plot)
def _fire(self, layers, the_plot): """Launches a new bolt from a random Marauder.""" # We don't fire if another Marauder fired a bolt just now. if the_plot.get('last_marauder_shot') == the_plot.frame: return the_plot['last_marauder_shot'] = the_plot.frame # Which Marauder should fire the laser bolt? col = np.random.choice(np.nonzero(layers['X'].sum(axis=0))[0]) row = np.nonzero(layers['X'][:, col])[0][-1] + 1 # Move ourselves just below that Marauder. self._teleport((row, col))
def reset(self, history=None):
    """
    Resets the Runner's internal stats counters. If a history dict is given, the counters are
    initialized from it; otherwise they start out empty.

    Args:
        history (dict): A dictionary containing an already run experiment's results. Keys should be:
            episode_rewards (list of rewards), episode_timesteps (lengths of episodes),
            episode_times (run-times)
    """
    if not history:
        history = dict()

    self.episode_rewards = history.get("episode_rewards", list())
    self.episode_timesteps = history.get("episode_timesteps", list())
    self.episode_times = history.get("episode_times", list())
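# Example: resuming stats from a previous run (keys as documented above; values illustrative):
#
#     runner.reset(history=dict(
#         episode_rewards=[1.0, 3.5],
#         episode_timesteps=[200, 180],
#         episode_times=[0.41, 0.38]
#     ))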
def run(self, num_episodes, num_timesteps, max_episode_timesteps, deterministic, episode_finished, summary_report, summary_interval):
    """
    Executes this runner by starting to act (via Agent(s)) in the given Environment(s).
    Stops execution according to certain conditions (e.g. max. number of episodes, etc.).
    Calls callback functions after each episode and/or after some summary criteria are met.

    Args:
        num_episodes (int): Max. number of episodes to run globally in total (across all threads/workers).
        num_timesteps (int): Max. number of time steps to run globally in total (across all threads/workers).
        max_episode_timesteps (int): Max. number of timesteps per episode.
        deterministic (bool): Whether to disable exploration when selecting actions.
        episode_finished (callable): A function to be called once an episode has finished. Should take
            a BaseRunner object and some worker ID (e.g. thread-ID or task-ID). Can decide for itself
            every how many episodes it should report something and what to report.
        summary_report (callable): Deprecated; function that could produce a summary over the training
            progress so far.
        summary_interval (int): Deprecated; the number of time steps to execute (globally)
            before summary_report is called.
    """
    raise NotImplementedError
def tf_retrieve_indices(self, buffer_elements, priority_indices): """ Fetches experiences for given indices by combining entries from buffer which have no priorities, and entries from priority memory. Args: buffer_elements: Number of buffer elements to retrieve priority_indices: Index tensor for priority memory Returns: Batch of experiences """ states = dict() buffer_start = self.buffer_index - buffer_elements buffer_end = self.buffer_index # Fetch entries from respective memories, concat. for name in sorted(self.states_memory): buffer_state_memory = self.states_buffer[name] # Slicing is more efficient than gathering, and buffer elements are always # fetched using contiguous indices. buffer_states = buffer_state_memory[buffer_start:buffer_end] # Memory indices are obtained via priority sampling, hence require gather. memory_states = tf.gather(params=self.states_memory[name], indices=priority_indices) states[name] = tf.concat(values=(buffer_states, memory_states), axis=0) internals = dict() for name in sorted(self.internals_memory): internal_buffer_memory = self.internals_buffer[name] buffer_internals = internal_buffer_memory[buffer_start:buffer_end] memory_internals = tf.gather(params=self.internals_memory[name], indices=priority_indices) internals[name] = tf.concat(values=(buffer_internals, memory_internals), axis=0) actions = dict() for name in sorted(self.actions_memory): action_buffer_memory = self.actions_buffer[name] buffer_action = action_buffer_memory[buffer_start:buffer_end] memory_action = tf.gather(params=self.actions_memory[name], indices=priority_indices) actions[name] = tf.concat(values=(buffer_action, memory_action), axis=0) buffer_terminal = self.terminal_buffer[buffer_start:buffer_end] priority_terminal = tf.gather(params=self.terminal_memory, indices=priority_indices) terminal = tf.concat(values=(buffer_terminal, priority_terminal), axis=0) buffer_reward = self.reward_buffer[buffer_start:buffer_end] priority_reward = tf.gather(params=self.reward_memory, indices=priority_indices) reward = tf.concat(values=(buffer_reward, priority_reward), axis=0) if self.include_next_states: assert util.rank(priority_indices) == 1 next_priority_indices = (priority_indices + 1) % self.capacity next_buffer_start = (buffer_start + 1) % self.buffer_size next_buffer_end = (buffer_end + 1) % self.buffer_size next_states = dict() for name in sorted(self.states_memory): buffer_state_memory = self.states_buffer[name] buffer_next_states = buffer_state_memory[next_buffer_start:next_buffer_end] memory_next_states = tf.gather(params=self.states_memory[name], indices=next_priority_indices) next_states[name] = tf.concat(values=(buffer_next_states, memory_next_states), axis=0) next_internals = dict() for name in sorted(self.internals_memory): buffer_internal_memory = self.internals_buffer[name] buffer_next_internals = buffer_internal_memory[next_buffer_start:next_buffer_end] memory_next_internals = tf.gather(params=self.internals_memory[name], indices=next_priority_indices) next_internals[name] = tf.concat(values=(buffer_next_internals, memory_next_internals), axis=0) return dict( states=states, internals=internals, actions=actions, terminal=terminal, reward=reward, next_states=next_states, next_internals=next_internals ) else: return dict( states=states, internals=internals, actions=actions, terminal=terminal, reward=reward )
def tf_update_batch(self, loss_per_instance):
    """
    Updates priority memory by performing the following steps:

    1. Use saved indices from prior retrieval to reconstruct the batch
       elements which will have their priorities updated.
    2. Compute priorities for these elements.
    3. Insert buffer elements into memory, potentially overwriting existing elements.
    4. Update priorities of existing memory elements.
    5. Re-sort memory.
    6. Update buffer insertion index.

    Note that this implementation could be made more efficient by maintaining
    a sorted version via sum trees.

    Args:
        loss_per_instance: Losses from recent batch to perform priority update.
    """
    # 1. We reconstruct the batch from the buffer and the priority memory via
    # the TensorFlow variables holding the respective indices.
    mask = tf.not_equal(
        x=self.batch_indices,
        y=tf.zeros(shape=tf.shape(input=self.batch_indices), dtype=tf.int32)
    )
    priority_indices = tf.reshape(tensor=tf.where(condition=mask), shape=[-1])

    # These are elements from the buffer which first need to be inserted into the main memory.
    sampled_buffer_batch = self.tf_retrieve_indices(
        buffer_elements=self.last_batch_buffer_elems,
        priority_indices=priority_indices
    )

    # Extract batch elements.
    states = sampled_buffer_batch['states']
    internals = sampled_buffer_batch['internals']
    actions = sampled_buffer_batch['actions']
    terminal = sampled_buffer_batch['terminal']
    reward = sampled_buffer_batch['reward']

    # 2. Compute priorities for all batch elements.
    priorities = loss_per_instance ** self.prioritization_weight
    assignments = list()

    # 3. Insert the buffer elements from the recent batch into memory,
    # overwrite memory if full.
    memory_end_index = self.memory_index + self.last_batch_buffer_elems
    memory_insert_indices = tf.range(
        start=self.memory_index,
        limit=memory_end_index
    ) % self.capacity

    for name in sorted(states):
        assignments.append(tf.scatter_update(
            ref=self.states_memory[name],
            indices=memory_insert_indices,
            # Only buffer elements from batch.
            updates=states[name][0:self.last_batch_buffer_elems]
        ))
    for name in sorted(internals):
        # Insert internal states into the main memory (same indices as the other buffer elements).
        assignments.append(tf.scatter_update(
            ref=self.internals_memory[name],
            indices=memory_insert_indices,
            updates=internals[name][0:self.last_batch_buffer_elems]
        ))
    assignments.append(tf.scatter_update(
        ref=self.priorities,
        indices=memory_insert_indices,
        updates=priorities[0:self.last_batch_buffer_elems]
    ))
    assignments.append(tf.scatter_update(
        ref=self.terminal_memory,
        indices=memory_insert_indices,
        updates=terminal[0:self.last_batch_buffer_elems]
    ))
    assignments.append(tf.scatter_update(
        ref=self.reward_memory,
        indices=memory_insert_indices,
        updates=reward[0:self.last_batch_buffer_elems]
    ))
    for name in sorted(actions):
        assignments.append(tf.scatter_update(
            ref=self.actions_memory[name],
            indices=memory_insert_indices,
            updates=actions[name][0:self.last_batch_buffer_elems]
        ))

    # 4. Update the priorities of the elements already in the memory.
    # Slice out remaining elements - [] if all batch elements were from buffer.
    main_memory_priorities = priorities[self.last_batch_buffer_elems:]
    # Note that priority indices can have a different shape because multiple
    # samples can be from the same index.
    main_memory_priorities = main_memory_priorities[0:tf.shape(priority_indices)[0]]
    assignments.append(tf.scatter_update(
        ref=self.priorities,
        indices=priority_indices,
        updates=main_memory_priorities
    ))

    with tf.control_dependencies(control_inputs=assignments):
        # 5. Re-sort memory according to priorities.
        assignments = list()

        # Obtain sorted order and indices.
        sorted_priorities, sorted_indices = tf.nn.top_k(
            input=self.priorities,
            k=self.capacity,
            sorted=True
        )

        # Re-assign elements according to priorities.
        # Priorities was the tensor we used to sort, so this can be directly assigned.
        assignments.append(tf.assign(ref=self.priorities, value=sorted_priorities))

        # All other memory variables are assigned via scatter updates using the indices
        # returned by the sort:
        assignments.append(tf.scatter_update(
            ref=self.terminal_memory,
            indices=sorted_indices,
            updates=self.terminal_memory
        ))
        for name in sorted(self.states_memory):
            assignments.append(tf.scatter_update(
                ref=self.states_memory[name],
                indices=sorted_indices,
                updates=self.states_memory[name]
            ))
        for name in sorted(self.actions_memory):
            assignments.append(tf.scatter_update(
                ref=self.actions_memory[name],
                indices=sorted_indices,
                updates=self.actions_memory[name]
            ))
        for name in sorted(self.internals_memory):
            assignments.append(tf.scatter_update(
                ref=self.internals_memory[name],
                indices=sorted_indices,
                updates=self.internals_memory[name]
            ))
        assignments.append(tf.scatter_update(
            ref=self.reward_memory,
            indices=sorted_indices,
            updates=self.reward_memory
        ))

    # 6. Reset buffer index and increment memory index by inserted elements.
    with tf.control_dependencies(control_inputs=assignments):
        assignments = list()
        # Decrement pointer by the number of buffer elements just consumed.
        assignments.append(tf.assign_sub(ref=self.buffer_index, value=self.last_batch_buffer_elems))

        # Keep track of the memory size so we know whether we can sample from the main memory.
        # Since the memory pointer can wrap around to 0, we want to know if we are at capacity.
        total_inserted_elements = self.memory_size + self.last_batch_buffer_elems
        assignments.append(tf.assign(
            ref=self.memory_size,
            value=tf.minimum(x=total_inserted_elements, y=self.capacity)
        ))

        # Update memory insertion index.
        assignments.append(tf.assign(ref=self.memory_index, value=memory_end_index))

        # Reset batch indices.
        assignments.append(tf.assign(
            ref=self.batch_indices,
            value=tf.zeros(shape=tf.shape(self.batch_indices), dtype=tf.int32)
        ))

    with tf.control_dependencies(control_inputs=assignments):
        return tf.no_op()
def setup_components_and_tf_funcs(self, custom_getter=None): """ Creates and stores Network and Distribution objects. Generates and stores all template functions. """ # Create network before super-call, since non-empty internals_spec attribute (for RNN) is required subsequently. self.network = Network.from_spec( spec=self.network_spec, kwargs=dict(summary_labels=self.summary_labels) ) # Now that we have the network component: We can create the internals placeholders. assert len(self.internals_spec) == 0 self.internals_spec = self.network.internals_spec() for name in sorted(self.internals_spec): internal = self.internals_spec[name] self.internals_input[name] = tf.placeholder( dtype=util.tf_dtype(internal['type']), shape=(None,) + tuple(internal['shape']), name=('internal-' + name) ) if internal['initialization'] == 'zeros': self.internals_init[name] = np.zeros(shape=internal['shape']) else: raise TensorForceError("Invalid internal initialization value.") # And only then call super. custom_getter = super(DistributionModel, self).setup_components_and_tf_funcs(custom_getter) # Distributions self.distributions = self.create_distributions() # KL divergence function self.fn_kl_divergence = tf.make_template( name_='kl-divergence', func_=self.tf_kl_divergence, custom_getter_=custom_getter ) return custom_getter
def create_distributions(self): """ Creates and returns the Distribution objects based on self.distributions_spec. Returns: Dict of distributions according to self.distributions_spec. """ distributions = dict() for name in sorted(self.actions_spec): action = self.actions_spec[name] if self.distributions_spec is not None and name in self.distributions_spec: kwargs = dict(action) kwargs['scope'] = name kwargs['summary_labels'] = self.summary_labels distributions[name] = Distribution.from_spec( spec=self.distributions_spec[name], kwargs=kwargs ) elif action['type'] == 'bool': distributions[name] = Bernoulli( shape=action['shape'], scope=name, summary_labels=self.summary_labels ) elif action['type'] == 'int': distributions[name] = Categorical( shape=action['shape'], num_actions=action['num_actions'], scope=name, summary_labels=self.summary_labels ) elif action['type'] == 'float': if 'min_value' in action: distributions[name] = Beta( shape=action['shape'], min_value=action['min_value'], max_value=action['max_value'], scope=name, summary_labels=self.summary_labels ) else: distributions[name] = Gaussian( shape=action['shape'], scope=name, summary_labels=self.summary_labels ) return distributions
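# Default mapping applied above when no distributions_spec entry is given,
# traced from the branches of the code:
#
#     {'type': 'bool', 'shape': ()}                                        -> Bernoulli
#     {'type': 'int', 'shape': (), 'num_actions': 4}                       -> Categorical
#     {'type': 'float', 'shape': (2,)}                                     -> Gaussian
#     {'type': 'float', 'shape': (), 'min_value': -1.0, 'max_value': 1.0}  -> Beta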
def from_spec(spec): """ Creates an exploration object from a specification dict. """ exploration = util.get_object( obj=spec, predefined_objects=tensorforce.core.explorations.explorations ) assert isinstance(exploration, Exploration) return exploration
def from_spec(spec, kwargs=None): """ Creates a memory from a specification dict. """ memory = util.get_object( obj=spec, predefined_objects=tensorforce.core.memories.memories, kwargs=kwargs ) assert isinstance(memory, Memory) return memory
def tf_step( self, time, variables, arguments, **kwargs ): """ Creates the TensorFlow operations for performing an optimization step. Args: time: Time tensor. variables: List of variables to optimize. arguments: Dict of arguments for callables, like fn_loss. **kwargs: Additional arguments passed on to the internal optimizer. Returns: List of delta tensors corresponding to the updates for each optimized variable. """ # Get some (batched) argument to determine batch size. arguments_iter = iter(arguments.values()) some_argument = next(arguments_iter) try: while not isinstance(some_argument, tf.Tensor) or util.rank(some_argument) == 0: if isinstance(some_argument, dict): if some_argument: arguments_iter = iter(some_argument.values()) some_argument = next(arguments_iter) elif isinstance(some_argument, list): if some_argument: arguments_iter = iter(some_argument) some_argument = next(arguments_iter) elif some_argument is None or util.rank(some_argument) == 0: # Non-batched argument some_argument = next(arguments_iter) else: raise TensorForceError("Invalid argument type.") except StopIteration: raise TensorForceError("Invalid argument type.") batch_size = tf.shape(input=some_argument)[0] num_samples = tf.cast( x=(self.fraction * tf.cast(x=batch_size, dtype=util.tf_dtype('float'))), dtype=util.tf_dtype('int') ) num_samples = tf.maximum(x=num_samples, y=1) indices = tf.random_uniform(shape=(num_samples,), maxval=batch_size, dtype=tf.int32) subsampled_arguments = util.map_tensors( fn=(lambda arg: arg if util.rank(arg) == 0 else tf.gather(params=arg, indices=indices)), tensors=arguments ) return self.optimizer.step( time=time, variables=variables, arguments=subsampled_arguments, **kwargs )
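# Standalone numpy sketch of the subsampling step above (illustrative only;
# the real version builds TF ops and leaves rank-0 arguments untouched):
import numpy as np

def subsample_arguments(arguments, fraction, rng=np.random):
    """Gather a random fraction of each batched (rank > 0) argument."""
    # Determine the batch size from the first batched argument.
    batch_size = next(len(np.asarray(arg)) for arg in arguments.values() if np.ndim(arg) > 0)
    num_samples = max(int(fraction * batch_size), 1)
    indices = rng.randint(low=0, high=batch_size, size=num_samples)
    return {
        name: arg if np.ndim(arg) == 0 else np.asarray(arg)[indices]
        for name, arg in arguments.items()
    }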
def run(self, num_timesteps=None, num_episodes=None, max_episode_timesteps=None, deterministic=False,
        episode_finished=None, summary_report=None, summary_interval=None, timesteps=None, episodes=None,
        testing=False, sleep=None):
    """
    Runs this runner's agent in its environment until a stopping condition is met.

    Args:
        num_timesteps (int): Max. number of time steps to run in total.
        num_episodes (int): Max. number of episodes to run in total.
        max_episode_timesteps (int): Max. number of timesteps per episode.
        deterministic (bool): Whether to disable exploration when selecting actions.
        episode_finished (callable): Callback after each episode; returning False stops the run.
        summary_report (callable): Deprecated; see episode_finished.
        summary_interval (int): Deprecated; see episode_finished.
        timesteps (int): Deprecated; see num_timesteps.
        episodes (int): Deprecated; see num_episodes.
        testing (bool): If True, the agent only acts and does not observe (no updates).
        sleep (float): Optional number of seconds to sleep after each time step.
    """
    # Deprecation warnings.
    if timesteps is not None:
        num_timesteps = timesteps
        warnings.warn("WARNING: `timesteps` parameter is deprecated, use `num_timesteps` instead.",
                      category=DeprecationWarning)
    if episodes is not None:
        num_episodes = episodes
        warnings.warn("WARNING: `episodes` parameter is deprecated, use `num_episodes` instead.",
                      category=DeprecationWarning)

    # Figure out whether we are using the deprecated way of "episode_finished" reporting.
    old_episode_finished = False
    if episode_finished is not None and len(getargspec(episode_finished).args) == 1:
        old_episode_finished = True

    # Keep track of episode reward and episode length for statistics.
    self.start_time = time.time()

    self.agent.reset()

    if num_episodes is not None:
        num_episodes += self.agent.episode

    if num_timesteps is not None:
        num_timesteps += self.agent.timestep

    # Add progress bar.
    with tqdm(total=num_episodes) as pbar:
        # Episode loop.
        while True:
            episode_start_time = time.time()
            state = self.environment.reset()
            self.agent.reset()

            # Update global counters.
            self.global_episode = self.agent.episode  # global value (across all agents)
            self.global_timestep = self.agent.timestep  # global value (across all agents)

            episode_reward = 0
            self.current_timestep = 0

            # Time step (within episode) loop.
            while True:
                action = self.agent.act(states=state, deterministic=deterministic)

                reward = 0
                for _ in xrange(self.repeat_actions):
                    state, terminal, step_reward = self.environment.execute(action=action)
                    reward += step_reward
                    if terminal:
                        break

                if max_episode_timesteps is not None and self.current_timestep >= max_episode_timesteps:
                    terminal = True

                if not testing:
                    self.agent.observe(terminal=terminal, reward=reward)

                self.global_timestep += 1
                self.current_timestep += 1
                episode_reward += reward

                if terminal or self.agent.should_stop():  # TODO: should_stop also terminate?
                    break

                if sleep is not None:
                    time.sleep(sleep)

            # Update our episode stats.
            time_passed = time.time() - episode_start_time
            self.episode_rewards.append(episode_reward)
            self.episode_timesteps.append(self.current_timestep)
            self.episode_times.append(time_passed)

            self.global_episode += 1
            pbar.update(1)

            # Check whether we should stop this run.
            if episode_finished is not None:
                # Deprecated way (passing in only the runner object):
                if old_episode_finished:
                    if not episode_finished(self):
                        break
                # New unified way (passing in BaseRunner AND some worker ID):
                elif not episode_finished(self, self.id):
                    break
            if (num_episodes is not None and self.global_episode >= num_episodes) or \
                    (num_timesteps is not None and self.global_timestep >= num_timesteps) or \
                    self.agent.should_stop():
                break

        pbar.update(num_episodes - self.global_episode)
def tf_retrieve_indices(self, indices): """ Fetches experiences for given indices. Args: indices: Index tensor Returns: Batch of experiences """ states = dict() for name in sorted(self.states_memory): states[name] = tf.gather(params=self.states_memory[name], indices=indices) internals = dict() for name in sorted(self.internals_memory): internals[name] = tf.gather(params=self.internals_memory[name], indices=indices) actions = dict() for name in sorted(self.actions_memory): actions[name] = tf.gather(params=self.actions_memory[name], indices=indices) terminal = tf.gather(params=self.terminal_memory, indices=indices) reward = tf.gather(params=self.reward_memory, indices=indices) if self.include_next_states: assert util.rank(indices) == 1 next_indices = (indices + 1) % self.capacity next_states = dict() for name in sorted(self.states_memory): next_states[name] = tf.gather(params=self.states_memory[name], indices=next_indices) next_internals = dict() for name in sorted(self.internals_memory): next_internals[name] = tf.gather(params=self.internals_memory[name], indices=next_indices) return dict( states=states, internals=internals, actions=actions, terminal=terminal, reward=reward, next_states=next_states, next_internals=next_internals ) else: return dict( states=states, internals=internals, actions=actions, terminal=terminal, reward=reward )
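# Example of the circular next-index computation above (capacity 4):
#
#     indices      = [1, 3]
#     next_indices = [(1 + 1) % 4, (3 + 1) % 4]  # -> [2, 0], wrapping at the end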
def tf_solve(self, fn_x, x_init, base_value, target_value, estimated_improvement=None): """ Iteratively optimizes $f(x)$ for $x$ on the line between $x'$ and $x_0$. Args: fn_x: A callable returning the value $f(x)$ at $x$. x_init: Initial solution guess $x_0$. base_value: Value $f(x')$ at $x = x'$. target_value: Value $f(x_0)$ at $x = x_0$. estimated_improvement: Estimated improvement for $x = x_0$, $f(x')$ if None. Returns: A solution $x$ to the problem as given by the solver. """ return super(LineSearch, self).tf_solve(fn_x, x_init, base_value, target_value, estimated_improvement)
def tf_initialize(self, x_init, base_value, target_value, estimated_improvement): """ Initialization step preparing the arguments for the first iteration of the loop body. Args: x_init: Initial solution guess $x_0$. base_value: Value $f(x')$ at $x = x'$. target_value: Value $f(x_0)$ at $x = x_0$. estimated_improvement: Estimated value at $x = x_0$, $f(x')$ if None. Returns: Initial arguments for tf_step. """ self.base_value = base_value if estimated_improvement is None: # TODO: Is this a good alternative? estimated_improvement = tf.abs(x=base_value) first_step = super(LineSearch, self).tf_initialize(x_init) improvement = tf.divide( x=(target_value - self.base_value), y=tf.maximum(x=estimated_improvement, y=util.epsilon) ) last_improvement = improvement - 1.0 if self.mode == 'linear': deltas = [-t * self.parameter for t in x_init] self.estimated_incr = -estimated_improvement * self.parameter elif self.mode == 'exponential': deltas = [-t * self.parameter for t in x_init] return first_step + (deltas, improvement, last_improvement, estimated_improvement)
def tf_step(self, x, iteration, deltas, improvement, last_improvement, estimated_improvement): """ Iteration loop body of the line search algorithm. Args: x: Current solution estimate $x_t$. iteration: Current iteration counter $t$. deltas: Current difference $x_t - x'$. improvement: Current improvement $(f(x_t) - f(x')) / v'$. last_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$. estimated_improvement: Current estimated value $v'$. Returns: Updated arguments for next iteration. """ x, next_iteration, deltas, improvement, last_improvement, estimated_improvement = super(LineSearch, self).tf_step( x, iteration, deltas, improvement, last_improvement, estimated_improvement ) next_x = [t + delta for t, delta in zip(x, deltas)] if self.mode == 'linear': next_deltas = deltas next_estimated_improvement = estimated_improvement + self.estimated_incr elif self.mode == 'exponential': next_deltas = [delta * self.parameter for delta in deltas] next_estimated_improvement = estimated_improvement * self.parameter target_value = self.fn_x(next_deltas) next_improvement = tf.divide( x=(target_value - self.base_value), y=tf.maximum(x=next_estimated_improvement, y=util.epsilon) ) return next_x, next_iteration, next_deltas, next_improvement, improvement, next_estimated_improvement
def tf_next_step(self, x, iteration, deltas, improvement, last_improvement, estimated_improvement): """ Termination condition: max number of iterations, or no improvement for last step, or improvement less than acceptable ratio, or estimated value not positive. Args: x: Current solution estimate $x_t$. iteration: Current iteration counter $t$. deltas: Current difference $x_t - x'$. improvement: Current improvement $(f(x_t) - f(x')) / v'$. last_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$. estimated_improvement: Current estimated value $v'$. Returns: True if another iteration should be performed. """ next_step = super(LineSearch, self).tf_next_step( x, iteration, deltas, improvement, last_improvement, estimated_improvement ) def undo_deltas(): value = self.fn_x([-delta for delta in deltas]) with tf.control_dependencies(control_inputs=(value,)): # Trivial operation to enforce control dependency return tf.less(x=value, y=value) # == False improved = tf.cond( pred=(improvement > last_improvement), true_fn=(lambda: True), false_fn=undo_deltas ) next_step = tf.logical_and(x=next_step, y=improved) next_step = tf.logical_and(x=next_step, y=(improvement < self.accept_ratio)) return tf.logical_and(x=next_step, y=(estimated_improvement > util.epsilon))
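# Generic Armijo-style backtracking sketch in plain Python (a simplification;
# the class above instead tracks improvement normalized by an estimated value
# and undoes the last step via fn_x when it stops improving):
def backtracking_line_search(fn, x, direction, grad_dot_dir, shrink=0.5, c=1e-4, max_iterations=10):
    """Shrink the step until fn decreases by at least c * step * grad_dot_dir."""
    f0 = fn(x)
    step = 1.0
    for _ in range(max_iterations):
        candidate = [xi + step * di for xi, di in zip(x, direction)]
        if fn(candidate) <= f0 + c * step * grad_dot_dir:
            return candidate, step
        step *= shrink
    return x, 0.0  # No acceptable step found; keep the old point.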
def escape(text, quote=False, smart_amp=True):
    """Replace the special characters "&", "<" and ">" with HTML-safe sequences.

    The original cgi.escape always escapes "&"; with smart_amp enabled,
    ampersands that are already part of an entity are left untouched.

    :param quote: if set to True, " and ' will be escaped.
    :param smart_amp: if set to False, & will always be escaped.
    """
    if smart_amp:
        text = _escape_pattern.sub('&amp;', text)
    else:
        text = text.replace('&', '&amp;')
    text = text.replace('<', '&lt;')
    text = text.replace('>', '&gt;')
    if quote:
        text = text.replace('"', '&quot;')
        text = text.replace("'", '&#39;')
    return text
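# Example outputs, traced from the branches above (smart_amp relies on the
# module-level _escape_pattern, which leaves existing entities untouched):
#
#     escape('a < b & "c"')              # -> 'a &lt; b &amp; "c"'
#     escape('a < b & "c"', quote=True)  # -> 'a &lt; b &amp; &quot;c&quot;'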
def escape_link(url): """Remove dangerous URL schemes like javascript: and escape afterwards.""" lower_url = url.lower().strip('\x00\x1a \n\r\t') for scheme in _scheme_blacklist: if lower_url.startswith(scheme): return '' return escape(url, quote=True, smart_amp=False)
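# Examples, assuming 'javascript:' is among the module-level _scheme_blacklist entries:
#
#     escape_link('javascript:alert(1)')           # -> ''
#     escape_link('https://example.com/?a=1&b=2')  # -> 'https://example.com/?a=1&amp;b=2'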
def markdown(text, escape=True, **kwargs): """Render markdown formatted text to html. :param text: markdown formatted text content. :param escape: if set to False, all html tags will not be escaped. :param use_xhtml: output with xhtml tags. :param hard_wrap: if set to True, it will use the GFM line breaks feature. :param parse_block_html: parse text only in block level html. :param parse_inline_html: parse text only in inline level html. """ return Markdown(escape=escape, **kwargs)(text)
def parse_lheading(self, m): """Parse setext heading.""" self.tokens.append({ 'type': 'heading', 'level': 1 if m.group(2) == '=' else 2, 'text': m.group(1), })
def hard_wrap(self): """Grammar for hard wrap linebreak. You don't need to add two spaces at the end of a line. """ self.linebreak = re.compile(r'^ *\n(?!\s*$)') self.text = re.compile( r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| *\n|$)' )
def block_code(self, code, lang=None): """Rendering block level code. ``pre > code``. :param code: text content of the code block. :param lang: language of the given code. """ code = code.rstrip('\n') if not lang: code = escape(code, smart_amp=False) return '<pre><code>%s\n</code></pre>\n' % code code = escape(code, quote=True, smart_amp=False) return '<pre><code class="lang-%s">%s\n</code></pre>\n' % (lang, code)
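# Example outputs, traced from the two branches above:
#
#     renderer.block_code('x = 1\n')
#     # -> '<pre><code>x = 1\n</code></pre>\n'
#
#     renderer.block_code('x = 1', lang='python')
#     # -> '<pre><code class="lang-python">x = 1\n</code></pre>\n'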
def block_html(self, html): """Rendering block level pure html content. :param html: text content of the html snippet. """ if self.options.get('skip_style') and \ html.lower().startswith('<style'): return '' if self.options.get('escape'): return escape(html) return html
def list(self, body, ordered=True): """Rendering list tags like ``<ul>`` and ``<ol>``. :param body: body contents of the list. :param ordered: whether this list is ordered or not. """ tag = 'ul' if ordered: tag = 'ol' return '<%s>\n%s</%s>\n' % (tag, body, tag)
def table_cell(self, content, **flags): """Rendering a table cell. Like ``<th>`` ``<td>``. :param content: content of current table cell. :param header: whether this is header or not. :param align: align of current table cell. """ if flags['header']: tag = 'th' else: tag = 'td' align = flags['align'] if not align: return '<%s>%s</%s>\n' % (tag, content, tag) return '<%s style="text-align:%s">%s</%s>\n' % ( tag, align, content, tag )
def codespan(self, text): """Rendering inline `code` text. :param text: text content for inline code. """ text = escape(text.rstrip(), smart_amp=False) return '<code>%s</code>' % text
def autolink(self, link, is_email=False): """Rendering a given link or email address. :param link: link content or email address. :param is_email: whether this is an email or not. """ text = link = escape(link) if is_email: link = 'mailto:%s' % link return '<a href="%s">%s</a>' % (link, text)
def link(self, link, title, text): """Rendering a given link with content and title. :param link: href link for ``<a>`` tag. :param title: title content for `title` attribute. :param text: text content for description. """ link = escape_link(link) if not title: return '<a href="%s">%s</a>' % (link, text) title = escape(title, quote=True) return '<a href="%s" title="%s">%s</a>' % (link, title, text)
def image(self, src, title, text):
    """Rendering an image with title and text.

    :param src: source link of the image.
    :param title: title text of the image.
    :param text: alt text of the image.
    """
    src = escape_link(src)
    text = escape(text, quote=True)
    if title:
        title = escape(title, quote=True)
        html = '<img src="%s" alt="%s" title="%s"' % (src, text, title)
    else:
        html = '<img src="%s" alt="%s"' % (src, text)
    if self.options.get('use_xhtml'):
        return '%s />' % html
    return '%s>' % html
def footnote_ref(self, key, index): """Rendering the ref anchor of a footnote. :param key: identity key for the footnote. :param index: the index count of current footnote. """ html = ( '<sup class="footnote-ref" id="fnref-%s">' '<a href="#fn-%s">%d</a></sup>' ) % (escape(key), escape(key), index) return html
def footnote_item(self, key, text): """Rendering a footnote item. :param key: identity key for the footnote. :param text: text content of the footnote. """ back = ( '<a href="#fnref-%s" class="footnote">&#8617;</a>' ) % escape(key) text = text.rstrip() if text.endswith('</p>'): text = re.sub(r'<\/p>$', r'%s</p>' % back, text) else: text = '%s<p>%s</p>' % (text, back) html = '<li id="fn-%s">%s</li>\n' % (escape(key), text) return html
def build_metagraph_list(self): """ Convert MetaParams into TF Summary Format and create summary_op. Returns: Merged TF Op for TEXT summary elements, should only be executed once to reduce data duplication. """ ops = [] self.ignore_unknown_dtypes = True for key in sorted(self.meta_params): value = self.convert_data_to_string(self.meta_params[key]) if len(value) == 0: continue if isinstance(value, str): ops.append(tf.contrib.summary.generic(name=key, tensor=tf.convert_to_tensor(str(value)))) else: ops.append(tf.contrib.summary.generic(name=key, tensor=tf.as_string(tf.convert_to_tensor(value)))) return ops
def process_docstring(app, what, name, obj, options, lines):
    """Enable markdown syntax in docstrings"""

    markdown = "\n".join(lines)
    rest = m2r(markdown)
    # str.replace returns a new string, so the result must be reassigned.
    rest = rest.replace("\r\n", "\n")
    del lines[:]
    lines.extend(rest.split("\n"))
def tf_step( self, time, variables, arguments, fn_loss, fn_kl_divergence, return_estimated_improvement=False, **kwargs ): """ Creates the TensorFlow operations for performing an optimization step. Args: time: Time tensor. variables: List of variables to optimize. arguments: Dict of arguments for callables, like fn_loss. fn_loss: A callable returning the loss of the current model. fn_kl_divergence: A callable returning the KL-divergence relative to the current model. return_estimated_improvement: Returns the estimated improvement resulting from the natural gradient calculation if true. **kwargs: Additional arguments, not used. Returns: List of delta tensors corresponding to the updates for each optimized variable. """ # Optimize: argmin(w) loss(w + delta) such that kldiv(P(w) || P(w + delta)) = learning_rate # For more details, see our blogpost: # https://reinforce.io/blog/end-to-end-computation-graphs-for-reinforcement-learning/ # from tensorforce import util # arguments = util.map_tensors(fn=tf.stop_gradient, tensors=arguments) # kldiv kldiv = fn_kl_divergence(**arguments) # grad(kldiv) kldiv_gradients = tf.gradients(ys=kldiv, xs=variables) # Calculates the product x * F of a given vector x with the fisher matrix F. # Incorporating the product prevents having to calculate the entire matrix explicitly. def fisher_matrix_product(deltas): # Gradient is not propagated through solver. deltas = [tf.stop_gradient(input=delta) for delta in deltas] # delta' * grad(kldiv) delta_kldiv_gradients = tf.add_n(inputs=[ tf.reduce_sum(input_tensor=(delta * grad)) for delta, grad in zip(deltas, kldiv_gradients) ]) # [delta' * F] = grad(delta' * grad(kldiv)) return tf.gradients(ys=delta_kldiv_gradients, xs=variables) # loss loss = fn_loss(**arguments) # grad(loss) loss_gradients = tf.gradients(ys=loss, xs=variables) # Solve the following system for delta' via the conjugate gradient solver. # [delta' * F] * delta' = -grad(loss) # --> delta' (= lambda * delta) deltas = self.solver.solve(fn_x=fisher_matrix_product, x_init=None, b=[-grad for grad in loss_gradients]) # delta' * F delta_fisher_matrix_product = fisher_matrix_product(deltas=deltas) # c' = 0.5 * delta' * F * delta' (= lambda * c) # TODO: Why constant and hence KL-divergence sometimes negative? constant = 0.5 * tf.add_n(inputs=[ tf.reduce_sum(input_tensor=(delta_F * delta)) for delta_F, delta in zip(delta_fisher_matrix_product, deltas) ]) # Natural gradient step if constant > 0 def natural_gradient_step(): # lambda = sqrt(c' / c) lagrange_multiplier = tf.sqrt(x=(constant / self.learning_rate)) # delta = delta' / lambda estimated_deltas = [delta / lagrange_multiplier for delta in deltas] # improvement = grad(loss) * delta (= loss_new - loss_old) estimated_improvement = tf.add_n(inputs=[ tf.reduce_sum(input_tensor=(grad * delta)) for grad, delta in zip(loss_gradients, estimated_deltas) ]) # Apply natural gradient improvement. 
applied = self.apply_step(variables=variables, deltas=estimated_deltas) with tf.control_dependencies(control_inputs=(applied,)): # Trivial operation to enforce control dependency if return_estimated_improvement: return [estimated_delta + 0.0 for estimated_delta in estimated_deltas], estimated_improvement else: return [estimated_delta + 0.0 for estimated_delta in estimated_deltas] # Zero step if constant <= 0 def zero_step(): if return_estimated_improvement: return [tf.zeros_like(tensor=delta) for delta in deltas], 0.0 else: return [tf.zeros_like(tensor=delta) for delta in deltas] # Natural gradient step only works if constant > 0 return tf.cond(pred=(constant > 0.0), true_fn=natural_gradient_step, false_fn=zero_step)
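# Standalone numpy sketch of the same recipe (illustrative only; like the TF
# version above, it never forms the Fisher matrix explicitly, only products F*v):
import numpy as np

def natural_gradient_deltas(fisher_vector_product, loss_gradient, learning_rate, cg_iterations=10):
    """Solve F * delta = -grad via conjugate gradients, then rescale delta so
    that 0.5 * delta' F delta equals the KL constraint (learning_rate)."""
    # Conjugate gradient for F x = -grad.
    x = np.zeros_like(loss_gradient)
    r = -loss_gradient - fisher_vector_product(x)
    p = r.copy()
    rs_old = r @ r
    for _ in range(cg_iterations):
        fp = fisher_vector_product(p)
        alpha = rs_old / (p @ fp)
        x = x + alpha * p
        r = r - alpha * fp
        rs_new = r @ r
        if rs_new < 1e-10:
            break
        p = r + (rs_new / rs_old) * p
        rs_old = rs_new
    # Rescale so that 0.5 * x' F x equals the KL constraint.
    constant = 0.5 * (x @ fisher_vector_product(x))
    if constant <= 0.0:
        return np.zeros_like(loss_gradient)  # Zero step, as in the tf.cond above.
    return x / np.sqrt(constant / learning_rate)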
def tf_step(self, time, variables, arguments, fn_reference=None, **kwargs): """ Creates the TensorFlow operations for performing an optimization step. Args: time: Time tensor. variables: List of variables to optimize. arguments: Dict of arguments for callables, like fn_loss. fn_reference: A callable returning the reference values, in case of a comparative loss. **kwargs: Additional arguments passed on to the internal optimizer. Returns: List of delta tensors corresponding to the updates for each optimized variable. """ # Set reference to compare with at each optimization step, in case of a comparative loss. arguments['reference'] = fn_reference(**arguments) # First step deltas = self.optimizer.step(time=time, variables=variables, arguments=arguments, **kwargs) if self.unroll_loop: # Unrolled for loop for _ in xrange(self.num_steps - 1): with tf.control_dependencies(control_inputs=deltas): step_deltas = self.optimizer.step(time=time, variables=variables, arguments=arguments, **kwargs) deltas = [delta1 + delta2 for delta1, delta2 in zip(deltas, step_deltas)] return deltas else: # TensorFlow while loop def body(iteration, deltas): with tf.control_dependencies(control_inputs=deltas): step_deltas = self.optimizer.step(time=time, variables=variables, arguments=arguments, **kwargs) deltas = [delta1 + delta2 for delta1, delta2 in zip(deltas, step_deltas)] return iteration + 1, deltas def cond(iteration, deltas): return iteration < self.num_steps - 1 _, deltas = tf.while_loop(cond=cond, body=body, loop_vars=(0, deltas)) return deltas
def tf_baseline_loss(self, states, internals, reward, update, reference=None): """ Creates the TensorFlow operations for calculating the baseline loss of a batch. Args: states: Dict of state tensors. internals: List of prior internal state tensors. reward: Reward tensor. update: Boolean tensor indicating whether this call happens during an update. reference: Optional reference tensor(s), in case of a comparative loss. Returns: Loss tensor. """ if self.baseline_mode == 'states': loss = self.baseline.loss( states=states, internals=internals, reward=reward, update=update, reference=reference ) elif self.baseline_mode == 'network': loss = self.baseline.loss( states=self.network.apply(x=states, internals=internals, update=update), internals=internals, reward=reward, update=update, reference=reference ) regularization_loss = self.baseline.regularization_loss() if regularization_loss is not None: loss += regularization_loss return loss
def baseline_optimizer_arguments(self, states, internals, reward): """ Returns the baseline optimizer arguments including the time, the list of variables to optimize, and various functions which the optimizer might require to perform an update step. Args: states: Dict of state tensors. internals: List of prior internal state tensors. reward: Reward tensor. Returns: Baseline optimizer arguments as dict. """ arguments = dict( time=self.global_timestep, variables=self.baseline.get_variables(), arguments=dict( states=states, internals=internals, reward=reward, update=tf.constant(value=True), ), fn_reference=self.baseline.reference, fn_loss=self.fn_baseline_loss, # source_variables=self.network.get_variables() ) if self.global_model is not None: arguments['global_variables'] = self.global_model.baseline.get_variables() return arguments
def tf_step(self, time, variables, source_variables, **kwargs): """ Creates the TensorFlow operations for performing an optimization step. Args: time: Time tensor. variables: List of variables to optimize. source_variables: List of source variables to synchronize with. **kwargs: Additional arguments, not used. Returns: List of delta tensors corresponding to the updates for each optimized variable. """ assert all(util.shape(source) == util.shape(target) for source, target in zip(source_variables, variables)) last_sync = tf.get_variable( name='last-sync', shape=(), dtype=tf.int64, initializer=tf.constant_initializer(value=(-self.sync_frequency), dtype=tf.int64), trainable=False ) def sync(): deltas = list() for source_variable, target_variable in zip(source_variables, variables): delta = self.update_weight * (source_variable - target_variable) deltas.append(delta) applied = self.apply_step(variables=variables, deltas=deltas) last_sync_updated = last_sync.assign(value=time) with tf.control_dependencies(control_inputs=(applied, last_sync_updated)): # Trivial operation to enforce control dependency return [delta + 0.0 for delta in deltas] def no_sync(): deltas = list() for variable in variables: delta = tf.zeros(shape=util.shape(variable)) deltas.append(delta) return deltas do_sync = (time - last_sync >= self.sync_frequency) return tf.cond(pred=do_sync, true_fn=sync, false_fn=no_sync)
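# Numpy sketch of the update rule inside sync() above: a partial (soft)
# synchronization target <- target + update_weight * (source - target):
import numpy as np

source = np.array([1.0, 2.0, 3.0])
target = np.array([0.0, 0.0, 0.0])
update_weight = 0.5

target += update_weight * (source - target)
print(target)  # [0.5 1.  1.5]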