Dataset fields: signature, body, docstring, id
@staticmethod<EOL><INDENT>def get(logdir):<DEDENT>
with FileWriterCache._lock:<EOL><INDENT>if logdir not in FileWriterCache._cache:<EOL><INDENT>FileWriterCache._cache[logdir] = FileWriter(<EOL>logdir, graph=tf.compat.v1.get_default_graph())<EOL><DEDENT>return FileWriterCache._cache[logdir]<EOL><DEDENT>
Returns the FileWriter for the specified directory. Args: logdir: str, name of the directory. Returns: A `FileWriter`.
f8071:c1:m0
def readahead_file_path(path, unused_readahead=None):
return path<EOL>
Readahead is not implemented; simply returns the given path.
f8072:m0
def get_logger():
return _logger<EOL>
Returns the TensorBoard logger.
f8073:m0
def load_ipython_extension(ipython):
notebook._load_ipython_extension(ipython)<EOL>
IPython API entry point. Only intended to be called by the IPython runtime. See: https://ipython.readthedocs.io/en/stable/config/extensions/index.html
f8075:m3
def Cleanse(obj, encoding='<STR_LIT:utf-8>'):
if isinstance(obj, int):<EOL><INDENT>return obj<EOL><DEDENT>elif isinstance(obj, float):<EOL><INDENT>if obj == _INFINITY:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif obj == _NEGATIVE_INFINITY:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif math.isnan(obj):<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>return obj<EOL><DEDENT><DEDENT>elif isinstance(obj, bytes):<EOL><INDENT>return tf.compat.as_text(obj, encoding)<EOL><DEDENT>elif isinstance(obj, (list, tuple)):<EOL><INDENT>return [Cleanse(i, encoding) for i in obj]<EOL><DEDENT>elif isinstance(obj, set):<EOL><INDENT>return [Cleanse(i, encoding) for i in sorted(obj)]<EOL><DEDENT>elif isinstance(obj, dict):<EOL><INDENT>return {Cleanse(k, encoding): Cleanse(v, encoding) for k, v in obj.items()}<EOL><DEDENT>else:<EOL><INDENT>return obj<EOL><DEDENT>
Makes Python object appropriate for JSON serialization. - Replaces instances of Infinity/-Infinity/NaN with strings. - Turns byte strings into unicode strings. - Turns sets into sorted lists. - Turns tuples into lists. Args: obj: Python data structure. encoding: Charset used to decode byte strings. Returns: Unicode JSON data structure.
f8079:m0
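A brief usage sketch of the Cleanse behavior described above. The import path is an assumption, and the exact placeholder strings for Infinity/-Infinity/NaN are elided in the stored body, so the expected output only names them abstractly.

from tensorboard.backend import json_util  # assumed import path

# Bytes decode to unicode, tuples/sets become lists (sets sorted), and
# non-finite floats are replaced with the module's placeholder strings.
data = {b'scores': (1, 2.5, float('inf')), 'tags': {'b', 'a'}}
cleansed = json_util.Cleanse(data)
# -> {'scores': [1, 2.5, <Infinity placeholder>], 'tags': ['a', 'b']}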
def _assertWrapsAs(self, to_wrap, expected):
actual = json_util.Cleanse(to_wrap)<EOL>for a, e in zip(actual, expected):<EOL><INDENT>self.assertEqual(e, a)<EOL><DEDENT>
Asserts that |to_wrap| becomes |expected| when wrapped.
f8080:c0:m0
def __init__(self, path):
self._path = path<EOL>self.reload_called = False<EOL>self._plugin_to_tag_to_content = {<EOL>'<STR_LIT>': {<EOL>'<STR_LIT:foo>': '<STR_LIT>',<EOL>'<STR_LIT:bar>': '<STR_LIT>',<EOL>}<EOL>}<EOL>
Constructs a fake accumulator with some fake events. Args: path: The path for the run that this accumulator is for.
f8081:c0:m0
def add3RunsToMultiplexer(self, logdir, multiplexer):
run1_dir = os.path.join(logdir, '<STR_LIT>')<EOL>run2_dir = os.path.join(logdir, '<STR_LIT>')<EOL>run3_dir = os.path.join(logdir, '<STR_LIT>')<EOL>for dirname in [run1_dir, run2_dir, run3_dir]:<EOL><INDENT>_AddEvents(dirname)<EOL><DEDENT>multiplexer.AddRun(run1_dir, '<STR_LIT>')<EOL>multiplexer.AddRun(run2_dir, '<STR_LIT>')<EOL>multiplexer.AddRun(run3_dir, '<STR_LIT>')<EOL>
Creates and adds 3 runs to the multiplexer.
f8081:c2:m1
def _IsDirectory(parent, item):
return tf.io.gfile.isdir(os.path.join(parent, item))<EOL>
Helper that returns whether parent/item is a directory.
f8083:m0
def PluginDirectory(logdir, plugin_name):
return os.path.join(logdir, _PLUGINS_DIR, plugin_name)<EOL>
Returns the plugin directory for plugin_name.
f8083:m1
def ListPlugins(logdir):
plugins_dir = os.path.join(logdir, _PLUGINS_DIR)<EOL>try:<EOL><INDENT>entries = tf.io.gfile.listdir(plugins_dir)<EOL><DEDENT>except tf.errors.NotFoundError:<EOL><INDENT>return []<EOL><DEDENT>return [x.rstrip('<STR_LIT:/>') for x in entries<EOL>if x.endswith('<STR_LIT:/>') or _IsDirectory(plugins_dir, x)]<EOL>
List all the plugins that have registered assets in logdir. If the plugins_dir does not exist, it returns an empty list. This maintains compatibility with old directories that have no plugins written. Args: logdir: A directory that was created by a TensorFlow events writer. Returns: a list of plugin names, as strings
f8083:m2
def ListAssets(logdir, plugin_name):
plugin_dir = PluginDirectory(logdir, plugin_name)<EOL>try:<EOL><INDENT>return [x.rstrip('<STR_LIT:/>') for x in tf.io.gfile.listdir(plugin_dir)]<EOL><DEDENT>except tf.errors.NotFoundError:<EOL><INDENT>return []<EOL><DEDENT>
List all the assets that are available for given plugin in a logdir. Args: logdir: A directory that was created by a TensorFlow summary.FileWriter. plugin_name: A string name of a plugin to list assets for. Returns: A string list of available plugin assets. If the plugin subdirectory does not exist (either because the logdir doesn't exist, or because the plugin didn't register) an empty list is returned.
f8083:m3
def RetrieveAsset(logdir, plugin_name, asset_name):
asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name)<EOL>try:<EOL><INDENT>with tf.io.gfile.GFile(asset_path, "<STR_LIT:r>") as f:<EOL><INDENT>return f.read()<EOL><DEDENT><DEDENT>except tf.errors.NotFoundError:<EOL><INDENT>raise KeyError("<STR_LIT>" % asset_path)<EOL><DEDENT>except tf.errors.OpError as e:<EOL><INDENT>raise KeyError("<STR_LIT>" % (asset_path, e))<EOL><DEDENT>
Retrieve a particular plugin asset from a logdir. Args: logdir: A directory that was created by a TensorFlow summary.FileWriter. plugin_name: The plugin we want an asset from. asset_name: The name of the requested asset. Returns: string contents of the plugin asset. Raises: KeyError: if the asset does not exist.
f8083:m4
def __init__(self, size, seed=<NUM_LIT:0>, always_keep_last=True):
if size < <NUM_LIT:0> or size != round(size):<EOL><INDENT>raise ValueError('<STR_LIT>' % size)<EOL><DEDENT>self._buckets = collections.defaultdict(<EOL>lambda: _ReservoirBucket(size, random.Random(seed), always_keep_last))<EOL>self._mutex = threading.Lock()<EOL>self.size = size<EOL>self.always_keep_last = always_keep_last<EOL>
Creates a new reservoir. Args: size: The number of values to keep in the reservoir for each tag. If 0, all values will be kept. seed: The seed of the random number generator to use when sampling. Different values for |seed| will produce different samples from the same input items. always_keep_last: Whether to always keep the latest seen item in the end of the reservoir. Defaults to True. Raises: ValueError: If size is negative or not an integer.
f8084:c0:m0
def Keys(self):
with self._mutex:<EOL><INDENT>return list(self._buckets.keys())<EOL><DEDENT>
Return all the keys in the reservoir. Returns: ['list', 'of', 'keys'] in the Reservoir.
f8084:c0:m1
def Items(self, key):
with self._mutex:<EOL><INDENT>if key not in self._buckets:<EOL><INDENT>raise KeyError('<STR_LIT>' % key)<EOL><DEDENT>bucket = self._buckets[key]<EOL><DEDENT>return bucket.Items()<EOL>
Return items associated with given key. Args: key: The key for which we are finding associated items. Raises: KeyError: If the key is not found in the reservoir. Returns: [list, of, items] associated with that key.
f8084:c0:m2
def AddItem(self, key, item, f=lambda x: x):
with self._mutex:<EOL><INDENT>bucket = self._buckets[key]<EOL><DEDENT>bucket.AddItem(item, f)<EOL>
Add a new item to the Reservoir with the given tag. If the reservoir has not yet reached full size, the new item is guaranteed to be added. If the reservoir is full, then behavior depends on the always_keep_last boolean. If always_keep_last was set to true, the new item is guaranteed to be added to the reservoir, and either the previous last item will be replaced, or (with low probability) an older item will be replaced. If always_keep_last was set to false, then the new item will replace an old item with low probability. If f is provided, it will be applied to transform item (lazily, iff item is going to be included in the reservoir). Args: key: The key to store the item under. item: The item to add to the reservoir. f: An optional function to transform the item prior to addition.
f8084:c0:m3
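A short usage sketch of the Reservoir API described above (the import path, tag name, and sizes are illustrative):

from tensorboard.backend.event_processing import reservoir  # assumed import path

r = reservoir.Reservoir(size=10)
for step in range(1000):
    # f is applied lazily, only when the item is actually stored.
    r.AddItem('loss', step, f=lambda x: x * 2)
items = r.Items('loss')
assert len(items) == 10          # capped at the reservoir size
assert items[-1] == 2 * 999      # always_keep_last keeps the newest item last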
def FilterItems(self, filterFn, key=None):
with self._mutex:<EOL><INDENT>if key:<EOL><INDENT>if key in self._buckets:<EOL><INDENT>return self._buckets[key].FilterItems(filterFn)<EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT><DEDENT>else:<EOL><INDENT>return sum(bucket.FilterItems(filterFn)<EOL>for bucket in self._buckets.values())<EOL><DEDENT><DEDENT>
Filter items within a Reservoir, using a filtering function. Args: filterFn: A function that returns True for the items to be kept. key: An optional bucket key to filter. If not specified, will filter all buckets. Returns: The number of items removed.
f8084:c0:m4
def __init__(self, _max_size, _random=None, always_keep_last=True):
if _max_size < <NUM_LIT:0> or _max_size != round(_max_size):<EOL><INDENT>raise ValueError('<STR_LIT>' % _max_size)<EOL><DEDENT>self.items = []<EOL>self._mutex = threading.Lock()<EOL>self._max_size = _max_size<EOL>self._num_items_seen = <NUM_LIT:0><EOL>if _random is not None:<EOL><INDENT>self._random = _random<EOL><DEDENT>else:<EOL><INDENT>self._random = random.Random(<NUM_LIT:0>)<EOL><DEDENT>self.always_keep_last = always_keep_last<EOL>
Create the _ReservoirBucket. Args: _max_size: The maximum size the reservoir bucket may grow to. If size is zero, the bucket has unbounded size. _random: The random number generator to use. If not specified, defaults to random.Random(0). always_keep_last: Whether the latest seen item should always be included in the end of the bucket. Raises: ValueError: if the size is not a nonnegative integer.
f8084:c1:m0
def AddItem(self, item, f=lambda x: x):
with self._mutex:<EOL><INDENT>if len(self.items) < self._max_size or self._max_size == <NUM_LIT:0>:<EOL><INDENT>self.items.append(f(item))<EOL><DEDENT>else:<EOL><INDENT>r = self._random.randint(<NUM_LIT:0>, self._num_items_seen)<EOL>if r < self._max_size:<EOL><INDENT>self.items.pop(r)<EOL>self.items.append(f(item))<EOL><DEDENT>elif self.always_keep_last:<EOL><INDENT>self.items[-<NUM_LIT:1>] = f(item)<EOL><DEDENT><DEDENT>self._num_items_seen += <NUM_LIT:1><EOL><DEDENT>
Add an item to the ReservoirBucket, replacing an old item if necessary. The new item is guaranteed to be added to the bucket, and to be the last element in the bucket. If the bucket has reached capacity, then an old item will be replaced. With probability (_max_size/_num_items_seen) a random item in the bucket will be popped out and the new item will be appended to the end. With probability (1 - _max_size/_num_items_seen) the last item in the bucket will be replaced. Since the O(n) replacements occur with O(1/_num_items_seen) likelihood, the amortized runtime is O(1). Args: item: The item to add to the bucket. f: A function to transform item before addition, if it will be kept in the reservoir.
f8084:c1:m1
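Because the stored body above is flattened, here is a readable standalone sketch of the same replacement rule; the names are illustrative and not part of the module's API.

import random

def add_item_sketch(items, item, max_size, num_items_seen, rng=None,
                    always_keep_last=True):
    """Mirror of the bucket update described above; the new item always ends up last."""
    rng = rng or random.Random(0)
    if len(items) < max_size or max_size == 0:
        items.append(item)
    else:
        r = rng.randint(0, num_items_seen)
        if r < max_size:              # roughly probability max_size / num_items_seen
            items.pop(r)              # evict a uniformly chosen kept item
            items.append(item)
        elif always_keep_last:
            items[-1] = item          # otherwise overwrite the newest slot
    return num_items_seen + 1         # the caller tracks the running count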
def FilterItems(self, filterFn):
with self._mutex:<EOL><INDENT>size_before = len(self.items)<EOL>self.items = list(filter(filterFn, self.items))<EOL>size_diff = size_before - len(self.items)<EOL>prop_remaining = len(self.items) / float(<EOL>size_before) if size_before > <NUM_LIT:0> else <NUM_LIT:0><EOL>self._num_items_seen = int(round(self._num_items_seen * prop_remaining))<EOL>return size_diff<EOL><DEDENT>
Filter items in a ReservoirBucket, using a filtering function. Filtering items from the reservoir bucket must update the internal state variable self._num_items_seen, which is used for determining the rate of replacement in reservoir sampling. Ideally, self._num_items_seen would contain the exact number of items that have ever been seen by the ReservoirBucket and satisfy filterFn. However, the ReservoirBucket does not have access to all items seen -- it only has access to the subset of items that have survived sampling (self.items). Therefore, we estimate self._num_items_seen by scaling it by the same ratio as the ratio of items not removed from self.items. Args: filterFn: A function that returns True for items to be kept. Returns: The number of items removed from the bucket.
f8084:c1:m2
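A worked example of the _num_items_seen rescaling described above, with illustrative numbers:

# The bucket holds 10 sampled items out of 100 items ever seen. If filterFn
# keeps 4 of the 10, the proportion remaining is 0.4, so the estimate of
# items ever seen is rescaled from 100 to round(100 * 0.4) == 40.
size_before, kept, num_items_seen = 10, 4, 100
prop_remaining = kept / float(size_before)
assert int(round(num_items_seen * prop_remaining)) == 40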
def Items(self):
with self._mutex:<EOL><INDENT>return list(self.items)<EOL><DEDENT>
Get all the items in the bucket.
f8084:c1:m3
def _GetPurgeMessage(most_recent_step, most_recent_wall_time, event_step,<EOL>event_wall_time, num_expired_scalars, num_expired_histos,<EOL>num_expired_comp_histos, num_expired_images,<EOL>num_expired_audio):
return ('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>').format(most_recent_step, most_recent_wall_time,<EOL>event_step, event_wall_time,<EOL>num_expired_scalars, num_expired_histos,<EOL>num_expired_comp_histos, num_expired_images,<EOL>num_expired_audio)<EOL>
Return the string message associated with TensorBoard purges.
f8085:m0
def _GeneratorFromPath(path):
if not path:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if io_wrapper.IsTensorFlowEventsFile(path):<EOL><INDENT>return event_file_loader.EventFileLoader(path)<EOL><DEDENT>else:<EOL><INDENT>return directory_watcher.DirectoryWatcher(<EOL>path,<EOL>event_file_loader.EventFileLoader,<EOL>io_wrapper.IsTensorFlowEventsFile)<EOL><DEDENT>
Create an event generator for file or directory at given path string.
f8085:m1
def _ParseFileVersion(file_version):
tokens = file_version.split('<STR_LIT>')<EOL>try:<EOL><INDENT>return float(tokens[-<NUM_LIT:1>])<EOL><DEDENT>except ValueError:<EOL><INDENT>logger.warn(<EOL>('<STR_LIT>'<EOL>'<STR_LIT>'))<EOL>return -<NUM_LIT:1><EOL><DEDENT>
Convert the string file_version in event.proto into a float. Args: file_version: String file_version from event.proto Returns: Version number as a float.
f8085:m2
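For context, TensorFlow event files carry a file_version string of the form 'brain.Event:2'; assuming the elided separator in the body above is the ':' in that format, the parse behaves like this sketch:

def parse_file_version_sketch(file_version):
    try:
        return float(file_version.split(':')[-1])   # 'brain.Event:2' -> 2.0
    except ValueError:
        return -1                                    # unparseable; treated as a legacy file

assert parse_file_version_sketch('brain.Event:2') == 2.0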
def __init__(self,<EOL>path,<EOL>size_guidance=None,<EOL>compression_bps=NORMAL_HISTOGRAM_BPS,<EOL>purge_orphaned_data=True):
size_guidance = size_guidance or DEFAULT_SIZE_GUIDANCE<EOL>sizes = {}<EOL>for key in DEFAULT_SIZE_GUIDANCE:<EOL><INDENT>if key in size_guidance:<EOL><INDENT>sizes[key] = size_guidance[key]<EOL><DEDENT>else:<EOL><INDENT>sizes[key] = DEFAULT_SIZE_GUIDANCE[key]<EOL><DEDENT><DEDENT>self._first_event_timestamp = None<EOL>self.scalars = reservoir.Reservoir(size=sizes[SCALARS])<EOL>self._graph = None<EOL>self._graph_from_metagraph = False<EOL>self._meta_graph = None<EOL>self._tagged_metadata = {}<EOL>self.summary_metadata = {}<EOL>self.histograms = reservoir.Reservoir(size=sizes[HISTOGRAMS])<EOL>self.compressed_histograms = reservoir.Reservoir(<EOL>size=sizes[COMPRESSED_HISTOGRAMS], always_keep_last=False)<EOL>self.images = reservoir.Reservoir(size=sizes[IMAGES])<EOL>self.audios = reservoir.Reservoir(size=sizes[AUDIO])<EOL>self.tensors = reservoir.Reservoir(size=sizes[TENSORS])<EOL>self._plugin_to_tag_to_content = collections.defaultdict(dict)<EOL>self._generator_mutex = threading.Lock()<EOL>self.path = path<EOL>self._generator = _GeneratorFromPath(path)<EOL>self._compression_bps = compression_bps<EOL>self.purge_orphaned_data = purge_orphaned_data<EOL>self.most_recent_step = -<NUM_LIT:1><EOL>self.most_recent_wall_time = -<NUM_LIT:1><EOL>self.file_version = None<EOL>self.accumulated_attrs = ('<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>self._tensor_summaries = {}<EOL>
Construct the `EventAccumulator`. Args: path: A file path to a directory containing tf events files, or a single tf events file. The accumulator will load events from this path. size_guidance: Information on how much data the EventAccumulator should store in memory. The DEFAULT_SIZE_GUIDANCE tries not to store too much so as to avoid OOMing the client. The size_guidance should be a map from a `tagType` string to an integer representing the number of items to keep per tag for items of that `tagType`. If the size is 0, all events are stored. compression_bps: Information on how the `EventAccumulator` should compress histogram data for the `CompressedHistograms` tag (for details see `ProcessCompressedHistogram`). purge_orphaned_data: Whether to discard any events that were "orphaned" by a TensorFlow restart.
f8085:c0:m0
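A hedged usage sketch of the accumulator constructed above; the import path, logdir, and tag name are assumptions for illustration.

from tensorboard.backend.event_processing import event_accumulator  # assumed import path

acc = event_accumulator.EventAccumulator(
    '/tmp/my_run',                                  # directory of tf events files (illustrative)
    size_guidance={event_accumulator.SCALARS: 0})   # 0 means keep every scalar event
acc.Reload()                                        # load everything written so far
for ev in acc.Scalars('loss'):                      # 'loss' is an illustrative tag
    print(ev.step, ev.value)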
def Reload(self):
with self._generator_mutex:<EOL><INDENT>for event in self._generator.Load():<EOL><INDENT>self._ProcessEvent(event)<EOL><DEDENT><DEDENT>return self<EOL>
Loads all events added since the last call to `Reload`. If `Reload` was never called, loads all events in the file. Returns: The `EventAccumulator`.
f8085:c0:m1
def PluginAssets(self, plugin_name):
return plugin_asset_util.ListAssets(self.path, plugin_name)<EOL>
Return a list of all plugin assets for the given plugin. Args: plugin_name: The string name of a plugin to retrieve assets for. Returns: A list of string plugin asset names, or empty list if none are available. If the plugin was not registered, an empty list is returned.
f8085:c0:m2
def RetrievePluginAsset(self, plugin_name, asset_name):
return plugin_asset_util.RetrieveAsset(self.path, plugin_name, asset_name)<EOL>
Return the contents of a given plugin asset. Args: plugin_name: The string name of a plugin. asset_name: The string name of an asset. Returns: The string contents of the plugin asset. Raises: KeyError: If the asset is not available.
f8085:c0:m3
def FirstEventTimestamp(self):
if self._first_event_timestamp is not None:<EOL><INDENT>return self._first_event_timestamp<EOL><DEDENT>with self._generator_mutex:<EOL><INDENT>try:<EOL><INDENT>event = next(self._generator.Load())<EOL>self._ProcessEvent(event)<EOL>return self._first_event_timestamp<EOL><DEDENT>except StopIteration:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>
Returns the timestamp in seconds of the first event. If the first event has been loaded (either by this method or by `Reload`), this returns immediately. Otherwise, it will load in the first event. Note that this means that calling `Reload` will cause this to block until `Reload` has finished. Returns: The timestamp in seconds of the first event that was loaded. Raises: ValueError: If no events have been loaded and there were no events found on disk.
f8085:c0:m4
def PluginTagToContent(self, plugin_name):
if plugin_name not in self._plugin_to_tag_to_content:<EOL><INDENT>raise KeyError('<STR_LIT>' % plugin_name)<EOL><DEDENT>return self._plugin_to_tag_to_content[plugin_name]<EOL>
Returns a dict mapping tags to content specific to that plugin. Args: plugin_name: The name of the plugin for which to fetch plugin-specific content. Raises: KeyError: if the plugin name is not found. Returns: A dict mapping tags to plugin-specific content (which are always strings). Those strings are often serialized protos.
f8085:c0:m5
def SummaryMetadata(self, tag):
return self.summary_metadata[tag]<EOL>
Given a summary tag name, return the associated metadata object. Args: tag: The name of a tag, as a string. Raises: KeyError: If the tag is not found. Returns: A `SummaryMetadata` protobuf.
f8085:c0:m6
def _ProcessEvent(self, event):
if self._first_event_timestamp is None:<EOL><INDENT>self._first_event_timestamp = event.wall_time<EOL><DEDENT>if event.HasField('<STR_LIT>'):<EOL><INDENT>new_file_version = _ParseFileVersion(event.file_version)<EOL>if self.file_version and self.file_version != new_file_version:<EOL><INDENT>logger.warn(('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>').format(self.file_version,<EOL>new_file_version))<EOL><DEDENT>self.file_version = new_file_version<EOL><DEDENT>self._MaybePurgeOrphanedData(event)<EOL>if event.HasField('<STR_LIT>'):<EOL><INDENT>if self._graph is not None:<EOL><INDENT>logger.warn(<EOL>('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'))<EOL><DEDENT>self._graph = event.graph_def<EOL>self._graph_from_metagraph = False<EOL><DEDENT>elif event.HasField('<STR_LIT>'):<EOL><INDENT>if self._meta_graph is not None:<EOL><INDENT>logger.warn(('<STR_LIT>'<EOL>'<STR_LIT>'))<EOL><DEDENT>self._meta_graph = event.meta_graph_def<EOL>if self._graph is None or self._graph_from_metagraph:<EOL><INDENT>meta_graph = meta_graph_pb2.MetaGraphDef()<EOL>meta_graph.ParseFromString(self._meta_graph)<EOL>if meta_graph.graph_def:<EOL><INDENT>if self._graph is not None:<EOL><INDENT>logger.warn(<EOL>('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'))<EOL><DEDENT>self._graph_from_metagraph = True<EOL>self._graph = meta_graph.graph_def.SerializeToString()<EOL><DEDENT><DEDENT><DEDENT>elif event.HasField('<STR_LIT>'):<EOL><INDENT>tag = event.tagged_run_metadata.tag<EOL>if tag in self._tagged_metadata:<EOL><INDENT>logger.warn('<STR_LIT>' +<EOL>tag + '<STR_LIT>')<EOL><DEDENT>self._tagged_metadata[tag] = event.tagged_run_metadata.run_metadata<EOL><DEDENT>elif event.HasField('<STR_LIT>'):<EOL><INDENT>for value in event.summary.value:<EOL><INDENT>if value.HasField('<STR_LIT>'):<EOL><INDENT>tag = value.tag<EOL>if tag not in self.summary_metadata:<EOL><INDENT>self.summary_metadata[tag] = value.metadata<EOL>plugin_data = value.metadata.plugin_data<EOL>if plugin_data.plugin_name:<EOL><INDENT>self._plugin_to_tag_to_content[plugin_data.plugin_name][tag] = (<EOL>plugin_data.content)<EOL><DEDENT>else:<EOL><INDENT>logger.warn(<EOL>('<STR_LIT>'<EOL>'<STR_LIT>'), tag)<EOL><DEDENT><DEDENT><DEDENT>for summary_type, summary_func in SUMMARY_TYPES.items():<EOL><INDENT>if value.HasField(summary_type):<EOL><INDENT>datum = getattr(value, summary_type)<EOL>tag = value.tag<EOL>if summary_type == '<STR_LIT>' and not tag:<EOL><INDENT>tag = value.node_name<EOL><DEDENT>getattr(self, summary_func)(tag, event.wall_time, event.step, datum)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>
Called whenever an event is loaded.
f8085:c0:m7
def Tags(self):
return {<EOL>IMAGES: self.images.Keys(),<EOL>AUDIO: self.audios.Keys(),<EOL>HISTOGRAMS: self.histograms.Keys(),<EOL>SCALARS: self.scalars.Keys(),<EOL>COMPRESSED_HISTOGRAMS: self.compressed_histograms.Keys(),<EOL>TENSORS: self.tensors.Keys(),<EOL>GRAPH: self._graph is not None,<EOL>META_GRAPH: self._meta_graph is not None,<EOL>RUN_METADATA: list(self._tagged_metadata.keys())<EOL>}<EOL>
Return all tags found in the value stream. Returns: A `{tagType: ['list', 'of', 'tags']}` dictionary.
f8085:c0:m8
def Scalars(self, tag):
return self.scalars.Items(tag)<EOL>
Given a summary tag, return all associated `ScalarEvent`s. Args: tag: A string tag associated with the events. Raises: KeyError: If the tag is not found. Returns: An array of `ScalarEvent`s.
f8085:c0:m9
def Graph(self):
graph = graph_pb2.GraphDef()<EOL>if self._graph is not None:<EOL><INDENT>graph.ParseFromString(self._graph)<EOL>return graph<EOL><DEDENT>raise ValueError('<STR_LIT>')<EOL>
Return the graph definition, if there is one. If the graph is stored directly, return that. If no graph is stored directly but a metagraph is stored containing a graph, return that. Raises: ValueError: If there is no graph for this run. Returns: The `graph_def` proto.
f8085:c0:m10
def MetaGraph(self):
if self._meta_graph is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>meta_graph = meta_graph_pb2.MetaGraphDef()<EOL>meta_graph.ParseFromString(self._meta_graph)<EOL>return meta_graph<EOL>
Return the metagraph definition, if there is one. Raises: ValueError: If there is no metagraph for this run. Returns: The `meta_graph_def` proto.
f8085:c0:m11
def RunMetadata(self, tag):
if tag not in self._tagged_metadata:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>run_metadata = config_pb2.RunMetadata()<EOL>run_metadata.ParseFromString(self._tagged_metadata[tag])<EOL>return run_metadata<EOL>
Given a tag, return the associated session.run() metadata. Args: tag: A string tag associated with the event. Raises: ValueError: If the tag is not found. Returns: The metadata in form of `RunMetadata` proto.
f8085:c0:m12
def Histograms(self, tag):
return self.histograms.Items(tag)<EOL>
Given a summary tag, return all associated histograms. Args: tag: A string tag associated with the events. Raises: KeyError: If the tag is not found. Returns: An array of `HistogramEvent`s.
f8085:c0:m13
def CompressedHistograms(self, tag):
return self.compressed_histograms.Items(tag)<EOL>
Given a summary tag, return all associated compressed histograms. Args: tag: A string tag associated with the events. Raises: KeyError: If the tag is not found. Returns: An array of `CompressedHistogramEvent`s.
f8085:c0:m14
def Images(self, tag):
return self.images.Items(tag)<EOL>
Given a summary tag, return all associated images. Args: tag: A string tag associated with the events. Raises: KeyError: If the tag is not found. Returns: An array of `ImageEvent`s.
f8085:c0:m15
def Audio(self, tag):
return self.audios.Items(tag)<EOL>
Given a summary tag, return all associated audio. Args: tag: A string tag associated with the events. Raises: KeyError: If the tag is not found. Returns: An array of `AudioEvent`s.
f8085:c0:m16
def Tensors(self, tag):
return self.tensors.Items(tag)<EOL>
Given a summary tag, return all associated tensors. Args: tag: A string tag associated with the events. Raises: KeyError: If the tag is not found. Returns: An array of `TensorEvent`s.
f8085:c0:m17
def _MaybePurgeOrphanedData(self, event):
if not self.purge_orphaned_data:<EOL><INDENT>return<EOL><DEDENT>if self.file_version and self.file_version >= <NUM_LIT:2>:<EOL><INDENT>self._CheckForRestartAndMaybePurge(event)<EOL><DEDENT>else:<EOL><INDENT>self._CheckForOutOfOrderStepAndMaybePurge(event)<EOL><DEDENT>
Maybe purge orphaned data due to a TensorFlow crash. When TensorFlow crashes at step T+O and restarts at step T, any events written after step T are now "orphaned" and will be at best misleading if they are included in TensorBoard. This logic attempts to determine if there is orphaned data, and purge it if it is found. Args: event: The event to use as a reference, to determine if a purge is needed.
f8085:c0:m18
def _CheckForRestartAndMaybePurge(self, event):
if event.HasField(<EOL>'<STR_LIT>') and event.session_log.status == event_pb2.SessionLog.START:<EOL><INDENT>self._Purge(event, by_tags=False)<EOL><DEDENT>
Check and discard expired events using SessionLog.START. Check for a SessionLog.START event and purge all previously seen events with larger steps, because they are out of date. Because of supervisor threading, it is possible that this logic will cause the first few event messages to be discarded since supervisor threading does not guarantee that the START message is deterministically written first. This method is preferred over _CheckForOutOfOrderStepAndMaybePurge which can inadvertently discard events due to supervisor threading. Args: event: The event to use as reference. If the event is a START event, all previously seen events with a greater event.step will be purged.
f8085:c0:m19
def _CheckForOutOfOrderStepAndMaybePurge(self, event):
if event.step < self.most_recent_step and event.HasField('<STR_LIT>'):<EOL><INDENT>self._Purge(event, by_tags=True)<EOL><DEDENT>else:<EOL><INDENT>self.most_recent_step = event.step<EOL>self.most_recent_wall_time = event.wall_time<EOL><DEDENT>
Check for out-of-order event.step and discard expired events for tags. Check if the event is out of order relative to the global most recent step. If it is, purge outdated summaries for tags that the event contains. Args: event: The event to use as reference. If the event is out-of-order, all events with the same tags, but with a greater event.step will be purged.
f8085:c0:m20
def _ProcessHistogram(self, tag, wall_time, step, histo):
histo = self._ConvertHistogramProtoToTuple(histo)<EOL>histo_ev = HistogramEvent(wall_time, step, histo)<EOL>self.histograms.AddItem(tag, histo_ev)<EOL>self.compressed_histograms.AddItem(tag, histo_ev, self._CompressHistogram)<EOL>
Processes a proto histogram by adding it to accumulated state.
f8085:c0:m22
def _CompressHistogram(self, histo_ev):
return CompressedHistogramEvent(<EOL>histo_ev.wall_time,<EOL>histo_ev.step,<EOL>compressor.compress_histogram_proto(<EOL>histo_ev.histogram_value, self._compression_bps))<EOL>
Callback for _ProcessHistogram.
f8085:c0:m23
def _ProcessImage(self, tag, wall_time, step, image):
event = ImageEvent(wall_time=wall_time,<EOL>step=step,<EOL>encoded_image_string=image.encoded_image_string,<EOL>width=image.width,<EOL>height=image.height)<EOL>self.images.AddItem(tag, event)<EOL>
Processes an image by adding it to accumulated state.
f8085:c0:m24
def _ProcessAudio(self, tag, wall_time, step, audio):
event = AudioEvent(wall_time=wall_time,<EOL>step=step,<EOL>encoded_audio_string=audio.encoded_audio_string,<EOL>content_type=audio.content_type,<EOL>sample_rate=audio.sample_rate,<EOL>length_frames=audio.length_frames)<EOL>self.audios.AddItem(tag, event)<EOL>
Processes an audio value by adding it to accumulated state.
f8085:c0:m25
def _ProcessScalar(self, tag, wall_time, step, scalar):
sv = ScalarEvent(wall_time=wall_time, step=step, value=scalar)<EOL>self.scalars.AddItem(tag, sv)<EOL>
Processes a simple value by adding it to accumulated state.
f8085:c0:m26
def _Purge(self, event, by_tags):
<EOL>_NotExpired = lambda x: x.step < event.step<EOL>if by_tags:<EOL><INDENT>def _ExpiredPerTag(value):<EOL><INDENT>return [getattr(self, x).FilterItems(_NotExpired, value.tag)<EOL>for x in self.accumulated_attrs]<EOL><DEDENT>expired_per_tags = [_ExpiredPerTag(value)<EOL>for value in event.summary.value]<EOL>expired_per_type = [sum(x) for x in zip(*expired_per_tags)]<EOL><DEDENT>else:<EOL><INDENT>expired_per_type = [getattr(self, x).FilterItems(_NotExpired)<EOL>for x in self.accumulated_attrs]<EOL><DEDENT>if sum(expired_per_type) > <NUM_LIT:0>:<EOL><INDENT>purge_msg = _GetPurgeMessage(self.most_recent_step,<EOL>self.most_recent_wall_time, event.step,<EOL>event.wall_time, *expired_per_type)<EOL>logger.warn(purge_msg)<EOL><DEDENT>
Purge all events that have occurred after the given event.step. If by_tags is True, purge all events that occurred after the given event.step, but only for the tags that the event has. Non-sequential event.steps suggest that a TensorFlow restart occurred, and we discard the out-of-order events to display a consistent view in TensorBoard. Discarding by tags is the safer method, when we are unsure whether a restart has occurred, given that threading in supervisor can cause events of different tags to arrive with unsynchronized step values. If by_tags is False, then purge all events with event.step greater than the given event.step. This can be used when we are certain that a TensorFlow restart has occurred and these events can be discarded. Args: event: The event to use as reference for the purge. All events with the same tags, but with a greater event.step will be purged. by_tags: Bool to dictate whether to discard all out-of-order events or only those that are associated with the given reference event.
f8085:c0:m28
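A concrete instance of the scenario above: a job writes steps 0 through 12, crashes, restores a checkpoint at step 10, and resumes writing from step 10. When the accumulator sees the out-of-order step 10 (or a SessionLog.START event under file_version >= 2), every previously stored value at or after the restart step is filtered out and the re-written values are then added fresh, so TensorBoard shows one consistent timeline instead of overlapping tails.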
def IsTensorFlowEventsFile(path):
if not path:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return '<STR_LIT>' in tf.compat.as_str_any(os.path.basename(path))<EOL>
Check the path name to see if it is probably a TF Events file. Args: path: A file path to check if it is an event file. Raises: ValueError: If the path is an empty string. Returns: Whether the path is formatted like a TensorFlow events file.
f8086:m2
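Event file names produced by TensorFlow writers conventionally look like "events.out.tfevents.<timestamp>.<hostname>". Assuming the elided literal in the body above is that "tfevents" marker, the check behaves like this sketch:

import os

def is_events_file_sketch(path):
    if not path:
        raise ValueError('path must be a nonempty string')
    return 'tfevents' in os.path.basename(path)   # assumed marker substring

assert is_events_file_sketch('/logs/run1/events.out.tfevents.1588.host')
assert not is_events_file_sketch('/logs/run1/checkpoint')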
def ListDirectoryAbsolute(directory):
return (os.path.join(directory, path)<EOL>for path in tf.io.gfile.listdir(directory))<EOL>
Yields all files in the given directory. The paths are absolute.
f8086:m3
def _EscapeGlobCharacters(path):
drive, path = os.path.splitdrive(path)<EOL>return '<STR_LIT>' % (drive, _ESCAPE_GLOB_CHARACTERS_REGEX.sub(r'<STR_LIT>', path))<EOL>
Escapes the glob characters in a path. Python 3 has a glob.escape method, but Python 2 lacks it, so we manually implement this method. Args: path: The absolute path to escape. Returns: The escaped path string.
f8086:m4
def ListRecursivelyViaGlobbing(top):
current_glob_string = os.path.join(_EscapeGlobCharacters(top), '<STR_LIT:*>')<EOL>level = <NUM_LIT:0><EOL>while True:<EOL><INDENT>logger.info('<STR_LIT>', level)<EOL>glob = tf.io.gfile.glob(current_glob_string)<EOL>logger.info(<EOL>'<STR_LIT>', len(glob), level)<EOL>if not glob:<EOL><INDENT>return<EOL><DEDENT>pairs = collections.defaultdict(list)<EOL>for file_path in glob:<EOL><INDENT>pairs[os.path.dirname(file_path)].append(file_path)<EOL><DEDENT>for dir_name, file_paths in six.iteritems(pairs):<EOL><INDENT>yield (dir_name, tuple(file_paths))<EOL><DEDENT>if len(pairs) == <NUM_LIT:1>:<EOL><INDENT>current_glob_string = os.path.join(list(pairs.keys())[<NUM_LIT:0>], '<STR_LIT:*>')<EOL><DEDENT>current_glob_string = os.path.join(current_glob_string, '<STR_LIT:*>')<EOL>level += <NUM_LIT:1><EOL><DEDENT>
Recursively lists all files within the directory. The yielded file paths include subdirectories in addition to regular files, and every path is absolute. If the directory does not exist, this yields nothing. This method works by glob-ing deeper and deeper directories, i.e. foo/*, foo/*/*, foo/*/*/* and so on until all files are listed. For certain file systems, globbing via this method may prove significantly faster than recursively walking a directory. Specifically, TF file systems that implement TensorFlow's FileSystem.GetMatchingPaths method could save costly disk reads by using this method. However, for other file systems, this method might prove slower because the file system performs a walk per call to glob (in which case it might as well just perform one walk). Args: top: A path to a directory. Yields: A (dir_path, file_paths) tuple for each directory/subdirectory.
f8086:m5
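An illustration of the deepening glob strings the loop above generates (the top path is illustrative):

import os

glob_string = os.path.join('/logs', '*')          # level 0: /logs/*
for level in range(3):
    print(level, glob_string)                     # /logs/*, /logs/*/*, /logs/*/*/*
    glob_string = os.path.join(glob_string, '*')  # descend one directory level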
def ListRecursivelyViaWalking(top):
for dir_path, _, filenames in tf.io.gfile.walk(top, topdown=True):<EOL><INDENT>yield (dir_path, (os.path.join(dir_path, filename)<EOL>for filename in filenames))<EOL><DEDENT>
Walks a directory tree, yielding (dir_path, file_paths) tuples. For each of `top` and its subdirectories, yields a tuple containing the path to the directory and the path to each of the contained files. Note that unlike os.Walk()/tf.io.gfile.walk()/ListRecursivelyViaGlobbing, this does not list subdirectories. The file paths are all absolute. If the directory does not exist, this yields nothing. Walking may be incredibly slow on certain file systems. Args: top: A path to a directory. Yields: A (dir_path, file_paths) tuple for each directory/subdirectory.
f8086:m6
def GetLogdirSubdirectories(path):
if not tf.io.gfile.exists(path):<EOL><INDENT>return ()<EOL><DEDENT>if not tf.io.gfile.isdir(path):<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' % path)<EOL><DEDENT>if IsCloudPath(path):<EOL><INDENT>logger.info(<EOL>'<STR_LIT>')<EOL>traversal_method = ListRecursivelyViaGlobbing<EOL><DEDENT>else:<EOL><INDENT>logger.info(<EOL>'<STR_LIT>')<EOL>traversal_method = ListRecursivelyViaWalking<EOL><DEDENT>return (<EOL>subdir<EOL>for (subdir, files) in traversal_method(path)<EOL>if any(IsTensorFlowEventsFile(f) for f in files)<EOL>)<EOL>
Obtains all subdirectories with events files. The order of the subdirectories returned is unspecified. The internal logic that determines order varies by scenario. Args: path: The path to a directory under which to find subdirectories. Returns: A tuple of absolute paths of all subdirectories each with at least 1 events file directly within the subdirectory. Raises: ValueError: If the path passed to the method exists and is not a directory.
f8086:m7
def _CreateDeepDirectoryStructure(self, top_directory):
<EOL>directory_names = (<EOL>'<STR_LIT:foo>',<EOL>'<STR_LIT:bar>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>)<EOL>for directory_name in directory_names:<EOL><INDENT>os.makedirs(os.path.join(top_directory, directory_name))<EOL><DEDENT>file_names = (<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>)<EOL>for file_name in file_names:<EOL><INDENT>open(os.path.join(top_directory, file_name), '<STR_LIT:w>').close()<EOL><DEDENT>
Creates a reasonably deep structure of subdirectories with files. Args: top_directory: The absolute path of the top level directory in which to create the directory structure.
f8087:c0:m16
def _CompareFilesPerSubdirectory(self, expected, gotten):
expected_directory_to_listing = {<EOL>result[<NUM_LIT:0>]: list(result[<NUM_LIT:1>]) for result in expected}<EOL>gotten_directory_to_listing = {<EOL>result[<NUM_LIT:0>]: list(result[<NUM_LIT:1>]) for result in gotten}<EOL>self.assertItemsEqual(<EOL>expected_directory_to_listing.keys(),<EOL>gotten_directory_to_listing.keys())<EOL>for subdirectory, expected_listing in expected_directory_to_listing.items():<EOL><INDENT>gotten_listing = gotten_directory_to_listing[subdirectory]<EOL>self.assertItemsEqual(<EOL>expected_listing,<EOL>gotten_listing,<EOL>'<STR_LIT>' % (<EOL>subdirectory, expected_listing, gotten_listing))<EOL><DEDENT>
Compares iterables of (subdirectory path, list of absolute paths) tuples. Args: expected: The expected iterable of 2-tuples. gotten: The gotten iterable of 2-tuples.
f8087:c0:m17
def __init__(self, directory, loader_factory, path_filter=lambda x: True):
if directory is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if loader_factory is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self._directory = directory<EOL>self._path = None<EOL>self._loader_factory = loader_factory<EOL>self._loader = None<EOL>self._path_filter = path_filter<EOL>self._ooo_writes_detected = False<EOL>self._finalized_sizes = {}<EOL>
Constructs a new DirectoryWatcher. Args: directory: The directory to load files from. loader_factory: A factory for creating loaders. The factory should take a path and return an object that has a Load method returning an iterator that will yield all events that have not been yielded yet. path_filter: If specified, only paths matching this filter are loaded. Raises: ValueError: If directory or loader_factory is None.
f8088:c0:m0
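A usage sketch wiring the watcher to the event file loader, mirroring _GeneratorFromPath earlier in this section; the import paths and the directory are assumptions.

from tensorboard.backend.event_processing import (  # assumed import paths
    directory_watcher, event_file_loader, io_wrapper)

watcher = directory_watcher.DirectoryWatcher(
    '/tmp/my_run',                          # directory to watch (illustrative)
    event_file_loader.EventFileLoader,      # factory: path -> loader exposing Load()
    io_wrapper.IsTensorFlowEventsFile)      # only consider event files
for event in watcher.Load():                # yields events not yet yielded
    print(event.wall_time, event.step)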
def Load(self):
try:<EOL><INDENT>for event in self._LoadInternal():<EOL><INDENT>yield event<EOL><DEDENT><DEDENT>except tf.errors.OpError:<EOL><INDENT>if not tf.io.gfile.exists(self._directory):<EOL><INDENT>raise DirectoryDeletedError(<EOL>'<STR_LIT>' % self._directory)<EOL><DEDENT><DEDENT>
Loads new values. The watcher will load from one path at a time; as soon as that path stops yielding events, it will move on to the next path. We assume that old paths are never modified after a newer path has been written. As a result, Load() can be called multiple times in a row without losing events that have not been yielded yet. In other words, we guarantee that every event will be yielded exactly once. Yields: All values that have not been yielded yet. Raises: DirectoryDeletedError: If the directory has been permanently deleted (as opposed to being temporarily unavailable).
f8088:c0:m1
def _LoadInternal(self):
<EOL>if not self._loader:<EOL><INDENT>self._InitializeLoader()<EOL><DEDENT>while True:<EOL><INDENT>for event in self._loader.Load():<EOL><INDENT>yield event<EOL><DEDENT>next_path = self._GetNextPath()<EOL>if not next_path:<EOL><INDENT>logger.info('<STR_LIT>', self._path)<EOL>return<EOL><DEDENT>for event in self._loader.Load():<EOL><INDENT>yield event<EOL><DEDENT>logger.info('<STR_LIT>', self._path,<EOL>next_path)<EOL>self._SetPath(next_path)<EOL><DEDENT>
Internal implementation of Load(). The only difference between this and Load() is that the latter will throw DirectoryDeletedError on I/O errors if it thinks that the directory has been permanently deleted. Yields: All values that have not been yielded yet.
f8088:c0:m2
def OutOfOrderWritesDetected(self):
return self._ooo_writes_detected<EOL>
Returns whether any out-of-order writes have been detected. Out-of-order writes are only checked as part of the Load() iterator. Once an out-of-order write is detected, this function will always return true. Note that out-of-order write detection is not performed on GCS paths, so this function will always return false. Returns: Whether any out-of-order write has ever been detected by this watcher.
f8088:c0:m3
def _SetPath(self, path):
old_path = self._path<EOL>if old_path and not io_wrapper.IsCloudPath(old_path):<EOL><INDENT>try:<EOL><INDENT>size = tf.io.gfile.stat(old_path).length<EOL>logger.debug('<STR_LIT>', old_path, size)<EOL>self._finalized_sizes[old_path] = size<EOL><DEDENT>except tf.errors.OpError as e:<EOL><INDENT>logger.error('<STR_LIT>', old_path, e)<EOL><DEDENT><DEDENT>self._path = path<EOL>self._loader = self._loader_factory(path)<EOL>
Sets the current path to watch for new events. This also records the size of the old path, if any. If the size can't be found, an error is logged. Args: path: The full path of the file to watch.
f8088:c0:m5
def _GetNextPath(self):
paths = sorted(path<EOL>for path in io_wrapper.ListDirectoryAbsolute(self._directory)<EOL>if self._path_filter(path))<EOL>if not paths:<EOL><INDENT>return None<EOL><DEDENT>if self._path is None:<EOL><INDENT>return paths[<NUM_LIT:0>]<EOL><DEDENT>if not io_wrapper.IsCloudPath(paths[<NUM_LIT:0>]) and not self._ooo_writes_detected:<EOL><INDENT>current_path_index = bisect.bisect_left(paths, self._path)<EOL>ooo_check_start = max(<NUM_LIT:0>, current_path_index - self._OOO_WRITE_CHECK_COUNT)<EOL>for path in paths[ooo_check_start:current_path_index]:<EOL><INDENT>if self._HasOOOWrite(path):<EOL><INDENT>self._ooo_writes_detected = True<EOL>break<EOL><DEDENT><DEDENT><DEDENT>next_paths = list(path<EOL>for path in paths<EOL>if self._path is None or path > self._path)<EOL>if next_paths:<EOL><INDENT>return min(next_paths)<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>
Gets the next path to load from. This function also does the checking for out-of-order writes as it iterates through the paths. Returns: The next path to load events from, or None if there are no more paths.
f8088:c0:m6
def _HasOOOWrite(self, path):
<EOL>size = tf.io.gfile.stat(path).length<EOL>old_size = self._finalized_sizes.get(path, None)<EOL>if size != old_size:<EOL><INDENT>if old_size is None:<EOL><INDENT>logger.error('<STR_LIT>'<EOL>'<STR_LIT>', path, self._path)<EOL><DEDENT>else:<EOL><INDENT>logger.error('<STR_LIT>',<EOL>path, self._path)<EOL><DEDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>
Returns whether the path has had an out-of-order write.
f8088:c0:m7
def _GetPurgeMessage(most_recent_step, most_recent_wall_time, event_step,<EOL>event_wall_time, num_expired):
return ('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>).format(num_expired, most_recent_step, most_recent_wall_time,<EOL>event_step, event_wall_time)<EOL>
Return the string message associated with TensorBoard purges.
f8089:m0
def _GeneratorFromPath(path):
if not path:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if io_wrapper.IsTensorFlowEventsFile(path):<EOL><INDENT>return event_file_loader.EventFileLoader(path)<EOL><DEDENT>else:<EOL><INDENT>return directory_watcher.DirectoryWatcher(<EOL>path,<EOL>event_file_loader.EventFileLoader,<EOL>io_wrapper.IsTensorFlowEventsFile)<EOL><DEDENT>
Create an event generator for file or directory at given path string.
f8089:m1
def _ParseFileVersion(file_version):
tokens = file_version.split('<STR_LIT>')<EOL>try:<EOL><INDENT>return float(tokens[-<NUM_LIT:1>])<EOL><DEDENT>except ValueError:<EOL><INDENT>logger.warn(<EOL>('<STR_LIT>'<EOL>'<STR_LIT>'))<EOL>return -<NUM_LIT:1><EOL><DEDENT>
Convert the string file_version in event.proto into a float. Args: file_version: String file_version from event.proto Returns: Version number as a float.
f8089:m2
def __init__(self,<EOL>path,<EOL>size_guidance=None,<EOL>tensor_size_guidance=None,<EOL>purge_orphaned_data=True):
size_guidance = dict(size_guidance or DEFAULT_SIZE_GUIDANCE)<EOL>sizes = {}<EOL>for key in DEFAULT_SIZE_GUIDANCE:<EOL><INDENT>if key in size_guidance:<EOL><INDENT>sizes[key] = size_guidance[key]<EOL><DEDENT>else:<EOL><INDENT>sizes[key] = DEFAULT_SIZE_GUIDANCE[key]<EOL><DEDENT><DEDENT>self._size_guidance = size_guidance<EOL>self._tensor_size_guidance = dict(tensor_size_guidance or {})<EOL>self._first_event_timestamp = None<EOL>self._graph = None<EOL>self._graph_from_metagraph = False<EOL>self._meta_graph = None<EOL>self._tagged_metadata = {}<EOL>self.summary_metadata = {}<EOL>self.tensors_by_tag = {}<EOL>self._tensors_by_tag_lock = threading.Lock()<EOL>self._plugin_to_tag_to_content = collections.defaultdict(dict)<EOL>self._plugin_tag_locks = collections.defaultdict(threading.Lock)<EOL>self.path = path<EOL>self._generator = _GeneratorFromPath(path)<EOL>self._generator_mutex = threading.Lock()<EOL>self.purge_orphaned_data = purge_orphaned_data<EOL>self.most_recent_step = -<NUM_LIT:1><EOL>self.most_recent_wall_time = -<NUM_LIT:1><EOL>self.file_version = None<EOL>
Construct the `EventAccumulator`. Args: path: A file path to a directory containing tf events files, or a single tf events file. The accumulator will load events from this path. size_guidance: Information on how much data the EventAccumulator should store in memory. The DEFAULT_SIZE_GUIDANCE tries not to store too much so as to avoid OOMing the client. The size_guidance should be a map from a `tagType` string to an integer representing the number of items to keep per tag for items of that `tagType`. If the size is 0, all events are stored. tensor_size_guidance: Like `size_guidance`, but allowing finer granularity for tensor summaries. Should be a map from the `plugin_name` field on the `PluginData` proto to an integer representing the number of items to keep per tag. Plugins for which there is no entry in this map will default to the value of `size_guidance[event_accumulator.TENSORS]`. Defaults to `{}`. purge_orphaned_data: Whether to discard any events that were "orphaned" by a TensorFlow restart.
f8089:c0:m0
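A hedged sketch of the tensor_size_guidance knob described above; the names below are assumed to refer to the class and constants defined in this module, and the plugin name and sizes are illustrative.

# Assuming `EventAccumulator` and `TENSORS` are the class and constant from this module:
acc = EventAccumulator(
    '/tmp/my_run',
    size_guidance={TENSORS: 10},             # default cap per tensor tag
    tensor_size_guidance={'scalars': 1000})  # larger cap for tags owned by one plugin
acc.Reload()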
def Reload(self):
with self._generator_mutex:<EOL><INDENT>for event in self._generator.Load():<EOL><INDENT>self._ProcessEvent(event)<EOL><DEDENT><DEDENT>return self<EOL>
Loads all events added since the last call to `Reload`. If `Reload` was never called, loads all events in the file. Returns: The `EventAccumulator`.
f8089:c0:m1
def PluginAssets(self, plugin_name):
return plugin_asset_util.ListAssets(self.path, plugin_name)<EOL>
Return a list of all plugin assets for the given plugin. Args: plugin_name: The string name of a plugin to retrieve assets for. Returns: A list of string plugin asset names, or empty list if none are available. If the plugin was not registered, an empty list is returned.
f8089:c0:m2
def RetrievePluginAsset(self, plugin_name, asset_name):
return plugin_asset_util.RetrieveAsset(self.path, plugin_name, asset_name)<EOL>
Return the contents of a given plugin asset. Args: plugin_name: The string name of a plugin. asset_name: The string name of an asset. Returns: The string contents of the plugin asset. Raises: KeyError: If the asset is not available.
f8089:c0:m3
def FirstEventTimestamp(self):
if self._first_event_timestamp is not None:<EOL><INDENT>return self._first_event_timestamp<EOL><DEDENT>with self._generator_mutex:<EOL><INDENT>try:<EOL><INDENT>event = next(self._generator.Load())<EOL>self._ProcessEvent(event)<EOL>return self._first_event_timestamp<EOL><DEDENT>except StopIteration:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>
Returns the timestamp in seconds of the first event. If the first event has been loaded (either by this method or by `Reload`), this returns immediately. Otherwise, it will load in the first event. Note that this means that calling `Reload` will cause this to block until `Reload` has finished. Returns: The timestamp in seconds of the first event that was loaded. Raises: ValueError: If no events have been loaded and there were no events found on disk.
f8089:c0:m4
def PluginTagToContent(self, plugin_name):
if plugin_name not in self._plugin_to_tag_to_content:<EOL><INDENT>raise KeyError('<STR_LIT>' % plugin_name)<EOL><DEDENT>with self._plugin_tag_locks[plugin_name]:<EOL><INDENT>return dict(self._plugin_to_tag_to_content[plugin_name])<EOL><DEDENT>
Returns a dict mapping tags to content specific to that plugin. Args: plugin_name: The name of the plugin for which to fetch plugin-specific content. Raises: KeyError: if the plugin name is not found. Returns: A dict mapping tags to plugin-specific content (which are always strings). Those strings are often serialized protos.
f8089:c0:m5
def SummaryMetadata(self, tag):
return self.summary_metadata[tag]<EOL>
Given a summary tag name, return the associated metadata object. Args: tag: The name of a tag, as a string. Raises: KeyError: If the tag is not found. Returns: A `SummaryMetadata` protobuf.
f8089:c0:m6
def _ProcessEvent(self, event):
if self._first_event_timestamp is None:<EOL><INDENT>self._first_event_timestamp = event.wall_time<EOL><DEDENT>if event.HasField('<STR_LIT>'):<EOL><INDENT>new_file_version = _ParseFileVersion(event.file_version)<EOL>if self.file_version and self.file_version != new_file_version:<EOL><INDENT>logger.warn(('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>').format(self.file_version,<EOL>new_file_version))<EOL><DEDENT>self.file_version = new_file_version<EOL><DEDENT>self._MaybePurgeOrphanedData(event)<EOL>if event.HasField('<STR_LIT>'):<EOL><INDENT>if self._graph is not None:<EOL><INDENT>logger.warn(<EOL>('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'))<EOL><DEDENT>self._graph = event.graph_def<EOL>self._graph_from_metagraph = False<EOL><DEDENT>elif event.HasField('<STR_LIT>'):<EOL><INDENT>if self._meta_graph is not None:<EOL><INDENT>logger.warn(('<STR_LIT>'<EOL>'<STR_LIT>'))<EOL><DEDENT>self._meta_graph = event.meta_graph_def<EOL>if self._graph is None or self._graph_from_metagraph:<EOL><INDENT>meta_graph = meta_graph_pb2.MetaGraphDef()<EOL>meta_graph.ParseFromString(self._meta_graph)<EOL>if meta_graph.graph_def:<EOL><INDENT>if self._graph is not None:<EOL><INDENT>logger.warn(<EOL>('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'))<EOL><DEDENT>self._graph_from_metagraph = True<EOL>self._graph = meta_graph.graph_def.SerializeToString()<EOL><DEDENT><DEDENT><DEDENT>elif event.HasField('<STR_LIT>'):<EOL><INDENT>tag = event.tagged_run_metadata.tag<EOL>if tag in self._tagged_metadata:<EOL><INDENT>logger.warn('<STR_LIT>' +<EOL>tag + '<STR_LIT>')<EOL><DEDENT>self._tagged_metadata[tag] = event.tagged_run_metadata.run_metadata<EOL><DEDENT>elif event.HasField('<STR_LIT>'):<EOL><INDENT>for value in event.summary.value:<EOL><INDENT>value = data_compat.migrate_value(value)<EOL>if value.HasField('<STR_LIT>'):<EOL><INDENT>tag = value.tag<EOL>if tag not in self.summary_metadata:<EOL><INDENT>self.summary_metadata[tag] = value.metadata<EOL>plugin_data = value.metadata.plugin_data<EOL>if plugin_data.plugin_name:<EOL><INDENT>with self._plugin_tag_locks[plugin_data.plugin_name]:<EOL><INDENT>self._plugin_to_tag_to_content[plugin_data.plugin_name][tag] = (<EOL>plugin_data.content)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger.warn(<EOL>('<STR_LIT>'<EOL>'<STR_LIT>'), tag)<EOL><DEDENT><DEDENT><DEDENT>for summary_type, summary_func in SUMMARY_TYPES.items():<EOL><INDENT>if value.HasField(summary_type):<EOL><INDENT>datum = getattr(value, summary_type)<EOL>tag = value.tag<EOL>if summary_type == '<STR_LIT>' and not tag:<EOL><INDENT>tag = value.node_name<EOL><DEDENT>getattr(self, summary_func)(tag, event.wall_time, event.step, datum)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>
Called whenever an event is loaded.
f8089:c0:m7
def Tags(self):
return {<EOL>TENSORS: list(self.tensors_by_tag.keys()),<EOL>GRAPH: self._graph is not None,<EOL>META_GRAPH: self._meta_graph is not None,<EOL>RUN_METADATA: list(self._tagged_metadata.keys())<EOL>}<EOL>
Return all tags found in the value stream. Returns: A `{tagType: ['list', 'of', 'tags']}` dictionary.
f8089:c0:m8
def Graph(self):
graph = graph_pb2.GraphDef()<EOL>if self._graph is not None:<EOL><INDENT>graph.ParseFromString(self._graph)<EOL>return graph<EOL><DEDENT>raise ValueError('<STR_LIT>')<EOL>
Return the graph definition, if there is one. If the graph is stored directly, return that. If no graph is stored directly but a metagraph is stored containing a graph, return that. Raises: ValueError: If there is no graph for this run. Returns: The `graph_def` proto.
f8089:c0:m9
def MetaGraph(self):
if self._meta_graph is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>meta_graph = meta_graph_pb2.MetaGraphDef()<EOL>meta_graph.ParseFromString(self._meta_graph)<EOL>return meta_graph<EOL>
Return the metagraph definition, if there is one. Raises: ValueError: If there is no metagraph for this run. Returns: The `meta_graph_def` proto.
f8089:c0:m10
def RunMetadata(self, tag):
if tag not in self._tagged_metadata:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>run_metadata = config_pb2.RunMetadata()<EOL>run_metadata.ParseFromString(self._tagged_metadata[tag])<EOL>return run_metadata<EOL>
Given a tag, return the associated session.run() metadata. Args: tag: A string tag associated with the event. Raises: ValueError: If the tag is not found. Returns: The metadata in form of `RunMetadata` proto.
f8089:c0:m11
def Tensors(self, tag):
return self.tensors_by_tag[tag].Items(_TENSOR_RESERVOIR_KEY)<EOL>
Given a summary tag, return all associated tensors. Args: tag: A string tag associated with the events. Raises: KeyError: If the tag is not found. Returns: An array of `TensorEvent`s.
f8089:c0:m12
def _MaybePurgeOrphanedData(self, event):
if not self.purge_orphaned_data:<EOL><INDENT>return<EOL><DEDENT>if self.file_version and self.file_version >= <NUM_LIT:2>:<EOL><INDENT>self._CheckForRestartAndMaybePurge(event)<EOL><DEDENT>else:<EOL><INDENT>self._CheckForOutOfOrderStepAndMaybePurge(event)<EOL><DEDENT>if event.HasField('<STR_LIT>'):<EOL><INDENT>self.most_recent_step = event.step<EOL>self.most_recent_wall_time = event.wall_time<EOL><DEDENT>
Maybe purge orphaned data due to a TensorFlow crash. When TensorFlow crashes at step T+O and restarts at step T, any events written after step T are now "orphaned" and will be at best misleading if they are included in TensorBoard. This logic attempts to determine if there is orphaned data, and purge it if it is found. Args: event: The event to use as a reference, to determine if a purge is needed.
f8089:c0:m13
def _CheckForRestartAndMaybePurge(self, event):
if event.HasField(<EOL>'<STR_LIT>') and event.session_log.status == event_pb2.SessionLog.START:<EOL><INDENT>self._Purge(event, by_tags=False)<EOL><DEDENT>
Check and discard expired events using SessionLog.START. Check for a SessionLog.START event and purge all previously seen events with larger steps, because they are out of date. Because of supervisor threading, it is possible that this logic will cause the first few event messages to be discarded since supervisor threading does not guarantee that the START message is deterministically written first. This method is preferred over _CheckForOutOfOrderStepAndMaybePurge which can inadvertently discard events due to supervisor threading. Args: event: The event to use as reference. If the event is a START event, all previously seen events with a greater event.step will be purged.
f8089:c0:m14
def _CheckForOutOfOrderStepAndMaybePurge(self, event):
if event.step < self.most_recent_step and event.HasField('<STR_LIT>'):<EOL><INDENT>self._Purge(event, by_tags=True)<EOL><DEDENT>
Check for out-of-order event.step and discard expired events for tags. Check if the event is out of order relative to the global most recent step. If it is, purge outdated summaries for tags that the event contains. Args: event: The event to use as reference. If the event is out-of-order, all events with the same tags, but with a greater event.step will be purged.
f8089:c0:m15
def _Purge(self, event, by_tags):
<EOL>_NotExpired = lambda x: x.step < event.step<EOL>num_expired = <NUM_LIT:0><EOL>if by_tags:<EOL><INDENT>for value in event.summary.value:<EOL><INDENT>if value.tag in self.tensors_by_tag:<EOL><INDENT>tag_reservoir = self.tensors_by_tag[value.tag]<EOL>num_expired += tag_reservoir.FilterItems(<EOL>_NotExpired, _TENSOR_RESERVOIR_KEY)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for tag_reservoir in six.itervalues(self.tensors_by_tag):<EOL><INDENT>num_expired += tag_reservoir.FilterItems(<EOL>_NotExpired, _TENSOR_RESERVOIR_KEY)<EOL><DEDENT><DEDENT>if num_expired > <NUM_LIT:0>:<EOL><INDENT>purge_msg = _GetPurgeMessage(self.most_recent_step,<EOL>self.most_recent_wall_time, event.step,<EOL>event.wall_time, num_expired)<EOL>logger.warn(purge_msg)<EOL><DEDENT>
Purge all events that have occurred after the given event.step. If by_tags is True, purge all events that occurred after the given event.step, but only for the tags that the event has. Non-sequential event.steps suggest that a TensorFlow restart occurred, and we discard the out-of-order events to display a consistent view in TensorBoard. Discarding by tags is the safer method, when we are unsure whether a restart has occurred, given that threading in supervisor can cause events of different tags to arrive with unsynchronized step values. If by_tags is False, then purge all events with event.step greater than the given event.step. This can be used when we are certain that a TensorFlow restart has occurred and these events can be discarded. Args: event: The event to use as reference for the purge. All events with the same tags, but with a greater event.step will be purged. by_tags: Bool to dictate whether to discard all out-of-order events or only those that are associated with the given reference event.
f8089:c0:m18
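A self-contained sketch of the by_tags branch, replacing the reservoir buffers with plain lists; the function name, dict layout, and numbers are illustrative, not the real classes.

def purge_by_tags(event_step, event_tags, items_by_tag):
    """Drops buffered items with step >= event_step for the given tags."""
    num_expired = 0
    for tag in event_tags:
        if tag in items_by_tag:
            kept = [x for x in items_by_tag[tag] if x['step'] < event_step]
            num_expired += len(items_by_tag[tag]) - len(kept)
            items_by_tag[tag] = kept
    return num_expired

buffers = {'loss': [{'step': 0}, {'step': 5}, {'step': 10}]}
assert purge_by_tags(5, ['loss'], buffers) == 2   # steps 5 and 10 are dropped
assert buffers == {'loss': [{'step': 0}]}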
def get_field_to_observations_map(generator, query_for_tag='<STR_LIT>'):
def increment(stat, event, tag='<STR_LIT>'):<EOL><INDENT>assert stat in TRACKED_FIELDS<EOL>field_to_obs[stat].append(Observation(step=event.step,<EOL>wall_time=event.wall_time,<EOL>tag=tag)._asdict())<EOL><DEDENT>field_to_obs = dict([(t, []) for t in TRACKED_FIELDS])<EOL>for event in generator:<EOL><INDENT>if event.HasField('<STR_LIT>') and (not query_for_tag):<EOL><INDENT>increment('<STR_LIT>', event)<EOL><DEDENT>if event.HasField('<STR_LIT>') and (not query_for_tag):<EOL><INDENT>status = event.session_log.status<EOL>if status == event_pb2.SessionLog.START:<EOL><INDENT>increment('<STR_LIT>', event)<EOL><DEDENT>elif status == event_pb2.SessionLog.STOP:<EOL><INDENT>increment('<STR_LIT>', event)<EOL><DEDENT>elif status == event_pb2.SessionLog.CHECKPOINT:<EOL><INDENT>increment('<STR_LIT>', event)<EOL><DEDENT><DEDENT>elif event.HasField('<STR_LIT>'):<EOL><INDENT>for value in event.summary.value:<EOL><INDENT>if query_for_tag and value.tag != query_for_tag:<EOL><INDENT>continue<EOL><DEDENT>for proto_name, display_name in SUMMARY_TYPE_TO_FIELD.items():<EOL><INDENT>if value.HasField(proto_name):<EOL><INDENT>increment(display_name, event, value.tag)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>return field_to_obs<EOL>
Return a field to `Observations` dict for the event generator. Args: generator: A generator over event protos. query_for_tag: A string that if specified, only create observations for events with this tag name. Returns: A dict mapping keys in `TRACKED_FIELDS` to an `Observation` list.
f8090:m0
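A hedged usage sketch: the event-file path is illustrative, 'scalars' is assumed (not confirmed here) to be one of the TRACKED_FIELDS keys, and the step/wall_time/tag keys come from the Observation namedtuple used above.

generator = generator_from_event_file('/tmp/logdir/events.out.tfevents.example')
field_to_obs = get_field_to_observations_map(generator, query_for_tag='loss')
for obs in field_to_obs.get('scalars', []):
    print(obs['step'], obs['wall_time'], obs['tag'])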
def get_unique_tags(field_to_obs):
return {field: sorted(set([x.get('<STR_LIT>', '<STR_LIT>') for x in observations]))<EOL>for field, observations in field_to_obs.items()<EOL>if field in TAG_FIELDS}<EOL>
Returns a dictionary of tags that a user could query over. Args: field_to_obs: Dict that maps string field to `Observation` list. Returns: A dict that maps keys in `TAG_FIELDS` to a list of string tags present in the event files. If there are no observations of a given type, the field maps to an empty list so that it can still be rendered to the console.
f8090:m1
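A pure-data illustration of the transformation, assuming 'scalars' belongs to TAG_FIELDS; the tag names are made up.

field_to_obs = {
    'scalars': [
        {'step': 0, 'wall_time': 0.0, 'tag': 'loss'},
        {'step': 1, 'wall_time': 1.0, 'tag': 'loss'},
        {'step': 0, 'wall_time': 0.0, 'tag': 'accuracy'},
    ],
}
print(get_unique_tags(field_to_obs))  # {'scalars': ['accuracy', 'loss']}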
def print_dict(d, show_missing=True):
for k, v in sorted(d.items()):<EOL><INDENT>if (not v) and show_missing:<EOL><INDENT>print('<STR_LIT>'.format(k))<EOL><DEDENT>elif isinstance(v, list):<EOL><INDENT>print(k)<EOL>for item in v:<EOL><INDENT>print('<STR_LIT>'.format(item))<EOL><DEDENT><DEDENT>elif isinstance(v, dict):<EOL><INDENT>print(k)<EOL>for kk, vv in sorted(v.items()):<EOL><INDENT>print('<STR_LIT>'.format(kk, vv))<EOL><DEDENT><DEDENT><DEDENT>
Prints a shallow dict to console. Args: d: Dict to print. show_missing: Whether to show keys with empty values.
f8090:m2
def get_dict_to_print(field_to_obs):
def compressed_steps(steps):<EOL><INDENT>return {'<STR_LIT>': len(set(steps)),<EOL>'<STR_LIT>': min(steps),<EOL>'<STR_LIT>': max(steps),<EOL>'<STR_LIT>': steps[-<NUM_LIT:1>],<EOL>'<STR_LIT>': steps[<NUM_LIT:0>],<EOL>'<STR_LIT>': get_out_of_order(steps)}<EOL><DEDENT>def full_steps(steps):<EOL><INDENT>return {'<STR_LIT>': steps, '<STR_LIT>': get_out_of_order(steps)}<EOL><DEDENT>output = {}<EOL>for field, observations in field_to_obs.items():<EOL><INDENT>if not observations:<EOL><INDENT>output[field] = None<EOL>continue<EOL><DEDENT>steps = [x['<STR_LIT>'] for x in observations]<EOL>if field in SHORT_FIELDS:<EOL><INDENT>output[field] = compressed_steps(steps)<EOL><DEDENT>if field in LONG_FIELDS:<EOL><INDENT>output[field] = full_steps(steps)<EOL><DEDENT><DEDENT>return output<EOL>
Transform the field-to-obs mapping into a printable dictionary. Args: field_to_obs: Dict that maps string field to `Observation` list. Returns: A dict with the keys and values to print to console.
f8090:m3
def get_out_of_order(list_of_numbers):
<EOL>result = []<EOL>for i in range(len(list_of_numbers)):<EOL><INDENT>if i == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>if list_of_numbers[i] < list_of_numbers[i - <NUM_LIT:1>]:<EOL><INDENT>result.append((list_of_numbers[i - <NUM_LIT:1>], list_of_numbers[i]))<EOL><DEDENT><DEDENT>return result<EOL>
Returns elements that break the monotonically non-decreasing trend. This is used to find instances of global step values that are "out-of-order", which may trigger TensorBoard's event-discarding logic. Args: list_of_numbers: A list of numbers. Returns: A list of tuples, one per adjacent pair in which the second element is lower than the first (i.e. each place where the sequence decreases).
f8090:m4
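A worked example of the pure function above; the decreases 5->3 and 4->1 are exactly the pairs it reports.

steps = [0, 1, 2, 5, 3, 4, 1]
print(get_out_of_order(steps))  # [(5, 3), (4, 1)]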
def generators_from_logdir(logdir):
subdirs = io_wrapper.GetLogdirSubdirectories(logdir)<EOL>generators = [<EOL>itertools.chain(*[<EOL>generator_from_event_file(os.path.join(subdir, f))<EOL>for f in tf.io.gfile.listdir(subdir)<EOL>if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f))<EOL>]) for subdir in subdirs<EOL>]<EOL>return generators<EOL>
Returns a list of event generators for subdirectories with event files. The number of generators returned should equal the number of directories within logdir that contain event files. If only logdir contains event files, returns a list of length one. Args: logdir: A log directory that contains event files. Returns: List of event generators for each subdirectory with event files.
f8090:m5
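A hedged usage sketch; the logdir path is illustrative.

for generator in generators_from_logdir('/tmp/logdir'):
    for event in generator:  # each generator covers one subdirectory with event files
        print(event.step, event.wall_time)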
def generator_from_event_file(event_file):
return event_file_loader.EventFileLoader(event_file).Load()<EOL>
Returns a generator that yields events from an event file.
f8090:m6
def get_inspection_units(logdir='<STR_LIT>', event_file='<STR_LIT>', tag='<STR_LIT>'):
if logdir:<EOL><INDENT>subdirs = io_wrapper.GetLogdirSubdirectories(logdir)<EOL>inspection_units = []<EOL>for subdir in subdirs:<EOL><INDENT>generator = itertools.chain(*[<EOL>generator_from_event_file(os.path.join(subdir, f))<EOL>for f in tf.io.gfile.listdir(subdir)<EOL>if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f))<EOL>])<EOL>inspection_units.append(InspectionUnit(<EOL>name=subdir,<EOL>generator=generator,<EOL>field_to_obs=get_field_to_observations_map(generator, tag)))<EOL><DEDENT>if inspection_units:<EOL><INDENT>print('<STR_LIT>'.format('<STR_LIT:\n>'.join(<EOL>[u.name for u in inspection_units])))<EOL><DEDENT>elif io_wrapper.IsTensorFlowEventsFile(logdir):<EOL><INDENT>print(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(logdir))<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>'.format(logdir))<EOL><DEDENT>return inspection_units<EOL><DEDENT>elif event_file:<EOL><INDENT>generator = generator_from_event_file(event_file)<EOL>return [InspectionUnit(<EOL>name=event_file,<EOL>generator=generator,<EOL>field_to_obs=get_field_to_observations_map(generator, tag))]<EOL><DEDENT>return []<EOL>
Returns a list of InspectionUnit objects given either logdir or event_file. If logdir is given, the number of InspectionUnits should equal the number of directories or subdirectories that contain event files. If event_file is given, the number of InspectionUnits should be 1. Args: logdir: A log directory that contains event files. event_file: Or, a particular event file path. tag: An optional tag name to query for. Returns: A list of InspectionUnit objects.
f8090:m7
def inspect(logdir='<STR_LIT>', event_file='<STR_LIT>', tag='<STR_LIT>'):
print(PRINT_SEPARATOR +<EOL>'<STR_LIT>' +<EOL>PRINT_SEPARATOR)<EOL>inspection_units = get_inspection_units(logdir, event_file, tag)<EOL>for unit in inspection_units:<EOL><INDENT>if tag:<EOL><INDENT>print('<STR_LIT>'.format(tag, unit.name))<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>'.format(unit.name))<EOL>print_dict(get_unique_tags(unit.field_to_obs))<EOL>print(PRINT_SEPARATOR)<EOL>print('<STR_LIT>'.format(unit.name))<EOL><DEDENT>print_dict(get_dict_to_print(unit.field_to_obs), show_missing=(not tag))<EOL>print(PRINT_SEPARATOR)<EOL><DEDENT>
Main function for inspector that prints out a digest of event files. Args: logdir: A log directory that contains event files. event_file: Or, a particular event file path. tag: An optional tag name to query for. Raises: ValueError: If neither logdir nor event_file is given, or if both are given.
f8090:m8
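A hedged sketch of the supported call patterns; the paths and tag name are illustrative, and exactly one of logdir/event_file should be supplied per call.

inspect(logdir='/tmp/logdir')                  # digest of every run found
inspect(logdir='/tmp/logdir', tag='loss')      # restrict the digest to one tag
inspect(event_file='/tmp/logdir/events.out.tfevents.example')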
def __init__(self,<EOL>run_path_map=None,<EOL>size_guidance=None,<EOL>purge_orphaned_data=True):
logger.info('<STR_LIT>')<EOL>self._accumulators_mutex = threading.Lock()<EOL>self._accumulators = {}<EOL>self._paths = {}<EOL>self._reload_called = False<EOL>self._size_guidance = (size_guidance or<EOL>event_accumulator.DEFAULT_SIZE_GUIDANCE)<EOL>self.purge_orphaned_data = purge_orphaned_data<EOL>if run_path_map is not None:<EOL><INDENT>logger.info('<STR_LIT>',<EOL>run_path_map)<EOL>for (run, path) in six.iteritems(run_path_map):<EOL><INDENT>self.AddRun(path, run)<EOL><DEDENT><DEDENT>logger.info('<STR_LIT>')<EOL>
Constructor for the `EventMultiplexer`. Args: run_path_map: Dict `{run: path}` which specifies the name of a run, and the path to find the associated events. If it is None, then the EventMultiplexer initializes without any runs. size_guidance: A dictionary mapping from `tagType` to the number of items to store for each tag of that type. See `event_accumulator.EventAccumulator` for details. purge_orphaned_data: Whether to discard any events that were "orphaned" by a TensorFlow restart.
f8091:c0:m0
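A construction sketch; the import path is an assumption based on TensorBoard's event_processing package layout, and the run names and paths are illustrative.

from tensorboard.backend.event_processing import event_multiplexer  # assumed module path

multiplexer = event_multiplexer.EventMultiplexer(
    run_path_map={'train': '/tmp/logdir/train', 'eval': '/tmp/logdir/eval'},
    purge_orphaned_data=True)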
def AddRun(self, path, name=None):
name = name or path<EOL>accumulator = None<EOL>with self._accumulators_mutex:<EOL><INDENT>if name not in self._accumulators or self._paths[name] != path:<EOL><INDENT>if name in self._paths and self._paths[name] != path:<EOL><INDENT>logger.warn('<STR_LIT>',<EOL>name, self._paths[name], path)<EOL><DEDENT>logger.info('<STR_LIT>', path)<EOL>accumulator = event_accumulator.EventAccumulator(<EOL>path,<EOL>size_guidance=self._size_guidance,<EOL>purge_orphaned_data=self.purge_orphaned_data)<EOL>self._accumulators[name] = accumulator<EOL>self._paths[name] = path<EOL><DEDENT><DEDENT>if accumulator:<EOL><INDENT>if self._reload_called:<EOL><INDENT>accumulator.Reload()<EOL><DEDENT><DEDENT>return self<EOL>
Add a run to the multiplexer. If the name is not specified, it is the same as the path. If a run by that name exists, and we are already watching the right path, do nothing. If we are watching a different path, replace the event accumulator. If `Reload` has been called, it will `Reload` the newly created accumulators. Args: path: Path to the event files (or event directory) for given run. name: Name of the run to add. If not provided, is set to path. Returns: The `EventMultiplexer`.
f8091:c0:m1
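Illustrative calls continuing the sketch above; AddRun returns the multiplexer itself, so calls can be chained, and the paths/names are made up.

multiplexer.AddRun('/tmp/logdir/run1')                     # run name defaults to the path
multiplexer.AddRun('/tmp/logdir/run2', name='validation')  # explicit run name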
def AddRunsFromDirectory(self, path, name=None):
logger.info('<STR_LIT>', path)<EOL>for subdir in io_wrapper.GetLogdirSubdirectories(path):<EOL><INDENT>logger.info('<STR_LIT>', subdir)<EOL>rpath = os.path.relpath(subdir, path)<EOL>subname = os.path.join(name, rpath) if name else rpath<EOL>self.AddRun(subdir, name=subname)<EOL><DEDENT>logger.info('<STR_LIT>', path)<EOL>return self<EOL>
Load runs from a directory; recursively walks subdirectories. If path doesn't exist, no-op. This ensures that it is safe to call `AddRunsFromDirectory` multiple times, even before the directory is made. If path is a directory, load event files in the directory (if any exist) and recursively call AddRunsFromDirectory on any subdirectories. This means you can call AddRunsFromDirectory at the root of a tree of event logs and TensorBoard will load them all. If the `EventMultiplexer` is already loaded this will cause the newly created accumulators to `Reload()`. Args: path: A string path to a directory to load runs from. name: Optionally, what name to apply to the runs. If name is provided and the directory contains run subdirectories, the name of each subrun is the concatenation of the parent name and the subdirectory name. If name is provided and the directory contains event files, then a run named `name` is added with the events from the path. Raises: ValueError: If the path exists and isn't a directory. Returns: The `EventMultiplexer`.
f8091:c0:m2
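Illustrative call continuing the sketch above: every subdirectory of the given path that holds event files becomes a run named "experiment/<relative subdirectory path>"; the path and name are made up.

multiplexer.AddRunsFromDirectory('/tmp/logdir', name='experiment')
multiplexer.Reload()  # pull in events from the newly added runs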
def Reload(self):
logger.info('<STR_LIT>')<EOL>self._reload_called = True<EOL>with self._accumulators_mutex:<EOL><INDENT>items = list(self._accumulators.items())<EOL><DEDENT>names_to_delete = set()<EOL>for name, accumulator in items:<EOL><INDENT>try:<EOL><INDENT>accumulator.Reload()<EOL><DEDENT>except (OSError, IOError) as e:<EOL><INDENT>logger.error("<STR_LIT>", name, e)<EOL><DEDENT>except directory_watcher.DirectoryDeletedError:<EOL><INDENT>names_to_delete.add(name)<EOL><DEDENT><DEDENT>with self._accumulators_mutex:<EOL><INDENT>for name in names_to_delete:<EOL><INDENT>logger.warn("<STR_LIT>", name)<EOL>del self._accumulators[name]<EOL><DEDENT><DEDENT>logger.info('<STR_LIT>')<EOL>return self<EOL>
Call `Reload` on every `EventAccumulator`.
f8091:c0:m3