def __lt__(self, other):
other = as_dimension(other)
if self._value is None or other.value is None:
    return None
else:
    return self._value < other.value
Returns True if `self` is known to be less than `other`.

Dimensions are compared as follows:

```python
(tf.Dimension(m)    < tf.Dimension(n))    == (m < n)
(tf.Dimension(m)    < tf.Dimension(None)) == None
(tf.Dimension(None) < tf.Dimension(n))    == None
(tf.Dimension(None) < tf.Dimension(None)) == None
```

Args:
  other: Another Dimension.

Returns:
  The value of `self.value < other.value` if both are known, otherwise None.
f7910:c0:m23
def __le__(self, other):
other = as_dimension(other)
if self._value is None or other.value is None:
    return None
else:
    return self._value <= other.value
Returns True if `self` is known to be less than or equal to `other`.

Dimensions are compared as follows:

```python
(tf.Dimension(m)    <= tf.Dimension(n))    == (m <= n)
(tf.Dimension(m)    <= tf.Dimension(None)) == None
(tf.Dimension(None) <= tf.Dimension(n))    == None
(tf.Dimension(None) <= tf.Dimension(None)) == None
```

Args:
  other: Another Dimension.

Returns:
  The value of `self.value <= other.value` if both are known, otherwise None.
f7910:c0:m24
def __gt__(self, other):
other = as_dimension(other)
if self._value is None or other.value is None:
    return None
else:
    return self._value > other.value
Returns True if `self` is known to be greater than `other`.

Dimensions are compared as follows:

```python
(tf.Dimension(m)    > tf.Dimension(n))    == (m > n)
(tf.Dimension(m)    > tf.Dimension(None)) == None
(tf.Dimension(None) > tf.Dimension(n))    == None
(tf.Dimension(None) > tf.Dimension(None)) == None
```

Args:
  other: Another Dimension.

Returns:
  The value of `self.value > other.value` if both are known, otherwise None.
f7910:c0:m25
def __ge__(self, other):
other = as_dimension(other)
if self._value is None or other.value is None:
    return None
else:
    return self._value >= other.value
Returns True if `self` is known to be greater than or equal to `other`.

Dimensions are compared as follows:

```python
(tf.Dimension(m)    >= tf.Dimension(n))    == (m >= n)
(tf.Dimension(m)    >= tf.Dimension(None)) == None
(tf.Dimension(None) >= tf.Dimension(n))    == None
(tf.Dimension(None) >= tf.Dimension(None)) == None
```

Args:
  other: Another Dimension.

Returns:
  The value of `self.value >= other.value` if both are known, otherwise None.
f7910:c0:m26
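The four comparison methods above share one three-valued pattern: a comparison involving an unknown dimension yields `None` rather than a boolean. A minimal standalone sketch of that pattern (`Dim` here is a hypothetical stand-in, not the real Dimension class):

```python
# Minimal sketch of the three-valued comparison used by the methods above.
class Dim(object):
    def __init__(self, value):
        self.value = value  # an int, or None when the size is unknown

    def __lt__(self, other):
        # An unknown operand makes the answer unknown (None), not False.
        if self.value is None or other.value is None:
            return None
        return self.value < other.value

print(Dim(2) < Dim(3))     # True
print(Dim(3) < Dim(2))     # False
print(Dim(2) < Dim(None))  # None -- cannot be decided statically
```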
def __init__(self, dims):
if dims is None:
    self._dims = None
elif isinstance(dims, compat.bytes_or_text_types):
    raise TypeError(
        "<STR_LIT>"
        "<STR_LIT>" % dims
    )
elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):
    if dims.unknown_rank:
        self._dims = None
    else:
        self._dims = [
            as_dimension(dim.size if dim.size != -1 else None)
            for dim in dims.dim
        ]
elif isinstance(dims, TensorShape):
    self._dims = dims.dims
else:
    try:
        dims_iter = iter(dims)
    except TypeError:
        # Treat the scalar as a singleton list (deprecated behavior).
        self._dims = [as_dimension(dims)]
    else:
        self._dims = [as_dimension(d) for d in dims_iter]
self._ndims = None
Creates a new TensorShape with the given dimensions.

Args:
  dims: A list of Dimensions, or None if the shape is unspecified.
    DEPRECATED: A single integer is treated as a singleton list.

Raises:
  TypeError: If dims cannot be converted to a list of dimensions.
f7910:c1:m0
@property
def dims(self):
return self._dims
Returns a list of Dimensions, or None if the shape is unspecified.
f7910:c1:m3
@property
def ndims(self):
if self._dims is None:
    return None
else:
    if self._ndims is None:
        self._ndims = len(self._dims)
    return self._ndims
Returns the rank of this shape, or None if it is unspecified.
f7910:c1:m5
def __len__(self):
if self._dims is None:
    raise ValueError("<STR_LIT>")
return self.ndims
Returns the rank of this shape, or raises ValueError if unspecified.
f7910:c1:m6
def __bool__(self):
return self._dims is not None
Returns True if this shape contains non-zero information.
f7910:c1:m7
def __iter__(self):
if self._dims is None:
    raise ValueError("<STR_LIT>")
else:
    return iter(self._dims)
Returns `self.dims` if the rank is known, otherwise raises ValueError.
f7910:c1:m8
def __getitem__(self, key):
if self._dims is not None:
    if isinstance(key, slice):
        return TensorShape(self._dims[key])
    else:
        return self._dims[key]
else:
    if isinstance(key, slice):
        start = key.start if key.start is not None else 0
        stop = key.stop
        if key.step is not None:
            raise ValueError("<STR_LIT>")
        if stop is None:
            return unknown_shape()
        elif start < 0 or stop < 0:
            return unknown_shape()
        else:
            return unknown_shape(ndims=stop - start)
    else:
        return Dimension(None)
Returns the value of a dimension or a shape, depending on the key.

Args:
  key: If `key` is an integer, returns the dimension at that index;
    otherwise if `key` is a slice, returns a TensorShape whose dimensions
    are those selected by the slice from `self`.

Returns:
  A dimension if `key` is an integer, or a `TensorShape` if `key` is a slice.

Raises:
  ValueError: If `key` is a slice, and any of its elements are negative, or
    if `self` is completely unknown and the step is set.
f7910:c1:m9
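To make the unknown-rank branch above concrete: slicing with nonnegative `start`/`stop` and no step yields an unknown shape of known rank `stop - start`; anything else stays completely unknown. A plain-data sketch of just that logic (None models "completely unknown"; a list of Nones models known rank with unknown dimensions):

```python
# Hypothetical sketch of __getitem__ slicing on an unknown-rank shape.
def slice_unknown_shape(key):
    start = key.start if key.start is not None else 0
    stop = key.stop
    if key.step is not None:
        raise ValueError("step is not allowed on an unknown shape")
    if stop is None or start < 0 or stop < 0:
        return None                   # rank stays unknown
    return [None] * (stop - start)    # known rank, unknown dimensions

print(slice_unknown_shape(slice(1, 3)))  # [None, None]
print(slice_unknown_shape(slice(None)))  # None
```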
def num_elements(self):
if self.is_fully_defined():
    size = 1
    for dim in self._dims:
        size *= dim.value
    return size
else:
    return None
Returns the total number of elements, or None for incomplete shapes.
f7910:c1:m10
def merge_with(self, other):
other = as_shape(other)
if self._dims is None:
    return other
else:
    try:
        self.assert_same_rank(other)
        new_dims = []
        for i, dim in enumerate(self._dims):
            new_dims.append(dim.merge_with(other[i]))
        return TensorShape(new_dims)
    except ValueError:
        raise ValueError("<STR_LIT>" % (self, other))
Returns a `TensorShape` combining the information in `self` and `other`.

The dimensions in `self` and `other` are merged elementwise, according to
the rules defined for `Dimension.merge_with()`.

Args:
  other: Another `TensorShape`.

Returns:
  A `TensorShape` containing the combined information of `self` and `other`.

Raises:
  ValueError: If `self` and `other` are not convertible.
f7910:c1:m11
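A standalone illustration of the elementwise merge rule, using plain lists with None for unknown dimensions (a sketch of the semantics, not the real `Dimension.merge_with()`):

```python
# Sketch of elementwise shape merging; None marks an unknown dimension.
def merge_dim(d1, d2):
    if d1 is None:
        return d2
    if d2 is None or d1 == d2:
        return d1
    raise ValueError("dimensions %r and %r are not compatible" % (d1, d2))

def merge_shapes(s1, s2):
    if s1 is None:
        return s2
    if s2 is None:
        return s1
    if len(s1) != len(s2):
        raise ValueError("ranks differ: %r vs %r" % (s1, s2))
    return [merge_dim(d1, d2) for d1, d2 in zip(s1, s2)]

print(merge_shapes([32, None], [None, 784]))  # [32, 784]
```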
def concatenate(self, other):
other = as_shape(other)
if self._dims is None or other.dims is None:
    return unknown_shape()
else:
    return TensorShape(self._dims + other.dims)
Returns the concatenation of the dimensions in `self` and `other`.

*N.B.* If either `self` or `other` is completely unknown, concatenation
will discard information about the other shape. In the future, we might
support concatenation that preserves this information for use with slicing.

Args:
  other: Another `TensorShape`.

Returns:
  A `TensorShape` whose dimensions are the concatenation of the dimensions
  in `self` and `other`.
f7910:c1:m12
def assert_same_rank(self, other):
other = as_shape(other)
if self.ndims is not None and other.ndims is not None:
    if self.ndims != other.ndims:
        raise ValueError(
            "<STR_LIT>" % (self, other)
        )
Raises an exception if `self` and `other` do not have convertible ranks.

Args:
  other: Another `TensorShape`.

Raises:
  ValueError: If `self` and `other` do not represent shapes with the same
    rank.
f7910:c1:m13
def assert_has_rank(self, rank):
if self.ndims not in (None, rank):
    raise ValueError("<STR_LIT>" % (self, rank))
Raises an exception if `self` is not convertible with the given `rank`.

Args:
  rank: An integer.

Raises:
  ValueError: If `self` does not represent a shape with the given `rank`.
f7910:c1:m14
def with_rank(self, rank):
try:
    return self.merge_with(unknown_shape(ndims=rank))
except ValueError:
    raise ValueError("<STR_LIT>" % (self, rank))
Returns a shape based on `self` with the given rank.

This method promotes a completely unknown shape to one with a known rank.

Args:
  rank: An integer.

Returns:
  A shape that is at least as specific as `self` with the given rank.

Raises:
  ValueError: If `self` does not represent a shape with the given `rank`.
f7910:c1:m15
def with_rank_at_least(self, rank):
if self.ndims is not None and self.ndims < rank:
    raise ValueError("<STR_LIT>" % (self, rank))
else:
    return self
Returns a shape based on `self` with at least the given rank.

Args:
  rank: An integer.

Returns:
  A shape that is at least as specific as `self` with at least the given
  rank.

Raises:
  ValueError: If `self` does not represent a shape with at least the given
    `rank`.
f7910:c1:m16
def with_rank_at_most(self, rank):
if self.ndims is not None and self.ndims > rank:
    raise ValueError("<STR_LIT>" % (self, rank))
else:
    return self
Returns a shape based on `self` with at most the given rank.

Args:
  rank: An integer.

Returns:
  A shape that is at least as specific as `self` with at most the given
  rank.

Raises:
  ValueError: If `self` does not represent a shape with at most the given
    `rank`.
f7910:c1:m17
def is_convertible_with(self, other):
other = as_shape(other)
if self._dims is not None and other.dims is not None:
    if self.ndims != other.ndims:
        return False
    for x_dim, y_dim in zip(self._dims, other.dims):
        if not x_dim.is_convertible_with(y_dim):
            return False
return True
Returns True iff `self` is convertible with `other`.

Two possibly-partially-defined shapes are convertible if there exists a
fully-defined shape that both shapes can represent. Thus, convertibility
allows the shape inference code to reason about partially-defined shapes.
For example:

* TensorShape(None) is convertible with all shapes.
* TensorShape([None, None]) is convertible with all two-dimensional shapes,
  such as TensorShape([32, 784]), and also TensorShape(None). It is not
  convertible with, for example, TensorShape([None]) or
  TensorShape([None, None, None]).
* TensorShape([32, None]) is convertible with all two-dimensional shapes
  with size 32 in the 0th dimension, and also TensorShape([None, None]) and
  TensorShape(None). It is not convertible with, for example,
  TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]).
* TensorShape([32, 784]) is convertible with itself, and also
  TensorShape([32, None]), TensorShape([None, 784]),
  TensorShape([None, None]) and TensorShape(None). It is not convertible
  with, for example, TensorShape([32, 1, 784]) or TensorShape([None]).

The convertibility relation is reflexive and symmetric, but not transitive.
For example, TensorShape([32, 784]) is convertible with TensorShape(None),
and TensorShape(None) is convertible with TensorShape([4, 4]), but
TensorShape([32, 784]) is not convertible with TensorShape([4, 4]).

Args:
  other: Another TensorShape.

Returns:
  True iff `self` is convertible with `other`.
f7910:c1:m18
def assert_is_convertible_with(self, other):
if not self.is_convertible_with(other):
    raise ValueError("<STR_LIT>" % (self, other))
Raises an exception if `self` and `other` do not represent the same shape.

This method can be used to assert that there exists a shape that both
`self` and `other` represent.

Args:
  other: Another TensorShape.

Raises:
  ValueError: If `self` and `other` do not represent the same shape.
f7910:c1:m19
def most_specific_convertible_shape(self, other):
other = as_shape(other)
if self._dims is None or other.dims is None or self.ndims != other.ndims:
    return unknown_shape()
dims = [Dimension(None)] * self.ndims
for i, (d1, d2) in enumerate(zip(self._dims, other.dims)):
    if d1 is not None and d2 is not None and d1 == d2:
        dims[i] = d1
return TensorShape(dims)
Returns the most specific TensorShape convertible with `self` and `other`.

* TensorShape([None, 1]) is the most specific TensorShape convertible with
  both TensorShape([2, 1]) and TensorShape([5, 1]). Note that
  TensorShape(None) is also convertible with the above-mentioned
  TensorShapes.
* TensorShape([1, 2, 3]) is the most specific TensorShape convertible with
  both TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). There are also
  less specific TensorShapes convertible with the above-mentioned
  TensorShapes, e.g. TensorShape([1, 2, None]) and TensorShape(None).

Args:
  other: Another `TensorShape`.

Returns:
  A `TensorShape` which is the most specific convertible shape of `self`
  and `other`.
f7910:c1:m20
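The rule above is the dual of merging: a dimension is kept only where the two shapes agree, and everything else is marked unknown. A plain-list sketch of the semantics (not the real classes):

```python
# Sketch of the "most specific convertible shape" rule with plain lists;
# None marks an unknown dimension or (for a whole shape) unknown rank.
def most_specific(s1, s2):
    if s1 is None or s2 is None or len(s1) != len(s2):
        return None  # completely unknown shape
    return [d1 if (d1 is not None and d1 == d2) else None
            for d1, d2 in zip(s1, s2)]

print(most_specific([2, 1], [5, 1]))        # [None, 1]
print(most_specific([1, 2, 3], [1, 2, 3]))  # [1, 2, 3]
```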
def is_fully_defined(self):
return self._dims is not None and all(
    dim.value is not None for dim in self._dims
)
Returns True iff `self` is fully defined in every dimension.
f7910:c1:m21
def assert_is_fully_defined(self):
if not self.is_fully_defined():
    raise ValueError("<STR_LIT>" % self)
Raises an exception if `self` is not fully defined in every dimension.

Raises:
  ValueError: If `self` does not have a known value for every dimension.
f7910:c1:m22
def as_list(self):
if self._dims is None:
    raise ValueError("<STR_LIT>")
return [dim.value for dim in self._dims]
Returns a list of integers or `None` for each dimension.

Returns:
  A list of integers or `None` for each dimension.

Raises:
  ValueError: If `self` is an unknown shape with an unknown rank.
f7910:c1:m23
def as_proto(self):
if self._dims is None:
    return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
else:
    return tensor_shape_pb2.TensorShapeProto(
        dim=[
            tensor_shape_pb2.TensorShapeProto.Dim(
                size=-1 if d.value is None else d.value
            )
            for d in self._dims
        ]
    )
Returns this shape as a `TensorShapeProto`.
f7910:c1:m24
def __eq__(self, other):
try:
    other = as_shape(other)
except TypeError:
    return NotImplemented
return self._dims == other.dims
Returns True if `self` is equivalent to `other`.
f7910:c1:m25
def __ne__(self, other):
try:
    other = as_shape(other)
except TypeError:
    return NotImplemented
if self.ndims is None or other.ndims is None:
    raise ValueError("<STR_LIT>")
if self.ndims != other.ndims:
    return True
return self._dims != other.dims
Returns True if `self` is known to be different from `other`.
f7910:c1:m26
def _wrap_define_function(original_function):
def wrapper(*args, **kwargs):
    """<STR_LIT>"""
    has_old_names = False
    for old_name, new_name in _six.iteritems(_RENAMED_ARGUMENTS):
        if old_name in kwargs:
            has_old_names = True
            value = kwargs.pop(old_name)
            kwargs[new_name] = value
    if has_old_names:
        _logging.warning(
            "<STR_LIT>"
            "<STR_LIT>"
        )
    return original_function(*args, **kwargs)
return wrapper
Wraps absl.flags's define functions so tf.flags accepts old names.
f7911:m0
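A self-contained sketch of the same kwargs-renaming decorator pattern; the rename table and flag names below are hypothetical, not the actual tf.flags mapping:

```python
import logging

# Hypothetical rename table; the real _RENAMED_ARGUMENTS lives in the module.
_RENAMED = {'old_name': 'name'}

def _wrap(original_function):
    def wrapper(*args, **kwargs):
        renamed = False
        for old, new in _RENAMED.items():
            if old in kwargs:
                renamed = True
                kwargs[new] = kwargs.pop(old)  # rewrite the deprecated kwarg
        if renamed:
            logging.warning("deprecated flag argument names were rewritten")
        return original_function(*args, **kwargs)
    return wrapper

@_wrap
def define_string(name=None, default=None):
    print(name, default)

define_string(old_name='logdir', default='/tmp/logs')  # prints: logdir /tmp/logs
```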
def _usage(shorthelp):
doc = _sys.modules['__main__'].__doc__
if not doc:
    doc = '<STR_LIT>' % _sys.argv[0]
    doc = flags.text_wrap(doc, indent=' ', firstline_indent='<STR_LIT>')
else:
    num_specifiers = doc.count('%') - 2 * doc.count('<STR_LIT>')
    try:
        doc %= (_sys.argv[0],) * num_specifiers
    except (OverflowError, TypeError, ValueError):
        pass
if shorthelp:
    flag_str = flags.FLAGS.main_module_help()
else:
    flag_str = str(flags.FLAGS)
try:
    _sys.stdout.write(doc)
    if flag_str:
        _sys.stdout.write('<STR_LIT>')
        _sys.stdout.write(flag_str)
    _sys.stdout.write('\n')
except IOError as e:
    if e.errno != _errno.EPIPE:
        raise
Writes __main__'s docstring to stdout with some help text.

Args:
  shorthelp: bool, if True, prints only flags from the main module, rather
    than all flags.
f7913:m0
def run(main=None, argv=None):
_define_help_flags()
argv = flags.FLAGS(_sys.argv if argv is None else argv, known_only=True)
main = main or _sys.modules['__main__'].main
_sys.exit(main(argv))
Runs the program with an optional 'main' function and 'argv' list.
f7913:m2
@_lazy.lazy_load('<STR_LIT>')
def tf():
try:
    from tensorboard.compat import notf
except ImportError:
    try:
        import tensorflow
        return tensorflow
    except ImportError:
        pass
from tensorboard.compat import tensorflow_stub
return tensorflow_stub
Provide the root module of a TF-like API for use within TensorBoard.

By default this is equivalent to `import tensorflow as tf`, but it can be
used in combination with //tensorboard/compat:tensorflow (to fall back to a
stub TF API implementation if the real one is not available) or with
//tensorboard/compat:no_tensorflow (to force unconditional use of the stub).

Returns:
  The root module of a TF-like API, if available.

Raises:
  ImportError: if a TF-like API is not available.
f7914:m0
@_lazy.lazy_load('<STR_LIT>')
def tf2():
if tf.__version__.startswith('<STR_LIT>'):
    return tf
elif hasattr(tf, '<STR_LIT>') and hasattr(tf.compat, '<STR_LIT>'):
    return tf.compat.v2
raise ImportError('<STR_LIT>')
Provide the root module of a TF-2.0 API for use within TensorBoard.

Returns:
  The root module of a TF-2.0 API, if available.

Raises:
  ImportError: if a TF-2.0 API is not available.
f7914:m1
@_lazy.lazy_load('<STR_LIT>')
def _pywrap_tensorflow():
try:
    from tensorboard.compat import notf
except ImportError:
    try:
        from tensorflow.python import pywrap_tensorflow
        return pywrap_tensorflow
    except ImportError:
        pass
from tensorboard.compat.tensorflow_stub import pywrap_tensorflow
return pywrap_tensorflow
Provide pywrap_tensorflow access in TensorBoard.

pywrap_tensorflow cannot be accessed from tf.python.pywrap_tensorflow and
needs to be imported using
`from tensorflow.python import pywrap_tensorflow`. Therefore, we provide a
separate accessor function for it here.

NOTE: pywrap_tensorflow is not part of the TensorFlow API and this
dependency will go away soon.

Returns:
  The pywrap_tensorflow import, if available.

Raises:
  ImportError: if we couldn't import pywrap_tensorflow.
f7914:m2
def get_plugins():
return _PLUGINS[:]
Returns a list specifying TensorBoard's default first-party plugins.

Plugins are specified in this list either via a TBLoader instance to load
the plugin, or the TBPlugin class itself, which will be loaded using a
BasicLoader. This list can be passed to the
`tensorboard.program.TensorBoard` API.

:rtype: list[Union[base_plugin.TBLoader, Type[base_plugin.TBPlugin]]]
f7915:m0
def migrate_value(value):
handler = {
    '<STR_LIT>': _migrate_histogram_value,
    'image': _migrate_image_value,
    '<STR_LIT>': _migrate_audio_value,
    '<STR_LIT>': _migrate_scalar_value,
}.get(value.WhichOneof('value'))
return handler(value) if handler else value
Convert `value` to a new-style value, if necessary and possible.

An "old-style" value is a value that uses any `value` field other than the
`tensor` field. A "new-style" value is a value that uses the `tensor`
field. TensorBoard continues to support old-style values on disk; this
method converts them to new-style values so that further code need only
deal with one data format.

Arguments:
  value: A `Summary.Value` object. This argument is not modified.

Returns:
  If the `value` is an old-style value for which there is a new-style
  equivalent, the result is the new-style value. Otherwise---if the value
  is already new-style or does not yet have a new-style equivalent---the
  value will be returned unchanged.

:type value: Summary.Value
:rtype: Summary.Value
f7916:m0
def bench(image, thread_count):
threads = [threading.Thread(target=lambda: encoder.encode_png(image))
           for _ in xrange(thread_count)]
start_time = datetime.datetime.now()
for thread in threads:
    thread.start()
for thread in threads:
    thread.join()
end_time = datetime.datetime.now()
delta = (end_time - start_time).total_seconds()
return delta
Encode `image` to PNG on `thread_count` threads in parallel.

Returns:
  A `float` representing the number of seconds that it takes all threads
  to finish encoding `image`.
f7917:m0
def _image_of_size(image_size):
return np.random.uniform(0, <NUM_LIT>, [image_size, image_size, 3]).astype(np.uint8)
Generate a square RGB test image of the given side length.
f7917:m1
def _format_line(headers, fields):
assert len(fields) == len(headers), (fields, headers)
fields = ["<STR_LIT>" % field if isinstance(field, float) else str(field)
          for field in fields]
return ' '.join(' ' * max(0, len(header) - len(field)) + field
                for (header, field) in zip(headers, fields))
Format a line of a table.

Arguments:
  headers: A list of strings that are used as the table headers.
  fields: A list of the same length as `headers` where `fields[i]` is the
    entry for `headers[i]` in this row. Elements can be of arbitrary types.
    Pass `headers` to print the header row.

Returns:
  A pretty string.
f7917:m2
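Putting the three benchmark helpers together, a hypothetical driver loop might look like the following; it assumes `bench`, `_image_of_size`, and `_format_line` are importable from the surrounding benchmark module:

```python
# Hypothetical driver for the helpers above; each row is right-aligned
# under its header by _format_line.
headers = ('image_size', 'threads', 'seconds')
print(_format_line(headers, headers))  # header row
for image_size in (64, 256, 1024):
    image = _image_of_size(image_size)
    for thread_count in (1, 2, 4):
        seconds = bench(image, thread_count)
        print(_format_line(headers, (image_size, thread_count, seconds)))
```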
def get_temp_dir():
global _temp_dir
if not _temp_dir:
    if os.environ.get('<STR_LIT>'):
        temp_dir = tempfile.mkdtemp(prefix=os.environ['<STR_LIT>'])
    else:
        temp_dir = tempfile.mkdtemp()

    def delete_temp_dir(dirname=temp_dir):
        try:
            shutil.rmtree(dirname)
        except OSError as e:
            logger.error('<STR_LIT>', dirname, e)

    atexit.register(delete_temp_dir)
    _temp_dir = temp_dir
return _temp_dir
Return a temporary directory for tests to use.
f7918:m0
def main(*args, **kwargs):
return unittest.main(*args, **kwargs)
Passes args and kwargs through to unittest.main.
f7918:m1
def assertItemsEqual(self, actual, expected, msg=None):
return six.assertCountEqual(super(TestCase, self), actual, expected, msg)
Test that sequence actual contains the same elements as expected, regardless of their order. Same as assertCountEqual in Python 3 with unittest.TestCase.
f7918:c0:m1
def assertStartsWith(self, actual, expected_start, msg=None):
if not actual.startswith(expected_start):
    fail_msg = '<STR_LIT>' % (actual, expected_start)
    fail_msg += '<STR_LIT>' % (msg) if msg else '<STR_LIT>'
    self.fail(fail_msg)
Test that string actual starts with string expected_start.
f7918:c0:m2
def get_temp_dir(self):
if not self._tempdir:
    self._tempdir = tempfile.mkdtemp(dir=get_temp_dir())
return self._tempdir
Returns a unique temporary directory for the test to use.

If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This ensures that across different runs tests will not be able
to pollute each other's environment. If you need multiple unique
directories within a single test, you should use tempfile.mkdtemp as
follows:
  tempfile.mkdtemp(dir=self.get_temp_dir())

Returns:
  string, the path to the unique temporary directory created for this test.
f7918:c0:m3
def run_main():
program.setup_environment()
if getattr(tf, '<STR_LIT>', '<STR_LIT>') == '<STR_LIT>':
    print("<STR_LIT>", file=sys.stderr)
tensorboard = program.TensorBoard(default.get_plugins(),
                                  program.get_default_assets_zip_provider())
try:
    from absl import app
    from absl.flags import argparse_flags
    app.run(tensorboard.main, flags_parser=tensorboard.configure)
    raise AssertionError("<STR_LIT>")
except ImportError:
    pass
except base_plugin.FlagsError as e:
    print("<STR_LIT>" % e, file=sys.stderr)
    sys.exit(1)
tensorboard.configure(sys.argv)
sys.exit(tensorboard.main())
Initializes flags and calls main().
f7920:m0
def __init__(self, context):
self._histograms_plugin = histograms_plugin.HistogramsPlugin(context)
self._multiplexer = context.multiplexer
Instantiates DistributionsPlugin via TensorBoard core.

Args:
  context: A base_plugin.TBContext instance.
f7923:c0:m0
def is_active(self):
return self._histograms_plugin.is_active()
This plugin is active iff any run has at least one histogram tag. (The distributions plugin uses the same data source as the histogram plugin.)
f7923:c0:m2
def distributions_impl(self, tag, run):
(histograms, mime_type) = self._histograms_plugin.histograms_impl(
    tag, run, downsample_to=self.SAMPLE_SIZE)
return ([self._compress(histogram) for histogram in histograms],
        mime_type)
Result of the form `(body, mime_type)`, or raises `ValueError`.
f7923:c0:m3
@wrappers.Request.application
def distributions_route(self, request):
tag = request.args.get('<STR_LIT>')
run = request.args.get('<STR_LIT>')
try:
    (body, mime_type) = self.distributions_impl(tag, run)
    code = 200
except ValueError as e:
    (body, mime_type) = (str(e), '<STR_LIT>')
    code = <NUM_LIT>
return http_util.Respond(request, body, mime_type, code=code)
Given a tag and single run, return an array of compressed histograms.
f7923:c0:m7
def compress_histogram_proto(histo, bps=NORMAL_HISTOGRAM_BPS):
if not histo.num:
    return [CompressedHistogramValue(b, 0.0) for b in bps]
bucket = np.array(histo.bucket)
bucket_limit = list(histo.bucket_limit)
weights = (bucket * bps[-1] / (bucket.sum() or 1.0)).cumsum()
values = []
j = 0
while j < len(bps):
    i = np.searchsorted(weights, bps[j], side='right')
    while i < len(weights):
        cumsum = weights[i]
        cumsum_prev = weights[i - 1] if i > 0 else 0.0
        if cumsum == cumsum_prev:  # Skip empty buckets.
            i += 1
            continue
        if not i or not cumsum_prev:
            lhs = histo.min
        else:
            lhs = max(bucket_limit[i - 1], histo.min)
        rhs = min(bucket_limit[i], histo.max)
        weight = _lerp(bps[j], cumsum_prev, cumsum, lhs, rhs)
        values.append(CompressedHistogramValue(bps[j], weight))
        j += 1
        break
    else:
        break
while j < len(bps):
    values.append(CompressedHistogramValue(bps[j], histo.max))
    j += 1
return values
Creates a fixed-size histogram by adding compression to accumulated state.

This routine transforms a histogram at a particular step by interpolating
its variable number of buckets to represent their cumulative weight at a
constant number of compression points. This significantly reduces the size
of the histogram and makes it suitable for a two-dimensional area plot
where the output of this routine constitutes the ranges for a single x
coordinate.

Args:
  histo: A HistogramProto object.
  bps: Compression points represented in basis points, 1/100ths of a
    percent. Defaults to normal distribution.

Returns:
  List of values for each basis point.
f7924:m0
def compress_histogram(buckets, bps=NORMAL_HISTOGRAM_BPS):
buckets = np.array(buckets)
if not buckets.size:
    return [CompressedHistogramValue(b, 0.0) for b in bps]
(minmin, maxmax) = (buckets[0][0], buckets[-1][1])
counts = buckets[:, 2]
right_edges = list(buckets[:, 1])
weights = (counts * bps[-1] / (counts.sum() or 1.0)).cumsum()
result = []
bp_index = 0
while bp_index < len(bps):
    i = np.searchsorted(weights, bps[bp_index], side='right')
    while i < len(weights):
        cumsum = weights[i]
        cumsum_prev = weights[i - 1] if i > 0 else 0.0
        if cumsum == cumsum_prev:  # Skip empty buckets.
            i += 1
            continue
        if not i or not cumsum_prev:
            lhs = minmin
        else:
            lhs = max(right_edges[i - 1], minmin)
        rhs = min(right_edges[i], maxmax)
        weight = _lerp(bps[bp_index], cumsum_prev, cumsum, lhs, rhs)
        result.append(CompressedHistogramValue(bps[bp_index], weight))
        bp_index += 1
        break
    else:
        break
while bp_index < len(bps):
    result.append(CompressedHistogramValue(bps[bp_index], maxmax))
    bp_index += 1
return result
Creates a fixed-size histogram by adding compression to accumulated state.

This routine transforms a histogram at a particular step by linearly
interpolating its variable number of buckets to represent their cumulative
weight at a constant number of compression points. This significantly
reduces the size of the histogram and makes it suitable for a
two-dimensional area plot where the output of this routine constitutes the
ranges for a single x coordinate.

Args:
  buckets: A list of buckets, each of which is a 3-tuple of the form
    `(min, max, count)`.
  bps: Compression points represented in basis points, 1/100ths of a
    percent. Defaults to normal distribution.

Returns:
  List of values for each basis point.
f7924:m1
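For orientation, a hypothetical call, assuming the module-level `NORMAL_HISTOGRAM_BPS` default and the `CompressedHistogramValue` namedtuple referenced above: each `(min, max, count)` bucket contributes its count to a cumulative weight curve, which is then sampled at the requested basis points.

```python
# Hypothetical usage of compress_histogram with three toy buckets.
buckets = [
    (0.0, 1.0, 10.0),   # (left edge, right edge, count)
    (1.0, 2.0, 30.0),
    (2.0, 4.0, 60.0),
]
for basis_point, value in compress_histogram(buckets):
    print(basis_point, value)  # one interpolated value per basis point
```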
def _lerp(x, x0, x1, y0, y1):
return y0 + (x - x0) * float(y1 - y0) / (x1 - x0)
Affinely map from [x0, x1] onto [y0, y1].
f7924:m2
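In other words, `_lerp` solves the proportion (x - x0)/(x1 - x0) = (y - y0)/(y1 - y0) for y. A quick worked check:

```python
def _lerp(x, x0, x1, y0, y1):
    # Affinely map x from [x0, x1] onto [y0, y1].
    return y0 + (x - x0) * float(y1 - y0) / (x1 - x0)

# Map basis points [0, 10000] onto a bucket spanning [-1.0, 1.0]:
print(_lerp(5000, 0, 10000, -1.0, 1.0))  # 0.0 -- midpoint maps to midpoint
print(_lerp(2500, 0, 10000, -1.0, 1.0))  # -0.5
```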
def _parse_positive_int_param(request, param_name):
param = request.args.get(param_name)
if not param:
    return None
try:
    param = int(param)
    if param <= 0:
        raise ValueError()
    return param
except ValueError:
    return -1
Parses and asserts a positive (>0) integer query parameter.

Args:
  request: The Werkzeug Request object.
  param_name: Name of the parameter.

Returns:
  Param, or None, or -1 if parameter is not a positive integer.
f7925:m3
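The tri-state contract (None for absent, the value when valid, -1 for anything else) can be exercised with a minimal stand-in for the request object; this assumes `_parse_positive_int_param` above is importable, and `FakeRequest` is a hypothetical helper:

```python
# Hypothetical stand-in for a Werkzeug request, for exercising the parser.
class FakeRequest(object):
    def __init__(self, args):
        self.args = args  # a plain dict supports .get() like request.args

print(_parse_positive_int_param(FakeRequest({}), 'limit'))               # None
print(_parse_positive_int_param(FakeRequest({'limit': '25'}), 'limit'))  # 25
print(_parse_positive_int_param(FakeRequest({'limit': '-3'}), 'limit'))  # -1
print(_parse_positive_int_param(FakeRequest({'limit': 'abc'}), 'limit')) # -1
```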
def _using_tf():
return tf.__version__ != '<STR_LIT>'
Return true if we're not using the fake TF API stub implementation.
f7925:m5
def __init__(self, num_points):
self.num_points = num_points
self.column_names = []
self.name_to_values = {}
Constructs metadata for an embedding of the specified size.

Args:
  num_points: Number of points in the embedding.
f7925:c1:m0
def add_column(self, column_name, column_values):
if isinstance(column_values, list) and isinstance(column_values[0], list):
    raise ValueError('<STR_LIT>'
                     '<STR_LIT>')
if isinstance(column_values, np.ndarray) and column_values.ndim != 1:
    raise ValueError('<STR_LIT>'
                     '<STR_LIT>' % column_values.ndim)
if len(column_values) != self.num_points:
    raise ValueError('<STR_LIT>'
                     '<STR_LIT>' % (self.num_points, len(column_values)))
if column_name in self.name_to_values:
    raise ValueError('<STR_LIT>' % column_name)
self.column_names.append(column_name)
self.name_to_values[column_name] = column_values
Adds a named column of metadata values.

Args:
  column_name: Name of the column.
  column_values: 1D array/list/iterable holding the column values. Must be
    of length `num_points`. The i-th value corresponds to the i-th point.

Raises:
  ValueError: If `column_values` is not a 1D array, or is not of length
    `num_points`, or the `name` is already used.
f7925:c1:m1
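Hypothetical usage for a three-point embedding; `EmbeddingMetadata` stands in for the class above, whose actual name is not shown in this extract:

```python
import numpy as np

# Hypothetical instance of the metadata container described above.
metadata = EmbeddingMetadata(num_points=3)
metadata.add_column('label', ['cat', 'dog', 'bird'])
metadata.add_column('weight', np.array([0.1, 0.5, 0.4]))

# These would be rejected by the checks above:
# metadata.add_column('bad', np.zeros((3, 2)))  # ValueError: not 1-D
# metadata.add_column('bad', ['a', 'b'])        # ValueError: wrong length
```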
def __init__(self, context):
self.multiplexer = context.multiplexer
self.logdir = context.logdir
self._handlers = None
self.readers = {}
self.run_paths = None
self._configs = {}
self.old_num_run_paths = None
self.config_fpaths = None
self.tensor_cache = LRUCache(_TENSOR_CACHE_CAPACITY)
self._is_active = False
self._thread_for_determining_is_active = None
if self.multiplexer:
    self.run_paths = self.multiplexer.RunPaths()
Instantiates ProjectorPlugin via TensorBoard core.

Args:
  context: A base_plugin.TBContext instance.
f7925:c2:m0
def is_active(self):
if not self.multiplexer:
    return False
if self._is_active:
    return True
if self._thread_for_determining_is_active:
    return self._is_active
new_thread = threading.Thread(
    target=self._determine_is_active,
    name='<STR_LIT>')
self._thread_for_determining_is_active = new_thread
new_thread.start()
return False
Determines whether this plugin is active.

This plugin is only active if any run has an embedding.

Returns:
  Whether any run has embedding data to show in the projector.
f7925:c2:m2
def _determine_is_active(self):
if self.configs:
    self._is_active = True
self._thread_for_determining_is_active = None
Determines whether the plugin is active.

This method is run in a separate thread so that the plugin can offer an
immediate response to whether it is active, while determining in the
background whether it should be active.
f7925:c2:m3
@property
def configs(self):
run_path_pairs = list(self.run_paths.items())
self._append_plugin_asset_directories(run_path_pairs)
if not run_path_pairs:
    run_path_pairs.append(('.', self.logdir))
if (self._run_paths_changed() or
    _latest_checkpoints_changed(self._configs, run_path_pairs)):
    self.readers = {}
    self._configs, self.config_fpaths = self._read_latest_config_files(
        run_path_pairs)
    self._augment_configs_with_checkpoint_info()
return self._configs
Returns a map of run paths to `ProjectorConfig` protos.
f7925:c2:m4
@wrappers.Request.application
def _serve_runs(self, request):
return Respond(request, list(self.configs.keys()), 'application/json')
Returns a list of runs that have embeddings.
f7925:c2:m14
def visualize_embeddings(summary_writer, config):
logdir = summary_writer.get_logdir()
if logdir is None:
    raise ValueError('<STR_LIT>')
config_pbtxt = _text_format.MessageToString(config)
path = os.path.join(logdir, _projector_plugin.PROJECTOR_FILENAME)
with tf.io.gfile.GFile(path, 'w') as f:
    f.write(config_pbtxt)
Stores a config file used by the embedding projector.

Args:
  summary_writer: The summary writer used for writing events.
  config: `tf.contrib.tensorboard.plugins.projector.ProjectorConfig` proto
    that holds the configuration for the projector, such as paths to
    checkpoint files and metadata files for the embeddings. If
    `config.model_checkpoint_path` is None, it defaults to the `logdir`
    used by the summary_writer.

Raises:
  ValueError: If the summary writer does not have a `logdir`.
f7926:m0
def run():
step = tf.compat.v1.placeholder(tf.float32, shape=[])
with tf.name_scope('<STR_LIT>'):
    summary_lib.scalar('foo', tf.pow(<NUM_LIT>, step))
    summary_lib.scalar('bar', tf.pow(<NUM_LIT>, step + 2))
    middle_baz_value = step + 4 * tf.random.uniform([]) - 2
    summary_lib.scalar('<STR_LIT>', middle_baz_value)
    summary_lib.scalar('<STR_LIT>',
                       middle_baz_value - <NUM_LIT> - tf.random.uniform([]))
    summary_lib.scalar('<STR_LIT>',
                       middle_baz_value + <NUM_LIT> + tf.random.uniform([]))
with tf.name_scope('<STR_LIT>'):
    summary_lib.scalar('<STR_LIT>', tf.cos(step))
    summary_lib.scalar('<STR_LIT>', tf.sin(step))
    summary_lib.scalar('<STR_LIT>', tf.tan(step))
merged_summary = tf.compat.v1.summary.merge_all()
with tf.compat.v1.Session() as sess, tf.summary.FileWriter(LOGDIR) as writer:
    layout_summary = summary_lib.custom_scalar_pb(
        layout_pb2.Layout(category=[
            layout_pb2.Category(
                title='<STR_LIT>',
                chart=[
                    layout_pb2.Chart(
                        title='<STR_LIT>',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'<STR_LIT>'],)),
                    layout_pb2.Chart(
                        title='<STR_LIT>',
                        margin=layout_pb2.MarginChartContent(
                            series=[
                                layout_pb2.MarginChartContent.Series(
                                    value='<STR_LIT>',
                                    lower='<STR_LIT>',
                                    upper='<STR_LIT>'),
                            ],)),
                ]),
            layout_pb2.Category(
                title='<STR_LIT>',
                chart=[
                    layout_pb2.Chart(
                        title='<STR_LIT>',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'<STR_LIT>', r'<STR_LIT>'],)),
                    layout_pb2.Chart(
                        title='<STR_LIT>',
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r'<STR_LIT>'],)),
                ],
                closed=True),
        ]))
    writer.add_summary(layout_summary)
    for i in xrange(<NUM_LIT>):
        summary = sess.run(merged_summary, feed_dict={step: i})
        writer.add_summary(summary, global_step=i)
Run custom scalar demo and generate event files.
f7929:m0
def op(scalars_layout, collections=None):
import tensorflow.compat.v1 as tf
assert isinstance(scalars_layout, layout_pb2.Layout)
summary_metadata = metadata.create_summary_metadata()
return tf.summary.tensor_summary(name=metadata.CONFIG_SUMMARY_TAG,
                                 tensor=tf.constant(
                                     scalars_layout.SerializeToString(),
                                     dtype=tf.string),
                                 collections=collections,
                                 summary_metadata=summary_metadata)
Creates a summary that contains a layout.

When users navigate to the custom scalars dashboard, they will see a layout
based on the proto provided to this function.

Args:
  scalars_layout: The scalars_layout_pb2.Layout proto that specifies the
    layout.
  collections: Optional list of graph collections keys. The new summary op
    is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.

Returns:
  A tensor summary op that writes the layout to disk.
f7930:m0
def pb(scalars_layout):
import tensorflow.compat.v1 as tf
assert isinstance(scalars_layout, layout_pb2.Layout)
tensor = tf.make_tensor_proto(
    scalars_layout.SerializeToString(), dtype=tf.string)
tf_summary_metadata = tf.SummaryMetadata.FromString(
    metadata.create_summary_metadata().SerializeToString())
summary = tf.Summary()
summary.value.add(tag=metadata.CONFIG_SUMMARY_TAG,
                  metadata=tf_summary_metadata,
                  tensor=tensor)
return summary
Creates a summary that contains a layout.

When users navigate to the custom scalars dashboard, they will see a layout
based on the proto provided to this function.

Args:
  scalars_layout: The scalars_layout_pb2.Layout proto that specifies the
    layout.

Returns:
  A summary proto containing the layout.
f7930:m1
def create_summary_metadata():
return summary_pb2.SummaryMetadata(
    plugin_data=summary_pb2.SummaryMetadata.PluginData(
        plugin_name=PLUGIN_NAME))
Create a `SummaryMetadata` proto for custom scalar plugin data.

Returns:
  A `summary_pb2.SummaryMetadata` protobuf object.
f7932:m0
def __init__(self, context):
self._logdir = context.logdir
self._multiplexer = context.multiplexer
self._plugin_name_to_instance = context.plugin_name_to_instance
Instantiates ScalarsPlugin via TensorBoard core.

Args:
  context: A base_plugin.TBContext instance.
f7934:c0:m0
def _get_scalars_plugin(self):
if scalars_metadata.PLUGIN_NAME in self._plugin_name_to_instance:
    return self._plugin_name_to_instance[scalars_metadata.PLUGIN_NAME]
return None
Tries to get the scalars plugin.

Returns:
  The scalars plugin, or None if it is not yet registered.
f7934:c0:m1
def is_active(self):
if not self._multiplexer:
    return False
scalars_plugin_instance = self._get_scalars_plugin()
if not (scalars_plugin_instance and
        scalars_plugin_instance.is_active()):
    return False
return bool(self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME))
This plugin is active if two conditions hold:

1. The scalars plugin is registered and active.
2. There is a custom layout for the dashboard.

Returns:
  A boolean. Whether the plugin is active.
f7934:c0:m3
def download_data_impl(self, run, tag, response_format):
scalars_plugin_instance = self._get_scalars_plugin()
if not scalars_plugin_instance:
    raise ValueError(('<STR_LIT>'
                      '<STR_LIT>'))
body, mime_type = scalars_plugin_instance.scalars_impl(
    tag, run, None, response_format)
return body, mime_type
Provides a response for downloading scalars data for a data series.

Args:
  run: The run.
  tag: The specific tag.
  response_format: A string. One of the values of the OutputFormat enum of
    the scalar plugin.

Raises:
  ValueError: If the scalars plugin is not registered.

Returns:
  2 entities:
  - A JSON object response body.
  - A mime type (string) for the response.
f7934:c0:m5
@wrappers.Request.application
def scalars_route(self, request):
tag_regex_string = request.args.get('<STR_LIT>')
run = request.args.get('<STR_LIT>')
mime_type = 'application/json'
try:
    body = self.scalars_impl(run, tag_regex_string)
except ValueError as e:
    return http_util.Respond(
        request=request,
        content=str(e),
        content_type='<STR_LIT>',
        code=<NUM_LIT>)
return http_util.Respond(request, body, mime_type)
Given a tag regex and single run, return ScalarEvents.

This route takes 2 GET params:
  run: A run string to find tags for.
  tag: A string that is a regex used to find matching tags.

The response is a JSON object:
{
  // Whether the regular expression is valid. Also false if empty.
  regexValid: boolean,
  // An object mapping tag name to a list of ScalarEvents.
  payload: Object<string, ScalarEvent[]>,
}
f7934:c0:m6
def scalars_impl(self, run, tag_regex_string):
if not tag_regex_string:
    return {
        _REGEX_VALID_PROPERTY: False,
        _TAG_TO_EVENTS_PROPERTY: {},
    }
try:
    regex = re.compile(tag_regex_string)
except re.error:
    return {
        _REGEX_VALID_PROPERTY: False,
        _TAG_TO_EVENTS_PROPERTY: {},
    }
run_to_data = self._multiplexer.PluginRunToTagToContent(
    scalars_metadata.PLUGIN_NAME)
tag_to_data = None
try:
    tag_to_data = run_to_data[run]
except KeyError:
    payload = {}
if tag_to_data:
    scalars_plugin_instance = self._get_scalars_plugin()
    if not scalars_plugin_instance:
        raise ValueError(('<STR_LIT>'
                          '<STR_LIT>'))
    form = scalars_plugin.OutputFormat.JSON
    payload = {
        tag: scalars_plugin_instance.scalars_impl(tag, run, None, form)[0]
        for tag in tag_to_data.keys()
        if regex.match(tag)
    }
return {
    _REGEX_VALID_PROPERTY: True,
    _TAG_TO_EVENTS_PROPERTY: payload,
}
Given a tag regex and single run, return ScalarEvents.

Args:
  run: A run string.
  tag_regex_string: A regular expression that captures portions of tags.

Raises:
  ValueError: If the scalars plugin is not registered.

Returns:
  A dictionary that is the JSON-able response.
f7934:c0:m7
@wrappers.Request.application
def layout_route(self, request):
body = self.layout_impl()
return http_util.Respond(request, body, 'application/json')
Fetches the custom layout specified by the config file in the logdir.

If more than one run contains a layout, this method merges the layouts by
merging charts within individual categories. If two categories with the
same name are found, the charts within are merged. The merging is based on
the order of the runs to which the layouts are written.

The response is a JSON object mirroring properties of the Layout proto if
a layout for any run is found.

The response is an empty object if no layout could be found.
f7934:c0:m8
@abstractmethod
def get_plugin_apps(self):
raise NotImplementedError()
Returns a set of WSGI applications that the plugin implements.

Each application gets registered with the tensorboard app and is served
under a prefix path that includes the name of the plugin.

Returns:
  A dict mapping route paths to WSGI applications. Each route path should
  include a leading slash.
f7935:c0:m0
@abstractmethod
def is_active(self):
raise NotImplementedError()
Determines whether this plugin is active.

A plugin may not be active, for instance, if it lacks relevant data. If a
plugin is inactive, the frontend may avoid issuing requests to its routes.

Returns:
  A boolean value. Whether this plugin is active.
f7935:c0:m1
def __init__(self,
             assets_zip_provider=None,
             db_connection_provider=None,
             db_module=None,
             db_uri=None,
             flags=None,
             logdir=None,
             multiplexer=None,
             plugin_name_to_instance=None,
             window_title=None):
self.assets_zip_provider = assets_zip_provider
self.db_connection_provider = db_connection_provider
self.db_module = db_module
self.db_uri = db_uri
self.flags = flags
self.logdir = logdir
self.multiplexer = multiplexer
self.plugin_name_to_instance = plugin_name_to_instance
self.window_title = window_title
Instantiates magic container.

The argument list is sorted and may be extended in the future; therefore,
callers must pass only named arguments to this constructor.

Args:
  assets_zip_provider: A function that returns a newly opened file handle
    for a zip file containing all static assets. The file names inside the
    zip file are considered absolute paths on the web server. The file
    handle this function returns must be closed. It is assumed that you
    will pass this file handle to zipfile.ZipFile. This zip file should
    also have been created by the tensorboard_zip_file build rule.
  db_connection_provider: Function taking no arguments that returns a
    PEP-249 database Connection object, or None if multiplexer should be
    used instead. The returned value must be closed, and is safe to use in
    a `with` statement. It is also safe to assume that calling this
    function is cheap. The returned connection must only be used by a
    single thread. Things like connection pooling are considered
    implementation details of the provider.
  db_module: A PEP-249 DB Module, e.g. sqlite3. This is useful for
    accessing things like date time constructors. This value will be None
    if we are not in SQL mode and multiplexer should be used instead.
  db_uri: The string db URI TensorBoard was started with. If this is set,
    the logdir should be None.
  flags: An object of the runtime flags provided to TensorBoard to their
    values.
  logdir: The string logging directory TensorBoard was started with. If
    this is set, the db_uri should be None.
  multiplexer: An EventMultiplexer with underlying TB data. Plugins should
    copy this data over to the database when the db fields are set.
  plugin_name_to_instance: A mapping between plugin name to instance.
    Plugins may use this property to access other plugins. The context
    object is passed to plugins during their construction, so a given
    plugin may be absent from this mapping until it is registered. Plugin
    logic should handle cases in which a plugin is absent from this
    mapping, lest a KeyError be raised.
  window_title: A string specifying the window title.
f7935:c1:m0
def define_flags(self, parser):
pass
Adds plugin-specific CLI flags to parser.

The default behavior is to do nothing. When overriding this method, it's
recommended that plugins call the `parser.add_argument_group(plugin_name)`
method for readability. No flags should be specified that would cause
`parse_args([])` to fail.

Args:
  parser: The argument parsing object, which may be mutated.
f7935:c2:m0
def fix_flags(self, flags):
pass
Allows flag values to be corrected or validated after parsing.

Args:
  flags: The parsed argparse.Namespace object.

Raises:
  base_plugin.FlagsError: If a flag is invalid or a required flag is not
    passed.
f7935:c2:m1
def __init__(self, plugin_class):
self._plugin_class = plugin_class
Creates simple plugin instance maker.

:param plugin_class: :class:`TBPlugin`
f7935:c3:m0
def __init__(self,
             events_writer_manager,
             numerics_alert_callback=None):
super(DebuggerDataStreamHandler, self).__init__()
self._events_writer_manager = events_writer_manager
self._numerics_alert_callback = numerics_alert_callback
self._session_run_index = -1
Constructor of DebuggerDataStreamHandler.

Args:
  events_writer_manager: Manages writing events to disk.
  numerics_alert_callback: An optional callback run every time a health
    pill event with bad values (NaN, -Inf, or +Inf) is received. The
    callback takes the event as a parameter.
f7939:c0:m0
def on_core_metadata_event(self, event):
self._session_run_index = self._parse_session_run_index(event)
Implementation of the core metadata-carrying Event proto callback.

Args:
  event: An Event proto that contains core metadata about the debugged
    Session::Run() in its log_message.message field, as a JSON string.
    See the doc string of debug_data.DebugDumpDir.core_metadata for
    details.
f7939:c0:m1
def on_graph_def(self, graph_def, device_name, wall_time):
del device_name
del wall_time
del graph_def
Implementation of the GraphDef-carrying Event proto callback.

Args:
  graph_def: A GraphDef proto. N.B.: The GraphDef is from the core runtime
    of a debugged Session::Run() call, after graph partition. Therefore it
    may differ from the GraphDef available to the general TensorBoard. For
    example, the GraphDef in general TensorBoard may get partitioned for
    multiple devices (CPUs and GPUs), each of which will generate a
    GraphDef event proto sent to this method.
  device_name: Name of the device on which the graph was created.
  wall_time: An epoch timestamp (in microseconds) for the graph.
f7939:c0:m2
def on_value_event(self, event):
if not event.summary.value:
    logger.warn("<STR_LIT>")
    return
watch_key = event.summary.value[0].node_name
if not watch_key.endswith(constants.DEBUG_NUMERIC_SUMMARY_SUFFIX):
    # Ignore events that lack the debug numeric summary suffix.
    return
node_name_and_output_slot = watch_key[
    :-len(constants.DEBUG_NUMERIC_SUMMARY_SUFFIX)]
shape = tensor_util.make_ndarray(event.summary.value[0].tensor).shape
if (len(shape) != 1 or
    shape[0] < constants.MIN_DEBUG_NUMERIC_SUMMARY_TENSOR_LENGTH):
    logger.warn("<STR_LIT>"
                "<STR_LIT>" % shape)
    return
match = re.match(r"<STR_LIT>", node_name_and_output_slot)
if not match:
    logger.warn(
        ("<STR_LIT>"
         "<STR_LIT>"),
        node_name_and_output_slot)
    return
if self._session_run_index >= 0:
    event.step = self._session_run_index
else:
    # The session run index could not be determined; use a timestamp.
    event.step = int(time.time() * <NUM_LIT>)
self._events_writer_manager.write_event(event)
alert = numerics_alert.extract_numerics_alert(event)
if self._numerics_alert_callback and alert:
    self._numerics_alert_callback(alert)
Records the summary values based on an updated message from the debugger.

Logs an error message if writing the event to disk fails.

Args:
  event: The Event proto to be processed.
f7939:c0:m3
def _parse_session_run_index(self, event):
metadata_string = event.log_message.message
try:
    metadata = json.loads(metadata_string)
except ValueError as e:
    logger.error(
        "<STR_LIT>",
        metadata_string, e)
    return constants.SENTINEL_FOR_UNDETERMINED_STEP
try:
    return metadata["<STR_LIT>"]
except KeyError:
    logger.error(
        "<STR_LIT>",
        metadata_string)
    return constants.SENTINEL_FOR_UNDETERMINED_STEP
Parses the session_run_index value from the event proto.

Args:
  event: The event with metadata that contains the session_run_index.

Returns:
  The int session_run_index value, or
  constants.SENTINEL_FOR_UNDETERMINED_STEP if it could not be determined.
f7939:c0:m4
def __init__(self,
             receive_port,
             logdir,
             always_flush=False):
debugger_directory = os.path.join(
    os.path.expanduser(logdir), constants.DEBUGGER_DATA_DIRECTORY_NAME)
if not tf.io.gfile.exists(debugger_directory):
    try:
        tf.io.gfile.makedirs(debugger_directory)
        logger.info("<STR_LIT>", debugger_directory)
    except tf.errors.OpError as e:
        logger.fatal("<STR_LIT>", debugger_directory, e)
self._events_writer_manager = events_writer_manager_lib.EventsWriterManager(
    events_directory=debugger_directory,
    always_flush=always_flush)
try:
    self._events_writer_manager.write_event(
        tf.compat.v1.Event(
            wall_time=0, step=0, file_version=constants.EVENTS_VERSION))
except IOError as e:
    logger.error(
        "<STR_LIT>",
        self._events_writer_manager.get_current_file_name(), e)
self._registry_backup_file_path = os.path.join(
    debugger_directory, constants.ALERT_REGISTRY_BACKUP_FILE_NAME)
initial_data = None
if tf.io.gfile.exists(self._registry_backup_file_path):
    with tf.io.gfile.GFile(self._registry_backup_file_path, "r") as backup_file:
        try:
            initial_data = json.load(backup_file)
        except ValueError as err:
            logger.error(
                "<STR_LIT>",
                self._registry_backup_file_path, err)
self._numerics_alert_registry = numerics_alert.NumericsAlertRegistry(
    initialization_list=initial_data)
self._numerics_alert_lock = threading.Lock()
curried_handler_constructor = functools.partial(
    DebuggerDataStreamHandler,
    self._events_writer_manager,
    self._numerics_alert_callback)
grpc_debug_server.EventListenerBaseServicer.__init__(
    self, receive_port, curried_handler_constructor)
Receives health pills from a debugger and writes them to disk.

Args:
  receive_port: The port at which to receive health pills from the
    TensorFlow debugger.
  logdir: The directory in which to write events files that TensorBoard
    will read.
  always_flush: A boolean indicating whether the EventsWriter will be
    flushed after every write. Can be used for testing.
f7939:c1:m0
def start_the_debugger_data_receiving_server(self):
self.run_server()
Starts the HTTP server for receiving health pills at `receive_port`.

After this method is called, health pills issued to host:receive_port will
be stored by this object. Calling this method also creates a file within
the log directory for storing health pill summary events.
f7939:c1:m1
def get_events_file_name(self):
return self._events_writer_manager.get_current_file_name()
Gets the name of the debugger events file currently being written to.

Returns:
  The string name of the debugger events file currently being written to.
  This is just the name of that file, not the full path to that file.
f7939:c1:m2
def _numerics_alert_callback(self, alert):
with self._numerics_alert_lock:
    self._numerics_alert_registry.register(alert)
Handles the case in which we receive a bad value (NaN, -Inf, or +Inf).

Args:
  alert: The alert to be registered.
f7939:c1:m3
def numerics_alert_report(self):
with self._numerics_alert_lock:
    return self._numerics_alert_registry.report()
Get a report of the numerics alerts that have occurred.

Returns:
  A list of `numerics_alert.NumericsAlertReportRow`, sorted in ascending
  order of first_timestamp.
f7939:c1:m4
def dispose(self):
self._events_writer_manager.dispose()
Disposes of this object. Call only after this is done being used.
f7939:c1:m5
def __init__(self, events_output_list):
self.events_written = events_output_list
Constructs a fake events writer, which appends events to a list.

Args:
  events_output_list: The list to which events that would have been
    written to disk are appended.
f7940:c0:m0
def dispose(self):
pass
Does nothing. This implementation creates no file.
f7940:c0:m1
def write_event(self, event):
self.events_written.append(event)
Pretends to write an event to disk.

Args:
  event: The event proto.
f7940:c0:m2
def _create_event_with_float_tensor(self, node_name, output_slot, debug_op,
                                    list_of_values):
event = event_pb2.Event()
value = event.summary.value.add(
    tag=node_name,
    node_name="<STR_LIT>" % (node_name, output_slot, debug_op),
    tensor=tensor_util.make_tensor_proto(
        list_of_values, dtype=tf.float64, shape=[len(list_of_values)]))
plugin_content = debugger_event_metadata_pb2.DebuggerEventMetadata(
    device="<STR_LIT>", output_slot=output_slot)
value.metadata.plugin_data.plugin_name = constants.DEBUGGER_PLUGIN_NAME
value.metadata.plugin_data.content = tf.compat.as_bytes(
    json_format.MessageToJson(
        plugin_content, including_default_value_fields=True))
return event
Creates an event with float64 (double) tensors.

Args:
  node_name: The string name of the op. This lacks both the output slot
    as well as the name of the debug op.
  output_slot: The number that is the output slot.
  debug_op: The name of the debug op to use.
  list_of_values: A python list of values within the tensor.

Returns:
  An `Event` with a summary containing that node name and a float64 tensor
  with those values.
f7940:c1:m2
def _verify_event_lists_have_same_tensor_values(self, expected, gotten):
self.assertEqual(len(expected), len(gotten))
for expected_event, gotten_event in zip(expected, gotten):
    self.assertEqual(expected_event.summary.value[0].node_name,
                     gotten_event.summary.value[0].node_name)
    self.assertAllClose(
        tensor_util.make_ndarray(expected_event.summary.value[0].tensor),
        tensor_util.make_ndarray(gotten_event.summary.value[0].tensor))
    self.assertEqual(expected_event.summary.value[0].tag,
                     gotten_event.summary.value[0].tag)
Checks that two lists of events have the same tensor values.

Args:
  expected: The expected list of events.
  gotten: The list of events we actually got.
f7940:c1:m3
def extract_numerics_alert(event):
value = event.summary.value[0]
debugger_plugin_metadata_content = None
if value.HasField("<STR_LIT>"):
    plugin_data = value.metadata.plugin_data
    if plugin_data.plugin_name == constants.DEBUGGER_PLUGIN_NAME:
        debugger_plugin_metadata_content = plugin_data.content
if not debugger_plugin_metadata_content:
    raise ValueError("<STR_LIT>")
debugger_plugin_metadata_content = tf.compat.as_text(
    debugger_plugin_metadata_content)
try:
    content_object = json.loads(debugger_plugin_metadata_content)
    device_name = content_object["<STR_LIT>"]
except (KeyError, ValueError) as e:
    raise ValueError("<STR_LIT>" %
                     (debugger_plugin_metadata_content, e))
debug_op_suffix = "<STR_LIT>"
if not value.node_name.endswith(debug_op_suffix):
    raise ValueError(
        "<STR_LIT>" % debug_op_suffix)
tensor_name = value.node_name[:-len(debug_op_suffix)]
elements = tf_debug.load_tensor_from_event(event)
nan_count = elements[constants.NAN_NUMERIC_SUMMARY_OP_INDEX]
neg_inf_count = elements[constants.NEG_INF_NUMERIC_SUMMARY_OP_INDEX]
pos_inf_count = elements[constants.POS_INF_NUMERIC_SUMMARY_OP_INDEX]
if nan_count > 0 or neg_inf_count > 0 or pos_inf_count > 0:
    return NumericsAlert(
        device_name, tensor_name, event.wall_time, nan_count, neg_inf_count,
        pos_inf_count)
return None
Determines whether a health pill event contains bad values.

A bad value is one of NaN, -Inf, or +Inf.

Args:
  event: (`Event`) A `tensorflow.Event` proto from `DebugNumericSummary`
    ops.

Returns:
  An instance of `NumericsAlert`, if bad values are found. `None`, if no
  bad values are found.

Raises:
  ValueError: If the event does not have the expected tag prefix, or the
    debug op name does not end with the expected debug op name suffix.
f7941:m0
def __init__(self, event_count=0, first_timestamp=-1, last_timestamp=-1):
self.event_count = event_count
self.first_timestamp = first_timestamp
self.last_timestamp = last_timestamp
Tracks events for a single category of values.

Args:
  event_count: The initial event count to use.
  first_timestamp: The timestamp of the first event with this value.
  last_timestamp: The timestamp of the last event with this category of
    values.
f7941:c0:m0
def __init__(self, initialization_list=None):
if initialization_list:<EOL><INDENT>self._trackers = {}<EOL>for value_category_key, description_list in initialization_list.items():<EOL><INDENT>description = EventTrackerDescription._make(description_list)<EOL>self._trackers[value_category_key] = _EventTracker(<EOL>event_count=description.event_count,<EOL>first_timestamp=description.first_timestamp,<EOL>last_timestamp=description.last_timestamp)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._trackers = {<EOL>constants.NAN_KEY: _EventTracker(),<EOL>constants.NEG_INF_KEY: _EventTracker(),<EOL>constants.POS_INF_KEY: _EventTracker(),<EOL>}<EOL><DEDENT>
Stores alert history for a single device, tensor pair. Args: initialization_list: (`list`) An optional list parsed from JSON read from disk. That entity is used to initialize this NumericsAlertHistory. Use the create_jsonable_object method of this class to create such an object.
f7941:c1:m0
def first_timestamp(self, event_key=None):
if event_key is None:<EOL><INDENT>timestamps = [self._trackers[key].first_timestamp<EOL>for key in self._trackers]<EOL>return min(timestamp for timestamp in timestamps if timestamp >= <NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>return self._trackers[event_key].first_timestamp<EOL><DEDENT>
Obtain the first timestamp. Args: event_key: the type key of the sought events (e.g., constants.NAN_KEY). If None, includes all event type keys. Returns: First (earliest) timestamp of all the events of the given type (or all event types if event_key is None).
f7941:c1:m2
def last_timestamp(self, event_key=None):
if event_key is None:<EOL><INDENT>timestamps = [self._trackers[key].first_timestamp<EOL>for key in self._trackers]<EOL>return max(timestamp for timestamp in timestamps if timestamp >= <NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>return self._trackers[event_key].last_timestamp<EOL><DEDENT>
Obtain the last timestamp. Args: event_key: the type key of the sought events (e.g., constants.NAN_KEY). If None, includes all event type keys. Returns: Last (latest) timestamp of all the events of the given type (or all event types if event_key is None).
f7941:c1:m3