Dataset columns (string lengths):
  signature: 8 – 3.44k
  body: 0 – 1.41M
  docstring: 1 – 122k
  id: 5 – 17
def _get_value(self, scalar_data_blob, dtype_enum):
tensorflow_dtype = tf.DType(dtype_enum)
buf = np.frombuffer(scalar_data_blob, dtype=tensorflow_dtype.as_numpy_dtype)
return np.asscalar(buf)

Obtains value for scalar event given blob and dtype enum.

Args:
  scalar_data_blob: The blob obtained from the database.
  dtype_enum: The enum representing the dtype.

Returns:
  The scalar value.
f7971:c1:m5
@wrappers.Request.application
def scalars_route(self, request):

tag = request.args.get('<STR_LIT>')
run = request.args.get('<STR_LIT>')
experiment = request.args.get('<STR_LIT>')
output_format = request.args.get('<STR_LIT>')
(body, mime_type) = self.scalars_impl(tag, run, experiment, output_format)
return http_util.Respond(request, body, mime_type)
Given a tag and single run, return array of ScalarEvents.
f7971:c1:m7
def create_summary_metadata(display_name, description):
content = plugin_data_pb2.ScalarPluginData(version=PROTO_VERSION)
metadata = summary_pb2.SummaryMetadata(
    display_name=display_name,
    summary_description=description,
    plugin_data=summary_pb2.SummaryMetadata.PluginData(
        plugin_name=PLUGIN_NAME,
        content=content.SerializeToString()))
return metadata

Create a `summary_pb2.SummaryMetadata` proto for scalar plugin data.

Returns:
  A `summary_pb2.SummaryMetadata` protobuf object.
f7972:m0
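For orientation, a small usage sketch of the two helpers above; it assumes both live in a module importable as `tensorboard.plugins.scalar.metadata` (the shared id prefix f7972 suggests one module), which may differ in other versions:

from tensorboard.plugins.scalar import metadata

# Build a SummaryMetadata proto for a scalar series, then parse the
# plugin payload back out of its serialized `content` bytes.
meta = metadata.create_summary_metadata(
    display_name='loss', description='Training loss per step.')
plugin_data = metadata.parse_plugin_metadata(meta.plugin_data.content)
print(plugin_data.version)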
def parse_plugin_metadata(content):
if not isinstance(content, bytes):
  raise TypeError('<STR_LIT>')
result = plugin_data_pb2.ScalarPluginData.FromString(content)
if result.version == 0:
  return result
else:
  logger.warn(
      '<STR_LIT>'
      '<STR_LIT>'
      '<STR_LIT>', result.version, PROTO_VERSION)
  return result

Parse summary metadata to a Python object.

Arguments:
  content: The `content` field of a `SummaryMetadata` proto
    corresponding to the scalar plugin.

Returns:
  A `ScalarPluginData` protobuf object.
f7972:m1
def scalar(name, data, step=None, description=None):
summary_metadata = metadata.create_summary_metadata(
    display_name=None, description=description)
summary_scope = (
    getattr(tf.summary.experimental, '<STR_LIT>', None) or
    tf.summary.summary_scope)
with summary_scope(
    name, '<STR_LIT>', values=[data, step]) as (tag, _):
  tf.debugging.assert_scalar(data)
  return tf.summary.write(tag=tag,
                          tensor=tf.cast(data, tf.float32),
                          step=step,
                          metadata=summary_metadata)

Write a scalar summary.

Arguments:
  name: A name for this summary. The summary tag used for TensorBoard
    will be this name prefixed by any active name scopes.
  data: A real numeric scalar value, convertible to a `float32` Tensor.
  step: Explicit `int64`-castable monotonic step value for this summary.
    If omitted, this defaults to `tf.summary.experimental.get_step()`,
    which must not be None.
  description: Optional long-form description for this summary, as a
    constant `str`. Markdown is supported. Defaults to empty.

Returns:
  True on success, or false if no summary was written because no default
  summary writer was available.

Raises:
  ValueError: if a default writer exists, but no step was provided and
    `tf.summary.experimental.get_step()` is None.
f7974:m0
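A minimal usage sketch for the function above, assuming it is the `scalar` exported from `tensorboard.plugins.scalar.summary_v2` and that TensorFlow 2 eager mode is available:

import tensorflow as tf
from tensorboard.plugins.scalar import summary_v2 as scalar_summary

writer = tf.summary.create_file_writer('/tmp/scalar_demo')
with writer.as_default():
  for step in range(100):
    # An explicit step is required here (or set one globally via
    # tf.summary.experimental.set_step).
    scalar_summary.scalar('loss', 1.0 / (step + 1), step=step)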
def scalar_pb(tag, data, description=None):
arr = np.array(data)
if arr.shape != ():
  raise ValueError('<STR_LIT>'
                   % arr.shape)
if arr.dtype.kind not in ('b', 'i', 'u', 'f'):
  raise ValueError('<STR_LIT>' % arr.dtype.name)
tensor_proto = tensor_util.make_tensor_proto(arr.astype(np.float32))
summary_metadata = metadata.create_summary_metadata(
    display_name=None, description=description)
summary = summary_pb2.Summary()
summary.value.add(tag=tag,
                  metadata=summary_metadata,
                  tensor=tensor_proto)
return summary

Create a scalar summary_pb2.Summary protobuf.

Arguments:
  tag: String tag for the summary.
  data: A 0-dimensional `np.array` or a compatible python number type.
  description: Optional long-form description for this summary, as a
    `str`. Markdown is supported. Defaults to empty.

Raises:
  ValueError: If the type or shape of the data is unsupported.

Returns:
  A `summary_pb2.Summary` protobuf object.
f7974:m1
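A hedged usage sketch; the module path is an assumption (the same file as `scalar` above, presumably `tensorboard.plugins.scalar.summary_v2`):

import numpy as np
from tensorboard.plugins.scalar import summary_v2 as scalar_summary

# The data must be 0-dimensional; a plain Python number also works.
pb = scalar_summary.scalar_pb('accuracy', np.float32(0.93),
                              description='Final eval accuracy.')
print(pb.value[0].tag)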
def _DeserializeResponse(self, byte_content):
return json.loads(byte_content.decode("utf-8"))

Deserializes byte content that is a JSON encoding.

Args:
  byte_content: The byte content of a response.

Returns:
  The deserialized python object decoded from JSON.
f7976:c0:m2
def op(name,
       audio,
       sample_rate,
       labels=None,
       max_outputs=3,
       encoding=None,
       display_name=None,
       description=None,
       collections=None):

import tensorflow
import tensorflow.compat.v1 as tf
if display_name is None:
  display_name = name
if encoding is None:
  encoding = '<STR_LIT>'
if encoding == '<STR_LIT>':
  encoding = metadata.Encoding.Value('<STR_LIT>')
  encoder = functools.partial(tensorflow.contrib.ffmpeg.encode_audio,
                              samples_per_second=sample_rate,
                              file_format='<STR_LIT>')
else:
  raise ValueError('<STR_LIT>' % encoding)
with tf.name_scope(name), tf.control_dependencies([tf.assert_rank(audio, 3)]):
  limited_audio = audio[:max_outputs]
  encoded_audio = tf.map_fn(encoder, limited_audio,
                            dtype=tf.string,
                            name='<STR_LIT>')
  if labels is None:
    limited_labels = tf.tile(['<STR_LIT>'], tf.shape(input=limited_audio)[:1])
  else:
    limited_labels = labels[:max_outputs]
  tensor = tf.transpose(a=tf.stack([encoded_audio, limited_labels]))
  summary_metadata = metadata.create_summary_metadata(
      display_name=display_name,
      description=description,
      encoding=encoding)
  return tf.summary.tensor_summary(name='<STR_LIT>',
                                   tensor=tensor,
                                   collections=collections,
                                   summary_metadata=summary_metadata)

Create a legacy audio summary op for use in a TensorFlow graph.

Arguments:
  name: A unique name for the generated summary node.
  audio: A `Tensor` representing audio data with shape `[k, t, c]`,
    where `k` is the number of audio clips, `t` is the number of
    frames, and `c` is the number of channels. Elements should be
    floating-point values in `[-1.0, 1.0]`. Any of the dimensions may
    be statically unknown (i.e., `None`).
  sample_rate: An `int` or rank-0 `int32` `Tensor` that represents the
    sample rate, in Hz. Must be positive.
  labels: Optional `string` `Tensor`, a vector whose length is the
    first dimension of `audio`, where `labels[i]` contains arbitrary
    textual information about `audio[i]`. (For instance, this could be
    some text that a TTS system was supposed to produce.) Markdown is
    supported. Contents should be UTF-8.
  max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
    many audio clips will be emitted at each step. When more than
    `max_outputs` many clips are provided, the first `max_outputs` many
    clips will be used and the rest silently discarded.
  encoding: A constant `str` (not string tensor) indicating the desired
    encoding. You can choose any format you like, as long as it's
    "wav". Please see the "API compatibility note" below.
  display_name: Optional name for this summary in TensorBoard, as a
    constant `str`. Defaults to `name`.
  description: Optional long-form description for this summary, as a
    constant `str`. Markdown is supported. Defaults to empty.
  collections: Optional list of graph collections keys. The new summary
    op is added to these collections. Defaults to
    `[GraphKeys.SUMMARIES]`.

Returns:
  A TensorFlow summary op.

API compatibility note: The default value of the `encoding` argument is
_not_ guaranteed to remain unchanged across TensorBoard versions. In the
future, we will by default encode as FLAC instead of as WAV. If the
specific format is important to you, please provide a file format
explicitly.
f7977:m0
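A usage sketch for the legacy op, assuming it is `tensorboard.plugins.audio.summary.op`; note the body depends on `tensorflow.contrib.ffmpeg`, so it only works under TensorFlow 1.x with contrib available:

import math
import tensorflow.compat.v1 as tf
from tensorboard.plugins.audio import summary as audio_summary

tf.disable_eager_execution()
# One second of a 440 Hz sine tone, shaped [k=1 clip, t frames, c=1 channel].
sample_rate = 44100
t = tf.range(sample_rate, dtype=tf.float32) / sample_rate
wave = tf.reshape(tf.sin(2.0 * math.pi * 440.0 * t), [1, sample_rate, 1])
summary_op = audio_summary.op('tone', wave, sample_rate, max_outputs=1)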
def pb(name,
       audio,
       sample_rate,
       labels=None,
       max_outputs=3,
       encoding=None,
       display_name=None,
       description=None):

import tensorflow.compat.v1 as tf
audio = np.array(audio)
if audio.ndim != 3:
  raise ValueError('<STR_LIT>' % (audio.shape,))
if encoding is None:
  encoding = '<STR_LIT>'
if encoding == '<STR_LIT>':
  encoding = metadata.Encoding.Value('<STR_LIT>')
  encoder = functools.partial(encoder_util.encode_wav,
                              samples_per_second=sample_rate)
else:
  raise ValueError('<STR_LIT>' % encoding)
limited_audio = audio[:max_outputs]
if labels is None:
  limited_labels = [b'<STR_LIT>'] * len(limited_audio)
else:
  limited_labels = [tf.compat.as_bytes(label)
                    for label in labels[:max_outputs]]
encoded_audio = [encoder(a) for a in limited_audio]
content = np.array([encoded_audio, limited_labels]).transpose()
tensor = tf.make_tensor_proto(content, dtype=tf.string)
if display_name is None:
  display_name = name
summary_metadata = metadata.create_summary_metadata(
    display_name=display_name,
    description=description,
    encoding=encoding)
tf_summary_metadata = tf.SummaryMetadata.FromString(
    summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag='<STR_LIT>' % name,
                  metadata=tf_summary_metadata,
                  tensor=tensor)
return summary

Create a legacy audio summary protobuf.

This behaves as if you were to create an `op` with the same arguments
(wrapped with constant tensors where appropriate) and then execute that
summary op in a TensorFlow session.

Arguments:
  name: A unique name for the generated summary node.
  audio: An `np.array` representing audio data with shape `[k, t, c]`,
    where `k` is the number of audio clips, `t` is the number of
    frames, and `c` is the number of channels. Elements should be
    floating-point values in `[-1.0, 1.0]`.
  sample_rate: An `int` that represents the sample rate, in Hz. Must be
    positive.
  labels: Optional list (or rank-1 `np.array`) of text strings or UTF-8
    bytestrings whose length is the first dimension of `audio`, where
    `labels[i]` contains arbitrary textual information about
    `audio[i]`. (For instance, this could be some text that a TTS
    system was supposed to produce.) Markdown is supported.
  max_outputs: Optional `int`. At most this many audio clips will be
    emitted. When more than `max_outputs` many clips are provided, the
    first `max_outputs` many clips will be used and the rest silently
    discarded.
  encoding: A constant `str` indicating the desired encoding. You can
    choose any format you like, as long as it's "wav". Please see the
    "API compatibility note" below.
  display_name: Optional name for this summary in TensorBoard, as a
    `str`. Defaults to `name`.
  description: Optional long-form description for this summary, as a
    `str`. Markdown is supported. Defaults to empty.

Returns:
  A `tf.Summary` protobuf object.

API compatibility note: The default value of the `encoding` argument is
_not_ guaranteed to remain unchanged across TensorBoard versions. In the
future, we will by default encode as FLAC instead of as WAV. If the
specific format is important to you, please provide a file format
explicitly.
f7977:m1
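A usage sketch for the protobuf variant, under the same module-path assumption as above:

import numpy as np
from tensorboard.plugins.audio import summary as audio_summary

# Two clips of white noise, one second each at 16 kHz, mono: shape [k, t, c].
clips = np.random.uniform(-1.0, 1.0, size=(2, 16000, 1)).astype(np.float32)
pb = audio_summary.pb('noise', clips, sample_rate=16000,
                      labels=['clip one', 'clip two'])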
def create_summary_metadata(display_name, description, encoding):
content = plugin_data_pb2.AudioPluginData(
    version=PROTO_VERSION, encoding=encoding)
metadata = summary_pb2.SummaryMetadata(
    display_name=display_name,
    summary_description=description,
    plugin_data=summary_pb2.SummaryMetadata.PluginData(
        plugin_name=PLUGIN_NAME,
        content=content.SerializeToString()))
return metadata

Create a `SummaryMetadata` proto for audio plugin data.

Returns:
  A `SummaryMetadata` protobuf object.
f7978:m0
def parse_plugin_metadata(content):
if not isinstance(content, bytes):
  raise TypeError('<STR_LIT>')
result = plugin_data_pb2.AudioPluginData.FromString(content)
if result.version == 0:
  return result
else:
  logger.warn(
      '<STR_LIT>'
      '<STR_LIT>'
      '<STR_LIT>', result.version, PROTO_VERSION)
  return result

Parse summary metadata to a Python object.

Arguments:
  content: The `content` field of a `SummaryMetadata` proto
    corresponding to the audio plugin.

Returns:
  An `AudioPluginData` protobuf object.
f7978:m1
def _samples():
return int(FLAGS.sample_rate * FLAGS.duration)
Compute how many samples should be included in each waveform.
f7979:m0
def run(logdir, run_name, wave_name, wave_constructor):
tf.compat.v1.reset_default_graph()
tf.compat.v1.set_random_seed(0)
step_placeholder = tf.compat.v1.placeholder(tf.float32, shape=[])
with tf.name_scope('<STR_LIT>'):
  f_min = <NUM_LIT>
  f_max = <NUM_LIT>
  t = step_placeholder / (FLAGS.steps - 1)
  frequency = f_min * (1.0 - t) + f_max * t
tf.compat.v1.summary.scalar('<STR_LIT>', frequency)
with tf.name_scope(wave_name):
  waveform = wave_constructor(frequency)
with tf.name_scope('<STR_LIT>'):
  samples = tf.shape(input=waveform)[0]
  wave_types = tf.tile(["<STR_LIT>" % wave_name], [samples])
  frequencies = tf.strings.join([
      "<STR_LIT>",
      tf.tile([tf.as_string(frequency, precision=2)], [samples]),
      "<STR_LIT>",
  ])
  samples = tf.strings.join([
      "<STR_LIT>", tf.as_string(tf.range(samples) + 1),
      "<STR_LIT>", tf.as_string(samples), ".",
  ])
  labels = tf.strings.join([wave_types, frequencies, samples], separator=" ")
source = '\n'.join('<STR_LIT>' % line.rstrip()
                   for line in inspect.getsourcelines(wave_constructor)[0])
description = ("<STR_LIT>" % (wave_name, source))
summary.op('<STR_LIT>', waveform, FLAGS.sample_rate,
           labels=labels,
           display_name=wave_name,
           description=description)
summ = tf.compat.v1.summary.merge_all()
sess = tf.compat.v1.Session()
writer = tf.summary.FileWriter(os.path.join(logdir, run_name))
writer.add_graph(sess.graph)
sess.run(tf.compat.v1.global_variables_initializer())
for step in xrange(FLAGS.steps):
  s = sess.run(summ, feed_dict={step_placeholder: float(step)})
  writer.add_summary(s, global_step=step)
writer.close()

Generate wave data of the given form.

The provided function `wave_constructor` should accept a scalar tensor
of type float32, representing the frequency (in Hz) at which to
construct a wave, and return a tensor of shape [1, _samples(), `n`]
representing audio data (for some number of channels `n`).

Waves will be generated at frequencies ranging from A4 to A5.

Arguments:
  logdir: the top-level directory into which to write summary data
  run_name: the name of this run; will be created as a subdirectory
    under logdir
  wave_name: the name of the wave being generated
  wave_constructor: see above
f7979:m1
def sine_wave(frequency):
xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
ts = xs / FLAGS.sample_rate
return tf.sin(2 * math.pi * frequency * ts)
Emit a sine wave at the given frequency.
f7979:m2
def square_wave(frequency):
return tf.sign(sine_wave(frequency))
Emit a square wave at the given frequency.
f7979:m3
def triangle_wave(frequency):
xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
ts = xs / FLAGS.sample_rate
half_pulse_index = ts * (frequency * 2)
half_pulse_angle = half_pulse_index % 1.0
absolute_amplitude = (0.5 - tf.abs(half_pulse_angle - 0.5)) / 0.5
half_pulse_parity = tf.sign(1 - (half_pulse_index % <NUM_LIT>))
amplitude = half_pulse_parity * absolute_amplitude
return amplitude
Emit a triangle wave at the given frequency.
f7979:m4
def bisine_wave(frequency):
f_hi = frequency
f_lo = frequency / <NUM_LIT>
with tf.name_scope('<STR_LIT>'):
  sine_hi = sine_wave(f_hi)
with tf.name_scope('<STR_LIT>'):
  sine_lo = sine_wave(f_lo)
return tf.concat([sine_lo, sine_hi], axis=2)
Emit two sine waves, in stereo at different octaves.
f7979:m5
def bisine_wahwah_wave(frequency):
waves_a = bisine_wave(frequency)
waves_b = tf.reverse(waves_a, axis=[2])
iterations = 4
xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
thetas = xs / _samples() * iterations
ts = (tf.sin(math.pi * 2 * thetas) + 1) / 2
wave = ts * waves_a + (1.0 - ts) * waves_b
exaggerated_wave = wave ** <NUM_LIT>
return tf.concat([wave, exaggerated_wave], axis=0)
Emit two sine waves with balance oscillating left and right.
f7979:m6
def run_all(logdir, verbose=False):
waves = [sine_wave, square_wave, triangle_wave,
         bisine_wave, bisine_wahwah_wave]
for (i, wave_constructor) in enumerate(waves):
  wave_name = wave_constructor.__name__
  run_name = '<STR_LIT>' % (i + 1, wave_name)
  if verbose:
    print('<STR_LIT>' % run_name)
  run(logdir, run_name, wave_name, wave_constructor)

Generate waves of the shapes defined above.

Arguments:
  logdir: the directory into which to store all the runs' data
  verbose: if true, print out each run's name as it begins
f7979:m7
def __init__(self, context):
self._multiplexer = context.multiplexer

Instantiates AudioPlugin via TensorBoard core.

Args:
  context: A base_plugin.TBContext instance.
f7980:c0:m0
def is_active(self):
if not self._multiplexer:
  return False
return bool(self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME))
The audio plugin is active iff any run has at least one relevant tag.
f7980:c0:m2
def _index_impl(self):
runs = self._multiplexer.Runs()
result = {run: {} for run in runs}
mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
for (run, tag_to_content) in six.iteritems(mapping):
  for tag in tag_to_content:
    summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
    tensor_events = self._multiplexer.Tensors(run, tag)
    samples = max([self._number_of_samples(event.tensor_proto)
                   for event in tensor_events] + [0])
    result[run][tag] = {'<STR_LIT>': summary_metadata.display_name,
                        'description': plugin_util.markdown_to_safe_html(
                            summary_metadata.summary_description),
                        '<STR_LIT>': samples}
return result

Return information about the tags in each run.

Result is a dictionary of the form

  {
    "runName1": {
      "tagName1": {
        "displayName": "The first tag",
        "description": "<p>Long ago there was just one tag...</p>",
        "samples": 3
      },
      "tagName2": ...,
      ...
    },
    "runName2": ...,
    ...
  }

For each tag, `samples` is the greatest number of audio clips that
appear at any particular step. (It's not related to "samples of a
waveform.") For example, if for tag `minibatch_input` there are five
audio clips at step 0 and ten audio clips at step 1, then the
dictionary for `"minibatch_input"` will contain `"samples": 10`.
f7980:c0:m3
def _number_of_samples(self, tensor_proto):
return tensor_proto.tensor_shape.dim[0].size
Count the number of samples of an audio TensorProto.
f7980:c0:m4
@wrappers.Request.application
def _serve_audio_metadata(self, request):

tag = request.args.get('<STR_LIT>')
run = request.args.get('<STR_LIT>')
sample = int(request.args.get('<STR_LIT>', 0))
events = self._multiplexer.Tensors(run, tag)
response = self._audio_response_for_run(events, run, tag, sample)
return http_util.Respond(request, response, 'application/json')

Given a tag and list of runs, serve a list of metadata for audio.

Note that the actual audio data are not sent; instead, we respond with
URLs to the audio. The frontend should treat these URLs as opaque and
should not try to parse information about them or generate them itself,
as the format may change.

Args:
  request: A werkzeug.wrappers.Request object.

Returns:
  A werkzeug.Response application.
f7980:c0:m6
def _audio_response_for_run(self, tensor_events, run, tag, sample):
response = []
index = 0
filtered_events = self._filter_by_sample(tensor_events, sample)
content_type = self._get_mime_type(run, tag)
for (index, tensor_event) in enumerate(filtered_events):
  data = tensor_util.make_ndarray(tensor_event.tensor_proto)
  label = data[sample, 1]
  response.append({
      '<STR_LIT>': tensor_event.wall_time,
      '<STR_LIT>': tensor_event.step,
      'label': plugin_util.markdown_to_safe_html(label),
      '<STR_LIT>': content_type,
      '<STR_LIT>': self._query_for_individual_audio(run, tag, sample, index)
  })
return response

Builds a JSON-serializable object with information about audio.

Args:
  tensor_events: A list of event_accumulator.TensorEvent objects.
  run: The name of the run.
  tag: The name of the tag the audio entries all belong to.
  sample: The zero-indexed sample of the audio sample for which to
    retrieve information. For instance, setting `sample` to `2` will
    fetch information about only the third audio clip of each batch,
    and steps with fewer than three audio clips will be omitted from
    the results.

Returns:
  A list of dictionaries containing the wall time, step, label, content
  type, and query string for each audio entry.
f7980:c0:m7
def _query_for_individual_audio(self, run, tag, sample, index):
query_string = urllib.parse.urlencode({
    '<STR_LIT>': run,
    '<STR_LIT>': tag,
    '<STR_LIT>': sample,
    'index': index,
})
return query_string

Builds a URL for accessing the specified audio.

This should be kept in sync with _serve_audio_metadata. Note that the
URL is *not* guaranteed to always return the same audio, since audio may
be unloaded from the reservoir as new audio entries come in.

Args:
  run: The name of the run.
  tag: The tag.
  sample: The zero-indexed sample of the audio clip within each step.
  index: The index of the audio entry. Negative values are OK.

Returns:
  A string representation of a URL that will load the index-th sampled
  audio in the given run with the given tag.
f7980:c0:m8
@wrappers.Request.application
def _serve_individual_audio(self, request):

tag = request.args.get('<STR_LIT>')
run = request.args.get('<STR_LIT>')
index = int(request.args.get('index'))
sample = int(request.args.get('<STR_LIT>', 0))
events = self._filter_by_sample(self._multiplexer.Tensors(run, tag), sample)
data = tensor_util.make_ndarray(events[index].tensor_proto)[sample, 0]
mime_type = self._get_mime_type(run, tag)
return http_util.Respond(request, data, mime_type)
Serve encoded audio data.
f7980:c0:m10
def audio(name,
          data,
          sample_rate,
          step=None,
          max_outputs=3,
          encoding=None,
          description=None):

audio_ops = getattr(tf, '<STR_LIT>', None)
if audio_ops is None:
  from tensorflow.python.ops import gen_audio_ops as audio_ops
if encoding is None:
  encoding = '<STR_LIT>'
if encoding != '<STR_LIT>':
  raise ValueError('<STR_LIT>' % encoding)
summary_metadata = metadata.create_summary_metadata(
    display_name=None,
    description=description,
    encoding=metadata.Encoding.Value('<STR_LIT>'))
inputs = [data, sample_rate, max_outputs, step]
summary_scope = (
    getattr(tf.summary.experimental, '<STR_LIT>', None) or
    tf.summary.summary_scope)
with summary_scope(
    name, '<STR_LIT>', values=inputs) as (tag, _):
  tf.debugging.assert_rank(data, 3)
  tf.debugging.assert_non_negative(max_outputs)
  limited_audio = data[:max_outputs]
  encode_fn = functools.partial(audio_ops.encode_wav,
                                sample_rate=sample_rate)
  encoded_audio = tf.map_fn(encode_fn, limited_audio,
                            dtype=tf.string,
                            name='<STR_LIT>')
  encoded_audio = tf.cond(
      tf.shape(input=encoded_audio)[0] > 0,
      lambda: encoded_audio, lambda: tf.constant([], tf.string))
  limited_labels = tf.tile(['<STR_LIT>'], tf.shape(input=limited_audio)[:1])
  tensor = tf.transpose(a=tf.stack([encoded_audio, limited_labels]))
  return tf.summary.write(
      tag=tag, tensor=tensor, step=step, metadata=summary_metadata)

Write an audio summary.

Arguments:
  name: A name for this summary. The summary tag used for TensorBoard
    will be this name prefixed by any active name scopes.
  data: A `Tensor` representing audio data with shape `[k, t, c]`,
    where `k` is the number of audio clips, `t` is the number of
    frames, and `c` is the number of channels. Elements should be
    floating-point values in `[-1.0, 1.0]`. Any of the dimensions may
    be statically unknown (i.e., `None`).
  sample_rate: An `int` or rank-0 `int32` `Tensor` that represents the
    sample rate, in Hz. Must be positive.
  step: Explicit `int64`-castable monotonic step value for this
    summary. If omitted, this defaults to
    `tf.summary.experimental.get_step()`, which must not be None.
  max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
    many audio clips will be emitted at each step. When more than
    `max_outputs` many clips are provided, the first `max_outputs` many
    clips will be used and the rest silently discarded.
  encoding: Optional constant `str` for the desired encoding. Only
    "wav" is currently supported, but this is not guaranteed to remain
    the default, so if you want "wav" in particular, set this
    explicitly.
  description: Optional long-form description for this summary, as a
    constant `str`. Markdown is supported. Defaults to empty.

Returns:
  True on success, or false if no summary was emitted because no default
  summary writer was available.

Raises:
  ValueError: if a default writer exists, but no step was provided and
    `tf.summary.experimental.get_step()` is None.
f7982:m0
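A usage sketch under TensorFlow 2, assuming this is the `audio` exported from `tensorboard.plugins.audio.summary_v2`:

import tensorflow as tf
from tensorboard.plugins.audio import summary_v2 as audio_summary

writer = tf.summary.create_file_writer('/tmp/audio_demo')
clips = tf.random.uniform([2, 16000, 1], minval=-1.0, maxval=1.0)
with writer.as_default():
  audio_summary.audio('noise', clips, sample_rate=16000, step=0,
                      max_outputs=2, encoding='wav')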
def __init__(self, context):
self._logdir = context.logdir
self._db_uri = context.db_uri
self._window_title = context.window_title
self._multiplexer = context.multiplexer
self._db_connection_provider = context.db_connection_provider
self._assets_zip_provider = context.assets_zip_provider

Instantiates CorePlugin.

Args:
  context: A base_plugin.TBContext instance.
f7983:c0:m0
@wrappers.Request.application
def _serve_asset(self, path, gzipped_asset_bytes, request):

mimetype = mimetypes.guess_type(path)[0] or '<STR_LIT>'
return http_util.Respond(
    request, gzipped_asset_bytes, mimetype, content_encoding='<STR_LIT>')
Serves a pre-gzipped static asset from the zip file.
f7983:c0:m5
@wrappers.Request.application
def _serve_environment(self, request):

return http_util.Respond(
    request,
    {
        '<STR_LIT>': self._logdir or self._db_uri,
        '<STR_LIT>': '<STR_LIT>' if self._db_uri else '<STR_LIT>',
        '<STR_LIT>': self._window_title,
    },
    'application/json')

Serve a JSON object containing some base properties used by the frontend.

* data_location is either a path to a directory or an address to a
  database (depending on which mode TensorBoard is running in).
* window_title is the title of the TensorBoard web page.
f7983:c0:m6
@wrappers.Request.application
def _serve_logdir(self, request):

return http_util.Respond(
    request, {'<STR_LIT>': self._logdir}, 'application/json')
Respond with a JSON object containing this TensorBoard's logdir.
f7983:c0:m7
@wrappers.Request.application
def _serve_window_properties(self, request):

return http_util.Respond(
    request, {'<STR_LIT>': self._window_title}, 'application/json')
Serve a JSON object containing this TensorBoard's window properties.
f7983:c0:m8
@wrappers.Request.application
def _serve_runs(self, request):

if self._db_connection_provider:
  db = self._db_connection_provider()
  cursor = db.execute(
Serve a JSON array of run names, ordered by run started time. Sort order is by started time (aka first event time) with empty times sorted last, and then ties are broken by sorting on the run name.
f7983:c0:m9
@wrappers.Request.application
def _serve_experiments(self, request):

results = self.list_experiments_impl()
return http_util.Respond(request, results, 'application/json')
Serve a JSON array of experiments. Experiments are ordered by experiment started time (aka first event time) with empty times sorted last, and then ties are broken by sorting on the experiment name.
f7983:c0:m10
@wrappers.Request.application
def _serve_experiment_runs(self, request):

results = []
if self._db_connection_provider:
  exp_id = request.args.get('<STR_LIT>')
  runs_dict = collections.OrderedDict()
  db = self._db_connection_provider()
  cursor = db.execute(

Serve a JSON array of runs of an experiment, specified with query param
`experiment`, with their nested tag data populated.

Runs returned are ordered by started time (aka first event time) with
empty times sorted last, and then ties are broken by sorting on the run
name. Tags are sorted by name, then displayName, and lastly by inserted
time.
f7983:c0:m12
def define_flags(self, parser):
parser.add_argument(
    '<STR_LIT>',
    metavar='<STR_LIT>',
    type=str,
    default='<STR_LIT>',
    help='''<STR_LIT>...host to listen to. Defaults to serving on all
interfaces. Other commonly used values are 127.0.0.1 (localhost) and ::
(for IPv6).''')
parser.add_argument(
    '<STR_LIT>',
    metavar='<STR_LIT>',
    type=lambda s: (None if s == "default" else int(s)),
    default="default",
    help='''<STR_LIT>...to purge data that may have been orphaned due to
TensorBoard restarts. Setting --purge_orphaned_data=False can be used
to debug data disappearance. (default: %(default)s)''')
parser.add_argument(
    '<STR_LIT>',
    metavar='<STR_LIT>',
    type=float,
    default=<NUM_LIT>,
    help='''<STR_LIT>...[experimental] sets SQL database URI and enables
DB backend mode, which is read-only unless --db_import is also
passed.''')
parser.add_argument(
    '<STR_LIT>',
    action='store_true',
    help='''<STR_LIT>...[experimental] in combination with --db_import,
if passed, use TensorFlow's import_event() op for importing event data,
otherwise use TensorBoard's own sqlite ingestion logic.''')
parser.add_argument(
    '<STR_LIT>',
    action='store_true',
    help='''<STR_LIT>...particular event file to query for. Only used if
--inspect is present and --logdir is not specified.''')
parser.add_argument(
    '<STR_LIT>',
    metavar='<STR_LIT>',
    type=str,
    default='<STR_LIT>',
    help='''<STR_LIT>...number of threads that TensorBoard can use to
reload runs. Not relevant for db read-only mode. Each thread reloads
one run at a time. (default: %(default)s)''')
parser.add_argument(
    '<STR_LIT>',
    metavar='<STR_LIT>',
    type=str,
    default='<STR_LIT>',
    choices=['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'],
    help='''<STR_LIT>...comma separated list of plugin_name=num_samples
pairs to explicitly specify how many samples to keep per tag for that
plugin. For unspecified plugins, TensorBoard randomly downsamples
logged summaries to reasonable values to prevent out-of-memory errors
for long running jobs. This flag allows fine control over that
downsampling. Note that 0 means keep all samples of that type. For
instance "<STR_LIT>" keeps 500 scalars and all images. Most users
should not need to set this flag.''')
Adds standard TensorBoard CLI flags to parser.
f7983:c1:m0
def fix_flags(self, flags):
FlagsError = base_plugin.FlagsError
if flags.version_tb:
  pass
elif flags.inspect:
  if flags.logdir and flags.event_file:
    raise FlagsError(
        '<STR_LIT>')
  if not (flags.logdir or flags.event_file):
    raise FlagsError('<STR_LIT>')
elif not flags.db and not flags.logdir:
  raise FlagsError('<STR_LIT>'
                   '<STR_LIT>'
                   '<STR_LIT>'
                   '<STR_LIT>')
if flags.path_prefix.endswith('/'):
  flags.path_prefix = flags.path_prefix[:-1]
Fixes standard TensorBoard CLI flags to parser.
f7983:c1:m1
def load(self, context):
return CorePlugin(context)
Creates CorePlugin instance.
f7983:c1:m2
def __init__(self, context):
self._db_connection_provider = context.db_connection_provider
self._multiplexer = context.multiplexer

Instantiates a PrCurvesPlugin.

Args:
  context: A base_plugin.TBContext instance. A magic container that
    TensorBoard uses to make objects available to the plugin.
f7985:c0:m0
@wrappers.Request.application
def pr_curves_route(self, request):

runs = request.args.getlist('<STR_LIT>')
if not runs:
  return http_util.Respond(
      request, '<STR_LIT>', <NUM_LIT>)
tag = request.args.get('<STR_LIT>')
if not tag:
  return http_util.Respond(
      request, '<STR_LIT>', <NUM_LIT>)
try:
  response = http_util.Respond(
      request, self.pr_curves_impl(runs, tag), 'application/json')
except ValueError as e:
  return http_util.Respond(request, str(e), '<STR_LIT>', <NUM_LIT>)
return response

A route that returns a JSON mapping between runs and PR curve data.

Returns:
  Given a tag and a comma-separated list of runs (both stored within
  GET parameters), fetches a JSON object that maps between run name and
  objects containing data required for PR curves for that run. Runs
  that either cannot be found or that lack tags will be excluded from
  the response.
f7985:c0:m1
def pr_curves_impl(self, runs, tag):
if self._db_connection_provider:
  db = self._db_connection_provider()
  cursor = db.execute(

Creates the JSON object for the PR curves response for a run-tag combo.

Arguments:
  runs: A list of runs to fetch the curves for.
  tag: The tag to fetch the curves for.

Raises:
  ValueError: If no PR curves could be fetched for a run and tag.

Returns:
  The JSON object for the PR curves route response.
f7985:c0:m2
def _compute_thresholds(self, num_thresholds):
return [float(v) / num_thresholds for v in range(1, num_thresholds + 1)]

Computes a list of specific thresholds from the number of thresholds.

Args:
  num_thresholds: The number of thresholds.

Returns:
  A list of specific thresholds (floats).
f7985:c0:m3
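For illustration, the same computation restated as a standalone function; note the resulting list starts above zero and ends exactly at 1.0:

def compute_thresholds(num_thresholds):
  # Mirrors _compute_thresholds above: evenly spaced threshold values.
  return [float(v) / num_thresholds for v in range(1, num_thresholds + 1)]

print(compute_thresholds(4))  # [0.25, 0.5, 0.75, 1.0]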
@wrappers.Request.application
def tags_route(self, request):

return http_util.Respond(
    request, self.tags_impl(), 'application/json')

A route (HTTP handler) that returns a response with tags.

Returns:
  A response that contains a JSON object. The keys of the object are
  all the runs. Each run is mapped to a (potentially empty) dictionary
  whose keys are tags associated with run and whose values are metadata
  (dictionaries).

  The metadata dictionaries contain 2 keys:
    - displayName: For the display name used atop visualizations in
      TensorBoard.
    - description: The description that appears near visualizations
      upon the user hovering over a certain icon.
f7985:c0:m4
def tags_impl(self):
if self._db_connection_provider:
  db = self._db_connection_provider()
  cursor = db.execute(

Creates the JSON object for the tags route response.

Returns:
  The JSON object for the tags route response.
f7985:c0:m5
@wrappers.Request.application
def available_time_entries_route(self, request):

return http_util.Respond(
    request, self.available_time_entries_impl(), 'application/json')

Gets a dict mapping run to a list of time entries.

Returns:
  A dict with string keys (all runs with PR curve data). The values of
  the dict are lists of time entries (consisting of the fields below)
  to be used in populating values within time sliders.
f7985:c0:m6
def available_time_entries_impl(self):
result = {}
if self._db_connection_provider:
  db = self._db_connection_provider()
  cursor = db.execute(
      '''<STR_LIT>''', (metadata.PLUGIN_NAME,))
  for (run, step, wall_time) in cursor:
    if run not in result:
      result[run] = []
    result[run].append(self._create_time_entry(step, wall_time))
else:
  all_runs = self._multiplexer.PluginRunToTagToContent(
      metadata.PLUGIN_NAME)
  for run, tag_to_content in all_runs.items():
    if not tag_to_content:
      continue
    tensor_events = self._multiplexer.Tensors(
        run, min(six.iterkeys(tag_to_content)))
    result[run] = [self._create_time_entry(e.step, e.wall_time)
                   for e in tensor_events]
return result

Creates the JSON object for the available time entries route response.

Returns:
  The JSON object for the available time entries route response.
f7985:c0:m7
def _create_time_entry(self, step, wall_time):
return {
    '<STR_LIT>': step,
    '<STR_LIT>': wall_time,
}

Creates a time entry given a tensor event.

Arguments:
  step: The step for the time entry.
  wall_time: The wall time for the time entry.

Returns:
  A JSON-able time entry to be passed to the frontend in order to
  construct the slider.
f7985:c0:m8
def get_plugin_apps(self):
return {
    '<STR_LIT>': self.tags_route,
    '<STR_LIT>': self.pr_curves_route,
    '<STR_LIT>': self.available_time_entries_route,
}

Gets all routes offered by the plugin.

Returns:
  A dictionary mapping URL path to route that handles it.
f7985:c0:m9
def is_active(self):
if self._db_connection_provider:
  db = self._db_connection_provider()
  cursor = db.execute(
      '''<STR_LIT>''',
      (metadata.PLUGIN_NAME,))
  return bool(list(cursor))
if not self._multiplexer:
  return False
all_runs = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
return any(six.itervalues(all_runs))

Determines whether this plugin is active.

This plugin is active only if PR curve summary data is read by
TensorBoard.

Returns:
  Whether this plugin is active.
f7985:c0:m10
def _process_tensor_event(self, event, thresholds):
return self._make_pr_entry(
    event.step,
    event.wall_time,
    tensor_util.make_ndarray(event.tensor_proto),
    thresholds)

Converts a TensorEvent into a dict that encapsulates information on it.

Args:
  event: The TensorEvent to convert.
  thresholds: An array of floats that ranges from 0 to 1 (in that
    direction and inclusive of 0 and 1).

Returns:
  A JSON-able dictionary of PR curve data for 1 step.
f7985:c0:m11
def _make_pr_entry(self, step, wall_time, data_array, thresholds):
true_positives = [int(v) for v in data_array[metadata.TRUE_POSITIVES_INDEX]]
false_positives = [
    int(v) for v in data_array[metadata.FALSE_POSITIVES_INDEX]]
tp_index = metadata.TRUE_POSITIVES_INDEX
fp_index = metadata.FALSE_POSITIVES_INDEX
positives = data_array[[tp_index, fp_index], :].astype(int).sum(axis=0)
end_index_inclusive = len(positives) - 1
while end_index_inclusive > 0 and positives[end_index_inclusive] == 0:
  end_index_inclusive -= 1
end_index = end_index_inclusive + 1
return {
    '<STR_LIT>': wall_time,
    '<STR_LIT>': step,
    '<STR_LIT>': data_array[metadata.PRECISION_INDEX, :end_index].tolist(),
    '<STR_LIT>': data_array[metadata.RECALL_INDEX, :end_index].tolist(),
    '<STR_LIT>': true_positives[:end_index],
    '<STR_LIT>': false_positives[:end_index],
    '<STR_LIT>':
        [int(v) for v in
         data_array[metadata.TRUE_NEGATIVES_INDEX][:end_index]],
    '<STR_LIT>':
        [int(v) for v in
         data_array[metadata.FALSE_NEGATIVES_INDEX][:end_index]],
    '<STR_LIT>': thresholds[:end_index],
}

Creates an entry for PR curve data. Each entry corresponds to 1 step.

Args:
  step: The step.
  wall_time: The wall time.
  data_array: A numpy array of PR curve data stored in the summary
    format.
  thresholds: An array of floating point thresholds.

Returns:
  A PR curve entry.
f7985:c0:m12
def op(
    name,
    labels,
    predictions,
    num_thresholds=None,
    weights=None,
    display_name=None,
    description=None,
    collections=None):

import tensorflow.compat.v1 as tf
if num_thresholds is None:
  num_thresholds = _DEFAULT_NUM_THRESHOLDS
if weights is None:
  weights = 1.0
dtype = predictions.dtype
with tf.name_scope(name, values=[labels, predictions, weights]):
  tf.assert_type(labels, tf.bool)
  f_labels = tf.cast(labels, dtype)
  predictions = tf.minimum(1.0, tf.maximum(0.0, predictions))
  true_labels = f_labels * weights
  false_labels = (1.0 - f_labels) * weights
  predictions = tf.reshape(predictions, [-1])
  true_labels = tf.reshape(true_labels, [-1, 1])
  false_labels = tf.reshape(false_labels, [-1, 1])
  bucket_indices = tf.cast(
      tf.floor(predictions * (num_thresholds - 1)), tf.int32)
  tp_buckets = tf.reduce_sum(
      input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds)
          * true_labels,
      axis=0)
  fp_buckets = tf.reduce_sum(
      input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds)
          * false_labels,
      axis=0)
  tp = tf.cumsum(tp_buckets, reverse=True, name='<STR_LIT>')
  fp = tf.cumsum(fp_buckets, reverse=True, name='<STR_LIT>')
  tn = fp[0] - fp
  fn = tp[0] - tp
  precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp)
  recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn)
  return _create_tensor_summary(
      name,
      tp,
      fp,
      tn,
      fn,
      precision,
      recall,
      num_thresholds,
      display_name,
      description,
      collections)

Create a PR curve summary op for a single binary classifier.

Computes true/false positive/negative values for the given
`predictions` against the ground truth `labels`, against a list of
evenly distributed threshold values in `[0, 1]` of length
`num_thresholds`.

Each number in `predictions`, a float in `[0, 1]`, is compared with its
corresponding boolean label in `labels`, and counts as a single
tp/fp/tn/fn value at each threshold. This is then multiplied with
`weights` which can be used to reweight certain values, or more
commonly used for masking values.

Args:
  name: A tag attached to the summary. Used by TensorBoard for
    organization.
  labels: The ground truth values. A Tensor of `bool` values with
    arbitrary shape.
  predictions: A float32 `Tensor` whose values are in the range
    `[0, 1]`. Dimensions must match those of `labels`.
  num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`,
    to compute PR metrics for. Should be `>= 2`. This value should be a
    constant integer value, not a Tensor that stores an integer.
  weights: Optional float32 `Tensor`. Individual counts are multiplied
    by this value. This tensor must be either the same shape as or
    broadcastable to the `labels` tensor.
  display_name: Optional name for this summary in TensorBoard, as a
    constant `str`. Defaults to `name`.
  description: Optional long-form description for this summary, as a
    constant `str`. Markdown is supported. Defaults to empty.
  collections: Optional list of graph collections keys. The new summary
    op is added to these collections. Defaults to
    `[GraphKeys.SUMMARIES]`.

Returns:
  A summary operation for use in a TensorFlow graph. The float32 tensor
  produced by the summary operation is of dimension
  (6, num_thresholds). The first dimension (of length 6) is of the
  order: true positives, false positives, true negatives, false
  negatives, precision, recall.
f7986:m0
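A graph-mode usage sketch, assuming this is the `op` in `tensorboard.plugins.pr_curve.summary` and TensorFlow 1.x compatibility mode:

import tensorflow.compat.v1 as tf
from tensorboard.plugins.pr_curve import summary as pr_summary

tf.disable_eager_execution()
labels = tf.constant([True, False, True, False])
predictions = tf.constant([0.9, 0.8, 0.4, 0.1])
pr_op = pr_summary.op('pr', labels, predictions, num_thresholds=11)
with tf.Session() as sess:
  writer = tf.summary.FileWriter('/tmp/pr_demo', sess.graph)
  # The op evaluates to a serialized Summary proto string.
  writer.add_summary(sess.run(pr_op), global_step=0)
  writer.close()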
def pb(name,
       labels,
       predictions,
       num_thresholds=None,
       weights=None,
       display_name=None,
       description=None):

import tensorflow.compat.v1 as tf
if num_thresholds is None:
  num_thresholds = _DEFAULT_NUM_THRESHOLDS
if weights is None:
  weights = 1.0
bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))
float_labels = labels.astype(np.float)
histogram_range = (0, num_thresholds - 1)
tp_buckets, _ = np.histogram(
    bucket_indices,
    bins=num_thresholds,
    range=histogram_range,
    weights=float_labels * weights)
fp_buckets, _ = np.histogram(
    bucket_indices,
    bins=num_thresholds,
    range=histogram_range,
    weights=(1.0 - float_labels) * weights)
tp = np.cumsum(tp_buckets[::-1])[::-1]
fp = np.cumsum(fp_buckets[::-1])[::-1]
tn = fp[0] - fp
fn = tp[0] - tp
precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp)
recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn)
return raw_data_pb(name,
                   true_positive_counts=tp,
                   false_positive_counts=fp,
                   true_negative_counts=tn,
                   false_negative_counts=fn,
                   precision=precision,
                   recall=recall,
                   num_thresholds=num_thresholds,
                   display_name=display_name,
                   description=description)

Create a PR curves summary protobuf.

Arguments:
  name: A name for the generated node. Will also serve as a series name
    in TensorBoard.
  labels: The ground truth values. A bool numpy array.
  predictions: A float32 numpy array whose values are in the range
    `[0, 1]`. Dimensions must match those of `labels`.
  num_thresholds: Optional number of thresholds, evenly distributed in
    `[0, 1]`, to compute PR metrics for. When provided, should be an
    int of value at least 2. Defaults to 201.
  weights: Optional float or float32 numpy array. Individual counts are
    multiplied by this value. This tensor must be either the same shape
    as or broadcastable to the `labels` numpy array.
  display_name: Optional name for this summary in TensorBoard, as a
    `str`. Defaults to `name`.
  description: Optional long-form description for this summary, as a
    `str`. Markdown is supported. Defaults to empty.

Returns:
  A `tf.Summary` protobuf object containing the PR curve data.
f7986:m1
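A session-free usage sketch for the protobuf variant, under the same module-path assumption:

import numpy as np
from tensorboard.plugins.pr_curve import summary as pr_summary

labels = np.array([True, False, True, False])
predictions = np.array([0.9, 0.8, 0.4, 0.1], dtype=np.float32)
pb = pr_summary.pb('pr', labels, predictions, num_thresholds=11)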
def streaming_op(name,
                 labels,
                 predictions,
                 num_thresholds=None,
                 weights=None,
                 metrics_collections=None,
                 updates_collections=None,
                 display_name=None,
                 description=None):

import tensorflow.compat.v1 as tf
if num_thresholds is None:
  num_thresholds = _DEFAULT_NUM_THRESHOLDS
thresholds = [i / float(num_thresholds - 1)
              for i in range(num_thresholds)]
with tf.name_scope(name, values=[labels, predictions, weights]):
  tp, update_tp = tf.metrics.true_positives_at_thresholds(
      labels=labels,
      predictions=predictions,
      thresholds=thresholds,
      weights=weights)
  fp, update_fp = tf.metrics.false_positives_at_thresholds(
      labels=labels,
      predictions=predictions,
      thresholds=thresholds,
      weights=weights)
  tn, update_tn = tf.metrics.true_negatives_at_thresholds(
      labels=labels,
      predictions=predictions,
      thresholds=thresholds,
      weights=weights)
  fn, update_fn = tf.metrics.false_negatives_at_thresholds(
      labels=labels,
      predictions=predictions,
      thresholds=thresholds,
      weights=weights)

  def compute_summary(tp, fp, tn, fn, collections):
    precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp)
    recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn)
    return _create_tensor_summary(
        name,
        tp,
        fp,
        tn,
        fn,
        precision,
        recall,
        num_thresholds,
        display_name,
        description,
        collections)

  pr_curve = compute_summary(tp, fp, tn, fn, metrics_collections)
  update_op = tf.group(update_tp, update_fp, update_tn, update_fn)
  if updates_collections:
    for collection in updates_collections:
      tf.add_to_collection(collection, update_op)
  return pr_curve, update_op

Computes a precision-recall curve summary across batches of data.

This function is similar to op() above, but can be used to compute the
PR curve across multiple batches of labels and predictions, in the same
style as the metrics found in tf.metrics.

This function creates multiple local variables for storing true
positives, true negatives, etc. accumulated over each batch of data,
and uses these local variables for computing the final PR curve
summary. These variables can be updated with the returned update_op.

Args:
  name: A tag attached to the summary. Used by TensorBoard for
    organization.
  labels: The ground truth values, a `Tensor` whose dimensions must
    match `predictions`. Will be cast to `bool`.
  predictions: A floating point `Tensor` of arbitrary shape and whose
    values are in the range `[0, 1]`.
  num_thresholds: The number of evenly spaced thresholds to generate
    for computing the PR curve. Defaults to 201.
  weights: Optional `Tensor` whose rank is either 0, or the same rank
    as `labels`, and must be broadcastable to `labels` (i.e., all
    dimensions must be either `1`, or the same as the corresponding
    `labels` dimension).
  metrics_collections: An optional list of collections that `auc`
    should be added to.
  updates_collections: An optional list of collections that `update_op`
    should be added to.
  display_name: Optional name for this summary in TensorBoard, as a
    constant `str`. Defaults to `name`.
  description: Optional long-form description for this summary, as a
    constant `str`. Markdown is supported. Defaults to empty.

Returns:
  pr_curve: A string `Tensor` containing a single value: the serialized
    PR curve Tensor summary. The summary contains a float32 `Tensor` of
    dimension (6, num_thresholds). The first dimension (of length 6) is
    of the order: true positives, false positives, true negatives,
    false negatives, precision, recall.
  update_op: An operation that updates the summary with the latest data.
f7986:m2
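A sketch of accumulating the curve across batches, under the same module-path assumption; `batches` here is a hypothetical iterable of (labels, predictions) pairs:

import tensorflow.compat.v1 as tf
from tensorboard.plugins.pr_curve import summary as pr_summary

tf.disable_eager_execution()
labels = tf.placeholder(tf.bool, [None])
predictions = tf.placeholder(tf.float32, [None])
pr_curve, update_op = pr_summary.streaming_op(
    'pr', labels, predictions, num_thresholds=11)
with tf.Session() as sess:
  # The tf.metrics accumulators live in local variables.
  sess.run(tf.local_variables_initializer())
  for batch_labels, batch_preds in batches:  # `batches`: hypothetical data
    sess.run(update_op, {labels: batch_labels, predictions: batch_preds})
  serialized_summary = sess.run(pr_curve)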
def raw_data_op(
    name,
    true_positive_counts,
    false_positive_counts,
    true_negative_counts,
    false_negative_counts,
    precision,
    recall,
    num_thresholds=None,
    display_name=None,
    description=None,
    collections=None):

import tensorflow.compat.v1 as tf
with tf.name_scope(name, values=[
    true_positive_counts,
    false_positive_counts,
    true_negative_counts,
    false_negative_counts,
    precision,
    recall,
]):
  return _create_tensor_summary(
      name,
      true_positive_counts,
      false_positive_counts,
      true_negative_counts,
      false_negative_counts,
      precision,
      recall,
      num_thresholds,
      display_name,
      description,
      collections)

Create an op that collects data for visualizing PR curves.

Unlike the op above, this one avoids computing precision, recall, and
the intermediate counts. Instead, it accepts those tensors as arguments
and relies on the caller to ensure that the calculations are correct
(and the counts yield the provided precision and recall values).

This op is useful when a caller seeks to compute precision and recall
differently but still use the PR curves plugin.

Args:
  name: A tag attached to the summary. Used by TensorBoard for
    organization.
  true_positive_counts: A rank-1 tensor of true positive counts. Must
    contain `num_thresholds` elements and be castable to float32.
    Values correspond to thresholds that increase from left to right
    (from 0 to 1).
  false_positive_counts: A rank-1 tensor of false positive counts. Must
    contain `num_thresholds` elements and be castable to float32.
    Values correspond to thresholds that increase from left to right
    (from 0 to 1).
  true_negative_counts: A rank-1 tensor of true negative counts. Must
    contain `num_thresholds` elements and be castable to float32.
    Values correspond to thresholds that increase from left to right
    (from 0 to 1).
  false_negative_counts: A rank-1 tensor of false negative counts. Must
    contain `num_thresholds` elements and be castable to float32.
    Values correspond to thresholds that increase from left to right
    (from 0 to 1).
  precision: A rank-1 tensor of precision values. Must contain
    `num_thresholds` elements and be castable to float32. Values
    correspond to thresholds that increase from left to right (from 0
    to 1).
  recall: A rank-1 tensor of recall values. Must contain
    `num_thresholds` elements and be castable to float32. Values
    correspond to thresholds that increase from left to right (from 0
    to 1).
  num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`,
    to compute PR metrics for. Should be `>= 2`. This value should be a
    constant integer value, not a Tensor that stores an integer.
  display_name: Optional name for this summary in TensorBoard, as a
    constant `str`. Defaults to `name`.
  description: Optional long-form description for this summary, as a
    constant `str`. Markdown is supported. Defaults to empty.
  collections: Optional list of graph collections keys. The new summary
    op is added to these collections. Defaults to
    `[GraphKeys.SUMMARIES]`.

Returns:
  A summary operation for use in a TensorFlow graph. See docs for the
  `op` method for details on the float32 tensor produced by this
  summary.
f7986:m3
def raw_data_pb(
    name,
    true_positive_counts,
    false_positive_counts,
    true_negative_counts,
    false_negative_counts,
    precision,
    recall,
    num_thresholds=None,
    display_name=None,
    description=None):

import tensorflow.compat.v1 as tf
if display_name is None:
  display_name = name
summary_metadata = metadata.create_summary_metadata(
    display_name=display_name if display_name is not None else name,
    description=description or '<STR_LIT>',
    num_thresholds=num_thresholds)
tf_summary_metadata = tf.SummaryMetadata.FromString(
    summary_metadata.SerializeToString())
summary = tf.Summary()
data = np.stack(
    (true_positive_counts,
     false_positive_counts,
     true_negative_counts,
     false_negative_counts,
     precision,
     recall))
tensor = tf.make_tensor_proto(np.float32(data), dtype=tf.float32)
summary.value.add(tag='<STR_LIT>' % name,
                  metadata=tf_summary_metadata,
                  tensor=tensor)
return summary

Create a PR curves summary protobuf from raw data values.

Args:
  name: A tag attached to the summary. Used by TensorBoard for
    organization.
  true_positive_counts: A rank-1 numpy array of true positive counts.
    Must contain `num_thresholds` elements and be castable to float32.
  false_positive_counts: A rank-1 numpy array of false positive counts.
    Must contain `num_thresholds` elements and be castable to float32.
  true_negative_counts: A rank-1 numpy array of true negative counts.
    Must contain `num_thresholds` elements and be castable to float32.
  false_negative_counts: A rank-1 numpy array of false negative counts.
    Must contain `num_thresholds` elements and be castable to float32.
  precision: A rank-1 numpy array of precision values. Must contain
    `num_thresholds` elements and be castable to float32.
  recall: A rank-1 numpy array of recall values. Must contain
    `num_thresholds` elements and be castable to float32.
  num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`,
    to compute PR metrics for. Should be an int `>= 2`.
  display_name: Optional name for this summary in TensorBoard, as a
    `str`. Defaults to `name`.
  description: Optional long-form description for this summary, as a
    `str`. Markdown is supported. Defaults to empty.

Returns:
  A `tf.Summary` protobuf object containing the PR curve data. See docs
  for the `op` method for details on the float32 tensor it stores.
f7986:m4
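A usage sketch with hand-built counts, under the same module-path assumption; the precision and recall values are derived so they stay consistent with the counts:

import numpy as np
from tensorboard.plugins.pr_curve import summary as pr_summary

# Counts at 3 thresholds; tp + fn is constant (total positives) and
# tp + fp + tn + fn is constant (total examples), as required.
tp = np.array([4.0, 2.0, 1.0])
fp = np.array([3.0, 1.0, 0.0])
tn = np.array([0.0, 2.0, 3.0])
fn = np.array([0.0, 2.0, 3.0])
precision = tp / np.maximum(1.0, tp + fp)
recall = tp / np.maximum(1.0, tp + fn)
pb = pr_summary.raw_data_pb('pr_raw', tp, fp, tn, fn, precision, recall,
                            num_thresholds=3)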
def _create_tensor_summary(
    name,
    true_positive_counts,
    false_positive_counts,
    true_negative_counts,
    false_negative_counts,
    precision,
    recall,
    num_thresholds=None,
    display_name=None,
    description=None,
    collections=None):

import tensorflow.compat.v1 as tf
summary_metadata = metadata.create_summary_metadata(
    display_name=display_name if display_name is not None else name,
    description=description or '<STR_LIT>',
    num_thresholds=num_thresholds)
combined_data = tf.stack([
    tf.cast(true_positive_counts, tf.float32),
    tf.cast(false_positive_counts, tf.float32),
    tf.cast(true_negative_counts, tf.float32),
    tf.cast(false_negative_counts, tf.float32),
    tf.cast(precision, tf.float32),
    tf.cast(recall, tf.float32)])
return tf.summary.tensor_summary(
    name='<STR_LIT>',
    tensor=combined_data,
    collections=collections,
    summary_metadata=summary_metadata)

A private helper method for generating a tensor summary.

We use a helper method instead of having `op` directly call
`raw_data_op` to prevent the scope of `raw_data_op` from being embedded
within `op`.

Arguments are the same as for raw_data_op.

Returns:
  A tensor summary that collects data for PR curves.
f7986:m5
def start_runs(<EOL>logdir,<EOL>steps,<EOL>run_name,<EOL>thresholds,<EOL>mask_every_other_prediction=False):
tf.compat.v1.reset_default_graph()<EOL>tf.compat.v1.set_random_seed(<NUM_LIT>)<EOL>distribution = tf.compat.v1.distributions.Normal(loc=<NUM_LIT:0.>, scale=<NUM_LIT>)<EOL>number_of_reds = <NUM_LIT:100><EOL>true_reds = tf.clip_by_value(<EOL>tf.concat([<EOL><NUM_LIT:255> - tf.abs(distribution.sample([number_of_reds, <NUM_LIT:1>], seed=<NUM_LIT:11>)),<EOL>tf.abs(distribution.sample([number_of_reds, <NUM_LIT:2>], seed=<NUM_LIT>))<EOL>], axis=<NUM_LIT:1>),<EOL><NUM_LIT:0>, <NUM_LIT:255>)<EOL>number_of_greens = <NUM_LIT:200><EOL>true_greens = tf.clip_by_value(<EOL>tf.concat([<EOL>tf.abs(distribution.sample([number_of_greens, <NUM_LIT:1>], seed=<NUM_LIT>)),<EOL><NUM_LIT:255> - tf.abs(distribution.sample([number_of_greens, <NUM_LIT:1>], seed=<NUM_LIT>)),<EOL>tf.abs(distribution.sample([number_of_greens, <NUM_LIT:1>], seed=<NUM_LIT>))<EOL>], axis=<NUM_LIT:1>),<EOL><NUM_LIT:0>, <NUM_LIT:255>)<EOL>number_of_blues = <NUM_LIT><EOL>true_blues = tf.clip_by_value(<EOL>tf.concat([<EOL>tf.abs(distribution.sample([number_of_blues, <NUM_LIT:2>], seed=<NUM_LIT>)),<EOL><NUM_LIT:255> - tf.abs(distribution.sample([number_of_blues, <NUM_LIT:1>], seed=<NUM_LIT>))<EOL>], axis=<NUM_LIT:1>),<EOL><NUM_LIT:0>, <NUM_LIT:255>)<EOL>labels = tf.concat([<EOL>tf.tile(tf.constant([[True, False, False]]), (number_of_reds, <NUM_LIT:1>)),<EOL>tf.tile(tf.constant([[False, True, False]]), (number_of_greens, <NUM_LIT:1>)),<EOL>tf.tile(tf.constant([[False, False, True]]), (number_of_blues, <NUM_LIT:1>)),<EOL>], axis=<NUM_LIT:0>)<EOL>initial_standard_deviations = [v + FLAGS.steps for v in (<NUM_LIT>, <NUM_LIT:200>, <NUM_LIT>)]<EOL>iteration = tf.compat.v1.placeholder(tf.int32, shape=[])<EOL>red_predictor = tf.compat.v1.distributions.Normal(<EOL>loc=<NUM_LIT:0.>,<EOL>scale=tf.cast(<EOL>initial_standard_deviations[<NUM_LIT:0>] - iteration,<EOL>dtype=tf.float32))<EOL>green_predictor = tf.compat.v1.distributions.Normal(<EOL>loc=<NUM_LIT:0.>,<EOL>scale=tf.cast(<EOL>initial_standard_deviations[<NUM_LIT:1>] - iteration,<EOL>dtype=tf.float32))<EOL>blue_predictor = tf.compat.v1.distributions.Normal(<EOL>loc=<NUM_LIT:0.>,<EOL>scale=tf.cast(<EOL>initial_standard_deviations[<NUM_LIT:2>] - iteration,<EOL>dtype=tf.float32))<EOL>examples = tf.concat([true_reds, true_greens, true_blues], axis=<NUM_LIT:0>)<EOL>probabilities_colors_are_red = (<NUM_LIT:1> - red_predictor.cdf(<EOL>tf.norm(tensor=examples - tf.constant([<NUM_LIT>, <NUM_LIT:0>, <NUM_LIT:0>]), axis=<NUM_LIT:1>))) * <NUM_LIT:2><EOL>probabilities_colors_are_green = (<NUM_LIT:1> - green_predictor.cdf(<EOL>tf.norm(tensor=examples - tf.constant([<NUM_LIT:0>, <NUM_LIT>, <NUM_LIT:0>]), axis=<NUM_LIT:1>))) * <NUM_LIT:2><EOL>probabilities_colors_are_blue = (<NUM_LIT:1> - blue_predictor.cdf(<EOL>tf.norm(tensor=examples - tf.constant([<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT>]), axis=<NUM_LIT:1>))) * <NUM_LIT:2><EOL>predictions = (<EOL>probabilities_colors_are_red,<EOL>probabilities_colors_are_green,<EOL>probabilities_colors_are_blue<EOL>)<EOL>for i, color in enumerate(('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')):<EOL><INDENT>description = ('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>initial_standard_deviations[i])<EOL>weights = None<EOL>if mask_every_other_prediction:<EOL><INDENT>consecutive_indices = tf.reshape(<EOL>tf.range(tf.size(input=predictions[i])), tf.shape(input=predictions[i]))<EOL>weights = tf.cast(consecutive_indices % <NUM_LIT:2>, dtype=tf.float32)<EOL><DEDENT>summary.op(<EOL>name=color,<EOL>labels=labels[:, i],<EOL>predictions=predictions[i],<EOL>num_thresholds=thresholds,<EOL>weights=weights,<EOL>display_name='<STR_LIT>' % color,<EOL>description=description)<EOL><DEDENT>merged_summary_op = tf.compat.v1.summary.merge_all()<EOL>events_directory = os.path.join(logdir, run_name)<EOL>sess = tf.compat.v1.Session()<EOL>writer = tf.compat.v1.summary.FileWriter(events_directory, sess.graph)<EOL>for step in xrange(steps):<EOL><INDENT>feed_dict = {<EOL>iteration: step,<EOL>}<EOL>merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict)<EOL>writer.add_summary(merged_summary, step)<EOL><DEDENT>writer.close()<EOL>
Generate a PR curve with precision and recall evenly weighted. Arguments: logdir: The directory into which to store all the runs' data. steps: The number of steps to run for. run_name: The name of the run. thresholds: The number of thresholds to use for PR curves. mask_every_other_prediction: Whether to mask every other prediction by alternating weights between 0 and 1.
f7987:m0
def run_all(logdir, steps, thresholds, verbose=False):
<EOL>run_name = '<STR_LIT>'<EOL>if verbose:<EOL><INDENT>print('<STR_LIT>' % run_name)<EOL><DEDENT>start_runs(<EOL>logdir=logdir,<EOL>steps=steps,<EOL>run_name=run_name,<EOL>thresholds=thresholds)<EOL>run_name = '<STR_LIT>'<EOL>if verbose:<EOL><INDENT>print('<STR_LIT>' % run_name)<EOL><DEDENT>start_runs(<EOL>logdir=logdir,<EOL>steps=steps,<EOL>run_name=run_name,<EOL>thresholds=thresholds,<EOL>mask_every_other_prediction=True)<EOL>
Generate PR curve summaries. Arguments: logdir: The directory into which to store all the runs' data. steps: The number of steps to run for. thresholds: The number of thresholds to use for PR curves. verbose: Whether to print the names of runs into stdout during execution.
f7987:m1
def create_summary_metadata(display_name, description, num_thresholds):
pr_curve_plugin_data = plugin_data_pb2.PrCurvePluginData(<EOL>version=PROTO_VERSION, num_thresholds=num_thresholds)<EOL>content = pr_curve_plugin_data.SerializeToString()<EOL>return summary_pb2.SummaryMetadata(<EOL>display_name=display_name,<EOL>summary_description=description,<EOL>plugin_data=summary_pb2.SummaryMetadata.PluginData(<EOL>plugin_name=PLUGIN_NAME,<EOL>content=content))<EOL>
Create a `summary_pb2.SummaryMetadata` proto for pr_curves plugin data. Arguments: display_name: The display name used in TensorBoard. description: The description to show in TensorBoard. num_thresholds: The number of thresholds to use for PR curves. Returns: A `summary_pb2.SummaryMetadata` protobuf object.
f7988:m0
def parse_plugin_metadata(content):
if not isinstance(content, bytes):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>result = plugin_data_pb2.PrCurvePluginData.FromString(content)<EOL>if result.version == <NUM_LIT:0>:<EOL><INDENT>return result<EOL><DEDENT>else:<EOL><INDENT>logger.warn(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>', result.version, PROTO_VERSION)<EOL>return result<EOL><DEDENT>
Parse summary metadata to a Python object. Arguments: content: The `content` field of a `SummaryMetadata` proto corresponding to the pr_curves plugin. Returns: A `PrCurvePluginData` protobuf object.
f7988:m1
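A minimal sketch of the round trip between these two helpers, assuming the module above is imported as `metadata`:

md = metadata.create_summary_metadata(
    display_name='my curve',
    description='demo',
    num_thresholds=201)
# The plugin content travels as opaque bytes inside the SummaryMetadata;
# parse_plugin_metadata recovers the PrCurvePluginData message from it.
plugin_data = metadata.parse_plugin_metadata(md.plugin_data.content)
assert plugin_data.num_thresholds == 201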
def normalize_summary_pb(self, pb):
result = summary_pb2.Summary()<EOL>if not isinstance(pb, summary_pb2.Summary):<EOL><INDENT>pb = test_util.ensure_tb_summary_proto(pb)<EOL><DEDENT>result.MergeFrom(pb)<EOL>for value in result.value:<EOL><INDENT>if value.HasField('<STR_LIT>'):<EOL><INDENT>new_tensor = tensor_util.make_tensor_proto(<EOL>tensor_util.make_ndarray(value.tensor))<EOL>value.ClearField('<STR_LIT>')<EOL>value.tensor.MergeFrom(new_tensor)<EOL><DEDENT><DEDENT>return result<EOL>
Pass `pb`'s `TensorProto` through a marshalling roundtrip. `TensorProto`s can be equal in value even if they are not identical in representation, because data can be stored in either the `tensor_content` field or the `${dtype}_value` field. This normalization ensures a canonical form, and should be used before comparing two `Summary`s for equality.
f7989:c0:m2
def compute_and_check_summary_pb(self,<EOL>name,<EOL>labels,<EOL>predictions,<EOL>num_thresholds,<EOL>weights=None,<EOL>display_name=None,<EOL>description=None,<EOL>feed_dict=None):
labels_tensor = tf.constant(labels)<EOL>predictions_tensor = tf.constant(predictions)<EOL>weights_tensor = None if weights is None else tf.constant(weights)<EOL>op = summary.op(<EOL>name=name,<EOL>labels=labels_tensor,<EOL>predictions=predictions_tensor,<EOL>num_thresholds=num_thresholds,<EOL>weights=weights_tensor,<EOL>display_name=display_name,<EOL>description=description)<EOL>pb = self.normalize_summary_pb(summary.pb(<EOL>name=name,<EOL>labels=labels,<EOL>predictions=predictions,<EOL>num_thresholds=num_thresholds,<EOL>weights=weights,<EOL>display_name=display_name,<EOL>description=description))<EOL>pb_via_op = self.normalize_summary_pb(<EOL>self.pb_via_op(op, feed_dict=feed_dict))<EOL>self.assertProtoEquals(pb, pb_via_op)<EOL>return pb<EOL>
Use both `op` and `pb` to get a summary, asserting equality. Returns: a `Summary` protocol buffer
f7989:c0:m3
def validatePrCurveEntry(<EOL>self,<EOL>expected_step,<EOL>expected_precision,<EOL>expected_recall,<EOL>expected_true_positives,<EOL>expected_false_positives,<EOL>expected_true_negatives,<EOL>expected_false_negatives,<EOL>expected_thresholds,<EOL>pr_curve_entry):
self.assertEqual(expected_step, pr_curve_entry['<STR_LIT>'])<EOL>assert_allclose(expected_precision, pr_curve_entry['<STR_LIT>'])<EOL>assert_allclose(expected_recall, pr_curve_entry['<STR_LIT>'])<EOL>self.assertListEqual(<EOL>expected_true_positives, pr_curve_entry['<STR_LIT>'])<EOL>self.assertListEqual(<EOL>expected_false_positives, pr_curve_entry['<STR_LIT>'])<EOL>self.assertListEqual(<EOL>expected_true_negatives, pr_curve_entry['<STR_LIT>'])<EOL>self.assertListEqual(<EOL>expected_false_negatives, pr_curve_entry['<STR_LIT>'])<EOL>assert_allclose(expected_thresholds, pr_curve_entry['<STR_LIT>'])<EOL>
Checks that the values stored within a tensor are correct. Args: expected_step: The expected step. expected_precision: A list of float values. expected_recall: A list of float values. expected_true_positives: A list of int values. expected_false_positives: A list of int values. expected_true_negatives: A list of int values. expected_false_negatives: A list of int values. expected_thresholds: A list of floats ranging from 0 to 1. pr_curve_entry: The PR curve entry to evaluate.
f7990:c0:m1
def computeCorrectDescription(self, standard_deviation):
description = ('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>') % standard_deviation<EOL>return description<EOL>
Generates a correct description. Arguments: standard_deviation: An integer standard deviation value. Returns: The correct description given a standard deviation value.
f7990:c0:m2
def run_tag_from_session_and_metric(session_name, metric_name):
assert isinstance(session_name, six.string_types)<EOL>assert isinstance(metric_name, api_pb2.MetricName)<EOL>run = os.path.normpath(os.path.join(session_name, metric_name.group))<EOL>tag = metric_name.tag<EOL>return run, tag<EOL>
Returns a (run,tag) tuple storing the evaluations of the specified metric. Args: session_name: str. metric_name: MetricName protobuffer. Returns: (run, tag) tuple.
f7992:m0
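For illustration, a session name and metric name map to a (run, tag) pair like so (POSIX path separators assumed):

from tensorboard.plugins.hparams import api_pb2

name = api_pb2.MetricName(group='eval', tag='loss')
run, tag = run_tag_from_session_and_metric('exp/session1', name)
# run == 'exp/session1/eval' (normalized), tag == 'loss'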
def last_metric_eval(multiplexer, session_name, metric_name):
try:<EOL><INDENT>run, tag = run_tag_from_session_and_metric(session_name, metric_name)<EOL>tensor_events = multiplexer.Tensors(run=run, tag=tag)<EOL><DEDENT>except KeyError as e:<EOL><INDENT>raise KeyError(<EOL>'<STR_LIT>'<EOL>% (metric_name, session_name, e))<EOL><DEDENT>last_event = tensor_events[-<NUM_LIT:1>]<EOL>return (last_event.wall_time,<EOL>last_event.step,<EOL>tf.make_ndarray(last_event.tensor_proto).item())<EOL>
Returns the last evaluations of the given metric at the given session. Args: multiplexer: The EventMultiplexer instance allowing access to the exported summary data. session_name: String. The session name for which to get the metric evaluations. metric_name: api_pb2.MetricName proto. The name of the metric to use. Returns: A 3-tuple of the form (wall_time, step, value) denoting the last evaluation of the metric, where wall_time denotes the wall time in seconds since UNIX epoch of the time of the evaluation, step denotes the training step at which the model is evaluated, and value denotes the (scalar real) value of the metric. Raises: KeyError if the given session does not have the metric.
f7992:m1
def _find_longest_parent_path(path_set, path):
<EOL>while path not in path_set:<EOL><INDENT>if not path:<EOL><INDENT>return None<EOL><DEDENT>path = os.path.dirname(path)<EOL><DEDENT>return path<EOL>
Finds the longest "parent-path" of 'path' in 'path_set'. This function takes and returns "path-like" strings which are strings made of strings separated by os.sep. No file access is performed here, so these strings need not correspond to actual files in some file-system.. This function returns the longest ancestor path For example, for path_set=["/foo/bar", "/foo", "/bar/foo"] and path="/foo/bar/sub_dir", returns "/foo/bar". Args: path_set: set of path-like strings -- e.g. a list of strings separated by os.sep. No actual disk-access is performed here, so these need not correspond to actual files. path: a path-like string. Returns: The element in path_set which is the longest parent directory of 'path'.
f7994:m0
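The docstring's example, restated as executable checks:

path_set = {"/foo/bar", "/foo", "/bar/foo"}
assert _find_longest_parent_path(path_set, "/foo/bar/sub_dir") == "/foo/bar"
# A relative path with no parent in the set bottoms out at "" and yields None.
assert _find_longest_parent_path({"foo"}, "bar/baz") is None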
def _protobuf_value_type(value):
if value.HasField("<STR_LIT>"):<EOL><INDENT>return api_pb2.DATA_TYPE_FLOAT64<EOL><DEDENT>if value.HasField("<STR_LIT>"):<EOL><INDENT>return api_pb2.DATA_TYPE_STRING<EOL><DEDENT>if value.HasField("<STR_LIT>"):<EOL><INDENT>return api_pb2.DATA_TYPE_BOOL<EOL><DEDENT>return None<EOL>
Returns the type of the google.protobuf.Value message as an api.DataType. Args: value: google.protobuf.Value message. Returns: The api_pb2.DataType corresponding to 'value', or None if the type of 'value' is not one of the types supported in api_pb2.DataType.
f7994:m1
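The string literals are elided in the body above, but the returned types imply they are the 'number_value', 'string_value' and 'bool_value' fields of the Value oneof. A quick illustration under that assumption:

from google.protobuf import struct_pb2
from tensorboard.plugins.hparams import api_pb2

assert _protobuf_value_type(struct_pb2.Value(number_value=1.5)) == api_pb2.DATA_TYPE_FLOAT64
assert _protobuf_value_type(struct_pb2.Value(string_value='a')) == api_pb2.DATA_TYPE_STRING
assert _protobuf_value_type(struct_pb2.Value(bool_value=True)) == api_pb2.DATA_TYPE_BOOL
assert _protobuf_value_type(struct_pb2.Value()) is None  # no kind set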
def _protobuf_value_to_string(value):
value_in_json = json_format.MessageToJson(value)<EOL>if value.HasField("<STR_LIT>"):<EOL><INDENT>return value_in_json[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT>return value_in_json<EOL>
Returns a string representation of given google.protobuf.Value message. Args: value: google.protobuf.Value message. Assumed to be of type 'number', 'string' or 'bool'.
f7994:m2
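The quote-stripping branch only fires for string values, since MessageToJson renders a string Value with surrounding JSON quotes:

from google.protobuf import struct_pb2, json_format

v = struct_pb2.Value(string_value='hello')
json_format.MessageToJson(v)  # '"hello"' -- quotes included
# _protobuf_value_to_string(v) therefore returns 'hello';
# numbers and bools pass through unquoted, e.g. '1.5' or 'true'.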
def __init__(self,<EOL>tb_context,<EOL>max_domain_discrete_len=<NUM_LIT:10>):
self._tb_context = tb_context<EOL>self._experiment_from_tag = None<EOL>self._experiment_from_tag_lock = threading.Lock()<EOL>self._max_domain_discrete_len = max_domain_discrete_len<EOL>
Instantiates a context. Args: tb_context: base_plugin.TBContext. The "base" context we extend. max_domain_discrete_len: int. Only used when computing the experiment from the session runs. The maximum number of distinct values a string hyperparameter can have for us to populate its 'domain_discrete' field. Typically, only tests should specify a value for this parameter.
f7994:c0:m0
def experiment(self):
experiment = self._find_experiment_tag()<EOL>if experiment is None:<EOL><INDENT>return self._compute_experiment_from_runs()<EOL><DEDENT>return experiment<EOL>
Returns the experiment protobuffer defining the experiment. This method first attempts to find a metadata.EXPERIMENT_TAG tag and retrieve the associated protobuffer. If no such tag is found, the method will attempt to build a minimal experiment protobuffer by scanning for all metadata.SESSION_START_INFO_TAG tags (to compute the hparam_infos field of the experiment) and for all scalar tags (to compute the metric_infos field of the experiment). Returns: The experiment protobuffer. If no tags are found from which an experiment protobuffer can be built (possibly, because the event data has not been completely loaded yet), returns None.
f7994:c0:m1
def _find_experiment_tag(self):
with self._experiment_from_tag_lock:<EOL><INDENT>if self._experiment_from_tag is None:<EOL><INDENT>mapping = self.multiplexer.PluginRunToTagToContent(<EOL>metadata.PLUGIN_NAME)<EOL>for tag_to_content in mapping.values():<EOL><INDENT>if metadata.EXPERIMENT_TAG in tag_to_content:<EOL><INDENT>self._experiment_from_tag = metadata.parse_experiment_plugin_data(<EOL>tag_to_content[metadata.EXPERIMENT_TAG])<EOL>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return self._experiment_from_tag<EOL>
Finds the experiment associated with the metadata.EXPERIMENT_TAG tag. Caches the experiment if it was found. Returns: The experiment or None if no such experiment is found.
f7994:c0:m4
def _compute_experiment_from_runs(self):
hparam_infos = self._compute_hparam_infos()<EOL>if not hparam_infos:<EOL><INDENT>return None<EOL><DEDENT>metric_infos = self._compute_metric_infos()<EOL>return api_pb2.Experiment(hparam_infos=hparam_infos,<EOL>metric_infos=metric_infos)<EOL>
Computes a minimal Experiment protocol buffer by scanning the runs.
f7994:c0:m5
def _compute_hparam_infos(self):
run_to_tag_to_content = self.multiplexer.PluginRunToTagToContent(<EOL>metadata.PLUGIN_NAME)<EOL>hparams = collections.defaultdict(list)<EOL>for tag_to_content in run_to_tag_to_content.values():<EOL><INDENT>if metadata.SESSION_START_INFO_TAG not in tag_to_content:<EOL><INDENT>continue<EOL><DEDENT>start_info = metadata.parse_session_start_info_plugin_data(<EOL>tag_to_content[metadata.SESSION_START_INFO_TAG])<EOL>for (name, value) in six.iteritems(start_info.hparams):<EOL><INDENT>hparams[name].append(value)<EOL><DEDENT><DEDENT>result = []<EOL>for (name, values) in six.iteritems(hparams):<EOL><INDENT>hparam_info = self._compute_hparam_info_from_values(name, values)<EOL>if hparam_info is not None:<EOL><INDENT>result.append(hparam_info)<EOL><DEDENT><DEDENT>return result<EOL>
Computes a list of api_pb2.HParamInfo from the current (run, tag) info. Finds all the SessionStartInfo messages and collects the hparams values appearing in each one. For each hparam attempts to deduce a type that fits all its values. Finally, sets the 'domain' of the resulting HParamInfo to be discrete if the type is string and the number of distinct values is small enough. Returns: A list of api_pb2.HParamInfo messages.
f7994:c0:m6
def _compute_hparam_info_from_values(self, name, values):
<EOL>result = api_pb2.HParamInfo(name=name, type=api_pb2.DATA_TYPE_UNSET)<EOL>distinct_values = set(<EOL>_protobuf_value_to_string(v) for v in values if _protobuf_value_type(v))<EOL>for v in values:<EOL><INDENT>v_type = _protobuf_value_type(v)<EOL>if not v_type:<EOL><INDENT>continue<EOL><DEDENT>if result.type == api_pb2.DATA_TYPE_UNSET:<EOL><INDENT>result.type = v_type<EOL><DEDENT>elif result.type != v_type:<EOL><INDENT>result.type = api_pb2.DATA_TYPE_STRING<EOL><DEDENT>if result.type == api_pb2.DATA_TYPE_STRING:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if result.type == api_pb2.DATA_TYPE_UNSET:<EOL><INDENT>return None<EOL><DEDENT>if (result.type == api_pb2.DATA_TYPE_STRING<EOL>and len(distinct_values) <= self._max_domain_discrete_len):<EOL><INDENT>result.domain_discrete.extend(distinct_values)<EOL><DEDENT>return result<EOL>
Builds an HParamInfo message from the hparam name and list of values. Args: name: string. The hparam name. values: list of google.protobuf.Value messages. The list of values for the hparam. Returns: An api_pb2.HParamInfo message, or None if no type could be deduced for any of the values.
f7994:c0:m7
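The widening rule above amounts to: the first typed value fixes the type, and any later conflict degrades to string. A rough illustration, assuming a backend_context.Context instance `ctx` (the method is private, so this is illustrative only):

from google.protobuf import struct_pb2

values = [struct_pb2.Value(number_value=1.0),
          struct_pb2.Value(string_value='large')]
info = ctx._compute_hparam_info_from_values('batch_size', values)
# Mixed number/string values widen to DATA_TYPE_STRING; with only two
# distinct values (<= max_domain_discrete_len), the discrete domain is
# populated with their string forms, here '1.0' and 'large'.
assert info.type == api_pb2.DATA_TYPE_STRING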
def _compute_metric_names(self):
session_runs = self._build_session_runs_set()<EOL>metric_names_set = set()<EOL>run_to_tag_to_content = self.multiplexer.PluginRunToTagToContent(<EOL>scalar_metadata.PLUGIN_NAME)<EOL>for (run, tag_to_content) in six.iteritems(run_to_tag_to_content):<EOL><INDENT>session = _find_longest_parent_path(session_runs, run)<EOL>if not session:<EOL><INDENT>continue<EOL><DEDENT>group = os.path.relpath(run, session)<EOL>if group == "<STR_LIT:.>":<EOL><INDENT>group = "<STR_LIT>"<EOL><DEDENT>metric_names_set.update((tag, group) for tag in tag_to_content.keys())<EOL><DEDENT>metric_names_list = list(metric_names_set)<EOL>metric_names_list.sort()<EOL>return metric_names_list<EOL>
Computes the list of metric names from all the scalar (run, tag) pairs. The return value is a list of (tag, group) pairs representing the metric names. The list is sorted in Python tuple-order (lexicographical). For example, if the scalar (run, tag) pairs are: ("exp/session1", "loss") ("exp/session2", "loss") ("exp/session2/eval", "loss") ("exp/session2/validation", "accuracy") ("exp/no-session", "loss_2"), and the runs corresponding to sessions are "exp/session1", "exp/session2", this method will return [("accuracy", "validation"), ("loss", ""), ("loss", "eval")]. More precisely, each scalar (run, tag) pair is converted to a (tag, group) metric name, where group is the suffix of run formed by removing the longest prefix which is a session run. If no session run is a prefix of 'run', the pair is skipped. Returns: A python list containing pairs. Each pair is a (tag, group) pair representing a metric name used in some session.
f7994:c0:m9
def model_fn(hparams, seed):
rng = random.Random(seed)<EOL>model = tf.keras.models.Sequential()<EOL>model.add(tf.keras.layers.Input(INPUT_SHAPE))<EOL>model.add(tf.keras.layers.Reshape(INPUT_SHAPE + (<NUM_LIT:1>,))) <EOL>conv_filters = <NUM_LIT:8><EOL>for _ in xrange(hparams[HP_CONV_LAYERS]):<EOL><INDENT>model.add(tf.keras.layers.Conv2D(<EOL>filters=conv_filters,<EOL>kernel_size=hparams[HP_CONV_KERNEL_SIZE],<EOL>padding="<STR_LIT>",<EOL>activation="<STR_LIT:relu>",<EOL>))<EOL>model.add(tf.keras.layers.MaxPool2D(pool_size=<NUM_LIT:2>, padding="<STR_LIT>"))<EOL>conv_filters *= <NUM_LIT:2><EOL><DEDENT>model.add(tf.keras.layers.Flatten())<EOL>model.add(tf.keras.layers.Dropout(hparams[HP_DROPOUT], seed=rng.random()))<EOL>dense_neurons = <NUM_LIT:32><EOL>for _ in xrange(hparams[HP_DENSE_LAYERS]):<EOL><INDENT>model.add(tf.keras.layers.Dense(dense_neurons, activation="<STR_LIT:relu>"))<EOL>dense_neurons *= <NUM_LIT:2><EOL><DEDENT>model.add(tf.keras.layers.Dense(OUTPUT_CLASSES, activation="<STR_LIT>"))<EOL>model.compile(<EOL>loss="<STR_LIT>",<EOL>optimizer=hparams[HP_OPTIMIZER],<EOL>metrics=["<STR_LIT>"],<EOL>)<EOL>return model<EOL>
Create a Keras model with the given hyperparameters. Args: hparams: A dict mapping hyperparameters in `HPARAMS` to values. seed: A hashable object to be used as a random seed (e.g., to construct dropout layers in the model). Returns: A compiled Keras model.
f7995:m0
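A hypothetical invocation; HP_CONV_LAYERS and friends are the module-level hp.HParam objects, and the chosen values must lie in their declared domains:

hparams = {
    HP_CONV_LAYERS: 2,       # two conv/pool blocks
    HP_CONV_KERNEL_SIZE: 3,
    HP_DENSE_LAYERS: 1,
    HP_DROPOUT: 0.2,
    HP_OPTIMIZER: 'adam',
}
model = model_fn(hparams, seed=0)
model.summary()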
def run(data, base_logdir, session_id, group_id, hparams):
model = model_fn(hparams=hparams, seed=session_id)<EOL>logdir = os.path.join(base_logdir, session_id)<EOL>callback = tf.keras.callbacks.TensorBoard(<EOL>logdir,<EOL>update_freq=flags.FLAGS.summary_freq,<EOL>profile_batch=<NUM_LIT:0>, <EOL>)<EOL>hparams_callback = hp.KerasCallback(logdir, hparams, group_name=group_id)<EOL>((x_train, y_train), (x_test, y_test)) = data<EOL>result = model.fit(<EOL>x=x_train,<EOL>y=y_train,<EOL>epochs=flags.FLAGS.num_epochs,<EOL>shuffle=False,<EOL>validation_data=(x_test, y_test),<EOL>callbacks=[callback, hparams_callback],<EOL>)<EOL>
Run a training/validation session. Flags must have been parsed for this function to behave. Args: data: The data as loaded by `prepare_data()`. base_logdir: The top-level logdir to which to write summary data. session_id: A unique string ID for this session. group_id: The string ID of the session group that includes this session. hparams: A dict mapping hyperparameters in `HPARAMS` to values.
f7995:m1
def prepare_data():
((x_train, y_train), (x_test, y_test)) = DATASET.load_data()<EOL>x_train = x_train.astype("<STR_LIT>")<EOL>x_test = x_test.astype("<STR_LIT>")<EOL>x_train /= <NUM_LIT><EOL>x_test /= <NUM_LIT><EOL>return ((x_train, y_train), (x_test, y_test))<EOL>
Load and normalize data.
f7995:m2
def run_all(logdir, verbose=False):
data = prepare_data()<EOL>rng = random.Random(<NUM_LIT:0>)<EOL>base_writer = tf.summary.create_file_writer(logdir)<EOL>with base_writer.as_default():<EOL><INDENT>experiment = hp.Experiment(hparams=HPARAMS, metrics=METRICS)<EOL>experiment_string = experiment.summary_pb().SerializeToString()<EOL>tf.summary.experimental.write_raw_pb(experiment_string, step=<NUM_LIT:0>)<EOL>base_writer.flush()<EOL><DEDENT>base_writer.close()<EOL>sessions_per_group = <NUM_LIT:2><EOL>num_sessions = flags.FLAGS.num_session_groups * sessions_per_group<EOL>session_index = <NUM_LIT:0> <EOL>for group_index in xrange(flags.FLAGS.num_session_groups):<EOL><INDENT>hparams = {h: sample_uniform(h.domain, rng) for h in HPARAMS}<EOL>hparams_string = str(hparams)<EOL>group_id = hashlib.sha256(hparams_string.encode("<STR_LIT:utf-8>")).hexdigest()<EOL>for repeat_index in xrange(sessions_per_group):<EOL><INDENT>session_id = str(session_index)<EOL>session_index += <NUM_LIT:1><EOL>if verbose:<EOL><INDENT>print(<EOL>"<STR_LIT>"<EOL>% (session_index, num_sessions)<EOL>)<EOL>print(hparams_string)<EOL>print("<STR_LIT>" % (repeat_index + <NUM_LIT:1>))<EOL><DEDENT>run(<EOL>data=data,<EOL>base_logdir=logdir,<EOL>session_id=session_id,<EOL>group_id=group_id,<EOL>hparams=hparams,<EOL>)<EOL><DEDENT><DEDENT>
Perform random search over the hyperparameter space. Arguments: logdir: The top-level directory into which to write data. This directory should be empty or nonexistent. verbose: If true, print out each run's name as it begins.
f7995:m3
def sample_uniform(domain, rng):
if isinstance(domain, hp.IntInterval):<EOL><INDENT>return rng.randint(domain.min_value, domain.max_value)<EOL><DEDENT>elif isinstance(domain, hp.RealInterval):<EOL><INDENT>return rng.uniform(domain.min_value, domain.max_value)<EOL><DEDENT>elif isinstance(domain, hp.Discrete):<EOL><INDENT>return rng.choice(domain.values)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError("<STR_LIT>" % (domain,))<EOL><DEDENT>
Sample a value uniformly from a domain. Args: domain: An `IntInterval`, `RealInterval`, or `Discrete` domain. rng: A `random.Random` object used as the source of randomness. Raises: TypeError: If `domain` is not a known kind of domain. IndexError: If the domain is empty.
f7995:m4
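Example draws, assuming the domain classes from the hparams API:

import random
from tensorboard.plugins.hparams import api as hp

rng = random.Random(0)
sample_uniform(hp.IntInterval(1, 4), rng)          # e.g. 3 (both ends inclusive)
sample_uniform(hp.RealInterval(0.0, 1.0), rng)     # e.g. 0.75...
sample_uniform(hp.Discrete(['adam', 'sgd']), rng)  # e.g. 'adam'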
def load(self, context):
try:<EOL><INDENT>import tensorflow<EOL><DEDENT>except ImportError:<EOL><INDENT>return<EOL><DEDENT>from tensorboard.plugins.hparams.hparams_plugin import HParamsPlugin<EOL>return HParamsPlugin(context)<EOL>
Returns the plugin, if possible. Args: context: The TBContext flags. Returns: A HParamsPlugin instance or None if it couldn't be loaded.
f7996:c0:m0
def _create_key_func(extractor, none_is_largest):
if none_is_largest:<EOL><INDENT>def key_func_none_is_largest(session_group):<EOL><INDENT>value = extractor(session_group)<EOL>return (value is None, value)<EOL><DEDENT>return key_func_none_is_largest<EOL><DEDENT>def key_func_none_is_smallest(session_group):<EOL><INDENT>value = extractor(session_group)<EOL>return (value is not None, value)<EOL><DEDENT>return key_func_none_is_smallest<EOL>
Returns a key_func to be used in list.sort() that sorts session groups by the value extracted by extractor. 'None' extracted values will either be considered largest or smallest as specified by the "none_is_largest" boolean parameter. Args: extractor: An extractor function that extracts the key from the session group. none_is_largest: bool. If true treats 'None's as largest; otherwise smallest.
f7997:m0
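The (is-None, value) tuple trick works because False sorts before True, so groups with a missing value land last when none_is_largest=True. Using plain dicts as stand-ins for SessionGroup protos:

key = _create_key_func(lambda g: g.get('loss'), none_is_largest=True)
groups = [{'loss': 0.3}, {'loss': None}, {'loss': 0.1}]
groups.sort(key=key)
# -> [{'loss': 0.1}, {'loss': 0.3}, {'loss': None}]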
def _create_extractors(col_params):
result = []<EOL>for col_param in col_params:<EOL><INDENT>result.append(_create_extractor(col_param))<EOL><DEDENT>return result<EOL>
Creates extractors to extract properties corresponding to 'col_params'. Args: col_params: List of ListSessionGroupsRequest.ColParam protobufs. Returns: A list of extractor functions. The ith element in the returned list extracts the column corresponding to the ith element of col_params.
f7997:m1
def _create_metric_extractor(metric_name):
def extractor_fn(session_or_group):<EOL><INDENT>metric_value = _find_metric_value(session_or_group,<EOL>metric_name)<EOL>return metric_value.value if metric_value else None<EOL><DEDENT>return extractor_fn<EOL>
Returns function that extracts a metric from a session group or a session. Args: metric_name: tensorboard.hparams.MetricName protobuffer. Identifies the metric to extract from the session group. Returns: A function that takes a tensorboard.hparams.SessionGroup or tensorboard.hparams.Session protobuffer and returns the value of the metric identified by 'metric_name' or None if the value doesn't exist.
f7997:m3
def _find_metric_value(session_or_group, metric_name):
<EOL>for metric_value in session_or_group.metric_values:<EOL><INDENT>if (metric_value.name.tag == metric_name.tag and<EOL>metric_value.name.group == metric_name.group):<EOL><INDENT>return metric_value<EOL><DEDENT><DEDENT>
Returns the metric_value for a given metric in a session or session group. Args: session_or_group: A Session protobuffer or SessionGroup protobuffer. metric_name: A MetricName protobuffer. The metric to search for. Returns: A MetricValue protobuffer representing the value of the given metric or None if no such metric was found in session_or_group.
f7997:m4
def _create_hparam_extractor(hparam_name):
def extractor_fn(session_group):<EOL><INDENT>if hparam_name in session_group.hparams:<EOL><INDENT>return _value_to_python(session_group.hparams[hparam_name])<EOL><DEDENT>return None<EOL><DEDENT>return extractor_fn<EOL>
Returns an extractor function that extracts an hparam from a session group. Args: hparam_name: str. Identifies the hparam to extract from the session group. Returns: A function that takes a tensorboard.hparams.SessionGroup protobuffer and returns the value, as a native Python object, of the hparam identified by 'hparam_name'.
f7997:m5
def _create_filters(col_params, extractors):
result = []<EOL>for col_param, extractor in zip(col_params, extractors):<EOL><INDENT>a_filter = _create_filter(col_param, extractor)<EOL>if a_filter:<EOL><INDENT>result.append(a_filter)<EOL><DEDENT><DEDENT>return result<EOL>
Creates filters for the given col_params. Args: col_params: List of ListSessionGroupsRequest.ColParam protobufs. extractors: list of extractor functions of the same length as col_params. Each element should extract the column described by the corresponding element of col_params. Returns: A list of filter functions, each corresponding to the 'filter' oneof field of the matching element of col_params.
f7997:m6
def _create_filter(col_param, extractor):
include_missing_values = not col_param.exclude_missing_values<EOL>if col_param.HasField('<STR_LIT>'):<EOL><INDENT>value_filter_fn = _create_regexp_filter(col_param.filter_regexp)<EOL><DEDENT>elif col_param.HasField('<STR_LIT>'):<EOL><INDENT>value_filter_fn = _create_interval_filter(col_param.filter_interval)<EOL><DEDENT>elif col_param.HasField('<STR_LIT>'):<EOL><INDENT>value_filter_fn = _create_discrete_set_filter(col_param.filter_discrete)<EOL><DEDENT>elif include_missing_values:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>value_filter_fn = lambda _: True<EOL><DEDENT>def filter_fn(session_group):<EOL><INDENT>value = extractor(session_group)<EOL>if value is None:<EOL><INDENT>return include_missing_values<EOL><DEDENT>return value_filter_fn(value)<EOL><DEDENT>return filter_fn<EOL>
Creates a filter for the given col_param and extractor. Args: col_param: A tensorboard.hparams.ColParams object identifying the column and describing the filter to apply. extractor: A function that extracts the column value identified by 'col_param' from a tensorboard.hparams.SessionGroup protobuffer. Returns: A boolean function taking a tensorboard.hparams.SessionGroup protobuffer returning True if the session group passes the filter described by 'col_param'. If col_param does not specify a filter (i.e. any session group passes) returns None.
f7997:m7
def _create_regexp_filter(regex):
<EOL>compiled_regex = re.compile(regex)<EOL>def filter_fn(value):<EOL><INDENT>if not isinstance(value, six.string_types):<EOL><INDENT>raise error.HParamsError(<EOL>'<STR_LIT>' %<EOL>(type(value), value))<EOL><DEDENT>return re.search(compiled_regex, value) is not None<EOL><DEDENT>return filter_fn<EOL>
Returns a boolean function that filters strings based on a regular expression. Args: regex: A string describing the regexp to use. Returns: A function taking a string and returning True if any of its substrings matches regex.
f7997:m8
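Because the filter uses re.search, the pattern matches anywhere in the string:

match_fn = _create_regexp_filter(r'momentum')
assert match_fn('high_momentum_run')   # substring match suffices
assert not match_fn('baseline')
# Passing a non-string value raises error.HParamsError.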
def _create_interval_filter(interval):
def filter_fn(value):<EOL><INDENT>if (not isinstance(value, six.integer_types) and<EOL>not isinstance(value, float)):<EOL><INDENT>raise error.HParamsError(<EOL>'<STR_LIT>' %<EOL>(type(value), value))<EOL><DEDENT>return interval.min_value <= value and value <= interval.max_value<EOL><DEDENT>return filter_fn<EOL>
Returns a function that checks whether a number belongs to an interval. Args: interval: A tensorboard.hparams.Interval protobuf describing the interval. Returns: A function taking a number (a float or an object of a type in six.integer_types) that returns True if the number belongs to (the closed) 'interval'.
f7997:m9
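A quick check of the closed-interval semantics, assuming the Interval message from the hparams API protos:

from tensorboard.plugins.hparams import api_pb2

in_range = _create_interval_filter(
    api_pb2.Interval(min_value=0.0, max_value=1.0))
assert in_range(0.5)
assert in_range(1.0)      # endpoints are included
assert not in_range(1.5)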
def _create_discrete_set_filter(discrete_set):
def filter_fn(value):<EOL><INDENT>return value in discrete_set<EOL><DEDENT>return filter_fn<EOL>
Returns a function that checks whether a value belongs to a set. Args: discrete_set: A list of objects representing the set. Returns: A function taking an object and returning True if it is in the set. Membership is tested using the Python 'in' operator (thus, equality of distinct objects is computed using the '==' operator).
f7997:m10
def _value_to_python(value):
assert isinstance(value, struct_pb2.Value)<EOL>field = value.WhichOneof('<STR_LIT>')<EOL>if field == '<STR_LIT>':<EOL><INDENT>return value.number_value<EOL><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>return value.string_value<EOL><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>return value.bool_value<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % field)<EOL><DEDENT>
Converts a google.protobuf.Value to a native Python object.
f7997:m11
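The oneof field names are elided in the body above, but the returned types imply the usual 'number_value', 'string_value' and 'bool_value' kinds. Under that assumption:

from google.protobuf import struct_pb2

assert _value_to_python(struct_pb2.Value(number_value=2.5)) == 2.5
assert _value_to_python(struct_pb2.Value(string_value='x')) == 'x'
assert _value_to_python(struct_pb2.Value(bool_value=True)) is True
# Other kinds (null_value, struct_value, list_value) raise ValueError.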
def _set_avg_session_metrics(session_group):
assert session_group.sessions, '<STR_LIT>'<EOL>metric_stats = collections.defaultdict(_MetricStats)<EOL>for session in session_group.sessions:<EOL><INDENT>for metric_value in session.metric_values:<EOL><INDENT>metric_name = _MetricIdentifier(group=metric_value.name.group,<EOL>tag=metric_value.name.tag)<EOL>stats = metric_stats[metric_name]<EOL>stats.total += metric_value.value<EOL>stats.count += <NUM_LIT:1><EOL>stats.total_step += metric_value.training_step<EOL>stats.total_wall_time_secs += metric_value.wall_time_secs<EOL><DEDENT><DEDENT>del session_group.metric_values[:]<EOL>for (metric_name, stats) in six.iteritems(metric_stats):<EOL><INDENT>session_group.metric_values.add(<EOL>name=api_pb2.MetricName(group=metric_name.group, tag=metric_name.tag),<EOL>value=float(stats.total)/float(stats.count),<EOL>training_step=stats.total_step // stats.count,<EOL>wall_time_secs=stats.total_wall_time_secs / stats.count)<EOL><DEDENT>
Sets the metrics for the group to be the average of its sessions. The resulting session group metrics consist of the union of metrics across the group's sessions. The value of each session group metric is the average of that metric values across the sessions in the group. The 'step' and 'wall_time_secs' fields of the resulting MetricValue field in the session group are populated with the corresponding averages (truncated for 'step') as well. Args: session_group: A SessionGroup protobuffer.
f7997:m12
def _set_median_session_metrics(session_group, aggregation_metric):
measurements = sorted(_measurements(session_group, aggregation_metric),<EOL>key=operator.attrgetter('<STR_LIT>'))<EOL>median_session = measurements[(len(measurements) - <NUM_LIT:1>) // <NUM_LIT:2>].session_index<EOL>del session_group.metric_values[:]<EOL>session_group.metric_values.MergeFrom(<EOL>session_group.sessions[median_session].metric_values)<EOL>
Sets the metrics for session_group to those of its "median session". The median session is the session in session_group with the median value of the metric given by 'aggregation_metric'. The median is taken over the subset of sessions in the group whose 'aggregation_metric' was measured at the largest training step among the sessions in the group. Args: session_group: A SessionGroup protobuffer. aggregation_metric: A MetricName protobuffer.
f7997:m13
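Note that the index (len - 1) // 2 selects the lower of the two middle measurements when the count is even:

values = [0.1, 0.2, 0.3, 0.4]          # sorted measurement values
assert (len(values) - 1) // 2 == 1     # index of the lower median
assert values[(len(values) - 1) // 2] == 0.2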
def _set_extremum_session_metrics(session_group, aggregation_metric,<EOL>extremum_fn):
measurements = _measurements(session_group, aggregation_metric)<EOL>ext_session = extremum_fn(<EOL>measurements,<EOL>key=operator.attrgetter('<STR_LIT>')).session_index<EOL>del session_group.metric_values[:]<EOL>session_group.metric_values.MergeFrom(<EOL>session_group.sessions[ext_session].metric_values)<EOL>
Sets the metrics for session_group to those of its "extremum session". The extremum session is the session in session_group with the extremum value of the metric given by 'aggregation_metric'. The extremum is taken over the subset of sessions in the group whose 'aggregation_metric' was measured at the largest training step among the sessions in the group. Args: session_group: A SessionGroup protobuffer. aggregation_metric: A MetricName protobuffer. extremum_fn: callable. Must be either 'min' or 'max'. Determines the type of extremum to compute.
f7997:m14
def _measurements(session_group, metric_name):
for session_index, session in enumerate(session_group.sessions):<EOL><INDENT>metric_value = _find_metric_value(session, metric_name)<EOL>if not metric_value:<EOL><INDENT>continue<EOL><DEDENT>yield _Measurement(metric_value, session_index)<EOL><DEDENT>
A generator for the values of the metric across the sessions in the group. Args: session_group: A SessionGroup protobuffer. metric_name: A MetricName protobuffer. Yields: The next metric value wrapped in a _Measurement instance.
f7997:m15
def __init__(self, context, request):
self._context = context<EOL>self._request = request<EOL>self._extractors = _create_extractors(request.col_params)<EOL>self._filters = _create_filters(request.col_params, self._extractors)<EOL>self._experiment = context.experiment()<EOL>
Constructor. Args: context: A backend_context.Context instance. request: A ListSessionGroupsRequest protobuf.
f7997:c0:m0