Columns: signature, body, docstring, id
def get_example_features(example):
return (example.features.feature if isinstance(example, tf.train.Example)
        else example.context.feature)
Returns the non-sequence features from the provided example.
f8029:m11
def run_inference_for_inference_results(examples, serving_bundle):
inference_result_proto = run_inference(examples, serving_bundle)
inferences = wrap_inference_results(inference_result_proto)
infer_json = json_format.MessageToJson(
    inferences, including_default_value_fields=True)
return json.loads(infer_json)
Calls servo and wraps the inference results.
f8029:m12
def get_eligible_features(examples, num_mutants):
features_dict = (
    get_numeric_features_to_observed_range(
        examples))
features_dict.update(
    get_categorical_features_to_sampling(
        examples, num_mutants))
features_list = []
for k, v in sorted(features_dict.items()):
  v['name'] = k
  features_list.append(v)
return features_list
Returns a list of JSON objects for each feature in the examples. This list
is used to drive partial dependence plots in the plugin.

Args:
  examples: Examples to examine to determine the eligible features.
  num_mutants: The number of mutations to make over each feature.

Returns:
  A list with a JSON object for each feature. Numeric features are
  represented as {name: observedMin: observedMax:}. Categorical features
  are represented as {name: samples:[]}.
f8029:m13
def get_label_vocab(vocab_path):
if vocab_path:
  try:
    with tf.io.gfile.GFile(vocab_path, 'r') as f:
      return [line.rstrip('\n') for line in f]
  except tf.errors.NotFoundError as err:
    tf.logging.error('<STR_LIT>', err)
return []
Returns a list of label strings loaded from the provided path.
f8029:m14
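A quick usage sketch for get_label_vocab (hypothetical path and file contents; the masked logging message above only matters on the error path):

# Suppose /tmp/vocab.txt contains one label per line: "cat\ndog\nbird\n".
labels = get_label_vocab('/tmp/vocab.txt')
# -> ['cat', 'dog', 'bird']; a missing file logs an error and returns [].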
def create_sprite_image(examples):
def generate_image_from_thumbnails(thumbnails, thumbnail_dims):
  """<STR_LIT>"""
  num_thumbnails = tf.shape(thumbnails)[0].eval()
  images_per_row = int(math.ceil(math.sqrt(num_thumbnails)))
  thumb_height = thumbnail_dims[0]
  thumb_width = thumbnail_dims[1]
  master_height = images_per_row * thumb_height
  master_width = images_per_row * thumb_width
  num_channels = 3
  master = np.zeros([master_height, master_width, num_channels])
  for idx, image in enumerate(thumbnails.eval()):
    left_idx = idx % images_per_row
    top_idx = int(math.floor(idx / images_per_row))
    left_start = left_idx * thumb_width
    left_end = left_start + thumb_width
    top_start = top_idx * thumb_height
    top_end = top_start + thumb_height
    master[top_start:top_end, left_start:left_end, :] = image
  return tf.image.encode_png(master)

image_feature_name = '<STR_LIT>'
sprite_thumbnail_dim_px = 32
with tf.compat.v1.Session():
  keys_to_features = {
      image_feature_name:
          tf.FixedLenFeature((), tf.string, default_value='<STR_LIT>'),
  }
  parsed = tf.parse_example(examples, keys_to_features)
  images = tf.zeros([1, 1, 1, 1], tf.float32)
  i = tf.constant(0)
  thumbnail_dims = (sprite_thumbnail_dim_px, sprite_thumbnail_dim_px)
  num_examples = tf.constant(len(examples))
  encoded_images = parsed[image_feature_name]

  def loop_body(i, encoded_images, images):
    encoded_image = encoded_images[i]
    image = tf.image.decode_jpeg(encoded_image, channels=3)
    resized_image = tf.image.resize(image, thumbnail_dims)
    expanded_image = tf.expand_dims(resized_image, 0)
    images = tf.cond(
        tf.equal(i, 0), lambda: expanded_image,
        lambda: tf.concat([images, expanded_image], 0))
    return i + 1, encoded_images, images

  loop_out = tf.while_loop(
      lambda i, encoded_images, images: tf.less(i, num_examples),
      loop_body, [i, encoded_images, images],
      shape_invariants=[
          i.get_shape(),
          encoded_images.get_shape(),
          tf.TensorShape(None)
      ])
  sprite = generate_image_from_thumbnails(loop_out[2], thumbnail_dims)
  return sprite.eval()
Returns an encoded sprite image for use in Facets Dive.

Args:
  examples: A list of serialized example protos to get images for.

Returns:
  An encoded PNG.
f8029:m15
def run_inference(examples, serving_bundle):
batch_size = 64
if serving_bundle.estimator and serving_bundle.feature_spec:
  preds = serving_bundle.estimator.predict(
      lambda: tf.data.Dataset.from_tensor_slices(
          tf.parse_example([ex.SerializeToString() for ex in examples],
                           serving_bundle.feature_spec)).batch(batch_size))
  if serving_bundle.use_predict:
    preds_key = serving_bundle.predict_output_tensor
  elif serving_bundle.model_type == '<STR_LIT>':
    preds_key = '<STR_LIT>'
  else:
    preds_key = '<STR_LIT>'
  values = []
  for pred in preds:
    values.append(pred[preds_key])
  return common_utils.convert_prediction_values(values, serving_bundle)
elif serving_bundle.custom_predict_fn:
  values = serving_bundle.custom_predict_fn(examples)
  return common_utils.convert_prediction_values(values, serving_bundle)
else:
  return platform_utils.call_servo(examples, serving_bundle)
Run inference on examples given model information.

Args:
  examples: A list of examples that matches the model spec.
  serving_bundle: A `ServingBundle` object that contains the information
    to make the inference request.

Returns:
  A ClassificationResponse or RegressionResponse proto.
f8029:m16
def __init__(self, x_min, x_max, examples, num_mutants,
             feature_index_pattern):
def to_float_or_none(x):
  try:
    return float(x)
  except (ValueError, TypeError):
    return None

def to_int(x):
  try:
    return int(x)
  except (ValueError, TypeError) as e:
    raise common_utils.InvalidUserInputError(e)

def convert_pattern_to_indices(pattern):
  """<STR_LIT>"""
  pieces = [token.strip() for token in pattern.split(',')]
  indices = []
  for piece in pieces:
    if '-' in piece:
      lower, upper = [int(x.strip()) for x in piece.split('-', 1)]
      indices.extend(range(lower, upper + 1))
    else:
      indices.append(int(piece.strip()))
  return sorted(indices)

self.x_min = to_float_or_none(x_min)
self.x_max = to_float_or_none(x_max)
self.examples = examples
self.num_mutants = to_int(num_mutants)
self.feature_indices = []
if feature_index_pattern:
  try:
    self.feature_indices = convert_pattern_to_indices(
        feature_index_pattern)
  except ValueError:
    pass
Inits VizParams. May raise InvalidUserInputError for bad user inputs.
f8029:c0:m0
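For reference, the feature_index_pattern parsing above expands comma-separated entries and inclusive dash ranges into a sorted index list. A minimal standalone sketch of the same behavior (hypothetical helper name):

def parse_indices(pattern):
  # '0-2, 5' -> [0, 1, 2, 5]; dash pieces expand to inclusive ranges.
  indices = []
  for piece in (token.strip() for token in pattern.split(',')):
    if '-' in piece:
      lower, upper = (int(x) for x in piece.split('-', 1))
      indices.extend(range(lower, upper + 1))
    else:
      indices.append(int(piece))
  return sorted(indices)

assert parse_indices('0-2, 5') == [0, 1, 2, 5]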
def __init__(self, feature_name, original_value, feature_type):
self.feature_name = feature_name
self.original_value = original_value
self.feature_type = feature_type
self.length = sum(1 for _ in original_value)
Inits OriginalFeatureList.
f8029:c1:m0
def __init__(self, original_feature, index, mutant_value):
if not isinstance(original_feature, OriginalFeatureList):
  raise ValueError(
      '<STR_LIT>'
      '<STR_LIT>'.format(type(original_feature)))
self.original_feature = original_feature
if index is not None and not isinstance(index, int):
  raise ValueError(
      '<STR_LIT>'.format(
          type(index)))
self.index = index
self.mutant_value = mutant_value
Inits MutantFeatureValue.
f8029:c2:m0
def __init__(self, inference_address, model_name, model_type, model_version,
             signature, use_predict, predict_input_tensor,
             predict_output_tensor, estimator=None, feature_spec=None,
             custom_predict_fn=None):
if not isinstance(inference_address, string_types):
  raise ValueError('<STR_LIT>'.format(
      type(inference_address)))
self.inference_address = inference_address.replace(
    '<STR_LIT>', '<STR_LIT>').replace('<STR_LIT>', '<STR_LIT>')
if not isinstance(model_name, string_types):
  raise ValueError('<STR_LIT>'.format(
      type(model_name)))
self.model_name = model_name
if model_type not in ['<STR_LIT>', '<STR_LIT>']:
  raise ValueError('<STR_LIT>'.format(model_type))
self.model_type = model_type
self.model_version = int(model_version) if model_version else None
self.signature = signature if signature else None
self.use_predict = use_predict
self.predict_input_tensor = predict_input_tensor
self.predict_output_tensor = predict_output_tensor
self.estimator = estimator
self.feature_spec = feature_spec
self.custom_predict_fn = custom_predict_fn
Inits ServingBundle.
f8029:c3:m0
def make_fake_example(single_int_val=0):
example = tf.train.Example()
example.features.feature['<STR_LIT>'].float_list.value.extend(
    [1.0, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>])
example.features.feature['<STR_LIT>'].int64_list.value.extend([10, 20])
example.features.feature['<STR_LIT>'].int64_list.value.extend(
    [single_int_val])
example.features.feature['<STR_LIT>'].float_list.value.extend([<NUM_LIT>])
example.features.feature['<STR_LIT>'].bytes_list.value.extend(
    [b'<STR_LIT>', b'<STR_LIT>', b'<STR_LIT>'])
return example
Make a fake example with numeric and string features.
f8030:m0
def write_out_examples(examples, path):
# Use the writer as a context manager so the file is flushed and closed.
with tf.io.TFRecordWriter(path) as writer:
  for example in examples:
    writer.write(example.SerializeToString())
Writes protos to the CNS path.
f8030:m1
def value_from_example(example, feature_name):
feature = example.features.feature[feature_name]
feature_type = feature.WhichOneof('<STR_LIT>')
return getattr(feature, feature_type).value[:]
Returns the feature as a Python list.
f8030:m2
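A minimal round trip showing what value_from_example relies on: in the real tf.train.Feature proto, the oneof field is named 'kind', and WhichOneof resolves it to whichever list is populated (sketch; the feature name here is illustrative):

import tensorflow as tf

example = tf.train.Example()
example.features.feature['ages'].int64_list.value.extend([10, 20])

feature = example.features.feature['ages']
kind = feature.WhichOneof('kind')      # -> 'int64_list'
assert list(getattr(feature, kind).value) == [10, 20]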
def __init__(self, context):
self._logdir = context.logdir
self._has_auth_group = (context.flags and
                        '<STR_LIT>' in context.flags and
                        context.flags.authorized_groups != '<STR_LIT>')
Constructs an interactive inference plugin for TensorBoard.

Args:
  context: A base_plugin.TBContext instance.
f8031:c0:m0
def get_plugin_apps(self):
return {
    '<STR_LIT>': self._infer,
    '<STR_LIT>': self._update_example,
    '<STR_LIT>': self._examples_from_path_handler,
    '<STR_LIT>': self._serve_sprite,
    '<STR_LIT>': self._duplicate_example,
    '<STR_LIT>': self._delete_example,
    '<STR_LIT>': self._infer_mutants_handler,
    '<STR_LIT>': self._eligible_features_from_example_handler,
}
Obtains a mapping between routes and handlers. Stores the logdir.

Returns:
  A mapping between routes and handlers (functions that respond to
  requests).
f8031:c0:m1
def is_active(self):
return False
Determines whether this plugin is active.

Returns:
  A boolean. Whether this plugin is active.
f8031:c0:m2
@wrappers.Request.application
def _examples_from_path_handler(self, request):
examples_count = int(request.args.get('<STR_LIT>'))
examples_path = request.args.get('<STR_LIT>')
sampling_odds = float(request.args.get('<STR_LIT>'))
self.example_class = (tf.train.SequenceExample
                      if request.args.get('<STR_LIT>') == 'true'
                      else tf.train.Example)
try:
  platform_utils.throw_if_file_access_not_allowed(examples_path,
                                                  self._logdir,
                                                  self._has_auth_group)
  example_strings = platform_utils.example_protos_from_path(
      examples_path, examples_count, parse_examples=False,
      sampling_odds=sampling_odds, example_class=self.example_class)
  self.examples = [
      self.example_class.FromString(ex) for ex in example_strings]
  self.generate_sprite(example_strings)
  json_examples = [
      json_format.MessageToJson(example) for example in self.examples
  ]
  self.updated_example_indices = set(range(len(json_examples)))
  return http_util.Respond(
      request,
      {'<STR_LIT>': json_examples,
       '<STR_LIT>': True if self.sprite else False}, 'application/json')
except common_utils.InvalidUserInputError as e:
  return http_util.Respond(request, {'error': e.message},
                           'application/json', code=<NUM_LIT>)
Returns JSON of the specified examples.

Args:
  request: A request that should contain 'examples_path' and
    'max_examples'.

Returns:
  JSON of up to max_examples of the examples in the path.
f8031:c0:m4
@wrappers.Request.application
def _update_example(self, request):
if request.method != 'POST':
  return http_util.Respond(request, {'error': '<STR_LIT>'},
                           'application/json', code=<NUM_LIT>)
example_json = request.form['<STR_LIT>']
index = int(request.form['index'])
if index >= len(self.examples):
  return http_util.Respond(request, {'error': '<STR_LIT>'},
                           'application/json', code=<NUM_LIT>)
new_example = self.example_class()
json_format.Parse(example_json, new_example)
self.examples[index] = new_example
self.updated_example_indices.add(index)
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')
Updates the specified example.

Args:
  request: A request that should contain 'index' and 'example'.

Returns:
  An empty response.
f8031:c0:m6
@wrappers.Request.application
def _duplicate_example(self, request):
index = int(request.args.get('index'))
if index >= len(self.examples):
  return http_util.Respond(request, {'error': '<STR_LIT>'},
                           'application/json', code=<NUM_LIT>)
new_example = self.example_class()
new_example.CopyFrom(self.examples[index])
self.examples.append(new_example)
self.updated_example_indices.add(len(self.examples) - 1)
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')
Duplicates the specified example.

Args:
  request: A request that should contain 'index'.

Returns:
  An empty response.
f8031:c0:m7
@wrappers.Request.application
def _delete_example(self, request):
index = int(request.args.get('index'))
if index >= len(self.examples):
  return http_util.Respond(request, {'error': '<STR_LIT>'},
                           'application/json', code=<NUM_LIT>)
del self.examples[index]
self.updated_example_indices = set([
    i if i < index else i - 1 for i in self.updated_example_indices])
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')
Deletes the specified example.

Args:
  request: A request that should contain 'index'.

Returns:
  An empty response.
f8031:c0:m8
def _parse_request_arguments(self, request):
inference_addresses = request.args.get('<STR_LIT>').split(',')
model_names = request.args.get('<STR_LIT>').split(',')
model_versions = request.args.get('<STR_LIT>').split(',')
model_signatures = request.args.get('<STR_LIT>').split(',')
if len(model_names) != len(inference_addresses):
  raise common_utils.InvalidUserInputError('<STR_LIT>' +
                                           '<STR_LIT>')
return inference_addresses, model_names, model_versions, model_signatures
Parses comma-separated request arguments.

Args:
  request: A request that should contain 'inference_address',
    'model_name', 'model_version', 'model_signature'.

Returns:
  A tuple of lists for model parameters.
f8031:c0:m9
@wrappers.Request.application
def _infer(self, request):
label_vocab = inference_utils.get_label_vocab(
    request.args.get('<STR_LIT>'))
try:
  if request.method != 'GET':
    logger.error('<STR_LIT>', request.method)
    return http_util.Respond(request, {'error': '<STR_LIT>'},
                             'application/json', code=<NUM_LIT>)
  (inference_addresses, model_names, model_versions,
   model_signatures) = self._parse_request_arguments(request)
  indices_to_infer = sorted(self.updated_example_indices)
  examples_to_infer = [self.examples[index] for index in indices_to_infer]
  infer_objs = []
  for model_num in xrange(len(inference_addresses)):
    serving_bundle = inference_utils.ServingBundle(
        inference_addresses[model_num],
        model_names[model_num],
        request.args.get('<STR_LIT>'),
        model_versions[model_num],
        model_signatures[model_num],
        request.args.get('<STR_LIT>') == 'true',
        request.args.get('<STR_LIT>'),
        request.args.get('<STR_LIT>'))
    infer_objs.append(inference_utils.run_inference_for_inference_results(
        examples_to_infer, serving_bundle))
  resp = {'<STR_LIT>': indices_to_infer, '<STR_LIT>': infer_objs}
  self.updated_example_indices = set()
  return http_util.Respond(request, {'<STR_LIT>': json.dumps(resp),
                                     '<STR_LIT>': json.dumps(label_vocab)},
                           'application/json')
except common_utils.InvalidUserInputError as e:
  return http_util.Respond(request, {'error': e.message},
                           'application/json', code=<NUM_LIT>)
except AbortionError as e:
  return http_util.Respond(request, {'error': e.details},
                           'application/json', code=<NUM_LIT>)
Returns JSON for the `vz-line-chart`s for a feature.

Args:
  request: A request that should contain 'inference_address',
    'model_name', 'model_type', 'model_version', 'model_signature' and
    'label_vocab_path'.

Returns:
  A list of JSON objects, one for each chart.
f8031:c0:m10
@wrappers.Request.application
def _eligible_features_from_example_handler(self, request):
features_list = inference_utils.get_eligible_features(
    self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS)
return http_util.Respond(request, features_list, 'application/json')
Returns a list of JSON objects for each feature in the example.

Args:
  request: A request for features.

Returns:
  A list with a JSON object for each feature. Numeric features are
  represented as {name: observedMin: observedMax:}. Categorical features
  are represented as {name: samples:[]}.
f8031:c0:m11
@wrappers.Request.application
def _infer_mutants_handler(self, request):
try:
  if request.method != 'GET':
    logger.error('<STR_LIT>', request.method)
    return http_util.Respond(request, {'error': '<STR_LIT>'},
                             'application/json', code=<NUM_LIT>)
  example_index = int(request.args.get('<STR_LIT>', '0'))
  feature_name = request.args.get('<STR_LIT>')
  examples = (self.examples if example_index == -1
              else [self.examples[example_index]])
  (inference_addresses, model_names, model_versions,
   model_signatures) = self._parse_request_arguments(request)
  serving_bundles = []
  for model_num in xrange(len(inference_addresses)):
    serving_bundles.append(inference_utils.ServingBundle(
        inference_addresses[model_num],
        model_names[model_num],
        request.args.get('<STR_LIT>'),
        model_versions[model_num],
        model_signatures[model_num],
        request.args.get('<STR_LIT>') == 'true',
        request.args.get('<STR_LIT>'),
        request.args.get('<STR_LIT>')))
  viz_params = inference_utils.VizParams(
      request.args.get('<STR_LIT>'), request.args.get('<STR_LIT>'),
      self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS,
      request.args.get('<STR_LIT>'))
  json_mapping = inference_utils.mutant_charts_for_feature(
      examples, feature_name, serving_bundles, viz_params)
  return http_util.Respond(request, json_mapping, 'application/json')
except common_utils.InvalidUserInputError as e:
  return http_util.Respond(request, {'error': e.message},
                           'application/json', code=<NUM_LIT>)
Returns JSON for the `vz-line-chart`s for a feature.

Args:
  request: A request that should contain 'feature_name', 'example_index',
    'inference_address', 'model_name', 'model_type', 'model_version', and
    'model_signature'.

Returns:
  A list of JSON objects, one for each chart.
f8031:c0:m12
def _DeserializeResponse(self, byte_content):
return json.loads(byte_content.decode('utf-8'))
Deserializes byte content that is a JSON encoding.

Args:
  byte_content: The byte content of a JSON response.

Returns:
  The deserialized python object decoded from JSON.
f8032:c0:m7
def image_data(verbose=False):
global _IMAGE_DATA
if _IMAGE_DATA is None:
  if verbose:
    logger.info("<STR_LIT>")
  with contextlib.closing(urllib.request.urlopen(IMAGE_URL)) as infile:
    _IMAGE_DATA = infile.read()
return _IMAGE_DATA
Get the raw encoded image data, downloading it if necessary.
f8033:m0
def convolve(image, pixel_filter, channels=3, name=None):
with tf.name_scope(name, '<STR_LIT>'):
  tf.compat.v1.assert_type(image, tf.float32)
  channel_filter = tf.eye(channels)
  filter_ = (tf.expand_dims(tf.expand_dims(pixel_filter, -1), -1) *
             tf.expand_dims(tf.expand_dims(channel_filter, 0), 0))
  result_batch = tf.nn.conv2d(tf.stack([image]),
                              filter=filter_,
                              strides=[1, 1, 1, 1],
                              padding='<STR_LIT>')
  return result_batch[0]
Perform a 2D pixel convolution on the given image.

Arguments:
  image: A 3D `float32` `Tensor` of shape `[height, width, channels]`,
    where `channels` is the third argument to this function and the first
    two dimensions are arbitrary.
  pixel_filter: A 2D `Tensor`, representing pixel weightings for the
    kernel. This will be used to create a 4D kernel; the extra two
    dimensions are for channels (see `tf.nn.conv2d` documentation), and
    the kernel will be constructed so that the channels are independent:
    each channel only observes the data from neighboring pixels of the
    same channel.
  channels: An integer representing the number of channels in the image
    (e.g., 3 for RGB).

Returns:
  A 3D `float32` `Tensor` of the same shape as the input.
f8033:m1
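A usage sketch for convolve under the docstring's contract (assumes the masked name-scope and padding literals are restored; the same-shape output implies 'SAME' padding), run in TF1 graph mode:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
image = tf.constant(np.random.rand(8, 8, 3).astype(np.float32))
box = tf.ones((3, 3)) / 9.0            # normalized 3x3 box-blur kernel
blurred = convolve(image, box)         # same shape as the input
with tf.Session() as sess:
  print(sess.run(blurred).shape)       # (8, 8, 3)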
def get_image(verbose=False):
base_data = tf.constant(image_data(verbose=verbose))
base_image = tf.image.decode_image(base_data, channels=3)
base_image.set_shape((IMAGE_HEIGHT, IMAGE_WIDTH, 3))
parsed_image = tf.Variable(base_image, name='image', dtype=tf.uint8)
return parsed_image
Get the image as a TensorFlow variable.

Returns:
  A `tf.Variable`, which must be initialized prior to use: invoke
  `sess.run(result.initializer)`.
f8033:m2
def run_box_to_gaussian(logdir, verbose=False):
if verbose:
  logger.info('<STR_LIT>')
tf.compat.v1.reset_default_graph()
tf.compat.v1.set_random_seed(0)

image = get_image(verbose=verbose)
blur_radius = tf.compat.v1.placeholder(shape=(), dtype=tf.int32)
with tf.name_scope('<STR_LIT>'):
  blur_side_length = blur_radius * 2 + 1
  pixel_filter = tf.ones((blur_side_length, blur_side_length))
  pixel_filter = (pixel_filter
                  / tf.cast(tf.size(input=pixel_filter), tf.float32))

iterations = 4
images = [tf.cast(image, tf.float32) / <NUM_LIT>]
for _ in xrange(iterations):
  images.append(convolve(images[-1], pixel_filter))
with tf.name_scope('<STR_LIT>'):
  images = tf.stack(
      [tf.cast(255 * tf.clip_by_value(image_, 0.0, 1.0), tf.uint8)
       for image_ in images])
summ = image_summary.op(
    '<STR_LIT>', images, max_outputs=iterations,
    display_name='<STR_LIT>',
    description=('<STR_LIT>' '<STR_LIT>' '<STR_LIT>' '<STR_LIT>'
                 '<STR_LIT>' '<STR_LIT>' '<STR_LIT>' '<STR_LIT>'
                 '<STR_LIT>' '<STR_LIT>' '<STR_LIT>' '<STR_LIT>'
                 '<STR_LIT>' '<STR_LIT>' '<STR_LIT>' '<STR_LIT>'
                 '<STR_LIT>' '<STR_LIT>' '<STR_LIT>' '<STR_LIT>'
                 '<STR_LIT>' '<STR_LIT>' '<STR_LIT>' '<STR_LIT>'
                 '<STR_LIT>' '<STR_LIT>' '<STR_LIT:%s>'
                 % ('<STR_LIT>', IMAGE_CREDIT)))
with tf.compat.v1.Session() as sess:
  sess.run(image.initializer)
  writer = tf.summary.FileWriter(os.path.join(logdir, '<STR_LIT>'))
  writer.add_graph(sess.graph)
  for step in xrange(8):
    if verbose:
      logger.info('<STR_LIT>' % step)
    # The feed must be built on every step, not only when verbose.
    feed_dict = {blur_radius: step}
    run_options = tf.compat.v1.RunOptions(
        trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
    run_metadata = config_pb2.RunMetadata()
    s = sess.run(summ, feed_dict=feed_dict,
                 options=run_options, run_metadata=run_metadata)
    writer.add_summary(s, global_step=step)
    writer.add_run_metadata(run_metadata, '<STR_LIT>' % step)
  writer.close()
Run a box-blur-to-Gaussian-blur demonstration.

See the summary description for more details.

Arguments:
  logdir: Directory into which to write event logs.
  verbose: Boolean; whether to log any output.
f8033:m3
def run_sobel(logdir, verbose=False):
if verbose:
  logger.info('<STR_LIT>')
tf.compat.v1.reset_default_graph()
tf.compat.v1.set_random_seed(0)

image = get_image(verbose=verbose)
kernel_radius = tf.compat.v1.placeholder(shape=(), dtype=tf.int32)
with tf.name_scope('<STR_LIT>'):
  kernel_side_length = kernel_radius * 2 + 1
  weighting_kernel = (
      1.0 - tf.abs(tf.linspace(-1.0, 1.0, num=kernel_side_length)))
  differentiation_kernel = tf.linspace(-1.0, 1.0, num=kernel_side_length)
  horizontal_kernel = tf.matmul(tf.expand_dims(weighting_kernel, 1),
                                tf.expand_dims(differentiation_kernel, 0))
with tf.name_scope('<STR_LIT>'):
  vertical_kernel = tf.transpose(a=horizontal_kernel)

float_image = tf.cast(image, tf.float32)
dx = convolve(float_image, horizontal_kernel, name='<STR_LIT>')
dy = convolve(float_image, vertical_kernel, name='<STR_LIT>')
gradient_magnitude = tf.norm(tensor=[dx, dy], axis=0, name='<STR_LIT>')
with tf.name_scope('<STR_LIT>'):
  normalized_gradient = gradient_magnitude / tf.reduce_max(
      input_tensor=gradient_magnitude)
with tf.name_scope('<STR_LIT>'):
  output_image = tf.cast(255 * normalized_gradient, tf.uint8)

summ = image_summary.op(
    '<STR_LIT>', tf.stack([output_image]),
    display_name='<STR_LIT>',
    description=(u'<STR_LIT>' '<STR_LIT>' '<STR_LIT>'
                 u'<STR_LIT>' '<STR_LIT>' '<STR_LIT>'
                 '<STR_LIT>' '<STR_LIT>' '<STR_LIT>'
                 '<STR_LIT>' "<STR_LIT:%s>"
                 % ('<STR_LIT>', IMAGE_CREDIT)))

with tf.compat.v1.Session() as sess:
  sess.run(image.initializer)
  writer = tf.summary.FileWriter(os.path.join(logdir, '<STR_LIT>'))
  writer.add_graph(sess.graph)
  for step in xrange(8):
    if verbose:
      logger.info("<STR_LIT>" % step)
    # The feed must be built on every step, not only when verbose.
    feed_dict = {kernel_radius: step}
    run_options = tf.compat.v1.RunOptions(
        trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
    run_metadata = config_pb2.RunMetadata()
    s = sess.run(summ, feed_dict=feed_dict,
                 options=run_options, run_metadata=run_metadata)
    writer.add_summary(s, global_step=step)
    writer.add_run_metadata(run_metadata, '<STR_LIT>' % step)
  writer.close()
Run a Sobel edge detection demonstration.

See the summary description for more details.

Arguments:
  logdir: Directory into which to write event logs.
  verbose: Boolean; whether to log any output.
f8033:m4
def run_all(logdir, verbose=False):
run_box_to_gaussian(logdir, verbose=verbose)
run_sobel(logdir, verbose=verbose)
Run simulations on a reasonable set of parameters.

Arguments:
  logdir: the directory into which to store all the runs' data
  verbose: if true, print out each run's name as it begins
f8033:m5
def _DeserializeResponse(self, byte_content):
return json.loads(byte_content.decode("utf-8"))
Deserializes byte content that is a JSON encoding.

Args:
  byte_content: The byte content of a response.

Returns:
  The deserialized python object decoded from JSON.
f8034:c0:m2
def op(name,
       images,
       max_outputs=3,
       display_name=None,
       description=None,
       collections=None):
import tensorflow.compat.v1 as tf

if display_name is None:
  display_name = name
summary_metadata = metadata.create_summary_metadata(
    display_name=display_name, description=description)
with tf.name_scope(name), \
     tf.control_dependencies([tf.assert_rank(images, 4),
                              tf.assert_type(images, tf.uint8),
                              tf.assert_non_negative(max_outputs)]):
  limited_images = images[:max_outputs]
  encoded_images = tf.map_fn(tf.image.encode_png, limited_images,
                             dtype=tf.string,
                             name='<STR_LIT>')
  image_shape = tf.shape(input=images)
  dimensions = tf.stack([tf.as_string(image_shape[2], name='width'),
                         tf.as_string(image_shape[1], name='<STR_LIT>')],
                        name='<STR_LIT>')
  tensor = tf.concat([dimensions, encoded_images], axis=0)
  return tf.summary.tensor_summary(name='<STR_LIT>',
                                   tensor=tensor,
                                   collections=collections,
                                   summary_metadata=summary_metadata)
Create a legacy image summary op for use in a TensorFlow graph.

Arguments:
  name: A unique name for the generated summary node.
  images: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
    where `k` is the number of images, `h` and `w` are the height and
    width of the images, and `c` is the number of channels, which should
    be 1, 3, or 4. Any of the dimensions may be statically unknown (i.e.,
    `None`).
  max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
    many images will be emitted at each step. When more than `max_outputs`
    many images are provided, the first `max_outputs` many images will be
    used and the rest silently discarded.
  display_name: Optional name for this summary in TensorBoard, as a
    constant `str`. Defaults to `name`.
  description: Optional long-form description for this summary, as a
    constant `str`. Markdown is supported. Defaults to empty.
  collections: Optional list of graph collections keys. The new summary op
    is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.

Returns:
  A TensorFlow summary op.
f8035:m0
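A usage sketch for the legacy op (TF1 graph mode; assumes the masked tag literals are restored and that /tmp/image_demo is a writable logdir):

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
images = tf.constant(
    np.random.randint(0, 256, size=(2, 16, 16, 3), dtype=np.uint8))
summ = op('demo_images', images, max_outputs=2)
with tf.Session() as sess:
  writer = tf.summary.FileWriter('/tmp/image_demo')
  writer.add_summary(sess.run(summ), global_step=0)
  writer.close()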
def pb(name, images, max_outputs=3, display_name=None, description=None):
import tensorflow.compat.v1 as tf

images = np.array(images).astype(np.uint8)
if images.ndim != 4:
  raise ValueError('<STR_LIT>' % (images.shape,))
limited_images = images[:max_outputs]
encoded_images = [encoder.encode_png(image) for image in limited_images]
(width, height) = (images.shape[2], images.shape[1])
content = [str(width), str(height)] + encoded_images
tensor = tf.make_tensor_proto(content, dtype=tf.string)
if display_name is None:
  display_name = name
summary_metadata = metadata.create_summary_metadata(
    display_name=display_name, description=description)
tf_summary_metadata = tf.SummaryMetadata.FromString(
    summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag='<STR_LIT>' % name,
                  metadata=tf_summary_metadata,
                  tensor=tensor)
return summary
Create a legacy image summary protobuf.

This behaves as if you were to create an `op` with the same arguments
(wrapped with constant tensors where appropriate) and then execute that
summary op in a TensorFlow session.

Arguments:
  name: A unique name for the generated summary, including any desired
    name scopes.
  images: An `np.array` representing pixel data with shape `[k, h, w, c]`,
    where `k` is the number of images, `w` and `h` are the width and
    height of the images, and `c` is the number of channels, which should
    be 1, 3, or 4.
  max_outputs: Optional `int`. At most this many images will be emitted.
    If more than this many images are provided, the first `max_outputs`
    many images will be used and the rest silently discarded.
  display_name: Optional name for this summary in TensorBoard, as a `str`.
    Defaults to `name`.
  description: Optional long-form description for this summary, as a
    `str`. Markdown is supported. Defaults to empty.

Returns:
  A `tf.Summary` protobuf object.
f8035:m1
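Because pb builds the protobuf eagerly, no session is needed (sketch; the tag prefix is masked above, so the final tag string is an assumption):

import numpy as np

images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
summary_proto = pb('demo', images)
# summary_proto.value[0].tensor holds the width, the height, and one
# PNG-encoded image, all as string elements.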
def create_summary_metadata(display_name, description):
content = plugin_data_pb2.ImagePluginData(version=PROTO_VERSION)
metadata = summary_pb2.SummaryMetadata(
    display_name=display_name,
    summary_description=description,
    plugin_data=summary_pb2.SummaryMetadata.PluginData(
        plugin_name=PLUGIN_NAME,
        content=content.SerializeToString()))
return metadata
Create a `summary_pb2.SummaryMetadata` proto for image plugin data.

Returns:
  A `summary_pb2.SummaryMetadata` protobuf object.
f8036:m0
def parse_plugin_metadata(content):
if not isinstance(content, bytes):
  raise TypeError('<STR_LIT>')
result = plugin_data_pb2.ImagePluginData.FromString(content)
if result.version == 0:
  return result
else:
  logger.warn(
      '<STR_LIT>'
      '<STR_LIT>'
      '<STR_LIT>', result.version, PROTO_VERSION)
  return result
Parse summary metadata to a Python object.

Arguments:
  content: The `content` field of a `SummaryMetadata` proto corresponding
    to the image plugin.

Returns:
  An `ImagePluginData` protobuf object.
f8036:m1
def image(name,
          data,
          step=None,
          max_outputs=3,
          description=None):
summary_metadata = metadata.create_summary_metadata(
    display_name=None, description=description)
summary_scope = (
    getattr(tf.summary.experimental, '<STR_LIT>', None) or
    tf.summary.summary_scope)
with summary_scope(
    name, '<STR_LIT>', values=[data, max_outputs, step]) as (tag, _):
  tf.debugging.assert_rank(data, 4)
  tf.debugging.assert_non_negative(max_outputs)
  images = tf.image.convert_image_dtype(data, tf.uint8, saturate=True)
  limited_images = images[:max_outputs]
  encoded_images = tf.map_fn(tf.image.encode_png, limited_images,
                             dtype=tf.string,
                             name='<STR_LIT>')
  encoded_images = tf.cond(
      tf.shape(input=encoded_images)[0] > 0,
      lambda: encoded_images, lambda: tf.constant([], tf.string))
  image_shape = tf.shape(input=images)
  dimensions = tf.stack([tf.as_string(image_shape[2], name='width'),
                         tf.as_string(image_shape[1], name='<STR_LIT>')],
                        name='<STR_LIT>')
  tensor = tf.concat([dimensions, encoded_images], axis=0)
  return tf.summary.write(
      tag=tag, tensor=tensor, step=step, metadata=summary_metadata)
Write an image summary.

Arguments:
  name: A name for this summary. The summary tag used for TensorBoard
    will be this name prefixed by any active name scopes.
  data: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
    where `k` is the number of images, `h` and `w` are the height and
    width of the images, and `c` is the number of channels, which should
    be 1, 2, 3, or 4 (grayscale, grayscale with alpha, RGB, RGBA). Any of
    the dimensions may be statically unknown (i.e., `None`). Floating
    point data will be clipped to the range [0,1).
  step: Explicit `int64`-castable monotonic step value for this summary.
    If omitted, this defaults to `tf.summary.experimental.get_step()`,
    which must not be None.
  max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
    many images will be emitted at each step. When more than `max_outputs`
    many images are provided, the first `max_outputs` many images will be
    used and the rest silently discarded.
  description: Optional long-form description for this summary, as a
    constant `str`. Markdown is supported. Defaults to empty.

Returns:
  True on success, or false if no summary was emitted because no default
  summary writer was available.

Raises:
  ValueError: if a default writer exists, but no step was provided and
    `tf.summary.experimental.get_step()` is None.
f8038:m0
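A TF2-style usage sketch for this image summary (hypothetical logdir and tag; floats in [0, 1] are converted to uint8 by the op):

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/image_demo_v2')
with writer.as_default():
  data = tf.random.uniform((3, 32, 32, 3))
  image('random_rgb', data, step=0, max_outputs=2)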
def __init__(self, context):
self._multiplexer = context.multiplexer
self._db_connection_provider = context.db_connection_provider
Instantiates ImagesPlugin via TensorBoard core.

Args:
  context: A base_plugin.TBContext instance.
f8039:c0:m0
def is_active(self):
if self._db_connection_provider:
  db = self._db_connection_provider()
  cursor = db.execute(
      '''<STR_LIT>''',
      (metadata.PLUGIN_NAME,))
  return bool(list(cursor))
if not self._multiplexer:
  return False
return bool(self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME))
The images plugin is active iff any run has at least one relevant tag.
f8039:c0:m2
@wrappers.Request.application
def _serve_image_metadata(self, request):
tag = request.args.get('<STR_LIT>')
run = request.args.get('<STR_LIT>')
sample = int(request.args.get('<STR_LIT>', 0))
response = self._image_response_for_run(run, tag, sample)
return http_util.Respond(request, response, 'application/json')
Given a tag and list of runs, serve a list of metadata for images.

Note that the images themselves are not sent; instead, we respond with
URLs to the images. The frontend should treat these URLs as opaque and
should not try to parse information about them or generate them itself,
as the format may change.

Args:
  request: A werkzeug.wrappers.Request object.

Returns:
  A werkzeug.Response application.
f8039:c0:m4
def _image_response_for_run(self, run, tag, sample):
if self._db_connection_provider:
  db = self._db_connection_provider()
  cursor = db.execute(
      '''<STR_LIT>''',
      {'<STR_LIT>': run, '<STR_LIT>': tag,
       '<STR_LIT>': tf.string.as_datatype_enum})
  return [{
      '<STR_LIT>': computed_time,
      '<STR_LIT>': step,
      'width': width,
      '<STR_LIT>': height,
      '<STR_LIT>': self._query_for_individual_image(run, tag, sample, index)
  } for index, (computed_time, step, width, height) in enumerate(cursor)]
response = []
index = 0
tensor_events = self._multiplexer.Tensors(run, tag)
filtered_events = self._filter_by_sample(tensor_events, sample)
for (index, tensor_event) in enumerate(filtered_events):
  (width, height) = tensor_event.tensor_proto.string_val[:2]
  response.append({
      '<STR_LIT>': tensor_event.wall_time,
      '<STR_LIT>': tensor_event.step,
      'width': int(width),
      '<STR_LIT>': int(height),
      '<STR_LIT>': self._query_for_individual_image(run, tag, sample, index)
  })
return response
Builds a JSON-serializable object with information about images.

Args:
  run: The name of the run.
  tag: The name of the tag the images all belong to.
  sample: The zero-indexed sample of the image for which to retrieve
    information. For instance, setting `sample` to `2` will fetch
    information about only the third image of each batch. Steps with
    fewer than three images will be omitted from the results.

Returns:
  A list of dictionaries containing the wall time, step, URL, width, and
  height for each image.
f8039:c0:m5
def _query_for_individual_image(self, run, tag, sample, index):
query_string = urllib.parse.urlencode({
    '<STR_LIT>': run,
    '<STR_LIT>': tag,
    '<STR_LIT>': sample,
    'index': index,
})
return query_string
Builds a URL for accessing the specified image.

This should be kept in sync with _serve_image_metadata. Note that the URL
is *not* guaranteed to always return the same image, since images may be
unloaded from the reservoir as new images come in.

Args:
  run: The name of the run.
  tag: The tag.
  sample: The relevant sample index, zero-indexed. See documentation on
    `_image_response_for_run` for more details.
  index: The index of the image. Negative values are OK.

Returns:
  A string representation of a URL that will load the index-th sampled
  image in the given run with the given tag.
f8039:c0:m7
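For illustration, the result is a plain urlencode of the four parameters (the key names are masked above, so the ones used here are assumptions):

from urllib.parse import urlencode

urlencode({'run': 'train', 'tag': 'input_image', 'sample': 0, 'index': 3})
# -> 'run=train&tag=input_image&sample=0&index=3'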
def _get_individual_image(self, run, tag, index, sample):
if self._db_connection_provider:
  db = self._db_connection_provider()
  cursor = db.execute(
      '''<STR_LIT>''',
      {'<STR_LIT>': run,
       '<STR_LIT>': tag,
       '<STR_LIT>': sample,
       'index': index,
       '<STR_LIT>': tf.string.as_datatype_enum})
  (data,) = cursor.fetchone()
  return six.binary_type(data)
events = self._filter_by_sample(self._multiplexer.Tensors(run, tag), sample)
images = events[index].tensor_proto.string_val[2:]
return images[sample]
Returns the actual image bytes for a given image.

Args:
  run: The name of the run the image belongs to.
  tag: The name of the tag the images belongs to.
  index: The index of the image in the current reservoir.
  sample: The zero-indexed sample of the image to retrieve (for example,
    setting `sample` to `2` will fetch the third image sample at `step`).

Returns:
  A bytestring of the raw image bytes.
f8039:c0:m8
@wrappers.Request.application
def _serve_individual_image(self, request):
run = request.args.get('<STR_LIT>')
tag = request.args.get('<STR_LIT>')
index = int(request.args.get('index'))
sample = int(request.args.get('<STR_LIT>', 0))
data = self._get_individual_image(run, tag, index, sample)
image_type = imghdr.what(None, data)
content_type = _IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE)
return http_util.Respond(request, data, content_type)
Serves an individual image.
f8039:c0:m9
def _buckets(data, bucket_count=None):
import tensorflow.compat.v1 as tf

if bucket_count is None:
  bucket_count = summary_v2.DEFAULT_BUCKET_COUNT
with tf.name_scope('<STR_LIT>', values=[data, bucket_count]), \
     tf.control_dependencies([tf.assert_scalar(bucket_count),
                              tf.assert_type(bucket_count, tf.int32)]):
  data = tf.reshape(data, shape=[-1])
  data = tf.cast(data, tf.float64)
  is_empty = tf.equal(tf.size(input=data), 0)

  def when_empty():
    return tf.constant([], shape=(0, 3), dtype=tf.float64)

  def when_nonempty():
    min_ = tf.reduce_min(input_tensor=data)
    max_ = tf.reduce_max(input_tensor=data)
    range_ = max_ - min_
    is_singular = tf.equal(range_, 0)

    def when_nonsingular():
      bucket_width = range_ / tf.cast(bucket_count, tf.float64)
      offsets = data - min_
      bucket_indices = tf.cast(tf.floor(offsets / bucket_width),
                               dtype=tf.int32)
      clamped_indices = tf.minimum(bucket_indices, bucket_count - 1)
      one_hots = tf.one_hot(clamped_indices, depth=bucket_count)
      bucket_counts = tf.cast(tf.reduce_sum(input_tensor=one_hots, axis=0),
                              dtype=tf.float64)
      edges = tf.linspace(min_, max_, bucket_count + 1)
      left_edges = edges[:-1]
      right_edges = edges[1:]
      return tf.transpose(a=tf.stack(
          [left_edges, right_edges, bucket_counts]))

    def when_singular():
      center = min_
      bucket_starts = tf.stack([center - 0.5])
      bucket_ends = tf.stack([center + 0.5])
      bucket_counts = tf.stack([tf.cast(tf.size(input=data), tf.float64)])
      return tf.transpose(
          a=tf.stack([bucket_starts, bucket_ends, bucket_counts]))

    return tf.cond(is_singular, when_singular, when_nonsingular)

  return tf.cond(is_empty, when_empty, when_nonempty)
Create a TensorFlow op to group data into histogram buckets.

Arguments:
  data: A `Tensor` of any shape. Must be castable to `float64`.
  bucket_count: Optional positive `int` or scalar `int32` `Tensor`.

Returns:
  A `Tensor` of shape `[k, 3]` and type `float64`. The `i`th row is a
  triple `[left_edge, right_edge, count]` for a single bucket. The value
  of `k` is either `bucket_count` or `1` or `0`.
f8040:m0
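The bucketing logic is easier to see outside the graph. A minimal NumPy mirror of the op's three cases (empty, singular, regular), returning rows of [left_edge, right_edge, count]:

import numpy as np

def bucketize(data, bucket_count=30):
  data = np.asarray(data, dtype=np.float64).ravel()
  if data.size == 0:
    return np.zeros((0, 3))                      # empty: no buckets
  lo, hi = data.min(), data.max()
  if lo == hi:
    return np.array([[lo - 0.5, lo + 0.5, float(data.size)]])  # singular
  width = (hi - lo) / bucket_count
  # Clamp so the max value falls into the last bucket instead of overflowing.
  idx = np.minimum((data - lo) // width, bucket_count - 1).astype(int)
  counts = np.bincount(idx, minlength=bucket_count)
  edges = np.linspace(lo, hi, bucket_count + 1)
  return np.column_stack([edges[:-1], edges[1:], counts.astype(float)])

print(bucketize([1.0, 1.5, 3.0], bucket_count=2))
# [[1. 2. 2.]
#  [2. 3. 1.]]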
def op(name,
       data,
       bucket_count=None,
       display_name=None,
       description=None,
       collections=None):
import tensorflow.compat.v1 as tf

if display_name is None:
  display_name = name
summary_metadata = metadata.create_summary_metadata(
    display_name=display_name, description=description)
with tf.name_scope(name):
  tensor = _buckets(data, bucket_count=bucket_count)
  return tf.summary.tensor_summary(name='<STR_LIT>',
                                   tensor=tensor,
                                   collections=collections,
                                   summary_metadata=summary_metadata)
Create a legacy histogram summary op.

Arguments:
  name: A unique name for the generated summary node.
  data: A `Tensor` of any shape. Must be castable to `float64`.
  bucket_count: Optional positive `int`. The output will have this many
    buckets, except in two edge cases. If there is no data, then there are
    no buckets. If there is data but all points have the same value, then
    there is one bucket whose left and right endpoints are the same.
  display_name: Optional name for this summary in TensorBoard, as a
    constant `str`. Defaults to `name`.
  description: Optional long-form description for this summary, as a
    constant `str`. Markdown is supported. Defaults to empty.
  collections: Optional list of graph collections keys. The new summary op
    is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.

Returns:
  A TensorFlow summary op.
f8040:m1
def pb(name, data, bucket_count=None, display_name=None, description=None):
import tensorflow.compat.v1 as tf

if bucket_count is None:
  bucket_count = summary_v2.DEFAULT_BUCKET_COUNT
data = np.array(data).flatten().astype(float)
if data.size == 0:
  buckets = np.array([]).reshape((0, 3))
else:
  min_ = np.min(data)
  max_ = np.max(data)
  range_ = max_ - min_
  if range_ == 0:
    center = min_
    buckets = np.array([[center - 0.5, center + 0.5, float(data.size)]])
  else:
    bucket_width = range_ / bucket_count
    offsets = data - min_
    bucket_indices = np.floor(offsets / bucket_width).astype(int)
    clamped_indices = np.minimum(bucket_indices, bucket_count - 1)
    one_hots = (np.array([clamped_indices]).transpose()
                == np.arange(0, bucket_count))
    assert one_hots.shape == (data.size, bucket_count), (
        one_hots.shape, (data.size, bucket_count))
    bucket_counts = np.sum(one_hots, axis=0)
    edges = np.linspace(min_, max_, bucket_count + 1)
    left_edges = edges[:-1]
    right_edges = edges[1:]
    buckets = np.array([left_edges, right_edges, bucket_counts]).transpose()
tensor = tf.make_tensor_proto(buckets, dtype=tf.float64)
if display_name is None:
  display_name = name
summary_metadata = metadata.create_summary_metadata(
    display_name=display_name, description=description)
tf_summary_metadata = tf.SummaryMetadata.FromString(
    summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag='<STR_LIT>' % name,
                  metadata=tf_summary_metadata,
                  tensor=tensor)
return summary
Create a legacy histogram summary protobuf.

Arguments:
  name: A unique name for the generated summary, including any desired
    name scopes.
  data: A `np.array` or array-like form of any shape. Must have type
    castable to `float`.
  bucket_count: Optional positive `int`. The output will have this many
    buckets, except in two edge cases. If there is no data, then there are
    no buckets. If there is data but all points have the same value, then
    there is one bucket whose left and right endpoints are the same.
  display_name: Optional name for this summary in TensorBoard, as a `str`.
    Defaults to `name`.
  description: Optional long-form description for this summary, as a
    `str`. Markdown is supported. Defaults to empty.

Returns:
  A `tf.Summary` protobuf object.
f8040:m2
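Usage sketch (the tag prefix is masked above, so the resulting tag is an assumption):

import numpy as np

proto = pb('weights', np.random.randn(1000), bucket_count=20)
# proto.value[0].tensor is a [20, 3] float64 tensor of
# [left_edge, right_edge, count] rows.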
def run_all(logdir, verbose=False, num_summaries=<NUM_LIT>):
del verbose

tf.compat.v1.set_random_seed(0)
k = tf.compat.v1.placeholder(tf.float32)

mean_moving_normal = tf.random.normal(shape=[1000], mean=(5 * k), stddev=1)
histogram_summary.op("<STR_LIT>",
                     mean_moving_normal,
                     description="<STR_LIT>"
                                 "<STR_LIT>")

shrinking_normal = tf.random.normal(shape=[1000], mean=0, stddev=1 - (k))
histogram_summary.op("<STR_LIT>", shrinking_normal,
                     description="<STR_LIT>"
                                 "<STR_LIT>")

normal_combined = tf.concat([mean_moving_normal, shrinking_normal], 0)
histogram_summary.op("<STR_LIT>", normal_combined,
                     description="<STR_LIT>"
                                 "<STR_LIT>"
                                 "<STR_LIT>"
                                 "<STR_LIT>"
                                 "<STR_LIT>")

gamma = tf.random.gamma(shape=[1000], alpha=k)
histogram_summary.op("<STR_LIT>", gamma,
                     description="<STR_LIT>"
                                 "<STR_LIT>")

poisson = tf.compat.v1.random_poisson(shape=[1000], lam=k)
histogram_summary.op("<STR_LIT>", poisson,
                     description="<STR_LIT>"
                                 "<STR_LIT>")

uniform = tf.random.uniform(shape=[1000], maxval=k * 10)
histogram_summary.op("<STR_LIT>", uniform,
                     description="<STR_LIT>")

all_distributions = [mean_moving_normal, shrinking_normal,
                     gamma, poisson, uniform]
all_combined = tf.concat(all_distributions, 0)
histogram_summary.op("<STR_LIT>", all_combined,
                     description="<STR_LIT>"
                                 "<STR_LIT>"
                                 "<STR_LIT>"
                                 "<STR_LIT>")

summaries = tf.compat.v1.summary.merge_all()
sess = tf.compat.v1.Session()
writer = tf.summary.FileWriter(logdir)
N = num_summaries
for step in xrange(N):
  k_val = step / float(N)
  summ = sess.run(summaries, feed_dict={k: k_val})
  writer.add_summary(summ, global_step=step)
Generate a bunch of histogram data, and write it to logdir.
f8041:m0
def __init__(self, context):
self._db_connection_provider = context.db_connection_provider
self._multiplexer = context.multiplexer
Instantiates HistogramsPlugin via TensorBoard core.

Args:
  context: A base_plugin.TBContext instance.
f8043:c0:m0
def is_active(self):
if self._db_connection_provider:
  db = self._db_connection_provider()
  cursor = db.execute(
This plugin is active iff any run has at least one histograms tag.
f8043:c0:m2
def index_impl(self):
if self._db_connection_provider:
  db = self._db_connection_provider()
  cursor = db.execute(
Return {runName: {tagName: {displayName: ..., description: ...}}}.
f8043:c0:m3
def histograms_impl(self, tag, run, downsample_to=None):
if self._db_connection_provider:
  db = self._db_connection_provider()
  cursor = db.cursor()
  cursor.execute(
      '''<STR_LIT>''',
      {'<STR_LIT>': run, '<STR_LIT>': tag, '<STR_LIT>': metadata.PLUGIN_NAME})
  row = cursor.fetchone()
  if not row:
    raise ValueError('<STR_LIT>' % (tag, run))
  (tag_id,) = row
  cursor.execute(
      '''<STR_LIT>''',
      {'<STR_LIT>': tag_id, '<STR_LIT>': downsample_to})
  events = [(computed_time, step, self._get_values(data, dtype, shape))
            for step, computed_time, data, dtype, shape in cursor]
else:
  try:
    tensor_events = self._multiplexer.Tensors(run, tag)
  except KeyError:
    raise ValueError('<STR_LIT>' % (tag, run))
  if downsample_to is not None and len(tensor_events) > downsample_to:
    rand_indices = random.Random(0).sample(
        six.moves.xrange(len(tensor_events)), downsample_to)
    indices = sorted(rand_indices)
    tensor_events = [tensor_events[i] for i in indices]
  events = [[e.wall_time, e.step,
             tensor_util.make_ndarray(e.tensor_proto).tolist()]
            for e in tensor_events]
return (events, 'application/json')
Returns a result of the form `(body, mime_type)`, or raises `ValueError`.

At most `downsample_to` events will be returned. If this value is `None`,
then no downsampling will be performed.
f8043:c0:m4
def _get_values(self, data_blob, dtype_enum, shape_string):
buf = np.frombuffer(data_blob, dtype=tf.DType(dtype_enum).as_numpy_dtype)
return buf.reshape([int(i) for i in shape_string.split(',')]).tolist()
Obtains values for histogram data given blob and dtype enum.

Args:
  data_blob: The blob obtained from the database.
  dtype_enum: The enum representing the dtype.
  shape_string: A comma-separated string of numbers denoting shape.

Returns:
  The histogram values as a list served to the frontend.
f8043:c0:m5
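The reshape-from-blob step, shown standalone with NumPy:

import numpy as np

blob = np.arange(6, dtype=np.float64).tobytes()
np.frombuffer(blob, dtype=np.float64).reshape([2, 3]).tolist()
# -> [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]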
@wrappers.Request.application
def histograms_route(self, request):
tag = request.args.get('<STR_LIT>')
run = request.args.get('<STR_LIT>')
try:
  (body, mime_type) = self.histograms_impl(
      tag, run, downsample_to=self.SAMPLE_SIZE)
  code = 200
except ValueError as e:
  (body, mime_type) = (str(e), '<STR_LIT>')
  code = <NUM_LIT>
return http_util.Respond(request, body, mime_type, code=code)
Given a tag and single run, return array of histogram values.
f8043:c0:m7
def create_summary_metadata(display_name, description):
content = plugin_data_pb2.HistogramPluginData(version=PROTO_VERSION)
return summary_pb2.SummaryMetadata(
    display_name=display_name,
    summary_description=description,
    plugin_data=summary_pb2.SummaryMetadata.PluginData(
        plugin_name=PLUGIN_NAME,
        content=content.SerializeToString()))
Create a `summary_pb2.SummaryMetadata` proto for histogram plugin data.

Returns:
  A `summary_pb2.SummaryMetadata` protobuf object.
f8044:m0
def parse_plugin_metadata(content):
if not isinstance(content, bytes):
  raise TypeError('<STR_LIT>')
if content == b'{}':
  return plugin_data_pb2.HistogramPluginData()
else:
  result = plugin_data_pb2.HistogramPluginData.FromString(content)
  if result.version == 0:
    return result
  else:
    logger.warn(
        '<STR_LIT>'
        '<STR_LIT>'
        '<STR_LIT>', result.version, PROTO_VERSION)
    return result
Parse summary metadata to a Python object.

Arguments:
  content: The `content` field of a `SummaryMetadata` proto corresponding
    to the histogram plugin.

Returns:
  A `HistogramPluginData` protobuf object.
f8044:m1
def histogram(name, data, step=None, buckets=None, description=None):
summary_metadata = metadata.create_summary_metadata(
    display_name=None, description=description)
summary_scope = (
    getattr(tf.summary.experimental, '<STR_LIT>', None) or
    tf.summary.summary_scope)
with summary_scope(
    name, '<STR_LIT>', values=[data, buckets, step]) as (tag, _):
  tensor = _buckets(data, bucket_count=buckets)
  return tf.summary.write(
      tag=tag, tensor=tensor, step=step, metadata=summary_metadata)
Write a histogram summary.

Arguments:
  name: A name for this summary. The summary tag used for TensorBoard
    will be this name prefixed by any active name scopes.
  data: A `Tensor` of any shape. Must be castable to `float64`.
  step: Explicit `int64`-castable monotonic step value for this summary.
    If omitted, this defaults to `tf.summary.experimental.get_step()`,
    which must not be None.
  buckets: Optional positive `int`. The output will have this many
    buckets, except in two edge cases. If there is no data, then there are
    no buckets. If there is data but all points have the same value, then
    there is one bucket whose left and right endpoints are the same.
  description: Optional long-form description for this summary, as a
    constant `str`. Markdown is supported. Defaults to empty.

Returns:
  True on success, or false if no summary was emitted because no default
  summary writer was available.

Raises:
  ValueError: if a default writer exists, but no step was provided and
    `tf.summary.experimental.get_step()` is None.
f8046:m0
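A minimal usage sketch for the op above, assuming TF 2.x, where the public `tf.summary.histogram` is backed by this function; the writer directory is arbitrary.

    import numpy as np
    import tensorflow as tf

    # Write one histogram summary per step; each summary stores the
    # [left_edge, right_edge, count] triples produced by _buckets().
    writer = tf.summary.create_file_writer('/tmp/hist_demo')
    with writer.as_default():
        for step in range(3):
            data = np.random.normal(loc=step, size=1000)
            tf.summary.histogram('weights', data, step=step, buckets=30)
    writer.flush()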
def _buckets(data, bucket_count=None):
if bucket_count is None:<EOL><INDENT>bucket_count = DEFAULT_BUCKET_COUNT<EOL><DEDENT>with tf.name_scope('<STR_LIT>'):<EOL><INDENT>tf.debugging.assert_scalar(bucket_count)<EOL>tf.debugging.assert_type(bucket_count, tf.int32)<EOL>data = tf.reshape(data, shape=[-<NUM_LIT:1>]) <EOL>data = tf.cast(data, tf.float64)<EOL>is_empty = tf.equal(tf.size(input=data), <NUM_LIT:0>)<EOL>def when_empty():<EOL><INDENT>return tf.constant([], shape=(<NUM_LIT:0>, <NUM_LIT:3>), dtype=tf.float64)<EOL><DEDENT>def when_nonempty():<EOL><INDENT>min_ = tf.reduce_min(input_tensor=data)<EOL>max_ = tf.reduce_max(input_tensor=data)<EOL>range_ = max_ - min_<EOL>is_singular = tf.equal(range_, <NUM_LIT:0>)<EOL>def when_nonsingular():<EOL><INDENT>bucket_width = range_ / tf.cast(bucket_count, tf.float64)<EOL>offsets = data - min_<EOL>bucket_indices = tf.cast(tf.floor(offsets / bucket_width),<EOL>dtype=tf.int32)<EOL>clamped_indices = tf.minimum(bucket_indices, bucket_count - <NUM_LIT:1>)<EOL>one_hots = tf.one_hot(clamped_indices, depth=bucket_count)<EOL>bucket_counts = tf.cast(tf.reduce_sum(input_tensor=one_hots, axis=<NUM_LIT:0>),<EOL>dtype=tf.float64)<EOL>edges = tf.linspace(min_, max_, bucket_count + <NUM_LIT:1>)<EOL>edges = tf.concat([edges[:-<NUM_LIT:1>], [max_]], <NUM_LIT:0>)<EOL>left_edges = edges[:-<NUM_LIT:1>]<EOL>right_edges = edges[<NUM_LIT:1>:]<EOL>return tf.transpose(a=tf.stack(<EOL>[left_edges, right_edges, bucket_counts]))<EOL><DEDENT>def when_singular():<EOL><INDENT>center = min_<EOL>bucket_starts = tf.stack([center - <NUM_LIT:0.5>])<EOL>bucket_ends = tf.stack([center + <NUM_LIT:0.5>])<EOL>bucket_counts = tf.stack([tf.cast(tf.size(input=data), tf.float64)])<EOL>return tf.transpose(<EOL>a=tf.stack([bucket_starts, bucket_ends, bucket_counts]))<EOL><DEDENT>return tf.cond(is_singular, when_singular, when_nonsingular)<EOL><DEDENT>return tf.cond(is_empty, when_empty, when_nonempty)<EOL><DEDENT>
Create a TensorFlow op to group data into histogram buckets. Arguments: data: A `Tensor` of any shape. Must be castable to `float64`. bucket_count: Optional positive `int` or scalar `int32` `Tensor`. Returns: A `Tensor` of shape `[k, 3]` and type `float64`. The `i`th row is a triple `[left_edge, right_edge, count]` for a single bucket. The value of `k` is either `bucket_count` or `1` or `0`.
f8046:m1
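For reference, a pure-numpy sketch of the same bucketing logic (the nonempty non-singular branch plus both edge cases); this is illustrative only, since the real version builds TF graph ops.

    import numpy as np

    def buckets_np(data, bucket_count=30):
        # Mirrors _buckets: returns a [k, 3] array of [left_edge, right_edge, count].
        data = np.asarray(data, dtype=np.float64).ravel()
        if data.size == 0:                       # no data -> no buckets
            return np.zeros((0, 3))
        lo, hi = data.min(), data.max()
        if lo == hi:                             # all values equal -> one bucket
            return np.array([[lo - 0.5, lo + 0.5, float(data.size)]])
        width = (hi - lo) / bucket_count
        idx = np.minimum((data - lo) // width, bucket_count - 1).astype(int)
        counts = np.bincount(idx, minlength=bucket_count).astype(np.float64)
        edges = np.linspace(lo, hi, bucket_count + 1)
        return np.stack([edges[:-1], edges[1:], counts], axis=1)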
def histogram_pb(tag, data, buckets=None, description=None):
bucket_count = DEFAULT_BUCKET_COUNT if buckets is None else buckets<EOL>data = np.array(data).flatten().astype(float)<EOL>if data.size == <NUM_LIT:0>:<EOL><INDENT>buckets = np.array([]).reshape((<NUM_LIT:0>, <NUM_LIT:3>))<EOL><DEDENT>else:<EOL><INDENT>min_ = np.min(data)<EOL>max_ = np.max(data)<EOL>range_ = max_ - min_<EOL>if range_ == <NUM_LIT:0>:<EOL><INDENT>center = min_<EOL>buckets = np.array([[center - <NUM_LIT:0.5>, center + <NUM_LIT:0.5>, float(data.size)]])<EOL><DEDENT>else:<EOL><INDENT>bucket_width = range_ / bucket_count<EOL>offsets = data - min_<EOL>bucket_indices = np.floor(offsets / bucket_width).astype(int)<EOL>clamped_indices = np.minimum(bucket_indices, bucket_count - <NUM_LIT:1>)<EOL>one_hots = (np.array([clamped_indices]).transpose()<EOL>== np.arange(<NUM_LIT:0>, bucket_count)) <EOL>assert one_hots.shape == (data.size, bucket_count), (<EOL>one_hots.shape, (data.size, bucket_count))<EOL>bucket_counts = np.sum(one_hots, axis=<NUM_LIT:0>)<EOL>edges = np.linspace(min_, max_, bucket_count + <NUM_LIT:1>)<EOL>left_edges = edges[:-<NUM_LIT:1>]<EOL>right_edges = edges[<NUM_LIT:1>:]<EOL>buckets = np.array([left_edges, right_edges, bucket_counts]).transpose()<EOL><DEDENT><DEDENT>tensor = tensor_util.make_tensor_proto(buckets, dtype=np.float64)<EOL>summary_metadata = metadata.create_summary_metadata(<EOL>display_name=None, description=description)<EOL>summary = summary_pb2.Summary()<EOL>summary.value.add(tag=tag,<EOL>metadata=summary_metadata,<EOL>tensor=tensor)<EOL>return summary<EOL>
Create a histogram summary protobuf. Arguments: tag: String tag for the summary. data: A `np.array` or array-like form of any shape. Must have type castable to `float`. buckets: Optional positive `int`. The output will have this many buckets, except in two edge cases. If there is no data, then there are no buckets. If there is data but all points have the same value, then there is one bucket whose left and right endpoints are the same. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Returns: A `summary_pb2.Summary` protobuf object.
f8046:m2
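A usage sketch for the eager/proto variant; the import path is an assumption about where `histogram_pb` is exposed.

    import numpy as np
    from tensorboard.plugins.histogram import summary_v2  # path assumed

    summary = summary_v2.histogram_pb(
        tag='activations', data=np.random.rand(256), buckets=10)
    value = summary.value[0]
    print(value.tag)                  # 'activations'
    print(value.tensor.tensor_shape)  # 10 rows of [left, right, count]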
def _get_config(self):
filename = '<STR_LIT>'.format(self.PLUGIN_LOGDIR, CONFIG_FILENAME)<EOL>modified_time = os.path.getmtime(filename)<EOL>if modified_time != self.config_last_modified_time:<EOL><INDENT>config = read_pickle(filename, default=self.previous_config)<EOL>self.previous_config = config<EOL><DEDENT>else:<EOL><INDENT>config = self.previous_config<EOL><DEDENT>self.config_last_modified_time = modified_time<EOL>return config<EOL>
Reads the config file from disk if it has changed, falling back to the cached config otherwise.
f8050:c0:m1
def _write_summary(self, session, frame):
summary = session.run(self.summary_op, feed_dict={<EOL>self.frame_placeholder: frame<EOL>})<EOL>path = '<STR_LIT>'.format(self.PLUGIN_LOGDIR, SUMMARY_FILENAME)<EOL>write_file(summary, path)<EOL>
Writes the frame to disk as a tensor summary.
f8050:c0:m2
def _enough_time_has_passed(self, FPS):
if FPS == <NUM_LIT:0>:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>earliest_time = self.last_update_time + (<NUM_LIT:1.0> / FPS)<EOL>return time.time() >= earliest_time<EOL><DEDENT>
Returns whether enough time has passed since the last update, for limiting how often frames are computed. An FPS of 0 disables updates entirely.
f8050:c0:m4
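The throttle logic is simple enough to restate standalone; this sketch reproduces the documented behavior (FPS == 0 means "paused").

    import time

    class FrameThrottle(object):
        # Standalone restatement of _enough_time_has_passed.
        def __init__(self):
            self.last_update_time = time.time()

        def enough_time_has_passed(self, fps):
            if fps == 0:
                return False          # FPS of 0: never update
            earliest_time = self.last_update_time + (1.0 / fps)
            return time.time() >= earliest_time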
def _update_recording(self, frame, config):
<EOL>should_record = config['<STR_LIT>']<EOL>if should_record:<EOL><INDENT>if not self.is_recording:<EOL><INDENT>self.is_recording = True<EOL>logger.info(<EOL>'<STR_LIT>',<EOL>self.video_writer.current_output().name())<EOL><DEDENT>self.video_writer.write_frame(frame)<EOL><DEDENT>elif self.is_recording:<EOL><INDENT>self.is_recording = False<EOL>self.video_writer.finish()<EOL>logger.info('<STR_LIT>')<EOL><DEDENT>
Adds a frame to the current video output.
f8050:c0:m6
def update(self, session, arrays=None, frame=None):
new_config = self._get_config()<EOL>if self._enough_time_has_passed(self.previous_config['<STR_LIT>']):<EOL><INDENT>self.visualizer.update(new_config)<EOL>self.last_update_time = time.time()<EOL>final_image = self._update_frame(session, arrays, frame, new_config)<EOL>self._update_recording(final_image, new_config)<EOL><DEDENT>
Creates a frame and writes it to disk. Args: arrays: a list of np arrays. Use the "custom" option in the client. frame: a 2D np array. This way the plugin can be used for video of any kind, not just the visualization that comes with the plugin. frame can also be a function, which only is evaluated when the "frame" option is selected by the client.
f8050:c0:m7
@staticmethod<EOL><INDENT>def gradient_helper(optimizer, loss, var_list=None):<DEDENT>
if var_list is None:<EOL><INDENT>var_list = tf.compat.v1.trainable_variables()<EOL><DEDENT>grads_and_vars = optimizer.compute_gradients(loss, var_list=var_list)<EOL>grads = [pair[<NUM_LIT:0>] for pair in grads_and_vars]<EOL>return grads, optimizer.apply_gradients(grads_and_vars)<EOL>
A helper to get the gradients out at each step. Args: optimizer: the optimizer op. loss: the op that computes your loss value. var_list: the variables to compute gradients for; defaults to all trainable variables. Returns: the gradient tensors and the train_step op.
f8050:c0:m8
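A hedged TF1-style usage sketch: the helper replaces a plain `optimizer.minimize(loss)` call so the gradient tensors can be handed to Beholder's visualizer (the `Beholder` import path is an assumption).

    import tensorflow.compat.v1 as tf
    from tensorboard.plugins.beholder import Beholder  # path assumed

    x = tf.Variable(3.0)
    loss = tf.square(x)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    # Instead of optimizer.minimize(loss):
    grads, train_step = Beholder.gradient_helper(optimizer, loss)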
def __init__(self, logdir):
self._logdir = logdir<EOL>self.beholder = None<EOL>
Creates a new Hook instance. Args: logdir: Directory where Beholder should write data.
f8050:c1:m0
def load(self, context):
try:<EOL><INDENT>import tensorflow<EOL><DEDENT>except ImportError:<EOL><INDENT>return<EOL><DEDENT>from tensorboard.plugins.beholder.beholder_plugin import BeholderPlugin<EOL>return BeholderPlugin(context)<EOL>
Returns the plugin, if possible. Args: context: The TBContext flags. Returns: A BeholderPlugin instance or None if it couldn't be loaded.
f8053:c0:m0
def scale_sections(sections, scaling_scope):
new_sections = []<EOL>if scaling_scope == '<STR_LIT>':<EOL><INDENT>for section in sections:<EOL><INDENT>new_sections.append(scale_image_for_display(section))<EOL><DEDENT><DEDENT>elif scaling_scope == '<STR_LIT>':<EOL><INDENT>global_min, global_max = global_extrema(sections)<EOL>for section in sections:<EOL><INDENT>new_sections.append(scale_image_for_display(section,<EOL>global_min,<EOL>global_max))<EOL><DEDENT><DEDENT>return new_sections<EOL>
Input: unscaled sections. Returns: sections scaled to [0, 255].
f8055:m1
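A numpy sketch of the underlying min-max scaling; `scale_image_for_display` is assumed to implement the equivalent arithmetic, with the global scope sharing one (min, max) pair across all sections.

    import numpy as np

    def scale_for_display(section, lo=None, hi=None):
        # Map values linearly onto [0, 255]; pass shared lo/hi for global scaling.
        lo = section.min() if lo is None else lo
        hi = section.max() if hi is None else hi
        span = (hi - lo) or 1.0   # avoid dividing by zero for flat sections
        return ((section - lo) / span * 255.0).astype(np.uint8)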
def _reshape_conv_array(self, array, section_height, image_width):
<EOL>if array.shape[<NUM_LIT:1>] == array.shape[<NUM_LIT:2>] and array.shape[<NUM_LIT:0>] != array.shape[<NUM_LIT:1>]:<EOL><INDENT>array = np.rollaxis(np.rollaxis(array, <NUM_LIT:2>), <NUM_LIT:2>)<EOL><DEDENT>block_height, block_width, in_channels = array.shape[:<NUM_LIT:3>]<EOL>rows = []<EOL>max_element_count = section_height * int(image_width / MIN_SQUARE_SIZE)<EOL>element_count = <NUM_LIT:0><EOL>for i in range(in_channels):<EOL><INDENT>rows.append(array[:, :, i, :].reshape(block_height, -<NUM_LIT:1>, order='<STR_LIT:F>'))<EOL>if element_count >= max_element_count and not self.config['<STR_LIT>']:<EOL><INDENT>break<EOL><DEDENT>element_count += block_height * in_channels * block_width<EOL><DEDENT>return np.vstack(rows)<EOL>
Reshape a rank 4 array to be rank 2, where each column of block_width is a filter, and each row of block height is an input channel. For example: [[[[ 11, 21, 31, 41], [ 51, 61, 71, 81], [ 91, 101, 111, 121]], [[ 12, 22, 32, 42], [ 52, 62, 72, 82], [ 92, 102, 112, 122]], [[ 13, 23, 33, 43], [ 53, 63, 73, 83], [ 93, 103, 113, 123]]], [[[ 14, 24, 34, 44], [ 54, 64, 74, 84], [ 94, 104, 114, 124]], [[ 15, 25, 35, 45], [ 55, 65, 75, 85], [ 95, 105, 115, 125]], [[ 16, 26, 36, 46], [ 56, 66, 76, 86], [ 96, 106, 116, 126]]], [[[ 17, 27, 37, 47], [ 57, 67, 77, 87], [ 97, 107, 117, 127]], [[ 18, 28, 38, 48], [ 58, 68, 78, 88], [ 98, 108, 118, 128]], [[ 19, 29, 39, 49], [ 59, 69, 79, 89], [ 99, 109, 119, 129]]]] should be reshaped to: [[ 11, 12, 13, 21, 22, 23, 31, 32, 33, 41, 42, 43], [ 14, 15, 16, 24, 25, 26, 34, 35, 36, 44, 45, 46], [ 17, 18, 19, 27, 28, 29, 37, 38, 39, 47, 48, 49], [ 51, 52, 53, 61, 62, 63, 71, 72, 73, 81, 82, 83], [ 54, 55, 56, 64, 65, 66, 74, 75, 76, 84, 85, 86], [ 57, 58, 59, 67, 68, 69, 77, 78, 79, 87, 88, 89], [ 91, 92, 93, 101, 102, 103, 111, 112, 113, 121, 122, 123], [ 94, 95, 96, 104, 105, 106, 114, 115, 116, 124, 125, 126], [ 97, 98, 99, 107, 108, 109, 117, 118, 119, 127, 128, 129]]
f8057:c0:m1
def _reshape_irregular_array(self, array, section_height, image_width):
section_area = section_height * image_width<EOL>flattened_array = np.ravel(array)<EOL>if not self.config['<STR_LIT>']:<EOL><INDENT>flattened_array = flattened_array[:int(section_area/MIN_SQUARE_SIZE)]<EOL><DEDENT>cell_count = np.prod(flattened_array.shape)<EOL>cell_area = section_area / cell_count<EOL>cell_side_length = max(<NUM_LIT:1>, floor(sqrt(cell_area)))<EOL>row_count = max(<NUM_LIT:1>, int(section_height / cell_side_length))<EOL>col_count = int(cell_count / row_count)<EOL>section = np.reshape(flattened_array[:row_count * col_count],<EOL>(row_count, col_count))<EOL>return section<EOL>
Reshapes arrays whose rank is not in {1, 2, 4}.
f8057:c0:m2
def _arrays_to_sections(self, arrays):
sections = []<EOL>sections_to_resize_later = {}<EOL>show_all = self.config['<STR_LIT>']<EOL>image_width = self._determine_image_width(arrays, show_all)<EOL>for array_number, array in enumerate(arrays):<EOL><INDENT>rank = len(array.shape)<EOL>section_height = self._determine_section_height(array, show_all)<EOL>if rank == <NUM_LIT:1>:<EOL><INDENT>section = np.atleast_2d(array)<EOL><DEDENT>elif rank == <NUM_LIT:2>:<EOL><INDENT>section = array<EOL><DEDENT>elif rank == <NUM_LIT:4>:<EOL><INDENT>section = self._reshape_conv_array(array, section_height, image_width)<EOL><DEDENT>else:<EOL><INDENT>section = self._reshape_irregular_array(array,<EOL>section_height,<EOL>image_width)<EOL><DEDENT>section_size = section_height * image_width<EOL>array_size = np.prod(array.shape)<EOL>if section_size > array_size:<EOL><INDENT>sections.append(section)<EOL>sections_to_resize_later[array_number] = section_height<EOL><DEDENT>else:<EOL><INDENT>sections.append(im_util.resize(section, section_height, image_width))<EOL><DEDENT><DEDENT>self.sections_over_time.append(sections)<EOL>if self.config['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>sections = self._sections_to_variance_sections(self.sections_over_time)<EOL><DEDENT>for array_number, height in sections_to_resize_later.items():<EOL><INDENT>sections[array_number] = im_util.resize(sections[array_number],<EOL>height,<EOL>image_width)<EOL><DEDENT>return sections<EOL>
Input: unprocessed numpy arrays. Returns: sections of the size at which they will appear in the image, not yet scaled for display; scaling has to wait until after the variance is computed.
f8057:c0:m5
def _sections_to_variance_sections(self, sections_over_time):
variance_sections = []<EOL>for i in range(len(sections_over_time[<NUM_LIT:0>])):<EOL><INDENT>time_sections = [sections[i] for sections in sections_over_time]<EOL>variance = np.var(time_sections, axis=<NUM_LIT:0>)<EOL>variance_sections.append(variance)<EOL><DEDENT>return variance_sections<EOL>
Computes the variance of corresponding sections over time. Returns: a list of np arrays.
f8057:c0:m6
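A worked example: the variance is taken element-wise across the time axis, independently for each section index.

    import numpy as np

    sections_over_time = [
        [np.zeros((2, 2)), np.full((3, 3), 5.0)],   # t = 0
        [np.ones((2, 2)),  np.full((3, 3), 5.0)],   # t = 1
    ]
    time_sections = [sections[0] for sections in sections_over_time]
    print(np.var(time_sections, axis=0))  # 0.25 everywhere: values 0 and 1 over time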
def _maybe_clear_deque(self):
for config_item in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>if self.config[config_item] != self.old_config[config_item]:<EOL><INDENT>self.sections_over_time.clear()<EOL>break<EOL><DEDENT><DEDENT>self.old_config = self.config<EOL>window_size = self.config['<STR_LIT>']<EOL>if window_size != self.sections_over_time.maxlen:<EOL><INDENT>self.sections_over_time = deque(self.sections_over_time, window_size)<EOL><DEDENT>
Clears the deque if certain parts of the config have changed.
f8057:c0:m8
def generate_run(self, run_name, include_graph, include_run_metadata):
raise NotImplementedError('<STR_LIT>')<EOL>
Create a run. Subclasses must override this method.
f8059:c0:m2
def generate_run(self, run_name, include_graph, include_run_metadata):
tf.compat.v1.reset_default_graph()<EOL>k1 = tf.constant(math.pi, name='<STR_LIT>')<EOL>k2 = tf.constant(math.e, name='<STR_LIT>')<EOL>result = (k1 ** k2) - k1<EOL>expected = tf.constant(<NUM_LIT>, name='<STR_LIT>')<EOL>error = tf.abs(result - expected, name='<STR_LIT:error>')<EOL>message_prefix_value = '<STR_LIT>' * <NUM_LIT:1000><EOL>true_length = len(message_prefix_value)<EOL>assert true_length > self._MESSAGE_PREFIX_LENGTH_LOWER_BOUND, true_length<EOL>message_prefix = tf.constant(message_prefix_value, name='<STR_LIT>')<EOL>error_message = tf.strings.join([message_prefix,<EOL>tf.as_string(error, name='<STR_LIT>')],<EOL>name='<STR_LIT>')<EOL>summary_message = tf.compat.v1.summary.text('<STR_LIT>', error_message)<EOL>sess = tf.compat.v1.Session()<EOL>writer = test_util.FileWriter(os.path.join(self.logdir, run_name))<EOL>if include_graph:<EOL><INDENT>writer.add_graph(sess.graph)<EOL><DEDENT>options = tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE)<EOL>run_metadata = config_pb2.RunMetadata()<EOL>s = sess.run(summary_message, options=options, run_metadata=run_metadata)<EOL>writer.add_summary(s)<EOL>if include_run_metadata:<EOL><INDENT>writer.add_run_metadata(run_metadata, self._METADATA_TAG)<EOL><DEDENT>writer.close()<EOL>
Create a run with a text summary, metadata, and optionally a graph.
f8059:c1:m0
def _get_graph(self, *args, **kwargs):
self.set_up_with_runs()<EOL>(graph_pbtxt, mime_type) = self.plugin.graph_impl(<EOL>self._RUN_WITH_GRAPH, *args, **kwargs)<EOL>self.assertEqual(mime_type, '<STR_LIT>')<EOL>return text_format.Parse(graph_pbtxt, tf.compat.v1.GraphDef())<EOL>
Set up runs, then fetch and return the graph as a proto.
f8059:c1:m1
def _get_graph(self, *args, **kwargs):
(graph_pbtxt, mime_type) = self.plugin.graph_impl(*args, **kwargs)<EOL>self.assertEqual(mime_type, '<STR_LIT>')<EOL>return text_format.Parse(graph_pbtxt, graph_pb2.GraphDef())<EOL>
Fetch and return the graph as a proto.
f8061:c0:m1
def _safe_copy_proto_list_values(dst_proto_list, src_proto_list, get_key):
def _assert_proto_container_unique_keys(proto_list, get_key):<EOL><INDENT>"""<STR_LIT>"""<EOL>keys = set()<EOL>for item in proto_list:<EOL><INDENT>key = get_key(item)<EOL>if key in keys:<EOL><INDENT>raise _ProtoListDuplicateKeyError(key)<EOL><DEDENT>keys.add(key)<EOL><DEDENT><DEDENT>_assert_proto_container_unique_keys(dst_proto_list, get_key)<EOL>_assert_proto_container_unique_keys(src_proto_list, get_key)<EOL>key_to_proto = {}<EOL>for proto in dst_proto_list:<EOL><INDENT>key = get_key(proto)<EOL>key_to_proto[key] = proto<EOL><DEDENT>for proto in src_proto_list:<EOL><INDENT>key = get_key(proto)<EOL>if key in key_to_proto:<EOL><INDENT>if proto != key_to_proto.get(key):<EOL><INDENT>raise _SameKeyDiffContentError(key)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>dst_proto_list.add().CopyFrom(proto)<EOL><DEDENT><DEDENT>
Safely merge values from `src_proto_list` into `dst_proto_list`. Each element in `dst_proto_list` must be mapped by `get_key` to a key value that is unique within that list; likewise for `src_proto_list`. If an element of `src_proto_list` has the same key as an existing element in `dst_proto_list`, then the elements must also be equal. Args: dst_proto_list: A `RepeatedCompositeContainer` or `RepeatedScalarContainer` into which values should be copied. src_proto_list: A container holding the same kind of values as in `dst_proto_list` from which values should be copied. get_key: A function that takes an element of `dst_proto_list` or `src_proto_list` and returns a key, such that if two elements have the same key then it is required that they be deep-equal. For instance, if `dst_proto_list` is a list of nodes, then `get_key` might be `lambda node: node.name` to indicate that if two nodes have the same name then they must be the same node. All keys must be hashable. Raises: _ProtoListDuplicateKeyError: A proto_list contains items with duplicate keys. _SameKeyDiffContentError: An item with the same key has different contents.
f8062:m0
def combine_graph_defs(to_proto, from_proto):
if from_proto.version != to_proto.version:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>_safe_copy_proto_list_values(<EOL>to_proto.node,<EOL>from_proto.node,<EOL>lambda n: n.name)<EOL><DEDENT>except _ProtoListDuplicateKeyError as exc:<EOL><INDENT>raise ValueError('<STR_LIT>' % exc)<EOL><DEDENT>except _SameKeyDiffContentError as exc:<EOL><INDENT>raise ValueError(<EOL>('<STR_LIT>'<EOL>'<STR_LIT>') % exc)<EOL><DEDENT>try:<EOL><INDENT>_safe_copy_proto_list_values(<EOL>to_proto.library.function,<EOL>from_proto.library.function,<EOL>lambda n: n.signature.name)<EOL><DEDENT>except _ProtoListDuplicateKeyError as exc:<EOL><INDENT>raise ValueError('<STR_LIT>' % exc)<EOL><DEDENT>except _SameKeyDiffContentError as exc:<EOL><INDENT>raise ValueError(<EOL>('<STR_LIT>'<EOL>'<STR_LIT>') % exc)<EOL><DEDENT>try:<EOL><INDENT>_safe_copy_proto_list_values(<EOL>to_proto.library.gradient,<EOL>from_proto.library.gradient,<EOL>lambda g: g.gradient_func)<EOL><DEDENT>except _ProtoListDuplicateKeyError as exc:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>' % exc)<EOL><DEDENT>except _SameKeyDiffContentError as exc:<EOL><INDENT>raise ValueError(<EOL>('<STR_LIT>'<EOL>'<STR_LIT>') % exc)<EOL><DEDENT>return to_proto<EOL>
Combines two GraphDefs by adding nodes from from_proto into to_proto. All GraphDefs are expected to be TensorBoard-flavored GraphDefs. Node names are assumed to be unique across GraphDefs when their contents differ; names may be shared only when the NodeDef contents are exactly the same. Args: to_proto: A destination TensorBoard GraphDef. from_proto: A TensorBoard GraphDef to copy contents from. Returns: to_proto Raises: ValueError in case any assumption about the GraphDefs is violated: a GraphDef should have unique node, function, and gradient function names; also, when merging GraphDefs, they should not have nodes, functions, or gradient function mappings that share a name but whose details do not match.
f8062:m1
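A minimal merge sketch, assuming `combine_graph_defs` is in scope: a node name present in both protos must have identical content, and new names are copied across.

    from tensorflow.core.framework import graph_pb2

    a = graph_pb2.GraphDef()
    b = graph_pb2.GraphDef()
    a.node.add(name='x')
    b.node.add(name='x')   # same name, identical content: tolerated
    b.node.add(name='y')   # new name: copied into `a`
    combine_graph_defs(a, b)
    assert [n.name for n in a.node] == ['x', 'y']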
def _walk_layers(keras_layer):
yield ('<STR_LIT>', keras_layer)<EOL>if keras_layer.get('<STR_LIT>').get('<STR_LIT>'):<EOL><INDENT>name_scope = keras_layer.get('<STR_LIT>').get('<STR_LIT:name>')<EOL>for layer in keras_layer.get('<STR_LIT>').get('<STR_LIT>'):<EOL><INDENT>for (sub_name_scope, sublayer) in _walk_layers(layer):<EOL><INDENT>sub_name_scope = '<STR_LIT>' % (<EOL>name_scope, sub_name_scope) if sub_name_scope else name_scope<EOL>yield (sub_name_scope, sublayer)<EOL><DEDENT><DEDENT><DEDENT>
Walks the nested keras layer configuration in preorder. Args: keras_layer: Keras configuration from model.to_json. Yields: A tuple of (name_scope, layer_config). name_scope: a string representing a scope name, similar to that of tf.name_scope. layer_config: a dict representing a Keras layer configuration.
f8063:m0
def _scoped_name(name_scope, node_name):
if name_scope:<EOL><INDENT>return '<STR_LIT>' % (name_scope, node_name)<EOL><DEDENT>return node_name<EOL>
Returns scoped name for a node as a string in the form '<scope>/<node name>'. Args: name_scope: a string representing a scope name, similar to that of tf.name_scope. node_name: a string representing the current node name. Returns: A string representing a scoped name.
f8063:m1
def _is_model(layer):
return layer.get('<STR_LIT>').get('<STR_LIT>') is not None<EOL>
Returns True if layer is a model. Args: layer: a dict representing a Keras model configuration. Returns: bool: True if layer is a model.
f8063:m2
def _norm_to_list_of_layers(maybe_layers):
return (maybe_layers if isinstance(maybe_layers[<NUM_LIT:0>], (list,))<EOL>else [maybe_layers])<EOL>
Normalizes to a list of layers. Args: maybe_layers: A list of data[1] or a list of lists of data. Returns: A list of lists of data. [1]: A Functional model has fields 'inbound_nodes' and 'output_layers' which can look like the examples below: - ['in_layer_name', 0, 0] - [['in_layer_is_model', 1, 0], ['in_layer_is_model', 1, 1]] The data inside the list appears to describe [name, size, index].
f8063:m3
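Behavior sketches for the two helpers above, inferred from their docstrings (the masked literal in `_scoped_name` is assumed to be a '%s/%s' join).

    # _scoped_name joins scope and node name with '/'; an empty scope passes through.
    assert _scoped_name('model_1', 'dense') == 'model_1/dense'
    assert _scoped_name('', 'dense') == 'dense'

    # _norm_to_list_of_layers wraps a single [name, size, index] entry in a list.
    assert _norm_to_list_of_layers(['in_layer_name', 0, 0]) == [['in_layer_name', 0, 0]]
    assert _norm_to_list_of_layers(
        [['m', 1, 0], ['m', 1, 1]]) == [['m', 1, 0], ['m', 1, 1]]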
def _update_dicts(name_scope,<EOL>model_layer,<EOL>input_to_in_layer,<EOL>model_name_to_output,<EOL>prev_node_name):
layer_config = model_layer.get('<STR_LIT>')<EOL>if not layer_config.get('<STR_LIT>'):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>node_name = _scoped_name(name_scope, layer_config.get('<STR_LIT:name>'))<EOL>input_layers = layer_config.get('<STR_LIT>')<EOL>output_layers = layer_config.get('<STR_LIT>')<EOL>inbound_nodes = model_layer.get('<STR_LIT>')<EOL>is_functional_model = bool(input_layers and output_layers)<EOL>is_parent_functional_model = bool(inbound_nodes)<EOL>if is_parent_functional_model and is_functional_model:<EOL><INDENT>for (input_layer, inbound_node) in zip(input_layers, inbound_nodes):<EOL><INDENT>input_layer_name = _scoped_name(node_name, input_layer)<EOL>inbound_node_name = _scoped_name(name_scope, inbound_node[<NUM_LIT:0>])<EOL>input_to_in_layer[input_layer_name] = inbound_node_name<EOL><DEDENT><DEDENT>elif is_parent_functional_model and not is_functional_model:<EOL><INDENT>prev_node_name = _scoped_name(name_scope, inbound_nodes[<NUM_LIT:0>][<NUM_LIT:0>][<NUM_LIT:0>])<EOL><DEDENT>elif not is_parent_functional_model and prev_node_name and is_functional_model:<EOL><INDENT>assert len(input_layers) == <NUM_LIT:1>, (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % len(input_layers))<EOL>input_layer = input_layers[<NUM_LIT:0>]<EOL>input_layer_name = _scoped_name(node_name, input_layer)<EOL>input_to_in_layer[input_layer_name] = prev_node_name<EOL><DEDENT>if is_functional_model and output_layers:<EOL><INDENT>layers = _norm_to_list_of_layers(output_layers)<EOL>layer_names = [_scoped_name(node_name, layer[<NUM_LIT:0>]) for layer in layers]<EOL>model_name_to_output[node_name] = layer_names<EOL><DEDENT>else:<EOL><INDENT>last_layer = layer_config.get('<STR_LIT>')[-<NUM_LIT:1>]<EOL>last_layer_name = last_layer.get('<STR_LIT>').get('<STR_LIT:name>')<EOL>output_node = _scoped_name(node_name, last_layer_name)<EOL>model_name_to_output[node_name] = [output_node]<EOL><DEDENT>return (input_to_in_layer, model_name_to_output, prev_node_name)<EOL>
Updates input_to_in_layer, model_name_to_output, and prev_node_name based on the model_layer. Args: name_scope: a string representing a scope name, similar to that of tf.name_scope. model_layer: a dict representing a Keras model configuration. input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer. model_name_to_output: a dict mapping Keras Model name to output layer of the model. prev_node_name: a string representing a previous, in sequential model layout, node name. Returns: A tuple of (input_to_in_layer, model_name_to_output, prev_node_name). input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer. model_name_to_output: a dict mapping Keras Model name to output layer of the model. prev_node_name: a string representing a previous, in sequential model layout, node name.
f8063:m4
def keras_model_to_graph_def(keras_layer):
input_to_layer = {}<EOL>model_name_to_output = {}<EOL>g = GraphDef()<EOL>prev_node_name = None<EOL>for (name_scope, layer) in _walk_layers(keras_layer):<EOL><INDENT>if _is_model(layer):<EOL><INDENT>(input_to_layer, model_name_to_output, prev_node_name) = _update_dicts(<EOL>name_scope, layer, input_to_layer, model_name_to_output, prev_node_name)<EOL>continue<EOL><DEDENT>layer_config = layer.get('<STR_LIT>')<EOL>node_name = _scoped_name(name_scope, layer_config.get('<STR_LIT:name>'))<EOL>node_def = g.node.add()<EOL>node_def.name = node_name<EOL>if layer.get('<STR_LIT>') is not None:<EOL><INDENT>keras_cls_name = layer.get('<STR_LIT>').encode('<STR_LIT:ascii>')<EOL>node_def.attr['<STR_LIT>'].s = keras_cls_name<EOL><DEDENT>if layer_config.get('<STR_LIT>') is not None:<EOL><INDENT>tf_dtype = dtypes.as_dtype(layer_config.get('<STR_LIT>'))<EOL>node_def.attr['<STR_LIT>'].type = tf_dtype.as_datatype_enum<EOL><DEDENT>if layer.get('<STR_LIT>') is not None:<EOL><INDENT>for maybe_inbound_node in layer.get('<STR_LIT>'):<EOL><INDENT>inbound_nodes = _norm_to_list_of_layers(maybe_inbound_node)<EOL>for [name, size, index, _] in inbound_nodes:<EOL><INDENT>inbound_name = _scoped_name(name_scope, name)<EOL>inbound_node_names = model_name_to_output.get(<EOL>inbound_name, [inbound_name])<EOL>node_def.input.append(inbound_node_names[index])<EOL><DEDENT><DEDENT><DEDENT>elif prev_node_name is not None:<EOL><INDENT>node_def.input.append(prev_node_name)<EOL><DEDENT>if node_name in input_to_layer:<EOL><INDENT>node_def.input.append(input_to_layer.get(node_name))<EOL><DEDENT>prev_node_name = node_def.name<EOL><DEDENT>return g<EOL>
Returns a GraphDef representation of the Keras model given as a dict. Note that it only supports models that implement to_json(). Args: keras_layer: A dict from Keras model.to_json(). Returns: A GraphDef representation of the layers in the model.
f8063:m5
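A usage sketch: serialize a Keras model with `to_json()` and hand the parsed dict to the converter (assuming the function is importable from TensorBoard's graph plugin utilities).

    import json
    import tensorflow as tf
    from tensorboard.plugins.graph import keras_util  # path assumed

    model = tf.keras.Sequential([
        tf.keras.layers.Dense(4, input_shape=(8,), name='hidden'),
        tf.keras.layers.Dense(1, name='out'),
    ])
    config = json.loads(model.to_json())
    graph_def = keras_util.keras_model_to_graph_def(config)
    print([node.name for node in graph_def.node])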
def __init__(self, context):
self._multiplexer = context.multiplexer<EOL>
Instantiates GraphsPlugin via TensorBoard core. Args: context: A base_plugin.TBContext instance.
f8064:c0:m0
def is_active(self):
return bool(self._multiplexer and self.info_impl())<EOL>
The graphs plugin is active iff any run has a graph.
f8064:c0:m2
def info_impl(self):
result = {}<EOL>def add_row_item(run, tag=None):<EOL><INDENT>run_item = result.setdefault(run, {<EOL>'<STR_LIT>': run,<EOL>'<STR_LIT>': {},<EOL>'<STR_LIT>': False})<EOL>tag_item = None<EOL>if tag:<EOL><INDENT>tag_item = run_item.get('<STR_LIT>').setdefault(tag, {<EOL>'<STR_LIT>': tag,<EOL>'<STR_LIT>': False,<EOL>'<STR_LIT>': False,<EOL>'<STR_LIT>': False})<EOL><DEDENT>return (run_item, tag_item)<EOL><DEDENT>mapping = self._multiplexer.PluginRunToTagToContent(<EOL>_PLUGIN_NAME_RUN_METADATA_WITH_GRAPH)<EOL>for run_name, tag_to_content in six.iteritems(mapping):<EOL><INDENT>for (tag, content) in six.iteritems(tag_to_content):<EOL><INDENT>if content != b'<STR_LIT:1>':<EOL><INDENT>logger.warn('<STR_LIT>')<EOL>continue<EOL><DEDENT>(_, tag_item) = add_row_item(run_name, tag)<EOL>tag_item['<STR_LIT>'] = True<EOL><DEDENT><DEDENT>mapping = self._multiplexer.PluginRunToTagToContent(<EOL>_PLUGIN_NAME_RUN_METADATA)<EOL>for run_name, tag_to_content in six.iteritems(mapping):<EOL><INDENT>for (tag, content) in six.iteritems(tag_to_content):<EOL><INDENT>if content != b'<STR_LIT:1>':<EOL><INDENT>logger.warn('<STR_LIT>')<EOL>continue<EOL><DEDENT>(_, tag_item) = add_row_item(run_name, tag)<EOL>tag_item['<STR_LIT>'] = True<EOL>tag_item['<STR_LIT>'] = True<EOL><DEDENT><DEDENT>mapping = self._multiplexer.PluginRunToTagToContent(<EOL>_PLUGIN_NAME_KERAS_MODEL)<EOL>for run_name, tag_to_content in six.iteritems(mapping):<EOL><INDENT>for (tag, content) in six.iteritems(tag_to_content):<EOL><INDENT>if content != b'<STR_LIT:1>':<EOL><INDENT>logger.warn('<STR_LIT>')<EOL>continue<EOL><DEDENT>(_, tag_item) = add_row_item(run_name, tag)<EOL>tag_item['<STR_LIT>'] = True<EOL><DEDENT><DEDENT>for (run_name, run_data) in six.iteritems(self._multiplexer.Runs()):<EOL><INDENT>if run_data.get(event_accumulator.GRAPH):<EOL><INDENT>(run_item, _) = add_row_item(run_name, None)<EOL>run_item['<STR_LIT>'] = True<EOL><DEDENT><DEDENT>for (run_name, run_data) in six.iteritems(self._multiplexer.Runs()):<EOL><INDENT>if event_accumulator.RUN_METADATA in run_data:<EOL><INDENT>for tag in run_data[event_accumulator.RUN_METADATA]:<EOL><INDENT>(_, tag_item) = add_row_item(run_name, tag)<EOL>tag_item['<STR_LIT>'] = True<EOL><DEDENT><DEDENT><DEDENT>return result<EOL>
Returns a dict of all runs and tags, with their data availability.
f8064:c0:m3
def graph_impl(self, run, tag, is_conceptual, limit_attr_size=None, large_attrs_key=None):
if is_conceptual:<EOL><INDENT>tensor_events = self._multiplexer.Tensors(run, tag)<EOL>keras_model_config = json.loads(tensor_events[<NUM_LIT:0>].tensor_proto.string_val[<NUM_LIT:0>])<EOL>graph = keras_util.keras_model_to_graph_def(keras_model_config)<EOL><DEDENT>elif tag:<EOL><INDENT>tensor_events = self._multiplexer.Tensors(run, tag)<EOL>run_metadata = config_pb2.RunMetadata.FromString(<EOL>tensor_events[<NUM_LIT:0>].tensor_proto.string_val[<NUM_LIT:0>])<EOL>graph = graph_pb2.GraphDef()<EOL>for func_graph in run_metadata.function_graphs:<EOL><INDENT>graph_util.combine_graph_defs(graph, func_graph.pre_optimization_graph)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>graph = self._multiplexer.Graph(run)<EOL><DEDENT>process_graph.prepare_graph_for_ui(graph, limit_attr_size, large_attrs_key)<EOL>return (str(graph), '<STR_LIT>')<EOL>
Result of the form `(body, mime_type)`, or `None` if no graph exists.
f8064:c0:m4
def run_metadata_impl(self, run, tag):
try:<EOL><INDENT>run_metadata = self._multiplexer.RunMetadata(run, tag)<EOL><DEDENT>except ValueError:<EOL><INDENT>tensor_events = self._multiplexer.Tensors(run, tag)<EOL>if tensor_events is None:<EOL><INDENT>return None<EOL><DEDENT>run_metadata = config_pb2.RunMetadata.FromString(<EOL>tensor_events[<NUM_LIT:0>].tensor_proto.string_val[<NUM_LIT:0>])<EOL><DEDENT>if run_metadata is None:<EOL><INDENT>return None<EOL><DEDENT>return (str(run_metadata), '<STR_LIT>')<EOL>
Result of the form `(body, mime_type)`, or `None` if no data exists.
f8064:c0:m5
@wrappers.Request.application<EOL><INDENT>def graph_route(self, request):<DEDENT>
run = request.args.get('<STR_LIT>')<EOL>tag = request.args.get('<STR_LIT>', '<STR_LIT>')<EOL>conceptual_arg = request.args.get('<STR_LIT>', False)<EOL>is_conceptual = conceptual_arg == '<STR_LIT:true>'<EOL>if run is None:<EOL><INDENT>return http_util.Respond(<EOL>request, '<STR_LIT>', '<STR_LIT>', <NUM_LIT>)<EOL><DEDENT>limit_attr_size = request.args.get('<STR_LIT>', None)<EOL>if limit_attr_size is not None:<EOL><INDENT>try:<EOL><INDENT>limit_attr_size = int(limit_attr_size)<EOL><DEDENT>except ValueError:<EOL><INDENT>return http_util.Respond(<EOL>request, '<STR_LIT>',<EOL>'<STR_LIT>', <NUM_LIT>)<EOL><DEDENT><DEDENT>large_attrs_key = request.args.get('<STR_LIT>', None)<EOL>try:<EOL><INDENT>result = self.graph_impl(run, tag, is_conceptual, limit_attr_size, large_attrs_key)<EOL><DEDENT>except ValueError as e:<EOL><INDENT>return http_util.Respond(request, str(e), '<STR_LIT>', code=<NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>if result is not None:<EOL><INDENT>(body, mime_type) = result<EOL>return http_util.Respond(request, body, mime_type)<EOL><DEDENT>else:<EOL><INDENT>return http_util.Respond(request, '<STR_LIT>', '<STR_LIT>',<EOL>code=<NUM_LIT>)<EOL><DEDENT><DEDENT>
Given a single run, return the graph definition in protobuf format.
f8064:c0:m7
@wrappers.Request.application<EOL><INDENT>def run_metadata_route(self, request):<DEDENT>
tag = request.args.get('<STR_LIT>')<EOL>run = request.args.get('<STR_LIT>')<EOL>if tag is None:<EOL><INDENT>return http_util.Respond(<EOL>request, '<STR_LIT>', '<STR_LIT>', <NUM_LIT>)<EOL><DEDENT>if run is None:<EOL><INDENT>return http_util.Respond(<EOL>request, '<STR_LIT>', '<STR_LIT>', <NUM_LIT>)<EOL><DEDENT>result = self.run_metadata_impl(run, tag)<EOL>if result is not None:<EOL><INDENT>(body, mime_type) = result <EOL>return http_util.Respond(request, body, mime_type)<EOL><DEDENT>else:<EOL><INDENT>return http_util.Respond(request, '<STR_LIT>', '<STR_LIT>',<EOL>code=<NUM_LIT>)<EOL><DEDENT>
Given a tag and a run, return the session.run() metadata.
f8064:c0:m8
def _GetDenseDimensions(list_of_lists):
if not isinstance(list_of_lists, (list, tuple)):<EOL><INDENT>return []<EOL><DEDENT>elif not list_of_lists:<EOL><INDENT>return [<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[<NUM_LIT:0>])<EOL><DEDENT>
Returns the inferred dense dimensions of a list of lists.
f8067:m17
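Behavior sketch: the recursion descends into the first element at each nesting level, so ragged inner lists are not detected here (the shape mismatch is caught later against the numpy shape).

    assert _GetDenseDimensions(5) == []                       # scalar: no dimensions
    assert _GetDenseDimensions([]) == [0]                     # empty list
    assert _GetDenseDimensions([[1, 2, 3], [4, 5, 6]]) == [2, 3]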
def make_tensor_proto(values, dtype=None, shape=None, verify_shape=False):
if isinstance(values, tensor_pb2.TensorProto):<EOL><INDENT>return values<EOL><DEDENT>if dtype:<EOL><INDENT>dtype = dtypes.as_dtype(dtype)<EOL><DEDENT>is_quantized = dtype in [<EOL>dtypes.qint8,<EOL>dtypes.quint8,<EOL>dtypes.qint16,<EOL>dtypes.quint16,<EOL>dtypes.qint32,<EOL>]<EOL>if isinstance(values, (np.ndarray, np.generic)):<EOL><INDENT>if dtype:<EOL><INDENT>nparray = values.astype(dtype.as_numpy_dtype)<EOL><DEDENT>else:<EOL><INDENT>nparray = values<EOL><DEDENT><DEDENT>elif callable(getattr(values, "<STR_LIT>", None)) or isinstance(<EOL>getattr(values, "<STR_LIT>", None), dict<EOL>):<EOL><INDENT>nparray = np.asarray(values, dtype=dtype)<EOL>values = nparray<EOL><DEDENT>else:<EOL><INDENT>if values is None:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if dtype and dtype.is_numpy_compatible:<EOL><INDENT>np_dt = dtype.as_numpy_dtype<EOL><DEDENT>else:<EOL><INDENT>np_dt = None<EOL><DEDENT>if shape is not None and np.prod(shape, dtype=np.int64) == <NUM_LIT:0>:<EOL><INDENT>nparray = np.empty(shape, dtype=np_dt)<EOL><DEDENT>else:<EOL><INDENT>_Assertconvertible(values, dtype)<EOL>nparray = np.array(values, dtype=np_dt)<EOL>if list(nparray.shape) != _GetDenseDimensions(values) and not is_quantized:<EOL><INDENT>raise ValueError(<EOL>"""<STR_LIT>"""<EOL>"""<STR_LIT>"""<EOL>% (values, list(nparray.shape), _GetDenseDimensions(values))<EOL>)<EOL><DEDENT><DEDENT>if (nparray.dtype == np.float64) and dtype is None:<EOL><INDENT>nparray = nparray.astype(np.float32)<EOL><DEDENT>elif (nparray.dtype == np.int64) and dtype is None:<EOL><INDENT>downcasted_array = nparray.astype(np.int32)<EOL>if np.array_equal(downcasted_array, nparray):<EOL><INDENT>nparray = downcasted_array<EOL><DEDENT><DEDENT><DEDENT>numpy_dtype = dtypes.as_dtype(nparray.dtype)<EOL>if numpy_dtype is None:<EOL><INDENT>raise TypeError("<STR_LIT>" % nparray.dtype)<EOL><DEDENT>if is_quantized:<EOL><INDENT>numpy_dtype = dtype<EOL><DEDENT>if dtype is not None and (<EOL>not hasattr(dtype, "<STR_LIT>") or dtype.base_dtype != numpy_dtype.base_dtype<EOL>):<EOL><INDENT>raise TypeError(<EOL>"<STR_LIT>"<EOL>% (dtype, nparray.dtype, values)<EOL>)<EOL><DEDENT>if shape is None:<EOL><INDENT>shape = nparray.shape<EOL>is_same_size = True<EOL>shape_size = nparray.size<EOL><DEDENT>else:<EOL><INDENT>shape = [int(dim) for dim in shape]<EOL>shape_size = np.prod(shape, dtype=np.int64)<EOL>is_same_size = shape_size == nparray.size<EOL>if verify_shape:<EOL><INDENT>if not nparray.shape == tuple(shape):<EOL><INDENT>raise TypeError(<EOL>"<STR_LIT>"<EOL>% (tuple(shape), nparray.shape)<EOL>)<EOL><DEDENT><DEDENT>if nparray.size > shape_size:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>"<EOL>% (shape_size, nparray.size)<EOL>)<EOL><DEDENT><DEDENT>tensor_proto = tensor_pb2.TensorProto(<EOL>dtype=numpy_dtype.as_datatype_enum,<EOL>tensor_shape=tensor_shape.as_shape(shape).as_proto(),<EOL>)<EOL>if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > <NUM_LIT:1>:<EOL><INDENT>if nparray.size * nparray.itemsize >= (<NUM_LIT:1> << <NUM_LIT>):<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>"<EOL>)<EOL><DEDENT>tensor_proto.tensor_content = nparray.tostring()<EOL>return tensor_proto<EOL><DEDENT>if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):<EOL><INDENT>proto_values = _FlattenToStrings(values)<EOL>try:<EOL><INDENT>str_values = [compat.as_bytes(x) for x in proto_values]<EOL><DEDENT>except TypeError:<EOL><INDENT>raise TypeError(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>" % (type(values), values)<EOL>)<EOL><DEDENT>tensor_proto.string_val.extend(str_values)<EOL>return tensor_proto<EOL><DEDENT>proto_values = nparray.ravel()<EOL>append_fn = GetNumpyAppendFn(proto_values.dtype)<EOL>if append_fn is None:<EOL><INDENT>raise TypeError(<EOL>"<STR_LIT>" % numpy_dtype.name<EOL>)<EOL><DEDENT>append_fn(tensor_proto, proto_values)<EOL>return tensor_proto<EOL>
Create a TensorProto. Args: values: Values to put in the TensorProto. dtype: Optional tensor_pb2 DataType value. shape: List of integers representing the dimensions of tensor. verify_shape: Boolean that enables verification of a shape of values. Returns: A `TensorProto`. Depending on the type, it may contain data in the "tensor_content" attribute, which is not directly useful to Python programs. To access the values you should convert the proto back to a numpy ndarray with `tensor_util.MakeNdarray(proto)`. If `values` is a `TensorProto`, it is immediately returned; `dtype` and `shape` are ignored. Raises: TypeError: if unsupported types are provided. ValueError: if arguments have inappropriate values or if verify_shape is True and shape of values is not equals to a shape from the argument. make_tensor_proto accepts "values" of a python scalar, a python list, a numpy ndarray, or a numpy scalar. If "values" is a python scalar or a python list, make_tensor_proto first convert it to numpy ndarray. If dtype is None, the conversion tries its best to infer the right numpy data type. Otherwise, the resulting numpy array has a convertible data type with the given dtype. In either case above, the numpy ndarray (either the caller provided or the auto converted) must have the convertible type with dtype. make_tensor_proto then converts the numpy array to a tensor proto. If "shape" is None, the resulting tensor proto represents the numpy array precisely. Otherwise, "shape" specifies the tensor's shape and the numpy array can not have more elements than what "shape" specifies.
f8067:m28
def make_ndarray(tensor):
shape = [d.size for d in tensor.tensor_shape.dim]<EOL>num_elements = np.prod(shape, dtype=np.int64)<EOL>tensor_dtype = dtypes.as_dtype(tensor.dtype)<EOL>dtype = tensor_dtype.as_numpy_dtype<EOL>if tensor.tensor_content:<EOL><INDENT>return np.frombuffer(tensor.tensor_content, dtype=dtype).copy().reshape(shape)<EOL><DEDENT>elif tensor_dtype == dtypes.float16 or tensor_dtype == dtypes.bfloat16:<EOL><INDENT>if len(tensor.half_val) == <NUM_LIT:1>:<EOL><INDENT>tmp = np.array(tensor.half_val[<NUM_LIT:0>], dtype=np.uint16)<EOL>tmp.dtype = tensor_dtype.as_numpy_dtype<EOL>return np.repeat(tmp, num_elements).reshape(shape)<EOL><DEDENT>else:<EOL><INDENT>tmp = np.fromiter(tensor.half_val, dtype=np.uint16)<EOL>tmp.dtype = tensor_dtype.as_numpy_dtype<EOL>return tmp.reshape(shape)<EOL><DEDENT><DEDENT>elif tensor_dtype == dtypes.float32:<EOL><INDENT>if len(tensor.float_val) == <NUM_LIT:1>:<EOL><INDENT>return np.repeat(<EOL>np.array(tensor.float_val[<NUM_LIT:0>], dtype=dtype), num_elements<EOL>).reshape(shape)<EOL><DEDENT>else:<EOL><INDENT>return np.fromiter(tensor.float_val, dtype=dtype).reshape(shape)<EOL><DEDENT><DEDENT>elif tensor_dtype == dtypes.float64:<EOL><INDENT>if len(tensor.double_val) == <NUM_LIT:1>:<EOL><INDENT>return np.repeat(<EOL>np.array(tensor.double_val[<NUM_LIT:0>], dtype=dtype), num_elements<EOL>).reshape(shape)<EOL><DEDENT>else:<EOL><INDENT>return np.fromiter(tensor.double_val, dtype=dtype).reshape(shape)<EOL><DEDENT><DEDENT>elif tensor_dtype in [<EOL>dtypes.int32,<EOL>dtypes.uint8,<EOL>dtypes.uint16,<EOL>dtypes.int16,<EOL>dtypes.int8,<EOL>dtypes.qint32,<EOL>dtypes.quint8,<EOL>dtypes.qint8,<EOL>dtypes.qint16,<EOL>dtypes.quint16,<EOL>]:<EOL><INDENT>if len(tensor.int_val) == <NUM_LIT:1>:<EOL><INDENT>return np.repeat(<EOL>np.array(tensor.int_val[<NUM_LIT:0>], dtype=dtype), num_elements<EOL>).reshape(shape)<EOL><DEDENT>else:<EOL><INDENT>return np.fromiter(tensor.int_val, dtype=dtype).reshape(shape)<EOL><DEDENT><DEDENT>elif tensor_dtype == dtypes.int64:<EOL><INDENT>if len(tensor.int64_val) == <NUM_LIT:1>:<EOL><INDENT>return np.repeat(<EOL>np.array(tensor.int64_val[<NUM_LIT:0>], dtype=dtype), num_elements<EOL>).reshape(shape)<EOL><DEDENT>else:<EOL><INDENT>return np.fromiter(tensor.int64_val, dtype=dtype).reshape(shape)<EOL><DEDENT><DEDENT>elif tensor_dtype == dtypes.string:<EOL><INDENT>if len(tensor.string_val) == <NUM_LIT:1>:<EOL><INDENT>return np.repeat(<EOL>np.array(tensor.string_val[<NUM_LIT:0>], dtype=dtype), num_elements<EOL>).reshape(shape)<EOL><DEDENT>else:<EOL><INDENT>return np.array([x for x in tensor.string_val], dtype=dtype).reshape(shape)<EOL><DEDENT><DEDENT>elif tensor_dtype == dtypes.complex64:<EOL><INDENT>it = iter(tensor.scomplex_val)<EOL>if len(tensor.scomplex_val) == <NUM_LIT:2>:<EOL><INDENT>return np.repeat(<EOL>np.array(<EOL>complex(tensor.scomplex_val[<NUM_LIT:0>], tensor.scomplex_val[<NUM_LIT:1>]), dtype=dtype<EOL>),<EOL>num_elements,<EOL>).reshape(shape)<EOL><DEDENT>else:<EOL><INDENT>return np.array(<EOL>[complex(x[<NUM_LIT:0>], x[<NUM_LIT:1>]) for x in zip(it, it)], dtype=dtype<EOL>).reshape(shape)<EOL><DEDENT><DEDENT>elif tensor_dtype == dtypes.complex128:<EOL><INDENT>it = iter(tensor.dcomplex_val)<EOL>if len(tensor.dcomplex_val) == <NUM_LIT:2>:<EOL><INDENT>return np.repeat(<EOL>np.array(<EOL>complex(tensor.dcomplex_val[<NUM_LIT:0>], tensor.dcomplex_val[<NUM_LIT:1>]), dtype=dtype<EOL>),<EOL>num_elements,<EOL>).reshape(shape)<EOL><DEDENT>else:<EOL><INDENT>return np.array(<EOL>[complex(x[<NUM_LIT:0>], x[<NUM_LIT:1>]) for x in zip(it, it)], dtype=dtype<EOL>).reshape(shape)<EOL><DEDENT><DEDENT>elif tensor_dtype == dtypes.bool:<EOL><INDENT>if len(tensor.bool_val) == <NUM_LIT:1>:<EOL><INDENT>return np.repeat(<EOL>np.array(tensor.bool_val[<NUM_LIT:0>], dtype=dtype), num_elements<EOL>).reshape(shape)<EOL><DEDENT>else:<EOL><INDENT>return np.fromiter(tensor.bool_val, dtype=dtype).reshape(shape)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError("<STR_LIT>" % tensor.dtype)<EOL><DEDENT>
Create a numpy ndarray from a tensor. Create a numpy ndarray with the same shape and data as the tensor. Args: tensor: A TensorProto. Returns: A numpy array with the tensor contents. Raises: TypeError: if tensor has unsupported type.
f8067:m29
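Round-trip sketch: for ordinary numeric arrays the two functions above are inverses (they mirror `tf.make_tensor_proto` / `tf.make_ndarray`); assumes both are in scope.

    import numpy as np

    arr = np.arange(6, dtype=np.float32).reshape(2, 3)
    proto = make_tensor_proto(arr)      # large arrays land in tensor_content
    round_tripped = make_ndarray(proto)
    assert np.array_equal(round_tripped, arr)
    assert round_tripped.dtype == np.float32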
def _lazily_initialize(self):
<EOL>import tensorflow.compat.v1 as tf<EOL>with self._initialization_lock:<EOL><INDENT>if self._session:<EOL><INDENT>return<EOL><DEDENT>graph = tf.Graph()<EOL>with graph.as_default():<EOL><INDENT>self.initialize_graph()<EOL><DEDENT>config = tf.ConfigProto(device_count={'<STR_LIT>': <NUM_LIT:0>})<EOL>self._session = tf.Session(graph=graph, config=config)<EOL><DEDENT>
Initialize the graph and session, if this has not yet been done.
f8068:c0:m1
def initialize_graph(self):
raise NotImplementedError('<STR_LIT>')<EOL>
Create the TensorFlow graph needed to compute this operation. This should write ops to the default graph and return `None`.
f8068:c0:m2
def run(self, *args, **kwargs):
raise NotImplementedError('<STR_LIT>')<EOL>
Evaluate the ops with the given input. When this function is called, the default session will have the graph defined by a previous call to `initialize_graph`. This function should evaluate any ops necessary to compute the result of the query for the given *args and **kwargs, likely returning the result of a call to `some_op.eval(...)`.
f8068:c0:m3
def ensure_tb_summary_proto(summary):
if isinstance(summary, summary_pb2.Summary):<EOL><INDENT>return summary<EOL><DEDENT>return summary_pb2.Summary.FromString(summary.SerializeToString())<EOL>
Ensures `summary` is a TensorBoard Summary proto. The TB v1 summary API returns a TF Summary proto. To make tests for the v1 and v2 APIs congruent, one can use this API to convert the result of the v1 API to a TB Summary proto.
f8071:m0
def _run_conditionally(guard, name, default_reason=None):
def _impl(reason=None):<EOL><INDENT>if reason is None:<EOL><INDENT>if default_reason is None:<EOL><INDENT>raise ValueError('<STR_LIT>' % name)<EOL><DEDENT>reason = default_reason<EOL><DEDENT>return unittest.skipUnless(guard(), reason)<EOL><DEDENT>return _impl<EOL>
Create a decorator factory that skips a test when guard returns False. The factory raises ValueError when default_reason is None and reason is not passed to the factory. Args: guard: A lambda that returns True if a test should be executed. name: A human readable name for the decorator for an error message. default_reason: A string describing why a test should be skipped. If it is None, the decorator will make sure the reason is supplied by the consumer of the decorator. Default is None. Raises: ValueError when both reason and default_reason are None. Returns: A function that returns a decorator.
f8071:m1
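A usage sketch for the decorator factory: the guard gates execution, and the reason falls back to `default_reason` when the caller supplies none.

    import unittest

    run_if_gpu = _run_conditionally(
        lambda: False, name='run_if_gpu', default_reason='No GPU available')

    class HeavyTest(unittest.TestCase):
        @run_if_gpu()                 # skipped: the guard returns False
        def test_big_matmul(self):
            pass

        @run_if_gpu('needs CUDA')     # explicit reason overrides the default
        def test_conv(self):
            pass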