signature
stringlengths
8
3.44k
body
stringlengths
0
1.41M
docstring
stringlengths
1
122k
id
stringlengths
5
17
def event_count(self, event_key):
return self._trackers[event_key].event_count<EOL>
Obtain event count. Args: event_key: the type key of the sought events (e.g., constants.NAN_KEY). If None, includes all event type keys. Returns: If event_key is None, return the sum of the event_count of all event types. Otherwise, return the event_count of the specified event type.
f7941:c1:m4
def create_jsonable_history(self):
return {value_category_key: tracker.get_description()<EOL>for (value_category_key, tracker) in self._trackers.items()}<EOL>
Creates a JSON-able representation of this object. Returns: A dictionary mapping key to EventTrackerDescription (which can be used to create event trackers).
f7941:c1:m5
def __init__(self, capacity=<NUM_LIT:100>, initialization_list=None):
self._capacity = capacity<EOL>self._data = dict()<EOL>if initialization_list:<EOL><INDENT>for entry in initialization_list:<EOL><INDENT>triplet = HistoryTriplet._make(entry)<EOL>self._data[(triplet.device, triplet.tensor)] = NumericsAlertHistory(<EOL>initialization_list=triplet.jsonable_history)<EOL><DEDENT><DEDENT>
Constructor. Args: capacity: (`int`) maximum number of device-tensor keys to store. initialization_list: (`list`) An optional list (parsed from JSON) that is used to initialize the data within this registry. Use the create_jsonable_registry method of NumericsAlertRegistry to create such a list.
f7941:c2:m0
def register(self, numerics_alert):
key = (numerics_alert.device_name, numerics_alert.tensor_name)<EOL>if key in self._data:<EOL><INDENT>self._data[key].add(numerics_alert)<EOL><DEDENT>else:<EOL><INDENT>if len(self._data) < self._capacity:<EOL><INDENT>history = NumericsAlertHistory()<EOL>history.add(numerics_alert)<EOL>self._data[key] = history<EOL><DEDENT><DEDENT>
Register an alerting numeric event. Args: numerics_alert: An instance of `NumericsAlert`.
f7941:c2:m1
def report(self, device_name_filter=None, tensor_name_filter=None):
report = []<EOL>for key in self._data:<EOL><INDENT>device_name, tensor_name = key<EOL>history = self._data[key]<EOL>report.append(<EOL>NumericsAlertReportRow(<EOL>device_name=device_name,<EOL>tensor_name=tensor_name,<EOL>first_timestamp=history.first_timestamp(),<EOL>nan_event_count=history.event_count(constants.NAN_KEY),<EOL>neg_inf_event_count=history.event_count(constants.NEG_INF_KEY),<EOL>pos_inf_event_count=history.event_count(constants.POS_INF_KEY)))<EOL><DEDENT>if device_name_filter:<EOL><INDENT>device_name_pattern = re.compile(device_name_filter)<EOL>report = [item for item in report<EOL>if device_name_pattern.match(item.device_name)]<EOL><DEDENT>if tensor_name_filter:<EOL><INDENT>tensor_name_pattern = re.compile(tensor_name_filter)<EOL>report = [item for item in report<EOL>if tensor_name_pattern.match(item.tensor_name)]<EOL><DEDENT>return sorted(report, key=lambda x: x.first_timestamp)<EOL>
Get a report of offending device/tensor names. The report includes information about the device name, tensor name, first (earliest) timestamp of the alerting events from the tensor, in addition to counts of nan, positive inf and negative inf events. Args: device_name_filter: regex filter for device name, or None (not filtered). tensor_name_filter: regex filter for tensor name, or None (not filtered). Returns: A list of NumericsAlertReportRow, sorted by the first_timestamp in asecnding order.
f7941:c2:m2
def create_jsonable_registry(self):
<EOL>return [HistoryTriplet(pair[<NUM_LIT:0>], pair[<NUM_LIT:1>], history.create_jsonable_history())<EOL>for (pair, history) in self._data.items()]<EOL>
Creates a JSON-able representation of this object. Returns: A dictionary mapping (device, tensor name) to JSON-able object representations of NumericsAlertHistory.
f7941:c2:m3
def __init__(self,<EOL>watch_key,<EOL>mem_bytes_limit=<NUM_LIT>):
self._watch_key = watch_key<EOL>self._mem_bytes_limit = mem_bytes_limit<EOL>self._in_mem_bytes = <NUM_LIT:0><EOL>self._disposed = False<EOL>self._data = []<EOL>
Constructor of _WatchStore. The overflowing works as follows: The most recent tensor values are stored in memory, up to `mem_bytes_limit` bytes. But at least one (the most recent) value is always stored in memory. For older tensors exceeding that limit, they are discarded. Args: watch_key: A string representing the debugger tensor watch, with th format: <NODE_NAME>:<OUTPUT_SLOT>:<DEBUG_OP_NAME> e.g., 'Dense_1/BiasAdd:0:DebugIdentity'. mem_bytes_limit: Limit on number of bytes to store in memory.
f7942:c1:m0
def add(self, value):
if self._disposed:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>self._data.append(value)<EOL>if hasattr(value, '<STR_LIT>'):<EOL><INDENT>self._in_mem_bytes += value.nbytes<EOL>self._ensure_bytes_limits()<EOL><DEDENT>
Add a tensor the watch store.
f7942:c1:m1
def num_total(self):
return len(self._data)<EOL>
Get the total number of values.
f7942:c1:m3
def num_in_memory(self):
n = len(self._data) - <NUM_LIT:1><EOL>while n >= <NUM_LIT:0>:<EOL><INDENT>if isinstance(self._data[n], _TensorValueDiscarded):<EOL><INDENT>break<EOL><DEDENT>n -= <NUM_LIT:1><EOL><DEDENT>return len(self._data) - <NUM_LIT:1> - n<EOL>
Get number of values in memory.
f7942:c1:m4
def num_discarded(self):
if not self._data:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>n = <NUM_LIT:0><EOL>while n < len(self._data):<EOL><INDENT>if not isinstance(self._data[n], _TensorValueDiscarded):<EOL><INDENT>break<EOL><DEDENT>n += <NUM_LIT:1><EOL><DEDENT>return n<EOL>
Get the number of values discarded due to exceeding both limits.
f7942:c1:m5
def query(self, time_indices):
if self._disposed:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>if not isinstance(time_indices, (tuple, list)):<EOL><INDENT>time_indices = [time_indices]<EOL><DEDENT>output = []<EOL>for time_index in time_indices:<EOL><INDENT>if isinstance(self._data[time_index], _TensorValueDiscarded):<EOL><INDENT>output.append(None)<EOL><DEDENT>else:<EOL><INDENT>data_item = self._data[time_index]<EOL>if (hasattr(data_item, '<STR_LIT>') and<EOL>tensor_helper.translate_dtype(data_item.dtype) == '<STR_LIT:string>'):<EOL><INDENT>_, _, data_item = tensor_helper.array_view(data_item)<EOL>data_item = np.array(<EOL>tensor_helper.process_buffers_for_display(data_item),<EOL>dtype=np.object)<EOL><DEDENT>output.append(data_item)<EOL><DEDENT><DEDENT>return output<EOL>
Query the values at given time indices. Args: time_indices: 0-based time indices to query, as a `list` of `int`. Returns: Values as a list of `numpy.ndarray` (for time indices in memory) or `None` (for time indices discarded).
f7942:c1:m6
def __init__(self, watch_mem_bytes_limit=<NUM_LIT>):
self._watch_mem_bytes_limit = watch_mem_bytes_limit<EOL>self._tensor_data = dict()<EOL>
Constructor of TensorStore. Args: watch_mem_bytes_limit: Limit on number of bytes to store in memory for each watch key.
f7942:c2:m0
def add(self, watch_key, tensor_value):
if watch_key not in self._tensor_data:<EOL><INDENT>self._tensor_data[watch_key] = _WatchStore(<EOL>watch_key,<EOL>mem_bytes_limit=self._watch_mem_bytes_limit)<EOL><DEDENT>self._tensor_data[watch_key].add(tensor_value)<EOL>
Add a tensor value. Args: watch_key: A string representing the debugger tensor watch, e.g., 'Dense_1/BiasAdd:0:DebugIdentity'. tensor_value: The value of the tensor as a numpy.ndarray.
f7942:c2:m1
def query(self,<EOL>watch_key,<EOL>time_indices=None,<EOL>slicing=None,<EOL>mapping=None):
if watch_key not in self._tensor_data:<EOL><INDENT>raise KeyError("<STR_LIT>" % watch_key)<EOL><DEDENT>if time_indices is None:<EOL><INDENT>time_indices = '<STR_LIT>'<EOL><DEDENT>time_slicing = tensor_helper.parse_time_indices(time_indices)<EOL>all_time_indices = list(range(self._tensor_data[watch_key].num_total()))<EOL>sliced_time_indices = all_time_indices[time_slicing]<EOL>if not isinstance(sliced_time_indices, list):<EOL><INDENT>sliced_time_indices = [sliced_time_indices]<EOL><DEDENT>recombine_and_map = False<EOL>step_mapping = mapping<EOL>if len(sliced_time_indices) > <NUM_LIT:1> and mapping not in (None, ):<EOL><INDENT>recombine_and_map = True<EOL>step_mapping = None<EOL><DEDENT>output = []<EOL>for index in sliced_time_indices:<EOL><INDENT>value = self._tensor_data[watch_key].query(index)[<NUM_LIT:0>]<EOL>if (value is not None and<EOL>not isinstance(value, debug_data.InconvertibleTensorProto)):<EOL><INDENT>output.append(tensor_helper.array_view(<EOL>value, slicing=slicing, mapping=step_mapping)[<NUM_LIT:2>])<EOL><DEDENT>else:<EOL><INDENT>output.append(None)<EOL><DEDENT><DEDENT>if recombine_and_map:<EOL><INDENT>if mapping == '<STR_LIT>':<EOL><INDENT>output = tensor_helper.array_to_base64_png(output)<EOL><DEDENT>elif mapping and mapping != '<STR_LIT:none>':<EOL><INDENT>logger.warn(<EOL>'<STR_LIT>',<EOL>mapping)<EOL><DEDENT><DEDENT>return output<EOL>
Query tensor store for a given watch_key. Args: watch_key: The watch key to query. time_indices: A numpy-style slicing string for time indices. E.g., `-1`, `:-2`, `[::2]`. If not provided (`None`), will use -1. slicing: A numpy-style slicing string for individual time steps. mapping: An mapping string or a list of them. Supported mappings: `{None, 'image/png', 'health-pill'}`. Returns: The potentially sliced values as a nested list of values or its mapped format. A `list` of nested `list` of values. Raises: ValueError: If the shape of the sliced array is incompatible with mapping mode. Or if the mapping type is invalid.
f7942:c2:m2
def get_gated_grpc_tensors(self, matching_debug_op=None):
with self._grpc_gated_lock:<EOL><INDENT>matching_debug_op = matching_debug_op or '<STR_LIT>'<EOL>if matching_debug_op not in self._grpc_gated_tensors:<EOL><INDENT>node_name_to_op_type = dict(<EOL>(node.name, node.op) for node in self._graph_def.node)<EOL>gated = []<EOL>for node in self._graph_def.node:<EOL><INDENT>if node.op == matching_debug_op:<EOL><INDENT>for attr_key in node.attr:<EOL><INDENT>if attr_key == '<STR_LIT>' and node.attr[attr_key].b:<EOL><INDENT>node_name, output_slot, _, debug_op = (<EOL>debug_graphs.parse_debug_node_name(node.name))<EOL>gated.append(<EOL>(node_name, node_name_to_op_type[node_name], output_slot,<EOL>debug_op))<EOL>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>self._grpc_gated_tensors[matching_debug_op] = gated<EOL><DEDENT>return self._grpc_gated_tensors[matching_debug_op]<EOL><DEDENT>
Extract all nodes with gated-gRPC debug ops attached. Uses cached values if available. This method is thread-safe. Args: graph_def: A tf.GraphDef proto. matching_debug_op: Return tensors and nodes with only matching the specified debug op name (optional). If `None`, will extract only `DebugIdentity` debug ops. Returns: A list of (node_name, op_type, output_slot, debug_op) tuples.
f7943:c0:m1
def maybe_base_expanded_node_name(self, node_name):
with self._node_name_lock:<EOL><INDENT>if self._maybe_base_expanded_node_names is None:<EOL><INDENT>self._maybe_base_expanded_node_names = dict()<EOL>sorted_names = sorted(node.name for node in self._graph_def.node)<EOL>for i, name in enumerate(sorted_names):<EOL><INDENT>j = i + <NUM_LIT:1><EOL>while j < len(sorted_names) and sorted_names[j].startswith(name):<EOL><INDENT>if sorted_names[j].startswith(name + '<STR_LIT:/>'):<EOL><INDENT>self._maybe_base_expanded_node_names[name] = (<EOL>name + '<STR_LIT>' + name.split('<STR_LIT:/>')[-<NUM_LIT:1>] + '<STR_LIT:)>')<EOL>break<EOL><DEDENT>j += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return self._maybe_base_expanded_node_names.get(node_name, node_name)<EOL><DEDENT>
Expand the base name if there are node names nested under the node. For example, if there are two nodes in the graph, "a" and "a/read", then calling this function on "a" will give "a/(a)", a form that points at a leaf node in the nested TensorBoard graph. Calling this function on "a/read" will just return "a/read", because there is no node nested under it. This method is thread-safe. Args: node_name: Name of the node. graph_def: The `GraphDef` that the node is a part of. Returns: Possibly base-expanded node name.
f7943:c0:m2
def _serverGet(self, path, params=None, expected_status_code=<NUM_LIT:200>):
url = _SERVER_URL_PREFIX + path<EOL>if params:<EOL><INDENT>url += '<STR_LIT:?>' + urllib.parse.urlencode(params)<EOL><DEDENT>response = self._server.get(url)<EOL>self.assertEqual(expected_status_code, response.status_code)<EOL>return response<EOL>
Send the serve a GET request and obtain the response. Args: path: URL path (excluding the prefix), without parameters encoded. params: Query parameters to be encoded in the URL, as a dict. expected_status_code: Expected status code. Returns: Response from server.
f7946:c0:m2
def _deserializeResponse(self, response):
return json.loads(response.get_data().decode("<STR_LIT:utf-8>"))<EOL>
Deserializes byte content that is a JSON encoding. Args: response: A response object. Returns: The deserialized python object decoded from JSON.
f7946:c0:m3
def _CreateEventWithDebugNumericSummary(<EOL>self, device_name, op_name, output_slot, wall_time, step, list_of_values):
event = tf.compat.v1.Event(step=step, wall_time=wall_time)<EOL>tensor = tf.compat.v1.make_tensor_proto(<EOL>list_of_values, dtype=tf.float64, shape=[len(list_of_values)])<EOL>value = event.summary.value.add(<EOL>tag=op_name,<EOL>node_name='<STR_LIT>' % (op_name, output_slot),<EOL>tensor=tensor)<EOL>content_proto = debugger_event_metadata_pb2.DebuggerEventMetadata(<EOL>device=device_name, output_slot=output_slot)<EOL>value.metadata.plugin_data.plugin_name = constants.DEBUGGER_PLUGIN_NAME<EOL>value.metadata.plugin_data.content = tf.compat.as_bytes(<EOL>json_format.MessageToJson(<EOL>content_proto, including_default_value_fields=True))<EOL>return event<EOL>
Creates event with a health pill summary. Note the debugger plugin only works with TensorFlow and, thus, uses TF protos and TF EventsWriter. Args: device_name: The name of the op's device. op_name: The name of the op to which a DebugNumericSummary was attached. output_slot: The numeric output slot for the tensor. wall_time: The numeric wall time of the event. step: The step of the event. list_of_values: A python list of values within the tensor. Returns: A `tf.Event` with a health pill summary.
f7947:c0:m3
def _DeserializeResponse(self, byte_content):
return json.loads(byte_content.decode('<STR_LIT:utf-8>'))<EOL>
Deserializes byte content that is a JSON encoding. Args: byte_content: The byte content of a JSON response. Returns: The deserialized python object decoded from JSON.
f7947:c0:m4
def define_flags(self, parser):
group = parser.add_argument_group('<STR_LIT>')<EOL>group.add_argument(<EOL>'<STR_LIT>',<EOL>metavar='<STR_LIT>',<EOL>type=int,<EOL>default=-<NUM_LIT:1>,<EOL>help='''<STR_LIT>'''at which the interactive debugger data server (to be started by<EOL>gger plugin) should receive debugging data via gRPC from one or<EOL>ugger-enabled TensorFlow runtimes. No debugger plugin or<EOL>data server will be started if this flag is not provided. This<EOL>fers from the`--debugger_data_server_grpc_port` flag in that it<EOL>n interactive mode that allows user to pause at selected nodes<EOL>TensorFlow Graph or between Session.runs. It is for use with<EOL>ractive Debugger Dashboard. This flag is mutually exclusive with<EOL>ger_data_server_grpc_port`.<EOL>
Adds DebuggerPlugin CLI flags to parser.
f7949:c0:m0
def fix_flags(self, flags):
<EOL>if flags.debugger_data_server_grpc_port > <NUM_LIT:0> and flags.debugger_port > <NUM_LIT:0>:<EOL><INDENT>raise base_plugin.FlagsError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>
Fixes Debugger related flags. Raises: ValueError: If both the `debugger_data_server_grpc_port` and `debugger_port` flags are specified as >= 0.
f7949:c0:m1
def load(self, context):
if not (context.flags.debugger_data_server_grpc_port > <NUM_LIT:0> or<EOL>context.flags.debugger_port > <NUM_LIT:0>):<EOL><INDENT>return None<EOL><DEDENT>flags = context.flags<EOL>try:<EOL><INDENT>import tensorflow<EOL><DEDENT>except ImportError:<EOL><INDENT>raise ImportError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>from tensorboard.plugins.debugger import debugger_plugin as debugger_plugin_lib<EOL>from tensorboard.plugins.debugger import interactive_debugger_plugin as interactive_debugger_plugin_lib<EOL><DEDENT>except ImportError as e:<EOL><INDENT>e_type, e_value, e_traceback = sys.exc_info()<EOL>message = e.msg if hasattr(e, '<STR_LIT>') else e.message <EOL>if '<STR_LIT>' in message:<EOL><INDENT>e_value = ImportError(<EOL>message +<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>six.reraise(e_type, e_value, e_traceback)<EOL><DEDENT>if flags.debugger_port > <NUM_LIT:0>:<EOL><INDENT>interactive_plugin = (<EOL>interactive_debugger_plugin_lib.InteractiveDebuggerPlugin(context))<EOL>logger.info('<STR_LIT>',<EOL>flags.debugger_data_server_grpc_port)<EOL>interactive_plugin.listen(flags.debugger_port)<EOL>return interactive_plugin<EOL><DEDENT>elif flags.debugger_data_server_grpc_port > <NUM_LIT:0>:<EOL><INDENT>noninteractive_plugin = debugger_plugin_lib.DebuggerPlugin(context)<EOL>logger.info('<STR_LIT>',<EOL>flags.debugger_data_server_grpc_port)<EOL>noninteractive_plugin.listen(flags.debugger_data_server_grpc_port)<EOL>return noninteractive_plugin<EOL><DEDENT>raise AssertionError()<EOL>
Returns the debugger plugin, if possible. Args: context: The TBContext flags including `add_arguments`. Returns: A DebuggerPlugin instance or None if it couldn't be loaded.
f7949:c0:m2
def __init__(self,<EOL>events_directory,<EOL>single_file_size_cap_bytes=_DEFAULT_EVENTS_FILE_SIZE_CAP_BYTES,<EOL>check_this_often=_DEFAULT_CHECK_EVENT_FILES_SIZE_CAP_EVERY,<EOL>total_file_size_cap_bytes=_DEFAULT_TOTAL_SIZE_CAP_BYTES,<EOL>always_flush=False):
self._events_directory = events_directory<EOL>self._single_file_size_cap_bytes = single_file_size_cap_bytes<EOL>self.total_file_size_cap_bytes = total_file_size_cap_bytes<EOL>self._check_this_often = check_this_often<EOL>self._always_flush = always_flush<EOL>self._events_file_count = <NUM_LIT:0><EOL>events_file_names = self._fetch_events_files_on_disk()<EOL>if events_file_names:<EOL><INDENT>self._events_file_count = self._obtain_file_index(<EOL>events_file_names[-<NUM_LIT:1>]) + <NUM_LIT:1><EOL><DEDENT>self._event_count = <NUM_LIT:0><EOL>self._lock = threading.Lock()<EOL>self._events_writer = self._create_events_writer(events_directory)<EOL>
Constructs an EventsWriterManager. Args: events_directory: (`string`) The log directory in which debugger events reside. single_file_size_cap_bytes: (`int`) A number of bytes. During a check, if the manager determines that the events file being written to exceeds this size, it creates a new events file to write to. Note that events file may still exceed this size - the events writer manager just creates a new events file if it finds that the current file exceeds this size. check_this_often: (`int`) The manager performs a file size check every this many events. We want to avoid checking upon every event for performance reasons. If provided, must be greater than 1. total_file_size_cap_bytes: A cap on the total number of bytes occupied by all events. When a new events writer is created, the least recently created events file will be deleted if the total size occupied by debugger-related events on disk exceeds this cap. Note that the total size could now and then be larger than this cap because the events writer manager only checks when it creates a new events file. always_flush: (`bool`) Whether to flush to disk after every write. Useful for testing.
f7950:c0:m0
def write_event(self, event):
self._lock.acquire()<EOL>try:<EOL><INDENT>self._events_writer.WriteEvent(event)<EOL>self._event_count += <NUM_LIT:1><EOL>if self._always_flush:<EOL><INDENT>self._events_writer.Flush()<EOL><DEDENT>if self._event_count == self._check_this_often:<EOL><INDENT>self._event_count = <NUM_LIT:0><EOL>self._events_writer.Flush()<EOL>file_path = os.path.join(self._events_directory,<EOL>self.get_current_file_name())<EOL>if not tf.io.gfile.exists(file_path):<EOL><INDENT>self._events_writer.Close()<EOL>self._events_writer = self._create_events_writer(<EOL>self._events_directory)<EOL><DEDENT>elif tf.io.gfile.stat(file_path).length > self._single_file_size_cap_bytes:<EOL><INDENT>self._events_writer.Close()<EOL>self._events_writer = self._create_events_writer(<EOL>self._events_directory)<EOL><DEDENT><DEDENT><DEDENT>except IOError as err:<EOL><INDENT>logger.error(<EOL>"<STR_LIT>", self.get_current_file_name(), err)<EOL><DEDENT>self._lock.release()<EOL>
Writes an event proto to disk. This method is threadsafe with respect to invocations of itself. Args: event: The event proto. Raises: IOError: If writing the event proto to disk fails.
f7950:c0:m1
def get_current_file_name(self):
return tf.compat.as_text(self._events_writer.FileName())<EOL>
Gets the name of the events file currently being written to. Returns: The name of the events file being written to.
f7950:c0:m2
def dispose(self):
self._lock.acquire()<EOL>self._events_writer.Close()<EOL>self._events_writer = None<EOL>self._lock.release()<EOL>
Disposes of this events writer manager, making it no longer usable. Call this method when this object is done being used in order to clean up resources and handlers. This method should ever only be called once.
f7950:c0:m3
def _create_events_writer(self, directory):
total_size = <NUM_LIT:0><EOL>events_files = self._fetch_events_files_on_disk()<EOL>for file_name in events_files:<EOL><INDENT>file_path = os.path.join(self._events_directory, file_name)<EOL>total_size += tf.io.gfile.stat(file_path).length<EOL><DEDENT>if total_size >= self.total_file_size_cap_bytes:<EOL><INDENT>for file_name in events_files:<EOL><INDENT>if total_size < self.total_file_size_cap_bytes:<EOL><INDENT>break<EOL><DEDENT>file_path = os.path.join(self._events_directory, file_name)<EOL>file_size = tf.io.gfile.stat(file_path).length<EOL>try:<EOL><INDENT>tf.io.gfile.remove(file_path)<EOL>total_size -= file_size<EOL>logger.info(<EOL>"<STR_LIT>",<EOL>file_path, self.total_file_size_cap_bytes)<EOL><DEDENT>except IOError as err:<EOL><INDENT>logger.error("<STR_LIT>", file_path, err)<EOL><DEDENT><DEDENT><DEDENT>self._events_file_count += <NUM_LIT:1><EOL>file_path = "<STR_LIT>" % (<EOL>os.path.join(directory, DEBUGGER_EVENTS_FILE_STARTING_TEXT),<EOL>time.time(), self._events_file_count)<EOL>logger.info("<STR_LIT>", file_path)<EOL>return pywrap_tensorflow.EventsWriter(tf.compat.as_bytes(file_path))<EOL>
Creates a new events writer. Args: directory: The directory in which to write files containing events. Returns: A new events writer, which corresponds to a new events file.
f7950:c0:m4
def _fetch_events_files_on_disk(self):
all_files = tf.io.gfile.listdir(self._events_directory)<EOL>relevant_files = [<EOL>file_name for file_name in all_files<EOL>if _DEBUGGER_EVENTS_FILE_NAME_REGEX.match(file_name)<EOL>]<EOL>return sorted(relevant_files, key=self._obtain_file_index)<EOL>
Obtains the names of debugger-related events files within the directory. Returns: The names of the debugger-related events files written to disk. The names are sorted in increasing events file index.
f7950:c0:m5
def _obtain_file_index(self, file_name):
return int(_DEBUGGER_EVENTS_FILE_NAME_REGEX.match(file_name).group(<NUM_LIT:1>))<EOL>
Obtains the file index associated with an events file. The index is stored within a file name and is incremented every time a new events file is created. Assumes that the file name is a valid debugger events file name. Args: file_name: The name of the debugger-related events file. The file index is stored within the file name. Returns: The integer events file index.
f7950:c0:m6
def put(self, message):
with self._outgoing_lock:<EOL><INDENT>self._outgoing.append(message)<EOL>self._outgoing_counter += <NUM_LIT:1><EOL>if self._outgoing_counter in self._outgoing_pending_queues:<EOL><INDENT>for q in self._outgoing_pending_queues[self._outgoing_counter]:<EOL><INDENT>q.put(message)<EOL><DEDENT>del self._outgoing_pending_queues[self._outgoing_counter]<EOL><DEDENT><DEDENT>
Put a message into the outgoing message stack. Outgoing message will be stored indefinitely to support multi-users.
f7952:c0:m1
def get(self, pos):
if pos <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>' % pos)<EOL><DEDENT>with self._outgoing_lock:<EOL><INDENT>if self._outgoing_counter >= pos:<EOL><INDENT>return self._outgoing[pos - <NUM_LIT:1>], self._outgoing_counter<EOL><DEDENT>else:<EOL><INDENT>if pos not in self._outgoing_pending_queues:<EOL><INDENT>self._outgoing_pending_queues[pos] = []<EOL><DEDENT>q = queue.Queue(maxsize=<NUM_LIT:1>)<EOL>self._outgoing_pending_queues[pos].append(q)<EOL><DEDENT><DEDENT>value = q.get()<EOL>with self._outgoing_lock:<EOL><INDENT>return value, self._outgoing_counter<EOL><DEDENT>
Get message(s) from the outgoing message stack. Blocks until an item at stack position pos becomes available. This method is thread safe. Args: pos: An int specifying the top position of the message stack to access. For example, if the stack counter is at 3 and pos == 2, then the 2nd item on the stack will be returned, together with an int that indicates the current stack heigh (3 in this case). Returns: 1. The item at stack position pos. 2. The height of the stack when the retun values are generated. Raises: ValueError: If input `pos` is zero or negative.
f7952:c0:m2
def __init__(self, context):
self._event_multiplexer = context.multiplexer<EOL>self._logdir = context.logdir<EOL>self._debugger_data_server = None<EOL>self._grpc_port = None<EOL>
Constructs a debugger plugin for TensorBoard. This plugin adds handlers for retrieving debugger-related data. The plugin also starts a debugger data server once the log directory is passed to the plugin via the call to get_plugin_apps. Args: context: A base_plugin.TBContext instance.
f7955:c0:m0
def listen(self, grpc_port):
if self._grpc_port:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>" %<EOL>self._grpc_port)<EOL><DEDENT>self._grpc_port = grpc_port<EOL>sys.stderr.write('<STR_LIT>' %<EOL>(self._grpc_port, self._logdir))<EOL>sys.stderr.flush()<EOL>self._debugger_data_server = debugger_server_lib.DebuggerDataServer(<EOL>self._grpc_port, self._logdir)<EOL>threading.Thread(target=self._debugger_data_server.<EOL>start_the_debugger_data_receiving_server).start()<EOL>
Start listening on the given gRPC port. This method of an instance of DebuggerPlugin can be invoked at most once. This method is not thread safe. Args: grpc_port: port number to listen at. Raises: ValueError: If this instance is already listening at a gRPC port.
f7955:c0:m1
def get_plugin_apps(self):
return {<EOL>_HEALTH_PILLS_ROUTE: self._serve_health_pills_handler,<EOL>_NUMERICS_ALERT_REPORT_ROUTE: self._serve_numerics_alert_report_handler,<EOL>}<EOL>
Obtains a mapping between routes and handlers. This function also starts a debugger data server on separate thread if the plugin has not started one yet. Returns: A mapping between routes and handlers (functions that respond to requests).
f7955:c0:m2
def is_active(self):
return bool(<EOL>self._grpc_port is not None and<EOL>self._event_multiplexer and<EOL>self._event_multiplexer.PluginRunToTagToContent(<EOL>constants.DEBUGGER_PLUGIN_NAME))<EOL>
Determines whether this plugin is active. This plugin is active if any health pills information is present for any run. Returns: A boolean. Whether this plugin is active.
f7955:c0:m3
@wrappers.Request.application<EOL><INDENT>def _serve_health_pills_handler(self, request):<DEDENT>
if request.method != '<STR_LIT:POST>':<EOL><INDENT>return wrappers.Response(response=(<EOL>'<STR_LIT>' %<EOL>request.method), status=<NUM_LIT>)<EOL><DEDENT>if _NODE_NAMES_POST_KEY not in request.form:<EOL><INDENT>return wrappers.Response(response=(<EOL>'<STR_LIT>' %<EOL>_NODE_NAMES_POST_KEY), status=<NUM_LIT>)<EOL><DEDENT>jsonified_node_names = request.form[_NODE_NAMES_POST_KEY]<EOL>try:<EOL><INDENT>node_names = json.loads(tf.compat.as_text(jsonified_node_names))<EOL><DEDENT>except Exception as e: <EOL><INDENT>logger.error('<STR_LIT>',<EOL>jsonified_node_names, e)<EOL>return wrappers.Response(status=<NUM_LIT>)<EOL><DEDENT>if not isinstance(node_names, list):<EOL><INDENT>logger.error('<STR_LIT>',<EOL>jsonified_node_names)<EOL>return wrappers.Response(status=<NUM_LIT>)<EOL><DEDENT>run = request.form.get(_RUN_POST_KEY, _DEFAULT_RUN)<EOL>step_string = request.form.get(_STEP_POST_KEY, None)<EOL>if step_string is None:<EOL><INDENT>mapping = self._obtain_sampled_health_pills(run, node_names)<EOL><DEDENT>else:<EOL><INDENT>events_directory = self._logdir<EOL>if run != _DEFAULT_RUN:<EOL><INDENT>events_directory = os.path.join(events_directory, run)<EOL><DEDENT>step = int(step_string)<EOL>try:<EOL><INDENT>mapping = self._obtain_health_pills_at_step(<EOL>events_directory, node_names, step)<EOL><DEDENT>except IOError as error:<EOL><INDENT>logger.error(<EOL>'<STR_LIT>', step, error)<EOL>return wrappers.Response(status=<NUM_LIT>)<EOL><DEDENT><DEDENT>jsonable_mapping = {}<EOL>for node_name, events in mapping.items():<EOL><INDENT>jsonable_mapping[node_name] = [e._asdict() for e in events]<EOL><DEDENT>return http_util.Respond(request, jsonable_mapping, '<STR_LIT:application/json>')<EOL>
A (wrapped) werkzeug handler for serving health pills. Accepts POST requests and responds with health pills. The request accepts several POST parameters: node_names: (required string) A JSON-ified list of node names for which the client would like to request health pills. run: (optional string) The run to retrieve health pills for. Defaults to '.'. This data is sent via POST (not GET) since URL length is limited. step: (optional integer): The session run step for which to retrieve health pills. If provided, the handler reads the health pills of that step from disk (which is slow) and produces a response with only health pills at that step. If not provided, the handler returns a response with health pills at all steps sampled by the event multiplexer (the fast path). The motivation here is that, sometimes, one desires to examine health pills at a specific step (to say find the first step that causes a model to blow up with NaNs). get_plugin_apps must be called before this slower feature is used because that method passes the logdir (directory path) to this plugin. This handler responds with a JSON-ified object mapping from node names to a list (of size 1) of health pill event objects, each of which has these properties. { 'wall_time': float, 'step': int, 'node_name': string, 'output_slot': int, # A list of 12 floats that summarizes the elements of the tensor. 'value': float[], } Node names for which there are no health pills to be found are excluded from the mapping. Args: request: The request issued by the client for health pills. Returns: A werkzeug BaseResponse object.
f7955:c0:m4
def _obtain_sampled_health_pills(self, run, node_names):
runs_to_tags_to_content = self._event_multiplexer.PluginRunToTagToContent(<EOL>constants.DEBUGGER_PLUGIN_NAME)<EOL>if run not in runs_to_tags_to_content:<EOL><INDENT>return {}<EOL><DEDENT>tags_to_content = runs_to_tags_to_content[run]<EOL>mapping = {}<EOL>for node_name in node_names:<EOL><INDENT>if node_name not in tags_to_content:<EOL><INDENT>continue<EOL><DEDENT>health_pills = []<EOL>for tensor_event in self._event_multiplexer.Tensors(run, node_name):<EOL><INDENT>json_string = tags_to_content[node_name]<EOL>try:<EOL><INDENT>content_object = json.loads(tf.compat.as_text(json_string))<EOL>device_name = content_object['<STR_LIT>']<EOL>output_slot = content_object['<STR_LIT>']<EOL>health_pills.append(<EOL>self._tensor_proto_to_health_pill(tensor_event, node_name,<EOL>device_name, output_slot))<EOL><DEDENT>except (KeyError, ValueError) as e:<EOL><INDENT>logger.error('<STR_LIT>'<EOL>'<STR_LIT>', json_string, e)<EOL><DEDENT><DEDENT>mapping[node_name] = health_pills<EOL><DEDENT>return mapping<EOL>
Obtains the health pills for a run sampled by the event multiplexer. This is much faster than the alternative path of reading health pills from disk. Args: run: The run to fetch health pills for. node_names: A list of node names for which to retrieve health pills. Returns: A dictionary mapping from node name to a list of event_accumulator.HealthPillEvents.
f7955:c0:m5
def _tensor_proto_to_health_pill(self, tensor_event, node_name, device,<EOL>output_slot):
return self._process_health_pill_value(<EOL>wall_time=tensor_event.wall_time,<EOL>step=tensor_event.step,<EOL>device_name=device,<EOL>output_slot=output_slot,<EOL>node_name=node_name,<EOL>tensor_proto=tensor_event.tensor_proto)<EOL>
Converts an event_accumulator.TensorEvent to a HealthPillEvent. Args: tensor_event: The event_accumulator.TensorEvent to convert. node_name: The name of the node (without the output slot). device: The device. output_slot: The integer output slot this health pill is relevant to. Returns: A HealthPillEvent.
f7955:c0:m6
def _obtain_health_pills_at_step(self, events_directory, node_names, step):
<EOL>pattern = os.path.join(events_directory, _DEBUGGER_EVENTS_GLOB_PATTERN)<EOL>file_paths = glob.glob(pattern)<EOL>if not file_paths:<EOL><INDENT>raise IOError(<EOL>'<STR_LIT>' % pattern)<EOL><DEDENT>file_paths.sort()<EOL>mapping = collections.defaultdict(list)<EOL>node_name_set = frozenset(node_names)<EOL>for file_path in file_paths:<EOL><INDENT>should_stop = self._process_health_pill_event(<EOL>node_name_set, mapping, step, file_path)<EOL>if should_stop:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return mapping<EOL>
Reads disk to obtain the health pills for a run at a specific step. This could be much slower than the alternative path of just returning all health pills sampled by the event multiplexer. It could take tens of minutes to complete this call for large graphs for big step values (in the thousands). Args: events_directory: The directory containing events for the desired run. node_names: A list of node names for which to retrieve health pills. step: The step to obtain health pills for. Returns: A dictionary mapping from node name to a list of health pill objects (see docs for _serve_health_pills_handler for properties of those objects). Raises: IOError: If no files with health pill events could be found.
f7955:c0:m7
def _process_health_pill_event(self, node_name_set, mapping, target_step,<EOL>file_path):
events_loader = event_file_loader.EventFileLoader(file_path)<EOL>for event in events_loader.Load():<EOL><INDENT>if not event.HasField('<STR_LIT>'):<EOL><INDENT>logger.warn(<EOL>'<STR_LIT>')<EOL>continue<EOL><DEDENT>if event.step < target_step:<EOL><INDENT>continue<EOL><DEDENT>if event.step > target_step:<EOL><INDENT>return True<EOL><DEDENT>for value in event.summary.value:<EOL><INDENT>summary_metadata = value.metadata<EOL>plugin_data = summary_metadata.plugin_data<EOL>if plugin_data.plugin_name == constants.DEBUGGER_PLUGIN_NAME:<EOL><INDENT>try:<EOL><INDENT>content = json.loads(<EOL>tf.compat.as_text(summary_metadata.plugin_data.content))<EOL><DEDENT>except ValueError as err:<EOL><INDENT>logger.warn(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>', content, err)<EOL>continue<EOL><DEDENT>device_name = content['<STR_LIT>']<EOL>output_slot = content['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>logger.error(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>', value.tag, value.node_name)<EOL>continue<EOL><DEDENT>if not value.HasField('<STR_LIT>'):<EOL><INDENT>logger.warn(<EOL>'<STR_LIT>')<EOL>continue<EOL><DEDENT>match = re.match(r'<STR_LIT>', value.node_name)<EOL>if not match:<EOL><INDENT>logger.warn(<EOL>('<STR_LIT>'<EOL>'<STR_LIT>'), value.node_name)<EOL>return None<EOL><DEDENT>health_pill = self._process_health_pill_value(<EOL>wall_time=event.wall_time,<EOL>step=event.step,<EOL>device_name=device_name,<EOL>output_slot=output_slot,<EOL>node_name=match.group(<NUM_LIT:1>),<EOL>tensor_proto=value.tensor,<EOL>node_name_set=node_name_set)<EOL>if not health_pill:<EOL><INDENT>continue<EOL><DEDENT>mapping[health_pill.node_name].append(health_pill)<EOL><DEDENT><DEDENT>return False<EOL>
Creates health pills out of data in an event. Creates health pills out of the event and adds them to the mapping. Args: node_name_set: A set of node names that are relevant. mapping: The mapping from node name to HealthPillEvents. This object may be destructively modified. target_step: The target step at which to obtain health pills. file_path: The path to the file with health pill events. Returns: Whether we should stop reading events because future events are no longer relevant.
f7955:c0:m8
def _process_health_pill_value(self,<EOL>wall_time,<EOL>step,<EOL>device_name,<EOL>output_slot,<EOL>node_name,<EOL>tensor_proto,<EOL>node_name_set=None):
if node_name_set and node_name not in node_name_set:<EOL><INDENT>return None<EOL><DEDENT>elements = list(tensor_util.make_ndarray(tensor_proto))<EOL>return HealthPillEvent(<EOL>wall_time=wall_time,<EOL>step=step,<EOL>device_name=device_name,<EOL>output_slot=output_slot,<EOL>node_name=node_name,<EOL>dtype=repr(tf.as_dtype(elements[<NUM_LIT:12>])),<EOL>shape=elements[<NUM_LIT>:],<EOL>value=elements)<EOL>
Creates a HealthPillEvent containing various properties of a health pill. Args: wall_time: The wall time in seconds. step: The session run step of the event. device_name: The name of the node's device. output_slot: The numeric output slot. node_name: The name of the node (without the output slot). tensor_proto: A tensor proto of data. node_name_set: An optional set of node names that are relevant. If not provided, no filtering by relevance occurs. Returns: An event_accumulator.HealthPillEvent. Or None if one could not be created.
f7955:c0:m9
@wrappers.Request.application<EOL><INDENT>def _serve_numerics_alert_report_handler(self, request):<DEDENT>
if request.method != '<STR_LIT:GET>':<EOL><INDENT>logger.error(<EOL>'<STR_LIT>', request.method)<EOL>return wrappers.Response(status=<NUM_LIT>)<EOL><DEDENT>report = self._debugger_data_server.numerics_alert_report()<EOL>response = [r._asdict() for r in report] <EOL>return http_util.Respond(request, response, '<STR_LIT:application/json>')<EOL>
A (wrapped) werkzeug handler for serving numerics alert report. Accepts GET requests and responds with an array of JSON-ified NumericsAlertReportRow. Each JSON-ified NumericsAlertReportRow object has the following format: { 'device_name': string, 'tensor_name': string, 'first_timestamp': float, 'nan_event_count': int, 'neg_inf_event_count': int, 'pos_inf_event_count': int } These objects are sorted by ascending order of first_timestamp in the response array. Args: request: The request, currently assumed to be empty. Returns: A werkzeug BaseResponse object.
f7955:c0:m10
def __init__(self, context):
del context <EOL>self._debugger_data_server = None<EOL>self._server_thread = None<EOL>self._grpc_port = None<EOL>
Constructs a debugger plugin for TensorBoard. This plugin adds handlers for retrieving debugger-related data. The plugin also starts a debugger data server once the log directory is passed to the plugin via the call to get_plugin_apps. Args: context: A base_plugin.TBContext instance.
f7956:c0:m0
def listen(self, grpc_port):
if self._grpc_port:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % self._grpc_port)<EOL><DEDENT>self._grpc_port = grpc_port<EOL>sys.stderr.write('<STR_LIT>' %<EOL>self._grpc_port)<EOL>sys.stderr.flush()<EOL>self._debugger_data_server = (<EOL>interactive_debugger_server_lib.InteractiveDebuggerDataServer(<EOL>self._grpc_port))<EOL>self._server_thread = threading.Thread(<EOL>target=self._debugger_data_server.run_server)<EOL>self._server_thread.start()<EOL>signal.signal(signal.SIGINT, self.signal_handler)<EOL>
Start listening on the given gRPC port. This method of an instance of InteractiveDebuggerPlugin can be invoked at most once. This method is not thread safe. Args: grpc_port: port number to listen at. Raises: ValueError: If this instance is already listening at a gRPC port.
f7956:c0:m1
def get_plugin_apps(self):
return {<EOL>_ACK_ROUTE: self._serve_ack,<EOL>_COMM_ROUTE: self._serve_comm,<EOL>_DEBUGGER_GRPC_HOST_PORT_ROUTE: self._serve_debugger_grpc_host_port,<EOL>_DEBUGGER_GRAPH_ROUTE: self._serve_debugger_graph,<EOL>_GATED_GRPC_ROUTE: self._serve_gated_grpc,<EOL>_TENSOR_DATA_ROUTE: self._serve_tensor_data,<EOL>_SOURCE_CODE_ROUTE: self._serve_source_code,<EOL>}<EOL>
Obtains a mapping between routes and handlers. This function also starts a debugger data server on separate thread if the plugin has not started one yet. Returns: A mapping between routes and handlers (functions that respond to requests).
f7956:c0:m3
def is_active(self):
return self._grpc_port is not None<EOL>
Determines whether this plugin is active. This plugin is active if any health pills information is present for any run. Returns: A boolean. Whether this plugin is active.
f7956:c0:m4
def calc_health_pill(tensor):
health_pill = [<NUM_LIT:0.0>] * <NUM_LIT><EOL>if not isinstance(tensor, np.ndarray):<EOL><INDENT>return health_pill<EOL><DEDENT>health_pill[<NUM_LIT:0>] = <NUM_LIT:1.0><EOL>if not (np.issubdtype(tensor.dtype, np.float) or<EOL>np.issubdtype(tensor.dtype, np.complex) or<EOL>np.issubdtype(tensor.dtype, np.integer) or<EOL>tensor.dtype == np.bool):<EOL><INDENT>return None<EOL><DEDENT>health_pill[<NUM_LIT:1>] = float(np.size(tensor))<EOL>nan_mask = np.isnan(tensor)<EOL>inf_mask = np.isinf(tensor)<EOL>health_pill[<NUM_LIT:2>] = float(np.sum(nan_mask))<EOL>health_pill[<NUM_LIT:3>] = float(np.sum(tensor == -np.inf))<EOL>health_pill[<NUM_LIT:4>] = float(np.sum(<EOL>np.logical_and(np.logical_not(inf_mask), tensor < <NUM_LIT:0.0>)))<EOL>health_pill[<NUM_LIT:5>] = float(np.sum(tensor == <NUM_LIT:0.0>))<EOL>health_pill[<NUM_LIT:6>] = float(np.sum(<EOL>np.logical_and(np.logical_not(inf_mask), tensor > <NUM_LIT:0.0>)))<EOL>health_pill[<NUM_LIT:7>] = float(np.sum(tensor == np.inf))<EOL>finite_subset = tensor[<EOL>np.logical_and(np.logical_not(nan_mask), np.logical_not(inf_mask))]<EOL>if np.size(finite_subset):<EOL><INDENT>health_pill[<NUM_LIT:8>] = float(np.min(finite_subset))<EOL>health_pill[<NUM_LIT:9>] = float(np.max(finite_subset))<EOL>health_pill[<NUM_LIT:10>] = float(np.mean(finite_subset))<EOL>health_pill[<NUM_LIT:11>] = float(np.var(finite_subset))<EOL><DEDENT>else:<EOL><INDENT>health_pill[<NUM_LIT:8>] = np.inf<EOL>health_pill[<NUM_LIT:9>] = -np.inf<EOL>health_pill[<NUM_LIT:10>] = np.nan<EOL>health_pill[<NUM_LIT:11>] = np.nan<EOL><DEDENT>health_pill[<NUM_LIT:12>] = -<NUM_LIT:1.0><EOL>health_pill[<NUM_LIT>] = float(len(tensor.shape))<EOL>health_pill.extend([float(x) for x in tensor.shape])<EOL>return health_pill<EOL>
Calculate health pill of a tensor. Args: tensor: An instance of `np.array` (for initialized tensors) or `tensorflow.python.debug.lib.debug_data.InconvertibleTensorProto` (for uninitialized tensors). Returns: If `tensor` is an initialized tensor of numeric or boolean types: the calculated health pill, as a `list` of `float`s. Else if `tensor` is an initialized tensor with `string`, `resource` or any other non-numeric types: `None`. Else (i.e., if `tensor` is uninitialized): An all-zero `list`, with the first element signifying that the tensor is uninitialized.
f7958:m0
def numel(shape):
output = <NUM_LIT:1><EOL>for dim in shape:<EOL><INDENT>output *= dim<EOL><DEDENT>return output<EOL>
Obtain total number of elements from a tensor (ndarray) shape. Args: shape: A list or tuple representing a tensor (ndarray) shape.
f7959:m0
def parse_time_indices(s):
if not s.startswith('<STR_LIT:[>'):<EOL><INDENT>s = '<STR_LIT:[>' + s + '<STR_LIT:]>'<EOL><DEDENT>parsed = command_parser._parse_slices(s)<EOL>if len(parsed) != <NUM_LIT:1>:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>' % len(parsed))<EOL><DEDENT>else:<EOL><INDENT>return parsed[<NUM_LIT:0>]<EOL><DEDENT>
Parse a string as time indices. Args: s: A valid slicing string for time indices. E.g., '-1', '[:]', ':', '2:10' Returns: A slice object. Raises: ValueError: If `s` does not represent valid time indices.
f7959:m1
def translate_dtype(dtype):
out = str(dtype)<EOL>return '<STR_LIT:string>' if out == '<STR_LIT:object>' else out<EOL>
Translate numpy dtype into a string. The 'object' type is understood as a TensorFlow string and translated into 'string'. Args: dtype: A numpy dtype object. Returns: A string representing the data type.
f7959:m2
def process_buffers_for_display(s, limit=<NUM_LIT>):
if isinstance(s, (list, tuple)):<EOL><INDENT>return [process_buffers_for_display(elem, limit=limit) for elem in s]<EOL><DEDENT>else:<EOL><INDENT>length = len(s)<EOL>if length > limit:<EOL><INDENT>return (binascii.b2a_qp(s[:limit]) +<EOL>b'<STR_LIT>' % (length, limit))<EOL><DEDENT>else:<EOL><INDENT>return binascii.b2a_qp(s)<EOL><DEDENT><DEDENT>
Process a buffer for human-readable display. This function performs the following operation on each of the buffers in `s`. 1. Truncate input buffer if the length of the buffer is greater than `limit`, to prevent large strings from overloading the frontend. 2. Apply `binascii.b2a_qp` on the truncated buffer to make the buffer printable and convertible to JSON. 3. If truncation happened (in step 1), append a string at the end describing the original length and the truncation. Args: s: The buffer to be processed, either a single buffer or a nested array of them. limit: Length limit for each buffer, beyond which truncation will occur. Return: A single processed buffer or a nested array of processed buffers.
f7959:m3
def array_view(array, slicing=None, mapping=None):
dtype = translate_dtype(array.dtype)<EOL>sliced_array = (array[command_parser._parse_slices(slicing)] if slicing<EOL>else array)<EOL>if np.isscalar(sliced_array) and str(dtype) == '<STR_LIT:string>':<EOL><INDENT>ndims = len(array.shape)<EOL>slice_shape = []<EOL>for _ in range(ndims):<EOL><INDENT>sliced_array = [sliced_array]<EOL>slice_shape.append(<NUM_LIT:1>)<EOL><DEDENT>return dtype, tuple(slice_shape), sliced_array<EOL><DEDENT>else:<EOL><INDENT>shape = sliced_array.shape<EOL>if mapping == "<STR_LIT>":<EOL><INDENT>if len(sliced_array.shape) == <NUM_LIT:2>:<EOL><INDENT>return dtype, shape, array_to_base64_png(sliced_array)<EOL><DEDENT>elif len(sliced_array.shape) == <NUM_LIT:3>:<EOL><INDENT>raise NotImplementedError(<EOL>"<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>" %<EOL>len(sliced_array.shape))<EOL><DEDENT><DEDENT>elif mapping == '<STR_LIT>':<EOL><INDENT>health_pill = health_pill_calc.calc_health_pill(array)<EOL>return dtype, shape, health_pill<EOL><DEDENT>elif mapping is None or mapping == '<STR_LIT>' or mapping.lower() == '<STR_LIT:none>':<EOL><INDENT>return dtype, shape, sliced_array.tolist()<EOL><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>" % mapping)<EOL><DEDENT><DEDENT>
View a slice or the entirety of an ndarray. Args: array: The input array, as an numpy.ndarray. slicing: Optional slicing string, e.g., "[:, 1:3, :]". mapping: Optional mapping string. Supported mappings: `None` or case-insensitive `'None'`: Unmapped nested list. `'image/png'`: Image encoding of a 2D sliced array or 3D sliced array with 3 as the last dimension. If the sliced array is not 2D or 3D with 3 as the last dimension, a `ValueError` will be thrown. `health-pill`: A succinct summary of the numeric values of a tensor. See documentation in [`health_pill_calc.py`] for more details. Returns: 1. dtype as a `str`. 2. shape of the sliced array, as a tuple of `int`s. 3. the potentially sliced values, as a nested `list`.
f7959:m4
def array_to_base64_png(array):
<EOL>array = np.array(array, dtype=np.float32)<EOL>if len(array.shape) != <NUM_LIT:2>:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>" % len(array.shape))<EOL><DEDENT>if not np.size(array):<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>" % (array.shape,))<EOL><DEDENT>is_infinity = np.isinf(array)<EOL>is_positive = array > <NUM_LIT:0.0><EOL>is_positive_infinity = np.logical_and(is_infinity, is_positive)<EOL>is_negative_infinity = np.logical_and(is_infinity,<EOL>np.logical_not(is_positive))<EOL>is_nan = np.isnan(array)<EOL>finite_indices = np.where(np.logical_and(np.logical_not(is_infinity),<EOL>np.logical_not(is_nan)))<EOL>if np.size(finite_indices):<EOL><INDENT>minval = np.min(array[finite_indices])<EOL>maxval = np.max(array[finite_indices])<EOL>scaled = np.array((array - minval) / (maxval - minval) * <NUM_LIT:255>,<EOL>dtype=np.uint8)<EOL>rgb = np.repeat(np.expand_dims(scaled, -<NUM_LIT:1>), IMAGE_COLOR_CHANNELS, axis=-<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>rgb = np.zeros(array.shape + (IMAGE_COLOR_CHANNELS,), dtype=np.uint8)<EOL><DEDENT>rgb[is_positive_infinity] = POSITIVE_INFINITY_RGB<EOL>rgb[is_negative_infinity] = NEGATIVE_INFINITY_RGB<EOL>rgb[is_nan] = NAN_RGB<EOL>image_encoded = base64.b64encode(encoder.encode_png(rgb))<EOL>return image_encoded<EOL>
Convert an array into base64-encoded PNG image. Args: array: A 2D np.ndarray or nested list of items. Returns: A base64-encoded string of the image. The image is grayscale if the array is 2D. The image is RGB color if the image is 3D with last dimension equal to 3. Raises: ValueError: If the input `array` is not rank-2, or if the rank-2 `array` is empty.
f7959:m5
def _extract_device_name_from_event(event):
plugin_data_content = json.loads(<EOL>tf.compat.as_str(event.summary.value[<NUM_LIT:0>].metadata.plugin_data.content))<EOL>return plugin_data_content['<STR_LIT>']<EOL>
Extract device name from a tf.Event proto carrying tensor value.
f7960:m0
def _comm_tensor_data(device_name,<EOL>node_name,<EOL>maybe_base_expanded_node_name,<EOL>output_slot,<EOL>debug_op,<EOL>tensor_value,<EOL>wall_time):
output_slot = int(output_slot)<EOL>logger.info(<EOL>'<STR_LIT>', node_name, output_slot, debug_op)<EOL>tensor_values = None<EOL>if isinstance(tensor_value, debug_data.InconvertibleTensorProto):<EOL><INDENT>if not tensor_value.initialized:<EOL><INDENT>tensor_dtype = UNINITIALIZED_TAG<EOL>tensor_shape = UNINITIALIZED_TAG<EOL><DEDENT>else:<EOL><INDENT>tensor_dtype = UNSUPPORTED_TAG<EOL>tensor_shape = UNSUPPORTED_TAG<EOL><DEDENT>tensor_values = NA_TAG<EOL><DEDENT>else:<EOL><INDENT>tensor_dtype = tensor_helper.translate_dtype(tensor_value.dtype)<EOL>tensor_shape = tensor_value.shape<EOL>if tensor_helper.numel(tensor_shape) < <NUM_LIT:5>:<EOL><INDENT>_, _, tensor_values = tensor_helper.array_view(tensor_value)<EOL>if tensor_dtype == '<STR_LIT:string>' and tensor_value is not None:<EOL><INDENT>tensor_values = tensor_helper.process_buffers_for_display(<EOL>tensor_values, limit=STRING_ELEMENT_MAX_LEN)<EOL><DEDENT><DEDENT><DEDENT>return {<EOL>'<STR_LIT:type>': '<STR_LIT>',<EOL>'<STR_LIT>': wall_time,<EOL>'<STR_LIT:data>': {<EOL>'<STR_LIT>': device_name,<EOL>'<STR_LIT>': node_name,<EOL>'<STR_LIT>': maybe_base_expanded_node_name,<EOL>'<STR_LIT>': output_slot,<EOL>'<STR_LIT>': debug_op,<EOL>'<STR_LIT>': tensor_dtype,<EOL>'<STR_LIT>': tensor_shape,<EOL>'<STR_LIT>': tensor_values,<EOL>},<EOL>}<EOL>
Create a dict() as the outgoing data in the tensor data comm route. Note: The tensor data in the comm route does not include the value of the tensor in its entirety in general. Only if a tensor satisfies the following conditions will its entire value be included in the return value of this method: 1. Has a numeric data type (e.g., float32, int32) and has fewer than 5 elements. 2. Is a string tensor and has fewer than 5 elements. Each string element is up to 40 bytes. Args: device_name: Name of the device that the tensor is on. node_name: (Original) name of the node that produces the tensor. maybe_base_expanded_node_name: Possibly base-expanded node name. output_slot: Output slot number. debug_op: Name of the debug op. tensor_value: Value of the tensor, as a numpy.ndarray. wall_time: Wall timestamp for the tensor. Returns: A dict representing the tensor data.
f7960:m2
def __init__(self, breakpoints_func=None):
<EOL>self._run_key_to_original_graphs = dict()<EOL>self._run_key_to_debug_graphs = dict()<EOL>if breakpoints_func:<EOL><INDENT>assert callable(breakpoints_func)<EOL>self._breakpoints_func = breakpoints_func<EOL><DEDENT>
Constructor of RunStates. Args: breakpoints_func: A callable of the signature: def breakpoints_func(): which returns all the currently activated breakpoints.
f7960:c0:m0
def add_graph(self, run_key, device_name, graph_def, debug=False):
graph_dict = (self._run_key_to_debug_graphs if debug else<EOL>self._run_key_to_original_graphs)<EOL>if not run_key in graph_dict:<EOL><INDENT>graph_dict[run_key] = dict() <EOL><DEDENT>graph_dict[run_key][tf.compat.as_str(device_name)] = (<EOL>debug_graphs_helper.DebugGraphWrapper(graph_def))<EOL>
Add a GraphDef. Args: run_key: A key for the run, containing information about the feeds, fetches, and targets. device_name: The name of the device that the `GraphDef` is for. graph_def: An instance of the `GraphDef` proto. debug: Whether `graph_def` consists of the debug ops.
f7960:c0:m1
def get_graphs(self, run_key, debug=False):
graph_dict = (self._run_key_to_debug_graphs if debug else<EOL>self._run_key_to_original_graphs)<EOL>graph_wrappers = graph_dict.get(run_key, {})<EOL>graph_defs = dict()<EOL>for device_name, wrapper in graph_wrappers.items():<EOL><INDENT>graph_defs[device_name] = wrapper.graph_def<EOL><DEDENT>return graph_defs<EOL>
Get the runtime GraphDef protos associated with a run key. Args: run_key: A Session.run key. debug: Whether the debugger-decorated graph is to be retrieved. Returns: A `dict` mapping device name to `GraphDef` protos.
f7960:c0:m2
def get_graph(self, run_key, device_name, debug=False):
return self.get_graphs(run_key, debug=debug).get(device_name, None)<EOL>
Get the runtime GraphDef proto associated with a run key and a device. Args: run_key: A Session.run key. device_name: Name of the device in question. debug: Whether the debugger-decorated graph is to be retrieved. Returns: A `GraphDef` proto.
f7960:c0:m3
def get_breakpoints(self):
return self._breakpoints_func()<EOL>
Obtain all the currently activated breakpoints.
f7960:c0:m4
def get_maybe_base_expanded_node_name(self, node_name, run_key, device_name):
device_name = tf.compat.as_str(device_name)<EOL>if run_key not in self._run_key_to_original_graphs:<EOL><INDENT>raise ValueError('<STR_LIT>' % run_key)<EOL><DEDENT>if device_name not in self._run_key_to_original_graphs[run_key]:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>' % (run_key, device_name))<EOL><DEDENT>return self._run_key_to_original_graphs[<EOL>run_key][device_name].maybe_base_expanded_node_name(node_name)<EOL>
Obtain possibly base-expanded node name. Base-expansion is the transformation of a node name which happens to be the name scope of other nodes in the same graph. For example, if two nodes, called 'a/b' and 'a/b/read' in a graph, the name of the first node will be base-expanded to 'a/b/(b)'. This method uses caching to avoid unnecessary recomputation. Args: node_name: Name of the node. run_key: The run key to which the node belongs. device_name: Name of the device to which the node belongs. Raises: ValueError: If `run_key` and/or `device_name` do not exist in the record.
f7960:c0:m6
def __init__(<EOL>self, incoming_channel, outgoing_channel, run_states, tensor_store):
super(InteractiveDebuggerDataStreamHandler, self).__init__()<EOL>self._incoming_channel = incoming_channel<EOL>self._outgoing_channel = outgoing_channel<EOL>self._run_states = run_states<EOL>self._tensor_store = tensor_store<EOL>self._run_key = None<EOL>self._graph_defs = dict() <EOL>self._graph_defs_arrive_first = True<EOL>
Constructor of InteractiveDebuggerDataStreamHandler. Args: incoming_channel: An instance of FIFO queue, which manages incoming data, e.g., ACK signals from the client side unblock breakpoints. outgoing_channel: An instance of `CommChannel`, which manages outgoing data, i.e., data regarding the starting of Session.runs and hitting of tensor breakpoints. run_states: An instance of `RunStates`, which keeps track of the states (graphs and breakpoints) of debugged Session.run() calls. tensor_store: An instance of `TensorStore`, which stores Tensor values from debugged Session.run() calls.
f7960:c1:m0
def on_core_metadata_event(self, event):
core_metadata = json.loads(event.log_message.message)<EOL>input_names = '<STR_LIT:U+002C>'.join(core_metadata['<STR_LIT>'])<EOL>output_names = '<STR_LIT:U+002C>'.join(core_metadata['<STR_LIT>'])<EOL>target_nodes = '<STR_LIT:U+002C>'.join(core_metadata['<STR_LIT>'])<EOL>self._run_key = RunKey(input_names, output_names, target_nodes)<EOL>if not self._graph_defs:<EOL><INDENT>self._graph_defs_arrive_first = False<EOL><DEDENT>else:<EOL><INDENT>for device_name in self._graph_defs:<EOL><INDENT>self._add_graph_def(device_name, self._graph_defs[device_name])<EOL><DEDENT><DEDENT>self._outgoing_channel.put(_comm_metadata(self._run_key, event.wall_time))<EOL>logger.info('<STR_LIT>')<EOL>self._incoming_channel.get()<EOL>logger.info('<STR_LIT>')<EOL>
Implementation of the core metadata-carrying Event proto callback. Args: event: An Event proto that contains core metadata about the debugged Session::Run() in its log_message.message field, as a JSON string. See the doc string of debug_data.DebugDumpDir.core_metadata for details.
f7960:c1:m1
def on_graph_def(self, graph_def, device_name, wall_time):
<EOL>del wall_time<EOL>self._graph_defs[device_name] = graph_def<EOL>if not self._graph_defs_arrive_first:<EOL><INDENT>self._add_graph_def(device_name, graph_def)<EOL>self._incoming_channel.get()<EOL><DEDENT>
Implementation of the GraphDef-carrying Event proto callback. Args: graph_def: A GraphDef proto. N.B.: The GraphDef is from the core runtime of a debugged Session::Run() call, after graph partition. Therefore it may differ from the GraphDef available to the general TensorBoard. For example, the GraphDef in general TensorBoard may get partitioned for multiple devices (CPUs and GPUs), each of which will generate a GraphDef event proto sent to this method. device_name: Name of the device on which the graph was created. wall_time: An epoch timestamp (in microseconds) for the graph.
f7960:c1:m3
def on_value_event(self, event):
if not event.summary.value:<EOL><INDENT>logger.info('<STR_LIT>')<EOL>return None<EOL><DEDENT>watch_key = event.summary.value[<NUM_LIT:0>].node_name<EOL>tensor_value = debug_data.load_tensor_from_event(event)<EOL>device_name = _extract_device_name_from_event(event)<EOL>node_name, output_slot, debug_op = (<EOL>event.summary.value[<NUM_LIT:0>].node_name.split('<STR_LIT::>'))<EOL>maybe_base_expanded_node_name = (<EOL>self._run_states.get_maybe_base_expanded_node_name(node_name,<EOL>self._run_key,<EOL>device_name))<EOL>self._tensor_store.add(watch_key, tensor_value)<EOL>self._outgoing_channel.put(_comm_tensor_data(<EOL>device_name, node_name, maybe_base_expanded_node_name, output_slot,<EOL>debug_op, tensor_value, event.wall_time))<EOL>logger.info('<STR_LIT>')<EOL>self._incoming_channel.get()<EOL>logger.info('<STR_LIT>')<EOL>if self._is_debug_node_in_breakpoints(event.summary.value[<NUM_LIT:0>].node_name):<EOL><INDENT>logger.info('<STR_LIT>',<EOL>event.summary.value[<NUM_LIT:0>].node_name)<EOL>return debug_service_pb2.EventReply()<EOL><DEDENT>return None<EOL>
Records the summary values based on an updated message from the debugger. Logs an error message if writing the event to disk fails. Args: event: The Event proto to be processed.
f7960:c1:m4
def add_debugged_source_file(self, debugged_source_file):
<EOL>key = debugged_source_file.file_path<EOL>self._source_file_host[key] = debugged_source_file.host<EOL>self._source_file_last_modified[key] = debugged_source_file.last_modified<EOL>self._source_file_bytes[key] = debugged_source_file.bytes<EOL>self._source_file_content[key] = debugged_source_file.lines<EOL>
Add a DebuggedSourceFile proto.
f7960:c2:m1
def get_paths(self):
return list(self._source_file_content.keys())<EOL>
Get the paths to all available source files.
f7960:c2:m3
def get_content(self, file_path):
return self._source_file_content[file_path]<EOL>
Get the content of a source file. # TODO(cais): Maybe support getting a range of lines by line number. Args: file_path: Path to the source file.
f7960:c2:m4
def get_op_traceback(self, op_name):
if not self._graph_traceback:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>for op_log_entry in self._graph_traceback.log_entries:<EOL><INDENT>if op_log_entry.name == op_name:<EOL><INDENT>return self._code_def_to_traceback_list(op_log_entry.code_def)<EOL><DEDENT><DEDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % (op_name, self._graph_version))<EOL>
Get the traceback of an op in the latest version of the TF graph. Args: op_name: Name of the op. Returns: Creation traceback of the op, in the form of a list of 2-tuples: (file_path, lineno) Raises: ValueError: If the op with the given name cannot be found in the latest version of the graph that this SourceManager instance has received, or if this SourceManager instance has not received any graph traceback yet.
f7960:c2:m5
def get_file_tracebacks(self, file_path):
if file_path not in self._source_file_content:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % file_path)<EOL><DEDENT>lineno_to_op_names_and_stack_position = dict()<EOL>for op_log_entry in self._graph_traceback.log_entries:<EOL><INDENT>for stack_pos, trace in enumerate(op_log_entry.code_def.traces):<EOL><INDENT>if self._graph_traceback.id_to_string[trace.file_id] == file_path:<EOL><INDENT>if trace.lineno not in lineno_to_op_names_and_stack_position:<EOL><INDENT>lineno_to_op_names_and_stack_position[trace.lineno] = []<EOL><DEDENT>lineno_to_op_names_and_stack_position[trace.lineno].append(<EOL>(op_log_entry.name, stack_pos))<EOL><DEDENT><DEDENT><DEDENT>return lineno_to_op_names_and_stack_position<EOL>
Get the lists of ops created at lines of a specified source file. Args: file_path: Path to the source file. Returns: A dict mapping line number to a list of 2-tuples, `(op_name, stack_position)` `op_name` is the name of the op whose creation traceback includes the line. `stack_position` is the position of the line in the op's creation traceback, represented as a 0-based integer. Raises: ValueError: If `file_path` does not point to a source file that has been received by this instance of `SourceManager`.
f7960:c2:m6
def __init__(self, receive_port):
super(InteractiveDebuggerDataServer, self).__init__(<EOL>receive_port, InteractiveDebuggerDataStreamHandler)<EOL>self._incoming_channel = queue.Queue()<EOL>self._outgoing_channel = comm_channel_lib.CommChannel()<EOL>self._run_states = RunStates(breakpoints_func=lambda: self.breakpoints)<EOL>self._tensor_store = tensor_store_lib.TensorStore()<EOL>self._source_manager = SourceManager()<EOL>curried_handler_constructor = functools.partial(<EOL>InteractiveDebuggerDataStreamHandler,<EOL>self._incoming_channel, self._outgoing_channel, self._run_states,<EOL>self._tensor_store)<EOL>grpc_debug_server.EventListenerBaseServicer.__init__(<EOL>self, receive_port, curried_handler_constructor)<EOL>
Receives health pills from a debugger and writes them to disk. Args: receive_port: The port at which to receive health pills from the TensorFlow debugger.
f7960:c3:m0
def query_tensor_store(self,<EOL>watch_key,<EOL>time_indices=None,<EOL>slicing=None,<EOL>mapping=None):
return self._tensor_store.query(watch_key,<EOL>time_indices=time_indices,<EOL>slicing=slicing,<EOL>mapping=mapping)<EOL>
Query tensor store for a given debugged tensor value. Args: watch_key: The watch key of the debugged tensor being sought. Format: <node_name>:<output_slot>:<debug_op> E.g., Dense_1/MatMul:0:DebugIdentity. time_indices: Optional time indices string By default, the lastest time index ('-1') is returned. slicing: Optional slicing string. mapping: Optional mapping string, e.g., 'image/png'. Returns: If mapping is `None`, the possibly sliced values as a nested list of values or its mapped format. A `list` of nested `list` of values, If mapping is not `None`, the format of the return value will depend on the mapping.
f7960:c3:m8
def query_source_file_paths(self):
return self._source_manager.get_paths()<EOL>
Query the source files involved in the current debugged TF program. Returns: A `list` of file paths. The files that belong to the TensorFlow Python library itself are *not* included.
f7960:c3:m9
def query_source_file_content(self, file_path):
return list(self._source_manager.get_content(file_path))<EOL>
Query the content of a given source file. # TODO(cais): Allow query only a range of the source lines. Returns: The source lines as a list of `str`.
f7960:c3:m10
def query_op_traceback(self, op_name):
return self._source_manager.get_op_traceback(op_name)<EOL>
Query the tracebacks of ops in a TensorFlow graph. Returns: TODO(cais):
f7960:c3:m11
def query_file_tracebacks(self, file_path):
return self._source_manager.get_file_tracebacks(file_path)<EOL>
Query the lists of ops created at lines of a given source file. Args: file_path: Path to the source file to get the tracebacks for. Returns: A `dict` mapping line number in the specified source file to a list of 2-tuples: `(op_name, stack_position)`. `op_name` is the name of the name of the op whose creation traceback includes the line. `stack_position` is the position of the line in the op's creation traceback, represented as a 0-based integer.
f7960:c3:m12
def dispose(self):
self._tensor_store.dispose()<EOL>
Disposes of this object. Call only after this is done being used.
f7960:c3:m13
def process_raw_trace(raw_trace):
trace = trace_events_pb2.Trace()<EOL>trace.ParseFromString(raw_trace)<EOL>return '<STR_LIT>'.join(trace_events_json.TraceEventsJsonStream(trace))<EOL>
Processes raw trace data and returns the UI data.
f7961:m0
def __init__(self, context):
self.logdir = context.logdir<EOL>self.multiplexer = context.multiplexer<EOL>self.plugin_logdir = plugin_asset_util.PluginDirectory(<EOL>self.logdir, PLUGIN_NAME)<EOL>self.stub = None<EOL>self.master_tpu_unsecure_channel = context.flags.master_tpu_unsecure_channel<EOL>self._is_active = False<EOL>self._is_active_lock = threading.Lock()<EOL>
Constructs a profiler plugin for TensorBoard. This plugin adds handlers for performance-related frontends. Args: context: A base_plugin.TBContext instance.
f7961:c0:m0
def is_active(self):
<EOL>if not self._is_active and self._is_active_lock.acquire(False):<EOL><INDENT>if self._is_active:<EOL><INDENT>self._is_active_lock.release()<EOL><DEDENT>else:<EOL><INDENT>def compute_is_active():<EOL><INDENT>self._is_active = any(self.generate_run_to_tools())<EOL>self._is_active_lock.release()<EOL><DEDENT>new_thread = threading.Thread(<EOL>target=compute_is_active,<EOL>name='<STR_LIT>')<EOL>new_thread.start()<EOL><DEDENT><DEDENT>return self._is_active<EOL>
Whether this plugin is active and has any profile data to show. Detecting profile data is expensive, so this process runs asynchronously and the value reported by this method is the cached value and may be stale. Returns: Whether any run has profile data.
f7961:c0:m1
def _run_dir(self, run):
run = run.rstrip('<STR_LIT:/>')<EOL>if '<STR_LIT:/>' not in run:<EOL><INDENT>run = '<STR_LIT>' + run<EOL><DEDENT>tb_run_name, _, profile_run_name = run.rpartition('<STR_LIT:/>')<EOL>tb_run_directory = self.multiplexer.RunPaths().get(tb_run_name)<EOL>if tb_run_directory is None:<EOL><INDENT>if tb_run_name == '<STR_LIT:.>' and tf.io.gfile.isdir(self.logdir):<EOL><INDENT>tb_run_directory = self.logdir<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError("<STR_LIT>" % run)<EOL><DEDENT><DEDENT>plugin_directory = plugin_asset_util.PluginDirectory(<EOL>tb_run_directory, PLUGIN_NAME)<EOL>return os.path.join(plugin_directory, profile_run_name)<EOL>
Helper that maps a frontend run name to a profile "run" directory. The frontend run name consists of the TensorBoard run name (aka the relative path from the logdir root to the directory containing the data) path-joined to the Profile plugin's "run" concept (which is a subdirectory of the plugins/profile directory representing an individual run of the tool), with the special case that TensorBoard run is the logdir root (which is the run named '.') then only the Profile plugin "run" name is used, for backwards compatibility. To convert back to the actual run directory, we apply the following transformation: - If the run name doesn't contain '/', prepend './' - Split on the rightmost instance of '/' - Assume the left side is a TensorBoard run name and map it to a directory path using EventMultiplexer.RunPaths(), then map that to the profile plugin directory via PluginDirectory() - Assume the right side is a Profile plugin "run" and path-join it to the preceding path to get the final directory Args: run: the frontend run name, as described above, e.g. train/run1. Returns: The resolved directory path, e.g. /logdir/train/plugins/profile/run1.
f7961:c0:m3
def generate_run_to_tools(self):
self.start_grpc_stub_if_necessary()<EOL>plugin_assets = self.multiplexer.PluginAssets(PLUGIN_NAME)<EOL>tb_run_names_to_dirs = self.multiplexer.RunPaths()<EOL>if '<STR_LIT:.>' not in plugin_assets and tf.io.gfile.isdir(self.logdir):<EOL><INDENT>tb_run_names_to_dirs['<STR_LIT:.>'] = self.logdir<EOL>plugin_assets['<STR_LIT:.>'] = plugin_asset_util.ListAssets(<EOL>self.logdir, PLUGIN_NAME)<EOL><DEDENT>for tb_run_name, profile_runs in six.iteritems(plugin_assets):<EOL><INDENT>tb_run_dir = tb_run_names_to_dirs[tb_run_name]<EOL>tb_plugin_dir = plugin_asset_util.PluginDirectory(<EOL>tb_run_dir, PLUGIN_NAME)<EOL>for profile_run in profile_runs:<EOL><INDENT>profile_run = profile_run.rstrip('<STR_LIT:/>')<EOL>if tb_run_name == '<STR_LIT:.>':<EOL><INDENT>frontend_run = profile_run<EOL><DEDENT>else:<EOL><INDENT>frontend_run = '<STR_LIT:/>'.join([tb_run_name, profile_run])<EOL><DEDENT>profile_run_dir = os.path.join(tb_plugin_dir, profile_run)<EOL>if tf.io.gfile.isdir(profile_run_dir):<EOL><INDENT>yield frontend_run, self._get_active_tools(profile_run_dir)<EOL><DEDENT><DEDENT><DEDENT>
Generator for pairs of "run name" and a list of tools for that run. The "run name" here is a "frontend run name" - see _run_dir() for the definition of a "frontend run name" and how it maps to a directory of profile data for a specific profile "run". The profile plugin concept of "run" is different from the normal TensorBoard run; each run in this case represents a single instance of profile data collection, more similar to a "step" of data in typical TensorBoard semantics. These runs reside in subdirectories of the plugins/profile directory within any regular TensorBoard run directory (defined as a subdirectory of the logdir that contains at least one tfevents file) or within the logdir root directory itself (even if it contains no tfevents file and would thus not be considered a normal TensorBoard run, for backwards compatibility). Within those "profile run directories", there are files in the directory that correspond to different profiling tools. The file that contains profile for a specific tool "x" will have a suffix name TOOLS["x"]. Example: logs/ plugins/ profile/ run1/ hostA.trace train/ events.out.tfevents.foo plugins/ profile/ run1/ hostA.trace hostB.trace run2/ hostA.trace validation/ events.out.tfevents.foo plugins/ profile/ run1/ hostA.trace Yields: A sequence of tuples mapping "frontend run names" to lists of tool names available for those runs. For the above example, this would be: ("run1", ["trace_viewer"]) ("train/run1", ["trace_viewer"]) ("train/run2", ["trace_viewer"]) ("validation/run1", ["trace_viewer"])
f7961:c0:m4
def host_impl(self, run, tool):
hosts = {}<EOL>run_dir = self._run_dir(run)<EOL>if not run_dir:<EOL><INDENT>logger.warn("<STR_LIT>", run)<EOL>return hosts<EOL><DEDENT>tool_pattern = '<STR_LIT:*>' + TOOLS[tool]<EOL>try:<EOL><INDENT>files = tf.io.gfile.glob(os.path.join(run_dir, tool_pattern))<EOL>hosts = [os.path.basename(f).replace(TOOLS[tool], '<STR_LIT>') for f in files]<EOL><DEDENT>except tf.errors.OpError as e:<EOL><INDENT>logger.warn("<STR_LIT>",<EOL>run_dir, e)<EOL><DEDENT>return hosts<EOL>
Returns available hosts for the run and tool in the log directory. In the plugin log directory, each directory contains profile data for a single run (identified by the directory name), and files in the run directory contains data for different tools and hosts. The file that contains profile for a specific tool "x" will have a prefix name TOOLS["x"]. Example: log/ run1/ plugins/ profile/ host1.trace host2.trace run2/ plugins/ profile/ host1.trace host2.trace Returns: A list of host names e.g. {"host1", "host2", "host3"} for the example.
f7961:c0:m7
def data_impl(self, request):
run = request.args.get('<STR_LIT>')<EOL>tool = request.args.get('<STR_LIT>')<EOL>host = request.args.get('<STR_LIT:host>')<EOL>run_dir = self._run_dir(run)<EOL>profile_run = os.path.basename(run_dir)<EOL>if tool not in TOOLS:<EOL><INDENT>return None<EOL><DEDENT>self.start_grpc_stub_if_necessary()<EOL>if tool == '<STR_LIT>' and self.stub is not None:<EOL><INDENT>from tensorflow.contrib.tpu.profiler import tpu_profiler_analysis_pb2<EOL>grpc_request = tpu_profiler_analysis_pb2.ProfileSessionDataRequest()<EOL>grpc_request.repository_root = run_dir<EOL>grpc_request.session_id = profile_run[:-<NUM_LIT:1>]<EOL>grpc_request.tool_name = '<STR_LIT>'<EOL>grpc_request.host_name = host.rstrip('<STR_LIT:.>')<EOL>grpc_request.parameters['<STR_LIT>'] = request.args.get('<STR_LIT>')<EOL>if request.args.get('<STR_LIT>') is not None:<EOL><INDENT>grpc_request.parameters['<STR_LIT>'] = request.args.get(<EOL>'<STR_LIT>')<EOL><DEDENT>if request.args.get('<STR_LIT>') is not None:<EOL><INDENT>grpc_request.parameters['<STR_LIT>'] = request.args.get('<STR_LIT>')<EOL><DEDENT>grpc_response = self.stub.GetSessionToolData(grpc_request)<EOL>return grpc_response.output<EOL><DEDENT>if tool not in TOOLS:<EOL><INDENT>return None<EOL><DEDENT>tool_name = str(host) + TOOLS[tool]<EOL>asset_path = os.path.join(run_dir, tool_name)<EOL>raw_data = None<EOL>try:<EOL><INDENT>with tf.io.gfile.GFile(asset_path, '<STR_LIT:rb>') as f:<EOL><INDENT>raw_data = f.read()<EOL><DEDENT><DEDENT>except tf.errors.NotFoundError:<EOL><INDENT>logger.warn('<STR_LIT>', asset_path)<EOL><DEDENT>except tf.errors.OpError as e:<EOL><INDENT>logger.warn("<STR_LIT>", asset_path, e)<EOL><DEDENT>if raw_data is None:<EOL><INDENT>return None<EOL><DEDENT>if tool == '<STR_LIT>':<EOL><INDENT>return process_raw_trace(raw_data)<EOL><DEDENT>if tool in _RAW_DATA_TOOLS:<EOL><INDENT>return raw_data<EOL><DEDENT>return None<EOL>
Retrieves and processes the tool data for a run and a host. Args: request: XMLHttpRequest Returns: A string that can be served to the frontend tool or None if tool, run or host is invalid.
f7961:c0:m9
def __init__(self, proto):
self._proto = proto<EOL>
Create an iterable JSON stream over the supplied Trace. Args: proto: a tensorboard.profile.Trace protobuf
f7962:c0:m0
def _events(self):
for did, device in sorted(six.iteritems(self._proto.devices)):<EOL><INDENT>if device.name:<EOL><INDENT>yield dict(<EOL>ph=_TYPE_METADATA,<EOL>pid=did,<EOL>name='<STR_LIT>',<EOL>args=dict(name=device.name))<EOL><DEDENT>yield dict(<EOL>ph=_TYPE_METADATA,<EOL>pid=did,<EOL>name='<STR_LIT>',<EOL>args=dict(sort_index=did))<EOL>for rid, resource in sorted(six.iteritems(device.resources)):<EOL><INDENT>if resource.name:<EOL><INDENT>yield dict(<EOL>ph=_TYPE_METADATA,<EOL>pid=did,<EOL>tid=rid,<EOL>name='<STR_LIT>',<EOL>args=dict(name=resource.name))<EOL><DEDENT>yield dict(<EOL>ph=_TYPE_METADATA,<EOL>pid=did,<EOL>tid=rid,<EOL>name='<STR_LIT>',<EOL>args=dict(sort_index=rid))<EOL><DEDENT><DEDENT>for event in self._proto.trace_events:<EOL><INDENT>yield self._event(event)<EOL><DEDENT>
Iterator over all catapult trace events, as python values.
f7962:c0:m1
def _event(self, event):
result = dict(<EOL>pid=event.device_id,<EOL>tid=event.resource_id,<EOL>name=event.name,<EOL>ts=event.timestamp_ps / <NUM_LIT>)<EOL>if event.duration_ps:<EOL><INDENT>result['<STR_LIT>'] = _TYPE_COMPLETE<EOL>result['<STR_LIT>'] = event.duration_ps / <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>result['<STR_LIT>'] = _TYPE_INSTANT<EOL>result['<STR_LIT:s>'] = _SCOPE_THREAD<EOL><DEDENT>for key in dict(event.args):<EOL><INDENT>if '<STR_LIT:args>' not in result:<EOL><INDENT>result['<STR_LIT:args>'] = {}<EOL><DEDENT>result['<STR_LIT:args>'][key] = event.args[key]<EOL><DEDENT>return result<EOL>
Converts a TraceEvent proto into a catapult trace event python value.
f7962:c0:m2
def __iter__(self):
yield '<STR_LIT>'<EOL>yield '<STR_LIT>'<EOL>for event in self._events():<EOL><INDENT>yield json.dumps(event)<EOL>yield '<STR_LIT>'<EOL><DEDENT>yield '<STR_LIT>'<EOL>
Returns an iterator of string chunks of a complete JSON document.
f7962:c0:m3
def load(self, context):
try:<EOL><INDENT>import tensorflow<EOL>from tensorflow.python.eager import profiler_client<EOL><DEDENT>except ImportError:<EOL><INDENT>return<EOL><DEDENT>from tensorboard.plugins.profile.profile_plugin import ProfilePlugin<EOL>return ProfilePlugin(context)<EOL>
Returns the plugin, if possible. Args: context: The TBContext flags. Returns: A ProfilePlugin instance or None if it couldn't be loaded.
f7963:c0:m1
def dump_data(logdir):
<EOL>write_empty_event_file(logdir)<EOL>plugin_logdir = plugin_asset_util.PluginDirectory(<EOL>logdir, profile_plugin.ProfilePlugin.plugin_name)<EOL>_maybe_create_directory(plugin_logdir)<EOL>for run in profile_demo_data.RUNS:<EOL><INDENT>run_dir = os.path.join(plugin_logdir, run)<EOL>_maybe_create_directory(run_dir)<EOL>if run in profile_demo_data.TRACES:<EOL><INDENT>with open(os.path.join(run_dir, '<STR_LIT>'), '<STR_LIT:w>') as f:<EOL><INDENT>proto = trace_events_pb2.Trace()<EOL>text_format.Merge(profile_demo_data.TRACES[run], proto)<EOL>f.write(proto.SerializeToString())<EOL><DEDENT><DEDENT>if run not in profile_demo_data.TRACE_ONLY:<EOL><INDENT>shutil.copyfile('<STR_LIT>',<EOL>os.path.join(run_dir, '<STR_LIT>'))<EOL>shutil.copyfile(<EOL>'<STR_LIT>',<EOL>os.path.join(run_dir, '<STR_LIT>'))<EOL>shutil.copyfile(<EOL>'<STR_LIT>',<EOL>os.path.join(run_dir, '<STR_LIT>'))<EOL>shutil.copyfile(<EOL>'<STR_LIT>',<EOL>os.path.join(run_dir, '<STR_LIT>'))<EOL><DEDENT><DEDENT>run_dir = os.path.join(plugin_logdir, '<STR_LIT>')<EOL>_maybe_create_directory(run_dir)<EOL>with open(os.path.join(run_dir, '<STR_LIT>'), '<STR_LIT:w>') as f:<EOL><INDENT>f.write('<STR_LIT>')<EOL><DEDENT>
Dumps plugin data to the log directory.
f7965:m2
def op(name,<EOL>data,<EOL>display_name=None,<EOL>description=None,<EOL>collections=None):
<EOL>import tensorflow.compat.v1 as tf<EOL>if display_name is None:<EOL><INDENT>display_name = name<EOL><DEDENT>summary_metadata = metadata.create_summary_metadata(<EOL>display_name=display_name, description=description)<EOL>with tf.name_scope(name):<EOL><INDENT>with tf.control_dependencies([tf.assert_scalar(data)]):<EOL><INDENT>return tf.summary.tensor_summary(name='<STR_LIT>',<EOL>tensor=tf.cast(data, tf.float32),<EOL>collections=collections,<EOL>summary_metadata=summary_metadata)<EOL><DEDENT><DEDENT>
Create a legacy scalar summary op. Arguments: name: A unique name for the generated summary node. data: A real numeric rank-0 `Tensor`. Must have `dtype` castable to `float32`. display_name: Optional name for this summary in TensorBoard, as a constant `str`. Defaults to `name`. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[Graph Keys.SUMMARIES]`. Returns: A TensorFlow summary op.
f7969:m0
def pb(name, data, display_name=None, description=None):
<EOL>import tensorflow.compat.v1 as tf<EOL>data = np.array(data)<EOL>if data.shape != ():<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>% data.shape)<EOL><DEDENT>if data.dtype.kind not in ('<STR_LIT:b>', '<STR_LIT:i>', '<STR_LIT:u>', '<STR_LIT:f>'): <EOL><INDENT>raise ValueError('<STR_LIT>' % data.dtype.name)<EOL><DEDENT>tensor = tf.make_tensor_proto(data.astype(np.float32))<EOL>if display_name is None:<EOL><INDENT>display_name = name<EOL><DEDENT>summary_metadata = metadata.create_summary_metadata(<EOL>display_name=display_name, description=description)<EOL>tf_summary_metadata = tf.SummaryMetadata.FromString(<EOL>summary_metadata.SerializeToString())<EOL>summary = tf.Summary()<EOL>summary.value.add(tag='<STR_LIT>' % name,<EOL>metadata=tf_summary_metadata,<EOL>tensor=tensor)<EOL>return summary<EOL>
Create a legacy scalar summary protobuf. Arguments: name: A unique name for the generated summary, including any desired name scopes. data: A rank-0 `np.array` or array-like form (so raw `int`s and `float`s are fine, too). display_name: Optional name for this summary in TensorBoard, as a `str`. Defaults to `name`. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Returns: A `tf.Summary` protobuf object.
f7969:m1
def run(logdir, run_name,<EOL>initial_temperature, ambient_temperature, heat_coefficient):
tf.compat.v1.reset_default_graph()<EOL>tf.compat.v1.set_random_seed(<NUM_LIT:0>)<EOL>with tf.name_scope('<STR_LIT>'):<EOL><INDENT>temperature = tf.Variable(tf.constant(initial_temperature),<EOL>name='<STR_LIT>')<EOL>summary.op('<STR_LIT>', temperature,<EOL>display_name='<STR_LIT>',<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>ambient_difference = temperature - ambient_temperature<EOL>summary.op('<STR_LIT>', ambient_difference,<EOL>display_name='<STR_LIT>',<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>noise = <NUM_LIT:50> * tf.random.normal([])<EOL>delta = -heat_coefficient * (ambient_difference + noise)<EOL>summary.op('<STR_LIT>', delta,<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>summ = tf.compat.v1.summary.merge_all()<EOL>with tf.control_dependencies([summ]):<EOL><INDENT>update_step = temperature.assign_add(delta)<EOL><DEDENT>sess = tf.compat.v1.Session()<EOL>writer = tf.summary.FileWriter(os.path.join(logdir, run_name))<EOL>writer.add_graph(sess.graph)<EOL>sess.run(tf.compat.v1.global_variables_initializer())<EOL>for step in xrange(STEPS):<EOL><INDENT>(s, _) = sess.run([summ, update_step])<EOL>writer.add_summary(s, global_step=step)<EOL><DEDENT>writer.close()<EOL>
Run a temperature simulation. This will simulate an object at temperature `initial_temperature` sitting at rest in a large room at temperature `ambient_temperature`. The object has some intrinsic `heat_coefficient`, which indicates how much thermal conductivity it has: for instance, metals have high thermal conductivity, while the thermal conductivity of water is low. Over time, the object's temperature will adjust to match the temperature of its environment. We'll track the object's temperature, how far it is from the room's temperature, and how much it changes at each time step. Arguments: logdir: the top-level directory into which to write summary data run_name: the name of this run; will be created as a subdirectory under logdir initial_temperature: float; the object's initial temperature ambient_temperature: float; the temperature of the enclosing room heat_coefficient: float; a measure of the object's thermal conductivity
f7970:m0
def run_all(logdir, verbose=False):
for initial_temperature in [<NUM_LIT>, <NUM_LIT>, <NUM_LIT>]:<EOL><INDENT>for final_temperature in [<NUM_LIT>, <NUM_LIT>, <NUM_LIT>]:<EOL><INDENT>for heat_coefficient in [<NUM_LIT>, <NUM_LIT>]:<EOL><INDENT>run_name = '<STR_LIT>' % (<EOL>initial_temperature, final_temperature, heat_coefficient)<EOL>if verbose:<EOL><INDENT>print('<STR_LIT>' % run_name)<EOL><DEDENT>run(logdir, run_name,<EOL>initial_temperature, final_temperature, heat_coefficient)<EOL><DEDENT><DEDENT><DEDENT>
Run simulations on a reasonable set of parameters. Arguments: logdir: the directory into which to store all the runs' data verbose: if true, print out each run's name as it begins
f7970:m1
def __init__(self, context):
self._multiplexer = context.multiplexer<EOL>self._db_connection_provider = context.db_connection_provider<EOL>
Instantiates ScalarsPlugin via TensorBoard core. Args: context: A base_plugin.TBContext instance.
f7971:c1:m0
def is_active(self):
if self._db_connection_provider:<EOL><INDENT>db = self._db_connection_provider()<EOL>cursor = db.execute(
The scalars plugin is active iff any run has at least one scalar tag.
f7971:c1:m2
def index_impl(self):
if self._db_connection_provider:<EOL><INDENT>db = self._db_connection_provider()<EOL>cursor = db.execute(
Return {runName: {tagName: {displayName: ..., description: ...}}}.
f7971:c1:m3
def scalars_impl(self, tag, run, experiment, output_format):
if self._db_connection_provider:<EOL><INDENT>db = self._db_connection_provider()<EOL>cursor = db.execute(
Result of the form `(body, mime_type)`.
f7971:c1:m4