Dataset columns (string length ranges):
signature: 8 to 3.44k
body: 0 to 1.41M
docstring: 1 to 122k
id: 5 to 17
def handle_error(self, request, client_address):
del request <EOL>exc_info = sys.exc_info()<EOL>e = exc_info[<NUM_LIT:1>]<EOL>if isinstance(e, IOError) and e.errno == errno.EPIPE:<EOL><INDENT>logger.warn('<STR_LIT>' % str(client_address))<EOL><DEDENT>else:<EOL><INDENT>logger.error('<STR_LIT>', exc_info=exc_info)<EOL><DEDENT>
Override to get rid of noisy EPIPE errors.
f7897:c4:m3
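For reference, a minimal de-obfuscated sketch of the pattern this record captures, assuming only the standard-library `socketserver`, `errno`, and `logging` modules; the log messages and the class name `QuietServer` are illustrative stand-ins for the elided literals, not the original strings.

```python
import errno
import logging
import socketserver
import sys

logger = logging.getLogger(__name__)


class QuietServer(socketserver.TCPServer):
    """Server subclass whose handle_error downgrades EPIPE to a warning."""

    def handle_error(self, request, client_address):
        del request  # unused; the active exception comes from sys.exc_info()
        exc_info = sys.exc_info()
        e = exc_info[1]
        if isinstance(e, IOError) and e.errno == errno.EPIPE:
            # The client disconnected mid-response; not worth a stack trace.
            logger.warning('EPIPE caused by %s', client_address)
        else:
            logger.error('Error handling request', exc_info=exc_info)
```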
def __init__(self, node_def, op, message, error_code):
super(OpError, self).__init__()<EOL>self._message = message<EOL>self._node_def = node_def<EOL>self._op = op<EOL>self._error_code = error_code<EOL>
Creates a new `OpError` indicating that a particular op failed. Args: node_def: The `node_def_pb2.NodeDef` proto representing the op that failed, if known; otherwise None. op: The `ops.Operation` that failed, if known; otherwise None. message: The message string describing the failure. error_code: The `error_codes.Code` describing the error.
f7900:c0:m0
@property<EOL><INDENT>def message(self):<DEDENT>
return self._message<EOL>
The error message that describes the error.
f7900:c0:m1
@property<EOL><INDENT>def op(self):<DEDENT>
return self._op<EOL>
The operation that failed, if known. *N.B.* If the failed op was synthesized at runtime, e.g. a `Send` or `Recv` op, there will be no corresponding @{tf.Operation} object. In that case, this will return `None`, and you should instead use the @{tf.OpError.node_def} to discover information about the op. Returns: The `Operation` that failed, or None.
f7900:c0:m2
@property<EOL><INDENT>def error_code(self):<DEDENT>
return self._error_code<EOL>
The integer error code that describes the error.
f7900:c0:m3
@property<EOL><INDENT>def node_def(self):<DEDENT>
return self._node_def<EOL>
The `NodeDef` proto representing the op that failed.
f7900:c0:m4
def __init__(self, node_def, op, message):
super(CancelledError, self).__init__(node_def, op, message, CANCELLED)<EOL>
Creates a `CancelledError`.
f7900:c1:m0
def __init__(self, node_def, op, message, error_code=UNKNOWN):
super(UnknownError, self).__init__(node_def, op, message, error_code)<EOL>
Creates an `UnknownError`.
f7900:c2:m0
def __init__(self, node_def, op, message):
super(InvalidArgumentError, self).__init__(<EOL>node_def, op, message, INVALID_ARGUMENT<EOL>)<EOL>
Creates an `InvalidArgumentError`.
f7900:c3:m0
def __init__(self, node_def, op, message):
super(DeadlineExceededError, self).__init__(<EOL>node_def, op, message, DEADLINE_EXCEEDED<EOL>)<EOL>
Creates a `DeadlineExceededError`.
f7900:c4:m0
def __init__(self, node_def, op, message):
super(NotFoundError, self).__init__(node_def, op, message, NOT_FOUND)<EOL>
Creates a `NotFoundError`.
f7900:c5:m0
def __init__(self, node_def, op, message):
super(AlreadyExistsError, self).__init__(node_def, op, message, ALREADY_EXISTS)<EOL>
Creates an `AlreadyExistsError`.
f7900:c6:m0
def __init__(self, node_def, op, message):
super(PermissionDeniedError, self).__init__(<EOL>node_def, op, message, PERMISSION_DENIED<EOL>)<EOL>
Creates a `PermissionDeniedError`.
f7900:c7:m0
def __init__(self, node_def, op, message):
super(UnauthenticatedError, self).__init__(<EOL>node_def, op, message, UNAUTHENTICATED<EOL>)<EOL>
Creates an `UnauthenticatedError`.
f7900:c8:m0
def __init__(self, node_def, op, message):
super(ResourceExhaustedError, self).__init__(<EOL>node_def, op, message, RESOURCE_EXHAUSTED<EOL>)<EOL>
Creates a `ResourceExhaustedError`.
f7900:c9:m0
def __init__(self, node_def, op, message):
super(FailedPreconditionError, self).__init__(<EOL>node_def, op, message, FAILED_PRECONDITION<EOL>)<EOL>
Creates a `FailedPreconditionError`.
f7900:c10:m0
def __init__(self, node_def, op, message):
super(AbortedError, self).__init__(node_def, op, message, ABORTED)<EOL>
Creates an `AbortedError`.
f7900:c11:m0
def __init__(self, node_def, op, message):
super(OutOfRangeError, self).__init__(node_def, op, message, OUT_OF_RANGE)<EOL>
Creates an `OutOfRangeError`.
f7900:c12:m0
def __init__(self, node_def, op, message):
super(UnimplementedError, self).__init__(node_def, op, message, UNIMPLEMENTED)<EOL>
Creates an `UnimplementedError`.
f7900:c13:m0
def __init__(self, node_def, op, message):
super(InternalError, self).__init__(node_def, op, message, INTERNAL)<EOL>
Creates an `InternalError`.
f7900:c14:m0
def __init__(self, node_def, op, message):
super(UnavailableError, self).__init__(node_def, op, message, UNAVAILABLE)<EOL>
Creates an `UnavailableError`.
f7900:c15:m0
def __init__(self, node_def, op, message):
super(DataLossError, self).__init__(node_def, op, message, DATA_LOSS)<EOL>
Creates a `DataLossError`.
f7900:c16:m0
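Taken together, the f7900 records above describe one base exception plus a family of thin subclasses that each bind a fixed error code. A minimal sketch under that reading; the `CANCELLED` constant is a hypothetical stand-in for the elided error-code enum value.

```python
CANCELLED = 1  # hypothetical stand-in for the real error_codes.Code value


class OpError(Exception):
    """Base class for op-failure errors, mirroring f7900:c0 above."""

    def __init__(self, node_def, op, message, error_code):
        super(OpError, self).__init__()
        self._message = message
        self._node_def = node_def
        self._op = op
        self._error_code = error_code

    @property
    def message(self):
        return self._message

    @property
    def error_code(self):
        return self._error_code


class CancelledError(OpError):
    """Each subclass simply pins the error code (here: CANCELLED)."""

    def __init__(self, node_def, op, message):
        super(CancelledError, self).__init__(node_def, op, message, CANCELLED)
```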
def as_bytes(bytes_or_text, encoding="<STR_LIT:utf-8>"):
if isinstance(bytes_or_text, _six.text_type):<EOL><INDENT>return bytes_or_text.encode(encoding)<EOL><DEDENT>elif isinstance(bytes_or_text, bytes):<EOL><INDENT>return bytes_or_text<EOL><DEDENT>else:<EOL><INDENT>raise TypeError("<STR_LIT>" % (bytes_or_text,))<EOL><DEDENT>
Converts either bytes or unicode to `bytes`, using utf-8 encoding for text. Args: bytes_or_text: A `bytes`, `str`, or `unicode` object. encoding: A string indicating the charset for encoding unicode. Returns: A `bytes` object. Raises: TypeError: If `bytes_or_text` is not a binary or unicode string.
f7902:m0
def as_text(bytes_or_text, encoding="<STR_LIT:utf-8>"):
if isinstance(bytes_or_text, _six.text_type):<EOL><INDENT>return bytes_or_text<EOL><DEDENT>elif isinstance(bytes_or_text, bytes):<EOL><INDENT>return bytes_or_text.decode(encoding)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError("<STR_LIT>" % bytes_or_text)<EOL><DEDENT>
Returns the given argument as a unicode string. Args: bytes_or_text: A `bytes`, `str`, or `unicode` object. encoding: A string indicating the charset for decoding unicode. Returns: A `unicode` (Python 2) or `str` (Python 3) object. Raises: TypeError: If `bytes_or_text` is not a binary or unicode string.
f7902:m1
def as_str_any(value):
if isinstance(value, bytes):<EOL><INDENT>return as_str(value)<EOL><DEDENT>else:<EOL><INDENT>return str(value)<EOL><DEDENT>
Converts to `str` as `str(value)`, but uses `as_str` for `bytes`. Args: value: An object that can be converted to `str`. Returns: A `str` object.
f7902:m2
def path_to_str(path):
if hasattr(path, "<STR_LIT>"):<EOL><INDENT>path = as_str_any(path.__fspath__())<EOL><DEDENT>return path<EOL>
Returns the file system path representation of a `PathLike` object, or the path unchanged otherwise. Args: path: An object that can be converted to a path representation. Returns: A `str` object.
f7902:m3
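The four f7902 helpers above form a small compatibility layer; a runnable Python 3 sketch, with the elided error messages replaced by illustrative ones and `as_str` treated as an alias of `as_text`:

```python
def as_bytes(bytes_or_text, encoding="utf-8"):
    """Encodes str to bytes; passes bytes through unchanged."""
    if isinstance(bytes_or_text, str):
        return bytes_or_text.encode(encoding)
    elif isinstance(bytes_or_text, bytes):
        return bytes_or_text
    raise TypeError("Expected binary or unicode string, got %r" % (bytes_or_text,))


def as_text(bytes_or_text, encoding="utf-8"):
    """Decodes bytes to str; passes str through unchanged."""
    if isinstance(bytes_or_text, str):
        return bytes_or_text
    elif isinstance(bytes_or_text, bytes):
        return bytes_or_text.decode(encoding)
    raise TypeError("Expected binary or unicode string, got %r" % (bytes_or_text,))


def as_str_any(value):
    """str(value), except bytes are decoded rather than repr'd."""
    if isinstance(value, bytes):
        return as_text(value)  # the record calls as_str, an alias of as_text
    return str(value)


def path_to_str(path):
    """Unwraps os.PathLike objects to str; other values pass through."""
    if hasattr(path, "__fspath__"):
        path = as_str_any(path.__fspath__())
    return path
```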
def _CreateDeepDirectoryStructure(self, top_directory):
<EOL>directory_names = (<EOL>'<STR_LIT:foo>',<EOL>'<STR_LIT:bar>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>)<EOL>for directory_name in directory_names:<EOL><INDENT>os.makedirs(os.path.join(top_directory, directory_name))<EOL><DEDENT>file_names = (<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>)<EOL>for file_name in file_names:<EOL><INDENT>open(os.path.join(top_directory, file_name), '<STR_LIT:w>').close()<EOL><DEDENT>
Creates a reasonably deep structure of subdirectories with files. Args: top_directory: The absolute path of the top level directory in which to create the directory structure.
f7903:c0:m11
def _CompareFilesPerSubdirectory(self, expected, gotten):
expected_directory_to_files = {<EOL>result[<NUM_LIT:0>]: list(result[<NUM_LIT:1>]) for result in expected}<EOL>gotten_directory_to_files = {<EOL>result[<NUM_LIT:0>]: list(result[<NUM_LIT:2>]) for result in gotten}<EOL>six.assertCountEqual(<EOL>self,<EOL>expected_directory_to_files.keys(),<EOL>gotten_directory_to_files.keys())<EOL>for subdir, expected_listing in expected_directory_to_files.items():<EOL><INDENT>gotten_listing = gotten_directory_to_files[subdir]<EOL>six.assertCountEqual(<EOL>self,<EOL>expected_listing,<EOL>gotten_listing,<EOL>'<STR_LIT>' % (<EOL>subdir, expected_listing, gotten_listing))<EOL><DEDENT>
Compares iterables of (subdirectory path, list of absolute paths) tuples. Args: expected: The expected iterable of 2-tuples. gotten: The iterable of 2-tuples actually obtained.
f7903:c0:m12
def _PathJoin(self, top_directory, sub_path):
return top_directory + "<STR_LIT:/>" + sub_path<EOL>
Joins directory and path with a slash rather than the local separator.
f7904:c0:m9
def _CompareFilesPerSubdirectory(self, expected, gotten):
expected_directory_to_files = {<EOL>result[<NUM_LIT:0>]: list(result[<NUM_LIT:1>]) for result in expected}<EOL>gotten_directory_to_files = {<EOL>result[<NUM_LIT:0>]: list(result[<NUM_LIT:2>]) for result in gotten}<EOL>six.assertCountEqual(<EOL>self,<EOL>expected_directory_to_files.keys(),<EOL>gotten_directory_to_files.keys())<EOL>for subdir, expected_listing in expected_directory_to_files.items():<EOL><INDENT>gotten_listing = gotten_directory_to_files[subdir]<EOL>six.assertCountEqual(<EOL>self,<EOL>expected_listing,<EOL>gotten_listing,<EOL>'<STR_LIT>' % (<EOL>subdir, expected_listing, gotten_listing))<EOL><DEDENT>
Compares iterables of (subdirectory path, list of absolute paths) tuples. Args: expected: The expected iterable of 2-tuples. gotten: The iterable of 2-tuples actually obtained.
f7904:c0:m11
def get_filesystem(filename):
filename = compat.as_str_any(filename)<EOL>prefix = "<STR_LIT>"<EOL>index = filename.find("<STR_LIT>")<EOL>if index >= <NUM_LIT:0>:<EOL><INDENT>prefix = filename[:index]<EOL><DEDENT>fs = _REGISTERED_FILESYSTEMS.get(prefix, None)<EOL>if fs is None:<EOL><INDENT>raise ValueError("<STR_LIT>" % prefix)<EOL><DEDENT>return fs<EOL>
Return the registered filesystem for the given file.
f7905:m1
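The dispatch in `get_filesystem` keys a registry by URL scheme. A self-contained sketch of that pattern; `_REGISTERED_FILESYSTEMS`, the `register_filesystem` helper, and the `"://"` separator are assumptions consistent with the S3 code later in this file, not verbatim from the record.

```python
_REGISTERED_FILESYSTEMS = {}


def register_filesystem(prefix, filesystem):
    """Registers a filesystem object under a scheme such as '' (local) or 's3'."""
    _REGISTERED_FILESYSTEMS[prefix] = filesystem


def get_filesystem(filename):
    """Returns the registered filesystem for the given file path or URL."""
    filename = str(filename)
    prefix = ""
    index = filename.find("://")
    if index >= 0:
        prefix = filename[:index]
    fs = _REGISTERED_FILESYSTEMS.get(prefix, None)
    if fs is None:
        raise ValueError("No recognized filesystem for prefix %r" % prefix)
    return fs
```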
def exists(filename):
return get_filesystem(filename).exists(filename)<EOL>
Determines whether a path exists or not. Args: filename: string, a path Returns: True if the path exists, whether it's a file or a directory. False if the path does not exist and there are no filesystem errors. Raises: errors.OpError: Propagates any errors reported by the FileSystem API.
f7905:m2
def glob(filename):
return get_filesystem(filename).glob(filename)<EOL>
Returns a list of files that match the given pattern(s). Args: filename: string or iterable of strings. The glob pattern(s). Returns: A list of strings containing filenames that match the given pattern(s). Raises: errors.OpError: If there are filesystem / directory listing errors.
f7905:m3
def isdir(dirname):
return get_filesystem(dirname).isdir(dirname)<EOL>
Returns whether the path is a directory or not. Args: dirname: string, path to a potential directory Returns: True, if the path is a directory; False otherwise
f7905:m4
def listdir(dirname):
return get_filesystem(dirname).listdir(dirname)<EOL>
Returns a list of entries contained within a directory. The list is in arbitrary order. It does not contain the special entries "." and "..". Args: dirname: string, path to a directory Returns: [filename1, filename2, ... filenameN] as strings Raises: errors.NotFoundError if directory doesn't exist
f7905:m5
def walk(top, topdown=True, onerror=None):
top = compat.as_str_any(top)<EOL>fs = get_filesystem(top)<EOL>try:<EOL><INDENT>listing = listdir(top)<EOL><DEDENT>except errors.NotFoundError as err:<EOL><INDENT>if onerror:<EOL><INDENT>onerror(err)<EOL><DEDENT>else:<EOL><INDENT>return<EOL><DEDENT><DEDENT>files = []<EOL>subdirs = []<EOL>for item in listing:<EOL><INDENT>full_path = fs.join(top, compat.as_str_any(item))<EOL>if isdir(full_path):<EOL><INDENT>subdirs.append(item)<EOL><DEDENT>else:<EOL><INDENT>files.append(item)<EOL><DEDENT><DEDENT>here = (top, subdirs, files)<EOL>if topdown:<EOL><INDENT>yield here<EOL><DEDENT>for subdir in subdirs:<EOL><INDENT>joined_subdir = fs.join(top, compat.as_str_any(subdir))<EOL>for subitem in walk(joined_subdir, topdown, onerror=onerror):<EOL><INDENT>yield subitem<EOL><DEDENT><DEDENT>if not topdown:<EOL><INDENT>yield here<EOL><DEDENT>
Recursive directory tree generator for directories. Args: top: string, a directory name topdown: bool, traverse in pre-order if True, post-order if False. onerror: optional handler for errors. Should be a function; it will be called with the error as its argument. Rethrowing the error aborts the walk. Errors that happen while listing directories are ignored. Yields: Each yield is a 3-tuple: the pathname of a directory, followed by lists of all its subdirectories and leaf files. (dirname, [subdirname, subdirname, ...], [filename, filename, ...]) as strings
f7905:m6
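A small usage sketch for the `walk` generator above; the helper name `find_files_with_suffix` is illustrative and assumes `walk` is in scope.

```python
def find_files_with_suffix(top, suffix):
    """Collects every file path under `top` whose name ends with `suffix`."""
    matches = []
    for dirname, _subdirs, files in walk(top):
        for name in files:
            if name.endswith(suffix):
                matches.append(dirname.rstrip("/") + "/" + name)
    return matches
```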
def stat(filename):
return get_filesystem(filename).stat(filename)<EOL>
Returns file statistics for a given path. Args: filename: string, path to a file Returns: FileStatistics struct that contains information about the path Raises: errors.OpError: If the operation fails.
f7905:m7
def exists(self, filename):
return os.path.exists(compat.as_bytes(filename))<EOL>
Determines whether a path exists or not.
f7905:c0:m0
def join(self, path, *paths):
return os.path.join(path, *paths)<EOL>
Join paths with path delimiter.
f7905:c0:m1
def read(self, filename, binary_mode=False, size=None, offset=None):
mode = "<STR_LIT:rb>" if binary_mode else "<STR_LIT:r>"<EOL>with io.open(filename, mode) as f:<EOL><INDENT>if offset is not None:<EOL><INDENT>f.seek(offset)<EOL><DEDENT>if size is not None:<EOL><INDENT>return f.read(size)<EOL><DEDENT>else:<EOL><INDENT>return f.read()<EOL><DEDENT><DEDENT>
Reads contents of a file to a string. Args: filename: string, a path binary_mode: bool, read as binary if True, otherwise text size: int, number of bytes or characters to read, otherwise read all the contents of the file from the offset offset: int, offset into file to read from, otherwise read from the very beginning Returns: Subset of the contents of the file as a string or bytes.
f7905:c0:m2
def glob(self, filename):
if isinstance(filename, six.string_types):<EOL><INDENT>return [<EOL>compat.as_str_any(matching_filename)<EOL>for matching_filename in py_glob.glob(<EOL>compat.as_bytes(filename))<EOL>]<EOL><DEDENT>else:<EOL><INDENT>return [<EOL>compat.as_str_any(matching_filename)<EOL>for single_filename in filename<EOL>for matching_filename in py_glob.glob(<EOL>compat.as_bytes(single_filename))<EOL>]<EOL><DEDENT>
Returns a list of files that match the given pattern(s).
f7905:c0:m3
def isdir(self, dirname):
return os.path.isdir(compat.as_bytes(dirname))<EOL>
Returns whether the path is a directory or not.
f7905:c0:m4
def listdir(self, dirname):
if not self.isdir(dirname):<EOL><INDENT>raise errors.NotFoundError(None, None, "<STR_LIT>")<EOL><DEDENT>entries = os.listdir(compat.as_str_any(dirname))<EOL>entries = [compat.as_str_any(item) for item in entries]<EOL>return entries<EOL>
Returns a list of entries contained within a directory.
f7905:c0:m5
def stat(self, filename):
<EOL>try:<EOL><INDENT>len = os.stat(compat.as_bytes(filename)).st_size<EOL><DEDENT>except OSError:<EOL><INDENT>raise errors.NotFoundError(None, None, "<STR_LIT>")<EOL><DEDENT>return StatData(len)<EOL>
Returns file statistics for a given path.
f7905:c0:m6
def bucket_and_path(self, url):
url = compat.as_str_any(url)<EOL>if url.startswith("<STR_LIT>"):<EOL><INDENT>url = url[len("<STR_LIT>"):]<EOL><DEDENT>idx = url.index("<STR_LIT:/>")<EOL>bucket = url[:idx]<EOL>path = url[(idx + <NUM_LIT:1>):]<EOL>return bucket, path<EOL>
Split an S3-prefixed URL into bucket and path.
f7905:c1:m1
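The split performed by `bucket_and_path` can be sketched as below; the `"s3://"` prefix is the obvious candidate for the elided literal, though the record does not spell it out.

```python
def bucket_and_path(url):
    """Splits 's3://bucket/some/key' into ('bucket', 'some/key')."""
    url = str(url)
    if url.startswith("s3://"):
        url = url[len("s3://"):]
    idx = url.index("/")
    return url[:idx], url[idx + 1:]


# bucket_and_path("s3://my-bucket/logs/run1/events") ->
#     ("my-bucket", "logs/run1/events")
```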
def exists(self, filename):
client = boto3.client("<STR_LIT>")<EOL>bucket, path = self.bucket_and_path(filename)<EOL>r = client.list_objects(Bucket=bucket, Prefix=path, Delimiter="<STR_LIT:/>")<EOL>if r.get("<STR_LIT>") or r.get("<STR_LIT>"):<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>
Determines whether a path exists or not.
f7905:c1:m2
def join(self, path, *paths):
return "<STR_LIT:/>".join((path,) + paths)<EOL>
Join paths with a slash.
f7905:c1:m3
def read(self, filename, binary_mode=False, size=None, offset=None):
s3 = boto3.resource("<STR_LIT>")<EOL>bucket, path = self.bucket_and_path(filename)<EOL>args = {}<EOL>endpoint = <NUM_LIT:0><EOL>if size is not None or offset is not None:<EOL><INDENT>if offset is None:<EOL><INDENT>offset = <NUM_LIT:0><EOL><DEDENT>endpoint = '<STR_LIT>' if size is None else (offset + size)<EOL>args['<STR_LIT>'] = '<STR_LIT>'.format(offset, endpoint)<EOL><DEDENT>try:<EOL><INDENT>stream = s3.Object(bucket, path).get(**args)['<STR_LIT>'].read()<EOL><DEDENT>except botocore.exceptions.ClientError as exc:<EOL><INDENT>if exc.response['<STR_LIT>']['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>if size is not None:<EOL><INDENT>client = boto3.client("<STR_LIT>")<EOL>obj = client.head_object(Bucket=bucket, Key=path)<EOL>len = obj['<STR_LIT>']<EOL>endpoint = min(len, offset + size)<EOL><DEDENT>if offset == endpoint:<EOL><INDENT>stream = b'<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>args['<STR_LIT>'] = '<STR_LIT>'.format(offset, endpoint)<EOL>stream = s3.Object(bucket, path).get(**args)['<STR_LIT>'].read()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>if binary_mode:<EOL><INDENT>return bytes(stream)<EOL><DEDENT>else:<EOL><INDENT>return stream.decode('<STR_LIT:utf-8>')<EOL><DEDENT>
Reads contents of a file to a string. Args: filename: string, a path binary_mode: bool, read as binary if True, otherwise text size: int, number of bytes or characters to read, otherwise read all the contents of the file from the offset offset: int, offset into file to read from, otherwise read from the very beginning Returns: Subset of the contents of the file as a string or bytes.
f7905:c1:m4
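The ranged read above relies on an HTTP `Range` header. A minimal sketch of that part alone, assuming `boto3` is installed; it omits the record's fallback for ranges that run past the end of the object, and the inclusive end offset is this sketch's choice rather than a transcription.

```python
import boto3


def s3_read_range(bucket, key, offset=0, size=None):
    """Reads at most `size` bytes of an S3 object starting at `offset`."""
    s3 = boto3.resource("s3")
    args = {}
    if size is not None or offset:
        # HTTP Range uses inclusive byte positions; an empty end means "to EOF".
        end = "" if size is None else str(offset + size - 1)
        args["Range"] = "bytes={}-{}".format(offset, end)
    return s3.Object(bucket, key).get(**args)["Body"].read()
```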
def glob(self, filename):
<EOL>star_i = filename.find('<STR_LIT:*>')<EOL>quest_i = filename.find('<STR_LIT:?>')<EOL>if quest_i >= <NUM_LIT:0>:<EOL><INDENT>raise NotImplementedError(<EOL>"<STR_LIT>".format(filename))<EOL><DEDENT>if star_i != len(filename) - <NUM_LIT:1>:<EOL><INDENT>return []<EOL><DEDENT>filename = filename[:-<NUM_LIT:1>]<EOL>client = boto3.client("<STR_LIT>")<EOL>bucket, path = self.bucket_and_path(filename)<EOL>p = client.get_paginator("<STR_LIT>")<EOL>keys = []<EOL>for r in p.paginate(Bucket=bucket, Prefix=path):<EOL><INDENT>for o in r.get("<STR_LIT>", []):<EOL><INDENT>key = o["<STR_LIT>"][len(path):]<EOL>if key: <EOL><INDENT>keys.append(filename + key)<EOL><DEDENT><DEDENT><DEDENT>return keys<EOL>
Returns a list of files that match the given pattern(s).
f7905:c1:m5
def isdir(self, dirname):
client = boto3.client("<STR_LIT>")<EOL>bucket, path = self.bucket_and_path(dirname)<EOL>if not path.endswith("<STR_LIT:/>"):<EOL><INDENT>path += "<STR_LIT:/>" <EOL><DEDENT>r = client.list_objects(Bucket=bucket, Prefix=path, Delimiter="<STR_LIT:/>")<EOL>if r.get("<STR_LIT>") or r.get("<STR_LIT>"):<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>
Returns whether the path is a directory or not.
f7905:c1:m6
def listdir(self, dirname):
client = boto3.client("<STR_LIT>")<EOL>bucket, path = self.bucket_and_path(dirname)<EOL>p = client.get_paginator("<STR_LIT>")<EOL>if not path.endswith("<STR_LIT:/>"):<EOL><INDENT>path += "<STR_LIT:/>" <EOL><DEDENT>keys = []<EOL>for r in p.paginate(Bucket=bucket, Prefix=path, Delimiter="<STR_LIT:/>"):<EOL><INDENT>keys.extend(o["<STR_LIT>"][len(path):-<NUM_LIT:1>] for o in r.get("<STR_LIT>", []))<EOL>for o in r.get("<STR_LIT>", []):<EOL><INDENT>key = o["<STR_LIT>"][len(path):]<EOL>if key: <EOL><INDENT>keys.append(key)<EOL><DEDENT><DEDENT><DEDENT>return keys<EOL>
Returns a list of entries contained within a directory.
f7905:c1:m7
def stat(self, filename):
<EOL>client = boto3.client("<STR_LIT>")<EOL>bucket, path = self.bucket_and_path(filename)<EOL>try:<EOL><INDENT>obj = client.head_object(Bucket=bucket, Key=path)<EOL>return StatData(obj['<STR_LIT>'])<EOL><DEDENT>except botocore.exceptions.ClientError as exc:<EOL><INDENT>if exc.response['<STR_LIT>']['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>raise errors.NotFoundError(None, None, "<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>
Returns file statistics for a given path.
f7905:c1:m8
def as_dtype(type_value):
if isinstance(type_value, DType):<EOL><INDENT>return type_value<EOL><DEDENT>try:<EOL><INDENT>return _INTERN_TABLE[type_value]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>return _STRING_TO_TF[type_value]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>return _PYTHON_TO_TF[type_value]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>if isinstance(type_value, np.dtype):<EOL><INDENT>if type_value.type == np.string_ or type_value.type == np.unicode_:<EOL><INDENT>return string<EOL><DEDENT><DEDENT>if isinstance(type_value, (type, np.dtype)):<EOL><INDENT>for key, val in _NP_TO_TF:<EOL><INDENT>try:<EOL><INDENT>if key == type_value:<EOL><INDENT>return val<EOL><DEDENT><DEDENT>except TypeError as e:<EOL><INDENT>raise TypeError(<EOL>"<STR_LIT>".format(type_value, e)<EOL>)<EOL><DEDENT><DEDENT><DEDENT>raise TypeError("<STR_LIT>" % type_value)<EOL>
Converts the given `type_value` to a `DType`. Args: type_value: A value that can be converted to a `tf.DType` object. This may currently be a `tf.DType` object, a [`DataType` enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto), a string type name, or a `numpy.dtype`. Returns: A `DType` corresponding to `type_value`. Raises: TypeError: If `type_value` cannot be converted to a `DType`.
f7907:m0
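The record above resolves a type through a chain of lookup tables (intern table, string names, Python types, numpy dtypes). A self-contained miniature of that chain; the two tables and the function name are hypothetical and cover only a couple of types.

```python
import numpy as np

# Hypothetical miniature tables; the real module keys these by a protobuf enum
# and covers many more types.
_STRING_TO_TYPE = {"float32": np.float32, "int64": np.int64}
_PYTHON_TO_TYPE = {float: np.float32, int: np.int64}


def as_numpy_dtype(type_value):
    """Resolves strings, Python types, and numpy dtypes to a numpy dtype."""
    for table in (_STRING_TO_TYPE, _PYTHON_TO_TYPE):
        try:
            return np.dtype(table[type_value])
        except (KeyError, TypeError):  # TypeError covers unhashable inputs
            pass
    try:
        return np.dtype(type_value)
    except TypeError:
        raise TypeError("Cannot convert %r to a dtype" % (type_value,))
```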
def __init__(self, type_enum):
<EOL>type_enum = int(type_enum)<EOL>if (<EOL>type_enum not in types_pb2.DataType.values()<EOL>or type_enum == types_pb2.DT_INVALID<EOL>):<EOL><INDENT>raise TypeError(<EOL>"<STR_LIT>" % type_enum<EOL>)<EOL><DEDENT>self._type_enum = type_enum<EOL>
Creates a new `DataType`. NOTE(mrry): In normal circumstances, you should not need to construct a `DataType` object directly. Instead, use the `tf.as_dtype()` function. Args: type_enum: A `types_pb2.DataType` enum value. Raises: TypeError: If `type_enum` is not a valid `types_pb2.DataType` value.
f7907:c0:m0
@property<EOL><INDENT>def _is_ref_dtype(self):<DEDENT>
return self._type_enum > <NUM_LIT:100><EOL>
Returns `True` if this `DType` represents a reference type.
f7907:c0:m1
@property<EOL><INDENT>def _as_ref(self):<DEDENT>
if self._is_ref_dtype:<EOL><INDENT>return self<EOL><DEDENT>else:<EOL><INDENT>return _INTERN_TABLE[self._type_enum + <NUM_LIT:100>]<EOL><DEDENT>
Returns a reference `DType` based on this `DType`.
f7907:c0:m2
@property<EOL><INDENT>def base_dtype(self):<DEDENT>
if self._is_ref_dtype:<EOL><INDENT>return _INTERN_TABLE[self._type_enum - <NUM_LIT:100>]<EOL><DEDENT>else:<EOL><INDENT>return self<EOL><DEDENT>
Returns a non-reference `DType` based on this `DType`.
f7907:c0:m3
@property<EOL><INDENT>def real_dtype(self):<DEDENT>
base = self.base_dtype<EOL>if base == complex64:<EOL><INDENT>return float32<EOL><DEDENT>elif base == complex128:<EOL><INDENT>return float64<EOL><DEDENT>else:<EOL><INDENT>return self<EOL><DEDENT>
Returns the dtype corresponding to this dtype's real part.
f7907:c0:m4
@property<EOL><INDENT>def as_numpy_dtype(self):<DEDENT>
return _TF_TO_NP[self._type_enum]<EOL>
Returns a `numpy.dtype` based on this `DType`.
f7907:c0:m6
@property<EOL><INDENT>def as_datatype_enum(self):<DEDENT>
return self._type_enum<EOL>
Returns a `types_pb2.DataType` enum value based on this `DType`.
f7907:c0:m7
@property<EOL><INDENT>def is_bool(self):<DEDENT>
return self.base_dtype == bool<EOL>
Returns whether this is a boolean data type.
f7907:c0:m8
@property<EOL><INDENT>def is_integer(self):<DEDENT>
return (<EOL>self.is_numpy_compatible<EOL>and not self.is_quantized<EOL>and np.issubdtype(self.as_numpy_dtype, np.integer)<EOL>)<EOL>
Returns whether this is a (non-quantized) integer type.
f7907:c0:m9
@property<EOL><INDENT>def is_floating(self):<DEDENT>
return (<EOL>self.is_numpy_compatible and np.issubdtype(self.as_numpy_dtype, np.floating)<EOL>) or self.base_dtype == bfloat16<EOL>
Returns whether this is a (non-quantized, real) floating point type.
f7907:c0:m10
@property<EOL><INDENT>def is_complex(self):<DEDENT>
return self.base_dtype in (complex64, complex128)<EOL>
Returns whether this is a complex floating point type.
f7907:c0:m11
@property<EOL><INDENT>def is_quantized(self):<DEDENT>
return self.base_dtype in _QUANTIZED_DTYPES_NO_REF<EOL>
Returns whether this is a quantized data type.
f7907:c0:m12
@property<EOL><INDENT>def is_unsigned(self):<DEDENT>
try:<EOL><INDENT>return self.min == <NUM_LIT:0><EOL><DEDENT>except TypeError:<EOL><INDENT>return False<EOL><DEDENT>
Returns whether this type is unsigned. Non-numeric, unordered, and quantized types are not considered unsigned, and this function returns `False`. Returns: Whether a `DType` is unsigned.
f7907:c0:m13
@property<EOL><INDENT>def min(self):<DEDENT>
if self.is_quantized or self.base_dtype in (<EOL>bool,<EOL>string,<EOL>complex64,<EOL>complex128,<EOL>):<EOL><INDENT>raise TypeError("<STR_LIT>" % self)<EOL><DEDENT>try:<EOL><INDENT>return np.finfo(self.as_numpy_dtype()).min<EOL><DEDENT>except: <EOL><INDENT>try:<EOL><INDENT>return np.iinfo(self.as_numpy_dtype()).min<EOL><DEDENT>except:<EOL><INDENT>if self.base_dtype == bfloat16:<EOL><INDENT>return _np_bfloat16(float.fromhex("<STR_LIT>"))<EOL><DEDENT>raise TypeError("<STR_LIT>" % self)<EOL><DEDENT><DEDENT>
Returns the minimum representable value in this data type. Raises: TypeError: if this is a non-numeric, unordered, or quantized type.
f7907:c0:m14
@property<EOL><INDENT>def max(self):<DEDENT>
if self.is_quantized or self.base_dtype in (<EOL>bool,<EOL>string,<EOL>complex64,<EOL>complex128,<EOL>):<EOL><INDENT>raise TypeError("<STR_LIT>" % self)<EOL><DEDENT>try:<EOL><INDENT>return np.finfo(self.as_numpy_dtype()).max<EOL><DEDENT>except: <EOL><INDENT>try:<EOL><INDENT>return np.iinfo(self.as_numpy_dtype()).max<EOL><DEDENT>except:<EOL><INDENT>if self.base_dtype == bfloat16:<EOL><INDENT>return _np_bfloat16(float.fromhex("<STR_LIT>"))<EOL><DEDENT>raise TypeError("<STR_LIT>" % self)<EOL><DEDENT><DEDENT>
Returns the maximum representable value in this data type. Raises: TypeError: if this is a non-numeric, unordered, or quantized type.
f7907:c0:m15
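The `min`/`max` records above fall back from `np.finfo` to `np.iinfo`; a standalone version of that probe, narrowed to `ValueError` instead of the bare `except` used in the records:

```python
import numpy as np


def dtype_min_max(np_dtype):
    """Returns (min, max) for a numeric numpy dtype via finfo, then iinfo."""
    try:
        info = np.finfo(np_dtype)   # floating-point / complex types
    except ValueError:
        info = np.iinfo(np_dtype)   # integer types
    return info.min, info.max


# dtype_min_max(np.float32) -> (-3.4028235e+38, 3.4028235e+38)
# dtype_min_max(np.int8)    -> (-128, 127)
```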
@property<EOL><INDENT>def limits(self, clip_negative=True):<DEDENT>
min, max = dtype_range[self.as_numpy_dtype] <EOL>if clip_negative:<EOL><INDENT>min = <NUM_LIT:0> <EOL><DEDENT>return min, max<EOL>
Returns the intensity limits, i.e. the (min, max) tuple, of the dtype. Args: clip_negative: bool, optional. If True, clip the negative range (i.e. return 0 for min intensity) even if the image dtype allows negative values. Returns: min, max: tuple. Lower and upper intensity limits.
f7907:c0:m16
def is_compatible_with(self, other):
other = as_dtype(other)<EOL>return self._type_enum in (<EOL>other.as_datatype_enum,<EOL>other.base_dtype.as_datatype_enum,<EOL>)<EOL>
Returns True if the `other` DType will be converted to this DType. The conversion rules are as follows: ```python DType(T) .is_compatible_with(DType(T)) == True DType(T) .is_compatible_with(DType(T).as_ref) == True DType(T).as_ref.is_compatible_with(DType(T)) == False DType(T).as_ref.is_compatible_with(DType(T).as_ref) == True ``` Args: other: A `DType` (or object that may be converted to a `DType`). Returns: True if a Tensor of the `other` `DType` will be implicitly converted to this `DType`.
f7907:c0:m17
def __eq__(self, other):
if other is None:<EOL><INDENT>return False<EOL><DEDENT>try:<EOL><INDENT>dtype = as_dtype(other).as_datatype_enum<EOL>return self._type_enum == dtype <EOL><DEDENT>except TypeError:<EOL><INDENT>return False<EOL><DEDENT>
Returns True iff this DType refers to the same type as `other`.
f7907:c0:m18
def __ne__(self, other):
return not self.__eq__(other)<EOL>
Returns True iff self != other.
f7907:c0:m19
@property<EOL><INDENT>def name(self):<DEDENT>
return _TYPE_TO_STRING[self._type_enum]<EOL>
Returns the string name for this `DType`.
f7907:c0:m20
def crc_update(crc, data):
if type(data) != array.array or data.itemsize != <NUM_LIT:1>:<EOL><INDENT>buf = array.array("<STR_LIT:B>", data)<EOL><DEDENT>else:<EOL><INDENT>buf = data<EOL><DEDENT>crc ^= _MASK<EOL>for b in buf:<EOL><INDENT>table_index = (crc ^ b) & <NUM_LIT><EOL>crc = (CRC_TABLE[table_index] ^ (crc >> <NUM_LIT:8>)) & _MASK<EOL><DEDENT>return crc ^ _MASK<EOL>
Update CRC-32C checksum with data. Args: crc: 32-bit checksum to update as long. data: byte array, string or iterable over bytes. Returns: 32-bit updated CRC-32C as long.
f7909:m4
def crc_finalize(crc):
return crc & _MASK<EOL>
Finalizes a CRC-32C checksum. This function should be called as the last step of the CRC calculation. Args: crc: 32-bit checksum as long. Returns: Finalized 32-bit checksum as long.
f7909:m5
def crc32c(data):
return crc_finalize(crc_update(CRC_INIT, data))<EOL>
Compute CRC-32C checksum of the data. Args: data: byte array, string or iterable over bytes. Returns: 32-bit CRC-32C checksum of data as long.
f7909:m6
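The three f7909 records elide the CRC table and numeric constants. A self-contained sketch that regenerates the table from the reflected CRC-32C (Castagnoli) polynomial and reproduces the update/finalize/one-shot structure:

```python
import array

_MASK = 0xFFFFFFFF
_POLY = 0x82F63B78  # reflected CRC-32C (Castagnoli) polynomial


def _make_table():
    table = []
    for i in range(256):
        crc = i
        for _ in range(8):
            crc = (crc >> 1) ^ _POLY if crc & 1 else crc >> 1
        table.append(crc & _MASK)
    return table


CRC_TABLE = _make_table()
CRC_INIT = 0


def crc_update(crc, data):
    """Feeds bytes (or any iterable of byte values) into the running checksum."""
    buf = data if isinstance(data, (bytes, bytearray)) else array.array("B", data)
    crc ^= _MASK
    for b in buf:
        crc = (CRC_TABLE[(crc ^ b) & 0xFF] ^ (crc >> 8)) & _MASK
    return crc ^ _MASK


def crc_finalize(crc):
    """Last step of the calculation; here it only masks to 32 bits."""
    return crc & _MASK


def crc32c(data):
    """One-shot CRC-32C; crc32c(b'123456789') == 0xE3069283 (the check value)."""
    return crc_finalize(crc_update(CRC_INIT, data))
```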
def as_dimension(value):
if isinstance(value, Dimension):<EOL><INDENT>return value<EOL><DEDENT>else:<EOL><INDENT>return Dimension(value)<EOL><DEDENT>
Converts the given value to a Dimension. A Dimension input will be returned unmodified. An input of `None` will be converted to an unknown Dimension. An integer input will be converted to a Dimension with that value. Args: value: The value to be converted. Returns: A Dimension corresponding to the given value.
f7910:m0
def as_shape(shape):
if isinstance(shape, TensorShape):<EOL><INDENT>return shape<EOL><DEDENT>else:<EOL><INDENT>return TensorShape(shape)<EOL><DEDENT>
Converts the given object to a TensorShape.
f7910:m1
def unknown_shape(ndims=None):
if ndims is None:<EOL><INDENT>return TensorShape(None)<EOL><DEDENT>else:<EOL><INDENT>return TensorShape([Dimension(None)] * ndims)<EOL><DEDENT>
Returns an unknown TensorShape, optionally with a known rank. Args: ndims: (Optional) If specified, the number of dimensions in the shape. Returns: An unknown TensorShape.
f7910:m2
def scalar():
return _SCALAR_SHAPE<EOL>
Returns a shape representing a scalar.
f7910:m3
def vector(length):
return TensorShape([length])<EOL>
Returns a shape representing a vector. Args: length: The length of the vector, which may be None if unknown. Returns: A TensorShape representing a vector of the given length.
f7910:m4
def matrix(rows, cols):
return TensorShape([rows, cols])<EOL>
Returns a shape representing a matrix. Args: rows: The number of rows in the matrix, which may be None if unknown. cols: The number of columns in the matrix, which may be None if unknown. Returns: A TensorShape representing a matrix of the given size.
f7910:m5
def __init__(self, value):
if value is None:<EOL><INDENT>self._value = None<EOL><DEDENT>elif isinstance(value, dtypes.DType):<EOL><INDENT>raise TypeError("<STR_LIT>" % value)<EOL><DEDENT>else:<EOL><INDENT>self._value = int(value)<EOL>if (<EOL>not isinstance(value, compat.bytes_or_text_types)<EOL>and self._value != value<EOL>):<EOL><INDENT>raise ValueError("<STR_LIT>" % value)<EOL><DEDENT>if self._value < <NUM_LIT:0>:<EOL><INDENT>raise ValueError("<STR_LIT>" % self._value)<EOL><DEDENT><DEDENT>
Creates a new Dimension with the given value.
f7910:c0:m0
def __eq__(self, other):
try:<EOL><INDENT>other = as_dimension(other)<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>return NotImplemented<EOL><DEDENT>if self._value is None or other.value is None:<EOL><INDENT>return None<EOL><DEDENT>return self._value == other.value<EOL>
Returns true if `other` has the same known value as this Dimension.
f7910:c0:m3
def __ne__(self, other):
try:<EOL><INDENT>other = as_dimension(other)<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>return NotImplemented<EOL><DEDENT>if self._value is None or other.value is None:<EOL><INDENT>return None<EOL><DEDENT>return self._value != other.value<EOL>
Returns true if `other` has a different known value from `self`.
f7910:c0:m4
@property<EOL><INDENT>def value(self):<DEDENT>
return self._value<EOL>
The value of this dimension, or None if it is unknown.
f7910:c0:m8
def is_convertible_with(self, other):
other = as_dimension(other)<EOL>return self._value is None or other.value is None or self._value == other.value<EOL>
Returns true if `other` is convertible with this Dimension. Two known Dimensions are convertible if they have the same value. An unknown Dimension is convertible with all other Dimensions. Args: other: Another Dimension. Returns: True if this Dimension and `other` are convertible.
f7910:c0:m9
def assert_is_convertible_with(self, other):
if not self.is_convertible_with(other):<EOL><INDENT>raise ValueError("<STR_LIT>" % (self, other))<EOL><DEDENT>
Raises an exception if `other` is not convertible with this Dimension. Args: other: Another Dimension. Raises: ValueError: If `self` and `other` are not convertible (see is_convertible_with).
f7910:c0:m10
def merge_with(self, other):
other = as_dimension(other)<EOL>self.assert_is_convertible_with(other)<EOL>if self._value is None:<EOL><INDENT>return Dimension(other.value)<EOL><DEDENT>else:<EOL><INDENT>return Dimension(self._value)<EOL><DEDENT>
Returns a Dimension that combines the information in `self` and `other`. Dimensions are combined as follows: ```python tf.Dimension(n) .merge_with(tf.Dimension(n)) == tf.Dimension(n) tf.Dimension(n) .merge_with(tf.Dimension(None)) == tf.Dimension(n) tf.Dimension(None).merge_with(tf.Dimension(n)) == tf.Dimension(n) tf.Dimension(None).merge_with(tf.Dimension(None)) == tf.Dimension(None) tf.Dimension(n) .merge_with(tf.Dimension(m)) # raises ValueError for n != m ``` Args: other: Another Dimension. Returns: A Dimension containing the combined information of `self` and `other`. Raises: ValueError: If `self` and `other` are not convertible (see is_convertible_with).
f7910:c0:m11
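The merge table in the docstring reduces to a small None-aware rule. A standalone miniature operating on plain values rather than Dimension objects, for reference:

```python
def merge_dims(a, b):
    """Merges two dimension values where None means 'unknown'."""
    if a is None:
        return b
    if b is None or a == b:
        return a
    raise ValueError("Dimensions %r and %r are not convertible" % (a, b))


assert merge_dims(32, None) == 32
assert merge_dims(None, 32) == 32
assert merge_dims(None, None) is None
# merge_dims(32, 64) raises ValueError
```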
def __add__(self, other):
other = as_dimension(other)<EOL>if self._value is None or other.value is None:<EOL><INDENT>return Dimension(None)<EOL><DEDENT>else:<EOL><INDENT>return Dimension(self._value + other.value)<EOL><DEDENT>
Returns the sum of `self` and `other`. Dimensions are summed as follows: ```python tf.Dimension(m) + tf.Dimension(n) == tf.Dimension(m + n) tf.Dimension(m) + tf.Dimension(None) == tf.Dimension(None) tf.Dimension(None) + tf.Dimension(n) == tf.Dimension(None) tf.Dimension(None) + tf.Dimension(None) == tf.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the sum of `self` and `other`.
f7910:c0:m12
def __radd__(self, other):
return self + other<EOL>
Returns the sum of `other` and `self`. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the sum of `self` and `other`.
f7910:c0:m13
def __sub__(self, other):
other = as_dimension(other)<EOL>if self._value is None or other.value is None:<EOL><INDENT>return Dimension(None)<EOL><DEDENT>else:<EOL><INDENT>return Dimension(self._value - other.value)<EOL><DEDENT>
Returns the subtraction of `other` from `self`. Dimensions are subtracted as follows: ```python tf.Dimension(m) - tf.Dimension(n) == tf.Dimension(m - n) tf.Dimension(m) - tf.Dimension(None) == tf.Dimension(None) tf.Dimension(None) - tf.Dimension(n) == tf.Dimension(None) tf.Dimension(None) - tf.Dimension(None) == tf.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the subtraction of `other` from `self`.
f7910:c0:m14
def __rsub__(self, other):
other = as_dimension(other)<EOL>if self._value is None or other.value is None:<EOL><INDENT>return Dimension(None)<EOL><DEDENT>else:<EOL><INDENT>return Dimension(other.value - self._value)<EOL><DEDENT>
Returns the subtraction of `self` from `other`. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the subtraction of `self` from `other`.
f7910:c0:m15
def __mul__(self, other):
try:<EOL><INDENT>other = as_dimension(other)<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>return NotImplemented<EOL><DEDENT>if self._value is None or other.value is None:<EOL><INDENT>return Dimension(None)<EOL><DEDENT>else:<EOL><INDENT>return Dimension(self._value * other.value)<EOL><DEDENT>
Returns the product of `self` and `other`. Dimensions are multiplied as follows: ```python tf.Dimension(m) * tf.Dimension(n) == tf.Dimension(m * n) tf.Dimension(m) * tf.Dimension(None) == tf.Dimension(None) tf.Dimension(None) * tf.Dimension(n) == tf.Dimension(None) tf.Dimension(None) * tf.Dimension(None) == tf.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the product of `self` and `other`.
f7910:c0:m16
def __rmul__(self, other):
return self * other<EOL>
Returns the product of `self` and `other`. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the product of `self` and `other`.
f7910:c0:m17
def __floordiv__(self, other):
try:<EOL><INDENT>other = as_dimension(other)<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>return NotImplemented<EOL><DEDENT>if self._value is None or other.value is None:<EOL><INDENT>return Dimension(None)<EOL><DEDENT>else:<EOL><INDENT>return Dimension(self._value // other.value)<EOL><DEDENT>
Returns the quotient of `self` and `other` rounded down. Dimensions are divided as follows: ```python tf.Dimension(m) // tf.Dimension(n) == tf.Dimension(m // n) tf.Dimension(m) // tf.Dimension(None) == tf.Dimension(None) tf.Dimension(None) // tf.Dimension(n) == tf.Dimension(None) tf.Dimension(None) // tf.Dimension(None) == tf.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A `Dimension` whose value is the integer quotient of `self` and `other`.
f7910:c0:m18
def __rfloordiv__(self, other):
other = as_dimension(other)<EOL>if self._value is None or other.value is None:<EOL><INDENT>return Dimension(None)<EOL><DEDENT>else:<EOL><INDENT>return Dimension(other.value // self._value)<EOL><DEDENT>
Returns the quotient of `other` and `self` rounded down. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A `Dimension` whose value is the integer quotient of `self` and `other`.
f7910:c0:m19
def __div__(self, other):
return self // other<EOL>
DEPRECATED: Use `__floordiv__` via `x // y` instead. This function exists only for backwards compatibility purposes; new code should use `__floordiv__` via the syntax `x // y`. Using `x // y` communicates clearly that the result rounds down, and is forward compatible with Python 3. Args: other: Another `Dimension`. Returns: A `Dimension` whose value is the integer quotient of `self` and `other`.
f7910:c0:m20
def __mod__(self, other):
try:<EOL><INDENT>other = as_dimension(other)<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>return NotImplemented<EOL><DEDENT>if self._value is None or other.value is None:<EOL><INDENT>return Dimension(None)<EOL><DEDENT>else:<EOL><INDENT>return Dimension(self._value % other.value)<EOL><DEDENT>
Returns `self` modulo `other`. Dimension moduli are computed as follows: ```python tf.Dimension(m) % tf.Dimension(n) == tf.Dimension(m % n) tf.Dimension(m) % tf.Dimension(None) == tf.Dimension(None) tf.Dimension(None) % tf.Dimension(n) == tf.Dimension(None) tf.Dimension(None) % tf.Dimension(None) == tf.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is `self` modulo `other`.
f7910:c0:m21
def __rmod__(self, other):
try:<EOL><INDENT>other = as_dimension(other)<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>return NotImplemented<EOL><DEDENT>return other % self<EOL>
Returns `other` modulo `self`. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is `other` modulo `self`.
f7910:c0:m22
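The arithmetic records for `__add__`, `__sub__`, `__mul__`, `__floordiv__`, and `__mod__` all share one pattern: propagate None (unknown) if either operand is unknown, otherwise apply the integer operation. A standalone miniature of that pattern:

```python
import operator


def dim_binop(op, a, b):
    """Applies `op` to two dimension values, where None means 'unknown'."""
    if a is None or b is None:
        return None
    return op(a, b)


assert dim_binop(operator.add, 3, 4) == 7
assert dim_binop(operator.mul, 3, None) is None
assert dim_binop(operator.mod, 7, 4) == 3
```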