Search is not available for this dataset
text stringlengths 75 104k |
|---|
def _product_file_hash(self, product=None):
"""
Get the hash of the each product file
"""
if self.hasher is None:
return None
else:
products = self._rectify_products(product)
product_file_hash = [
util_hash.hash_file(p, hasher=self.hasher, base='hex')
for p in products
]
return product_file_hash |
def expired(self, cfgstr=None, product=None):
    """
    Check whether a previously written stamp is stale.

    A stamp is stale when no certificate exists, when any expected
    product file is missing, or when the product hashes no longer match
    the hashes recorded in the certificate.

    Args:
        cfgstr (str, optional): override the default cfgstr if specified
        product (PathLike or Sequence[PathLike], optional): override the
            default product if specified

    Returns:
        bool: True when the stamp is invalid or the products changed
    """
    paths = self._rectify_products(product)
    cert = self._get_certificate(cfgstr=cfgstr)
    if cert is None:
        # No certificate on disk, so the stamp must be stale
        return True
    if paths is None:
        # Nothing to verify against; assume the stamp is still good
        return False
    if not all(os.path.exists(p) for p in paths):
        # A missing product invalidates the stamp
        return True
    # Stale when the on-disk data no longer hashes to the recorded value
    return self._product_file_hash(paths) != cert.get('product_file_hash', None)
def renew(self, cfgstr=None, product=None):
    """
    Recertify that the product has been recomputed.

    Writes a fresh certificate (timestamp, products, and product hashes)
    to disk and returns it.

    Raises:
        IOError: if any stamped product does not exist
    """
    paths = self._rectify_products(product)
    cert = {
        'timestamp': util_time.timestamp(),
        'product': paths,
    }
    if paths is not None:
        if not all(os.path.exists(p) for p in paths):
            raise IOError(
                'The stamped product must exist: {}'.format(paths))
        cert['product_file_hash'] = self._product_file_hash(paths)
    self.cacher.save(cert, cfgstr=cfgstr)
    return cert
def isatty(self):  # nocover
    """
    Return True only when the redirected stream exists and is a terminal.

    Notes:
        Needed for IPython.embed to work properly when this class is used
        to override stdout / stderr.
    """
    stream = self.redirect
    if stream is None:
        return False
    return hasattr(stream, 'isatty') and stream.isatty()
def encoding(self):
    """
    Expose the encoding of the underlying `redirect` IO object.

    Falls back to the parent class encoding when there is no redirect.

    Doctest:
        >>> redirect = io.StringIO()
        >>> assert TeeStringIO(redirect).encoding is None
        >>> assert TeeStringIO(None).encoding is None
        >>> assert TeeStringIO(sys.stdout).encoding is sys.stdout.encoding
        >>> redirect = io.TextIOWrapper(io.StringIO())
        >>> assert TeeStringIO(redirect).encoding is redirect.encoding
    """
    if self.redirect is None:
        return super(TeeStringIO, self).encoding
    return self.redirect.encoding
def write(self, msg):
    """
    Write `msg` to the redirected stream (if any) and to this buffer.
    """
    target = self.redirect
    if target is not None:
        target.write(msg)
    if six.PY2:
        # Python2 StringIO requires unicode text
        from xdoctest.utils.util_str import ensure_unicode
        msg = ensure_unicode(msg)
    super(TeeStringIO, self).write(msg)
def flush(self):  # nocover
    """
    Flush the redirected stream (if any) and then this buffer.
    """
    target = self.redirect
    if target is not None:
        target.flush()
    super(TeeStringIO, self).flush()
def log_part(self):
    """ Record the text captured since the last call as a new part """
    stream = self.cap_stdout
    stream.seek(self._pos)
    captured = stream.read()
    # Remember where this part ended so the next call starts there
    self._pos = stream.tell()
    self.parts.append(captured)
    self.text = captured
def stop(self):
    """
    Restore the original stdout stream if capture was enabled.

    Doctest:
        >>> CaptureStdout(enabled=False).stop()
        >>> CaptureStdout(enabled=True).stop()
    """
    if not self.enabled:
        return
    self.started = False
    sys.stdout = self.orig_stdout
def platform_data_dir():
    """
    Returns path for user-specific data files

    Returns:
        PathLike : path to the data dir used by the current operating system

    Raises:
        NotImplementedError: on an unrecognized platform
    """
    if LINUX:  # nocover
        dpath_ = os.environ.get('XDG_DATA_HOME', '~/.local/share')
    elif DARWIN:  # nocover
        dpath_ = '~/Library/Application Support'
    elif WIN32:  # nocover
        dpath_ = os.environ.get('APPDATA', '~/AppData/Roaming')
    else:  # nocover
        # BUGFIX: this previously executed `raise '~/AppData/Local'`, which
        # raises a TypeError (cannot raise a str). Raise a real exception,
        # consistent with platform_config_dir.
        raise NotImplementedError('Unknown Platform %r' % (sys.platform,))
    dpath = normpath(expanduser(dpath_))
    return dpath
def platform_config_dir():
    """
    Returns a directory which should be writable for any application
    This should be used for persistent configuration files.

    Returns:
        PathLike : path to the config dir used by the current operating system
    """
    if LINUX:  # nocover
        dpath_ = os.environ.get('XDG_CONFIG_HOME', '~/.config')
    elif DARWIN:  # nocover
        dpath_ = '~/Library/Application Support'
    elif WIN32:  # nocover
        dpath_ = os.environ.get('APPDATA', '~/AppData/Roaming')
    else:  # nocover
        raise NotImplementedError('Unknown Platform %r' % (sys.platform,))
    dpath = normpath(expanduser(dpath_))
    return dpath
def ensure_app_data_dir(appname, *args):
    """
    Calls `get_app_data_dir` and ensures the resulting directory exists.

    Args:
        appname (str): the name of the application
        *args: any other subdirectories may be specified

    Returns:
        PathLike: the ensured data directory

    SeeAlso:
        get_app_data_dir
    """
    from ubelt import util_path
    app_dpath = get_app_data_dir(appname, *args)
    util_path.ensuredir(app_dpath)
    return app_dpath
def ensure_app_config_dir(appname, *args):
    """
    Calls `get_app_config_dir` and ensures the resulting directory exists.

    Args:
        appname (str): the name of the application
        *args: any other subdirectories may be specified

    Returns:
        PathLike: the ensured config directory

    SeeAlso:
        get_app_config_dir
    """
    from ubelt import util_path
    app_dpath = get_app_config_dir(appname, *args)
    util_path.ensuredir(app_dpath)
    return app_dpath
def ensure_app_cache_dir(appname, *args):
    """
    Calls `get_app_cache_dir` and ensures the resulting directory exists.

    Args:
        appname (str): the name of the application
        *args: any other subdirectories may be specified

    Returns:
        PathLike: the ensured cache directory

    SeeAlso:
        get_app_cache_dir
    """
    from ubelt import util_path
    app_dpath = get_app_cache_dir(appname, *args)
    util_path.ensuredir(app_dpath)
    return app_dpath
def startfile(fpath, verbose=True):  # nocover
    """
    Uses default program defined by the system to open a file.

    This is done via `os.startfile` on windows, `open` on mac, and `xdg-open`
    on linux.

    Args:
        fpath (PathLike): a file to open using the program associated with the
            files extension type.
        verbose (int): verbosity

    Raises:
        Exception: if the file does not exist, or the detached launcher
            process failed to spawn

    References:
        http://stackoverflow.com/questions/2692873/quote-posix

    DisableExample:
        >>> # This test interacts with a GUI frontend, not sure how to test.
        >>> import ubelt as ub
        >>> base = ub.ensure_app_cache_dir('ubelt')
        >>> fpath1 = join(base, 'test_open.txt')
        >>> ub.touch(fpath1)
        >>> proc = ub.startfile(fpath1)
    """
    from ubelt import util_cmd
    if verbose:
        print('[ubelt] startfile("{}")'.format(fpath))
    fpath = normpath(fpath)
    if not exists(fpath):
        raise Exception('Cannot start nonexistant file: %r' % fpath)
    if not WIN32:
        # Quote the path so the shell-level launcher sees it as one token
        # NOTE(review): `pipes` is deprecated (removed in Python 3.13);
        # `shlex.quote` is the modern equivalent — confirm before swapping.
        import pipes
        fpath = pipes.quote(fpath)
    if LINUX:
        info = util_cmd.cmd(('xdg-open', fpath), detach=True, verbose=verbose)
    elif DARWIN:
        info = util_cmd.cmd(('open', fpath), detach=True, verbose=verbose)
    elif WIN32:
        # os.startfile has no return value to inspect
        os.startfile(fpath)
        info = None
    else:
        raise RuntimeError('Unknown Platform')
    if info is not None:
        # A falsy proc handle means the detached launcher failed to spawn
        if not info['proc']:
            raise Exception('startfile failed')
def find_exe(name, multi=False, path=None):
    """
    Locate a command on the local filesystem.

    Searches the PATH (or a custom set of directories) for files matching
    `name` that carry executable permission — essentially the UNIX `which`
    command.

    Args:
        name (str): globstr of matching filename
        multi (bool): if True return all matches instead of just the first.
            Defaults to False.
        path (str or Iterable[PathLike]): overrides the system PATH variable.

    Returns:
        PathLike or List[PathLike] or None: returns matching executable(s).

    SeeAlso:
        shutil.which - which is available in Python 3.3+.

    References:
        https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python/377028#377028
        https://docs.python.org/dev/library/shutil.html#shutil.which

    Example:
        >>> find_exe('ls')
        >>> assert find_exe('which') == find_exe(find_exe('which'))
        >>> assert not find_exe('noexist', multi=False)
        >>> assert find_exe('ping', multi=True)
    """
    required_mode = os.X_OK | os.F_OK
    matches = (
        fpath for fpath in find_path(name, path=path, exact=True)
        if os.access(fpath, required_mode) and not isdir(fpath)
    )
    if multi:
        return list(matches)
    # Only the first executable match is wanted; None when nothing matched
    return next(matches, None)
def find_path(name, path=None, exact=False):
    """
    Search for a file or directory on your local filesystem by name
    (file must be in a directory specified in a PATH environment variable)

    Args:
        name (PathLike or str): file name to match.
            If exact is False this may be a glob pattern
        path (str or Iterable[PathLike]): list of directories to search either
            specified as an os.pathsep separated string or a list of
            directories. Defaults to environment PATH.
        exact (bool): if True, only returns exact matches. Default False.

    Returns:
        Iterable[PathLike]: candidate paths that exist (exact mode) or that
            match the glob pattern.

    Notes:
        For recursive behavior set `path=(d for d, _, _ in os.walk('.'))`,
        where '.' might be replaced by the root directory of interest.
    """
    if path is None:
        path = os.environ.get('PATH', os.defpath)
    if isinstance(path, six.string_types):
        dpaths = path.split(os.pathsep)
    else:
        dpaths = path
    candidates = (join(dpath, name) for dpath in dpaths)
    if exact:
        if WIN32:  # nocover
            # Windows executables may implicitly carry a PATHEXT extension
            exts = [''] + os.environ.get('PATHEXT', '').split(os.pathsep)
            candidates = (cand + ext for cand in candidates for ext in exts)
        return filter(exists, candidates)
    import glob
    return it.chain.from_iterable(
        glob.glob(pattern) for pattern in candidates)
def editfile(fpath, verbose=True):  # nocover
    """
    DEPRECATED: This has been ported to xdev, please use that version.

    Opens a file or code corresponding to a live python object in your
    preferred visual editor. This function is mainly useful in an interactive
    IPython session.

    The visual editor is determined by the `VISUAL` environment variable. If
    this is not specified it defaults to gvim.

    Args:
        fpath (PathLike): a file path or python module / function
        verbose (int): verbosity

    Raises:
        IOError: if the resolved path does not exist
    """
    # BUGFIX: use the stdlib `types` module directly. The previous
    # `from six import types` only worked because six happens to import the
    # stdlib module at its own top level — it is not part of six's API.
    import types
    from ubelt import util_cmd
    import warnings
    warnings.warn('Please use xdev.editfile instead', DeprecationWarning)
    if not isinstance(fpath, six.string_types):
        # Resolve a live module/function to the file that defines it
        if isinstance(fpath, types.ModuleType):
            fpath = fpath.__file__
        else:
            fpath = sys.modules[fpath.__module__].__file__
        # Prefer the source file over a stale compiled file
        fpath_py = fpath.replace('.pyc', '.py')
        if exists(fpath_py):
            fpath = fpath_py
    if verbose:
        print('[ubelt] editfile("{}")'.format(fpath))
    editor = os.environ.get('VISUAL', 'gvim')
    if not exists(fpath):
        raise IOError('Cannot start nonexistant file: %r' % fpath)
    # NOTE(review): the second positional argument forwards `fpath` again;
    # confirm this matches util_cmd.cmd's signature.
    util_cmd.cmd([editor, fpath], fpath, detach=True)
def augpath(path, suffix='', prefix='', ext=None, base=None, multidot=False):
    """
    Augments a path with a new basename, extension, prefix and/or suffix.

    A prefix is inserted before the basename. A suffix is inserted
    between the basename and the extension. The basename and extension can be
    replaced with a new one.

    Args:
        path (PathLike): string representation of a path
        suffix (str): placed between the basename and extension
        prefix (str): placed in front of the basename
        ext (str): if specified, replaces the extension
        base (str): if specified, replaces the basename (without extension)
        multidot (bool): if False, everything after the last dot in the
            basename is the extension. If True, everything after the first dot
            in the basename is the extension (Defaults to False).

    Returns:
        PathLike: augmented path

    CommandLine:
        python -m ubelt.util_path augpath

    Example:
        >>> import ubelt as ub
        >>> newpath = ub.augpath('foo.bar', '_suff', 'pref_', ext='.baz', base='bar')
        >>> print('newpath = %s' % (newpath,))
        newpath = pref_bar_suff.baz

    Example:
        >>> augpath('foo.bar')
        'foo.bar'
        >>> augpath('foo.bar', ext='.BAZ')
        'foo.BAZ'
        >>> augpath('foo.bar', suffix='_')
        'foo_.bar'
        >>> augpath('foo.bar', prefix='_')
        '_foo.bar'
        >>> augpath('foo.bar', base='baz')
        'baz.bar'
        >>> # BUGFIX: these expected outputs previously lacked repr quotes
        >>> augpath('foo.tar.gz', ext='.zip', multidot=True)
        'foo.zip'
        >>> augpath('foo.tar.gz', ext='.zip', multidot=False)
        'foo.tar.zip'
        >>> augpath('foo.tar.gz', multidot=True)
        'foo.tar.gz'
    """
    # Breakup path
    dpath, fname = split(path)
    if multidot:
        # Everything after the FIRST dot counts as the extension.
        # BUGFIX: the original joined the extension parts without the
        # leading dot, so e.g. augpath('foo.bar', multidot=True) -> 'foobar'
        fname_noext, dot, rest = fname.partition('.')
        orig_ext = dot + rest
    else:
        fname_noext, orig_ext = splitext(fname)
    ext = orig_ext if ext is None else ext
    fname_noext = fname_noext if base is None else base
    # Augment and recombine into new path
    new_fname = ''.join((prefix, fname_noext, suffix, ext))
    newpath = join(dpath, new_fname)
    return newpath
def userhome(username=None):
    """
    Returns the home directory for a user.

    If `username` is None, this is the directory for the current user.

    Args:
        username (str): name of a user on the system

    Returns:
        PathLike: path to the home directory

    Raises:
        KeyError: if a specific user was requested but is unknown
        OSError: if the current user's home cannot be determined

    Example:
        >>> import getpass
        >>> assert userhome() == expanduser('~')
        >>> assert userhome(getpass.getuser()) == expanduser('~')
    """
    if username is not None:
        # A specific user directory was requested
        if sys.platform.startswith('win32'):  # nocover
            # Assume it lives next to the current user's home directory
            users_dpath = dirname(userhome())
            dpath = join(users_dpath, username)
            if not exists(dpath):
                raise KeyError('Unknown user: {}'.format(username))
            return dpath
        import pwd
        try:
            return pwd.getpwnam(username).pw_dir
        except KeyError:  # nocover
            raise KeyError('Unknown user: {}'.format(username))
    # Current user: prefer the HOME environment variable
    if 'HOME' in os.environ:
        return os.environ['HOME']
    if sys.platform.startswith('win32'):  # nocover
        # win32 fallbacks when HOME is not defined
        if 'USERPROFILE' in os.environ:
            return os.environ['USERPROFILE']
        if 'HOMEPATH' in os.environ:
            drive = os.environ.get('HOMEDRIVE', '')
            return join(drive, os.environ['HOMEPATH'])
        raise OSError("Cannot determine the user's home directory")
    # posix fallback when HOME is not defined
    import pwd  # nocover
    return pwd.getpwuid(os.getuid()).pw_dir  # nocover
def compressuser(path, home='~'):
    """
    Inverse of `os.path.expanduser`: abbreviate the home directory.

    Args:
        path (PathLike): path in system file structure
        home (str): symbol used to replace the home path. Defaults to '~', but
            you might want to use '$HOME' or '%USERPROFILE%' instead.

    Returns:
        PathLike: shortened path replacing the home directory with `home`

    Example:
        >>> path = expanduser('~')
        >>> assert compressuser(path) == '~'
        >>> assert compressuser(path + '1') == path + '1'
        >>> assert compressuser(path + '/1') == join('~', '1')
    """
    path = normpath(path)
    userhome_dpath = userhome()
    if path.startswith(userhome_dpath):
        tail = path[len(userhome_dpath):]
        # Only compress when the match covers a whole path component
        if tail == '':
            path = home
        elif tail.startswith(os.path.sep):
            path = home + tail
    return path
def truepath(path, real=False):
    """
    Normalizes a string representation of a path and does shell-like expansion.

    Args:
        path (PathLike): string representation of a path
        real (bool): if True, all symbolic links are followed. (default: False)

    Returns:
        PathLike: normalized path

    Note:
        This function is similar to the composition of expanduser, expandvars,
        normpath, and (realpath if `real` else abspath). However, on windows
        backslashes are then replaced with forward slashes to offer a
        consistent unix-like experience across platforms.

        On windows expanduser will expand environment variables formatted as
        %name%, whereas on unix, this will not occur.
    """
    expanded = expandvars(expanduser(path))
    resolve = realpath if real else abspath
    return normpath(resolve(expanded))
def ensuredir(dpath, mode=0o1777, verbose=None):
    r"""
    Ensures that a directory exists, creating it (with sticky bits by
    default) when necessary.

    Args:
        dpath (PathLike): dir to ensure. Can also be a tuple to send to join
        mode (int): octal mode of directory (default 0o1777)
        verbose (int): verbosity (default 0)

    Returns:
        PathLike: the ensured directory

    Notes:
        This function is not thread-safe in Python2
    """
    if verbose is None:  # nocover
        verbose = 0
    if isinstance(dpath, (list, tuple)):  # nocover
        dpath = join(*dpath)
    if exists(dpath):
        if verbose:  # nocover
            print('Ensuring existing directory (%r)' % dpath)
    else:
        if verbose:  # nocover
            print('Ensuring new directory (%r)' % dpath)
        if sys.version_info.major == 2:  # nocover
            # Python2 has no exist_ok and may race with other processes
            os.makedirs(normpath(dpath), mode=mode)
        else:
            os.makedirs(normpath(dpath), mode=mode, exist_ok=True)
    return dpath
def parse_version(package):
    """
    Statically parse the version number from __init__.py

    Args:
        package (str): package name; a `<package>.py` or
            `<package>/__init__.py` must exist next to this file

    Returns:
        str: the static value assigned to `__version__`

    Raises:
        Exception: if zero or multiple candidate init files are found

    CommandLine:
        python -c "import setup; print(setup.parse_version('ubelt'))"
    """
    from os.path import dirname, join, exists
    import ast
    # Check if the package is a single-file or multi-file package
    _candidates = [
        join(dirname(__file__), package + '.py'),
        join(dirname(__file__), package, '__init__.py'),
    ]
    _found = [init_fpath for init_fpath in _candidates if exists(init_fpath)]
    # BUGFIX: the original tested `len(_found) > 0` first, which made the
    # multiple-file and (partially) the empty checks unreachable
    if len(_found) == 0:
        raise Exception('Cannot find package init file')
    elif len(_found) > 1:
        raise Exception('parse_version found multiple init files')
    init_fpath = _found[0]
    with open(init_fpath, 'r') as file_:
        sourcecode = file_.read()
    pt = ast.parse(sourcecode)

    class VersionVisitor(ast.NodeVisitor):
        # Records the last static assignment to __version__
        def visit_Assign(self, node):
            for target in node.targets:
                if getattr(target, 'id', None) == '__version__':
                    self.version = node.value.s
    visitor = VersionVisitor()
    visitor.visit(pt)
    return visitor.version
def parse_description():
    """
    Parse the description in the README file

    Returns:
        str: the README text, or an empty string when it does not exist

    CommandLine:
        pandoc --from=markdown --to=rst --output=README.rst README.md
        python -c "import setup; print(setup.parse_description())"
    """
    from os.path import dirname, join, exists
    readme_fpath = join(dirname(__file__), 'README.rst')
    # The README may be absent (e.g. during pip install), so check first.
    if not exists(readme_fpath):
        return ''
    with open(readme_fpath, 'r') as f:
        return f.read()
def parse_requirements_alt(fname='requirements.txt'):
    """
    Alternative requirements parser built on the third-party
    `requirements-parser` package (``pip install requirements-parser``).

    Returns:
        List[str]: package names, or an empty list when the requirements
            file does not exist
    """
    import requirements
    from os.path import dirname, join, exists
    require_fpath = join(dirname(__file__), fname)
    # Dont use until this handles platform specific dependencies
    if not exists(require_fpath):
        return []
    with open(require_fpath, 'r') as file:
        return [req.name for req in requirements.parse(file)]
def parse_requirements(fname='requirements.txt'):
    """
    Parse the package dependencies listed in a requirements file but strips
    specific versioning information.

    Returns:
        List[str]: package specifiers (platform markers preserved), or an
            empty list when the file does not exist

    TODO:
        perhaps use https://github.com/davidfischer/requirements-parser instead

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    from os.path import dirname, join, exists
    import re
    require_fpath = join(dirname(__file__), fname)

    def parse_line(line):
        """ Extract package / version / platform info from one line """
        info = {}
        if line.startswith('-e '):
            # Editable install: the package name follows the #egg= fragment
            info['package'] = line.split('#egg=')[1]
            return info
        # Strip the version operator from the package specifier
        pat = '(' + '|'.join(['>=', '==', '>']) + ')'
        parts = [p.strip() for p in re.split(pat, line, maxsplit=1)]
        info['package'] = parts[0]
        if len(parts) > 1:
            op, rest = parts[1:]
            if ';' in rest:
                # Handle platform specific dependencies
                # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                version, platform_deps = map(str.strip, rest.split(';'))
                info['platform_deps'] = platform_deps
            else:
                version = rest  # NOQA
            info['version'] = (op, version)
        return info

    # This breaks on pip install, so check that it exists.
    if not exists(require_fpath):
        return []
    packages = []
    with open(require_fpath, 'r') as f:
        for line in f.readlines():
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            info = parse_line(line)
            package = info['package']
            if not sys.version.startswith('3.4'):
                # apparently package_deps are broken in 3.4
                platform_deps = info.get('platform_deps')
                if platform_deps is not None:
                    package += ';' + platform_deps
            packages.append(package)
    return packages
def inject_method(self, func, name=None):
    """
    Injects a function into an object instance as a bound method

    The main use case of this function is for monkey patching. While monkey
    patching is sometimes necessary it should generally be avoided. Thus, we
    simply remind the developer that there might be a better way.

    Args:
        self (object): instance to inject a function into
        func (func): the function to inject (must contain an arg for self)
        name (str): name of the method. optional. If not specified the name
            of the function is used.

    Example:
        >>> class Foo(object):
        >>>     pass
        >>> def baz(self):
        >>>     return 'baz'
        >>> self = Foo()
        >>> inject_method(self, baz)
        >>> assert self.baz() == 'baz'
        >>> assert not hasattr(Foo, 'baz'), 'should only change one instance'
    """
    # TODO: if func is a bound method we should probably unbind it
    method_name = func.__name__ if name is None else name
    # Use the descriptor protocol to bind the function to this instance only
    bound_method = func.__get__(self, self.__class__)
    setattr(self, method_name, bound_method)
def writeto(fpath, to_write, aslines=False, verbose=None):
    r"""
    Writes (utf8) text to a file.

    Args:
        fpath (PathLike): file path
        to_write (str): text to write (must be unicode text)
        aslines (bool): if True to_write is assumed to be a list of lines
        verbose (bool): verbosity flag
    """
    if verbose:
        print('Writing to text file: %r ' % (fpath,))
    with open(fpath, 'wb') as file:
        if aslines:
            # Encode each line separately and write them in one call
            file.writelines(map(_ensure_bytes, to_write))
        else:
            # convert to bytes for writing
            file.write(_ensure_bytes(to_write))
def readfrom(fpath, aslines=False, errors='replace', verbose=None):
    """
    Reads (utf8) text from a file.

    Args:
        fpath (PathLike): file path
        aslines (bool): if True returns list of lines
        errors (str): decoding error policy forwarded to bytes.decode
        verbose (bool): verbosity flag

    Returns:
        str or List[str]: unicode text (or lines of text) from fpath

    Raises:
        IOError: if fpath does not exist
    """
    if verbose:
        print('Reading text file: %r ' % (fpath,))
    if not exists(fpath):
        raise IOError('File %r does not exist' % (fpath,))
    with open(fpath, 'rb') as file:
        if not aslines:
            return file.read().decode('utf8', errors=errors)
        lines = [line.decode('utf8', errors=errors)
                 for line in file.readlines()]
    if sys.platform.startswith('win32'):  # nocover
        # fix line endings on windows
        lines = [
            line[:-2] + '\n' if line.endswith('\r\n') else line
            for line in lines
        ]
    return lines
def touch(fpath, mode=0o666, dir_fd=None, verbose=0, **kwargs):
    """
    change file timestamps

    Works like the touch unix utility: creates the file if it does not
    exist and updates its access/modification times.

    Args:
        fpath (PathLike): name of the file
        mode (int): file permissions (python3 and unix only)
        dir_fd (file): optional directory file descriptor. If specified, fpath
            is interpreted as relative to this descriptor (python 3 only).
        verbose (int): verbosity
        **kwargs : extra args passed to `os.utime` (python 3 only).

    Returns:
        PathLike: path to the file

    References:
        https://stackoverflow.com/questions/1158076/implement-touch-using-python

    Example:
        >>> import ubelt as ub
        >>> dpath = ub.ensure_app_cache_dir('ubelt')
        >>> fpath = join(dpath, 'touch_file')
        >>> assert not exists(fpath)
        >>> ub.touch(fpath)
        >>> assert exists(fpath)
        >>> os.unlink(fpath)
    """
    if verbose:
        print('Touching file {}'.format(fpath))
    if six.PY2:  # nocover
        # Python2 fallback: append-mode open creates the file if needed,
        # then utime(None) sets atime/mtime to the current time
        with open(fpath, 'a'):
            os.utime(fpath, None)
    else:
        # O_CREAT creates the file when missing; O_APPEND avoids truncation
        flags = os.O_CREAT | os.O_APPEND
        with os.fdopen(os.open(fpath, flags=flags, mode=mode, dir_fd=dir_fd)) as f:
            # Prefer updating times via the open fd when the platform
            # supports it, otherwise fall back to the path.
            # NOTE(review): `dir_fd=None if os.supports_fd else dir_fd` tests
            # the truthiness of the whole supports_fd set (always true on
            # most platforms), not membership — confirm this matches intent.
            os.utime(f.fileno() if os.utime in os.supports_fd else fpath,
                     dir_fd=None if os.supports_fd else dir_fd, **kwargs)
    return fpath
def delete(path, verbose=False):
    """
    Removes a file or recursively removes a directory.

    If the path does not exist, this does nothing (except cleaning up
    broken symlinks and windows-only phantom entries).

    Args:
        path (PathLike): file or directory to remove
        verbose (bool): if True prints what is being done

    SeeAlso:
        send2trash - A cross-platform Python package for sending files
        to the trash instead of irreversibly deleting them.
        https://github.com/hsoft/send2trash
    """
    if os.path.exists(path):
        if os.path.islink(path):
            if verbose:  # nocover
                print('Deleting symbolic link="{}"'.format(path))
            os.unlink(path)
        elif os.path.isfile(path):
            if verbose:  # nocover
                print('Deleting file="{}"'.format(path))
            os.unlink(path)
        elif os.path.isdir(path):
            if verbose:  # nocover
                print('Deleting directory="{}"'.format(path))
            if sys.platform.startswith('win32'):  # nocover
                # Workaround bug that prevents shutil from working if
                # the directory contains junctions
                from ubelt import _win32_links
                _win32_links._win32_rmtree(path, verbose=verbose)
            else:
                import shutil
                shutil.rmtree(path)
    else:
        # Path does not "exist", but it may still be a broken link or a
        # windows-only phantom entry that needs explicit cleanup
        if os.path.islink(path):
            if verbose:  # nocover
                print('Deleting broken link="{}"'.format(path))
            os.unlink(path)
        elif os.path.isdir(path):  # nocover
            # Only on windows will a file be a directory and not exist
            if verbose:
                print('Deleting broken directory link="{}"'.format(path))
            os.rmdir(path)
        elif os.path.isfile(path):  # nocover
            # This is a windows only case
            if verbose:
                print('Deleting broken file link="{}"'.format(path))
            os.unlink(path)
        else:
            if verbose:  # nocover
                print('Not deleting non-existant path="{}"'.format(path))
def repr2(data, **kwargs):
    """
    Makes a pretty and easy-to-doctest string representation!

    This is an alternative to repr and `pprint.pformat` that attempts to be
    both more configurable and generate output that is consistent between
    python versions.

    Args:
        data (object): an arbitrary python object
        **kwargs: formatting options; frequently used keys (several have
            short aliases) include:
            stritems/si, strkeys/sk, strvals/sv (bool): use str instead of
                repr for items / keys / values.
            nl/newlines (int | bool): number of top-level nestings to place
                a newline after (True means all nesting levels).
            nobr/nobraces (bool): omit outer braces for containers.
            cbr/compact_brace (bool): compact K&R-style brace placement.
            trailsep/trailing_sep (bool): separator after the last item.
            explicit (bool): use `dict(k1=v1, ...)` instead of `{k1: v1}`.
            precision (int): float formatting precision.
            kvsep (str): separator between keys and values (default ': ').
            itemsep (str): separator between items (default ' ').
            sort (bool): sort unordered collections in the output.
            suppress_small, max_line_width, with_dtype: ndarray options
                forwarded to `numpy.array2string`.
            extensions: custom per-type formatter lookup, consulted first.

    Returns:
        str: the formatted text. (With the internal-only kwarg
            `_return_info=True` a `(str, dict)` tuple is returned instead;
            `_root_info` is likewise internal.)
    """
    extensions = kwargs.get('extensions', None)
    want_info = kwargs.get('_return_info', False)
    kwargs['_root_info'] = _rectify_root_info(kwargs.get('_root_info', None))
    outstr = None
    _leaf_info = None
    # Custom user-supplied formatters get the first chance at the data
    if extensions:
        formatter = extensions.lookup(data)
        if formatter is not None:
            outstr = formatter(data, **kwargs)
    if outstr is None:
        # Containers recurse through the dict / list formatters
        if isinstance(data, dict):
            outstr, _leaf_info = _format_dict(data, **kwargs)
        elif isinstance(data, (list, tuple, set, frozenset)):
            outstr, _leaf_info = _format_list(data, **kwargs)
    if outstr is None:
        # check any globally registered functions for special formatters
        formatter = _FORMATTER_EXTENSIONS.lookup(data)
        if formatter is not None:
            outstr = formatter(data, **kwargs)
        else:
            outstr = _format_object(data, **kwargs)
    if want_info:
        return outstr, _rectify_leaf_info(_leaf_info)
    return outstr
def _format_list(list_, **kwargs):
    """
    Makes a pretty printable / human-readable string representation of a
    sequence. In most cases this string could be evaled.
    Args:
        list_ (list): input list
        **kwargs: nl, newlines, packed, nobr, nobraces, itemsep, trailing_sep,
            strvals indent_, precision, use_numpy, with_dtype, force_dtype,
            stritems, strkeys, explicit, sort, key_order, maxlen
    Returns:
        Tuple[str, Dict] : retstr, _leaf_info
    Example:
        >>> print(_format_list([])[0])
        []
        >>> print(_format_list([], nobr=True)[0])
        []
        >>> print(_format_list([1], nl=0)[0])
        [1]
        >>> print(_format_list([1], nobr=True)[0])
        1,
    """
    kwargs['_root_info'] = _rectify_root_info(kwargs.get('_root_info', None))
    kwargs['_root_info']['depth'] += 1
    # NOTE: both pop() calls always execute, so 'nl' and 'newlines' are both
    # consumed from kwargs here; 'nl' wins when both keys are present.
    newlines = kwargs.pop('nl', kwargs.pop('newlines', 1))
    # children see the newline budget decremented by one (countdown)
    kwargs['nl'] = _rectify_countdown_or_bool(newlines)
    nobraces = kwargs.pop('nobr', kwargs.pop('nobraces', False))
    itemsep = kwargs.get('itemsep', ' ')
    compact_brace = kwargs.get('cbr', kwargs.get('compact_brace', False))
    # kwargs['cbr'] = _rectify_countdown_or_bool(compact_brace)
    itemstrs, _leaf_info = _list_itemstrs(list_, **kwargs)
    if len(itemstrs) == 0:
        nobraces = False  # force braces to prevent empty output
    # pick the brace pair from the concrete container type
    is_tuple = isinstance(list_, tuple)
    is_set = isinstance(list_, (set, frozenset,))
    if nobraces:
        lbr, rbr = '', ''
    elif is_tuple:
        lbr, rbr = '(', ')'
    elif is_set:
        lbr, rbr = '{', '}'
    else:
        lbr, rbr = '[', ']'
    # Doesn't actually put in trailing comma if on same line
    # (default is truthy only when using newlines and at least one item)
    trailing_sep = kwargs.get('trailsep', kwargs.get('trailing_sep', newlines > 0 and len(itemstrs)))
    # The trailing separator is always needed for single item tuples
    if is_tuple and len(list_) <= 1:
        trailing_sep = True
    if len(itemstrs) == 0:
        newlines = False
    retstr = _join_itemstrs(itemstrs, itemsep, newlines, _leaf_info, nobraces,
                            trailing_sep, compact_brace, lbr, rbr)
    return retstr, _leaf_info
def _format_dict(dict_, **kwargs):
    """
    Makes a pretty printable / human-readable string representation of a
    dictionary. In most cases this string could be evaled.
    Args:
        dict_ (dict): a dictionary
        **kwargs: si, stritems, strkeys, strvals, sk, sv, nl, newlines, nobr,
            nobraces, cbr, compact_brace, trailing_sep,
            explicit, itemsep, precision, kvsep, sort
    Returns:
        Tuple[str, Dict] : retstr, _leaf_info
    Kwargs:
        sort (None): if True, sorts ALL collections and subcollections,
            note, collections with undefined orders (e.g. dicts, sets) are
            sorted by default. (default = None)
        nl (int): preferred alias for newline. can be a countdown variable
            (default = None)
        explicit (int): can be a countdown variable. if True, uses
            dict(a=b) syntax instead of {'a': b}
        nobr (bool): removes outer braces (default = False)
    """
    kwargs['_root_info'] = _rectify_root_info(kwargs.get('_root_info', None))
    kwargs['_root_info']['depth'] += 1
    # 'si' (stringify items) implies both string keys and string values
    stritems = kwargs.pop('si', kwargs.pop('stritems', False))
    if stritems:
        kwargs['strkeys'] = True
        kwargs['strvals'] = True
    # NOTE: each pop pair always consumes both the short and long alias
    kwargs['strkeys'] = kwargs.pop('sk', kwargs.pop('strkeys', False))
    kwargs['strvals'] = kwargs.pop('sv', kwargs.pop('strvals', False))
    newlines = kwargs.pop('nl', kwargs.pop('newlines', True))
    # children see the newline budget decremented by one (countdown)
    kwargs['nl'] = _rectify_countdown_or_bool(newlines)
    nobraces = kwargs.pop('nobr', kwargs.pop('nobraces', False))
    compact_brace = kwargs.get('cbr', kwargs.get('compact_brace', False))
    # kwargs['cbr'] = _rectify_countdown_or_bool(compact_brace)
    # Doesn't actually put in trailing comma if on same line
    trailing_sep = kwargs.get('trailsep', kwargs.get('trailing_sep', newlines > 0))
    explicit = kwargs.get('explicit', False)
    itemsep = kwargs.get('itemsep', ' ')
    if len(dict_) == 0:
        # short-circuit: empty dict needs no item formatting
        retstr = 'dict()' if explicit else '{}'
        _leaf_info = None
    else:
        itemstrs, _leaf_info = _dict_itemstrs(dict_, **kwargs)
        if nobraces:
            lbr, rbr = '', ''
        elif explicit:
            lbr, rbr = 'dict(', ')'
        else:
            lbr, rbr = '{', '}'
        retstr = _join_itemstrs(itemstrs, itemsep, newlines, _leaf_info, nobraces,
                                trailing_sep, compact_brace, lbr, rbr)
    return retstr, _leaf_info
def _join_itemstrs(itemstrs, itemsep, newlines, _leaf_info, nobraces,
trailing_sep, compact_brace, lbr, rbr):
"""
Joins string-ified items with separators newlines and container-braces.
"""
# positive newlines means start counting from the root
use_newline = newlines > 0
# negative countdown values mean start counting from the leafs
# if compact_brace < 0:
# compact_brace = (-compact_brace) >= _leaf_info['max_height']
if newlines < 0:
use_newline = (-newlines) < _leaf_info['max_height']
if use_newline:
sep = ',\n'
if nobraces:
body_str = sep.join(itemstrs)
if trailing_sep and len(itemstrs) > 0:
body_str += ','
retstr = body_str
else:
if compact_brace:
# Why must we modify the indentation below and not here?
# prefix = ''
# rest = [ub.indent(s, prefix) for s in itemstrs[1:]]
# indented = itemstrs[0:1] + rest
indented = itemstrs
else:
import ubelt as ub
prefix = ' ' * 4
indented = [ub.indent(s, prefix) for s in itemstrs]
body_str = sep.join(indented)
if trailing_sep and len(itemstrs) > 0:
body_str += ','
if compact_brace:
# Why can we modify the indentation here but not above?
braced_body_str = (lbr + body_str.replace('\n', '\n ') + rbr)
else:
braced_body_str = (lbr + '\n' + body_str + '\n' + rbr)
retstr = braced_body_str
else:
sep = ',' + itemsep
body_str = sep.join(itemstrs)
if trailing_sep and len(itemstrs) > 0:
body_str += ','
retstr = (lbr + body_str + rbr)
return retstr |
def _dict_itemstrs(dict_, **kwargs):
    """
    Create a string representation for each item in a dict.

    Returns:
        Tuple[List[str], Dict]: itemstrs and leaf info holding the maximum
            nesting height seen among the values (plus one for this level).

    Example:
        >>> from ubelt.util_format import *
        >>> dict_ = {'b': .1, 'l': 'st', 'g': 1.0, 's': 10, 'm': 0.9, 'w': .5}
        >>> kwargs = {'strkeys': True}
        >>> itemstrs, _ = _dict_itemstrs(dict_, **kwargs)
        >>> char_order = [p[0] for p in itemstrs]
        >>> assert char_order == ['b', 'g', 'l', 'm', 's', 'w']
    """
    import ubelt as ub
    explicit = kwargs.get('explicit', False)
    # explicit may be a countdown value; decrement for children
    kwargs['explicit'] = _rectify_countdown_or_bool(explicit)
    precision = kwargs.get('precision', None)
    kvsep = kwargs.get('kvsep', ': ')
    if explicit:
        # explicit dict(a=b) syntax uses '=' between key and value
        kvsep = '='
    def make_item_str(key, val):
        # Format one key/value pair; returns (item_str, leaf_info_of_value)
        if explicit or kwargs.get('strkeys', False):
            key_str = six.text_type(key)
        else:
            key_str = repr2(key, precision=precision, newlines=0)
        prefix = key_str + kvsep
        # NOTE: mutates the shared kwargs dict seen by later iterations
        kwargs['_return_info'] = True
        val_str, _leaf_info = repr2(val, **kwargs)
        # If the first line does not end with an open nest char
        # (e.g. for ndarrays), otherwise we need to worry about
        # residual indentation.
        pos = val_str.find('\n')
        first_line = val_str if pos == -1 else val_str[:pos]
        compact_brace = kwargs.get('cbr', kwargs.get('compact_brace', False))
        if compact_brace or not first_line.rstrip().endswith(tuple('([{<')):
            rest = '' if pos == -1 else val_str[pos:]
            val_str = first_line.lstrip() + rest
            if '\n' in prefix:
                # Fix issue with keys that span new lines
                item_str = prefix + val_str
            else:
                # align multi-line values horizontally after the key prefix
                item_str = ub.hzcat([prefix, val_str])
        else:
            item_str = prefix + val_str
        return item_str, _leaf_info
    items = list(six.iteritems(dict_))
    _tups = [make_item_str(key, val) for (key, val) in items]
    itemstrs = [t[0] for t in _tups]
    # leaf height of this dict is one more than its tallest value
    max_height = max([t[1]['max_height'] for t in _tups]) if _tups else 0
    _leaf_info = {
        'max_height': max_height + 1,
    }
    sort = kwargs.get('sort', None)
    if sort is None:
        # Force ordering on unordered dicts
        sort = True
        if isinstance(dict_, collections.OrderedDict):
            # never sort ordered dicts; they are perfect just the way they are!
            sort = False
    if sort:
        itemstrs = _sort_itemstrs(items, itemstrs)
    return itemstrs, _leaf_info
def _list_itemstrs(list_, **kwargs):
    """
    Format every element of a sequence individually.

    Returns:
        Tuple[List[str], Dict]: per-item strings and leaf info holding the
            maximum nesting height among the items (plus one for this level).
    """
    items = list(list_)
    kwargs['_return_info'] = True
    pairs = [repr2(value, **kwargs) for value in items]
    itemstrs = [text for text, _ in pairs]
    heights = [info['max_height'] for _, info in pairs]
    _leaf_info = {
        'max_height': (max(heights) if heights else 0) + 1,
    }
    sort = kwargs.get('sort', None)
    if sort is None:
        # sets have no inherent order, so impose one by default
        sort = isinstance(list_, (set, frozenset))
    if sort:
        itemstrs = _sort_itemstrs(items, itemstrs)
    return itemstrs, _leaf_info
def _sort_itemstrs(items, itemstrs):
    """
    Reorder ``itemstrs`` by sorting the corresponding ``items``, falling
    back to ordering by the rendered strings when the values themselves
    cannot be compared.
    """
    import ubelt as ub
    try:
        if _peek_isinstance(items, (set, frozenset)):
            # Set ordering is not unique; order by the string values instead.
            raise TypeError
        order = ub.argsort(items)
    except TypeError:
        # unorderable values (mixed types etc.): use string order
        order = ub.argsort(itemstrs)
    return [itemstrs[idx] for idx in order]
def _rectify_countdown_or_bool(count_or_bool):
"""
used by recursive functions to specify which level to turn a bool on in
counting down yields True, True, ..., False
counting up yields False, False, False, ... True
Args:
count_or_bool (bool or int): if positive and an integer, it will count
down, otherwise it will remain the same.
Returns:
int or bool: count_or_bool_
CommandLine:
python -m utool.util_str --test-_rectify_countdown_or_bool
Example:
>>> from ubelt.util_format import _rectify_countdown_or_bool # NOQA
>>> count_or_bool = True
>>> a1 = (_rectify_countdown_or_bool(2))
>>> a2 = (_rectify_countdown_or_bool(1))
>>> a3 = (_rectify_countdown_or_bool(0))
>>> a4 = (_rectify_countdown_or_bool(-1))
>>> a5 = (_rectify_countdown_or_bool(-2))
>>> a6 = (_rectify_countdown_or_bool(True))
>>> a7 = (_rectify_countdown_or_bool(False))
>>> a8 = (_rectify_countdown_or_bool(None))
>>> result = [a1, a2, a3, a4, a5, a6, a7, a8]
>>> print(result)
[1, 0, 0, -1, -2, True, False, False]
"""
if count_or_bool is True or count_or_bool is False:
count_or_bool_ = count_or_bool
elif isinstance(count_or_bool, int):
if count_or_bool == 0:
return 0
elif count_or_bool > 0:
count_or_bool_ = count_or_bool - 1
else:
# We dont countup negatives anymore
count_or_bool_ = count_or_bool
else:
count_or_bool_ = False
return count_or_bool_ |
def register(self, type):
    """
    Decorator factory that registers a custom ub.repr2 formatting function
    for ``type`` (a single type or a tuple of types).
    """
    def _decorator(func):
        # normalize to an iterable of types, then record each one
        type_list = type if isinstance(type, tuple) else (type,)
        for t in type_list:
            self.func_registry[t] = func
        return func
    return _decorator
def lookup(self, data):
    """
    Return the first registered formatter whose type matches ``data``,
    or None when nothing applies. Lazy initializers run first so optional
    extensions get a chance to register themselves.
    """
    for init in self.lazy_init:
        init()
    for registered_type, func in self.func_registry.items():
        if isinstance(data, registered_type):
            return func
    return None
def _register_numpy_extensions(self):
"""
CommandLine:
python -m ubelt.util_format FormatterExtensions._register_numpy_extensions
Example:
>>> import sys
>>> import pytest
>>> import ubelt as ub
>>> if not ub.modname_to_modpath('numpy'):
... raise pytest.skip()
>>> # xdoctest: +IGNORE_WHITESPACE
>>> import numpy as np
>>> data = np.array([[.2, 42, 5], [21.2, 3, .4]])
>>> print(ub.repr2(data))
np.array([[ 0.2, 42. , 5. ],
[21.2, 3. , 0.4]], dtype=np.float64)
>>> print(ub.repr2(data, with_dtype=False))
np.array([[ 0.2, 42. , 5. ],
[21.2, 3. , 0.4]])
>>> print(ub.repr2(data, strvals=True))
[[ 0.2, 42. , 5. ],
[21.2, 3. , 0.4]]
>>> data = np.empty((0, 10), dtype=np.float64)
>>> print(ub.repr2(data, strvals=False))
np.empty((0, 10), dtype=np.float64)
>>> print(ub.repr2(data, strvals=True))
[]
>>> data = np.ma.empty((0, 10), dtype=np.float64)
>>> print(ub.repr2(data, strvals=False))
np.ma.empty((0, 10), dtype=np.float64)
"""
import numpy as np
@self.register(np.ndarray)
def format_ndarray(data, **kwargs):
import re
strvals = kwargs.get('sv', kwargs.get('strvals', False))
itemsep = kwargs.get('itemsep', ' ')
precision = kwargs.get('precision', None)
suppress_small = kwargs.get('supress_small', None)
max_line_width = kwargs.get('max_line_width', None)
with_dtype = kwargs.get('with_dtype', kwargs.get('dtype', not strvals))
newlines = kwargs.pop('nl', kwargs.pop('newlines', 1))
# if with_dtype and strvals:
# raise ValueError('cannot format with strvals and dtype')
separator = ',' + itemsep
if strvals:
prefix = ''
suffix = ''
else:
modname = type(data).__module__
# substitute shorthand for numpy module names
np_nice = 'np'
modname = re.sub('\\bnumpy\\b', np_nice, modname)
modname = re.sub('\\bma.core\\b', 'ma', modname)
class_name = type(data).__name__
if class_name == 'ndarray':
class_name = 'array'
prefix = modname + '.' + class_name + '('
if with_dtype:
dtype_repr = data.dtype.name
# dtype_repr = np.core.arrayprint.dtype_short_repr(data.dtype)
suffix = ',{}dtype={}.{})'.format(itemsep, np_nice, dtype_repr)
else:
suffix = ')'
if not strvals and data.size == 0 and data.shape != (0,):
# Special case for displaying empty data
prefix = modname + '.empty('
body = repr(tuple(map(int, data.shape)))
else:
body = np.array2string(data, precision=precision,
separator=separator,
suppress_small=suppress_small,
prefix=prefix,
max_line_width=max_line_width)
if not newlines:
# remove newlines if we need to
body = re.sub('\n *', '', body)
formatted = prefix + body + suffix
return formatted
# Hack, make sure we also register numpy floats
self.register(np.float32)(self.func_registry[float]) |
def _rectify_hasher(hasher):
    """
    Resolve a hasher specification into a hashlib-compatible constructor.

    Accepts NoParam/'default' (module default), optional xxhash names, any
    name from hashlib.algorithms_available, a hashlib class, or an already
    constructed HASH instance (wrapped so it can still be "called").

    Raises:
        KeyError: if a string does not name an available algorithm

    Example:
        >>> assert _rectify_hasher('sha1') is hashlib.sha1
        >>> assert _rectify_hasher('md5') is hashlib.md5
        >>> assert _rectify_hasher(hashlib.sha1) is hashlib.sha1
    """
    if xxhash is not None:  # pragma: nobranch
        # optional xxhash backends take priority when requested by name
        if hasher in {'xxh32', 'xx32', 'xxhash'}:
            return xxhash.xxh32
        if hasher in {'xxh64', 'xx64'}:
            return xxhash.xxh64
    if hasher is NoParam or hasher == 'default':
        return DEFAULT_HASHER
    if isinstance(hasher, six.string_types):
        if hasher not in hashlib.algorithms_available:
            raise KeyError('unknown hasher: {}'.format(hasher))
        return getattr(hashlib, hasher)
    if isinstance(hasher, HASH):
        # already an instance: wrap it in a callable so the external
        # "construct then use" syntax does not need to change
        return lambda: hasher
    return hasher
def _rectify_base(base):
    """
    Expand a base shorthand (key or integer) into its full alphabet list.

    Accepts NoParam/'default', 26/'abc'/'alpha', 16/'hex', 10/'dec', or an
    explicit list/tuple of symbols (returned unchanged).

    Raises:
        TypeError: if base is not a known key, list, or tuple
    """
    if base is NoParam or base == 'default':
        return DEFAULT_ALPHABET
    if base in (26, 'abc', 'alpha'):
        return _ALPHABET_26
    if base in (16, 'hex'):
        return _ALPHABET_16
    if base in (10, 'dec'):
        return _ALPHABET_10
    if isinstance(base, (list, tuple)):
        # explicit alphabets pass through untouched
        return base
    raise TypeError(
        'Argument `base` must be a key, list, or tuple; not {}'.format(
            type(base)))
def _hashable_sequence(data, types=True):
    r"""
    Return the list of byte chunks that ``hash_data`` would feed to a
    hasher, without actually hashing anything.

    Example:
        >>> data = [2, (3, 4)]
        >>> assert b''.join(_hashable_sequence(data, types=False)) == (
        ...     b'_[_\x02_,__[_\x03_,_\x04_,__]__]_')
    """
    # _HashTracer records every update() call instead of digesting it
    tracer = _HashTracer()
    _update_hasher(tracer, data, types=types)
    return tracer.sequence
def _convert_to_hashable(data, types=True):
    r"""
    Convert ``data`` into a hashable byte representation.

    Args:
        data (object): ordered data with structure
        types (bool): include type prefixes in the hash

    Returns:
        tuple(bytes, bytes): a type-hint prefix and the byte payload
            (the prefix is empty when ``types`` is False)

    Raises:
        TypeError: if data has no registered hash method

    Example:
        >>> assert _convert_to_hashable(None) == (b'NULL', b'NONE')
        >>> assert _convert_to_hashable('string') == (b'TXT', b'string')
        >>> assert _convert_to_hashable(1) == (b'INT', b'\x01')
        >>> assert _convert_to_hashable(1.0) == (b'FLT', b'\x01/\x01')
    """
    # Fast paths for the most common builtin types, checked in order
    if data is None:
        prefix, hashable = b'NULL', b'NONE'
    elif isinstance(data, six.binary_type):
        prefix, hashable = b'TXT', data
    elif isinstance(data, six.text_type):
        # normalize unicode to a deterministic byte encoding
        prefix, hashable = b'TXT', data.encode('utf-8')
    elif isinstance(data, _intlike):
        # warnings.warn('Hashing ints is slow, numpy is prefered')
        prefix, hashable = b'INT', _int_to_bytes(data)
    elif isinstance(data, float):
        # exact rational form avoids float-formatting ambiguity
        numer, denom = float(data).as_integer_ratio()
        prefix = b'FLT'
        hashable = _int_to_bytes(numer) + b'/' + _int_to_bytes(denom)
    else:
        # fall back to dynamically registered extension types
        hash_func = _HASHABLE_EXTENSIONS.lookup(data)
        prefix, hashable = hash_func(data)
    return (prefix, hashable) if types else (b'', hashable)
def _update_hasher(hasher, data, types=True):
    """
    Converts `data` into a byte representation and calls update on the hasher
    `hashlib.HASH` algorithm.

    Nested containers are framed with structure bytes (``_[_``, ``_,_``,
    ``_]_``) so that e.g. ``[1, [2]]`` and ``[1, 2]`` hash differently.

    Args:
        hasher (HASH): instance of a hashlib algorithm
        data (object): ordered data with structure
        types (bool): include type prefixes in the hash
    Example:
        >>> hasher = hashlib.sha512()
        >>> data = [1, 2, ['a', 2, 'c']]
        >>> _update_hasher(hasher, data)
        >>> print(hasher.hexdigest()[0:8])
        e2c67675
        2ba8d82b
    """
    # Determine if the data should be hashed directly or iterated through
    if isinstance(data, (tuple, list, zip)):
        needs_iteration = True
    else:
        needs_iteration = any(check(data) for check in
                              _HASHABLE_EXTENSIONS.iterable_checks)
    if needs_iteration:
        # Denote that we are hashing over an iterable
        # Multiple structure bytes makes it harder accidently make conflicts
        SEP = b'_,_'
        ITER_PREFIX = b'_[_'
        ITER_SUFFIX = b'_]_'
        iter_ = iter(data)
        hasher.update(ITER_PREFIX)
        # first, try to nest quickly without recursive calls
        # (this works if all data in the sequence is a non-iterable)
        try:
            for item in iter_:
                prefix, hashable = _convert_to_hashable(item, types)
                binary_data = prefix + hashable + SEP
                hasher.update(binary_data)
        except TypeError:
            # need to use recursive calls
            # Update based on current item
            _update_hasher(hasher, item, types)
            # NOTE(review): no SEP is emitted between this first
            # recursively-hashed item and the next one (the fast path above
            # and the loop below both do emit SEPs). The documented byte
            # sequences elsewhere in this file depend on this exact layout,
            # so "fixing" it would change every existing hash — presumably
            # kept deliberately for backward compatibility; confirm upstream.
            for item in iter_:
                # Ensure the items have a spacer between them
                _update_hasher(hasher, item, types)
                hasher.update(SEP)
        hasher.update(ITER_SUFFIX)
    else:
        # scalar / directly-hashable payload
        prefix, hashable = _convert_to_hashable(data, types)
        binary_data = prefix + hashable
        hasher.update(binary_data)
def _convert_hexstr_base(hexstr, base):
    r"""
    Re-encode a hexadecimal string using the symbols of another alphabet.

    Args:
        hexstr (str): string of hexidecimal symbols to convert
        base (list): symbols of the conversion base

    Returns:
        str: the value re-encoded in the target alphabet (a leading '-' is
            preserved for negative inputs; zero encodes as '0')

    Example:
        >>> print(_convert_hexstr_base('aafffff1', _ALPHABET_16))
        aafffff1
        >>> print(_convert_hexstr_base('0', _ALPHABET_26))
        0
    """
    if base is _ALPHABET_16:
        # already hex; nothing to convert
        return hexstr
    baselen = len(base)
    value = int(hexstr, 16)
    if value == 0:
        return '0'
    negative = value < 0
    if negative:
        value = -value
    # repeated divmod produces digits least-significant first
    digits = []
    while value:
        value, remainder = divmod(value, baselen)
        digits.append(base[remainder])
    if negative:
        digits.append('-')
    return ''.join(reversed(digits))
def _digest_hasher(hasher, hashlen, base):
    """
    Finalize a hasher into a (possibly truncated) string in ``base``.
    Counterpart to ``_update_hasher``.
    """
    # 1) hex digest, 2) re-encode in the requested alphabet, 3) truncate
    hex_text = hasher.hexdigest()
    converted = _convert_hexstr_base(hex_text, base)
    return converted[:hashlen]
def hash_data(data, hasher=NoParam, base=NoParam, types=False,
              hashlen=NoParam, convert=False):
    """
    Get a unique hash depending on the state of the data.

    Args:
        data (object):
            Any sort of loosely organized data
        hasher (str or HASHER):
            Hash algorithm from hashlib, defaults to `sha512`.
        base (str or List[str]):
            Shorthand key or a list of symbols. Valid keys are: 'abc', 'hex',
            and 'dec'. Defaults to 'hex'.
        types (bool):
            If True data types are included in the hash, otherwise only the
            raw data is hashed. Defaults to False.
        hashlen (int):
            Maximum number of symbols in the returned hash. If not specified,
            all are returned. DEPRECATED. Use slice syntax instead.
        convert (bool, optional, default=True):
            if True, try and convert the data to json an the json is hashed
            instead. This can improve runtime in some instances, however the
            hash may differ from the case where convert=False.

    Returns:
        str: text - hash string

    Notes:
        alphabet26 is a pretty nice base, I recommend it.
        However we default to hex because it is standard.
        This means the output of hashdata with base=sha1 will be the same as
        the output of `sha1sum`.

    Example:
        >>> import ubelt as ub
        >>> print(ub.hash_data([1, 2, (3, '4')], convert=False))
        60b758587f599663931057e6ebdf185a...
    """
    if convert and isinstance(data, six.string_types):  # nocover
        try:
            data = json.dumps(data)
        except TypeError:
            # not json-serializable; fall through and hash the raw object
            pass
    # normalize all the knobs, then stream the data into the hasher
    base = _rectify_base(base)
    hashlen = _rectify_hashlen(hashlen)
    hasher = _rectify_hasher(hasher)()
    _update_hasher(hasher, data, types=types)
    return _digest_hasher(hasher, hashlen, base)
def hash_file(fpath, blocksize=65536, stride=1, hasher=NoParam,
              hashlen=NoParam, base=NoParam):
    """
    Hashes the data in a file on disk.

    Args:
        fpath (PathLike): file path string
        blocksize (int): 2 ** 16. Affects speed of reading file
        stride (int): strides > 1 skip data to hash, useful for faster
            hashing, but less accurate, also makes hash dependant on
            blocksize.
        hasher (HASH): hash algorithm from hashlib, defaults to `sha512`.
        hashlen (int): maximum number of symbols in the returned hash. If
            not specified, all are returned.
        base (list, str): list of symbols or shorthand key. Valid keys are
            'abc', 'hex', and 'dec'. Defaults to 'hex'.

    Returns:
        str: the hash string

    Notes:
        For better hashes keep stride = 1; for faster hashes set stride > 1
        (blocksize matters when stride > 1). With defaults the result
        matches the corresponding coreutils tool (e.g. `sha1sum`).

    References:
        http://stackoverflow.com/questions/3431825/md5-checksum-of-a-file
        http://stackoverflow.com/questions/5001893/when-to-use-sha-1-vs-sha-2

    Example:
        >>> import ubelt as ub
        >>> from os.path import join
        >>> fpath = join(ub.ensure_app_cache_dir('ubelt'), 'tmp.txt')
        >>> ub.writeto(fpath, 'foobar')
        >>> print(ub.hash_file(fpath, hasher='sha1', base='hex'))
        8843d7f92416211de9ebb963ff4ce28125932878
    """
    base = _rectify_base(base)
    hashlen = _rectify_hashlen(hashlen)
    hasher = _rectify_hasher(hasher)()
    with open(fpath, 'rb') as file:
        chunk = file.read(blocksize)
        if stride > 1:
            # sparse mode: skip (stride - 1) blocks between reads
            while chunk:
                hasher.update(chunk)
                file.seek(blocksize * (stride - 1), 1)
                chunk = file.read(blocksize)
        else:
            # dense mode: hash the entire file block by block
            while chunk:
                hasher.update(chunk)
                chunk = file.read(blocksize)
    # Get the hashed representation
    return _digest_hasher(hasher, hashlen, base)
def register(self, hash_types):
    """
    Decorator factory that registers a hashing function for data of the
    given type(s). Used to teach `hash_data` about custom classes and,
    internally, about non-builtin objects like ndarrays and uuids.

    The registered function must return a tuple of bytes: a small prefix
    hinting at the data type, and the raw bytes to hash.

    Args:
        hash_types (class or tuple of classes): type(s) to register

    Returns:
        func: closure to be used as the decorator
    """
    # normalize the argument to a list of types
    if isinstance(hash_types, (list, tuple)):
        type_list = list(hash_types)
    else:
        type_list = [hash_types]

    def _decor_closure(hash_func):
        # key on (module, name) so lookup is an O(1) dict hit
        for hash_type in type_list:
            key = (hash_type.__module__, hash_type.__name__)
            self.keyed_extensions[key] = (hash_type, hash_func)
        return hash_func
    return _decor_closure
def lookup(self, data):
    """
    Return the registered hash function for ``data``'s exact class.

    Lookup is O(1), keyed on the (module, name) pair of the type, so
    subclasses are NOT matched — only exact registered types.

    Raises:
        TypeError: if the type of ``data`` was never registered
    """
    # Maybe try using functools.singledispatch instead?
    query_hash_type = data.__class__
    key = (query_hash_type.__module__, query_hash_type.__name__)
    try:
        return self.keyed_extensions[key][1]
    except KeyError:
        raise TypeError('No registered hash func for hashable type=%r' % (
            query_hash_type))
def _register_numpy_extensions(self):
    """
    Numpy extensions are builtin.

    Registers: an iterable-check for object ndarrays, a hash function for
    ndarrays, conversions for numpy int/float scalars, and a hash function
    for RandomState.
    """
    # system checks
    import numpy as np
    numpy_floating_types = (np.float16, np.float32, np.float64)
    if hasattr(np, 'float128'):  # nocover
        # float128 only exists on some platforms
        numpy_floating_types = numpy_floating_types + (np.float128,)
    @self.add_iterable_check
    def is_object_ndarray(data):
        # ndarrays of objects cannot be hashed directly.
        return isinstance(data, np.ndarray) and data.dtype.kind == 'O'
    @self.register(np.ndarray)
    def hash_numpy_array(data):
        """
        Return a (prefix, bytes) pair for a non-object ndarray; the bytes
        encode shape and dtype so reshaped/retyped arrays hash differently.

        Example:
            >>> import ubelt as ub
            >>> if not ub.modname_to_modpath('numpy'):
            ...     raise pytest.skip()
            >>> import numpy as np
            >>> data_f32 = np.zeros((3, 3, 3), dtype=np.float64)
            >>> data_i64 = np.zeros((3, 3, 3), dtype=np.int64)
            >>> data_i32 = np.zeros((3, 3, 3), dtype=np.int32)
            >>> hash_f64 = _hashable_sequence(data_f32, types=True)
            >>> hash_i64 = _hashable_sequence(data_i64, types=True)
            >>> hash_i32 = _hashable_sequence(data_i64, types=True)
            >>> assert hash_i64 != hash_f64
            >>> assert hash_i64 != hash_i32
        """
        if data.dtype.kind == 'O':
            msg = 'directly hashing ndarrays with dtype=object is unstable'
            raise TypeError(msg)
        else:
            # tobytes() views the array in 1D (via ravel())
            # encode the shape as well
            header = b''.join(_hashable_sequence((len(data.shape), data.shape)))
            dtype = b''.join(_hashable_sequence(data.dtype.descr))
            hashable = header + dtype + data.tobytes()
            prefix = b'NDARR'
            return prefix, hashable
    @self.register((np.int64, np.int32, np.int16, np.int8) +
                   (np.uint64, np.uint32, np.uint16, np.uint8))
    def _hash_numpy_int(data):
        # numpy int scalars hash identically to builtin ints
        return _convert_to_hashable(int(data))
    @self.register(numpy_floating_types)
    def _hash_numpy_float(data):
        # numpy float scalars hash identically to builtin floats
        return _convert_to_hashable(float(data))
    @self.register(np.random.RandomState)
    def _hash_numpy_random_state(data):
        """
        Hash a RandomState via its get_state() tuple.

        Example:
            >>> import ubelt as ub
            >>> if not ub.modname_to_modpath('numpy'):
            ...     raise pytest.skip()
            >>> import numpy as np
            >>> rng = np.random.RandomState(0)
            >>> _hashable_sequence(rng, types=True)
        """
        hashable = b''.join(_hashable_sequence(data.get_state()))
        prefix = b'RNG'
        return prefix, hashable
def _register_builtin_class_extensions(self):
    """
    Register hashing extensions for a selection of classes included in
    python stdlib (currently uuid.UUID and collections.OrderedDict).

    Example:
        >>> data = uuid.UUID('7e9d206b-dc02-4240-8bdb-fffe858121d0')
        >>> print(hash_data(data, base='abc', hasher='sha512', types=True)[0:8])
        cryarepd
        >>> data = OrderedDict([('a', 1), ('b', 2), ('c', [1, 2, 3]),
        >>>                     (4, OrderedDict())])
        >>> print(hash_data(data, base='abc', hasher='sha512', types=True)[0:8])
        qjspicvv
        gpxtclct
    """
    @self.register(uuid.UUID)
    def _hash_uuid(data):
        # the 16-byte canonical form is already a stable representation
        hashable = data.bytes
        prefix = b'UUID'
        return prefix, hashable
    @self.register(OrderedDict)
    def _hash_ordered_dict(data):
        """
        Note, we should not be hashing dicts because they are unordered;
        OrderedDict is safe because its item order is part of its value.
        """
        hashable = b''.join(_hashable_sequence(list(data.items())))
        prefix = b'ODICT'
        return prefix, hashable
def _textio_iterlines(stream):
"""
Iterates over lines in a TextIO stream until an EOF is encountered.
This is the iterator version of stream.readlines()
"""
line = stream.readline()
while line != '':
yield line
line = stream.readline() |
def _proc_async_iter_stream(proc, stream, buffersize=1):
    """
    Read output from a subprocess stream in a background daemon thread.

    Lines are pushed into a bounded queue as they arrive; a ``None`` entry
    marks end-of-stream. Returns the queue.
    """
    from six.moves import queue
    from threading import Thread

    def _pump(proc, stream, out_queue):
        # Drain lines while the process is alive...
        while proc.poll() is None:
            out_queue.put(stream.readline())
        # ...then flush whatever remains in the stream after it exits.
        for line in _textio_iterlines(stream):
            out_queue.put(line)
        out_queue.put(None)  # signal that the stream is finished
        # stream.close()

    out_queue = queue.Queue(maxsize=buffersize)
    reader = Thread(target=_pump, args=(proc, stream, out_queue))
    reader.daemon = True  # thread dies with the program
    reader.start()
    return out_queue
def _proc_iteroutput_thread(proc):
    """
    Iterate over the stdout/stderr of `proc` line by line, using background
    reader threads (the cross-platform alternative to the select version).

    Note:
        WARNING. Current implementation might have bugs with other threads.
        This behavior was seen when using earlier versions of tqdm. I'm not
        sure if this was our bug or tqdm's. Newer versions of tqdm fix this,
        but I cannot guarantee that there isn't an issue on our end.

    Yields:
        Tuple[str, str]: oline, eline: stdout and stderr line; either may
        be None when that stream produced nothing new.

    References:
        https://stackoverflow.com/questions/375427/non-blocking-read-subproc
    """
    from six.moves import queue
    # Each stream gets its own reader thread feeding a queue
    out_queue = _proc_async_iter_stream(proc, proc.stdout)
    err_queue = _proc_async_iter_stream(proc, proc.stderr)
    out_alive = True
    err_alive = True
    # Poll both queues until each has produced its None sentinel
    while out_alive or err_alive:
        oline = eline = None
        if out_alive:  # pragma: nobranch
            try:
                oline = out_queue.get_nowait()
            except queue.Empty:
                pass
            else:
                out_alive = oline is not None
        if err_alive:
            try:
                eline = err_queue.get_nowait()
            except queue.Empty:
                pass
            else:
                err_alive = eline is not None
        if oline is not None or eline is not None:
            yield oline, eline
def _proc_iteroutput_select(proc):
    """
    Iterate over the stdout/stderr of `proc` line by line using select().

    UNIX only. Use `_proc_iteroutput_thread` instead for a cross platform
    solution based on threads.

    Yields:
        Tuple[str, str]: oline, eline: stdout and stderr line; either may
        be None when that descriptor was not ready.
    """
    from six.moves import zip_longest
    # While the process runs, block until at least one pipe has data
    while proc.poll() is None:
        fd_out = proc.stdout.fileno()
        fd_err = proc.stderr.fileno()
        ready, _, _ = select.select([fd_out, fd_err], [], [])
        oline = eline = None
        if fd_out in ready:
            oline = proc.stdout.readline()
        if fd_err in ready:
            eline = proc.stderr.readline()
        yield oline, eline
    # The process finished; flush any buffered remainder of both pipes
    for oline, eline in zip_longest(_textio_iterlines(proc.stdout),
                                    _textio_iterlines(proc.stderr)):
        yield oline, eline
def _tee_output(make_proc, stdout=None, stderr=None, backend='auto'):
    """
    Simultaneously report and capture stdout and stderr from a process.

    The subprocess returned by `make_proc` must be created using
    (stdout=subprocess.PIPE, stderr=subprocess.PIPE).
    """
    if backend == 'auto':
        # the thread backend is currently the default on all platforms
        # backend = 'select' if POSIX else 'thread'
        backend = 'thread'
    if backend == 'select':
        if not POSIX:  # nocover
            raise NotImplementedError('select is only available on posix')
        # the select-based version is stable, but slow
        _iter_output = _proc_iteroutput_select
    elif backend == 'thread':
        # the thread version is fast, but might run into issues.
        _iter_output = _proc_iteroutput_thread
    else:
        raise ValueError('backend must be select, thread, or auto')

    captured_out = []
    captured_err = []
    proc = make_proc()
    for oline, eline in _iter_output(proc):
        # forward each line to the tee targets while recording it
        if oline:
            if stdout:  # pragma: nobranch
                stdout.write(oline)
                stdout.flush()
            captured_out.append(oline)
        if eline:
            if stderr:  # pragma: nobranch
                stderr.write(eline)
                stderr.flush()
            captured_err.append(eline)
    return proc, captured_out, captured_err
def cmd(command, shell=False, detach=False, verbose=0, tee=None, cwd=None,
        env=None, tee_backend='auto', verbout=None, **kwargs):
    """
    Executes a command in a subprocess.

    The advantage of this wrapper around subprocess is that
    (1) you control if the subprocess prints to stdout,
    (2) the text written to stdout and stderr is returned for parsing,
    (3) cross platform behavior that lets you specify the command as a string
    or tuple regardless of whether or not shell=True.
    (4) ability to detach, return the process object and allow the process to
    run in the background (eventually we may return a Future object instead).

    Args:
        command (str or Sequence): bash-like command string or tuple of
            executable and args
        shell (bool): if True, process is run in shell, defaults to False.
        detach (bool): if True, process is detached and run in background,
            defaults to False.
        verbose (int): verbosity mode. Can be 0, 1, 2, or 3. Defaults to 0.
        tee (bool, optional): if True, simultaneously writes to stdout while
            capturing output from the command. If not specified, defaults to
            True if verbose > 0. If detach is True, then this argument is
            ignored.
        cwd (PathLike, optional): path to run command
        env (str, optional): environment passed to Popen
        tee_backend (str, optional): backend for tee output.
            Valid choices are: "auto", "select" (POSIX only), and "thread".
        verbout (bool, optional): DEPRECATED. Use `tee` instead.
        **kwargs: only used to support deprecated arguments

    Returns:
        dict: info - information about command status.
            if detach is False `info` contains captured standard out,
            standard error, and the return code
            if detach is True `info` contains a reference to the process.

    Notes:
        Inputs can either be text or tuple based. On UNIX we ensure conversion
        to text if shell=True, and to tuple if shell=False. On windows, the
        input is always text based. See [3] for a potential cross-platform
        shlex solution for windows.

    CommandLine:
        python -m ubelt.util_cmd cmd
        python -c "import ubelt as ub; ub.cmd('ping localhost -c 2', verbose=2)"

    References:
        [1] https://stackoverflow.com/questions/11495783/redirect-subprocess-stderr-to-stdout
        [2] https://stackoverflow.com/questions/7729336/how-can-i-print-and-display-subprocess-stdout-and-stderr-output-without-distorti
        [3] https://stackoverflow.com/questions/33560364/python-windows-parsing-command-lines-with-shlex

    Example:
        >>> info = cmd(('echo', 'simple cmdline interface'), verbose=1)
        simple cmdline interface
        >>> assert info['ret'] == 0
        >>> assert info['out'].strip() == 'simple cmdline interface'
        >>> assert info['err'].strip() == ''

    Doctest:
        >>> info = cmd('echo str noshell', verbose=0)
        >>> assert info['out'].strip() == 'str noshell'

    Doctest:
        >>> # windows echo will output extra single quotes
        >>> info = cmd(('echo', 'tuple noshell'), verbose=0)
        >>> assert info['out'].strip().strip("'") == 'tuple noshell'

    Doctest:
        >>> # Note this command is formatted to work on win32 and unix
        >>> info = cmd('echo str&&echo shell', verbose=0, shell=True)
        >>> assert info['out'].strip() == 'str' + chr(10) + 'shell'
    """
    # BUGFIX: `verbout` is a named parameter, so it can never appear in
    # **kwargs; previously it was silently ignored. Honor it (with a
    # deprecation warning) by mapping it onto `tee`.
    if verbout is not None:  # nocover
        warnings.warn(
            '`verbout` is deprecated and will be removed. '
            'Use `tee` instead', DeprecationWarning)
        tee = verbout
    if kwargs:  # nocover
        if 'detatch' in kwargs:
            warnings.warn(
                '`detatch` is deprecated (misspelled) and will be removed. '
                'Use `detach` instead', DeprecationWarning)
            detach = kwargs.pop('detatch')
        if kwargs:
            # BUGFIX: was `list(kwargs.keys)` (missing call), which raised a
            # TypeError instead of reporting the unknown argument names
            raise ValueError('Unknown kwargs: {}'.format(list(kwargs.keys())))
    # Determine if command is specified as text or a tuple
    if isinstance(command, six.string_types):
        command_text = command
        command_tup = None
    else:
        import pipes
        command_tup = command
        command_text = ' '.join(list(map(pipes.quote, command_tup)))
    if shell or sys.platform.startswith('win32'):
        # When shell=True, args is sent to the shell (e.g. bin/sh) as text
        args = command_text
    else:
        # When shell=False, args is a list of executable and arguments
        if command_tup is None:
            # parse this out of the string
            # NOTE: perhaps use the solution from [3] here?
            import shlex
            command_tup = shlex.split(command_text)
            # command_tup = shlex.split(command_text, posix=not WIN32)
        args = command_tup
    if tee is None:
        # by default, tee only when verbose
        tee = verbose > 0
    if verbose > 1:
        # print a pseudo-prompt showing what is about to run and where
        import os
        import platform
        import getpass
        from ubelt import util_path
        if verbose > 2:
            try:
                print('┌─── START CMD ───')
            except Exception:  # nocover
                print('+=== START CMD ===')
        cwd_ = os.getcwd() if cwd is None else cwd
        compname = platform.node()
        username = getpass.getuser()
        cwd_ = util_path.compressuser(cwd_)
        ps1 = '[ubelt.cmd] {}@{}:{}$ '.format(username, compname, cwd_)
        print(ps1 + command_text)

    # Create a new process to execute the command
    def make_proc():
        # delay the creation of the process until we validate all args
        import subprocess
        proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, shell=shell,
                                universal_newlines=True, cwd=cwd, env=env)
        return proc

    if detach:
        info = {'proc': make_proc(), 'command': command_text}
        if verbose > 0:  # nocover
            print('...detaching')
    else:
        if tee:
            # tee mode: mirror output to this process's streams while
            # simultaneously capturing it
            stdout, stderr = sys.stdout, sys.stderr
            proc, logged_out, logged_err = _tee_output(make_proc, stdout,
                                                       stderr,
                                                       backend=tee_backend)
            try:
                out = ''.join(logged_out)
            except UnicodeDecodeError:  # nocover
                out = '\n'.join(_.decode('utf-8') for _ in logged_out)
            try:
                err = ''.join(logged_err)
            except UnicodeDecodeError:  # nocover
                err = '\n'.join(_.decode('utf-8') for _ in logged_err)
            (out_, err_) = proc.communicate()
        else:
            # quiet mode: just block until the process finishes
            proc = make_proc()
            (out, err) = proc.communicate()
        # calling wait means that the process will terminate and it is safe to
        # return a reference to the process object.
        ret = proc.wait()
        info = {
            'out': out,
            'err': err,
            'ret': ret,
            'proc': proc,
            'cwd': cwd,
            'command': command_text
        }
        if verbose > 2:
            # https://en.wikipedia.org/wiki/Box-drawing_character
            try:
                print('└─── END CMD ───')
            except Exception:  # nocover
                print('L___ END CMD ___')
    return info
def timestamp(method='iso8601'):
    """
    Make an iso8601 timestamp for the current local time.

    Args:
        method (str): type of timestamp; only 'iso8601' is supported.

    Returns:
        str: stamp of the form ``YYYY-MM-DDTHHMMSS<offset>`` where
            ``<offset>`` is the signed UTC offset in whole hours
            (e.g. '-5' or '+1').

    Raises:
        ValueError: if method is not 'iso8601'

    Example:
        >>> stamp = timestamp()
        >>> print('stamp = {!r}'.format(stamp))
        stamp = ...-...-...T...
    """
    if method == 'iso8601':
        # BUGFIX: time.timezone is seconds *west* of UTC, so the ISO-8601
        # offset is its negation. Previously the sign was flipped, labeling
        # e.g. US/Eastern as '+5' instead of '-5'.
        # NOTE: the offset is truncated to whole hours and DST is ignored,
        # matching the original granularity.
        tz_hour = -time.timezone // 3600
        utc_offset = str(tz_hour) if tz_hour < 0 else '+' + str(tz_hour)
        stamp = time.strftime('%Y-%m-%dT%H%M%S') + utc_offset
        return stamp
    else:
        raise ValueError('only iso8601 is accepted for now')
def benchmark_hash_data():
    """
    Benchmark ub.hash_data over several hashers and growing input sizes,
    then print per-size timings, ratios, and (optionally) a plot.

    CommandLine:
        python ~/code/ubelt/dev/bench_hash.py --convert=True --show
        python ~/code/ubelt/dev/bench_hash.py --convert=False --show
    """
    import ubelt as ub
    #ITEM = 'JUST A STRING' * 100
    ITEM = [0, 1, 'a', 'b', ['JUST A STRING'] * 4]
    HASHERS = ['sha1', 'sha512', 'xxh32', 'xxh64']
    scales = list(range(5, 13))
    results = ub.AutoDict()
    # Use json is faster or at least as fast it most cases
    # xxhash is also significantly faster than sha512
    # BUGFIX: compare against 'true' -- str.lower() output can never equal
    # 'True', so --convert previously always parsed as False
    convert = ub.argval('--convert', default='True').lower() == 'true'
    print('convert = {!r}'.format(convert))
    ti = ub.Timerit(9, bestof=3, verbose=1, unit='ms')
    for s in ub.ProgIter(scales, desc='benchmark', verbose=3):
        N = 2 ** s
        print(' --- s={s}, N={N} --- '.format(s=s, N=N))
        data = [ITEM] * N
        for hasher in HASHERS:
            for timer in ti.reset(hasher):
                ub.hash_data(data, hasher=hasher, convert=convert)
            results[hasher].update({N: ti.mean()})
        # rank the hashers for this size, fastest first
        col = {h: results[h][N] for h in HASHERS}
        sortx = ub.argsort(col)
        ranking = ub.dict_subset(col, sortx)
        print('walltime: ' + ub.repr2(ranking, precision=9, nl=0))
        best = next(iter(ranking))
        # report each hasher's slowdown relative to the fastest
        pairs = [(k, best) for k in ranking]
        ratios = [ranking[k1] / ranking[k2] for k1, k2 in pairs]
        nicekeys = ['{}/{}'.format(k1, k2) for k1, k2 in pairs]
        relratios = ub.odict(zip(nicekeys, ratios))
        print('speedup: ' + ub.repr2(relratios, precision=4, nl=0))
    import pandas as pd
    df = pd.DataFrame.from_dict(results)
    df.columns.name = 'hasher'
    df.index.name = 'N'
    ratios = df.copy().drop(columns=df.columns)
    for k1, k2 in [('sha512', 'xxh32'), ('sha1', 'xxh32'), ('xxh64', 'xxh32')]:
        ratios['{}/{}'.format(k1, k2)] = df[k1] / df[k2]
    print()
    print('Seconds per iteration')
    print(df.to_string(float_format='%.9f'))
    print()
    print('Ratios of seconds')
    print(ratios.to_string(float_format='%.2f'))
    print()
    print('Average Ratio (over all N)')
    print('convert = {!r}'.format(convert))
    print(ratios.mean().sort_values())
    if ub.argflag('--show'):
        import netharn.util as kwel
        kwel.autompl()
        xdata = sorted(ub.peek(results.values()).keys())
        ydata = ub.map_vals(lambda d: [d[x] for x in xdata], results)
        kwel.multi_plot(xdata, ydata, xlabel='N', ylabel='seconds',
                        title='convert = {}'.format(convert))
        kwel.show_if_requested()
def import_module_from_path(modpath, index=-1):
    """
    Import a module given the path to it, possibly inside a zipfile.

    Args:
        modpath (PathLike): path to the module on disk, or a path of the
            form ``<zipfile>:<internal>`` (a colon, slash, or os.sep may
            separate the archive from the internal path) addressing a
            module inside a zip archive, e.g.
            ``/path/to/archive.zip:mymodule.py``.
        index (int): unused; kept for backwards compatibility.

    Returns:
        module: the imported module

    Raises:
        IOError: if modpath does not exist on disk or inside a zipfile.

    References:
        https://stackoverflow.com/questions/67631/import-module-given-path

    Notes:
        If the module is part of a package, the package will be imported
        first. These modules may cause problems when reloading via IPython
        magic.

    Warning:
        It is best to use this with paths that will not conflict with
        previously existing modules. If the modpath conflicts with a
        previously existing module name, and the target module does imports
        of its own relative to this conflicting path, then the module that
        was loaded first wins, which can cause a NameError or worse --- an
        incorrect helper module.

    Example:
        >>> import xdoctest
        >>> modpath = xdoctest.__file__
        >>> module = import_module_from_path(modpath)
        >>> assert module is xdoctest

    Doctest:
        >>> import pytest
        >>> with pytest.raises(IOError):
        >>>     import_module_from_path('does-not-exist')
        >>> with pytest.raises(IOError):
        >>>     import_module_from_path('does-not-exist.zip/')
    """
    import os
    if os.path.exists(modpath):
        # A regular on-disk module path.
        # (the importlib version doesnt work in pytest, so a custom
        # importer is used here)
        return _custom_import_modpath(modpath)
    # Otherwise the path may address a module inside a zipfile. We allow
    # (if not prefer or force) the colon to be a path separator so the
    # result agrees with the mod.__name__ attribute that will be produced.
    import re
    import zipimport
    # match a `.zip` extension followed by colon, slash, or os.sep
    zip_pat = '(.zip[' + re.escape(os.path.sep) + '/:])'
    pieces = re.split(zip_pat, modpath, flags=re.IGNORECASE)
    if len(pieces) > 2:
        # rejoin everything before the final separator, dropping the
        # trailing separator character itself
        archivepath = ''.join(pieces[:-1])[:-1]
        internal = pieces[-1]
        modname = os.path.normpath(os.path.splitext(internal)[0])
        if os.path.exists(archivepath):
            zimp_file = zipimport.zipimporter(archivepath)
            return zimp_file.load_module(modname)
    raise IOError('modpath={} does not exist'.format(modpath))
def import_module_from_name(modname):
    """
    Import a module given its string name (i.e. its ``__name__``).

    Args:
        modname (str): dotted module name

    Returns:
        module: the imported module

    Example:
        >>> # test with modules that wont be imported in normal circumstances
        >>> # todo write a test where we gaurentee this
        >>> modname_list = [
        >>>     'pickletools',
        >>>     'lib2to3.fixes.fix_apply',
        >>> ]
        >>> #assert not any(m in sys.modules for m in modname_list)
        >>> modules = [import_module_from_name(modname) for modname in modname_list]
        >>> assert [m.__name__ for m in modules] == modname_list
        >>> assert all(m in sys.modules for m in modname_list)
    """
    # The standard importlib implementation is preferred over the legacy
    # __import__-based variant that used to live here (it also sidestepped
    # an import issue once observed in a Docker environment).
    import importlib
    return importlib.import_module(modname)
def _extension_module_tags():
    """
    Return the list of valid ABI tags an extension module filename might
    carry on this interpreter/platform.
    """
    import sysconfig
    if six.PY2:
        # see also 'SHLIB_EXT'
        # python2 tags extension modules with the multiarch triple (if any)
        tags = [sysconfig.get_config_var('MULTIARCH')]
    else:
        # handle PEP 3149 -- ABI (application binary interface) version
        # tagged .so files, plus the stable-ABI tag
        tags = [
            sysconfig.get_config_var('SOABI'),
            'abi3',  # not sure why this one is valid but it is
        ]
    # drop empty / None entries
    return [t for t in tags if t]
def _platform_pylib_exts():  # nocover
    """
    Return the valid extension-library suffixes for this platform.

    Returns .so, .pyd, or .dylib depending on linux, win or mac. On python3
    the ABI-tagged variants (e.g. ``.cpython-35m-x86_64-linux-gnu``) come
    first, followed by the bare suffix; on python2 the multiarch variants
    are used instead.
    """
    import sysconfig
    if six.PY2:
        # see also 'SHLIB_EXT'
        config_key = 'SO'
    else:
        # handle PEP 3149 -- ABI version tagged .so files
        config_key = 'EXT_SUFFIX'
    base_ext = '.' + sysconfig.get_config_var(config_key).split('.')[-1]
    # tagged variants take priority over the bare suffix
    valid_exts = ['.' + tag + base_ext for tag in _extension_module_tags()]
    valid_exts.append(base_ext)
    return tuple(valid_exts)
def _syspath_modname_to_modpath(modname, sys_path=None, exclude=None):
    """
    syspath version of modname_to_modpath

    Args:
        modname (str): name of module to find
        sys_path (List[PathLike], default=None):
            if specified overrides `sys.path`
        exclude (List[PathLike], default=None):
            list of directory paths. if specified prevents these directories
            from being searched.

    Returns:
        str or None: path to the package directory or module file, or None
            if the module was not found on the search path (implicit None
            from falling off the end of the function).

    Notes:
        This is much slower than the pkgutil mechanisms.

    CommandLine:
        python -m xdoctest.static_analysis _syspath_modname_to_modpath

    Example:
        >>> print(_syspath_modname_to_modpath('xdoctest.static_analysis'))
        ...static_analysis.py
        >>> print(_syspath_modname_to_modpath('xdoctest'))
        ...xdoctest
        >>> print(_syspath_modname_to_modpath('_ctypes'))
        ..._ctypes...
        >>> assert _syspath_modname_to_modpath('xdoctest', sys_path=[]) is None
        >>> assert _syspath_modname_to_modpath('xdoctest.static_analysis', sys_path=[]) is None
        >>> assert _syspath_modname_to_modpath('_ctypes', sys_path=[]) is None
        >>> assert _syspath_modname_to_modpath('this', sys_path=[]) is None

    Example:
        >>> # test what happens when the module is not visible in the path
        >>> modname = 'xdoctest.static_analysis'
        >>> modpath = _syspath_modname_to_modpath(modname)
        >>> exclude = [split_modpath(modpath)[0]]
        >>> found = _syspath_modname_to_modpath(modname, exclude=exclude)
        >>> # this only works if installed in dev mode, pypi fails
        >>> assert found is None, 'should not have found {}'.format(found)
    """
    def _isvalid(modpath, base):
        # every directory up to the module, should have an init
        subdir = dirname(modpath)
        while subdir and subdir != base:
            if not exists(join(subdir, '__init__.py')):
                return False
            subdir = dirname(subdir)
        return True
    # convert the dotted name into a relative path fragment
    _fname_we = modname.replace('.', os.path.sep)
    candidate_fnames = [
        _fname_we + '.py',
        # _fname_we + '.pyc',
        # _fname_we + '.pyo',
    ]
    # Add extension library suffixes
    candidate_fnames += [_fname_we + ext for ext in _platform_pylib_exts()]
    if sys_path is None:
        sys_path = sys.path
    # the empty string in sys.path indicates cwd. Change this to a '.'
    candidate_dpaths = ['.' if p == '' else p for p in sys_path]
    if exclude:
        def normalize(p):
            # windows paths are case-insensitive, so compare lowercased
            # realpaths there; elsewhere realpath alone suffices
            if sys.platform.startswith('win32'):  # nocover
                return realpath(p).lower()
            else:
                return realpath(p)
        # Keep only the paths not in exclude
        real_exclude = {normalize(p) for p in exclude}
        candidate_dpaths = [p for p in candidate_dpaths
                            if normalize(p) not in real_exclude]
    # search candidate directories in order; first match wins
    for dpath in candidate_dpaths:
        # Check for directory-based modules (has precedence over files)
        modpath = join(dpath, _fname_we)
        if exists(modpath):
            if isfile(join(modpath, '__init__.py')):
                if _isvalid(modpath, dpath):
                    return modpath
        # If that fails, check for file-based modules
        for fname in candidate_fnames:
            modpath = join(dpath, fname)
            if isfile(modpath):
                if _isvalid(modpath, dpath):
                    return modpath
def modname_to_modpath(modname, hide_init=True, hide_main=False, sys_path=None):
    """
    Find the path to a python module from its name, without importing it.

    Converts the name of a module (``__name__``) to the path (``__file__``)
    where it is located. Returns None if the module does not exist.

    Args:
        modname (str): module filepath
        hide_init (bool): if False, __init__.py will be returned for packages
        hide_main (bool): if False, and hide_init is True, __main__.py will be
            returned for packages, if it exists.
        sys_path (list): if specified overrides `sys.path` (default None)

    Returns:
        str: modpath - path to the module, or None if it doesn't exist

    CommandLine:
        python -m xdoctest.static_analysis modname_to_modpath:0

    Example:
        >>> modname = 'xdoctest.__main__'
        >>> modpath = modname_to_modpath(modname, hide_main=False)
        >>> assert modpath.endswith('__main__.py')
        >>> modname = 'xdoctest'
        >>> modpath = modname_to_modpath(modname, hide_init=False)
        >>> assert modpath.endswith('__init__.py')
        >>> modpath = basename(modname_to_modpath('_ctypes'))
        >>> assert 'ctypes' in modpath
    """
    found = _syspath_modname_to_modpath(modname, sys_path)
    if found is None:
        return None
    # adjust package dirs / __init__ / __main__ per the flags
    return normalize_modpath(found, hide_init=hide_init, hide_main=hide_main)
def normalize_modpath(modpath, hide_init=True, hide_main=False):
    """
    Normalize ``__init__`` and ``__main__`` paths.

    Notes:
        Adds __init__ if reasonable, but only removes __main__ by default

    Args:
        modpath (str): path to a module file or package directory
        hide_init (bool): if True, represent packages by their directory;
            if False, always return the ``__init__.py`` file when one exists.
        hide_main (bool): if True, strip a trailing ``__main__.py`` (only
            when a sibling ``__init__.py`` proves it lives in a package).

    Returns:
        str: the normalized path

    CommandLine:
        xdoctest -m xdoctest.static_analysis normalize_modpath

    Example:
        >>> import xdoctest.static_analysis as static
        >>> modpath = static.__file__
        >>> assert static.normalize_modpath(modpath) == modpath.replace('.pyc', '.py')
        >>> dpath = dirname(modpath)
        >>> res0 = static.normalize_modpath(dpath, hide_init=0, hide_main=0)
        >>> res1 = static.normalize_modpath(dpath, hide_init=0, hide_main=1)
        >>> res2 = static.normalize_modpath(dpath, hide_init=1, hide_main=0)
        >>> res3 = static.normalize_modpath(dpath, hide_init=1, hide_main=1)
        >>> assert res0.endswith('__init__.py')
        >>> assert res1.endswith('__init__.py')
        >>> assert not res2.endswith('.py')
        >>> assert not res3.endswith('.py')
    """
    if six.PY2:
        # map a compiled file back to its source
        if modpath.endswith('.pyc'):
            modpath = modpath[:-1]
    if hide_init:
        if basename(modpath) == '__init__.py':
            modpath = dirname(modpath)
            hide_main = True
    else:
        # expand a package dir into its __init__.py, if it has one
        init_fpath = join(modpath, '__init__.py')
        if exists(init_fpath):
            modpath = init_fpath
    if hide_main and basename(modpath) == '__main__.py':
        # We can remove main, but dont add it. Only strip __main__ when a
        # sibling __init__ proves this is a package; otherwise it may just
        # be a standalone module that happens to be named __main__
        if exists(join(dirname(modpath), '__init__.py')):
            modpath = dirname(modpath)
    return modpath
def modpath_to_modname(modpath, hide_init=True, hide_main=False, check=True,
                       relativeto=None):
    """
    Determines importable name from file path

    Converts the path to a module (__file__) to the importable python name
    (__name__) without importing the module.

    The filename is converted to a module name, and parent directories are
    recursively included until a directory without an __init__.py file is
    encountered.

    Args:
        modpath (str): module filepath
        hide_init (bool): removes the __init__ suffix (default True)
        hide_main (bool): removes the __main__ suffix (default False)
        check (bool): if False, does not raise an error if modpath is a dir
            and does not contain an __init__ file.
        relativeto (str, optional): if specified, all checks are ignored and
            this is considered the path to the root module.

    Returns:
        str: modname

    Raises:
        ValueError: if check is True and the path does not exist

    CommandLine:
        xdoctest -m xdoctest.static_analysis modpath_to_modname

    Example:
        >>> from xdoctest import static_analysis
        >>> modpath = static_analysis.__file__.replace('.pyc', '.py')
        >>> modname = modpath_to_modname(modpath)
        >>> assert modname == 'xdoctest.static_analysis'

    Example:
        >>> modpath = modname_to_modpath('_ctypes')
        >>> modname = modpath_to_modname(modpath)
        >>> assert modname == '_ctypes'
    """
    if check and relativeto is None:
        if not exists(modpath):
            raise ValueError('modpath={} does not exist'.format(modpath))
    modpath_ = abspath(expanduser(modpath))
    modpath_ = normalize_modpath(modpath_, hide_init=hide_init,
                                 hide_main=hide_main)
    if relativeto:
        # all checks skipped: relativeto is taken as the root module path
        dpath = dirname(abspath(expanduser(relativeto)))
        rel_modpath = relpath(modpath_, dpath)
    else:
        dpath, rel_modpath = split_modpath(modpath_, check=check)
    modname = splitext(rel_modpath)[0]
    if '.' in modname:
        # an extension library carries an abi tag (e.g. mod.abi3.so);
        # keep only the stem before the first dot.
        # BUGFIX: use maxsplit -- the old two-name unpacking raised a
        # ValueError whenever the stem contained more than one dot
        modname = modname.split('.', 1)[0]
    modname = modname.replace('/', '.')
    modname = modname.replace('\\', '.')
    return modname
def split_modpath(modpath, check=True):
    """
    Splits the modpath into the dir that must be in PYTHONPATH for the module
    to be imported and the modulepath relative to this directory.

    Args:
        modpath (str): module filepath
        check (bool): if False, does not raise an error if modpath is a
            directory and does not contain an `__init__.py` file.

    Returns:
        tuple: (directory, rel_modpath)

    Raises:
        ValueError: if modpath does not exist or is not a package

    Example:
        >>> from xdoctest import static_analysis
        >>> modpath = static_analysis.__file__.replace('.pyc', '.py')
        >>> modpath = abspath(modpath)
        >>> dpath, rel_modpath = split_modpath(modpath)
        >>> recon = join(dpath, rel_modpath)
        >>> assert recon == modpath
        >>> assert rel_modpath == join('xdoctest', 'static_analysis.py')
    """
    if sys.version_info[0] == 2:  # equivalent to six.PY2, without the dep
        # map a compiled file back to its source
        if modpath.endswith('.pyc'):
            modpath = modpath[:-1]
    modpath_ = abspath(expanduser(modpath))
    if check:
        if not exists(modpath_):
            if not exists(modpath):
                raise ValueError('modpath={} does not exist'.format(modpath))
            raise ValueError('modpath={} is not a module'.format(modpath))
        # BUGFIX: check __init__.py against the normalized `modpath_`;
        # previously the unexpanded `modpath` was used here, which wrongly
        # rejected valid packages given as `~/...` paths
        if isdir(modpath_) and not exists(join(modpath_, '__init__.py')):
            # dirs without inits are not modules
            raise ValueError('modpath={} is not a module'.format(modpath))
    full_dpath, fname_ext = split(modpath_)
    _relmod_parts = [fname_ext]
    # Walk upwards while each parent directory is itself a package
    dpath = full_dpath
    while exists(join(dpath, '__init__.py')):
        dpath, dname = split(dpath)
        _relmod_parts.append(dname)
    relmod_parts = _relmod_parts[::-1]
    rel_modpath = os.path.sep.join(relmod_parts)
    return dpath, rel_modpath
def argval(key, default=util_const.NoParam, argv=None):
    """
    Get the value of a keyword argument specified on the command line.

    Values can be specified as `<key> <value>` or `<key>=<value>`

    Args:
        key (str or tuple): string or tuple of strings. Each key should be
            prefixed with two hyphens (i.e. `--`)
        default (Optional[object]): value to return if not specified
        argv (Optional[list]): overrides `sys.argv` if specified

    Returns:
        str: value : the value specified after the key. If the key is
            specified multiple times, the first value is returned.

    TODO:
        - [ ] Can we handle the case where the value is a list of long paths?
        - [ ] Should we default the first or last specified instance of the flag.

    Example:
        >>> import ubelt as ub
        >>> argv = ['--ans', '42', '--quest=the grail', '--ans=6', '--bad']
        >>> assert ub.argval('--spam', argv=argv) == ub.NoParam
        >>> assert ub.argval('--quest', argv=argv) == 'the grail'
        >>> assert ub.argval('--ans', argv=argv) == '42'
        >>> assert ub.argval('--bad', argv=argv) == ub.NoParam
        >>> assert ub.argval(('--bad', '--bar'), argv=argv) == ub.NoParam

    Example:
        >>> # Test fix for GH Issue #41
        >>> import ubelt as ub
        >>> argv = ['--path=/path/with/k=3']
        >>> ub.argval('--path', argv=argv) == '/path/with/k=3'
    """
    if argv is None:  # nocover
        argv = sys.argv
    # accept a single key or a tuple of aliases
    keys = [key] if isinstance(key, six.string_types) else key
    last_index = len(argv) - 1
    for argx, item in enumerate(argv):
        for key_ in keys:
            if item == key_:
                # `<key> <value>` form: the value is the next token,
                # provided one exists
                if argx < last_index:
                    return argv[argx + 1]
            elif item.startswith(key_ + '='):
                # `<key>=<value>` form: split only on the first '=' so the
                # value itself may contain '=' characters
                return item.split('=', 1)[1]
    return default
def argflag(key, argv=None):
    """
    Determine if a key is specified on the command line.

    Args:
        key (str or tuple): string or tuple of strings. Each key should be
            prefixed with two hyphens (i.e. `--`)
        argv (Optional[list]): overrides `sys.argv` if specified

    Returns:
        bool: flag : True if the key (or any of the keys) was specified

    Example:
        >>> import ubelt as ub
        >>> argv = ['--spam', '--eggs', 'foo']
        >>> assert ub.argflag('--eggs', argv=argv) is True
        >>> assert ub.argflag('--ans', argv=argv) is False
        >>> assert ub.argflag('foo', argv=argv) is True
        >>> assert ub.argflag(('bar', '--spam'), argv=argv) is True
    """
    if argv is None:  # nocover
        argv = sys.argv
    # accept a single key or a tuple of aliases
    if isinstance(key, six.string_types):
        keys = [key]
    else:
        keys = key
    return any(k in argv for k in keys)
def hzcat(args, sep=''):
    """
    Horizontally concatenates strings, preserving indentation.

    Concatenates a list of objects ensuring that the next item in the list
    is all the way to the right of any previous items.

    Args:
        args (List[str]): strings to concatenate
        sep (str): separator placed between columns (defaults to '')

    CommandLine:
        python -m ubelt.util_str hzcat

    Example1:
        >>> import ubelt as ub
        >>> B = ub.repr2([[1, 2], [3, 457]], nl=1, cbr=True, trailsep=False)
        >>> C = ub.repr2([[5, 6], [7, 8]], nl=1, cbr=True, trailsep=False)
        >>> args = ['A = ', B, ' * ', C]
        >>> print(ub.hzcat(args))
        A = [[1, 2], * [[5, 6],
             [3, 457]]   [7, 8]]
    """
    import unicodedata
    if '\n' in sep or '\r' in sep:
        raise ValueError('`sep` cannot contain newline characters')
    # NFC-normalize so combining characters do not skew column widths
    # TODO: ensure unicode data works correctly for python2
    blocks = [unicodedata.normalize('NFC', ensure_unicode(a)).split('\n')
              for a in args]
    height = max(map(len, blocks))
    # pad every block with empty rows so all have the same height
    blocks = [rows + [''] * (height - len(rows)) for rows in blocks]
    out_rows = ['' for _ in range(height)]
    width = 0
    last = len(blocks) - 1
    for bx, rows in enumerate(blocks):
        # append this block's rows to the accumulated output
        for rx, row in enumerate(rows):
            out_rows[rx] += row
        # track the widest row seen so far
        width = max(width, max(map(len, out_rows)))
        if bx < last:
            # right-pad every row to the common width, then add the sep,
            # so the next block starts in a single vertical column
            out_rows = [row + (' ' * (width - len(row))) + sep
                        for row in out_rows]
            width += len(sep)
    # drop trailing spaces introduced by the padding
    return '\n'.join(row.rstrip(' ') for row in out_rows)
def ensure_unicode(text):
    r"""
    Casts bytes into utf8 (mostly for python2 compatibility)

    Args:
        text (str or bytes): text to coerce into a unicode string

    Returns:
        str: the input decoded as utf8 if it was bytes, otherwise unchanged

    Raises:
        ValueError: if the input is neither text nor bytes

    References:
        http://stackoverflow.com/questions/12561063/extract-data-from-file

    Example:
        >>> from ubelt.util_str import *
        >>> import codecs  # NOQA
        >>> assert ensure_unicode('my ünicôdé strįng') == 'my ünicôdé strįng'
        >>> assert ensure_unicode('text1') == 'text1'
        >>> assert ensure_unicode('text1'.encode('utf8')) == 'text1'
        >>> assert (codecs.BOM_UTF8 + 'text»¿'.encode('utf8')).decode('utf8')
    """
    # bytes and text are disjoint types, so check order does not matter
    if isinstance(text, six.binary_type):
        return text.decode('utf8')
    if isinstance(text, six.text_type):
        return text
    raise ValueError('unknown input type {!r}'.format(text))  # nocover
def symlink(real_path, link_path, overwrite=False, verbose=0):
    """
    Create a symbolic link.

    This will work on linux or windows, however windows does have some corner
    cases. For more details see notes in `ubelt._win32_links`.

    Args:
        real_path (PathLike): path to real file or directory
        link_path (PathLike): path to desired location for symlink
        overwrite (bool): overwrite existing symlinks.
            This will not overwrite real files on systems with proper symlinks.
            However, on older versions of windows junctions are
            indistinguishable from real files, so we cannot make this
            guarantee. (default = False)
        verbose (int): verbosity level (default=0)

    Returns:
        PathLike: link path

    Raises:
        FileExistsError: if `link_path` is a real file and `overwrite` is
            False (on systems with proper symlink support)

    CommandLine:
        python -m ubelt.util_links symlink:0

    Example:
        >>> import ubelt as ub
        >>> dpath = ub.ensure_app_cache_dir('ubelt', 'test_symlink0')
        >>> real_path = join(dpath, 'real_file.txt')
        >>> link_path = join(dpath, 'link_file.txt')
        >>> [ub.delete(p) for p in [real_path, link_path]]
        >>> ub.writeto(real_path, 'foo')
        >>> result = symlink(real_path, link_path)
        >>> assert ub.readfrom(result) == 'foo'
        >>> [ub.delete(p) for p in [real_path, link_path]]

    Example:
        >>> import ubelt as ub
        >>> from os.path import dirname
        >>> dpath = ub.ensure_app_cache_dir('ubelt', 'test_symlink1')
        >>> ub.delete(dpath)
        >>> ub.ensuredir(dpath)
        >>> _dirstats(dpath)
        >>> real_dpath = ub.ensuredir((dpath, 'real_dpath'))
        >>> link_dpath = ub.augpath(real_dpath, base='link_dpath')
        >>> real_path = join(dpath, 'afile.txt')
        >>> link_path = join(dpath, 'afile.txt')
        >>> [ub.delete(p) for p in [real_path, link_path]]
        >>> ub.writeto(real_path, 'foo')
        >>> result = symlink(real_dpath, link_dpath)
        >>> assert ub.readfrom(link_path) == 'foo', 'read should be same'
        >>> ub.writeto(link_path, 'bar')
        >>> _dirstats(dpath)
        >>> assert ub.readfrom(link_path) == 'bar', 'very bad bar'
        >>> assert ub.readfrom(real_path) == 'bar', 'changing link did not change real'
        >>> ub.writeto(real_path, 'baz')
        >>> _dirstats(dpath)
        >>> assert ub.readfrom(real_path) == 'baz', 'very bad baz'
        >>> assert ub.readfrom(link_path) == 'baz', 'changing real did not change link'
        >>> ub.delete(link_dpath, verbose=1)
        >>> _dirstats(dpath)
        >>> assert not exists(link_dpath), 'link should not exist'
        >>> assert exists(real_path), 'real path should exist'
        >>> _dirstats(dpath)
        >>> ub.delete(dpath, verbose=1)
        >>> _dirstats(dpath)
        >>> assert not exists(real_path)
    """
    path = normpath(real_path)
    link = normpath(link_path)
    if not os.path.isabs(path):
        # if path is not absolute it must be specified relative to link
        if _can_symlink():
            path = os.path.relpath(path, os.path.dirname(link))
        else:  # nocover
            # On windows, we need to use absolute paths
            path = os.path.abspath(path)
    if verbose:
        print('Symlink: {path} -> {link}'.format(path=path, link=link))
    if islink(link):
        if verbose:
            print('... already exists')
        pointed = _readlink(link)
        if pointed == path:
            # Idempotent: an existing link to the same target is left alone
            if verbose > 1:
                print('... and points to the right place')
            return link
        if verbose > 1:
            if not exists(link):
                print('... but it is broken and points somewhere else: {}'.format(pointed))
            else:
                print('... but it points somewhere else: {}'.format(pointed))
        if overwrite:
            util_io.delete(link, verbose=verbose > 1)
    elif exists(link):
        # `link` exists but is not a symlink: a real file/dir (or on win32,
        # possibly a hard link that we cannot distinguish from a real file)
        if _win32_links is None:
            if verbose:
                print('... already exists, but its a file. This will error.')
            raise FileExistsError(
                'cannot overwrite a physical path: "{}"'.format(path))
        else:  # nocover
            if verbose:
                print('... already exists, and is either a file or hard link. '
                      'Assuming it is a hard link. '
                      'On non-win32 systems this would error.')
    if _win32_links is None:
        os.symlink(path, link)
    else:  # nocover
        # Delegate to the windows-specific implementation (symlink/junction)
        _win32_links._symlink(path, link, overwrite=overwrite, verbose=verbose)
    return link
def _dirstats(dpath=None):  # nocover
    """
    Testing helper for printing directory information
    (mostly for investigating windows weirdness)

    Prints one row per entry in `dpath` with five boolean columns:
    E (exists), L (islink), F (isfile), D (isdir), and J (is a win32
    junction). The combination of flags is used to classify and colorize
    each entry.

    Args:
        dpath (PathLike, optional): directory to inspect.
            Defaults to the current working directory.

    CommandLine:
        python -m ubelt.util_links _dirstats
    """
    from ubelt import util_colors
    if dpath is None:
        dpath = os.getcwd()
    print('===============')
    print('Listing for dpath={}'.format(dpath))
    print('E L F D J - path')
    print('--------------')
    if not os.path.exists(dpath):
        print('... does not exist')
        return
    paths = sorted(os.listdir(dpath))
    for path in paths:
        full_path = join(dpath, path)
        E = os.path.exists(full_path)
        L = os.path.islink(full_path)
        F = os.path.isfile(full_path)
        D = os.path.isdir(full_path)
        # Junction check only applies on windows
        J = util_platform.WIN32 and _win32_links._win32_is_junction(full_path)
        ELFDJ = [E, L, F, D, J]
        # Classify the entry by its flag combination and pick a color
        if ELFDJ == [1, 0, 0, 1, 0]:
            # A directory
            path = util_colors.color_text(path, 'green')
        elif ELFDJ == [1, 0, 1, 0, 0]:
            # A file (or a hard link they are indistinguishable with one query)
            path = util_colors.color_text(path, 'white')
        elif ELFDJ == [1, 0, 0, 1, 1]:
            # A directory junction
            path = util_colors.color_text(path, 'yellow')
        elif ELFDJ == [1, 1, 1, 0, 0]:
            # A file link
            path = util_colors.color_text(path, 'turquoise')
        elif ELFDJ == [1, 1, 0, 1, 0]:
            # A directory link
            path = util_colors.color_text(path, 'teal')
        elif ELFDJ == [0, 1, 0, 0, 0]:
            # A broken file link
            path = util_colors.color_text(path, 'red')
        elif ELFDJ == [0, 1, 0, 1, 0]:
            # A broken directory link
            path = util_colors.color_text(path, 'darkred')
        elif ELFDJ == [0, 0, 0, 1, 1]:
            # A broken directory junction
            path = util_colors.color_text(path, 'purple')
        elif ELFDJ == [1, 0, 1, 0, 1]:
            # A file junction? Thats not good.
            # I guess this is a windows 7 thing?
            path = util_colors.color_text(path, 'red')
        elif ELFDJ == [1, 1, 0, 0, 0]:
            # Windows? Why? What does this mean!?
            # A directory link that cant be resolved?
            path = util_colors.color_text(path, 'red')
        else:
            # Unhandled flag combination: dump debug info and fail loudly
            print('dpath = {!r}'.format(dpath))
            print('path = {!r}'.format(path))
            raise AssertionError(str(ELFDJ) + str(path))
        line = '{E:d} {L:d} {F:d} {D:d} {J:d} - {path}'.format(**locals())
        if os.path.islink(full_path):
            line += ' -> ' + os.readlink(full_path)
        elif _win32_links is not None:
            if _win32_links._win32_is_junction(full_path):
                line += ' => ' + _win32_links._win32_read_junction(full_path)
        print(line)
def _make_signature_key(args, kwargs):
    """
    Transforms function args into a hashable key that can be used by the cache.

    Args:
        args (tuple): positional arguments
        kwargs (dict): keyword arguments

    Returns:
        tuple: a hashable key derived from args and kwargs

    Raises:
        TypeError: if any argument is not hashable

    CommandLine:
        xdoctest -m ubelt.util_memoize _make_signature_key

    Example:
        >>> args = (4, [1, 2])
        >>> kwargs = {'a': 'b'}
        >>> key = _make_signature_key(args, kwargs)
        >>> print('key = {!r}'.format(key))
        >>> # Some mutable types cannot be handled by ub.hash_data
        >>> import pytest
        >>> import six
        >>> if six.PY2:
        >>>     import collections as abc
        >>> else:
        >>>     from collections import abc
        >>> with pytest.raises(TypeError):
        >>>     _make_signature_key((4, [1, 2], {1: 2, 'a': 'b'}), kwargs={})
        >>> class Dummy(abc.MutableSet):
        >>>     def __contains__(self, item): return None
        >>>     def __iter__(self): return iter([])
        >>>     def __len__(self): return 0
        >>>     def add(self, item, loc): return None
        >>>     def discard(self, item): return None
        >>> with pytest.raises(TypeError):
        >>>     _make_signature_key((Dummy(),), kwargs={})
    """
    kwitems = kwargs.items()
    # Before CPython 3.7 dict ordering is not a language guarantee, so sort
    # the keyword items to make the key deterministic. We can sort because
    # the keys are guaranteed to be strings.
    if (sys.version_info.major, sys.version_info.minor) < (3, 7):  # nocover
        kwitems = sorted(kwitems)
    kwitems = tuple(kwitems)
    try:
        key = _hashable(args), _hashable(kwitems)
    except TypeError:
        # BUGFIX: the message previously read 'kwargs{}' (missing '=')
        raise TypeError(
            'Signature is not hashable: args={} kwargs={}'.format(args, kwargs))
    return key
def memoize(func):
    """
    Memoization decorator that respects args and kwargs.

    The cache dict is exposed on the returned wrapper as the `cache`
    attribute, so callers can inspect or clear it.

    Args:
        func (Callable): live python function

    Returns:
        func: memoized wrapper

    References:
        https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize

    CommandLine:
        xdoctest -m ubelt.util_memoize memoize

    Example:
        >>> import ubelt as ub
        >>> closure = {'a': 'b', 'c': 'd'}
        >>> incr = [0]
        >>> def foo(key):
        >>>     value = closure[key]
        >>>     incr[0] += 1
        >>>     return value
        >>> foo_memo = ub.memoize(foo)
        >>> assert foo('a') == 'b' and foo('c') == 'd'
        >>> assert incr[0] == 2
        >>> print('Call memoized version')
        >>> assert foo_memo('a') == 'b' and foo_memo('c') == 'd'
        >>> assert incr[0] == 4
        >>> assert foo_memo('a') == 'b' and foo_memo('c') == 'd'
        >>> print('Counter should no longer increase')
        >>> assert incr[0] == 4
        >>> print('Closure changes result without memoization')
        >>> closure = {'a': 0, 'c': 1}
        >>> assert foo('a') == 0 and foo('c') == 1
        >>> assert incr[0] == 6
        >>> assert foo_memo('a') == 'b' and foo_memo('c') == 'd'
    """
    _memo = {}

    @functools.wraps(func)
    def _memoized(*args, **kwargs):
        key = _make_signature_key(args, kwargs)
        try:
            # Fast path: the result was already computed for this signature
            return _memo[key]
        except KeyError:
            value = func(*args, **kwargs)
            _memo[key] = value
            return value
    _memoized.cache = _memo
    return _memoized
def memoize_property(fget):
    """
    Return a property attribute for new-style classes that only calls its
    getter on the first access. The result is stored on the instance (under
    `'_' + fget.__name__`) and returned on subsequent accesses, so the getter
    is never invoked again.

    This decorator can either be used by itself or by decorating another
    property. In either case the method will always become a property.

    Args:
        fget (Callable): getter function (or an existing property to unwrap)

    Returns:
        property: a caching property attribute

    Notes:
        implementation is a modified version of [1].

    References:
        ..[1] https://github.com/estebistec/python-memoized-property

    CommandLine:
        xdoctest -m ubelt.util_memoize memoize_property

    Example:
        >>> class C(object):
        ...     load_name_count = 0
        ...     @memoize_property
        ...     def name(self):
        ...         "name's docstring"
        ...         self.load_name_count += 1
        ...         return "the name"
        ...     @memoize_property
        ...     @property
        ...     def another_name(self):
        ...         "name's docstring"
        ...         self.load_name_count += 1
        ...         return "the name"
        >>> c = C()
        >>> c.load_name_count
        0
        >>> c.name
        'the name'
        >>> c.load_name_count
        1
        >>> c.name
        'the name'
        >>> c.load_name_count
        1
        >>> c.another_name
    """
    # Unwrap any existing property decorator (handles stacked @property)
    while hasattr(fget, 'fget'):
        fget = fget.fget
    cache_name = '_' + fget.__name__

    @functools.wraps(fget)
    def _cached_getter(self):
        if not hasattr(self, cache_name):
            setattr(self, cache_name, fget(self))
        return getattr(self, cache_name)
    return property(_cached_getter)
def highlight_code(text, lexer_name='python', **kwargs):
"""
Highlights a block of text using ANSI tags based on language syntax.
Args:
text (str): plain text to highlight
lexer_name (str): name of language
**kwargs: passed to pygments.lexers.get_lexer_by_name
Returns:
str: text : highlighted text
If pygments is not installed, the plain text is returned.
CommandLine:
python -c "import pygments.formatters; print(list(pygments.formatters.get_all_formatters()))"
Example:
>>> import ubelt as ub
>>> text = 'import ubelt as ub; print(ub)'
>>> new_text = ub.highlight_code(text)
>>> print(new_text)
"""
# Resolve extensions to languages
lexer_name = {
'py': 'python',
'h': 'cpp',
'cpp': 'cpp',
'cxx': 'cpp',
'c': 'cpp',
}.get(lexer_name.replace('.', ''), lexer_name)
try:
import pygments
import pygments.lexers
import pygments.formatters
import pygments.formatters.terminal
if sys.platform.startswith('win32'): # nocover
# Hack on win32 to support colored output
import colorama
colorama.init()
formater = pygments.formatters.terminal.TerminalFormatter(bg='dark')
lexer = pygments.lexers.get_lexer_by_name(lexer_name, **kwargs)
new_text = pygments.highlight(text, lexer, formater)
except ImportError: # nocover
import warnings
warnings.warn('pygments is not installed, code will not be highlighted')
new_text = text
return new_text |
def color_text(text, color):
    r"""
    Colorizes text a single color using ansii tags.

    If `color` is None, or if pygments is not installed, the text is returned
    unchanged.

    Args:
        text (str): text to colorize
        color (str): may be one of the following: yellow, blink, lightgray,
            underline, darkyellow, blue, darkblue, faint, fuchsia, black,
            white, red, brown, turquoise, bold, darkred, darkgreen, reset,
            standout, darkteal, darkgray, overline, purple, green, teal, fuscia

    Returns:
        str: text : colorized text.

    CommandLine:
        python -c "import pygments.console; print(sorted(pygments.console.codes.keys()))"
        python -m ubelt.util_colors color_text

    Example:
        >>> text = 'raw text'
        >>> import pytest
        >>> import ubelt as ub
        >>> if ub.modname_to_modpath('pygments'):
        >>>     # Colors text only if pygments is installed
        >>>     assert color_text(text, 'red') == '\x1b[31;01mraw text\x1b[39;49;00m'
        >>>     assert color_text(text, None) == 'raw text'
        >>> else:
        >>>     # Otherwise text passes through unchanged
        >>>     assert color_text(text, 'red') == 'raw text'
        >>>     assert color_text(text, None) == 'raw text'
    """
    if color is None:
        return text
    try:
        import pygments
        import pygments.console
        if sys.platform.startswith('win32'):  # nocover
            # Hack on win32 to support colored output
            import colorama
            colorama.init()
        return pygments.console.colorize(color, text)
    except ImportError:  # nocover
        import warnings
        warnings.warn('pygments is not installed, text will not be colored')
        return text
def iterable(obj, strok=False):
    """
    Checks if the input implements the iterator interface. An exception is made
    for strings, which return False unless `strok` is True

    Args:
        obj (object): a scalar or iterable input
        strok (bool): if True allow strings to be interpreted as iterable

    Returns:
        bool: True if the input is iterable

    Example:
        >>> obj_list = [3, [3], '3', (3,), [3, 4, 5], {}]
        >>> result = [iterable(obj) for obj in obj_list]
        >>> assert result == [False, True, False, True, True, True]
        >>> result = [iterable(obj, strok=True) for obj in obj_list]
        >>> assert result == [False, True, True, True, True, True]
    """
    try:
        iter(obj)
    except Exception:
        return False
    # Strings are technically iterable, but usually treated as scalars
    if isinstance(obj, six.string_types):
        return strok
    return True
def unique(items, key=None):
    """
    Generates unique items in the order they appear.

    Args:
        items (Iterable): list of items
        key (Callable, optional): custom normalization function.
            If specified returns items where `key(item)` is unique.

    Yields:
        object: a unique item from the input sequence

    Example:
        >>> import ubelt as ub
        >>> items = [4, 6, 6, 0, 6, 1, 0, 2, 2, 1]
        >>> unique_items = list(ub.unique(items))
        >>> assert unique_items == [4, 6, 0, 1, 2]

    Example:
        >>> import ubelt as ub
        >>> items = ['A', 'a', 'b', 'B', 'C', 'c', 'D', 'e', 'D', 'E']
        >>> unique_items = list(ub.unique(items, key=six.text_type.lower))
        >>> assert unique_items == ['A', 'b', 'C', 'D', 'e']
        >>> unique_items = list(ub.unique(items))
        >>> assert unique_items == ['A', 'a', 'b', 'B', 'C', 'c', 'D', 'e', 'E']
    """
    seen = set()
    for item in items:
        # Deduplicate on the normalized value, but yield the raw item
        canon = item if key is None else key(item)
        if canon in seen:
            continue
        seen.add(canon)
        yield item
def argunique(items, key=None):
    """
    Returns indices corresponding to the first instance of each unique item.

    Args:
        items (Sequence): indexable collection of items
        key (Callable, optional): custom normalization function.
            If specified returns items where `key(item)` is unique.

    Yields:
        int : indices of the unique items

    Example:
        >>> items = [0, 2, 5, 1, 1, 0, 2, 4]
        >>> indices = list(argunique(items))
        >>> assert indices == [0, 1, 2, 3, 7]
        >>> indices = list(argunique(items, key=lambda x: x % 2 == 0))
        >>> assert indices == [0, 2]
    """
    # Deduplicate the index sequence by the (normalized) value it points to
    if key is None:
        lookup = items.__getitem__
    else:
        def lookup(index):
            return key(items[index])
    return unique(range(len(items)), key=lookup)
def unique_flags(items, key=None):
    """
    Returns a list of booleans corresponding to the first instance of each
    unique item.

    Args:
        items (Sequence): indexable collection of items
        key (Callable, optional): custom normalization function.
            If specified returns items where `key(item)` is unique.

    Returns:
        List[bool] : flags the items that are unique

    Example:
        >>> import ubelt as ub
        >>> items = [0, 2, 1, 1, 0, 9, 2]
        >>> flags = unique_flags(items)
        >>> assert flags == [True, True, True, False, False, True, False]
        >>> flags = unique_flags(items, key=lambda x: x % 2 == 0)
        >>> assert flags == [True, False, True, False, False, False, False]
    """
    num_items = len(items)
    if key is None:
        # Walk the items backwards so the earliest index for each item is
        # the one that ends up stored in the mapping
        earliest = {}
        for index in range(num_items - 1, -1, -1):
            earliest[items[index]] = index
        indices = earliest.values()
    else:
        indices = argunique(items, key=key)
    return boolmask(indices, num_items)
def boolmask(indices, maxval=None):
    """
    Constructs a list of booleans where an item is True if its position is in
    `indices` otherwise it is False.

    Args:
        indices (list): list of integer indices
        maxval (int): length of the returned list. If not specified
            this is inferred from `indices` (an empty `indices` yields an
            empty mask).

    Note:
        In the future the arg `maxval` may change its name to `shape`

    Returns:
        list: mask: list of booleans. mask[idx] is True if idx in indices

    Example:
        >>> import ubelt as ub
        >>> indices = [0, 1, 4]
        >>> mask = ub.boolmask(indices, maxval=6)
        >>> assert mask == [True, True, False, False, True, False]
        >>> mask = ub.boolmask(indices)
        >>> assert mask == [True, True, False, False, True]
    """
    if maxval is None:
        # Materialize so we can both measure the max and iterate again
        indices = list(indices)
        # ROBUSTNESS: previously `max([])` raised ValueError; an empty index
        # list now simply produces an empty mask.
        maxval = (max(indices) + 1) if indices else 0
    mask = [False] * maxval
    for index in indices:
        mask[index] = True
    return mask
def allsame(iterable, eq=operator.eq):
    """
    Determine if all items in a sequence are the same

    Args:
        iterable (Iterable): items to determine if they are all the same
        eq (Callable, optional): function to determine equality
            (default: operator.eq)

    Returns:
        bool: True if every item equals the first item (vacuously True
            for an empty input)

    Example:
        >>> allsame([1, 1, 1, 1])
        True
        >>> allsame([])
        True
        >>> allsame([0, 1])
        False
        >>> iterable = iter([0, 1, 1, 1])
        >>> next(iterable)
        >>> allsame(iterable)
        True
        >>> allsame(range(10))
        False
        >>> allsame(range(10), lambda a, b: True)
        True
    """
    item_iter = iter(iterable)
    # Grab the first item; an empty input is vacuously all-same
    for first in item_iter:
        break
    else:
        return True
    return all(eq(first, other) for other in item_iter)
def argsort(indexable, key=None, reverse=False):
    """
    Returns the indices that would sort a indexable object.

    This is similar to `numpy.argsort`, but it is written in pure python and
    works on both lists and dictionaries.

    Args:
        indexable (Iterable or Mapping): indexable to sort by
        key (Callable, optional): customizes the ordering of the indexable
        reverse (bool, optional): if True returns in descending order

    Returns:
        list: indices: list of indices such that sorts the indexable
            (dictionary keys for mappings, integer positions otherwise)

    Example:
        >>> import ubelt as ub
        >>> # argsort works on dicts by returning keys
        >>> dict_ = {'a': 3, 'b': 2, 'c': 100}
        >>> indices = ub.argsort(dict_)
        >>> assert list(ub.take(dict_, indices)) == sorted(dict_.values())
        >>> # argsort works on lists by returning indices
        >>> indexable = [100, 2, 432, 10]
        >>> indices = ub.argsort(indexable)
        >>> assert list(ub.take(indexable, indices)) == sorted(indexable)
        >>> # Can use iterators, but be careful. It exhausts them.
        >>> indexable = reversed(range(100))
        >>> indices = ub.argsort(indexable)
        >>> assert indices[0] == 99
        >>> # Can use key just like sorted
        >>> indexable = [[0, 1, 2], [3, 4], [5]]
        >>> indices = ub.argsort(indexable, key=len)
        >>> assert indices == [2, 1, 0]
        >>> # Can use reverse just like sorted
        >>> indexable = [0, 2, 1]
        >>> indices = ub.argsort(indexable, reverse=True)
        >>> assert indices == [1, 2, 0]
    """
    # Build (value, index) pairs; for mappings the "index" is the key
    if isinstance(indexable, collections_abc.Mapping):
        pairs = [(value, key_) for key_, value in indexable.items()]
    else:
        pairs = [(value, index) for index, value in enumerate(indexable)]
    if key is None:
        # Sort on the (value, index) tuples directly
        pairs.sort(reverse=reverse)
    else:
        # Apply the custom key to the value part only
        pairs.sort(key=lambda pair: key(pair[0]), reverse=reverse)
    return [index for _, index in pairs]
def argmax(indexable, key=None):
    """
    Returns index / key of the item with the largest value.

    This is similar to `numpy.argmax`, but it is written in pure python and
    works on both lists and dictionaries.

    Args:
        indexable (Iterable or Mapping): indexable to sort by
        key (Callable, optional): customizes the ordering of the indexable

    CommandLine:
        python -m ubelt.util_list argmax

    Example:
        >>> assert argmax({'a': 3, 'b': 2, 'c': 100}) == 'c'
        >>> assert argmax(['a', 'c', 'b', 'z', 'f']) == 3
        >>> assert argmax([[0, 1], [2, 3, 4], [5]], key=len) == 1
        >>> assert argmax({'a': 3, 'b': 2, 3: 100, 4: 4}) == 3
        >>> assert argmax(iter(['a', 'c', 'b', 'z', 'f'])) == 3
    """
    if key is None and isinstance(indexable, collections_abc.Mapping):
        # For mappings return the key of the largest value
        best, _ = max(indexable.items(), key=operator.itemgetter(1))
        return best
    if hasattr(indexable, 'index'):
        # For sequences find the winner, then locate its position
        winner = max(indexable) if key is None else max(indexable, key=key)
        return indexable.index(winner)
    # less efficient, but catch all solution
    return argsort(indexable, key=key)[-1]
def argmin(indexable, key=None):
    """
    Returns index / key of the item with the smallest value.

    This is similar to `numpy.argmin`, but it is written in pure python and
    works on both lists and dictionaries.

    Args:
        indexable (Iterable or Mapping): indexable to sort by
        key (Callable, optional): customizes the ordering of the indexable

    Example:
        >>> assert argmin({'a': 3, 'b': 2, 'c': 100}) == 'b'
        >>> assert argmin(['a', 'c', 'b', 'z', 'f']) == 0
        >>> assert argmin([[0, 1], [2, 3, 4], [5]], key=len) == 2
        >>> assert argmin({'a': 3, 'b': 2, 3: 100, 4: 4}) == 'b'
        >>> assert argmin(iter(['a', 'c', 'A', 'z', 'f'])) == 2
    """
    if key is None and isinstance(indexable, collections_abc.Mapping):
        # For mappings return the key of the smallest value
        best, _ = min(indexable.items(), key=operator.itemgetter(1))
        return best
    if hasattr(indexable, 'index'):
        # For sequences find the winner, then locate its position
        winner = min(indexable) if key is None else min(indexable, key=key)
        return indexable.index(winner)
    # less efficient, but catch all solution
    return argsort(indexable, key=key)[0]
def dzip(items1, items2, cls=dict):
    """
    Zips elementwise pairs between items1 and items2 into a dictionary. Values
    from items2 can be broadcast onto items1.

    Args:
        items1 (Iterable): full sequence
        items2 (Iterable): can either be a sequence of one item or a sequence
            of equal length to `items1`
        cls (Type[dict]): dictionary type to use. Defaults to dict, but could
            be ordered dict instead.

    Returns:
        dict: similar to dict(zip(items1, items2))

    Raises:
        ValueError: if the lengths cannot be aligned (after broadcasting)

    Example:
        >>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}
        >>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}
        >>> assert dzip([], [4]) == {}
    """
    # Materialize non-sequence iterables so len() works
    try:
        n1 = len(items1)
    except TypeError:
        items1 = list(items1)
        n1 = len(items1)
    try:
        n2 = len(items2)
    except TypeError:
        items2 = list(items2)
        n2 = len(items2)
    if n1 == 0 and n2 == 1:
        # Corner case:
        # allow the first list to be empty and the second list to broadcast a
        # value. This means that the equality check wont work for the case
        # where items1 and items2 are supposed to correspond, but the length of
        # items2 is 1.
        items2 = []
        n2 = 0
    if n2 == 1 and n1 > 1:
        # Broadcast the single value onto every item of items1
        items2 = items2 * n1
        n2 = n1
    if n1 != n2:
        raise ValueError('out of alignment len(items1)=%r, len(items2)=%r' % (
            n1, n2))
    return cls(zip(items1, items2))
def group_items(items, groupids):
    r"""
    Groups a list of items by group id.

    Args:
        items (Iterable): a list of items to group
        groupids (Iterable or Callable): a corresponding list of item groupids
            or a function mapping an item to a groupid.

    Returns:
        dict: groupid_to_items: maps a groupid to a list of items

    Example:
        >>> import ubelt as ub
        >>> items = ['ham', 'jam', 'spam', 'eggs', 'cheese', 'banana']
        >>> groupids = ['protein', 'fruit', 'protein', 'protein', 'dairy', 'fruit']
        >>> groupid_to_items = ub.group_items(items, groupids)
        >>> print(ub.repr2(groupid_to_items, nl=0))
        {'dairy': ['cheese'], 'fruit': ['jam', 'banana'], 'protein': ['ham', 'spam', 'eggs']}
    """
    groups = defaultdict(list)
    if callable(groupids):
        # groupids is a key function: compute the groupid from each item
        for item in items:
            groups[groupids(item)].append(item)
    else:
        # groupids is a parallel sequence of ids
        for groupid, item in zip(groupids, items):
            groups[groupid].append(item)
    return groups
def dict_hist(item_list, weight_list=None, ordered=False, labels=None):
    """
    Builds a histogram of items, counting the number of time each item appears
    in the input.

    Args:
        item_list (Iterable): hashable items (usually containing duplicates)
        weight_list (Iterable): corresponding weights for each item
        ordered (bool): if True the result is ordered by frequency
        labels (Iterable, optional): expected labels (default None)
            Allows this function to pre-initialize the histogram.
            If specified the frequency of each label is initialized to
            zero and item_list can only contain items specified in labels.

    Returns:
        dict : dictionary where the keys are items in item_list, and the values
            are the number of times the item appears in item_list.

    Raises:
        KeyError: if `labels` is given and `item_list` contains an
            unexpected item

    Example:
        >>> import ubelt as ub
        >>> item_list = [1, 2, 39, 900, 1232, 900, 1232, 2, 2, 2, 900]
        >>> hist = ub.dict_hist(item_list)
        >>> print(ub.repr2(hist, nl=0))
        {1: 1, 2: 4, 39: 1, 900: 3, 1232: 2}

    Example:
        >>> import ubelt as ub
        >>> item_list = [1, 2, 39, 900, 1232, 900, 1232, 2, 2, 2, 900]
        >>> hist1 = ub.dict_hist(item_list)
        >>> hist2 = ub.dict_hist(item_list, ordered=True)
        >>> try:
        >>>     hist3 = ub.dict_hist(item_list, labels=[])
        >>> except KeyError:
        >>>     pass
        >>> else:
        >>>     raise AssertionError('expected key error')
        >>> weight_list = [1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1]
        >>> hist4 = ub.dict_hist(item_list, weight_list=weight_list)
        >>> print(ub.repr2(hist1, nl=0))
        {1: 1, 2: 4, 39: 1, 900: 3, 1232: 2}
        >>> print(ub.repr2(hist4, nl=0))
        {1: 1, 2: 4, 39: 1, 900: 1, 1232: 0}
    """
    if labels is None:
        accum = defaultdict(int)
    else:
        # A plain dict (not defaultdict) enforces the label restriction:
        # an unexpected item raises KeyError
        accum = {label: 0 for label in labels}
    weights = it.repeat(1) if weight_list is None else weight_list
    # Accumulate frequency (or total weight) per item
    for item, weight in zip(item_list, weights):
        accum[item] += weight
    if ordered:
        # Return an explicitly ordered mapping, sorted by frequency
        return OrderedDict(sorted(accum.items(), key=op.itemgetter(1)))
    # Cast to a normal dictionary
    return dict(accum)
def find_duplicates(items, k=2, key=None):
    """
    Find all duplicate items in a list.

    Search for all items that appear more than `k` times and return a mapping
    from each (k)-duplicate item to the positions it appeared in.

    Args:
        items (Iterable): hashable items possibly containing duplicates
        k (int): only return items that appear at least `k` times (default=2)
        key (Callable, optional): Returns indices where `key(items[i])`
            maps to a particular value at least k times.

    Returns:
        dict: maps each duplicate item to the indices at which it appears

    Example:
        >>> import ubelt as ub
        >>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]
        >>> duplicates = ub.find_duplicates(items)
        >>> print('items = %r' % (items,))
        >>> print('duplicates = %r' % (duplicates,))
        >>> assert duplicates == {0: [0, 1, 6], 2: [3, 8], 3: [4, 5]}
        >>> assert ub.find_duplicates(items, 3) == {0: [0, 1, 6]}

    Example:
        >>> import ubelt as ub
        >>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]
        >>> # note: k can be 0
        >>> duplicates = ub.find_duplicates(items, k=0)
        >>> print(ub.repr2(duplicates, nl=0))
        {0: [0, 1, 6], 1: [2], 2: [3, 8], 3: [4, 5], 9: [9], 12: [7]}

    Example:
        >>> import ubelt as ub
        >>> items = [10, 11, 12, 13, 14, 15, 16]
        >>> duplicates = ub.find_duplicates(items, key=lambda x: x // 2)
        >>> print(ub.repr2(duplicates, nl=0))
        {5: [0, 1], 6: [2, 3], 7: [4, 5]}
    """
    # Map each (normalized) item to every index where it occurs
    index_map = defaultdict(list)
    for index, item in enumerate(items):
        ident = item if key is None else key(item)
        index_map[ident].append(index)
    # Keep only the items seen at least k times
    return {ident: positions for ident, positions in index_map.items()
            if len(positions) >= k}
def dict_take(dict_, keys, default=util_const.NoParam):
    r"""
    Generates values from a dictionary

    Args:
        dict_ (Mapping): a dictionary to take from
        keys (Iterable): the keys to take
        default (object, optional): if specified uses default if keys are missing

    Yields:
        object: the value of `dict_` for each requested key

    Raises:
        KeyError: if a key is missing and no `default` was given

    Example:
        >>> import ubelt as ub
        >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
        >>> keys = [1, 2, 3, 4, 5]
        >>> result = list(ub.dict_take(dict_, keys, None))
        >>> assert result == ['a', 'b', 'c', None, None]

    Example:
        >>> import ubelt as ub
        >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
        >>> keys = [1, 2, 3, 4, 5]
        >>> try:
        >>>     print(list(ub.dict_take(dict_, keys)))
        >>>     raise AssertionError('did not get key error')
        >>> except KeyError:
        >>>     print('correctly got key error')
    """
    # Choose the lookup strategy once, outside the loop
    if default is util_const.NoParam:
        # No fallback: direct indexing raises KeyError on missing keys
        lookup = dict_.__getitem__
    else:
        def lookup(key):
            return dict_.get(key, default)
    for key in keys:
        yield lookup(key)
def dict_union(*args):
    """
    Combines the disjoint keys in multiple dictionaries. For intersecting keys,
    dictionaries towards the end of the sequence are given precedence.

    Args:
        *args : a sequence of dictionaries

    Returns:
        Dict | OrderedDict :
            OrderedDict if the first argument is an OrderedDict, otherwise dict

    SeeAlso:
        collections.ChainMap - a standard python builtin data structure that
            provides a view that treats multiple dicts as a single dict.
            https://docs.python.org/3/library/collections.html#chainmap-objects

    Example:
        >>> result = dict_union({'a': 1, 'b': 1}, {'b': 2, 'c': 2})
        >>> assert result == {'a': 1, 'b': 2, 'c': 2}
        >>> dict_union(odict([('a', 1), ('b', 2)]), odict([('c', 3), ('d', 4)]))
        OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 4)])
        >>> dict_union()
        {}
    """
    if not args:
        return {}
    # Preserve orderedness when the first argument is ordered
    dictclass = OrderedDict if isinstance(args[0], OrderedDict) else dict
    merged = dictclass()
    for dict_ in args:
        # Later dicts override values for intersecting keys
        merged.update(dict_)
    return merged
def dict_isect(*args):
    """
    Constructs a dictionary that contains keys common between all inputs.
    The returned values will only belong to the first dictionary.

    Args:
        *args : a sequence of dictionaries (or sets of keys)

    Returns:
        Dict | OrderedDict :
            OrderedDict if the first argument is an OrderedDict, otherwise dict

    Notes:
        This function can be used as an alternative to `dict_subset` where any
        key not in the dictionary is ignored. See the following example:

        >>> dict_isect({'a': 1, 'b': 2, 'c': 3}, ['a', 'c', 'd'])
        {'a': 1, 'c': 3}

    Example:
        >>> dict_isect({'a': 1, 'b': 1}, {'b': 2, 'c': 2})
        {'b': 1}
        >>> dict_isect(odict([('a', 1), ('b', 2)]), odict([('c', 3)]))
        OrderedDict()
        >>> dict_isect()
        {}
    """
    if not args:
        return {}
    else:
        dictclass = OrderedDict if isinstance(args[0], OrderedDict) else dict
        common_keys = set.intersection(*map(set, args))
        first_dict = args[0]
        # BUGFIX: iterate over the first dict (not the key set) so the result
        # preserves the first dictionary's key order. Iterating the set gave
        # an arbitrary order, which broke the OrderedDict guarantee above.
        return dictclass((k, first_dict[k]) for k in first_dict
                         if k in common_keys)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.