| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
if kwargs.get('file', None) is None:
kwargs['file'] = sys.stderr
msg = kwargs.get('sep', ' ').join(str(a) for a in args)
print('debug: {}'.format(msg), **kwargs)
|
def debug(*args, **kwargs)
|
Just prints to stderr, unless printdebug is installed. Then it
will be replaced in `main()` by `printdebug.debug`.
| 2.801006
| 2.944267
| 0.951342
|
val = default
with suppress(KeyError):
val = d.pop(key)
return val
|
def dict_pop_or(d, key, default=None)
|
Try popping a key from a dict.
Instead of raising KeyError, just return the default value.
| 4.38842
| 6.038904
| 0.726691
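For quick reference, a minimal self-contained sketch of how `dict_pop_or` behaves; the sample dict below is made up for illustration:

```python
from contextlib import suppress

def dict_pop_or(d, key, default=None):
    # Pop `key` from `d`; return `default` instead of raising KeyError.
    val = default
    with suppress(KeyError):
        val = d.pop(key)
    return val

opts = {'color': False}
print(dict_pop_or(opts, 'color', True))    # -> False, and 'color' is removed
print(dict_pop_or(opts, 'newline', True))  # -> True, opts is unchanged
```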
|
fore = parse_colr_arg(
get_name_arg(argd, '--fore', 'FORE', default=None),
rgb_mode=argd['--truecolor'],
)
back = parse_colr_arg(
get_name_arg(argd, '--back', 'BACK', default=None),
rgb_mode=argd['--truecolor'],
)
style = get_name_arg(argd, '--style', 'STYLE', default=None)
if argd['--gradient']:
# Build a gradient from user args.
return C(txt).gradient(
name=argd['--gradient'],
spread=try_int(argd['--spread'], 1, minimum=0),
fore=fore,
back=back,
style=style,
rgb_mode=argd['--truecolor'],
)
if argd['--gradientrgb']:
# Build an rgb gradient from user args.
rgb_start, rgb_stop = parse_gradient_rgb_args(argd['--gradientrgb'])
return C(txt).gradient_rgb(
fore=fore,
back=back,
style=style,
start=rgb_start,
stop=rgb_stop,
)
if argd['--rainbow']:
return C(txt).rainbow(
fore=fore,
back=back,
style=style,
freq=try_float(argd['--frequency'], 0.1, minimum=0),
offset=try_int(argd['--offset'], randint(0, 255), minimum=0),
spread=try_float(argd['--spread'], 3.0, minimum=0),
rgb_mode=argd['--truecolor'],
)
# Normal colored output.
return C(txt, fore=fore, back=back, style=style)
|
def get_colr(txt, argd)
|
Return a Colr instance based on user args.
| 2.399155
| 2.32194
| 1.033254
|
val = None
for argname in argnames:
if argd[argname]:
val = argd[argname].lower().strip()
break
return val if val else default
|
def get_name_arg(argd, *argnames, default=None)
|
Return the first argument value given in a docopt arg dict.
When not given, return default.
| 3.580421
| 4.11234
| 0.870653
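A self-contained sketch of the lookup order used by `get_name_arg`; the argument dict below is a hand-built stand-in for a real docopt result:

```python
def get_name_arg(argd, *argnames, default=None):
    # Return the first truthy value among argnames, lowercased and stripped.
    val = None
    for argname in argnames:
        if argd[argname]:
            val = argd[argname].lower().strip()
            break
    return val if val else default

argd = {'--fore': '', 'FORE': ' Red '}  # hypothetical docopt-style dict
print(get_name_arg(argd, '--fore', 'FORE', default=None))  # -> 'red'
```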
|
if DEBUG:
print_err(traceback.format_exc(), color=False)
else:
print_err(*args, newline=True)
|
def handle_err(*args)
|
Handle fatal errors, caught in __main__ scope.
If DEBUG is set, print a real traceback.
Otherwise, `print_err` any arguments passed.
| 6.619842
| 4.586578
| 1.443307
|
methodmap = {
'--ljust': clr.ljust,
'--rjust': clr.rjust,
'--center': clr.center,
}
for flag in methodmap:
if argd[flag]:
if argd[flag] in ('0', '-'):
val = get_terminal_size(default=(80, 35))[0]
else:
val = try_int(argd[flag], minimum=None)
if val < 0:
# Negative value, subtract from terminal width.
val = get_terminal_size(default=(80, 35))[0] + val
return methodmap[flag](val)
# No justify args given.
return clr
|
def justify(clr, argd)
|
Justify str/Colr based on user args.
| 3.562602
| 3.568578
| 0.998325
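The interesting part of `justify` is how the width argument is resolved: '0' or '-' means full terminal width, and a negative number is subtracted from it. A sketch of just that rule, using the stdlib `shutil.get_terminal_size` in place of the project's own helper:

```python
import shutil

def resolve_width(arg):
    # '0' or '-' selects the full terminal width; a negative value is
    # subtracted from the terminal width, mirroring justify() above.
    if arg in ('0', '-'):
        return shutil.get_terminal_size(fallback=(80, 35)).columns
    val = int(arg)
    if val < 0:
        val += shutil.get_terminal_size(fallback=(80, 35)).columns
    return val

print('x'.rjust(resolve_width('-10')))  # right-justified, 10 columns short of full width
```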
|
total = 0
for codedesc in get_known_codes(s, unique=unique, rgb_mode=rgb_mode):
total += 1
print(codedesc)
plural = 'code' if total == 1 else 'codes'
codetype = ' unique' if unique else ''
print('\nFound {}{} escape {}.'.format(total, codetype, plural))
return 0 if total > 0 else 1
|
def list_known_codes(s, unique=True, rgb_mode=False)
|
Find and print all known escape codes in a string,
using get_known_codes.
| 3.882536
| 3.487793
| 1.113179
|
names = get_all_names()
# This is 375 right now. Probably won't ever change, but I'm not sure.
nameslen = len(names)
print('\nListing {} names:\n'.format(nameslen))
# Using 3 columns of names, still alphabetically sorted from the top down.
# Longest name so far: lightgoldenrodyellow (20 chars)
namewidth = 20
# namewidth * columns == 60, colorwidth * columns == 18, final == 78.
swatch = ' ' * 9
third = nameslen // 3
lastthird = third * 2
cols = (
names[0: third],
names[third: lastthird],
names[lastthird:],
)
# Exactly enough spaces to fill in a blank item (+2 for ': ').
# This may not ever be used, unless another 'known name' is added.
blankitem = ' ' * (namewidth + len(swatch) + 2)
for i in range(third):
nameset = []
for colset in cols:
try:
nameset.append(colset[i])
except IndexError:
nameset.append(None)
continue
line = C('').join(
C(': ').join(
C(name.rjust(namewidth)),
C(swatch, back=name),
) if name else blankitem
for name in nameset
)
print(line)
return 0
|
def list_names()
|
List all known color names.
| 7.366923
| 7.267499
| 1.013681
|
arglen = len(args)
if arglen < 1 or arglen > 2:
raise InvalidArg(arglen, label='Expecting 1 or 2 \'-G\' flags, got')
start_rgb = try_rgb(args[0]) if args else None
stop_rgb = try_rgb(args[1]) if arglen > 1 else None
return start_rgb, stop_rgb
|
def parse_gradient_rgb_args(args)
|
Parse one or two rgb args given with --gradientrgb.
Raises InvalidArg for invalid rgb values.
Returns a tuple of (start_rgb, stop_rgb), where the stop_rgb may be
None if only one arg value was given and start_rgb may be None if
no values were given.
| 4.368561
| 3.562118
| 1.226394
|
if kwargs.get('file', None) is None:
kwargs['file'] = sys.stderr
color = dict_pop_or(kwargs, 'color', True)
# Use color if asked, but only if the file is a tty.
if color and kwargs['file'].isatty():
# Keep any Colr args passed, convert strs into Colrs.
msg = kwargs.get('sep', ' ').join(
str(a) if isinstance(a, C) else str(C(a, 'red'))
for a in args
)
else:
# The file is not a tty anyway, no escape codes.
msg = kwargs.get('sep', ' ').join(
str(a.stripped() if isinstance(a, C) else a)
for a in args
)
newline = dict_pop_or(kwargs, 'newline', False)
if newline:
msg = '\n{}'.format(msg)
print(msg, **kwargs)
|
def print_err(*args, **kwargs)
|
A wrapper for print() that uses stderr by default.
| 3.936827
| 3.770285
| 1.044172
|
if sys.stdin.isatty() and sys.stdout.isatty():
print('\nReading from stdin until end of file (Ctrl + D)...')
return sys.stdin.read()
|
def read_stdin()
|
Read text from stdin, and print a helpful message for ttys.
| 4.962317
| 4.076143
| 1.217405
|
for code in usercodes:
code = code.strip().lower()
if code.isalpha() and (code in codes['fore']):
# Basic color name.
yield translate_basic(code)
else:
if ',' in code:
try:
r, g, b = (int(c.strip()) for c in code.split(','))
except (TypeError, ValueError):
raise InvalidColr(code)
code = (r, g, b)
colorcode = ColorCode(code, rgb_mode=rgb_mode)
if disabled():
yield str(colorcode)
yield colorcode.example()
|
def translate(usercodes, rgb_mode=False)
|
Translate one or more hex, term, or rgb values into the others.
Yields strings with the results for each code translated.
| 5.196143
| 5.17305
| 1.004464
|
codenum = get_code_num(codes['fore'][usercode])
colorcode = codeformat(codenum)
msg = 'Name: {:>10}, Number: {:>3}, EscapeCode: {!r}'.format(
usercode,
codenum,
colorcode
)
if disabled():
return msg
return str(C(msg, fore=usercode))
|
def translate_basic(usercode)
|
Translate a basic color name to color with explanation.
| 11.736608
| 10.099758
| 1.162068
|
if not s:
return default
try:
val = float(s)
except (TypeError, ValueError):
raise InvalidNumber(s, label='Invalid float value')
if (minimum is not None) and (val < minimum):
val = minimum
return val
|
def try_float(s, default=None, minimum=None)
|
Try parsing a string into a float.
If None is passed, default is returned.
On failure, InvalidNumber is raised (labeled as an invalid float value).
| 3.478119
| 3.606602
| 0.964376
|
if not s:
return default
try:
val = int(s)
except (TypeError, ValueError):
raise InvalidNumber(s)
if (minimum is not None) and (val < minimum):
val = minimum
return val
|
def try_int(s, default=None, minimum=None)
|
Try parsing a string into an integer.
If None is passed, default is returned.
On failure, InvalidNumber is raised.
| 2.954003
| 2.642563
| 1.117856
|
if not s:
return default
try:
r, g, b = (int(x.strip()) for x in s.split(','))
except ValueError:
raise InvalidRgb(s)
if not all(in_range(x, 0, 255) for x in (r, g, b)):
raise InvalidRgb(s)
return r, g, b
|
def try_rgb(s, default=None)
|
Try parsing a string into an rgb value (int, int, int),
where the ints are 0-255 inclusive.
If None is passed, default is returned.
On failure, InvalidArg is raised.
| 2.262766
| 2.349912
| 0.962915
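The `try_*` helpers above share one pattern: empty input returns the default, and a parse failure raises one of the project's exceptions. A sketch of the rgb variant, with a plain `ValueError` standing in for `InvalidRgb`:

```python
def try_rgb(s, default=None):
    # Parse "R,G,B" into a tuple of ints in 0-255.
    # ValueError stands in for InvalidRgb here.
    if not s:
        return default
    r, g, b = (int(x.strip()) for x in s.split(','))
    if not all(0 <= x <= 255 for x in (r, g, b)):
        raise ValueError(s)
    return r, g, b

print(try_rgb('25, 50, 75'))            # -> (25, 50, 75)
print(try_rgb('', default=(0, 0, 0)))   # -> (0, 0, 0)
```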
|
try:
mainret = main()
except (EOFError, KeyboardInterrupt):
print_err('\nUser cancelled.\n')
mainret = 2
except BrokenPipeError:
print_err('\nBroken pipe, input/output was interrupted.\n')
mainret = 3
except InvalidArg as exarg:
handle_err(exarg.as_colr())
mainret = 4
except ValueError as exnum:
handle_err(exnum)
mainret = 4
sys.exit(mainret)
|
def entry_point()
|
An entry point for setuptools. This is required because
`if __name__ == '__main__'` is not fired when the entry point
is 'main()'. This just wraps the old behavior in a function so
it can be called from setuptools.
| 4.819429
| 5.22442
| 0.922481
|
if hasattr(ssl, 'create_default_context'):
# Python 2.7.9+, Python 3.4+: take a server_side boolean or None, in
# addition to the ssl.Purpose.XX values. This allows a user to write
# code that works on all supported Python versions.
if purpose is None or purpose is False:
purpose = ssl.Purpose.SERVER_AUTH
elif purpose is True:
purpose = ssl.Purpose.CLIENT_AUTH
return ssl.create_default_context(purpose, **kwargs)
# Python 2.7.8, Python 3.3
context = SSLContext(ssl.PROTOCOL_SSLv23)
if kwargs.get('cafile'):
context.load_verify_locations(kwargs['cafile'])
return context
|
def create_default_context(purpose=None, **kwargs)
|
Create a new SSL context in the most secure way available on the current
Python version. See :func:`ssl.create_default_context`.
| 3.47232
| 3.477378
| 0.998545
|
# Many class instances have their own logger. Share them to save memory if
# possible, i.e. when *context* is not set.
if name is None:
name = _logger_name
if context is None and name in _logger_dict:
return _logger_dict[name]
if context is not None and not isinstance(context, six.string_types):
context = util.objref(context)
logger = logging.getLogger(name)
logger = ContextLogger(logger, context)
if context is None:
_logger_dict[name] = logger
return logger
|
def get_logger(context=None, name=None)
|
Return a logger for *context*.
Return a :class:`ContextLogger` instance. The instance implements the
standard library's :class:`logging.Logger` interface.
| 4.135983
| 4.12902
| 1.001686
|
tid = threading.current_thread().name
if tid == 'MainThread':
tid = 'Main'
current = fibers.current()
fid = getattr(current, 'name') if current.parent else 'Root'
return '{}/{}'.format(tid, fid)
|
def thread_info(self)
|
Return a string identifying the current thread and fiber.
| 7.34233
| 5.609948
| 1.308806
|
if not self._logger.isEnabledFor(logging.DEBUG):
return ''
f = sys._getframe(3)
fname = os.path.split(f.f_code.co_filename)[1]
return '{}:{}'.format(fname, f.f_lineno)
|
def frame_info(self)
|
Return a string identifying the current frame.
| 3.044948
| 2.598106
| 1.171988
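`frame_info` leans on CPython's `sys._getframe`; a standalone sketch with the frame depth reduced to 0 so it runs outside the logger's call chain (the depth of 3 above skips the logging wrappers):

```python
import os
import sys

def current_location():
    # sys._getframe(0) is the frame of current_location() itself;
    # frame_info() above uses depth 3 to skip its logging wrappers.
    f = sys._getframe(0)
    fname = os.path.split(f.f_code.co_filename)[1]
    return '{}:{}'.format(fname, f.f_lineno)

print(current_location())  # e.g. 'example.py:9'
```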
|
if high is None:
high = self.default_buffer_size
if low is None:
low = high // 2
self._buffer_high = high
self._buffer_low = low
|
def set_buffer_limits(self, high=None, low=None)
|
Set the low and high watermarks for the read buffer.
| 2.622205
| 2.31323
| 1.133569
|
self._buffers.append(data)
self._buffer_size += len(data)
self._maybe_pause_transport()
self._can_read.set()
|
def feed(self, data)
|
Add *data* to the buffer.
| 5.833256
| 5.324397
| 1.095571
|
# By default we want write_through behavior, unless the user specifies
# something else.
if 'line_buffering' not in textio_args and 'write_through' not in textio_args:
textio_args['write_through'] = True
return compat.TextIOWrapper(self, encoding, **textio_args)
|
def wrap(self, encoding, **textio_args)
|
Return a :class:`io.TextIOWrapper` that wraps the stream.
The wrapper provides text IO on top of the byte stream, using the
specified *encoding*. The *textio_args* keyword arguments are
additional keyword arguments passed to the :class:`~io.TextIOWrapper`
constructor. Unless another buffering scheme is specified, the
*write_through* option is enabled.
| 3.703863
| 3.687705
| 1.004382
|
self._check_readable()
chunks = []
bytes_read = 0
bytes_left = size
while True:
chunk = self._buffer.get_chunk(bytes_left)
if not chunk:
break
chunks.append(chunk)
bytes_read += len(chunk)
if bytes_read == size or not chunk:
break
if bytes_left > 0:
bytes_left -= len(chunk)
# If EOF was set, always return that instead of any error.
if not chunks and not self._buffer.eof and self._buffer.error:
raise compat.saved_exc(self._buffer.error)
return b''.join(chunks)
|
def read(self, size=-1)
|
Read up to *size* bytes.
This function reads from the buffer multiple times until the requested
number of bytes can be satisfied. This means that this function may
block to wait for more data, even if some data is available. The only
time a short read is returned, is on EOF or error.
If *size* is not specified or negative, read until EOF.
| 3.319104
| 3.343184
| 0.992798
|
self._check_readable()
chunk = self._buffer.get_chunk(size)
if not chunk and not self._buffer.eof and self._buffer.error:
raise compat.saved_exc(self._buffer.error)
return chunk
|
def read1(self, size=-1)
|
Read up to *size* bytes.
This function reads from the buffer only once. It is useful in case you
need to read a large input, and want to do so efficiently. If *size* is
big enough, then this method will return the chunks passed into the
memory buffer verbatim without any copying or slicing.
| 5.738431
| 5.914677
| 0.970202
|
self._check_readable()
chunks = []
while True:
chunk = self._buffer.get_chunk(limit, delim)
if not chunk:
break
chunks.append(chunk)
if chunk.endswith(delim):
break
if limit >= 0:
limit -= len(chunk)
if limit == 0:
break
if not chunks and not self._buffer.eof and self._buffer.error:
raise compat.saved_exc(self._buffer.error)
return b''.join(chunks)
|
def readline(self, limit=-1, delim=b'\n')
|
Read a single line.
If EOF is reached before a full line can be read, a partial line is
returned. If *limit* is specified, at most this many bytes will be read.
| 2.821364
| 3.020599
| 0.934041
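The core of `readline` is delimiter-bounded chunk assembly. A simplified sketch over an in-memory list of chunks that stands in for the stream's buffer (no size limit or error handling):

```python
def readline_from_chunks(chunks, delim=b'\n'):
    # Join buffered chunks until one ends with the delimiter,
    # mirroring the loop in readline() above.
    out = []
    for chunk in chunks:
        out.append(chunk)
        if chunk.endswith(delim):
            break
    return b''.join(out)

print(readline_from_chunks([b'hello ', b'world\n', b'next line\n']))  # -> b'hello world\n'
```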
|
self._check_readable()
lines = []
chunks = []
bytes_read = 0
while True:
chunk = self._buffer.get_chunk(-1, b'\n')
if not chunk:
break
chunks.append(chunk)
if chunk.endswith(b'\n'):
lines.append(b''.join(chunks))
del chunks[:]
bytes_read += len(lines[-1])
if hint >= 0 and bytes_read > hint:
break
if chunks:
lines.append(b''.join(chunks))
if not lines and not self._buffer.eof and self._buffer.error:
raise compat.saved_exc(self._buffer.error)
return lines
|
def readlines(self, hint=-1)
|
Read lines until EOF, and return them as a list.
If *hint* is specified, then stop reading lines as soon as the total
size of all lines exceeds *hint*.
| 2.902993
| 2.930032
| 0.990772
|
self._check_writable()
self._transport._can_write.wait()
self._transport.write(data)
|
def write(self, data)
|
Write *data* to the transport.
This method will block if the transport's write buffer is at capacity.
| 7.652576
| 7.082409
| 1.080505
|
self._check_writable()
for line in seq:
self._transport._can_write.wait()
self._transport.write(line)
|
def writelines(self, seq)
|
Write the elements of the sequence *seq* to the transport.
This method will block if the transport's write buffer is at capacity.
| 6.14457
| 5.763406
| 1.066135
|
self._check_writable()
self._transport._can_write.wait()
self._transport.write_eof()
|
def write_eof(self)
|
Close the write direction of the transport.
This method will block if the transport's write buffer is at capacity.
| 8.773983
| 7.549585
| 1.162181
|
if self._closed:
return
if self._autoclose:
self._transport.close()
self._transport._closed.wait()
self._transport = None
self._closed = True
|
def close(self)
|
Close the stream.
If *autoclose* was passed to the constructor then the underlying
transport will be closed as well.
| 4.593401
| 3.961135
| 1.159617
|
pool = get_io_pool()
fut = pool.submit(func, *args, **kwargs)
return fut.result()
|
def blocking(func, *args, **kwargs)
|
Run a function that uses blocking IO.
The function is run in the IO thread pool.
| 4.119806
| 3.609731
| 1.141305
|
for obj in objects:
if not hasattr(obj, 'add_done_callback'):
raise TypeError('Expecting sequence of waitable objects')
if count is None:
count = len(objects)
if count < 0 or count > len(objects):
raise ValueError('count must be between 0 and len(objects)')
if count == 0:
return
pending = list(objects)
for obj in _wait(pending, timeout):
yield obj
count -= 1
if count == 0:
break
|
def as_completed(objects, count=None, timeout=None)
|
Wait for one or more waitable objects, yielding them as they become
ready.
This is the iterator/generator version of :func:`wait`.
| 2.837284
| 2.971767
| 0.954746
|
for obj in objects:
if not hasattr(obj, 'add_done_callback'):
raise TypeError('Expecting sequence of waitable objects')
if count is None:
count = len(objects)
if count < 0 or count > len(objects):
raise ValueError('count must be between 0 and len(objects)')
if count == 0:
return [], objects
pending = list(objects)
done = []
try:
for obj in _wait(pending, timeout):
done.append(obj)
if len(done) == count:
break
except Timeout:
pass
return done, list(filter(bool, pending))
|
def wait(objects, count=None, timeout=None)
|
Wait for one or more waitable objects.
This method waits until *count* elements from the sequence of waitable
objects *objects* have become ready. If *count* is ``None`` (the default),
then wait for all objects to become ready.
What "ready" is means depends on the object type. A waitable object is a
objects that implements the ``add_done_callback()`` and
``remove_done_callback`` methods. This currently includes:
* :class:`~gruvi.Event` - an event is ready when its internal flag is set.
* :class:`~gruvi.Future` - a future is ready when its result is set.
* :class:`~gruvi.Fiber` - a fiber is ready when has terminated.
* :class:`~gruvi.Process` - a process is ready when the child has exited.
| 2.886833
| 3.056418
| 0.944515
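The docstring above defines "waitable" purely in terms of `add_done_callback()`/`remove_done_callback()`. A minimal, library-free object satisfying that protocol (the class below is hypothetical and only illustrates the callback contract):

```python
class Waitable:
    # Hypothetical minimal waitable: callbacks registered with
    # add_done_callback() fire once set_ready() is called.
    def __init__(self):
        self._ready = False
        self._callbacks = []

    def add_done_callback(self, callback, *args):
        handle = (callback, args)
        if self._ready:
            callback(*args)
            return None
        self._callbacks.append(handle)
        return handle

    def remove_done_callback(self, handle):
        if handle in self._callbacks:
            self._callbacks.remove(handle)

    def set_ready(self):
        self._ready = True
        for callback, args in self._callbacks:
            callback(*args)
        del self._callbacks[:]

w = Waitable()
w.add_done_callback(print, 'ready!')
w.set_ready()  # prints 'ready!'
```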
|
return self._state == self.S_EXCEPTION and isinstance(self._result, Cancelled)
|
def cancelled(self)
|
Return whether this future was successfully cancelled.
| 15.756161
| 14.59941
| 1.079233
|
# We leverage/abuse our _done Event's thread lock as our own lock.
# Since it's a private copy it should be OK, and it saves some memory.
# Just be sure that we don't modify the event with the lock held.
with self._lock:
if self._state not in (self.S_PENDING, self.S_RUNNING):
return False
self._result = Cancelled('cancelled by Future.cancel()')
self._state = self.S_EXCEPTION
self._done.set()
return True
|
def cancel(self)
|
Cancel the execution of the async function, if possible.
This method marks the future as done and sets the :class:`Cancelled`
exception.
A future that is not running can always be cancelled. However when a
future is running, the ability to cancel it depends on the pool
implementation. For example, a fiber pool can cancel running fibers but
a thread pool cannot.
Return ``True`` if the future could be cancelled, ``False`` otherwise.
| 9.755252
| 9.065942
| 1.076033
|
if not self._done.wait(timeout):
raise Timeout('timeout waiting for future')
# No more state changes after _done is set so no lock needed.
if self._state == self.S_EXCEPTION:
raise compat.saved_exc(self._result)
return self._result
|
def result(self, timeout=None)
|
Wait for the future to complete and return its result.
If the function returned normally, its return value is returned here.
If the function raised an exception, the exception is re-raised here.
| 9.867634
| 9.137566
| 1.079897
|
if not self._done.wait(timeout):
raise Timeout('timeout waiting for future')
if self._state == self.S_EXCEPTION:
return self._result
|
def exception(self, timeout=None)
|
Wait for the async function to complete and return its exception.
If the function did not raise an exception this returns ``None``.
| 6.616474
| 7.050745
| 0.938408
|
with self._lock:
if self._state not in (self.S_DONE, self.S_EXCEPTION):
return add_callback(self, callback, args)
callback(*args)
|
def add_done_callback(self, callback, *args)
|
Add a callback that gets called when the future completes.
The callback will be called in the context of the fiber that sets the
future's result. The callback is called with the positional arguments
*args* provided to this method.
The return value is an opaque handle that can be used with
:meth:`~gruvi.Future.remove_done_callback` to remove the callback.
If the future has already completed, then the callback is called
immediately from this method and the return value will be ``None``.
| 5.359251
| 7.202807
| 0.74405
|
with self._lock:
if self._closing:
raise RuntimeError('pool is closing/closed')
result = Future()
self._queue.put_nowait((func, args, result))
self._spawn_workers()
return result
|
def submit(self, func, *args)
|
Run *func* asynchronously.
The function is run asynchronously in the pool. The function is called
with the positional arguments *args*.
The return value is a :class:`Future` that captures the state and the
future result of the asynchronous function call.
| 4.711378
| 5.038167
| 0.935137
|
with self._lock:
if self._closing:
raise RuntimeError('pool is closing/closed')
timeout = kwargs.pop('timeout', None)
futures = []
for args in zip(*iterables):
result = Future()
self._queue.put_nowait((func, args, result))
futures.append(result)
self._spawn_workers()
try:
with switch_back(timeout):
for future in futures:
yield future.result()
except Exception:
# Timeout, GeneratorExit or future.set_exception()
for future in futures:
if not future.done():
future.cancel()
raise
|
def map(self, func, *iterables, **kwargs)
|
Apply *func* to the elements of the sequences in *iterables*.
All invocations of *func* are run in the pool. If multiple iterables
are provided, then *func* must take this many arguments, and is applied
with one element from each iterable. All iterables must yield the same
number of elements.
An optional *timeout* keyword argument may be provided to specify a
timeout.
This returns a generator yielding the results.
| 4.092672
| 4.261974
| 0.960276
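For orientation, the stdlib `concurrent.futures` pool exposes the same submit()/map() shape described above (threads instead of fibers, and without the *timeout* keyword handling shown here):

```python
from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=2) as pool:
    future = pool.submit(pow, 2, 10)
    print(future.result())                       # -> 1024
    print(list(pool.map(pow, [2, 3], [3, 2])))   # -> [8, 9]
```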
|
with self._lock:
if self._closing:
return
self._closing = True
if not self._workers:
self._closed.set()
return
self._queue.put_nowait(self._PoolClosing)
self._closed.wait()
|
def close(self)
|
Close the pool and wait for all workers to exit.
New submissions will be blocked. Workers will exit once their current
job is finished. This method will return after all workers have exited.
| 4.307395
| 4.043226
| 1.065336
|
# FFT the signal and extract frequency components
arr2D = mlab.specgram(
channel_samples,
NFFT=wsize,
Fs=Fs,
window=mlab.window_hanning,
noverlap=int(wsize * wratio))[0]
arr2D = 10 * np.log10(arr2D)
arr2D[arr2D == -np.inf] = 0 # replace infs with zeros
# find local maxima
local_maxima = get_2D_peaks(arr2D, plot=False, amp_min=amp_min)
# return hashes
return generate_hashes(local_maxima, fan_value=fan_value)
|
def fingerprint(channel_samples: list, Fs: int = DEFAULT_FS,
wsize: int = DEFAULT_WINDOW_SIZE,
wratio: Union[int, float] = DEFAULT_OVERLAP_RATIO,
fan_value: int = DEFAULT_FAN_VALUE,
amp_min: Union[int, float] = DEFAULT_AMP_MIN) -> Iterator[tuple]
|
FFT the channel, log transform the output, find local maxima, then return
locality-sensitive hashes.
| 3.191436
| 2.878155
| 1.108848
|
if PEAK_SORT:
peaks = sorted(peaks, key=lambda x: x[1])
# peaks.sort(key=itemgetter(1))
for i in range(len(peaks)):
for j in range(1, fan_value):
if (i + j) < len(peaks):
freq1 = peaks[i][IDX_FREQ_I]
freq2 = peaks[i + j][IDX_FREQ_I]
t1 = peaks[i][IDX_TIME_J]
t2 = peaks[i + j][IDX_TIME_J]
t_delta = t2 - t1
if MIN_HASH_TIME_DELTA <= t_delta <= MAX_HASH_TIME_DELTA:
key = "{}|{}|{}".format(freq1, freq2, t_delta)
h = hashlib.sha1(key.encode('utf-8'))
yield (h.hexdigest()[0:FINGERPRINT_REDUCTION], t1)
|
def generate_hashes(peaks, fan_value: int = DEFAULT_FAN_VALUE)
|
Hash list structure:
sha1_hash[0:20] time_offset
[(e05b341a9b77a51fd26, 32), ... ]
| 2.805281
| 2.691611
| 1.042231
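The fingerprint key built above is a SHA-1 over "freq1|freq2|t_delta", truncated to FINGERPRINT_REDUCTION characters. A self-contained sketch with made-up peaks and an assumed truncation length of 20:

```python
import hashlib

FINGERPRINT_REDUCTION = 20  # assumed value, for illustration only

def peak_pair_hash(freq1, freq2, t_delta):
    # Same key scheme as generate_hashes() above.
    key = '{}|{}|{}'.format(freq1, freq2, t_delta)
    return hashlib.sha1(key.encode('utf-8')).hexdigest()[:FINGERPRINT_REDUCTION]

peaks = [(440, 10), (880, 14)]           # made-up (frequency, time) peaks
t_delta = peaks[1][1] - peaks[0][1]
print(peak_pair_hash(peaks[0][0], peaks[1][0], t_delta), peaks[0][1])
```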
|
if self._state != self.S_UNWRAPPED:
raise RuntimeError('handshake in progress or completed')
self._sslobj = sslcompat.wrap_bio(self._context, self._incoming, self._outgoing,
self._server_side, self._server_hostname)
self._state = self.S_DO_HANDSHAKE
self._handshake_cb = callback
ssldata, appdata = self.feed_ssldata(b'')
assert len(appdata) == 0
return ssldata
|
def do_handshake(self, callback=None)
|
Start the SSL handshake. Return a list of ssldata.
The optional *callback* argument can be used to install a callback that
will be called when the handshake is complete. The callback will be
called without arguments.
| 5.371429
| 5.188555
| 1.035246
|
if self._state == self.S_UNWRAPPED:
raise RuntimeError('no security layer present')
self._state = self.S_SHUTDOWN
self._shutdown_cb = callback
ssldata, appdata = self.feed_ssldata(b'')
assert appdata == [] or appdata == [b'']
return ssldata
|
def shutdown(self, callback=None)
|
Start the SSL shutdown sequence. Return a list of ssldata.
The optional *callback* argument can be used to install a callback that
will be called when the shutdown is complete. The callback will be
called without arguments.
| 6.807727
| 6.364618
| 1.069621
|
self._incoming.write_eof()
ssldata, appdata = self.feed_ssldata(b'')
assert appdata == [] or appdata == [b'']
|
def feed_eof(self)
|
Send a potentially "ragged" EOF.
This method will raise an SSL_ERROR_EOF exception if the EOF is
unexpected.
| 11.526489
| 12.283837
| 0.938346
|
if self._state == self.S_UNWRAPPED:
# If unwrapped, pass plaintext data straight through.
return ([], [data] if data else [])
ssldata = []; appdata = []
self._need_ssldata = False
if data:
self._incoming.write(data)
try:
if self._state == self.S_DO_HANDSHAKE:
# Call do_handshake() until it doesn't raise anymore.
self._sslobj.do_handshake()
self._state = self.S_WRAPPED
if self._handshake_cb:
self._handshake_cb()
if self._state == self.S_WRAPPED:
# Main state: read data from SSL until close_notify
while True:
chunk = self._sslobj.read(self.bufsize)
appdata.append(chunk)
if not chunk: # close_notify
break
if self._state == self.S_SHUTDOWN:
# Call shutdown() until it doesn't raise anymore.
self._sslobj.unwrap()
self._sslobj = None
self._state = self.S_UNWRAPPED
if self._shutdown_cb:
self._shutdown_cb()
if self._state == self.S_UNWRAPPED:
# Drain possible plaintext data after close_notify.
appdata.append(self._incoming.read())
except (ssl.SSLError, sslcompat.CertificateError) as e:
if getattr(e, 'errno', None) not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE, ssl.SSL_ERROR_SYSCALL):
if self._state == self.S_DO_HANDSHAKE and self._handshake_cb:
self._handshake_cb(e)
raise
self._need_ssldata = e.errno == ssl.SSL_ERROR_WANT_READ
# Check for record level data that needs to be sent back.
# Happens for the initial handshake and renegotiations.
if self._outgoing.pending:
ssldata.append(self._outgoing.read())
return (ssldata, appdata)
|
def feed_ssldata(self, data)
|
Feed SSL record level data into the pipe.
The data must be a bytes instance. It is OK to send an empty bytes
instance. This can be used to get ssldata for a handshake initiated by
this endpoint.
Return a (ssldata, appdata) tuple. The ssldata element is a list of
buffers containing SSL data that needs to be sent to the remote SSL.
The appdata element is a list of buffers containing plaintext data that
needs to be forwarded to the application. The appdata list may contain
an empty buffer indicating an SSL "close_notify" alert. This alert must
be acknowledged by calling :meth:`shutdown`.
| 3.127568
| 2.948758
| 1.060639
|
if self._state == self.S_UNWRAPPED:
# pass through data in unwrapped mode
return ([data[offset:]] if offset < len(data) else [], len(data))
ssldata = []
view = memoryview(data)
while True:
self._need_ssldata = False
try:
if offset < len(view):
offset += self._sslobj.write(view[offset:])
except ssl.SSLError as e:
# It is not allowed to call write() after unwrap() until the
# close_notify is acknowledged. We return the condition to the
# caller as a short write.
if sslcompat.get_reason(e) == 'PROTOCOL_IS_SHUTDOWN':
e.errno = ssl.SSL_ERROR_WANT_READ
if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
ssl.SSL_ERROR_SYSCALL):
raise
self._need_ssldata = e.errno == ssl.SSL_ERROR_WANT_READ
# See if there's any record level data back for us.
if self._outgoing.pending:
ssldata.append(self._outgoing.read())
if offset == len(view) or self._need_ssldata:
break
return (ssldata, offset)
|
def feed_appdata(self, data, offset=0)
|
Feed plaintext data into the pipe.
Return an (ssldata, offset) tuple. The ssldata element is a list of
buffers containing record level data that needs to be sent to the
remote SSL instance. The offset is the number of plaintext bytes that
were processed, which may be less than the length of data.
NOTE: In case of short writes, this call MUST be retried with the SAME
buffer passed into the *data* argument (i.e. the ``id()`` must be the
same). This is an OpenSSL requirement. A further particularity is that
a short write will always have offset == 0, because the _ssl module
does not enable partial writes. And even though the offset is zero,
there will still be encrypted data in ssldata.
| 4.552585
| 3.988544
| 1.141415
|
if name == 'ssl':
return self._sslpipe.ssl_object
elif name == 'sslctx':
return self._sslpipe.context
else:
return super(SslTransport, self).get_extra_info(name, default)
|
def get_extra_info(self, name, default=None)
|
Return transport specific data.
The following fields are available, in addition to the information
exposed by :meth:`Transport.get_extra_info`.
====================== ===============================================
Name Description
====================== ===============================================
``'ssl'`` The internal ``ssl.SSLObject`` instance used by
this transport.
``'sslctx'`` The ``ssl.SSLContext`` instance used to create
the SSL object.
====================== ===============================================
| 4.242472
| 2.914838
| 1.455475
|
if self._error:
raise compat.saved_exc(self._error)
elif self._closing or self._handle.closed:
raise TransportError('SSL transport is closing/closed')
self._write_backlog.append([b'', True])
self._process_write_backlog()
|
def do_handshake(self)
|
Start the SSL handshake.
This method only needs to be called if this transport was created with
*do_handshake_on_connect* set to False (the default is True).
The handshake needs to be synchronized between both endpoints, so
that SSL record level data is not accidentally interpreted as
plaintext. Usually this is done by starting the handshake directly
after a connection is established, but you can also use an application
level protocol.
| 9.346072
| 9.117069
| 1.025118
|
if self._error:
raise compat.saved_exc(self._error)
elif self._closing or self._handle.closed:
raise TransportError('SSL transport is closing/closed')
self._close_on_unwrap = False
self._write_backlog.append([b'', False])
self._process_write_backlog()
|
def unwrap(self)
|
Remove the security layer.
Use this method only if you want to send plaintext data on the
connection after the security layer has been removed. In all other
cases, use :meth:`close`.
If the unwrap is initiated by us, then any data sent after it will be
buffered until the corresponding close_notify response is received from
our peer.
If the unwrap is initiated by the remote peer, then this method will
acknowledge it. You need an application level protocol to determine
when to do this because the receipt of a close_notify is not
communicated to the application.
| 9.581858
| 8.302616
| 1.154077
|
if self._closing or self._handle.closed:
return
self._closing = True
self._write_backlog.append([b'', False])
self._process_write_backlog()
|
def close(self)
|
Cleanly shut down the SSL protocol and close the transport.
| 6.709712
| 6.16361
| 1.088601
|
if hasattr(lock, 'locked'):
return lock.locked()
elif hasattr(lock, '_is_owned'):
return lock._is_owned()
else:
raise TypeError('expecting Lock/RLock')
|
def is_locked(lock)
|
Return whether a lock is locked.
Supports :class:`Lock`, :class:`RLock`, :class:`threading.Lock` and
:class:`threading.RLock` instances.
| 3.330528
| 3.225213
| 1.032653
|
if hasattr(lock, '_acquire_restore'):
lock._acquire_restore(state)
elif hasattr(lock, 'acquire'):
lock.acquire()
else:
raise TypeError('expecting Lock/RLock')
|
def acquire_restore(lock, state)
|
Acquire a lock and restore its state.
| 3.355481
| 3.448951
| 0.972899
|
if hasattr(lock, '_release_save'):
return lock._release_save()
elif hasattr(lock, 'release'):
lock.release()
else:
raise TypeError('expecting Lock/RLock')
|
def release_save(lock)
|
Release a lock and return its state.
| 3.733277
| 3.431821
| 1.087841
|
if hasattr(lock, '_lock'):
return lock._lock
elif hasattr(lock, 'acquire'):
return lock
else:
raise TypeError('expecting Lock/RLock')
|
def thread_lock(lock)
|
Return the thread lock for *lock*.
| 3.97731
| 3.44658
| 1.153987
|
hub = get_hub()
try:
# switcher.__call__ needs to be synchronized with a lock IF it can
# be called from different threads. This is the case here because
# this method may be called from multiple threads and the callbacks
# are run in the calling thread. So pass it our _lock.
with switch_back(timeout, lock=self._lock) as switcher:
with self._lock:
if not self._locked:
self._locked = 1
self._owner = fibers.current()
return True
elif self._reentrant and self._owner is fibers.current():
self._locked += 1
return True
elif not blocking:
return False
handle = add_callback(self, switcher)
# It is safe to call hub.switch() outside the lock. Another
# thread could have called acquire()+release(), thereby firing
# the switchback. However the switchback only schedules the
# switchback in our hub, it won't execute it yet. So the
# switchback won't actually happen until we switch to the hub.
hub.switch()
# Here the lock should be ours because _release() wakes up only
# the fiber that it passed the lock.
assert self._locked > 0
assert self._owner is fibers.current()
except BaseException as e:
# Likely a Timeout but could also be e.g. Cancelled
with self._lock:
# Clean up the callback. It might have been popped by
# _release() but that is OK.
remove_callback(self, handle)
# This fiber was passed the lock but before that an exception
# was already scheduled with run_callback() (likely through
# Fiber.throw())
if self._owner is fibers.current():
self._release()
if e is switcher.timeout:
return False
raise
return True
|
def acquire(self, blocking=True, timeout=None)
|
Acquire the lock.
If *blocking* is true (the default), then this will block until the
lock can be acquired. The *timeout* parameter specifies an optional
timeout in seconds.
The return value is a boolean indicating whether the lock was acquired.
| 7.836555
| 8.027246
| 0.976245
|
with self._lock:
if not self._locked:
raise RuntimeError('lock not currently held')
elif self._reentrant and self._owner is not fibers.current():
raise RuntimeError('lock not owned by this fiber')
self._release()
|
def release(self)
|
Release the lock.
| 7.48049
| 5.677062
| 1.317669
|
with self._lock:
if self._flag:
return
self._flag = True
with assert_no_switchpoints():
run_callbacks(self)
|
def set(self)
|
Set the internal flag, and wake up any fibers blocked on :meth:`wait`.
| 10.400318
| 9.674145
| 1.075063
|
# Optimization for the case the Event is already set.
if self._flag:
return True
hub = get_hub()
try:
with switch_back(timeout, lock=self._lock) as switcher:
with self._lock:
# Need to check the flag again, now under the lock.
if self._flag:
return True
# Allow other fibers to wake us up via callback in set().
# The callback goes to switcher.switch() directly instead of
# __call__(), because the latter would try to lock our lock
# which is already held when callbacks are run by set().
handle = add_callback(self, switcher.switch)
# See note in Lock.acquire() why we can call to hub.switch()
# outside the lock.
hub.switch()
except BaseException as e:
with self._lock:
remove_callback(self, handle)
if e is switcher.timeout:
return False
raise
return True
|
def wait(self, timeout=None)
|
If the internal flag is set, return immediately. Otherwise block
until the flag gets set by another fiber calling :meth:`set`.
| 8.992538
| 8.093973
| 1.111017
|
if not is_locked(self._lock):
raise RuntimeError('lock is not locked')
notified = [0] # Work around lack of "nonlocal" in py27
def walker(switcher, predicate):
if not switcher.active:
return False  # do not keep a switcher that timed out
if predicate and not predicate():
return True
if n >= 0 and notified[0] >= n:
return True
switcher.switch()
notified[0] += 1
return False # only notify once
walk_callbacks(self, walker)
|
def notify(self, n=1)
|
Raise the condition and wake up fibers waiting on it.
The optional *n* parameter specifies how many fibers will be notified.
By default, one fiber is notified.
| 7.77483
| 7.903038
| 0.983777
|
if not is_locked(self._lock):
raise RuntimeError('lock is not locked')
hub = get_hub()
try:
with switch_back(timeout, lock=thread_lock(self._lock)) as switcher:
handle = add_callback(self, switcher, predicate)
# See the comment in Lock.acquire() why it is OK to release the
# lock here before calling hub.switch().
# Also if this is a reentrant lock make sure it is fully released.
state = release_save(self._lock)
hub.switch()
except BaseException as e:
with self._lock:
remove_callback(self, handle)
if e is switcher.timeout:
return False
raise
finally:
acquire_restore(self._lock, state)
return True
|
def wait_for(self, predicate, timeout=None)
|
Like :meth:`wait` but additionally wait for *predicate* to be true.
The *predicate* argument must be a callable that takes no arguments.
Its result is interpreted as a boolean value.
| 6.995693
| 7.162063
| 0.976771
|
if size is None:
size = 1
with self._lock:
priority = self._get_item_priority(item)
while self._size + size > self.maxsize > 0:
if not block:
raise QueueFull
if not self._notfull.wait_for(lambda: self._size+size <= self.maxsize, timeout):
raise QueueFull
heapq.heappush(self._heap, (priority, size, item))
self._size += size
self._unfinished_tasks += 1
self._notempty.notify()
|
def put(self, item, block=True, timeout=None, size=None)
|
Put *item* into the queue.
If the queue is currently full and *block* is True (the default), then
wait up to *timeout* seconds for space to become available. If no
timeout is specified, then wait indefinitely.
If the queue is full and *block* is False or a timeout occurs, then
raise a :class:`QueueFull` exception.
The optional *size* argument may be used to specify a custom size for
the item. The total :meth:`qsize` of the queue is the sum of the sizes
of all the items. The default size for an item is 1.
| 2.962005
| 3.004489
| 0.98586
|
# Don't mark this method as a switchpoint, since put() will never switch
# when block is False.
return self.put(item, False, size=size)
|
def put_nowait(self, item, size=None)
|
Equivalent of ``put(item, False)``.
| 22.553539
| 19.485773
| 1.157436
|
with self._lock:
while not self._heap:
if not block:
raise QueueEmpty
if not self._notempty.wait(timeout):
raise QueueEmpty
prio, size, item = heapq.heappop(self._heap)
self._size -= size
if 0 <= self._size < self.maxsize:
self._notfull.notify()
return item
|
def get(self, block=True, timeout=None)
|
Pop an item from the queue.
If the queue is not empty, an item is returned immediately. Otherwise,
if *block* is True (the default), wait up to *timeout* seconds for an
item to become available. If no timeout is provided, then wait
indefinitely.
If the queue is empty and *block* is false or a timeout occurs, then
raise a :class:`QueueEmpty` exception.
| 3.479552
| 3.454461
| 1.007263
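put() and get() above account for item sizes on top of a heapq-backed priority queue. A single-threaded sketch of that bookkeeping, without the locks, condition variables, and blocking behaviour:

```python
import heapq

class SizedQueue:
    # Single-threaded sketch of the size accounting used above.
    def __init__(self, maxsize=10):
        self.maxsize = maxsize
        self._heap = []
        self._size = 0

    def put(self, item, priority=0, size=1):
        if self._size + size > self.maxsize > 0:
            raise OverflowError('queue full')   # stands in for QueueFull
        heapq.heappush(self._heap, (priority, size, item))
        self._size += size

    def get(self):
        priority, size, item = heapq.heappop(self._heap)
        self._size -= size
        return item

q = SizedQueue(maxsize=10)
q.put('big job', priority=1, size=6)
q.put('small job', priority=0, size=2)
print(q.get(), q._size)   # -> 'small job' 6  (lower priority value pops first)
```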
|
with self._lock:
unfinished = self._unfinished_tasks - 1
if unfinished < 0:
raise RuntimeError('task_done() called too many times')
elif unfinished == 0:
self._alldone.notify()
self._unfinished_tasks = unfinished
|
def task_done(self)
|
Mark a task as done.
| 2.934352
| 2.874259
| 1.020907
|
if self._process:
raise RuntimeError('child process already spawned')
self._child_exited.clear()
self._closed.clear()
self._exit_status = None
self._term_signal = None
hub = get_hub()
if isinstance(args, str):
args = [args]
flags |= pyuv.UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS
else:
args = list(args)
if shell:
if hasattr(os, 'fork'):
# Unix
if executable is None:
executable = '/bin/sh'
args = [executable, '-c'] + args
else:
# Windows
if executable is None:
executable = os.environ.get('COMSPEC', 'cmd.exe')
args = [executable, '/c'] + args
if executable is None:
executable = args[0]
kwargs = {}
if env is not None:
kwargs['env'] = env
if cwd is not None:
kwargs['cwd'] = cwd
kwargs['flags'] = flags
handles = self._get_child_handles(hub.loop, stdin, stdout, stderr, extra_handles)
kwargs['stdio'] = handles
process = pyuv.Process.spawn(hub.loop, args, executable,
exit_callback=self._on_child_exit, **kwargs)
# Create stdin/stdout/stderr transports/protocols.
if handles[0].stream:
self._stdin = self._connect_child_handle(handles[0])
if handles[1].stream:
self._stdout = self._connect_child_handle(handles[1])
if handles[2].stream:
self._stderr = self._connect_child_handle(handles[2])
self._process = process
|
def spawn(self, args, executable=None, stdin=None, stdout=None, stderr=None,
shell=False, cwd=None, env=None, flags=0, extra_handles=None)
|
Spawn a new child process.
The executable to spawn and its arguments are determined by *args*,
*executable* and *shell*.
When *shell* is set to ``False`` (the default), *args* is normally a
sequence and it contains both the program to execute (at index 0), and
its arguments.
When *shell* is set to ``True``, then *args* is normally a string and
it indicates the command to execute through the shell.
The *executable* argument can be used to override the executable to
execute. If *shell* is ``False``, it overrides ``args[0]``. This is
sometimes used on Unix to implement "fat" executables that behave
differently based on argv[0]. If *shell* is ``True``, it overrides the
shell to use. The default shell is ``'/bin/sh'`` on Unix, and the value
of $COMSPEC (or ``'cmd.exe'`` if it is unset) on Windows.
The *stdin*, *stdout* and *stderr* arguments specify how to handle
standard input, output, and error, respectively. If set to None, then
the child will inherit our respective stdio handle. If set to the
special constant ``PIPE`` then a pipe is created. The pipe will be
connected to a :class:`gruvi.StreamProtocol` which you can use to read
or write from it. The stream protocol instance is available under
either :attr:`stdin`, :attr:`stdout` or :attr:`stderr`. All 3 stdio
arguments can also be a file descriptor, a file-like object, or a pyuv
``Stream`` instance.
The *extra_handles* specifies any extra handles to pass to the client.
It must be a sequence where each element is either a file descriptor, a
file-like object, or a ``pyuv.Stream`` instance. The position in the
sequence determines the file descriptor in the client. The first
position corresponds to FD 3, the second to 4, etc. This places these
file descriptors directly after the stdio handles.
The *cwd* argument specifies the directory to change to before
executing the child. If not provided, the current directory is used.
The *env* argument specifies the environment to use when executing the
child. If provided, it must be a dictionary. By default, the current
environment is used.
The *flags* argument can be used to specify optional libuv
``uv_process_flags``. The only relevant flags are
``pyuv.UV_PROCESS_DETACHED`` and ``pyuv.UV_PROCESS_WINDOWS_HIDE``. Both
are Windows specific and are silently ignored on Unix.
| 2.665015
| 2.620879
| 1.01684
|
if self._process is None:
return
waitfor = []
if not self._process.closed:
self._process.close(self._on_close_complete)
waitfor.append(self._closed)
# For each of stdin/stdout/stderr, close the transport. This schedules
# an on-close callback that will close the protocol, which we wait for.
if self._stdin:
self._stdin[1].close()
waitfor.append(self._stdin[1]._closed)
if self._stdout:
self._stdout[1].close()
waitfor.append(self._stdout[1]._closed)
if self._stderr:
self._stderr[1].close()
waitfor.append(self._stderr[1]._closed)
futures.wait(waitfor)
self._process = None
self._stdin = self._stdout = self._stderr = None
|
def close(self)
|
Close the process and free its associated resources.
This method waits for the resources to be freed by the event loop.
| 3.087958
| 2.972623
| 1.038799
|
if self._process is None:
raise RuntimeError('no child process')
self._process.kill(signum)
|
def send_signal(self, signum)
|
Send the signal *signum* to the child.
On Windows, SIGTERM, SIGKILL and SIGINT are emulated using
TerminateProcess(). This will cause the child to exit unconditionally
with status 1. No other signals can be sent on Windows.
| 6.474596
| 6.223326
| 1.040376
|
try:
self.send_signal(signal.SIGTERM)
except pyuv.error.ProcessError as e:
if e.args[0] != pyuv.errno.UV_ESRCH:
raise
|
def terminate(self)
|
Terminate the child process.
It is not an error to call this method when the child has already exited.
| 3.882779
| 3.573692
| 1.086489
|
if self._process is None:
raise RuntimeError('no child process')
if timeout == -1:
timeout = self._timeout
if not self._child_exited.wait(timeout):
raise Timeout('timeout waiting for child to exit')
return self.returncode
|
def wait(self, timeout=-1)
|
Wait for the child to exit.
Wait for at most *timeout* seconds, or indefinitely if *timeout* is
None. Return the value of the :attr:`returncode` attribute.
| 4.016484
| 3.715691
| 1.080952
|
if self._process is None:
raise RuntimeError('no child process')
if timeout == -1:
timeout = self._timeout
output = [[], []]
def writer(stream, data):
offset = 0
while offset < len(data):
buf = data[offset:offset+4096]
stream.write(buf)
offset += len(buf)
stream.close()
def reader(stream, data):
while True:
if self._encoding:
buf = stream.read(4096)
else:
buf = stream.read1()
if not buf:
break
data.append(buf)
if self.stdin:
fibers.spawn(writer, self.stdin, input or b'')
if self.stdout:
fibers.spawn(reader, self.stdout, output[0])
if self.stderr:
fibers.spawn(reader, self.stderr, output[1])
self.wait(timeout)
empty = '' if self._encoding else b''
stdout_data = empty.join(output[0])
stderr_data = empty.join(output[1])
return (stdout_data, stderr_data)
|
def communicate(self, input=None, timeout=-1)
|
Communicate with the child and return its output.
If *input* is provided, it is sent to the child. Concurrent with
sending the input, the child's standard output and standard error are
read, until the child exits.
The return value is a tuple ``(stdout_data, stderr_data)`` containing
the data read from standard output and standard error.
| 2.284511
| 2.254264
| 1.013417
|
with open(os.path.join(topdir, 'requirements.txt')) as fin:
lines = fin.readlines()
lines = [line.strip() for line in lines]
return lines
|
def get_requirements()
|
Parse a requirements.txt file and return as a list.
| 2.941486
| 2.797897
| 1.05132
|
if not isinstance(node, Node):
raise TypeError('expecting Node instance')
if node._list is None:
return
if node._list is not self:
raise RuntimeError('node is not contained in list')
if node._next is None:
self._last = node._prev # last node
else:
node._next._prev = node._prev
if node._prev is None:
self._first = node._next # first node
else:
node._prev._next = node._next
node._list = node._prev = node._next = None
self._size -= 1
|
def remove(self, node)
|
Remove a node from the list.
| 2.289408
| 2.092059
| 1.094332
|
node._list = self
if self._first is None:
self._first = self._last = node # first node in list
self._size += 1
return node
if before is None:
self._last._next = node # insert as last node
node._prev = self._last
self._last = node
else:
node._next = before
node._prev = before._prev
if node._prev:
node._prev._next = node
else:
self._first = node # inserting as first node
node._next._prev = node
self._size += 1
return node
|
def insert(self, node, before=None)
|
Insert a new node in the list.
If *before* is specified, the new node is inserted before this node.
Otherwise, the node is inserted at the end of the list.
| 2.369008
| 2.358552
| 1.004433
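A minimal doubly-linked-list sketch showing the same insert-before link juggling as the entry above; the `Node` class here is a hypothetical stand-in for the library's node type:

```python
class Node:
    # Hypothetical stand-in for the library's node type.
    def __init__(self, value):
        self.value = value
        self._list = self._prev = self._next = None

class DList:
    def __init__(self):
        self._first = self._last = None
        self._size = 0

    def insert(self, node, before=None):
        # Same link juggling as the entry above: append when `before` is
        # None, otherwise splice `node` in front of `before`.
        node._list = self
        if self._first is None:
            self._first = self._last = node
        elif before is None:
            self._last._next = node
            node._prev = self._last
            self._last = node
        else:
            node._next = before
            node._prev = before._prev
            if node._prev:
                node._prev._next = node
            else:
                self._first = node
            before._prev = node
        self._size += 1
        return node

lst = DList()
a = lst.insert(Node('a'))
c = lst.insert(Node('c'))
b = lst.insert(Node('b'), before=c)
print([n.value for n in (lst._first, lst._first._next, lst._last)])  # ['a', 'b', 'c']
```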
|
node = self._first
while node is not None:
next_node = node._next
node._list = node._prev = node._next = None
node = next_node
self._size = 0
|
def clear(self)
|
Remove all nodes from the list.
| 3.372533
| 2.803358
| 1.203033
|
built = {
'fore': {},
'back': {},
'style': {},
} # type: Dict[str, Dict[str, str]]
# Set codes for forecolors (30-37) and backcolors (40-47)
# Names are given to some of the 256-color variants as 'light' colors.
for name, number in _namemap:
# Not using format_* functions here, no validation needed.
built['fore'][name] = codeformat(30 + number)
built['back'][name] = codeformat(40 + number)
litename = 'light{}'.format(name) # type: str
built['fore'][litename] = codeformat(90 + number)
built['back'][litename] = codeformat(100 + number)
# Set reset codes for fore/back.
built['fore']['reset'] = codeformat(39)
built['back']['reset'] = codeformat(49)
# Set style codes.
for code, names in _stylemap:
for alias in names:
built['style'][alias] = codeformat(code)
# Extended (256 color codes)
for i in range(256):
built['fore'][str(i)] = extforeformat(i)
built['back'][str(i)] = extbackformat(i)
return built
|
def _build_codes() -> Dict[str, Dict[str, str]]
|
Build code map, encapsulated to reduce module-level globals.
| 3.592456
| 3.473379
| 1.034283
|
built = {} # type: Dict[str, Dict[str, str]]
for codetype, codemap in codes.items():
for name, escapecode in codemap.items():
# Skip shortcut aliases to avoid overwriting long names.
if len(name) < 2:
continue
if built.get(codetype, None) is None:
built[codetype] = {}
built[codetype][escapecode] = name
return built
|
def _build_codes_reverse(
codes: Dict[str, Dict[str, str]]) -> Dict[str, Dict[str, str]]
|
Build a reverse escape-code to name map, based on an existing
name to escape-code map.
| 3.751058
| 3.083472
| 1.216504
|
if enabled:
if not all(getattr(f, 'isatty', lambda: False)() for f in fds):
disable()
else:
enable()
|
def auto_disable(
enabled: Optional[bool] = True,
fds: Optional[Sequence[IO]] = (sys.stdout, sys.stderr)) -> None
|
Automatically decide whether to disable color codes if stdout or
stderr are not ttys.
Arguments:
enabled : Whether to automatically disable color codes.
When set to True, the fds will be checked for ttys.
When set to False, enable() is called.
fds : Open file descriptors to check for ttys.
If any non-ttys are found, colors will be disabled.
Objects must have an isatty() method.
| 3.811048
| 3.657426
| 1.042003
|
if backcolor:
codetype = 'back'
# A dict of codeformat funcs. These funcs return an escape code str.
formatters = {
'code': lambda n: codeformat(40 + n),
'lightcode': lambda n: codeformat(100 + n),
'ext': lambda n: extbackformat(n),
'rgb': lambda r, g, b: rgbbackformat(r, g, b),
} # type: Dict[str, Callable[..., str]]
else:
codetype = 'fore'
formatters = {
'code': lambda n: codeformat(30 + n),
'lightcode': lambda n: codeformat(90 + n),
'ext': lambda n: extforeformat(n),
'rgb': lambda r, g, b: rgbforeformat(r, g, b),
}
try:
r, g, b = (int(x) for x in number) # type: ignore
except (TypeError, ValueError):
# Not an rgb code.
# This variable, and its cast, is only to satisfy the type checks.
try:
n = int(cast(int, number))
except ValueError:
# Not an rgb code, or a valid code number.
raise InvalidColr(
number,
'Expecting RGB or 0-255 for {} code.'.format(codetype)
)
if light:
if not in_range(n, 0, 9):
raise InvalidColr(
n,
'Expecting 0-9 for light {} code.'.format(codetype)
)
return formatters['lightcode'](n)
elif extended:
if not in_range(n, 0, 255):
raise InvalidColr(
n,
'Expecting 0-255 for ext. {} code.'.format(codetype)
)
return formatters['ext'](n)
if not in_range(n, 0, 9):
raise InvalidColr(
n,
'Expecting 0-9 for {} code.'.format(codetype)
)
return formatters['code'](n)
# Rgb code.
try:
if not all(in_range(x, 0, 255) for x in (r, g, b)):
raise InvalidColr(
(r, g, b),
'RGB value for {} not in range 0-255.'.format(codetype)
)
except TypeError:
# Was probably a 3-char string. Not an rgb code though.
raise InvalidColr(
(r, g, b),
'RGB value for {} contains invalid number.'.format(codetype)
)
return formatters['rgb'](r, g, b)
|
def _format_code(
number: FormatArg,
backcolor: Optional[bool] = False,
light: Optional[bool] = False,
extended: Optional[bool] = False) -> str
|
Return an escape code for a fore/back color, by number.
This is a convenience method for handling the different code types
all in one shot.
It also handles some validation.
format_fore/format_back wrap this function to reduce code duplication.
Arguments:
number : Integer or RGB tuple to format into an escape code.
backcolor : Whether this is for a back color, otherwise it's fore.
light : Whether this should be a 'light' color.
extended : Whether this should be an extended (256) color.
If `light` and `extended` are both given, only `light` is used.
| 2.659977
| 2.500395
| 1.063823
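The `codeformat`, `extforeformat`/`extbackformat`, and `rgbforeformat`/`rgbbackformat` helpers referenced above are not shown in this table. For orientation, a sketch of the standard ANSI escape shapes they are assumed to produce (consistent with the `\033[38;5;` and `\033[38;2` prefixes seen in `get_known_name` further down):

```python
# Sketch only: assumed ANSI escape shapes for the helpers named above.
def codeformat(n):
    return '\x1b[{}m'.format(n)                    # basic fore 30-37, back 40-47, styles

def extforeformat(n):
    return '\x1b[38;5;{}m'.format(n)               # 256-color foreground

def rgbforeformat(r, g, b):
    return '\x1b[38;2;{};{};{}m'.format(r, g, b)   # truecolor foreground

print(repr(codeformat(31)), repr(extforeformat(196)), repr(rgbforeformat(255, 0, 0)))
```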
|
return _format_code(
number,
backcolor=True,
light=light,
extended=extended
)
|
def format_back(
number: FormatArg,
light: Optional[bool] = False,
extended: Optional[bool] = False) -> str
|
Return an escape code for a back color, by number.
This is a convenience method for handling the different code types
all in one shot.
It also handles some validation.
| 7.850254
| 4.998885
| 1.570401
|
return _format_code(
number,
backcolor=False,
light=light,
extended=extended
)
|
def format_fore(
number: FormatArg,
light: Optional[bool] = False,
extended: Optional[bool] = False) -> str
|
Return an escape code for a fore color, by number.
This is a convenience method for handling the different code types
all in one shot.
It also handles some validation.
| 9.011732
| 6.112208
| 1.474382
|
if str(number) not in _stylenums:
raise InvalidStyle(number)
return codeformat(number)
|
def format_style(number: int) -> str
|
Return an escape code for a style, by number.
This handles invalid style numbers.
| 19.919601
| 10.190228
| 1.954775
|
names = list(basic_names)
names.extend(name_data)
return tuple(sorted(set(names)))
|
def get_all_names() -> Tuple[str]
|
Retrieve a tuple of all known color names, basic and 'known names'.
| 5.966545
| 4.960374
| 1.202842
|
if ';' in s:
# Extended fore/back codes.
numberstr = s.rpartition(';')[-1][:-1]
else:
# Fore, back, style, codes.
numberstr = s.rpartition('[')[-1][:-1]
num = try_parse_int(
numberstr,
default=None,
minimum=0,
maximum=255
)
if num is None:
raise InvalidEscapeCode(numberstr)
return num
|
def get_code_num(s: str) -> Optional[int]
|
Get code number from an escape code.
Raises InvalidEscapeCode if an invalid number is found.
| 5.499093
| 4.480412
| 1.227363
|
parts = s.split(';')
if len(parts) != 5:
raise InvalidRgbEscapeCode(s, reason='Count is off.')
rgbparts = parts[-3:]
if not rgbparts[2].endswith('m'):
raise InvalidRgbEscapeCode(s, reason='Missing \'m\' on the end.')
rgbparts[2] = rgbparts[2].rstrip('m')
try:
r, g, b = [int(x) for x in rgbparts]
except ValueError as ex:
raise InvalidRgbEscapeCode(s) from ex
if not all(in_range(x, 0, 255) for x in (r, g, b)):
raise InvalidRgbEscapeCode(s, reason='Not in range 0-255.')
return r, g, b
|
def get_code_num_rgb(s: str) -> Optional[Tuple[int, int, int]]
|
Get rgb code numbers from an RGB escape code.
Raises InvalidRgbEscapeCode if an invalid number is found.
| 3.026499
| 2.612002
| 1.158689
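A simplified, runnable version of the RGB escape parser above, with `ValueError` standing in for `InvalidRgbEscapeCode`:

```python
def get_code_num_rgb(s):
    # Parse '\x1b[38;2;R;G;Bm' (or the 48;2 back variant) into (r, g, b).
    parts = s.split(';')
    if len(parts) != 5 or not parts[-1].endswith('m'):
        raise ValueError(s)
    r, g, b = (int(x.rstrip('m')) for x in parts[-3:])
    if not all(0 <= x <= 255 for x in (r, g, b)):
        raise ValueError(s)
    return r, g, b

print(get_code_num_rgb('\x1b[38;2;255;0;0m'))  # -> (255, 0, 0)
```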
|
isdisabled = disabled()
orderedcodes = tuple((c, get_known_name(c)) for c in get_codes(s))
codesdone = set() # type: Set[str]
for code, codeinfo in orderedcodes:
# Do the codes in order, but don't do the same code twice.
if unique:
if code in codesdone:
continue
codesdone.add(code)
if codeinfo is None:
continue
codetype, name = codeinfo
typedesc = '{:>13}: {!r:<23}'.format(codetype.title(), code)
if codetype.startswith(('extended', 'rgb')):
if isdisabled:
codedesc = str(ColorCode(name, rgb_mode=rgb_mode))
else:
codedesc = ColorCode(name, rgb_mode=rgb_mode).example()
else:
codedesc = ''.join((
code,
str(name).lstrip('(').rstrip(')'),
codes['style']['reset_all']
))
yield ' '.join((
typedesc,
codedesc
))
|
def get_known_codes(
s: Union[str, 'Colr'],
unique: Optional[bool] = True,
rgb_mode: Optional[bool] = False)
|
Get all known escape codes from a string, and yield the explanations.
| 5.540474
| 5.278809
| 1.049569
|
if not s.endswith('m'):
# All codes end with 'm', so...
return None
if s.startswith('\033[38;5;'):
# Extended fore.
name = codes_reverse['fore'].get(s, None)
if name is None:
num = get_code_num(s)
return ('extended fore', num)
else:
return ('extended fore', name)
elif s.startswith('\033[48;5;'):
# Extended back.
name = codes_reverse['back'].get(s, None)
if name is None:
num = get_code_num(s)
return ('extended back', num)
else:
return ('extended back', name)
elif s.startswith('\033[38;2'):
# RGB fore.
vals = get_code_num_rgb(s)
if vals is not None:
return ('rgb fore', vals)
elif s.startswith('\033[48;2'):
# RGB back.
vals = get_code_num_rgb(s)
if vals is not None:
return ('rgb back', vals)
elif s.startswith('\033['):
# Fore, back, style.
number = get_code_num(s)
# Get code type based on number.
if (number <= 7) or (number == 22):
codetype = 'style'
elif (((number >= 30) and (number < 40)) or
((number >= 90) and (number < 100))):
codetype = 'fore'
elif (((number >= 40) and (number < 50)) or
((number >= 100) and (number < 110))):
codetype = 'back'
else:
raise InvalidEscapeCode(
number,
'Expecting 0-7, 22, 30-39, 40-49, 90-99, or 100-109 for escape code',
)
name = codes_reverse[codetype].get(s, None)
if name is not None:
return (codetype, name)
# Not a known escape code.
return None
|
def get_known_name(s: str) -> Optional[Tuple[str, ColorArg]]
|
Reverse translate a terminal code to a known color name, if possible.
Returns a tuple of (codetype, knownname) on success.
Returns None on failure.
| 2.268541
| 2.179652
| 1.040781
|
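A small sketch of get_known_name, which returns a (codetype, name-or-value) tuple for codes it can reverse-translate and None otherwise (import path assumed):

# Hypothetical import path; get_known_name may actually live in colr.colr.
from colr import get_known_name
print(get_known_name('\x1b[31m'))          # Likely ('fore', 'red')
print(get_known_name('\x1b[38;2;1;2;3m'))  # ('rgb fore', (1, 2, 3))
print(get_known_name('not an escape'))     # None: no trailing 'm'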
return (x >= minimum and x <= maximum)
|
def in_range(x: int, minimum: int, maximum: int) -> bool
|
Return True if x is >= minimum and <= maximum.
| 9.826672
| 4.790022
| 2.051488
|
if not s:
return default
val = s.strip().lower()
try:
# Try as int.
intval = int(val)
except ValueError:
# Try as rgb.
try:
r, g, b = (int(x.strip()) for x in val.split(','))
except ValueError:
if ',' in val:
# User tried rgb value and failed.
raise InvalidColr(val)
# Try as name (fore/back have the same names)
code = codes['fore'].get(val, None)
if code:
# Valid basic code from fore, back, or style.
return val
# Not a basic code, try known names.
named_data = name_data.get(val, None)
if named_data is not None:
# A known named color.
return val
# Not a basic/extended/known name, try as hex.
try:
if rgb_mode:
return hex2rgb(val, allow_short=True)
return hex2termhex(val, allow_short=True)
except ValueError:
raise InvalidColr(val)
else:
# Got rgb. Do some validation.
if not all((in_range(x, 0, 255) for x in (r, g, b))):
raise InvalidColr(val)
# Valid rgb.
return r, g, b
else:
# Int value.
if not in_range(intval, 0, 255):
# May have been a hex value confused as an int.
if len(val) in (3, 6):
try:
if rgb_mode:
return hex2rgb(val, allow_short=True)
return hex2termhex(val, allow_short=True)
except ValueError:
raise InvalidColr(val)
raise InvalidColr(intval)
# Valid int value.
return intval
|
def parse_colr_arg(
s: str,
default: Optional[Any] = None,
rgb_mode: Optional[bool] = False) -> ColorArg
|
Parse a user argument into a usable fore/back color value for Colr.
If a falsey value is passed, default is returned.
Raises InvalidColr if the argument is unusable.
Returns: A usable value for Colr(fore/back).
This validates basic/extended color names.
This validates the range for basic/extended values (0-255).
This validates the length/range for rgb values (0-255, 0-255, 0-255).
Arguments:
s : User's color value argument.
Example: "1", "255", "black", "25,25,25"
| 3.569916
| 3.377188
| 1.057068
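A hedged illustration of the accepted argument forms. This helper belongs to the command-line front end, so the import path below is an assumption:

# Hypothetical import path; parse_colr_arg is part of the CLI module.
from colr.__main__ import parse_colr_arg
print(parse_colr_arg('red'))        # Known name -> returned as-is
print(parse_colr_arg('160'))        # In-range int -> 160
print(parse_colr_arg('25,25,25'))   # RGB triple -> (25, 25, 25)
print(parse_colr_arg('d7af00'))     # Hex -> terminal hex (or RGB with rgb_mode=True)
print(parse_colr_arg('', default='reset'))  # Falsey input -> default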
|
try:
n = int(s)
except ValueError:
return default
if (minimum is not None) and (n < minimum):
return default
elif (maximum is not None) and (n > maximum):
return default
return n
|
def try_parse_int(
s: str,
default: Optional[Any] = None,
minimum: Optional[int] = None,
maximum: Optional[int] = None) -> Optional[Any]
|
Try parsing a string into an integer.
On failure, return `default`.
If the number is less than `minimum` or greater than `maximum`,
return `default`.
Returns an integer on success.
| 2.054133
| 2.132071
| 0.963445
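A short sketch of try_parse_int; unparsable or out-of-range values fall back to the default (import path assumed):

# Hypothetical import path; try_parse_int may actually live in colr.colr.
from colr import try_parse_int
print(try_parse_int('42'))                            # 42
print(try_parse_int('nope', default=0))               # 0 (not an int)
print(try_parse_int('300', default=0, maximum=255))   # 0 (above maximum)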
|
if attr in codes['fore']:
# Fore method
return partial(self.chained, fore=attr)
elif attr in codes['style']:
# Style method
return partial(self.chained, style=attr)
elif attr.startswith('bg'):
# Back method
name = attr[2:].lstrip('_')
if name in codes['back']:
return partial(self.chained, back=name)
elif attr.startswith(('b256_', 'b_')):
# Back 256 method
# Remove the b256_ portion.
name = attr.partition('_')[2]
return self._ext_attr_to_partial(name, 'back')
elif attr.startswith(('f256_', 'f_')):
# Fore 256 method
name = attr.partition('_')[2]
return self._ext_attr_to_partial(name, 'fore')
return None
|
def _attr_to_method(self, attr)
|
Return the correct color function by method name.
Uses `partial` to build kwargs on the `chained` func.
On failure/unknown name, returns None.
| 3.29183
| 2.746336
| 1.198626
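This lookup is what powers the dynamic attribute chaining; a hedged example of the public-facing effect, where each resolved attribute is called with the text to append:

from colr import Colr
print(Colr().red('warning').bold(' important'))            # fore name, then style name
print(Colr().bg_blue('blue background ').white('white'))   # 'bg'-prefixed back name
print(Colr().f_160('extended fore ').b_233('extended back'))  # 'f_' / 'b_' -> 256-color handler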
|
if _disabled:
# No colorization when disabled. Just use str.
return obj
clr = obj.__colr__()
if not isinstance(clr, cls):
# __colr__ should always return a Colr.
# Future development may assume a Colr was returned.
raise TypeError(
' '.join((
'__colr__ methods should return a {} instance.',
'Got: {}',
)).format(
cls.__name__,
type(clr).__name__,
)
)
return clr
|
def _call_dunder_colr(cls, obj)
|
Call __colr__ on an object, after some checks.
If color is disabled, the object itself is returned.
If __colr__ doesn't return a Colr instance, TypeError is raised.
On success, a Colr instance is returned from obj.__colr__().
| 6.219319
| 5.005934
| 1.242389
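A sketch of the __colr__ protocol this guard enforces: an object describes its own colorization, and anything other than a Colr instance is rejected. How the hook is triggered by the library (e.g. when the object is passed to Colr() or a format call) is an assumption here:

from colr import Colr

class Status:
    def __init__(self, ok):
        self.ok = ok
    def __colr__(self):
        # The guard requires a Colr instance here, or it raises TypeError.
        return Colr('OK', 'green') if self.ok else Colr('FAIL', 'red')

# Direct call, just to show the returned Colr; the library is assumed to
# invoke __colr__ itself when colorizing objects that define it.
print(Status(True).__colr__())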
|
try:
intval = int(name)
except ValueError:
# Try as an extended name_data name.
info = name_data.get(name, None)
if info is None:
# Not an int value or name_data name.
return None
kws = {kwarg_key: info['code']}
return partial(self.chained, **kws)
# Integer str passed, use the int value.
kws = {kwarg_key: intval}
return partial(self.chained, **kws)
|
def _ext_attr_to_partial(self, name, kwarg_key)
|
Convert a string like '233' or 'aliceblue' into partial for
self.chained.
| 5.444453
| 4.670853
| 1.165623
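In effect, this is the resolver behind the f_*/b_* attributes; a hedged example of both forms it accepts (numeric string and known color name):

from colr import Colr
print(Colr().f_233('by number'))       # '233' parses as an int -> extended code 233
print(Colr().b_aliceblue('by name'))   # 'aliceblue' resolves through name_data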
|
if start < 232:
start = 232
elif start > 255:
start = 255
if reverse:
codes = list(range(start, 231, -1))
else:
codes = list(range(start, 256))
return ''.join((
self._iter_text_wave(
text,
codes,
step=step,
fore=fore,
back=back,
style=style,
rgb_mode=rgb_mode
)
))
|
def _gradient_black_line(
self, text, start, step=1,
fore=None, back=None, style=None, reverse=False, rgb_mode=False)
|
Yield colorized characters,
within the 24-length black gradient.
| 2.786563
| 2.798338
| 0.995792
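The public entry point for this helper is the gradient() method used by the CLI handling earlier; a hedged example, assuming 'black' is an accepted gradient name selecting the 24-step grayscale ramp:

from colr import Colr
print(Colr('a line of text long enough to show the ramp').gradient(name='black', spread=2))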
|
if not movefactor:
def factor(i):
return start
else:
# Increase the start for each line.
def factor(i):
return start + (i * movefactor)
return '\n'.join((
self._gradient_black_line(
line,
start=factor(i),
step=step,
fore=fore,
back=back,
style=style,
reverse=reverse,
rgb_mode=rgb_mode,
)
for i, line in enumerate(text.splitlines())
))
|
def _gradient_black_lines(
self, text, start, step=1,
fore=None, back=None, style=None, reverse=False,
movefactor=2, rgb_mode=False)
|
Yield colorized characters,
within the 24-length black gradient,
treating each line separately.
| 2.491673
| 2.431903
| 1.024577
|
return self._gradient_rgb_line_from_morph(
text,
list(self._morph_rgb(start, stop, step=step)),
fore=fore,
back=back,
style=style
)
|
def _gradient_rgb_line(
self, text, start, stop, step=1,
fore=None, back=None, style=None)
|
Yield colorized characters, morphing from one rgb value to
another.
| 3.80678
| 3.413946
| 1.115067
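The corresponding public method is gradient_rgb(), the wrapper used by the CLI handling earlier; a short sketch morphing between two RGB values:

from colr import Colr
print(Colr('morphing from one rgb value to another').gradient_rgb(
    start=(255, 0, 0),
    stop=(0, 0, 255),
))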
|
try:
listlen = len(morphlist)
except TypeError:
morphlist = list(morphlist)
listlen = len(morphlist)
neededsteps = listlen // len(text)
iterstep = 1
if neededsteps > iterstep:
# Skip some members of morphlist, to be sure to reach the end.
iterstep = neededsteps
usevals = morphlist
if iterstep > 1:
# Rebuild the morphlist, skipping some.
usevals = [usevals[i] for i in range(0, listlen, iterstep)]
return ''.join((
self._iter_text_wave(
text,
usevals,
fore=fore,
back=back,
style=style,
rgb_mode=False,
)
))
|
def _gradient_rgb_line_from_morph(
self, text, morphlist, fore=None, back=None, style=None)
|
Yield colorized characters, morphing from one rgb value to
another.
| 4.900284
| 4.749986
| 1.031642
|