| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q14000
|
ParsedUrl.target
|
train
|
def target(self):
"""The "target" i.e. local part of the URL, consisting of the path and query."""
target = self.path or '/'
if self.query:
target = '{}?{}'.format(target, self.query)
return target
|
python
|
{
"resource": ""
}
|
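A minimal, self-contained sketch of the property above, using a hypothetical stand-in for the ParsedUrl fields it reads (path and query); the real class is not shown in this row.

from collections import namedtuple

# Hypothetical stand-in with just the two fields the property reads.
FakeParsedUrl = namedtuple('FakeParsedUrl', 'path query')

def target(url):
    # Same logic as ParsedUrl.target above.
    tgt = url.path or '/'
    if url.query:
        tgt = '{}?{}'.format(tgt, url.query)
    return tgt

assert target(FakeParsedUrl('', '')) == '/'
assert target(FakeParsedUrl('/search', 'q=python')) == '/search?q=python'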
q14001
|
HttpRequest.start_request
|
train
|
def start_request(self, method, url, headers=None, bodylen=None):
"""Start a new HTTP request.
The optional *headers* argument contains the headers to send. It must
be a sequence of ``(name, value)`` tuples.
The optional *bodylen* parameter is a hint that specifies the length of
the body that will follow. A length of -1 indicates no body, 0 means an
empty body, and a positive number indicates the body size in bytes.
This parameter helps determine whether to use the chunked transfer
encoding. Normally, when the body size is known, chunked encoding is not used.
"""
self._headers = headers or []
agent = host = clen = trailer = None
# Check the headers provided, and capture some information about the
# request from them.
for name, value in self._headers:
lname = name.lower()
# Only HTTP applications are allowed to set "hop-by-hop" headers.
if lname in hop_by_hop:
raise ValueError('header {} is hop-by-hop'.format(name))
elif lname == 'user-agent':
agent = value
elif lname == 'host':
host = value
elif lname == 'content-length':
clen = int(value)
elif lname == 'trailer':
trailer = parse_trailer(value)
elif lname == 'content-type' and value.startswith('text/'):
ctype, params = parse_content_type(value)
self._charset = params.get('charset')
version = self._protocol._version
# The Host header is mandatory in 1.1. Add it if it's missing.
if host is None and version == '1.1' and self._protocol._server_name:
self._headers.append(('Host', self._protocol._server_name))
# Identify ourselves.
if agent is None:
self._headers.append(('User-Agent', self._protocol.identifier))
# Check if we need to use chunked encoding due to unknown body size.
if clen is None and bodylen is None:
if version == '1.0':
raise HttpError('body size unknown for HTTP/1.0')
self._chunked = True
self._content_length = clen
# Check if trailers are requested and, if so, switch to chunked encoding.
if trailer:
if version == '1.0':
raise HttpError('cannot support trailers for HTTP/1.0')
if clen is not None:
remove_headers(self._headers, 'Content-Length')
self._chunked = True
self._trailer = trailer
# Add Content-Length if we know the body size and are not using chunked.
if not self._chunked and clen is None and bodylen >= 0:
self._headers.append(('Content-Length', str(bodylen)))
self._content_length = bodylen
# Complete the "Hop by hop" headers.
if version == '1.0':
self._headers.append(('Connection', 'keep-alive'))
elif version == '1.1':
self._headers.append(('Connection', 'te'))
self._headers.append(('TE', 'trailers'))
if self._chunked:
self._headers.append(('Transfer-Encoding', 'chunked'))
# Start the request
self._protocol._requests.append(method)
header = create_request(version, method, url, self._headers)
self._protocol.writer.write(header)
|
python
|
{
"resource": ""
}
|
q14002
|
HttpRequest.end_request
|
train
|
def end_request(self):
"""End the request body."""
if not self._chunked:
return
trailers = [(n, get_header(self._headers, n)) for n in self._trailer] \
if self._trailer else None
ending = create_chunked_body_end(trailers)
self._protocol.writer.write(ending)
|
python
|
{
"resource": ""
}
|
q14003
|
HttpProtocol.request
|
train
|
def request(self, method, url, headers=None, body=None):
"""Make a new HTTP request.
The *method* argument is the HTTP method as a string, for example
``'GET'`` or ``'POST'``. The *url* argument specifies the URL.
The optional *headers* argument specifies extra HTTP headers to use in
the request. It must be a sequence of ``(name, value)`` tuples.
The optional *body* argument may be used to include a body in the
request. It must be a ``bytes`` instance, a file-like object opened in
binary mode, or an iterable producing ``bytes`` instances. To send
potentially large bodies, use the file or iterator interfaces. This has
the benefit that only a single chunk is kept in memory at a time.
The response to the request can be obtained by calling the
:meth:`getresponse` method. You may make multiple requests before
reading a response. However, for every request that you make, you must
call :meth:`getresponse` exactly once. The remote HTTP implementation
will send back the responses in the same order as the requests.
This method will use the "chunked" transfer encoding if there is a body
and the body size is unknown ahead of time. This happens when the file
or iterator interface is used in the absence of a "Content-Length"
header.
"""
if self._error:
raise compat.saved_exc(self._error)
elif self._transport is None:
raise HttpError('not connected')
request = HttpRequest(self)
bodylen = -1 if body is None else \
len(body) if isinstance(body, bytes) else None
request.start_request(method, url, headers, bodylen)
if isinstance(body, bytes):
request.write(body)
elif hasattr(body, 'read'):
while True:
chunk = body.read(4096)
if not chunk:
break
request.write(chunk)
elif hasattr(body, '__iter__'):
for chunk in body:
request.write(chunk)
request.end_request()
|
python
|
{
"resource": ""
}
|
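A small, self-contained sketch of just the *bodylen* dispatch used by HttpProtocol.request above (a bytes body gives its length, no body gives -1, file or iterator bodies give None, i.e. unknown size); gruvi connection setup is deliberately not shown here.

import io

def body_length(body):
    # Mirrors the conditional expression in HttpProtocol.request.
    return -1 if body is None else len(body) if isinstance(body, bytes) else None

assert body_length(None) == -1                      # no body
assert body_length(b'hello') == 5                   # known size
assert body_length(io.BytesIO(b'hello')) is None    # unknown size -> chunked
assert body_length(iter([b'a', b'b'])) is None      # unknown size -> chunked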
q14004
|
HttpProtocol.getresponse
|
train
|
def getresponse(self):
"""Wait for and return a HTTP response.
The return value will be a :class:`HttpMessage`. When this method
returns only the response header has been read. The response body can
be read using :meth:`~gruvi.Stream.read` and similar methods on
the message :attr:`~HttpMessage.body`.
Note that if you use persistent connections (the default), it is
required that you read the entire body of each response. If you don't
then deadlocks may occur.
"""
if self._error:
raise compat.saved_exc(self._error)
elif self._transport is None:
raise HttpError('not connected')
message = self._queue.get(timeout=self._timeout)
if isinstance(message, Exception):
raise compat.saved_exc(message)
return message
|
python
|
{
"resource": ""
}
|
q14005
|
unique_hash
|
train
|
def unique_hash(filepath: str, blocksize: int = 80) -> str:
""" Small function to generate a hash to uniquely identify a file.
Default blocksize is `80`.
"""
s = sha1()
with open(filepath, "rb") as f:
buf = f.read(blocksize)
s.update(buf)
return s.hexdigest()
|
python
|
{
"resource": ""
}
|
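A runnable sketch of the function above as written (note that it hashes only the first *blocksize* bytes of the file), using a temporary file for the demonstration.

import tempfile
from hashlib import sha1

def unique_hash(filepath, blocksize=80):
    # Hash the first `blocksize` bytes of the file, as in the snippet above.
    s = sha1()
    with open(filepath, 'rb') as f:
        s.update(f.read(blocksize))
    return s.hexdigest()

with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as tmp:
    tmp.write(b'\x00' * 200)
print(unique_hash(tmp.name))  # 40-character hex digest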
q14006
|
path_to_songname
|
train
|
def path_to_songname(path: str) -> str:
"""
Extracts song name from a filepath. Used to identify which songs
have already been fingerprinted on disk.
"""
return os.path.splitext(os.path.basename(path))[0]
|
python
|
{
"resource": ""
}
|
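A quick self-contained check of the helper above:

import os

def path_to_songname(path):
    return os.path.splitext(os.path.basename(path))[0]

assert path_to_songname('/music/artist/track-01.mp3') == 'track-01'
assert path_to_songname('song.flac') == 'song'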
q14007
|
create_server
|
train
|
def create_server(protocol_factory, address=None, ssl=False, family=0, flags=0,
ipc=False, backlog=128):
"""
Create a new network server.
This creates one or more :class:`pyuv.Handle` instances bound to *address*,
puts them in listen mode and starts accepting new connections. For each
accepted connection, a new transport is created which is connected to a new
protocol instance obtained by calling *protocol_factory*.
The *address* argument may be either a string, a ``(host, port)`` tuple,
or a ``pyuv.Stream`` handle:
* If the address is a string, this method creates a new :class:`pyuv.Pipe`
instance and binds it to *address*.
* If the address is a tuple, this method creates one or more
:class:`pyuv.TCP` handles. The first element of the tuple specifies the
IP address or DNS name, and the second element specifies the port number
or service name. A transport is created for each resolved address.
* If the address is a ``pyuv.Stream`` handle, it must already be bound to
an address.
The *ssl* parameter indicates whether SSL should be used for accepted
connections. See :func:`create_connection` for a description.
The *family* and *flags* keyword arguments are used to customize address
resolution for TCP handles as described in :func:`socket.getaddrinfo`.
The *ipc* parameter indicates whether this server will accept new
connections via file descriptor passing. This works for `pyuv.Pipe` handles
only, and the user is required to call :meth:`Server.accept_connection`
whenever a new connection is pending.
The *backlog* parameter specifies the listen backlog, i.e. the maximum number
of not yet accepted active opens to queue. To disable listening for new
connections (useful when *ipc* was set), set the backlog to ``None``.
The return value is a :class:`Server` instance.
"""
server = Server(protocol_factory)
server.listen(address, ssl=ssl, family=family, flags=flags, backlog=backlog)
return server
|
python
|
{
"resource": ""
}
|
q14008
|
Server.close
|
train
|
def close(self):
"""Close the listening sockets and all accepted connections."""
for handle in self._handles:
if not handle.closed:
handle.close()
del self._handles[:]
for transport, _ in self.connections:
transport.close()
self._all_closed.wait()
|
python
|
{
"resource": ""
}
|
q14009
|
dict_pop_or
|
train
|
def dict_pop_or(d, key, default=None):
""" Try popping a key from a dict.
Instead of raising KeyError, just return the default value.
"""
val = default
with suppress(KeyError):
val = d.pop(key)
return val
|
python
|
{
"resource": ""
}
|
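A self-contained sketch showing the behaviour of dict_pop_or above (the key is removed when present, the default is returned otherwise):

from contextlib import suppress

def dict_pop_or(d, key, default=None):
    val = default
    with suppress(KeyError):
        val = d.pop(key)
    return val

opts = {'color': 'red'}
assert dict_pop_or(opts, 'color') == 'red'
assert 'color' not in opts            # popped, not just read
assert dict_pop_or(opts, 'missing', 0) == 0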
q14010
|
get_colr
|
train
|
def get_colr(txt, argd):
""" Return a Colr instance based on user args. """
fore = parse_colr_arg(
get_name_arg(argd, '--fore', 'FORE', default=None),
rgb_mode=argd['--truecolor'],
)
back = parse_colr_arg(
get_name_arg(argd, '--back', 'BACK', default=None),
rgb_mode=argd['--truecolor'],
)
style = get_name_arg(argd, '--style', 'STYLE', default=None)
if argd['--gradient']:
# Build a gradient from user args.
return C(txt).gradient(
name=argd['--gradient'],
spread=try_int(argd['--spread'], 1, minimum=0),
fore=fore,
back=back,
style=style,
rgb_mode=argd['--truecolor'],
)
if argd['--gradientrgb']:
# Build an rgb gradient from user args.
rgb_start, rgb_stop = parse_gradient_rgb_args(argd['--gradientrgb'])
return C(txt).gradient_rgb(
fore=fore,
back=back,
style=style,
start=rgb_start,
stop=rgb_stop,
)
if argd['--rainbow']:
return C(txt).rainbow(
fore=fore,
back=back,
style=style,
freq=try_float(argd['--frequency'], 0.1, minimum=0),
offset=try_int(argd['--offset'], randint(0, 255), minimum=0),
spread=try_float(argd['--spread'], 3.0, minimum=0),
rgb_mode=argd['--truecolor'],
)
# Normal colored output.
return C(txt, fore=fore, back=back, style=style)
|
python
|
{
"resource": ""
}
|
q14011
|
get_name_arg
|
train
|
def get_name_arg(argd, *argnames, default=None):
""" Return the first argument value given in a docopt arg dict.
When not given, return default.
"""
val = None
for argname in argnames:
if argd[argname]:
val = argd[argname].lower().strip()
break
return val if val else default
|
python
|
{
"resource": ""
}
|
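A self-contained sketch of get_name_arg above with a hypothetical docopt-style argument dict; the argument names here are made up for illustration.

def get_name_arg(argd, *argnames, default=None):
    val = None
    for argname in argnames:
        if argd[argname]:
            val = argd[argname].lower().strip()
            break
    return val if val else default

argd = {'--fore': ' RED ', 'FORE': None}   # hypothetical parsed args
assert get_name_arg(argd, '--fore', 'FORE') == 'red'
assert get_name_arg(argd, 'FORE', default='reset') == 'reset'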
q14012
|
list_known_codes
|
train
|
def list_known_codes(s, unique=True, rgb_mode=False):
""" Find and print all known escape codes in a string,
using get_known_codes.
"""
total = 0
for codedesc in get_known_codes(s, unique=unique, rgb_mode=rgb_mode):
total += 1
print(codedesc)
plural = 'code' if total == 1 else 'codes'
codetype = ' unique' if unique else ''
print('\nFound {}{} escape {}.'.format(total, codetype, plural))
return 0 if total > 0 else 1
|
python
|
{
"resource": ""
}
|
q14013
|
list_names
|
train
|
def list_names():
""" List all known color names. """
names = get_all_names()
# This is 375 right now. Probably won't ever change, but I'm not sure.
nameslen = len(names)
print('\nListing {} names:\n'.format(nameslen))
# Using 3 columns of names, still alphabetically sorted from the top down.
# Longest name so far: lightgoldenrodyellow (20 chars)
namewidth = 20
# namewidth * columns == 60, colorwidth * columns == 18, final == 78.
swatch = ' ' * 9
third = nameslen // 3
lastthird = third * 2
cols = (
names[0: third],
names[third: lastthird],
names[lastthird:],
)
# Exactly enough spaces to fill in a blank item (+2 for ': ').
# This may not ever be used, unless another 'known name' is added.
blankitem = ' ' * (namewidth + len(swatch) + 2)
for i in range(third):
nameset = []
for colset in cols:
try:
nameset.append(colset[i])
except IndexError:
nameset.append(None)
continue
line = C('').join(
C(': ').join(
C(name.rjust(namewidth)),
C(swatch, back=name),
) if name else blankitem
for name in nameset
)
print(line)
return 0
|
python
|
{
"resource": ""
}
|
q14014
|
read_stdin
|
train
|
def read_stdin():
""" Read text from stdin, and print a helpful message for ttys. """
if sys.stdin.isatty() and sys.stdout.isatty():
print('\nReading from stdin until end of file (Ctrl + D)...')
return sys.stdin.read()
|
python
|
{
"resource": ""
}
|
q14015
|
translate
|
train
|
def translate(usercodes, rgb_mode=False):
""" Translate one or more hex, term, or rgb value into the others.
Yields strings with the results for each code translated.
"""
for code in usercodes:
code = code.strip().lower()
if code.isalpha() and (code in codes['fore']):
# Basic color name.
yield translate_basic(code)
else:
if ',' in code:
try:
r, g, b = (int(c.strip()) for c in code.split(','))
except (TypeError, ValueError):
raise InvalidColr(code)
code = (r, g, b)
colorcode = ColorCode(code, rgb_mode=rgb_mode)
if disabled():
yield str(colorcode)
yield colorcode.example()
|
python
|
{
"resource": ""
}
|
q14016
|
translate_basic
|
train
|
def translate_basic(usercode):
""" Translate a basic color name to color with explanation. """
codenum = get_code_num(codes['fore'][usercode])
colorcode = codeformat(codenum)
msg = 'Name: {:>10}, Number: {:>3}, EscapeCode: {!r}'.format(
usercode,
codenum,
colorcode
)
if disabled():
return msg
return str(C(msg, fore=usercode))
|
python
|
{
"resource": ""
}
|
q14017
|
try_float
|
train
|
def try_float(s, default=None, minimum=None):
""" Try parsing a string into a float.
If None is passed, default is returned.
On failure, InvalidNumber is raised.
"""
if not s:
return default
try:
val = float(s)
except (TypeError, ValueError):
raise InvalidNumber(s, label='Invalid float value')
if (minimum is not None) and (val < minimum):
val = minimum
return val
|
python
|
{
"resource": ""
}
|
q14018
|
try_int
|
train
|
def try_int(s, default=None, minimum=None):
""" Try parsing a string into an integer.
If None is passed, default is returned.
On failure, InvalidNumber is raised.
"""
if not s:
return default
try:
val = int(s)
except (TypeError, ValueError):
raise InvalidNumber(s)
if (minimum is not None) and (val < minimum):
val = minimum
return val
|
python
|
{
"resource": ""
}
|
q14019
|
ContextLogger.thread_info
|
train
|
def thread_info(self):
"""Return a string identifying the current thread and fiber."""
tid = threading.current_thread().name
if tid == 'MainThread':
tid = 'Main'
current = fibers.current()
fid = getattr(current, 'name') if current.parent else 'Root'
return '{}/{}'.format(tid, fid)
|
python
|
{
"resource": ""
}
|
q14020
|
ContextLogger.frame_info
|
train
|
def frame_info(self):
"""Return a string identifying the current frame."""
if not self._logger.isEnabledFor(logging.DEBUG):
return ''
f = sys._getframe(3)
fname = os.path.split(f.f_code.co_filename)[1]
return '{}:{}'.format(fname, f.f_lineno)
|
python
|
{
"resource": ""
}
|
q14021
|
StreamBuffer.set_buffer_limits
|
train
|
def set_buffer_limits(self, high=None, low=None):
"""Set the low and high watermarks for the read buffer."""
if high is None:
high = self.default_buffer_size
if low is None:
low = high // 2
self._buffer_high = high
self._buffer_low = low
|
python
|
{
"resource": ""
}
|
q14022
|
Stream.readline
|
train
|
def readline(self, limit=-1, delim=b'\n'):
"""Read a single line.
If EOF is reached before a full line can be read, a partial line is
returned. If *limit* is specified, at most this many bytes will be read.
"""
self._check_readable()
chunks = []
while True:
chunk = self._buffer.get_chunk(limit, delim)
if not chunk:
break
chunks.append(chunk)
if chunk.endswith(delim):
break
if limit >= 0:
limit -= len(chunk)
if limit == 0:
break
if not chunks and not self._buffer.eof and self._buffer.error:
raise compat.saved_exc(self._buffer.error)
return b''.join(chunks)
|
python
|
{
"resource": ""
}
|
q14023
|
Stream.readlines
|
train
|
def readlines(self, hint=-1):
"""Read lines until EOF, and return them as a list.
If *hint* is specified, then stop reading lines as soon as the total
size of all lines exceeds *hint*.
"""
self._check_readable()
lines = []
chunks = []
bytes_read = 0
while True:
chunk = self._buffer.get_chunk(-1, b'\n')
if not chunk:
break
chunks.append(chunk)
if chunk.endswith(b'\n'):
lines.append(b''.join(chunks))
del chunks[:]
bytes_read += len(lines[-1])
if hint >= 0 and bytes_read > hint:
break
if chunks:
lines.append(b''.join(chunks))
if not lines and not self._buffer.eof and self._buffer.error:
raise compat.saved_exc(self._buffer.error)
return lines
|
python
|
{
"resource": ""
}
|
q14024
|
Stream.write_eof
|
train
|
def write_eof(self):
"""Close the write direction of the transport.
This method will block if the transport's write buffer is at capacity.
"""
self._check_writable()
self._transport._can_write.wait()
self._transport.write_eof()
|
python
|
{
"resource": ""
}
|
q14025
|
blocking
|
train
|
def blocking(func, *args, **kwargs):
"""Run a function that uses blocking IO.
The function is run in the IO thread pool.
"""
pool = get_io_pool()
fut = pool.submit(func, *args, **kwargs)
return fut.result()
|
python
|
{
"resource": ""
}
|
q14026
|
as_completed
|
train
|
def as_completed(objects, count=None, timeout=None):
"""Wait for one or more waitable objects, yielding them as they become
ready.
This is the iterator/generator version of :func:`wait`.
"""
for obj in objects:
if not hasattr(obj, 'add_done_callback'):
raise TypeError('Expecting sequence of waitable objects')
if count is None:
count = len(objects)
if count < 0 or count > len(objects):
raise ValueError('count must be between 0 and len(objects)')
if count == 0:
return
pending = list(objects)
for obj in _wait(pending, timeout):
yield obj
count -= 1
if count == 0:
break
|
python
|
{
"resource": ""
}
|
q14027
|
wait
|
train
|
def wait(objects, count=None, timeout=None):
"""Wait for one or more waitable objects.
This method waits until *count* elements from the sequence of waitable
objects *objects* have become ready. If *count* is ``None`` (the default),
then wait for all objects to become ready.
What "ready" is means depends on the object type. A waitable object is a
objects that implements the ``add_done_callback()`` and
``remove_done_callback`` methods. This currently includes:
* :class:`~gruvi.Event` - an event is ready when its internal flag is set.
* :class:`~gruvi.Future` - a future is ready when its result is set.
* :class:`~gruvi.Fiber` - a fiber is ready when it has terminated.
* :class:`~gruvi.Process` - a process is ready when the child has exited.
"""
for obj in objects:
if not hasattr(obj, 'add_done_callback'):
raise TypeError('Expecting sequence of waitable objects')
if count is None:
count = len(objects)
if count < 0 or count > len(objects):
raise ValueError('count must be between 0 and len(objects)')
if count == 0:
return [], objects
pending = list(objects)
done = []
try:
for obj in _wait(pending, timeout):
done.append(obj)
if len(done) == count:
break
except Timeout:
pass
return done, list(filter(bool, pending))
|
python
|
{
"resource": ""
}
|
q14028
|
Future.cancelled
|
train
|
def cancelled(self):
"""Return whether this future was successfully cancelled."""
return self._state == self.S_EXCEPTION and isinstance(self._result, Cancelled)
|
python
|
{
"resource": ""
}
|
q14029
|
Future.cancel
|
train
|
def cancel(self):
"""Cancel the execution of the async function, if possible.
This method marks the future as done and sets the :class:`Cancelled`
exception.
A future that is not running can always be cancelled. However when a
future is running, the ability to cancel it depends on the pool
implementation. For example, a fiber pool can cancel running fibers but
a thread pool cannot.
Return ``True`` if the future could be cancelled, ``False`` otherwise.
"""
# We leverage/abuse our _done Event's thread lock as our own lock.
# Since it's a private copy it should be OK, and it saves some memory.
# Just be sure that we don't modify the event with the lock held.
with self._lock:
if self._state not in (self.S_PENDING, self.S_RUNNING):
return False
self._result = Cancelled('cancelled by Future.cancel()')
self._state = self.S_EXCEPTION
self._done.set()
return True
|
python
|
{
"resource": ""
}
|
q14030
|
Future.result
|
train
|
def result(self, timeout=None):
"""Wait for the future to complete and return its result.
If the function returned normally, its return value is returned here.
If the function raised an exception, the exception is re-raised here.
"""
if not self._done.wait(timeout):
raise Timeout('timeout waiting for future')
# No more state changes after _done is set so no lock needed.
if self._state == self.S_EXCEPTION:
raise compat.saved_exc(self._result)
return self._result
|
python
|
{
"resource": ""
}
|
q14031
|
Future.exception
|
train
|
def exception(self, timeout=None):
"""Wait for the async function to complete and return its exception.
If the function did not raise an exception this returns ``None``.
"""
if not self._done.wait(timeout):
raise Timeout('timeout waiting for future')
if self._state == self.S_EXCEPTION:
return self._result
|
python
|
{
"resource": ""
}
|
q14032
|
Future.add_done_callback
|
train
|
def add_done_callback(self, callback, *args):
"""Add a callback that gets called when the future completes.
The callback will be called in the context of the fiber that sets the
future's result. The callback is called with the positional arguments
*args* provided to this method.
The return value is an opaque handle that can be used with
:meth:`~gruvi.Future.remove_done_callback` to remove the callback.
If the future has already completed, then the callback is called
immediately from this method and the return value will be ``None``.
"""
with self._lock:
if self._state not in (self.S_DONE, self.S_EXCEPTION):
return add_callback(self, callback, args)
callback(*args)
|
python
|
{
"resource": ""
}
|
q14033
|
PoolBase.close
|
train
|
def close(self):
"""Close the pool and wait for all workers to exit.
New submissions will be blocked. Workers will exit once their current
job is finished. This method will return after all workers have exited.
"""
with self._lock:
if self._closing:
return
self._closing = True
if not self._workers:
self._closed.set()
return
self._queue.put_nowait(self._PoolClosing)
self._closed.wait()
|
python
|
{
"resource": ""
}
|
q14034
|
SslPipe.do_handshake
|
train
|
def do_handshake(self, callback=None):
"""Start the SSL handshake. Return a list of ssldata.
The optional *callback* argument can be used to install a callback that
will be called when the handshake is complete. The callback will be
called without arguments.
"""
if self._state != self.S_UNWRAPPED:
raise RuntimeError('handshake in progress or completed')
self._sslobj = sslcompat.wrap_bio(self._context, self._incoming, self._outgoing,
self._server_side, self._server_hostname)
self._state = self.S_DO_HANDSHAKE
self._handshake_cb = callback
ssldata, appdata = self.feed_ssldata(b'')
assert len(appdata) == 0
return ssldata
|
python
|
{
"resource": ""
}
|
q14035
|
SslPipe.shutdown
|
train
|
def shutdown(self, callback=None):
"""Start the SSL shutdown sequence. Return a list of ssldata.
The optional *callback* argument can be used to install a callback that
will be called when the shutdown is complete. The callback will be
called without arguments.
"""
if self._state == self.S_UNWRAPPED:
raise RuntimeError('no security layer present')
self._state = self.S_SHUTDOWN
self._shutdown_cb = callback
ssldata, appdata = self.feed_ssldata(b'')
assert appdata == [] or appdata == [b'']
return ssldata
|
python
|
{
"resource": ""
}
|
q14036
|
SslPipe.feed_eof
|
train
|
def feed_eof(self):
"""Send a potentially "ragged" EOF.
This method will raise an SSL_ERROR_EOF exception if the EOF is
unexpected.
"""
self._incoming.write_eof()
ssldata, appdata = self.feed_ssldata(b'')
assert appdata == [] or appdata == [b'']
|
python
|
{
"resource": ""
}
|
q14037
|
SslPipe.feed_ssldata
|
train
|
def feed_ssldata(self, data):
"""Feed SSL record level data into the pipe.
The data must be a bytes instance. It is OK to send an empty bytes
instance. This can be used to get ssldata for a handshake initiated by
this endpoint.
Return a (ssldata, appdata) tuple. The ssldata element is a list of
buffers containing SSL data that needs to be sent to the remote SSL.
The appdata element is a list of buffers containing plaintext data that
needs to be forwarded to the application. The appdata list may contain
an empty buffer indicating an SSL "close_notify" alert. This alert must
be acknowledged by calling :meth:`shutdown`.
"""
if self._state == self.S_UNWRAPPED:
# If unwrapped, pass plaintext data straight through.
return ([], [data] if data else [])
ssldata = []; appdata = []
self._need_ssldata = False
if data:
self._incoming.write(data)
try:
if self._state == self.S_DO_HANDSHAKE:
# Call do_handshake() until it doesn't raise anymore.
self._sslobj.do_handshake()
self._state = self.S_WRAPPED
if self._handshake_cb:
self._handshake_cb()
if self._state == self.S_WRAPPED:
# Main state: read data from SSL until close_notify
while True:
chunk = self._sslobj.read(self.bufsize)
appdata.append(chunk)
if not chunk: # close_notify
break
if self._state == self.S_SHUTDOWN:
# Call shutdown() until it doesn't raise anymore.
self._sslobj.unwrap()
self._sslobj = None
self._state = self.S_UNWRAPPED
if self._shutdown_cb:
self._shutdown_cb()
if self._state == self.S_UNWRAPPED:
# Drain possible plaintext data after close_notify.
appdata.append(self._incoming.read())
except (ssl.SSLError, sslcompat.CertificateError) as e:
if getattr(e, 'errno', None) not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE, ssl.SSL_ERROR_SYSCALL):
if self._state == self.S_DO_HANDSHAKE and self._handshake_cb:
self._handshake_cb(e)
raise
self._need_ssldata = e.errno == ssl.SSL_ERROR_WANT_READ
# Check for record level data that needs to be sent back.
# Happens for the initial handshake and renegotiations.
if self._outgoing.pending:
ssldata.append(self._outgoing.read())
return (ssldata, appdata)
|
python
|
{
"resource": ""
}
|
q14038
|
SslPipe.feed_appdata
|
train
|
def feed_appdata(self, data, offset=0):
"""Feed plaintext data into the pipe.
Return an (ssldata, offset) tuple. The ssldata element is a list of
buffers containing record level data that needs to be sent to the
remote SSL instance. The offset is the number of plaintext bytes that
were processed, which may be less than the length of data.
NOTE: In case of short writes, this call MUST be retried with the SAME
buffer passed into the *data* argument (i.e. the ``id()`` must be the
same). This is an OpenSSL requirement. A further particularity is that
a short write will always have offset == 0, because the _ssl module
does not enable partial writes. And even though the offset is zero,
there will still be encrypted data in ssldata.
"""
if self._state == self.S_UNWRAPPED:
# pass through data in unwrapped mode
return ([data[offset:]] if offset < len(data) else [], len(data))
ssldata = []
view = memoryview(data)
while True:
self._need_ssldata = False
try:
if offset < len(view):
offset += self._sslobj.write(view[offset:])
except ssl.SSLError as e:
# It is not allowed to call write() after unwrap() until the
# close_notify is acknowledged. We return the condition to the
# caller as a short write.
if sslcompat.get_reason(e) == 'PROTOCOL_IS_SHUTDOWN':
e.errno = ssl.SSL_ERROR_WANT_READ
if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
ssl.SSL_ERROR_SYSCALL):
raise
self._need_ssldata = e.errno == ssl.SSL_ERROR_WANT_READ
# See if there's any record level data back for us.
if self._outgoing.pending:
ssldata.append(self._outgoing.read())
if offset == len(view) or self._need_ssldata:
break
return (ssldata, offset)
|
python
|
{
"resource": ""
}
|
q14039
|
SslTransport.get_extra_info
|
train
|
def get_extra_info(self, name, default=None):
"""Return transport specific data.
The following fields are available, in addition to the information
exposed by :meth:`Transport.get_extra_info`.
====================== ===============================================
Name Description
====================== ===============================================
``'ssl'`` The internal ``ssl.SSLObject`` instance used by
this transport.
``'sslctx'`` The ``ssl.SSLContext`` instance used to create
the SSL object.
====================== ===============================================
"""
if name == 'ssl':
return self._sslpipe.ssl_object
elif name == 'sslctx':
return self._sslpipe.context
else:
return super(SslTransport, self).get_extra_info(name, default)
|
python
|
{
"resource": ""
}
|
q14040
|
SslTransport.do_handshake
|
train
|
def do_handshake(self):
"""Start the SSL handshake.
This method only needs to be called if this transport was created with
*do_handshake_on_connect* set to False (the default is True).
The handshake needs to be synchronized between both endpoints, so
that SSL record level data is not incidentally interpreted as
plaintext. Usually this is done by starting the handshake directly
after a connection is established, but you can also use an application
level protocol.
"""
if self._error:
raise compat.saved_exc(self._error)
elif self._closing or self._handle.closed:
raise TransportError('SSL transport is closing/closed')
self._write_backlog.append([b'', True])
self._process_write_backlog()
|
python
|
{
"resource": ""
}
|
q14041
|
SslTransport.unwrap
|
train
|
def unwrap(self):
"""Remove the security layer.
Use this method only if you want to send plaintext data on the
connection after the security layer has been removed. In all other
cases, use :meth:`close`.
If the unwrap is initiated by us, then any data sent after it will be
buffered until the corresponding close_notify response is received from
our peer.
If the unwrap is initiated by the remote peer, then this method will
acknowledge it. You need an application level protocol to determine
when to do this because the receipt of a close_notify is not
communicated to the application.
"""
if self._error:
raise compat.saved_exc(self._error)
elif self._closing or self._handle.closed:
raise TransportError('SSL transport is closing/closed')
self._close_on_unwrap = False
self._write_backlog.append([b'', False])
self._process_write_backlog()
|
python
|
{
"resource": ""
}
|
q14042
|
SslTransport.close
|
train
|
def close(self):
"""Cleanly shut down the SSL protocol and close the transport."""
if self._closing or self._handle.closed:
return
self._closing = True
self._write_backlog.append([b'', False])
self._process_write_backlog()
|
python
|
{
"resource": ""
}
|
q14043
|
is_locked
|
train
|
def is_locked(lock):
"""Return whether a lock is locked.
Supports :class:`Lock`, :class:`RLock`, :class:`threading.Lock` and
:class:`threading.RLock` instances.
"""
if hasattr(lock, 'locked'):
return lock.locked()
elif hasattr(lock, '_is_owned'):
return lock._is_owned()
else:
raise TypeError('expecting Lock/RLock')
|
python
|
{
"resource": ""
}
|
q14044
|
acquire_restore
|
train
|
def acquire_restore(lock, state):
"""Acquire a lock and restore its state."""
if hasattr(lock, '_acquire_restore'):
lock._acquire_restore(state)
elif hasattr(lock, 'acquire'):
lock.acquire()
else:
raise TypeError('expecting Lock/RLock')
|
python
|
{
"resource": ""
}
|
q14045
|
release_save
|
train
|
def release_save(lock):
"""Release a lock and return its state."""
if hasattr(lock, '_release_save'):
return lock._release_save()
elif hasattr(lock, 'release'):
lock.release()
else:
raise TypeError('expecting Lock/RLock')
|
python
|
{
"resource": ""
}
|
q14046
|
Condition.notify
|
train
|
def notify(self, n=1):
"""Raise the condition and wake up fibers waiting on it.
The optional *n* parameter specifies how many fibers will be notified.
By default, one fiber is notified.
"""
if not is_locked(self._lock):
raise RuntimeError('lock is not locked')
notified = [0] # Work around lack of "nonlocal" in py27
def walker(switcher, predicate):
if not switcher.active:
return False  # do not keep a switcher that timed out
if predicate and not predicate():
return True
if n >= 0 and notified[0] >= n:
return True
switcher.switch()
notified[0] += 1
return False # only notify once
walk_callbacks(self, walker)
|
python
|
{
"resource": ""
}
|
q14047
|
Queue.get
|
train
|
def get(self, block=True, timeout=None):
"""Pop an item from the queue.
If the queue is not empty, an item is returned immediately. Otherwise,
if *block* is True (the default), wait up to *timeout* seconds for an
item to become available. If no timeout is provided, then wait
indefinitely.
If the queue is empty and *block* is false or a timeout occurs, then
raise a :class:`QueueEmpty` exception.
"""
with self._lock:
while not self._heap:
if not block:
raise QueueEmpty
if not self._notempty.wait(timeout):
raise QueueEmpty
prio, size, item = heapq.heappop(self._heap)
self._size -= size
if 0 <= self._size < self.maxsize:
self._notfull.notify()
return item
|
python
|
{
"resource": ""
}
|
q14048
|
Queue.task_done
|
train
|
def task_done(self):
"""Mark a task as done."""
with self._lock:
unfinished = self._unfinished_tasks - 1
if unfinished < 0:
raise RuntimeError('task_done() called too many times')
elif unfinished == 0:
self._alldone.notify()
self._unfinished_tasks = unfinished
|
python
|
{
"resource": ""
}
|
q14049
|
Process.spawn
|
train
|
def spawn(self, args, executable=None, stdin=None, stdout=None, stderr=None,
shell=False, cwd=None, env=None, flags=0, extra_handles=None):
"""Spawn a new child process.
The executable to spawn and its arguments are determined by *args*,
*executable* and *shell*.
When *shell* is set to ``False`` (the default), *args* is normally a
sequence and it contains both the program to execute (at index 0), and
its arguments.
When *shell* is set to ``True``, then *args* is normally a string and
it indicates the command to execute through the shell.
The *executable* argument can be used to override the executable to
execute. If *shell* is ``False``, it overrides ``args[0]``. This is
sometimes used on Unix to implement "fat" executables that behave
differently based on argv[0]. If *shell* is ``True``, it overrides the
shell to use. The default shell is ``'/bin/sh'`` on Unix, and the value
of $COMSPEC (or ``'cmd.exe'`` if it is unset) on Windows.
The *stdin*, *stdout* and *stderr* arguments specify how to handle
standard input, output, and error, respectively. If set to None, then
the child will inherit our respective stdio handle. If set to the
special constant ``PIPE`` then a pipe is created. The pipe will be
connected to a :class:`gruvi.StreamProtocol` which you can use to read
or write from it. The stream protocol instance is available under
either :attr:`stdin`, :attr:`stdout` or :attr:`stderr`. All 3 stdio
arguments can also be a file descriptor, a file-like object, or a pyuv
``Stream`` instance.
The *extra_handles* specifies any extra handles to pass to the client.
It must be a sequence where each element is either a file descriptor, a
file-like object, or a ``pyuv.Stream`` instance. The position in the
sequence determines the file descriptor in the client. The first
position corresponds to FD 3, the second to 4, etc. This places these
file descriptors directly after the stdio handles.
The *cwd* argument specifies the directory to change to before
executing the child. If not provided, the current directory is used.
The *env* argument specifies the environment to use when executing the
child. If provided, it must be a dictionary. By default, the current
environment is used.
The *flags* argument can be used to specify optional libuv
``uv_process_flags``. The only relevant flags are
``pyuv.UV_PROCESS_DETACHED`` and ``pyuv.UV_PROCESS_WINDOWS_HIDE``. Both
are Windows specific and are silently ignored on Unix.
"""
if self._process:
raise RuntimeError('child process already spawned')
self._child_exited.clear()
self._closed.clear()
self._exit_status = None
self._term_signal = None
hub = get_hub()
if isinstance(args, str):
args = [args]
flags |= pyuv.UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS
else:
args = list(args)
if shell:
if hasattr(os, 'fork'):
# Unix
if executable is None:
executable = '/bin/sh'
args = [executable, '-c'] + args
else:
# Windows
if executable is None:
executable = os.environ.get('COMSPEC', 'cmd.exe')
args = [executable, '/c'] + args
if executable is None:
executable = args[0]
kwargs = {}
if env is not None:
kwargs['env'] = env
if cwd is not None:
kwargs['cwd'] = cwd
kwargs['flags'] = flags
handles = self._get_child_handles(hub.loop, stdin, stdout, stderr, extra_handles)
kwargs['stdio'] = handles
process = pyuv.Process.spawn(hub.loop, args, executable,
exit_callback=self._on_child_exit, **kwargs)
# Create stdin/stdout/stderr transports/protocols.
if handles[0].stream:
self._stdin = self._connect_child_handle(handles[0])
if handles[1].stream:
self._stdout = self._connect_child_handle(handles[1])
if handles[2].stream:
self._stderr = self._connect_child_handle(handles[2])
self._process = process
|
python
|
{
"resource": ""
}
|
q14050
|
Process.close
|
train
|
def close(self):
"""Close the process and frees its associated resources.
This method waits for the resources to be freed by the event loop.
"""
if self._process is None:
return
waitfor = []
if not self._process.closed:
self._process.close(self._on_close_complete)
waitfor.append(self._closed)
# For each of stdin/stdout/stderr, close the transport. This schedules
# an on-close callback that will close the protocol, which we wait for.
if self._stdin:
self._stdin[1].close()
waitfor.append(self._stdin[1]._closed)
if self._stdout:
self._stdout[1].close()
waitfor.append(self._stdout[1]._closed)
if self._stderr:
self._stderr[1].close()
waitfor.append(self._stderr[1]._closed)
futures.wait(waitfor)
self._process = None
self._stdin = self._stdout = self._stderr = None
|
python
|
{
"resource": ""
}
|
q14051
|
Process.terminate
|
train
|
def terminate(self):
"""Terminate the child process.
It is not an error to call this method when the child has already exited.
"""
try:
self.send_signal(signal.SIGTERM)
except pyuv.error.ProcessError as e:
if e.args[0] != pyuv.errno.UV_ESRCH:
raise
|
python
|
{
"resource": ""
}
|
q14052
|
Process.wait
|
train
|
def wait(self, timeout=-1):
"""Wait for the child to exit.
Wait for at most *timeout* seconds, or indefinitely if *timeout* is
None. Return the value of the :attr:`returncode` attribute.
"""
if self._process is None:
raise RuntimeError('no child process')
if timeout == -1:
timeout = self._timeout
if not self._child_exited.wait(timeout):
raise Timeout('timeout waiting for child to exit')
return self.returncode
|
python
|
{
"resource": ""
}
|
q14053
|
Process.communicate
|
train
|
def communicate(self, input=None, timeout=-1):
"""Communicate with the child and return its output.
If *input* is provided, it is sent to the child. Concurrently with
sending the input, the child's standard output and standard error are
read, until the child exits.
The return value is a tuple ``(stdout_data, stderr_data)`` containing
the data read from standard output and standard error.
"""
if self._process is None:
raise RuntimeError('no child process')
if timeout == -1:
timeout = self._timeout
output = [[], []]
def writer(stream, data):
offset = 0
while offset < len(data):
buf = data[offset:offset+4096]
stream.write(buf)
offset += len(buf)
stream.close()
def reader(stream, data):
while True:
if self._encoding:
buf = stream.read(4096)
else:
buf = stream.read1()
if not buf:
break
data.append(buf)
if self.stdin:
fibers.spawn(writer, self.stdin, input or b'')
if self.stdout:
fibers.spawn(reader, self.stdout, output[0])
if self.stderr:
fibers.spawn(reader, self.stderr, output[1])
self.wait(timeout)
empty = '' if self._encoding else b''
stdout_data = empty.join(output[0])
stderr_data = empty.join(output[1])
return (stdout_data, stderr_data)
|
python
|
{
"resource": ""
}
|
q14054
|
get_requirements
|
train
|
def get_requirements():
"""Parse a requirements.txt file and return as a list."""
with open(os.path.join(topdir, 'requirements.txt')) as fin:
lines = fin.readlines()
lines = [line.strip() for line in lines]
return lines
|
python
|
{
"resource": ""
}
|
q14055
|
dllist.insert
|
train
|
def insert(self, node, before=None):
"""Insert a new node in the list.
If *before* is specified, the new node is inserted before this node.
Otherwise, the node is inserted at the end of the list.
"""
node._list = self
if self._first is None:
self._first = self._last = node # first node in list
self._size += 1
return node
if before is None:
self._last._next = node # insert as last node
node._prev = self._last
self._last = node
else:
node._next = before
node._prev = before._prev
if node._prev:
node._prev._next = node
else:
self._first = node # inserting as first node
node._next._prev = node
self._size += 1
return node
|
python
|
{
"resource": ""
}
|
q14056
|
dllist.clear
|
train
|
def clear(self):
"""Remove all nodes from the list."""
node = self._first
while node is not None:
next_node = node._next
node._list = node._prev = node._next = None
node = next_node
# Reset head/tail so the list is actually empty afterwards.
self._first = self._last = None
self._size = 0
|
python
|
{
"resource": ""
}
|
q14057
|
_build_codes
|
train
|
def _build_codes() -> Dict[str, Dict[str, str]]:
""" Build code map, encapsulated to reduce module-level globals. """
built = {
'fore': {},
'back': {},
'style': {},
} # type: Dict[str, Dict[str, str]]
# Set codes for forecolors (30-37) and backcolors (40-47)
# Names are given to some of the 256-color variants as 'light' colors.
for name, number in _namemap:
# Not using format_* functions here, no validation needed.
built['fore'][name] = codeformat(30 + number)
built['back'][name] = codeformat(40 + number)
litename = 'light{}'.format(name) # type: str
built['fore'][litename] = codeformat(90 + number)
built['back'][litename] = codeformat(100 + number)
# Set reset codes for fore/back.
built['fore']['reset'] = codeformat(39)
built['back']['reset'] = codeformat(49)
# Set style codes.
for code, names in _stylemap:
for alias in names:
built['style'][alias] = codeformat(code)
# Extended (256 color codes)
for i in range(256):
built['fore'][str(i)] = extforeformat(i)
built['back'][str(i)] = extbackformat(i)
return built
|
python
|
{
"resource": ""
}
|
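For reference, the numbers used above follow the standard ANSI SGR layout: 30-37 for foreground, 40-47 for background, 90-97 and 100-107 for the "light" variants, 39/49 for the resets, and 38;5;N / 48;5;N for the 256-color extensions. A hedged sketch, assuming codeformat and extforeformat simply wrap a number in an SGR escape sequence (the real helpers are not shown in this row):

# Assumed shape of the helpers used by _build_codes; illustrative only.
def codeformat(number):
    return '\x1b[{}m'.format(number)

def extforeformat(number):
    return '\x1b[38;5;{}m'.format(number)

# 'red' sits at offset 1 in the classic ANSI palette.
assert codeformat(30 + 1) == '\x1b[31m'        # fore red
assert codeformat(40 + 1) == '\x1b[41m'        # back red
assert codeformat(90 + 1) == '\x1b[91m'        # light red
assert extforeformat(196) == '\x1b[38;5;196m'  # extended fore color 196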
q14058
|
_build_codes_reverse
|
train
|
def _build_codes_reverse(
codes: Dict[str, Dict[str, str]]) -> Dict[str, Dict[str, str]]:
""" Build a reverse escape-code to name map, based on an existing
name to escape-code map.
"""
built = {} # type: Dict[str, Dict[str, str]]
for codetype, codemap in codes.items():
for name, escapecode in codemap.items():
# Skip shortcut aliases to avoid overwriting long names.
if len(name) < 2:
continue
if built.get(codetype, None) is None:
built[codetype] = {}
built[codetype][escapecode] = name
return built
|
python
|
{
"resource": ""
}
|
q14059
|
auto_disable
|
train
|
def auto_disable(
enabled: Optional[bool] = True,
fds: Optional[Sequence[IO]] = (sys.stdout, sys.stderr)) -> None:
""" Automatically decide whether to disable color codes if stdout or
stderr are not ttys.
Arguments:
enabled : Whether to automatically disable color codes.
When set to True, the fds will be checked for ttys.
When set to False, enable() is called.
fds : Open file descriptors to check for ttys.
If any non-ttys are found, colors will be disabled.
Objects must have an isatty() method.
"""
if enabled:
if not all(getattr(f, 'isatty', lambda: False)() for f in fds):
disable()
else:
enable()
|
python
|
{
"resource": ""
}
|
q14060
|
format_back
|
train
|
def format_back(
number: FormatArg,
light: Optional[bool] = False,
extended: Optional[bool] = False) -> str:
""" Return an escape code for a back color, by number.
This is a convenience method for handling the different code types
all in one shot.
It also handles some validation.
"""
return _format_code(
number,
backcolor=True,
light=light,
extended=extended
)
|
python
|
{
"resource": ""
}
|
q14061
|
format_fore
|
train
|
def format_fore(
number: FormatArg,
light: Optional[bool] = False,
extended: Optional[bool] = False) -> str:
""" Return an escape code for a fore color, by number.
This is a convenience method for handling the different code types
all in one shot.
It also handles some validation.
"""
return _format_code(
number,
backcolor=False,
light=light,
extended=extended
)
|
python
|
{
"resource": ""
}
|
q14062
|
format_style
|
train
|
def format_style(number: int) -> str:
""" Return an escape code for a style, by number.
This handles invalid style numbers.
"""
if str(number) not in _stylenums:
raise InvalidStyle(number)
return codeformat(number)
|
python
|
{
"resource": ""
}
|
q14063
|
get_all_names
|
train
|
def get_all_names() -> Tuple[str]:
""" Retrieve a tuple of all known color names, basic and 'known names'.
"""
names = list(basic_names)
names.extend(name_data)
return tuple(sorted(set(names)))
|
python
|
{
"resource": ""
}
|
q14064
|
get_code_num
|
train
|
def get_code_num(s: str) -> Optional[int]:
""" Get code number from an escape code.
Raises InvalidEscapeCode if an invalid number is found.
"""
if ';' in s:
# Extended fore/back codes.
numberstr = s.rpartition(';')[-1][:-1]
else:
# Fore, back, style, codes.
numberstr = s.rpartition('[')[-1][:-1]
num = try_parse_int(
numberstr,
default=None,
minimum=0,
maximum=255
)
if num is None:
raise InvalidEscapeCode(numberstr)
return num
|
python
|
{
"resource": ""
}
|
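A self-contained illustration of the string slicing that get_code_num above relies on: for extended codes the number sits after the last ';', for basic codes it sits after the '[', and the trailing 'm' is dropped in both cases.

def last_number(s):
    # Same extraction as get_code_num, without the validation step.
    if ';' in s:
        return s.rpartition(';')[-1][:-1]
    return s.rpartition('[')[-1][:-1]

assert last_number('\x1b[38;5;200m') == '200'   # extended fore code
assert last_number('\x1b[31m') == '31'          # basic fore code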
q14065
|
get_code_num_rgb
|
train
|
def get_code_num_rgb(s: str) -> Optional[Tuple[int, int, int]]:
""" Get rgb code numbers from an RGB escape code.
Raises InvalidRgbEscapeCode if an invalid number is found.
"""
parts = s.split(';')
if len(parts) != 5:
raise InvalidRgbEscapeCode(s, reason='Count is off.')
rgbparts = parts[-3:]
if not rgbparts[2].endswith('m'):
raise InvalidRgbEscapeCode(s, reason='Missing \'m\' on the end.')
rgbparts[2] = rgbparts[2].rstrip('m')
try:
r, g, b = [int(x) for x in rgbparts]
except ValueError as ex:
raise InvalidRgbEscapeCode(s) from ex
if not all(in_range(x, 0, 255) for x in (r, g, b)):
raise InvalidRgbEscapeCode(s, reason='Not in range 0-255.')
return r, g, b
|
python
|
{
"resource": ""
}
|
q14066
|
get_known_codes
|
train
|
def get_known_codes(
s: Union[str, 'Colr'],
unique: Optional[bool] = True,
rgb_mode: Optional[bool] = False):
""" Get all known escape codes from a string, and yield the explanations.
"""
isdisabled = disabled()
orderedcodes = tuple((c, get_known_name(c)) for c in get_codes(s))
codesdone = set() # type: Set[str]
for code, codeinfo in orderedcodes:
# Do the codes in order, but don't do the same code twice.
if unique:
if code in codesdone:
continue
codesdone.add(code)
if codeinfo is None:
continue
codetype, name = codeinfo
typedesc = '{:>13}: {!r:<23}'.format(codetype.title(), code)
if codetype.startswith(('extended', 'rgb')):
if isdisabled:
codedesc = str(ColorCode(name, rgb_mode=rgb_mode))
else:
codedesc = ColorCode(name, rgb_mode=rgb_mode).example()
else:
codedesc = ''.join((
code,
str(name).lstrip('(').rstrip(')'),
codes['style']['reset_all']
))
yield ' '.join((
typedesc,
codedesc
))
|
python
|
{
"resource": ""
}
|
q14067
|
try_parse_int
|
train
|
def try_parse_int(
s: str,
default: Optional[Any] = None,
minimum: Optional[int] = None,
maximum: Optional[int] = None) -> Optional[Any]:
""" Try parsing a string into an integer.
On failure, return `default`.
If the number is less than `minimum` or greater than `maximum`,
return `default`.
Returns an integer on success.
"""
try:
n = int(s)
except ValueError:
return default
if (minimum is not None) and (n < minimum):
return default
elif (maximum is not None) and (n > maximum):
return default
return n
|
python
|
{
"resource": ""
}
|
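A few self-contained checks of try_parse_int above, showing the default/minimum/maximum behaviour:

def try_parse_int(s, default=None, minimum=None, maximum=None):
    try:
        n = int(s)
    except ValueError:
        return default
    if (minimum is not None) and (n < minimum):
        return default
    elif (maximum is not None) and (n > maximum):
        return default
    return n

assert try_parse_int('42') == 42
assert try_parse_int('oops', default=-1) == -1
assert try_parse_int('300', default=None, minimum=0, maximum=255) is None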
q14068
|
Colr._ext_attr_to_partial
|
train
|
def _ext_attr_to_partial(self, name, kwarg_key):
""" Convert a string like '233' or 'aliceblue' into partial for
self.chained.
"""
try:
intval = int(name)
except ValueError:
# Try as an extended name_data name.
info = name_data.get(name, None)
if info is None:
# Not an int value or name_data name.
return None
kws = {kwarg_key: info['code']}
return partial(self.chained, **kws)
# Integer str passed, use the int value.
kws = {kwarg_key: intval}
return partial(self.chained, **kws)
|
python
|
{
"resource": ""
}
|
q14069
|
Colr._gradient_black_line
|
train
|
def _gradient_black_line(
self, text, start, step=1,
fore=None, back=None, style=None, reverse=False, rgb_mode=False):
""" Yield colorized characters,
within the 24-length black gradient.
"""
if start < 232:
start = 232
elif start > 255:
start = 255
if reverse:
codes = list(range(start, 231, -1))
else:
codes = list(range(start, 256))
return ''.join((
self._iter_text_wave(
text,
codes,
step=step,
fore=fore,
back=back,
style=style,
rgb_mode=rgb_mode
)
))
|
python
|
{
"resource": ""
}
|
q14070
|
Colr._gradient_black_lines
|
train
|
def _gradient_black_lines(
self, text, start, step=1,
fore=None, back=None, style=None, reverse=False,
movefactor=2, rgb_mode=False):
""" Yield colorized characters,
within the 24-length black gradient,
treating each line separately.
"""
if not movefactor:
def factor(i):
return start
else:
# Increase the start for each line.
def factor(i):
return start + (i * movefactor)
return '\n'.join((
self._gradient_black_line(
line,
start=factor(i),
step=step,
fore=fore,
back=back,
style=style,
reverse=reverse,
rgb_mode=rgb_mode,
)
for i, line in enumerate(text.splitlines())
))
|
python
|
{
"resource": ""
}
|
q14071
|
Colr._gradient_rgb_lines
|
train
|
def _gradient_rgb_lines(
self, text, start, stop, step=1,
fore=None, back=None, style=None, movefactor=None):
""" Yield colorized characters, morphing from one rgb value to
another. This treats each line separately.
"""
morphlist = list(self._morph_rgb(start, stop, step=step))
if movefactor:
# Moving means we need the morph to wrap around.
morphlist.extend(self._morph_rgb(stop, start, step=step))
if movefactor < 0:
# Increase the start for each line.
def move():
popped = []
for _ in range(abs(movefactor)):
try:
popped.append(morphlist.pop(0))
except IndexError:
pass
morphlist.extend(popped)
return morphlist
else:
# Decrease start for each line.
def move():
for _ in range(movefactor):
try:
val = morphlist.pop(-1)
except IndexError:
pass
else:
morphlist.insert(0, val)
return morphlist
return '\n'.join((
self._gradient_rgb_line_from_morph(
line,
move() if movefactor else morphlist,
fore=fore,
back=back,
style=style,
)
for i, line in enumerate(text.splitlines())
))
|
python
|
{
"resource": ""
}
|
q14072
|
Colr._morph_rgb
|
train
|
def _morph_rgb(self, rgb1, rgb2, step=1):
""" Morph an rgb value into another, yielding each step along the way.
"""
pos1, pos2 = list(rgb1), list(rgb2)
indexes = [i for i, _ in enumerate(pos1)]
def step_value(a, b):
""" Returns the amount to add to `a` to make it closer to `b`,
multiplied by `step`.
"""
if a < b:
return step
if a > b:
return -step
return 0
steps = [step_value(pos1[x], pos2[x]) for x in indexes]
stepcnt = 0
while (pos1 != pos2):
stepcnt += 1
stop = yield tuple(pos1)
if stop:
break
for x in indexes:
if pos1[x] != pos2[x]:
pos1[x] += steps[x]
if (steps[x] < 0) and (pos1[x] < pos2[x]):
# Over stepped, negative.
pos1[x] = pos2[x]
if (steps[x] > 0) and (pos1[x] > pos2[x]):
# Over stepped, positive.
pos1[x] = pos2[x]
yield tuple(pos1)
|
python
|
{
"resource": ""
}
|
q14073
|
Colr.chained
|
train
|
def chained(self, text=None, fore=None, back=None, style=None):
""" Called by the various 'color' methods to colorize a single string.
The RESET_ALL code is appended to the string unless text is empty.
Raises ValueError on invalid color names.
Arguments:
text : String to colorize, or None for BG/Style change.
fore : Name of fore color to use.
back : Name of back color to use.
style : Name of style to use.
"""
self.data = ''.join((
self.data,
self.color(text=text, fore=fore, back=back, style=style),
))
return self
|
python
|
{
"resource": ""
}
|
q14074
|
Colr.color
|
train
|
def color(
self, text=None, fore=None, back=None, style=None,
no_closing=False):
""" A method that colorizes strings, not Colr objects.
Raises InvalidColr for invalid color names.
The 'reset_all' code is appended if text is given.
"""
has_args = (
(fore is not None) or
(back is not None) or
(style is not None)
)
if hasattr(text, '__colr__') and not has_args:
# Use custom __colr__ method in the absence of arguments.
return str(self._call_dunder_colr(text))
# Stringify everything before operating on it.
text = str(text) if text is not None else ''
if _disabled:
return text
# Considered to have unclosed codes if embedded codes exist and
# the last code was not a color code.
embedded_codes = get_codes(text)
has_end_code = embedded_codes and embedded_codes[-1] == closing_code
# Add closing code if not already added, there is text, and
# some kind of color/style was used (whether from args, or
# color codes were included in the text already).
# If the last code embedded in the text was a closing code,
# then it is not added.
# This can be overridden with `no_closing`.
needs_closing = (
text and
(not no_closing) and
(not has_end_code) and
(has_args or embedded_codes)
)
if needs_closing:
end = closing_code
else:
end = ''
return ''.join((
self.color_code(fore=fore, back=back, style=style),
text,
end,
))
|
python
|
{
"resource": ""
}
|
q14075
|
Colr.format
|
train
|
def format(self, *args, **kwargs):
""" Like str.format, except it returns a Colr. """
return self.__class__(self.data.format(*args, **kwargs))
|
python
|
{
"resource": ""
}
|
q14076
|
Colr.get_escape_code
|
train
|
def get_escape_code(self, codetype, value):
""" Convert user arg to escape code. """
valuefmt = str(value).lower()
code = codes[codetype].get(valuefmt, None)
if code:
# Basic code from fore, back, or style.
return code
named_funcs = {
'fore': format_fore,
'back': format_back,
'style': format_style,
}
# Not a basic code, try known names.
converter = named_funcs.get(codetype, None)
if converter is None:
raise ValueError(
'Invalid code type. Expecting {}, got: {!r}'.format(
', '.join(named_funcs),
codetype
)
)
# Try as hex.
with suppress(ValueError):
value = int(hex2term(value, allow_short=True))
return converter(value, extended=True)
named_data = name_data.get(valuefmt, None)
if named_data is not None:
# A known named color.
try:
return converter(named_data['code'], extended=True)
except TypeError:
# Passing a known name as a style?
if codetype == 'style':
raise InvalidStyle(value)
raise
# Not a known color name/value, try rgb.
try:
r, g, b = (int(x) for x in value)
# This does not mean we have a 3 int tuple. It could '111'.
# The converter should catch it though.
except (TypeError, ValueError):
# Not an rgb value.
if codetype == 'style':
raise InvalidStyle(value)
try:
escapecode = converter(value)
except ValueError as ex:
raise InvalidColr(value) from ex
return escapecode
|
python
|
{
"resource": ""
}
|
q14077
|
Colr.lstrip
|
train
|
def lstrip(self, chars=None):
""" Like str.lstrip, except it returns the Colr instance. """
return self.__class__(
self._str_strip('lstrip', chars),
no_closing=chars and (closing_code in chars),
)
|
python
|
{
"resource": ""
}
|
q14078
|
Colr.print
|
train
|
def print(self, *args, **kwargs):
""" Chainable print method. Prints self.data and then clears it. """
print(self, *args, **kwargs)
self.data = ''
return self
|
python
|
{
"resource": ""
}
|
q14079
|
Colr.rstrip
|
train
|
def rstrip(self, chars=None):
""" Like str.rstrip, except it returns the Colr instance. """
return self.__class__(
self._str_strip('rstrip', chars),
no_closing=chars and (closing_code in chars),
)
|
python
|
{
"resource": ""
}
|
q14080
|
Colr.strip
|
train
|
def strip(self, chars=None):
""" Like str.strip, except it returns the Colr instance. """
return self.__class__(
self._str_strip('strip', chars),
no_closing=chars and (closing_code in chars),
)
|
python
|
{
"resource": ""
}
|
q14081
|
ensure_tty
|
train
|
def ensure_tty(file=sys.stdout):
""" Ensure a file object is a tty. It must have an `isatty` method that
returns True.
TypeError is raised if the method doesn't exist, or returns False.
"""
isatty = getattr(file, 'isatty', None)
if isatty is None:
raise TypeError(
'Cannot detect tty, file has no `isatty` method: {}'.format(
getattr(file, 'name', type(file).__name__)
)
)
if not isatty():
raise TypeError(
'This will not work, file object is not a tty: {}'.format(
getattr(file, 'name', type(file).__name__)
)
)
return True
|
python
|
{
"resource": ""
}
|
q14082
|
move_back
|
train
|
def move_back(columns=1, file=sys.stdout):
""" Move the cursor back a number of columns.
Esc[<columns>D:
Moves the cursor back by the specified number of columns without
changing lines. If the cursor is already in the leftmost column,
ANSI.SYS ignores this sequence.
"""
move.back(columns).write(file=file)
|
python
|
{
"resource": ""
}
|
q14083
|
move_column
|
train
|
def move_column(column=1, file=sys.stdout):
""" Move the cursor to the specified column, default 1.
Esc[<column>G
"""
move.column(column).write(file=file)
|
python
|
{
"resource": ""
}
|
q14084
|
move_down
|
train
|
def move_down(lines=1, file=sys.stdout):
""" Move the cursor down a number of lines.
Esc[<lines>B:
Moves the cursor down by the specified number of lines without
changing columns. If the cursor is already on the bottom line,
ANSI.SYS ignores this sequence.
"""
move.down(lines).write(file=file)
|
python
|
{
"resource": ""
}
|
q14085
|
move_forward
|
train
|
def move_forward(columns=1, file=sys.stdout):
""" Move the cursor forward a number of columns.
Esc[<columns>C:
Moves the cursor forward by the specified number of columns without
changing lines. If the cursor is already in the rightmost column,
ANSI.SYS ignores this sequence.
"""
move.forward(columns).write(file=file)
|
python
|
{
"resource": ""
}
|
q14086
|
move_pos
|
train
|
def move_pos(line=1, column=1, file=sys.stdout):
""" Move the cursor to a new position. Values are 1-based, and default
to 1.
Esc[<line>;<column>H
or
Esc[<line>;<column>f
"""
move.pos(line=line, col=column).write(file=file)
|
python
|
{
"resource": ""
}
|
q14087
|
move_up
|
train
|
def move_up(lines=1, file=sys.stdout):
""" Move the cursor up a number of lines.
Esc[ValueA:
Moves the cursor up by the specified number of lines without changing
columns. If the cursor is already on the top line, ANSI.SYS ignores
this sequence.
"""
move.up(lines).write(file=file)
|
python
|
{
"resource": ""
}
|
q14088
|
scroll_down
|
train
|
def scroll_down(lines=1, file=sys.stdout):
""" Scroll the whole page down a number of lines, new lines are added to
the top.
Esc[<lines>T
"""
scroll.down(lines).write(file=file)
|
python
|
{
"resource": ""
}
|
q14089
|
scroll_up
|
train
|
def scroll_up(lines=1, file=sys.stdout):
""" Scroll the whole page up a number of lines, new lines are added to
the bottom.
Esc[<lines>S
"""
scroll.up(lines).write(file=file)
|
python
|
{
"resource": ""
}
|
q14090
|
Control.last_code
|
train
|
def last_code(self):
""" Return the last escape code in `self.data`.
If no escape codes are found, '' is returned.
"""
codes = self.data.split(escape_sequence)
if not codes:
return ''
return ''.join((escape_sequence, codes[-1]))
|
python
|
{
"resource": ""
}
|
q14091
|
Control.repeat
|
train
|
def repeat(self, count=2):
""" Repeat the last control code a number of times.
Returns a new Control with this one's data and the repeated code.
"""
# Subtracting one from the count means the code mentioned is
# truly repeated exactly `count` times.
# Control().move_up().repeat(3) ==
# Control().move_up().move_up().move_up()
try:
return self.__class__(''.join((
str(self),
self.last_code() * (count - 1),
)))
except TypeError as ex:
raise TypeError(
'`count` must be an integer. Got: {!r}'.format(count)
) from ex
|
python
|
{
"resource": ""
}
|
q14092
|
Control.repeat_all
|
train
|
def repeat_all(self, count=2):
""" Repeat this entire Control code a number of times.
Returns a new Control with this one's data repeated.
"""
try:
return self.__class__(''.join(str(self) * count))
except TypeError:
raise TypeError(
'`count` must be an integer. Got: {!r}'.format(count)
)
|
python
|
{
"resource": ""
}
|
q14093
|
hex2term
|
train
|
def hex2term(hexval: str, allow_short: bool = False) -> str:
""" Convert a hex value into the nearest terminal code number. """
return rgb2term(*hex2rgb(hexval, allow_short=allow_short))
|
python
|
{
"resource": ""
}
|
q14094
|
hex2termhex
|
train
|
def hex2termhex(hexval: str, allow_short: bool = False) -> str:
""" Convert a hex value into the nearest terminal color matched hex. """
return rgb2termhex(*hex2rgb(hexval, allow_short=allow_short))
|
python
|
{
"resource": ""
}
|
q14095
|
print_all
|
train
|
def print_all() -> None:
""" Print all 256 xterm color codes. """
for code in sorted(term2hex_map):
print(' '.join((
'\033[48;5;{code}m{code:<3}:{hexval:<6}\033[0m',
'\033[38;5;{code}m{code:<3}:{hexval:<6}\033[0m'
)).format(code=code, hexval=term2hex_map[code]))
|
python
|
{
"resource": ""
}
|
q14096
|
rgb2hex
|
train
|
def rgb2hex(r: int, g: int, b: int) -> str:
""" Convert rgb values to a hex code. """
return '{:02x}{:02x}{:02x}'.format(r, g, b)
|
python
|
{
"resource": ""
}
|
q14097
|
rgb2term
|
train
|
def rgb2term(r: int, g: int, b: int) -> str:
""" Convert an rgb value to a terminal code. """
return hex2term_map[rgb2termhex(r, g, b)]
|
python
|
{
"resource": ""
}
|
q14098
|
rgb2termhex
|
train
|
def rgb2termhex(r: int, g: int, b: int) -> str:
""" Convert an rgb value to the nearest hex value that matches a term code.
The hex value will be one in `hex2term_map`.
"""
incs = [0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff]
res = []
parts = r, g, b
for part in parts:
if (part < 0) or (part > 255):
raise ValueError(
'Expecting 0-255 for RGB code, got: {!r}'.format(parts)
)
i = 0
while i < len(incs) - 1:
s, b = incs[i], incs[i + 1] # smaller, bigger
if s <= part <= b:
s1 = abs(s - part)
b1 = abs(b - part)
if s1 < b1:
closest = s
else:
closest = b
res.append(closest)
break
i += 1
# Convert back into nearest hex value.
return rgb2hex(*res)
|
python
|
{
"resource": ""
}
|
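A compact, self-contained restatement of the channel-snapping arithmetic in rgb2termhex above, to make the nearest-increment behaviour concrete (ties at exact midpoints go to the larger increment, as in the original loop):

INCS = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)

def nearest_inc(part):
    # Find the bracketing pair and keep the closer endpoint (ties -> larger).
    for lo, hi in zip(INCS, INCS[1:]):
        if lo <= part <= hi:
            return lo if abs(lo - part) < abs(hi - part) else hi
    raise ValueError('Expecting 0-255 for RGB code, got: {!r}'.format(part))

def demo_rgb2termhex(r, g, b):
    return '{:02x}{:02x}{:02x}'.format(*(nearest_inc(p) for p in (r, g, b)))

assert demo_rgb2termhex(100, 200, 50) == '5fd75f'   # 100->0x5f, 200->0xd7, 50->0x5f
assert demo_rgb2termhex(0, 0, 0) == '000000'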
q14099
|
ColorCode._init_code
|
train
|
def _init_code(self, code: int) -> None:
""" Initialize from an int terminal code. """
if -1 < code < 256:
self.code = '{:02}'.format(code)
self.hexval = term2hex(code)
self.rgb = hex2rgb(self.hexval)
else:
raise ValueError(' '.join((
'Code must be in the range 0-255, inclusive.',
'Got: {} ({})'
)).format(code, getattr(code, '__name__', type(code).__name__)))
|
python
|
{
"resource": ""
}
|