_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q28600
message_from_string
train
def message_from_string(s, *args, **kws):
    """Parse a string into a Message object model.

    Optional _class and strict are passed to the Parser constructor.
    """
    from future.backports.email.parser import Parser
    parser = Parser(*args, **kws)
    return parser.parsestr(s)
python
{ "resource": "" }
q28601
message_from_bytes
train
def message_from_bytes(s, *args, **kws):
    """Parse a bytes string into a Message object model.

    Optional _class and strict are passed to the Parser constructor.
    """
    from future.backports.email.parser import BytesParser
    parser = BytesParser(*args, **kws)
    return parser.parsebytes(s)
python
{ "resource": "" }
q28602
message_from_file
train
def message_from_file(fp, *args, **kws):
    """Read a file and parse its contents into a Message object model.

    Optional _class and strict are passed to the Parser constructor.
    """
    from future.backports.email.parser import Parser
    parser = Parser(*args, **kws)
    return parser.parse(fp)
python
{ "resource": "" }
q28603
message_from_binary_file
train
def message_from_binary_file(fp, *args, **kws):
    """Read a binary file and parse its contents into a Message object model.

    Optional _class and strict are passed to the Parser constructor.
    """
    from future.backports.email.parser import BytesParser
    parser = BytesParser(*args, **kws)
    return parser.parse(fp)
python
{ "resource": "" }
q28604
HTTPResponse._safe_read
train
def _safe_read(self, amt):
    """Read exactly *amt* bytes, compensating for partial reads.

    A blocking socket read() can still return short when interrupted by
    a signal.  EOF and interrupt are indistinguishable when zero bytes
    come back, so IncompleteRead is raised in that case.  Use this when
    <amt> bytes "should" be available; IncompleteRead signals truncation.
    """
    chunks = []
    remaining = amt
    while remaining > 0:
        piece = self.fp.read(min(remaining, MAXAMOUNT))
        if not piece:
            # EOF (or interrupt) before the expected byte count arrived.
            raise IncompleteRead(bytes(b'').join(chunks), remaining)
        chunks.append(piece)
        remaining -= len(piece)
    return bytes(b"").join(chunks)
python
{ "resource": "" }
q28605
HTTPResponse._safe_readinto
train
def _safe_readinto(self, b):
    """Same as _safe_read, but reads into the pre-allocated buffer *b*."""
    total = 0
    view = memoryview(b)
    target = len(b)
    while total < target:
        # Cap each readinto() at MAXAMOUNT; memoryview slices share the
        # underlying buffer, so no copying happens here.
        window = view[0:MAXAMOUNT] if MAXAMOUNT < len(view) else view
        count = self.fp.readinto(window)
        if not count:
            raise IncompleteRead(bytes(view[0:total]), target)
        view = view[count:]
        total += count
    return total
python
{ "resource": "" }
q28606
HTTPConnection.set_tunnel
train
def set_tunnel(self, host, port=None, headers=None):
    """Set up the host and port for HTTP CONNECT tunnelling.

    The headers argument should be a mapping of extra HTTP headers to
    send with the CONNECT request.
    """
    self._tunnel_host = host
    self._tunnel_port = port
    if not headers:
        # Falsy headers (None or empty) reset any previous mapping.
        self._tunnel_headers.clear()
    else:
        self._tunnel_headers = headers
python
{ "resource": "" }
q28607
HTTPConnection.close
train
def close(self):
    """Close the connection to the HTTP server."""
    if self.sock:
        # Close explicitly: other references to the socket may exist.
        self.sock.close()
        self.sock = None
    if self.__response:
        self.__response.close()
        self.__response = None
    # The connection is idle again regardless of what was open.
    self.__state = _CS_IDLE
python
{ "resource": "" }
q28608
HTTPConnection._send_output
train
def _send_output(self, message_body=None):
    """Send the currently buffered request and clear the buffer.

    Appends an extra \\r\\n to the buffer.  A message_body may be
    specified, to be appended to the request.
    """
    buf = self._buffer
    buf.extend((bytes(b""), bytes(b"")))
    payload = bytes(b"\r\n").join(buf)
    del buf[:]
    # Sending headers and a bytes body in one send() call avoids the bad
    # interaction between delayed ACK and the Nagle algorithm.
    body = message_body
    if isinstance(body, bytes):
        payload += body
        body = None
    self.send(payload)
    if body is not None:
        # Non-bytes body (e.g. a file): send separately and risk Nagle.
        self.send(body)
python
{ "resource": "" }
q28609
HTTPConnection.putheader
train
def putheader(self, header, *values):
    """Send a request header line to the server.

    For example: h.putheader('Accept', 'text/html')
    """
    if self.__state != _CS_REQ_STARTED:
        raise CannotSendHeader()
    if hasattr(header, 'encode'):
        header = header.encode('ascii')
    encoded = []
    for one_value in values:
        if hasattr(one_value, 'encode'):
            encoded.append(one_value.encode('latin-1'))
        elif isinstance(one_value, int):
            encoded.append(str(one_value).encode('ascii'))
        else:
            # Already bytes (or bytes-like): pass through unchanged.
            encoded.append(one_value)
    # Multiple values are folded as continuation lines (CRLF + TAB).
    line = header + bytes(b': ') + bytes(b'\r\n\t').join(encoded)
    self._output(line)
python
{ "resource": "" }
q28610
HTTPConnection.endheaders
train
def endheaders(self, message_body=None):
    """Indicate that the last header line has been sent to the server.

    This method sends the request to the server.  The optional
    message_body argument can be used to pass a message body associated
    with the request.  The message body will be sent in the same packet
    as the message headers if it is a string, otherwise it is sent as a
    separate packet.
    """
    if self.__state != _CS_REQ_STARTED:
        raise CannotSendHeader()
    self.__state = _CS_REQ_SENT
    self._send_output(message_body)
python
{ "resource": "" }
q28611
HTTPConnection.request
train
def request(self, method, url, body=None, headers={}):
    """Send a complete request to the server."""
    # NOTE: the mutable {} default is safe here because headers is only
    # read, never mutated.
    self._send_request(method, url, body, headers)
python
{ "resource": "" }
q28612
HTTPConnection.getresponse
train
def getresponse(self): """Get the response from the server. If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by class the response_class variable. If a request has not been sent or if a previous response has not be handled, ResponseNotReady is raised. If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. When the connection is closed, the underlying socket is closed. """ # if a prior response has been completed, then forget about it. if self.__response and self.__response.isclosed(): self.__response = None # if a prior response exists, then it must be completed (otherwise, we # cannot read this response's header to determine the connection-close # behavior) # # note: if a prior response existed, but was connection-close, then the # socket and response were made independent of this HTTPConnection # object since a new request requires that we open a whole new # connection # # this means the prior response had one of two states: # 1) will_close: this connection was reset and the prior socket and # response operate independently # 2) persistent: the response was retained and we await its # isclosed() status to become true. # if self.__state != _CS_REQ_SENT or self.__response: raise ResponseNotReady(self.__state) if self.debuglevel > 0: response = self.response_class(self.sock, self.debuglevel, method=self._method) else: response = self.response_class(self.sock, method=self._method) response.begin() assert response.will_close != _UNKNOWN self.__state = _CS_IDLE if response.will_close: # this effectively passes the connection to the response self.close() else: # remember this, so we can tell when it is complete self.__response = response return response
python
{ "resource": "" }
q28613
_validate_xtext
train
def _validate_xtext(xtext):
    """If input token contains ASCII non-printables, register a defect."""
    bad_chars = _non_printable_finder(xtext)
    if bad_chars:
        xtext.defects.append(errors.NonPrintableDefect(bad_chars))
    if utils._has_surrogates(xtext):
        # Surrogates mean the original header bytes could not be decoded.
        xtext.defects.append(errors.UndecodableBytesDefect(
            "Non-ASCII characters found in header token"))
python
{ "resource": "" }
q28614
_decode_ew_run
train
def _decode_ew_run(value):
    """ Decode a run of RFC2047 encoded words.

        _decode_ew_run(value) -> (text, value, defects)

        Scans the supplied value for a run of tokens that look like they are RFC
        2047 encoded words, decodes those words into text according to RFC 2047
        rules (whitespace between encoded words is discarded), and returns the
        text and the remaining value (including any leading whitespace on the
        remaining value), as well as a list of any defects encountered while
        decoding.  The input value may not have any leading whitespace.

    """
    res = []
    defects = []
    last_ws = ''
    while value:
        try:
            tok, ws, value = _wsp_splitter(value, 1)
        except ValueError:
            # No whitespace left: the remainder is a single token.
            tok, ws, value = value, '', ''
        if not (tok.startswith('=?') and tok.endswith('?=')):
            # Not an encoded word, so the run has ended.  Reattach the
            # whitespace that preceded this token: it was not *between*
            # encoded words, so it must be preserved.
            return ''.join(res), last_ws + tok + ws + value, defects
        text, charset, lang, new_defects = _ew.decode(tok)
        res.append(text)
        defects.extend(new_defects)
        # Remember the trailing whitespace: it is discarded if the next
        # token is also an encoded word (per RFC 2047), but re-emitted
        # above if the run ends.
        last_ws = ws
    return ''.join(res), last_ws, defects
python
{ "resource": "" }
q28615
get_encoded_word
train
def get_encoded_word(value):
    """ encoded-word = "=?" charset "?" encoding "?" encoded-text "?="

    """
    ew = EncodedWord()
    if not value.startswith('=?'):
        raise errors.HeaderParseError(
            "expected encoded word but found {}".format(value))
    # NOTE: the _3to2listN locals below are mechanical artifacts of a
    # lib3to2 conversion of Python 3 extended unpacking
    # (``tok, *remainder = ...``); they simulate it on Python 2.
    _3to2list1 = list(value[2:].split('?=', 1))
    tok, remainder, = _3to2list1[:1] + [_3to2list1[1:]]
    if tok == value[2:]:
        # No '?=' terminator was found at all.
        raise errors.HeaderParseError(
            "expected encoded word but found {}".format(value))
    remstr = ''.join(remainder)
    if remstr[:2].isdigit():
        # presumably this re-splices a '?=' that occurred inside the
        # encoded text itself -- TODO confirm against the RFC 2047 parser
        # in CPython's email._header_value_parser.
        _3to2list3 = list(remstr.split('?=', 1))
        rest, remainder, = _3to2list3[:1] + [_3to2list3[1:]]
        tok = tok + '?=' + rest
    if len(tok.split()) > 1:
        ew.defects.append(errors.InvalidHeaderDefect(
            "whitespace inside encoded word"))
    ew.cte = value
    value = ''.join(remainder)
    try:
        text, charset, lang, defects = _ew.decode('=?' + tok + '?=')
    except ValueError:
        raise errors.HeaderParseError(
            "encoded word format invalid: '{}'".format(ew.cte))
    ew.charset = charset
    ew.lang = lang
    ew.defects.extend(defects)
    # Re-tokenize the decoded text into fws / vtext terminals.
    while text:
        if text[0] in WSP:
            token, text = get_fws(text)
            ew.append(token)
            continue
        _3to2list5 = list(_wsp_splitter(text, 1))
        chars, remainder, = _3to2list5[:1] + [_3to2list5[1:]]
        vtext = ValueTerminal(chars, 'vtext')
        _validate_xtext(vtext)
        ew.append(vtext)
        text = ''.join(remainder)
    return ew, value
python
{ "resource": "" }
q28616
get_display_name
train
def get_display_name(value):
    """ display-name = phrase

    Because this is simply a name-rule, we don't return a display-name
    token containing a phrase, but rather a display-name token with
    the content of the phrase.
    """
    phrase, value = get_phrase(value)
    display_name = DisplayName()
    display_name.extend(phrase[:])
    display_name.defects = phrase.defects[:]
    return display_name, value
python
{ "resource": "" }
q28617
get_invalid_mailbox
train
def get_invalid_mailbox(value, endchars):
    """ Read everything up to one of the chars in endchars.

    This is outside the formal grammar.  The InvalidMailbox TokenList that is
    returned acts like a Mailbox, but the data attributes are None.
    """
    invalid_mailbox = InvalidMailbox()
    while value:
        ch = value[0]
        if ch in endchars:
            break
        if ch in PHRASE_ENDS:
            invalid_mailbox.append(ValueTerminal(ch, 'misplaced-special'))
            value = value[1:]
        else:
            token, value = get_phrase(value)
            invalid_mailbox.append(token)
    return invalid_mailbox, value
python
{ "resource": "" }
q28618
get_invalid_parameter
train
def get_invalid_parameter(value):
    """ Read everything up to the next ';'.

    This is outside the formal grammar.  The InvalidParameter TokenList that
    is returned acts like a Parameter, but the data attributes are None.
    """
    invalid_parameter = InvalidParameter()
    while value:
        ch = value[0]
        if ch == ';':
            break
        if ch in PHRASE_ENDS:
            invalid_parameter.append(ValueTerminal(ch, 'misplaced-special'))
            value = value[1:]
        else:
            token, value = get_phrase(value)
            invalid_parameter.append(token)
    return invalid_parameter, value
python
{ "resource": "" }
q28619
_find_mime_parameters
train
def _find_mime_parameters(tokenlist, value):
    """Do our best to find the parameters in an invalid MIME header

    """
    # Consume everything up to the parameter separator ';'.
    while value:
        ch = value[0]
        if ch == ';':
            break
        if ch in PHRASE_ENDS:
            tokenlist.append(ValueTerminal(ch, 'misplaced-special'))
            value = value[1:]
        else:
            token, value = get_phrase(value)
            tokenlist.append(token)
    if not value:
        return
    tokenlist.append(ValueTerminal(';', 'parameter-separator'))
    tokenlist.append(parse_mime_parameters(value[1:]))
python
{ "resource": "" }
q28620
header_length
train
def header_length(bytearray):
    """Return the length of s when it is encoded with base64."""
    # NOTE: the parameter name shadows the builtin ``bytearray``; kept
    # for interface compatibility.
    # Each full or partial group of 3 input bytes yields 4 output chars.
    full_groups, remainder = divmod(len(bytearray), 3)
    groups = full_groups + (1 if remainder else 0)
    return groups * 4
python
{ "resource": "" }
q28621
header_encode
train
def header_encode(header_bytes, charset='iso-8859-1'):
    """Encode a single header line with Base64 encoding in a given charset.

    charset names the character set to use to encode the header.  It
    defaults to iso-8859-1.  Base64 encoding is defined in RFC 2045.
    """
    if not header_bytes:
        return ""
    if isinstance(header_bytes, str):
        header_bytes = header_bytes.encode(charset)
    encoded = b64encode(header_bytes).decode("ascii")
    return '=?%s?b?%s?=' % (charset, encoded)
python
{ "resource": "" }
q28622
body_encode
train
def body_encode(s, maxlinelen=76, eol=NL):
    r"""Encode a string with base64.

    Each line will be wrapped at, at most, maxlinelen characters (defaults to
    76 characters).

    Each line of encoded text will end with eol, which defaults to "\n".  Set
    this to "\r\n" if you will be using the result of this function directly
    in an email.
    """
    if not s:
        return s
    # Each output line of maxlinelen base64 chars covers this many raw bytes.
    max_unencoded = maxlinelen * 3 // 4
    pieces = []
    for start in range(0, len(s), max_unencoded):
        # BAW: should encode() inherit b2a_base64()'s dubious behavior in
        # adding a newline to the encoded string?
        line = b2a_base64(s[start:start + max_unencoded]).decode("ascii")
        if eol != NL and line.endswith(NL):
            line = line[:-1] + eol
        pieces.append(line)
    return EMPTYSTRING.join(pieces)
python
{ "resource": "" }
q28623
decode
train
def decode(string): """Decode a raw base64 string, returning a bytes object. This function does not parse a full MIME header value encoded with base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high level email.header class for that functionality. """ if not string: return bytes() elif isinstance(string, str): return a2b_base64(string.encode('raw-unicode-escape')) else: return a2b_base64(string)
python
{ "resource": "" }
q28624
urldefrag
train
def urldefrag(url): """Removes any existing fragment from URL. Returns a tuple of the defragmented URL and the fragment. If the URL contained no fragments, the second element is the empty string. """ url, _coerce_result = _coerce_args(url) if '#' in url: s, n, p, a, q, frag = urlparse(url) defrag = urlunparse((s, n, p, a, q, '')) else: frag = '' defrag = url return _coerce_result(DefragResult(defrag, frag))
python
{ "resource": "" }
q28625
body_encode
train
def body_encode(body, maxlinelen=76, eol=NL):
    """Encode with quoted-printable, wrapping at maxlinelen characters.

    Each line of encoded text will end with eol, which defaults to "\\n".  Set
    this to "\\r\\n" if you will be using the result of this function directly
    in an email.

    Each line will be wrapped at, at most, maxlinelen characters before the
    eol string (maxlinelen defaults to 76 characters, the maximum value
    permitted by RFC 2045).  Long lines will have the 'soft line break'
    quoted-printable character "=" appended to them, so the decoded text will
    be identical to the original text.

    The minimum maxlinelen is 4 to have room for a quoted character ("=XX")
    followed by a soft line break.  Smaller values will generate a
    ValueError.

    """
    if maxlinelen < 4:
        raise ValueError("maxlinelen must be at least 4")
    if not body:
        return body

    # The last line may or may not end in eol, but all other lines do.
    last_has_eol = (body[-1] in '\r\n')

    # This accumulator will make it easier to build the encoded body.
    encoded_body = _body_accumulator(maxlinelen, eol)

    lines = body.splitlines()
    last_line_no = len(lines) - 1
    for line_no, line in enumerate(lines):
        last_char_index = len(line) - 1
        for i, c in enumerate(line):
            # body_check() decides whether this character needs =XX quoting.
            if body_check(ord(c)):
                c = quote(c)
            encoded_body.write_char(c, i==last_char_index)
        # Add an eol if input line had eol.  All input lines have eol except
        # possibly the last one.
        if line_no < last_line_no or last_has_eol:
            encoded_body.newline()

    return encoded_body.getvalue()
python
{ "resource": "" }
q28626
decode
train
def decode(encoded, eol=NL):
    """Decode a quoted-printable string.

    Lines are separated with eol, which defaults to \\n.
    """
    if not encoded:
        return encoded
    # BAW: see comment in encode() above.  Again, we're building up the
    # decoded string with string concatenation, which could be done much more
    # efficiently.
    decoded = ''

    for line in encoded.splitlines():
        line = line.rstrip()
        if not line:
            # Blank input line: emit just a line terminator.
            decoded += eol
            continue

        i = 0
        n = len(line)
        while i < n:
            c = line[i]
            if c != '=':
                decoded += c
                i += 1
            # Otherwise, c == "=".  Are we at the end of the line?  If so, add
            # a soft line break.
            elif i+1 == n:
                i += 1
                continue
            # Decode if in form =AB
            elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
                decoded += unquote(line[i:i+3])
                i += 3
            # Otherwise, not in form =AB, pass literally
            else:
                decoded += c
                i += 1

            if i == n:
                # End of line reached normally (not via a soft break's
                # continue), so terminate the output line.
                decoded += eol
    # Special case if original string did not end with eol
    if encoded[-1] not in '\r\n' and decoded.endswith(eol):
        decoded = decoded[:-1]
    return decoded
python
{ "resource": "" }
q28627
header_decode
train
def header_decode(s): """Decode a string encoded with RFC 2045 MIME header `Q' encoding. This function does not parse a full MIME header value encoded with quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use the high level email.header class for that functionality. """ s = s.replace('_', ' ') return re.sub(r'=[a-fA-F0-9]{2}', _unquote_match, s, re.ASCII)
python
{ "resource": "" }
q28628
_body_accumulator.write_str
train
def write_str(self, s):
    """Add string s to the accumulated body."""
    length = len(s)
    self.write(s)
    # Charge the written characters against the room left on this line.
    self.room -= length
python
{ "resource": "" }
q28629
_body_accumulator.newline
train
def newline(self): """Write eol, then start new line.""" self.write_str(self.eol) self.room = self.maxlinelen
python
{ "resource": "" }
q28630
_body_accumulator.write_wrapped
train
def write_wrapped(self, s, extra_room=0):
    """Add a soft line break if needed, then write s."""
    needed = len(s) + extra_room
    if needed > self.room:
        self.write_soft_break()
    self.write_str(s)
python
{ "resource": "" }
q28631
HTMLParser.reset
train
def reset(self): """Reset this instance. Loses all unprocessed data.""" self.rawdata = '' self.lasttag = '???' self.interesting = interesting_normal self.cdata_elem = None _markupbase.ParserBase.reset(self)
python
{ "resource": "" }
q28632
HTMLParser.feed
train
def feed(self, data): r"""Feed data to the parser. Call this as often as you want, with as little or as much text as you want (may include '\n'). """ self.rawdata = self.rawdata + data self.goahead(0)
python
{ "resource": "" }
q28633
encode_base64
train
def encode_base64(msg):
    """Encode the message's payload in Base64.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    encdata = str(_bencode(msg.get_payload()), 'ascii')
    msg.set_payload(encdata)
    msg['Content-Transfer-Encoding'] = 'base64'
python
{ "resource": "" }
q28634
encode_quopri
train
def encode_quopri(msg):
    """Encode the message's payload in quoted-printable.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    msg.set_payload(_qencode(msg.get_payload()))
    msg['Content-Transfer-Encoding'] = 'quoted-printable'
python
{ "resource": "" }
q28635
encode_7or8bit
train
def encode_7or8bit(msg):
    """Set the Content-Transfer-Encoding header to 7bit or 8bit."""
    orig = msg.get_payload()
    if orig is None:
        # There's no payload.  For backwards compatibility we use 7bit
        msg['Content-Transfer-Encoding'] = '7bit'
        return
    # We play a trick to make this go fast.  If encoding/decode to ASCII
    # succeeds, we know the data must be 7bit, otherwise treat it as 8bit.
    try:
        if isinstance(orig, str):
            orig.encode('ascii')
        else:
            orig.decode('ascii')
    except UnicodeError:
        charset = msg.get_charset()
        output_cset = charset and charset.output_charset
        # iso-2022-* is non-ASCII but encodes to a 7-bit representation
        if output_cset and output_cset.lower().startswith('iso-2022-'):
            msg['Content-Transfer-Encoding'] = '7bit'
        else:
            msg['Content-Transfer-Encoding'] = '8bit'
    else:
        msg['Content-Transfer-Encoding'] = '7bit'
    # Normalize a bytes payload into the internal surrogateescaped str
    # form so the model stays consistent.
    if not isinstance(orig, str):
        msg.set_payload(orig.decode('ascii', 'surrogateescape'))
python
{ "resource": "" }
q28636
encode_noop
train
def encode_noop(msg): """Do nothing.""" # Well, not quite *nothing*: in Python3 we have to turn bytes into a string # in our internal surrogateescaped form in order to keep the model # consistent. orig = msg.get_payload() if not isinstance(orig, str): msg.set_payload(orig.decode('ascii', 'surrogateescape'))
python
{ "resource": "" }
q28637
is_py2_stdlib_module
train
def is_py2_stdlib_module(m):
    """
    Tries to infer whether the module m is from the Python 2 standard library.
    This may not be reliable on all systems.
    """
    if PY3:
        return False
    if not 'stdlib_path' in is_py2_stdlib_module.__dict__:
        # First call: locate the stdlib directory from a few known stdlib
        # modules, and cache it as a function attribute.
        stdlib_files = [contextlib.__file__, os.__file__, copy.__file__]
        stdlib_paths = [os.path.split(f)[0] for f in stdlib_files]
        if not len(set(stdlib_paths)) == 1:
            # This seems to happen on travis-ci.org. Very strange. We'll try to
            # ignore it.
            flog.warn('Multiple locations found for the Python standard '
                      'library: %s' % stdlib_paths)
        # Choose the first one arbitrarily
        is_py2_stdlib_module.stdlib_path = stdlib_paths[0]
    if m.__name__ in sys.builtin_module_names:
        return True
    if hasattr(m, '__file__'):
        # A module whose file lives under the stdlib path but not under
        # site-packages is taken to be part of the stdlib.
        modpath = os.path.split(m.__file__)
        if (modpath[0].startswith(is_py2_stdlib_module.stdlib_path) and
            'site-packages' not in modpath[0]):
            return True
    return False
python
{ "resource": "" }
q28638
restore_sys_modules
train
def restore_sys_modules(scrubbed):
    """
    Add any previously scrubbed modules back to the sys.modules cache,
    but only if it's safe to do so.
    """
    clash = set(sys.modules) & set(scrubbed)
    if clash:
        # If several, choose one arbitrarily to raise an exception about
        first = list(clash)[0]
        raise ImportError('future module {} clashes with Py2 module'
                          .format(first))
    sys.modules.update(scrubbed)
python
{ "resource": "" }
q28639
install_hooks
train
def install_hooks(): """ This function installs the future.standard_library import hook into sys.meta_path. """ if PY3: return install_aliases() flog.debug('sys.meta_path was: {0}'.format(sys.meta_path)) flog.debug('Installing hooks ...') # Add it unless it's there already newhook = RenameImport(RENAMES) if not detect_hooks(): sys.meta_path.append(newhook) flog.debug('sys.meta_path is now: {0}'.format(sys.meta_path))
python
{ "resource": "" }
q28640
remove_hooks
train
def remove_hooks(scrub_sys_modules=False):
    """
    This function removes the import hook from sys.meta_path.
    """
    if PY3:
        return
    flog.debug('Uninstalling hooks ...')
    # Walk backwards so deletions do not disturb the indices still to visit.
    for idx in range(len(sys.meta_path) - 1, -1, -1):
        if hasattr(sys.meta_path[idx], 'RENAMER'):
            del sys.meta_path[idx]
    # Explicit is better than implicit. In the future the interface should
    # probably change so that scrubbing the import hooks requires a separate
    # function call. Left as is for now for backward compatibility with
    # v0.11.x.
    if scrub_sys_modules:
        scrub_future_sys_modules()
python
{ "resource": "" }
q28641
detect_hooks
train
def detect_hooks(): """ Returns True if the import hooks are installed, False if not. """ flog.debug('Detecting hooks ...') present = any([hasattr(hook, 'RENAMER') for hook in sys.meta_path]) if present: flog.debug('Detected.') else: flog.debug('Not detected.') return present
python
{ "resource": "" }
q28642
RenameImport._find_and_load_module
train
def _find_and_load_module(self, name, path=None):
    """
    Finds and loads it. But if there's a . in the name, handles it
    properly.
    """
    bits = name.split('.')
    while len(bits) > 1:
        # Treat the first bit as a package
        packagename = bits.pop(0)
        # Recursively load the parent package so we can use its __path__
        # to locate the child.
        package = self._find_and_load_module(packagename, path)
        try:
            path = package.__path__
        except AttributeError:
            # This could be e.g. moves.
            flog.debug('Package {0} has no __path__.'.format(package))
            if name in sys.modules:
                return sys.modules[name]
            flog.debug('What to do here?')

    # After the loop only the final (unqualified) name remains.
    name = bits[0]

    module_info = imp.find_module(name, path)
    return imp.load_module(name, *module_info)
python
{ "resource": "" }
q28643
format_datetime
train
def format_datetime(dt, usegmt=False):
    """Turn a datetime into a date string as specified in RFC 2822.

    If usegmt is True, dt must be an aware datetime with an offset of zero.
    In this case 'GMT' will be rendered instead of the normal +0000 required
    by RFC2822.  This is to support HTTP headers involving date stamps.
    """
    timetuple = dt.timetuple()
    if usegmt:
        if dt.tzinfo is None or dt.tzinfo != datetime.timezone.utc:
            raise ValueError("usegmt option requires a UTC datetime")
        zone = 'GMT'
    else:
        # Naive datetimes render the RFC's "unknown local zone" marker.
        zone = '-0000' if dt.tzinfo is None else dt.strftime("%z")
    return _format_timetuple_and_zone(timetuple, zone)
python
{ "resource": "" }
q28644
unquote
train
def unquote(str): """Remove quotes from a string.""" if len(str) > 1: if str.startswith('"') and str.endswith('"'): return str[1:-1].replace('\\\\', '\\').replace('\\"', '"') if str.startswith('<') and str.endswith('>'): return str[1:-1] return str
python
{ "resource": "" }
q28645
decode_rfc2231
train
def decode_rfc2231(s): """Decode string according to RFC 2231""" parts = s.split(TICK, 2) if len(parts) <= 2: return None, None, s return parts
python
{ "resource": "" }
q28646
encode_rfc2231
train
def encode_rfc2231(s, charset=None, language=None):
    """Encode string according to RFC 2231.

    If neither charset nor language is given, then s is returned as-is.  If
    charset is given but not language, the string is encoded using the empty
    string for language.
    """
    quoted = url_quote(s, safe='', encoding=charset or 'ascii')
    if charset is None and language is None:
        return quoted
    return "%s'%s'%s" % (charset, language or '', quoted)
python
{ "resource": "" }
q28647
decode_params
train
def decode_params(params): """Decode parameters list according to RFC 2231. params is a sequence of 2-tuples containing (param name, string value). """ # Copy params so we don't mess with the original params = params[:] new_params = [] # Map parameter's name to a list of continuations. The values are a # 3-tuple of the continuation number, the string value, and a flag # specifying whether a particular segment is %-encoded. rfc2231_params = {} name, value = params.pop(0) new_params.append((name, value)) while params: name, value = params.pop(0) if name.endswith('*'): encoded = True else: encoded = False value = unquote(value) mo = rfc2231_continuation.match(name) if mo: name, num = mo.group('name', 'num') if num is not None: num = int(num) rfc2231_params.setdefault(name, []).append((num, value, encoded)) else: new_params.append((name, '"%s"' % quote(value))) if rfc2231_params: for name, continuations in rfc2231_params.items(): value = [] extended = False # Sort by number continuations.sort() # And now append all values in numerical order, converting # %-encodings for the encoded segments. If any of the # continuation names ends in a *, then the entire string, after # decoding segments and concatenating, must have the charset and # language specifiers at the beginning of the string. for num, s, encoded in continuations: if encoded: # Decode as "latin-1", so the characters in s directly # represent the percent-encoded octet values. # collapse_rfc2231_value treats this as an octet sequence. s = url_unquote(s, encoding="latin-1") extended = True value.append(s) value = quote(EMPTYSTRING.join(value)) if extended: charset, language, value = decode_rfc2231(value) new_params.append((name, (charset, language, '"%s"' % value))) else: new_params.append((name, '"%s"' % value)) return new_params
python
{ "resource": "" }
q28648
localtime
train
def localtime(dt=None, isdst=-1):
    """Return local time as an aware datetime object.

    If called without arguments, return current time.  Otherwise *dt*
    argument should be a datetime instance, and it is converted to the
    local time zone according to the system time zone database.  If *dt* is
    naive (that is, dt.tzinfo is None), it is assumed to be in local time.
    In this case, a positive or zero value for *isdst* causes localtime to
    presume initially that summer time (for example, Daylight Saving Time)
    is or is not (respectively) in effect for the specified time.  A
    negative value for *isdst* causes the localtime() function to attempt
    to divine whether summer time is in effect for the specified time.

    """
    if dt is None:
        return datetime.datetime.now(datetime.timezone.utc).astimezone()
    if dt.tzinfo is not None:
        # Already aware: just convert to the local zone.
        return dt.astimezone()
    # We have a naive datetime.  Convert to a (localtime) timetuple and pass to
    # system mktime together with the isdst hint.  System mktime will return
    # seconds since epoch.
    tm = dt.timetuple()[:-1] + (isdst,)
    seconds = time.mktime(tm)
    localtm = time.localtime(seconds)
    try:
        # Platforms with tm_gmtoff/tm_zone give us the offset directly.
        delta = datetime.timedelta(seconds=localtm.tm_gmtoff)
        tz = datetime.timezone(delta, localtm.tm_zone)
    except AttributeError:
        # Compute UTC offset and compare with the value implied by tm_isdst.
        # If the values match, use the zone name implied by tm_isdst.
        delta = dt - datetime.datetime(*time.gmtime(seconds)[:6])
        dst = time.daylight and localtm.tm_isdst > 0
        gmtoff = -(time.altzone if dst else time.timezone)
        if delta == datetime.timedelta(seconds=gmtoff):
            tz = datetime.timezone(delta, time.tzname[dst])
        else:
            tz = datetime.timezone(delta)
    return dt.replace(tzinfo=tz)
python
{ "resource": "" }
q28649
filepath_to_uri
train
def filepath_to_uri(path):
    """Convert a file system path to a URI portion that is suitable for
    inclusion in a URL.

    We are assuming input is either UTF-8 or unicode already.

    This method will encode certain chars that would normally be recognized as
    special chars for URIs.  Note that this method does not encode the '
    character, as it is a valid character within URIs.  See
    encodeURIComponent() JavaScript function for more details.

    Returns an ASCII string containing the encoded result.
    """
    if path is None:
        return path
    # I know about `os.sep` and `os.altsep` but I want to leave
    # some flexibility for hardcoding separators.
    normalized = force_bytes(path).replace(b"\\", b"/")
    return quote(normalized, safe=b"/~!*()'")
python
{ "resource": "" }
q28650
encode
train
def encode(string, charset='utf-8', encoding=None, lang=''):
    """Encode string using the CTE encoding that produces the shorter result.

    Produces an RFC 2047/2243 encoded word of the form:

        =?charset*lang?cte?encoded_string?=

    where '*lang' is omitted unless the 'lang' parameter is given a value.
    Optional argument charset (defaults to utf-8) specifies the charset to
    use to encode the string to binary before CTE encoding it.  Optional
    argument 'encoding' is the cte specifier for the encoding that should be
    used ('q' or 'b'); if it is None (the default) the encoding which produces
    the shortest encoded sequence is used, except that 'q' is preferred if it
    is up to five characters longer.  Optional argument 'lang' (default '')
    gives the RFC 2243 language string to specify in the encoded word.
    """
    string = str(string)
    if charset == 'unknown-8bit':
        bstring = string.encode('ascii', 'surrogateescape')
    else:
        bstring = string.encode(charset)
    if encoding is None:
        qlen = _cte_encode_length['q'](bstring)
        blen = _cte_encode_length['b'](bstring)
        # Bias toward q. 5 is arbitrary.
        encoding = 'b' if qlen - blen >= 5 else 'q'
    encoded = _cte_encoders[encoding](bstring)
    lang_part = '*' + lang if lang else lang
    return "=?{0}{1}?{2}?{3}?=".format(charset, lang_part, encoding, encoded)
python
{ "resource": "" }
q28651
bind_method
train
def bind_method(cls, name, func):
    """Attach *func* to *cls* under *name*, compatibly with Python 2 and 3.

    Python 2 distinguishes bound/unbound methods, so the function must be
    wrapped in a ``types.MethodType`` there; Python 3 accepts the plain
    function directly.

    Parameters
    ----------
    cls : type
        Class to receive the bound method.
    name : basestring
        Name of the method on class instances.
    func : function
        Function to be bound as a method.

    Returns
    -------
    None
    """
    if PY3:
        setattr(cls, name, func)
    else:
        # Python 2 only: wrap as an unbound method of cls.
        setattr(cls, name, types.MethodType(func, None, cls))
python
{ "resource": "" }
q28652
_PolicyBase.clone
train
def clone(self, **kw):
    """Return a new instance with selected attributes replaced.

    The copy carries the same attribute values as the current object,
    except for those overridden by keyword arguments.  Unknown attribute
    names raise TypeError.
    """
    fresh = self.__class__.__new__(self.__class__)
    for name, val in self.__dict__.items():
        object.__setattr__(fresh, name, val)
    for name, val in kw.items():
        if not hasattr(self, name):
            raise TypeError(
                "{!r} is an invalid keyword argument for {}".format(
                    name, self.__class__.__name__))
        object.__setattr__(fresh, name, val)
    return fresh
python
{ "resource": "" }
q28653
Policy.handle_defect
train
def handle_defect(self, obj, defect): """Based on policy, either raise defect or call register_defect. handle_defect(obj, defect) defect should be a Defect subclass, but in any case must be an Exception subclass. obj is the object on which the defect should be registered if it is not raised. If the raise_on_defect is True, the defect is raised as an error, otherwise the object and the defect are passed to register_defect. This method is intended to be called by parsers that discover defects. The email package parsers always call it with Defect instances. """ if self.raise_on_defect: raise defect self.register_defect(obj, defect)
python
{ "resource": "" }
q28654
_quote
train
def _quote(str, LegalChars=_LegalChars):
    r"""Quote a string for use in a cookie header.

    Strings made up entirely of legal characters are returned unchanged;
    anything else is wrapped in double quotes with special characters
    backslash-escaped.
    """
    if any(c not in LegalChars for c in str):
        return '"' + _nulljoin(_Translator.get(s, s) for s in str) + '"'
    return str
python
{ "resource": "" }
q28655
BaseCookie.output
train
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
    """Return the cookies as a string suitable for HTTP, sorted by key."""
    rendered = [morsel.output(attrs, header)
                for _, morsel in sorted(self.items())]
    return sep.join(rendered)
python
{ "resource": "" }
q28656
BaseCookie.js_output
train
def js_output(self, attrs=None):
    """Return the cookies as a string suitable for JavaScript, sorted by key."""
    rendered = [morsel.js_output(attrs)
                for _, morsel in sorted(self.items())]
    return _nulljoin(rendered)
python
{ "resource": "" }
q28657
_whatsnd
train
def _whatsnd(data):
    """Try to identify the MIME subtype of a sound file from its header.

    sndhdr.what() has an awkward interface, so the probing is redone here
    against the first 512 bytes.  Returns the mapped MIME subtype, or None
    when no test recognises the data.
    """
    header = data[:512]
    stream = BytesIO(header)
    for probe in sndhdr.tests:
        outcome = probe(header, stream)
        if outcome is not None:
            return _sndhdr_MIMEmap.get(outcome[0])
    return None
python
{ "resource": "" }
q28658
fixup_parse_tree
train
def fixup_parse_tree(cls_node):
    """Normalize a class node so that its body lives inside a suite node.

    One-line class definitions get no suite in the parse tree; this fakes
    one up by moving everything after the colon into a new suite child.
    """
    if any(child.type == syms.suite for child in cls_node.children):
        # Already in the preferred format; nothing to do.
        return
    # One-liners lack a suite node: locate the colon and build a suite.
    for pos, child in enumerate(cls_node.children):
        if child.type == token.COLON:
            break
    else:
        raise ValueError("No class suite and no ':'!")
    suite = Node(syms.suite, [])
    while cls_node.children[pos + 1:]:
        moved = cls_node.children[pos + 1]
        suite.append_child(moved.clone())
        moved.remove()
    cls_node.append_child(suite)
python
{ "resource": "" }
q28659
newbytes.index
train
def index(self, sub, *args):
    '''
    Return the index of *sub* within these bytes.

    Raises ValueError if the value is not found, and TypeError when *sub*
    cannot be converted to bytes.  An integer *sub* is searched for as a
    single byte value, honouring optional start/end positions.
    '''
    if isinstance(sub, int):
        # Bug fix: the original left `end` unbound when exactly one
        # positional argument (start) was given, causing a NameError.
        # Initialise both bounds first, then override from *args*.
        start, end = 0, len(self)
        if len(args) == 1:
            start = args[0]
        elif len(args) == 2:
            start, end = args
        elif len(args) > 2:
            raise TypeError('takes at most 3 arguments')
        return list(self)[start:end].index(sub)
    if not isinstance(sub, bytes):
        try:
            sub = self.__class__(sub)
        except (TypeError, ValueError):
            raise TypeError("can't convert sub to bytes")
    try:
        return super(newbytes, self).index(sub, *args)
    except ValueError:
        raise ValueError('substring not found')
python
{ "resource": "" }
q28660
isidentifier
train
def isidentifier(s, dotted=False):
    '''
    Equivalent of the Python 3 str.isidentifier method.

    With dotted=True, every dot-separated component must itself be an
    identifier.  On Python 2 a regular expression approximates the check.
    '''
    if dotted:
        return all(isidentifier(part) for part in s.split('.'))
    if PY3:
        return s.isidentifier()
    import re
    _name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
    return bool(_name_re.match(s))
python
{ "resource": "" }
q28661
viewitems
train
def viewitems(obj, **kwargs):
    """Iterate over dictionary items with Py3-style set-like behaviour.

    Uses ``viewitems`` when the object provides it (Python 2.7), falling
    back to ``items``.  Keyword arguments are passed through to the method.
    """
    method = getattr(obj, "viewitems", None) or obj.items
    return method(**kwargs)
python
{ "resource": "" }
q28662
viewkeys
train
def viewkeys(obj, **kwargs):
    """Iterate over dictionary keys with Py3-style set-like behaviour.

    Uses ``viewkeys`` when the object provides it (Python 2.7), falling
    back to ``keys``.  Keyword arguments are passed through to the method.
    """
    method = getattr(obj, "viewkeys", None) or obj.keys
    return method(**kwargs)
python
{ "resource": "" }
q28663
viewvalues
train
def viewvalues(obj, **kwargs):
    """Iterate over dictionary values with Py3-style set-like behaviour.

    Uses ``viewvalues`` when the object provides it (Python 2.7), falling
    back to ``values``.  Keyword arguments are passed through to the method.
    """
    method = getattr(obj, "viewvalues", None) or obj.values
    return method(**kwargs)
python
{ "resource": "" }
q28664
_get_caller_globals_and_locals
train
def _get_caller_globals_and_locals(): """ Returns the globals and locals of the calling frame. Is there an alternative to frame hacking here? """ caller_frame = inspect.stack()[2] myglobals = caller_frame[0].f_globals mylocals = caller_frame[0].f_locals return myglobals, mylocals
python
{ "resource": "" }
q28665
_repr_strip
train
def _repr_strip(mystring): """ Returns the string without any initial or final quotes. """ r = repr(mystring) if r.startswith("'") and r.endswith("'"): return r[1:-1] else: return r
python
{ "resource": "" }
q28666
as_native_str
train
def as_native_str(encoding='utf-8'):
    '''
    Decorator turning a text-returning callable into one returning a native
    platform str.  On Python 3 the function is returned untouched; on
    Python 2 its unicode result is encoded with *encoding*.

    Use it as a decorator like this::

        from __future__ import unicode_literals

        class MyClass(object):
            @as_native_str(encoding='ascii')
            def __repr__(self):
                return next(self._iter).upper()
    '''
    if PY3:
        return lambda f: f

    def encoder(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            return f(*args, **kwargs).encode(encoding=encoding)
        return wrapper
    return encoder
python
{ "resource": "" }
q28667
Plotter.bind
train
def bind(self, source=None, destination=None, node=None,
         edge_title=None, edge_label=None, edge_color=None, edge_weight=None,
         point_title=None, point_label=None, point_color=None, point_size=None):
    """Relate data attributes to graph structure and visual representation.

    Each parameter names a column of the bound tables:

    :param source: Attribute containing an edge's source ID.
    :param destination: Attribute containing an edge's destination ID.
    :param node: Attribute containing a node's ID.
    :param edge_title: Overrides an edge's minimized label text
        (defaults to source/destination).
    :param edge_label: Overrides an edge's expanded label text.
    :param edge_color: Overrides an edge's color (palette value).
    :param edge_weight: Overrides edge weight (default 1).
    :param point_title: Overrides a node's minimized label text
        (defaults to the node ID).
    :param point_label: Overrides a node's expanded label text.
    :param point_color: Overrides a node's color (palette value).
    :param point_size: Overrides a node's size (defaults to degree).

    The call is chainable: a *new* Plotter is returned carrying the merged
    bindings, while the receiver keeps its old ones, so both can be used
    for different graphs.

    :returns: Plotter.
    :rtype: Plotter.
    """
    res = copy.copy(self)
    overrides = {
        '_source': source, '_destination': destination, '_node': node,
        '_edge_title': edge_title, '_edge_label': edge_label,
        '_edge_color': edge_color, '_edge_weight': edge_weight,
        '_point_title': point_title, '_point_label': point_label,
        '_point_color': point_color, '_point_size': point_size,
    }
    # Each new value wins; a falsy/missing argument keeps the old binding.
    for attr, value in overrides.items():
        setattr(res, attr, value or getattr(self, attr))
    return res
python
{ "resource": "" }
q28668
Plotter.nodes
train
def nodes(self, nodes):
    """Bind the node table (must contain every node referenced by the edges).

    :param nodes: Nodes and their attributes.
    :type nodes: Pandas dataframe
    :returns: a new Plotter with ``_nodes`` replaced; the receiver is
        left unchanged.
    :rtype: Plotter.
    """
    updated = copy.copy(self)
    updated._nodes = nodes
    return updated
python
{ "resource": "" }
q28669
Plotter.edges
train
def edges(self, edges):
    """Bind the edge list and associated edge attribute values.

    :param edges: Edges and their attributes.
    :type edges: Pandas dataframe, NetworkX graph, or IGraph graph.
    :returns: a new Plotter with ``_edges`` replaced; the receiver is
        left unchanged.
    :rtype: Plotter.
    """
    updated = copy.copy(self)
    updated._edges = edges
    return updated
python
{ "resource": "" }
q28670
Plotter.graph
train
def graph(self, ig):
    """Bind a NetworkX or IGraph graph holding both node and edge data.

    Clears any previously bound node table.  Returns a new Plotter; the
    receiver is left unchanged.

    :param ig: Graph with node and edge attributes.
    :type ig: NetworkX graph or an IGraph graph.
    :rtype: Plotter.
    """
    updated = copy.copy(self)
    updated._edges = ig
    updated._nodes = None
    return updated
python
{ "resource": "" }
q28671
Plotter.settings
train
def settings(self, height=None, url_params={}, render=None):
    """Return a new Plotter with iframe height / URL params / render mode set.

    :param height: Height in pixels.
    :param url_params: Querystring parameters merged over the existing
        dictionary (URI-encoding is handled by the library).
    :param render: Whether to render inline in the notebook (True) or
        return the visualization URL; None keeps the current setting.
    """
    updated = copy.copy(self)
    updated._height = height or self._height
    updated._url_params = dict(self._url_params, **url_params)
    updated._render = self._render if render is None else render
    return updated
python
{ "resource": "" }
q28672
Plotter.plot
train
def plot(self, graph=None, nodes=None, name=None, render=None, skip_upload=False):
    """Upload the bound data to the Graphistry server and show it.

    Uses the currently bound schema structure and visual encodings;
    optional parameters override the current bindings.  In a notebook
    environment an iframe of the visualization is displayed.

    :param graph: Edge table or graph (Pandas dataframe, NetworkX graph,
        or IGraph graph); defaults to the bound edges.
    :param nodes: Nodes table (Pandas dataframe); defaults to the bound
        nodes.
    :param render: Whether to render inline (True) or return the
        visualization URL; None keeps the instance setting.
    :param skip_upload: Return the node/edge/bindings payload that would
        have been uploaded instead of uploading.
    """
    if graph is None:
        if self._edges is None:
            util.error('Graph/edges must be specified.')
        g = self._edges
    else:
        g = graph
    n = self._nodes if nodes is None else nodes
    name = name or util.random_string(10)
    self._check_mandatory_bindings(not isinstance(n, type(None)))

    # Build and (unless skipped) upload the dataset for the active API.
    api_version = PyGraphistry.api_version()
    if api_version == 1:
        dataset = self._plot_dispatch(g, n, name, 'json')
        if skip_upload:
            return dataset
        info = PyGraphistry._etl1(dataset)
    elif api_version == 2:
        dataset = self._plot_dispatch(g, n, name, 'vgraph')
        if skip_upload:
            return dataset
        info = PyGraphistry._etl2(dataset)

    viz_url = PyGraphistry._viz_url(info, self._url_params)
    full_url = '%s:%s' % (PyGraphistry._config['protocol'], viz_url)

    if render == False or (render is None and not self._render):
        return full_url
    if util.in_ipython():
        from IPython.core.display import HTML
        return HTML(util.make_iframe(viz_url, self._height,
                                     PyGraphistry._config['protocol']))
    import webbrowser
    webbrowser.open(full_url)
    return full_url
python
{ "resource": "" }
q28673
Plotter.pandas2igraph
train
def pandas2igraph(self, edges, directed=True):
    """Convert a pandas edge dataframe into an IGraph graph.

    Uses the current source/destination/node bindings; edges are treated
    as directed unless *directed* is False.  All columns other than the
    source and destination become edge attributes.
    """
    import igraph
    self._check_mandatory_bindings(False)
    self._check_bound_attribs(edges, ['source', 'destination'], 'Edge')
    self._node = self._node or Plotter._defaultNodeId

    attribs = edges.columns.values.tolist()
    attribs.remove(self._source)
    attribs.remove(self._destination)
    ordered_cols = [self._source, self._destination] + attribs
    edge_tuples = [tuple(row) for row in edges[ordered_cols].values]
    return igraph.Graph.TupleList(edge_tuples, directed=directed,
                                  edge_attrs=attribs,
                                  vertex_name_attr=self._node)
python
{ "resource": "" }
q28674
Plotter.igraph2pandas
train
def igraph2pandas(self, ig):
    """Transform an IGraph into (edges, nodes) pandas dataframes.

    Uses the current bindings; when no node binding exists, vertex
    indexes are written to the default node-ID attribute and bound.
    """
    def iter_edge_rows(ig):
        # Map vertex index -> bound node ID so edges reference IDs.
        idmap = dict(enumerate(ig.vs[self._node]))
        for e in ig.es:
            endpoints = e.tuple
            yield dict({self._source: idmap[endpoints[0]],
                        self._destination: idmap[endpoints[1]]},
                       **e.attributes())

    self._check_mandatory_bindings(False)
    if self._node is None:
        ig.vs[Plotter._defaultNodeId] = [v.index for v in ig.vs]
        self._node = Plotter._defaultNodeId
    elif self._node not in ig.vs.attributes():
        util.error('Vertex attribute "%s" bound to "node" does not exist.' % self._node)

    node_rows = [v.attributes() for v in ig.vs]
    nodes = pandas.DataFrame(node_rows, columns=ig.vs.attributes())
    edge_cols = [self._source, self._destination] + ig.es.attributes()
    edges = pandas.DataFrame(iter_edge_rows(ig), columns=edge_cols)
    return (edges, nodes)
python
{ "resource": "" }
q28675
PyGraphistry.authenticate
train
def authenticate():
    """Authenticate using the already-provided configuration.

    Called once automatically per session when uploading and rendering a
    visualization.  Errors (via util.error) when no API key is configured.
    """
    key = PyGraphistry.api_key()
    # Mocks may set _is_authenticated to True up front, so bypass then.
    if key is None and PyGraphistry._is_authenticated == False:
        util.error('API key not set explicitly in `register()` or available at ' + EnvVarNames['api_key'])
    if not PyGraphistry._is_authenticated:
        PyGraphistry._check_key_and_version()
        PyGraphistry._is_authenticated = True
python
{ "resource": "" }
q28676
PyGraphistry.api_key
train
def api_key(value=None):
    """Set or get the API key.

    With no argument, returns the configured key.  Otherwise stores the
    stripped key and forces re-authentication on next use.  The key may
    also be provided via the GRAPHISTRY_API_KEY environment variable.
    """
    if value is None:
        return PyGraphistry._config['api_key']

    # setter
    # Bug fix: compare by equality, not identity (`is not`).  Equal strings
    # are not guaranteed to be the same object, so the identity test could
    # spuriously re-trigger (or, with interning, skip) re-authentication.
    if value != PyGraphistry._config['api_key']:
        PyGraphistry._config['api_key'] = value.strip()
        PyGraphistry._is_authenticated = False
python
{ "resource": "" }
q28677
PyGraphistry.register
train
def register(key=None, server=None, protocol=None, api=None,
             certificate_validation=None, bolt=None):
    """Register the API key and select the server.

    Changing the key affects all derived Plotter instances.  The key may
    also come from the GRAPHISTRY_API_KEY environment variable.

    :param key: API key.
    :param server: URL of the visualization server.
    :param protocol: Protocol used to contact the visualization server.
    :returns: None.

    **Example** ::

        import graphistry
        graphistry.register(key="my api key")
    """
    PyGraphistry.api_key(key)
    PyGraphistry.server(server)
    PyGraphistry.api_version(api)
    PyGraphistry.protocol(protocol)
    PyGraphistry.certificate_validation(certificate_validation)
    PyGraphistry.authenticate()
    PyGraphistry.set_bolt_driver(bolt)
python
{ "resource": "" }
q28678
PyGraphistry.hypergraph
train
def hypergraph(raw_events, entity_types=None, opts={}, drop_na=True,
               drop_edge_attrs=False, verbose=True, direct=False):
    """Transform a dataframe into a hypergraph.

    Creates a node for every row (type 'EventID') and, unless
    ``direct=True``, a node for every unique value within each column;
    edges connect a row's node to its column-value nodes (or, with
    ``direct=True``, the value nodes to one another).  Rows sharing
    metadata thus become transitively connected.

    :param raw_events: Dataframe to transform.
    :param entity_types: Optional list of columns to turn into nodes
        (None means all).
    :param opts: Configuration dictionary; recognised keys:
        'EVENTID' (row-ID column, default: row index),
        'CATEGORIES' (category name -> list of columns sharing values),
        'DELIM' (separator between column name and value in node IDs),
        'SKIP' (columns not turned into nodes),
        'EDGES' (for direct=True, which column pairs get edges).
    :param drop_edge_attrs: Do not copy row attributes onto edges.
    :param verbose: Print size information.
    :param direct: Omit the hypernode and connect an event's value
        nodes directly.
    :returns: {'entities': DF, 'events': DF, 'edges': DF, 'nodes': DF,
        'graph': Plotter}
    :rtype: Dictionary
    """
    from . import hyper
    return hyper.Hypergraph().hypergraph(
        PyGraphistry, raw_events, entity_types, opts, drop_na,
        drop_edge_attrs, verbose, direct)
python
{ "resource": "" }
q28679
PyGraphistry.bind
train
def bind(node=None, source=None, destination=None,
         edge_title=None, edge_label=None, edge_color=None, edge_weight=None,
         point_title=None, point_label=None, point_color=None, point_size=None):
    """Create a base plotter, typically at the start of a program.

    For parameter reference see ``plotter.bind()``.

    :returns: Plotter.
    :rtype: Plotter.
    """
    from . import plotter
    return plotter.Plotter().bind(
        source, destination, node,
        edge_title, edge_label, edge_color, edge_weight,
        point_title, point_label, point_color, point_size)
python
{ "resource": "" }
q28680
UserAgentRequestHandler.do_HEAD
train
def do_HEAD(self):
    """Serve a HEAD request.

    Records the request's User-Agent on the shared queue, then answers
    200 OK with a Location header echoing the requested path.
    """
    self.queue.put(self.headers.get("User-Agent"))
    self.send_response(six.moves.BaseHTTPServer.HTTPStatus.OK)
    self.send_header("Location", self.path)
    self.end_headers()
python
{ "resource": "" }
q28681
ProfileLooter.pages
train
def pages(self):
    # type: () -> ProfileIterator
    """Return an iterator over the profile's Instagram post pages.

    The owner ID is resolved from the username on first use and cached
    for subsequent calls.

    Raises:
        ValueError: when the requested user does not exist.
        RuntimeError: when the account is private and there is no logged
            user (or the logged user does not follow that account).
    """
    if self._owner_id is not None:
        return ProfileIterator(self._owner_id, self.session, self.rhx)
    iterator = ProfileIterator.from_username(self._username, self.session)
    self._owner_id = iterator.owner_id
    return iterator
python
{ "resource": "" }
q28682
PostLooter.medias
train
def medias(self, timeframe=None):
    """Yield the single media dict for the referred post.

    When *timeframe* is given, the post is yielded only if its timestamp
    falls inside the frame; otherwise the generator simply ends.

    Yields:
        dict: a media dictionary obtained from the given post.
    """
    info = self.info
    if timeframe is not None:
        start, end = TimedMediasIterator.get_times(timeframe)
        # NOTE(review): falling back to info["media"] as a timestamp looks
        # suspect (a date field seems intended) — confirm against callers.
        timestamp = info.get("taken_at_timestamp") or info["media"]
        if not (start >= timestamp >= end):
            # Bug fix (PEP 479): raising StopIteration inside a generator
            # becomes a RuntimeError on Python 3.7+; a bare return ends
            # iteration instead.
            return
    yield info
python
{ "resource": "" }
q28683
PostLooter.download
train
def download(self, destination,        # type: Union[str, fs.base.FS]
             condition=None,           # type: Optional[Callable[[dict], bool]]
             media_count=None,         # type: Optional[int]
             timeframe=None,           # type: Optional[_Timeframe]
             new_only=False,           # type: bool
             pgpbar_cls=None,          # type: Optional[Type[ProgressBar]]
             dlpbar_cls=None,          # type: Optional[Type[ProgressBar]]
             ):
    # type: (...) -> int
    """Download the referred post to *destination*.

    See `InstaLooter.download` for argument reference.

    Note:
        Unlike other looter implementations this runs entirely on the
        main thread: a worker handles one *media* at a time (not one
        file), so spawning extra threads would gain nothing.
    """
    destination, close_destination = self._init_destfs(destination)
    queue = Queue()  # type: Queue[Dict]
    queued = self._fill_media_queue(queue, destination, iter(self.medias()),
                                    media_count, new_only, condition)
    queue.put(None)  # sentinel: tells the worker to stop
    worker = InstaDownloader(
        queue=queue,
        destination=destination,
        namegen=self.namegen,
        add_metadata=self.add_metadata,
        dump_json=self.dump_json,
        dump_only=self.dump_only,
        pbar=None,
        session=self.session)
    worker.run()
    return queued
python
{ "resource": "" }
q28684
warn_logging
train
def warn_logging(logger):
    # type: (logging.Logger) -> Callable
    """Build a ``warnings.showwarning`` replacement that logs via *logger*.

    Arguments:
        logger (~logging.Logger): the logger to use.

    Returns:
        function: a callable matching the showwarning signature that
        forwards the message at WARNING level.
    """
    def showwarning(message, category, filename, lineno, file=None, line=None):
        logger.warning(message)
    return showwarning
python
{ "resource": "" }
q28685
wrap_warnings
train
def wrap_warnings(logger):
    """Decorator factory routing warnings raised inside the call to *logger*.

    The wrapped function temporarily swaps ``warnings.showwarning`` for a
    logging-based hook and always restores the previous hook afterwards.

    Arguments:
        logger (~logging.Logger): the logger to wrap warnings with when
            the decorated function is called.

    Returns:
        `function`: a decorator function.
    """
    def decorator(func):
        @functools.wraps(func)
        def new_func(*args, **kwargs):
            previous = warnings.showwarning
            warnings.showwarning = warn_logging(logger)
            try:
                return func(*args, **kwargs)
            finally:
                warnings.showwarning = previous
        return new_func
    return decorator
python
{ "resource": "" }
q28686
date_from_isoformat
train
def date_from_isoformat(isoformat_date):
    """Parse an ISO-8601 (YYYY-MM-DD) string into a `datetime.date`.

    Argument:
        isoformat_date (str): a date in ISO-8601 format.

    Raises:
        ValueError: when the text is not a valid ISO calendar date.
    """
    y, m, d = isoformat_date.split('-')
    return datetime.date(int(y), int(m), int(d))
python
{ "resource": "" }
q28687
get_times_from_cli
train
def get_times_from_cli(cli_token):
    """Convert a CLI time token into a (start, stop) date couple.

    *cli_token* is either "[ISO date]:[ISO date]" (each half optional) or
    one of the shortcuts: thisday, thisweek, thismonth, thisyear.  Either
    element of the result may be None when no bound was supplied.

    Raises:
        ValueError: when the token lacks a colon, is not a recognised
            shortcut, or contains non-ISO dates.
    """
    today = datetime.date.today()
    shortcut_spans = {
        "thisday": dateutil.relativedelta.relativedelta(),
        "thisweek": dateutil.relativedelta.relativedelta(days=7),
        "thismonth": dateutil.relativedelta.relativedelta(months=1),
        "thisyear": dateutil.relativedelta.relativedelta(years=1),
    }
    if cli_token in shortcut_spans:
        return today, today - shortcut_spans[cli_token]

    try:
        start_token, stop_token = cli_token.split(':')
    except ValueError:
        raise ValueError("--time parameter must contain a colon (:)")
    if not start_token and not stop_token:
        # ':' alone: no start date, no stop date
        return None, None
    try:
        start_date = date_from_isoformat(start_token) if start_token else None
        stop_date = date_from_isoformat(stop_token) if stop_token else None
    except ValueError:
        raise ValueError("--time parameter was not provided ISO formatted dates")
    return start_date, stop_date
python
{ "resource": "" }
q28688
BatchRunner.run_all
train
def run_all(self):
    # type: () -> None
    """Run every job specified in the configuration file.

    A single requests session is created up front and shared by all jobs.
    """
    logger.debug("Creating batch session")
    session = Session()
    for section_id in self.parser.sections():
        self.run_job(section_id, session=session)
python
{ "resource": "" }
q28689
BatchRunner.run_job
train
def run_job(self, section_id, session=None):
    # type: (Text, Optional[Session]) -> None
    """Run the job described by the configuration section *section_id*.

    For every looter kind mapped in ``_CLS_MAP``, resolves the section's
    targets, builds a looter per target (logging in when credentials are
    configured) and downloads; failures are logged, not raised.

    Raises:
        KeyError: when the section could not be found.
    """
    if not self.parser.has_section(section_id):
        raise KeyError('section not found: {}'.format(section_id))

    session = session or Session()

    for name, looter_cls in six.iteritems(self._CLS_MAP):
        targets = self.get_targets(self._get(section_id, name))
        quiet = self._getboolean(
            section_id, "quiet", self.args.get("--quiet", False))
        if not targets:
            continue
        logger.info("Launching {} job for section {}".format(name, section_id))
        for target, directory in six.iteritems(targets):
            try:
                logger.info("Downloading {} to {}".format(target, directory))
                looter = looter_cls(
                    target,
                    add_metadata=self._getboolean(section_id, 'add-metadata', False),
                    get_videos=self._getboolean(section_id, 'get-videos', False),
                    videos_only=self._getboolean(section_id, 'videos-only', False),
                    jobs=self._getint(section_id, 'jobs', 16),
                    template=self._get(section_id, 'template', '{id}'),
                    dump_json=self._getboolean(section_id, 'dump-json', False),
                    dump_only=self._getboolean(section_id, 'dump-only', False),
                    extended_dump=self._getboolean(section_id, 'extended-dump', False),
                    session=session)

                if self.parser.has_option(section_id, 'username'):
                    looter.logout()
                    username = self._get(section_id, 'username')
                    password = self._get(section_id, 'password') or \
                        getpass.getpass('Password for "{}": '.format(username))
                    looter.login(username, password)

                n = looter.download(
                    directory,
                    media_count=self._getint(section_id, 'num-to-dl'),
                    # FIXME: timeframe=self._get(section_id, 'timeframe'),
                    new_only=self._getboolean(section_id, 'new', False),
                    pgpbar_cls=None if quiet else TqdmProgressBar,
                    dlpbar_cls=None if quiet else TqdmProgressBar)
                logger.success("Downloaded %i medias !", n)
            except Exception as exception:
                logger.error(six.text_type(exception))
python
{ "resource": "" }
q28690
html_page_context
train
def html_page_context(app, pagename, templatename, context, doctree):
    """Handle the Sphinx ``html-page-context`` event by mutating *context*.

    - Replaces the HTML builder's ``toc`` value with one rendered from the
      whole document tree, so every page shows all document titles plus the
      local table of contents.
    - Forces ``display_toc`` on so the TOC appears even on empty pages.
    - Swaps the ``toctree`` callable for one built from the entire document
      structure that ignores ``maxdepth`` and honours only prune/collapse.
    """
    context['toc'] = get_rendered_toctree(app.builder, pagename)
    # Always display the TOC, even when the page itself has no headings.
    context['display_toc'] = True
    if "toctree" not in context:
        # The json builder does not expose a toctree callable to replace.
        return

    def full_toctree(collapse=True):
        return get_rendered_toctree(app.builder, pagename,
                                    prune=False,
                                    collapse=collapse)

    context['toctree'] = full_toctree
python
{ "resource": "" }
q28691
get_rendered_toctree
train
def get_rendered_toctree(builder, docname, prune=False, collapse=True):
    """Build the full toctree relative to *docname* with the given pruning
    and collapsing options, and return it rendered as an HTML fragment.
    """
    full_tree = build_full_toctree(builder, docname,
                                   prune=prune, collapse=collapse)
    return builder.render_partial(full_tree)['fragment']
python
{ "resource": "" }
q28692
build_full_toctree
train
def build_full_toctree(builder, docname, prune, collapse):
    """Return a single toctree starting from *docname* that merges every
    sub-document toctree found in the master document, or ``None`` when the
    master document declares no toctree at all.
    """
    env = builder.env
    master = env.get_doctree(env.config.master_doc)
    # Resolve every toctree directive of the master document relative to
    # the current page.
    resolved = [
        env.resolve_toctree(docname, builder, node,
                            collapse=collapse, prune=prune)
        for node in master.traverse(addnodes.toctree)
    ]
    if not resolved:
        return None
    merged = resolved[0]
    for tree in resolved[1:]:
        if tree:
            merged.extend(tree.children)
    env.resolve_references(merged, docname, builder)
    return merged
python
{ "resource": "" }
q28693
awsRetry
train
def awsRetry(f):
    """Decorator retrying the wrapped function when AWS raises unexpected
    errors. It should wrap any function that makes use of boto.

    Retries use truncated exponential backoff for up to 300 seconds, and
    only for exceptions accepted by ``awsRetryPredicate``.
    """
    @wraps(f)
    def retrying(*args, **kwargs):
        attempts = retry(delays=truncExpBackoff(),
                         timeout=300,
                         predicate=awsRetryPredicate)
        for attempt in attempts:
            with attempt:
                return f(*args, **kwargs)
    return retrying
python
{ "resource": "" }
q28694
AWSProvisioner._readClusterSettings
train
def _readClusterSettings(self):
    """Populate the provisioner's cluster settings from the EC2 instance
    metadata of the host, which is assumed to be the cluster leader.
    """
    metadata = get_instance_metadata()
    ec2 = boto.ec2.connect_to_region(zoneToRegion(self._zone))
    reservation = ec2.get_all_instances(
        instance_ids=[metadata["instance-id"]])[0]
    leader = reservation.instances[0]
    self.clusterName = str(leader.tags["Name"])
    self._buildContext()
    self._subnetID = leader.subnet_id
    # local-ipv4 from the metadata service is the leader's PRIVATE IP.
    self._leaderPrivateIP = metadata['local-ipv4']
    self._keyName = list(metadata['public-keys'].keys())[0]
    self._tags = self.getLeader().tags
    self._masterPublicKey = self._setSSH()
python
{ "resource": "" }
q28695
AWSProvisioner.destroyCluster
train
def destroyCluster(self):
    """
    Tear the cluster down: cancel outstanding spot requests, terminate all
    (non-impaired) instances, delete their IAM profiles and, once every
    instance is gone, delete the cluster's security group.

    When TOIL_AWS_NODE_DEBUG keeps impaired nodes alive, the security group
    and IAM roles are deliberately left in place so the surviving nodes can
    still be inspected.

    Fix: corrected the misspelled "Succesfully" in the success log message.
    """
    assert self._ctx

    def expectedShutdownErrors(e):
        # Deleting a security group that still has attached instances fails
        # with a 400 "dependent object" error; retry until they detach.
        return e.status == 400 and 'dependent object' in e.body

    instances = self._getNodesInCluster(nodeType=None, both=True)
    spotIDs = self._getSpotRequestIDs()
    if spotIDs:
        self._ctx.ec2.cancel_spot_instance_requests(request_ids=spotIDs)
    # Impaired nodes are kept alive for debugging when TOIL_AWS_NODE_DEBUG
    # is set; only the healthy remainder is terminated.
    instancesToTerminate = awsFilterImpairedNodes(instances, self._ctx.ec2)
    vpcId = None
    if instancesToTerminate:
        vpcId = instancesToTerminate[0].vpc_id
        self._deleteIAMProfiles(instances=instancesToTerminate)
        self._terminateInstances(instances=instancesToTerminate)
    if len(instances) == len(instancesToTerminate):
        # Every instance is being terminated, so the security group can go.
        logger.debug('Deleting security group...')
        removed = False
        for attempt in retry(timeout=300, predicate=expectedShutdownErrors):
            with attempt:
                for sg in self._ctx.ec2.get_all_security_groups():
                    if sg.name == self.clusterName and vpcId and sg.vpc_id == vpcId:
                        try:
                            self._ctx.ec2.delete_security_group(group_id=sg.id)
                            removed = True
                        except BotoServerError as e:
                            if e.error_code == 'InvalidGroup.NotFound':
                                # Already gone (e.g. a concurrent delete).
                                pass
                            else:
                                raise
        if removed:
            logger.debug('... Successfully deleted security group')
    else:
        assert len(instances) > len(instancesToTerminate)
        # the security group can't be deleted until all nodes are terminated
        logger.warning('The TOIL_AWS_NODE_DEBUG environment variable is set and some nodes '
                       'have failed health checks. As a result, the security group & IAM '
                       'roles will not be deleted.')
python
{ "resource": "" }
q28696
AWSProvisioner._waitForIP
train
def _waitForIP(cls, instance):
    """
    Poll until the instance reports an IP address (public IP, public DNS
    name, or private IP), sleeping between polls.

    :type instance: boto.ec2.instance.Instance
    """
    logger.debug('Waiting for ip...')
    have_address = False
    while not have_address:
        # Sleep first: a freshly launched instance never has an address yet.
        time.sleep(a_short_time)
        instance.update()
        have_address = bool(instance.ip_address or
                            instance.public_dns_name or
                            instance.private_ip_address)
    logger.debug('...got ip')
python
{ "resource": "" }
q28697
wait_instances_running
train
def wait_instances_running(ec2, instances):
    """
    Wait until no instance in the given iterable is 'pending'. Yield every
    instance that entered the running state as soon as it does.

    :param boto.ec2.connection.EC2Connection ec2: the EC2 connection to use for making requests
    :param Iterator[Instance] instances: the instances to wait on
    :rtype: Iterator[Instance]
    """
    running_ids, other_ids = set(), set()
    while True:
        pending_ids = set()
        for inst in instances:
            if inst.state == 'pending':
                pending_ids.add(inst.id)
                continue
            # Each instance must leave 'pending' exactly once, hence the
            # duplicate-id assertions.
            if inst.state == 'running':
                assert inst.id not in running_ids
                running_ids.add(inst.id)
            else:
                assert inst.id not in other_ids
                other_ids.add(inst.id)
            yield inst
        log.info('%i instance(s) pending, %i running, %i other.',
                 len(pending_ids), len(running_ids), len(other_ids))
        if not pending_ids:
            break
        # Back off proportionally to how many instances are still pending,
        # bounded between one and ten "short times".
        seconds = max(a_short_time, min(len(pending_ids), 10 * a_short_time))
        log.info('Sleeping for %is', seconds)
        time.sleep(seconds)
        for attempt in retry_ec2():
            with attempt:
                instances = ec2.get_only_instances(list(pending_ids))
python
{ "resource": "" }
q28698
wait_spot_requests_active
train
def wait_spot_requests_active(ec2, requests, timeout=None, tentative=False):
    """
    Wait until no spot request in the given iterator is in the 'open' state
    or, optionally, a timeout occurs. Yield spot requests as soon as they
    leave the 'open' state, in batches (one batch per polling round).

    Any requests still open when this generator exits — normally, on timeout,
    or because the consumer abandoned it — are cancelled.

    :param Iterator[SpotInstanceRequest] requests:

    :param float timeout: Maximum time in seconds to spend waiting or None to wait forever. If a
           timeout occurs, the remaining open requests will be cancelled.

    :param bool tentative: if True, give up on a spot request at the earliest indication of it
           not being fulfilled immediately

    :rtype: Iterator[list[SpotInstanceRequest]]
    """
    if timeout is not None:
        # Convert the relative timeout into an absolute deadline.
        timeout = time.time() + timeout
    active_ids = set()
    other_ids = set()
    open_ids = None

    def cancel():
        # Cancel whatever is still open (used on timeout/early exit/error).
        log.warn('Cancelling remaining %i spot requests.', len(open_ids))
        ec2.cancel_spot_instance_requests(list(open_ids))

    def spot_request_not_found(e):
        # Freshly created requests may briefly be invisible to the API;
        # retry the listing call when EC2 claims the id does not exist.
        error_code = 'InvalidSpotInstanceRequestID.NotFound'
        return isinstance(e, EC2ResponseError) and e.error_code == error_code

    try:
        while True:
            open_ids, eval_ids, fulfill_ids = set(), set(), set()
            batch = []
            for r in requests:
                if r.state == 'open':
                    open_ids.add(r.id)
                    if r.status.code == 'pending-evaluation':
                        eval_ids.add(r.id)
                    elif r.status.code == 'pending-fulfillment':
                        fulfill_ids.add(r.id)
                    else:
                        log.info(
                            'Request %s entered status %s indicating that it will not be '
                            'fulfilled anytime soon.', r.id, r.status.code)
                elif r.state == 'active':
                    # A request must become active exactly once.
                    assert r.id not in active_ids
                    active_ids.add(r.id)
                    batch.append(r)
                else:
                    assert r.id not in other_ids
                    other_ids.add(r.id)
                    batch.append(r)
            if batch:
                yield batch
            log.info('%i spot requests(s) are open (%i of which are pending evaluation and %i '
                     'are pending fulfillment), %i are active and %i are in another state.',
                     *map(len, (open_ids, eval_ids, fulfill_ids, active_ids, other_ids)))
            # Stop when nothing is open, or — in tentative mode — as soon as
            # no open request is still pending evaluation or fulfillment.
            if not open_ids or tentative and not eval_ids and not fulfill_ids:
                break
            sleep_time = 2 * a_short_time
            if timeout is not None and time.time() + sleep_time >= timeout:
                log.warn('Timed out waiting for spot requests.')
                break
            log.info('Sleeping for %is', sleep_time)
            time.sleep(sleep_time)
            for attempt in retry_ec2(retry_while=spot_request_not_found):
                with attempt:
                    requests = ec2.get_all_spot_instance_requests(
                        list(open_ids))
    except BaseException:
        # On any error (including GeneratorExit from an abandoned consumer),
        # cancel the open requests without masking the original exception.
        if open_ids:
            with panic(log):
                cancel()
        raise
    else:
        # Normal exit (break on timeout/tentative): cancel leftovers too.
        if open_ids:
            cancel()
python
{ "resource": "" }
q28699
create_ondemand_instances
train
def create_ondemand_instances(ec2, image_id, spec, num_instances=1):
    """
    Requests the RunInstances EC2 API call but accounts for the race between
    recently created instance profiles, IAM roles and an instance creation
    that refers to them.

    :rtype: list[Instance]
    """
    log.info('Creating %s instance(s) ... ', spec['instance_type'])
    # Newly created IAM roles/profiles propagate slowly; keep retrying the
    # launch while EC2 reports the resulting inconsistencies.
    for attempt in retry_ec2(retry_for=a_long_time,
                             retry_while=inconsistencies_detected):
        with attempt:
            reservation = ec2.run_instances(image_id,
                                            min_count=num_instances,
                                            max_count=num_instances,
                                            **spec)
            return reservation.instances
python
{ "resource": "" }