desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
'When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.'
| def rebuild_auth(self, prepared_request, response):
| headers = prepared_request.headers
url = prepared_request.url
if ('Authorization' in headers):
original_parsed = urlparse(response.request.url)
redirect_parsed = urlparse(url)
if (original_parsed.hostname != redirect_parsed.hostname):
del headers['Authorization']
new_... |
'This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Auth... | def rebuild_proxies(self, prepared_request, proxies):
| headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = (proxies.copy() if (proxies is not None) else {})
if (self.trust_env and (not should_bypass_proxies(url))):
environ_proxies = get_environ_proxies(url)
proxy = environ_proxies.get... |
'Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session\'s settings.'
| def prepare_request(self, request):
| cookies = (request.cookies or {})
if (not isinstance(cookies, cookielib.CookieJar)):
cookies = cookiejar_from_dict(cookies)
merged_cookies = merge_cookies(merge_cookies(RequestsCookieJar(), self.cookies), cookies)
auth = request.auth
if (self.trust_env and (not auth) and (not self.auth)):
... |
'Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Re... | def request(self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None, json=None):
| req = Request(method=method.upper(), url=url, headers=headers, files=files, data=(data or {}), json=json, params=(params or {}), auth=auth, cookies=cookies, hooks=hooks)
prep = self.prepare_request(req)
proxies = (proxies or {})
settings = self.merge_environment_settings(prep.url, proxies, stream, verif... |
'Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.'
| def get(self, url, **kwargs):
| kwargs.setdefault('allow_redirects', True)
return self.request('GET', url, **kwargs)
|
'Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.'
| def options(self, url, **kwargs):
| kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs)
|
'Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.'
| def head(self, url, **kwargs):
| kwargs.setdefault('allow_redirects', False)
return self.request('HEAD', url, **kwargs)
|
'Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional ... | def post(self, url, data=None, json=None, **kwargs):
| return self.request('POST', url, data=data, json=json, **kwargs)
|
'Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.'
| def put(self, url, data=None, **kwargs):
| return self.request('PUT', url, data=data, **kwargs)
|
'Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.'
| def patch(self, url, data=None, **kwargs):
| return self.request('PATCH', url, data=data, **kwargs)
|
'Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.'
| def delete(self, url, **kwargs):
| return self.request('DELETE', url, **kwargs)
|
'Send a given PreparedRequest.'
| def send(self, request, **kwargs):
| kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
if (not isinstance(request, PreparedRequest)):
raise ValueError('You can only send PreparedRequests.')
checked_url... |
'Check the environment and merge it with some settings.'
| def merge_environment_settings(self, url, proxies, stream, verify, cert):
| if self.trust_env:
env_proxies = (get_environ_proxies(url) or {})
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
if ((verify is True) or (verify is None)):
verify = (os.environ.get('REQUESTS_CA_BUNDLE') or os.environ.get('CURL_CA_BUNDLE'))
proxies = m... |
'Returns the appropriate connection adapter for the given URL.'
| def get_adapter(self, url):
| for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix):
return adapter
raise InvalidSchema(("No connection adapters were found for '%s'" % url))
|
'Closes all adapters and as such the session'
| def close(self):
| for v in self.adapters.values():
v.close()
|
'Registers a connection adapter to a prefix.
Adapters are sorted in descending order by key length.'
| def mount(self, prefix, adapter):
| self.adapters[prefix] = adapter
keys_to_move = [k for k in self.adapters if (len(k) < len(prefix))]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
|
'Like iteritems(), but with all lowercase keys.'
| def lower_items(self):
| return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())
|
'Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in th... | def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
| self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, block=block, strict=True, **pool_kwargs)
|
'Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to con... | def proxy_manager_for(self, proxy, **proxy_kwargs):
| if (not (proxy in self.proxy_manager)):
proxy_headers = self.proxy_headers(proxy)
self.proxy_manager[proxy] = proxy_from_url(proxy, proxy_headers=proxy_headers, num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, **proxy_kwargs)
return self.proxy_manager[proxy]
|
'Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually... | def cert_verify(self, conn, url, verify, cert):
| if (url.lower().startswith('https') and verify):
cert_loc = None
if (verify is not True):
cert_loc = verify
if (not cert_loc):
cert_loc = DEFAULT_CA_BUNDLE_PATH
if (not cert_loc):
raise Exception('Could not find a suitable SSL CA ... |
'Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param... | def build_response(self, req, resp):
| response = Response()
response.status_code = getattr(resp, 'status', None)
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, byt... |
'Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.'
| def get_connection(self, url, proxies=None):
| proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.... |
'Disposes of any internal state.
Currently, this just closes the PoolManager, which closes pooled
connections.'
| def close(self):
| self.poolmanager.clear()
|
'Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapte... | def request_url(self, request, proxies):
| proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
if (proxy and (scheme != 'https')):
url = urldefragauth(request.url)
else:
url = request.path_url
return url
|
'Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapter... | def add_headers(self, request, **kwargs):
| pass
|
'Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:cla... | def proxy_headers(self, proxy):
| headers = {}
(username, password) = get_auth_from_url(proxy)
if (username and password):
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
return headers
|
'Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect ti... | def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
| conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = (not ((request.body is None) or ('Content-Length' in request.headers)))
if isinstance(timeout, tuple):
try:
... |
'Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)'
| def is_package(self, fullname):
| return hasattr(self.__get_module(fullname), '__path__')
|
'Return None
Required, if is_package is implemented'
| def get_code(self, fullname):
| self.__get_module(fullname)
return None
|
'Helper for clearing all the keys in a database. Use with
caution!'
| def clear(self):
| for key in self.conn.keys():
self.conn.delete(key)
|
'Verify our vary headers match and construct a real urllib3
HTTPResponse object.'
| def prepare_response(self, request, cached):
| if ('*' in cached.get('vary', {})):
return
for (header, value) in cached.get('vary', {}).items():
if (request.headers.get(header, None) != value):
return
body_raw = cached['response'].pop('body')
try:
body = io.BytesIO(body_raw)
except TypeError:
body = io... |
'Return a valid 1xx warning header value describing the cache
adjustments.
The response is provided too allow warnings like 113
http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need
to explicitly say response is over 24 hours old.'
| def warning(self, response):
| return '110 - "Response is Stale"'
|
'Update the response headers with any new headers.
NOTE: This SHOULD always include some Warning header to
signify that the response was cached by the client, not
by way of the provided headers.'
| def update_headers(self, response):
| return {}
|
'Send a request. Use the request information to see if it
exists in the cache and cache the response if we need to and can.'
| def send(self, request, **kw):
| if (request.method == 'GET'):
cached_response = self.controller.cached_request(request)
if cached_response:
return self.build_response(request, cached_response, from_cache=True)
request.headers.update(self.controller.conditional_headers(request))
resp = super(CacheControlAdap... |
'Build a response by making a request or using the cache.
This will end up calling send and returning a potentially
cached response'
| def build_response(self, request, response, from_cache=False):
| if ((not from_cache) and (request.method == 'GET')):
if (response.status == 304):
cached_response = self.controller.update_cached_response(request, response)
if (cached_response is not response):
from_cache = True
response.read(decode_content=False)
... |
'Normalize the URL to create a safe key for the cache'
| @classmethod
def _urlnorm(cls, uri):
| (scheme, authority, path, query, fragment) = parse_uri(uri)
if ((not scheme) or (not authority)):
raise Exception(('Only absolute URIs are allowed. uri = %s' % uri))
scheme = scheme.lower()
authority = authority.lower()
if (not path):
path = '/'
request_uri =... |
'Parse the cache control headers returning a dictionary with values
for the different directives.'
| def parse_cache_control(self, headers):
| retval = {}
cc_header = 'cache-control'
if ('Cache-Control' in headers):
cc_header = 'Cache-Control'
if (cc_header in headers):
parts = headers[cc_header].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split('=', 1)]) for part in parts if ((-1) != part.find(... |
'Return a cached response if it exists in the cache, otherwise
return False.'
| def cached_request(self, request):
| cache_url = self.cache_url(request.url)
logger.debug('Looking up "%s" in the cache', cache_url)
cc = self.parse_cache_control(request.headers)
if ('no-cache' in cc):
logger.debug('Request header has "no-cache", cache bypassed')
return False
if (('max-age... |
'Algorithm for caching requests.
This assumes a requests Response object.'
| def cache_response(self, request, response, body=None):
| cacheable_status_codes = [200, 203, 300, 301]
if (response.status not in cacheable_status_codes):
logger.debug('Status code %s not in %s', response.status, cacheable_status_codes)
return
response_headers = CaseInsensitiveDict(response.headers)
if ((body is not None) and ('... |
'On a 304 we will get a new set of headers that we want to
update our cached value with, assuming we have one.
This should only ever be called when we\'ve sent an ETag and
gotten a 304 as the response.'
| def update_cached_response(self, request, response):
| cache_url = self.cache_url(request.url)
cached_response = self.serializer.loads(request, self.cache.get(cache_url))
if (not cached_response):
return response
excluded_headers = ['content-length']
cached_response.headers.update(dict(((k, v) for (k, v) in response.headers.items() if (k.lower()... |
'This is where the magic happens.
We do our usually processing through the states and when we have a token
to return we yield the token which pauses processing until the next token
is requested.'
| def __iter__(self):
| self.tokenQueue = deque([])
while self.state():
while self.stream.errors:
(yield {u'type': tokenTypes[u'ParseError'], u'data': self.stream.errors.pop(0)})
while self.tokenQueue:
(yield self.tokenQueue.popleft())
|
'This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.'
| def consumeNumberEntity(self, isHex):
| allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
c = self.stream.char()
while ((c in allowed) and (c is not EOF)):
charStack.append(c)
c = self.stream.char()
charAsInt = int(u''.join(charStack), radix)
if (charAsInt in re... |
'This method replaces the need for "entityInAttributeValueState".'
| def processEntityInAttribute(self, allowedChar):
| self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
|
'This method is a generic handler for emitting the tags. It also sets
the state to "data" because that\'s what\'s needed after a token has been
emitted.'
| def emitCurrentToken(self):
| token = self.currentToken
if (token[u'type'] in tagTokenTypes):
if self.lowercaseElementName:
token[u'name'] = token[u'name'].translate(asciiUpper2Lower)
if (token[u'type'] == tokenTypes[u'EndTag']):
if token[u'data']:
self.tokenQueue.append({u'type': toke... |
'Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
Whether it insert a meta element to define the character set of the
document.
quote_attr_values=True|False
Whether to quote attribute values that don\'t require quoting
per HTML5 parsing rules.
quo... | def __init__(self, **kwargs):
| if (u'quote_char' in kwargs):
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
|
'Node representing an item in the tree.
name - The tag name associated with the node
parent - The parent of the current node (or None for the document node)
value - The value of the current node (applies to text nodes and
comments
attributes - a dict holding name, value pairs for attributes of the node
childNodes - a l... | def __init__(self, name):
| self.name = name
self.parent = None
self.value = None
self.attributes = {}
self.childNodes = []
self._flags = []
|
'Insert node as a child of the current node'
| def appendChild(self, node):
| raise NotImplementedError
|
'Insert data as text in the current node, positioned before the
start of node insertBefore or to the end of the node\'s text.'
| def insertText(self, data, insertBefore=None):
| raise NotImplementedError
|
'Insert node as a child of the current node, before refNode in the
list of child nodes. Raises ValueError if refNode is not a child of
the current node'
| def insertBefore(self, node, refNode):
| raise NotImplementedError
|
'Remove node from the children of the current node'
| def removeChild(self, node):
| raise NotImplementedError
|
'Move all the children of the current node to newParent.
This is needed so that trees that don\'t store text as nodes move the
text in the correct way'
| def reparentChildren(self, newParent):
| for child in self.childNodes:
newParent.appendChild(child)
self.childNodes = []
|
'Return a shallow copy of the current node i.e. a node with the same
name and attributes but with no parent or child nodes'
| def cloneNode(self):
| raise NotImplementedError
|
'Return true if the node has children or text, false otherwise'
| def hasContent(self):
| raise NotImplementedError
|
'Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false'
| def elementInActiveFormattingElements(self, name):
| for item in self.activeFormattingElements[::(-1)]:
if (item == Marker):
break
elif (item.name == name):
return item
return False
|
'Create an element but don\'t insert it anywhere'
| def createElement(self, token):
| name = token[u'name']
namespace = token.get(u'namespace', self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token[u'data']
return element
|
'Switch the function used to insert an element from the
normal one to the misnested table one and back again'
| def _setInsertFromTable(self, value):
| self._insertFromTable = value
if value:
self.insertElement = self.insertElementTable
else:
self.insertElement = self.insertElementNormal
|
'Create an element and insert it into the tree'
| def insertElementTable(self, token):
| element = self.createElement(token)
if (self.openElements[(-1)].name not in tableInsertModeElements):
return self.insertElementNormal(token)
else:
(parent, insertBefore) = self.getTableMisnestedNodePosition()
if (insertBefore is None):
parent.appendChild(element)
... |
'Insert text data.'
| def insertText(self, data, parent=None):
| if (parent is None):
parent = self.openElements[(-1)]
if ((not self.insertFromTable) or (self.insertFromTable and (self.openElements[(-1)].name not in tableInsertModeElements))):
parent.insertText(data)
else:
(parent, insertBefore) = self.getTableMisnestedNodePosition()
paren... |
'Get the foster parent element, and sibling to insert before
(or None) when inserting a misnested table node'
| def getTableMisnestedNodePosition(self):
| lastTable = None
fosterParent = None
insertBefore = None
for elm in self.openElements[::(-1)]:
if (elm.name == u'table'):
lastTable = elm
break
if lastTable:
if lastTable.parent:
fosterParent = lastTable.parent
insertBefore = lastTable
... |
'Return the final tree'
| def getDocument(self):
| return self.document
|
'Return the final fragment'
| def getFragment(self):
| fragment = self.fragmentClass()
self.openElements[0].reparentChildren(fragment)
return fragment
|
'Serialize the subtree of node in the format required by unit tests
node - the node from which to start serializing'
| def testSerializer(self, node):
| raise NotImplementedError
|
'Create the document root'
| def insertRoot(self, token):
| docStr = u''
if self.doctype:
assert self.doctype.name
docStr += (u'<!DOCTYPE %s' % self.doctype.name)
if ((self.doctype.publicId is not None) or (self.doctype.systemId is not None)):
docStr += (u' PUBLIC "%s" ' % self.infosetFilter.coercePubid((self.doctype.publi... |
'strict - raise an exception when a parse error is encountered
tree - a treebuilder class controlling the type of tree that will be
returned. Built in treebuilders can be accessed through
html5lib.treebuilders.getTreeBuilder(treeType)
tokenizer - a class that provides a stream of tokens to the treebuilder.
This may be ... | def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer, strict=False, namespaceHTMLElements=True, debug=False):
| self.strict = strict
if (tree is None):
tree = treebuilders.getTreeBuilder(u'etree')
self.tree = tree(namespaceHTMLElements)
self.tokenizer_class = tokenizer
self.errors = []
self.phases = dict([(name, cls(self, self.tree)) for (name, cls) in getPhases(debug).items()])
|
'The name of the character encoding
that was used to decode the input stream,
or :obj:`None` if that is not determined yet.'
| @property
def documentEncoding(self):
| if (not hasattr(self, u'tokenizer')):
return None
return self.tokenizer.stream.charEncoding[0]
|
'Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)'
| def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
| self._parse(stream, innerHTML=False, encoding=encoding, parseMeta=parseMeta, useChardet=useChardet)
return self.tree.getDocument()
|
'Parse a HTML fragment into a well-formed tree fragment
container - name of the element we\'re setting the innerHTML property
if set to None, default to \'div\'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specifi... | def parseFragment(self, stream, container=u'div', encoding=None, parseMeta=False, useChardet=True):
| self._parse(stream, True, container=container, encoding=encoding)
return self.tree.getFragment()
|
'HTML5 specific normalizations to the token stream'
| def normalizeToken(self, token):
| if (token[u'type'] == tokenTypes[u'StartTag']):
token[u'data'] = dict(token[u'data'][::(-1)])
return token
|
'Generic RCDATA/RAWTEXT Parsing algorithm
contentType - RCDATA or RAWTEXT'
| def parseRCDataRawtext(self, token, contentType):
| assert (contentType in (u'RAWTEXT', u'RCDATA'))
self.tree.insertElement(token)
if (contentType == u'RAWTEXT'):
self.tokenizer.state = self.tokenizer.rawtextState
else:
self.tokenizer.state = self.tokenizer.rcdataState
self.originalPhase = self.phase
self.phase = self.phases[u'tex... |
'Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless ... | def __init__(self, source):
| if (not utils.supports_lone_surrogates):
self.reportCharacterErrors = None
self.replaceCharactersRegexp = None
elif (len(u'\U0010ffff') == 1):
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile(eval(u'"[\\uD800-\\uDFFF]"'))
else:
... |
'Produces a file object from source.
source can be either a file object, local filename or a string.'
| def openStream(self, source):
| if hasattr(source, u'read'):
stream = source
else:
stream = StringIO(source)
return stream
|
'Returns (line, col) of the current position in the stream.'
| def position(self):
| (line, col) = self._position(self.chunkOffset)
return ((line + 1), col)
|
'Read one character from the stream or queue if available. Return
EOF when EOF is reached.'
| def char(self):
| if (self.chunkOffset >= self.chunkSize):
if (not self.readChunk()):
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = (chunkOffset + 1)
return char
|
'Returns a string of characters from the stream up to but not
including any character in \'characters\' or EOF. \'characters\' must be
a container that supports the \'in\' method and iteration over its
characters.'
| def charsUntil(self, characters, opposite=False):
| try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert (ord(c) < 128)
regex = u''.join([(u'\\x%02x' % ord(c)) for c in characters])
if (not opposite):
regex = (u'^%s' % regex)
... |
'Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless ... | def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
| self.rawStream = self.openStream(source)
HTMLUnicodeInputStream.__init__(self, self.rawStream)
self.charEncoding = (codecName(encoding), u'certain')
self.numBytesMeta = 512
self.numBytesChardet = 100
self.defaultEncoding = u'windows-1252'
if (self.charEncoding[0] is None):
self.charE... |
'Produces a file object from source.
source can be either a file object, local filename or a string.'
| def openStream(self, source):
| if hasattr(source, u'read'):
stream = source
else:
stream = BytesIO(source)
try:
stream.seek(stream.tell())
except:
stream = BufferedStream(stream)
return stream
|
'Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None'
| def detectBOM(self):
| bomDict = {codecs.BOM_UTF8: u'utf-8', codecs.BOM_UTF16_LE: u'utf-16-le', codecs.BOM_UTF16_BE: u'utf-16-be', codecs.BOM_UTF32_LE: u'utf-32-le', codecs.BOM_UTF32_BE: u'utf-32-be'}
string = self.rawStream.read(4)
assert isinstance(string, bytes)
encoding = bomDict.get(string[:3])
seek = 3
if (not e... |
'Report the encoding declared by the meta element'
| def detectEncodingMeta(self):
| buffer = self.rawStream.read(self.numBytesMeta)
assert isinstance(buffer, bytes)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if (encoding in (u'utf-16', u'utf-16-be', u'utf-16-le')):
encoding = u'utf-8'
return encoding
|
'Skip past a list of characters'
| def skip(self, chars=spaceCharactersBytes):
| p = self.position
while (p < len(self)):
c = self[p:(p + 1)]
if (c not in chars):
self._position = p
return c
p += 1
self._position = p
return None
|
'Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone'
| def matchBytes(self, bytes):
| p = self.position
data = self[p:(p + len(bytes))]
rv = data.startswith(bytes)
if rv:
self.position += len(bytes)
return rv
|
'Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match'
| def jumpTo(self, bytes):
| newPosition = self[self.position:].find(bytes)
if (newPosition > (-1)):
if (self._position == (-1)):
self._position = 0
self._position += ((newPosition + len(bytes)) - 1)
return True
else:
raise StopIteration
|
'string - the data to work on for encoding detection'
| def __init__(self, data):
| self.data = EncodingBytes(data)
self.encoding = None
|
'Skip over comments'
| def handleComment(self):
| return self.data.jumpTo('-->')
|
'Return a name,value pair for the next attribute in the stream,
if one is found, or None'
| def getAttribute(self):
| data = self.data
c = data.skip((spaceCharactersBytes | frozenset(['/'])))
assert ((c is None) or (len(c) == 1))
if (c in ('>', None)):
return None
attrName = []
attrValue = []
while True:
if ((c == '=') and attrName):
break
elif (c in spaceCharactersBytes)... |
'Initialise an instance.
:param url: The URL of the index. If not specified, the URL for PyPI is
used.'
| def __init__(self, url=None):
| self.url = (url or DEFAULT_INDEX)
self.read_configuration()
(scheme, netloc, path, params, query, frag) = urlparse(self.url)
if (params or query or frag or (scheme not in ('http', 'https'))):
raise DistlibException(('invalid repository: %s' % self.url))
self.password_handler = None
... |
'Get the distutils command for interacting with PyPI configurations.
:return: the command.'
def _get_pypirc_command(self):
    """Get the distutils command for interacting with PyPI configurations.

    :return: a ``PyPIRCCommand`` bound to a fresh ``Distribution``.

    NOTE(review): depends on ``distutils``, which was removed from the
    standard library in Python 3.12 — confirm a shim (setuptools) is
    available on modern interpreters.
    """
    from distutils.core import Distribution
    from distutils.config import PyPIRCCommand
    dist = Distribution()
    return PyPIRCCommand(dist)
|
'Read the PyPI access configuration as supported by distutils, getting
PyPI to do the acutal work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.'
def read_configuration(self):
    """Read the PyPI access configuration as supported by distutils,
    getting PyPI to do the actual work.

    Populates the ``username``, ``password``, ``realm`` and ``url``
    attributes from the configuration; ``realm`` defaults to 'pypi' and
    ``url`` falls back to the current value when absent.
    """
    command = self._get_pypirc_command()
    # Point the command at this index so the right .pypirc section is read.
    command.repository = self.url
    config = command._read_pypirc()
    self.username = config.get('username')
    self.password = config.get('password')
    self.realm = config.get('realm', 'pypi')
    self.url = config.get('repository', self.url)
|
'Save the PyPI access configuration. You must have set ``username`` and
``password`` attributes before calling this method.
Again, distutils is used to do the actual work.'
def save_configuration(self):
    """Save the PyPI access configuration.

    You must have set ``username`` and ``password`` before calling this
    method (``check_credentials`` raises otherwise). distutils does the
    actual persistence work.
    """
    self.check_credentials()
    command = self._get_pypirc_command()
    command._store_pypirc(self.username, self.password)
|
'Check that ``username`` and ``password`` have been set, and raise an
exception if not.'
| def check_credentials(self):
| if ((self.username is None) or (self.password is None)):
raise DistlibException('username and password must be set')
pm = HTTPPasswordMgr()
(_, netloc, _, _, _, _) = urlparse(self.url)
pm.add_password(self.realm, netloc, self.username, self.password)
self.password_handler = HT... |
'Register a distribution on PyPI, using the provided metadata.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the distribution to be
registered.
:return: The HTTP response received from PyPI upon submission of the
request.'
def register(self, metadata):
    """Register a distribution on PyPI, using the provided metadata.

    :param metadata: A :class:`Metadata` instance defining at least a
                     name and version number for the distribution to be
                     registered.
    :return: The HTTP response received from PyPI upon submission of
             the final ('submit') request.
    """
    self.check_credentials()
    metadata.validate()
    fields = metadata.todict()
    # PyPI expects a 'verify' action first; its response is discarded.
    fields[':action'] = 'verify'
    self.send_request(self.encode_request(fields.items(), []))
    # Then the real submission, whose response we return.
    fields[':action'] = 'submit'
    return self.send_request(self.encode_request(fields.items(), []))
|
'Thread runner for reading lines of from a subprocess into a buffer.
:param name: The logical name of the stream (used for logging only).
:param stream: The stream to read from. This will typically a pipe
connected to the output stream of a subprocess.
:param outbuf: The list to append the read lines to.'
def _reader(self, name, stream, outbuf):
    """Thread runner for reading lines from a subprocess into a buffer.

    :param name: The logical name of the stream (used for logging only).
    :param stream: The stream to read from. This will typically be a
                   pipe connected to the output stream of a subprocess.
    :param outbuf: The list to append the read lines to.
    """
    while True:
        raw = stream.readline()
        # An empty read means the pipe reached EOF.
        if not raw:
            break
        line = raw.decode('utf-8').rstrip()
        outbuf.append(line)
        logger.debug('%s: %s' % (name, line))
    stream.close()
|
'Return a suitable command for signing a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer\'s
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in ve... | def get_sign_command(self, filename, signer, sign_password, keystore=None):
| cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if (keystore is None):
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
if (sign_password is not None):
cmd.extend(['--batch', '--passphrase-fd', '0'])
td = tempfile.mkdtemp()
sf = os.path.join(td, ... |
'Run a command in a child process , passing it any input data specified.
:param cmd: The command to run.
:param input_data: If specified, this must be a byte string containing
data to be sent to the child process.
:return: A tuple consisting of the subprocess\' exit code, a list of
lines read from the subprocess\' ``st... | def run_command(self, cmd, input_data=None):
| kwargs = {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE}
if (input_data is not None):
kwargs['stdin'] = subprocess.PIPE
stdout = []
stderr = []
p = subprocess.Popen(cmd, **kwargs)
t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
t1.start()
t2 = Thread(ta... |
'Sign a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer\'s
private key used for signing.
:param keystore: The path to a directory which contains the keys
def sign_file(self, filename, signer, sign_password, keystore=None):
    """Sign a file.

    :param filename: The pathname to the file to be signed.
    :param signer: The identifier of the signer of the file.
    :param sign_password: The passphrase for the signer's private key
                          used for signing.
    :param keystore: The path to a directory which contains the keys
                     used in signing. If not specified, the instance's
                     default keystore is used.
    :return: The pathname of the signature file produced.
    :raises DistlibException: if the sign command exits non-zero.
    """
    command, sig_file = self.get_sign_command(filename, signer, sign_password, keystore)
    # The passphrase is fed to the child process on stdin.
    rc, stdout, stderr = self.run_command(command, sign_password.encode('utf-8'))
    if rc != 0:
        raise DistlibException('sign command failed with error code %s' % rc)
    return sig_file
|
'Upload a release file to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the file to be uploaded.
:param filename: The pathname of the file to be uploaded.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the sign... | def upload_file(self, metadata, filename, signer=None, sign_password=None, filetype='sdist', pyversion='source', keystore=None):
| self.check_credentials()
if (not os.path.exists(filename)):
raise DistlibException(('not found: %s' % filename))
metadata.validate()
d = metadata.todict()
sig_file = None
if signer:
if (not self.gpg):
logger.warning('no signing program available - ... |
'Upload documentation to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the documentation to be
uploaded.
:param doc_dir: The pathname of the directory which contains the
documentation. This should be the directory that
contains the ``index.html`` for the docume... | def upload_documentation(self, metadata, doc_dir):
| self.check_credentials()
if (not os.path.isdir(doc_dir)):
raise DistlibException(('not a directory: %r' % doc_dir))
fn = os.path.join(doc_dir, 'index.html')
if (not os.path.exists(fn)):
raise DistlibException(('not found: %r' % fn))
metadata.validate()
(name, versi... |
'Return a suitable command for verifying a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
def get_verify_command(self, signature_filename, data_filename, keystore=None):
    """Return a suitable command for verifying a file.

    :param signature_filename: The pathname to the file containing the
                               signature.
    :param data_filename: The pathname to the file containing the
                          signed data.
    :param keystore: The path to a directory which contains the keys
                     used in verification. If not specified, the
                     instance's ``gpg_home`` attribute is used instead.
    :return: The verifying command as a list of arguments.
    """
    cmd = [self.gpg, '--status-fd', '2', '--no-tty']
    # Fall back to the instance-wide GPG home when no keystore is given;
    # either may still be falsy, in which case --homedir is omitted.
    home = self.gpg_home if keystore is None else keystore
    if home:
        cmd.extend(['--homedir', home])
    cmd.extend(['--verify', signature_filename, data_filename])
    logger.debug('invoking: %s', ' '.join(cmd))
    return cmd
|
'Verify a signature for a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance\'s ``gpg_home`` a... | def verify_signature(self, signature_filename, data_filename, keystore=None):
| if (not self.gpg):
raise DistlibException('verification unavailable because gpg unavailable')
cmd = self.get_verify_command(signature_filename, data_filename, keystore)
(rc, stdout, stderr) = self.run_command(cmd)
if (rc not in (0, 1)):
raise DistlibException(('verify comm... |
'This is a convenience method for downloading a file from an URL.
Normally, this will be a file from the index, though currently
no check is made for this (i.e. a file can be downloaded from
anywhere).
The method is just like the :func:`urlretrieve` function in the
standard library, except that it allows digest computa... | def download_file(self, url, destfile, digest=None, reporthook=None):
| if (digest is None):
digester = None
logger.debug('No digest specified')
else:
if isinstance(digest, (list, tuple)):
(hasher, digest) = digest
else:
hasher = 'md5'
digester = getattr(hashlib, hasher)()
logger.debug(('Digest specifi... |
'Send a standard library :class:`Request` to PyPI and return its
response.
:param req: The request to send.
:return: The HTTP response from PyPI (a standard library HTTPResponse).'
def send_request(self, req):
    """Send a standard library :class:`Request` to PyPI and return its
    response.

    :param req: The request to send.
    :return: The HTTP response from PyPI (a standard library
             HTTPResponse).
    """
    # Install whichever optional handlers are configured (auth, SSL),
    # skipping any that are unset/falsy.
    handlers = [h for h in (self.password_handler, self.ssl_verifier) if h]
    opener = build_opener(*handlers)
    return opener.open(req)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.