signature
stringlengths
8
3.44k
body
stringlengths
0
1.41M
docstring
stringlengths
1
122k
id
stringlengths
5
17
@property<EOL><INDENT>def qop(self):<DEDENT>
def on_update(header_set):<EOL><INDENT>if not header_set and '<STR_LIT>' in self:<EOL><INDENT>del self['<STR_LIT>']<EOL><DEDENT>elif header_set:<EOL><INDENT>self['<STR_LIT>'] = header_set.to_header()<EOL><DEDENT><DEDENT>return parse_set_header(self.get('<STR_LIT>'), on_update)<EOL>
Indicates what "quality of protection" the client has applied to the message for HTTP digest auth.
f5880:c31:m1
def set_basic(self, realm='<STR_LIT>'):
dict.clear(self)<EOL>dict.update(self, {'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': realm})<EOL>if self.on_update:<EOL><INDENT>self.on_update(self)<EOL><DEDENT>
Clear the auth info and enable basic auth.
f5880:c32:m1
def set_digest(self, realm, nonce, qop=('<STR_LIT>',), opaque=None,<EOL>algorithm=None, stale=False):
d = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': realm,<EOL>'<STR_LIT>': nonce,<EOL>'<STR_LIT>': dump_header(qop)<EOL>}<EOL>if stale:<EOL><INDENT>d['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if opaque is not None:<EOL><INDENT>d['<STR_LIT>'] = opaque<EOL><DEDENT>if algorithm is not None:<EOL><INDENT>d['<STR_LIT>'] = algorithm<EOL><DEDENT>dict.clear(self)<EOL>dict.update(self, d)<EOL>if self.on_update:<EOL><INDENT>self.on_update(self)<EOL><DEDENT>
Clear the auth info and enable digest auth.
f5880:c32:m2
def to_header(self):
d = dict(self)<EOL>auth_type = d.pop('<STR_LIT>', None) or '<STR_LIT>'<EOL>return '<STR_LIT>' % (auth_type.title(), '<STR_LIT:U+002CU+0020>'.join([<EOL>'<STR_LIT>' % (key, quote_header_value(value,<EOL>allow_token=key not in self._require_quoting))<EOL>for key, value in iteritems(d)<EOL>]))<EOL>
Convert the stored values into a WWW-Authenticate header.
f5880:c32:m3
def auth_property(name, doc=None):
def _set_value(self, value):<EOL><INDENT>if value is None:<EOL><INDENT>self.pop(name, None)<EOL><DEDENT>else:<EOL><INDENT>self[name] = str(value)<EOL><DEDENT><DEDENT>return property(lambda x: x.get(name), _set_value, doc=doc)<EOL>
A static helper function for subclasses to add extra authentication system properties onto a class:: class FooAuthenticate(WWWAuthenticate): special_realm = auth_property('special_realm') For more information have a look at the sourcecode to see how the regular properties (:attr:`realm` etc.) are implemented.
f5880:c32:m6
@property<EOL><INDENT>def content_type(self):<DEDENT>
return self.headers.get('<STR_LIT>')<EOL>
The content-type sent in the header. Usually not available
f5880:c33:m2
@property<EOL><INDENT>def content_length(self):<DEDENT>
return int(self.headers.get('<STR_LIT>') or <NUM_LIT:0>)<EOL>
The content-length sent in the header. Usually not available
f5880:c33:m3
@property<EOL><INDENT>def mimetype(self):<DEDENT>
self._parse_content_type()<EOL>return self._parsed_content_type[<NUM_LIT:0>]<EOL>
Like :attr:`content_type` but without parameters (eg, without charset, type etc.). For example if the content type is ``text/html; charset=utf-8`` the mimetype would be ``'text/html'``. .. versionadded:: 0.7
f5880:c33:m4
@property<EOL><INDENT>def mimetype_params(self):<DEDENT>
self._parse_content_type()<EOL>return self._parsed_content_type[<NUM_LIT:1>]<EOL>
The mimetype parameters as dict. For example if the content type is ``text/html; charset=utf-8`` the params would be ``{'charset': 'utf-8'}``. .. versionadded:: 0.7
f5880:c33:m5
def save(self, dst, buffer_size=<NUM_LIT>):
from shutil import copyfileobj<EOL>close_dst = False<EOL>if isinstance(dst, string_types):<EOL><INDENT>dst = open(dst, '<STR_LIT:wb>')<EOL>close_dst = True<EOL><DEDENT>try:<EOL><INDENT>copyfileobj(self.stream, dst, buffer_size)<EOL><DEDENT>finally:<EOL><INDENT>if close_dst:<EOL><INDENT>dst.close()<EOL><DEDENT><DEDENT>
Save the file to a destination path or file object. If the destination is a file object you have to close it yourself after the call. The buffer size is the number of bytes held in memory during the copy process. It defaults to 16KB. For secure file saving also have a look at :func:`secure_filename`. :param dst: a filename or open file object the uploaded file is saved to. :param buffer_size: the size of the buffer. This works the same as the `length` parameter of :func:`shutil.copyfileobj`.
f5880:c33:m6
def close(self):
try:<EOL><INDENT>self.stream.close()<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT>
Close the underlying file if possible.
f5880:c33:m7
def responder(f):
return update_wrapper(lambda *a: f(*a)(*a[-<NUM_LIT:2>:]), f)<EOL>
Marks a function as responder. Decorate a function with it and it will automatically call the return value as WSGI application. Example:: @responder def application(environ, start_response): return Response('Hello World!')
f5881:m0
def get_current_url(environ, root_only=False, strip_querystring=False,<EOL>host_only=False, trusted_hosts=None):
tmp = [environ['<STR_LIT>'], '<STR_LIT>', get_host(environ, trusted_hosts)]<EOL>cat = tmp.append<EOL>if host_only:<EOL><INDENT>return uri_to_iri('<STR_LIT>'.join(tmp) + '<STR_LIT:/>')<EOL><DEDENT>cat(url_quote(wsgi_get_bytes(environ.get('<STR_LIT>', '<STR_LIT>'))).rstrip('<STR_LIT:/>'))<EOL>cat('<STR_LIT:/>')<EOL>if not root_only:<EOL><INDENT>cat(url_quote(wsgi_get_bytes(environ.get('<STR_LIT>', '<STR_LIT>')).lstrip(b'<STR_LIT:/>')))<EOL>if not strip_querystring:<EOL><INDENT>qs = get_query_string(environ)<EOL>if qs:<EOL><INDENT>cat('<STR_LIT:?>' + qs)<EOL><DEDENT><DEDENT><DEDENT>return uri_to_iri('<STR_LIT>'.join(tmp))<EOL>
A handy helper function that recreates the full URL for the current request or parts of it. Here an example: >>> from werkzeug.test import create_environ >>> env = create_environ("/?param=foo", "http://localhost/script") >>> get_current_url(env) 'http://localhost/script/?param=foo' >>> get_current_url(env, root_only=True) 'http://localhost/script/' >>> get_current_url(env, host_only=True) 'http://localhost/' >>> get_current_url(env, strip_querystring=True) 'http://localhost/script/' This optionally it verifies that the host is in a list of trusted hosts. If the host is not in there it will raise a :exc:`~werkzeug.exceptions.SecurityError`. :param environ: the WSGI environment to get the current URL from. :param root_only: set `True` if you only want the root URL. :param strip_querystring: set to `True` if you don't want the querystring. :param host_only: set to `True` if the host URL should be returned. :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted` for more information.
f5881:m1
def host_is_trusted(hostname, trusted_list):
if not hostname:<EOL><INDENT>return False<EOL><DEDENT>if isinstance(trusted_list, string_types):<EOL><INDENT>trusted_list = [trusted_list]<EOL><DEDENT>def _normalize(hostname):<EOL><INDENT>if '<STR_LIT::>' in hostname:<EOL><INDENT>hostname = hostname.rsplit('<STR_LIT::>', <NUM_LIT:1>)[<NUM_LIT:0>]<EOL><DEDENT>return _encode_idna(hostname)<EOL><DEDENT>hostname = _normalize(hostname)<EOL>for ref in trusted_list:<EOL><INDENT>if ref.startswith('<STR_LIT:.>'):<EOL><INDENT>ref = ref[<NUM_LIT:1>:]<EOL>suffix_match = True<EOL><DEDENT>else:<EOL><INDENT>suffix_match = False<EOL><DEDENT>ref = _normalize(ref)<EOL>if ref == hostname:<EOL><INDENT>return True<EOL><DEDENT>if suffix_match and hostname.endswith('<STR_LIT:.>' + ref):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>
Checks if a host is trusted against a list. This also takes care of port normalization. .. versionadded:: 0.9 :param hostname: the hostname to check :param trusted_list: a list of hostnames to check against. If a hostname starts with a dot it will match against all subdomains as well.
f5881:m2
def get_host(environ, trusted_hosts=None):
if '<STR_LIT>' in environ:<EOL><INDENT>rv = environ['<STR_LIT>'].split('<STR_LIT:U+002C>')[<NUM_LIT:0>].strip()<EOL><DEDENT>elif '<STR_LIT>' in environ:<EOL><INDENT>rv = environ['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>rv = environ['<STR_LIT>']<EOL>if (environ['<STR_LIT>'], environ['<STR_LIT>']) notin (('<STR_LIT>', '<STR_LIT>'), ('<STR_LIT:http>', '<STR_LIT>')):<EOL><INDENT>rv += '<STR_LIT::>' + environ['<STR_LIT>']<EOL><DEDENT><DEDENT>if trusted_hosts is not None:<EOL><INDENT>if not host_is_trusted(rv, trusted_hosts):<EOL><INDENT>from werkzeug.exceptions import SecurityError<EOL>raise SecurityError('<STR_LIT>' % rv)<EOL><DEDENT><DEDENT>return rv<EOL>
Return the real host for the given WSGI environment. This takes care of the `X-Forwarded-Host` header. Optionally it verifies that the host is in a list of trusted hosts. If the host is not in there it will raise a :exc:`~werkzeug.exceptions.SecurityError`. :param environ: the WSGI environment to get the host of. :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted` for more information.
f5881:m3
def get_content_length(environ):
content_length = environ.get('<STR_LIT>')<EOL>if content_length is not None:<EOL><INDENT>try:<EOL><INDENT>return max(<NUM_LIT:0>, int(content_length))<EOL><DEDENT>except (ValueError, TypeError):<EOL><INDENT>pass<EOL><DEDENT><DEDENT>
Returns the content length from the WSGI environment as integer. If it's not available `None` is returned. .. versionadded:: 0.9 :param environ: the WSGI environ to fetch the content length from.
f5881:m4
def get_input_stream(environ, safe_fallback=True):
stream = environ['<STR_LIT>']<EOL>content_length = get_content_length(environ)<EOL>if environ.get('<STR_LIT>'):<EOL><INDENT>return stream<EOL><DEDENT>if content_length is None:<EOL><INDENT>return safe_fallback and _empty_stream or stream<EOL><DEDENT>return LimitedStream(stream, content_length)<EOL>
Returns the input stream from the WSGI environment and wraps it in the most sensible way possible. The stream returned is not the raw WSGI stream in most cases but one that is safe to read from without taking into account the content length. .. versionadded:: 0.9 :param environ: the WSGI environ to fetch the stream from. :param safe: indicates weather the function should use an empty stream as safe fallback or just return the original WSGI input stream if it can't wrap it safely. The default is to return an empty string in those cases.
f5881:m5
def get_query_string(environ):
qs = wsgi_get_bytes(environ.get('<STR_LIT>', '<STR_LIT>'))<EOL>return try_coerce_native(url_quote(qs, safe='<STR_LIT>'))<EOL>
Returns the `QUERY_STRING` from the WSGI environment. This also takes care about the WSGI decoding dance on Python 3 environments as a native string. The string returned will be restricted to ASCII characters. .. versionadded:: 0.9 :param environ: the WSGI environment object to get the query string from.
f5881:m6
def get_path_info(environ, charset='<STR_LIT:utf-8>', errors='<STR_LIT:replace>'):
path = wsgi_get_bytes(environ.get('<STR_LIT>', '<STR_LIT>'))<EOL>return to_unicode(path, charset, errors, allow_none_charset=True)<EOL>
Returns the `PATH_INFO` from the WSGI environment and properly decodes it. This also takes care about the WSGI decoding dance on Python 3 environments. if the `charset` is set to `None` a bytestring is returned. .. versionadded:: 0.9 :param environ: the WSGI environment object to get the path from. :param charset: the charset for the path info, or `None` if no decoding should be performed. :param errors: the decoding error handling.
f5881:m7
def get_script_name(environ, charset='<STR_LIT:utf-8>', errors='<STR_LIT:replace>'):
path = wsgi_get_bytes(environ.get('<STR_LIT>', '<STR_LIT>'))<EOL>return to_unicode(path, charset, errors, allow_none_charset=True)<EOL>
Returns the `SCRIPT_NAME` from the WSGI environment and properly decodes it. This also takes care about the WSGI decoding dance on Python 3 environments. if the `charset` is set to `None` a bytestring is returned. .. versionadded:: 0.9 :param environ: the WSGI environment object to get the path from. :param charset: the charset for the path, or `None` if no decoding should be performed. :param errors: the decoding error handling.
f5881:m8
def pop_path_info(environ, charset='<STR_LIT:utf-8>', errors='<STR_LIT:replace>'):
path = environ.get('<STR_LIT>')<EOL>if not path:<EOL><INDENT>return None<EOL><DEDENT>script_name = environ.get('<STR_LIT>', '<STR_LIT>')<EOL>old_path = path<EOL>path = path.lstrip('<STR_LIT:/>')<EOL>if path != old_path:<EOL><INDENT>script_name += '<STR_LIT:/>' * (len(old_path) - len(path))<EOL><DEDENT>if '<STR_LIT:/>' not in path:<EOL><INDENT>environ['<STR_LIT>'] = '<STR_LIT>'<EOL>environ['<STR_LIT>'] = script_name + path<EOL>rv = wsgi_get_bytes(path)<EOL><DEDENT>else:<EOL><INDENT>segment, path = path.split('<STR_LIT:/>', <NUM_LIT:1>)<EOL>environ['<STR_LIT>'] = '<STR_LIT:/>' + path<EOL>environ['<STR_LIT>'] = script_name + segment<EOL>rv = wsgi_get_bytes(segment)<EOL><DEDENT>return to_unicode(rv, charset, errors, allow_none_charset=True)<EOL>
Removes and returns the next segment of `PATH_INFO`, pushing it onto `SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`. If the `charset` is set to `None` a bytestring is returned. If there are empty segments (``'/foo//bar``) these are ignored but properly pushed to the `SCRIPT_NAME`: >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} >>> pop_path_info(env) 'a' >>> env['SCRIPT_NAME'] '/foo/a' >>> pop_path_info(env) 'b' >>> env['SCRIPT_NAME'] '/foo/a/b' .. versionadded:: 0.5 .. versionchanged:: 0.9 The path is now decoded and a charset and encoding parameter can be provided. :param environ: the WSGI environment that is modified.
f5881:m9
def peek_path_info(environ, charset='<STR_LIT:utf-8>', errors='<STR_LIT:replace>'):
segments = environ.get('<STR_LIT>', '<STR_LIT>').lstrip('<STR_LIT:/>').split('<STR_LIT:/>', <NUM_LIT:1>)<EOL>if segments:<EOL><INDENT>return to_unicode(wsgi_get_bytes(segments[<NUM_LIT:0>]),<EOL>charset, errors, allow_none_charset=True)<EOL><DEDENT>
Returns the next segment on the `PATH_INFO` or `None` if there is none. Works like :func:`pop_path_info` without modifying the environment: >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} >>> peek_path_info(env) 'a' >>> peek_path_info(env) 'a' If the `charset` is set to `None` a bytestring is returned. .. versionadded:: 0.5 .. versionchanged:: 0.9 The path is now decoded and a charset and encoding parameter can be provided. :param environ: the WSGI environment that is checked.
f5881:m10
def extract_path_info(environ_or_baseurl, path_or_url, charset='<STR_LIT:utf-8>',<EOL>errors='<STR_LIT:replace>', collapse_http_schemes=True):
def _normalize_netloc(scheme, netloc):<EOL><INDENT>parts = netloc.split(u'<STR_LIT:@>', <NUM_LIT:1>)[-<NUM_LIT:1>].split(u'<STR_LIT::>', <NUM_LIT:1>)<EOL>if len(parts) == <NUM_LIT:2>:<EOL><INDENT>netloc, port = parts<EOL>if (scheme == u'<STR_LIT:http>' and port == u'<STR_LIT>') or(scheme == u'<STR_LIT>' and port == u'<STR_LIT>'):<EOL><INDENT>port = None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>netloc = parts[<NUM_LIT:0>]<EOL>port = None<EOL><DEDENT>if port is not None:<EOL><INDENT>netloc += u'<STR_LIT::>' + port<EOL><DEDENT>return netloc<EOL><DEDENT>path = uri_to_iri(path_or_url, charset, errors)<EOL>if isinstance(environ_or_baseurl, dict):<EOL><INDENT>environ_or_baseurl = get_current_url(environ_or_baseurl,<EOL>root_only=True)<EOL><DEDENT>base_iri = uri_to_iri(environ_or_baseurl, charset, errors)<EOL>base_scheme, base_netloc, base_path = url_parse(base_iri)[:<NUM_LIT:3>]<EOL>cur_scheme, cur_netloc, cur_path, =url_parse(url_join(base_iri, path))[:<NUM_LIT:3>]<EOL>base_netloc = _normalize_netloc(base_scheme, base_netloc)<EOL>cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)<EOL>if collapse_http_schemes:<EOL><INDENT>for scheme in base_scheme, cur_scheme:<EOL><INDENT>if scheme not in (u'<STR_LIT:http>', u'<STR_LIT>'):<EOL><INDENT>return None<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if not (base_scheme in (u'<STR_LIT:http>', u'<STR_LIT>') andbase_scheme == cur_scheme):<EOL><INDENT>return None<EOL><DEDENT><DEDENT>if base_netloc != cur_netloc:<EOL><INDENT>return None<EOL><DEDENT>base_path = base_path.rstrip(u'<STR_LIT:/>')<EOL>if not cur_path.startswith(base_path):<EOL><INDENT>return None<EOL><DEDENT>return u'<STR_LIT:/>' + cur_path[len(base_path):].lstrip(u'<STR_LIT:/>')<EOL>
Extracts the path info from the given URL (or WSGI environment) and path. The path info returned is a unicode string, not a bytestring suitable for a WSGI environment. The URLs might also be IRIs. If the path info could not be determined, `None` is returned. Some examples: >>> extract_path_info('http://example.com/app', '/app/hello') u'/hello' >>> extract_path_info('http://example.com/app', ... 'https://example.com/app/hello') u'/hello' >>> extract_path_info('http://example.com/app', ... 'https://example.com/app/hello', ... collapse_http_schemes=False) is None True Instead of providing a base URL you can also pass a WSGI environment. .. versionadded:: 0.6 :param environ_or_baseurl: a WSGI environment dict, a base URL or base IRI. This is the root of the application. :param path_or_url: an absolute path from the server root, a relative path (in which case it's the path info) or a full URL. Also accepts IRIs and unicode parameters. :param charset: the charset for byte data in URLs :param errors: the error handling on decode :param collapse_http_schemes: if set to `False` the algorithm does not assume that http and https on the same server point to the same resource.
f5881:m11
def wrap_file(environ, file, buffer_size=<NUM_LIT>):
return environ.get('<STR_LIT>', FileWrapper)(file, buffer_size)<EOL>
Wraps a file. This uses the WSGI server's file wrapper if available or otherwise the generic :class:`FileWrapper`. .. versionadded:: 0.5 If the file wrapper from the WSGI server is used it's important to not iterate over it from inside the application but to pass it through unchanged. If you want to pass out a file wrapper inside a response object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`. More information about file wrappers are available in :pep:`333`. :param file: a :class:`file`-like object with a :meth:`~file.read` method. :param buffer_size: number of bytes for one iteration.
f5881:m12
def _make_chunk_iter(stream, limit, buffer_size):
if isinstance(stream, (bytes, bytearray, text_type)):<EOL><INDENT>raise TypeError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if not hasattr(stream, '<STR_LIT>'):<EOL><INDENT>for item in stream:<EOL><INDENT>if item:<EOL><INDENT>yield item<EOL><DEDENT><DEDENT>return<EOL><DEDENT>if not isinstance(stream, LimitedStream) and limit is not None:<EOL><INDENT>stream = LimitedStream(stream, limit)<EOL><DEDENT>_read = stream.read<EOL>while <NUM_LIT:1>:<EOL><INDENT>item = _read(buffer_size)<EOL>if not item:<EOL><INDENT>break<EOL><DEDENT>yield item<EOL><DEDENT>
Helper for the line and chunk iter functions.
f5881:m13
def make_line_iter(stream, limit=None, buffer_size=<NUM_LIT:10> * <NUM_LIT>):
_iter = _make_chunk_iter(stream, limit, buffer_size)<EOL>first_item = next(_iter, '<STR_LIT>')<EOL>if not first_item:<EOL><INDENT>return<EOL><DEDENT>s = make_literal_wrapper(first_item)<EOL>empty = s('<STR_LIT>')<EOL>cr = s('<STR_LIT:\r>')<EOL>lf = s('<STR_LIT:\n>')<EOL>crlf = s('<STR_LIT:\r\n>')<EOL>_iter = chain((first_item,), _iter)<EOL>def _iter_basic_lines():<EOL><INDENT>_join = empty.join<EOL>buffer = []<EOL>while <NUM_LIT:1>:<EOL><INDENT>new_data = next(_iter, '<STR_LIT>')<EOL>if not new_data:<EOL><INDENT>break<EOL><DEDENT>new_buf = []<EOL>for item in chain(buffer, new_data.splitlines(True)):<EOL><INDENT>new_buf.append(item)<EOL>if item and item[-<NUM_LIT:1>:] in crlf:<EOL><INDENT>yield _join(new_buf)<EOL>new_buf = []<EOL><DEDENT><DEDENT>buffer = new_buf<EOL><DEDENT>if buffer:<EOL><INDENT>yield _join(buffer)<EOL><DEDENT><DEDENT>previous = empty<EOL>for item in _iter_basic_lines():<EOL><INDENT>if item == lf and previous[-<NUM_LIT:1>:] == cr:<EOL><INDENT>previous += item<EOL>item = empty<EOL><DEDENT>if previous:<EOL><INDENT>yield previous<EOL><DEDENT>previous = item<EOL><DEDENT>if previous:<EOL><INDENT>yield previous<EOL><DEDENT>
Safely iterates line-based over an input stream. If the input stream is not a :class:`LimitedStream` the `limit` parameter is mandatory. This uses the stream's :meth:`~file.read` method internally as opposite to the :meth:`~file.readline` method that is unsafe and can only be used in violation of the WSGI specification. The same problem applies to the `__iter__` function of the input stream which calls :meth:`~file.readline` without arguments. If you need line-by-line processing it's strongly recommended to iterate over the input stream using this helper function. .. versionchanged:: 0.8 This function now ensures that the limit was reached. .. versionadded:: 0.9 added support for iterators as input stream. :param stream: the stream or iterate to iterate over. :param limit: the limit in bytes for the stream. (Usually content length. Not necessary if the `stream` is a :class:`LimitedStream`. :param buffer_size: The optional buffer size.
f5881:m14
def make_chunk_iter(stream, separator, limit=None, buffer_size=<NUM_LIT:10> * <NUM_LIT>):
_iter = _make_chunk_iter(stream, limit, buffer_size)<EOL>first_item = next(_iter, '<STR_LIT>')<EOL>if not first_item:<EOL><INDENT>return<EOL><DEDENT>_iter = chain((first_item,), _iter)<EOL>if isinstance(first_item, text_type):<EOL><INDENT>separator = to_unicode(separator)<EOL>_split = re.compile(r'<STR_LIT>' % re.escape(separator)).split<EOL>_join = u'<STR_LIT>'.join<EOL><DEDENT>else:<EOL><INDENT>separator = to_bytes(separator)<EOL>_split = re.compile(b'<STR_LIT:(>' + re.escape(separator) + b'<STR_LIT:)>').split<EOL>_join = b'<STR_LIT>'.join<EOL><DEDENT>buffer = []<EOL>while <NUM_LIT:1>:<EOL><INDENT>new_data = next(_iter, '<STR_LIT>')<EOL>if not new_data:<EOL><INDENT>break<EOL><DEDENT>chunks = _split(new_data)<EOL>new_buf = []<EOL>for item in chain(buffer, chunks):<EOL><INDENT>if item == separator:<EOL><INDENT>yield _join(new_buf)<EOL>new_buf = []<EOL><DEDENT>else:<EOL><INDENT>new_buf.append(item)<EOL><DEDENT><DEDENT>buffer = new_buf<EOL><DEDENT>if buffer:<EOL><INDENT>yield _join(buffer)<EOL><DEDENT>
Works like :func:`make_line_iter` but accepts a separator which divides chunks. If you want newline based processing you should use :func:`make_line_iter` instead as it supports arbitrary newline markers. .. versionadded:: 0.8 .. versionadded:: 0.9 added support for iterators as input stream. :param stream: the stream or iterate to iterate over. :param separator: the separator that divides chunks. :param limit: the limit in bytes for the stream. (Usually content length. Not necessary if the `stream` is otherwise already limited). :param buffer_size: The optional buffer size.
f5881:m15
def is_allowed(self, filename):
return True<EOL>
Subclasses can override this method to disallow the access to certain files. However by providing `disallow` in the constructor this method is overwritten.
f5881:c0:m1
@property<EOL><INDENT>def is_exhausted(self):<DEDENT>
return self._pos >= self.limit<EOL>
If the stream is exhausted this attribute is `True`.
f5881:c4:m2
def on_exhausted(self):
<EOL>return self._read(<NUM_LIT:0>)<EOL>
This is called when the stream tries to read past the limit. The return value of this function is returned from the reading function.
f5881:c4:m3
def on_disconnect(self):
from werkzeug.exceptions import ClientDisconnected<EOL>raise ClientDisconnected()<EOL>
What should happen if a disconnect is detected? The return value of this function is returned from read functions in case the client went away. By default a :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
f5881:c4:m4
def exhaust(self, chunk_size=<NUM_LIT> * <NUM_LIT:64>):
to_read = self.limit - self._pos<EOL>chunk = chunk_size<EOL>while to_read > <NUM_LIT:0>:<EOL><INDENT>chunk = min(to_read, chunk)<EOL>self.read(chunk)<EOL>to_read -= chunk<EOL><DEDENT>
Exhaust the stream. This consumes all the data left until the limit is reached. :param chunk_size: the size for a chunk. It will read the chunk until the stream is exhausted and throw away the results.
f5881:c4:m5
def read(self, size=None):
if self._pos >= self.limit:<EOL><INDENT>return self.on_exhausted()<EOL><DEDENT>if size is None or size == -<NUM_LIT:1>: <EOL><INDENT>size = self.limit<EOL><DEDENT>to_read = min(self.limit - self._pos, size)<EOL>try:<EOL><INDENT>read = self._read(to_read)<EOL><DEDENT>except (IOError, ValueError):<EOL><INDENT>return self.on_disconnect()<EOL><DEDENT>if to_read and len(read) != to_read:<EOL><INDENT>return self.on_disconnect()<EOL><DEDENT>self._pos += len(read)<EOL>return read<EOL>
Read `size` bytes or if size is not provided everything is read. :param size: the number of bytes read.
f5881:c4:m6
def readline(self, size=None):
if self._pos >= self.limit:<EOL><INDENT>return self.on_exhausted()<EOL><DEDENT>if size is None:<EOL><INDENT>size = self.limit - self._pos<EOL><DEDENT>else:<EOL><INDENT>size = min(size, self.limit - self._pos)<EOL><DEDENT>try:<EOL><INDENT>line = self._readline(size)<EOL><DEDENT>except (ValueError, IOError):<EOL><INDENT>return self.on_disconnect()<EOL><DEDENT>if size and not line:<EOL><INDENT>return self.on_disconnect()<EOL><DEDENT>self._pos += len(line)<EOL>return line<EOL>
Reads one line from the stream.
f5881:c4:m7
def readlines(self, size=None):
last_pos = self._pos<EOL>result = []<EOL>if size is not None:<EOL><INDENT>end = min(self.limit, last_pos + size)<EOL><DEDENT>else:<EOL><INDENT>end = self.limit<EOL><DEDENT>while <NUM_LIT:1>:<EOL><INDENT>if size is not None:<EOL><INDENT>size -= last_pos - self._pos<EOL><DEDENT>if self._pos >= end:<EOL><INDENT>break<EOL><DEDENT>result.append(self.readline(size))<EOL>if size is not None:<EOL><INDENT>last_pos = self._pos<EOL><DEDENT><DEDENT>return result<EOL>
Reads a file into a list of strings. It calls :meth:`readline` until the file is read to the end. It does support the optional `size` argument if the underlaying stream supports it for `readline`.
f5881:c4:m8
def tell(self):
return self._pos<EOL>
Returns the position of the stream. .. versionadded:: 0.9
f5881:c4:m9
def __dir__(self):
result = list(new_module.__all__)<EOL>result.extend(('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'))<EOL>return result<EOL>
Just show what we want to show.
f5882:c0:m1
def url_parse(url, scheme=None, allow_fragments=True):
s = make_literal_wrapper(url)<EOL>is_text_based = isinstance(url, text_type)<EOL>if scheme is None:<EOL><INDENT>scheme = s('<STR_LIT>')<EOL><DEDENT>netloc = query = fragment = s('<STR_LIT>')<EOL>i = url.find(s('<STR_LIT::>'))<EOL>if i > <NUM_LIT:0> and _scheme_re.match(to_native(url[:i], errors='<STR_LIT:replace>')):<EOL><INDENT>rest = url[i + <NUM_LIT:1>:]<EOL>if not rest or any(c not in s('<STR_LIT>') for c in rest):<EOL><INDENT>scheme, url = url[:i].lower(), rest<EOL><DEDENT><DEDENT>if url[:<NUM_LIT:2>] == s('<STR_LIT>'):<EOL><INDENT>delim = len(url)<EOL>for c in s('<STR_LIT>'):<EOL><INDENT>wdelim = url.find(c, <NUM_LIT:2>)<EOL>if wdelim >= <NUM_LIT:0>:<EOL><INDENT>delim = min(delim, wdelim)<EOL><DEDENT><DEDENT>netloc, url = url[<NUM_LIT:2>:delim], url[delim:]<EOL>if ((s('<STR_LIT:[>') in netloc and s('<STR_LIT:]>') not in netloc) or<EOL>(s('<STR_LIT:]>') in netloc and s('<STR_LIT:[>') not in netloc)):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>if allow_fragments and s('<STR_LIT:#>') in url:<EOL><INDENT>url, fragment = url.split(s('<STR_LIT:#>'), <NUM_LIT:1>)<EOL><DEDENT>if s('<STR_LIT:?>') in url:<EOL><INDENT>url, query = url.split(s('<STR_LIT:?>'), <NUM_LIT:1>)<EOL><DEDENT>result_type = is_text_based and URL or BytesURL<EOL>return result_type(scheme, netloc, url, query, fragment)<EOL>
Parses a URL from a string into a :class:`URL` tuple. If the URL is lacking a scheme it can be provided as second argument. Otherwise, it is ignored. Optionally fragments can be stripped from the URL by setting `allow_fragments` to `False`. The inverse of this function is :func:`url_unparse`. :param url: the URL to parse. :param scheme: the default schema to use if the URL is schemaless. :param allow_fragments: if set to `False` a fragment will be removed from the URL.
f5883:m3
def url_quote(string, charset='<STR_LIT:utf-8>', errors='<STR_LIT:strict>', safe='<STR_LIT>'):
if not isinstance(string, (text_type, bytes, bytearray)):<EOL><INDENT>string = text_type(string)<EOL><DEDENT>if isinstance(string, text_type):<EOL><INDENT>string = string.encode(charset, errors)<EOL><DEDENT>if isinstance(safe, text_type):<EOL><INDENT>safe = safe.encode(charset, errors)<EOL><DEDENT>safe = frozenset(bytearray(safe) + _always_safe)<EOL>rv = bytearray()<EOL>for char in bytearray(string):<EOL><INDENT>if char in safe:<EOL><INDENT>rv.append(char)<EOL><DEDENT>else:<EOL><INDENT>rv.extend(('<STR_LIT>' % char).encode('<STR_LIT:ascii>'))<EOL><DEDENT><DEDENT>return to_native(bytes(rv))<EOL>
URL encode a single string with a given encoding. :param s: the string to quote. :param charset: the charset to be used. :param safe: an optional sequence of safe characters.
f5883:m4
def url_quote_plus(string, charset='<STR_LIT:utf-8>', errors='<STR_LIT:strict>', safe='<STR_LIT>'):
return url_quote(string, charset, errors, safe + '<STR_LIT:U+0020>').replace('<STR_LIT:U+0020>', '<STR_LIT:+>')<EOL>
URL encode a single string with the given encoding and convert whitespace to "+". :param s: The string to quote. :param charset: The charset to be used. :param safe: An optional sequence of safe characters.
f5883:m5
def url_unparse(components):
scheme, netloc, path, query, fragment =normalize_string_tuple(components)<EOL>s = make_literal_wrapper(scheme)<EOL>url = s('<STR_LIT>')<EOL>if netloc or (scheme and path.startswith(s('<STR_LIT:/>'))):<EOL><INDENT>if path and path[:<NUM_LIT:1>] != s('<STR_LIT:/>'):<EOL><INDENT>path = s('<STR_LIT:/>') + path<EOL><DEDENT>url = s('<STR_LIT>') + (netloc or s('<STR_LIT>')) + path<EOL><DEDENT>elif path:<EOL><INDENT>url += path<EOL><DEDENT>if scheme:<EOL><INDENT>url = scheme + s('<STR_LIT::>') + url<EOL><DEDENT>if query:<EOL><INDENT>url = url + s('<STR_LIT:?>') + query<EOL><DEDENT>if fragment:<EOL><INDENT>url = url + s('<STR_LIT:#>') + fragment<EOL><DEDENT>return url<EOL>
The reverse operation to :meth:`url_parse`. This accepts arbitrary as well as :class:`URL` tuples and returns a URL as a string. :param components: the parsed URL as tuple which should be converted into a URL string.
f5883:m6
def url_unquote(string, charset='<STR_LIT:utf-8>', errors='<STR_LIT:replace>', unsafe='<STR_LIT>'):
rv = _unquote_to_bytes(string, unsafe)<EOL>if charset is not None:<EOL><INDENT>rv = rv.decode(charset, errors)<EOL><DEDENT>return rv<EOL>
URL decode a single string with a given encoding. If the charset is set to `None` no unicode decoding is performed and raw bytes are returned. :param s: the string to unquote. :param charset: the charset of the query string. If set to `None` no unicode decoding will take place. :param errors: the error handling for the charset decoding.
f5883:m7
def url_unquote_plus(s, charset='<STR_LIT:utf-8>', errors='<STR_LIT:replace>'):
if isinstance(s, text_type):<EOL><INDENT>s = s.replace(u'<STR_LIT:+>', u'<STR_LIT:U+0020>')<EOL><DEDENT>else:<EOL><INDENT>s = s.replace(b'<STR_LIT:+>', b'<STR_LIT:U+0020>')<EOL><DEDENT>return url_unquote(s, charset, errors)<EOL>
URL decode a single string with the given `charset` and decode "+" to whitespace. Per default encoding errors are ignored. If you want a different behavior you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a :exc:`HTTPUnicodeError` is raised. :param s: The string to unquote. :param charset: the charset of the query string. If set to `None` no unicode decoding will take place. :param errors: The error handling for the `charset` decoding.
f5883:m8
def url_fix(s, charset='<STR_LIT:utf-8>'):
scheme, netloc, path, qs, anchor = url_parse(to_unicode(s, charset, '<STR_LIT:replace>'))<EOL>path = url_quote(path, charset, safe='<STR_LIT>')<EOL>qs = url_quote_plus(qs, charset, safe='<STR_LIT>')<EOL>return to_native(url_unparse((scheme, netloc, path, qs, anchor)))<EOL>
r"""Sometimes you get an URL by a user that just isn't a real URL because it contains unsafe characters like ' ' and so on. This function can fix some of the problems in a similar way browsers handle data entered by the user: >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)') 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)' :param s: the string with the URL to fix. :param charset: The target charset for the URL if the url was given as unicode string.
f5883:m9
def uri_to_iri(uri, charset='<STR_LIT:utf-8>', errors='<STR_LIT:replace>'):
if isinstance(uri, tuple):<EOL><INDENT>uri = url_unparse(uri)<EOL><DEDENT>uri = url_parse(to_unicode(uri, charset))<EOL>path = url_unquote(uri.path, charset, errors, '<STR_LIT>')<EOL>query = url_unquote(uri.query, charset, errors, '<STR_LIT>')<EOL>fragment = url_unquote(uri.fragment, charset, errors, '<STR_LIT>')<EOL>return url_unparse((uri.scheme, uri.decode_netloc(),<EOL>path, query, fragment))<EOL>
r""" Converts a URI in a given charset to an IRI. Examples for URI versus IRI: >>> uri_to_iri(b'http://xn--n3h.net/') u'http://\u2603.net/' >>> uri_to_iri(b'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th') u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th' Query strings are left unchanged: >>> uri_to_iri('/?foo=24&x=%26%2f') u'/?foo=24&x=%26%2f' .. versionadded:: 0.6 :param uri: The URI to convert. :param charset: The charset of the URI. :param errors: The error handling on decode.
f5883:m10
def iri_to_uri(iri, charset='<STR_LIT:utf-8>', errors='<STR_LIT:strict>'):
if isinstance(iri, tuple):<EOL><INDENT>iri = url_unparse(iri)<EOL><DEDENT>iri = url_parse(to_unicode(iri, charset, errors))<EOL>netloc = iri.encode_netloc().decode('<STR_LIT:ascii>')<EOL>path = url_quote(iri.path, charset, errors, '<STR_LIT>')<EOL>query = url_quote(iri.query, charset, errors, '<STR_LIT>')<EOL>fragment = url_quote(iri.fragment, charset, errors, '<STR_LIT>')<EOL>return to_native(url_unparse((iri.scheme, netloc,<EOL>path, query, fragment)))<EOL>
r""" Converts any unicode based IRI to an acceptable ASCII URI. Werkzeug always uses utf-8 URLs internally because this is what browsers and HTTP do as well. In some places where it accepts an URL it also accepts a unicode IRI and converts it into a URI. Examples for IRI versus URI: >>> iri_to_uri(u'http://☃.net/') 'http://xn--n3h.net/' >>> iri_to_uri(u'http://üser:pässword@☃.net/påth') 'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th' .. versionadded:: 0.6 :param iri: The IRI to convert. :param charset: The charset for the URI.
f5883:m11
def url_decode(s, charset='<STR_LIT:utf-8>', decode_keys=False, include_empty=True,<EOL>errors='<STR_LIT:replace>', separator='<STR_LIT:&>', cls=None):
if cls is None:<EOL><INDENT>cls = MultiDict<EOL><DEDENT>if isinstance(s, text_type) and not isinstance(separator, text_type):<EOL><INDENT>separator = separator.decode(charset or '<STR_LIT:ascii>')<EOL><DEDENT>elif isinstance(s, bytes) and not isinstance(separator, bytes):<EOL><INDENT>separator = separator.encode(charset or '<STR_LIT:ascii>')<EOL><DEDENT>return cls(_url_decode_impl(s.split(separator), charset, decode_keys,<EOL>include_empty, errors))<EOL>
Parse a querystring and return it as :class:`MultiDict`. There is a difference in key decoding on different Python versions. On Python 3 keys will always be fully decoded whereas on Python 2, keys will remain bytestrings if they fit into ASCII. On 2.x keys can be forced to be unicode by setting `decode_keys` to `True`. If the charset is set to `None` no unicode decoding will happen and raw bytes will be returned. Per default a missing value for a key will default to an empty key. If you don't want that behavior you can set `include_empty` to `False`. Per default encoding errors are ignored. If you want a different behavior you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a `HTTPUnicodeError` is raised. .. versionchanged:: 0.5 In previous versions ";" and "&" could be used for url decoding. This changed in 0.5 where only "&" is supported. If you want to use ";" instead a different `separator` can be provided. The `cls` parameter was added. :param s: a string with the query string to decode. :param charset: the charset of the query string. If set to `None` no unicode decoding will take place. :param decode_keys: Used on Python 2.x to control whether keys should be forced to be unicode objects. If set to `True` then keys will be unicode in all cases. Otherwise, they remain `str` if they fit into ASCII. :param include_empty: Set to `False` if you don't want empty values to appear in the dict. :param errors: the decoding error behavior. :param separator: the pair separator to be used, defaults to ``&`` :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`MultiDict` is used.
f5883:m12
def url_decode_stream(stream, charset='<STR_LIT:utf-8>', decode_keys=False,<EOL>include_empty=True, errors='<STR_LIT:replace>', separator='<STR_LIT:&>',<EOL>cls=None, limit=None, return_iterator=False):
from werkzeug.wsgi import make_chunk_iter<EOL>if return_iterator:<EOL><INDENT>cls = lambda x: x<EOL><DEDENT>elif cls is None:<EOL><INDENT>cls = MultiDict<EOL><DEDENT>pair_iter = make_chunk_iter(stream, separator, limit)<EOL>return cls(_url_decode_impl(pair_iter, charset, decode_keys,<EOL>include_empty, errors))<EOL>
Works like :func:`url_decode` but decodes a stream. The behavior of stream and limit follows functions like :func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is directly fed to the `cls` so you can consume the data while it's parsed. .. versionadded:: 0.8 :param stream: a stream with the encoded querystring :param charset: the charset of the query string. If set to `None` no unicode decoding will take place. :param decode_keys: Used on Python 2.x to control whether keys should be forced to be unicode objects. If set to `True`, keys will be unicode in all cases. Otherwise, they remain `str` if they fit into ASCII. :param include_empty: Set to `False` if you don't want empty values to appear in the dict. :param errors: the decoding error behavior. :param separator: the pair separator to be used, defaults to ``&`` :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`MultiDict` is used. :param limit: the content length of the URL data. Not necessary if a limited stream is provided. :param return_iterator: if set to `True` the `cls` argument is ignored and an iterator over all decoded pairs is returned
f5883:m13
def url_encode(obj, charset='<STR_LIT:utf-8>', encode_keys=False, sort=False, key=None,<EOL>separator=b'<STR_LIT:&>'):
separator = to_native(separator, '<STR_LIT:ascii>')<EOL>return separator.join(_url_encode_impl(obj, charset, encode_keys, sort, key))<EOL>
URL encode a dict/`MultiDict`. If a value is `None` it will not appear in the result string. Per default only values are encoded into the target charset strings. If `encode_keys` is set to ``True`` unicode keys are supported too. If `sort` is set to `True` the items are sorted by `key` or the default sorting algorithm. .. versionadded:: 0.5 `sort`, `key`, and `separator` were added. :param obj: the object to encode into a query string. :param charset: the charset of the query string. :param encode_keys: set to `True` if you have unicode keys. (Ignored on Python 3.x) :param sort: set to `True` if you want parameters to be sorted by `key`. :param separator: the separator to be used for the pairs. :param key: an optional function to be used for sorting. For more details check out the :func:`sorted` documentation.
f5883:m15
def url_encode_stream(obj, stream=None, charset='<STR_LIT:utf-8>', encode_keys=False,<EOL>sort=False, key=None, separator=b'<STR_LIT:&>'):
separator = to_native(separator, '<STR_LIT:ascii>')<EOL>gen = _url_encode_impl(obj, charset, encode_keys, sort, key)<EOL>if stream is None:<EOL><INDENT>return gen<EOL><DEDENT>for idx, chunk in enumerate(gen):<EOL><INDENT>if idx:<EOL><INDENT>stream.write(separator)<EOL><DEDENT>stream.write(chunk)<EOL><DEDENT>
Like :meth:`url_encode` but writes the results to a stream object. If the stream is `None` a generator over all encoded pairs is returned. .. versionadded:: 0.8 :param obj: the object to encode into a query string. :param stream: a stream to write the encoded object into or `None` if an iterator over the encoded pairs should be returned. In that case the separator argument is ignored. :param charset: the charset of the query string. :param encode_keys: set to `True` if you have unicode keys. (Ignored on Python 3.x) :param sort: set to `True` if you want parameters to be sorted by `key`. :param separator: the separator to be used for the pairs. :param key: an optional function to be used for sorting. For more details check out the :func:`sorted` documentation.
f5883:m16
def url_join(base, url, allow_fragments=True):
if isinstance(base, tuple):<EOL><INDENT>base = url_unparse(base)<EOL><DEDENT>if isinstance(url, tuple):<EOL><INDENT>url = url_unparse(url)<EOL><DEDENT>base, url = normalize_string_tuple((base, url))<EOL>s = make_literal_wrapper(base)<EOL>if not base:<EOL><INDENT>return url<EOL><DEDENT>if not url:<EOL><INDENT>return base<EOL><DEDENT>bscheme, bnetloc, bpath, bquery, bfragment =url_parse(base, allow_fragments=allow_fragments)<EOL>scheme, netloc, path, query, fragment =url_parse(url, bscheme, allow_fragments)<EOL>if scheme != bscheme:<EOL><INDENT>return url<EOL><DEDENT>if netloc:<EOL><INDENT>return url_unparse((scheme, netloc, path, query, fragment))<EOL><DEDENT>netloc = bnetloc<EOL>if path[:<NUM_LIT:1>] == s('<STR_LIT:/>'):<EOL><INDENT>segments = path.split(s('<STR_LIT:/>'))<EOL><DEDENT>elif not path:<EOL><INDENT>segments = bpath.split(s('<STR_LIT:/>'))<EOL>if not query:<EOL><INDENT>query = bquery<EOL><DEDENT><DEDENT>else:<EOL><INDENT>segments = bpath.split(s('<STR_LIT:/>'))[:-<NUM_LIT:1>] + path.split(s('<STR_LIT:/>'))<EOL><DEDENT>if segments[-<NUM_LIT:1>] == s('<STR_LIT:.>'):<EOL><INDENT>segments[-<NUM_LIT:1>] = s('<STR_LIT>')<EOL><DEDENT>segments = [segment for segment in segments if segment != s('<STR_LIT:.>')]<EOL>while <NUM_LIT:1>:<EOL><INDENT>i = <NUM_LIT:1><EOL>n = len(segments) - <NUM_LIT:1><EOL>while i < n:<EOL><INDENT>if segments[i] == s('<STR_LIT:..>') andsegments[i - <NUM_LIT:1>] not in (s('<STR_LIT>'), s('<STR_LIT:..>')):<EOL><INDENT>del segments[i - <NUM_LIT:1>:i + <NUM_LIT:1>]<EOL>break<EOL><DEDENT>i += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>unwanted_marker = [s('<STR_LIT>'), s('<STR_LIT:..>')]<EOL>while segments[:<NUM_LIT:2>] == unwanted_marker:<EOL><INDENT>del segments[<NUM_LIT:1>]<EOL><DEDENT>path = s('<STR_LIT:/>').join(segments)<EOL>return url_unparse((scheme, netloc, path, query, fragment))<EOL>
Join a base URL and a possibly relative URL to form an absolute interpretation of the latter. :param base: the base URL for the join operation. :param url: the URL to join. :param allow_fragments: indicates whether fragments should be allowed.
f5883:m17
def replace(self, **kwargs):
return self._replace(**kwargs)<EOL>
Return an URL with the same values, except for those parameters given new values by whichever keyword arguments are specified.
f5883:c0:m0
@property<EOL><INDENT>def host(self):<DEDENT>
return self._split_host()[<NUM_LIT:0>]<EOL>
The host part of the URL if available, otherwise `None`. The host is either the hostname or the IP address mentioned in the URL. It will not contain the port.
f5883:c0:m1
@property<EOL><INDENT>def ascii_host(self):<DEDENT>
rv = self.host<EOL>if rv is not None and isinstance(rv, text_type):<EOL><INDENT>rv = _encode_idna(rv)<EOL><DEDENT>return to_native(rv, '<STR_LIT:ascii>', '<STR_LIT:ignore>')<EOL>
Works exactly like :attr:`host` but will return a result that is restricted to ASCII. If it finds a netloc that is not ASCII it will attempt to idna decode it. This is useful for socket operations when the URL might include internationalized characters.
f5883:c0:m2
@property<EOL><INDENT>def port(self):<DEDENT>
try:<EOL><INDENT>rv = int(to_native(self._split_host()[<NUM_LIT:1>]))<EOL>if <NUM_LIT:0> <= rv <= <NUM_LIT>:<EOL><INDENT>return rv<EOL><DEDENT><DEDENT>except (ValueError, TypeError):<EOL><INDENT>pass<EOL><DEDENT>
The port in the URL as an integer if it was present, `None` otherwise. This does not fill in default ports.
f5883:c0:m3
@property<EOL><INDENT>def auth(self):<DEDENT>
return self._split_netloc()[<NUM_LIT:0>]<EOL>
The authentication part in the URL if available, `None` otherwise.
f5883:c0:m4
@property<EOL><INDENT>def username(self):<DEDENT>
rv = self._split_auth()[<NUM_LIT:0>]<EOL>if rv is not None:<EOL><INDENT>return _url_unquote_legacy(rv)<EOL><DEDENT>
The username if it was part of the URL, `None` otherwise. This undergoes URL decoding and will always be a unicode string.
f5883:c0:m5
@property<EOL><INDENT>def raw_username(self):<DEDENT>
return self._split_auth()[<NUM_LIT:0>]<EOL>
The username if it was part of the URL, `None` otherwise. Unlike :attr:`username` this one is not being decoded.
f5883:c0:m6
@property<EOL><INDENT>def password(self):<DEDENT>
rv = self._split_auth()[<NUM_LIT:1>]<EOL>if rv is not None:<EOL><INDENT>return _url_unquote_legacy(rv)<EOL><DEDENT>
The password if it was part of the URL, `None` otherwise. This undergoes URL decoding and will always be a unicode string.
f5883:c0:m7
@property<EOL><INDENT>def raw_password(self):<DEDENT>
return self._split_auth()[<NUM_LIT:1>]<EOL>
The password if it was part of the URL, `None` otherwise. Unlike :attr:`password` this one is not being decoded.
f5883:c0:m8
def decode_query(self, *args, **kwargs):
return url_decode(self.query, *args, **kwargs)<EOL>
Decodes the query part of the URL. This is a shortcut for calling :func:`url_decode` on the query argument. The arguments and keyword arguments are forwarded to :func:`url_decode` unchanged.
f5883:c0:m9
def join(self, *args, **kwargs):
return url_parse(url_join(self, *args, **kwargs))<EOL>
Joins this URL with another one. This is just a convenience function for calling into :meth:`url_join` and then parsing the return value again.
f5883:c0:m10
def to_url(self):
return url_unparse(self)<EOL>
Returns a URL string or bytes depending on the type of the information stored. This is just a convenience function for calling :meth:`url_unparse` for this URL.
f5883:c0:m11
def decode_netloc(self):
rv = _decode_idna(self.host or '<STR_LIT>')<EOL>if '<STR_LIT::>' in rv:<EOL><INDENT>rv = '<STR_LIT>' % rv<EOL><DEDENT>port = self.port<EOL>if port is not None:<EOL><INDENT>rv = '<STR_LIT>' % (rv, port)<EOL><DEDENT>auth = '<STR_LIT::>'.join(filter(None, [<EOL>_url_unquote_legacy(self.raw_username or '<STR_LIT>', '<STR_LIT>'),<EOL>_url_unquote_legacy(self.raw_password or '<STR_LIT>', '<STR_LIT>'),<EOL>]))<EOL>if auth:<EOL><INDENT>rv = '<STR_LIT>' % (auth, rv)<EOL><DEDENT>return rv<EOL>
Decodes the netloc part into a string.
f5883:c0:m12
def to_uri_tuple(self):
return url_parse(iri_to_uri(self).encode('<STR_LIT:ascii>'))<EOL>
Returns a :class:`BytesURL` tuple that holds a URI. This will encode all the information in the URL properly to ASCII using the rules a web browser would follow. It's usually more interesting to directly call :meth:`iri_to_uri` which will return a string.
f5883:c0:m13
def to_iri_tuple(self):
return url_parse(uri_to_iri(self))<EOL>
Returns a :class:`URL` tuple that holds an IRI. This will try to decode as much information as possible in the URL without losing information similar to how a web browser does it for the URL bar. It's usually more interesting to directly call :meth:`uri_to_iri` which will return a string.
f5883:c0:m14
def encode_netloc(self):
rv = self.ascii_host or '<STR_LIT>'<EOL>if '<STR_LIT::>' in rv:<EOL><INDENT>rv = '<STR_LIT>' % rv<EOL><DEDENT>port = self.port<EOL>if port is not None:<EOL><INDENT>rv = '<STR_LIT>' % (rv, port)<EOL><DEDENT>auth = '<STR_LIT::>'.join(filter(None, [<EOL>url_quote(self.raw_username or '<STR_LIT>', '<STR_LIT:utf-8>', '<STR_LIT:strict>', '<STR_LIT>'),<EOL>url_quote(self.raw_password or '<STR_LIT>', '<STR_LIT:utf-8>', '<STR_LIT:strict>', '<STR_LIT>'),<EOL>]))<EOL>if auth:<EOL><INDENT>rv = '<STR_LIT>' % (auth, rv)<EOL><DEDENT>return rv.encode('<STR_LIT:ascii>')<EOL>
Encodes the netloc part to an ASCII safe URL as bytes.
f5883:c1:m1
def encode(self, charset='<STR_LIT:utf-8>', errors='<STR_LIT:replace>'):
return BytesURL(<EOL>self.scheme.encode('<STR_LIT:ascii>'),<EOL>self.encode_netloc(),<EOL>self.path.encode(charset, errors),<EOL>self.query.encode(charset, errors),<EOL>self.fragment.encode(charset, errors)<EOL>)<EOL>
Encodes the URL to a tuple made out of bytes. The charset is only being used for the path, query and fragment.
f5883:c1:m2
def encode_netloc(self):
return self.netloc<EOL>
Returns the netloc unchanged as bytes.
f5883:c2:m1
def decode(self, charset='<STR_LIT:utf-8>', errors='<STR_LIT:replace>'):
return URL(<EOL>self.scheme.decode('<STR_LIT:ascii>'),<EOL>self.decode_netloc(),<EOL>self.path.decode(charset, errors),<EOL>self.query.decode(charset, errors),<EOL>self.fragment.decode(charset, errors)<EOL>)<EOL>
Decodes the URL to a tuple made out of strings. The charset is only being used for the path, query and fragment.
f5883:c2:m2
def wsgi_to_bytes(data):
if isinstance(data, bytes):<EOL><INDENT>return data<EOL><DEDENT>return data.encode('<STR_LIT>')<EOL>
coerce wsgi unicode represented bytes to real ones
f5884:m0
def quote_header_value(value, extra_chars='<STR_LIT>', allow_token=True):
if isinstance(value, bytes):<EOL><INDENT>value = bytes_to_wsgi(value)<EOL><DEDENT>value = str(value)<EOL>if allow_token:<EOL><INDENT>token_chars = _token_chars | set(extra_chars)<EOL>if set(value).issubset(token_chars):<EOL><INDENT>return value<EOL><DEDENT><DEDENT>return '<STR_LIT>' % value.replace('<STR_LIT:\\>', '<STR_LIT>').replace('<STR_LIT:">', '<STR_LIT>')<EOL>
Quote a header value if necessary. .. versionadded:: 0.5 :param value: the value to quote. :param extra_chars: a list of extra characters to skip quoting. :param allow_token: if this is enabled token values are returned unchanged.
f5884:m2
def unquote_header_value(value, is_filename=False):
if value and value[<NUM_LIT:0>] == value[-<NUM_LIT:1>] == '<STR_LIT:">':<EOL><INDENT>value = value[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL>if not is_filename or value[:<NUM_LIT:2>] != '<STR_LIT>':<EOL><INDENT>return value.replace('<STR_LIT>', '<STR_LIT:\\>').replace('<STR_LIT>', '<STR_LIT:">')<EOL><DEDENT><DEDENT>return value<EOL>
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). This does not use the real unquoting but what browsers are actually using for quoting. .. versionadded:: 0.5 :param value: the header value to unquote.
f5884:m3
def dump_options_header(header, options):
segments = []<EOL>if header is not None:<EOL><INDENT>segments.append(header)<EOL><DEDENT>for key, value in iteritems(options):<EOL><INDENT>if value is None:<EOL><INDENT>segments.append(key)<EOL><DEDENT>else:<EOL><INDENT>segments.append('<STR_LIT>' % (key, quote_header_value(value)))<EOL><DEDENT><DEDENT>return '<STR_LIT>'.join(segments)<EOL>
The reverse function to :func:`parse_options_header`. :param header: the header to dump :param options: a dict of options to append.
f5884:m4
def dump_header(iterable, allow_token=True):
if isinstance(iterable, dict):<EOL><INDENT>items = []<EOL>for key, value in iteritems(iterable):<EOL><INDENT>if value is None:<EOL><INDENT>items.append(key)<EOL><DEDENT>else:<EOL><INDENT>items.append('<STR_LIT>' % (<EOL>key,<EOL>quote_header_value(value, allow_token=allow_token)<EOL>))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>items = [quote_header_value(x, allow_token=allow_token)<EOL>for x in iterable]<EOL><DEDENT>return '<STR_LIT:U+002CU+0020>'.join(items)<EOL>
Dump an HTTP header again. This is the reversal of :func:`parse_list_header`, :func:`parse_set_header` and :func:`parse_dict_header`. This also quotes strings that include an equals sign unless you pass it as dict of key, value pairs. >>> dump_header({'foo': 'bar baz'}) 'foo="bar baz"' >>> dump_header(('foo', 'bar baz')) 'foo, "bar baz"' :param iterable: the iterable or dict of values to quote. :param allow_token: if set to `False` tokens as values are disallowed. See :func:`quote_header_value` for more details.
f5884:m5
def parse_list_header(value):
result = []<EOL>for item in _parse_list_header(value):<EOL><INDENT>if item[:<NUM_LIT:1>] == item[-<NUM_LIT:1>:] == '<STR_LIT:">':<EOL><INDENT>item = unquote_header_value(item[<NUM_LIT:1>:-<NUM_LIT:1>])<EOL><DEDENT>result.append(item)<EOL><DEDENT>return result<EOL>
Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. It basically works like :func:`parse_set_header` just that items may appear multiple times and case sensitivity is preserved. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] To create a header from the :class:`list` again, use the :func:`dump_header` function. :param value: a string with a list header. :return: :class:`list`
f5884:m6
def parse_dict_header(value, cls=dict):
result = cls()<EOL>if not isinstance(value, text_type):<EOL><INDENT>value = bytes_to_wsgi(value)<EOL><DEDENT>for item in _parse_list_header(value):<EOL><INDENT>if '<STR_LIT:=>' not in item:<EOL><INDENT>result[item] = None<EOL>continue<EOL><DEDENT>name, value = item.split('<STR_LIT:=>', <NUM_LIT:1>)<EOL>if value[:<NUM_LIT:1>] == value[-<NUM_LIT:1>:] == '<STR_LIT:">':<EOL><INDENT>value = unquote_header_value(value[<NUM_LIT:1>:-<NUM_LIT:1>])<EOL><DEDENT>result[name] = value<EOL><DEDENT>return result<EOL>
Parse lists of key, value pairs as described by RFC 2068 Section 2 and convert them into a python dict (or any other mapping object created from the type with a dict like interface provided by the `cls` argument): >>> d = parse_dict_header('foo="is a fish", bar="as well"') >>> type(d) is dict True >>> sorted(d.items()) [('bar', 'as well'), ('foo', 'is a fish')] If there is no value for a key it will be `None`: >>> parse_dict_header('key_without_value') {'key_without_value': None} To create a header from the :class:`dict` again, use the :func:`dump_header` function. .. versionchanged:: 0.9 Added support for `cls` argument. :param value: a string with a dict header. :param cls: callable to use for storage of parsed results. :return: an instance of `cls`
f5884:m7
def parse_options_header(value):
def _tokenize(string):<EOL><INDENT>for match in _option_header_piece_re.finditer(string):<EOL><INDENT>key, value = match.groups()<EOL>key = unquote_header_value(key)<EOL>if value is not None:<EOL><INDENT>value = unquote_header_value(value, key == '<STR_LIT:filename>')<EOL><DEDENT>yield key, value<EOL><DEDENT><DEDENT>if not value:<EOL><INDENT>return '<STR_LIT>', {}<EOL><DEDENT>parts = _tokenize('<STR_LIT:;>' + value)<EOL>name = next(parts)[<NUM_LIT:0>]<EOL>extra = dict(parts)<EOL>return name, extra<EOL>
Parse a ``Content-Type`` like header into a tuple with the content type and the options: >>> parse_options_header('text/html; charset=utf8') ('text/html', {'charset': 'utf8'}) This should not be used to parse ``Cache-Control`` like headers that use a slightly different format. For these headers use the :func:`parse_dict_header` function. .. versionadded:: 0.5 :param value: the header to parse. :return: (str, options)
f5884:m8
def parse_accept_header(value, cls=None):
if cls is None:<EOL><INDENT>cls = Accept<EOL><DEDENT>if not value:<EOL><INDENT>return cls(None)<EOL><DEDENT>result = []<EOL>for match in _accept_re.finditer(value):<EOL><INDENT>quality = match.group(<NUM_LIT:2>)<EOL>if not quality:<EOL><INDENT>quality = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>quality = max(min(float(quality), <NUM_LIT:1>), <NUM_LIT:0>)<EOL><DEDENT>result.append((match.group(<NUM_LIT:1>), quality))<EOL><DEDENT>return cls(result)<EOL>
Parses an HTTP Accept-* header. This does not implement a complete valid algorithm but one that supports at least value and quality extraction. Returns a new :class:`Accept` object (basically a list of ``(value, quality)`` tuples sorted by the quality with some additional accessor methods). The second parameter can be a subclass of :class:`Accept` that is created with the parsed values and returned. :param value: the accept header string to be parsed. :param cls: the wrapper class for the return value (can be :class:`Accept` or a subclass thereof) :return: an instance of `cls`.
f5884:m9
def parse_cache_control_header(value, on_update=None, cls=None):
if cls is None:<EOL><INDENT>cls = RequestCacheControl<EOL><DEDENT>if not value:<EOL><INDENT>return cls(None, on_update)<EOL><DEDENT>return cls(parse_dict_header(value), on_update)<EOL>
Parse a cache control header. The RFC differs between response and request cache control, this method does not. It's your responsibility to not use the wrong control statements. .. versionadded:: 0.5 The `cls` was added. If not specified an immutable :class:`~werkzeug.datastructures.RequestCacheControl` is returned. :param value: a cache control header to be parsed. :param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.CacheControl` object is changed. :param cls: the class for the returned object. By default :class:`~werkzeug.datastructures.RequestCacheControl` is used. :return: a `cls` object.
f5884:m10
def parse_set_header(value, on_update=None):
if not value:<EOL><INDENT>return HeaderSet(None, on_update)<EOL><DEDENT>return HeaderSet(parse_list_header(value), on_update)<EOL>
Parse a set-like header and return a :class:`~werkzeug.datastructures.HeaderSet` object: >>> hs = parse_set_header('token, "quoted value"') The return value is an object that treats the items case-insensitively and keeps the order of the items: >>> 'TOKEN' in hs True >>> hs.index('quoted value') 1 >>> hs HeaderSet(['token', 'quoted value']) To create a header from the :class:`HeaderSet` again, use the :func:`dump_header` function. :param value: a set header to be parsed. :param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.HeaderSet` object is changed. :return: a :class:`~werkzeug.datastructures.HeaderSet`
f5884:m11
def parse_authorization_header(value):
if not value:<EOL><INDENT>return<EOL><DEDENT>value = wsgi_to_bytes(value)<EOL>try:<EOL><INDENT>auth_type, auth_info = value.split(None, <NUM_LIT:1>)<EOL>auth_type = auth_type.lower()<EOL><DEDENT>except ValueError:<EOL><INDENT>return<EOL><DEDENT>if auth_type == b'<STR_LIT>':<EOL><INDENT>try:<EOL><INDENT>username, password = base64.b64decode(auth_info).split(b'<STR_LIT::>', <NUM_LIT:1>)<EOL><DEDENT>except Exception as e:<EOL><INDENT>return<EOL><DEDENT>return Authorization('<STR_LIT>', {'<STR_LIT:username>': bytes_to_wsgi(username),<EOL>'<STR_LIT:password>': bytes_to_wsgi(password)})<EOL><DEDENT>elif auth_type == b'<STR_LIT>':<EOL><INDENT>auth_map = parse_dict_header(auth_info)<EOL>for key in '<STR_LIT:username>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>':<EOL><INDENT>if not key in auth_map:<EOL><INDENT>return<EOL><DEDENT><DEDENT>if '<STR_LIT>' in auth_map:<EOL><INDENT>if not auth_map.get('<STR_LIT>') or not auth_map.get('<STR_LIT>'):<EOL><INDENT>return<EOL><DEDENT><DEDENT>return Authorization('<STR_LIT>', auth_map)<EOL><DEDENT>
Parse an HTTP basic/digest authorization header transmitted by the web browser. The return value is either `None` if the header was invalid or not given, otherwise an :class:`~werkzeug.datastructures.Authorization` object. :param value: the authorization header to parse. :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
f5884:m12
def parse_www_authenticate_header(value, on_update=None):
if not value:<EOL><INDENT>return WWWAuthenticate(on_update=on_update)<EOL><DEDENT>try:<EOL><INDENT>auth_type, auth_info = value.split(None, <NUM_LIT:1>)<EOL>auth_type = auth_type.lower()<EOL><DEDENT>except (ValueError, AttributeError):<EOL><INDENT>return WWWAuthenticate(value.strip().lower(), on_update=on_update)<EOL><DEDENT>return WWWAuthenticate(auth_type, parse_dict_header(auth_info),<EOL>on_update)<EOL>
Parse an HTTP WWW-Authenticate header into a :class:`~werkzeug.datastructures.WWWAuthenticate` object. :param value: a WWW-Authenticate header to parse. :param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.WWWAuthenticate` object is changed. :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
f5884:m13
def parse_if_range_header(value):
if not value:<EOL><INDENT>return IfRange()<EOL><DEDENT>date = parse_date(value)<EOL>if date is not None:<EOL><INDENT>return IfRange(date=date)<EOL><DEDENT>return IfRange(unquote_etag(value)[<NUM_LIT:0>])<EOL>
Parses an if-range header which can be an etag or a date. Returns a :class:`~werkzeug.datastructures.IfRange` object. .. versionadded:: 0.7
f5884:m14
def parse_range_header(value, make_inclusive=True):
if not value or '<STR_LIT:=>' not in value:<EOL><INDENT>return None<EOL><DEDENT>ranges = []<EOL>last_end = <NUM_LIT:0><EOL>units, rng = value.split('<STR_LIT:=>', <NUM_LIT:1>)<EOL>units = units.strip().lower()<EOL>for item in rng.split('<STR_LIT:U+002C>'):<EOL><INDENT>item = item.strip()<EOL>if '<STR_LIT:->' not in item:<EOL><INDENT>return None<EOL><DEDENT>if item.startswith('<STR_LIT:->'):<EOL><INDENT>if last_end < <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>begin = int(item)<EOL>end = None<EOL>last_end = -<NUM_LIT:1><EOL><DEDENT>elif '<STR_LIT:->' in item:<EOL><INDENT>begin, end = item.split('<STR_LIT:->', <NUM_LIT:1>)<EOL>begin = int(begin)<EOL>if begin < last_end or last_end < <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>if end:<EOL><INDENT>end = int(end) + <NUM_LIT:1><EOL>if begin >= end:<EOL><INDENT>return None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>end = None<EOL><DEDENT>last_end = end<EOL><DEDENT>ranges.append((begin, end))<EOL><DEDENT>return Range(units, ranges)<EOL>
Parses a range header into a :class:`~werkzeug.datastructures.Range` object. If the header is missing or malformed `None` is returned. `ranges` is a list of ``(start, stop)`` tuples where the ranges are non-inclusive. .. versionadded:: 0.7
f5884:m15
def parse_content_range_header(value, on_update=None):
if value is None:<EOL><INDENT>return None<EOL><DEDENT>try:<EOL><INDENT>units, rangedef = (value or '<STR_LIT>').strip().split(None, <NUM_LIT:1>)<EOL><DEDENT>except ValueError:<EOL><INDENT>return None<EOL><DEDENT>if '<STR_LIT:/>' not in rangedef:<EOL><INDENT>return None<EOL><DEDENT>rng, length = rangedef.split('<STR_LIT:/>', <NUM_LIT:1>)<EOL>if length == '<STR_LIT:*>':<EOL><INDENT>length = None<EOL><DEDENT>elif length.isdigit():<EOL><INDENT>length = int(length)<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>if rng == '<STR_LIT:*>':<EOL><INDENT>return ContentRange(units, None, None, length, on_update=on_update)<EOL><DEDENT>elif '<STR_LIT:->' not in rng:<EOL><INDENT>return None<EOL><DEDENT>start, stop = rng.split('<STR_LIT:->', <NUM_LIT:1>)<EOL>try:<EOL><INDENT>start = int(start)<EOL>stop = int(stop) + <NUM_LIT:1><EOL><DEDENT>except ValueError:<EOL><INDENT>return None<EOL><DEDENT>if is_byte_range_valid(start, stop, length):<EOL><INDENT>return ContentRange(units, start, stop, length, on_update=on_update)<EOL><DEDENT>
Parses a range header into a :class:`~werkzeug.datastructures.ContentRange` object or `None` if parsing is not possible. .. versionadded:: 0.7 :param value: a content range header to be parsed. :param on_update: an optional callable that is called every time a value on the :class:`~werkzeug.datastructures.ContentRange` object is changed.
f5884:m16
def quote_etag(etag, weak=False):
if '<STR_LIT:">' in etag:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>etag = '<STR_LIT>' % etag<EOL>if weak:<EOL><INDENT>etag = '<STR_LIT>' + etag<EOL><DEDENT>return etag<EOL>
Quote an etag. :param etag: the etag to quote. :param weak: set to `True` to tag it "weak".
f5884:m17
def unquote_etag(etag):
if not etag:<EOL><INDENT>return None, None<EOL><DEDENT>etag = etag.strip()<EOL>weak = False<EOL>if etag[:<NUM_LIT:2>] in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>weak = True<EOL>etag = etag[<NUM_LIT:2>:]<EOL><DEDENT>if etag[:<NUM_LIT:1>] == etag[-<NUM_LIT:1>:] == '<STR_LIT:">':<EOL><INDENT>etag = etag[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT>return etag, weak<EOL>
Unquote a single etag: >>> unquote_etag('w/"bar"') ('bar', True) >>> unquote_etag('"bar"') ('bar', False) :param etag: the etag identifier to unquote. :return: a ``(etag, weak)`` tuple.
f5884:m18
def parse_etags(value):
if not value:<EOL><INDENT>return ETags()<EOL><DEDENT>strong = []<EOL>weak = []<EOL>end = len(value)<EOL>pos = <NUM_LIT:0><EOL>while pos < end:<EOL><INDENT>match = _etag_re.match(value, pos)<EOL>if match is None:<EOL><INDENT>break<EOL><DEDENT>is_weak, quoted, raw = match.groups()<EOL>if raw == '<STR_LIT:*>':<EOL><INDENT>return ETags(star_tag=True)<EOL><DEDENT>elif quoted:<EOL><INDENT>raw = quoted<EOL><DEDENT>if is_weak:<EOL><INDENT>weak.append(raw)<EOL><DEDENT>else:<EOL><INDENT>strong.append(raw)<EOL><DEDENT>pos = match.end()<EOL><DEDENT>return ETags(strong, weak)<EOL>
Parse an etag header. :param value: the tag header to parse :return: an :class:`~werkzeug.datastructures.ETags` object.
f5884:m19
def generate_etag(data):
return md5(data).hexdigest()<EOL>
Generate an etag for some data.
f5884:m20
def parse_date(value):
if value:<EOL><INDENT>t = parsedate_tz(value.strip())<EOL>if t is not None:<EOL><INDENT>try:<EOL><INDENT>year = t[<NUM_LIT:0>]<EOL>if year >= <NUM_LIT:0> and year <= <NUM_LIT>:<EOL><INDENT>year += <NUM_LIT><EOL><DEDENT>elif year >= <NUM_LIT> and year <= <NUM_LIT>:<EOL><INDENT>year += <NUM_LIT><EOL><DEDENT>return datetime(*((year,) + t[<NUM_LIT:1>:<NUM_LIT:7>])) -timedelta(seconds=t[-<NUM_LIT:1>] or <NUM_LIT:0>)<EOL><DEDENT>except (ValueError, OverflowError):<EOL><INDENT>return None<EOL><DEDENT><DEDENT><DEDENT>
Parse one of the following date formats into a datetime object: .. sourcecode:: text Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format If parsing fails the return value is `None`. :param value: a string with a supported date format. :return: a :class:`datetime.datetime` object.
f5884:m21
def _dump_date(d, delim):
if d is None:<EOL><INDENT>d = gmtime()<EOL><DEDENT>elif isinstance(d, datetime):<EOL><INDENT>d = d.utctimetuple()<EOL><DEDENT>elif isinstance(d, (integer_types, float)):<EOL><INDENT>d = gmtime(d)<EOL><DEDENT>return '<STR_LIT>' % (<EOL>('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')[d.tm_wday],<EOL>d.tm_mday, delim,<EOL>('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>')[d.tm_mon - <NUM_LIT:1>],<EOL>delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec<EOL>)<EOL>
Used for `http_date` and `cookie_date`.
f5884:m22
def cookie_date(expires=None):
return _dump_date(expires, '<STR_LIT:->')<EOL>
Formats the time to ensure compatibility with Netscape's cookie standard. Accepts a floating point number expressed in seconds since the epoch in, a datetime object or a timetuple. All times in UTC. The :func:`parse_date` function can be used to parse such a date. Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``. :param expires: If provided that date is used, otherwise the current.
f5884:m23
def http_date(timestamp=None):
return _dump_date(timestamp, '<STR_LIT:U+0020>')<EOL>
Formats the time to match the RFC1123 date format. Accepts a floating point number expressed in seconds since the epoch in, a datetime object or a timetuple. All times in UTC. The :func:`parse_date` function can be used to parse such a date. Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``. :param timestamp: If provided that date is used, otherwise the current.
f5884:m24
def is_resource_modified(environ, etag=None, data=None, last_modified=None):
if etag is None and data is not None:<EOL><INDENT>etag = generate_etag(data)<EOL><DEDENT>elif data is not None:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if environ['<STR_LIT>'] not in ('<STR_LIT:GET>', '<STR_LIT>'):<EOL><INDENT>return False<EOL><DEDENT>unmodified = False<EOL>if isinstance(last_modified, string_types):<EOL><INDENT>last_modified = parse_date(last_modified)<EOL><DEDENT>if last_modified is not None:<EOL><INDENT>last_modified = last_modified.replace(microsecond=<NUM_LIT:0>)<EOL><DEDENT>modified_since = parse_date(environ.get('<STR_LIT>'))<EOL>if modified_since and last_modified and last_modified <= modified_since:<EOL><INDENT>unmodified = True<EOL><DEDENT>if etag:<EOL><INDENT>if_none_match = parse_etags(environ.get('<STR_LIT>'))<EOL>if if_none_match:<EOL><INDENT>unmodified = if_none_match.contains_raw(etag)<EOL><DEDENT><DEDENT>return not unmodified<EOL>
Convenience method for conditional requests. :param environ: the WSGI environment of the request to be checked. :param etag: the etag for the response for comparison. :param data: or alternatively the data of the response to automatically generate an etag using :func:`generate_etag`. :param last_modified: an optional date of the last modification. :return: `True` if the resource was modified, otherwise `False`.
f5884:m25
def remove_entity_headers(headers, allowed=('<STR_LIT>', '<STR_LIT>')):
allowed = set(x.lower() for x in allowed)<EOL>headers[:] = [(key, value) for key, value in headers if<EOL>not is_entity_header(key) or key.lower() in allowed]<EOL>
Remove all entity headers from a list or :class:`Headers` object. This operation works in-place. `Expires` and `Content-Location` headers are by default not removed. The reason for this is :rfc:`2616` section 10.3.5 which specifies some entity headers that should be sent. .. versionchanged:: 0.5 added `allowed` parameter. :param headers: a list or :class:`Headers` object. :param allowed: a list of headers that should still be allowed even though they are entity headers.
f5884:m26
def remove_hop_by_hop_headers(headers):
headers[:] = [(key, value) for key, value in headers if<EOL>not is_hop_by_hop_header(key)]<EOL>
Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or :class:`Headers` object. This operation works in-place. .. versionadded:: 0.5 :param headers: a list or :class:`Headers` object.
f5884:m27
def is_entity_header(header):
return header.lower() in _entity_headers<EOL>
Check if a header is an entity header. .. versionadded:: 0.5 :param header: the header to test. :return: `True` if it's an entity header, `False` otherwise.
f5884:m28
def is_hop_by_hop_header(header):
return header.lower() in _hop_by_hop_headers<EOL>
Check if a header is an HTTP/1.1 "Hop-by-Hop" header. .. versionadded:: 0.5 :param header: the header to test. :return: `True` if it's an entity header, `False` otherwise.
f5884:m29
def parse_cookie(header, charset='<STR_LIT:utf-8>', errors='<STR_LIT:replace>', cls=None):
if isinstance(header, dict):<EOL><INDENT>header = header.get('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>elif header is None:<EOL><INDENT>header = '<STR_LIT>'<EOL><DEDENT>if isinstance(header, text_type):<EOL><INDENT>header = header.encode('<STR_LIT>', '<STR_LIT:replace>')<EOL><DEDENT>if cls is None:<EOL><INDENT>cls = TypeConversionDict<EOL><DEDENT>def _parse_pairs():<EOL><INDENT>for key, val in _cookie_parse_impl(header):<EOL><INDENT>key = to_unicode(key, charset, errors, allow_none_charset=True)<EOL>val = to_unicode(val, charset, errors, allow_none_charset=True)<EOL>yield try_coerce_native(key), val<EOL><DEDENT><DEDENT>return cls(_parse_pairs())<EOL>
Parse a cookie. Either from a string or WSGI environ. Per default encoding errors are ignored. If you want a different behavior you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a :exc:`HTTPUnicodeError` is raised. .. versionchanged:: 0.5 This function now returns a :class:`TypeConversionDict` instead of a regular dict. The `cls` parameter was added. :param header: the header to be used to parse the cookie. Alternatively this can be a WSGI environment. :param charset: the charset for the cookie values. :param errors: the error behavior for the charset decoding. :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`TypeConversionDict` is used.
f5884:m30