| signature (string, 8–3.44k chars) | body (string, 0–1.41M chars) | docstring (string, 1–122k chars) | id (string, 5–17 chars) |
|---|---|---|---|
def make_buffer(cls=six.BytesIO, initial_value=None, name=None):
|
buf = cls(initial_value) if initial_value else cls()
if name is not None:
    buf.name = name
if six.PY2:
    buf.__enter__ = lambda: buf
    buf.__exit__ = lambda exc_type, exc_val, exc_tb: None
return buf
|
Construct a new in-memory file object aka "buffer".
:param cls: Class of the file object. Meaningful values are BytesIO and StringIO.
:param initial_value: Passed directly to the constructor, this is the content of the returned buffer.
:param name: Associated file path. Not assigned if None (default).
:return: Instance of `cls`.
|
f8209:m0
|
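For illustration, here is how the row above behaves on Python 3, where the six/PY2 shims are unnecessary. This is a hedged sketch of the same logic, not the packaged function:

import io

def make_buffer(cls=io.BytesIO, initial_value=None, name=None):
    # Python 3-only re-creation of the row above; the six.PY2 branch is dropped.
    buf = cls(initial_value) if initial_value else cls()
    if name is not None:
        buf.name = name  # mimic a named, file-like object
    return buf

buf = make_buffer(initial_value=b'hello', name='in-memory.bin')
print(buf.name, buf.read())  # in-memory.bin b'hello'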
def close(self):
|
logger.debug("<STR_LIT>")
self._sub.terminate()
self._sub = None
|
Flush and close this stream.
|
f8210:c0:m1
|
def readable(self):
|
return self._sub is not None
|
Return True if the stream can be read from.
|
f8210:c0:m2
|
def seekable(self):
|
return False
|
If False, seek(), tell() and truncate() will raise IOError.
|
f8210:c0:m3
|
def detach(self):
|
raise io.UnsupportedOperation
|
Unsupported.
|
f8210:c0:m4
|
def read(self, size=-1):
|
return self._sub.stdout.read(size)
|
Read up to size bytes from the object and return them.
|
f8210:c0:m5
|
def read1(self, size=-1):
|
return self.read(size=size)
|
This is the same as read().
|
f8210:c0:m6
|
def readinto(self, b):
|
data = self.read(len(b))
if not data:
    return 0
b[:len(data)] = data
return len(data)
|
Read up to len(b) bytes into b, and return the number of bytes
read.
|
f8210:c0:m7
|
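Several rows in this table share the same readinto-over-read pattern. A minimal, self-contained sketch of that pattern follows; the Echo class is hypothetical and exists only to exercise it:

import io

class Echo(io.RawIOBase):
    # Hypothetical reader used only to demonstrate the pattern.
    def __init__(self, payload):
        self._payload = payload

    def read(self, size=-1):
        size = len(self._payload) if size < 0 else size
        chunk, self._payload = self._payload[:size], self._payload[size:]
        return chunk

    def readinto(self, b):
        # Same logic as the row above: read up to len(b) bytes,
        # copy them into b, and report how many were copied.
        data = self.read(len(b))
        if not data:
            return 0
        b[:len(data)] = data
        return len(data)

buf = bytearray(4)
n = Echo(b'abcdef').readinto(buf)
print(n, bytes(buf))  # 4 b'abcd'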
def writeable(self):
|
return self._sub is not None
|
Return True if this object is writeable.
|
f8210:c1:m3
|
def seekable(self):
|
return False
|
If False, seek(), tell() and truncate() will raise IOError.
|
f8210:c1:m4
|
def open(uri, mode, min_part_size=WEBHDFS_MIN_PART_SIZE):
|
if mode == 'rb':
    return BufferedInputBase(uri)
elif mode == 'wb':
    return BufferedOutputBase(uri, min_part_size=min_part_size)
else:
    raise NotImplementedError('<STR_LIT>' % mode)
|
Parameters
----------
min_part_size: int, optional
For writing only.
|
f8211:m0
|
def close(self):
|
logger.debug("<STR_LIT>")
|
Flush and close this stream.
|
f8211:c0:m1
|
def readable(self):
|
return True
|
Return True if the stream can be read from.
|
f8211:c0:m2
|
def seekable(self):
|
return False
|
If False, seek(), tell() and truncate() will raise IOError.
This stream is not seekable.
|
f8211:c0:m3
|
def detach(self):
|
raise io.UnsupportedOperation
|
Unsupported.
|
f8211:c0:m4
|
def read1(self, size=-1):
|
return self.read(size=size)
|
This is the same as read().
|
f8211:c0:m6
|
def readinto(self, b):
|
data = self.read(len(b))
if not data:
    return 0
b[:len(data)] = data
return len(data)
|
Read up to len(b) bytes into b, and return the number of bytes
read.
|
f8211:c0:m7
|
def __init__(self, uri_path, min_part_size=WEBHDFS_MIN_PART_SIZE):
|
self.uri_path = uri_path
self._closed = False
self.min_part_size = min_part_size
payload = {"<STR_LIT>": "<STR_LIT>", "<STR_LIT>": True}
init_response = requests.put("<STR_LIT>" + self.uri_path,
                             params=payload, allow_redirects=False)
if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
    raise WebHdfsException(str(init_response.status_code) + "\n" + init_response.content)
uri = init_response.headers['location']
response = requests.put(uri, data="<STR_LIT>", headers={'<STR_LIT>': '<STR_LIT>'})
if not response.status_code == httplib.CREATED:
    raise WebHdfsException(str(response.status_code) + "\n" + response.content)
self.lines = []
self.parts = 0
self.chunk_bytes = 0
self.total_size = 0
self.raw = None
|
Parameters
----------
min_part_size: int, optional
For writing only.
|
f8211:c1:m0
|
def writable(self):
|
return True
|
Return True if the stream supports writing.
|
f8211:c1:m1
|
def write(self, b):
|
if self._closed:
    raise ValueError("<STR_LIT>")
if not isinstance(b, six.binary_type):
    raise TypeError("<STR_LIT>")
self.lines.append(b)
self.chunk_bytes += len(b)
self.total_size += len(b)
if self.chunk_bytes >= self.min_part_size:
    buff = b"<STR_LIT>".join(self.lines)
    logger.info(
        "<STR_LIT>",
        self.parts, len(buff), self.total_size / <NUM_LIT> ** 3
    )
    self._upload(buff)
    logger.debug("<STR_LIT>", self.parts)
    self.parts += 1
    self.lines, self.chunk_bytes = [], 0
|
Write the given bytes (binary string) into the WebHDFS file specified in the constructor.
|
f8211:c1:m4
|
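The write() row above accumulates bytes until min_part_size is reached, then uploads the joined chunk. A stripped-down sketch of that buffering discipline; ChunkedWriter is a hypothetical stand-in, not the library class:

class ChunkedWriter:
    # Illustration of the buffer-and-flush logic in write() above.
    def __init__(self, upload, min_part_size):
        self._upload = upload
        self._min_part_size = min_part_size
        self._lines, self._chunk_bytes = [], 0

    def write(self, b):
        self._lines.append(b)
        self._chunk_bytes += len(b)
        if self._chunk_bytes >= self._min_part_size:
            self._upload(b''.join(self._lines))  # flush one "part"
            self._lines, self._chunk_bytes = [], 0

parts = []
w = ChunkedWriter(parts.append, min_part_size=4)
for chunk in (b'ab', b'cd', b'ef'):
    w.write(chunk)
print(parts)  # [b'abcd'] -- b'ef' is still buffered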
def open(
        bucket_id,
        key_id,
        mode,
        buffer_size=DEFAULT_BUFFER_SIZE,
        min_part_size=DEFAULT_MIN_PART_SIZE,
        session=None,
        resource_kwargs=None,
        multipart_upload_kwargs=None,
):
|
logger.debug('<STR_LIT>', locals())
if mode not in MODES:
    raise NotImplementedError('<STR_LIT>' % (mode, MODES))
if resource_kwargs is None:
    resource_kwargs = {}
if multipart_upload_kwargs is None:
    multipart_upload_kwargs = {}
if mode == READ_BINARY:
    fileobj = SeekableBufferedInputBase(
        bucket_id,
        key_id,
        buffer_size=buffer_size,
        session=session,
        resource_kwargs=resource_kwargs,
    )
elif mode == WRITE_BINARY:
    fileobj = BufferedOutputBase(
        bucket_id,
        key_id,
        min_part_size=min_part_size,
        session=session,
        multipart_upload_kwargs=multipart_upload_kwargs,
        resource_kwargs=resource_kwargs,
    )
else:
    assert False, '<STR_LIT>' % mode
return fileobj
|
Open an S3 object for reading or writing.
Parameters
----------
bucket_id: str
The name of the bucket this object resides in.
key_id: str
The name of the key within the bucket.
mode: str
The mode for opening the object. Must be either "rb" or "wb".
buffer_size: int, optional
The buffer size to use when performing I/O.
min_part_size: int, optional
The minimum part size for multipart uploads. For writing only.
session: object, optional
The S3 session to use when working with boto3.
resource_kwargs: dict, optional
Keyword arguments to use when accessing the S3 resource for reading or writing.
multipart_upload_kwargs: dict, optional
Additional parameters to pass to boto3's initiate_multipart_upload function.
For writing only.
|
f8213:m2
|
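Assuming the function above is importable (the module path smart_open.s3 is an assumption) and valid AWS credentials are configured, usage might look like this sketch; the bucket and key names are illustrative:

from smart_open import s3

fin = s3.open('my-bucket', 'my-key.txt', 'rb')
try:
    data = fin.read()
finally:
    fin.close()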
def iter_bucket(bucket_name, prefix='<STR_LIT>', accept_key=None,
                key_limit=None, workers=16, retries=3):
|
if accept_key is None:
    accept_key = lambda key: True
try:
    bucket_name = bucket_name.name
except AttributeError:
    pass
total_size, key_no = 0, -1
key_iterator = _list_bucket(bucket_name, prefix=prefix, accept_key=accept_key)
download_key = functools.partial(_download_key, bucket_name=bucket_name, retries=retries)
with _create_process_pool(processes=workers) as pool:
    result_iterator = pool.imap_unordered(download_key, key_iterator)
    for key_no, (key, content) in enumerate(result_iterator):
        if True or key_no % 1000 == 0:  # NOTE: 'True or' makes this log unconditional
            logger.info(
                "<STR_LIT>",
                key_no, key, len(content), total_size / <NUM_LIT> ** 2
            )
        yield key, content
        total_size += len(content)
        if key_limit is not None and key_no + 1 >= key_limit:
            break
logger.info("<STR_LIT>" % (key_no + 1, total_size))
|
Iterate and download all S3 objects under `s3://bucket_name/prefix`.
Parameters
----------
bucket_name: str
The name of the bucket.
prefix: str, optional
Limits the iteration to keys starting with the prefix.
accept_key: callable, optional
This is a function that accepts a key name (unicode string) and
returns True/False, signalling whether the given key should be downloaded.
The default behavior is to accept all keys.
key_limit: int, optional
If specified, the iterator will stop after yielding this many results.
workers: int, optional
The number of subprocesses to use.
retries: int, optional
The number of times to retry a failed download.
Yields
------
str
The full key name (does not include the bucket name).
bytes
The full contents of the key.
Notes
-----
The keys are processed in parallel, using `workers` processes (default: 16),
to greatly speed up downloads. If multiprocessing is not available (i.e.
_MULTIPROCESSING is False), this parameter will be ignored.
Examples
--------
>>> # get all JSON files under "mybucket/foo/"
>>> for key, content in iter_bucket(bucket_name, prefix='foo/', accept_key=lambda key: key.endswith('.json')):
...     print(key, len(content))
>>> # limit to 10k files, using 32 parallel workers (default is 16)
>>> for key, content in iter_bucket(bucket_name, key_limit=10000, workers=32):
...     print(key, len(content))
|
f8213:m3
|
def seek(self, position):
|
self._position = position
range_string = make_range_string(self._position)
logger.debug('<STR_LIT>', self._content_length, range_string)
try:
    self._body.close()
except AttributeError:
    pass
if position == self._content_length == 0 or position == self._content_length:
    self._body = io.BytesIO()
else:
    self._body = self._object.get(Range=range_string)['<STR_LIT>']
|
Seek to the specified position (byte offset) in the S3 key.
:param int position: The byte offset from the beginning of the key.
|
f8213:c1:m1
|
def close(self):
|
logger.debug("<STR_LIT>")
self._object = None
|
Flush and close this stream.
|
f8213:c2:m1
|
def readable(self):
|
return True
|
Return True if the stream can be read from.
|
f8213:c2:m2
|
def detach(self):
|
raise io.UnsupportedOperation
|
Unsupported.
|
f8213:c2:m4
|
def read(self, size=-1):
|
if size == 0:
    return b''
elif size < 0:
    from_buf = self._read_from_buffer()
    self._current_pos = self._content_length
    return from_buf + self._raw_reader.read()
if len(self._buffer) >= size:
    return self._read_from_buffer(size)
if self._eof:
    return self._read_from_buffer()
self._fill_buffer(size)
return self._read_from_buffer(size)
|
Read up to size bytes from the object and return them.
|
f8213:c2:m5
|
def read1(self, size=-1):
|
return self.read(size=size)
|
This is the same as read().
|
f8213:c2:m6
|
def readinto(self, b):
|
data = self.read(len(b))
if not data:
    return 0
b[:len(data)] = data
return len(data)
|
Read up to len(b) bytes into b, and return the number of bytes
read.
|
f8213:c2:m7
|
def readline(self, limit=-1):
|
if limit != -1:
    raise NotImplementedError('<STR_LIT>')
the_line = io.BytesIO()
while not (self._eof and len(self._buffer) == 0):
    remaining_buffer = self._buffer.peek()
    if self._line_terminator in remaining_buffer:
        next_newline = remaining_buffer.index(self._line_terminator)
        the_line.write(self._read_from_buffer(next_newline + 1))
        break
    else:
        the_line.write(self._read_from_buffer())
        self._fill_buffer()
return the_line.getvalue()
|
Read up to and including the next newline. Returns the bytes read.
|
f8213:c2:m8
|
def terminate(self):
|
pass
|
Do nothing.
|
f8213:c2:m9
|
def _read_from_buffer(self, size=-1):
|
size = size if size >= 0 else len(self._buffer)
part = self._buffer.read(size)
self._current_pos += len(part)
return part
|
Remove at most size bytes from our buffer and return them.
|
f8213:c2:m10
|
def seekable(self):
|
return True
|
If False, seek(), tell() and truncate() will raise IOError.
We offer only seek support, and no truncate support.
|
f8213:c3:m1
|
def seek(self, offset, whence=START):
|
logger.debug('<STR_LIT>', offset, whence)
if whence not in WHENCE_CHOICES:
    raise ValueError('<STR_LIT>' % WHENCE_CHOICES)
if whence == START:
    new_position = offset
elif whence == CURRENT:
    new_position = self._current_pos + offset
else:
    new_position = self._content_length + offset
new_position = clamp(new_position, 0, self._content_length)
self._current_pos = new_position
self._raw_reader.seek(new_position)
logger.debug('<STR_LIT>', self._current_pos)
self._buffer.empty()
self._eof = self._current_pos == self._content_length
return self._current_pos
|
Seek to the specified position.
:param int offset: The offset in bytes.
:param int whence: Where the offset is from.
Returns the position after seeking.
|
f8213:c3:m2
|
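The seek() row above resolves offset and whence into an absolute position and clamps it into [0, content_length]. A self-contained sketch of that arithmetic; the names are stand-ins, with START/CURRENT/END mirroring io.SEEK_SET/SEEK_CUR/SEEK_END:

START, CURRENT, END = 0, 1, 2

def clamp(value, minval, maxval):
    return max(minval, min(value, maxval))

def resolve_seek(current_pos, content_length, offset, whence):
    if whence == START:
        new_position = offset
    elif whence == CURRENT:
        new_position = current_pos + offset
    else:  # END
        new_position = content_length + offset
    return clamp(new_position, 0, content_length)

print(resolve_seek(10, 100, -5, CURRENT))   # 5
print(resolve_seek(10, 100, -5, END))       # 95
print(resolve_seek(10, 100, -500, END))     # 0 (clamped at the start)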
def tell(self):
|
return self._current_pos
|
Return the current position within the file.
|
f8213:c3:m3
|
def truncate(self, size=None):
|
raise io.UnsupportedOperation
|
Unsupported.
|
f8213:c3:m4
|
def writable(self):
|
return True
|
Return True if the stream supports writing.
|
f8213:c4:m4
|
def tell(self):
|
return self._total_bytes
|
Return the current stream position.
|
f8213:c4:m5
|
def write(self, b):
|
if not isinstance(b, _BINARY_TYPES):
    raise TypeError(
        "<STR_LIT>" % (_BINARY_TYPES, type(b)))
self._buf.write(b)
self._total_bytes += len(b)
if self._buf.tell() >= self._min_part_size:
    self._upload_next_part()
return len(b)
|
Write the given bytes (binary string) to the S3 file.
There's buffering happening under the covers, so this may not actually
do any HTTP transfer right away.
|
f8213:c4:m7
|
def terminate(self):
|
assert self._mp, "<STR_LIT>"
self._mp.abort()
self._mp = None
|
Cancel the underlying multipart upload.
|
f8213:c4:m8
|
def open(uri, mode, kerberos=False, user=None, password=None):
|
if mode == 'rb':
    return BufferedInputBase(uri, mode, kerberos=kerberos, user=user, password=password)
else:
    raise NotImplementedError('<STR_LIT>' % mode)
|
Implement a streamed reader from a web site.
Supports Kerberos and Basic HTTP authentication.
Parameters
----------
uri: str
The URL to open.
mode: str
The mode to open using.
kerberos: boolean, optional
If True, will attempt to use the local Kerberos credentials
user: str, optional
The username for authenticating over HTTP
password: str, optional
The password for authenticating over HTTP
Note
----
If neither kerberos nor (user, password) is set, the connection will be unauthenticated.
|
f8214:m0
|
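A hedged usage sketch for the HTTP open() above; the URL is illustrative, and the function is assumed to be importable from the module these rows came from:

# Unauthenticated streamed read:
fin = open('https://example.com/data.bin', 'rb')
head = fin.read(1024)

# Basic HTTP authentication:
fin = open('https://example.com/data.bin', 'rb', user='alice', password='secret')

# Kerberos (requires the requests_kerberos package):
fin = open('https://example.com/data.bin', 'rb', kerberos=True)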
def close(self):
|
logger.debug("<STR_LIT>")
self.response = None
self._read_iter = None
|
Flush and close this stream.
|
f8214:c0:m1
|
def readable(self):
|
return True
|
Return True if the stream can be read from.
|
f8214:c0:m2
|
def detach(self):
|
raise io.UnsupportedOperation
|
Unsupported.
|
f8214:c0:m4
|
def read(self, size=-1):
|
logger.debug("<STR_LIT>", size)
if self.response is None:
    return b''
if size == 0:
    return b''
elif size < 0 and len(self._read_buffer) == 0:
    retval = self.response.raw.read()
elif size < 0:
    retval = self._read_buffer.read() + self.response.raw.read()
else:
    while len(self._read_buffer) < size:
        logger.debug("<STR_LIT>", self._current_pos, size)
        bytes_read = self._read_buffer.fill(self._read_iter)
        if bytes_read == 0:
            retval = self._read_buffer.read()
            self._current_pos += len(retval)
            return retval
    retval = self._read_buffer.read(size)
self._current_pos += len(retval)
return retval
|
Mimics the read call to a filehandle object.
|
f8214:c0:m5
|
def read1(self, size=-1):
|
return self.read(size=size)
|
This is the same as read().
|
f8214:c0:m6
|
def readinto(self, b):
|
data = self.read(len(b))
if not data:
    return 0
b[:len(data)] = data
return len(data)
|
Read up to len(b) bytes into b, and return the number of bytes
read.
|
f8214:c0:m7
|
def __init__(self, url, mode='r', buffer_size=DEFAULT_BUFFER_SIZE,
             kerberos=False, user=None, password=None):
|
self.url = url
if kerberos:
    import requests_kerberos
    self.auth = requests_kerberos.HTTPKerberosAuth()
elif user is not None and password is not None:
    self.auth = (user, password)
else:
    self.auth = None
self.buffer_size = buffer_size
self.mode = mode
self.response = self._partial_request()
if not self.response.ok:
    self.response.raise_for_status()
logger.debug('<STR_LIT>', self.response, self.response.raw)
self._seekable = True
self.content_length = int(self.response.headers.get("<STR_LIT>", -1))
if self.content_length < 0:
    self._seekable = False
if self.response.headers.get("<STR_LIT>", "none").lower() != "<STR_LIT>":
    self._seekable = False
self._read_iter = self.response.iter_content(self.buffer_size)
self._read_buffer = bytebuffer.ByteBuffer(buffer_size)
self._current_pos = 0
self.raw = None
|
If Kerberos is True, will attempt to use the local Kerberos credentials.
Otherwise, will try to use "basic" HTTP authentication via username/password.
If none of those are set, will connect unauthenticated.
|
f8214:c1:m0
|
def seek(self, offset, whence=0):
|
logger.debug('<STR_LIT>', offset, whence)
if whence not in s3.WHENCE_CHOICES:
    raise ValueError('<STR_LIT>' % s3.WHENCE_CHOICES)
if not self.seekable():
    raise OSError
if whence == s3.START:
    new_pos = offset
elif whence == s3.CURRENT:
    new_pos = self._current_pos + offset
elif whence == s3.END:
    new_pos = self.content_length + offset
new_pos = s3.clamp(new_pos, 0, self.content_length)
if self._current_pos == new_pos:
    return self._current_pos
logger.debug("<STR_LIT>", self._current_pos, new_pos)
self._current_pos = new_pos
if new_pos == self.content_length:
    self.response = None
    self._read_iter = None
    self._read_buffer.empty()
else:
    response = self._partial_request(new_pos)
    if response.ok:
        self.response = response
        self._read_iter = self.response.iter_content(self.buffer_size)
        self._read_buffer.empty()
    else:
        self.response = None
return self._current_pos
|
Seek to the specified position.
:param int offset: The offset in bytes.
:param int whence: Where the offset is from.
Returns the position after seeking.
|
f8214:c1:m1
|
def truncate(self, size=None):
|
raise io.UnsupportedOperation
|
Unsupported.
|
f8214:c1:m4
|
def __init__(self, chunk_size=io.DEFAULT_BUFFER_SIZE):
|
self._chunk_size = chunk_size
self.empty()
|
Create a ByteBuffer instance that reads chunk_size bytes when filled.
Note that the buffer has no maximum size.
Parameters
----------
chunk_size: int, optional
The number of bytes that will be read from the supplied reader
or iterable when filling the buffer.
|
f8215:c0:m0
|
def __len__(self):
|
return len(self._bytes) - self._pos
|
Return the number of unread bytes in the buffer as an int
|
f8215:c0:m1
|
def read(self, size=-1):
|
part = self.peek(size)
self._pos += len(part)
return part
|
Read bytes from the buffer and advance the read position. Returns
the bytes in a bytestring.
Parameters
----------
size: int, optional
Maximum number of bytes to read. If negative or not supplied, read
all unread bytes in the buffer.
Returns
-------
bytes
|
f8215:c0:m2
|
def peek(self, size=-1):
|
if size < 0 or size > len(self):
    size = len(self)
part = self._bytes[self._pos:self._pos + size]
return part
|
Get bytes from the buffer without advancing the read position.
Returns the bytes in a bytestring.
Parameters
----------
size: int, optional
Maximum number of bytes to return. If negative or not supplied,
return all unread bytes in the buffer.
Returns
-------
bytes
|
f8215:c0:m3
|
def empty(self):
|
self._bytes = b''
self._pos = 0
|
Remove all bytes from the buffer
|
f8215:c0:m4
|
def fill(self, source, size=-1):
|
size = size if size >= 0 else self._chunk_size
size = min(size, self._chunk_size)
if self._pos != 0:
    self._bytes = self._bytes[self._pos:]
    self._pos = 0
if hasattr(source, 'read'):
    new_bytes = source.read(size)
else:
    new_bytes = b''
    for more_bytes in source:
        new_bytes += more_bytes
        if len(new_bytes) >= size:
            break
self._bytes += new_bytes
return len(new_bytes)
|
Fill the buffer with bytes from source until one of these
conditions is met:
* size bytes have been read from source (if size >= 0);
* chunk_size bytes have been read from source;
* no more bytes can be read from source;
Returns the number of new bytes added to the buffer.
Note: all previously-read bytes in the buffer are removed.
Parameters
----------
source: a file-like object, or iterable/list that contains bytes
The source of bytes to fill the buffer with. If this argument has
the `read` attribute, it's assumed to be a file-like object and
`read` is called to get the bytes; otherwise it's assumed to be an
iterable or list that contains bytes, and a for loop is used to get
the bytes.
size: int, optional
The number of bytes to try to read from source. If not supplied,
negative, or larger than the buffer's chunk_size, then chunk_size
bytes are read. Note that if source is an iterable or list, then
it's possible that more than size bytes will be read if iterating
over source produces more than one byte at a time.
Returns
-------
int, the number of new bytes added to the buffer.
|
f8215:c0:m5
|
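Pulling the ByteBuffer rows above together, a compact re-implementation shows the intended fill/peek/read contract. This is an illustrative sketch (the file-like branch of fill() only), not the packaged class:

import io

class ByteBuffer:
    # Condensed from the rows above; behavior matches for this demo.
    def __init__(self, chunk_size=io.DEFAULT_BUFFER_SIZE):
        self._chunk_size = chunk_size
        self.empty()

    def __len__(self):
        return len(self._bytes) - self._pos

    def empty(self):
        self._bytes, self._pos = b'', 0

    def peek(self, size=-1):
        if size < 0 or size > len(self):
            size = len(self)
        return self._bytes[self._pos:self._pos + size]

    def read(self, size=-1):
        part = self.peek(size)
        self._pos += len(part)
        return part

    def fill(self, source, size=-1):
        size = min(size if size >= 0 else self._chunk_size, self._chunk_size)
        if self._pos:  # drop already-read bytes
            self._bytes, self._pos = self._bytes[self._pos:], 0
        new_bytes = source.read(size)  # file-like sources only, for brevity
        self._bytes += new_bytes
        return len(new_bytes)

b = ByteBuffer(chunk_size=4)
b.fill(io.BytesIO(b'abcdef'))
print(b.peek(), b.read(2), b.read())  # b'abcd' b'ab' b'cd'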
def open(path, mode='r', host=None, user=None, port=DEFAULT_PORT):
|
if not host:
    raise ValueError('<STR_LIT>')
if not user:
    user = getpass.getuser()
conn = _connect(host, user, port)
sftp_client = conn.get_transport().open_sftp_client()
return sftp_client.open(path, mode)
|
Open a file on a remote machine over SSH.
Expects authentication to be already set up via existing keys on the local machine.
Parameters
----------
path: str
The path to the file to open on the remote machine.
mode: str, optional
The mode to use for opening the file.
host: str
The hostname of the remote machine. Required; may not be None.
user: str, optional
The username to use to login to the remote machine.
If None, defaults to the name of the current user.
port: int, optional
The port to connect to.
Returns
-------
A file-like object.
Important
---------
If you specify a previously unseen host, then its host key will be added to
the local ~/.ssh/known_hosts *automatically*.
|
f8216:m1
|
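A hypothetical call to the SSH open() above; the host name and remote path are illustrative, and key-based authentication is assumed to already be configured:

# Read a remote file as the current local user:
with open('/var/log/app.log', mode='r', host='example.com') as f:
    print(f.read())

# Or as an explicit user on a non-default port:
f = open('/var/log/app.log', mode='r', host='example.com', user='deploy', port=2222)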
def read_boto3():
|
session = get_minio_session()
s3 = session.resource('<STR_LIT>', endpoint_url=ENDPOINT_URL)
obj = s3.Object('<STR_LIT>', '<STR_LIT>')
data = obj.get()['<STR_LIT>'].read()
logging.info('<STR_LIT>', len(data))
return data
|
Read directly using boto3.
|
f8219:m0
|
def verify_directory(dir):
|
tries = 0
while not os.path.exists(dir):
    try:
        tries += 1
        os.makedirs(dir, compat.octal("<STR_LIT>"))
    except:
        if tries > 5:
            raise
|
create and/or verify a filesystem directory.
|
f8228:m1
|
def parse_encoding(fp):
|
pos = fp.tell()
fp.seek(0)
try:
    line1 = fp.readline()
    has_bom = line1.startswith(codecs.BOM_UTF8)
    if has_bom:
        line1 = line1[len(codecs.BOM_UTF8):]
    m = _PYTHON_MAGIC_COMMENT_re.match(line1.decode('ascii', 'ignore'))
    if not m:
        try:
            import parser
            parser.suite(line1.decode('ascii', 'ignore'))
        except (ImportError, SyntaxError):
            pass
        else:
            line2 = fp.readline()
            m = _PYTHON_MAGIC_COMMENT_re.match(
                line2.decode('ascii', 'ignore'))
    if has_bom:
        if m:
            raise SyntaxError("<STR_LIT>" "<STR_LIT>")
        return '<STR_LIT>'
    elif m:
        return m.group(1)
    else:
        return None
finally:
    fp.seek(pos)
|
Deduce the encoding of a Python source file (binary mode) from magic
comment.
It does this in the same way as the `Python interpreter`__
.. __: http://docs.python.org/ref/encodings.html
The ``fp`` argument should be a seekable file object in binary mode.
|
f8228:m3
|
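The core of parse_encoding is the PEP 263 magic-comment match. A simplified, self-contained stand-in for that regex follows; this is an assumption about its shape, not mako's exact _PYTHON_MAGIC_COMMENT_re:

import re

# Simplified PEP 263-style pattern for '# -*- coding: NAME -*-' comments.
MAGIC_COMMENT = re.compile(r'[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)')

line1 = '# -*- coding: latin-1 -*-'
m = MAGIC_COMMENT.match(line1)
print(m.group(1) if m else None)  # latin-1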
def sorted_dict_repr(d):
|
keys = list(d.keys())
keys.sort()
return "{" + ", ".join(["<STR_LIT>" % (k, d[k]) for k in keys]) + "}"
|
repr() a dictionary with the keys in order.
Used by the lexer unit test to compare parse trees based on strings.
|
f8228:m4
|
def restore__ast(_ast):
|
if hasattr(_ast, '<STR_LIT>'):
    return
_ast.PyCF_ONLY_AST = 2 << 9
m = compile("""<STR_LIT>""", '<STR_LIT>', '<STR_LIT>', _ast.PyCF_ONLY_AST)
_ast.Module = type(m)
for cls in _ast.Module.__mro__:
    if cls.__name__ == '<STR_LIT>':
        _ast.mod = cls
    elif cls.__name__ == '<STR_LIT>':
        _ast.AST = cls
_ast.FunctionDef = type(m.body[0])
_ast.ClassDef = type(m.body[1])
_ast.If = type(m.body[2])
_ast.Name = type(m.body[3].targets[0])
_ast.Store = type(m.body[3].targets[0].ctx)
_ast.Str = type(m.body[3].value)
_ast.Sub = type(m.body[4].value.op)
_ast.Add = type(m.body[4].value.left.op)
_ast.Div = type(m.body[4].value.right.op)
_ast.Mult = type(m.body[4].value.right.left.op)
_ast.RShift = type(m.body[5].value.op)
_ast.LShift = type(m.body[5].value.left.op)
_ast.Mod = type(m.body[5].value.left.left.op)
_ast.FloorDiv = type(m.body[5].value.left.left.left.op)
_ast.BitOr = type(m.body[6].value.op)
_ast.BitXor = type(m.body[6].value.left.op)
_ast.BitAnd = type(m.body[6].value.left.left.op)
_ast.Or = type(m.body[7].value.op)
_ast.And = type(m.body[7].value.values[0].op)
_ast.Invert = type(m.body[8].value.right.op)
_ast.Not = type(m.body[8].value.left.right.op)
_ast.UAdd = type(m.body[8].value.left.right.operand.op)
_ast.USub = type(m.body[8].value.left.left.op)
_ast.Or = type(m.body[9].value.op)
_ast.And = type(m.body[9].value.values[0].op)
_ast.IsNot = type(m.body[10].value.ops[0])
_ast.NotEq = type(m.body[10].value.ops[1])
_ast.Is = type(m.body[10].value.left.ops[0])
_ast.Eq = type(m.body[10].value.left.ops[1])
_ast.Gt = type(m.body[11].value.ops[0])
_ast.Lt = type(m.body[11].value.ops[1])
_ast.GtE = type(m.body[11].value.ops[2])
_ast.LtE = type(m.body[11].value.ops[3])
_ast.In = type(m.body[12].value.ops[0])
_ast.NotIn = type(m.body[12].value.ops[1])
|
Attempt to restore the required classes to the _ast module if it
appears to be missing them
|
f8228:m5
|
def union(self, other):
|
x = SetLikeDict(**self)
x.update(other)
return x
|
produce a 'union' of this dict and another (at the key level).
values in the second dict take precedence over that of the first
|
f8228:c3:m0
|
def get_or_create(self, key, creation_function, **kw):
|
return self._ctx_get_or_create(key, creation_function, None, **kw)
|
Retrieve a value from the cache, using the given creation function
to generate a new value.
|
f8229:c0:m2
|
def _ctx_get_or_create(self, key, creation_function, context, **kw):
|
if not self.template.cache_enabled:
    return creation_function()
return self.impl.get_or_create(
    key,
    creation_function,
    **self._get_cache_kw(kw, context))
|
Retrieve a value from the cache, using the given creation function
to generate a new value.
|
f8229:c0:m3
|
def set(self, key, value, **kw):
|
self.impl.set(key, value, **self._get_cache_kw(kw, None))
|
Place a value in the cache.
:param key: the value's key.
:param value: the value.
:param \**kw: cache configuration arguments.
|
f8229:c0:m4
|
def get(self, key, **kw):
|
return self.impl.get(key, **self._get_cache_kw(kw, None))
|
Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
|
f8229:c0:m5
|
def invalidate(self, key, **kw):
|
self.impl.invalidate(key, **self._get_cache_kw(kw, None))
|
Invalidate a value in the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
|
f8229:c0:m6
|
def invalidate_body(self):
|
self.invalidate('<STR_LIT>', __M_defname='<STR_LIT>')
|
Invalidate the cached content of the "body" method for this
template.
|
f8229:c0:m7
|
def invalidate_def(self, name):
|
self.invalidate('<STR_LIT>' % name, __M_defname='<STR_LIT>' % name)
|
Invalidate the cached content of a particular ``<%def>`` within this
template.
|
f8229:c0:m8
|
def invalidate_closure(self, name):
|
self.invalidate(name, __M_defname=name)
|
Invalidate a nested ``<%def>`` within this template.
Caching of nested defs is a blunt tool as there is no
management of scope -- nested defs that use cache tags
need to have names unique among all other nested defs in the
template, else their content will be overwritten by
each other.
|
f8229:c0:m9
|
def get_or_create(self, key, creation_function, **kw):
|
raise NotImplementedError()
|
Retrieve a value from the cache, using the given creation function
to generate a new value.
This function *must* return a value, either from
the cache, or via the given creation function.
If the creation function is called, the newly
created value should be populated into the cache
under the given key before being returned.
:param key: the value's key.
:param creation_function: function that when called generates
a new value.
:param \**kw: cache configuration arguments.
|
f8229:c1:m1
|
def set(self, key, value, **kw):
|
raise NotImplementedError()
|
Place a value in the cache.
:param key: the value's key.
:param value: the value.
:param \**kw: cache configuration arguments.
|
f8229:c1:m2
|
def get(self, key, **kw):
|
raise NotImplementedError()
|
Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments.
|
f8229:c1:m3
|
def invalidate(self, key, **kw):
|
raise NotImplementedError()
|
Invalidate a value in the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments.
|
f8229:c1:m4
|
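The four CacheImpl rows above define an abstract backend contract. A toy in-memory implementation that satisfies it; purely illustrative, and a real backend would also honor the **kw configuration arguments:

class DictCacheImpl:
    # Minimal sketch of the get/set/invalidate/get_or_create contract.
    def __init__(self):
        self._store = {}

    def get_or_create(self, key, creation_function, **kw):
        if key not in self._store:
            # Populate the cache before returning, as the contract requires.
            self._store[key] = creation_function()
        return self._store[key]

    def set(self, key, value, **kw):
        self._store[key] = value

    def get(self, key, **kw):
        return self._store[key]

    def invalidate(self, key, **kw):
        self._store.pop(key, None)

cache = DictCacheImpl()
print(cache.get_or_create('answer', lambda: 42))  # 42, created once
print(cache.get_or_create('answer', lambda: 99))  # still 42, from the cache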
def supports_caller(func):
|
def wrap_stackframe(context, *args, **kwargs):
    context.caller_stack._push_frame()
    try:
        return func(context, *args, **kwargs)
    finally:
        context.caller_stack._pop_frame()
return wrap_stackframe
|
Apply a caller_stack compatibility decorator to a plain
Python function.
See the example in :ref:`namespaces_python_modules`.
|
f8230:m0
|
def capture(context, callable_, *args, **kwargs):
|
if not compat.callable(callable_):
    raise exceptions.RuntimeException(
        "<STR_LIT>"
        "<STR_LIT>"
    )
context._push_buffer()
try:
    callable_(*args, **kwargs)
finally:
    buf = context._pop_buffer()
return buf.getvalue()
|
Execute the given template def, capturing the output into
a buffer.
See the example in :ref:`namespaces_python_modules`.
|
f8230:m1
|
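A hedged usage sketch combining the two rows above, modeled on the example referenced in :ref:`namespaces_python_modules`; the function body is illustrative:

from mako.runtime import supports_caller

@supports_caller
def layout(context):
    # Wrap whatever the calling <%call> block renders in a div.
    context.write('<div class="layout">')
    context['caller'].body()
    context.write('</div>')
    return ''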
def _include_file(context, uri, calling_uri, **kwargs):
|
template = _lookup_template(context, uri, calling_uri)
(callable_, ctx) = _populate_self_namespace(
    context._clean_inheritance_tokens(),
    template)
callable_(ctx, **_kwargs_for_include(callable_, context._data, **kwargs))
|
locate the template from the given uri and include it in
the current output.
|
f8230:m4
|
def _inherit_from(context, uri, calling_uri):
|
if uri is None:
    return None
template = _lookup_template(context, uri, calling_uri)
self_ns = context['<STR_LIT>']
ih = self_ns
while ih.inherits is not None:
    ih = ih.inherits
lclcontext = context._locals({'<STR_LIT>': ih})
ih.inherits = TemplateNamespace("<STR_LIT>" % template.uri,
                                lclcontext,
                                template=template,
                                populate_self=False)
context._data['<STR_LIT>'] = lclcontext._data['<STR_LIT>'] = ih.inherits
callable_ = getattr(template.module, '<STR_LIT>', None)
if callable_ is not None:
    ret = callable_(template, lclcontext)
    if ret:
        return ret
gen_ns = getattr(template.module, '<STR_LIT>', None)
if gen_ns is not None:
    gen_ns(context)
return (template.callable_, lclcontext)
|
called by the _inherit method in template modules to set
up the inheritance chain at the start of a template's
execution.
|
f8230:m5
|
def _render(template, callable_, args, data, as_unicode=False):
|
if as_unicode:
    buf = util.FastEncodingBuffer(as_unicode=True)
elif template.bytestring_passthrough:
    buf = compat.StringIO()
else:
    buf = util.FastEncodingBuffer(
        as_unicode=as_unicode,
        encoding=template.output_encoding,
        errors=template.encoding_errors)
context = Context(buf, **data)
context._outputting_as_unicode = as_unicode
context._set_with_template(template)
_render_context(template, callable_, context, *args,
                **_kwargs_for_callable(callable_, data))
return context._pop_buffer().getvalue()
|
create a Context and return the string
output of the given template and template callable.
|
f8230:m8
|
def _exec_template(callable_, context, args=None, kwargs=None):
|
template = context._with_template
if template is not None and (template.format_exceptions or template.error_handler):
    try:
        callable_(context, *args, **kwargs)
    except Exception:
        _render_error(template, context, compat.exception_as())
    except:
        e = sys.exc_info()[0]
        _render_error(template, context, e)
else:
    callable_(context, *args, **kwargs)
|
execute a rendering callable given the callable, a
Context, and optional explicit arguments.
The contextual Template will be located if it exists, and
the error handling options specified on that Template will
be interpreted here.
|
f8230:m12
|
@property
def lookup(self):
|
return self._with_template.lookup
|
Return the :class:`.TemplateLookup` associated
with this :class:`.Context`.
|
f8230:c0:m2
|
@property
def kwargs(self):
|
return self._kwargs.copy()
|
Return the dictionary of top level keyword arguments associated
with this :class:`.Context`.
This dictionary only includes the top-level arguments passed to
:meth:`.Template.render`. It does not include names produced within
the template execution such as local variable names or special names
such as ``self``, ``next``, etc.
The purpose of this dictionary is primarily for the case that
a :class:`.Template` accepts arguments via its ``<%page>`` tag,
which are normally expected to be passed via :meth:`.Template.render`,
except the template is being called in an inheritance context,
using the ``body()`` method. :attr:`.Context.kwargs` can then be
used to propagate these arguments to the inheriting template::
${next.body(**context.kwargs)}
|
f8230:c0:m3
|
def push_caller(self, caller):
|
self.caller_stack.append(caller)
|
Push a ``caller`` callable onto the callstack for
this :class:`.Context`.
|
f8230:c0:m4
|
def pop_caller(self):
|
del self.caller_stack[-1]
|
Pop a ``caller`` callable off the callstack for this
:class:`.Context`.
|
f8230:c0:m5
|
def keys(self):
|
return list(self._data.keys())
|
Return a list of all names established in this :class:`.Context`.
|
f8230:c0:m6
|
def _push_writer(self):
|
buf = util.FastEncodingBuffer()
self._buffer_stack.append(buf)
return buf.write
|
push a capturing buffer onto this Context and return
the new writer function.
|
f8230:c0:m8
|
def _pop_buffer_and_writer(self):
|
buf = self._buffer_stack.pop()
return buf, self._buffer_stack[-1].write
|
pop the most recent capturing buffer from this Context
and return the current writer after the pop.
|
f8230:c0:m9
|
def _push_buffer(self):
|
self._push_writer()
|
push a capturing buffer onto this Context.
|
f8230:c0:m10
|
def _pop_buffer(self):
|
return self._buffer_stack.pop()
|
pop the most recent capturing buffer from this Context.
|
f8230:c0:m11
|
def get(self, key, default=None):
|
return self._data.get(key, compat_builtins.__dict__.get(key, default))
|
Return a value from this :class:`.Context`.
|
f8230:c0:m12
|
def write(self, string):
|
self._buffer_stack[-1].write(string)
|
Write a string to this :class:`.Context` object's
underlying output buffer.
|
f8230:c0:m13
|
def writer(self):
|
return self._buffer_stack[-1].write
|
Return the current writer function.
|
f8230:c0:m14
|
def _locals(self, d):
|
if not d:
    return self
c = self._copy()
c._data.update(d)
return c
|
Create a new :class:`.Context` with a copy of this
:class:`.Context`'s current state,
updated with the given dictionary.
The :attr:`.Context.kwargs` collection remains
unaffected.
|
f8230:c0:m16
|
def _clean_inheritance_tokens(self):
|
c = self._copy()
x = c._data
x.pop('<STR_LIT>', None)
x.pop('<STR_LIT>', None)
x.pop('<STR_LIT>', None)
return c
|
create a new copy of this :class:`.Context` with
tokens related to inheritance state removed.
|
f8230:c0:m17
|
def cycle(self, *values):
|
if not values:
    raise ValueError("<STR_LIT>")
return values[self.index % len(values)]
|
Cycle through values as the loop progresses.
|
f8230:c4:m8
|
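A stand-alone illustration of the cycle() logic above, with the loop index passed explicitly instead of read from self.index:

def cycle(index, *values):
    # Same modular indexing as the row above.
    if not values:
        raise ValueError('no values to cycle through')
    return values[index % len(values)]

print([cycle(i, 'odd', 'even') for i in range(4)])  # ['odd', 'even', 'odd', 'even']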
@util.memoized_property
def attr(self):
|
return _NSAttr(self)
|
Access module level attributes by name.
This accessor allows templates to supply "scalar"
attributes which are particularly handy in inheritance
relationships.
.. seealso::
:ref:`inheritance_attr`
:ref:`namespace_attr_for_includes`
|
f8230:c6:m1
|
def get_namespace(self, uri):
|
key = (self, uri)
if key in self.context.namespaces:
    return self.context.namespaces[key]
else:
    ns = TemplateNamespace(uri, self.context._copy(),
                           templateuri=uri,
                           calling_uri=self._templateuri)
    self.context.namespaces[key] = ns
    return ns
|
Return a :class:`.Namespace` corresponding to the given ``uri``.
If the given ``uri`` is a relative URI (i.e. it does not
contain a leading slash ``/``), the ``uri`` is adjusted to
be relative to the ``uri`` of the namespace itself. This
method is therefore mostly useful off of the built-in
``local`` namespace, described in :ref:`namespace_local`.
In most cases, a template wouldn't need this function, and
should instead use the ``<%namespace>`` tag to load
namespaces. However, since all ``<%namespace>`` tags are
evaluated before the body of a template ever runs,
this method can be used to locate namespaces using
expressions that were generated within the body code of
the template, or to conditionally use a particular
namespace.
|
f8230:c6:m2
|
def get_template(self, uri):
|
return _lookup_template(self.context, uri, self._templateuri)
|
Return a :class:`.Template` from the given ``uri``.
The ``uri`` resolution is relative to the ``uri`` of this
:class:`.Namespace` object's :class:`.Template`.
|
f8230:c6:m3
|
def get_cached(self, key, **kwargs):
|
return self.cache.get(key, **kwargs)
|
Return a value from the :class:`.Cache` referenced by this
:class:`.Namespace` object's :class:`.Template`.
The advantage to this method versus direct access to the
:class:`.Cache` is that the configuration parameters
declared in ``<%page>`` take effect here, thereby calling
up the same configured backend as that configured
by ``<%page>``.
|
f8230:c6:m4
|