sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def get_by_uri(cls, uri):
"""Get a file instance by URI."""
assert uri is not None
return cls.query.filter_by(uri=uri).one_or_none() | Get a file instance by URI. | entailment |
def create(cls):
"""Create a file instance.
Note, object is only added to the database session.
"""
obj = cls(
id=uuid.uuid4(),
writable=True,
readable=False,
size=0,
)
db.session.add(obj)
return obj | Create a file instance.
Note, object is only added to the database session. | entailment |
def delete(self):
"""Delete a file instance.
The file instance can be deleted if it has no references from other
objects. The caller is responsible to test if the file instance is
writable and that the disk file can actually be removed.
.. note::
Normally you should use the Celery task to delete a file instance,
as this method will not remove the file on disk.
"""
self.query.filter_by(id=self.id).delete()
return self | Delete a file instance.
The file instance can be deleted if it has no references from other
objects. The caller is responsible to test if the file instance is
writable and that the disk file can actually be removed.
.. note::
Normally you should use the Celery task to delete a file instance,
as this method will not remove the file on disk. | entailment |
def update_checksum(self, progress_callback=None, chunk_size=None,
checksum_kwargs=None, **kwargs):
"""Update checksum based on file."""
self.checksum = self.storage(**kwargs).checksum(
progress_callback=progress_callback, chunk_size=chunk_size,
**(checksum_kwargs or {})) | Update checksum based on file. | entailment |
def clear_last_check(self):
"""Clear the checksum of the file."""
with db.session.begin_nested():
self.last_check = None
self.last_check_at = datetime.utcnow()
return self | Clear the checksum of the file. | entailment |
def verify_checksum(self, progress_callback=None, chunk_size=None,
throws=True, checksum_kwargs=None, **kwargs):
"""Verify checksum of file instance.
:param bool throws: If `True`, exceptions raised during checksum
calculation will be re-raised after logging. If set to `False`, and
an exception occurs, the `last_check` field is set to `None`
(`last_check_at` of course is updated), since no check actually was
performed.
:param dict checksum_kwargs: Passed as `**kwargs`` to
``storage().checksum``.
"""
try:
real_checksum = self.storage(**kwargs).checksum(
progress_callback=progress_callback, chunk_size=chunk_size,
**(checksum_kwargs or {}))
except Exception as exc:
current_app.logger.exception(str(exc))
if throws:
raise
real_checksum = None
with db.session.begin_nested():
self.last_check = (None if real_checksum is None
else (self.checksum == real_checksum))
self.last_check_at = datetime.utcnow()
return self.last_check | Verify checksum of file instance.
:param bool throws: If `True`, exceptions raised during checksum
calculation will be re-raised after logging. If set to `False`, and
an exception occurs, the `last_check` field is set to `None`
(`last_check_at` of course is updated), since no check actually was
performed.
:param dict checksum_kwargs: Passed as `**kwargs`` to
``storage().checksum``. | entailment |
def init_contents(self, size=0, **kwargs):
"""Initialize file."""
self.set_uri(
*self.storage(**kwargs).initialize(size=size),
readable=False, writable=True) | Initialize file. | entailment |
def update_contents(self, stream, seek=0, size=None, chunk_size=None,
progress_callback=None, **kwargs):
"""Save contents of stream to this file.
:param obj: ObjectVersion instance from where this file is accessed
from.
:param stream: File-like stream.
"""
self.checksum = None
return self.storage(**kwargs).update(
stream, seek=seek, size=size, chunk_size=chunk_size,
progress_callback=progress_callback
) | Save contents of stream to this file.
:param obj: ObjectVersion instance from where this file is accessed
from.
:param stream: File-like stream. | entailment |
def set_contents(self, stream, chunk_size=None, size=None, size_limit=None,
progress_callback=None, **kwargs):
"""Save contents of stream to this file.
:param obj: ObjectVersion instance from where this file is accessed
from.
:param stream: File-like stream.
"""
self.set_uri(
*self.storage(**kwargs).save(
stream, chunk_size=chunk_size, size=size,
size_limit=size_limit, progress_callback=progress_callback)) | Save contents of stream to this file.
:param obj: ObjectVersion instance from where this file is accessed
from.
:param stream: File-like stream. | entailment |
def copy_contents(self, fileinstance, progress_callback=None,
chunk_size=None, **kwargs):
"""Copy this file instance into another file instance."""
if not fileinstance.readable:
raise ValueError('Source file instance is not readable.')
if not self.size == 0:
raise ValueError('File instance has data.')
self.set_uri(
*self.storage(**kwargs).copy(
fileinstance.storage(**kwargs),
chunk_size=chunk_size,
progress_callback=progress_callback)) | Copy this file instance into another file instance. | entailment |
def send_file(self, filename, restricted=True, mimetype=None,
trusted=False, chunk_size=None, as_attachment=False,
**kwargs):
"""Send file to client."""
return self.storage(**kwargs).send_file(
filename,
mimetype=mimetype,
restricted=restricted,
checksum=self.checksum,
trusted=trusted,
chunk_size=chunk_size,
as_attachment=as_attachment,
) | Send file to client. | entailment |
def set_uri(self, uri, size, checksum, readable=True, writable=False,
storage_class=None):
"""Set a location of a file."""
self.uri = uri
self.size = size
self.checksum = checksum
self.writable = writable
self.readable = readable
self.storage_class = \
current_app.config['FILES_REST_DEFAULT_STORAGE_CLASS'] \
if storage_class is None else \
storage_class
return self | Set a location of a file. | entailment |
def set_contents(self, stream, chunk_size=None, size=None, size_limit=None,
progress_callback=None):
"""Save contents of stream to file instance.
If a file instance has already been set, this methods raises an
``FileInstanceAlreadySetError`` exception.
:param stream: File-like stream.
:param size: Size of stream if known.
:param chunk_size: Desired chunk size to read stream in. It is up to
the storage interface if it respects this value.
"""
if size_limit is None:
size_limit = self.bucket.size_limit
self.file = FileInstance.create()
self.file.set_contents(
stream, size_limit=size_limit, size=size, chunk_size=chunk_size,
progress_callback=progress_callback,
default_location=self.bucket.location.uri,
default_storage_class=self.bucket.default_storage_class,
)
return self | Save contents of stream to file instance.
If a file instance has already been set, this methods raises an
``FileInstanceAlreadySetError`` exception.
:param stream: File-like stream.
:param size: Size of stream if known.
:param chunk_size: Desired chunk size to read stream in. It is up to
the storage interface if it respects this value. | entailment |
def set_location(self, uri, size, checksum, storage_class=None):
"""Set only URI location of for object.
Useful to link files on externally controlled storage. If a file
instance has already been set, this methods raises an
``FileInstanceAlreadySetError`` exception.
:param uri: Full URI to object (which can be interpreted by the storage
interface).
:param size: Size of file.
:param checksum: Checksum of file.
:param storage_class: Storage class where file is stored ()
"""
self.file = FileInstance()
self.file.set_uri(
uri, size, checksum, storage_class=storage_class
)
db.session.add(self.file)
return self | Set only URI location of for object.
Useful to link files on externally controlled storage. If a file
instance has already been set, this methods raises an
``FileInstanceAlreadySetError`` exception.
:param uri: Full URI to object (which can be interpreted by the storage
interface).
:param size: Size of file.
:param checksum: Checksum of file.
:param storage_class: Storage class where file is stored () | entailment |
def send_file(self, restricted=True, trusted=False, **kwargs):
"""Wrap around FileInstance's send file."""
return self.file.send_file(
self.basename,
restricted=restricted,
mimetype=self.mimetype,
trusted=trusted,
**kwargs
) | Wrap around FileInstance's send file. | entailment |
def copy(self, bucket=None, key=None):
"""Copy an object version to a given bucket + object key.
The copy operation is handled completely at the metadata level. The
actual data on disk is not copied. Instead, the two object versions
will point to the same physical file (via the same FileInstance).
All the tags associated with the current object version are copied over
to the new instance.
.. warning::
If the destination object exists, it will be replaced by the new
object version which will become the latest version.
:param bucket: The bucket (instance or id) to copy the object to.
Default: current bucket.
:param key: Key name of destination object.
Default: current object key.
:returns: The copied object version.
"""
new_ob = ObjectVersion.create(
self.bucket if bucket is None else as_bucket(bucket),
key or self.key,
_file_id=self.file_id
)
for tag in self.tags:
ObjectVersionTag.create_or_update(object_version=new_ob,
key=tag.key,
value=tag.value)
return new_ob | Copy an object version to a given bucket + object key.
The copy operation is handled completely at the metadata level. The
actual data on disk is not copied. Instead, the two object versions
will point to the same physical file (via the same FileInstance).
All the tags associated with the current object version are copied over
to the new instance.
.. warning::
If the destination object exists, it will be replaced by the new
object version which will become the latest version.
:param bucket: The bucket (instance or id) to copy the object to.
Default: current bucket.
:param key: Key name of destination object.
Default: current object key.
:returns: The copied object version. | entailment |
def remove(self):
"""Permanently remove a specific object version from the database.
.. warning::
This by-passes the normal versioning and should only be used when
you want to permanently delete a specific object version. Otherwise
use :py:data:`ObjectVersion.delete()`.
Note the method does not remove the associated file instance which
must be garbage collected.
:returns: ``self``.
"""
with db.session.begin_nested():
if self.file_id:
self.bucket.size -= self.file.size
self.query.filter_by(
bucket_id=self.bucket_id,
key=self.key,
version_id=self.version_id,
).delete()
return self | Permanently remove a specific object version from the database.
.. warning::
This by-passes the normal versioning and should only be used when
you want to permanently delete a specific object version. Otherwise
use :py:data:`ObjectVersion.delete()`.
Note the method does not remove the associated file instance which
must be garbage collected.
:returns: ``self``. | entailment |
def create(cls, bucket, key, _file_id=None, stream=None, mimetype=None,
version_id=None, **kwargs):
"""Create a new object in a bucket.
The created object is by default created as a delete marker. You must
use ``set_contents()`` or ``set_location()`` in order to change this.
:param bucket: The bucket (instance or id) to create the object in.
:param key: Key of object.
:param _file_id: For internal use.
:param stream: File-like stream object. Used to set content of object
immediately after being created.
:param mimetype: MIME type of the file object if it is known.
:param kwargs: Keyword arguments passed to ``Object.set_contents()``.
"""
bucket = as_bucket(bucket)
if bucket.locked:
raise BucketLockedError()
with db.session.begin_nested():
latest_obj = cls.query.filter(
cls.bucket == bucket, cls.key == key, cls.is_head.is_(True)
).one_or_none()
if latest_obj is not None:
latest_obj.is_head = False
db.session.add(latest_obj)
# By default objects are created in a deleted state (i.e.
# file_id is null).
obj = cls(
bucket=bucket,
key=key,
version_id=version_id or uuid.uuid4(),
is_head=True,
mimetype=mimetype,
)
if _file_id:
file_ = _file_id if isinstance(_file_id, FileInstance) else \
FileInstance.get(_file_id)
obj.set_file(file_)
db.session.add(obj)
if stream:
obj.set_contents(stream, **kwargs)
return obj | Create a new object in a bucket.
The created object is by default created as a delete marker. You must
use ``set_contents()`` or ``set_location()`` in order to change this.
:param bucket: The bucket (instance or id) to create the object in.
:param key: Key of object.
:param _file_id: For internal use.
:param stream: File-like stream object. Used to set content of object
immediately after being created.
:param mimetype: MIME type of the file object if it is known.
:param kwargs: Keyword arguments passed to ``Object.set_contents()``. | entailment |
def get(cls, bucket, key, version_id=None):
"""Fetch a specific object.
By default the latest object version is returned, if
``version_id`` is not set.
:param bucket: The bucket (instance or id) to get the object from.
:param key: Key of object.
:param version_id: Specific version of an object.
"""
filters = [
cls.bucket_id == as_bucket_id(bucket),
cls.key == key,
]
if version_id:
filters.append(cls.version_id == version_id)
else:
filters.append(cls.is_head.is_(True))
filters.append(cls.file_id.isnot(None))
return cls.query.filter(*filters).one_or_none() | Fetch a specific object.
By default the latest object version is returned, if
``version_id`` is not set.
:param bucket: The bucket (instance or id) to get the object from.
:param key: Key of object.
:param version_id: Specific version of an object. | entailment |
def get_versions(cls, bucket, key, desc=True):
"""Fetch all versions of a specific object.
:param bucket: The bucket (instance or id) to get the object from.
:param key: Key of object.
:param desc: Sort results desc if True, asc otherwise.
:returns: The query to execute to fetch all versions.
"""
filters = [
cls.bucket_id == as_bucket_id(bucket),
cls.key == key,
]
order = cls.created.desc() if desc else cls.created.asc()
return cls.query.filter(*filters).order_by(cls.key, order) | Fetch all versions of a specific object.
:param bucket: The bucket (instance or id) to get the object from.
:param key: Key of object.
:param desc: Sort results desc if True, asc otherwise.
:returns: The query to execute to fetch all versions. | entailment |
def delete(cls, bucket, key):
"""Delete an object.
Technically works by creating a new version which works as a delete
marker.
:param bucket: The bucket (instance or id) to delete the object from.
:param key: Key of object.
:returns: Created delete marker object if key exists else ``None``.
"""
bucket_id = as_bucket_id(bucket)
obj = cls.get(bucket_id, key)
if obj:
return cls.create(as_bucket(bucket), key)
return None | Delete an object.
Technically works by creating a new version which works as a delete
marker.
:param bucket: The bucket (instance or id) to delete the object from.
:param key: Key of object.
:returns: Created delete marker object if key exists else ``None``. | entailment |
def get_by_bucket(cls, bucket, versions=False, with_deleted=False):
"""Return query that fetches all the objects in a bucket.
:param bucket: The bucket (instance or id) to query.
:param versions: Select all versions if True, only heads otherwise.
:param with_deleted: Select also deleted objects if True.
:returns: The query to retrieve filtered objects in the given bucket.
"""
bucket_id = bucket.id if isinstance(bucket, Bucket) else bucket
filters = [
cls.bucket_id == bucket_id,
]
if not versions:
filters.append(cls.is_head.is_(True))
if not with_deleted:
filters.append(cls.file_id.isnot(None))
return cls.query.filter(*filters).order_by(cls.key, cls.created.desc()) | Return query that fetches all the objects in a bucket.
:param bucket: The bucket (instance or id) to query.
:param versions: Select all versions if True, only heads otherwise.
:param with_deleted: Select also deleted objects if True.
:returns: The query to retrieve filtered objects in the given bucket. | entailment |
def relink_all(cls, old_file, new_file):
"""Relink all object versions (for a given file) to a new file.
.. warning::
Use this method with great care.
"""
assert old_file.checksum == new_file.checksum
assert old_file.id
assert new_file.id
with db.session.begin_nested():
ObjectVersion.query.filter_by(file_id=str(old_file.id)).update({
ObjectVersion.file_id: str(new_file.id)}) | Relink all object versions (for a given file) to a new file.
.. warning::
Use this method with great care. | entailment |
def copy(self, object_version=None, key=None):
"""Copy a tag to a given object version.
:param object_version: The object version instance to copy the tag to.
Default: current object version.
:param key: Key of destination tag.
Default: current tag key.
:return: The copied object version tag.
"""
return ObjectVersionTag.create(
self.object_version if object_version is None else object_version,
key or self.key,
self.value
) | Copy a tag to a given object version.
:param object_version: The object version instance to copy the tag to.
Default: current object version.
:param key: Key of destination tag.
Default: current tag key.
:return: The copied object version tag. | entailment |
def get(cls, object_version, key):
"""Get the tag object."""
return cls.query.filter_by(
version_id=as_object_version_id(object_version),
key=key,
).one_or_none() | Get the tag object. | entailment |
def create(cls, object_version, key, value):
"""Create a new tag for a given object version."""
assert len(key) < 256
assert len(value) < 256
with db.session.begin_nested():
obj = cls(version_id=as_object_version_id(object_version),
key=key,
value=value)
db.session.add(obj)
return obj | Create a new tag for a given object version. | entailment |
def create_or_update(cls, object_version, key, value):
"""Create or update a new tag for a given object version."""
assert len(key) < 256
assert len(value) < 256
obj = cls.get(object_version, key)
if obj:
obj.value = value
db.session.merge(obj)
else:
obj = cls.create(object_version, key, value)
return obj | Create or update a new tag for a given object version. | entailment |
def get_value(cls, object_version, key):
"""Get the tag value."""
obj = cls.get(object_version, key)
return obj.value if obj else None | Get the tag value. | entailment |
def delete(cls, object_version, key=None):
"""Delete tags.
:param object_version: The object version instance or id.
:param key: Key of the tag to delete.
Default: delete all tags.
"""
with db.session.begin_nested():
q = cls.query.filter_by(
version_id=as_object_version_id(object_version))
if key:
q = q.filter_by(key=key)
q.delete() | Delete tags.
:param object_version: The object version instance or id.
:param key: Key of the tag to delete.
Default: delete all tags. | entailment |
def last_part_number(self):
"""Get last part number."""
return int(self.size / self.chunk_size) \
if self.size % self.chunk_size else \
int(self.size / self.chunk_size) - 1 | Get last part number. | entailment |
def is_valid_chunksize(chunk_size):
"""Check if size is valid."""
min_csize = current_app.config['FILES_REST_MULTIPART_CHUNKSIZE_MIN']
max_csize = current_app.config['FILES_REST_MULTIPART_CHUNKSIZE_MAX']
return chunk_size >= min_csize and chunk_size <= max_csize | Check if size is valid. | entailment |
def is_valid_size(size, chunk_size):
"""Validate max theoretical size."""
min_csize = current_app.config['FILES_REST_MULTIPART_CHUNKSIZE_MIN']
max_size = \
chunk_size * current_app.config['FILES_REST_MULTIPART_MAX_PARTS']
return size > min_csize and size <= max_size | Validate max theoretical size. | entailment |
def expected_part_size(self, part_number):
"""Get expected part size for a particular part number."""
last_part = self.multipart.last_part_number
if part_number == last_part:
return self.multipart.last_part_size
elif part_number >= 0 and part_number < last_part:
return self.multipart.chunk_size
else:
raise MultipartInvalidPartNumber() | Get expected part size for a particular part number. | entailment |
def complete(self):
"""Mark a multipart object as complete."""
if Part.count(self) != self.last_part_number + 1:
raise MultipartMissingParts()
with db.session.begin_nested():
self.completed = True
self.file.readable = True
self.file.writable = False
return self | Mark a multipart object as complete. | entailment |
def merge_parts(self, version_id=None, **kwargs):
"""Merge parts into object version."""
self.file.update_checksum(**kwargs)
with db.session.begin_nested():
obj = ObjectVersion.create(
self.bucket,
self.key,
_file_id=self.file_id,
version_id=version_id
)
self.delete()
return obj | Merge parts into object version. | entailment |
def delete(self):
"""Delete a multipart object."""
# Update bucket size.
self.bucket.size -= self.size
# Remove parts
Part.query_by_multipart(self).delete()
# Remove self
self.query.filter_by(upload_id=self.upload_id).delete() | Delete a multipart object. | entailment |
def create(cls, bucket, key, size, chunk_size):
"""Create a new object in a bucket."""
bucket = as_bucket(bucket)
if bucket.locked:
raise BucketLockedError()
# Validate chunk size.
if not cls.is_valid_chunksize(chunk_size):
raise MultipartInvalidChunkSize()
# Validate max theoretical size.
if not cls.is_valid_size(size, chunk_size):
raise MultipartInvalidSize()
# Validate max bucket size.
bucket_limit = bucket.size_limit
if bucket_limit and size > bucket_limit:
desc = 'File size limit exceeded.' \
if isinstance(bucket_limit, int) else bucket_limit.reason
raise FileSizeError(description=desc)
with db.session.begin_nested():
file_ = FileInstance.create()
file_.size = size
obj = cls(
upload_id=uuid.uuid4(),
bucket=bucket,
key=key,
chunk_size=chunk_size,
size=size,
completed=False,
file=file_,
)
bucket.size += size
db.session.add(obj)
file_.init_contents(
size=size,
default_location=bucket.location.uri,
default_storage_class=bucket.default_storage_class,
)
return obj | Create a new object in a bucket. | entailment |
def get(cls, bucket, key, upload_id, with_completed=False):
"""Fetch a specific multipart object."""
q = cls.query.filter_by(
upload_id=upload_id,
bucket_id=as_bucket_id(bucket),
key=key,
)
if not with_completed:
q = q.filter(cls.completed.is_(False))
return q.one_or_none() | Fetch a specific multipart object. | entailment |
def query_expired(cls, dt, bucket=None):
"""Query all uncompleted multipart uploads."""
q = cls.query.filter(cls.created < dt).filter_by(completed=True)
if bucket:
q = q.filter(cls.bucket_id == as_bucket_id(bucket))
return q | Query all uncompleted multipart uploads. | entailment |
def query_by_bucket(cls, bucket):
"""Query all uncompleted multipart uploads."""
return cls.query.filter(cls.bucket_id == as_bucket_id(bucket)) | Query all uncompleted multipart uploads. | entailment |
def end_byte(self):
"""Get end byte in file for this part."""
return min(
(self.part_number + 1) * self.multipart.chunk_size,
self.multipart.size
) | Get end byte in file for this part. | entailment |
def create(cls, mp, part_number, stream=None, **kwargs):
"""Create a new part object in a multipart object."""
if part_number < 0 or part_number > mp.last_part_number:
raise MultipartInvalidPartNumber()
with db.session.begin_nested():
obj = cls(
multipart=mp,
part_number=part_number,
)
db.session.add(obj)
if stream:
obj.set_contents(stream, **kwargs)
return obj | Create a new part object in a multipart object. | entailment |
def get_or_none(cls, mp, part_number):
"""Get part number."""
return cls.query.filter_by(
upload_id=mp.upload_id,
part_number=part_number
).one_or_none() | Get part number. | entailment |
def get_or_create(cls, mp, part_number):
"""Get or create a part."""
obj = cls.get_or_none(mp, part_number)
if obj:
return obj
return cls.create(mp, part_number) | Get or create a part. | entailment |
def delete(cls, mp, part_number):
"""Get part number."""
return cls.query.filter_by(
upload_id=mp.upload_id,
part_number=part_number
).delete() | Get part number. | entailment |
def query_by_multipart(cls, multipart):
"""Get all parts for a specific multipart upload.
:param multipart: A :class:`invenio_files_rest.models.MultipartObject`
instance.
:returns: A :class:`invenio_files_rest.models.Part` instance.
"""
upload_id = multipart.upload_id \
if isinstance(multipart, MultipartObject) else multipart
return cls.query.filter_by(
upload_id=upload_id
) | Get all parts for a specific multipart upload.
:param multipart: A :class:`invenio_files_rest.models.MultipartObject`
instance.
:returns: A :class:`invenio_files_rest.models.Part` instance. | entailment |
def set_contents(self, stream, progress_callback=None):
"""Save contents of stream to part of file instance.
If a the MultipartObject is completed this methods raises an
``MultipartAlreadyCompleted`` exception.
:param stream: File-like stream.
:param size: Size of stream if known.
:param chunk_size: Desired chunk size to read stream in. It is up to
the storage interface if it respects this value.
"""
size, checksum = self.multipart.file.update_contents(
stream, seek=self.start_byte, size=self.part_size,
progress_callback=progress_callback,
)
self.checksum = checksum
return self | Save contents of stream to part of file instance.
If a the MultipartObject is completed this methods raises an
``MultipartAlreadyCompleted`` exception.
:param stream: File-like stream.
:param size: Size of stream if known.
:param chunk_size: Desired chunk size to read stream in. It is up to
the storage interface if it respects this value. | entailment |
def cdn_url(request):
"""
A context processor that exposes the full CDN URL in templates.
"""
cdn_url, ssl_url = _get_container_urls(CumulusStorage())
static_url = settings.STATIC_URL
return {
"CDN_URL": cdn_url + static_url,
"CDN_SSL_URL": ssl_url + static_url,
} | A context processor that exposes the full CDN URL in templates. | entailment |
def static_cdn_url(request):
"""
A context processor that exposes the full static CDN URL
as static URL in templates.
"""
cdn_url, ssl_url = _get_container_urls(CumulusStaticStorage())
static_url = settings.STATIC_URL
return {
"STATIC_URL": cdn_url + static_url,
"STATIC_SSL_URL": ssl_url + static_url,
"LOCAL_STATIC_URL": static_url,
} | A context processor that exposes the full static CDN URL
as static URL in templates. | entailment |
def _serialize_parameters(parameters):
"""Serialize some parameters to match python native types with formats
specified in google api docs like:
* True/False -> "true"/"false",
* {"a": 1, "b":2} -> "a:1|b:2"
:type parameters: dict oif query parameters
"""
for key, value in parameters.items():
if isinstance(value, bool):
parameters[key] = "true" if value else "false"
elif isinstance(value, dict):
parameters[key] = "|".join(
("%s:%s" % (k, v) for k, v in value.items()))
elif isinstance(value, (list, tuple)):
parameters[key] = "|".join(value)
return parameters | Serialize some parameters to match python native types with formats
specified in google api docs like:
* True/False -> "true"/"false",
* {"a": 1, "b":2} -> "a:1|b:2"
:type parameters: dict oif query parameters | entailment |
def _make_request(self, url, parameters, result_key):
"""Make http/https request to Google API.
Method prepares url parameters, drops None values, and gets default
values. Finally makes request using protocol assigned to client and
returns data.
:param url: url part - specifies API endpoint
:param parameters: dictionary of url parameters
:param result_key: key in output where result is expected
"""
url = urlparse.urljoin(urlparse.urljoin(self.base, url), "json")
# drop all None values and use defaults if not set
parameters = {key: value for key, value in parameters.items() if
value is not None}
parameters.setdefault("sensor", self.sensor)
parameters = self._serialize_parameters(parameters)
if self.api_key:
parameters["key"] = self.api_key
raw_response = requests.get(url, params=parameters)
response = raw_response.json()
if response["status"] == status.OK and result_key is not None:
return response[result_key]
elif response["status"] == status.OK:
del response["status"]
return response
else:
response["url"] = raw_response.url
raise errors.EXCEPTION_MAPPING.get(
response["status"],
errors.GmapException
)(response) | Make http/https request to Google API.
Method prepares url parameters, drops None values, and gets default
values. Finally makes request using protocol assigned to client and
returns data.
:param url: url part - specifies API endpoint
:param parameters: dictionary of url parameters
:param result_key: key in output where result is expected | entailment |
def geocode(self, address=None, components=None, region=None,
language=None, bounds=None, sensor=None):
"""Geocode given address. Geocoder can queried using address and/or
components. Components when used with address will restrict your query
to specific area. When used without address they act like more precise
query. For full details see
`Google docs <https://developers.google.com/maps/documentation/geocoding/>`_.
:param address: address string
:param components: ditc of components
:param region: region code specified as a ccTLD ("top-level domain")
two-character value, influences but not restricts query result
:param language: the language in which to return results. For full list
of laguages go to Google Maps API docs
:param bounds: two-tuple of (latitude, longitude) pairs of bounding
box. Influences but not restricts result (same as region parameter)
:param sensor: override default client sensor parameter
""" # noqa
parameters = dict(
address=address,
components=components,
language=language,
sensor=sensor,
region=region,
)
if bounds:
parameters['bounds'] = "%f,%f|%f,%f" % (
bounds[0][0], bounds[0][1], bounds[1][0], bounds[1][1])
return self._make_request(self.GEOCODE_URL, parameters, "results") | Geocode given address. Geocoder can queried using address and/or
components. Components when used with address will restrict your query
to specific area. When used without address they act like more precise
query. For full details see
`Google docs <https://developers.google.com/maps/documentation/geocoding/>`_.
:param address: address string
:param components: ditc of components
:param region: region code specified as a ccTLD ("top-level domain")
two-character value, influences but not restricts query result
:param language: the language in which to return results. For full list
of laguages go to Google Maps API docs
:param bounds: two-tuple of (latitude, longitude) pairs of bounding
box. Influences but not restricts result (same as region parameter)
:param sensor: override default client sensor parameter | entailment |
def reverse(self, lat, lon, result_type=None, location_type=None,
language=None, sensor=None):
"""Reverse geocode with given latitude and longitude.
:param lat: latitude of queried point
:param lon: longitude of queried point
:param result_type: list of result_type for filtered search.
Accepted values:
https://developers.google.com/maps/documentation/geocoding/intro#Types
**Important**: this feature may require using API key to work.
:param location_type: list of location_type for filtered search.
:param language: the language in which to return results. For full
list of laguages go to Google Maps API docs
:param sensor: override default client sensor parameter
.. note:: Google API allows to specify both latlng and address params
but it makes no sense and would not reverse geocode your query, so
here geocoding and reverse geocoding are separated
"""
parameters = dict(
latlng="%f,%f" % (lat, lon),
result_type=result_type,
location_type=location_type,
language=language,
sensor=sensor,
)
return self._make_request(self.GEOCODE_URL, parameters, "results") | Reverse geocode with given latitude and longitude.
:param lat: latitude of queried point
:param lon: longitude of queried point
:param result_type: list of result_type for filtered search.
Accepted values:
https://developers.google.com/maps/documentation/geocoding/intro#Types
**Important**: this feature may require using API key to work.
:param location_type: list of location_type for filtered search.
:param language: the language in which to return results. For full
list of laguages go to Google Maps API docs
:param sensor: override default client sensor parameter
.. note:: Google API allows to specify both latlng and address params
but it makes no sense and would not reverse geocode your query, so
here geocoding and reverse geocoding are separated | entailment |
def init_app(self, app):
"""Flask application initialization."""
self.init_config(app)
if hasattr(app, 'cli'):
app.cli.add_command(files_cmd)
app.extensions['invenio-files-rest'] = _FilesRESTState(app) | Flask application initialization. | entailment |
def check_sizelimit(size_limit, bytes_written, total_size):
"""Check if size limit was exceeded.
:param size_limit: The size limit.
:param bytes_written: The total number of bytes written.
:param total_size: The total file size.
:raises invenio_files_rest.errors.UnexpectedFileSizeError: If the bytes
written exceed the total size.
:raises invenio_files_rest.errors.FileSizeError: If the bytes
written are major than the limit size.
"""
if size_limit is not None and bytes_written > size_limit:
desc = 'File size limit exceeded.' \
if isinstance(size_limit, int) else size_limit.reason
raise FileSizeError(description=desc)
# Never write more than advertised
if total_size is not None and bytes_written > total_size:
raise UnexpectedFileSizeError(
description='File is bigger than expected.') | Check if size limit was exceeded.
:param size_limit: The size limit.
:param bytes_written: The total number of bytes written.
:param total_size: The total file size.
:raises invenio_files_rest.errors.UnexpectedFileSizeError: If the bytes
written exceed the total size.
:raises invenio_files_rest.errors.FileSizeError: If the bytes
written are major than the limit size. | entailment |
def send_file(self, filename, mimetype=None, restricted=True,
checksum=None, trusted=False, chunk_size=None,
as_attachment=False):
"""Send the file to the client."""
try:
fp = self.open(mode='rb')
except Exception as e:
raise StorageError('Could not send file: {}'.format(e))
try:
md5_checksum = None
if checksum:
algo, value = checksum.split(':')
if algo == 'md5':
md5_checksum = value
# Send stream is responsible for closing the file.
return send_stream(
fp,
filename,
self._size,
self._modified,
mimetype=mimetype,
restricted=restricted,
etag=checksum,
content_md5=md5_checksum,
chunk_size=chunk_size,
trusted=trusted,
as_attachment=as_attachment,
)
except Exception as e:
fp.close()
raise StorageError('Could not send file: {}'.format(e)) | Send the file to the client. | entailment |
def checksum(self, chunk_size=None, progress_callback=None, **kwargs):
"""Compute checksum of file."""
fp = self.open(mode='rb')
try:
value = self._compute_checksum(
fp, size=self._size, chunk_size=None,
progress_callback=progress_callback)
except StorageError:
raise
finally:
fp.close()
return value | Compute checksum of file. | entailment |
def copy(self, src, chunk_size=None, progress_callback=None):
"""Copy data from another file instance.
:param src: Source stream.
:param chunk_size: Chunk size to read from source stream.
"""
fp = src.open(mode='rb')
try:
return self.save(
fp, chunk_size=chunk_size, progress_callback=progress_callback)
finally:
fp.close() | Copy data from another file instance.
:param src: Source stream.
:param chunk_size: Chunk size to read from source stream. | entailment |
def _compute_checksum(self, stream, size=None, chunk_size=None,
progress_callback=None, **kwargs):
"""Get helper method to compute checksum from a stream.
Naive implementation that can be overwritten by subclasses in order to
provide more efficient implementation.
"""
if progress_callback and size:
progress_callback = partial(progress_callback, size)
else:
progress_callback = None
try:
algo, m = self._init_hash()
return compute_checksum(
stream, algo, m,
chunk_size=chunk_size,
progress_callback=progress_callback,
**kwargs
)
except Exception as e:
raise StorageError(
'Could not compute checksum of file: {0}'.format(e)) | Get helper method to compute checksum from a stream.
Naive implementation that can be overwritten by subclasses in order to
provide more efficient implementation. | entailment |
def _write_stream(self, src, dst, size=None, size_limit=None,
chunk_size=None, progress_callback=None):
"""Get helper to save stream from src to dest + compute checksum.
:param src: Source stream.
:param dst: Destination stream.
:param size: If provided, this exact amount of bytes will be
written to the destination file.
:param size_limit: ``FileSizeLimit`` instance to limit number of bytes
to write.
"""
chunk_size = chunk_size_or_default(chunk_size)
algo, m = self._init_hash()
bytes_written = 0
while 1:
# Check that size limits aren't bypassed
check_sizelimit(size_limit, bytes_written, size)
chunk = src.read(chunk_size)
if not chunk:
if progress_callback:
progress_callback(bytes_written, bytes_written)
break
dst.write(chunk)
bytes_written += len(chunk)
if m:
m.update(chunk)
if progress_callback:
progress_callback(None, bytes_written)
check_size(bytes_written, size)
return bytes_written, '{0}:{1}'.format(
algo, m.hexdigest()) if m else None | Get helper to save stream from src to dest + compute checksum.
:param src: Source stream.
:param dst: Destination stream.
:param size: If provided, this exact amount of bytes will be
written to the destination file.
:param size_limit: ``FileSizeLimit`` instance to limit number of bytes
to write. | entailment |
def upgrade():
"""Upgrade database."""
# table ObjectVersion: modify primary_key
if op.get_context().dialect.name == 'mysql':
Fk = 'fk_files_object_bucket_id_files_bucket'
op.execute(
'ALTER TABLE files_object '
'DROP FOREIGN KEY {0}, DROP PRIMARY KEY, '
'ADD PRIMARY KEY(version_id), '
'ADD FOREIGN KEY(bucket_id) '
'REFERENCES files_bucket(id)'.format(Fk))
else:
op.drop_constraint('pk_files_object', 'files_object', type_='primary')
op.create_primary_key(
'pk_files_object', 'files_object', ['version_id'])
op.create_unique_constraint(
'uq_files_object_bucket_id', 'files_object',
['bucket_id', 'version_id', 'key'])
# table ObjectVersionTag: create
op.create_table(
'files_objecttags',
sa.Column(
'version_id',
sqlalchemy_utils.types.uuid.UUIDType(),
nullable=False),
sa.Column(
'key',
sa.Text().with_variant(mysql.VARCHAR(255), 'mysql'),
nullable=False
),
sa.Column(
'value',
sa.Text().with_variant(mysql.VARCHAR(255), 'mysql'),
nullable=False
),
sa.PrimaryKeyConstraint('version_id', 'key'),
sa.ForeignKeyConstraint(
['version_id'],
[u'files_object.version_id'],
ondelete='CASCADE'),
) | Upgrade database. | entailment |
def downgrade():
"""Downgrade database."""
# table ObjectVersionTag
op.drop_table('files_objecttags')
# table ObjectVersion: modify primary_key
if op.get_context().dialect.name == 'mysql':
op.execute(
'ALTER TABLE files_object '
'DROP INDEX uq_files_object_bucket_id, '
'DROP PRIMARY KEY, '
'ADD PRIMARY KEY(`bucket_id`, `key`, `version_id`)')
else:
op.drop_constraint(
'pk_files_object', 'files_object', type_='primary')
op.create_primary_key('pk_files_object', 'files_object',
['bucket_id', 'key', 'version_id']) | Downgrade database. | entailment |
def unixtimestamp(datetime):
"""Get unix time stamp from that given datetime. If datetime
is not tzaware then it's assumed that it is UTC
"""
epoch = UTC.localize(datetime.utcfromtimestamp(0))
if not datetime.tzinfo:
dt = UTC.localize(datetime)
else:
dt = UTC.normalize(datetime)
delta = dt - epoch
return total_seconds(delta) | Get unix time stamp from that given datetime. If datetime
is not tzaware then it's assumed that it is UTC | entailment |
def timezone(self, lat, lon, datetime,
language=None, sensor=None):
"""Get time offset data for given location.
:param lat: Latitude of queried point
:param lon: Longitude of queried point
:param language: The language in which to return results. For full list
of laguages go to Google Maps API docs
:param datetime: Desired time. The Time Zone API uses the timestamp to
determine whether or not Daylight Savings should be applied.
datetime should be timezone aware. If it isn't the UTC timezone
is assumed.
:type datetime: datetime.datetime
:param sensor: Override default client sensor parameter
"""
parameters = dict(
location="%f,%f" % (lat, lon),
timestamp=unixtimestamp(datetime),
language=language,
sensor=sensor,
)
return self._make_request(self.TIMEZONE_URL, parameters, None) | Get time offset data for given location.
:param lat: Latitude of queried point
:param lon: Longitude of queried point
:param language: The language in which to return results. For full list
of laguages go to Google Maps API docs
:param datetime: Desired time. The Time Zone API uses the timestamp to
determine whether or not Daylight Savings should be applied.
datetime should be timezone aware. If it isn't the UTC timezone
is assumed.
:type datetime: datetime.datetime
:param sensor: Override default client sensor parameter | entailment |
def encode(locations):
"""
:param locations: locations list containig (lat, lon) two-tuples
:return: encoded polyline string
"""
encoded = (
(_encode_value(lat, prev_lat), _encode_value(lon, prev_lon))
for (prev_lat, prev_lon), (lat, lon)
in _iterate_with_previous(locations, first=(0, 0))
)
encoded = chain.from_iterable(encoded)
return ''.join(c for r in encoded for c in r) | :param locations: locations list containig (lat, lon) two-tuples
:return: encoded polyline string | entailment |
def upgrade():
"""Upgrade database."""
op.create_table(
'records_buckets',
sa.Column(
'record_id',
sqlalchemy_utils.types.uuid.UUIDType(),
nullable=False),
sa.Column(
'bucket_id',
sqlalchemy_utils.types.uuid.UUIDType(),
nullable=False),
sa.ForeignKeyConstraint(['bucket_id'], [u'files_bucket.id'], ),
sa.ForeignKeyConstraint(['record_id'], [u'records_metadata.id'], ),
sa.PrimaryKeyConstraint('record_id', 'bucket_id')
) | Upgrade database. | entailment |
def _writable(method):
"""Check that record is in defined status.
:param method: Method to be decorated.
:returns: Function decorated.
"""
@wraps(method)
def wrapper(self, *args, **kwargs):
"""Send record for indexing.
:returns: Execution result of the decorated method.
:raises InvalidOperationError: It occurs when the bucket is locked or
deleted.
"""
if self.bucket.locked or self.bucket.deleted:
raise InvalidOperationError()
return method(self, *args, **kwargs)
return wrapper | Check that record is in defined status.
:param method: Method to be decorated.
:returns: Function decorated. | entailment |
def get_version(self, version_id=None):
"""Return specific version ``ObjectVersion`` instance or HEAD.
:param version_id: Version ID of the object.
:returns: :class:`~invenio_files_rest.models.ObjectVersion` instance or
HEAD of the stored object.
"""
return ObjectVersion.get(bucket=self.obj.bucket, key=self.obj.key,
version_id=version_id) | Return specific version ``ObjectVersion`` instance or HEAD.
:param version_id: Version ID of the object.
:returns: :class:`~invenio_files_rest.models.ObjectVersion` instance or
HEAD of the stored object. | entailment |
def get(self, key, default=None):
"""Proxy to ``obj``.
:param key: Metadata key which holds the value.
:returns: Metadata value of the specified key or default.
"""
if hasattr(self.obj, key):
return getattr(self.obj, key)
return self.data.get(key, default) | Proxy to ``obj``.
:param key: Metadata key which holds the value.
:returns: Metadata value of the specified key or default. | entailment |
def dumps(self):
"""Create a dump of the metadata associated to the record."""
self.data.update({
'bucket': str(self.obj.bucket_id),
'checksum': self.obj.file.checksum,
'key': self.obj.key, # IMPORTANT it must stay here!
'size': self.obj.file.size,
'version_id': str(self.obj.version_id),
})
return self.data | Create a dump of the metadata associated to the record. | entailment |
def flush(self):
"""Flush changes to record."""
files = self.dumps()
# Do not create `_files` when there has not been `_files` field before
# and the record still has no files attached.
if files or '_files' in self.record:
self.record['_files'] = files | Flush changes to record. | entailment |
def sort_by(self, *ids):
"""Update files order.
:param ids: List of ids specifying the final status of the list.
"""
# Support sorting by file_ids or keys.
files = {str(f_.file_id): f_.key for f_ in self}
# self.record['_files'] = [{'key': files.get(id_, id_)} for id_ in ids]
self.filesmap = OrderedDict([
(files.get(id_, id_), self[files.get(id_, id_)].dumps())
for id_ in ids
])
self.flush() | Update files order.
:param ids: List of ids specifying the final status of the list. | entailment |
def rename(self, old_key, new_key):
"""Rename a file.
:param old_key: Old key that holds the object.
:param new_key: New key that will hold the object.
:returns: The object that has been renamed.
"""
assert new_key not in self
assert old_key != new_key
file_ = self[old_key]
old_data = self.filesmap[old_key]
# Create a new version with the new name
obj = ObjectVersion.create(
bucket=self.bucket, key=new_key,
_file_id=file_.obj.file_id
)
# Delete old key
self.filesmap[new_key] = self.file_cls(obj, old_data).dumps()
del self[old_key]
return obj | Rename a file.
:param old_key: Old key that holds the object.
:param new_key: New key that will hold the object.
:returns: The object that has been renamed. | entailment |
def dumps(self, bucket=None):
"""Serialize files from a bucket.
:param bucket: Instance of files
:class:`invenio_files_rest.models.Bucket`. (Default:
``self.bucket``)
:returns: List of serialized files.
"""
return [
self.file_cls(o, self.filesmap.get(o.key, {})).dumps()
for o in sorted_files_from_bucket(bucket or self.bucket, self.keys)
] | Serialize files from a bucket.
:param bucket: Instance of files
:class:`invenio_files_rest.models.Bucket`. (Default:
``self.bucket``)
:returns: List of serialized files. | entailment |
def files(self):
"""Get files iterator.
:returns: Files iterator.
"""
if self.model is None:
raise MissingModelError()
records_buckets = RecordsBuckets.query.filter_by(
record_id=self.id).first()
if not records_buckets:
bucket = self._create_bucket()
if not bucket:
return None
RecordsBuckets.create(record=self.model, bucket=bucket)
else:
bucket = records_buckets.bucket
return self.files_iter_cls(self, bucket=bucket, file_cls=self.file_cls) | Get files iterator.
:returns: Files iterator. | entailment |
def files(self, data):
"""Set files from data."""
current_files = self.files
if current_files:
raise RuntimeError('Can not update existing files.')
for key in data:
current_files[key] = data[key] | Set files from data. | entailment |
def delete(self, force=False):
"""Delete a record and also remove the RecordsBuckets if necessary.
:param force: True to remove also the
:class:`~invenio_records_files.models.RecordsBuckets` object.
:returns: Deleted record.
"""
if force:
RecordsBuckets.query.filter_by(
record=self.model,
bucket=self.files.bucket
).delete()
return super(Record, self).delete(force) | Delete a record and also remove the RecordsBuckets if necessary.
:param force: True to remove also the
:class:`~invenio_records_files.models.RecordsBuckets` object.
:returns: Deleted record. | entailment |
def create(cls, record, bucket):
"""Create a new RecordsBuckets and adds it to the session.
:param record: Record used to relate with the ``Bucket``.
:param bucket: Bucket used to relate with the ``Record``.
:returns: The :class:`~invenio_records_files.models.RecordsBuckets`
object created.
"""
rb = cls(record=record, bucket=bucket)
db.session.add(rb)
return rb | Create a new RecordsBuckets and adds it to the session.
:param record: Record used to relate with the ``Bucket``.
:param bucket: Bucket used to relate with the ``Record``.
:returns: The :class:`~invenio_records_files.models.RecordsBuckets`
object created. | entailment |
def sorted_files_from_bucket(bucket, keys=None):
"""Return files from bucket sorted by given keys.
:param bucket: :class:`~invenio_files_rest.models.Bucket` containing the
files.
:param keys: Keys order to be used.
:returns: Sorted list of bucket items.
"""
keys = keys or []
total = len(keys)
sortby = dict(zip(keys, range(total)))
values = ObjectVersion.get_by_bucket(bucket).all()
return sorted(values, key=lambda x: sortby.get(x.key, total)) | Return files from bucket sorted by given keys.
:param bucket: :class:`~invenio_files_rest.models.Bucket` containing the
files.
:param keys: Keys order to be used.
:returns: Sorted list of bucket items. | entailment |
def record_file_factory(pid, record, filename):
"""Get file from a record.
:param pid: Not used. It keeps the function signature.
:param record: Record which contains the files.
:param filename: Name of the file to be returned.
:returns: File object or ``None`` if not found.
"""
try:
if not (hasattr(record, 'files') and record.files):
return None
except MissingModelError:
return None
try:
return record.files[filename]
except KeyError:
return None | Get file from a record.
:param pid: Not used. It keeps the function signature.
:param record: Record which contains the files.
:param filename: Name of the file to be returned.
:returns: File object or ``None`` if not found. | entailment |
def file_download_ui(pid, record, _record_file_factory=None, **kwargs):
"""File download view for a given record.
Plug this method into your ``RECORDS_UI_ENDPOINTS`` configuration:
.. code-block:: python
RECORDS_UI_ENDPOINTS = dict(
recid=dict(
# ...
route='/records/<pid_value/files/<filename>',
view_imp='invenio_records_files.utils:file_download_ui',
record_class='invenio_records_files.api:Record',
)
)
If ``download`` is passed as a querystring argument, the file is sent as an
attachment.
:param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
instance.
:param record: The record metadata.
"""
_record_file_factory = _record_file_factory or record_file_factory
# Extract file from record.
fileobj = _record_file_factory(
pid, record, kwargs.get('filename')
)
if not fileobj:
abort(404)
obj = fileobj.obj
# Check permissions
ObjectResource.check_object_permission(obj)
# Send file.
return ObjectResource.send_object(
obj.bucket, obj,
expected_chksum=fileobj.get('checksum'),
logger_data={
'bucket_id': obj.bucket_id,
'pid_type': pid.pid_type,
'pid_value': pid.pid_value,
},
as_attachment=('download' in request.args)
) | File download view for a given record.
Plug this method into your ``RECORDS_UI_ENDPOINTS`` configuration:
.. code-block:: python
RECORDS_UI_ENDPOINTS = dict(
recid=dict(
# ...
route='/records/<pid_value/files/<filename>',
view_imp='invenio_records_files.utils:file_download_ui',
record_class='invenio_records_files.api:Record',
)
)
If ``download`` is passed as a querystring argument, the file is sent as an
attachment.
:param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
instance.
:param record: The record metadata. | entailment |
def default_bucket_link_factory(pid):
"""Factory for record bucket generation."""
try:
record = Record.get_record(pid.get_assigned_object())
bucket = record.files.bucket
return url_for('invenio_files_rest.bucket_api',
bucket_id=bucket.id, _external=True)
except AttributeError:
return None | Factory for record bucket generation. | entailment |
def flake(self, message):
"""Print an error message to stdout."""
self.stdout.write(str(message))
self.stdout.write('\n') | Print an error message to stdout. | entailment |
def node_name(node):
"""
Convenience function: Returns node.id, or node.name, or None
"""
return hasattr(node, 'id') and node.id or hasattr(node, 'name') and node.name | Convenience function: Returns node.id, or node.name, or None | entailment |
def names(self):
"""Return a list of the names referenced by this binding."""
names = []
if isinstance(self.source, ast.List):
for node in self.source.elts:
if isinstance(node, ast.Str):
names.append(node.s)
return names | Return a list of the names referenced by this binding. | entailment |
def unusedAssignments(self):
"""Return a generator for the assignments which have not been used."""
for name, binding in self.items():
if (not binding.used and name not in self.globals
and not self.uses_locals
and isinstance(binding, Assignment)):
yield name, binding | Return a generator for the assignments which have not been used. | entailment |
def check_plugins(self):
""" collect plugins from entry point 'frosted.plugins'
and run their check() method, passing the filename
"""
checkers = {}
for ep in pkg_resources.iter_entry_points(group='frosted.plugins'):
checkers.update({ep.name: ep.load()})
for plugin_name, plugin in checkers.items():
if self.filename != '(none)':
messages = plugin.check(self.filename)
for message, loc, args, kwargs in messages:
self.report(message, loc, *args, **kwargs) | collect plugins from entry point 'frosted.plugins'
and run their check() method, passing the filename | entailment |
def defer_function(self, callable):
"""Schedule a function handler to be called just before completion.
This is used for handling function bodies, which must be deferred because code later in the file might modify
the global scope. When 'callable' is called, the scope at the time this is called will be restored, however it
will contain any new bindings added to it.
"""
self._deferred_functions.append((callable, self.scope_stack[:], self.offset)) | Schedule a function handler to be called just before completion.
This is used for handling function bodies, which must be deferred because code later in the file might modify
the global scope. When 'callable' is called, the scope at the time this is called will be restored, however it
will contain any new bindings added to it. | entailment |
def defer_assignment(self, callable):
"""Schedule an assignment handler to be called just after deferred
function handlers."""
self._deferred_assignments.append((callable, self.scope_stack[:], self.offset)) | Schedule an assignment handler to be called just after deferred
function handlers. | entailment |
def run_deferred(self, deferred):
"""Run the callables in deferred using their associated scope stack."""
for handler, scope, offset in deferred:
self.scope_stack = scope
self.offset = offset
handler() | Run the callables in deferred using their associated scope stack. | entailment |
def check_dead_scopes(self):
"""Look at scopes which have been fully examined and report names in
them which were imported but unused."""
for scope in self.dead_scopes:
export = isinstance(scope.get('__all__'), ExportBinding)
if export:
all = scope['__all__'].names()
# Look for possible mistakes in the export list
if not scope.importStarred and os.path.basename(self.filename) != '__init__.py':
undefined = set(all) - set(scope)
for name in undefined:
self.report(messages.UndefinedExport, scope['__all__'].source, name)
else:
all = []
# Look for imported names that aren't used without checking imports in namespace definition
for importation in scope.values():
if isinstance(importation, Importation) and not importation.used and importation.name not in all:
self.report(messages.UnusedImport, importation.source, importation.name) | Look at scopes which have been fully examined and report names in
them which were imported but unused. | entailment |
def different_forks(self, lnode, rnode):
"""True, if lnode and rnode are located on different forks of
IF/TRY."""
ancestor = self.get_common_ancestor(lnode, rnode)
if isinstance(ancestor, ast.If):
for fork in (ancestor.body, ancestor.orelse):
if self.on_fork(ancestor, lnode, rnode, fork):
return True
elif isinstance(ancestor, ast.Try):
body = ancestor.body + ancestor.orelse
for fork in [body] + [[hdl] for hdl in ancestor.handlers]:
if self.on_fork(ancestor, lnode, rnode, fork):
return True
elif isinstance(ancestor, ast.TryFinally):
if self.on_fork(ancestor, lnode, rnode, ancestor.body):
return True
return False | True, if lnode and rnode are located on different forks of
IF/TRY. | entailment |
def add_binding(self, node, value, report_redef=True):
"""Called when a binding is altered.
- `node` is the statement responsible for the change
- `value` is the optional new value, a Binding instance, associated
with the binding; if None, the binding is deleted if it exists.
- if `report_redef` is True (default), rebinding while unused will be
reported.
"""
redefinedWhileUnused = False
if not isinstance(self.scope, ClassScope):
for scope in self.scope_stack[::-1]:
existing = scope.get(value.name)
if (isinstance(existing, Importation)
and not existing.used
and (not isinstance(value, Importation) or
value.fullName == existing.fullName)
and report_redef
and not self.different_forks(node, existing.source)):
redefinedWhileUnused = True
self.report(messages.RedefinedWhileUnused,
node, value.name, existing.source)
existing = self.scope.get(value.name)
if not redefinedWhileUnused and self.has_parent(value.source, ast.ListComp):
if (existing and report_redef
and not self.has_parent(existing.source, (ast.For, ast.ListComp))
and not self.different_forks(node, existing.source)):
self.report(messages.RedefinedInListComp,
node, value.name, existing.source)
if (isinstance(existing, Definition)
and not existing.used
and not self.different_forks(node, existing.source)):
self.report(messages.RedefinedWhileUnused,
node, value.name, existing.source)
else:
self.scope[value.name] = value | Called when a binding is altered.
- `node` is the statement responsible for the change
- `value` is the optional new value, a Binding instance, associated
with the binding; if None, the binding is deleted if it exists.
- if `report_redef` is True (default), rebinding while unused will be
reported. | entailment |
def is_docstring(self, node):
"""Determine if the given node is a docstring, as long as it is at the
correct place in the node tree."""
return isinstance(node, ast.Str) or (isinstance(node, ast.Expr) and
isinstance(node.value, ast.Str)) | Determine if the given node is a docstring, as long as it is at the
correct place in the node tree. | entailment |
def find_return_with_argument(self, node):
"""Finds and returns a return statment that has an argument.
Note that we should use node.returns in Python 3, but this method is never called in Python 3 so we don't bother
checking.
"""
for item in node.body:
if isinstance(item, ast.Return) and item.value:
return item
elif not isinstance(item, ast.FunctionDef) and hasattr(item, 'body'):
return_with_argument = self.find_return_with_argument(item)
if return_with_argument:
return return_with_argument | Finds and returns a return statment that has an argument.
Note that we should use node.returns in Python 3, but this method is never called in Python 3 so we don't bother
checking. | entailment |
def is_generator(self, node):
"""Checks whether a function is a generator by looking for a yield
statement or expression."""
if not isinstance(node.body, list):
# lambdas can not be generators
return False
for item in node.body:
if isinstance(item, (ast.Assign, ast.Expr)):
if isinstance(item.value, ast.Yield):
return True
elif not isinstance(item, ast.FunctionDef) and hasattr(item, 'body'):
if self.is_generator(item):
return True
return False | Checks whether a function is a generator by looking for a yield
statement or expression. | entailment |
def GLOBAL(self, node):
"""Keep track of globals declarations."""
if isinstance(self.scope, FunctionScope):
self.scope.globals.update(node.names) | Keep track of globals declarations. | entailment |
def FOR(self, node):
"""Process bindings for loop variables."""
vars = []
def collectLoopVars(n):
if isinstance(n, ast.Name):
vars.append(n.id)
elif isinstance(n, ast.expr_context):
return
else:
for c in ast.iter_child_nodes(n):
collectLoopVars(c)
collectLoopVars(node.target)
for varn in vars:
if (isinstance(self.scope.get(varn), Importation)
# unused ones will get an unused import warning
and self.scope[varn].used):
self.report(messages.ImportShadowedByLoopVar,
node, varn, self.scope[varn].source)
self.handle_children(node) | Process bindings for loop variables. | entailment |
def NAME(self, node):
"""Handle occurrence of Name (which can be a load/store/delete
access.)"""
# Locate the name in locals / function / globals scopes.
if isinstance(node.ctx, (ast.Load, ast.AugLoad)):
self.handle_node_load(node)
if (node.id == 'locals' and isinstance(self.scope, FunctionScope)
and isinstance(node.parent, ast.Call)):
# we are doing locals() call in current scope
self.scope.uses_locals = True
elif isinstance(node.ctx, (ast.Store, ast.AugStore)):
self.handle_node_store(node)
elif isinstance(node.ctx, ast.Del):
self.handle_node_delete(node)
else:
# must be a Param context -- this only happens for names in function
# arguments, but these aren't dispatched through here
raise RuntimeError("Got impossible expression context: %r" % (node.ctx,)) | Handle occurrence of Name (which can be a load/store/delete
access.) | entailment |
def CLASSDEF(self, node):
"""Check names used in a class definition, including its decorators,
base classes, and the body of its definition.
Additionally, add its name to the current scope.
"""
for deco in node.decorator_list:
self.handleNode(deco, node)
for baseNode in node.bases:
self.handleNode(baseNode, node)
if not PY2:
for keywordNode in node.keywords:
self.handleNode(keywordNode, node)
self.push_scope(ClassScope)
if self.settings.get('run_doctests', False):
self.defer_function(lambda: self.handle_doctests(node))
for stmt in node.body:
self.handleNode(stmt, node)
self.pop_scope()
self.add_binding(node, ClassDefinition(node.name, node)) | Check names used in a class definition, including its decorators,
base classes, and the body of its definition.
Additionally, add its name to the current scope. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.