code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
from .models import FileInstance, ObjectVersion
def create_file(key, path):
key = key_prefix + key
if checksum:
file_checksum = compute_md5_checksum(
open(path, 'rb'), chunk_size=chunk_size)
file_instance = FileInstance.query.filter_by(
checksum=file_checksum, size=os.path.getsize(path)
).first()
if file_instance:
return ObjectVersion.create(
bucket, key, _file_id=file_instance.id
)
return ObjectVersion.create(bucket, key, stream=open(path, 'rb'))
if os.path.isfile(source):
yield create_file(os.path.basename(source), source)
else:
for root, dirs, files in os.walk(source, topdown=False):
for name in files:
filename = os.path.join(root, name)
assert filename.startswith(source)
parts = [p for p in filename[len(source):].split(os.sep) if p]
yield create_file('/'.join(parts), os.path.join(root, name)) | def populate_from_path(bucket, source, checksum=True, key_prefix='',
chunk_size=None) | Populate a ``bucket`` from all files in path.
:param bucket: The bucket (instance or id) to create the object in.
:param source: The file or directory path.
:param checksum: If ``True`` then a MD5 checksum will be computed for each
file. (Default: ``True``)
:param key_prefix: The key prefix for the bucket.
:param chunk_size: Chunk size to read from file.
:returns: A iterator for all
:class:`invenio_files_rest.models.ObjectVersion` instances. | 2.483724 | 2.407603 | 1.031617 |
# COMMAND LINE OPTIONS
self.wipe = options.get("wipe")
self.test_run = options.get("test_run")
self.quiet = options.get("test_run")
self.container_name = options.get("container")
self.verbosity = int(options.get("verbosity"))
self.syncmedia = options.get("syncmedia")
self.syncstatic = options.get("syncstatic")
if self.test_run:
self.verbosity = 2
cli_includes = options.get("includes")
cli_excludes = options.get("excludes")
# CUMULUS CONNECTION AND SETTINGS FROM SETTINGS.PY
if self.syncmedia and self.syncstatic:
raise CommandError("options --media and --static are mutually exclusive")
if not self.container_name:
if self.syncmedia:
self.container_name = CUMULUS["CONTAINER"]
elif self.syncstatic:
self.container_name = CUMULUS["STATIC_CONTAINER"]
else:
raise CommandError("must select one of the required options, either --media or --static")
settings_includes = CUMULUS["INCLUDE_LIST"]
settings_excludes = CUMULUS["EXCLUDE_LIST"]
# PATH SETTINGS
if self.syncmedia:
self.file_root = os.path.abspath(settings.MEDIA_ROOT)
self.file_url = settings.MEDIA_URL
elif self.syncstatic:
self.file_root = os.path.abspath(settings.STATIC_ROOT)
self.file_url = settings.STATIC_URL
if not self.file_root.endswith("/"):
self.file_root = self.file_root + "/"
if self.file_url.startswith("/"):
self.file_url = self.file_url[1:]
# SYNCSTATIC VARS
# combine includes and excludes from the cli and django settings file
self.includes = list(set(cli_includes + settings_includes))
self.excludes = list(set(cli_excludes + settings_excludes))
# transform glob patterns to regular expressions
self.local_filenames = []
self.create_count = 0
self.upload_count = 0
self.update_count = 0
self.skip_count = 0
self.delete_count = 0 | def set_options(self, options) | Sets instance variables based on an options dict | 2.726393 | 2.692746 | 1.012495 |
cloud_objs = [cloud_obj.name for cloud_obj in self.container.get_objects()]
includes_pattern = r"|".join([fnmatch.translate(x) for x in includes])
excludes_pattern = r"|".join([fnmatch.translate(x) for x in excludes]) or r"$."
excludes = [o for o in cloud_objs if re.match(excludes_pattern, o)]
includes = [o for o in cloud_objs if re.match(includes_pattern, o)]
return [o for o in includes if o not in excludes] | def match_cloud(self, includes, excludes) | Returns the cloud objects that match the include and exclude patterns. | 2.226495 | 2.101401 | 1.059529 |
includes_pattern = r"|".join([fnmatch.translate(x) for x in includes])
excludes_pattern = r"|".join([fnmatch.translate(x) for x in excludes]) or r"$."
matches = []
for root, dirs, files in os.walk(prefix, topdown=True):
# exclude dirs
dirs[:] = [os.path.join(root, d) for d in dirs]
dirs[:] = [d for d in dirs if not re.match(excludes_pattern,
d.split(root)[1])]
# exclude/include files
files = [os.path.join(root, f) for f in files]
files = [os.path.join(root, f) for f in files
if not re.match(excludes_pattern, f)]
files = [os.path.join(root, f) for f in files
if re.match(includes_pattern, f.split(prefix)[1])]
for fname in files:
matches.append(fname)
return matches | def match_local(self, prefix, includes, excludes) | Filters os.walk() with include and exclude patterns.
See: http://stackoverflow.com/a/5141829/93559 | 1.943975 | 1.894721 | 1.025995 |
for relpath in relpaths:
abspath = [p for p in abspaths if p[len(self.file_root):] == relpath][0]
cloud_datetime = remote_objects[relpath] if relpath in remote_objects else None
local_datetime = datetime.datetime.utcfromtimestamp(os.stat(abspath).st_mtime)
if cloud_datetime and local_datetime < cloud_datetime:
self.skip_count += 1
if not self.quiet:
print("Skipped {0}: not modified.".format(relpath))
continue
if relpath in remote_objects:
self.update_count += 1
else:
self.create_count += 1
self.upload_file(abspath, relpath) | def upload_files(self, abspaths, relpaths, remote_objects) | Determines files to be uploaded and call ``upload_file`` on each. | 2.652157 | 2.61664 | 1.013573 |
if not self.test_run:
content = open(abspath, "rb")
content_type = get_content_type(cloud_filename, content)
headers = get_headers(cloud_filename, content_type)
if headers.get("Content-Encoding") == "gzip":
content = get_gzipped_contents(content)
size = content.size
else:
size = os.stat(abspath).st_size
self.container.create(
obj_name=cloud_filename,
data=content,
content_type=content_type,
content_length=size,
content_encoding=headers.get("Content-Encoding", None),
headers=headers,
ttl=CUMULUS["FILE_TTL"],
etag=None,
)
self.upload_count += 1
if not self.quiet or self.verbosity > 1:
print("Uploaded: {0}".format(cloud_filename)) | def upload_file(self, abspath, cloud_filename) | Uploads a file to the container. | 3.150072 | 3.092545 | 1.018602 |
for cloud_obj in cloud_objs:
if cloud_obj not in relpaths:
if not self.test_run:
self.delete_cloud_obj(cloud_obj)
self.delete_count += 1
if not self.quiet or self.verbosity > 1:
print("Deleted: {0}".format(cloud_obj)) | def delete_extra_files(self, relpaths, cloud_objs) | Deletes any objects from the container that do not exist locally. | 3.262806 | 3.0926 | 1.055036 |
self._connection.delete_object(
container=self.container_name,
obj=cloud_obj,
) | def delete_cloud_obj(self, cloud_obj) | Deletes an object from the container. | 4.585221 | 3.802155 | 1.205953 |
if self.test_run:
print("Wipe would delete {0} objects.".format(len(self.container.object_count)))
else:
if not self.quiet or self.verbosity > 1:
print("Deleting {0} objects...".format(len(self.container.object_count)))
self._connection.delete_all_objects() | def wipe_container(self) | Completely wipes out the contents of the container. | 4.434896 | 4.274577 | 1.037505 |
self.update_count = self.upload_count - self.create_count
if self.test_run:
print("Test run complete with the following results:")
print("Skipped {0}. Created {1}. Updated {2}. Deleted {3}.".format(
self.skip_count, self.create_count, self.update_count, self.delete_count)) | def print_tally(self) | Prints the final tally to stdout. | 4.04892 | 3.757616 | 1.077524 |
self._connection = Auth()._get_connection()
if len(args) == 0:
containers = self._connection.list_containers()
if not containers:
print("No containers were found for this account.")
elif len(args) == 1:
containers = self._connection.list_container_object_names(args[0])
if not containers:
print("No matching container found.")
else:
raise CommandError("Pass one and only one [container_name] as an argument")
for container in containers:
print(container) | def handle(self, *args, **options) | Lists all the items in a container to stdout. | 3.517467 | 3.131075 | 1.123406 |
current_task.update_state(
state=state('PROGRESS'),
meta=dict(size=size, total=total)
) | def progress_updater(size, total) | Progress reporter for checksum verification. | 7.234953 | 7.439198 | 0.972545 |
f = FileInstance.query.get(uuid.UUID(file_id))
# Anything might happen during the task, so being pessimistic and marking
# the file as unchecked is a reasonable precaution
if pessimistic:
f.clear_last_check()
db.session.commit()
f.verify_checksum(
progress_callback=progress_updater, chunk_size=chunk_size,
throws=throws, checksum_kwargs=checksum_kwargs)
db.session.commit() | def verify_checksum(file_id, pessimistic=False, chunk_size=None, throws=True,
checksum_kwargs=None) | Verify checksum of a file instance.
:param file_id: The file ID. | 5.936801 | 6.323242 | 0.938886 |
location = Location.get_by_name(location_name)
f_src = FileInstance.get(src_id)
# Create destination
f_dst = FileInstance.create()
db.session.commit()
try:
# Copy contents
f_dst.copy_contents(
f_src,
progress_callback=progress_updater,
default_location=location.uri,
)
db.session.commit()
except Exception:
# Remove destination file instance if an error occurred.
db.session.delete(f_dst)
db.session.commit()
raise
# Update all objects pointing to file.
ObjectVersion.relink_all(f_src, f_dst)
db.session.commit()
# Start a fixity check
if post_fixity_check:
verify_checksum.delay(str(f_dst.id)) | def migrate_file(src_id, location_name, post_fixity_check=False) | Task to migrate a file instance to a new location.
.. note:: If something goes wrong during the content copy, the destination
file instance is removed.
:param src_id: The :class:`invenio_files_rest.models.FileInstance` ID.
:param location_name: Where to migrate the file.
:param post_fixity_check: Verify checksum after migration.
(Default: ``False``) | 3.870482 | 3.688855 | 1.049237 |
try:
# First remove FileInstance from database and commit transaction to
# ensure integrity constraints are checked and enforced.
f = FileInstance.get(file_id)
if not f.writable:
return
f.delete()
db.session.commit()
# Next, remove the file on disk. This leaves the possibility of having
# a file on disk dangling in case the database removal works, and the
# disk file removal doesn't work.
f.storage().delete()
except IntegrityError:
if not silent:
raise | def remove_file_data(file_id, silent=True) | Remove file instance and associated data.
:param file_id: The :class:`invenio_files_rest.models.FileInstance` ID.
:param silent: It stops propagation of a possible arised IntegrityError
exception. (Default: ``True``)
:raises sqlalchemy.exc.IntegrityError: Raised if the database removal goes
wrong and silent is set to ``False``. | 9.168739 | 7.694283 | 1.19163 |
mp = MultipartObject.query.filter_by(upload_id=upload_id).one_or_none()
if not mp:
raise RuntimeError('Upload ID does not exists.')
if not mp.completed:
raise RuntimeError('MultipartObject is not completed.')
try:
obj = mp.merge_parts(
version_id=version_id,
progress_callback=progress_updater
)
db.session.commit()
return str(obj.version_id)
except Exception:
db.session.rollback()
raise | def merge_multipartobject(upload_id, version_id=None) | Merge multipart object.
:param upload_id: The :class:`invenio_files_rest.models.MultipartObject`
upload ID.
:param version_id: Optionally you can define which file version.
(Default: ``None``)
:returns: The :class:`invenio_files_rest.models.ObjectVersion` version
ID. | 3.392981 | 3.341109 | 1.015525 |
delta = current_app.config['FILES_REST_MULTIPART_EXPIRES']
expired_dt = datetime.utcnow() - delta
file_ids = []
for mp in MultipartObject.query_expired(expired_dt):
file_ids.append(str(mp.file_id))
mp.delete()
for fid in file_ids:
remove_file_data.delay(fid) | def remove_expired_multipartobjects() | Remove expired multipart objects. | 3.744184 | 3.469812 | 1.079074 |
# Either the FileInstance needs to be specified or all filestorage
# class parameters need to be specified
assert fileinstance or (fileurl and size)
if fileinstance:
# FIXME: Code here should be refactored since it assumes a lot on the
# directory structure where the file instances are written
fileurl = None
size = fileinstance.size
modified = fileinstance.updated
if fileinstance.uri:
# Use already existing URL.
fileurl = fileinstance.uri
else:
assert default_location
# Generate a new URL.
fileurl = make_path(
default_location,
str(fileinstance.id),
'data',
current_app.config['FILES_REST_STORAGE_PATH_DIMENSIONS'],
current_app.config['FILES_REST_STORAGE_PATH_SPLIT_LENGTH'],
)
return filestorage_class(
fileurl, size=size, modified=modified, clean_dir=clean_dir) | def pyfs_storage_factory(fileinstance=None, default_location=None,
default_storage_class=None,
filestorage_class=PyFSFileStorage, fileurl=None,
size=None, modified=None, clean_dir=True) | Get factory function for creating a PyFS file storage instance. | 5.101553 | 5.240033 | 0.973573 |
filedir = dirname(self.fileurl)
filename = basename(self.fileurl)
return (
opener.opendir(filedir, writeable=True, create_dir=create_dir),
filename
) | def _get_fs(self, create_dir=True) | Return tuple with filesystem and filename. | 6.16412 | 4.877217 | 1.26386 |
fs, path = self._get_fs()
return fs.open(path, mode=mode) | def open(self, mode='rb') | Open file.
The caller is responsible for closing the file. | 6.03024 | 6.40746 | 0.941128 |
fs, path = self._get_fs(create_dir=False)
if fs.exists(path):
fs.remove(path)
if self.clean_dir and fs.exists('.'):
fs.removedir('.')
return True | def delete(self) | Delete a file.
The base directory is also removed, as it is assumed that only one file
exists in the directory. | 6.590318 | 5.940323 | 1.109421 |
fs, path = self._get_fs()
# Required for reliably opening the file on certain file systems:
if fs.exists(path):
fp = fs.open(path, mode='r+b')
else:
fp = fs.open(path, mode='wb')
try:
fp.truncate(size)
except Exception:
fp.close()
self.delete()
raise
finally:
fp.close()
self._size = size
return self.fileurl, size, None | def initialize(self, size=0) | Initialize file on storage and truncate to given size. | 5.064695 | 4.212589 | 1.202276 |
fp = self.open(mode='wb')
try:
bytes_written, checksum = self._write_stream(
incoming_stream, fp, chunk_size=chunk_size,
progress_callback=progress_callback,
size_limit=size_limit, size=size)
except Exception:
fp.close()
self.delete()
raise
finally:
fp.close()
self._size = bytes_written
return self.fileurl, bytes_written, checksum | def save(self, incoming_stream, size_limit=None, size=None,
chunk_size=None, progress_callback=None) | Save file in the file system. | 2.98097 | 3.037195 | 0.981488 |
fp = self.open(mode='r+b')
try:
fp.seek(seek)
bytes_written, checksum = self._write_stream(
incoming_stream, fp, chunk_size=chunk_size,
size=size, progress_callback=progress_callback)
finally:
fp.close()
return bytes_written, checksum | def update(self, incoming_stream, seek=0, size=None, chunk_size=None,
progress_callback=None) | Update a file in the file system. | 2.737505 | 2.751426 | 0.99494 |
need_class = _action2need_map[action]
if obj is None:
return Permission(need_class(None))
arg = None
if isinstance(obj, Bucket):
arg = str(obj.id)
elif isinstance(obj, ObjectVersion):
arg = str(obj.bucket_id)
elif isinstance(obj, MultipartObject):
arg = str(obj.bucket_id)
else:
raise RuntimeError('Unknown object')
return Permission(need_class(arg)) | def permission_factory(obj, action) | Get default permission factory.
:param obj: An instance of :class:`invenio_files_rest.models.Bucket` or
:class:`invenio_files_rest.models.ObjectVersion` or
:class:`invenio_files_rest.models.MultipartObject` or ``None`` if
the action is global.
:param action: The required action.
:raises RuntimeError: If the object is unknown.
:returns: A :class:`invenio_access.permissions.Permission` instance. | 3.793718 | 2.96004 | 1.281644 |
if isinstance(self.storage, CumulusStorage):
if self.storage.exists(prefixed_path):
try:
etag = self.storage._get_object(prefixed_path).etag
digest = "{0}".format(hashlib.md5(source_storage.open(path).read()).hexdigest())
if etag == digest:
self.log(u"Skipping '{0}' (not modified based on file hash)".format(path))
return False
except:
raise
return super(Command, self).delete_file(path, prefixed_path, source_storage) | def delete_file(self, path, prefixed_path, source_storage) | Checks if the target file should be deleted if it already exists | 3.772309 | 3.643845 | 1.035255 |
srcroot = dirname(dirname(__file__))
d = current_app.config['DATADIR']
if exists(d):
shutil.rmtree(d)
makedirs(d)
# Clear data
Part.query.delete()
MultipartObject.query.delete()
ObjectVersion.query.delete()
Bucket.query.delete()
FileInstance.query.delete()
Location.query.delete()
db.session.commit()
# Create location
loc = Location(name='local', uri=d, default=True)
db.session.add(loc)
db.session.commit()
# Bucket 0
b1 = Bucket.create(loc)
b1.id = '00000000-0000-0000-0000-000000000000'
for f in ['README.rst', 'LICENSE']:
with open(join(srcroot, f), 'rb') as fp:
ObjectVersion.create(b1, f, stream=fp)
# Bucket 1
b2 = Bucket.create(loc)
b2.id = '11111111-1111-1111-1111-111111111111'
k = 'AUTHORS.rst'
with open(join(srcroot, 'CHANGES.rst'), 'rb') as fp:
ObjectVersion.create(b2, k, stream=fp)
with open(join(srcroot, 'AUTHORS.rst'), 'rb') as fp:
ObjectVersion.create(b2, k, stream=fp)
k = 'RELEASE-NOTES.rst'
with open(join(srcroot, 'RELEASE-NOTES.rst'), 'rb') as fp:
ObjectVersion.create(b2, k, stream=fp)
with open(join(srcroot, 'CHANGES.rst'), 'rb') as fp:
ObjectVersion.create(b2, k, stream=fp)
ObjectVersion.delete(b2.id, k)
# Bucket 2
b2 = Bucket.create(loc)
b2.id = '22222222-2222-2222-2222-222222222222'
db.session.commit() | def files() | Load files. | 1.965539 | 1.950642 | 1.007637 |
from .models import Bucket
bucket = Bucket.create()
db.session.commit()
click.secho(str(bucket), fg='green') | def touch() | Create new bucket. | 6.06131 | 5.00192 | 1.211797 |
from .models import Bucket
from .helpers import populate_from_path
for object_version in populate_from_path(
Bucket.get(bucket), source, checksum=checksum,
key_prefix=key_prefix):
click.secho(str(object_version))
db.session.commit() | def cp(source, bucket, checksum, key_prefix) | Create new bucket from all files in directory. | 5.567531 | 5.330032 | 1.044559 |
from .models import Location
location = Location(name=name, uri=uri, default=default)
db.session.add(location)
db.session.commit()
click.secho(str(location), fg='green') | def location(name, uri, default) | Create new location. | 2.951721 | 2.674572 | 1.103624 |
if hasattr(content, "content_type"):
content_type = content.content_type
else:
mime_type, encoding = mimetypes.guess_type(name)
content_type = mime_type
return content_type | def get_content_type(name, content) | Checks if the content_type is already set.
Otherwise uses the mimetypes library to guess. | 2.193051 | 2.018637 | 1.086402 |
if headers is None:
headers = {}
# don't set headers on directories
content_type = getattr(cloud_obj, "content_type", None)
if content_type == "application/directory":
return
matched_headers = {}
for pattern, pattern_headers in header_patterns:
if pattern.match(cloud_obj.name):
matched_headers.update(pattern_headers.copy())
# preserve headers already set
matched_headers.update(cloud_obj.headers)
# explicitly set headers overwrite matches and already set headers
matched_headers.update(headers)
if matched_headers != cloud_obj.headers:
cloud_obj.headers = matched_headers
cloud_obj.sync_metadata() | def sync_headers(cloud_obj, headers=None, header_patterns=HEADER_PATTERNS) | Overwrites the given cloud_obj's headers with the ones given as ``headers`
and adds additional headers as defined in the HEADERS setting depending on
the cloud_obj's file name. | 3.250995 | 3.200446 | 1.015795 |
zbuf = StringIO()
zfile = GzipFile(mode="wb", compresslevel=6, fileobj=zbuf)
zfile.write(input_file.read())
zfile.close()
return ContentFile(zbuf.getvalue()) | def get_gzipped_contents(input_file) | Returns a gzipped version of a previously opened file's buffer. | 2.422724 | 2.27457 | 1.065135 |
item_class = context.get('class')
return (
serializer_mapping[item_class] if item_class else BaseSchema,
context.get('many', False)
) | def schema_from_context(context) | Determine which schema to use. | 6.601978 | 6.295021 | 1.048762 |
assert max_rounds > 0
def _whitespace_waiting():
current = 0
while current < max_rounds and current != -1:
if task_result.ready():
# Task is done and we return
current = -1
if task_result.successful():
yield content
else:
yield FilesException(
description='Job failed.'
).get_body()
else:
# Send whitespace to prevent connection from closing.
current += 1
sleep(interval)
yield b' '
# Timed-out reached
if current == max_rounds:
yield FilesException(
description='Job timed out.'
).get_body()
return _whitespace_waiting() | def wait_for_taskresult(task_result, content, interval, max_rounds) | Get helper to wait for async task result to finish.
The task will periodically send whitespace to prevent the connection from
being closed.
:param task_result: The async task to wait for.
:param content: The content to return when the task is ready.
:param interval: The duration of a sleep period before check again if the
task is ready.
:param max_rounds: The maximum number of intervals the function check
before returning an Exception.
:returns: An iterator on the content or a
:class:`invenio_files_rest.errors.FilesException` exception if the
timeout happened or the job failed. | 6.138924 | 4.337175 | 1.41542 |
schema_class, many = schema_from_context(context or {})
if data is not None:
# Generate JSON response
data = json.dumps(
schema_class(context=context).dump(data, many=many).data,
**_format_args()
)
interval = current_app.config['FILES_REST_TASK_WAIT_INTERVAL']
max_rounds = int(
current_app.config['FILES_REST_TASK_WAIT_MAX_SECONDS'] // interval
)
response = current_app.response_class(
# Stream response if waiting for task result.
data if task_result is None else wait_for_taskresult(
task_result, data, interval, max_rounds, ),
mimetype='application/json'
)
else:
response = current_app.response_class(mimetype='application/json')
response.status_code = code
if headers is not None:
response.headers.extend(headers)
if etag:
response.set_etag(etag)
return response | def json_serializer(data=None, code=200, headers=None, context=None,
etag=None, task_result=None) | Build a json flask response using the given data.
:param data: The data to serialize. (Default: ``None``)
:param code: The HTTP status code. (Default: ``200``)
:param headers: The HTTP headers to include. (Default: ``None``)
:param context: The schema class context. (Default: ``None``)
:param etag: The ETag header. (Default: ``None``)
:param task_result: Optionally you can pass async task to wait for.
(Default: ``None``)
:returns: A Flask response with json data.
:rtype: :py:class:`flask.Response` | 3.68194 | 3.806204 | 0.967352 |
return {
'self': url_for('.bucket_api', bucket_id=o.id, _external=True),
'versions': url_for(
'.bucket_api', bucket_id=o.id, _external=True) + '?versions',
'uploads': url_for(
'.bucket_api', bucket_id=o.id, _external=True) + '?uploads',
} | def dump_links(self, o) | Dump links. | 2.515575 | 2.3994 | 1.048419 |
params = {'versionId': o.version_id}
data = {
'self': url_for(
'.object_api',
bucket_id=o.bucket_id,
key=o.key,
_external=True,
**(params if not o.is_head or o.deleted else {})
),
'version': url_for(
'.object_api',
bucket_id=o.bucket_id,
key=o.key,
_external=True,
**params
)
}
if o.is_head and not o.deleted:
data.update({'uploads': url_for(
'.object_api',
bucket_id=o.bucket_id,
key=o.key,
_external=True
) + '?uploads', })
return data | def dump_links(self, o) | Dump links. | 2.686412 | 2.602072 | 1.032413 |
if not many:
return data
else:
data = {'contents': data}
bucket = self.context.get('bucket')
if bucket:
data.update(BucketSchema().dump(bucket).data)
return data | def wrap(self, data, many) | Wrap response in envelope. | 5.82058 | 5.80568 | 1.002566 |
links = {
'self': url_for(
'.object_api',
bucket_id=o.bucket_id,
key=o.key,
uploadId=o.upload_id,
_external=True,
),
'object': url_for(
'.object_api',
bucket_id=o.bucket_id,
key=o.key,
_external=True,
),
}
version_id = self.context.get('object_version_id')
if version_id:
links.update({
'object_version': url_for(
'.object_api',
bucket_id=o.bucket_id,
key=o.key,
versionId=version_id,
_external=True,
)
})
bucket = self.context.get('bucket')
if bucket:
links.update({
'bucket': url_for(
'.bucket_api',
bucket_id=o.bucket_id,
_external=True,
)
})
return links | def dump_links(self, o) | Dump links. | 1.856442 | 1.848529 | 1.004281 |
if not many:
return data
else:
data = {'parts': data}
multipart = self.context.get('multipart')
if multipart:
data.update(MultipartObjectSchema(context={
'bucket': multipart.bucket}).dump(multipart).data)
return data | def wrap(self, data, many) | Wrap response in envelope. | 6.670354 | 6.791082 | 0.982223 |
# noqa
if optimize_waypoints:
waypoints.insert(0, "optimize:true")
parameters = dict(
origin=self.assume_latlon_or_address(origin),
destination=self.assume_latlon_or_address(destination),
mode=mode,
alternatives=alternatives,
waypoints=waypoints or [],
avoid=avoid,
language=language,
units=units,
region=region,
departure_time=departure_time,
arrival_time=arrival_time,
sensor=sensor,
)
return self._make_request(self.DIRECTIONS_URL, parameters, "routes") | def directions(self, origin, destination, mode=None, alternatives=None,
waypoints=None, optimize_waypoints=False,
avoid=None, language=None, units=None,
region=None, departure_time=None,
arrival_time=None, sensor=None) | Get directions between locations
:param origin: Origin location - string address; (latitude, longitude)
two-tuple, dict with ("lat", "lon") keys or object with (lat, lon)
attributes
:param destination: Destination location - type same as origin
:param mode: Travel mode as string, defaults to "driving".
See `google docs details <https://developers.google.com/maps/documentation/directions/#TravelModes>`_
:param alternatives: True if provide it has to return more then one
route alternative
:param waypoints: Iterable with set of intermediate stops,
like ("Munich", "Dallas")
See `google docs details <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_
:param optimize_waypoints: if true will attempt to re-order supplied
waypoints to minimize overall cost of the route. If waypoints are
optimized, the route returned will show the optimized order under
"waypoint_order". See `google docs details <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_
:param avoid: Iterable with set of restrictions,
like ("tolls", "highways"). For full list refer to
`google docs details <https://developers.google.com/maps/documentation/directions/#Restrictions>`_
:param language: The language in which to return results.
See `list of supported languages <https://developers.google.com/maps/faq#languagesupport>`_
:param units: Unit system for result. Defaults to unit system of
origin's country.
See `google docs details <https://developers.google.com/maps/documentation/directions/#UnitSystems>`_
:param region: The region code. Affects geocoding of origin and
destination (see `gmaps.Geocoding.geocode` region parameter)
:param departure_time: Desired time of departure as
seconds since midnight, January 1, 1970 UTC
:param arrival_time: Desired time of arrival for transit directions as
seconds since midnight, January 1, 1970 UTC. | 2.338009 | 2.683439 | 0.871274 |
op.drop_table('files_multipartobject_part')
op.drop_index(op.f('ix_files_object__mimetype'), table_name='files_object')
op.drop_table('files_object')
op.drop_table('files_multipartobject')
op.drop_table('files_buckettags')
op.drop_table('files_bucket')
op.drop_table('files_location')
op.drop_table('files_files') | def downgrade() | Downgrade database. | 2.710335 | 2.71493 | 0.998307 |
m, encoding = mimetypes.guess_type(filename)
if encoding:
m = ENCODING_MIMETYPES.get(encoding, None)
return m or 'application/octet-stream' | def guess_mimetype(filename) | Map extra mimetype with the encoding provided.
:returns: The extra mimetype. | 3.358918 | 4.164843 | 0.806493 |
def object_formatter(v, c, m, p):
return Markup('<a href="{0}">{1}</a>'.format(
link_func(m), text))
return object_formatter | def link(text, link_func) | Generate a object formatter for links.. | 6.276173 | 4.965684 | 1.263909 |
try:
count = 0
for file_id in ids:
f = FileInstance.query.filter_by(
id=uuid.UUID(file_id)).one_or_none()
if f is None:
raise ValueError(_("Cannot find file instance."))
verify_checksum.delay(file_id)
count += 1
if count > 0:
flash(_('Fixity check(s) sent to queue.'), 'success')
except Exception as exc:
if not self.handle_view_exception(exc):
raise
current_app.logger.exception(str(exc)) # pragma: no cover
flash(_('Failed to run fixity checks.'),
'error') | def action_verify_checksum(self, ids) | Inactivate users. | 3.659131 | 3.609917 | 1.013633 |
# Note, parse_sql does not include a keys if the value is an empty string
# (e.g. 'key=&test=a'), and thus technically we should not get strings
# which have zero length.
klen = len(key)
vlen = len(value)
return klen > 0 and klen < 256 and vlen > 0 and vlen < 256 | def validate_tag(key, value) | Validate a tag.
Keys must be less than 128 chars and values must be less than 256 chars. | 9.236691 | 9.31357 | 0.991745 |
# Get the value of the custom HTTP header and interpret it as an query
# string
qs = request.headers.get(
current_app.config['FILES_REST_FILE_TAGS_HEADER'], '')
tags = {}
for key, value in parse_qsl(qs):
# Check for duplicate keys
if key in tags:
raise DuplicateTagError()
# Check for too short/long keys and values.
if not validate_tag(key, value):
raise InvalidTagError()
tags[key] = value
return tags or None | def parse_header_tags() | Parse tags specified in the HTTP request header. | 5.376914 | 5.035775 | 1.067743 |
return content_length, part_number, request.stream, content_type, \
content_md5, None | def default_partfactory(part_number=None, content_length=None,
content_type=None, content_md5=None) | Get default part factory.
:param part_number: The part number. (Default: ``None``)
:param content_length: The content length. (Default: ``None``)
:param content_type: The HTTP Content-Type. (Default: ``None``)
:param content_md5: The content MD5. (Default: ``None``)
:returns: The content length, the part number, the stream, the content
type, MD5 of the content. | 6.209274 | 7.771902 | 0.798939 |
if content_type.startswith('multipart/form-data'):
abort(422)
return request.stream, content_length, content_md5, parse_header_tags() | def stream_uploadfactory(content_md5=None, content_length=None,
content_type=None) | Get default put factory.
If Content-Type is ``'multipart/form-data'`` then the stream is aborted.
:param content_md5: The content MD5. (Default: ``None``)
:param content_length: The content length. (Default: ``None``)
:param content_type: The HTTP Content-Type. (Default: ``None``)
:returns: The stream, content length, MD5 of the content. | 8.78167 | 8.611063 | 1.019812 |
return content_length, part_number, uploaded_file.stream, \
uploaded_file.headers.get('Content-Type'), None, None | def ngfileupload_partfactory(part_number=None, content_length=None,
uploaded_file=None) | Part factory for ng-file-upload.
:param part_number: The part number. (Default: ``None``)
:param content_length: The content length. (Default: ``None``)
:param uploaded_file: The upload request. (Default: ``None``)
:returns: The content length, part number, stream, HTTP Content-Type
header. | 5.53304 | 4.971505 | 1.112951 |
if not content_type.startswith('multipart/form-data'):
abort(422)
return uploaded_file.stream, content_length, None, parse_header_tags() | def ngfileupload_uploadfactory(content_length=None, content_type=None,
uploaded_file=None) | Get default put factory.
If Content-Type is ``'multipart/form-data'`` then the stream is aborted.
:param content_length: The content length. (Default: ``None``)
:param content_type: The HTTP Content-Type. (Default: ``None``)
:param uploaded_file: The upload request. (Default: ``None``)
:param file_tags_header: The file tags. (Default: ``None``)
:returns: A tuple containing stream, content length, and empty header. | 8.487268 | 8.386671 | 1.011995 |
@wraps(f)
def decorate(*args, **kwargs):
bucket_id = kwargs.pop('bucket_id')
bucket = Bucket.get(as_uuid(bucket_id))
if not bucket:
abort(404, 'Bucket does not exist.')
return f(bucket=bucket, *args, **kwargs)
return decorate | def pass_bucket(f) | Decorate to retrieve a bucket. | 2.435893 | 2.317261 | 1.051195 |
def decorate(f):
@wraps(f)
def inner(self, bucket, key, upload_id, *args, **kwargs):
obj = MultipartObject.get(
bucket, key, upload_id, with_completed=with_completed)
if obj is None:
abort(404, 'uploadId does not exists.')
return f(self, obj, *args, **kwargs)
return inner
return decorate | def pass_multipart(with_completed=False) | Decorate to retrieve an object. | 2.801013 | 2.60651 | 1.074622 |
if permission is not None and not permission.can():
if hidden:
abort(404)
else:
if current_user.is_authenticated:
abort(403,
'You do not have a permission for this action')
abort(401) | def check_permission(permission, hidden=True) | Check if permission is allowed.
If permission fails then the connection is aborted.
:param permission: The permission to check.
:param hidden: Determine if a 404 error (``True``) or 401/403 error
(``False``) should be returned if the permission is rejected (i.e.
hide or reveal the existence of a particular object). | 4.039533 | 4.543593 | 0.889061 |
def decorator_builder(f):
@wraps(f)
def decorate(*args, **kwargs):
check_permission(current_permission_factory(
object_getter(*args, **kwargs),
action(*args, **kwargs) if callable(action) else action,
), hidden=hidden)
return f(*args, **kwargs)
return decorate
return decorator_builder | def need_permissions(object_getter, action, hidden=True) | Get permission for buckets or abort.
:param object_getter: The function used to retrieve the object and pass it
to the permission factory.
:param action: The action needed.
:param hidden: Determine which kind of error to return. (Default: ``True``) | 3.392121 | 3.57192 | 0.949663 |
with db.session.begin_nested():
bucket = Bucket.create(
storage_class=current_app.config[
'FILES_REST_DEFAULT_STORAGE_CLASS'
],
)
db.session.commit()
return self.make_response(
data=bucket,
context={
'class': Bucket,
}
) | def post(self) | Create bucket. | 5.136752 | 4.490407 | 1.143939 |
return self.make_response(
data=MultipartObject.query_by_bucket(bucket).limit(1000).all(),
context={
'class': MultipartObject,
'bucket': bucket,
'many': True,
}
) | def multipart_listuploads(self, bucket) | List objects in a bucket.
:param bucket: A :class:`invenio_files_rest.models.Bucket` instance.
:returns: The Flask response. | 5.957527 | 5.827016 | 1.022398 |
if versions is not missing:
check_permission(
current_permission_factory(bucket, 'bucket-read-versions'),
hidden=False
)
return self.make_response(
data=ObjectVersion.get_by_bucket(
bucket.id, versions=versions is not missing).limit(1000).all(),
context={
'class': ObjectVersion,
'bucket': bucket,
'many': True,
}
) | def listobjects(self, bucket, versions) | List objects in a bucket.
:param bucket: A :class:`invenio_files_rest.models.Bucket` instance.
:returns: The Flask response. | 8.44788 | 7.827566 | 1.079247 |
if uploads is not missing:
return self.multipart_listuploads(bucket)
else:
return self.listobjects(bucket, versions) | def get(self, bucket=None, versions=missing, uploads=missing) | Get list of objects in the bucket.
:param bucket: A :class:`invenio_files_rest.models.Bucket` instance.
:returns: The Flask response. | 6.062447 | 8.75182 | 0.692707 |
check_permission(current_permission_factory(
obj,
'object-read'
))
if not obj.is_head:
check_permission(
current_permission_factory(obj, 'object-read-version'),
hidden=False
) | def check_object_permission(obj) | Retrieve object and abort if it doesn't exists. | 8.45502 | 7.353495 | 1.149796 |
obj = ObjectVersion.get(bucket, key, version_id=version_id)
if not obj:
abort(404, 'Object does not exists.')
cls.check_object_permission(obj)
return obj | def get_object(cls, bucket, key, version_id) | Retrieve object and abort if it doesn't exists.
If the file is not found, the connection is aborted and the 404
error is returned.
:param bucket: The bucket (instance or id) to get the object from.
:param key: The file key.
:param version_id: The version ID.
:returns: A :class:`invenio_files_rest.models.ObjectVersion` instance. | 4.042071 | 3.815126 | 1.059486 |
# Initial validation of size based on Content-Length.
# User can tamper with Content-Length, so this is just an initial up
# front check. The storage subsystem must validate the size limit as
# well.
stream, content_length, content_md5, tags = \
current_files_rest.upload_factory()
size_limit = bucket.size_limit
if content_length and size_limit and content_length > size_limit:
desc = 'File size limit exceeded.' \
if isinstance(size_limit, int) else size_limit.reason
raise FileSizeError(description=desc)
with db.session.begin_nested():
obj = ObjectVersion.create(bucket, key)
obj.set_contents(
stream, size=content_length, size_limit=size_limit)
# Check add tags
if tags:
for key, value in tags.items():
ObjectVersionTag.create(obj, key, value)
db.session.commit()
return self.make_response(
data=obj,
context={
'class': ObjectVersion,
'bucket': bucket,
},
etag=obj.file.checksum
) | def create_object(self, bucket, key) | Create a new object.
:param bucket: The bucket (instance or id) to get the object from.
:param key: The file key.
:returns: A Flask response. | 6.26756 | 6.239882 | 1.004436 |
if version_id is None:
# Create a delete marker.
with db.session.begin_nested():
ObjectVersion.delete(bucket, obj.key)
db.session.commit()
else:
# Permanently delete specific object version.
check_permission(
current_permission_factory(bucket, 'object-delete-version'),
hidden=False,
)
obj.remove()
db.session.commit()
if obj.file_id:
remove_file_data.delay(str(obj.file_id))
return self.make_response('', 204) | def delete_object(self, bucket, obj, version_id) | Delete an existing object.
:param bucket: The bucket (instance or id) to get the object from.
:param obj: A :class:`invenio_files_rest.models.ObjectVersion`
instance.
:param version_id: The version ID.
:returns: A Flask response. | 5.445412 | 5.160811 | 1.055147 |
if not obj.is_head:
check_permission(
current_permission_factory(obj, 'object-read-version'),
hidden=False
)
if expected_chksum and obj.file.checksum != expected_chksum:
current_app.logger.warning(
'File checksum mismatch detected.', extra=logger_data)
file_downloaded.send(current_app._get_current_object(), obj=obj)
return obj.send_file(restricted=restricted,
as_attachment=as_attachment) | def send_object(bucket, obj, expected_chksum=None,
logger_data=None, restricted=True, as_attachment=False) | Send an object for a given bucket.
:param bucket: The bucket (instance or id) to get the object from.
:param obj: A :class:`invenio_files_rest.models.ObjectVersion`
instance.
:params expected_chksum: Expected checksum.
:param logger_data: The python logger.
:param kwargs: Keyword arguments passed to ``Object.send_file()``
:returns: A Flask response. | 5.861743 | 4.801051 | 1.220929 |
return self.make_response(
data=Part.query_by_multipart(
multipart).order_by(Part.part_number).limit(1000).all(),
context={
'class': Part,
'multipart': multipart,
'many': True,
}
) | def multipart_listparts(self, multipart) | Get parts of a multipart upload.
:param multipart: A :class:`invenio_files_rest.models.MultipartObject`
instance.
:returns: A Flask response. | 5.781638 | 6.003481 | 0.963048 |
if size is None:
raise MissingQueryParameter('size')
if part_size is None:
raise MissingQueryParameter('partSize')
multipart = MultipartObject.create(bucket, key, size, part_size)
db.session.commit()
return self.make_response(
data=multipart,
context={
'class': MultipartObject,
'bucket': bucket,
}
) | def multipart_init(self, bucket, key, size=None, part_size=None) | Initialize a multipart upload.
:param bucket: The bucket (instance or id) to get the object from.
:param key: The file key.
:param size: The total size.
:param part_size: The part size.
:raises invenio_files_rest.errors.MissingQueryParameter: If size or
part_size are not defined.
:returns: A Flask response. | 3.882652 | 3.407765 | 1.139354 |
content_length, part_number, stream, content_type, content_md5, tags =\
current_files_rest.multipart_partfactory()
if content_length:
ck = multipart.last_part_size if \
part_number == multipart.last_part_number \
else multipart.chunk_size
if ck != content_length:
raise MultipartInvalidChunkSize()
# Create part
try:
p = Part.get_or_create(multipart, part_number)
p.set_contents(stream)
db.session.commit()
except Exception:
# We remove the Part since incomplete data may have been written to
# disk (e.g. client closed connection etc.) so it must be
# reuploaded.
db.session.rollback()
Part.delete(multipart, part_number)
raise
return self.make_response(
data=p,
context={
'class': Part,
},
etag=p.checksum
) | def multipart_uploadpart(self, multipart) | Upload a part.
:param multipart: A :class:`invenio_files_rest.models.MultipartObject`
instance.
:returns: A Flask response. | 6.892343 | 6.821638 | 1.010365 |
multipart.complete()
db.session.commit()
version_id = str(uuid.uuid4())
return self.make_response(
data=multipart,
context={
'class': MultipartObject,
'bucket': multipart.bucket,
'object_version_id': version_id,
},
# This will wait for the result, and send whitespace on the
# connection until the task has finished (or max timeout reached).
task_result=merge_multipartobject.delay(
str(multipart.upload_id),
version_id=version_id,
),
) | def multipart_complete(self, multipart) | Complete a multipart upload.
:param multipart: A :class:`invenio_files_rest.models.MultipartObject`
instance.
:returns: A Flask response. | 7.435795 | 7.907127 | 0.940391 |
multipart.delete()
db.session.commit()
if multipart.file_id:
remove_file_data.delay(str(multipart.file_id))
return self.make_response('', 204) | def multipart_delete(self, multipart) | Abort a multipart upload.
:param multipart: A :class:`invenio_files_rest.models.MultipartObject`
instance.
:returns: A Flask response. | 4.431646 | 6.106626 | 0.725711 |
if upload_id:
return self.multipart_listparts(bucket, key, upload_id)
else:
obj = self.get_object(bucket, key, version_id)
# If 'download' is missing from query string it will have
# the value None.
return self.send_object(bucket, obj,
as_attachment=download is not None) | def get(self, bucket=None, key=None, version_id=None, upload_id=None,
uploads=None, download=None) | Get object or list parts of a multpart upload.
:param bucket: The bucket (instance or id) to get the object from.
(Default: ``None``)
:param key: The file key. (Default: ``None``)
:param version_id: The version ID. (Default: ``None``)
:param upload_id: The upload ID. (Default: ``None``)
:param download: The download flag. (Default: ``None``)
:returns: A Flask response. | 5.204056 | 5.021411 | 1.036373 |
if uploads is not missing:
return self.multipart_init(bucket, key)
elif upload_id is not None:
return self.multipart_complete(bucket, key, upload_id)
abort(403) | def post(self, bucket=None, key=None, uploads=missing, upload_id=None) | Upload a new object or start/complete a multipart upload.
:param bucket: The bucket (instance or id) to get the object from.
(Default: ``None``)
:param key: The file key. (Default: ``None``)
:param upload_id: The upload ID. (Default: ``None``)
:returns: A Flask response. | 2.962507 | 4.32622 | 0.68478 |
if upload_id is not None:
return self.multipart_uploadpart(bucket, key, upload_id)
else:
return self.create_object(bucket, key) | def put(self, bucket=None, key=None, upload_id=None) | Update a new object or upload a part of a multipart upload.
:param bucket: The bucket (instance or id) to get the object from.
(Default: ``None``)
:param key: The file key. (Default: ``None``)
:param upload_id: The upload ID. (Default: ``None``)
:returns: A Flask response. | 3.606221 | 3.920522 | 0.919832 |
if upload_id is not None:
return self.multipart_delete(bucket, key, upload_id)
else:
obj = self.get_object(bucket, key, version_id)
return self.delete_object(bucket, obj, version_id) | def delete(self, bucket=None, key=None, version_id=None, upload_id=None,
uploads=None) | Delete an object or abort a multipart upload.
:param bucket: The bucket (instance or id) to get the object from.
(Default: ``None``)
:param key: The file key. (Default: ``None``)
:param version_id: The version ID. (Default: ``None``)
:param upload_id: The upload ID. (Default: ``None``)
:returns: A Flask response. | 2.582288 | 2.89853 | 0.890896 |
if not hasattr(self, "_container"):
if self.use_pyrax:
self._container = self.connection.create_container(self.container_name)
else:
self._container = None
return self._container | def _get_container(self) | Gets or creates the container. | 3.717459 | 2.994667 | 1.24136 |
if self.use_pyrax:
if container.cdn_ttl != self.ttl or not container.cdn_enabled:
container.make_public(ttl=self.ttl)
if hasattr(self, "_container_public_uri"):
delattr(self, "_container_public_uri")
self._container = container | def _set_container(self, container) | Sets the container (and, if needed, the configured TTL on it), making
the container publicly available. | 6.482678 | 5.127115 | 1.264391 |
if self.use_pyrax:
try:
return self.container.get_object(name)
except pyrax.exceptions.NoSuchObject:
return None
elif swiftclient:
try:
return self.container.get_object(name)
except swiftclient.exceptions.ClientException:
return None
else:
return self.container.get_object(name) | def _get_object(self, name) | Helper function to retrieve the requested Object. | 2.828232 | 2.692971 | 1.050227 |
return value if isinstance(value, ObjectVersion) \
else ObjectVersion.query.filter_by(version_id=value).one_or_none() | def as_object_version(value) | Get an object version object from an object version ID or an object version.
:param value: A :class:`invenio_files_rest.models.ObjectVersion` or an
object version ID.
:returns: A :class:`invenio_files_rest.models.ObjectVersion` instance. | 3.76206 | 4.762019 | 0.790014 |
@wraps(f)
def inner(self, *args, **kwargs):
res = f(self, *args, **kwargs)
self.bucket.size += self.file.size
return res
return inner | def update_bucket_size(f) | Decorate to update bucket size after operation. | 2.85163 | 2.598949 | 1.097224 |
def decorator(getter=default_getter, msg=default_msg):
def ensure_decorator(f):
@wraps(f)
def inner(self, *args, **kwargs):
if not getter(self):
raise exc_class(msg) if msg else exc_class()
return f(self, *args, **kwargs)
return inner
return ensure_decorator
return decorator | def ensure_state(default_getter, exc_class, default_msg=None) | Create a decorator factory function. | 2.151437 | 1.899692 | 1.132519 |
if not slug_pattern.match(name) or len(name) > 20:
raise ValueError(
'Invalid location name (lower-case alphanumeric + danshes).')
return name | def validate_name(self, key, name) | Validate name. | 11.339777 | 10.721215 | 1.057695 |
limits = [
lim for lim in current_files_rest.file_size_limiters(
self)
if lim.limit is not None
]
return min(limits) if limits else None | def size_limit(self) | Get size limit for this bucket.
The limit is based on the minimum output of the file size limiters. | 15.203445 | 10.517118 | 1.44559 |
with db.session.begin_nested():
bucket = Bucket(
default_location=self.default_location,
default_storage_class=self.default_storage_class,
quota_size=self.quota_size,
)
db.session.add(bucket)
for o in ObjectVersion.get_by_bucket(self):
o.copy(bucket=bucket)
bucket.locked = True if lock else self.locked
return bucket | def snapshot(self, lock=False) | Create a snapshot of latest objects in bucket.
:param lock: Create the new bucket in a locked state.
:returns: Newly created bucket containing copied ObjectVersion. | 4.21248 | 3.439309 | 1.224804 |
assert not bucket.locked
src_ovs = ObjectVersion.get_by_bucket(bucket=self, with_deleted=True)
dest_ovs = ObjectVersion.get_by_bucket(bucket=bucket,
with_deleted=True)
# transform into a dict { key: object version }
src_keys = {ov.key: ov for ov in src_ovs}
dest_keys = {ov.key: ov for ov in dest_ovs}
for key, ov in src_keys.items():
if not ov.deleted:
if key not in dest_keys or \
ov.file_id != dest_keys[key].file_id:
ov.copy(bucket=bucket)
elif key in dest_keys and not dest_keys[key].deleted:
ObjectVersion.delete(bucket, key)
if delete_extras:
for key, ov in dest_keys.items():
if key not in src_keys:
ObjectVersion.delete(bucket, key)
return bucket | def sync(self, bucket, delete_extras=False) | Sync self bucket ObjectVersions to the destination bucket.
The bucket is fully mirrored with the destination bucket following the
logic:
* same ObjectVersions are not touched
* new ObjectVersions are added to destination
* deleted ObjectVersions are deleted in destination
* extra ObjectVersions in dest are deleted if `delete_extras` param is
True
:param bucket: The destination bucket.
:param delete_extras: Delete extra ObjectVersions in destination if
True.
:returns: The bucket with an exact copy of ObjectVersions in self. | 2.764917 | 2.620752 | 1.055009 |
r
with db.session.begin_nested():
if location is None:
location = Location.get_default()
elif isinstance(location, six.string_types):
location = Location.get_by_name(location)
obj = cls(
default_location=location.id,
default_storage_class=storage_class or current_app.config[
'FILES_REST_DEFAULT_STORAGE_CLASS'],
**kwargs
)
db.session.add(obj)
return obj | def create(cls, location=None, storage_class=None, **kwargs) | r"""Create a bucket.
:param location: Location of a bucket (instance or name).
Default: Default location.
:param storage_class: Storage class of a bucket.
Default: Default storage class.
:param \**kwargs: Keyword arguments are forwarded to the class
:param \**kwargs: Keyword arguments are forwarded to the class
constructor.
:returns: Created bucket. | 2.882898 | 3.125885 | 0.922266 |
return cls.query.filter_by(
id=bucket_id,
deleted=False
).one_or_none() | def get(cls, bucket_id) | Get a bucket object (excluding deleted).
:param bucket_id: Bucket identifier.
:returns: Bucket instance. | 3.062825 | 4.657158 | 0.65766 |
bucket = cls.get(bucket_id)
if not bucket or bucket.deleted:
return False
bucket.deleted = True
return True | def delete(cls, bucket_id) | Delete a bucket.
Does not actually delete the Bucket, just marks it as deleted. | 3.571831 | 3.219975 | 1.109273 |
with db.session.begin_nested():
ObjectVersion.query.filter_by(
bucket_id=self.id
).delete()
self.query.filter_by(id=self.id).delete()
return self | def remove(self) | Permanently remove a bucket and all objects (including versions).
.. warning::
This by-passes the normal versioning and should only be used when
you want to permanently delete a bucket and its objects. Otherwise
use :py:data:`Bucket.delete()`.
Note the method does not remove the associated file instances which
must be garbage collected.
:returns: ``self``. | 4.586482 | 3.832831 | 1.19663 |
return cls.query.filter_by(
bucket_id=as_bucket_id(bucket),
key=key,
).one_or_none() | def get(cls, bucket, key) | Get tag object. | 3.75999 | 4.047315 | 0.929009 |
with db.session.begin_nested():
obj = cls(
bucket_id=as_bucket_id(bucket),
key=key,
value=value
)
db.session.add(obj)
return obj | def create(cls, bucket, key, value) | Create a new tag for bucket. | 2.902282 | 3.042392 | 0.953947 |
obj = cls.get(bucket, key)
if obj:
obj.value = value
db.session.merge(obj)
else:
obj = cls.create(bucket, key, value)
return obj | def create_or_update(cls, bucket, key, value) | Create or update a new tag for bucket. | 2.37552 | 2.457828 | 0.966512 |
obj = cls.get(bucket, key)
return obj.value if obj else None | def get_value(cls, bucket, key) | Get tag value. | 4.476 | 4.238631 | 1.056001 |
with db.session.begin_nested():
cls.query.filter_by(
bucket_id=as_bucket_id(bucket),
key=key,
).delete() | def delete(cls, bucket, key) | Delete a tag. | 3.948142 | 4.109002 | 0.960852 |
if len(uri) > current_app.config['FILES_REST_FILE_URI_MAX_LEN']:
raise ValueError(
'FileInstance URI too long ({0}).'.format(len(uri)))
return uri | def validate_uri(self, key, uri) | Validate uri. | 6.962559 | 6.623385 | 1.051209 |
assert uri is not None
return cls.query.filter_by(uri=uri).one_or_none() | def get_by_uri(cls, uri) | Get a file instance by URI. | 3.476857 | 3.374063 | 1.030466 |
obj = cls(
id=uuid.uuid4(),
writable=True,
readable=False,
size=0,
)
db.session.add(obj)
return obj | def create(cls) | Create a file instance.
Note, object is only added to the database session. | 4.790267 | 4.314046 | 1.110389 |
self.query.filter_by(id=self.id).delete()
return self | def delete(self) | Delete a file instance.
The file instance can be deleted if it has no references from other
objects. The caller is responsible to test if the file instance is
writable and that the disk file can actually be removed.
.. note::
Normally you should use the Celery task to delete a file instance,
as this method will not remove the file on disk. | 5.350757 | 8.806345 | 0.607602 |
self.checksum = self.storage(**kwargs).checksum(
progress_callback=progress_callback, chunk_size=chunk_size,
**(checksum_kwargs or {})) | def update_checksum(self, progress_callback=None, chunk_size=None,
checksum_kwargs=None, **kwargs) | Update checksum based on file. | 3.69652 | 3.418582 | 1.081302 |
with db.session.begin_nested():
self.last_check = None
self.last_check_at = datetime.utcnow()
return self | def clear_last_check(self) | Clear the checksum of the file. | 4.572384 | 4.416779 | 1.03523 |
try:
real_checksum = self.storage(**kwargs).checksum(
progress_callback=progress_callback, chunk_size=chunk_size,
**(checksum_kwargs or {}))
except Exception as exc:
current_app.logger.exception(str(exc))
if throws:
raise
real_checksum = None
with db.session.begin_nested():
self.last_check = (None if real_checksum is None
else (self.checksum == real_checksum))
self.last_check_at = datetime.utcnow()
return self.last_check | def verify_checksum(self, progress_callback=None, chunk_size=None,
throws=True, checksum_kwargs=None, **kwargs) | Verify checksum of file instance.
:param bool throws: If `True`, exceptions raised during checksum
calculation will be re-raised after logging. If set to `False`, and
an exception occurs, the `last_check` field is set to `None`
(`last_check_at` of course is updated), since no check actually was
performed.
:param dict checksum_kwargs: Passed as `**kwargs`` to
``storage().checksum``. | 3.039387 | 2.68619 | 1.131486 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.