sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def send_stream(stream, filename, size, mtime, mimetype=None, restricted=True,
                as_attachment=False, etag=None, content_md5=None,
                chunk_size=None, conditional=True, trusted=False):
    """Send the contents of a file to the client.

    .. warning::

       It is very easy to be exposed to Cross-Site Scripting (XSS) attacks if
       you serve user uploaded files. Here are some recommendations:

       1. Serve user uploaded files from a separate domain
          (not a subdomain). This way a malicious file can only attack other
          user uploaded files.
       2. Prevent the browser from rendering and executing HTML files (by
          setting ``trusted=False``).
       3. Force the browser to download the file as an attachment
          (``as_attachment=True``).

    :param stream: The file stream to send.
    :param filename: The file name.
    :param size: The file size.
    :param mtime: A Unix timestamp that represents last modified time (UTC).
    :param mimetype: The file mimetype. If ``None``, the module will try to
        guess. (Default: ``None``)
    :param restricted: If the file is not restricted, the module will set the
        cache-control. (Default: ``True``)
    :param as_attachment: If the file is an attachment. (Default: ``False``)
    :param etag: If defined, it will be set as HTTP E-Tag.
    :param content_md5: If defined, a HTTP Content-MD5 header will be set.
    :param chunk_size: The chunk size.
    :param conditional: Make the response conditional to the request.
        (Default: ``True``)
    :param trusted: Do not enable this option unless you know what you are
        doing. By default this function will send HTTP headers and MIME types
        that prevents your browser from rendering e.g. a HTML file which could
        contain a malicious script tag. (Default: ``False``)
    :returns: A Flask response instance.
    """
    chunk_size = chunk_size_or_default(chunk_size)

    # Guess mimetype from filename if not provided.
    if mimetype is None and filename:
        mimetype = mimetypes.guess_type(filename)[0]
    if mimetype is None:
        mimetype = 'application/octet-stream'

    # Construct headers
    headers = Headers()
    headers['Content-Length'] = size
    if content_md5:
        headers['Content-MD5'] = content_md5

    if not trusted:
        # Sanitize MIME type
        mimetype = sanitize_mimetype(mimetype, filename=filename)
        # See https://www.owasp.org/index.php/OWASP_Secure_Headers_Project
        # Prevent JavaScript execution
        headers['Content-Security-Policy'] = "default-src 'none';"
        # Prevent MIME type sniffing for browser.
        headers['X-Content-Type-Options'] = 'nosniff'
        # Prevent opening of downloaded file by IE
        headers['X-Download-Options'] = 'noopen'
        # Prevent cross domain requests from Flash/Acrobat.
        headers['X-Permitted-Cross-Domain-Policies'] = 'none'
        # Prevent files from being embedded in frame, iframe and object tags.
        headers['X-Frame-Options'] = 'deny'
        # Enable XSS protection (IE, Chrome, Safari)
        headers['X-XSS-Protection'] = '1; mode=block'

    # Force Content-Disposition for application/octet-stream to prevent
    # Content-Type sniffing.
    if as_attachment or mimetype == 'application/octet-stream':
        # See https://github.com/pallets/flask/commit/0049922f2e690a6d
        # Non-latin-1 filenames need the RFC 5987 ``filename*`` form plus an
        # ASCII fallback for legacy browsers.
        try:
            filenames = {'filename': filename.encode('latin-1')}
        except UnicodeEncodeError:
            filenames = {'filename*': "UTF-8''%s" % url_quote(filename)}
            encoded_filename = (unicodedata.normalize('NFKD', filename)
                                .encode('latin-1', 'ignore'))
            if encoded_filename:
                filenames['filename'] = encoded_filename
        headers.add('Content-Disposition', 'attachment', **filenames)
    else:
        headers.add('Content-Disposition', 'inline')

    # Construct response object.
    rv = current_app.response_class(
        FileWrapper(stream, buffer_size=chunk_size),
        mimetype=mimetype,
        headers=headers,
        direct_passthrough=True,
    )

    # Set etag if defined
    if etag:
        rv.set_etag(etag)

    # Set last modified time
    if mtime is not None:
        rv.last_modified = int(mtime)

    # Set cache-control
    if not restricted:
        rv.cache_control.public = True
        cache_timeout = current_app.get_send_file_max_age(filename)
        if cache_timeout is not None:
            rv.cache_control.max_age = cache_timeout
            rv.expires = int(time() + cache_timeout)

    if conditional:
        rv = rv.make_conditional(request)
    return rv
Send the contents of a file to the client. .. warning:: It is very easy to be exposed to Cross-Site Scripting (XSS) attacks if you serve user uploaded files. Here are some recommendations: 1. Serve user uploaded files from a separate domain (not a subdomain). This way a malicious file can only attack other user uploaded files. 2. Prevent the browser from rendering and executing HTML files (by setting ``trusted=False``). 3. Force the browser to download the file as an attachment (``as_attachment=True``). :param stream: The file stream to send. :param filename: The file name. :param size: The file size. :param mtime: A Unix timestamp that represents last modified time (UTC). :param mimetype: The file mimetype. If ``None``, the module will try to guess. (Default: ``None``) :param restricted: If the file is not restricted, the module will set the cache-control. (Default: ``True``) :param as_attachment: If the file is an attachment. (Default: ``False``) :param etag: If defined, it will be set as HTTP E-Tag. :param content_md5: If defined, a HTTP Content-MD5 header will be set. :param chunk_size: The chunk size. :param conditional: Make the response conditional to the request. (Default: ``True``) :param trusted: Do not enable this option unless you know what you are doing. By default this function will send HTTP headers and MIME types that prevents your browser from rendering e.g. a HTML file which could contain a malicious script tag. (Default: ``False``) :returns: A Flask response instance.
entailment
def sanitize_mimetype(mimetype, filename=None):
    """Sanitize a MIME type so the browser does not render the file."""
    # Whitelisted types (plain text, images, audio, ...) pass through as-is.
    if mimetype in MIMETYPE_WHITELIST:
        return mimetype

    # Renderable content (HTML, JavaScript, CSS, ...) is downgraded to
    # text/plain so the browser will display rather than execute it.
    is_known_textfile = bool(filename) and \
        filename.lower() in MIMETYPE_TEXTFILES
    if mimetype in MIMETYPE_PLAINTEXT or is_known_textfile:
        return 'text/plain'

    # Everything else is served as an opaque binary blob.
    return 'application/octet-stream'
Sanitize a MIME type so the browser does not render the file.
entailment
def make_path(base_uri, path, filename, path_dimensions, split_length):
    """Generate a path as base location for file instance.

    :param base_uri: The base URI.
    :param path: The relative path.
    :param path_dimensions: Number of chunks the path should be split into.
    :param split_length: The length of any chunk.
    :returns: A string representing the full path.
    """
    # The path must be strictly longer than the chunked prefix so that a
    # non-empty remainder is always left over.
    assert len(path) > path_dimensions * split_length

    chunks = [
        path[i * split_length:(i + 1) * split_length]
        for i in range(path_dimensions)
    ]
    remainder = path[path_dimensions * split_length:]
    return os.path.join(base_uri, *(chunks + [remainder, filename]))
Generate a path as base location for file instance. :param base_uri: The base URI. :param path: The relative path. :param path_dimensions: Number of chunks the path should be split into. :param split_length: The length of any chunk. :returns: A string representing the full path.
entailment
def compute_checksum(stream, algo, message_digest, chunk_size=None,
                     progress_callback=None):
    """Compute a checksum of a stream with the given message digest.

    :param stream: File-like object.
    :param algo: Identifier for checksum algorithm.
    :param message_digest: A message digest instance.
    :param chunk_size: Read at most size bytes from the file at a time.
    :param progress_callback: Function accepting one argument with number
        of bytes read. (Default: ``None``)
    :returns: The checksum as ``"<algo>:<hexdigest>"``.
    """
    chunk_size = chunk_size_or_default(chunk_size)

    bytes_read = 0
    while True:
        chunk = stream.read(chunk_size)
        if not chunk:
            break
        message_digest.update(chunk)
        bytes_read += len(chunk)
        if progress_callback:
            progress_callback(bytes_read)
    # Report the final byte count once more after the stream is exhausted
    # (matches the original call sequence).
    if progress_callback:
        progress_callback(bytes_read)
    return "{0}:{1}".format(algo, message_digest.hexdigest())
Get helper method to compute checksum from a stream. :param stream: File-like object. :param algo: Identifier for checksum algorithm. :param messsage_digest: A message digest instance. :param chunk_size: Read at most size bytes from the file at a time. :param progress_callback: Function accepting one argument with number of bytes read. (Default: ``None``) :returns: The checksum.
entailment
def populate_from_path(bucket, source, checksum=True, key_prefix='',
                       chunk_size=None):
    """Populate a ``bucket`` from all files in path.

    :param bucket: The bucket (instance or id) to create the object in.
    :param source: The file or directory path.
    :param checksum: If ``True`` then a MD5 checksum will be computed for each
        file. (Default: ``True``)
    :param key_prefix: The key prefix for the bucket.
    :param chunk_size: Chunk size to read from file.
    :returns: A iterator for all
        :class:`invenio_files_rest.models.ObjectVersion` instances.
    """
    from .models import FileInstance, ObjectVersion

    def create_file(key, path):
        """Create new ``ObjectVersion`` from path or existing ``FileInstance``.

        It checks MD5 checksum and size of existing ``FileInstance``s.
        """
        key = key_prefix + key

        if checksum:
            # BUGFIX: the original left this file handle open; close it as
            # soon as the checksum has been computed.
            with open(path, 'rb') as fp:
                file_checksum = compute_md5_checksum(
                    fp, chunk_size=chunk_size)
            file_instance = FileInstance.query.filter_by(
                checksum=file_checksum, size=os.path.getsize(path)
            ).first()
            if file_instance:
                # Reuse the already stored file content.
                return ObjectVersion.create(
                    bucket, key, _file_id=file_instance.id
                )
        # The stream is fully consumed by ``create``, so it is safe to close
        # it on exit (the original leaked this handle as well).
        with open(path, 'rb') as fp:
            return ObjectVersion.create(bucket, key, stream=fp)

    if os.path.isfile(source):
        yield create_file(os.path.basename(source), source)
    else:
        for root, dirs, files in os.walk(source, topdown=False):
            for name in files:
                filename = os.path.join(root, name)
                assert filename.startswith(source)
                # Build a bucket key from the path relative to ``source``.
                parts = [p for p in filename[len(source):].split(os.sep) if p]
                yield create_file('/'.join(parts), os.path.join(root, name))
Populate a ``bucket`` from all files in path. :param bucket: The bucket (instance or id) to create the object in. :param source: The file or directory path. :param checksum: If ``True`` then a MD5 checksum will be computed for each file. (Default: ``True``) :param key_prefix: The key prefix for the bucket. :param chunk_size: Chunk size to read from file. :returns: A iterator for all :class:`invenio_files_rest.models.ObjectVersion` instances.
entailment
def set_options(self, options):
    """Set instance variables based on an options dict."""
    # COMMAND LINE OPTIONS
    self.wipe = options.get("wipe")
    self.test_run = options.get("test_run")
    # BUGFIX: this previously read options.get("test_run"), so the --quiet
    # flag was ignored and quiet mode tracked test runs instead.
    self.quiet = options.get("quiet")
    self.container_name = options.get("container")
    self.verbosity = int(options.get("verbosity"))
    self.syncmedia = options.get("syncmedia")
    self.syncstatic = options.get("syncstatic")
    if self.test_run:
        # A test run always reports verbosely.
        self.verbosity = 2
    cli_includes = options.get("includes")
    cli_excludes = options.get("excludes")

    # CUMULUS CONNECTION AND SETTINGS FROM SETTINGS.PY
    if self.syncmedia and self.syncstatic:
        raise CommandError("options --media and --static are mutually exclusive")
    if not self.container_name:
        if self.syncmedia:
            self.container_name = CUMULUS["CONTAINER"]
        elif self.syncstatic:
            self.container_name = CUMULUS["STATIC_CONTAINER"]
        else:
            raise CommandError("must select one of the required options, either --media or --static")
    settings_includes = CUMULUS["INCLUDE_LIST"]
    settings_excludes = CUMULUS["EXCLUDE_LIST"]

    # PATH SETTINGS
    if self.syncmedia:
        self.file_root = os.path.abspath(settings.MEDIA_ROOT)
        self.file_url = settings.MEDIA_URL
    elif self.syncstatic:
        self.file_root = os.path.abspath(settings.STATIC_ROOT)
        self.file_url = settings.STATIC_URL
    if not self.file_root.endswith("/"):
        self.file_root = self.file_root + "/"
    if self.file_url.startswith("/"):
        self.file_url = self.file_url[1:]

    # SYNCSTATIC VARS
    # combine includes and excludes from the cli and django settings file
    self.includes = list(set(cli_includes + settings_includes))
    self.excludes = list(set(cli_excludes + settings_excludes))
    # transform glob patterns to regular expressions
    self.local_filenames = []
    self.create_count = 0
    self.upload_count = 0
    self.update_count = 0
    self.skip_count = 0
    self.delete_count = 0
Sets instance variables based on an options dict
entailment
def match_cloud(self, includes, excludes):
    """Return the cloud object names matching the include/exclude patterns."""
    names = [entry.name for entry in self.container.get_objects()]
    include_re = r"|".join(fnmatch.translate(pat) for pat in includes)
    # ``$.`` can never match, so an empty exclude list excludes nothing.
    exclude_re = r"|".join(fnmatch.translate(pat) for pat in excludes) or r"$."
    excluded = {name for name in names if re.match(exclude_re, name)}
    return [name for name in names
            if re.match(include_re, name) and name not in excluded]
Returns the cloud objects that match the include and exclude patterns.
entailment
def match_local(self, prefix, includes, excludes):
    """Filter ``os.walk()`` results with include and exclude patterns.

    See: http://stackoverflow.com/a/5141829/93559

    :param prefix: Root directory to walk.
    :param includes: Glob patterns a file's path (relative to ``prefix``)
        must match.
    :param excludes: Glob patterns that disqualify a path.
    :returns: List of matching absolute file paths.
    """
    includes_pattern = r"|".join([fnmatch.translate(x) for x in includes])
    # ``$.`` can never match, so an empty exclude list excludes nothing.
    excludes_pattern = r"|".join([fnmatch.translate(x) for x in excludes]) or r"$."
    matches = []
    for root, dirs, files in os.walk(prefix, topdown=True):
        # Prune excluded directories in place so os.walk skips them.
        dirs[:] = [os.path.join(root, d) for d in dirs]
        dirs[:] = [d for d in dirs
                   if not re.match(excludes_pattern, d.split(root)[1])]
        # Absolute paths for this directory's files. (The original code
        # re-joined ``root`` with these already-absolute paths; since
        # os.path.join discards earlier components when a later one is
        # absolute, those joins were redundant no-ops and are removed.)
        files = [os.path.join(root, f) for f in files]
        files = [f for f in files if not re.match(excludes_pattern, f)]
        files = [f for f in files
                 if re.match(includes_pattern, f.split(prefix)[1])]
        matches.extend(files)
    return matches
Filters os.walk() with include and exclude patterns. See: http://stackoverflow.com/a/5141829/93559
entailment
def upload_files(self, abspaths, relpaths, remote_objects):
    """Determine which files need uploading and call ``upload_file`` on each."""
    root_len = len(self.file_root)
    for relpath in relpaths:
        # Find the absolute path whose suffix matches this relative path.
        abspath = [p for p in abspaths if p[root_len:] == relpath][0]
        cloud_datetime = remote_objects.get(relpath)
        local_datetime = datetime.datetime.utcfromtimestamp(
            os.stat(abspath).st_mtime)

        # Skip files that are not newer than the remote copy.
        if cloud_datetime and local_datetime < cloud_datetime:
            self.skip_count += 1
            if not self.quiet:
                print("Skipped {0}: not modified.".format(relpath))
            continue

        if relpath in remote_objects:
            self.update_count += 1
        else:
            self.create_count += 1
        self.upload_file(abspath, relpath)
Determines files to be uploaded and call ``upload_file`` on each.
entailment
def upload_file(self, abspath, cloud_filename):
    """Upload a single file to the container.

    :param abspath: Absolute path of the local file.
    :param cloud_filename: Object name to store the file under.
    """
    if not self.test_run:
        # NOTE(review): this file handle is never explicitly closed.
        content = open(abspath, "rb")
        content_type = get_content_type(cloud_filename, content)
        headers = get_headers(cloud_filename, content_type)
        if headers.get("Content-Encoding") == "gzip":
            # assumes get_gzipped_contents returns a file-like object with a
            # ``size`` attribute for the compressed length — TODO confirm
            content = get_gzipped_contents(content)
            size = content.size
        else:
            size = os.stat(abspath).st_size
        self.container.create(
            obj_name=cloud_filename,
            data=content,
            content_type=content_type,
            content_length=size,
            content_encoding=headers.get("Content-Encoding", None),
            headers=headers,
            ttl=CUMULUS["FILE_TTL"],
            etag=None,
        )
    # Counted even on test runs so the final tally reflects what would have
    # been uploaded.
    self.upload_count += 1
    if not self.quiet or self.verbosity > 1:
        print("Uploaded: {0}".format(cloud_filename))
Uploads a file to the container.
entailment
def delete_extra_files(self, relpaths, cloud_objs):
    """Delete container objects that have no local counterpart."""
    stale = (obj for obj in cloud_objs if obj not in relpaths)
    for cloud_obj in stale:
        if not self.test_run:
            self.delete_cloud_obj(cloud_obj)
        # Counted even on test runs so the tally reflects what would happen.
        self.delete_count += 1
        if not self.quiet or self.verbosity > 1:
            print("Deleted: {0}".format(cloud_obj))
Deletes any objects from the container that do not exist locally.
entailment
def delete_cloud_obj(self, cloud_obj):
    """Remove a single object from the container."""
    self._connection.delete_object(
        container=self.container_name, obj=cloud_obj)
Deletes an object from the container.
entailment
def wipe_container(self):
    """Completely wipe out the contents of the container."""
    # BUGFIX: ``object_count`` is an integer count, not a sequence; the
    # original wrapped it in ``len()``, raising TypeError on every call
    # that reached these prints.
    if self.test_run:
        print("Wipe would delete {0} objects.".format(
            self.container.object_count))
    else:
        if not self.quiet or self.verbosity > 1:
            print("Deleting {0} objects...".format(
                self.container.object_count))
        self._connection.delete_all_objects()
Completely wipes out the contents of the container.
entailment
def print_tally(self):
    """Print the final tally to stdout."""
    # Updates are uploads that replaced an existing remote object.
    self.update_count = self.upload_count - self.create_count
    if self.test_run:
        print("Test run complete with the following results:")
    summary = "Skipped {0}. Created {1}. Updated {2}. Deleted {3}.".format(
        self.skip_count, self.create_count, self.update_count,
        self.delete_count)
    print(summary)
Prints the final tally to stdout.
entailment
def handle(self, *args, **options):
    """List all the items in a container to stdout."""
    self._connection = Auth()._get_connection()
    if not args:
        # No container given: list the account's containers.
        entries = self._connection.list_containers()
        if not entries:
            print("No containers were found for this account.")
    elif len(args) == 1:
        # One container given: list the objects inside it.
        entries = self._connection.list_container_object_names(args[0])
        if not entries:
            print("No matching container found.")
    else:
        raise CommandError("Pass one and only one [container_name] as an argument")
    for entry in entries:
        print(entry)
Lists all the items in a container to stdout.
entailment
def progress_updater(size, total):
    """Report checksum-verification progress via the Celery task state."""
    current_task.update_state(
        state=state('PROGRESS'),
        meta={'size': size, 'total': total},
    )
Progress reporter for checksum verification.
entailment
def verify_checksum(file_id, pessimistic=False, chunk_size=None, throws=True,
                    checksum_kwargs=None):
    """Verify checksum of a file instance.

    :param file_id: The file ID.
    :param pessimistic: If ``True``, clear the last-check state before
        verifying, so a crash mid-check leaves the file marked as unchecked.
        (Default: ``False``)
    :param chunk_size: Passed to ``FileInstance.verify_checksum``.
    :param throws: Passed to ``FileInstance.verify_checksum``.
        (Default: ``True``)
    :param checksum_kwargs: Passed to ``FileInstance.verify_checksum``.
    """
    f = FileInstance.query.get(uuid.UUID(file_id))
    # Anything might happen during the task, so being pessimistic and marking
    # the file as unchecked is a reasonable precaution
    if pessimistic:
        f.clear_last_check()
        db.session.commit()
    f.verify_checksum(
        progress_callback=progress_updater, chunk_size=chunk_size,
        throws=throws, checksum_kwargs=checksum_kwargs)
    db.session.commit()
Verify checksum of a file instance. :param file_id: The file ID.
entailment
def schedule_checksum_verification(frequency=None, batch_interval=None,
                                   max_count=None, max_size=None,
                                   files_query=None, checksum_kwargs=None):
    """Schedule a batch of files for checksum verification.

    The purpose of this task is to be periodically called through
    `celerybeat`, in order achieve a repeated verification cycle of all file
    checksums, while following a set of constraints in order to throttle the
    execution rate of the checks.

    :param dict frequency: Time period over which a full check of all files
        should be performed. The argument is a dictionary that will be passed
        as arguments to the `datetime.timedelta` class. Defaults to a month
        (30 days).
    :param dict batch_interval: How often a batch is sent. If not supplied,
        this information will be extracted, if possible, from the
        celery.conf['CELERYBEAT_SCHEDULE'] entry of this task. The argument
        is a dictionary that will be passed as arguments to the
        `datetime.timedelta` class.
    :param int max_count: Max count of files of a single batch. When set to
        `0` it's automatically calculated to be distributed equally through
        the number of total batches.
    :param int max_size: Max size of a single batch in bytes. When set to `0`
        it's automatically calculated to be distributed equally through the
        number of total batches.
    :param str files_query: Import path for a function returning a
        FileInstance query for files that should be checked.
    :param dict checksum_kwargs: Passed to ``FileInstance.verify_checksum``.
    """
    # At least one throttling constraint is required.
    assert max_count is not None or max_size is not None

    frequency = timedelta(**frequency) if frequency else timedelta(days=30)
    if batch_interval:
        batch_interval = timedelta(**batch_interval)
    else:
        # Fall back to the beat schedule entry registered for this task.
        celery_schedule = current_celery.conf.get('CELERYBEAT_SCHEDULE', {})
        batch_interval = batch_interval or next(
            (v['schedule'] for v in celery_schedule.values()
             if v.get('task') == schedule_checksum_verification.name),
            None)
    if not batch_interval or not isinstance(batch_interval, timedelta):
        raise Exception(u'No "batch_interval" could be decided')

    # Number of batches needed to cover all files within ``frequency``.
    total_batches = int(
        frequency.total_seconds() / batch_interval.total_seconds())

    files = obj_or_import_string(
        files_query, default=default_checksum_verification_files_query)()
    # Least-recently-checked files first; never-checked files sort earliest.
    files = files.order_by(
        sa.func.coalesce(FileInstance.last_check_at, date.min))

    if max_count is not None:
        all_files_count = files.count()
        min_count = int(math.ceil(all_files_count / total_batches))
        max_count = min_count if max_count == 0 else max_count
        if max_count < min_count:
            current_app.logger.warning(
                u'The "max_count" you specified ({0}) is smaller than the '
                'minimum batch file count required ({1}) in order to achieve '
                'the file checks over the specified period ({2}).'
                .format(max_count, min_count, frequency))
        files = files.limit(max_count)
    if max_size is not None:
        all_files_size = db.session.query(
            sa.func.sum(FileInstance.size)).scalar()
        min_size = int(math.ceil(all_files_size / total_batches))
        max_size = min_size if max_size == 0 else max_size
        if max_size < min_size:
            current_app.logger.warning(
                u'The "max_size" you specified ({0}) is smaller than the '
                'minimum batch total file size required ({1}) in order to '
                'achieve the file checks over the specified period ({2}).'
                .format(max_size, min_size, frequency))

    files = files.yield_per(1000)
    scheduled_file_ids = []
    total_size = 0
    for f in files:
        # Add at least the first file, since it might be larger than "max_size"
        scheduled_file_ids.append(str(f.id))
        total_size += f.size
        if max_size and max_size <= total_size:
            break
    group(
        verify_checksum.s(
            file_id, pessimistic=True, throws=False,
            checksum_kwargs=(checksum_kwargs or {}))
        for file_id in scheduled_file_ids
    ).apply_async()
Schedule a batch of files for checksum verification. The purpose of this task is to be periodically called through `celerybeat`, in order achieve a repeated verification cycle of all file checksums, while following a set of constraints in order to throttle the execution rate of the checks. :param dict frequency: Time period over which a full check of all files should be performed. The argument is a dictionary that will be passed as arguments to the `datetime.timedelta` class. Defaults to a month (30 days). :param dict batch_interval: How often a batch is sent. If not supplied, this information will be extracted, if possible, from the celery.conf['CELERYBEAT_SCHEDULE'] entry of this task. The argument is a dictionary that will be passed as arguments to the `datetime.timedelta` class. :param int max_count: Max count of files of a single batch. When set to `0` it's automatically calculated to be distributed equally through the number of total batches. :param int max_size: Max size of a single batch in bytes. When set to `0` it's automatically calculated to be distributed equally through the number of total batches. :param str files_query: Import path for a function returning a FileInstance query for files that should be checked. :param dict checksum_kwargs: Passed to ``FileInstance.verify_checksum``.
entailment
def migrate_file(src_id, location_name, post_fixity_check=False):
    """Task to migrate a file instance to a new location.

    .. note::
       If something goes wrong during the content copy, the destination file
       instance is removed.

    :param src_id: The :class:`invenio_files_rest.models.FileInstance` ID.
    :param location_name: Where to migrate the file.
    :param post_fixity_check: Verify checksum after migration.
        (Default: ``False``)
    """
    location = Location.get_by_name(location_name)
    f_src = FileInstance.get(src_id)

    # Create destination file instance and commit it immediately, so the row
    # exists independently of the (possibly long) copy below.
    f_dst = FileInstance.create()
    db.session.commit()

    try:
        # Copy contents
        f_dst.copy_contents(
            f_src,
            progress_callback=progress_updater,
            default_location=location.uri,
        )
        db.session.commit()
    except Exception:
        # Remove destination file instance if an error occurred.
        db.session.delete(f_dst)
        db.session.commit()
        raise

    # Update all objects pointing to file.
    ObjectVersion.relink_all(f_src, f_dst)
    db.session.commit()

    # Start a fixity check
    if post_fixity_check:
        verify_checksum.delay(str(f_dst.id))
Task to migrate a file instance to a new location. .. note:: If something goes wrong during the content copy, the destination file instance is removed. :param src_id: The :class:`invenio_files_rest.models.FileInstance` ID. :param location_name: Where to migrate the file. :param post_fixity_check: Verify checksum after migration. (Default: ``False``)
entailment
def remove_file_data(file_id, silent=True):
    """Remove file instance and associated data.

    :param file_id: The :class:`invenio_files_rest.models.FileInstance` ID.
    :param silent: It stops propagation of a possible arised IntegrityError
        exception. (Default: ``True``)
    :raises sqlalchemy.exc.IntegrityError: Raised if the database removal
        goes wrong and silent is set to ``False``.
    """
    try:
        # First remove FileInstance from database and commit transaction to
        # ensure integrity constraints are checked and enforced.
        f = FileInstance.get(file_id)
        if not f.writable:
            # Non-writable file instances must not be deleted.
            return
        f.delete()
        db.session.commit()
        # Next, remove the file on disk. This leaves the possibility of having
        # a file on disk dangling in case the database removal works, and the
        # disk file removal doesn't work.
        f.storage().delete()
    except IntegrityError:
        if not silent:
            raise
Remove file instance and associated data. :param file_id: The :class:`invenio_files_rest.models.FileInstance` ID. :param silent: It stops propagation of a possible arised IntegrityError exception. (Default: ``True``) :raises sqlalchemy.exc.IntegrityError: Raised if the database removal goes wrong and silent is set to ``False``.
entailment
def merge_multipartobject(upload_id, version_id=None):
    """Merge multipart object.

    :param upload_id: The :class:`invenio_files_rest.models.MultipartObject`
        upload ID.
    :param version_id: Optionally you can define which file version.
        (Default: ``None``)
    :raises RuntimeError: If the upload ID is unknown or the multipart
        object is not yet completed.
    :returns: The :class:`invenio_files_rest.models.ObjectVersion` version
        ID.
    """
    mp = MultipartObject.query.filter_by(upload_id=upload_id).one_or_none()
    if not mp:
        # Grammar fix in the user-facing message (was "does not exists").
        raise RuntimeError('Upload ID does not exist.')
    if not mp.completed:
        raise RuntimeError('MultipartObject is not completed.')

    try:
        obj = mp.merge_parts(
            version_id=version_id,
            progress_callback=progress_updater,
        )
        db.session.commit()
        return str(obj.version_id)
    except Exception:
        # Roll back the partially merged state before propagating.
        db.session.rollback()
        raise
Merge multipart object. :param upload_id: The :class:`invenio_files_rest.models.MultipartObject` upload ID. :param version_id: Optionally you can define which file version. (Default: ``None``) :returns: The :class:`invenio_files_rest.models.ObjectVersion` version ID.
entailment
def remove_expired_multipartobjects():
    """Remove expired multipart objects."""
    delta = current_app.config['FILES_REST_MULTIPART_EXPIRES']
    cutoff = datetime.utcnow() - delta

    # Collect the file IDs while deleting the multipart rows, then schedule
    # the (asynchronous) removal of the underlying file data afterwards.
    stale_file_ids = []
    for multipart in MultipartObject.query_expired(cutoff):
        stale_file_ids.append(str(multipart.file_id))
        multipart.delete()
    for file_id in stale_file_ids:
        remove_file_data.delay(file_id)
Remove expired multipart objects.
entailment
def pyfs_storage_factory(fileinstance=None, default_location=None, default_storage_class=None, filestorage_class=PyFSFileStorage, fileurl=None, size=None, modified=None, clean_dir=True): """Get factory function for creating a PyFS file storage instance.""" # Either the FileInstance needs to be specified or all filestorage # class parameters need to be specified assert fileinstance or (fileurl and size) if fileinstance: # FIXME: Code here should be refactored since it assumes a lot on the # directory structure where the file instances are written fileurl = None size = fileinstance.size modified = fileinstance.updated if fileinstance.uri: # Use already existing URL. fileurl = fileinstance.uri else: assert default_location # Generate a new URL. fileurl = make_path( default_location, str(fileinstance.id), 'data', current_app.config['FILES_REST_STORAGE_PATH_DIMENSIONS'], current_app.config['FILES_REST_STORAGE_PATH_SPLIT_LENGTH'], ) return filestorage_class( fileurl, size=size, modified=modified, clean_dir=clean_dir)
Get factory function for creating a PyFS file storage instance.
entailment
def _get_fs(self, create_dir=True):
    """Return a ``(filesystem, filename)`` tuple for this storage's URL."""
    directory = dirname(self.fileurl)
    name = basename(self.fileurl)
    filesystem = opener.opendir(
        directory, writeable=True, create_dir=create_dir)
    return filesystem, name
Return tuple with filesystem and filename.
entailment
def open(self, mode='rb'):
    """Open the stored file.

    The caller is responsible for closing the returned file object.
    """
    filesystem, name = self._get_fs()
    return filesystem.open(name, mode=mode)
Open file. The caller is responsible for closing the file.
entailment
def delete(self):
    """Delete the file and, when ``clean_dir`` is set, its base directory.

    Removing the directory assumes only one file lives in it.
    """
    filesystem, name = self._get_fs(create_dir=False)
    if filesystem.exists(name):
        filesystem.remove(name)
    if self.clean_dir and filesystem.exists('.'):
        filesystem.removedir('.')
    return True
Delete a file. The base directory is also removed, as it is assumed that only one file exists in the directory.
entailment
def initialize(self, size=0):
    """Initialize the file on storage and truncate it to ``size`` bytes."""
    filesystem, name = self._get_fs()

    # Required for reliably opening the file on certain file systems:
    # update an existing file in place, otherwise create it.
    mode = 'r+b' if filesystem.exists(name) else 'wb'
    fp = filesystem.open(name, mode=mode)
    try:
        fp.truncate(size)
    except Exception:
        fp.close()
        # A failed initialization must not leave a partial file behind.
        self.delete()
        raise
    finally:
        fp.close()

    self._size = size
    return self.fileurl, size, None
Initialize file on storage and truncate to given size.
entailment
def save(self, incoming_stream, size_limit=None, size=None, chunk_size=None,
         progress_callback=None):
    """Save ``incoming_stream`` to the file system."""
    fp = self.open(mode='wb')
    try:
        bytes_written, checksum = self._write_stream(
            incoming_stream, fp, chunk_size=chunk_size,
            progress_callback=progress_callback, size_limit=size_limit,
            size=size)
    except Exception:
        fp.close()
        # A failed write must not leave a partial file behind.
        self.delete()
        raise
    finally:
        fp.close()
    self._size = bytes_written
    return self.fileurl, bytes_written, checksum
Save file in the file system.
entailment
def update(self, incoming_stream, seek=0, size=None, chunk_size=None,
           progress_callback=None):
    """Write ``incoming_stream`` into the existing file starting at ``seek``."""
    fp = self.open(mode='r+b')
    try:
        fp.seek(seek)
        bytes_written, checksum = self._write_stream(
            incoming_stream, fp, chunk_size=chunk_size, size=size,
            progress_callback=progress_callback)
    finally:
        fp.close()
    return bytes_written, checksum
Update a file in the file system.
entailment
def permission_factory(obj, action):
    """Get default permission factory.

    :param obj: An instance of :class:`invenio_files_rest.models.Bucket` or
        :class:`invenio_files_rest.models.ObjectVersion` or
        :class:`invenio_files_rest.models.MultipartObject` or ``None`` if
        the action is global.
    :param action: The required action.
    :raises RuntimeError: If the object is unknown.
    :returns: A :class:`invenio_access.permissions.Permission` instance.
    """
    need_class = _action2need_map[action]

    # Global actions carry no object argument.
    if obj is None:
        return Permission(need_class(None))

    if isinstance(obj, Bucket):
        arg = str(obj.id)
    elif isinstance(obj, (ObjectVersion, MultipartObject)):
        # Both are scoped by the bucket they belong to.
        arg = str(obj.bucket_id)
    else:
        raise RuntimeError('Unknown object')

    return Permission(need_class(arg))
Get default permission factory. :param obj: An instance of :class:`invenio_files_rest.models.Bucket` or :class:`invenio_files_rest.models.ObjectVersion` or :class:`invenio_files_rest.models.MultipartObject` or ``None`` if the action is global. :param action: The required action. :raises RuntimeError: If the object is unknown. :returns: A :class:`invenio_access.permissions.Permission` instance.
entailment
def delete_file(self, path, prefixed_path, source_storage): """ Checks if the target file should be deleted if it already exists """ if isinstance(self.storage, CumulusStorage): if self.storage.exists(prefixed_path): try: etag = self.storage._get_object(prefixed_path).etag digest = "{0}".format(hashlib.md5(source_storage.open(path).read()).hexdigest()) if etag == digest: self.log(u"Skipping '{0}' (not modified based on file hash)".format(path)) return False except: raise return super(Command, self).delete_file(path, prefixed_path, source_storage)
Checks if the target file should be deleted if it already exists
entailment
def files(): """Load files.""" srcroot = dirname(dirname(__file__)) d = current_app.config['DATADIR'] if exists(d): shutil.rmtree(d) makedirs(d) # Clear data Part.query.delete() MultipartObject.query.delete() ObjectVersion.query.delete() Bucket.query.delete() FileInstance.query.delete() Location.query.delete() db.session.commit() # Create location loc = Location(name='local', uri=d, default=True) db.session.add(loc) db.session.commit() # Bucket 0 b1 = Bucket.create(loc) b1.id = '00000000-0000-0000-0000-000000000000' for f in ['README.rst', 'LICENSE']: with open(join(srcroot, f), 'rb') as fp: ObjectVersion.create(b1, f, stream=fp) # Bucket 1 b2 = Bucket.create(loc) b2.id = '11111111-1111-1111-1111-111111111111' k = 'AUTHORS.rst' with open(join(srcroot, 'CHANGES.rst'), 'rb') as fp: ObjectVersion.create(b2, k, stream=fp) with open(join(srcroot, 'AUTHORS.rst'), 'rb') as fp: ObjectVersion.create(b2, k, stream=fp) k = 'RELEASE-NOTES.rst' with open(join(srcroot, 'RELEASE-NOTES.rst'), 'rb') as fp: ObjectVersion.create(b2, k, stream=fp) with open(join(srcroot, 'CHANGES.rst'), 'rb') as fp: ObjectVersion.create(b2, k, stream=fp) ObjectVersion.delete(b2.id, k) # Bucket 2 b2 = Bucket.create(loc) b2.id = '22222222-2222-2222-2222-222222222222' db.session.commit()
Load files.
entailment
def touch(): """Create new bucket.""" from .models import Bucket bucket = Bucket.create() db.session.commit() click.secho(str(bucket), fg='green')
Create new bucket.
entailment
def cp(source, bucket, checksum, key_prefix): """Create new bucket from all files in directory.""" from .models import Bucket from .helpers import populate_from_path for object_version in populate_from_path( Bucket.get(bucket), source, checksum=checksum, key_prefix=key_prefix): click.secho(str(object_version)) db.session.commit()
Create new bucket from all files in directory.
entailment
def location(name, uri, default): """Create new location.""" from .models import Location location = Location(name=name, uri=uri, default=default) db.session.add(location) db.session.commit() click.secho(str(location), fg='green')
Create new location.
entailment
def get_content_type(name, content): """ Checks if the content_type is already set. Otherwise uses the mimetypes library to guess. """ if hasattr(content, "content_type"): content_type = content.content_type else: mime_type, encoding = mimetypes.guess_type(name) content_type = mime_type return content_type
Checks if the content_type is already set. Otherwise uses the mimetypes library to guess.
entailment
def sync_headers(cloud_obj, headers=None, header_patterns=HEADER_PATTERNS): """ Overwrites the given cloud_obj's headers with the ones given as ``headers` and adds additional headers as defined in the HEADERS setting depending on the cloud_obj's file name. """ if headers is None: headers = {} # don't set headers on directories content_type = getattr(cloud_obj, "content_type", None) if content_type == "application/directory": return matched_headers = {} for pattern, pattern_headers in header_patterns: if pattern.match(cloud_obj.name): matched_headers.update(pattern_headers.copy()) # preserve headers already set matched_headers.update(cloud_obj.headers) # explicitly set headers overwrite matches and already set headers matched_headers.update(headers) if matched_headers != cloud_obj.headers: cloud_obj.headers = matched_headers cloud_obj.sync_metadata()
Overwrites the given cloud_obj's headers with the ones given as ``headers` and adds additional headers as defined in the HEADERS setting depending on the cloud_obj's file name.
entailment
def get_gzipped_contents(input_file): """ Returns a gzipped version of a previously opened file's buffer. """ zbuf = StringIO() zfile = GzipFile(mode="wb", compresslevel=6, fileobj=zbuf) zfile.write(input_file.read()) zfile.close() return ContentFile(zbuf.getvalue())
Returns a gzipped version of a previously opened file's buffer.
entailment
def schema_from_context(context): """Determine which schema to use.""" item_class = context.get('class') return ( serializer_mapping[item_class] if item_class else BaseSchema, context.get('many', False) )
Determine which schema to use.
entailment
def wait_for_taskresult(task_result, content, interval, max_rounds): """Get helper to wait for async task result to finish. The task will periodically send whitespace to prevent the connection from being closed. :param task_result: The async task to wait for. :param content: The content to return when the task is ready. :param interval: The duration of a sleep period before check again if the task is ready. :param max_rounds: The maximum number of intervals the function check before returning an Exception. :returns: An iterator on the content or a :class:`invenio_files_rest.errors.FilesException` exception if the timeout happened or the job failed. """ assert max_rounds > 0 def _whitespace_waiting(): current = 0 while current < max_rounds and current != -1: if task_result.ready(): # Task is done and we return current = -1 if task_result.successful(): yield content else: yield FilesException( description='Job failed.' ).get_body() else: # Send whitespace to prevent connection from closing. current += 1 sleep(interval) yield b' ' # Timed-out reached if current == max_rounds: yield FilesException( description='Job timed out.' ).get_body() return _whitespace_waiting()
Get helper to wait for async task result to finish. The task will periodically send whitespace to prevent the connection from being closed. :param task_result: The async task to wait for. :param content: The content to return when the task is ready. :param interval: The duration of a sleep period before check again if the task is ready. :param max_rounds: The maximum number of intervals the function check before returning an Exception. :returns: An iterator on the content or a :class:`invenio_files_rest.errors.FilesException` exception if the timeout happened or the job failed.
entailment
def json_serializer(data=None, code=200, headers=None, context=None, etag=None, task_result=None): """Build a json flask response using the given data. :param data: The data to serialize. (Default: ``None``) :param code: The HTTP status code. (Default: ``200``) :param headers: The HTTP headers to include. (Default: ``None``) :param context: The schema class context. (Default: ``None``) :param etag: The ETag header. (Default: ``None``) :param task_result: Optionally you can pass async task to wait for. (Default: ``None``) :returns: A Flask response with json data. :rtype: :py:class:`flask.Response` """ schema_class, many = schema_from_context(context or {}) if data is not None: # Generate JSON response data = json.dumps( schema_class(context=context).dump(data, many=many).data, **_format_args() ) interval = current_app.config['FILES_REST_TASK_WAIT_INTERVAL'] max_rounds = int( current_app.config['FILES_REST_TASK_WAIT_MAX_SECONDS'] // interval ) response = current_app.response_class( # Stream response if waiting for task result. data if task_result is None else wait_for_taskresult( task_result, data, interval, max_rounds, ), mimetype='application/json' ) else: response = current_app.response_class(mimetype='application/json') response.status_code = code if headers is not None: response.headers.extend(headers) if etag: response.set_etag(etag) return response
Build a json flask response using the given data. :param data: The data to serialize. (Default: ``None``) :param code: The HTTP status code. (Default: ``200``) :param headers: The HTTP headers to include. (Default: ``None``) :param context: The schema class context. (Default: ``None``) :param etag: The ETag header. (Default: ``None``) :param task_result: Optionally you can pass async task to wait for. (Default: ``None``) :returns: A Flask response with json data. :rtype: :py:class:`flask.Response`
entailment
def dump_links(self, o): """Dump links.""" return { 'self': url_for('.bucket_api', bucket_id=o.id, _external=True), 'versions': url_for( '.bucket_api', bucket_id=o.id, _external=True) + '?versions', 'uploads': url_for( '.bucket_api', bucket_id=o.id, _external=True) + '?uploads', }
Dump links.
entailment
def dump_links(self, o): """Dump links.""" params = {'versionId': o.version_id} data = { 'self': url_for( '.object_api', bucket_id=o.bucket_id, key=o.key, _external=True, **(params if not o.is_head or o.deleted else {}) ), 'version': url_for( '.object_api', bucket_id=o.bucket_id, key=o.key, _external=True, **params ) } if o.is_head and not o.deleted: data.update({'uploads': url_for( '.object_api', bucket_id=o.bucket_id, key=o.key, _external=True ) + '?uploads', }) return data
Dump links.
entailment
def wrap(self, data, many): """Wrap response in envelope.""" if not many: return data else: data = {'contents': data} bucket = self.context.get('bucket') if bucket: data.update(BucketSchema().dump(bucket).data) return data
Wrap response in envelope.
entailment
def dump_links(self, o): """Dump links.""" links = { 'self': url_for( '.object_api', bucket_id=o.bucket_id, key=o.key, uploadId=o.upload_id, _external=True, ), 'object': url_for( '.object_api', bucket_id=o.bucket_id, key=o.key, _external=True, ), } version_id = self.context.get('object_version_id') if version_id: links.update({ 'object_version': url_for( '.object_api', bucket_id=o.bucket_id, key=o.key, versionId=version_id, _external=True, ) }) bucket = self.context.get('bucket') if bucket: links.update({ 'bucket': url_for( '.bucket_api', bucket_id=o.bucket_id, _external=True, ) }) return links
Dump links.
entailment
def wrap(self, data, many): """Wrap response in envelope.""" if not many: return data else: data = {'parts': data} multipart = self.context.get('multipart') if multipart: data.update(MultipartObjectSchema(context={ 'bucket': multipart.bucket}).dump(multipart).data) return data
Wrap response in envelope.
entailment
def directions(self, origin, destination, mode=None, alternatives=None, waypoints=None, optimize_waypoints=False, avoid=None, language=None, units=None, region=None, departure_time=None, arrival_time=None, sensor=None): """Get directions between locations :param origin: Origin location - string address; (latitude, longitude) two-tuple, dict with ("lat", "lon") keys or object with (lat, lon) attributes :param destination: Destination location - type same as origin :param mode: Travel mode as string, defaults to "driving". See `google docs details <https://developers.google.com/maps/documentation/directions/#TravelModes>`_ :param alternatives: True if provide it has to return more then one route alternative :param waypoints: Iterable with set of intermediate stops, like ("Munich", "Dallas") See `google docs details <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_ :param optimize_waypoints: if true will attempt to re-order supplied waypoints to minimize overall cost of the route. If waypoints are optimized, the route returned will show the optimized order under "waypoint_order". See `google docs details <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_ :param avoid: Iterable with set of restrictions, like ("tolls", "highways"). For full list refer to `google docs details <https://developers.google.com/maps/documentation/directions/#Restrictions>`_ :param language: The language in which to return results. See `list of supported languages <https://developers.google.com/maps/faq#languagesupport>`_ :param units: Unit system for result. Defaults to unit system of origin's country. See `google docs details <https://developers.google.com/maps/documentation/directions/#UnitSystems>`_ :param region: The region code. 
Affects geocoding of origin and destination (see `gmaps.Geocoding.geocode` region parameter) :param departure_time: Desired time of departure as seconds since midnight, January 1, 1970 UTC :param arrival_time: Desired time of arrival for transit directions as seconds since midnight, January 1, 1970 UTC. """ # noqa if optimize_waypoints: waypoints.insert(0, "optimize:true") parameters = dict( origin=self.assume_latlon_or_address(origin), destination=self.assume_latlon_or_address(destination), mode=mode, alternatives=alternatives, waypoints=waypoints or [], avoid=avoid, language=language, units=units, region=region, departure_time=departure_time, arrival_time=arrival_time, sensor=sensor, ) return self._make_request(self.DIRECTIONS_URL, parameters, "routes")
Get directions between locations :param origin: Origin location - string address; (latitude, longitude) two-tuple, dict with ("lat", "lon") keys or object with (lat, lon) attributes :param destination: Destination location - type same as origin :param mode: Travel mode as string, defaults to "driving". See `google docs details <https://developers.google.com/maps/documentation/directions/#TravelModes>`_ :param alternatives: True if provide it has to return more then one route alternative :param waypoints: Iterable with set of intermediate stops, like ("Munich", "Dallas") See `google docs details <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_ :param optimize_waypoints: if true will attempt to re-order supplied waypoints to minimize overall cost of the route. If waypoints are optimized, the route returned will show the optimized order under "waypoint_order". See `google docs details <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_ :param avoid: Iterable with set of restrictions, like ("tolls", "highways"). For full list refer to `google docs details <https://developers.google.com/maps/documentation/directions/#Restrictions>`_ :param language: The language in which to return results. See `list of supported languages <https://developers.google.com/maps/faq#languagesupport>`_ :param units: Unit system for result. Defaults to unit system of origin's country. See `google docs details <https://developers.google.com/maps/documentation/directions/#UnitSystems>`_ :param region: The region code. Affects geocoding of origin and destination (see `gmaps.Geocoding.geocode` region parameter) :param departure_time: Desired time of departure as seconds since midnight, January 1, 1970 UTC :param arrival_time: Desired time of arrival for transit directions as seconds since midnight, January 1, 1970 UTC.
entailment
def upgrade(): """Upgrade database.""" # Variant types: def created(): """Return instance of a column.""" return sa.Column( 'created', sa.DateTime().with_variant(mysql.DATETIME(fsp=6), 'mysql'), nullable=False ) def updated(): """Return instance of a column.""" return sa.Column( 'updated', sa.DateTime().with_variant(mysql.DATETIME(fsp=6), 'mysql'), nullable=False ) def uri(): """Return instance of a column.""" return sa.Column( 'uri', sa.Text().with_variant(mysql.VARCHAR(255), 'mysql'), nullable=True ) def key(nullable=True): """Return instance of a column.""" return sa.Column( 'key', sa.Text().with_variant(mysql.VARCHAR(255), 'mysql'), nullable=nullable ) op.create_table( 'files_files', created(), updated(), sa.Column( 'id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False), uri(), sa.Column('storage_class', sa.String(length=1), nullable=True), sa.Column('size', sa.BigInteger(), nullable=True), sa.Column('checksum', sa.String(length=255), nullable=True), sa.Column('readable', sa.Boolean(name='readable'), nullable=False), sa.Column('writable', sa.Boolean(name='writable'), nullable=False), sa.Column('last_check_at', sa.DateTime(), nullable=True), sa.Column('last_check', sa.Boolean(name='last_check'), nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('uri') ) op.create_table( 'files_location', created(), updated(), sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=20), nullable=False), sa.Column('uri', sa.String(length=255), nullable=False), sa.Column('default', sa.Boolean(name='default'), nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name') ) op.create_table( 'files_bucket', created(), updated(), sa.Column( 'id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False), sa.Column('default_location', sa.Integer(), nullable=False), sa.Column( 'default_storage_class', sa.String(length=1), nullable=False ), sa.Column('size', sa.BigInteger(), nullable=False), sa.Column('quota_size', 
sa.BigInteger(), nullable=True), sa.Column('max_file_size', sa.BigInteger(), nullable=True), sa.Column('locked', sa.Boolean(name='locked'), nullable=False), sa.Column('deleted', sa.Boolean(name='deleted'), nullable=False), sa.ForeignKeyConstraint( ['default_location'], [u'files_location.id'], ondelete='RESTRICT'), sa.PrimaryKeyConstraint('id') ) op.create_table( 'files_buckettags', sa.Column( 'bucket_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False), sa.Column('key', sa.String(length=255), nullable=False), sa.Column('value', sa.Text(), nullable=False), sa.ForeignKeyConstraint( ['bucket_id'], [u'files_bucket.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('bucket_id', 'key') ) op.create_table( 'files_multipartobject', created(), updated(), sa.Column( 'upload_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False), sa.Column( 'bucket_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=True), key(), sa.Column( 'file_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False), sa.Column('chunk_size', sa.Integer(), nullable=True), sa.Column('size', sa.BigInteger(), nullable=True), sa.Column('completed', sa.Boolean(name='completed'), nullable=False), sa.ForeignKeyConstraint( ['bucket_id'], [u'files_bucket.id'], ondelete='RESTRICT'), sa.ForeignKeyConstraint( ['file_id'], [u'files_files.id'], ondelete='RESTRICT'), sa.PrimaryKeyConstraint('upload_id'), sa.UniqueConstraint('upload_id', 'bucket_id', 'key', name='uix_item') ) op.create_table( 'files_object', created(), updated(), sa.Column( 'bucket_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False), key(nullable=False), sa.Column( 'version_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False), sa.Column( 'file_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=True), sa.Column( '_mimetype', sa.String( length=255), nullable=True), sa.Column('is_head', sa.Boolean(name='is_head'), nullable=False), sa.ForeignKeyConstraint( ['bucket_id'], [u'files_bucket.id'], ondelete='RESTRICT'), 
sa.ForeignKeyConstraint( ['file_id'], [u'files_files.id'], ondelete='RESTRICT'), sa.PrimaryKeyConstraint('bucket_id', 'key', 'version_id') ) op.create_index( op.f('ix_files_object__mimetype'), 'files_object', ['_mimetype'], unique=False ) op.create_table( 'files_multipartobject_part', created(), updated(), sa.Column( 'upload_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False), sa.Column( 'part_number', sa.Integer(), autoincrement=False, nullable=False), sa.Column('checksum', sa.String(length=255), nullable=True), sa.ForeignKeyConstraint( ['upload_id'], [u'files_multipartobject.upload_id'], ondelete='RESTRICT'), sa.PrimaryKeyConstraint('upload_id', 'part_number') )
Upgrade database.
entailment
def downgrade(): """Downgrade database.""" op.drop_table('files_multipartobject_part') op.drop_index(op.f('ix_files_object__mimetype'), table_name='files_object') op.drop_table('files_object') op.drop_table('files_multipartobject') op.drop_table('files_buckettags') op.drop_table('files_bucket') op.drop_table('files_location') op.drop_table('files_files')
Downgrade database.
entailment
def guess_mimetype(filename): """Map extra mimetype with the encoding provided. :returns: The extra mimetype. """ m, encoding = mimetypes.guess_type(filename) if encoding: m = ENCODING_MIMETYPES.get(encoding, None) return m or 'application/octet-stream'
Map extra mimetype with the encoding provided. :returns: The extra mimetype.
entailment
def link(text, link_func): """Generate a object formatter for links..""" def object_formatter(v, c, m, p): """Format object view link.""" return Markup('<a href="{0}">{1}</a>'.format( link_func(m), text)) return object_formatter
Generate a object formatter for links..
entailment
def action_verify_checksum(self, ids): """Inactivate users.""" try: count = 0 for file_id in ids: f = FileInstance.query.filter_by( id=uuid.UUID(file_id)).one_or_none() if f is None: raise ValueError(_("Cannot find file instance.")) verify_checksum.delay(file_id) count += 1 if count > 0: flash(_('Fixity check(s) sent to queue.'), 'success') except Exception as exc: if not self.handle_view_exception(exc): raise current_app.logger.exception(str(exc)) # pragma: no cover flash(_('Failed to run fixity checks.'), 'error')
Inactivate users.
entailment
def validate_tag(key, value): """Validate a tag. Keys must be less than 128 chars and values must be less than 256 chars. """ # Note, parse_sql does not include a keys if the value is an empty string # (e.g. 'key=&test=a'), and thus technically we should not get strings # which have zero length. klen = len(key) vlen = len(value) return klen > 0 and klen < 256 and vlen > 0 and vlen < 256
Validate a tag. Keys must be less than 128 chars and values must be less than 256 chars.
entailment
def parse_header_tags(): """Parse tags specified in the HTTP request header.""" # Get the value of the custom HTTP header and interpret it as an query # string qs = request.headers.get( current_app.config['FILES_REST_FILE_TAGS_HEADER'], '') tags = {} for key, value in parse_qsl(qs): # Check for duplicate keys if key in tags: raise DuplicateTagError() # Check for too short/long keys and values. if not validate_tag(key, value): raise InvalidTagError() tags[key] = value return tags or None
Parse tags specified in the HTTP request header.
entailment
def default_partfactory(part_number=None, content_length=None, content_type=None, content_md5=None): """Get default part factory. :param part_number: The part number. (Default: ``None``) :param content_length: The content length. (Default: ``None``) :param content_type: The HTTP Content-Type. (Default: ``None``) :param content_md5: The content MD5. (Default: ``None``) :returns: The content length, the part number, the stream, the content type, MD5 of the content. """ return content_length, part_number, request.stream, content_type, \ content_md5, None
Get default part factory. :param part_number: The part number. (Default: ``None``) :param content_length: The content length. (Default: ``None``) :param content_type: The HTTP Content-Type. (Default: ``None``) :param content_md5: The content MD5. (Default: ``None``) :returns: The content length, the part number, the stream, the content type, MD5 of the content.
entailment
def stream_uploadfactory(content_md5=None, content_length=None, content_type=None): """Get default put factory. If Content-Type is ``'multipart/form-data'`` then the stream is aborted. :param content_md5: The content MD5. (Default: ``None``) :param content_length: The content length. (Default: ``None``) :param content_type: The HTTP Content-Type. (Default: ``None``) :returns: The stream, content length, MD5 of the content. """ if content_type.startswith('multipart/form-data'): abort(422) return request.stream, content_length, content_md5, parse_header_tags()
Get default put factory. If Content-Type is ``'multipart/form-data'`` then the stream is aborted. :param content_md5: The content MD5. (Default: ``None``) :param content_length: The content length. (Default: ``None``) :param content_type: The HTTP Content-Type. (Default: ``None``) :returns: The stream, content length, MD5 of the content.
entailment
def ngfileupload_partfactory(part_number=None, content_length=None, uploaded_file=None): """Part factory for ng-file-upload. :param part_number: The part number. (Default: ``None``) :param content_length: The content length. (Default: ``None``) :param uploaded_file: The upload request. (Default: ``None``) :returns: The content length, part number, stream, HTTP Content-Type header. """ return content_length, part_number, uploaded_file.stream, \ uploaded_file.headers.get('Content-Type'), None, None
Part factory for ng-file-upload. :param part_number: The part number. (Default: ``None``) :param content_length: The content length. (Default: ``None``) :param uploaded_file: The upload request. (Default: ``None``) :returns: The content length, part number, stream, HTTP Content-Type header.
entailment
def ngfileupload_uploadfactory(content_length=None, content_type=None, uploaded_file=None): """Get default put factory. If Content-Type is ``'multipart/form-data'`` then the stream is aborted. :param content_length: The content length. (Default: ``None``) :param content_type: The HTTP Content-Type. (Default: ``None``) :param uploaded_file: The upload request. (Default: ``None``) :param file_tags_header: The file tags. (Default: ``None``) :returns: A tuple containing stream, content length, and empty header. """ if not content_type.startswith('multipart/form-data'): abort(422) return uploaded_file.stream, content_length, None, parse_header_tags()
Get default put factory. If Content-Type is ``'multipart/form-data'`` then the stream is aborted. :param content_length: The content length. (Default: ``None``) :param content_type: The HTTP Content-Type. (Default: ``None``) :param uploaded_file: The upload request. (Default: ``None``) :param file_tags_header: The file tags. (Default: ``None``) :returns: A tuple containing stream, content length, and empty header.
entailment
def pass_bucket(f): """Decorate to retrieve a bucket.""" @wraps(f) def decorate(*args, **kwargs): bucket_id = kwargs.pop('bucket_id') bucket = Bucket.get(as_uuid(bucket_id)) if not bucket: abort(404, 'Bucket does not exist.') return f(bucket=bucket, *args, **kwargs) return decorate
Decorate to retrieve a bucket.
entailment
def pass_multipart(with_completed=False): """Decorate to retrieve an object.""" def decorate(f): @wraps(f) def inner(self, bucket, key, upload_id, *args, **kwargs): obj = MultipartObject.get( bucket, key, upload_id, with_completed=with_completed) if obj is None: abort(404, 'uploadId does not exists.') return f(self, obj, *args, **kwargs) return inner return decorate
Decorate to retrieve an object.
entailment
def check_permission(permission, hidden=True): """Check if permission is allowed. If permission fails then the connection is aborted. :param permission: The permission to check. :param hidden: Determine if a 404 error (``True``) or 401/403 error (``False``) should be returned if the permission is rejected (i.e. hide or reveal the existence of a particular object). """ if permission is not None and not permission.can(): if hidden: abort(404) else: if current_user.is_authenticated: abort(403, 'You do not have a permission for this action') abort(401)
Check if permission is allowed. If permission fails then the connection is aborted. :param permission: The permission to check. :param hidden: Determine if a 404 error (``True``) or 401/403 error (``False``) should be returned if the permission is rejected (i.e. hide or reveal the existence of a particular object).
entailment
def need_permissions(object_getter, action, hidden=True): """Get permission for buckets or abort. :param object_getter: The function used to retrieve the object and pass it to the permission factory. :param action: The action needed. :param hidden: Determine which kind of error to return. (Default: ``True``) """ def decorator_builder(f): @wraps(f) def decorate(*args, **kwargs): check_permission(current_permission_factory( object_getter(*args, **kwargs), action(*args, **kwargs) if callable(action) else action, ), hidden=hidden) return f(*args, **kwargs) return decorate return decorator_builder
Get permission for buckets or abort. :param object_getter: The function used to retrieve the object and pass it to the permission factory. :param action: The action needed. :param hidden: Determine which kind of error to return. (Default: ``True``)
entailment
def post(self): """Create bucket.""" with db.session.begin_nested(): bucket = Bucket.create( storage_class=current_app.config[ 'FILES_REST_DEFAULT_STORAGE_CLASS' ], ) db.session.commit() return self.make_response( data=bucket, context={ 'class': Bucket, } )
Create bucket.
entailment
def multipart_listuploads(self, bucket): """List objects in a bucket. :param bucket: A :class:`invenio_files_rest.models.Bucket` instance. :returns: The Flask response. """ return self.make_response( data=MultipartObject.query_by_bucket(bucket).limit(1000).all(), context={ 'class': MultipartObject, 'bucket': bucket, 'many': True, } )
List objects in a bucket. :param bucket: A :class:`invenio_files_rest.models.Bucket` instance. :returns: The Flask response.
entailment
def listobjects(self, bucket, versions): """List objects in a bucket. :param bucket: A :class:`invenio_files_rest.models.Bucket` instance. :returns: The Flask response. """ if versions is not missing: check_permission( current_permission_factory(bucket, 'bucket-read-versions'), hidden=False ) return self.make_response( data=ObjectVersion.get_by_bucket( bucket.id, versions=versions is not missing).limit(1000).all(), context={ 'class': ObjectVersion, 'bucket': bucket, 'many': True, } )
List objects in a bucket. :param bucket: A :class:`invenio_files_rest.models.Bucket` instance. :returns: The Flask response.
entailment
def get(self, bucket=None, versions=missing, uploads=missing): """Get list of objects in the bucket. :param bucket: A :class:`invenio_files_rest.models.Bucket` instance. :returns: The Flask response. """ if uploads is not missing: return self.multipart_listuploads(bucket) else: return self.listobjects(bucket, versions)
Get list of objects in the bucket. :param bucket: A :class:`invenio_files_rest.models.Bucket` instance. :returns: The Flask response.
entailment
def check_object_permission(obj): """Retrieve object and abort if it doesn't exists.""" check_permission(current_permission_factory( obj, 'object-read' )) if not obj.is_head: check_permission( current_permission_factory(obj, 'object-read-version'), hidden=False )
Retrieve object and abort if it doesn't exists.
entailment
def get_object(cls, bucket, key, version_id): """Retrieve object and abort if it doesn't exists. If the file is not found, the connection is aborted and the 404 error is returned. :param bucket: The bucket (instance or id) to get the object from. :param key: The file key. :param version_id: The version ID. :returns: A :class:`invenio_files_rest.models.ObjectVersion` instance. """ obj = ObjectVersion.get(bucket, key, version_id=version_id) if not obj: abort(404, 'Object does not exists.') cls.check_object_permission(obj) return obj
Retrieve object and abort if it doesn't exists. If the file is not found, the connection is aborted and the 404 error is returned. :param bucket: The bucket (instance or id) to get the object from. :param key: The file key. :param version_id: The version ID. :returns: A :class:`invenio_files_rest.models.ObjectVersion` instance.
entailment
def create_object(self, bucket, key):
    """Create a new object in a bucket from the uploaded request payload.

    :param bucket: The bucket (instance or id) to create the object in.
    :param key: The file key.
    :returns: A Flask response.
    :raises invenio_files_rest.errors.FileSizeError: If the declared
        Content-Length exceeds the bucket's size limit.
    """
    # Initial validation of size based on Content-Length.
    # User can tamper with Content-Length, so this is just an initial up
    # front check. The storage subsystem must validate the size limit as
    # well.
    stream, content_length, content_md5, tags = \
        current_files_rest.upload_factory()

    size_limit = bucket.size_limit
    if content_length and size_limit and content_length > size_limit:
        # Non-int limits carry a human-readable reason of their own.
        desc = 'File size limit exceeded.' \
            if isinstance(size_limit, int) else size_limit.reason
        raise FileSizeError(description=desc)

    with db.session.begin_nested():
        obj = ObjectVersion.create(bucket, key)
        obj.set_contents(
            stream, size=content_length, size_limit=size_limit)
        # Add tags, if any. Fix: use distinct loop names so the ``key``
        # parameter is not shadowed inside the loop.
        if tags:
            for tag_key, tag_value in tags.items():
                ObjectVersionTag.create(obj, tag_key, tag_value)
    db.session.commit()
    return self.make_response(
        data=obj,
        context={
            'class': ObjectVersion,
            'bucket': bucket,
        },
        etag=obj.file.checksum
    )
Create a new object. :param bucket: The bucket (instance or id) to get the object from. :param key: The file key. :returns: A Flask response.
entailment
def delete_object(self, bucket, obj, version_id):
    """Delete an existing object.

    :param bucket: The bucket (instance or id) to get the object from.
    :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance.
    :param version_id: The version ID; ``None`` means "soft delete the
        head" rather than removing a specific version.
    :returns: A Flask response (204, empty body).
    """
    if version_id is None:
        # Create a delete marker (soft delete) — the data itself is kept.
        with db.session.begin_nested():
            ObjectVersion.delete(bucket, obj.key)
        db.session.commit()
    else:
        # Permanently delete specific object version — needs an extra
        # permission beyond plain object deletion.
        check_permission(
            current_permission_factory(bucket, 'object-delete-version'),
            hidden=False,
        )
        obj.remove()
        db.session.commit()
        # Schedule async removal of the file data only after the DB
        # commit succeeded.
        if obj.file_id:
            remove_file_data.delay(str(obj.file_id))
    return self.make_response('', 204)
Delete an existing object. :param bucket: The bucket (instance or id) to get the object from. :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance. :param version_id: The version ID. :returns: A Flask response.
entailment
def send_object(bucket, obj, expected_chksum=None, logger_data=None,
                restricted=True, as_attachment=False):
    """Stream an object's file to the client.

    :param bucket: The bucket (instance or id) the object belongs to.
    :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance.
    :param expected_chksum: Expected checksum; a mismatch is logged, not
        fatal.
    :param logger_data: Extra data attached to the mismatch log record.
    :param restricted: Passed through to ``Object.send_file()``.
    :param as_attachment: Passed through to ``Object.send_file()``.
    :returns: A Flask response.
    """
    # Serving an older (non-head) version requires an extra permission.
    if not obj.is_head:
        check_permission(
            current_permission_factory(obj, 'object-read-version'),
            hidden=False
        )

    if expected_chksum and expected_chksum != obj.file.checksum:
        current_app.logger.warning(
            'File checksum mismatch detected.', extra=logger_data)

    # Notify listeners (e.g. download statistics) before streaming.
    file_downloaded.send(current_app._get_current_object(), obj=obj)
    return obj.send_file(restricted=restricted, as_attachment=as_attachment)
Send an object for a given bucket. :param bucket: The bucket (instance or id) to get the object from. :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance. :param expected_chksum: Expected checksum. :param logger_data: The python logger. :param kwargs: Keyword arguments passed to ``Object.send_file()`` :returns: A Flask response.
entailment
def multipart_listparts(self, multipart):
    """List the uploaded parts of a multipart upload.

    :param multipart: A :class:`invenio_files_rest.models.MultipartObject`
        instance.
    :returns: A Flask response.
    """
    parts = (
        Part.query_by_multipart(multipart)
        .order_by(Part.part_number)
        .limit(1000)
        .all()
    )
    return self.make_response(
        data=parts,
        context={
            'class': Part,
            'multipart': multipart,
            'many': True,
        },
    )
Get parts of a multipart upload. :param multipart: A :class:`invenio_files_rest.models.MultipartObject` instance. :returns: A Flask response.
entailment
def multipart_init(self, bucket, key, size=None, part_size=None):
    """Initialize a multipart upload.

    :param bucket: The bucket (instance or id) to upload into.
    :param key: The file key.
    :param size: The total size of the final file.
    :param part_size: The size of each part.
    :raises invenio_files_rest.errors.MissingQueryParameter: If size or
        part_size are not provided.
    :returns: A Flask response.
    """
    # Both query parameters are mandatory; absent ones arrive as None.
    for param_name, param_value in (('size', size), ('partSize', part_size)):
        if param_value is None:
            raise MissingQueryParameter(param_name)

    multipart = MultipartObject.create(bucket, key, size, part_size)
    db.session.commit()
    return self.make_response(
        data=multipart,
        context={
            'class': MultipartObject,
            'bucket': bucket,
        },
    )
Initialize a multipart upload. :param bucket: The bucket (instance or id) to get the object from. :param key: The file key. :param size: The total size. :param part_size: The part size. :raises invenio_files_rest.errors.MissingQueryParameter: If size or part_size are not defined. :returns: A Flask response.
entailment
def multipart_uploadpart(self, multipart):
    """Upload a part of a multipart upload.

    :param multipart: A :class:`invenio_files_rest.models.MultipartObject`
        instance.
    :returns: A Flask response.
    :raises invenio_files_rest.errors.MultipartInvalidChunkSize: If the
        request Content-Length does not match the expected part size.
    """
    content_length, part_number, stream, content_type, content_md5, tags =\
        current_files_rest.multipart_partfactory()

    if content_length:
        # Each part must be exactly chunk_size bytes, except the final
        # part which must match the precomputed last_part_size.
        ck = multipart.last_part_size if \
            part_number == multipart.last_part_number \
            else multipart.chunk_size
        if ck != content_length:
            raise MultipartInvalidChunkSize()

    # Create part
    try:
        p = Part.get_or_create(multipart, part_number)
        p.set_contents(stream)
        db.session.commit()
    except Exception:
        # We remove the Part since incomplete data may have been written to
        # disk (e.g. client closed connection etc.) so it must be
        # reuploaded.
        db.session.rollback()
        Part.delete(multipart, part_number)
        raise
    return self.make_response(
        data=p,
        context={
            'class': Part,
        },
        etag=p.checksum
    )
Upload a part. :param multipart: A :class:`invenio_files_rest.models.MultipartObject` instance. :returns: A Flask response.
entailment
def multipart_complete(self, multipart):
    """Complete a multipart upload.

    The parts are merged into a single object version by a background
    task; the response is kept open while waiting for it.

    :param multipart: A :class:`invenio_files_rest.models.MultipartObject`
        instance.
    :returns: A Flask response.
    """
    multipart.complete()
    db.session.commit()

    # The version id is generated up front so it can be serialized into
    # the response while the merge task is still running.
    version_id = str(uuid.uuid4())

    return self.make_response(
        data=multipart,
        context={
            'class': MultipartObject,
            'bucket': multipart.bucket,
            'object_version_id': version_id,
        },
        # This will wait for the result, and send whitespace on the
        # connection until the task has finished (or max timeout reached).
        task_result=merge_multipartobject.delay(
            str(multipart.upload_id),
            version_id=version_id,
        ),
    )
Complete a multipart upload. :param multipart: A :class:`invenio_files_rest.models.MultipartObject` instance. :returns: A Flask response.
entailment
def multipart_delete(self, multipart):
    """Abort a multipart upload and schedule cleanup of its data.

    :param multipart: A :class:`invenio_files_rest.models.MultipartObject`
        instance.
    :returns: A Flask response (204, empty body).
    """
    multipart.delete()
    db.session.commit()
    file_id = multipart.file_id
    if file_id:
        # Remove the partially-uploaded file data asynchronously.
        remove_file_data.delay(str(file_id))
    return self.make_response('', 204)
Abort a multipart upload. :param multipart: A :class:`invenio_files_rest.models.MultipartObject` instance. :returns: A Flask response.
entailment
def get(self, bucket=None, key=None, version_id=None, upload_id=None,
        uploads=None, download=None):
    """Retrieve an object or list the parts of a multipart upload.

    :param bucket: The bucket (instance or id) to get the object from.
        (Default: ``None``)
    :param key: The file key. (Default: ``None``)
    :param version_id: The version ID. (Default: ``None``)
    :param upload_id: The upload ID. (Default: ``None``)
    :param download: The download flag. (Default: ``None``)
    :returns: A Flask response.
    """
    if upload_id:
        return self.multipart_listparts(bucket, key, upload_id)
    obj = self.get_object(bucket, key, version_id)
    # 'download' is None when absent from the query string; any present
    # value forces the file to be served as an attachment.
    return self.send_object(
        bucket, obj, as_attachment=download is not None)
Get object or list parts of a multipart upload. :param bucket: The bucket (instance or id) to get the object from. (Default: ``None``) :param key: The file key. (Default: ``None``) :param version_id: The version ID. (Default: ``None``) :param upload_id: The upload ID. (Default: ``None``) :param download: The download flag. (Default: ``None``) :returns: A Flask response.
entailment
def post(self, bucket=None, key=None, uploads=missing, upload_id=None):
    """Upload a new object or start/complete a multipart upload.

    :param bucket: The bucket (instance or id) to get the object from.
        (Default: ``None``)
    :param key: The file key. (Default: ``None``)
    :param uploads: ``missing`` unless a multipart init was requested.
    :param upload_id: The upload ID. (Default: ``None``)
    :returns: A Flask response.
    """
    if uploads is not missing:
        return self.multipart_init(bucket, key)
    if upload_id is not None:
        return self.multipart_complete(bucket, key, upload_id)
    # Plain POST uploads are not supported (objects are created via PUT).
    abort(403)
Upload a new object or start/complete a multipart upload. :param bucket: The bucket (instance or id) to get the object from. (Default: ``None``) :param key: The file key. (Default: ``None``) :param upload_id: The upload ID. (Default: ``None``) :returns: A Flask response.
entailment
def put(self, bucket=None, key=None, upload_id=None):
    """Create/update an object or upload a part of a multipart upload.

    :param bucket: The bucket (instance or id) to get the object from.
        (Default: ``None``)
    :param key: The file key. (Default: ``None``)
    :param upload_id: The upload ID. (Default: ``None``)
    :returns: A Flask response.
    """
    if upload_id is None:
        return self.create_object(bucket, key)
    return self.multipart_uploadpart(bucket, key, upload_id)
Update a new object or upload a part of a multipart upload. :param bucket: The bucket (instance or id) to get the object from. (Default: ``None``) :param key: The file key. (Default: ``None``) :param upload_id: The upload ID. (Default: ``None``) :returns: A Flask response.
entailment
def delete(self, bucket=None, key=None, version_id=None, upload_id=None,
           uploads=None):
    """Delete an object or abort a multipart upload.

    :param bucket: The bucket (instance or id) to get the object from.
        (Default: ``None``)
    :param key: The file key. (Default: ``None``)
    :param version_id: The version ID. (Default: ``None``)
    :param upload_id: The upload ID. (Default: ``None``)
    :returns: A Flask response.
    """
    if upload_id is not None:
        return self.multipart_delete(bucket, key, upload_id)
    obj = self.get_object(bucket, key, version_id)
    return self.delete_object(bucket, obj, version_id)
Delete an object or abort a multipart upload. :param bucket: The bucket (instance or id) to get the object from. (Default: ``None``) :param key: The file key. (Default: ``None``) :param version_id: The version ID. (Default: ``None``) :param upload_id: The upload ID. (Default: ``None``) :returns: A Flask response.
entailment
def _get_container(self):
    """Return the cached container, creating it lazily on first access.

    When pyrax is not in use, the cached value is ``None``.
    """
    if not hasattr(self, "_container"):
        if self.use_pyrax:
            container = self.connection.create_container(
                self.container_name)
        else:
            container = None
        self._container = container
    return self._container
Gets or creates the container.
entailment
def _set_container(self, container):
    """
    Sets the container (and, if needed, the configured TTL on it),
    making the container publicly available.
    """
    if self.use_pyrax:
        # Re-publish only when the CDN TTL or enabled flag drifted from
        # the configured values.
        if container.cdn_ttl != self.ttl or not container.cdn_enabled:
            container.make_public(ttl=self.ttl)
        # Drop the cached public URI since publishing may change it.
        if hasattr(self, "_container_public_uri"):
            delattr(self, "_container_public_uri")
    self._container = container
Sets the container (and, if needed, the configured TTL on it), making the container publicly available.
entailment
def _get_object(self, name):
    """Return the named object from the container, or ``None`` if absent.

    The "not found" exception type depends on which client library is in
    use (pyrax vs. swiftclient); without either, errors propagate.
    """
    if self.use_pyrax:
        try:
            return self.container.get_object(name)
        except pyrax.exceptions.NoSuchObject:
            return None
    if swiftclient:
        try:
            return self.container.get_object(name)
        except swiftclient.exceptions.ClientException:
            return None
    return self.container.get_object(name)
Helper function to retrieve the requested Object.
entailment
def as_object_version(value):
    """Coerce a value into an :class:`ObjectVersion` instance.

    :param value: A :class:`invenio_files_rest.models.ObjectVersion`
        instance or an object version ID.
    :returns: An ObjectVersion instance, or ``None`` if the ID does not
        resolve.
    """
    if isinstance(value, ObjectVersion):
        return value
    return ObjectVersion.query.filter_by(version_id=value).one_or_none()
Get an object version object from an object version ID or an object version. :param value: A :class:`invenio_files_rest.models.ObjectVersion` or an object version ID. :returns: A :class:`invenio_files_rest.models.ObjectVersion` instance.
entailment
def update_bucket_size(f):
    """Decorator that grows the owning bucket's size after the operation.

    After ``f`` completes successfully, ``self.bucket.size`` is increased
    by ``self.file.size``.
    """
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        result = f(self, *args, **kwargs)
        # Account for the stored file in the bucket's running total.
        self.bucket.size += self.file.size
        return result
    return wrapper
Decorate to update bucket size after operation.
entailment
def ensure_state(default_getter, exc_class, default_msg=None):
    """Build a decorator factory guarding methods on object state.

    :param default_getter: Default predicate called with the instance;
        a falsy result triggers the exception.
    :param exc_class: Exception class raised when the predicate fails.
    :param default_msg: Optional default message for the exception.
    :returns: A decorator factory accepting ``getter`` and ``msg``
        overrides.
    """
    def decorator(getter=default_getter, msg=default_msg):
        def ensure_decorator(f):
            @wraps(f)
            def inner(self, *args, **kwargs):
                if not getter(self):
                    # Only pass a message when one was configured.
                    if msg:
                        raise exc_class(msg)
                    raise exc_class()
                return f(self, *args, **kwargs)
            return inner
        return ensure_decorator
    return decorator
Create a decorator factory function.
entailment
def validate_name(self, key, name):
    """Validate a location name.

    :param key: The attribute name being validated (SQLAlchemy validator
        convention).
    :param name: The proposed location name.
    :returns: The validated name.
    :raises ValueError: If the name is not a slug or exceeds 20
        characters.
    """
    if not slug_pattern.match(name) or len(name) > 20:
        # Fixed typo in the error message ('danshes' -> 'dashes').
        raise ValueError(
            'Invalid location name (lower-case alphanumeric + dashes).')
    return name
Validate name.
entailment
def size_limit(self):
    """Get the effective size limit for this bucket.

    The limit is the minimum over all registered file size limiters that
    report a concrete limit; ``None`` when no limiter applies.
    """
    applicable = [
        limiter
        for limiter in current_files_rest.file_size_limiters(self)
        if limiter.limit is not None
    ]
    if not applicable:
        return None
    return min(applicable)
Get size limit for this bucket. The limit is based on the minimum output of the file size limiters.
entailment
def snapshot(self, lock=False):
    """Create a snapshot of latest objects in bucket.

    :param lock: Create the new bucket in a locked state.
    :returns: Newly created bucket containing copied ObjectVersions.
    """
    with db.session.begin_nested():
        # The snapshot inherits location, storage class and quota from
        # this bucket.
        bucket = Bucket(
            default_location=self.default_location,
            default_storage_class=self.default_storage_class,
            quota_size=self.quota_size,
        )
        db.session.add(bucket)

    # Copy the head (latest) versions into the new bucket.
    for o in ObjectVersion.get_by_bucket(self):
        o.copy(bucket=bucket)

    # Lock only after copying, so the copies are not blocked by a locked
    # destination bucket.
    bucket.locked = True if lock else self.locked
    return bucket
Create a snapshot of latest objects in bucket. :param lock: Create the new bucket in a locked state. :returns: Newly created bucket containing copied ObjectVersion.
entailment
def sync(self, bucket, delete_extras=False):
    """Sync self bucket ObjectVersions to the destination bucket.

    The bucket is fully mirrored with the destination bucket following
    the logic:

    * same ObjectVersions are not touched
    * new ObjectVersions are added to destination
    * deleted ObjectVersions are deleted in destination
    * extra ObjectVersions in dest are deleted if ``delete_extras`` is
      True

    :param bucket: The destination bucket.
    :param delete_extras: Delete extra ObjectVersions in destination if
        True.
    :returns: The bucket with an exact copy of ObjectVersions in self.
    """
    # Cannot modify a locked destination bucket.
    assert not bucket.locked

    src_ovs = ObjectVersion.get_by_bucket(bucket=self, with_deleted=True)
    dest_ovs = ObjectVersion.get_by_bucket(bucket=bucket,
                                           with_deleted=True)
    # transform into a dict { key: object version }
    src_keys = {ov.key: ov for ov in src_ovs}
    dest_keys = {ov.key: ov for ov in dest_ovs}

    for key, ov in src_keys.items():
        if not ov.deleted:
            # Copy when the key is new or points to different file data.
            if key not in dest_keys or \
                    ov.file_id != dest_keys[key].file_id:
                ov.copy(bucket=bucket)
        elif key in dest_keys and not dest_keys[key].deleted:
            # Source was deleted: mirror the deletion in the destination.
            ObjectVersion.delete(bucket, key)

    if delete_extras:
        # Remove destination keys that do not exist in the source at all.
        for key, ov in dest_keys.items():
            if key not in src_keys:
                ObjectVersion.delete(bucket, key)

    return bucket
Sync self bucket ObjectVersions to the destination bucket. The bucket is fully mirrored with the destination bucket following the logic: * same ObjectVersions are not touched * new ObjectVersions are added to destination * deleted ObjectVersions are deleted in destination * extra ObjectVersions in dest are deleted if `delete_extras` param is True :param bucket: The destination bucket. :param delete_extras: Delete extra ObjectVersions in destination if True. :returns: The bucket with an exact copy of ObjectVersions in self.
entailment
def create(cls, location=None, storage_class=None, **kwargs):
    r"""Create a bucket.

    :param location: Location of a bucket (instance or name).
        Default: Default location.
    :param storage_class: Storage class of a bucket.
        Default: Default storage class.
    :param \**kwargs: Keyword arguments are forwarded to the class
        constructor.
    :returns: Created bucket.
    """
    with db.session.begin_nested():
        # Resolve the location: fall back to the default, or look it up
        # by name when given as a string.
        if location is None:
            location = Location.get_default()
        elif isinstance(location, six.string_types):
            location = Location.get_by_name(location)

        obj = cls(
            default_location=location.id,
            default_storage_class=storage_class or current_app.config[
                'FILES_REST_DEFAULT_STORAGE_CLASS'],
            **kwargs
        )
        db.session.add(obj)
    return obj
r"""Create a bucket. :param location: Location of a bucket (instance or name). Default: Default location. :param storage_class: Storage class of a bucket. Default: Default storage class. :param \**kwargs: Keyword arguments are forwarded to the class constructor. :returns: Created bucket.
entailment
def get(cls, bucket_id):
    """Fetch a bucket by ID, excluding soft-deleted buckets.

    :param bucket_id: Bucket identifier.
    :returns: The bucket instance, or ``None`` if not found.
    """
    query = cls.query.filter_by(
        id=bucket_id,
        deleted=False
    )
    return query.one_or_none()
Get a bucket object (excluding deleted). :param bucket_id: Bucket identifier. :returns: Bucket instance.
entailment
def delete(cls, bucket_id):
    """Soft-delete a bucket.

    Does not actually delete the Bucket, just marks it as deleted.

    :returns: ``True`` if the bucket was marked deleted; ``False`` if it
        does not exist or was already deleted.
    """
    bucket = cls.get(bucket_id)
    if bucket is None or bucket.deleted:
        return False
    bucket.deleted = True
    return True
Delete a bucket. Does not actually delete the Bucket, just marks it as deleted.
entailment
def remove(self):
    """Permanently remove a bucket and all objects (including versions).

    .. warning::

       This by-passes the normal versioning and should only be used when
       you want to permanently delete a bucket and its objects. Otherwise
       use :py:data:`Bucket.delete()`.

       Note the method does not remove the associated file instances
       which must be garbage collected.

    :returns: ``self``.
    """
    with db.session.begin_nested():
        # Bulk-delete all versions first, then the bucket row itself.
        ObjectVersion.query.filter_by(
            bucket_id=self.id
        ).delete()
        self.query.filter_by(id=self.id).delete()
    return self
Permanently remove a bucket and all objects (including versions). .. warning:: This by-passes the normal versioning and should only be used when you want to permanently delete a bucket and its objects. Otherwise use :py:data:`Bucket.delete()`. Note the method does not remove the associated file instances which must be garbage collected. :returns: ``self``.
entailment
def get(cls, bucket, key):
    """Fetch the tag for the given bucket and key, or ``None``."""
    bucket_id = as_bucket_id(bucket)
    return cls.query.filter_by(
        bucket_id=bucket_id,
        key=key,
    ).one_or_none()
Get tag object.
entailment
def create(cls, bucket, key, value):
    """Create a new tag on a bucket and add it to the session."""
    with db.session.begin_nested():
        tag = cls(
            bucket_id=as_bucket_id(bucket),
            key=key,
            value=value,
        )
        db.session.add(tag)
    return tag
Create a new tag for bucket.
entailment
def create_or_update(cls, bucket, key, value):
    """Set a bucket tag's value, creating the tag if it does not exist."""
    existing = cls.get(bucket, key)
    if existing is None:
        return cls.create(bucket, key, value)
    existing.value = value
    db.session.merge(existing)
    return existing
Create or update a new tag for bucket.
entailment
def get_value(cls, bucket, key):
    """Return the tag's value, or ``None`` when the tag is absent."""
    tag = cls.get(bucket, key)
    if tag is None:
        return None
    return tag.value
Get tag value.
entailment
def delete(cls, bucket, key):
    """Delete a tag from a bucket (no-op when the tag does not exist)."""
    with db.session.begin_nested():
        query = cls.query.filter_by(
            bucket_id=as_bucket_id(bucket),
            key=key,
        )
        query.delete()
Delete a tag.
entailment
def validate_uri(self, key, uri):
    """Validate that a file URI fits within the configured maximum length.

    :param key: The attribute name being validated (SQLAlchemy validator
        convention).
    :param uri: The proposed URI.
    :returns: The validated URI.
    :raises ValueError: If the URI exceeds the configured limit.
    """
    max_len = current_app.config['FILES_REST_FILE_URI_MAX_LEN']
    if len(uri) > max_len:
        raise ValueError(
            'FileInstance URI too long ({0}).'.format(len(uri)))
    return uri
Validate uri.
entailment