code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
self.set_uri( *self.storage(**kwargs).initialize(size=size), readable=False, writable=True)
def init_contents(self, size=0, **kwargs)
Initialize file.
19.557459
16.34816
1.19631
self.checksum = None return self.storage(**kwargs).update( stream, seek=seek, size=size, chunk_size=chunk_size, progress_callback=progress_callback )
def update_contents(self, stream, seek=0, size=None, chunk_size=None, progress_callback=None, **kwargs)
Save contents of stream to this file. :param obj: ObjectVersion instance from where this file is accessed from. :param stream: File-like stream.
3.54663
4.955896
0.715638
self.set_uri( *self.storage(**kwargs).save( stream, chunk_size=chunk_size, size=size, size_limit=size_limit, progress_callback=progress_callback))
def set_contents(self, stream, chunk_size=None, size=None, size_limit=None, progress_callback=None, **kwargs)
Save contents of stream to this file. :param obj: ObjectVersion instance from where this file is accessed from. :param stream: File-like stream.
4.135129
4.800914
0.861321
if not fileinstance.readable: raise ValueError('Source file instance is not readable.') if not self.size == 0: raise ValueError('File instance has data.') self.set_uri( *self.storage(**kwargs).copy( fileinstance.storage(**kwargs), chunk_size=chunk_size, progress_callback=progress_callback))
def copy_contents(self, fileinstance, progress_callback=None, chunk_size=None, **kwargs)
Copy this file instance into another file instance.
5.386188
4.839646
1.11293
return self.storage(**kwargs).send_file( filename, mimetype=mimetype, restricted=restricted, checksum=self.checksum, trusted=trusted, chunk_size=chunk_size, as_attachment=as_attachment, )
def send_file(self, filename, restricted=True, mimetype=None, trusted=False, chunk_size=None, as_attachment=False, **kwargs)
Send file to client.
2.747727
2.896431
0.94866
self.uri = uri self.size = size self.checksum = checksum self.writable = writable self.readable = readable self.storage_class = \ current_app.config['FILES_REST_DEFAULT_STORAGE_CLASS'] \ if storage_class is None else \ storage_class return self
def set_uri(self, uri, size, checksum, readable=True, writable=False, storage_class=None)
Set a location of a file.
2.507204
2.524262
0.993242
if size_limit is None: size_limit = self.bucket.size_limit self.file = FileInstance.create() self.file.set_contents( stream, size_limit=size_limit, size=size, chunk_size=chunk_size, progress_callback=progress_callback, default_location=self.bucket.location.uri, default_storage_class=self.bucket.default_storage_class, ) return self
def set_contents(self, stream, chunk_size=None, size=None, size_limit=None, progress_callback=None)
Save contents of stream to file instance. If a file instance has already been set, this methods raises an ``FileInstanceAlreadySetError`` exception. :param stream: File-like stream. :param size: Size of stream if known. :param chunk_size: Desired chunk size to read stream in. It is up to the storage interface if it respects this value.
3.366996
3.182284
1.058044
self.file = FileInstance() self.file.set_uri( uri, size, checksum, storage_class=storage_class ) db.session.add(self.file) return self
def set_location(self, uri, size, checksum, storage_class=None)
Set only URI location of for object. Useful to link files on externally controlled storage. If a file instance has already been set, this methods raises an ``FileInstanceAlreadySetError`` exception. :param uri: Full URI to object (which can be interpreted by the storage interface). :param size: Size of file. :param checksum: Checksum of file. :param storage_class: Storage class where file is stored ()
4.103628
4.011748
1.022903
return self.file.send_file( self.basename, restricted=restricted, mimetype=self.mimetype, trusted=trusted, **kwargs )
def send_file(self, restricted=True, trusted=False, **kwargs)
Wrap around FileInstance's send file.
4.011263
3.360532
1.193639
new_ob = ObjectVersion.create( self.bucket if bucket is None else as_bucket(bucket), key or self.key, _file_id=self.file_id ) for tag in self.tags: ObjectVersionTag.create_or_update(object_version=new_ob, key=tag.key, value=tag.value) return new_ob
def copy(self, bucket=None, key=None)
Copy an object version to a given bucket + object key. The copy operation is handled completely at the metadata level. The actual data on disk is not copied. Instead, the two object versions will point to the same physical file (via the same FileInstance). All the tags associated with the current object version are copied over to the new instance. .. warning:: If the destination object exists, it will be replaced by the new object version which will become the latest version. :param bucket: The bucket (instance or id) to copy the object to. Default: current bucket. :param key: Key name of destination object. Default: current object key. :returns: The copied object version.
4.24804
4.433874
0.958088
with db.session.begin_nested(): if self.file_id: self.bucket.size -= self.file.size self.query.filter_by( bucket_id=self.bucket_id, key=self.key, version_id=self.version_id, ).delete() return self
def remove(self)
Permanently remove a specific object version from the database. .. warning:: This by-passes the normal versioning and should only be used when you want to permanently delete a specific object version. Otherwise use :py:data:`ObjectVersion.delete()`. Note the method does not remove the associated file instance which must be garbage collected. :returns: ``self``.
4.225206
3.935918
1.0735
bucket = as_bucket(bucket) if bucket.locked: raise BucketLockedError() with db.session.begin_nested(): latest_obj = cls.query.filter( cls.bucket == bucket, cls.key == key, cls.is_head.is_(True) ).one_or_none() if latest_obj is not None: latest_obj.is_head = False db.session.add(latest_obj) # By default objects are created in a deleted state (i.e. # file_id is null). obj = cls( bucket=bucket, key=key, version_id=version_id or uuid.uuid4(), is_head=True, mimetype=mimetype, ) if _file_id: file_ = _file_id if isinstance(_file_id, FileInstance) else \ FileInstance.get(_file_id) obj.set_file(file_) db.session.add(obj) if stream: obj.set_contents(stream, **kwargs) return obj
def create(cls, bucket, key, _file_id=None, stream=None, mimetype=None, version_id=None, **kwargs)
Create a new object in a bucket. The created object is by default created as a delete marker. You must use ``set_contents()`` or ``set_location()`` in order to change this. :param bucket: The bucket (instance or id) to create the object in. :param key: Key of object. :param _file_id: For internal use. :param stream: File-like stream object. Used to set content of object immediately after being created. :param mimetype: MIME type of the file object if it is known. :param kwargs: Keyword arguments passed to ``Object.set_contents()``.
2.724213
2.845099
0.957511
filters = [ cls.bucket_id == as_bucket_id(bucket), cls.key == key, ] if version_id: filters.append(cls.version_id == version_id) else: filters.append(cls.is_head.is_(True)) filters.append(cls.file_id.isnot(None)) return cls.query.filter(*filters).one_or_none()
def get(cls, bucket, key, version_id=None)
Fetch a specific object. By default the latest object version is returned, if ``version_id`` is not set. :param bucket: The bucket (instance or id) to get the object from. :param key: Key of object. :param version_id: Specific version of an object.
2.623754
3.248758
0.807617
filters = [ cls.bucket_id == as_bucket_id(bucket), cls.key == key, ] order = cls.created.desc() if desc else cls.created.asc() return cls.query.filter(*filters).order_by(cls.key, order)
def get_versions(cls, bucket, key, desc=True)
Fetch all versions of a specific object. :param bucket: The bucket (instance or id) to get the object from. :param key: Key of object. :param desc: Sort results desc if True, asc otherwise. :returns: The query to execute to fetch all versions.
4.05716
4.374504
0.927456
bucket_id = as_bucket_id(bucket) obj = cls.get(bucket_id, key) if obj: return cls.create(as_bucket(bucket), key) return None
def delete(cls, bucket, key)
Delete an object. Technically works by creating a new version which works as a delete marker. :param bucket: The bucket (instance or id) to delete the object from. :param key: Key of object. :returns: Created delete marker object if key exists else ``None``.
4.877435
5.260635
0.927157
bucket_id = bucket.id if isinstance(bucket, Bucket) else bucket filters = [ cls.bucket_id == bucket_id, ] if not versions: filters.append(cls.is_head.is_(True)) if not with_deleted: filters.append(cls.file_id.isnot(None)) return cls.query.filter(*filters).order_by(cls.key, cls.created.desc())
def get_by_bucket(cls, bucket, versions=False, with_deleted=False)
Return query that fetches all the objects in a bucket. :param bucket: The bucket (instance or id) to query. :param versions: Select all versions if True, only heads otherwise. :param with_deleted: Select also deleted objects if True. :returns: The query to retrieve filtered objects in the given bucket.
2.567707
2.618543
0.980586
assert old_file.checksum == new_file.checksum assert old_file.id assert new_file.id with db.session.begin_nested(): ObjectVersion.query.filter_by(file_id=str(old_file.id)).update({ ObjectVersion.file_id: str(new_file.id)})
def relink_all(cls, old_file, new_file)
Relink all object versions (for a given file) to a new file. .. warning:: Use this method with great care.
2.983391
3.192838
0.934401
return ObjectVersionTag.create( self.object_version if object_version is None else object_version, key or self.key, self.value )
def copy(self, object_version=None, key=None)
Copy a tag to a given object version. :param object_version: The object version instance to copy the tag to. Default: current object version. :param key: Key of destination tag. Default: current tag key. :return: The copied object version tag.
4.432205
4.502981
0.984282
return cls.query.filter_by( version_id=as_object_version_id(object_version), key=key, ).one_or_none()
def get(cls, object_version, key)
Get the tag object.
3.688337
3.55036
1.038863
assert len(key) < 256 assert len(value) < 256 with db.session.begin_nested(): obj = cls(version_id=as_object_version_id(object_version), key=key, value=value) db.session.add(obj) return obj
def create(cls, object_version, key, value)
Create a new tag for a given object version.
2.730435
2.761007
0.988927
assert len(key) < 256 assert len(value) < 256 obj = cls.get(object_version, key) if obj: obj.value = value db.session.merge(obj) else: obj = cls.create(object_version, key, value) return obj
def create_or_update(cls, object_version, key, value)
Create or update a new tag for a given object version.
2.189162
2.234922
0.979525
obj = cls.get(object_version, key) return obj.value if obj else None
def get_value(cls, object_version, key)
Get the tag value.
3.980536
3.261891
1.220315
with db.session.begin_nested(): q = cls.query.filter_by( version_id=as_object_version_id(object_version)) if key: q = q.filter_by(key=key) q.delete()
def delete(cls, object_version, key=None)
Delete tags. :param object_version: The object version instance or id. :param key: Key of the tag to delete. Default: delete all tags.
3.047184
3.286918
0.927064
return int(self.size / self.chunk_size) \ if self.size % self.chunk_size else \ int(self.size / self.chunk_size) - 1
def last_part_number(self)
Get last part number.
3.567571
3.175527
1.123458
min_csize = current_app.config['FILES_REST_MULTIPART_CHUNKSIZE_MIN'] max_csize = current_app.config['FILES_REST_MULTIPART_CHUNKSIZE_MAX'] return chunk_size >= min_csize and chunk_size <= max_csize
def is_valid_chunksize(chunk_size)
Check if size is valid.
2.791707
2.607083
1.070816
min_csize = current_app.config['FILES_REST_MULTIPART_CHUNKSIZE_MIN'] max_size = \ chunk_size * current_app.config['FILES_REST_MULTIPART_MAX_PARTS'] return size > min_csize and size <= max_size
def is_valid_size(size, chunk_size)
Validate max theoretical size.
4.656768
4.124029
1.129179
last_part = self.multipart.last_part_number if part_number == last_part: return self.multipart.last_part_size elif part_number >= 0 and part_number < last_part: return self.multipart.chunk_size else: raise MultipartInvalidPartNumber()
def expected_part_size(self, part_number)
Get expected part size for a particular part number.
3.209755
3.000068
1.069894
if Part.count(self) != self.last_part_number + 1: raise MultipartMissingParts() with db.session.begin_nested(): self.completed = True self.file.readable = True self.file.writable = False return self
def complete(self)
Mark a multipart object as complete.
9.271058
6.862103
1.351052
self.file.update_checksum(**kwargs) with db.session.begin_nested(): obj = ObjectVersion.create( self.bucket, self.key, _file_id=self.file_id, version_id=version_id ) self.delete() return obj
def merge_parts(self, version_id=None, **kwargs)
Merge parts into object version.
5.151992
4.819927
1.068894
# Update bucket size. self.bucket.size -= self.size # Remove parts Part.query_by_multipart(self).delete() # Remove self self.query.filter_by(upload_id=self.upload_id).delete()
def delete(self)
Delete a multipart object.
8.323673
6.582118
1.264589
bucket = as_bucket(bucket) if bucket.locked: raise BucketLockedError() # Validate chunk size. if not cls.is_valid_chunksize(chunk_size): raise MultipartInvalidChunkSize() # Validate max theoretical size. if not cls.is_valid_size(size, chunk_size): raise MultipartInvalidSize() # Validate max bucket size. bucket_limit = bucket.size_limit if bucket_limit and size > bucket_limit: desc = 'File size limit exceeded.' \ if isinstance(bucket_limit, int) else bucket_limit.reason raise FileSizeError(description=desc) with db.session.begin_nested(): file_ = FileInstance.create() file_.size = size obj = cls( upload_id=uuid.uuid4(), bucket=bucket, key=key, chunk_size=chunk_size, size=size, completed=False, file=file_, ) bucket.size += size db.session.add(obj) file_.init_contents( size=size, default_location=bucket.location.uri, default_storage_class=bucket.default_storage_class, ) return obj
def create(cls, bucket, key, size, chunk_size)
Create a new object in a bucket.
3.564741
3.544039
1.005842
q = cls.query.filter_by( upload_id=upload_id, bucket_id=as_bucket_id(bucket), key=key, ) if not with_completed: q = q.filter(cls.completed.is_(False)) return q.one_or_none()
def get(cls, bucket, key, upload_id, with_completed=False)
Fetch a specific multipart object.
2.640518
2.721927
0.970092
q = cls.query.filter(cls.created < dt).filter_by(completed=True) if bucket: q = q.filter(cls.bucket_id == as_bucket_id(bucket)) return q
def query_expired(cls, dt, bucket=None)
Query all uncompleted multipart uploads.
3.810775
3.575453
1.065816
return cls.query.filter(cls.bucket_id == as_bucket_id(bucket))
def query_by_bucket(cls, bucket)
Query all uncompleted multipart uploads.
6.575614
6.779501
0.969926
return min( (self.part_number + 1) * self.multipart.chunk_size, self.multipart.size )
def end_byte(self)
Get end byte in file for this part.
9.88295
7.991894
1.236622
if part_number < 0 or part_number > mp.last_part_number: raise MultipartInvalidPartNumber() with db.session.begin_nested(): obj = cls( multipart=mp, part_number=part_number, ) db.session.add(obj) if stream: obj.set_contents(stream, **kwargs) return obj
def create(cls, mp, part_number, stream=None, **kwargs)
Create a new part object in a multipart object.
2.944435
2.75414
1.069094
return cls.query.filter_by( upload_id=mp.upload_id, part_number=part_number ).one_or_none()
def get_or_none(cls, mp, part_number)
Get part number.
2.907412
2.769649
1.04974
obj = cls.get_or_none(mp, part_number) if obj: return obj return cls.create(mp, part_number)
def get_or_create(cls, mp, part_number)
Get or create a part.
2.733005
2.731853
1.000422
return cls.query.filter_by( upload_id=mp.upload_id, part_number=part_number ).delete()
def delete(cls, mp, part_number)
Get part number.
3.147946
3.106445
1.01336
upload_id = multipart.upload_id \ if isinstance(multipart, MultipartObject) else multipart return cls.query.filter_by( upload_id=upload_id )
def query_by_multipart(cls, multipart)
Get all parts for a specific multipart upload. :param multipart: A :class:`invenio_files_rest.models.MultipartObject` instance. :returns: A :class:`invenio_files_rest.models.Part` instance.
4.338776
4.463093
0.972146
size, checksum = self.multipart.file.update_contents( stream, seek=self.start_byte, size=self.part_size, progress_callback=progress_callback, ) self.checksum = checksum return self
def set_contents(self, stream, progress_callback=None)
Save contents of stream to part of file instance. If a the MultipartObject is completed this methods raises an ``MultipartAlreadyCompleted`` exception. :param stream: File-like stream. :param size: Size of stream if known. :param chunk_size: Desired chunk size to read stream in. It is up to the storage interface if it respects this value.
6.591669
8.182583
0.805573
cdn_url, ssl_url = _get_container_urls(CumulusStorage()) static_url = settings.STATIC_URL return { "CDN_URL": cdn_url + static_url, "CDN_SSL_URL": ssl_url + static_url, }
def cdn_url(request)
A context processor that exposes the full CDN URL in templates.
4.645943
4.866076
0.954762
cdn_url, ssl_url = _get_container_urls(CumulusStaticStorage()) static_url = settings.STATIC_URL return { "STATIC_URL": cdn_url + static_url, "STATIC_SSL_URL": ssl_url + static_url, "LOCAL_STATIC_URL": static_url, }
def static_cdn_url(request)
A context processor that exposes the full static CDN URL as static URL in templates.
4.094611
4.50876
0.908146
for key, value in parameters.items(): if isinstance(value, bool): parameters[key] = "true" if value else "false" elif isinstance(value, dict): parameters[key] = "|".join( ("%s:%s" % (k, v) for k, v in value.items())) elif isinstance(value, (list, tuple)): parameters[key] = "|".join(value) return parameters
def _serialize_parameters(parameters)
Serialize some parameters to match python native types with formats specified in google api docs like: * True/False -> "true"/"false", * {"a": 1, "b":2} -> "a:1|b:2" :type parameters: dict oif query parameters
2.089508
1.738743
1.201735
url = urlparse.urljoin(urlparse.urljoin(self.base, url), "json") # drop all None values and use defaults if not set parameters = {key: value for key, value in parameters.items() if value is not None} parameters.setdefault("sensor", self.sensor) parameters = self._serialize_parameters(parameters) if self.api_key: parameters["key"] = self.api_key raw_response = requests.get(url, params=parameters) response = raw_response.json() if response["status"] == status.OK and result_key is not None: return response[result_key] elif response["status"] == status.OK: del response["status"] return response else: response["url"] = raw_response.url raise errors.EXCEPTION_MAPPING.get( response["status"], errors.GmapException )(response)
def _make_request(self, url, parameters, result_key)
Make http/https request to Google API. Method prepares url parameters, drops None values, and gets default values. Finally makes request using protocol assigned to client and returns data. :param url: url part - specifies API endpoint :param parameters: dictionary of url parameters :param result_key: key in output where result is expected
3.332513
3.372992
0.987999
# noqa parameters = dict( address=address, components=components, language=language, sensor=sensor, region=region, ) if bounds: parameters['bounds'] = "%f,%f|%f,%f" % ( bounds[0][0], bounds[0][1], bounds[1][0], bounds[1][1]) return self._make_request(self.GEOCODE_URL, parameters, "results")
def geocode(self, address=None, components=None, region=None, language=None, bounds=None, sensor=None)
Geocode given address. Geocoder can queried using address and/or components. Components when used with address will restrict your query to specific area. When used without address they act like more precise query. For full details see `Google docs <https://developers.google.com/maps/documentation/geocoding/>`_. :param address: address string :param components: ditc of components :param region: region code specified as a ccTLD ("top-level domain") two-character value, influences but not restricts query result :param language: the language in which to return results. For full list of laguages go to Google Maps API docs :param bounds: two-tuple of (latitude, longitude) pairs of bounding box. Influences but not restricts result (same as region parameter) :param sensor: override default client sensor parameter
2.549946
3.070981
0.830336
parameters = dict( latlng="%f,%f" % (lat, lon), result_type=result_type, location_type=location_type, language=language, sensor=sensor, ) return self._make_request(self.GEOCODE_URL, parameters, "results")
def reverse(self, lat, lon, result_type=None, location_type=None, language=None, sensor=None)
Reverse geocode with given latitude and longitude. :param lat: latitude of queried point :param lon: longitude of queried point :param result_type: list of result_type for filtered search. Accepted values: https://developers.google.com/maps/documentation/geocoding/intro#Types **Important**: this feature may require using API key to work. :param location_type: list of location_type for filtered search. :param language: the language in which to return results. For full list of laguages go to Google Maps API docs :param sensor: override default client sensor parameter .. note:: Google API allows to specify both latlng and address params but it makes no sense and would not reverse geocode your query, so here geocoding and reverse geocoding are separated
3.032991
3.369066
0.900247
self.init_config(app) if hasattr(app, 'cli'): app.cli.add_command(files_cmd) app.extensions['invenio-files-rest'] = _FilesRESTState(app)
def init_app(self, app)
Flask application initialization.
4.668963
4.608888
1.013035
if size_limit is not None and bytes_written > size_limit: desc = 'File size limit exceeded.' \ if isinstance(size_limit, int) else size_limit.reason raise FileSizeError(description=desc) # Never write more than advertised if total_size is not None and bytes_written > total_size: raise UnexpectedFileSizeError( description='File is bigger than expected.')
def check_sizelimit(size_limit, bytes_written, total_size)
Check if size limit was exceeded. :param size_limit: The size limit. :param bytes_written: The total number of bytes written. :param total_size: The total file size. :raises invenio_files_rest.errors.UnexpectedFileSizeError: If the bytes written exceed the total size. :raises invenio_files_rest.errors.FileSizeError: If the bytes written are major than the limit size.
4.142105
3.962094
1.045433
try: fp = self.open(mode='rb') except Exception as e: raise StorageError('Could not send file: {}'.format(e)) try: md5_checksum = None if checksum: algo, value = checksum.split(':') if algo == 'md5': md5_checksum = value # Send stream is responsible for closing the file. return send_stream( fp, filename, self._size, self._modified, mimetype=mimetype, restricted=restricted, etag=checksum, content_md5=md5_checksum, chunk_size=chunk_size, trusted=trusted, as_attachment=as_attachment, ) except Exception as e: fp.close() raise StorageError('Could not send file: {}'.format(e))
def send_file(self, filename, mimetype=None, restricted=True, checksum=None, trusted=False, chunk_size=None, as_attachment=False)
Send the file to the client.
3.012315
3.107445
0.969386
fp = self.open(mode='rb') try: value = self._compute_checksum( fp, size=self._size, chunk_size=None, progress_callback=progress_callback) except StorageError: raise finally: fp.close() return value
def checksum(self, chunk_size=None, progress_callback=None, **kwargs)
Compute checksum of file.
3.835734
3.512133
1.092138
fp = src.open(mode='rb') try: return self.save( fp, chunk_size=chunk_size, progress_callback=progress_callback) finally: fp.close()
def copy(self, src, chunk_size=None, progress_callback=None)
Copy data from another file instance. :param src: Source stream. :param chunk_size: Chunk size to read from source stream.
3.055175
3.788212
0.806495
if progress_callback and size: progress_callback = partial(progress_callback, size) else: progress_callback = None try: algo, m = self._init_hash() return compute_checksum( stream, algo, m, chunk_size=chunk_size, progress_callback=progress_callback, **kwargs ) except Exception as e: raise StorageError( 'Could not compute checksum of file: {0}'.format(e))
def _compute_checksum(self, stream, size=None, chunk_size=None, progress_callback=None, **kwargs)
Get helper method to compute checksum from a stream. Naive implementation that can be overwritten by subclasses in order to provide more efficient implementation.
3.060563
3.176784
0.963416
chunk_size = chunk_size_or_default(chunk_size) algo, m = self._init_hash() bytes_written = 0 while 1: # Check that size limits aren't bypassed check_sizelimit(size_limit, bytes_written, size) chunk = src.read(chunk_size) if not chunk: if progress_callback: progress_callback(bytes_written, bytes_written) break dst.write(chunk) bytes_written += len(chunk) if m: m.update(chunk) if progress_callback: progress_callback(None, bytes_written) check_size(bytes_written, size) return bytes_written, '{0}:{1}'.format( algo, m.hexdigest()) if m else None
def _write_stream(self, src, dst, size=None, size_limit=None, chunk_size=None, progress_callback=None)
Get helper to save stream from src to dest + compute checksum. :param src: Source stream. :param dst: Destination stream. :param size: If provided, this exact amount of bytes will be written to the destination file. :param size_limit: ``FileSizeLimit`` instance to limit number of bytes to write.
3.304649
3.630605
0.91022
# table ObjectVersion: modify primary_key if op.get_context().dialect.name == 'mysql': Fk = 'fk_files_object_bucket_id_files_bucket' op.execute( 'ALTER TABLE files_object ' 'DROP FOREIGN KEY {0}, DROP PRIMARY KEY, ' 'ADD PRIMARY KEY(version_id), ' 'ADD FOREIGN KEY(bucket_id) ' 'REFERENCES files_bucket(id)'.format(Fk)) else: op.drop_constraint('pk_files_object', 'files_object', type_='primary') op.create_primary_key( 'pk_files_object', 'files_object', ['version_id']) op.create_unique_constraint( 'uq_files_object_bucket_id', 'files_object', ['bucket_id', 'version_id', 'key']) # table ObjectVersionTag: create op.create_table( 'files_objecttags', sa.Column( 'version_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False), sa.Column( 'key', sa.Text().with_variant(mysql.VARCHAR(255), 'mysql'), nullable=False ), sa.Column( 'value', sa.Text().with_variant(mysql.VARCHAR(255), 'mysql'), nullable=False ), sa.PrimaryKeyConstraint('version_id', 'key'), sa.ForeignKeyConstraint( ['version_id'], [u'files_object.version_id'], ondelete='CASCADE'), )
def upgrade()
Upgrade database.
2.308181
2.274587
1.014769
# table ObjectVersionTag op.drop_table('files_objecttags') # table ObjectVersion: modify primary_key if op.get_context().dialect.name == 'mysql': op.execute( 'ALTER TABLE files_object ' 'DROP INDEX uq_files_object_bucket_id, ' 'DROP PRIMARY KEY, ' 'ADD PRIMARY KEY(`bucket_id`, `key`, `version_id`)') else: op.drop_constraint( 'pk_files_object', 'files_object', type_='primary') op.create_primary_key('pk_files_object', 'files_object', ['bucket_id', 'key', 'version_id'])
def downgrade()
Downgrade database.
3.398613
3.334733
1.019156
epoch = UTC.localize(datetime.utcfromtimestamp(0)) if not datetime.tzinfo: dt = UTC.localize(datetime) else: dt = UTC.normalize(datetime) delta = dt - epoch return total_seconds(delta)
def unixtimestamp(datetime)
Get unix time stamp from that given datetime. If datetime is not tzaware then it's assumed that it is UTC
3.370318
3.641301
0.925581
parameters = dict( location="%f,%f" % (lat, lon), timestamp=unixtimestamp(datetime), language=language, sensor=sensor, ) return self._make_request(self.TIMEZONE_URL, parameters, None)
def timezone(self, lat, lon, datetime, language=None, sensor=None)
Get time offset data for given location. :param lat: Latitude of queried point :param lon: Longitude of queried point :param language: The language in which to return results. For full list of laguages go to Google Maps API docs :param datetime: Desired time. The Time Zone API uses the timestamp to determine whether or not Daylight Savings should be applied. datetime should be timezone aware. If it isn't the UTC timezone is assumed. :type datetime: datetime.datetime :param sensor: Override default client sensor parameter
4.148263
4.989797
0.831349
encoded = ( (_encode_value(lat, prev_lat), _encode_value(lon, prev_lon)) for (prev_lat, prev_lon), (lat, lon) in _iterate_with_previous(locations, first=(0, 0)) ) encoded = chain.from_iterable(encoded) return ''.join(c for r in encoded for c in r)
def encode(locations)
:param locations: locations list containig (lat, lon) two-tuples :return: encoded polyline string
4.157582
4.25115
0.97799
op.create_table( 'records_buckets', sa.Column( 'record_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False), sa.Column( 'bucket_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False), sa.ForeignKeyConstraint(['bucket_id'], [u'files_bucket.id'], ), sa.ForeignKeyConstraint(['record_id'], [u'records_metadata.id'], ), sa.PrimaryKeyConstraint('record_id', 'bucket_id') )
def upgrade()
Upgrade database.
1.853301
1.85048
1.001525
@wraps(method) def wrapper(self, *args, **kwargs): if self.bucket.locked or self.bucket.deleted: raise InvalidOperationError() return method(self, *args, **kwargs) return wrapper
def _writable(method)
Check that record is in defined status. :param method: Method to be decorated. :returns: Function decorated.
3.270853
3.657483
0.894291
return ObjectVersion.get(bucket=self.obj.bucket, key=self.obj.key, version_id=version_id)
def get_version(self, version_id=None)
Return specific version ``ObjectVersion`` instance or HEAD. :param version_id: Version ID of the object. :returns: :class:`~invenio_files_rest.models.ObjectVersion` instance or HEAD of the stored object.
5.643686
5.2177
1.081643
if hasattr(self.obj, key): return getattr(self.obj, key) return self.data.get(key, default)
def get(self, key, default=None)
Proxy to ``obj``. :param key: Metadata key which holds the value. :returns: Metadata value of the specified key or default.
3.477015
4.515367
0.77004
self.data.update({ 'bucket': str(self.obj.bucket_id), 'checksum': self.obj.file.checksum, 'key': self.obj.key, # IMPORTANT it must stay here! 'size': self.obj.file.size, 'version_id': str(self.obj.version_id), }) return self.data
def dumps(self)
Create a dump of the metadata associated to the record.
4.45431
4.120132
1.081109
files = self.dumps() # Do not create `_files` when there has not been `_files` field before # and the record still has no files attached. if files or '_files' in self.record: self.record['_files'] = files
def flush(self)
Flush changes to record.
14.321675
12.144664
1.179257
# Support sorting by file_ids or keys. files = {str(f_.file_id): f_.key for f_ in self} # self.record['_files'] = [{'key': files.get(id_, id_)} for id_ in ids] self.filesmap = OrderedDict([ (files.get(id_, id_), self[files.get(id_, id_)].dumps()) for id_ in ids ]) self.flush()
def sort_by(self, *ids)
Update files order. :param ids: List of ids specifying the final status of the list.
7.131514
7.109725
1.003065
assert new_key not in self assert old_key != new_key file_ = self[old_key] old_data = self.filesmap[old_key] # Create a new version with the new name obj = ObjectVersion.create( bucket=self.bucket, key=new_key, _file_id=file_.obj.file_id ) # Delete old key self.filesmap[new_key] = self.file_cls(obj, old_data).dumps() del self[old_key] return obj
def rename(self, old_key, new_key)
Rename a file. :param old_key: Old key that holds the object. :param new_key: New key that will hold the object. :returns: The object that has been renamed.
5.216142
5.339312
0.976932
return [ self.file_cls(o, self.filesmap.get(o.key, {})).dumps() for o in sorted_files_from_bucket(bucket or self.bucket, self.keys) ]
def dumps(self, bucket=None)
Serialize files from a bucket. :param bucket: Instance of files :class:`invenio_files_rest.models.Bucket`. (Default: ``self.bucket``) :returns: List of serialized files.
9.165948
8.67284
1.056857
if self.model is None: raise MissingModelError() records_buckets = RecordsBuckets.query.filter_by( record_id=self.id).first() if not records_buckets: bucket = self._create_bucket() if not bucket: return None RecordsBuckets.create(record=self.model, bucket=bucket) else: bucket = records_buckets.bucket return self.files_iter_cls(self, bucket=bucket, file_cls=self.file_cls)
def files(self)
Get files iterator. :returns: Files iterator.
4.51562
4.450901
1.014541
current_files = self.files if current_files: raise RuntimeError('Can not update existing files.') for key in data: current_files[key] = data[key]
def files(self, data)
Set files from data.
5.534729
4.89315
1.131118
if force: RecordsBuckets.query.filter_by( record=self.model, bucket=self.files.bucket ).delete() return super(Record, self).delete(force)
def delete(self, force=False)
Delete a record and also remove the RecordsBuckets if necessary. :param force: True to remove also the :class:`~invenio_records_files.models.RecordsBuckets` object. :returns: Deleted record.
7.31595
5.003723
1.462101
rb = cls(record=record, bucket=bucket) db.session.add(rb) return rb
def create(cls, record, bucket)
Create a new RecordsBuckets and adds it to the session. :param record: Record used to relate with the ``Bucket``. :param bucket: Bucket used to relate with the ``Record``. :returns: The :class:`~invenio_records_files.models.RecordsBuckets` object created.
4.436803
4.867885
0.911444
keys = keys or [] total = len(keys) sortby = dict(zip(keys, range(total))) values = ObjectVersion.get_by_bucket(bucket).all() return sorted(values, key=lambda x: sortby.get(x.key, total))
def sorted_files_from_bucket(bucket, keys=None)
Return files from bucket sorted by given keys. :param bucket: :class:`~invenio_files_rest.models.Bucket` containing the files. :param keys: Keys order to be used. :returns: Sorted list of bucket items.
4.49265
5.519542
0.813953
try: if not (hasattr(record, 'files') and record.files): return None except MissingModelError: return None try: return record.files[filename] except KeyError: return None
def record_file_factory(pid, record, filename)
Get file from a record. :param pid: Not used. It keeps the function signature. :param record: Record which contains the files. :param filename: Name of the file to be returned. :returns: File object or ``None`` if not found.
4.360077
4.042343
1.078602
_record_file_factory = _record_file_factory or record_file_factory # Extract file from record. fileobj = _record_file_factory( pid, record, kwargs.get('filename') ) if not fileobj: abort(404) obj = fileobj.obj # Check permissions ObjectResource.check_object_permission(obj) # Send file. return ObjectResource.send_object( obj.bucket, obj, expected_chksum=fileobj.get('checksum'), logger_data={ 'bucket_id': obj.bucket_id, 'pid_type': pid.pid_type, 'pid_value': pid.pid_value, }, as_attachment=('download' in request.args) )
def file_download_ui(pid, record, _record_file_factory=None, **kwargs)
File download view for a given record. Plug this method into your ``RECORDS_UI_ENDPOINTS`` configuration: .. code-block:: python RECORDS_UI_ENDPOINTS = dict( recid=dict( # ... route='/records/<pid_value/files/<filename>', view_imp='invenio_records_files.utils:file_download_ui', record_class='invenio_records_files.api:Record', ) ) If ``download`` is passed as a querystring argument, the file is sent as an attachment. :param pid: The :class:`invenio_pidstore.models.PersistentIdentifier` instance. :param record: The record metadata.
4.159412
4.43964
0.936881
try: record = Record.get_record(pid.get_assigned_object()) bucket = record.files.bucket return url_for('invenio_files_rest.bucket_api', bucket_id=bucket.id, _external=True) except AttributeError: return None
def default_bucket_link_factory(pid)
Factory for record bucket generation.
4.294171
4.557518
0.942217
self.stdout.write(str(message)) self.stdout.write('\n')
def flake(self, message)
Print an error message to stdout.
3.92272
3.401275
1.153309
return hasattr(node, 'id') and node.id or hasattr(node, 'name') and node.name
def node_name(node)
Convenience function: Returns node.id, or node.name, or None
4.628054
2.743207
1.687097
names = [] if isinstance(self.source, ast.List): for node in self.source.elts: if isinstance(node, ast.Str): names.append(node.s) return names
def names(self)
Return a list of the names referenced by this binding.
2.925161
2.503521
1.168418
for name, binding in self.items(): if (not binding.used and name not in self.globals and not self.uses_locals and isinstance(binding, Assignment)): yield name, binding
def unusedAssignments(self)
Return a generator for the assignments which have not been used.
8.386901
7.204749
1.16408
checkers = {} for ep in pkg_resources.iter_entry_points(group='frosted.plugins'): checkers.update({ep.name: ep.load()}) for plugin_name, plugin in checkers.items(): if self.filename != '(none)': messages = plugin.check(self.filename) for message, loc, args, kwargs in messages: self.report(message, loc, *args, **kwargs)
def check_plugins(self)
collect plugins from entry point 'frosted.plugins' and run their check() method, passing the filename
4.249265
3.345057
1.270311
self._deferred_functions.append((callable, self.scope_stack[:], self.offset))
def defer_function(self, callable)
Schedule a function handler to be called just before completion. This is used for handling function bodies, which must be deferred because code later in the file might modify the global scope. When 'callable' is called, the scope at the time this is called will be restored, however it will contain any new bindings added to it.
12.974958
8.700385
1.491309
self._deferred_assignments.append((callable, self.scope_stack[:], self.offset))
def defer_assignment(self, callable)
Schedule an assignment handler to be called just after deferred function handlers.
10.378085
10.721405
0.967978
for handler, scope, offset in deferred: self.scope_stack = scope self.offset = offset handler()
def run_deferred(self, deferred)
Run the callables in deferred using their associated scope stack.
10.872164
6.568269
1.655255
for scope in self.dead_scopes: export = isinstance(scope.get('__all__'), ExportBinding) if export: all = scope['__all__'].names() # Look for possible mistakes in the export list if not scope.importStarred and os.path.basename(self.filename) != '__init__.py': undefined = set(all) - set(scope) for name in undefined: self.report(messages.UndefinedExport, scope['__all__'].source, name) else: all = [] # Look for imported names that aren't used without checking imports in namespace definition for importation in scope.values(): if isinstance(importation, Importation) and not importation.used and importation.name not in all: self.report(messages.UnusedImport, importation.source, importation.name)
def check_dead_scopes(self)
Look at scopes which have been fully examined and report names in them which were imported but unused.
5.51557
5.037292
1.094948
ancestor = self.get_common_ancestor(lnode, rnode) if isinstance(ancestor, ast.If): for fork in (ancestor.body, ancestor.orelse): if self.on_fork(ancestor, lnode, rnode, fork): return True elif isinstance(ancestor, ast.Try): body = ancestor.body + ancestor.orelse for fork in [body] + [[hdl] for hdl in ancestor.handlers]: if self.on_fork(ancestor, lnode, rnode, fork): return True elif isinstance(ancestor, ast.TryFinally): if self.on_fork(ancestor, lnode, rnode, ancestor.body): return True return False
def different_forks(self, lnode, rnode)
True, if lnode and rnode are located on different forks of IF/TRY.
2.515122
2.403673
1.046366
redefinedWhileUnused = False if not isinstance(self.scope, ClassScope): for scope in self.scope_stack[::-1]: existing = scope.get(value.name) if (isinstance(existing, Importation) and not existing.used and (not isinstance(value, Importation) or value.fullName == existing.fullName) and report_redef and not self.different_forks(node, existing.source)): redefinedWhileUnused = True self.report(messages.RedefinedWhileUnused, node, value.name, existing.source) existing = self.scope.get(value.name) if not redefinedWhileUnused and self.has_parent(value.source, ast.ListComp): if (existing and report_redef and not self.has_parent(existing.source, (ast.For, ast.ListComp)) and not self.different_forks(node, existing.source)): self.report(messages.RedefinedInListComp, node, value.name, existing.source) if (isinstance(existing, Definition) and not existing.used and not self.different_forks(node, existing.source)): self.report(messages.RedefinedWhileUnused, node, value.name, existing.source) else: self.scope[value.name] = value
def add_binding(self, node, value, report_redef=True)
Called when a binding is altered. - `node` is the statement responsible for the change - `value` is the optional new value, a Binding instance, associated with the binding; if None, the binding is deleted if it exists. - if `report_redef` is True (default), rebinding while unused will be reported.
3.232595
3.231791
1.000249
return isinstance(node, ast.Str) or (isinstance(node, ast.Expr) and isinstance(node.value, ast.Str))
def is_docstring(self, node)
Determine if the given node is a docstring, as long as it is at the correct place in the node tree.
3.003917
2.620259
1.14642
for item in node.body: if isinstance(item, ast.Return) and item.value: return item elif not isinstance(item, ast.FunctionDef) and hasattr(item, 'body'): return_with_argument = self.find_return_with_argument(item) if return_with_argument: return return_with_argument
def find_return_with_argument(self, node)
Finds and returns a return statment that has an argument. Note that we should use node.returns in Python 3, but this method is never called in Python 3 so we don't bother checking.
1.99603
1.94074
1.028489
if not isinstance(node.body, list): # lambdas can not be generators return False for item in node.body: if isinstance(item, (ast.Assign, ast.Expr)): if isinstance(item.value, ast.Yield): return True elif not isinstance(item, ast.FunctionDef) and hasattr(item, 'body'): if self.is_generator(item): return True return False
def is_generator(self, node)
Checks whether a function is a generator by looking for a yield statement or expression.
2.66248
2.452313
1.085701
if isinstance(self.scope, FunctionScope): self.scope.globals.update(node.names)
def GLOBAL(self, node)
Keep track of globals declarations.
5.524911
4.389068
1.258789
vars = [] def collectLoopVars(n): if isinstance(n, ast.Name): vars.append(n.id) elif isinstance(n, ast.expr_context): return else: for c in ast.iter_child_nodes(n): collectLoopVars(c) collectLoopVars(node.target) for varn in vars: if (isinstance(self.scope.get(varn), Importation) # unused ones will get an unused import warning and self.scope[varn].used): self.report(messages.ImportShadowedByLoopVar, node, varn, self.scope[varn].source) self.handle_children(node)
def FOR(self, node)
Process bindings for loop variables.
4.592333
4.331963
1.060105
# Locate the name in locals / function / globals scopes. if isinstance(node.ctx, (ast.Load, ast.AugLoad)): self.handle_node_load(node) if (node.id == 'locals' and isinstance(self.scope, FunctionScope) and isinstance(node.parent, ast.Call)): # we are doing locals() call in current scope self.scope.uses_locals = True elif isinstance(node.ctx, (ast.Store, ast.AugStore)): self.handle_node_store(node) elif isinstance(node.ctx, ast.Del): self.handle_node_delete(node) else: # must be a Param context -- this only happens for names in function # arguments, but these aren't dispatched through here raise RuntimeError("Got impossible expression context: %r" % (node.ctx,))
def NAME(self, node)
Handle occurrence of Name (which can be a load/store/delete access.)
5.107098
4.765623
1.071654
for deco in node.decorator_list: self.handleNode(deco, node) for baseNode in node.bases: self.handleNode(baseNode, node) if not PY2: for keywordNode in node.keywords: self.handleNode(keywordNode, node) self.push_scope(ClassScope) if self.settings.get('run_doctests', False): self.defer_function(lambda: self.handle_doctests(node)) for stmt in node.body: self.handleNode(stmt, node) self.pop_scope() self.add_binding(node, ClassDefinition(node.name, node))
def CLASSDEF(self, node)
Check names used in a class definition, including its decorators, base classes, and the body of its definition. Additionally, add its name to the current scope.
3.296539
3.242559
1.016647
if not settings_path and filename: settings_path = os.path.dirname(os.path.abspath(filename)) settings_path = settings_path or os.getcwd() active_settings = settings.from_path(settings_path).copy() for key, value in itemsview(setting_overrides): access_key = key.replace('not_', '').lower() if type(active_settings.get(access_key)) in (list, tuple): if key.startswith('not_'): active_settings[access_key] = list(set(active_settings[access_key]).difference(value)) else: active_settings[access_key] = list(set(active_settings[access_key]).union(value)) else: active_settings[key] = value active_settings.update(setting_overrides) if _should_skip(filename, active_settings.get('skip', [])): if active_settings.get('directly_being_checked', None) == 1: reporter.flake(FileSkipped(filename)) return 1 elif active_settings.get('verbose', False): ignore = active_settings.get('ignore_frosted_errors', []) if(not "W200" in ignore and not "W201" in ignore): reporter.flake(FileSkipped(filename, None, verbose=active_settings.get('verbose'))) return 0 # First, compile into an AST and handle syntax errors. try: tree = compile(codeString, filename, "exec", _ast.PyCF_ONLY_AST) except SyntaxError: value = sys.exc_info()[1] msg = value.args[0] (lineno, offset, text) = value.lineno, value.offset, value.text # If there's an encoding problem with the file, the text is None. if text is None: # Avoid using msg, since for the only known case, it contains a # bogus message that claims the encoding the file declared was # unknown. reporter.unexpected_error(filename, 'problem decoding source') else: reporter.flake(PythonSyntaxError(filename, msg, lineno, offset, text, verbose=active_settings.get('verbose'))) return 1 except Exception: reporter.unexpected_error(filename, 'problem decoding source') return 1 # Okay, it's syntactically valid. Now check it. w = checker.Checker(tree, filename, None, ignore_lines=_noqa_lines(codeString), **active_settings) w.messages.sort(key=lambda m: m.lineno) for warning in w.messages: reporter.flake(warning) return len(w.messages)
def check(codeString, filename, reporter=modReporter.Default, settings_path=None, **setting_overrides)
Check the Python source given by codeString for unfrosted flakes.
3.460403
3.342196
1.035368
try: with open(filename, 'U') as f: codestr = f.read() + '\n' except UnicodeError: reporter.unexpected_error(filename, 'problem decoding source') return 1 except IOError: msg = sys.exc_info()[1] reporter.unexpected_error(filename, msg.args[1]) return 1 return check(codestr, filename, reporter, settings_path, **setting_overrides)
def check_path(filename, reporter=modReporter.Default, settings_path=None, **setting_overrides)
Check the given path, printing out any warnings detected.
3.024947
2.990818
1.011411
warnings = 0 for source_path in iter_source_code(paths): warnings += check_path(source_path, reporter, settings_path=None, **setting_overrides) return warnings
def check_recursive(paths, reporter=modReporter.Default, settings_path=None, **setting_overrides)
Recursively check all source files defined in paths.
4.547814
4.23453
1.073983
for path in paths: f = default_storage.open(path) f.name = os.path.basename(path) try: yield f except ClientError: logger.exception("File not found: %s", path)
def get_files_from_storage(paths)
Return S3 file where the name does not include the path.
3.910004
3.540226
1.104451
message = update.get_effective_message() bot.reply(update, message)
def echo(bot, update)
Echo the user message.
8.603044
7.628354
1.127772
logger.error('Update {} caused error {}'.format(update, error), extra={"tag": "err"})
def error(bot, update, error)
Log Errors caused by Updates.
6.51573
5.993363
1.087158