sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def _loadMore(self, start=0, trys=0, validation=True):
"""Retrieves the next page of results."""
self._log.debug("Loading page starting from %d" % start)
self._czero = start
self._pageoffs = 0
try:
pyxb.RequireValidWhenParsing(validation)
self._object_list = self._client.listObjects(
start=start,
count=self._pagesize,
fromDate=self._fromDate,
nodeId=self._nodeId,
)
except http.client.BadStatusLine as e:
self._log.warning("Server responded with Bad Status Line. Retrying in 5sec")
self._client.connection.close()
if trys > 3:
raise e
trys += 1
self._loadMore(start, trys)
except d1_common.types.exceptions.ServiceFailure as e:
self._log.error(e)
if trys > 3:
raise e
trys += 1
self._loadMore(start, trys, validation=False) | Retrieves the next page of results. | entailment |
def parse_mime_type(mime_type):
"""Carves up a mime-type and returns a tuple of the (type, subtype, params) where
'params' is a dictionary of all the parameters for the media range. For example, the
media range 'application/xhtml;q=0.5' would get parsed into:
('application', 'xhtml', {'q', '0.5'})
"""
parts = mime_type.split(";")
params = dict([tuple([s.strip() for s in param.split("=")]) for param in parts[1:]])
full_type = parts[0].strip()
# Java URLConnection class sends an Accept header that includes a single "*"
# Turn it into a legal wildcard.
if full_type == '*':
full_type = '*/*'
(type, subtype) = full_type.split("/")
return (type.strip(), subtype.strip(), params) | Carves up a mime-type and returns a tuple of the (type, subtype, params) where
'params' is a dictionary of all the parameters for the media range. For example, the
media range 'application/xhtml;q=0.5' would get parsed into:
('application', 'xhtml', {'q', '0.5'}) | entailment |
def best_match(supported, header):
"""Takes a list of supported mime-types and finds the best match for all the media-
ranges listed in header. The value of header must be a string that conforms to the
format of the HTTP Accept: header. The value of 'supported' is a list of mime-types.
>>> best_match(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1')
'text/xml'
"""
parsed_header = [parse_media_range(r) for r in header.split(",")]
weighted_matches = [
(fitness_and_quality_parsed(mime_type, parsed_header), mime_type)
for mime_type in supported
]
weighted_matches.sort()
return weighted_matches[-1][0][1] and weighted_matches[-1][1] or '' | Takes a list of supported mime-types and finds the best match for all the media-
ranges listed in header. The value of header must be a string that conforms to the
format of the HTTP Accept: header. The value of 'supported' is a list of mime-types.
>>> best_match(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1')
'text/xml' | entailment |
def _delete_chunked(queryset, chunk_size=500):
"""Chunked delete, which should be used if deleting many objects.
The reason why this method is needed is that deleting a lot of Data objects
requires Django to fetch all of them into memory (fast path is not used) and
this causes huge memory usage (and possibly OOM).
:param chunk_size: Optional chunk size
"""
while True:
# Discover primary key to limit the current chunk. This is required because delete
# cannot be called on a sliced queryset due to ordering requirement.
with transaction.atomic():
# Get offset of last item (needed because it may be less than the chunk size).
offset = queryset.order_by('pk')[:chunk_size].count()
if not offset:
break
# Fetch primary key of last item and use it to delete the chunk.
last_instance = queryset.order_by('pk')[offset - 1]
queryset.filter(pk__lte=last_instance.pk).delete() | Chunked delete, which should be used if deleting many objects.
The reason why this method is needed is that deleting a lot of Data objects
requires Django to fetch all of them into memory (fast path is not used) and
this causes huge memory usage (and possibly OOM).
:param chunk_size: Optional chunk size | entailment |
def duplicate(self, contributor=None):
"""Duplicate (make a copy) ``Data`` objects.
:param contributor: Duplication user
"""
bundle = [
{'original': data, 'copy': data.duplicate(contributor=contributor)}
for data in self
]
bundle = rewire_inputs(bundle)
duplicated = [item['copy'] for item in bundle]
return duplicated | Duplicate (make a copy) ``Data`` objects.
:param contributor: Duplication user | entailment |
def save_storage(self, instance, schema):
"""Save basic:json values to a Storage collection."""
for field_schema, fields in iterate_fields(instance, schema):
name = field_schema['name']
value = fields[name]
if field_schema.get('type', '').startswith('basic:json:'):
if value and not self.pk:
raise ValidationError(
'Data object must be `created` before creating `basic:json:` fields')
if isinstance(value, int):
# already in Storage
continue
if isinstance(value, str):
file_path = self.location.get_path(filename=value) # pylint: disable=no-member
if os.path.isfile(file_path):
try:
with open(file_path) as file_handler:
value = json.load(file_handler)
except json.JSONDecodeError:
with open(file_path) as file_handler:
content = file_handler.read()
content = content.rstrip()
raise ValidationError(
"Value of '{}' must be a valid JSON, current: {}".format(name, content)
)
storage = self.storages.create( # pylint: disable=no-member
name='Storage for data id {}'.format(self.pk),
contributor=self.contributor,
json=value,
)
# `value` is copied by value, so `fields[name]` must be changed
fields[name] = storage.pk | Save basic:json values to a Storage collection. | entailment |
def resolve_secrets(self):
"""Retrieve handles for all basic:secret: fields on input.
The process must have the ``secrets`` resource requirement
specified in order to access any secrets. Otherwise this method
will raise a ``PermissionDenied`` exception.
:return: A dictionary of secrets where key is the secret handle
and value is the secret value.
"""
secrets = {}
for field_schema, fields in iterate_fields(self.input, self.process.input_schema): # pylint: disable=no-member
if not field_schema.get('type', '').startswith('basic:secret:'):
continue
name = field_schema['name']
value = fields[name]
try:
handle = value['handle']
except KeyError:
continue
try:
secrets[handle] = Secret.objects.get_secret(
handle,
contributor=self.contributor
)
except Secret.DoesNotExist:
raise PermissionDenied("Access to secret not allowed or secret does not exist")
# If the process does not not have the right requirements it is not
# allowed to access any secrets.
allowed = self.process.requirements.get('resources', {}).get('secrets', False) # pylint: disable=no-member
if secrets and not allowed:
raise PermissionDenied(
"Process '{}' has secret inputs, but no permission to see secrets".format(
self.process.slug # pylint: disable=no-member
)
)
return secrets | Retrieve handles for all basic:secret: fields on input.
The process must have the ``secrets`` resource requirement
specified in order to access any secrets. Otherwise this method
will raise a ``PermissionDenied`` exception.
:return: A dictionary of secrets where key is the secret handle
and value is the secret value. | entailment |
def save_dependencies(self, instance, schema):
"""Save data: and list:data: references as parents."""
def add_dependency(value):
"""Add parent Data dependency."""
try:
DataDependency.objects.update_or_create(
parent=Data.objects.get(pk=value),
child=self,
defaults={'kind': DataDependency.KIND_IO},
)
except Data.DoesNotExist:
pass
for field_schema, fields in iterate_fields(instance, schema):
name = field_schema['name']
value = fields[name]
if field_schema.get('type', '').startswith('data:'):
add_dependency(value)
elif field_schema.get('type', '').startswith('list:data:'):
for data in value:
add_dependency(data) | Save data: and list:data: references as parents. | entailment |
def create_entity(self):
"""Create entity if `flow_collection` is defined in process.
Following rules applies for adding `Data` object to `Entity`:
* Only add `Data object` to `Entity` if process has defined
`flow_collection` field
* Add object to existing `Entity`, if all parents that are part
of it (but not necessary all parents), are part of the same
`Entity`
* If parents belong to different `Entities` or do not belong to
any `Entity`, create new `Entity`
"""
entity_type = self.process.entity_type # pylint: disable=no-member
entity_descriptor_schema = self.process.entity_descriptor_schema # pylint: disable=no-member
entity_input = self.process.entity_input # pylint: disable=no-member
if entity_type:
data_filter = {}
if entity_input:
input_id = dict_dot(self.input, entity_input, default=lambda: None)
if input_id is None:
logger.warning("Skipping creation of entity due to missing input.")
return
if isinstance(input_id, int):
data_filter['data__pk'] = input_id
elif isinstance(input_id, list):
data_filter['data__pk__in'] = input_id
else:
raise ValueError(
"Cannot create entity due to invalid value of field {}.".format(entity_input)
)
else:
data_filter['data__in'] = self.parents.all() # pylint: disable=no-member
entity_query = Entity.objects.filter(type=entity_type, **data_filter).distinct()
entity_count = entity_query.count()
if entity_count == 0:
descriptor_schema = DescriptorSchema.objects.filter(
slug=entity_descriptor_schema
).latest()
entity = Entity.objects.create(
contributor=self.contributor,
descriptor_schema=descriptor_schema,
type=entity_type,
name=self.name,
tags=self.tags,
)
assign_contributor_permissions(entity)
elif entity_count == 1:
entity = entity_query.first()
copy_permissions(entity, self)
else:
logger.info("Skipping creation of entity due to multiple entities found.")
entity = None
if entity:
entity.data.add(self)
# Inherit collections from entity.
for collection in entity.collections.all():
collection.data.add(self) | Create entity if `flow_collection` is defined in process.
Following rules applies for adding `Data` object to `Entity`:
* Only add `Data object` to `Entity` if process has defined
`flow_collection` field
* Add object to existing `Entity`, if all parents that are part
of it (but not necessary all parents), are part of the same
`Entity`
* If parents belong to different `Entities` or do not belong to
any `Entity`, create new `Entity` | entailment |
def save(self, render_name=False, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg
"""Save the data model."""
if self.name != self._original_name:
self.named_by_user = True
create = self.pk is None
if create:
fill_with_defaults(self.input, self.process.input_schema) # pylint: disable=no-member
if not self.name:
self._render_name()
else:
self.named_by_user = True
self.checksum = get_data_checksum(
self.input, self.process.slug, self.process.version) # pylint: disable=no-member
elif render_name:
self._render_name()
self.save_storage(self.output, self.process.output_schema) # pylint: disable=no-member
if self.status != Data.STATUS_ERROR:
hydrate_size(self)
# If only specified fields are updated (e.g. in executor), size needs to be added
if 'update_fields' in kwargs:
kwargs['update_fields'].append('size')
# Input Data objects are validated only upon creation as they can be deleted later.
skip_missing_data = not create
validate_schema(
self.input, self.process.input_schema, skip_missing_data=skip_missing_data # pylint: disable=no-member
)
render_descriptor(self)
if self.descriptor_schema:
try:
validate_schema(self.descriptor, self.descriptor_schema.schema) # pylint: disable=no-member
self.descriptor_dirty = False
except DirtyError:
self.descriptor_dirty = True
elif self.descriptor and self.descriptor != {}:
raise ValueError("`descriptor_schema` must be defined if `descriptor` is given")
if self.status != Data.STATUS_ERROR:
output_schema = self.process.output_schema # pylint: disable=no-member
if self.status == Data.STATUS_DONE:
validate_schema(
self.output, output_schema, data_location=self.location, skip_missing_data=True
)
else:
validate_schema(
self.output, output_schema, data_location=self.location, test_required=False
)
with transaction.atomic():
self._perform_save(*args, **kwargs)
# We can only save dependencies after the data object has been saved. This
# is why a transaction block is needed and the save method must be called first.
if create:
self.save_dependencies(self.input, self.process.input_schema) # pylint: disable=no-member
self.create_entity() | Save the data model. | entailment |
def delete(self, *args, **kwargs):
"""Delete the data model."""
# Store ids in memory as relations are also deleted with the Data object.
storage_ids = list(self.storages.values_list('pk', flat=True)) # pylint: disable=no-member
super().delete(*args, **kwargs)
Storage.objects.filter(pk__in=storage_ids, data=None).delete() | Delete the data model. | entailment |
def duplicate(self, contributor=None):
"""Duplicate (make a copy)."""
if self.status not in [self.STATUS_DONE, self.STATUS_ERROR]:
raise ValidationError('Data object must have done or error status to be duplicated')
duplicate = Data.objects.get(id=self.id)
duplicate.pk = None
duplicate.slug = None
duplicate.name = 'Copy of {}'.format(self.name)
duplicate.duplicated = now()
if contributor:
duplicate.contributor = contributor
duplicate._perform_save(force_insert=True) # pylint: disable=protected-access
assign_contributor_permissions(duplicate)
# Override fields that are automatically set on create.
duplicate.created = self.created
duplicate._perform_save() # pylint: disable=protected-access
if self.location:
self.location.data.add(duplicate) # pylint: disable=no-member
duplicate.storages.set(self.storages.all()) # pylint: disable=no-member
for migration in self.migration_history.order_by('created'): # pylint: disable=no-member
migration.pk = None
migration.data = duplicate
migration.save(force_insert=True)
# Inherit existing child dependencies.
DataDependency.objects.bulk_create([
DataDependency(child=duplicate, parent=dependency.parent, kind=dependency.kind)
for dependency in DataDependency.objects.filter(child=self)
])
# Inherit existing parent dependencies.
DataDependency.objects.bulk_create([
DataDependency(child=dependency.child, parent=duplicate, kind=dependency.kind)
for dependency in DataDependency.objects.filter(parent=self)
])
return duplicate | Duplicate (make a copy). | entailment |
def _render_name(self):
"""Render data name.
The rendering is based on name template (`process.data_name`) and
input context.
"""
if not self.process.data_name or self.named_by_user: # pylint: disable=no-member
return
inputs = copy.deepcopy(self.input)
hydrate_input_references(inputs, self.process.input_schema, hydrate_values=False) # pylint: disable=no-member
template_context = inputs
try:
name = render_template(
self.process,
self.process.data_name, # pylint: disable=no-member
template_context
)
except EvaluationError:
name = '?'
self.name = name | Render data name.
The rendering is based on name template (`process.data_name`) and
input context. | entailment |
def get_path(self, prefix=None, filename=None):
"""Compose data location path."""
prefix = prefix or settings.FLOW_EXECUTOR['DATA_DIR']
path = os.path.join(prefix, self.subpath)
if filename:
path = os.path.join(path, filename)
return path | Compose data location path. | entailment |
def get_runtime_path(self, filename=None):
"""Compose data runtime location path."""
return self.get_path(prefix=settings.FLOW_EXECUTOR['RUNTIME_DIR'], filename=filename) | Compose data runtime location path. | entailment |
def get_data(self, entity):
"""Return serialized list of data objects on entity that user has `view` permission on."""
data = self._filter_queryset('view_data', entity.data.all())
return self._serialize_data(data) | Return serialized list of data objects on entity that user has `view` permission on. | entailment |
def validate(self):
"""Validate process descriptor."""
required_fields = ('slug', 'name', 'process_type', 'version')
for field in required_fields:
if getattr(self.metadata, field, None) is None:
raise ValidationError("process '{}' is missing required meta attribute: {}".format(
self.metadata.slug or '<unknown>', field))
if not PROCESSOR_TYPE_RE.match(self.metadata.process_type):
raise ValidationError("process '{}' has invalid type: {}".format(
self.metadata.slug, self.metadata.process_type)) | Validate process descriptor. | entailment |
def to_schema(self):
"""Return process schema for this process."""
process_type = self.metadata.process_type
if not process_type.endswith(':'):
process_type = '{}:'.format(process_type)
schema = {
'slug': self.metadata.slug,
'name': self.metadata.name,
'type': process_type,
'version': self.metadata.version,
'data_name': '',
'requirements': {
'executor': {
'docker': {
'image': 'resolwe/base:ubuntu-18.04',
},
},
},
}
if self.metadata.description is not None:
schema['description'] = self.metadata.description
if self.metadata.category is not None:
schema['category'] = self.metadata.category
if self.metadata.scheduling_class is not None:
schema['scheduling_class'] = self.metadata.scheduling_class
if self.metadata.persistence is not None:
schema['persistence'] = self.metadata.persistence
if self.metadata.requirements is not None:
schema['requirements'] = self.metadata.requirements
if self.metadata.data_name is not None:
schema['data_name'] = self.metadata.data_name
if self.metadata.entity is not None:
schema['entity'] = self.metadata.entity
if self.inputs:
schema['input'] = []
for field in self.inputs.values():
schema['input'].append(field.to_schema())
if self.outputs:
schema['output'] = []
for field in self.outputs.values():
schema['output'].append(field.to_schema())
schema['run'] = {
'language': 'python',
'program': self.source or '',
}
return schema | Return process schema for this process. | entailment |
def post_register_hook(self, verbosity=1):
"""Pull Docker images needed by processes after registering."""
if not getattr(settings, 'FLOW_DOCKER_DONT_PULL', False):
call_command('list_docker_images', pull=True, verbosity=verbosity) | Pull Docker images needed by processes after registering. | entailment |
def resolve_data_path(self, data=None, filename=None):
"""Resolve data path for use with the executor.
:param data: Data object instance
:param filename: Filename to resolve
:return: Resolved filename, which can be used to access the
given data file in programs executed using this executor
"""
if data is None:
return constants.DATA_ALL_VOLUME
# Prefix MUST be set because ``get_path`` uses Django's settings,
# if prefix is not set, to get path prefix. But the executor
# shouldn't use Django's settings directly, so prefix is set
# via a constant.
return data.location.get_path(prefix=constants.DATA_ALL_VOLUME, filename=filename) | Resolve data path for use with the executor.
:param data: Data object instance
:param filename: Filename to resolve
:return: Resolved filename, which can be used to access the
given data file in programs executed using this executor | entailment |
def resolve_upload_path(self, filename=None):
"""Resolve upload path for use with the executor.
:param filename: Filename to resolve
:return: Resolved filename, which can be used to access the
given uploaded file in programs executed using this
executor
"""
if filename is None:
return constants.UPLOAD_VOLUME
return os.path.join(constants.UPLOAD_VOLUME, filename) | Resolve upload path for use with the executor.
:param filename: Filename to resolve
:return: Resolved filename, which can be used to access the
given uploaded file in programs executed using this
executor | entailment |
def are_equal(a_dt, b_dt, round_sec=1):
"""Determine if two datetimes are equal with fuzz factor.
A naive datetime (no timezone information) is assumed to be in in UTC.
Args:
a_dt: datetime
Timestamp to compare.
b_dt: datetime
Timestamp to compare.
round_sec: int or float
Round the timestamps to the closest second divisible by this value before
comparing them.
E.g.:
- ``n_round_sec`` = 0.1: nearest 10th of a second.
- ``n_round_sec`` = 1: nearest second.
- ``n_round_sec`` = 30: nearest half minute.
Timestamps may lose resolution or otherwise change slightly as they go through
various transformations and storage systems. This again may cause timestamps
that
have been processed in different systems to fail an exact equality compare even
if
they were initially the same timestamp. This rounding avoids such problems as
long
as the error introduced to the original timestamp is not higher than the
rounding
value. Of course, the rounding also causes a loss in resolution in the values
compared, so should be kept as low as possible. The default value of 1 second
should
be a good tradeoff in most cases.
Returns:
bool
- **True**: If the two datetimes are equal after being rounded by
``round_sec``.
"""
ra_dt = round_to_nearest(a_dt, round_sec)
rb_dt = round_to_nearest(b_dt, round_sec)
logger.debug('Rounded:')
logger.debug('{} -> {}'.format(a_dt, ra_dt))
logger.debug('{} -> {}'.format(b_dt, rb_dt))
return normalize_datetime_to_utc(ra_dt) == normalize_datetime_to_utc(rb_dt) | Determine if two datetimes are equal with fuzz factor.
A naive datetime (no timezone information) is assumed to be in in UTC.
Args:
a_dt: datetime
Timestamp to compare.
b_dt: datetime
Timestamp to compare.
round_sec: int or float
Round the timestamps to the closest second divisible by this value before
comparing them.
E.g.:
- ``n_round_sec`` = 0.1: nearest 10th of a second.
- ``n_round_sec`` = 1: nearest second.
- ``n_round_sec`` = 30: nearest half minute.
Timestamps may lose resolution or otherwise change slightly as they go through
various transformations and storage systems. This again may cause timestamps
that
have been processed in different systems to fail an exact equality compare even
if
they were initially the same timestamp. This rounding avoids such problems as
long
as the error introduced to the original timestamp is not higher than the
rounding
value. Of course, the rounding also causes a loss in resolution in the values
compared, so should be kept as low as possible. The default value of 1 second
should
be a good tradeoff in most cases.
Returns:
bool
- **True**: If the two datetimes are equal after being rounded by
``round_sec``. | entailment |
def http_datetime_str_from_dt(dt):
"""Format datetime to HTTP Full Date format.
Args:
dt : datetime
- tz-aware: Used in the formatted string.
- tz-naive: Assumed to be in UTC.
Returns:
str
The returned format is a is fixed-length subset of that defined by RFC 1123 and
is
the preferred format for use in the HTTP Date header. E.g.:
``Sat, 02 Jan 1999 03:04:05 GMT``
See Also:
- http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1
"""
epoch_seconds = ts_from_dt(dt)
return email.utils.formatdate(epoch_seconds, localtime=False, usegmt=True) | Format datetime to HTTP Full Date format.
Args:
dt : datetime
- tz-aware: Used in the formatted string.
- tz-naive: Assumed to be in UTC.
Returns:
str
The returned format is a is fixed-length subset of that defined by RFC 1123 and
is
the preferred format for use in the HTTP Date header. E.g.:
``Sat, 02 Jan 1999 03:04:05 GMT``
See Also:
- http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1 | entailment |
def dt_from_http_datetime_str(http_full_datetime):
"""Parse HTTP Full Date formats and return as datetime.
Args:
http_full_datetime : str
Each of the allowed formats are supported:
- Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
- Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
- Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
HTTP Full Dates are always in UTC.
Returns:
datetime
The returned datetime is always timezone aware and in UTC.
See Also:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1
"""
date_parts = list(email.utils.parsedate(http_full_datetime)[:6])
year = date_parts[0]
if year <= 99:
year = year + 2000 if year < 50 else year + 1900
return create_utc_datetime(year, *date_parts[1:]) | Parse HTTP Full Date formats and return as datetime.
Args:
http_full_datetime : str
Each of the allowed formats are supported:
- Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
- Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
- Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
HTTP Full Dates are always in UTC.
Returns:
datetime
The returned datetime is always timezone aware and in UTC.
See Also:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1 | entailment |
def normalize_datetime_to_utc(dt):
"""Adjust datetime to UTC.
Apply the timezone offset to the datetime and set the timezone to UTC.
This is a no-op if the datetime is already in UTC.
Args:
dt : datetime
- tz-aware: Used in the formatted string.
- tz-naive: Assumed to be in UTC.
Returns:
datetime
The returned datetime is always timezone aware and in UTC.
Notes:
This forces a new object to be returned, which fixes an issue with
serialization to XML in PyXB. PyXB uses a mixin together with
datetime to handle the XML xs:dateTime. That type keeps track of
timezone information included in the original XML doc, which conflicts if we
return it here as part of a datetime mixin.
See Also:
``cast_naive_datetime_to_tz()``
"""
return datetime.datetime(
*dt.utctimetuple()[:6], microsecond=dt.microsecond, tzinfo=datetime.timezone.utc
) | Adjust datetime to UTC.
Apply the timezone offset to the datetime and set the timezone to UTC.
This is a no-op if the datetime is already in UTC.
Args:
dt : datetime
- tz-aware: Used in the formatted string.
- tz-naive: Assumed to be in UTC.
Returns:
datetime
The returned datetime is always timezone aware and in UTC.
Notes:
This forces a new object to be returned, which fixes an issue with
serialization to XML in PyXB. PyXB uses a mixin together with
datetime to handle the XML xs:dateTime. That type keeps track of
timezone information included in the original XML doc, which conflicts if we
return it here as part of a datetime mixin.
See Also:
``cast_naive_datetime_to_tz()`` | entailment |
def cast_naive_datetime_to_tz(dt, tz=UTC()):
"""If datetime is tz-naive, set it to ``tz``. If datetime is tz-aware, return it
unmodified.
Args:
dt : datetime
tz-naive or tz-aware datetime.
tz : datetime.tzinfo
The timezone to which to adjust tz-naive datetime.
Returns:
datetime
tz-aware datetime.
Warning:
This will change the actual moment in time that is represented if the datetime is
naive and represents a date and time not in ``tz``.
See Also:
``normalize_datetime_to_utc()``
"""
if has_tz(dt):
return dt
return dt.replace(tzinfo=tz) | If datetime is tz-naive, set it to ``tz``. If datetime is tz-aware, return it
unmodified.
Args:
dt : datetime
tz-naive or tz-aware datetime.
tz : datetime.tzinfo
The timezone to which to adjust tz-naive datetime.
Returns:
datetime
tz-aware datetime.
Warning:
This will change the actual moment in time that is represented if the datetime is
naive and represents a date and time not in ``tz``.
See Also:
``normalize_datetime_to_utc()`` | entailment |
def round_to_nearest(dt, n_round_sec=1.0):
"""Round datetime up or down to nearest divisor.
Round datetime up or down to nearest number of seconds that divides evenly by
the divisor.
Any timezone is preserved but ignored in the rounding.
Args:
dt: datetime
n_round_sec : int or float
Divisor for rounding
Examples:
- ``n_round_sec`` = 0.1: nearest 10th of a second.
- ``n_round_sec`` = 1: nearest second.
- ``n_round_sec`` = 30: nearest half minute.
"""
ts = ts_from_dt(strip_timezone(dt)) + n_round_sec / 2.0
res = dt_from_ts(ts - (ts % n_round_sec))
return res.replace(tzinfo=dt.tzinfo) | Round datetime up or down to nearest divisor.
Round datetime up or down to nearest number of seconds that divides evenly by
the divisor.
Any timezone is preserved but ignored in the rounding.
Args:
dt: datetime
n_round_sec : int or float
Divisor for rounding
Examples:
- ``n_round_sec`` = 0.1: nearest 10th of a second.
- ``n_round_sec`` = 1: nearest second.
- ``n_round_sec`` = 30: nearest half minute. | entailment |
def submit(self, data, runtime_dir, argv):
"""Run process with SLURM.
For details, see
:meth:`~resolwe.flow.managers.workload_connectors.base.BaseConnector.submit`.
"""
limits = data.process.get_resource_limits()
logger.debug(__(
"Connector '{}' running for Data with id {} ({}).",
self.__class__.__module__,
data.id,
repr(argv)
))
# Compute target partition.
partition = getattr(settings, 'FLOW_SLURM_PARTITION_DEFAULT', None)
if data.process.slug in getattr(settings, 'FLOW_SLURM_PARTITION_OVERRIDES', {}):
partition = settings.FLOW_SLURM_PARTITION_OVERRIDES[data.process.slug]
try:
# Make sure the resulting file is executable on creation.
script_path = os.path.join(runtime_dir, 'slurm.sh')
file_descriptor = os.open(script_path, os.O_WRONLY | os.O_CREAT, mode=0o555)
with os.fdopen(file_descriptor, 'wt') as script:
script.write('#!/bin/bash\n')
script.write('#SBATCH --mem={}M\n'.format(limits['memory'] + EXECUTOR_MEMORY_OVERHEAD))
script.write('#SBATCH --cpus-per-task={}\n'.format(limits['cores']))
if partition:
script.write('#SBATCH --partition={}\n'.format(partition))
# Render the argument vector into a command line.
line = ' '.join(map(shlex.quote, argv))
script.write(line + '\n')
command = ['/usr/bin/env', 'sbatch', script_path]
subprocess.Popen(
command,
cwd=runtime_dir,
stdin=subprocess.DEVNULL
).wait()
except OSError as err:
logger.error(__(
"OSError occurred while preparing SLURM script for Data {}: {}",
data.id, err
)) | Run process with SLURM.
For details, see
:meth:`~resolwe.flow.managers.workload_connectors.base.BaseConnector.submit`. | entailment |
def get_purge_files(root, output, output_schema, descriptor, descriptor_schema):
"""Get files to purge."""
def remove_file(fn, paths):
"""From paths remove fn and dirs before fn in dir tree."""
while fn:
for i in range(len(paths) - 1, -1, -1):
if fn == paths[i]:
paths.pop(i)
fn, _ = os.path.split(fn)
def remove_tree(fn, paths):
"""From paths remove fn and dirs before or after fn in dir tree."""
for i in range(len(paths) - 1, -1, -1):
head = paths[i]
while head:
if fn == head:
paths.pop(i)
break
head, _ = os.path.split(head)
remove_file(fn, paths)
def subfiles(root):
"""Extend unreferenced list with all subdirs and files in top dir."""
subs = []
for path, dirs, files in os.walk(root, topdown=False):
path = path[len(root) + 1:]
subs.extend(os.path.join(path, f) for f in files)
subs.extend(os.path.join(path, d) for d in dirs)
return subs
unreferenced_files = subfiles(root)
remove_file('jsonout.txt', unreferenced_files)
remove_file('stderr.txt', unreferenced_files)
remove_file('stdout.txt', unreferenced_files)
meta_fields = [
[output, output_schema],
[descriptor, descriptor_schema]
]
for meta_field, meta_field_schema in meta_fields:
for field_schema, fields in iterate_fields(meta_field, meta_field_schema):
if 'type' in field_schema:
field_type = field_schema['type']
field_name = field_schema['name']
# Remove basic:file: entries
if field_type.startswith('basic:file:'):
remove_file(fields[field_name]['file'], unreferenced_files)
# Remove list:basic:file: entries
elif field_type.startswith('list:basic:file:'):
for field in fields[field_name]:
remove_file(field['file'], unreferenced_files)
# Remove basic:dir: entries
elif field_type.startswith('basic:dir:'):
remove_tree(fields[field_name]['dir'], unreferenced_files)
# Remove list:basic:dir: entries
elif field_type.startswith('list:basic:dir:'):
for field in fields[field_name]:
remove_tree(field['dir'], unreferenced_files)
# Remove refs entries
if field_type.startswith('basic:file:') or field_type.startswith('basic:dir:'):
for ref in fields[field_name].get('refs', []):
remove_tree(ref, unreferenced_files)
elif field_type.startswith('list:basic:file:') or field_type.startswith('list:basic:dir:'):
for field in fields[field_name]:
for ref in field.get('refs', []):
remove_tree(ref, unreferenced_files)
return set([os.path.join(root, filename) for filename in unreferenced_files]) | Get files to purge. | entailment |
def location_purge(location_id, delete=False, verbosity=0):
"""Print and conditionally delete files not referenced by meta data.
:param location_id: Id of the
:class:`~resolwe.flow.models.DataLocation` model that data
objects reference to.
:param delete: If ``True``, then delete unreferenced files.
"""
try:
location = DataLocation.objects.get(id=location_id)
except DataLocation.DoesNotExist:
logger.warning("Data location does not exist", extra={'location_id': location_id})
return
unreferenced_files = set()
purged_data = Data.objects.none()
referenced_by_data = location.data.exists()
if referenced_by_data:
if location.data.exclude(status__in=[Data.STATUS_DONE, Data.STATUS_ERROR]).exists():
return
# Perform cleanup.
purge_files_sets = list()
purged_data = location.data.all()
for data in purged_data:
purge_files_sets.append(get_purge_files(
location.get_path(),
data.output,
data.process.output_schema,
data.descriptor,
getattr(data.descriptor_schema, 'schema', [])
))
intersected_files = set.intersection(*purge_files_sets) if purge_files_sets else set()
unreferenced_files.update(intersected_files)
else:
# Remove data directory.
unreferenced_files.add(location.get_path())
unreferenced_files.add(location.get_runtime_path())
if verbosity >= 1:
# Print unreferenced files
if unreferenced_files:
logger.info(__("Unreferenced files for location id {} ({}):", location_id, len(unreferenced_files)))
for name in unreferenced_files:
logger.info(__(" {}", name))
else:
logger.info(__("No unreferenced files for location id {}", location_id))
# Go through unreferenced files and delete them.
if delete:
for name in unreferenced_files:
if os.path.isfile(name) or os.path.islink(name):
os.remove(name)
elif os.path.isdir(name):
shutil.rmtree(name)
location.purged = True
location.save()
if not referenced_by_data:
location.delete() | Print and conditionally delete files not referenced by meta data.
:param location_id: Id of the
:class:`~resolwe.flow.models.DataLocation` model that data
objects reference to.
:param delete: If ``True``, then delete unreferenced files. | entailment |
def _location_purge_all(delete=False, verbosity=0):
"""Purge all data locations."""
if DataLocation.objects.exists():
for location in DataLocation.objects.filter(Q(purged=False) | Q(data=None)):
location_purge(location.id, delete, verbosity)
else:
logger.info("No data locations") | Purge all data locations. | entailment |
def _storage_purge_all(delete=False, verbosity=0):
"""Purge unreferenced storages."""
orphaned_storages = Storage.objects.filter(data=None)
if verbosity >= 1:
if orphaned_storages.exists():
logger.info(__("Unreferenced storages ({}):", orphaned_storages.count()))
for storage_id in orphaned_storages.values_list('id', flat=True):
logger.info(__(" {}", storage_id))
else:
logger.info("No unreferenced storages")
if delete:
orphaned_storages.delete() | Purge unreferenced storages. | entailment |
def purge_all(delete=False, verbosity=0):
"""Purge all data locations."""
_location_purge_all(delete, verbosity)
_storage_purge_all(delete, verbosity) | Purge all data locations. | entailment |
def get_value(self, node):
"""Convert value from an AST node."""
if not isinstance(node, ast.Str):
raise TypeError("must be a string literal")
return node.s | Convert value from an AST node. | entailment |
def get_value(self, node):
"""Convert value from an AST node."""
if not isinstance(node, ast.Attribute):
raise TypeError("must be an attribute")
if node.value.id != self.choices.__name__:
raise TypeError("must be an attribute of {}".format(self.choices.__name__))
return getattr(self.choices, node.attr) | Convert value from an AST node. | entailment |
def get_value(self, node):
"""Convert value from an AST node."""
if not isinstance(node, ast.Dict):
raise TypeError("must be a dictionary")
evaluator = SafeEvaluator()
try:
value = evaluator.run(node)
except Exception as ex:
# TODO: Handle errors.
raise ex
try:
# Ensure value is a serializable dictionary.
value = json.loads(json.dumps(value))
if not isinstance(value, dict):
raise TypeError
except (TypeError, ValueError):
raise TypeError("must be serializable")
return value | Convert value from an AST node. | entailment |
def visit_field_class(self, item, descriptor=None, fields=None):
"""Visit a class node containing a list of field definitions."""
discovered_fields = collections.OrderedDict()
field_groups = {}
for node in item.body:
if isinstance(node, ast.ClassDef):
field_groups[node.name] = self.visit_field_class(node)
continue
if not isinstance(node, ast.Assign):
continue
if not isinstance(node.value, ast.Call):
continue
if not isinstance(node.targets[0], ast.Name):
continue
# Build accessible symbols table.
symtable = {}
# All field types.
symtable.update({
field.__name__: field
for field in get_available_fields()
})
# Field group classes.
symtable.update(field_groups)
evaluator = SafeEvaluator(symtable=symtable)
name = node.targets[0].id
try:
field = evaluator.run(node.value)
except Exception as ex:
# TODO: Handle errors.
raise ex
if descriptor is not None:
field.contribute_to_class(descriptor, fields, name)
else:
discovered_fields[name] = field
if descriptor is None:
class Fields:
"""Fields wrapper."""
for name, field in discovered_fields.items():
setattr(Fields, name, field)
return Fields | Visit a class node containing a list of field definitions. | entailment |
def visit_ClassDef(self, node): # pylint: disable=invalid-name
"""Visit top-level classes."""
# Resolve everything as root scope contains everything from the process module.
for base in node.bases:
# Cover `from resolwe.process import ...`.
if isinstance(base, ast.Name) and isinstance(base.ctx, ast.Load):
base = getattr(runtime, base.id, None)
# Cover `from resolwe import process`.
elif isinstance(base, ast.Attribute) and isinstance(base.ctx, ast.Load):
base = getattr(runtime, base.attr, None)
else:
continue
if issubclass(base, runtime.Process):
break
else:
return
descriptor = ProcessDescriptor(source=self.source)
# Available embedded classes.
embedded_class_fields = {
runtime.PROCESS_INPUTS_NAME: descriptor.inputs,
runtime.PROCESS_OUTPUTS_NAME: descriptor.outputs,
}
# Parse metadata in class body.
for item in node.body:
if isinstance(item, ast.Assign):
# Possible metadata.
if (len(item.targets) == 1 and isinstance(item.targets[0], ast.Name)
and isinstance(item.targets[0].ctx, ast.Store)
and item.targets[0].id in PROCESS_METADATA):
# Try to get the metadata value.
value = PROCESS_METADATA[item.targets[0].id].get_value(item.value)
setattr(descriptor.metadata, item.targets[0].id, value)
elif (isinstance(item, ast.Expr) and isinstance(item.value, ast.Str)
and descriptor.metadata.description is None):
# Possible description string.
descriptor.metadata.description = item.value.s
elif isinstance(item, ast.ClassDef) and item.name in embedded_class_fields.keys():
# Possible input/output declaration.
self.visit_field_class(item, descriptor, embedded_class_fields[item.name])
descriptor.validate()
self.processes.append(descriptor) | Visit top-level classes. | entailment |
def parse(self):
"""Parse process.
:return: A list of discovered process descriptors
"""
root = ast.parse(self._source)
visitor = ProcessVisitor(source=self._source)
visitor.visit(root)
return visitor.processes | Parse process.
:return: A list of discovered process descriptors | entailment |
def get_permissions_class(permissions_name=None):
"""Load and cache permissions class.
If ``permissions_name`` is not given, it defaults to permissions
class set in Django ``FLOW_API['PERMISSIONS']`` setting.
"""
def load_permissions(permissions_name):
"""Look for a fully qualified flow permissions class."""
try:
return import_module('{}'.format(permissions_name)).ResolwePermissions
except AttributeError:
raise AttributeError("'ResolwePermissions' class not found in {} module.".format(
permissions_name))
except ImportError as ex:
# The permissions module wasn't found. Display a helpful error
# message listing all possible (built-in) permissions classes.
permissions_dir = os.path.join(os.path.dirname(upath(__file__)), '..', 'perms')
permissions_dir = os.path.normpath(permissions_dir)
try:
builtin_permissions = [
name for _, name, _ in pkgutil.iter_modules([permissions_dir]) if name not in ['tests']]
except EnvironmentError:
builtin_permissions = []
if permissions_name not in ['resolwe.auth.{}'.format(p) for p in builtin_permissions]:
permissions_reprs = map(repr, sorted(builtin_permissions))
err_msg = ("{} isn't an available flow permissions class.\n"
"Try using 'resolwe.auth.XXX', where XXX is one of:\n"
" {}\n"
"Error was: {}".format(permissions_name, ", ".join(permissions_reprs), ex))
raise ImproperlyConfigured(err_msg)
else:
# If there's some other error, this must be an error in Django
raise
if permissions_name is None:
permissions_name = settings.FLOW_API['PERMISSIONS']
if permissions_name not in permissions_classes:
permissions_classes[permissions_name] = load_permissions(permissions_name)
return permissions_classes[permissions_name] | Load and cache permissions class.
If ``permissions_name`` is not given, it defaults to permissions
class set in Django ``FLOW_API['PERMISSIONS']`` setting. | entailment |
def save(self, *args, **kwargs):
"""Save the model."""
name_max_len = self._meta.get_field('name').max_length
if len(self.name) > name_max_len:
self.name = self.name[:(name_max_len - 3)] + '...'
for _ in range(MAX_SLUG_RETRIES):
try:
# Attempt to save the model. It may fail due to slug conflict.
with transaction.atomic():
super().save(*args, **kwargs)
break
except IntegrityError as error:
# Retry in case of slug conflicts.
if '{}_slug'.format(self._meta.db_table) in error.args[0]:
self.slug = None
continue
raise
else:
raise IntegrityError("Maximum number of retries exceeded during slug generation") | Save the model. | entailment |
def get_identifiers(sysmeta_pyxb):
"""Get set of identifiers that provide revision context for SciObj.
Returns: tuple: PID, SID, OBSOLETES_PID, OBSOLETED_BY_PID
"""
pid = d1_common.xml.get_opt_val(sysmeta_pyxb, 'identifier')
sid = d1_common.xml.get_opt_val(sysmeta_pyxb, 'seriesId')
obsoletes_pid = d1_common.xml.get_opt_val(sysmeta_pyxb, 'obsoletes')
obsoleted_by_pid = d1_common.xml.get_opt_val(sysmeta_pyxb, 'obsoletedBy')
return pid, sid, obsoletes_pid, obsoleted_by_pid | Get set of identifiers that provide revision context for SciObj.
Returns: tuple: PID, SID, OBSOLETES_PID, OBSOLETED_BY_PID | entailment |
def topological_sort(unsorted_dict):
"""Sort objects by dependency.
Sort a dict of obsoleting PID to obsoleted PID to a list of PIDs in order of
obsolescence.
Args:
unsorted_dict : dict
Dict that holds obsolescence information. Each ``key/value`` pair establishes
that the PID in ``key`` identifies an object that obsoletes an object identifies
by the PID in ``value``.
Returns:
tuple of sorted_list, unconnected_dict :
``sorted_list``: A list of PIDs ordered so that all PIDs that obsolete an object
are listed after the object they obsolete.
``unconnected_dict``: A dict of PID to obsoleted PID of any objects that could not
be added to a revision chain. These items will have obsoletes PIDs that directly
or indirectly reference a PID that could not be sorted.
Notes:
``obsoletes_dict`` is modified by the sort and on return holds any items that
could not be sorted.
The sort works by repeatedly iterating over an unsorted list of PIDs and
moving PIDs to the sorted list as they become available. A PID is available to
be moved to the sorted list if it does not obsolete a PID or if the PID it
obsoletes is already in the sorted list.
"""
sorted_list = []
sorted_set = set()
found = True
unconnected_dict = unsorted_dict.copy()
while found:
found = False
for pid, obsoletes_pid in list(unconnected_dict.items()):
if obsoletes_pid is None or obsoletes_pid in sorted_set:
found = True
sorted_list.append(pid)
sorted_set.add(pid)
del unconnected_dict[pid]
return sorted_list, unconnected_dict | Sort objects by dependency.
Sort a dict of obsoleting PID to obsoleted PID to a list of PIDs in order of
obsolescence.
Args:
unsorted_dict : dict
Dict that holds obsolescence information. Each ``key/value`` pair establishes
that the PID in ``key`` identifies an object that obsoletes an object identifies
by the PID in ``value``.
Returns:
tuple of sorted_list, unconnected_dict :
``sorted_list``: A list of PIDs ordered so that all PIDs that obsolete an object
are listed after the object they obsolete.
``unconnected_dict``: A dict of PID to obsoleted PID of any objects that could not
be added to a revision chain. These items will have obsoletes PIDs that directly
or indirectly reference a PID that could not be sorted.
Notes:
``obsoletes_dict`` is modified by the sort and on return holds any items that
could not be sorted.
The sort works by repeatedly iterating over an unsorted list of PIDs and
moving PIDs to the sorted list as they become available. A PID is available to
be moved to the sorted list if it does not obsolete a PID or if the PID it
obsoletes is already in the sorted list. | entailment |
def get_pids_in_revision_chain(client, did):
"""Args: client: d1_client.cnclient.CoordinatingNodeClient or
d1_client.mnclient.MemberNodeClient.
did : str
SID or a PID of any object in a revision chain.
Returns:
list of str:
All PIDs in the chain. The returned list is in the same order as the chain. The
initial PID is typically obtained by resolving a SID. If the given PID is not in
a chain, a list containing the single object is returned.
"""
def _req(p):
return d1_common.xml.get_req_val(p)
def _opt(p, a):
return d1_common.xml.get_opt_val(p, a)
sysmeta_pyxb = client.getSystemMetadata(did)
# Walk to tail
while _opt(sysmeta_pyxb, 'obsoletes'):
sysmeta_pyxb = client.getSystemMetadata(_opt(sysmeta_pyxb, 'obsoletes'))
chain_pid_list = [_req(sysmeta_pyxb.identifier)]
# Walk from tail to head, recording traversed PIDs
while _opt(sysmeta_pyxb, 'obsoletedBy'):
sysmeta_pyxb = client.getSystemMetadata(_opt(sysmeta_pyxb, 'obsoletedBy'))
chain_pid_list.append(_req(sysmeta_pyxb.identifier))
return chain_pid_list | Args: client: d1_client.cnclient.CoordinatingNodeClient or
d1_client.mnclient.MemberNodeClient.
did : str
SID or a PID of any object in a revision chain.
Returns:
list of str:
All PIDs in the chain. The returned list is in the same order as the chain. The
initial PID is typically obtained by resolving a SID. If the given PID is not in
a chain, a list containing the single object is returned. | entailment |
def main():
"""Remove unused imports Unsafe!
Only tested on our codebase, which uses simple absolute imports on the form, "import
a.b.c".
"""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("path", nargs="+", help="File or directory path")
parser.add_argument("--exclude", nargs="+", help="Exclude glob patterns")
parser.add_argument(
"--no-recursive",
dest="recursive",
action="store_false",
help="Search directories recursively",
)
parser.add_argument(
"--ignore-invalid", action="store_true", help="Ignore invalid paths"
)
parser.add_argument(
"--pycharm", action="store_true", help="Enable PyCharm integration"
)
parser.add_argument(
"--diff",
dest="show_diff",
action="store_true",
help="Show diff and do not modify any files",
)
parser.add_argument(
"--dry-run", action="store_true", help="Process files but do not write results"
)
parser.add_argument("--debug", action="store_true", help="Debug level logging")
args = parser.parse_args()
d1_common.util.log_setup(args.debug)
repo_path = d1_dev.util.find_repo_root_by_path(__file__)
repo = git.Repo(repo_path)
specified_file_path_list = get_specified_file_path_list(args)
tracked_path_list = list(d1_dev.util.get_tracked_files(repo))
format_path_list = sorted(
set(specified_file_path_list).intersection(tracked_path_list)
)
progress_logger.start_task_type("Format modules", len(format_path_list))
for i, format_path in enumerate(format_path_list):
progress_logger.start_task("Format modules", i)
format_all_docstr(args, format_path)
progress_logger.end_task_type("Format modules") | Remove unused imports Unsafe!
Only tested on our codebase, which uses simple absolute imports on the form, "import
a.b.c". | entailment |
def get_docstr_list(red):
"""Find all triple-quoted docstrings in module."""
docstr_list = []
for n in red.find_all("string"):
if n.value.startswith('"""'):
docstr_list.append(n)
return docstr_list | Find all triple-quoted docstrings in module. | entailment |
def unwrap(s, node_indent):
"""Group lines of a docstring to blocks.
For now, only groups markdown list sections.
A block designates a list of consequtive lines that all start at the same
indentation level.
The lines of the docstring are iterated top to bottom. Each line is added to
`block_list` until a line is encountered that breaks sufficiently with the previous
line to be deemed to be the start of a new block. At that point, all lines
currently
in `block_list` are stripped and joined to a single line, which is added to
`unwrap_list`.
Some of the block breaks are easy to determine. E.g., a line that starts with "- "
is the start of a new markdown style list item, so is always the start of a new
block. But then there are things like this, which is a single block:
- An example list with a second line
And this, which is 3 single line blocks (due to the different indentation levels):
Args:
jwt_bu64: bytes
JWT, encoded using a a URL safe flavor of Base64.
"""
def get_indent():
if line_str.startswith('"""'):
return node_indent
return len(re.match(r"^( *)", line_str).group(1))
def finish_block():
if block_list:
unwrap_list.append(
(block_indent, (" ".join([v.strip() for v in block_list])).strip())
)
block_list.clear()
unwrap_list = []
block_indent = None
block_list = []
for line_str in s.splitlines():
line_str = line_str.rstrip()
line_indent = get_indent()
# A new block has been started. Record the indent of the first line in that
# block to use as the indent for all the lines that will be put in this block.
if not block_list:
block_indent = line_indent
# A blank line always starts a new block.
if line_str == "":
finish_block()
# Indent any lines that are less indentend than the docstr node
# if line_indent < node_indent:
# line_indent = block_indent
# A line that is indented less than the current block starts a new block.
if line_indent < block_indent:
finish_block()
# A line that is the start of a markdown list starts a new block.
elif line_str.strip().startswith(("- ", "* ")):
finish_block()
# A markdown title always starts a new block.
elif line_str.strip().endswith(":"):
finish_block()
block_list.append(line_str)
# Only make blocks for markdown list items. Write everything else as single line items.
if not block_list[0].strip().startswith(("- ", "* ")):
finish_block()
# Finish the block that was in progress when the end of the docstring was reached.
finish_block()
return unwrap_list | Group lines of a docstring to blocks.
For now, only groups markdown list sections.
A block designates a list of consequtive lines that all start at the same
indentation level.
The lines of the docstring are iterated top to bottom. Each line is added to
`block_list` until a line is encountered that breaks sufficiently with the previous
line to be deemed to be the start of a new block. At that point, all lines
currently
in `block_list` are stripped and joined to a single line, which is added to
`unwrap_list`.
Some of the block breaks are easy to determine. E.g., a line that starts with "- "
is the start of a new markdown style list item, so is always the start of a new
block. But then there are things like this, which is a single block:
- An example list with a second line
And this, which is 3 single line blocks (due to the different indentation levels):
Args:
jwt_bu64: bytes
JWT, encoded using a a URL safe flavor of Base64. | entailment |
def wrap(indent_int, unwrap_str):
"""Wrap a single line to one or more lines that start at indent_int and end at the
last word that will fit before WRAP_MARGIN_INT.
If there are no word breaks (spaces) before WRAP_MARGIN_INT, force a break at
WRAP_MARGIN_INT.
"""
with io.StringIO() as str_buf:
is_rest_block = unwrap_str.startswith(("- ", "* "))
while unwrap_str:
cut_pos = (unwrap_str + " ").rfind(" ", 0, WRAP_MARGIN_INT - indent_int)
if cut_pos == -1:
cut_pos = WRAP_MARGIN_INT
this_str, unwrap_str = unwrap_str[:cut_pos], unwrap_str[cut_pos + 1 :]
str_buf.write("{}{}\n".format(" " * indent_int, this_str))
if is_rest_block:
is_rest_block = False
indent_int += 2
return str_buf.getvalue() | Wrap a single line to one or more lines that start at indent_int and end at the
last word that will fit before WRAP_MARGIN_INT.
If there are no word breaks (spaces) before WRAP_MARGIN_INT, force a break at
WRAP_MARGIN_INT. | entailment |
def createSimpleResourceMap(ore_pid, scimeta_pid, sciobj_pid_list):
"""Create a simple OAI-ORE Resource Map with one Science Metadata document and any
number of Science Data objects.
This creates a document that establishes an association between a Science Metadata
object and any number of Science Data objects. The Science Metadata object contains
information that is indexed by DataONE, allowing both the Science Metadata and the
Science Data objects to be discoverable in DataONE Search. In search results, the
objects will appear together and can be downloaded as a single package.
Args:
ore_pid: str
Persistent Identifier (PID) to use for the new Resource Map
scimeta_pid: str
PID for an object that will be listed as the Science Metadata that is
describing the Science Data objects.
sciobj_pid_list: list of str
List of PIDs that will be listed as the Science Data objects that are being
described by the Science Metadata.
Returns:
ResourceMap : OAI-ORE Resource Map
"""
ore = ResourceMap()
ore.initialize(ore_pid)
ore.addMetadataDocument(scimeta_pid)
ore.addDataDocuments(sciobj_pid_list, scimeta_pid)
return ore | Create a simple OAI-ORE Resource Map with one Science Metadata document and any
number of Science Data objects.
This creates a document that establishes an association between a Science Metadata
object and any number of Science Data objects. The Science Metadata object contains
information that is indexed by DataONE, allowing both the Science Metadata and the
Science Data objects to be discoverable in DataONE Search. In search results, the
objects will appear together and can be downloaded as a single package.
Args:
ore_pid: str
Persistent Identifier (PID) to use for the new Resource Map
scimeta_pid: str
PID for an object that will be listed as the Science Metadata that is
describing the Science Data objects.
sciobj_pid_list: list of str
List of PIDs that will be listed as the Science Data objects that are being
described by the Science Metadata.
Returns:
ResourceMap : OAI-ORE Resource Map | entailment |
def createResourceMapFromStream(in_stream, base_url=d1_common.const.URL_DATAONE_ROOT):
"""Create a simple OAI-ORE Resource Map with one Science Metadata document and any
number of Science Data objects, using a stream of PIDs.
Args:
in_stream:
The first non-blank line is the PID of the resource map itself. Second line is
the science metadata PID and remaining lines are science data PIDs.
Example stream contents:
::
PID_ORE_value
sci_meta_pid_value
data_pid_1
data_pid_2
data_pid_3
base_url : str
Root of the DataONE environment in which the Resource Map will be used.
Returns:
ResourceMap : OAI-ORE Resource Map
"""
pids = []
for line in in_stream:
pid = line.strip()
if pid == "#" or pid.startswith("# "):
continue
if len(pids) < 2:
raise ValueError("Insufficient numbers of identifiers provided.")
logging.info("Read {} identifiers".format(len(pids)))
ore = ResourceMap(base_url=base_url)
logging.info("ORE PID = {}".format(pids[0]))
ore.initialize(pids[0])
logging.info("Metadata PID = {}".format(pids[1]))
ore.addMetadataDocument(pids[1])
ore.addDataDocuments(pids[2:], pids[1])
return ore | Create a simple OAI-ORE Resource Map with one Science Metadata document and any
number of Science Data objects, using a stream of PIDs.
Args:
in_stream:
The first non-blank line is the PID of the resource map itself. Second line is
the science metadata PID and remaining lines are science data PIDs.
Example stream contents:
::
PID_ORE_value
sci_meta_pid_value
data_pid_1
data_pid_2
data_pid_3
base_url : str
Root of the DataONE environment in which the Resource Map will be used.
Returns:
ResourceMap : OAI-ORE Resource Map | entailment |
def initialize(self, pid, ore_software_id=d1_common.const.ORE_SOFTWARE_ID):
"""Create the basic ORE document structure."""
# Set nice prefixes for the namespaces
for k in list(d1_common.const.ORE_NAMESPACE_DICT.keys()):
self.bind(k, d1_common.const.ORE_NAMESPACE_DICT[k])
# Create the ORE entity
oid = self._pid_to_id(pid)
ore = rdflib.URIRef(oid)
self.add((ore, rdflib.RDF.type, ORE.ResourceMap))
self.add((ore, DCTERMS.identifier, rdflib.term.Literal(pid)))
self.add((ore, DCTERMS.creator, rdflib.term.Literal(ore_software_id)))
# Add an empty aggregation
ag = rdflib.URIRef(oid + "#aggregation")
self.add((ore, ORE.describes, ag))
self.add((ag, rdflib.RDF.type, ORE.Aggregation))
self.add((ORE.Aggregation, rdflib.RDFS.isDefinedBy, ORE.term("")))
self.add(
(ORE.Aggregation, rdflib.RDFS.label, rdflib.term.Literal("Aggregation"))
)
self._ore_initialized = True | Create the basic ORE document structure. | entailment |
def serialize_to_transport(self, doc_format="xml", *args, **kwargs):
"""Serialize ResourceMap to UTF-8 encoded XML document.
Args:
doc_format: str
One of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,
``trig`` and ``nquads``.
args and kwargs:
Optional arguments forwarded to rdflib.ConjunctiveGraph.serialize().
Returns:
bytes: UTF-8 encoded XML doc.
Note:
Only the default, "xml", is automatically indexed by DataONE.
"""
return super(ResourceMap, self).serialize(
format=doc_format, encoding="utf-8", *args, **kwargs
) | Serialize ResourceMap to UTF-8 encoded XML document.
Args:
doc_format: str
One of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,
``trig`` and ``nquads``.
args and kwargs:
Optional arguments forwarded to rdflib.ConjunctiveGraph.serialize().
Returns:
bytes: UTF-8 encoded XML doc.
Note:
Only the default, "xml", is automatically indexed by DataONE. | entailment |
def serialize_to_display(self, doc_format="pretty-xml", *args, **kwargs):
"""Serialize ResourceMap to an XML doc that is pretty printed for display.
Args:
doc_format: str
One of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,
``trig`` and ``nquads``.
args and kwargs:
Optional arguments forwarded to rdflib.ConjunctiveGraph.serialize().
Returns:
str: Pretty printed Resource Map XML doc
Note:
Only the default, "xml", is automatically indexed by DataONE.
"""
return (
super(ResourceMap, self)
.serialize(format=doc_format, encoding=None, *args, **kwargs)
.decode("utf-8")
) | Serialize ResourceMap to an XML doc that is pretty printed for display.
Args:
doc_format: str
One of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,
``trig`` and ``nquads``.
args and kwargs:
Optional arguments forwarded to rdflib.ConjunctiveGraph.serialize().
Returns:
str: Pretty printed Resource Map XML doc
Note:
Only the default, "xml", is automatically indexed by DataONE. | entailment |
def deserialize(self, *args, **kwargs):
"""Deserialize Resource Map XML doc.
The source is specified using one of source, location, file or data.
Args:
source: InputSource, file-like object, or string
In the case of a string the string is the location of the source.
location: str
String indicating the relative or absolute URL of the source. Graph``s
absolutize method is used if a relative location is specified.
file: file-like object
data: str
The document to be parsed.
format : str
Used if format can not be determined from source. Defaults to ``rdf/xml``.
Format support can be extended with plugins.
Built-in: ``xml``, ``n3``, ``nt``, ``trix``, ``rdfa``
publicID: str
Logical URI to use as the document base. If None specified the document
location is used (at least in the case where there is a document location).
Raises:
xml.sax.SAXException based exception: On parse error.
"""
self.parse(*args, **kwargs)
self._ore_initialized = True | Deserialize Resource Map XML doc.
The source is specified using one of source, location, file or data.
Args:
source: InputSource, file-like object, or string
In the case of a string the string is the location of the source.
location: str
String indicating the relative or absolute URL of the source. Graph``s
absolutize method is used if a relative location is specified.
file: file-like object
data: str
The document to be parsed.
format : str
Used if format can not be determined from source. Defaults to ``rdf/xml``.
Format support can be extended with plugins.
Built-in: ``xml``, ``n3``, ``nt``, ``trix``, ``rdfa``
publicID: str
Logical URI to use as the document base. If None specified the document
location is used (at least in the case where there is a document location).
Raises:
xml.sax.SAXException based exception: On parse error. | entailment |
def getAggregation(self):
"""Returns:
str : URIRef of the Aggregation entity
"""
self._check_initialized()
return [
o for o in self.subjects(predicate=rdflib.RDF.type, object=ORE.Aggregation)
][0] | Returns:
str : URIRef of the Aggregation entity | entailment |
def getObjectByPid(self, pid):
"""
Args:
pid : str
Returns:
str : URIRef of the entry identified by ``pid``."""
self._check_initialized()
opid = rdflib.term.Literal(pid)
res = [o for o in self.subjects(predicate=DCTERMS.identifier, object=opid)]
return res[0] | Args:
pid : str
Returns:
str : URIRef of the entry identified by ``pid``. | entailment |
def addResource(self, pid):
"""Add a resource to the Resource Map.
Args:
pid : str
"""
self._check_initialized()
try:
# is entry already in place?
self.getObjectByPid(pid)
return
except IndexError:
pass
# Entry not present, add it to the graph
oid = self._pid_to_id(pid)
obj = rdflib.URIRef(oid)
ag = self.getAggregation()
self.add((ag, ORE.aggregates, obj))
self.add((obj, ORE.isAggregatedBy, ag))
self.add((obj, DCTERMS.identifier, rdflib.term.Literal(pid))) | Add a resource to the Resource Map.
Args:
pid : str | entailment |
def setDocuments(self, documenting_pid, documented_pid):
"""Add a CiTO, the Citation Typing Ontology, triple asserting that
``documenting_pid`` documents ``documented_pid``.
Adds assertion: ``documenting_pid cito:documents documented_pid``
Args:
documenting_pid: str
PID of a Science Object that documents ``documented_pid``.
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``.
"""
self._check_initialized()
documenting_id = self.getObjectByPid(documenting_pid)
documented_id = self.getObjectByPid(documented_pid)
self.add((documenting_id, CITO.documents, documented_id)) | Add a CiTO, the Citation Typing Ontology, triple asserting that
``documenting_pid`` documents ``documented_pid``.
Adds assertion: ``documenting_pid cito:documents documented_pid``
Args:
documenting_pid: str
PID of a Science Object that documents ``documented_pid``.
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``. | entailment |
def setDocumentedBy(self, documented_pid, documenting_pid):
"""Add a CiTO, the Citation Typing Ontology, triple asserting that
``documented_pid`` isDocumentedBy ``documenting_pid``.
Adds assertion: ``documented_pid cito:isDocumentedBy documenting_pid``
Args:
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``.
documenting_pid: str
PID of a Science Object that documents ``documented_pid``.
"""
self._check_initialized()
documented_id = self.getObjectByPid(documented_pid)
documenting_id = self.getObjectByPid(documenting_pid)
self.add((documented_id, CITO.isDocumentedBy, documenting_id)) | Add a CiTO, the Citation Typing Ontology, triple asserting that
``documented_pid`` isDocumentedBy ``documenting_pid``.
Adds assertion: ``documented_pid cito:isDocumentedBy documenting_pid``
Args:
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``.
documenting_pid: str
PID of a Science Object that documents ``documented_pid``. | entailment |
def addDataDocuments(self, scidata_pid_list, scimeta_pid=None):
"""Add Science Data object(s)
Args:
scidata_pid_list : list of str
List of one or more PIDs of Science Data objects
scimeta_pid: str
PID of a Science Metadata object that documents the Science Data objects.
"""
mpids = self.getAggregatedScienceMetadataPids()
if scimeta_pid is None:
if len(mpids) > 1:
raise ValueError(
"No metadata PID specified and more than one choice available."
)
scimeta_pid = mpids[0]
else:
if scimeta_pid not in mpids:
self.addMetadataDocument(scimeta_pid)
for dpid in scidata_pid_list:
self.addResource(dpid)
self.setDocumentedBy(dpid, scimeta_pid)
self.setDocuments(scimeta_pid, dpid) | Add Science Data object(s)
Args:
scidata_pid_list : list of str
List of one or more PIDs of Science Data objects
scimeta_pid: str
PID of a Science Metadata object that documents the Science Data objects. | entailment |
def getResourceMapPid(self):
"""Returns:
str : PID of the Resource Map itself.
"""
ore = [
o for o in self.subjects(predicate=rdflib.RDF.type, object=ORE.ResourceMap)
][0]
pid = [str(o) for o in self.objects(predicate=DCTERMS.identifier, subject=ore)][
0
]
return pid | Returns:
str : PID of the Resource Map itself. | entailment |
def getAllTriples(self):
"""Returns:
list of tuples : Each tuple holds a subject, predicate, object triple
"""
return [(str(s), str(p), str(o)) for s, p, o in self] | Returns:
list of tuples : Each tuple holds a subject, predicate, object triple | entailment |
def getSubjectObjectsByPredicate(self, predicate):
"""
Args:
predicate : str
Predicate for which to return subject, object tuples.
Returns:
list of subject, object tuples: All subject/objects with ``predicate``.
Notes:
Equivalent SPARQL:
.. highlight: sql
::
SELECT DISTINCT ?s ?o
WHERE {{
?s {0} ?o .
}}
"""
return sorted(
set(
[
(str(s), str(o))
for s, o in self.subject_objects(rdflib.term.URIRef(predicate))
]
)
) | Args:
predicate : str
Predicate for which to return subject, object tuples.
Returns:
list of subject, object tuples: All subject/objects with ``predicate``.
Notes:
Equivalent SPARQL:
.. highlight: sql
::
SELECT DISTINCT ?s ?o
WHERE {{
?s {0} ?o .
}} | entailment |
def parseDoc(self, doc_str, format="xml"):
"""Parse a OAI-ORE Resource Maps document.
See Also: ``rdflib.ConjunctiveGraph.parse`` for documentation on arguments.
"""
self.parse(data=doc_str, format=format)
self._ore_initialized = True
return self | Parse a OAI-ORE Resource Maps document.
See Also: ``rdflib.ConjunctiveGraph.parse`` for documentation on arguments. | entailment |
def _pid_to_id(self, pid):
"""Converts a pid to a URI that can be used as an OAI-ORE identifier."""
return d1_common.url.joinPathElements(
self._base_url,
self._version_tag,
"resolve",
d1_common.url.encodePathElement(pid),
) | Converts a pid to a URI that can be used as an OAI-ORE identifier. | entailment |
def make_checksum_validation_script(stats_list):
"""Make batch files required for checking checksums from another machine."""
if not os.path.exists('./hash_check'):
os.mkdir('./hash_check')
with open('./hash_check/curl.sh', 'w') as curl_f, open(
'./hash_check/md5.txt', 'w'
) as md5_f, open('./hash_check/sha1.txt', 'w') as sha1_f:
curl_f.write('#!/usr/bin/env bash\n\n')
for stats_dict in stats_list:
for sysmeta_xml in stats_dict['largest_sysmeta_xml']:
print(sysmeta_xml)
sysmeta_pyxb = d1_common.types.dataoneTypes_v1_2.CreateFromDocument(
sysmeta_xml
)
pid = sysmeta_pyxb.identifier.value().encode('utf-8')
file_name = re.sub('\W+', '_', pid)
size = sysmeta_pyxb.size
base_url = stats_dict['gmn_dict']['base_url']
if size > 100 * 1024 * 1024:
logging.info('Ignored large object. size={} pid={}')
curl_f.write('# {} {}\n'.format(size, pid))
curl_f.write(
'curl -o obj/{} {}/v1/object/{}\n'.format(
file_name, base_url, d1_common.url.encodePathElement(pid)
)
)
if sysmeta_pyxb.checksum.algorithm == 'MD5':
md5_f.write(
'{} obj/{}\n'.format(sysmeta_pyxb.checksum.value(), file_name)
)
else:
sha1_f.write(
'{} obj/{}\n'.format(sysmeta_pyxb.checksum.value(), file_name)
)
with open('./hash_check/check.sh', 'w') as f:
f.write('#!/usr/bin/env bash\n\n')
f.write('mkdir -p obj\n')
f.write('./curl.sh\n')
f.write('sha1sum -c sha1.txt\n')
f.write('md5sum -c md5.txt\n') | Make batch files required for checking checksums from another machine. | entailment |
def formfield(self, **kwargs):
"""
:returns: A :class:`~django.forms.FloatField` with ``max_value`` 90 and
``min_value`` -90.
"""
kwargs.update({
'max_value': 90,
'min_value': -90,
})
return super(LatitudeField, self).formfield(**kwargs) | :returns: A :class:`~django.forms.FloatField` with ``max_value`` 90 and
``min_value`` -90. | entailment |
def formfield(self, **kwargs):
"""
:returns: A :class:`~django.forms.FloatField` with ``max_value`` 180 and
``min_value`` -180.
"""
kwargs.update({
'max_value': 180,
'min_value': -180,
})
return super(LongitudeField, self).formfield(**kwargs) | :returns: A :class:`~django.forms.FloatField` with ``max_value`` 180 and
``min_value`` -180. | entailment |
def formfield(self, **kwargs):
"""
:returns: A :class:`~osm_field.forms.OSMFormField` with a
:class:`~osm_field.widgets.OSMWidget`.
"""
widget_kwargs = {
'lat_field': self.latitude_field_name,
'lon_field': self.longitude_field_name,
}
if self.data_field_name:
widget_kwargs['data_field'] = self.data_field_name
defaults = {
'form_class': OSMFormField,
'widget': OSMWidget(**widget_kwargs),
}
defaults.update(kwargs)
return super(OSMField, self).formfield(**defaults) | :returns: A :class:`~osm_field.forms.OSMFormField` with a
:class:`~osm_field.widgets.OSMWidget`. | entailment |
def latitude_field_name(self):
"""
The name of the related :class:`LatitudeField`.
"""
if self._lat_field_name is None:
self._lat_field_name = self.name + '_lat'
return self._lat_field_name | The name of the related :class:`LatitudeField`. | entailment |
def longitude_field_name(self):
"""
The name of the related :class:`LongitudeField`.
"""
if self._lon_field_name is None:
self._lon_field_name = self.name + '_lon'
return self._lon_field_name | The name of the related :class:`LongitudeField`. | entailment |
def update_dependency_kinds(apps, schema_editor):
"""Update historical dependency kinds as they may be wrong."""
DataDependency = apps.get_model('flow', 'DataDependency')
for dependency in DataDependency.objects.all():
# Assume dependency is of subprocess kind.
dependency.kind = 'subprocess'
# Check child inputs to determine if this is an IO dependency.
child = dependency.child
parent = dependency.parent
for field_schema, fields in iterate_fields(child.input, child.process.input_schema):
name = field_schema['name']
value = fields[name]
if field_schema.get('type', '').startswith('data:'):
if value == parent.pk:
dependency.kind = 'io'
break
elif field_schema.get('type', '').startswith('list:data:'):
for data in value:
if value == parent.pk:
dependency.kind = 'io'
break
dependency.save() | Update historical dependency kinds as they may be wrong. | entailment |
def escape(self, value):
"""Escape given value."""
value = soft_unicode(value)
if self._engine._escape is None: # pylint: disable=protected-access
return value
return self._engine._escape(value) | Escape given value. | entailment |
def _wrap_jinja_filter(self, function):
"""Propagate exceptions as undefined values filter."""
def wrapper(*args, **kwargs):
"""Filter wrapper."""
try:
return function(*args, **kwargs)
except Exception: # pylint: disable=broad-except
return NestedUndefined()
# Copy over Jinja filter decoration attributes.
for attribute in dir(function):
if attribute.endswith('filter'):
setattr(wrapper, attribute, getattr(function, attribute))
return wrapper | Propagate exceptions as undefined values filter. | entailment |
def _register_custom_filters(self):
"""Register any custom filter modules."""
custom_filters = self.settings.get('CUSTOM_FILTERS', [])
if not isinstance(custom_filters, list):
raise KeyError("`CUSTOM_FILTERS` setting must be a list.")
for filter_module_name in custom_filters:
try:
filter_module = import_module(filter_module_name)
except ImportError as error:
raise ImproperlyConfigured(
"Failed to load custom filter module '{}'.\n"
"Error was: {}".format(filter_module_name, error)
)
try:
filter_map = getattr(filter_module, 'filters')
if not isinstance(filter_map, dict):
raise TypeError
except (AttributeError, TypeError):
raise ImproperlyConfigured(
"Filter module '{}' does not define a 'filters' dictionary".format(filter_module_name)
)
self._environment.filters.update(filter_map) | Register any custom filter modules. | entailment |
def _evaluation_context(self, escape, safe_wrapper):
"""Configure the evaluation context."""
self._escape = escape
self._safe_wrapper = safe_wrapper
try:
yield
finally:
self._escape = None
self._safe_wrapper = None | Configure the evaluation context. | entailment |
def evaluate_block(self, template, context=None, escape=None, safe_wrapper=None):
"""Evaluate a template block."""
if context is None:
context = {}
try:
with self._evaluation_context(escape, safe_wrapper):
template = self._environment.from_string(template)
return template.render(**context)
except jinja2.TemplateError as error:
raise EvaluationError(error.args[0])
finally:
self._escape = None | Evaluate a template block. | entailment |
def evaluate_inline(self, expression, context=None, escape=None, safe_wrapper=None):
"""Evaluate an inline expression."""
if context is None:
context = {}
try:
with self._evaluation_context(escape, safe_wrapper):
compiled = self._environment.compile_expression(expression)
return compiled(**context)
except jinja2.TemplateError as error:
raise EvaluationError(error.args[0]) | Evaluate an inline expression. | entailment |
def update_module_file(redbaron_tree, module_path, show_diff=False, dry_run=False):
"""Set show_diff to False to overwrite module_path with a new file generated from
``redbaron_tree``.
Returns True if tree is different from source.
"""
with tempfile.NamedTemporaryFile() as tmp_file:
tmp_file.write(redbaron_tree_to_module_str(redbaron_tree))
tmp_file.seek(0)
if are_files_equal(module_path, tmp_file.name):
logging.debug('Source unchanged')
return False
logging.debug('Source modified')
tmp_file.seek(0)
diff_update_file(module_path, tmp_file.read(), show_diff, dry_run) | Set show_diff to False to overwrite module_path with a new file generated from
``redbaron_tree``.
Returns True if tree is different from source. | entailment |
def find_repo_root_by_path(path):
"""Given a path to an item in a git repository, find the root of the repository."""
repo = git.Repo(path, search_parent_directories=True)
repo_path = repo.git.rev_parse('--show-toplevel')
logging.info('Repository: {}'.format(repo_path))
return repo_path | Given a path to an item in a git repository, find the root of the repository. | entailment |
def _format_value(self, operation, key, indent):
"""A value that exists in the operation but has value None is displayed.
A value that does not exist in the operation is left out entirely. The value
name in the operation must match the value name in the template, but the
location does not have to match.
"""
v = self._find_value(operation, key)
if v == "NOT_FOUND":
return []
if not isinstance(v, list):
v = [v]
if not len(v):
v = [None]
key = key + ":"
lines = []
for s in v:
# Access control rules are stored in tuples.
if isinstance(s, tuple):
s = "{}: {}".format(*s)
lines.append(
"{}{}{}{}".format(
" " * indent, key, " " * (TAB - indent - len(key) - 1), s
)
)
key = ""
return lines | A value that exists in the operation but has value None is displayed.
A value that does not exist in the operation is left out entirely. The value
name in the operation must match the value name in the template, but the
location does not have to match. | entailment |
def add_cors_headers(response, request):
"""Add Cross-Origin Resource Sharing (CORS) headers to response.
- ``method_list`` is a list of HTTP methods that are allowed for the endpoint that
was called. It should not include "OPTIONS", which is included automatically
since it's allowed for all endpoints.
"""
opt_method_list = ",".join(request.allowed_method_list + ["OPTIONS"])
response["Allow"] = opt_method_list
response["Access-Control-Allow-Methods"] = opt_method_list
response["Access-Control-Allow-Origin"] = request.META.get("Origin", "*")
response["Access-Control-Allow-Headers"] = "Authorization"
response["Access-Control-Allow-Credentials"] = "true" | Add Cross-Origin Resource Sharing (CORS) headers to response.
- ``method_list`` is a list of HTTP methods that are allowed for the endpoint that
was called. It should not include "OPTIONS", which is included automatically
since it's allowed for all endpoints. | entailment |
def run_process(self, slug, inputs):
"""Run a new process from a running process."""
def export_files(value):
"""Export input files of spawned process."""
if isinstance(value, str) and os.path.isfile(value):
# TODO: Use the protocol to export files and get the
# process schema to check field type.
print("export {}".format(value))
elif isinstance(value, dict):
for item in value.values():
export_files(item)
elif isinstance(value, list):
for item in value:
export_files(item)
export_files(inputs)
print('run {}'.format(json.dumps({'process': slug, 'input': inputs}, separators=(',', ':')))) | Run a new process from a running process. | entailment |
def info(self, *args):
"""Log informational message."""
report = resolwe_runtime_utils.info(' '.join([str(x) for x in args]))
# TODO: Use the protocol to report progress.
print(report) | Log informational message. | entailment |
def get_data_id_by_slug(self, slug):
"""Find data object ID for given slug.
This method queries the Resolwe API and requires network access.
"""
resolwe_host = os.environ.get('RESOLWE_HOST_URL')
url = urllib.parse.urljoin(resolwe_host, '/api/data?slug={}&fields=id'.format(slug))
with urllib.request.urlopen(url, timeout=60) as f:
data = json.loads(f.read().decode('utf-8'))
if len(data) == 1:
return data[0]['id']
elif not data:
raise ValueError('Data not found for slug {}'.format(slug))
else:
raise ValueError('More than one data object returned for slug {}'.format(slug)) | Find data object ID for given slug.
This method queries the Resolwe API and requires network access. | entailment |
def requirements(self):
"""Process requirements."""
class dotdict(dict): # pylint: disable=invalid-name
"""Dot notation access to dictionary attributes."""
def __getattr__(self, attr):
value = self.get(attr)
return dotdict(value) if isinstance(value, dict) else value
return dotdict(self._meta.metadata.requirements) | Process requirements. | entailment |
def start(self, inputs):
"""Start the process.
:param inputs: An instance of `Inputs` describing the process inputs
:return: An instance of `Outputs` describing the process outputs
"""
self.logger.info("Process is starting")
outputs = Outputs(self._meta.outputs)
self.logger.info("Process is running")
try:
self.run(inputs.freeze(), outputs)
return outputs.freeze()
except Exception as error:
self.logger.exception("Exception while running process")
print(resolwe_runtime_utils.error(str(error)))
raise
except: # noqa
self.logger.exception("Exception while running process")
print(resolwe_runtime_utils.error("Exception while running process"))
raise
finally:
self.logger.info("Process has finished") | Start the process.
:param inputs: An instance of `Inputs` describing the process inputs
:return: An instance of `Outputs` describing the process outputs | entailment |
def discover_process(self, path):
"""Perform process discovery in given path.
This method will be called during process registration and
should return a list of dictionaries with discovered process
schemas.
"""
if not path.lower().endswith('.py'):
return []
parser = SafeParser(open(path).read())
processes = parser.parse()
return [process.to_schema() for process in processes] | Perform process discovery in given path.
This method will be called during process registration and
should return a list of dictionaries with discovered process
schemas. | entailment |
def evaluate(self, data):
"""Evaluate the code needed to compute a given Data object."""
return 'PYTHONPATH="{runtime}" python3 -u -m resolwe.process {program} --slug {slug} --inputs {inputs}'.format(
runtime=PYTHON_RUNTIME_VOLUME,
program=PYTHON_PROGRAM_VOLUME,
slug=shlex.quote(data.process.slug),
inputs=PYTHON_INPUTS_VOLUME,
) | Evaluate the code needed to compute a given Data object. | entailment |
def prepare_runtime(self, runtime_dir, data):
"""Prepare runtime directory."""
# Copy over Python process runtime (resolwe.process).
import resolwe.process as runtime_package
src_dir = os.path.dirname(inspect.getsourcefile(runtime_package))
dest_package_dir = os.path.join(runtime_dir, PYTHON_RUNTIME_DIRNAME, 'resolwe', 'process')
shutil.copytree(src_dir, dest_package_dir)
os.chmod(dest_package_dir, 0o755)
# Write python source file.
source = data.process.run.get('program', '')
program_path = os.path.join(runtime_dir, PYTHON_PROGRAM_FILENAME)
with open(program_path, 'w') as file:
file.write(source)
os.chmod(program_path, 0o755)
# Write serialized inputs.
inputs = copy.deepcopy(data.input)
hydrate_input_references(inputs, data.process.input_schema)
hydrate_input_uploads(inputs, data.process.input_schema)
inputs_path = os.path.join(runtime_dir, PYTHON_INPUTS_FILENAME)
# XXX: Skip serialization of LazyStorageJSON. We should support
# LazyStorageJSON in Python processes on the new communication protocol
def default(obj):
"""Get default value."""
class_name = obj.__class__.__name__
if class_name == 'LazyStorageJSON':
return ''
raise TypeError(f'Object of type {class_name} is not JSON serializable')
with open(inputs_path, 'w') as file:
json.dump(inputs, file, default=default)
# Generate volume maps required to expose needed files.
volume_maps = {
PYTHON_RUNTIME_DIRNAME: PYTHON_RUNTIME_VOLUME,
PYTHON_PROGRAM_FILENAME: PYTHON_PROGRAM_VOLUME,
PYTHON_INPUTS_FILENAME: PYTHON_INPUTS_VOLUME,
}
return volume_maps | Prepare runtime directory. | entailment |
def get_subject_with_local_validation(jwt_bu64, cert_obj):
"""Validate the JWT and return the subject it contains.
- The JWT is validated by checking that it was signed with a CN certificate.
- The returned subject can be trusted for authz and authn operations.
- Possible validation errors include:
- A trusted (TLS/SSL) connection could not be made to the CN holding the
signing certificate.
- The JWT could not be decoded.
- The JWT signature signature was invalid.
- The JWT claim set contains invalid "Not Before" or "Expiration Time" claims.
Args:
jwt_bu64: bytes
The JWT encoded using a a URL safe flavor of Base64.
cert_obj: cryptography.Certificate
Public certificate used for signing the JWT (typically the CN cert).
Returns:
- On successful validation, the subject contained in the JWT is returned.
- If validation fails for any reason, errors are logged and None is returned.
"""
try:
jwt_dict = validate_and_decode(jwt_bu64, cert_obj)
except JwtException as e:
return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)
try:
return jwt_dict['sub']
except LookupError:
log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict) | Validate the JWT and return the subject it contains.
- The JWT is validated by checking that it was signed with a CN certificate.
- The returned subject can be trusted for authz and authn operations.
- Possible validation errors include:
- A trusted (TLS/SSL) connection could not be made to the CN holding the
signing certificate.
- The JWT could not be decoded.
- The JWT signature signature was invalid.
- The JWT claim set contains invalid "Not Before" or "Expiration Time" claims.
Args:
jwt_bu64: bytes
The JWT encoded using a a URL safe flavor of Base64.
cert_obj: cryptography.Certificate
Public certificate used for signing the JWT (typically the CN cert).
Returns:
- On successful validation, the subject contained in the JWT is returned.
- If validation fails for any reason, errors are logged and None is returned. | entailment |
def get_subject_with_remote_validation(jwt_bu64, base_url):
"""Same as get_subject_with_local_validation() except that the signing certificate
is automatically downloaded from the CN.
- Additional possible validations errors:
- The certificate could not be retrieved from the root CN.
"""
cert_obj = d1_common.cert.x509.download_as_obj(base_url)
return get_subject_with_local_validation(jwt_bu64, cert_obj) | Same as get_subject_with_local_validation() except that the signing certificate
is automatically downloaded from the CN.
- Additional possible validations errors:
- The certificate could not be retrieved from the root CN. | entailment |
def get_subject_with_file_validation(jwt_bu64, cert_path):
"""Same as get_subject_with_local_validation() except that the signing certificate
is read from a local PEM file."""
cert_obj = d1_common.cert.x509.deserialize_pem_file(cert_path)
return get_subject_with_local_validation(jwt_bu64, cert_obj) | Same as get_subject_with_local_validation() except that the signing certificate
is read from a local PEM file. | entailment |
def get_subject_without_validation(jwt_bu64):
"""Extract subject from the JWT without validating the JWT.
- The extracted subject cannot be trusted for authn or authz.
Args:
jwt_bu64: bytes
JWT, encoded using a a URL safe flavor of Base64.
Returns:
str: The subject contained in the JWT.
"""
try:
jwt_dict = get_jwt_dict(jwt_bu64)
except JwtException as e:
return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)
try:
return jwt_dict['sub']
except LookupError:
log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict) | Extract subject from the JWT without validating the JWT.
- The extracted subject cannot be trusted for authn or authz.
Args:
jwt_bu64: bytes
JWT, encoded using a a URL safe flavor of Base64.
Returns:
str: The subject contained in the JWT. | entailment |
def get_jwt_dict(jwt_bu64):
"""Parse Base64 encoded JWT and return as a dict.
- JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and
returns it as a dict containing Unicode strings.
- In addition, a SHA1 hash is added to the dict for convenience.
Args:
jwt_bu64: bytes
JWT, encoded using a a URL safe flavor of Base64.
Returns:
dict: Values embedded in and derived from the JWT.
"""
jwt_tup = get_jwt_tup(jwt_bu64)
try:
jwt_dict = json.loads(jwt_tup[0].decode('utf-8'))
jwt_dict.update(json.loads(jwt_tup[1].decode('utf-8')))
jwt_dict['_sig_sha1'] = hashlib.sha1(jwt_tup[2]).hexdigest()
except TypeError as e:
raise JwtException('Decode failed. error="{}"'.format(e))
return jwt_dict | Parse Base64 encoded JWT and return as a dict.
- JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and
returns it as a dict containing Unicode strings.
- In addition, a SHA1 hash is added to the dict for convenience.
Args:
jwt_bu64: bytes
JWT, encoded using a a URL safe flavor of Base64.
Returns:
dict: Values embedded in and derived from the JWT. | entailment |
def validate_and_decode(jwt_bu64, cert_obj):
"""Validate the JWT and return as a dict.
- JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and
returns it as a dict.
Args:
jwt_bu64: bytes
The JWT encoded using a a URL safe flavor of Base64.
cert_obj: cryptography.Certificate
Public certificate used for signing the JWT (typically the CN cert).
Raises:
JwtException: If validation fails.
Returns:
dict: Values embedded in the JWT.
"""
try:
return jwt.decode(
jwt_bu64.strip(), cert_obj.public_key(), algorithms=['RS256'], verify=True
)
except jwt.InvalidTokenError as e:
raise JwtException('Signature is invalid. error="{}"'.format(str(e))) | Validate the JWT and return as a dict.
- JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and
returns it as a dict.
Args:
jwt_bu64: bytes
The JWT encoded using a a URL safe flavor of Base64.
cert_obj: cryptography.Certificate
Public certificate used for signing the JWT (typically the CN cert).
Raises:
JwtException: If validation fails.
Returns:
dict: Values embedded in the JWT. | entailment |
def log_jwt_dict_info(log, msg_str, jwt_dict):
"""Dump JWT to log.
Args:
log: Logger
Logger to which to write the message.
msg_str: str
A message to write to the log before the JWT values.
jwt_dict: dict
JWT containing values to log.
Returns:
None
"""
d = ts_to_str(jwt_dict)
# Log known items in specific order, then the rest just sorted
log_list = [(b, d.pop(a)) for a, b, c in CLAIM_LIST if a in d] + [
(k, d[k]) for k in sorted(d)
]
list(
map(
log,
['{}:'.format(msg_str)] + [' {}: {}'.format(k, v) for k, v in log_list],
)
) | Dump JWT to log.
Args:
log: Logger
Logger to which to write the message.
msg_str: str
A message to write to the log before the JWT values.
jwt_dict: dict
JWT containing values to log.
Returns:
None | entailment |
def ts_to_str(jwt_dict):
"""Convert timestamps in JWT to human readable dates.
Args:
jwt_dict: dict
JWT with some keys containing timestamps.
Returns:
dict: Copy of input dict where timestamps have been replaced with human readable
dates.
"""
d = ts_to_dt(jwt_dict)
for k, v in list(d.items()):
if isinstance(v, datetime.datetime):
d[k] = v.isoformat().replace('T', ' ')
return d | Convert timestamps in JWT to human readable dates.
Args:
jwt_dict: dict
JWT with some keys containing timestamps.
Returns:
dict: Copy of input dict where timestamps have been replaced with human readable
dates. | entailment |
def ts_to_dt(jwt_dict):
"""Convert timestamps in JWT to datetime objects.
Args:
jwt_dict: dict
JWT with some keys containing timestamps.
Returns:
dict: Copy of input dict where timestamps have been replaced with
datetime.datetime() objects.
"""
d = jwt_dict.copy()
for k, v in [v[:2] for v in CLAIM_LIST if v[2]]:
if k in jwt_dict:
d[k] = d1_common.date_time.dt_from_ts(jwt_dict[k])
return d | Convert timestamps in JWT to datetime objects.
Args:
jwt_dict: dict
JWT with some keys containing timestamps.
Returns:
dict: Copy of input dict where timestamps have been replaced with
datetime.datetime() objects. | entailment |
def encode_bu64(b):
"""Encode bytes to a URL safe flavor of Base64 used by JWTs.
- Reverse of decode_bu64().
Args:
b: bytes
Bytes to Base64 encode.
Returns:
bytes: URL safe Base64 encoded version of input.
"""
s = base64.standard_b64encode(b)
s = s.rstrip('=')
s = s.replace('+', '-')
s = s.replace('/', '_')
return s | Encode bytes to a URL safe flavor of Base64 used by JWTs.
- Reverse of decode_bu64().
Args:
b: bytes
Bytes to Base64 encode.
Returns:
bytes: URL safe Base64 encoded version of input. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.