sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def _get_node_path(self, node):
"""Return the path from the root to ``node`` as a list of node names."""
path = []
while node.up:
path.append(node.name)
node = node.up
return list(reversed(path)) | Return the path from the root to ``node`` as a list of node names. | entailment |
def _layout(self, node):
"""ETE calls this function to style each node before rendering.
- ETE terms:
- A Style is a specification for how to render the node itself
- A Face defines extra information that is rendered outside of the node
- Face objects are used here to provide more control on how to draw the nodes.
"""
def set_edge_style():
"""Set the style for edges and make the node invisible."""
node_style = ete3.NodeStyle()
node_style["vt_line_color"] = EDGE_COLOR
node_style["hz_line_color"] = EDGE_COLOR
node_style["vt_line_width"] = EDGE_WIDTH
node_style["hz_line_width"] = EDGE_WIDTH
node_style["size"] = 0
node.set_style(node_style)
def style_subject_node(color="Black"):
"""Specify the appearance of Subject nodes."""
face = ete3.TextFace(node.name, fsize=SUBJECT_NODE_FONT_SIZE, fgcolor=color)
set_face_margin(face)
node.add_face(face, column=0, position="branch-right")
def style_type_node(color="Black"):
"""Specify the appearance of Type nodes."""
face = ete3.CircleFace(
radius=TYPE_NODE_RADIUS,
color=TYPE_NODE_COLOR_DICT.get(node.name, "White"),
style="circle",
label={
"text": node.name,
"color": color,
"fontsize": (
TYPE_NODE_FONT_SIZE_FILE
if self._render_type == "file"
else TYPE_NODE_FONT_SIZE_BROWSE
),
},
)
set_face_margin(face)
node.add_face(face, column=0, position="branch-right")
def set_face_margin(face):
"""Add margins to Face object.
- Add space between inner_border and border on TextFace.
- Add space outside bounding area of CircleFace.
"""
face.margin_left = 5
face.margin_right = 5
# face.margin_top = 5
# face.margin_bottom = 5
set_edge_style()
if hasattr(node, SUBJECT_NODE_TAG):
style_subject_node()
elif hasattr(node, TYPE_NODE_TAG):
style_type_node()
else:
raise AssertionError("Unknown node type") | ETE calls this function to style each node before rendering.
- ETE terms:
- A Style is a specification for how to render the node itself
- A Face defines extra information that is rendered outside of the node
- Face objects are used here to provide more control on how to draw the nodes. | entailment |
def extend_settings(self, data_id, files, secrets):
"""Prevent processes requiring access to secrets from being run."""
process = Data.objects.get(pk=data_id).process
if process.requirements.get('resources', {}).get('secrets', False):
raise PermissionDenied(
"Process which requires access to secrets cannot be run using the local executor"
)
return super().extend_settings(data_id, files, secrets) | Prevent processes requiring access to secrets from being run. | entailment |
def get_finder(import_path):
"""Get a process finder."""
finder_class = import_string(import_path)
if not issubclass(finder_class, BaseProcessesFinder):
raise ImproperlyConfigured(
'Finder "{}" is not a subclass of "{}"'.format(finder_class, BaseProcessesFinder))
return finder_class() | Get a process finder. | entailment |
def _find_folders(self, folder_name):
"""Return a list of sub-directories."""
found_folders = []
for app_config in apps.get_app_configs():
folder_path = os.path.join(app_config.path, folder_name)
if os.path.isdir(folder_path):
found_folders.append(folder_path)
return found_folders | Return a list of sub-directories. | entailment |
def sanity(request, sysmeta_pyxb):
"""Check that sysmeta_pyxb is suitable for creating a new object and matches the
uploaded sciobj bytes."""
_does_not_contain_replica_sections(sysmeta_pyxb)
_is_not_archived(sysmeta_pyxb)
_obsoleted_by_not_specified(sysmeta_pyxb)
if 'HTTP_VENDOR_GMN_REMOTE_URL' in request.META:
return
_has_correct_file_size(request, sysmeta_pyxb)
_is_supported_checksum_algorithm(sysmeta_pyxb)
_is_correct_checksum(request, sysmeta_pyxb) | Check that sysmeta_pyxb is suitable for creating a new object and matches the
uploaded sciobj bytes. | entailment |
def is_valid_sid_for_new_standalone(sysmeta_pyxb):
"""Assert that any SID in ``sysmeta_pyxb`` can be assigned to a new standalone
object."""
sid = d1_common.xml.get_opt_val(sysmeta_pyxb, 'seriesId')
if not d1_gmn.app.did.is_valid_sid_for_new_standalone(sid):
raise d1_common.types.exceptions.IdentifierNotUnique(
0,
'Identifier is already in use as {}. did="{}"'.format(
d1_gmn.app.did.classify_identifier(sid), sid
),
identifier=sid,
) | Assert that any SID in ``sysmeta_pyxb`` can be assigned to a new standalone
object. | entailment |
def is_valid_sid_for_chain(pid, sid):
"""Assert that ``sid`` can be assigned to the single object ``pid`` or to the chain
to which ``pid`` belongs.
- If the chain does not have a SID, the new SID must be previously unused.
- If the chain already has a SID, the new SID must match the existing SID.
"""
if not d1_gmn.app.did.is_valid_sid_for_chain(pid, sid):
existing_sid = d1_gmn.app.revision.get_sid_by_pid(pid)
raise d1_common.types.exceptions.IdentifierNotUnique(
0,
'A different SID is already assigned to the revision chain to which '
'the object being created or updated belongs. A SID cannot be changed '
'once it has been assigned to a chain. '
'existing_sid="{}", new_sid="{}", pid="{}"'.format(existing_sid, sid, pid),
) | Assert that ``sid`` can be assigned to the single object ``pid`` or to the chain
to which ``pid`` belongs.
- If the chain does not have a SID, the new SID must be previously unused.
- If the chain already has a SID, the new SID must match the existing SID. | entailment |
def _does_not_contain_replica_sections(sysmeta_pyxb):
"""Assert that ``sysmeta_pyxb`` does not contain any replica information."""
if len(getattr(sysmeta_pyxb, 'replica', [])):
raise d1_common.types.exceptions.InvalidSystemMetadata(
0,
'A replica section was included. A new object object created via '
'create() or update() cannot already have replicas. pid="{}"'.format(
d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
),
identifier=d1_common.xml.get_req_val(sysmeta_pyxb.identifier),
) | Assert that ``sysmeta_pyxb`` does not contain any replica information. | entailment |
def _is_not_archived(sysmeta_pyxb):
"""Assert that ``sysmeta_pyxb`` does not have have the archived flag set."""
if _is_archived(sysmeta_pyxb):
raise d1_common.types.exceptions.InvalidSystemMetadata(
0,
'Archived flag was set. A new object created via create() or update() '
'cannot already be archived. pid="{}"'.format(
d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
),
identifier=d1_common.xml.get_req_val(sysmeta_pyxb.identifier),
) | Assert that ``sysmeta_pyxb`` does not have have the archived flag set. | entailment |
def get_d1_env_by_base_url(cn_base_url):
"""Given the BaseURL for a CN, return the DataONE environment dict for the CN's
environemnt."""
for k, v in D1_ENV_DICT:
if v['base_url'].startswith(cn_base_url):
return D1_ENV_DICT[k] | Given the BaseURL for a CN, return the DataONE environment dict for the CN's
environemnt. | entailment |
def match_all(d_SMEFT, parameters=None):
"""Match the SMEFT Warsaw basis onto the WET JMS basis."""
p = default_parameters.copy()
if parameters is not None:
# if parameters are passed in, overwrite the default values
p.update(parameters)
C = wilson.util.smeftutil.wcxf2arrays_symmetrized(d_SMEFT)
C['vT'] = 246.22
C_WET = match_all_array(C, p)
C_WET = wilson.translate.wet.rotate_down(C_WET, p)
C_WET = wetutil.unscale_dict_wet(C_WET)
d_WET = wilson.util.smeftutil.arrays2wcxf(C_WET)
basis = wcxf.Basis['WET', 'JMS']
keys = set(d_WET.keys()) & set(basis.all_wcs)
d_WET = {k: d_WET[k] for k in keys}
return d_WET | Match the SMEFT Warsaw basis onto the WET JMS basis. | entailment |
def _debug_mode_responses(self, request, response):
"""Extra functionality available in debug mode.
- If pretty printed output was requested, force the content type to text. This
causes the browser to not try to format the output in any way.
- If SQL profiling is turned on, return a page with SQL query timing
information instead of the actual response.
"""
if django.conf.settings.DEBUG_GMN:
if 'pretty' in request.GET:
response['Content-Type'] = d1_common.const.CONTENT_TYPE_TEXT
if (
'HTTP_VENDOR_PROFILE_SQL' in request.META
or django.conf.settings.DEBUG_PROFILE_SQL
):
response_list = []
for query in django.db.connection.queries:
response_list.append('{}\n{}'.format(query['time'], query['sql']))
return django.http.HttpResponse(
'\n\n'.join(response_list), d1_common.const.CONTENT_TYPE_TEXT
)
return response | Extra functionality available in debug mode.
- If pretty printed output was requested, force the content type to text. This
causes the browser to not try to format the output in any way.
- If SQL profiling is turned on, return a page with SQL query timing
information instead of the actual response. | entailment |
def queryResponse(
self, queryEngine, query_str, vendorSpecific=None, do_post=False, **kwargs
):
"""CNRead.query(session, queryEngine, query) → OctetStream
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNRead.query MNQuery.query(session,
queryEngine, query) → OctetStream http://jenkins.
-1.dataone.org/jenkins/job/API%20Documentation%20-%20trunk/ws/api-
documentation/build/html/apis/MN_APIs.html#MNQuery.query.
Args:
queryEngine:
query_str:
vendorSpecific:
do_post:
**kwargs:
Returns:
"""
self._log.debug(
'Solr query: {}'.format(
', '.join(['{}={}'.format(k, v) for (k, v) in list(locals().items())])
)
)
return (self.POST if do_post else self.GET)(
['query', queryEngine, query_str], headers=vendorSpecific, **kwargs
) | CNRead.query(session, queryEngine, query) → OctetStream
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNRead.query MNQuery.query(session,
queryEngine, query) → OctetStream http://jenkins.
-1.dataone.org/jenkins/job/API%20Documentation%20-%20trunk/ws/api-
documentation/build/html/apis/MN_APIs.html#MNQuery.query.
Args:
queryEngine:
query_str:
vendorSpecific:
do_post:
**kwargs:
Returns: | entailment |
def query(
self, queryEngine, query_str, vendorSpecific=None, do_post=False, **kwargs
):
"""See Also: queryResponse()
Args:
queryEngine:
query_str:
vendorSpecific:
do_post:
**kwargs:
Returns:
"""
response = self.queryResponse(
queryEngine, query_str, vendorSpecific, do_post, **kwargs
)
if self._content_type_is_json(response):
return self._read_json_response(response)
else:
return self._read_stream_response(response) | See Also: queryResponse()
Args:
queryEngine:
query_str:
vendorSpecific:
do_post:
**kwargs:
Returns: | entailment |
def getQueryEngineDescriptionResponse(self, queryEngine, vendorSpecific=None, **kwargs):
"""CNRead.getQueryEngineDescription(session, queryEngine) →
QueryEngineDescription https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNRead.getQueryEngineDescription
MNQuery.getQueryEngineDescription(session, queryEngine) → QueryEngineDescription
http://jenkins-1.dataone.org/jenkins/job/API%20D ocumentation%20-%20trunk/ws.
/api-documentation/build/html/apis/MN_APIs.h
tml#MNQuery.getQueryEngineDescription.
Args:
queryEngine:
**kwargs:
Returns:
"""
return self.GET(['query', queryEngine], query=kwargs, headers=vendorSpecific) | CNRead.getQueryEngineDescription(session, queryEngine) →
QueryEngineDescription https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNRead.getQueryEngineDescription
MNQuery.getQueryEngineDescription(session, queryEngine) → QueryEngineDescription
http://jenkins-1.dataone.org/jenkins/job/API%20D ocumentation%20-%20trunk/ws.
/api-documentation/build/html/apis/MN_APIs.h
tml#MNQuery.getQueryEngineDescription.
Args:
queryEngine:
**kwargs:
Returns: | entailment |
def getQueryEngineDescription(self, queryEngine, **kwargs):
"""See Also: getQueryEngineDescriptionResponse()
Args:
queryEngine:
**kwargs:
Returns:
"""
response = self.getQueryEngineDescriptionResponse(queryEngine, **kwargs)
return self._read_dataone_type_response(response, 'QueryEngineDescription') | See Also: getQueryEngineDescriptionResponse()
Args:
queryEngine:
**kwargs:
Returns: | entailment |
def _get_cache_key(self, obj):
"""Derive cache key for given object."""
if obj is not None:
# Make sure that key is REALLY unique.
return '{}-{}'.format(id(self), obj.pk)
return "{}-None".format(id(self)) | Derive cache key for given object. | entailment |
def set(self, obj, build_kwargs):
"""Set cached value."""
if build_kwargs is None:
build_kwargs = {}
cached = {}
if 'queryset' in build_kwargs:
cached = {
'model': build_kwargs['queryset'].model,
'pks': list(build_kwargs['queryset'].values_list('pk', flat=True)),
}
elif 'obj' in build_kwargs:
cached = {
'obj': build_kwargs['obj'],
}
if not hasattr(self._thread_local, 'cache'):
self._thread_local.cache = {}
self._thread_local.cache[self._get_cache_key(obj)] = cached | Set cached value. | entailment |
def take(self, obj):
"""Get cached value and clean cache."""
cached = self._thread_local.cache[self._get_cache_key(obj)]
build_kwargs = {}
if 'model' in cached and 'pks' in cached:
build_kwargs['queryset'] = cached['model'].objects.filter(pk__in=cached['pks'])
elif 'obj' in cached:
if cached['obj'].__class__.objects.filter(pk=cached['obj'].pk).exists():
build_kwargs['obj'] = cached['obj']
else:
# Object was deleted in the meantime.
build_kwargs['queryset'] = cached['obj'].__class__.objects.none()
self._clean_cache(obj)
return build_kwargs | Get cached value and clean cache. | entailment |
def connect(self, signal, **kwargs):
"""Connect a specific signal type to this receiver."""
signal.connect(self, **kwargs)
self.connections.append((signal, kwargs)) | Connect a specific signal type to this receiver. | entailment |
def disconnect(self):
"""Disconnect all connected signal types from this receiver."""
for signal, kwargs in self.connections:
signal.disconnect(self, **kwargs) | Disconnect all connected signal types from this receiver. | entailment |
def connect(self, index):
"""Connect signals needed for dependency updates.
Pre- and post-delete signals have to be handled separately, as:
* in the pre-delete signal we have the information which
objects to rebuild, but affected relations are still
presented, so rebuild would reflect in the wrong (outdated)
indices
* in the post-delete signal indices can be rebuild corectly,
but there is no information which objects to rebuild, as
affected relations were already deleted
To bypass this, list of objects should be stored in the
pre-delete signal indexing should be triggered in the
post-delete signal.
"""
self.index = index
signal = ElasticSignal(self, 'process', pass_kwargs=True)
signal.connect(post_save, sender=self.model)
signal.connect(pre_delete, sender=self.model)
pre_delete_signal = ElasticSignal(self, 'process_predelete', pass_kwargs=True)
pre_delete_signal.connect(pre_delete, sender=self.model)
post_delete_signal = ElasticSignal(self, 'process_delete', pass_kwargs=True)
post_delete_signal.connect(post_delete, sender=self.model)
return [signal, pre_delete_signal, post_delete_signal] | Connect signals needed for dependency updates.
Pre- and post-delete signals have to be handled separately, as:
* in the pre-delete signal we have the information which
objects to rebuild, but affected relations are still
presented, so rebuild would reflect in the wrong (outdated)
indices
* in the post-delete signal indices can be rebuild corectly,
but there is no information which objects to rebuild, as
affected relations were already deleted
To bypass this, list of objects should be stored in the
pre-delete signal indexing should be triggered in the
post-delete signal. | entailment |
def connect(self, index):
"""Connect signals needed for dependency updates."""
# Determine which model is the target model as either side of the relation
# may be passed as `field`.
if index.object_type == self.field.rel.model:
self.model = self.field.rel.related_model
self.accessor = self.field.rel.field.attname
else:
self.model = self.field.rel.model
if self.field.rel.symmetrical:
# Symmetrical m2m relation on self has no reverse accessor.
raise NotImplementedError(
'Dependencies on symmetrical M2M relations are not supported due '
'to strange handling of the m2m_changed signal which only makes '
'half of the relation visible during signal execution. For now you '
'need to use symmetrical=False on the M2M field definition.'
)
else:
self.accessor = self.field.rel.get_accessor_name()
# Connect signals.
signals = super().connect(index)
m2m_signal = ElasticSignal(self, 'process_m2m', pass_kwargs=True)
m2m_signal.connect(m2m_changed, sender=self.field.through)
signals.append(m2m_signal)
# If the relation has a custom through model, we need to subscribe to it.
if not self.field.rel.through._meta.auto_created: # pylint: disable=protected-access
signal = ElasticSignal(self, 'process_m2m_through_save', pass_kwargs=True)
signal.connect(post_save, sender=self.field.rel.through)
signals.append(signal)
signal = ElasticSignal(self, 'process_m2m_through_pre_delete', pass_kwargs=True)
signal.connect(pre_delete, sender=self.field.rel.through)
signals.append(signal)
signal = ElasticSignal(self, 'process_m2m_through_post_delete', pass_kwargs=True)
signal.connect(post_delete, sender=self.field.rel.through)
signals.append(signal)
return signals | Connect signals needed for dependency updates. | entailment |
def _filter(self, objects, **kwargs):
"""Determine if dependent object should be processed."""
for obj in objects:
if self.filter(obj, **kwargs) is False:
return False
return True | Determine if dependent object should be processed. | entailment |
def _get_build_kwargs(self, obj, pk_set=None, action=None, update_fields=None, reverse=None, **kwargs):
"""Prepare arguments for rebuilding indices."""
if action is None:
# Check filter before rebuilding index.
if not self._filter([obj], update_fields=update_fields):
return
queryset = getattr(obj, self.accessor).all()
# Special handling for relations to self.
if self.field.rel.model == self.field.rel.related_model:
queryset = queryset.union(getattr(obj, self.field.rel.get_accessor_name()).all())
return {'queryset': queryset}
else:
# Update to relation itself, only update the object in question.
if self.field.rel.model == self.field.rel.related_model:
# Special case, self-reference, update both ends of the relation.
pks = set()
if self._filter(self.model.objects.filter(pk__in=pk_set)):
pks.add(obj.pk)
if self._filter(self.model.objects.filter(pk__in=[obj.pk])):
pks.update(pk_set)
return {'queryset': self.index.object_type.objects.filter(pk__in=pks)}
elif isinstance(obj, self.model):
# Need to switch the role of object and pk_set.
result = {'queryset': self.index.object_type.objects.filter(pk__in=pk_set)}
pk_set = {obj.pk}
else:
result = {'obj': obj}
if action != 'post_clear':
# Check filter before rebuilding index.
if not self._filter(self.model.objects.filter(pk__in=pk_set)):
return
return result | Prepare arguments for rebuilding indices. | entailment |
def process_predelete(self, obj, pk_set=None, action=None, update_fields=None, **kwargs):
"""Render the queryset of influenced objects and cache it."""
build_kwargs = self._get_build_kwargs(obj, pk_set, action, update_fields, **kwargs)
self.delete_cache.set(obj, build_kwargs) | Render the queryset of influenced objects and cache it. | entailment |
def process_delete(self, obj, pk_set=None, action=None, update_fields=None, **kwargs):
"""Recreate queryset from the index and rebuild the index."""
build_kwargs = self.delete_cache.take(obj)
if build_kwargs:
self.index.build(**build_kwargs) | Recreate queryset from the index and rebuild the index. | entailment |
def process_m2m(self, obj, pk_set=None, action=None, update_fields=None, cache_key=None, **kwargs):
"""Process signals from dependencies.
Remove signal is processed in two parts. For details see:
:func:`~Dependency.connect`
"""
if action not in (None, 'post_add', 'pre_remove', 'post_remove', 'post_clear'):
return
if action == 'post_remove':
build_kwargs = self.remove_cache.take(cache_key)
else:
build_kwargs = self._get_build_kwargs(obj, pk_set, action, update_fields, **kwargs)
if action == 'pre_remove':
self.remove_cache.set(cache_key, build_kwargs)
return
if build_kwargs:
self.index.build(**build_kwargs) | Process signals from dependencies.
Remove signal is processed in two parts. For details see:
:func:`~Dependency.connect` | entailment |
def _process_m2m_through(self, obj, action):
"""Process custom M2M through model actions."""
source = getattr(obj, self.field.rel.field.m2m_field_name())
target = getattr(obj, self.field.rel.field.m2m_reverse_field_name())
pk_set = set()
if target:
pk_set.add(target.pk)
self.process_m2m(source, pk_set, action=action, reverse=False, cache_key=obj) | Process custom M2M through model actions. | entailment |
def process_m2m_through_save(self, obj, created=False, **kwargs):
"""Process M2M post save for custom through model."""
# We are only interested in signals that establish relations.
if not created:
return
self._process_m2m_through(obj, 'post_add') | Process M2M post save for custom through model. | entailment |
def process(self, obj, pk_set=None, action=None, update_fields=None, **kwargs):
"""Process signals from dependencies."""
build_kwargs = self._get_build_kwargs(obj, pk_set, action, update_fields, **kwargs)
if build_kwargs:
self.index.build(**build_kwargs) | Process signals from dependencies. | entailment |
def _connect_signal(self, index):
"""Create signals for building indexes."""
post_save_signal = ElasticSignal(index, 'build')
post_save_signal.connect(post_save, sender=index.object_type)
self.signals.append(post_save_signal)
post_delete_signal = ElasticSignal(index, 'remove_object')
post_delete_signal.connect(post_delete, sender=index.object_type)
self.signals.append(post_delete_signal)
# Connect signals for all dependencies.
for dependency in index.get_dependencies():
# Automatically convert m2m fields to dependencies.
if isinstance(dependency, (models.ManyToManyField, ManyToManyDescriptor)):
dependency = ManyToManyDependency(dependency)
elif not isinstance(dependency, Dependency):
raise TypeError("Unsupported dependency type: {}".format(repr(dependency)))
signal = dependency.connect(index)
self.signals.extend(signal) | Create signals for building indexes. | entailment |
def register_signals(self):
"""Register signals for all indexes."""
for index in self.indexes:
if index.object_type:
self._connect_signal(index) | Register signals for all indexes. | entailment |
def discover_indexes(self):
"""Save list of index builders into ``_index_builders``."""
self.indexes = []
for app_config in apps.get_app_configs():
indexes_path = '{}.elastic_indexes'.format(app_config.name)
try:
indexes_module = import_module(indexes_path)
for attr_name in dir(indexes_module):
attr = getattr(indexes_module, attr_name)
if inspect.isclass(attr) and issubclass(attr, BaseIndex) and attr is not BaseIndex:
# Make sure that parallel tests have different indices.
if is_testing():
index = attr.document_class._index._name # pylint: disable=protected-access
testing_postfix = '_test_{}_{}'.format(TESTING_UUID, os.getpid())
if not index.endswith(testing_postfix):
# Replace current postfix with the new one.
if attr.testing_postfix:
index = index[:-len(attr.testing_postfix)]
index = index + testing_postfix
attr.testing_postfix = testing_postfix
attr.document_class._index._name = index # pylint: disable=protected-access
index = attr()
# Apply any extensions defined for the given index. Currently index extensions are
# limited to extending "mappings".
for extension in composer.get_extensions(attr):
mapping = getattr(extension, 'mapping', {})
index.mapping.update(mapping)
self.indexes.append(index)
except ImportError as ex:
if not re.match('No module named .*elastic_indexes.*', str(ex)):
raise | Save list of index builders into ``_index_builders``. | entailment |
def build(self, obj=None, queryset=None, push=True):
"""Trigger building of the indexes.
Support passing ``obj`` parameter to the indexes, so we can
trigger build only for one object.
"""
for index in self.indexes:
index.build(obj, queryset, push) | Trigger building of the indexes.
Support passing ``obj`` parameter to the indexes, so we can
trigger build only for one object. | entailment |
def push(self, index=None):
"""Push built documents to ElasticSearch.
If ``index`` is specified, only that index will be pushed.
"""
for ind in self.indexes:
if index and not isinstance(ind, index):
continue
ind.push() | Push built documents to ElasticSearch.
If ``index`` is specified, only that index will be pushed. | entailment |
def delete(self, skip_mapping=False):
"""Delete all entries from ElasticSearch."""
for index in self.indexes:
index.destroy()
if not skip_mapping:
index.create_mapping() | Delete all entries from ElasticSearch. | entailment |
def destroy(self):
"""Delete all indexes from Elasticsearch and index builder."""
self.unregister_signals()
for index in self.indexes:
index.destroy()
self.indexes = [] | Delete all indexes from Elasticsearch and index builder. | entailment |
def _set_initial(self, C_in, scale_in):
r"""Set the initial values for parameters and Wilson coefficients at
the scale `scale_in`."""
self.C_in = C_in
self.scale_in = scale_in | r"""Set the initial values for parameters and Wilson coefficients at
the scale `scale_in`. | entailment |
def _set_initial_wcxf(self, wc, get_smpar=True):
"""Load the initial values for Wilson coefficients from a
wcxf.WC instance.
Parameters:
- `get_smpar`: boolean, optional, defaults to True. If True, an attempt
is made to determine the SM parameters from the requirement of
reproducing the correct SM masses and mixings at the electroweak
scale. As approximations are involved, the result might or might not
be reliable, depending on the size of the Wilson coefficients
affecting the SM masses and mixings. If False, Standard Model
parameters have to be provided separately and are assumed to be in
the weak basis used for the Warsaw basis as defined in WCxf,
i.e. in the basis where the down-type and charged lepton mass
matrices are diagonal.
"""
if wc.eft != 'SMEFT':
raise ValueError("Wilson coefficients use wrong EFT.")
if wc.basis != 'Warsaw':
raise ValueError("Wilson coefficients use wrong basis.")
self.scale_in = wc.scale
C = wilson.util.smeftutil.wcxf2arrays_symmetrized(wc.dict)
# fill in zeros for missing WCs
for k, s in smeftutil.C_keys_shape.items():
if k not in C and k not in smeftutil.SM_keys:
if s == 1:
C[k] = 0
else:
C[k] = np.zeros(s)
if self.C_in is None:
self.C_in = C
else:
self.C_in.update(C)
if get_smpar:
self.C_in.update(self._get_sm_scale_in()) | Load the initial values for Wilson coefficients from a
wcxf.WC instance.
Parameters:
- `get_smpar`: boolean, optional, defaults to True. If True, an attempt
is made to determine the SM parameters from the requirement of
reproducing the correct SM masses and mixings at the electroweak
scale. As approximations are involved, the result might or might not
be reliable, depending on the size of the Wilson coefficients
affecting the SM masses and mixings. If False, Standard Model
parameters have to be provided separately and are assumed to be in
the weak basis used for the Warsaw basis as defined in WCxf,
i.e. in the basis where the down-type and charged lepton mass
matrices are diagonal. | entailment |
def _to_wcxf(self, C_out, scale_out):
"""Return the Wilson coefficients `C_out` as a wcxf.WC instance.
Note that the Wilson coefficients are rotated into the Warsaw basis
as defined in WCxf, i.e. to the basis where the down-type and charged
lepton mass matrices are diagonal."""
C = self._rotate_defaultbasis(C_out)
d = wilson.util.smeftutil.arrays2wcxf_nonred(C)
basis = wcxf.Basis['SMEFT', 'Warsaw']
all_wcs = set(basis.all_wcs) # to speed up lookup
d = {k: v for k, v in d.items() if k in all_wcs and v != 0}
d = wcxf.WC.dict2values(d)
wc = wcxf.WC('SMEFT', 'Warsaw', scale_out, d)
return wc | Return the Wilson coefficients `C_out` as a wcxf.WC instance.
Note that the Wilson coefficients are rotated into the Warsaw basis
as defined in WCxf, i.e. to the basis where the down-type and charged
lepton mass matrices are diagonal. | entailment |
def _rgevolve(self, scale_out, **kwargs):
"""Solve the SMEFT RGEs from the initial scale to `scale_out`.
Returns a dictionary with parameters and Wilson coefficients at
`scale_out`. Additional keyword arguments will be passed to
the ODE solver `scipy.integrate.odeint`."""
self._check_initial()
return rge.smeft_evolve(C_in=self.C_in,
scale_in=self.scale_in,
scale_out=scale_out,
**kwargs) | Solve the SMEFT RGEs from the initial scale to `scale_out`.
Returns a dictionary with parameters and Wilson coefficients at
`scale_out`. Additional keyword arguments will be passed to
the ODE solver `scipy.integrate.odeint`. | entailment |
def _rgevolve_leadinglog(self, scale_out):
"""Compute the leading logarithmic approximation to the solution
of the SMEFT RGEs from the initial scale to `scale_out`.
Returns a dictionary with parameters and Wilson coefficients.
Much faster but less precise that `rgevolve`.
"""
self._check_initial()
return rge.smeft_evolve_leadinglog(C_in=self.C_in,
scale_in=self.scale_in,
scale_out=scale_out) | Compute the leading logarithmic approximation to the solution
of the SMEFT RGEs from the initial scale to `scale_out`.
Returns a dictionary with parameters and Wilson coefficients.
Much faster but less precise that `rgevolve`. | entailment |
def _run_sm_scale_in(self, C_out, scale_sm=91.1876):
"""Get the SM parameters at the EW scale, using an estimate `C_out`
of the Wilson coefficients at that scale, and run them to the
input scale."""
# initialize an empty SMEFT instance
smeft_sm = SMEFT(wc=None)
C_in_sm = smeftutil.C_array2dict(np.zeros(9999))
# set the SM parameters to the values obtained from smpar.smeftpar
C_SM = smpar.smeftpar(scale_sm, C_out, basis='Warsaw')
SM_keys = set(smeftutil.SM_keys) # to speed up lookup
C_SM = {k: v for k, v in C_SM.items() if k in SM_keys}
# set the Wilson coefficients at the EW scale to C_out
C_in_sm.update(C_out)
C_in_sm.update(C_SM)
smeft_sm._set_initial(C_in_sm, scale_sm)
# run up (with 1% relative precision, ignore running of Wilson coefficients)
C_SM_high = smeft_sm._rgevolve(self.scale_in, newphys=False, rtol=0.001, atol=1)
C_SM_high = self._rotate_defaultbasis(C_SM_high)
return {k: v for k, v in C_SM_high.items() if k in SM_keys} | Get the SM parameters at the EW scale, using an estimate `C_out`
of the Wilson coefficients at that scale, and run them to the
input scale. | entailment |
def get_smpar(self, accuracy='integrate', scale_sm=91.1876):
"""Compute the SM MS-bar parameters at the electroweak scale.
This method can be used to validate the accuracy of the iterative
extraction of SM parameters. If successful, the values returned by this
method should agree with the values in the dictionary
`wilson.run.smeft.smpar.p`."""
if accuracy == 'integrate':
C_out = self._rgevolve(scale_sm)
elif accuracy == 'leadinglog':
C_out = self._rgevolve_leadinglog(scale_sm)
else:
raise ValueError("'{}' is not a valid value of 'accuracy' (must be either 'integrate' or 'leadinglog').".format(accuracy))
return smpar.smpar(C_out) | Compute the SM MS-bar parameters at the electroweak scale.
This method can be used to validate the accuracy of the iterative
extraction of SM parameters. If successful, the values returned by this
method should agree with the values in the dictionary
`wilson.run.smeft.smpar.p`. | entailment |
def _get_sm_scale_in(self, scale_sm=91.1876):
"""Get an estimate of the SM parameters at the input scale by running
them from the EW scale using constant values for the Wilson coefficients
(corresponding to their leading log approximated values at the EW
scale).
Note that this is not guaranteed to work and will fail if some of the
Wilson coefficients (the ones affecting the extraction of SM parameters)
are large."""
# intialize a copy of ourselves
_smeft = SMEFT(self.wc, get_smpar=False)
# Step 1: run the SM up, using the WCs at scale_input as (constant) estimate
_smeft.C_in.update(self._run_sm_scale_in(self.C_in, scale_sm=scale_sm))
# Step 2: run the WCs down in LL approximation
C_out = _smeft._rgevolve_leadinglog(scale_sm)
# Step 3: run the SM up again, this time using the WCs at scale_sm as (constant) estimate
return self._run_sm_scale_in(C_out, scale_sm=scale_sm) | Get an estimate of the SM parameters at the input scale by running
them from the EW scale using constant values for the Wilson coefficients
(corresponding to their leading log approximated values at the EW
scale).
Note that this is not guaranteed to work and will fail if some of the
Wilson coefficients (the ones affecting the extraction of SM parameters)
are large. | entailment |
def run(self, scale, accuracy='integrate', **kwargs):
"""Return the Wilson coefficients (as wcxf.WC instance) evolved to the
scale `scale`.
Parameters:
- `scale`: scale in GeV
- accuracy: whether to use the numerical solution to the RGE
('integrate', the default, slow but precise) or the leading logarithmic
approximation ('leadinglog', approximate but much faster).
"""
if accuracy == 'integrate':
C_out = self._rgevolve(scale, **kwargs)
elif accuracy == 'leadinglog':
C_out = self._rgevolve_leadinglog(scale)
else:
raise ValueError("'{}' is not a valid value of 'accuracy' (must be either 'integrate' or 'leadinglog').".format(accuracy))
return self._to_wcxf(C_out, scale) | Return the Wilson coefficients (as wcxf.WC instance) evolved to the
scale `scale`.
Parameters:
- `scale`: scale in GeV
- accuracy: whether to use the numerical solution to the RGE
('integrate', the default, slow but precise) or the leading logarithmic
approximation ('leadinglog', approximate but much faster). | entailment |
def run_continuous(self, scale):
"""Return a continuous solution to the RGE as `RGsolution` instance."""
if scale == self.scale_in:
raise ValueError("The scale must be different from the input scale")
elif scale < self.scale_in:
scale_min = scale
scale_max = self.scale_in
elif scale > self.scale_in:
scale_max = scale
scale_min = self.scale_in
fun = rge.smeft_evolve_continuous(C_in=self.C_in,
scale_in=self.scale_in,
scale_out=scale)
return wilson.classes.RGsolution(fun, scale_min, scale_max) | Return a continuous solution to the RGE as `RGsolution` instance. | entailment |
def deconstruct(self):
"""Deconstruct method."""
name, path, args, kwargs = super().deconstruct()
if self.populate_from is not None:
kwargs['populate_from'] = self.populate_from
if self.unique_with != ():
kwargs['unique_with'] = self.unique_with
kwargs.pop('unique', None)
return name, path, args, kwargs | Deconstruct method. | entailment |
def _get_unique_constraints(self, instance):
"""Return SQL filter for filtering by fields in ``unique_with`` attribute.
Filter is returned as tuple of two elements where first one is
placeholder which is safe to insert into SQL query and second
one may include potentially dangerous values and must be passed
to SQL query in ``params`` attribute to make sure it is properly
escaped.
"""
constraints_expression = []
constraints_values = {}
for field_name in self.unique_with:
if constants.LOOKUP_SEP in field_name:
raise NotImplementedError(
'`unique_with` constraint does not support lookups by related models.'
)
field = instance._meta.get_field(field_name) # pylint: disable=protected-access
field_value = getattr(instance, field_name)
# Convert value to the database representation.
field_db_value = field.get_prep_value(field_value)
constraint_key = 'unique_' + field_name
constraints_expression.append("{} = %({})s".format(
connection.ops.quote_name(field.column),
constraint_key
))
constraints_values[constraint_key] = field_db_value
if not constraints_expression:
return '', []
constraints_expression = 'AND ' + ' AND '.join(constraints_expression)
return constraints_expression, constraints_values | Return SQL filter for filtering by fields in ``unique_with`` attribute.
Filter is returned as tuple of two elements where first one is
placeholder which is safe to insert into SQL query and second
one may include potentially dangerous values and must be passed
to SQL query in ``params`` attribute to make sure it is properly
escaped. | entailment |
def _get_populate_from_value(self, instance):
"""Get the value from ``populate_from`` attribute."""
if hasattr(self.populate_from, '__call__'):
# ResolweSlugField(populate_from=lambda instance: ...)
return self.populate_from(instance)
else:
# ResolweSlugField(populate_from='foo')
attr = getattr(instance, self.populate_from)
return attr() if callable(attr) else attr | Get the value from ``populate_from`` attribute. | entailment |
def pre_save(self, instance, add):
"""Ensure slug uniqunes before save."""
slug = self.value_from_object(instance)
# We don't want to change slug defined by user.
predefined_slug = bool(slug)
if not slug and self.populate_from:
slug = self._get_populate_from_value(instance)
if slug:
slug = slugify(slug)
if not slug:
slug = None
if not self.blank:
slug = instance._meta.model_name # pylint: disable=protected-access
elif not self.null:
slug = ''
if slug:
# Make sure that auto generated slug with added sequence
# won't excede maximal length.
# Validation of predefined slugs is handled by Django.
if not predefined_slug:
slug = slug[:(self.max_length - MAX_SLUG_SEQUENCE_DIGITS - 1)]
constraints_placeholder, constraints_values = self._get_unique_constraints(instance)
instance_pk_name = instance._meta.pk.name # pylint: disable=protected-access
# Safe values - make sure that there is no chance of SQL injection.
query_params = {
'constraints_placeholder': constraints_placeholder,
'slug_column': connection.ops.quote_name(self.column),
'slug_len': len(slug),
'table_name': connection.ops.quote_name(self.model._meta.db_table), # pylint: disable=protected-access
'pk_neq_placeholder': 'AND {} != %(instance_pk)s'.format(instance_pk_name) if instance.pk else ''
}
# SQL injection unsafe values - will be escaped.
# Keys prefixed with `unique_` are reserved for `constraints_values` dict.
query_escape_params = {
'slug': slug,
'slug_regex': '^{}(-[0-9]*)?$'.format(slug),
}
query_escape_params.update(constraints_values)
if instance.pk:
query_escape_params['instance_pk'] = instance.pk
with connection.cursor() as cursor:
# TODO: Slowest part of this query is `MAX` function. It can
# be optimized by indexing slug column by slug sequence.
# https://www.postgresql.org/docs/9.4/static/indexes-expressional.html
cursor.execute(
"""
SELECT
CASE
WHEN (
EXISTS(
SELECT 1 FROM {table_name} WHERE (
{slug_column} = %(slug)s
{pk_neq_placeholder}
{constraints_placeholder}
)
)
) THEN MAX(slug_sequence) + 1
ELSE NULL
END
FROM (
SELECT COALESCE(
NULLIF(
RIGHT({slug_column}, -{slug_len}-1),
''
),
'1'
)::text::integer AS slug_sequence
FROM {table_name} WHERE (
{slug_column} ~ %(slug_regex)s
{pk_neq_placeholder}
{constraints_placeholder}
)
) AS tmp
""".format(**query_params),
params=query_escape_params
)
result = cursor.fetchone()[0]
if result is not None:
if predefined_slug:
raise SlugError(
"Slug '{}' (version {}) is already taken.".format(slug, instance.version)
)
if len(str(result)) > MAX_SLUG_SEQUENCE_DIGITS:
raise SlugError(
"Auto-generated slug sequence too long - please choose a different slug."
)
slug = '{}-{}'.format(slug, result)
# Make the updated slug available as instance attribute.
setattr(instance, self.name, slug)
return slug | Ensure slug uniqunes before save. | entailment |
def discover_process(self, path):
"""Perform process discovery in given path.
This method will be called during process registration and
should return a list of dictionaries with discovered process
schemas.
"""
if not path.lower().endswith(('.yml', '.yaml')):
return []
with open(path) as fn:
schemas = yaml.load(fn, Loader=yaml.FullLoader)
if not schemas:
# TODO: Logger.
# self.stderr.write("Could not read YAML file {}".format(schema_file))
return []
process_schemas = []
for schema in schemas:
if 'run' not in schema:
continue
# NOTE: This currently assumes that 'bash' is the default.
if schema['run'].get('language', 'bash') != 'workflow':
continue
process_schemas.append(schema)
return process_schemas | Perform process discovery in given path.
This method will be called during process registration and
should return a list of dictionaries with discovered process
schemas. | entailment |
def _evaluate_expressions(self, expression_engine, step_id, values, context):
"""Recursively evaluate expressions in a dictionary of values."""
if expression_engine is None:
return values
processed = {}
for name, value in values.items():
if isinstance(value, str):
value = value.strip()
try:
expression = expression_engine.get_inline_expression(value)
if expression is not None:
# Inline expression.
value = expression_engine.evaluate_inline(expression, context)
else:
# Block expression.
value = expression_engine.evaluate_block(value, context)
except EvaluationError as error:
raise ExecutionError('Error while evaluating expression for step "{}":\n{}'.format(
step_id, error
))
elif isinstance(value, dict):
value = self._evaluate_expressions(expression_engine, step_id, value, context)
processed[name] = value
return processed | Recursively evaluate expressions in a dictionary of values. | entailment |
def evaluate(self, data):
"""Evaluate the code needed to compute a given Data object."""
expression_engine = data.process.requirements.get('expression-engine', None)
if expression_engine is not None:
expression_engine = self.get_expression_engine(expression_engine)
# Parse steps.
steps = data.process.run.get('program', None)
if steps is None:
return
if not isinstance(steps, list):
raise ExecutionError('Workflow program must be a list of steps.')
# Expression engine evaluation context.
context = {
'input': data.input,
'steps': collections.OrderedDict(),
}
for index, step in enumerate(steps):
try:
step_id = step['id']
step_slug = step['run']
except KeyError as error:
raise ExecutionError('Incorrect definition of step "{}", missing property "{}".'.format(
step.get('id', index), error
))
# Fetch target process.
process = Process.objects.filter(slug=step_slug).order_by('-version').first()
if not process:
raise ExecutionError('Incorrect definition of step "{}", invalid process "{}".'.format(
step_id, step_slug
))
# Process all input variables.
step_input = step.get('input', {})
if not isinstance(step_input, dict):
raise ExecutionError('Incorrect definition of step "{}", input must be a dictionary.'.format(
step_id
))
data_input = self._evaluate_expressions(expression_engine, step_id, step_input, context)
# Create the data object.
data_object = Data.objects.create(
process=process,
contributor=data.contributor,
tags=data.tags,
input=data_input,
)
DataDependency.objects.create(
parent=data,
child=data_object,
kind=DataDependency.KIND_SUBPROCESS,
)
# Copy permissions.
copy_permissions(data, data_object)
# Copy collections.
for collection in data.collection_set.all():
collection.data.add(data_object)
context['steps'][step_id] = data_object.pk
# Immediately set our status to done and output all data object identifiers.
data.output = {
'steps': list(context['steps'].values()),
}
data.status = Data.STATUS_DONE | Evaluate the code needed to compute a given Data object. | entailment |
async def init():
"""Create a connection to the Redis server."""
global redis_conn # pylint: disable=global-statement,invalid-name
conn = await aioredis.create_connection(
'redis://{}:{}'.format(
SETTINGS.get('FLOW_EXECUTOR', {}).get('REDIS_CONNECTION', {}).get('host', 'localhost'),
SETTINGS.get('FLOW_EXECUTOR', {}).get('REDIS_CONNECTION', {}).get('port', 56379)
),
db=int(SETTINGS.get('FLOW_EXECUTOR', {}).get('REDIS_CONNECTION', {}).get('db', 1))
)
redis_conn = aioredis.Redis(conn) | Create a connection to the Redis server. | entailment |
async def send_manager_command(cmd, expect_reply=True, extra_fields={}):
"""Send a properly formatted command to the manager.
:param cmd: The command to send (:class:`str`).
:param expect_reply: If ``True``, wait for the manager to reply
with an acknowledgement packet.
:param extra_fields: A dictionary of extra information that's
merged into the packet body (i.e. not under an extra key).
"""
packet = {
ExecutorProtocol.DATA_ID: DATA['id'],
ExecutorProtocol.COMMAND: cmd,
}
packet.update(extra_fields)
logger.debug("Sending command to listener: {}".format(json.dumps(packet)))
# TODO what happens here if the push fails? we don't have any realistic recourse,
# so just let it explode and stop processing
queue_channel = EXECUTOR_SETTINGS['REDIS_CHANNEL_PAIR'][0]
try:
await redis_conn.rpush(queue_channel, json.dumps(packet))
except Exception:
logger.error("Error sending command to manager:\n\n{}".format(traceback.format_exc()))
raise
if not expect_reply:
return
for _ in range(_REDIS_RETRIES):
response = await redis_conn.blpop(QUEUE_RESPONSE_CHANNEL, timeout=1)
if response:
break
else:
# NOTE: If there's still no response after a few seconds, the system is broken
# enough that it makes sense to give up; we're isolated here, so if the manager
# doesn't respond, we can't really do much more than just crash
raise RuntimeError("No response from the manager after {} retries.".format(_REDIS_RETRIES))
_, item = response
result = json.loads(item.decode('utf-8'))[ExecutorProtocol.RESULT]
assert result in [ExecutorProtocol.RESULT_OK, ExecutorProtocol.RESULT_ERROR]
if result == ExecutorProtocol.RESULT_OK:
return True
return False | Send a properly formatted command to the manager.
:param cmd: The command to send (:class:`str`).
:param expect_reply: If ``True``, wait for the manager to reply
with an acknowledgement packet.
:param extra_fields: A dictionary of extra information that's
merged into the packet body (i.e. not under an extra key). | entailment |
def extract_values_query(query, field_list, out_stream=None):
"""Get list of dicts where each dict holds values from one SciObj.
Args:
field_list: list of str
List of field names for which to return values. Must be strings from
FIELD_NAME_TO_generate_dict.keys().
If None, return all fields.
filter_arg_dict: dict
Dict of arguments to pass to ``ScienceObject.objects.filter()``.
out_stream: open file-like object
If provided, the JSON doc is streamed out instead of buffered in memory.
Returns:
list of dict: The keys in the returned dict correspond to the field names in
``field_list``.
Raises:
raise d1_common.types.exceptions.InvalidRequest() if ``field_list`` contains any
invalid field names. A list of the invalid fields is included in the exception.
"""
lookup_dict, generate_dict = _split_field_list(field_list)
query, annotate_key_list = _annotate_query(query, generate_dict)
# return query, annotate_key_list
#
# query, annotate_key_list = _create_query(filter_arg_dict, generate_dict)
lookup_list = [v["lookup_str"] for k, v in lookup_dict.items()] + annotate_key_list
if out_stream is None:
return _create_sciobj_list(query, lookup_list, lookup_dict, generate_dict)
else:
return _write_stream(query, lookup_list, lookup_dict, generate_dict, out_stream) | Get list of dicts where each dict holds values from one SciObj.
Args:
field_list: list of str
List of field names for which to return values. Must be strings from
FIELD_NAME_TO_generate_dict.keys().
If None, return all fields.
filter_arg_dict: dict
Dict of arguments to pass to ``ScienceObject.objects.filter()``.
out_stream: open file-like object
If provided, the JSON doc is streamed out instead of buffered in memory.
Returns:
list of dict: The keys in the returned dict correspond to the field names in
``field_list``.
Raises:
raise d1_common.types.exceptions.InvalidRequest() if ``field_list`` contains any
invalid field names. A list of the invalid fields is included in the exception. | entailment |
def extract_values(field_list=None, filter_arg_dict=None, out_stream=None):
"""Get list of dicts where each dict holds values from one SciObj.
Args:
field_list: list of str
List of field names for which to return values. Must be strings from
FIELD_NAME_TO_generate_dict.keys().
If None, return all fields.
filter_arg_dict: dict
Dict of arguments to pass to ``ScienceObject.objects.filter()``.
Returns:
list of dict: The keys in the returned dict correspond to the field names in
``field_list``.
Raises:
raise d1_common.types.exceptions.InvalidRequest() if ``field_list`` contains any
invalid field names. A list of the invalid fields is included in the exception.
"""
lookup_dict, generate_dict = _split_field_list(field_list)
query, annotate_key_list = _create_query(filter_arg_dict, generate_dict)
lookup_list = [v["lookup_str"] for k, v in lookup_dict.items()] + annotate_key_list
if out_stream is None:
return _create_sciobj_list(query, lookup_list, lookup_dict, generate_dict)
else:
return _write_stream(query, lookup_list, lookup_dict, generate_dict, out_stream) | Get list of dicts where each dict holds values from one SciObj.
Args:
field_list: list of str
List of field names for which to return values. Must be strings from
FIELD_NAME_TO_generate_dict.keys().
If None, return all fields.
filter_arg_dict: dict
Dict of arguments to pass to ``ScienceObject.objects.filter()``.
Returns:
list of dict: The keys in the returned dict correspond to the field names in
``field_list``.
Raises:
raise d1_common.types.exceptions.InvalidRequest() if ``field_list`` contains any
invalid field names. A list of the invalid fields is included in the exception. | entailment |
def assert_invalid_field_list(field_list):
"""raise d1_common.types.exceptions.InvalidRequest() if ``field_list`` contains any
invalid field names. A list of the invalid fields is included in the exception.
- Implicitly called by ``extract_values()``.
"""
if field_list is not None:
invalid_field_list = [
v for v in field_list if v not in get_valid_field_name_list()
]
if invalid_field_list:
raise d1_common.types.exceptions.InvalidRequest(
0, "Invalid fields: {}".format(", ".join(invalid_field_list))
) | raise d1_common.types.exceptions.InvalidRequest() if ``field_list`` contains any
invalid field names. A list of the invalid fields is included in the exception.
- Implicitly called by ``extract_values()``. | entailment |
def _annotate_query(query, generate_dict):
"""Add annotations to the query to retrieve values required by field value generate
functions."""
annotate_key_list = []
for field_name, annotate_dict in generate_dict.items():
for annotate_name, annotate_func in annotate_dict["annotate_dict"].items():
query = annotate_func(query)
annotate_key_list.append(annotate_name)
return query, annotate_key_list | Add annotations to the query to retrieve values required by field value generate
functions. | entailment |
def _value_list_to_sciobj_dict(
sciobj_value_list, lookup_list, lookup_dict, generate_dict
):
"""Create a dict where the keys are the requested field names, from the values
returned by Django."""
sciobj_dict = {}
# for sciobj_value, lookup_str in zip(sciobj_value_list, lookup_list):
lookup_to_value_dict = {k: v for k, v in zip(lookup_list, sciobj_value_list)}
for field_name, r_dict in lookup_dict.items():
if r_dict["lookup_str"] in lookup_to_value_dict.keys():
sciobj_dict[field_name] = lookup_to_value_dict[r_dict["lookup_str"]]
for field_name, annotate_dict in generate_dict.items():
for final_name, generate_func in annotate_dict["generate_dict"].items():
sciobj_dict[field_name] = generate_func(lookup_to_value_dict)
return sciobj_dict | Create a dict where the keys are the requested field names, from the values
returned by Django. | entailment |
def _split_field_list(field_list):
"""Split the list of fields for which to extract values into lists by extraction
methods.
- Remove any duplicated field names.
- Raises ValueError with list of any invalid field names in ``field_list``.
"""
lookup_dict = {}
generate_dict = {}
for field_name in field_list or FIELD_NAME_TO_EXTRACT_DICT.keys():
try:
extract_dict = FIELD_NAME_TO_EXTRACT_DICT[field_name]
except KeyError:
assert_invalid_field_list(field_list)
else:
if "lookup_str" in extract_dict:
lookup_dict[field_name] = extract_dict
else:
generate_dict[field_name] = extract_dict
return lookup_dict, generate_dict | Split the list of fields for which to extract values into lists by extraction
methods.
- Remove any duplicated field names.
- Raises ValueError with list of any invalid field names in ``field_list``. | entailment |
def dataoneTypes(request):
"""Return the PyXB binding to use when handling a request."""
if is_v1_api(request):
return d1_common.types.dataoneTypes_v1_1
elif is_v2_api(request) or is_diag_api(request):
return d1_common.types.dataoneTypes_v2_0
else:
raise d1_common.types.exceptions.ServiceFailure(
0, 'Unknown version designator in URL. url="{}"'.format(request.path)
) | Return the PyXB binding to use when handling a request. | entailment |
def parse_and_normalize_url_date(date_str):
"""Parse a ISO 8601 date-time with optional timezone.
- Return as datetime with timezone adjusted to UTC.
- Return naive date-time set to UTC.
"""
if date_str is None:
return None
try:
return d1_common.date_time.dt_from_iso8601_str(date_str)
except d1_common.date_time.iso8601.ParseError as e:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Invalid date format for URL parameter. date="{}" error="{}"'.format(
date_str, str(e)
),
) | Parse a ISO 8601 date-time with optional timezone.
- Return as datetime with timezone adjusted to UTC.
- Return naive date-time set to UTC. | entailment |
def get(self, doc_id):
"""Retrieve the specified document."""
resp_dict = self._get_query(q='id:{}'.format(doc_id))
if resp_dict['response']['numFound'] > 0:
return resp_dict['response']['docs'][0] | Retrieve the specified document. | entailment |
def get_ids(self, start=0, rows=1000, **query_dict):
"""Retrieve a list of identifiers for documents matching the query."""
resp_dict = self._get_query(start=start, rows=rows, **query_dict)
return {
'matches': resp_dict['response']['numFound'],
'start': start,
'ids': [d['id'] for d in resp_dict['response']['docs']],
} | Retrieve a list of identifiers for documents matching the query. | entailment |
def count(self, **query_dict):
"""Return the number of entries that match query."""
param_dict = query_dict.copy()
param_dict['count'] = 0
resp_dict = self._get_query(**param_dict)
return resp_dict['response']['numFound'] | Return the number of entries that match query. | entailment |
def get_field_values(self, name, maxvalues=-1, sort=True, **query_dict):
"""Retrieve the unique values for a field, along with their usage counts.
:param name: Name of field for which to retrieve values
:type name: string
:param sort: Sort the result
:param maxvalues: Maximum number of values to retrieve. Default is -1,
which causes retrieval of all values.
:type maxvalues: int
:returns: dict of {fieldname: [[value, count], ... ], }
"""
param_dict = query_dict.copy()
param_dict.update(
{
'rows': '0',
'facet': 'true',
'facet.field': name,
'facet.limit': str(maxvalues),
'facet.zeros': 'false',
'facet.sort': str(sort).lower(),
}
)
resp_dict = self._post_query(**param_dict)
result_dict = resp_dict['facet_counts']['facet_fields']
result_dict['numFound'] = resp_dict['response']['numFound']
return result_dict | Retrieve the unique values for a field, along with their usage counts.
:param name: Name of field for which to retrieve values
:type name: string
:param sort: Sort the result
:param maxvalues: Maximum number of values to retrieve. Default is -1,
which causes retrieval of all values.
:type maxvalues: int
:returns: dict of {fieldname: [[value, count], ... ], } | entailment |
def get_field_min_max(self, name, **query_dict):
"""Returns the minimum and maximum values of the specified field. This requires
two search calls to the service, each requesting a single value of a single
field.
@param name(string) Name of the field
@param q(string) Query identifying range of records for min and max values
@param fq(string) Filter restricting range of query
@return list of [min, max]
"""
param_dict = query_dict.copy()
param_dict.update({'rows': 1, 'fl': name, 'sort': '%s asc' % name})
try:
min_resp_dict = self._post_query(**param_dict)
param_dict['sort'] = '%s desc' % name
max_resp_dict = self._post_query(**param_dict)
return (
min_resp_dict['response']['docs'][0][name],
max_resp_dict['response']['docs'][0][name],
)
except Exception:
self._log.exception('Exception')
raise | Returns the minimum and maximum values of the specified field. This requires
two search calls to the service, each requesting a single value of a single
field.
@param name(string) Name of the field
@param q(string) Query identifying range of records for min and max values
@param fq(string) Filter restricting range of query
@return list of [min, max] | entailment |
def field_alpha_histogram(
self, name, n_bins=10, include_queries=True, **query_dict
):
"""Generates a histogram of values from a string field.
Output is: [[low, high, count, query], ... ]. Bin edges is determined by equal
division of the fields.
"""
bin_list = []
q_bin = []
try:
# get total number of values for the field
# TODO: this is a slow mechanism to retrieve the number of distinct values
# Need to replace this with something more efficient.
# Can probably replace with a range of alpha chars - need to check on
# case sensitivity
param_dict = query_dict.copy()
f_vals = self.get_field_values(name, maxvalues=-1, **param_dict)
n_values = len(f_vals[name]) // 2
if n_values < n_bins:
n_bins = n_values
if n_values == n_bins:
# Use equivalence instead of range queries to retrieve the
# values
for i in range(n_bins):
a_bin = [f_vals[name][i * 2], f_vals[name][i * 2], 0]
bin_q = '{}:{}'.format(
name, self._prepare_query_term(name, a_bin[0])
)
q_bin.append(bin_q)
bin_list.append(a_bin)
else:
delta = n_values / n_bins
if delta == 1:
# Use equivalence queries, except the last one which includes the
# remainder of terms
for i in range(n_bins - 1):
a_bin = [f_vals[name][i * 2], f_vals[name][i * 2], 0]
bin_q = '{}:{}'.format(
name, self._prepare_query_term(name, a_bin[0])
)
q_bin.append(bin_q)
bin_list.append(a_bin)
term = f_vals[name][(n_bins - 1) * 2]
a_bin = [term, f_vals[name][((n_values - 1) * 2)], 0]
bin_q = '{}:[{} TO *]'.format(
name, self._prepare_query_term(name, term)
)
q_bin.append(bin_q)
bin_list.append(a_bin)
else:
# Use range for all terms
# now need to page through all the values and get those at
# the edges
c_offset = 0.0
delta = float(n_values) / float(n_bins)
for i in range(n_bins):
idx_l = int(c_offset) * 2
idx_u = (int(c_offset + delta) * 2) - 2
a_bin = [f_vals[name][idx_l], f_vals[name][idx_u], 0]
# logger.info(str(a_bin))
try:
if i == 0:
bin_q = '{}:[* TO {}]'.format(
name, self._prepare_query_term(name, a_bin[1])
)
elif i == n_bins - 1:
bin_q = '{}:[{} TO *]'.format(
name, self._prepare_query_term(name, a_bin[0])
)
else:
bin_q = '{}:[{} TO {}]'.format(
name,
self._prepare_query_term(name, a_bin[0]),
self._prepare_query_term(name, a_bin[1]),
)
except Exception:
self._log.exception('Exception:')
raise
q_bin.append(bin_q)
bin_list.append(a_bin)
c_offset = c_offset + delta
# now execute the facet query request
param_dict = query_dict.copy()
param_dict.update(
{
'rows': '0',
'facet': 'true',
'facet.field': name,
'facet.limit': '1',
'facet.mincount': 1,
'facet.query': [sq.encode('utf-8') for sq in q_bin],
}
)
resp_dict = self._post_query(**param_dict)
for i in range(len(bin_list)):
v = resp_dict['facet_counts']['facet_queries'][q_bin[i]]
bin_list[i][2] = v
if include_queries:
bin_list[i].append(q_bin[i])
except Exception:
self._log.exception('Exception')
raise
return bin_list | Generates a histogram of values from a string field.
Output is: [[low, high, count, query], ... ]. Bin edges is determined by equal
division of the fields. | entailment |
def add_docs(self, docs):
"""docs is a list of fields that are a dictionary of name:value for a record."""
return self.query(
'solr',
'<add>{}</add>'.format(
''.join([self._format_add(fields) for fields in docs])
),
do_post=True,
) | docs is a list of fields that are a dictionary of name:value for a record. | entailment |
def _coerce_type(self, field_type, value):
"""Returns unicode(value) after trying to coerce it into the Solr field type.
@param field_type(string) The Solr field type for the value
@param value(any) The value that is to be represented as Unicode text.
"""
if value is None:
return None
if field_type == 'string':
return str(value)
elif field_type == 'text':
return str(value)
elif field_type == 'int':
try:
v = int(value)
return str(v)
except:
return None
elif field_type == 'float':
try:
v = float(value)
return str(v)
except:
return None
elif field_type == 'date':
try:
v = datetime.datetime(
value['year'],
value['month'],
value['day'],
value['hour'],
value['minute'],
value['second'],
)
v = v.strftime('%Y-%m-%dT%H:%M:%S.0Z')
return v
except:
return None
return str(value) | Returns unicode(value) after trying to coerce it into the Solr field type.
@param field_type(string) The Solr field type for the value
@param value(any) The value that is to be represented as Unicode text. | entailment |
def _get_solr_type(self, field):
"""Returns the Solr type of the specified field name.
Assumes the convention of dynamic fields using an underscore + type character
code for the field name.
"""
field_type = 'string'
try:
field_type = FIELD_TYPE_CONVERSION_MAP[field]
return field_type
except:
pass
fta = field.split('_')
if len(fta) > 1:
ft = fta[len(fta) - 1]
try:
field_type = FIELD_TYPE_CONVERSION_MAP[ft]
# cache the type so it's used next time
FIELD_TYPE_CONVERSION_MAP[field] = field_type
except:
pass
return field_type | Returns the Solr type of the specified field name.
Assumes the convention of dynamic fields using an underscore + type character
code for the field name. | entailment |
def _get_query(self, **query_dict):
"""Perform a GET query against Solr and return the response as a Python dict."""
param_dict = query_dict.copy()
return self._send_query(do_post=False, **param_dict) | Perform a GET query against Solr and return the response as a Python dict. | entailment |
def _post_query(self, **query_dict):
"""Perform a POST query against Solr and return the response as a Python
dict."""
param_dict = query_dict.copy()
return self._send_query(do_post=True, **param_dict) | Perform a POST query against Solr and return the response as a Python
dict. | entailment |
def _send_query(self, do_post=False, **query_dict):
"""Perform a query against Solr and return the response as a Python dict."""
# self._prepare_query_term()
param_dict = query_dict.copy()
param_dict.setdefault('wt', 'json')
param_dict.setdefault('q', '*.*')
param_dict.setdefault('fl', '*')
return self.query('solr', '', do_post=do_post, query=param_dict) | Perform a query against Solr and return the response as a Python dict. | entailment |
def _prepare_query_term(self, field, term):
"""Prepare a query term for inclusion in a query.
This escapes the term and if necessary, wraps the term in quotes.
"""
if term == "*":
return term
add_star = False
if term[len(term) - 1] == '*':
add_star = True
term = term[0 : len(term) - 1]
term = self._escape_query_term(term)
if add_star:
term = '{}*'.format(term)
if self._get_solr_type(field) in ['string', 'text', 'text_ws']:
return '"{}"'.format(term)
return term | Prepare a query term for inclusion in a query.
This escapes the term and if necessary, wraps the term in quotes. | entailment |
def _escape_query_term(self, term):
"""Escape a query term for inclusion in a query.
- Also see: prepare_query_term().
"""
term = term.replace('\\', '\\\\')
for c in RESERVED_CHAR_LIST:
term = term.replace(c, r'\{}'.format(c))
return term | Escape a query term for inclusion in a query.
- Also see: prepare_query_term(). | entailment |
def _next_page(self, offset):
"""Retrieves the next set of results from the service."""
self._log.debug("Iterator c_record={}".format(self.c_record))
page_size = self.page_size
if (offset + page_size) > self.max_records:
page_size = self.max_records - offset
param_dict = self.query_dict.copy()
param_dict.update(
{
'start': str(offset),
'rows': str(page_size),
'explainOther': '',
'hl.fl': '',
}
)
self.res = self.client.search(**param_dict)
self._num_hits = int(self.res['response']['numFound']) | Retrieves the next set of results from the service. | entailment |
def _next_page(self, offset):
"""Retrieves the next set of results from the service."""
self._log.debug("Iterator c_record={}".format(self.c_record))
param_dict = self.query_dict.copy()
param_dict.update(
{
'rows': '0',
'facet': 'true',
'facet.limit': str(self.page_size),
'facet.offset': str(offset),
'facet.zeros': 'false',
}
)
print(param_dict)
# resp_dict = self.client._get_query(**param_dict)
resp_dict = self.client._post_query(**param_dict)
# resp_dict = self.client.search(**param_dict)
pprint.pprint(resp_dict)
try:
self.res = resp_dict['facet_counts']['facet_fields'][self.field]
self._log.debug(self.res)
except Exception:
self.res = []
self.index = 0 | Retrieves the next set of results from the service. | entailment |
def migrate_flow_collection(apps, schema_editor):
"""Migrate 'flow_collection' field to 'entity_type'."""
Process = apps.get_model('flow', 'Process')
DescriptorSchema = apps.get_model('flow', 'DescriptorSchema')
for process in Process.objects.all():
process.entity_type = process.flow_collection
process.entity_descriptor_schema = process.flow_collection
if (process.entity_descriptor_schema is not None and
not DescriptorSchema.objects.filter(slug=process.entity_descriptor_schema).exists()):
raise LookupError(
"Descriptow schema '{}' referenced in 'entity_descriptor_schema' not "
"found.".format(process.entity_descriptor_schema)
)
process.save() | Migrate 'flow_collection' field to 'entity_type'. | entailment |
def get_pyxb_binding_by_api_version(api_major, api_minor=0):
"""Map DataONE API version tag to PyXB binding.
Given a DataONE API major version number, return PyXB binding that can
serialize and deserialize DataONE XML docs of that version.
Args:
api_major, api_minor: str or int
DataONE API major and minor version numbers.
- If ``api_major`` is an integer, it is combined with ``api_minor`` to form an
exact version.
- If ``api_major`` is a string of ``v1`` or ``v2``, ``api_minor`` is ignored
and the latest PyXB bindingavailable for the ``api_major`` version is
returned.
Returns:
PyXB binding: E.g., ``d1_common.types.dataoneTypes_v1_1``.
"""
try:
return VERSION_TO_BINDING_DICT[api_major, api_minor]
except KeyError:
raise ValueError(
'Unknown DataONE API version: {}.{}'.format(api_major, api_minor)
) | Map DataONE API version tag to PyXB binding.
Given a DataONE API major version number, return PyXB binding that can
serialize and deserialize DataONE XML docs of that version.
Args:
api_major, api_minor: str or int
DataONE API major and minor version numbers.
- If ``api_major`` is an integer, it is combined with ``api_minor`` to form an
exact version.
- If ``api_major`` is a string of ``v1`` or ``v2``, ``api_minor`` is ignored
and the latest PyXB bindingavailable for the ``api_major`` version is
returned.
Returns:
PyXB binding: E.g., ``d1_common.types.dataoneTypes_v1_1``. | entailment |
def extract_version_tag_from_url(url):
"""Extract a DataONE API version tag from a MN or CN service endpoint URL.
Args:
url : str
Service endpoint URL. E.g.: ``https://mn.example.org/path/v2/object/pid``.
Returns:
str : Valid version tags are currently ``v1`` or ``v2``.
"""
m = re.match(r'(/|^)(v\d)(/|$)', url)
if not m:
return None
return m.group(2) | Extract a DataONE API version tag from a MN or CN service endpoint URL.
Args:
url : str
Service endpoint URL. E.g.: ``https://mn.example.org/path/v2/object/pid``.
Returns:
str : Valid version tags are currently ``v1`` or ``v2``. | entailment |
def str_to_v1_str(xml_str):
"""Convert a API v2 XML doc to v1 XML doc.
Removes elements that are only valid for v2 and changes namespace to v1.
If doc is already v1, it is returned unchanged.
Args:
xml_str : str
API v2 XML doc. E.g.: ``SystemMetadata v2``.
Returns:
str : API v1 XML doc. E.g.: ``SystemMetadata v1``.
"""
if str_is_v1(xml_str):
return xml_str
etree_obj = str_to_etree(xml_str)
strip_v2_elements(etree_obj)
etree_replace_namespace(etree_obj, d1_common.types.dataoneTypes_v1.Namespace)
return etree_to_str(etree_obj) | Convert a API v2 XML doc to v1 XML doc.
Removes elements that are only valid for v2 and changes namespace to v1.
If doc is already v1, it is returned unchanged.
Args:
xml_str : str
API v2 XML doc. E.g.: ``SystemMetadata v2``.
Returns:
str : API v1 XML doc. E.g.: ``SystemMetadata v1``. | entailment |
def str_to_v2_str(xml_str):
"""Convert a API v1 XML doc to v2 XML doc.
All v1 elements are valid for v2, so only changes namespace.
Args:
xml_str : str
API v1 XML doc. E.g.: ``SystemMetadata v1``.
Returns:
str : API v2 XML doc. E.g.: ``SystemMetadata v2``.
"""
if str_is_v2(xml_str):
return xml_str
etree_obj = str_to_etree(xml_str)
etree_replace_namespace(etree_obj, d1_common.types.dataoneTypes_v2_0.Namespace)
return etree_to_str(etree_obj) | Convert a API v1 XML doc to v2 XML doc.
All v1 elements are valid for v2, so only changes namespace.
Args:
xml_str : str
API v1 XML doc. E.g.: ``SystemMetadata v1``.
Returns:
str : API v2 XML doc. E.g.: ``SystemMetadata v2``. | entailment |
def str_is_well_formed(xml_str):
"""
Args:
xml_str : str
DataONE API XML doc.
Returns:
bool: **True** if XML doc is well formed.
"""
try:
str_to_etree(xml_str)
except xml.etree.ElementTree.ParseError:
return False
else:
return True | Args:
xml_str : str
DataONE API XML doc.
Returns:
bool: **True** if XML doc is well formed. | entailment |
def pyxb_is_v1(pyxb_obj):
"""
Args:
pyxb_obj : PyXB object
PyXB object holding an unknown type.
Returns:
bool: **True** if ``pyxb_obj`` holds an API v1 type.
"""
# TODO: Will not detect v1.2 as v1.
return (
pyxb_obj._element().name().namespace()
== d1_common.types.dataoneTypes_v1.Namespace
) | Args:
pyxb_obj : PyXB object
PyXB object holding an unknown type.
Returns:
bool: **True** if ``pyxb_obj`` holds an API v1 type. | entailment |
def pyxb_is_v2(pyxb_obj):
"""
Args:
pyxb_obj : PyXB object
PyXB object holding an unknown type.
Returns:
bool: **True** if ``pyxb_obj`` holds an API v2 type.
"""
return (
pyxb_obj._element().name().namespace()
== d1_common.types.dataoneTypes_v2_0.Namespace
) | Args:
pyxb_obj : PyXB object
PyXB object holding an unknown type.
Returns:
bool: **True** if ``pyxb_obj`` holds an API v2 type. | entailment |
def str_to_etree(xml_str, encoding='utf-8'):
"""Deserialize API XML doc to an ElementTree.
Args:
xml_str: bytes
DataONE API XML doc
encoding: str
Decoder to use when converting the XML doc ``bytes`` to a Unicode str.
Returns:
ElementTree: Matching the API version of the XML doc.
"""
parser = xml.etree.ElementTree.XMLParser(encoding=encoding)
return xml.etree.ElementTree.fromstring(xml_str, parser=parser) | Deserialize API XML doc to an ElementTree.
Args:
xml_str: bytes
DataONE API XML doc
encoding: str
Decoder to use when converting the XML doc ``bytes`` to a Unicode str.
Returns:
ElementTree: Matching the API version of the XML doc. | entailment |
def replace_namespace_with_prefix(tag_str, ns_reverse_dict=None):
"""Convert XML tag names with namespace on the form ``{namespace}tag`` to form
``prefix:tag``.
Args:
tag_str: str
Tag name with namespace. E.g.:
``{http://www.openarchives.org/ore/terms/}ResourceMap``.
ns_reverse_dict : dict
A dictionary of namespace to prefix to use for the conversion. If not supplied, a
default dict with the namespaces used in DataONE XML types is used.
Returns:
str: Tag name with prefix. E.g.: ``ore:ResourceMap``.
"""
ns_reverse_dict = ns_reverse_dict or NS_REVERSE_DICT
for namespace_str, prefix_str in ns_reverse_dict.items():
tag_str = tag_str.replace(
'{{{}}}'.format(namespace_str), '{}:'.format(prefix_str)
)
return tag_str | Convert XML tag names with namespace on the form ``{namespace}tag`` to form
``prefix:tag``.
Args:
tag_str: str
Tag name with namespace. E.g.:
``{http://www.openarchives.org/ore/terms/}ResourceMap``.
ns_reverse_dict : dict
A dictionary of namespace to prefix to use for the conversion. If not supplied, a
default dict with the namespaces used in DataONE XML types is used.
Returns:
str: Tag name with prefix. E.g.: ``ore:ResourceMap``. | entailment |
def etree_replace_namespace(etree_obj, ns_str):
"""In-place change the namespace of elements in an ElementTree.
Args:
etree_obj: ElementTree
ns_str : str
The namespace to set. E.g.: ``http://ns.dataone.org/service/types/v1``.
"""
def _replace_recursive(el, n):
el.tag = re.sub(r'{.*\}', '{{{}}}'.format(n), el.tag)
el.text = el.text.strip() if el.text else None
el.tail = el.tail.strip() if el.tail else None
for child_el in el:
_replace_recursive(child_el, n)
_replace_recursive(etree_obj, ns_str) | In-place change the namespace of elements in an ElementTree.
Args:
etree_obj: ElementTree
ns_str : str
The namespace to set. E.g.: ``http://ns.dataone.org/service/types/v1``. | entailment |
def strip_v2_elements(etree_obj):
"""In-place remove elements and attributes that are only valid in v2 types.
Args: etree_obj: ElementTree ElementTree holding one of the DataONE API types
that changed between v1 and v2.
"""
if etree_obj.tag == v2_0_tag('logEntry'):
strip_logEntry(etree_obj)
elif etree_obj.tag == v2_0_tag('log'):
strip_log(etree_obj)
elif etree_obj.tag == v2_0_tag('node'):
strip_node(etree_obj)
elif etree_obj.tag == v2_0_tag('nodeList'):
strip_node_list(etree_obj)
elif etree_obj.tag == v2_0_tag('systemMetadata'):
strip_system_metadata(etree_obj)
else:
raise ValueError('Unknown root element. tag="{}"'.format(etree_obj.tag)) | In-place remove elements and attributes that are only valid in v2 types.
Args: etree_obj: ElementTree ElementTree holding one of the DataONE API types
that changed between v1 and v2. | entailment |
def strip_system_metadata(etree_obj):
"""In-place remove elements and attributes that are only valid in v2 types from v1
System Metadata.
Args: etree_obj: ElementTree ElementTree holding a v1 SystemMetadata.
"""
for series_id_el in etree_obj.findall('seriesId'):
etree_obj.remove(series_id_el)
for media_type_el in etree_obj.findall('mediaType'):
etree_obj.remove(media_type_el)
for file_name_el in etree_obj.findall('fileName'):
etree_obj.remove(file_name_el) | In-place remove elements and attributes that are only valid in v2 types from v1
System Metadata.
Args: etree_obj: ElementTree ElementTree holding a v1 SystemMetadata. | entailment |
def _create_replica(self, sysmeta_pyxb, sciobj_bytestream):
"""GMN handles replicas differently from native objects, with the main
differences being related to handling of restrictions related to revision chains
and SIDs.
So this create sequence differs significantly from the regular one that is
accessed through MNStorage.create().
"""
pid = d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
self._assert_is_pid_of_local_unprocessed_replica(pid)
self._check_and_create_replica_revision(sysmeta_pyxb, "obsoletes")
self._check_and_create_replica_revision(sysmeta_pyxb, "obsoletedBy")
sciobj_url = d1_gmn.app.sciobj_store.get_rel_sciobj_file_url_by_pid(pid)
sciobj_model = d1_gmn.app.sysmeta.create_or_update(sysmeta_pyxb, sciobj_url)
self._store_science_object_bytes(pid, sciobj_bytestream)
d1_gmn.app.event_log.create_log_entry(
sciobj_model, "create", "0.0.0.0", "[replica]", "[replica]"
) | GMN handles replicas differently from native objects, with the main
differences being related to handling of restrictions related to revision chains
and SIDs.
So this create sequence differs significantly from the regular one that is
accessed through MNStorage.create(). | entailment |
async def restricted_import(self, async_client, node_type):
"""Import only the Science Objects specified by a text file.
The file must be UTF-8 encoded and contain one PIDs or SIDs per line.
"""
item_task_name = "Importing objects"
pid_path = self.options['pid_path']
if not os.path.exists(pid_path):
raise ConnectionError('File does not exist: {}'.format(pid_path))
with open(pid_path, encoding='UTF-8') as pid_file:
self.progress_logger.start_task_type(
item_task_name, len(pid_file.readlines())
)
pid_file.seek(0)
for pid in pid_file.readlines():
pid = pid.strip()
self.progress_logger.start_task(item_task_name)
# Ignore any blank lines in the file
if not pid:
continue
await self.import_aggregated(async_client, pid)
self.progress_logger.end_task_type(item_task_name) | Import only the Science Objects specified by a text file.
The file must be UTF-8 encoded and contain one PIDs or SIDs per line. | entailment |
async def import_aggregated(self, async_client, pid):
"""Import the SciObj at {pid}.
If the SciObj is a Resource Map, also recursively import the aggregated objects.
"""
self._logger.info('Importing: {}'.format(pid))
task_set = set()
object_info_pyxb = d1_common.types.dataoneTypes.ObjectInfo()
object_info_pyxb.identifier = pid
task_set.add(self.import_object(async_client, object_info_pyxb))
result_set, task_set = await asyncio.wait(task_set)
assert len(result_set) == 1
assert not task_set
sysmeta_pyxb = result_set.pop().result()
if not sysmeta_pyxb:
# Import was skipped
return
assert d1_common.xml.get_req_val(sysmeta_pyxb.identifier) == pid
if d1_gmn.app.did.is_resource_map_db(pid):
for member_pid in d1_gmn.app.resource_map.get_resource_map_members_by_map(
pid
):
self.progress_logger.event("Importing aggregated SciObj")
self._logger.info('Importing aggregated SciObj. pid="{}"'.format(pid))
await self.import_aggregated(async_client, member_pid) | Import the SciObj at {pid}.
If the SciObj is a Resource Map, also recursively import the aggregated objects. | entailment |
async def get_object_proxy_location(self, client, pid):
"""If object is proxied, return the proxy location URL.
If object is local, return None.
"""
try:
return (await client.describe(pid)).get("DataONE-Proxy")
except d1_common.types.exceptions.DataONEException:
# Workaround for older GMNs that return 500 instead of 404 for describe()
pass | If object is proxied, return the proxy location URL.
If object is local, return None. | entailment |
def get_list_objects_arg_dict(self, node_type):
"""Create a dict of arguments that will be passed to listObjects().
If {node_type} is a CN, add filtering to include only objects from this GMN
instance in the ObjectList returned by CNCore.listObjects().
"""
arg_dict = {
# Restrict query for faster debugging
# "fromDate": datetime.datetime(2017, 1, 1),
# "toDate": datetime.datetime(2017, 1, 10),
}
if node_type == "cn":
arg_dict["nodeId"] = django.conf.settings.NODE_IDENTIFIER
return arg_dict | Create a dict of arguments that will be passed to listObjects().
If {node_type} is a CN, add filtering to include only objects from this GMN
instance in the ObjectList returned by CNCore.listObjects(). | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.