sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def get_log_records_arg_dict(self, node_type):
    """Build the argument dict that will be passed to getLogRecords().

    When ``node_type`` is "cn", a nodeId filter is added so that only log
    records belonging to this GMN instance are returned by the CN.
    """
    args = {}
    # Restrict query for faster debugging:
    # args["fromDate"] = datetime.datetime(2017, 1, 1)
    # args["toDate"] = datetime.datetime(2017, 1, 3)
    if node_type == "cn":
        args["nodeId"] = django.conf.settings.NODE_IDENTIFIER
    return args
async def is_cn(self, client):
    """Return True if the node at {base_url} is a CN, False if it is an MN.

    Raise a DataONEException if it's not a functional CN or MN.
    """
    capabilities_pyxb = await client.get_capabilities()
    type_name = d1_common.type_conversions.pyxb_get_type_name(capabilities_pyxb)
    # A CN returns a NodeList document; an MN returns a single Node document.
    return type_name == "NodeList"
async def probe_node_type_major(self, client):
    """Determine whether the import source node is a CN or an MN, and which
    major version of the DataONE API to use against it.

    Returns:
        tuple: ("cn" | "mn", API major version)
    """
    try:
        node_pyxb = await self.get_node_doc(client)
    except d1_common.types.exceptions.DataONEException as e:
        raise django.core.management.base.CommandError(
            "Could not find a functional CN or MN at the provided BaseURL. "
            'base_url="{}" error="{}"'.format(
                self.options["baseurl"], e.friendly_format()
            )
        )
    type_name = d1_common.type_conversions.pyxb_get_type_name(node_pyxb)
    if type_name != "NodeList":
        # A single Node document means the source is an MN.
        self._logger.info(
            "Importing from MN: {}".format(
                d1_common.xml.get_req_val(node_pyxb.identifier)
            )
        )
        return "mn", self.find_node_api_version(node_pyxb)
    # A NodeList document means the source is a CN.
    self.assert_is_known_node_id(
        node_pyxb, django.conf.settings.NODE_IDENTIFIER
    )
    self._logger.info(
        "Importing from CN: {}. filtered on MN: {}".format(
            d1_common.xml.get_req_val(
                self.find_node(node_pyxb, self.options["baseurl"]).identifier
            ),
            django.conf.settings.NODE_IDENTIFIER,
        )
    )
    return "cn", "v2"
def find_node(self, node_list_pyxb, base_url):
    """Search NodeList for the Node that has ``base_url``.

    Returns:
        Matching Node, or None if no Node matches.
    """
    return next(
        (n for n in node_list_pyxb.node if n.baseURL == base_url), None
    )
def assert_is_known_node_id(self, node_list_pyxb, node_id):
    """When importing from a CN, verify that the NodeID by which the
    ObjectList will be filtered is registered with that CN.
    """
    matching_node = self.find_node_by_id(node_list_pyxb, node_id)
    assert matching_node is not None, (
        "The NodeID of this GMN instance is unknown to the CN at the provided BaseURL. "
        'node_id="{}" base_url="{}"'.format(node_id, self.options["baseurl"])
    )
def find_node_api_version(self, node_pyxb):
    """Return the highest API major version supported by the node.

    Service versions are strings such as "v1", "v2"; the leading "v" is
    stripped before comparison. Returns 0 if no services are listed.
    """
    return max(
        (int(service.version[1:]) for service in node_pyxb.services.service),
        default=0,
    )
def find_node_by_id(self, node_list_pyxb, node_id):
    """Search NodeList for the Node with ``node_id``.

    Returns:
        Matching Node, or None if no Node matches.
    """
    for candidate_pyxb in node_list_pyxb.node:
        if d1_common.xml.get_req_val(candidate_pyxb.identifier) == node_id:
            return candidate_pyxb
    return None
def celery_run(data_id, runtime_dir, argv):
    """Run the process executor and block until it exits.

    :param data_id: The id of the :class:`~resolwe.flow.models.Data`
        object to be processed.
    :param runtime_dir: The directory from which to run the executor.
    :param argv: The argument vector used to run the executor.
    """
    # NOTE: The old docstring documented a ``verbosity`` parameter that the
    # function never had; it has been removed.
    # stdin is closed (DEVNULL) so the child cannot block on interactive
    # input. The exit code is intentionally ignored, matching the previous
    # Popen(...).wait() behavior.
    subprocess.run(argv, cwd=runtime_dir, stdin=subprocess.DEVNULL)
def archive_sciobj(pid):
    """Mark the object identified by ``pid`` as archived.

    Preconditions:
    - The object with the pid is verified to exist.
    - The object is not a replica.
    - The object is not archived.
    """
    model = d1_gmn.app.model_util.get_sci_model(pid)
    model.is_archived = True
    model.save()
    # Archiving counts as a modification of the object.
    _update_modified_timestamp(model)
def create_or_update(sysmeta_pyxb, sciobj_url=None):
    """Create or update the database representation of a System Metadata
    object and closely related internal state.

    - If ``sciobj_url`` is not passed on create, storage in the internal
      sciobj store is assumed.
    - If ``sciobj_url`` is passed on create, it can reference a location in
      the internal sciobj store, an arbitrary location on disk, or a remote
      web server. See the sciobj_store module for more information.
    - If ``sciobj_url`` is not passed on update, the sciobj location remains
      unchanged.
    - If ``sciobj_url`` is passed on update, the sciobj location is updated.

    Preconditions:
    - All values in ``sysmeta_pyxb`` must be valid for the operation being
      performed.
    """
    # TODO: Make sure that old sections are removed if not included in update.
    pid = d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
    if sciobj_url is None:
        sciobj_url = d1_gmn.app.sciobj_store.get_rel_sciobj_file_url_by_pid(pid)
    try:
        sciobj_model = d1_gmn.app.model_util.get_sci_model(pid)
    except d1_gmn.app.models.ScienceObject.DoesNotExist:
        # No existing row for this PID: this is a create, not an update.
        sciobj_model = d1_gmn.app.models.ScienceObject()
        sciobj_model.pid = d1_gmn.app.did.get_or_create_did(pid)
    sciobj_model.url = sciobj_url
    sciobj_model.serial_version = sysmeta_pyxb.serialVersion
    sciobj_model.uploaded_timestamp = d1_common.date_time.normalize_datetime_to_utc(
        sysmeta_pyxb.dateUploaded
    )
    _base_pyxb_to_model(sciobj_model, sysmeta_pyxb)
    # First save so related rows created below can reference this model.
    sciobj_model.save()
    if _has_media_type_pyxb(sysmeta_pyxb):
        _media_type_pyxb_to_model(sciobj_model, sysmeta_pyxb)
    _access_policy_pyxb_to_model(sciobj_model, sysmeta_pyxb)
    if _has_replication_policy_pyxb(sysmeta_pyxb):
        _replication_policy_pyxb_to_model(sciobj_model, sysmeta_pyxb)
    replica_pyxb_to_model(sciobj_model, sysmeta_pyxb)
    revision_pyxb_to_model(sciobj_model, sysmeta_pyxb, pid)
    sciobj_model.save()
    return sciobj_model
def _access_policy_pyxb_to_model(sci_model, sysmeta_pyxb):
    """Create or update the database representation of the sysmeta_pyxb
    access policy.

    If called without an access policy, any existing permissions on the
    object are removed and the access policy for the rights holder is
    recreated.

    Preconditions:
    - Each subject has been verified to a valid DataONE account.
    - Subject has changePermission for object.

    Postconditions:
    - The Permission and related tables contain the new access policy.

    Notes:
    - A subject can appear multiple times in a policy (several rules, or
      repeated within a rule), possibly with conflicting action levels.
      This is resolved by keeping a single row per subject per object that
      holds the highest action level granted.
    """
    _delete_existing_access_policy(sysmeta_pyxb)
    # The rights holder always gets an implicit allow rule with full
    # (changePermission) access.
    rights_holder_rule = d1_common.types.dataoneTypes.AccessRule()
    rights_holder_rule.permission.append(
        d1_common.types.dataoneTypes.Permission(
            d1_gmn.app.auth.CHANGEPERMISSION_STR
        )
    )
    rights_holder_rule.subject.append(
        d1_common.xml.get_req_val(sysmeta_pyxb.rightsHolder)
    )
    _insert_permission_rows(
        sci_model,
        rights_holder_rule,
        _get_highest_level_action_for_rule(rights_holder_rule),
    )
    # Create db rows for all subjects that were granted permissions
    # explicitly in the access policy.
    if _has_access_policy_pyxb(sysmeta_pyxb):
        for allow_rule in sysmeta_pyxb.accessPolicy.allow:
            _insert_permission_rows(
                sci_model,
                allow_rule,
                _get_highest_level_action_for_rule(allow_rule),
            )
def add_arguments(self, parser):
    """Register the command line arguments for this management command.

    Args:
        parser: argparse.ArgumentParser instance supplied by the Django
            management command framework, to which the arguments are added.
    """
    parser.description = __doc__
    parser.formatter_class = argparse.RawDescriptionHelpFormatter
    parser.add_argument("--debug", action="store_true", help="Debug level logging")
    parser.add_argument(
        "--force",
        action="store_true",
        help="Import even if local database is not empty",
    )
    parser.add_argument("--clear", action="store_true", help="Clear local database")
    parser.add_argument(
        "--cert-pub",
        dest="cert_pem_path",
        action="store",
        help="Path to PEM formatted public key of certificate",
    )
    parser.add_argument(
        "--cert-key",
        dest="cert_key_path",
        action="store",
        help="Path to PEM formatted private key of certificate",
    )
    parser.add_argument(
        "--public",
        action="store_true",
        help="Do not use certificate even if available",
    )
    parser.add_argument(
        "--timeout",
        type=float,
        action="store",
        default=DEFAULT_TIMEOUT_SEC,
        help="Timeout for D1 API call to the source MN",
    )
    parser.add_argument(
        "--workers",
        type=int,
        action="store",
        default=DEFAULT_N_WORKERS,
        help="Max number workers making concurrent connections to the source MN",
    )
    parser.add_argument(
        "--object-page-size",
        type=int,
        action="store",
        default=d1_common.const.DEFAULT_SLICE_SIZE,
        help="Number of objects to retrieve in each listObjects() call",
    )
    parser.add_argument(
        "--log-page-size",
        type=int,
        action="store",
        default=d1_common.const.DEFAULT_SLICE_SIZE,
        help="Number of log records to retrieve in each getLogRecords() call",
    )
    parser.add_argument(
        "--major",
        type=int,
        action="store",
        help="Use API major version instead of finding by connecting to CN",
    )
    parser.add_argument(
        "--only-log", action="store_true", help="Only import event logs"
    )
    parser.add_argument(
        "--max-obj",
        type=int,
        action="store",
        help="Limit number of objects to import",
    )
    parser.add_argument("baseurl", help="Source MN BaseURL")
def handle(self, *args, **opt):
    """Entry point for the management command.

    :param args: Positional arguments from the Django command framework.
    :param opt: Parsed command line options.
    """
    d1_gmn.app.management.commands.util.util.log_setup(opt["debug"])
    logging.info("Running management command: {}".format(__name__))
    # Only one instance of this command may run at a time.
    d1_gmn.app.management.commands.util.util.exit_if_other_instance_is_running(
        __name__
    )
    self._opt = opt
    try:
        self._handle()
    except d1_common.types.exceptions.DataONEException as e:
        logging.error(str(e))
        raise django.core.management.base.CommandError(str(e))
    self._events.dump_to_log()
def _create_log_entry(self, log_record_pyxb):
    """Create a local event log entry from a LogRecord PyXB object and
    backdate its timestamp to the time the original event was logged.
    """
    sciobj_model = d1_gmn.app.model_util.get_sci_model(
        d1_common.xml.get_req_val(log_record_pyxb.identifier)
    )
    event_log_model = d1_gmn.app.event_log.create_log_entry(
        sciobj_model,
        log_record_pyxb.event,
        log_record_pyxb.ipAddress,
        log_record_pyxb.userAgent,
        log_record_pyxb.subject.value(),
    )
    # Preserve the original event time instead of the import time.
    event_log_model.timestamp = d1_common.date_time.normalize_datetime_to_utc(
        log_record_pyxb.dateLogged
    )
    event_log_model.save()
def _download_source_sciobj_bytes_to_store(self, client, pid):
    """Download the sciobj bytes for ``pid`` from the source node into the
    local sciobj store, skipping the download if the file already exists.
    """
    if not d1_gmn.app.sciobj_store.is_existing_sciobj_file(pid):
        with d1_gmn.app.sciobj_store.open_sciobj_file_by_pid(
            pid, write=True
        ) as sciobj_file:
            client.get_and_save(pid, sciobj_file)
    else:
        self._events.log_and_count(
            "Skipped download of existing sciobj bytes", 'pid="{}"'.format(pid)
        )
def _assert_path_is_dir(self, dir_path):
    """Raise CommandError unless ``dir_path`` is an existing directory."""
    if os.path.isdir(dir_path):
        return
    raise django.core.management.base.CommandError(
        'Invalid dir path. path="{}"'.format(dir_path)
    )
def deserialize_subject_info(subject_info_xml_path):
    """Read a SubjectInfo XML file and return it deserialized to a PyXB
    object.

    Raises InvalidToken if the file contents cannot be deserialized.
    """
    try:
        with open(subject_info_xml_path) as f:
            subject_info_xml = f.read()
        return d1_common.xml.deserialize(subject_info_xml)
    except ValueError as e:
        raise d1_common.types.exceptions.InvalidToken(
            0,
            'Could not deserialize SubjectInfo. subject_info="{}", error="{}"'.format(
                subject_info_xml_path, str(e)
            ),
        )
def HEAD(self, rest_path_list, **kwargs):
    """Send a HEAD request. See requests.sessions.request for optional
    parameters.

    :returns: Response object
    """
    # HEAD requests do not follow redirects unless explicitly requested.
    if "allow_redirects" not in kwargs:
        kwargs["allow_redirects"] = False
    return self._request("HEAD", rest_path_list, **kwargs)
def POST(self, rest_path_list, **kwargs):
    """Send a POST request with optional streaming multipart encoding. See
    requests.sessions.request for optional parameters.

    To post regular data, pass a string, iterator or generator as the
    ``data`` argument. To post a multipart stream, pass a dictionary of
    multipart elements as the ``fields`` argument. E.g.:

        fields = {
            'field0': 'value',
            'field1': 'value',
            'field2': ('filename.xml', open('file.xml', 'rb'), 'application/xml')
        }

    :returns: Response object
    """
    fields = kwargs.pop("fields", None)
    if fields is None:
        return self._request("POST", rest_path_list, **kwargs)
    return self._send_mmp_stream("POST", rest_path_list, fields, **kwargs)
def PUT(self, rest_path_list, **kwargs):
    """Send a PUT request with optional streaming multipart encoding. See
    requests.sessions.request for optional parameters and post() for the
    meaning of the ``fields`` argument.

    :returns: Response object
    """
    fields = kwargs.pop("fields", None)
    if fields is None:
        return self._request("PUT", rest_path_list, **kwargs)
    return self._send_mmp_stream("PUT", rest_path_list, fields, **kwargs)
def get_curl_command_line(self, method, url, **kwargs):
    """Return the request as a cURL command line for debugging.

    :param method: HTTP verb, e.g. "GET" or "HEAD".
    :param url: Request URL.
    :param kwargs: May contain "query" (dict of query parameters to append
        to the URL) and "headers" (dict of HTTP headers).
    :returns: str: cURL command line.
    """
    if kwargs.get("query"):
        url = "{}?{}".format(url, d1_common.url.urlencode(kwargs["query"]))
    curl_list = ["curl"]
    if method.lower() == "head":
        curl_list.append("--head")
    else:
        curl_list.append("-X {}".format(method))
    # Sort headers so the generated command line is deterministic.
    # Fixed: previously raised KeyError when "headers" was not passed.
    for k, v in sorted(kwargs.get("headers", {}).items()):
        curl_list.append('-H "{}: {}"'.format(k, v))
    curl_list.append("{}".format(url))
    return " ".join(curl_list)
def dump_request_and_response(self, response):
    """Return a string containing a nicely formatted representation of the
    request and response objects for logging and debugging.

    - Note: Does not work if the request or response body is a
      MultipartEncoder object.
    """
    # The dump utility requires reason to be a string; requests sets it to
    # None for some responses.
    if response.reason is None:
        response.reason = "<unknown>"
    dump_str = requests_toolbelt.utils.dump.dump_response(response)
    return d1_client.util.normalize_request_response_dump(dump_str)
def _timeout_to_float(self, timeout):
    """Convert timeout to float.

    Return None if timeout is None, 0 or 0.0. timeout=None disables
    timeouts in Requests.
    """
    if timeout is None:
        return None
    try:
        timeout_float = float(timeout)
    except ValueError:
        raise ValueError(
            'timeout_sec must be a valid number or None. timeout="{}"'.format(
                timeout
            )
        )
    # 0 and 0.0 map to None (no timeout), matching the documented contract.
    return timeout_float or None
def main():
    """Print the RedBaron syntax tree for a Python module."""
    arg_parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    arg_parser.add_argument("path", help="Python module path")
    parsed_args = arg_parser.parse_args()
    tree = d1_dev.util.redbaron_module_path_to_tree(parsed_args.path)
    print(tree.help(True))
def abs_path_from_base(base_path, rel_path):
    """Join a base and a relative path and return an absolute path to the
    resulting location.

    Args:
      base_path: str
        Relative or absolute path to prepend to ``rel_path``.
      rel_path: str
        Path relative to the location of the module file from which this
        function is called.

    Returns:
      str : Absolute path to the location specified by ``rel_path``.
    """
    # noinspection PyProtectedMember
    # _getframe(1) is the caller's frame; its code object's filename locates
    # the module from which this function was called. This call must stay
    # directly inside this function for the frame depth to be correct.
    caller_dir = os.path.dirname(sys._getframe(1).f_code.co_filename)
    return os.path.abspath(os.path.join(caller_dir, base_path, rel_path))
def abs_path(rel_path):
    """Convert a path that is relative to the module from which this
    function is called, to an absolute path.

    Args:
      rel_path: str
        Path relative to the location of the module file from which this
        function is called.

    Returns:
      str : Absolute path to the location specified by ``rel_path``.
    """
    # noinspection PyProtectedMember
    # _getframe(1) is the caller's frame; this call must stay directly
    # inside this function for the frame depth to be correct.
    caller_dir = os.path.dirname(sys._getframe(1).f_code.co_filename)
    return os.path.abspath(os.path.join(caller_dir, rel_path))
def get_xml(self, encoding='unicode'):
    """Serialize the wrapper.

    Returns:
      str : Current state of the wrapper as XML.
    """
    return xml.etree.ElementTree.tostring(self._root_el, encoding=encoding)
def get_pretty_xml(self, encoding='unicode'):
    """Serialize the wrapper with pretty printing.

    Returns:
      str : Current state of the wrapper as a pretty printed XML string.
    """
    xml_str = xml.etree.ElementTree.tostring(self._root_el, encoding)
    return d1_common.xml.reformat_to_pretty_xml(xml_str)
def get_xml_below_element(self, el_name, el_idx=0, encoding='unicode'):
    """Serialize the branch rooted at the selected element.

    Args:
      el_name : str
        Name of element that is the base of the branch to retrieve.
      el_idx : int
        Index of element to use as base in the event that there are
        multiple sibling elements with the same name.

    Returns:
      str : XML fragment rooted at the selected element.
    """
    branch_root_el = self.get_element_by_name(el_name, el_idx)
    return xml.etree.ElementTree.tostring(branch_root_el, encoding)
def get_element_by_xpath(self, xpath_str, namespaces=None):
    """Find elements by XPath.

    Args:
      xpath_str : str
        XPath matching the elements for which to search.
      namespaces : dict
        Optional prefix -> URI namespace map passed to findall().

    Returns:
      list : List of elements matching ``xpath_str``. If there are no
      matching elements, an empty list is returned.
    """
    try:
        # The leading "." anchors the search at the wrapper's root element.
        return self._root_el.findall('.' + xpath_str, namespaces)
    except (ValueError, xml.etree.ElementTree.ParseError) as e:
        msg = 'XPath select raised exception. xpath_str="{}" error="{}"'.format(
            xpath_str, str(e)
        )
        raise SimpleXMLWrapperException(msg)
def get_element_by_name(self, el_name, el_idx=0):
    """Select an element by name and sibling index.

    Args:
      el_name : str
        Name of element to get.
      el_idx : int
        Index of element to use as base in the event that there are
        multiple sibling elements with the same name.

    Returns:
      element : The selected element.
    """
    matching_el_list = self.get_element_list_by_name(el_name)
    try:
        return matching_el_list[el_idx]
    except IndexError:
        raise SimpleXMLWrapperException(
            'Element not found. element_name="{}" requested_idx={} '
            'available_elements={}'.format(
                el_name, el_idx, len(matching_el_list)
            )
        )
def get_element_by_attr_key(self, attr_key, el_idx=0):
    """Select an element by attribute key and sibling index.

    Args:
      attr_key : str
        Name of attribute for which to search.
      el_idx : int
        Index of element to use as base in the event that there are
        multiple sibling elements with the same name.

    Returns:
      Element containing an attribute key named ``attr_key``.
    """
    matching_el_list = self.get_element_list_by_attr_key(attr_key)
    try:
        return matching_el_list[el_idx]
    except IndexError:
        raise SimpleXMLWrapperException(
            'Element with tag not found. tag_name="{}" requested_idx={} '
            'available_elements={}'.format(
                attr_key, el_idx, len(matching_el_list)
            )
        )
def set_element_text(self, el_name, el_text, el_idx=0):
    """Set the text of the selected element.

    Args:
      el_name : str
        Name of element to update.
      el_text : str
        Text to set for element.
      el_idx : int
        Index of element to use in the event that there are multiple
        sibling elements with the same name.
    """
    target_el = self.get_element_by_name(el_name, el_idx)
    target_el.text = el_text
def set_element_text_by_attr_key(self, attr_key, el_text, el_idx=0):
    """Set the text of the element selected by attribute key.

    Args:
      attr_key : str
        Name of attribute for which to search.
      el_text : str
        Text to set for element.
      el_idx : int
        Index of element to use in the event that there are multiple
        sibling elements with the same name.
    """
    target_el = self.get_element_by_attr_key(attr_key, el_idx)
    target_el.text = el_text
def get_attr_value(self, attr_key, el_idx=0):
    """Return the value of the selected attribute in the selected element.

    Args:
      attr_key : str
        Name of attribute for which to search.
      el_idx : int
        Index of element to use in the event that there are multiple
        sibling elements with the same name.

    Returns:
      str : Value of the selected attribute in the selected element.
    """
    selected_el = self.get_element_by_attr_key(attr_key, el_idx)
    return selected_el.attrib[attr_key]
def set_attr_text(self, attr_key, attr_val, el_idx=0):
    """Set the value of the selected attribute of the selected element.

    Args:
      attr_key : str
        Name of attribute for which to search.
      attr_val : str
        Text to set for the attribute.
      el_idx : int
        Index of element to use in the event that there are multiple
        sibling elements with the same name.
    """
    selected_el = self.get_element_by_attr_key(attr_key, el_idx)
    selected_el.attrib[attr_key] = attr_val
def get_element_dt(self, el_name, tz=None, el_idx=0):
    """Return the text of the selected element as a ``datetime.datetime``
    object.

    The element text must be an ISO 8601 formatted datetime.

    Args:
      el_name : str
        Name of element to use.
      tz : datetime.tzinfo
        Timezone in which to return the datetime.
        - If dt has timezone: The ``tz`` parameter is ignored.
        - If dt is naive (without timezone): The timezone is set to ``tz``.
        - ``tz=None``: Prevent naive dt from being set to a timezone.
          Without a timezone, other contextual information is required in
          order to determine the exact represented time.
        - ``tz=d1_common.date_time.UTC()``: Set naive dt to UTC.
      el_idx : int
        Index of element to use in the event that there are multiple
        sibling elements with the same name.

    Returns:
      datetime.datetime
    """
    dt_el = self.get_element_by_name(el_name, el_idx)
    return iso8601.parse_date(dt_el.text, tz)
def set_element_dt(self, el_name, dt, tz=None, el_idx=0):
    """Set the text of the selected element to an ISO 8601 formatted
    datetime.

    Args:
      el_name : str
        Name of element to update.
      dt : datetime.datetime
        Date and time to set.
      tz : datetime.tzinfo
        Timezone to set.
        - If dt has timezone: The ``tz`` parameter is ignored.
        - If dt is naive (without timezone): The timezone is set to ``tz``.
        - ``tz=None``: Prevent naive dt from being set to a timezone.
          Without a timezone, other contextual information is required in
          order to determine the exact represented time.
        - ``tz=d1_common.date_time.UTC()``: Set naive dt to UTC.
      el_idx : int
        Index of element to use in the event that there are multiple
        sibling elements with the same name.
    """
    adjusted_dt = d1_common.date_time.cast_naive_datetime_to_tz(dt, tz)
    self.get_element_by_name(el_name, el_idx).text = adjusted_dt.isoformat()
def replace_by_etree(self, root_el, el_idx=0):
    """Replace the element that has the same name as ``root_el`` with
    ``root_el``.

    ``root_el`` can be a single element or the root of an element tree.

    Args:
      root_el : element
        New element that will replace the existing element.
      el_idx : int
        Index of element to use in the event that there are multiple
        sibling elements with the same name.
    """
    existing_el = self.get_element_by_name(root_el.tag, el_idx)
    # Copy children and attributes in place so existing references to the
    # element remain valid.
    existing_el[:] = list(root_el)
    existing_el.attrib = root_el.attrib
def replace_by_xml(self, xml_str, el_idx=0):
    """Replace the element that has the same name as the root element of
    ``xml_str`` with the parsed ``xml_str``.

    - ``xml_str`` must have a single element in the root.
    - The root element in ``xml_str`` can have an arbitrary number of
      children.

    Args:
      xml_str : str
        New element that will replace the existing element.
      el_idx : int
        Index of element to use in the event that there are multiple
        sibling elements with the same name.
    """
    parsed_root_el = self.parse_xml(xml_str)
    self.replace_by_etree(parsed_root_el, el_idx)
def as_sql(self, compiler, connection):  # pylint: disable=arguments-differ
    """Compile SQL for this function, appending the JSON path as the final
    query parameter."""
    compiled_sql, sql_params = super().as_sql(compiler, connection)
    sql_params.append(self.path)
    return compiled_sql, sql_params
def with_json_path(self, path, field=None):
    """Annotate Storage objects with the value at a specific JSON path.

    :param path: Path to get inside the stored object, which can be
        either a list of path components or a comma-separated
        string
    :param field: Optional output field name; generated from the path
        components when omitted
    """
    field_name = field
    if field_name is None:
        field_name = '_'.join(['json'] + json_path_components(path))
    annotation = {field_name: JsonGetPath('json', path)}
    return self.defer('json').annotate(**annotation)
def get_json_path(self, path):
    """Return only the value at a specific JSON path of Storage objects.

    :param path: Path to get inside the stored object, which can be
        either a list of path components or a comma-separated
        string
    """
    annotated = self.with_json_path(path, field='result')
    return annotated.values_list('result', flat=True)
def _get_storage(self):
    """Lazily load the ``json`` field from the matching `Storage` object."""
    if self._json is not None:
        return
    self._json = Storage.objects.get(**self._kwargs).json
def database_forwards(self, app_label, schema_editor, from_state, to_state):
    """Perform forward migration of process schemas and their data."""
    Process = from_state.apps.get_model('flow', 'Process')  # pylint: disable=invalid-name
    Data = from_state.apps.get_model('flow', 'Data')  # pylint: disable=invalid-name
    try:
        # pylint: disable=invalid-name
        ProcessMigrationHistory = from_state.apps.get_model('flow', 'ProcessMigrationHistory')
        DataMigrationHistory = from_state.apps.get_model('flow', 'DataMigrationHistory')
    except LookupError:
        raise LookupError(
            "Unable to retrieve migration history models. Perhaps you need "
            "to add a migration dependency to a recent enough Resolwe flow "
            "app in your migration?"
        )

    # Migrate all processes with a matching slug.
    processes = Process.objects.filter(slug=self.process)
    if not processes.exists():
        return

    schema_field = '{}_schema'.format(self.schema_type)
    migrated_processes = set()
    for process in processes:
        current_schema = getattr(process, schema_field)
        if not self.migrate_process_schema(process, current_schema, from_state):
            continue

        setattr(process, schema_field, current_schema)
        process.save()
        migrated_processes.add(process)

        # Record the process migration.
        ProcessMigrationHistory.objects.create(
            migration=self.migration_id,
            process=process,
            metadata=self.describe_process_migration(process),
        )

    if not migrated_processes:
        return

    # Migrate all data objects belonging to the migrated processes.
    data = Data.objects.filter(process__in=migrated_processes)
    self.migrate_data(data, from_state)

    # Record the data migrations.
    for datum in data:
        DataMigrationHistory.objects.create(
            migration=self.migration_id,
            data=datum,
            metadata=self.describe_data_migration(datum),
        )
def deconstruct(self):
    """Deconstruct the operation for serialization in migration files."""
    kwargs = {
        'process': self.process,
        'field': self._raw_field,
        'schema': self.schema,
        'default': self.default,
    }
    return (self.__class__.__name__, [], kwargs)
def migrate_process_schema(self, process, schema, from_state):
    """Migrate a process schema by adding the new field.

    :param process: Process instance
    :param schema: Process schema to migrate
    :param from_state: Database model state
    :return: True if the process was migrated, False otherwise
    """
    container = dict_dot(schema, '.'.join(self.field[:-1]), default=list)
    target_name = self.field[-1]
    # Skip processes that already contain the target field with exactly the
    # target schema; an existing field with a different schema is an error.
    for existing in container:
        if existing['name'] != target_name:
            continue
        if existing == self.schema:
            return False
        raise ValueError(
            "Failed to migrate schema for process '{process}' as the field '{field}' "
            "already exists and has an incompatible schema".format(
                process=process.slug,
                field=target_name
            )
        )
    # Add the new field to the container.
    container.append(self.schema)
    return True
def migrate_data(self, data, from_state):
    """Set default values on migrated data objects.

    :param data: Queryset containing all data objects that need
        to be migrated
    :param from_state: Database model state
    """
    if not self.default:
        return

    self.default.prepare(data, from_state)
    field_path = '.'.join(self.field)
    for instance in data:
        value = self.default.get_default_for(instance, from_state)
        # Optional fields may be left without a value.
        if not value and not self.schema.get('required', True):
            continue

        # Store the default value into the instance's schema container.
        container = getattr(instance, self.schema_type, {})
        dict_dot(container, field_path, value)
        setattr(instance, self.schema_type, container)
        instance.save()
def deconstruct(self):
    """Deconstruct the operation for serialization in migration files."""
    kwargs = {
        'process': self.process,
        'field': self._raw_field,
        'new_field': self.new_field,
    }
    return (self.__class__.__name__, [], kwargs)
def migrate_process_schema(self, process, schema, from_state):
    """Rename a field in the process schema.

    :param process: Process instance
    :param schema: Process schema to migrate
    :param from_state: Database model state
    :return: True if the process was migrated, False otherwise
    """
    container = dict_dot(schema, '.'.join(self.field[:-1]), default=list)
    old_name, new_name = self.field[-1], self.new_field
    for field in container:
        if field['name'] == old_name:
            # Rename in place.
            field['name'] = new_name
            return True
        if field['name'] == new_name:
            # Target field already present; nothing to rename.
            return False
    # Neither the old nor the new field exists.
    if not self.skip_no_field:
        raise ValueError(
            "Unable to rename: there is no field with name '{field}' or '{new_field}'.".format(
                field=old_name,
                new_field=new_name,
            )
        )
    return False
def migrate_data(self, data, from_state):
    """Rename the field on all data objects.

    :param data: Queryset containing all data objects that need
        to be migrated
    :param from_state: Database model state
    """
    old_name = self.field[-1]
    for instance in data:
        # Skip data objects in error state.
        if instance.status == 'ER':
            continue

        container = getattr(instance, self.schema_type, {})
        container[self.new_field] = container.pop(old_name)
        setattr(instance, self.schema_type, container)
        instance.save()
def database_forwards(self, app_label, schema_editor, from_state, to_state):
    """Remove temporary and erroneous data objects."""
    from resolwe.flow.models.data import DataQuerySet

    # pylint: disable=protected-access
    Data = from_state.apps.get_model('flow', 'Data')  # pylint: disable=invalid-name
    for queryset in (Data.objects.filter(process__persistence='TMP'),
                     Data.objects.filter(status='ER')):
        DataQuerySet._delete_chunked(queryset)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
    """Delete a process together with all of its data objects."""
    from resolwe.flow.models.data import DataQuerySet

    # pylint: disable=protected-access
    Data = from_state.apps.get_model('flow', 'Data')  # pylint: disable=invalid-name
    # Remove the data objects first, then the process itself.
    DataQuerySet._delete_chunked(Data.objects.filter(process__slug=self.process))

    Process = from_state.apps.get_model('flow', 'Process')  # pylint: disable=invalid-name
    Process.objects.filter(slug=self.process).delete()
def migrate_process_schema(self, process, schema, from_state):
    """Change the type of the process.

    :param process: Process instance
    :param schema: Process schema to migrate
    :param from_state: Database model state
    :return: True if the process was migrated, False otherwise
    """
    # Nothing to do when the process already has the target type.
    if process.type == self.new_type:
        return False

    process.type = self.new_type
    return True
def database_forwards(self, app_label, schema_editor, from_state, to_state):
    """Validate process type consistency after the migration."""
    Process = from_state.apps.get_model('flow', 'Process')  # pylint: disable=invalid-name

    errors = validate_process_types(Process.objects.all())
    if errors:
        raise ValueError("Process type consistency check failed after migration: {}".format(errors))
def home(request):
    """Home page.

    The root of the web server should redirect to here.
    """
    # Normalize trailing-slash URLs to the canonical form.
    if request.path.endswith('/'):
        return django.http.HttpResponseRedirect(request.path[:-1])
    return django.http.HttpResponse(
        generate_status_xml(), d1_common.const.CONTENT_TYPE_XML
    )
def error_404(request, exception):
    """Handle 404s outside of the valid API URL endpoints.

    Note: Cannot raise NotFound() here, as this method is not covered by the
    GMN middleware handler that catches DataONE exceptions raised by normal
    views.
    """
    not_found = d1_common.types.exceptions.NotFound(
        0,
        'Invalid API endpoint',
        # Include the regexes the URL was tested against
        # traceInformation=str(exception),
        nodeId=django.conf.settings.NODE_IDENTIFIER,
    )
    return django.http.HttpResponseNotFound(
        not_found.serialize_to_transport(xslt_url=django.urls.base.reverse('home_xslt')),
        d1_common.const.CONTENT_TYPE_XML,
    )
def get_obj_store_free_space_bytes():
    """Return total free space available on the disk on which the object
    storage resides (in bytes).

    Uses the platform-specific Win32 API on Windows and ``os.statvfs``
    elsewhere.
    """
    obj_store_path = django.conf.settings.OBJECT_STORE_PATH
    if platform.system() == 'Windows':
        free_bytes = ctypes.c_ulonglong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceExW(
            ctypes.c_wchar_p(obj_store_path), None, None, ctypes.pointer(free_bytes)
        )
        return free_bytes.value
    # Fix: call statvfs only once instead of twice for the same path.
    stat = os.statvfs(obj_store_path)
    return stat.f_bfree * stat.f_frsize
def m2Lambda_to_vMh2(m2, Lambda, C):
    """Numerically determine the physical Higgs VEV and mass given the
    parameters of the Higgs potential."""
    try:
        # Leading-order VEV plus the correction from the phi operator.
        v = (sqrt(2 * m2 / Lambda)
             + 3 * m2**(3 / 2) / (sqrt(2) * Lambda**(5 / 2)) * C['phi'])
    except ValueError:
        # Negative argument to sqrt: no non-trivial minimum.
        v = 0
    Mh2 = 2 * m2 * (1 - m2 / Lambda * (3 * C['phi']
                                       - 4 * Lambda * C['phiBox']
                                       + Lambda * C['phiD']))
    return {'v': v, 'Mh2': Mh2}
def vMh2_to_m2Lambda(v, Mh2, C):
    """Numerically determine the parameters of the Higgs potential given the
    physical Higgs VEV and mass."""
    if C['phi'] == 0 and C['phiBox'] == 0 and C['phiD'] == 0:
        # Without these dimension-6 contributions the SM relations apply.
        return _vMh2_to_m2Lambda_SM(v, Mh2)

    def residual(x):  # we want the root of this function
        m2, Lambda = x
        d = m2Lambda_to_vMh2(m2=m2.real, Lambda=Lambda.real, C=C)
        return np.array([d['v'] - v, d['Mh2'] - Mh2])

    # Use the SM solution as the starting point for the iteration.
    d_SM = _vMh2_to_m2Lambda_SM(v, Mh2)
    x0 = np.array([d_SM['m2'], d_SM['Lambda']])
    try:
        xres = scipy.optimize.newton_krylov(residual, x0)
    except (scipy.optimize.nonlin.NoConvergence, ValueError):
        warnings.warn('Standard optimization method did not converge. The GMRES method is used instead.', Warning)
        try:
            xres = scipy.optimize.newton_krylov(residual, x0, method='gmres',
                                                f_tol=1e-7)
        except (scipy.optimize.nonlin.NoConvergence, ValueError):
            raise ValueError("No solution for m^2 and Lambda found. This problem can be caused by very large values for one or several Wilson coefficients.")
    return {'m2': xres[0], 'Lambda': xres[1]}
def get_gpbar(ebar, gbar, v, C):
    r"""Numerically determine the hypercharge gauge coupling in terms of
    $\bar e$, $\bar g$, v, and the Wilson coefficients."""
    if C['phiWB'] == 0:
        # Trivial case: the relation can be inverted analytically.
        gpbar = ebar * gbar / sqrt(gbar**2 - ebar**2)
    else:
        # With non-zero phiWB the relation must be inverted numerically.
        def residual(gpb):  # we want the root of this function
            gb = gbar
            eps = C['phiWB'] * (v**2)
            ebar_calc = (gb * gpb / sqrt(gb**2 + gpb**2) *
                         (1 - eps * gb * gpb / (gb**2 + gpb**2)))
            return (ebar_calc - ebar).real
        try:
            gpbar = scipy.optimize.brentq(residual, 0, 3)
        except (scipy.optimize.nonlin.NoConvergence, ValueError):
            raise ValueError("No solution for gp found. This problem can be caused by very large values for one or several Wilson coefficients.")
    return gpbar * (1 - C['phiB'] * (v**2))
def smeftpar(scale, C, basis):
    """Get the running parameters in SMEFT.

    NOTE(review): ``scale`` is not referenced in this body; all inputs are
    taken from the global parameter dictionary ``p``.
    """
    MW = p['m_W']
    # MZ = p['m_Z']
    GF = p['GF']
    Mh = p['m_h']
    vb = sqrt(1 / sqrt(2) / GF)
    v = vb  # TODO
    # Invert the Higgs potential relations numerically.
    _d = vMh2_to_m2Lambda(v=v, Mh2=Mh**2, C=C)
    m2 = _d['m2'].real
    Lambda = _d['Lambda'].real
    # Gauge couplings including the corrections from the phiX operators.
    gsbar = sqrt(4 * pi * p['alpha_s'])
    gs = (1 - C['phiG'] * (v**2)) * gsbar
    gbar = 2 * MW / v
    g = gbar * (1 - C['phiW'] * (v**2))
    ebar = sqrt(4 * pi * p['alpha_e'])
    gp = get_gpbar(ebar, gbar, v, C)
    c = {'m2': m2, 'Lambda': Lambda, 'g': g, 'gp': gp, 'gs': gs}
    # Yukawa matrices in the chosen operator basis.
    K = ckmutil.ckm.ckm_tree(p['Vus'], p['Vub'], p['Vcb'], p['delta'])
    if basis == 'Warsaw':
        Mu = K.conj().T @ np.diag([p['m_u'], p['m_c'], p['m_t']])
        Md = np.diag([p['m_d'], p['m_s'], p['m_b']])
    elif basis == 'Warsaw up':
        Mu = np.diag([p['m_u'], p['m_c'], p['m_t']])
        Md = K @ np.diag([p['m_d'], p['m_s'], p['m_b']])
    else:
        raise ValueError("Basis '{}' not supported".format(basis))
    Me = np.diag([p['m_e'], p['m_mu'], p['m_tau']])
    c['Gd'] = Md / (v / sqrt(2)) + C['dphi'] * (v**2) / 2
    c['Gu'] = Mu / (v / sqrt(2)) + C['uphi'] * (v**2) / 2
    c['Ge'] = Me / (v / sqrt(2)) + C['ephi'] * (v**2) / 2
    return c
def smpar(C):
    """Get the running effective SM parameters."""
    m2 = C['m2'].real
    Lambda = C['Lambda'].real
    # Higgs VEV and mass including dimension-6 corrections.
    v = (sqrt(2 * m2 / Lambda) + 3 * m2**(3 / 2) /
         (sqrt(2) * Lambda**(5 / 2)) * C['phi'])
    GF = 1 / (sqrt(2) * v**2)  # TODO
    Mh2 = 2 * m2 * (1 - m2 / Lambda * (3 * C['phi'] - 4 * Lambda * C['phiBox'] +
                                       Lambda * C['phiD']))
    # Barred gauge couplings.
    eps = C['phiWB'] * (v**2)
    gb = (C['g'] / (1 - C['phiW'] * (v**2))).real
    gpb = (C['gp'] / (1 - C['phiB'] * (v**2))).real
    gsb = (C['gs'] / (1 - C['phiG'] * (v**2))).real
    # Gauge boson masses.
    MW = gb * v / 2
    ZG0 = 1 + C['phiD'] * (v**2) / 4
    MZ = (sqrt(gb**2 + gpb**2) / 2 * v
          * (1 + eps * gb * gpb / (gb**2 + gpb**2)) * ZG0)
    # Fermion mass matrices and their diagonalization.
    Mnup = -(v**2) * C['llphiphi']
    Mep = v / sqrt(2) * (C['Ge'] - C['ephi'] * (v**2) / 2)
    Mup = v / sqrt(2) * (C['Gu'] - C['uphi'] * (v**2) / 2)
    Mdp = v / sqrt(2) * (C['Gd'] - C['dphi'] * (v**2) / 2)
    UeL, Me, UeR = ckmutil.diag.msvd(Mep)
    UuL, Mu, UuR = ckmutil.diag.msvd(Mup)
    UdL, Md, UdR = ckmutil.diag.msvd(Mdp)
    UnuL, Mnu = ckmutil.diag.mtakfac(Mnup)
    eb = (gb * gpb / sqrt(gb**2 + gpb**2) *
          (1 - eps * gb * gpb / (gb**2 + gpb**2)))
    K = UuL.conj().T @ UdL
    # U = UeL.conj().T @ UnuL
    sm = {
        'GF': GF,
        'alpha_e': eb**2 / (4 * pi),
        'alpha_s': gsb**2 / (4 * pi),
        'Vub': abs(K[0, 2]),
        'Vcb': abs(K[1, 2]),
        'Vus': abs(K[0, 1]),
        'delta': phase(-K[0, 0] * K[0, 2].conj()
                       / (K[1, 0] * K[1, 2].conj())),
        # 'U': Uu
        'm_W': MW,
        'm_Z': MZ,
        'm_h': sqrt(abs(Mh2)),
        'm_u': Mu[0], 'm_c': Mu[1], 'm_t': Mu[2],
        'm_d': Md[0], 'm_s': Md[1], 'm_b': Md[2],
        'm_e': Me[0], 'm_mu': Me[1], 'm_tau': Me[2],
    }
    return {k: val.real for k, val in sm.items()}
def scale_8(b):
    """Translations necessary for class-8 coefficients to go from a basis
    with only non-redundant WCxf operators to a basis where the Wilson
    coefficients are symmetrized like the operators."""
    out = np.array(b, copy=True, dtype=complex)
    for i in range(3):
        # Entries related only by a symmetry factor.
        out[0, 0, 1, i] = 1/2 * b[0, 0, 1, i]
        out[0, 0, 2, i] = 1/2 * b[0, 0, 2, i]
        out[0, 1, 1, i] = 1/2 * b[0, 1, 1, i]
        out[0, 2, 2, i] = 1/2 * b[0, 2, 2, i]
        out[1, 1, 2, i] = 1/2 * b[1, 1, 2, i]
        out[1, 2, 2, i] = 1/2 * b[1, 2, 2, i]
        # Entries that mix several non-redundant coefficients.
        out[0, 1, 2, i] = 2/3 * b[0, 1, 2, i] - 1/6 * b[0, 2, 1, i] - 1/6 * b[1, 0, 2, i] + 1/6 * b[1, 2, 0, i]
        out[0, 2, 1, i] = - (1/6) * b[0, 1, 2, i] + 2/3 * b[0, 2, 1, i] + 1/6 * b[1, 0, 2, i] + 1/3 * b[1, 2, 0, i]
        out[1, 0, 2, i] = - (1/6) * b[0, 1, 2, i] + 1/6 * b[0, 2, 1, i] + 2/3 * b[1, 0, 2, i] - 1/6 * b[1, 2, 0, i]
        out[1, 2, 0, i] = 1/6 * b[0, 1, 2, i] + 1/3 * b[0, 2, 1, i] - 1/6 * b[1, 0, 2, i] + 2/3 * b[1, 2, 0, i]
    return out
def symmetrize(C):
    """Symmetrize the Wilson coefficient arrays.

    Note that this function does not take into account the symmetry factors
    that occur when transitioning from a basis with only non-redundant
    operators (like in WCxf) to a basis where the Wilson coefficients are
    symmetrized like the operators. See `symmetrize_nonred` for this case.
    """
    # Symmetrizer to apply for each symmetry class.
    handlers = {
        2: symmetrize_2, 4: symmetrize_4, 5: symmetrize_5,
        6: symmetrize_6, 7: symmetrize_7, 8: symmetrize_8,
        9: symmetrize_9,
    }
    C_symm = {}
    for name, value in C.items():
        if name in C_symm_keys[0]:
            # Real coefficients.
            C_symm[name] = value.real
        elif name in C_symm_keys[1] + C_symm_keys[3]:
            C_symm[name] = value  # nothing to do
        else:
            for cls, symmetrizer in handlers.items():
                if name in C_symm_keys[cls]:
                    C_symm[name] = symmetrizer(value)
                    break
    return C_symm
def arrays2wcxf(C):
    """Convert a dict mapping Wilson coefficient names to numbers or numpy
    arrays into a flat dict keyed by the coefficient name followed by an
    underscore and 1-based numeric indices. This is needed for the output
    in WCxf format."""
    flat = {}
    for name, value in C.items():
        if np.shape(value) in ((), (1,)):
            # Scalars are passed through unchanged.
            flat[name] = value
        else:
            for index, entry in np.ndenumerate(value):
                suffix = ''.join(str(j + 1) for j in index)
                flat['{}_{}'.format(name, suffix)] = entry
    return flat
def wcxf2arrays(d):
    """Convert a flat dict keyed by Wilson coefficient name plus underscore
    and numeric indices into a dict mapping coefficient names to numbers or
    numpy arrays. This is needed for the parsing of input in WCxf format."""
    C = {}
    for key, value in d.items():
        name = key.split('_')[0]
        shape = C_keys_shape[name]
        if shape == 1:
            # Scalar coefficient: keep the full key.
            C[key] = value
        else:
            indices = key.split('_')[-1]
            array = C.setdefault(name, np.zeros(shape, dtype=complex))
            array[tuple(int(i) - 1 for i in indices)] = value
    return C
def add_missing(C):
    """Return a copy of ``C`` with zero arrays added for all missing Wilson
    coefficient keys."""
    C_out = C.copy()
    for key in set(WC_keys) - set(C_out):
        C_out[key] = np.zeros(C_keys_shape[key])
    return C_out
def C_array2dict(C):
    """Convert a 1D array containing C values to an ordered dictionary."""
    d = OrderedDict()
    cursor = 0
    for key in C_keys:
        shape = C_keys_shape[key]
        if shape == 1:
            # Scalar entry.
            d[key] = C[cursor]
            cursor += 1
        else:
            size = reduce(operator.mul, shape, 1)
            d[key] = C[cursor:cursor + size].reshape(shape)
            cursor += size
    return d
def C_dict2array(C):
    """Convert an OrderedDict containing C values to a flat 1D array."""
    return np.concatenate([np.ravel(C[key]) for key in C_keys])
def symmetrize_nonred(C):
    """Symmetrize the Wilson coefficient arrays.

    This function takes into account the symmetry factors that occur when
    transitioning from a basis with only non-redundant operators (like in
    WCxf) to a basis where the Wilson coefficients are symmetrized like the
    operators.
    """
    out = {}
    for name, value in C.items():
        if name in C_symm_keys[0]:
            # Real coefficients.
            out[name] = value.real
        elif name in C_symm_keys[1] + C_symm_keys[3]:
            out[name] = value  # nothing to do
        elif name in C_symm_keys[2]:
            out[name] = symmetrize_2(value)
        elif name in C_symm_keys[4]:
            # Symmetrize and divide by the symmetry factors.
            out[name] = symmetrize_4(value) / _d_4
        elif name in C_symm_keys[5]:
            out[name] = symmetrize_5(value)
        elif name in C_symm_keys[6]:
            out[name] = symmetrize_6(value) / _d_6
        elif name in C_symm_keys[7]:
            out[name] = symmetrize_7(value) / _d_7
        elif name in C_symm_keys[8]:
            # Class 8 needs a rescaling before symmetrization.
            out[name] = symmetrize_8(scale_8(value))
        elif name in C_symm_keys[9]:
            out[name] = symmetrize_9(value)
    return out
def unscale_dict(C):
    """Undo the scaling applied in `scale_dict`."""
    C_out = {k: _scale_dict[k] * v for k, v in C.items()}
    # Fix: apply `unscale_8` once per class-8 key actually present, instead
    # of repeatedly unscaling the hard-coded 'qqql' key on every iteration
    # (which would also raise KeyError when 'qqql' is absent).
    for k in C_symm_keys[8]:
        if k in C_out:
            C_out[k] = unscale_8(C_out[k])
    return C_out
def wcxf2arrays_symmetrized(d):
    """Convert a WCxf-style flat dict into a dict of symmetrized arrays.

    In contrast to `wcxf2arrays`, here the numpy arrays fulfill the same
    symmetry relations as the operators (i.e. they contain redundant
    entries) and they do not contain undefined indices. Zero arrays are
    added for missing coefficients.
    """
    return add_missing(symmetrize_nonred(wcxf2arrays(d)))
def commit_signal(data_id):
    """Nudge the manager at the end of every Data object save event."""
    if getattr(settings, 'FLOW_MANAGER_DISABLE_AUTO_CALLS', False):
        return
    immediate = getattr(settings, 'FLOW_MANAGER_SYNC_AUTO_CALLS', False)
    async_to_sync(manager.communicate)(data_id=data_id, save_settings=False, run_sync=immediate)
def manager_post_save_handler(sender, instance, created, **kwargs):
    """Run newly created (spawned) processes."""
    if created or instance.status in (Data.STATUS_DONE, Data.STATUS_ERROR):
        # Run the manager at the end of the potential transaction. Otherwise
        # tasks are sent to workers before the transaction ends and therefore
        # workers cannot access objects created inside the transaction.
        transaction.on_commit(lambda: commit_signal(instance.id))
def delete_entity(sender, instance, **kwargs):
    """Delete the Entity when its last Data object is deleted."""
    # num_data == 1 means the last Data object is about to be deleted.
    Entity.objects.annotate(num_data=Count('data')).filter(data=instance, num_data=1).delete()
def delete_relation(sender, instance, **kwargs):
    """Delete the Relation object when its last Entity is removed."""
    def process_signal(relation_id):
        """Delete the relation if it has no entities left."""
        try:
            relation = Relation.objects.get(pk=relation_id)
        except Relation.DoesNotExist:
            return
        if relation.entities.count() == 0:
            relation.delete()

    # Wait for partitions to be recreated.
    transaction.on_commit(lambda: process_signal(instance.relation_id))
async def run_executor():
    """Instantiate the requested executor and start the actual execution."""
    parser = argparse.ArgumentParser(description="Run the specified executor.")
    parser.add_argument('module', help="The module from which to instantiate the concrete executor.")
    args = parser.parse_args()

    # Import the concrete executor implementation and instantiate it.
    executor_module = import_module('{}.run'.format(args.module), __package__)
    executor = executor_module.FlowExecutor()

    with open(ExecutorFiles.PROCESS_SCRIPT, 'rt') as script_file:
        await executor.run(DATA['id'], script_file.read())
def assert_valid(sysmeta_pyxb, pid):
    """Validate the object for ``pid`` against the schema selected via its
    formatId and raise InvalidRequest if invalid.

    Validation is only performed when:

    - SciMeta validation is enabled
    - and the object size is below the size limit for validation
    - and formatId designates the object as a Science Metadata object which
      is recognized and parsed by DataONE CNs
    - and XML Schema (XSD) files for formatId are present on the local system
    """
    if not _is_validation_enabled():
        return
    if not _is_installed_scimeta_format_id(sysmeta_pyxb):
        return
    if _is_above_size_limit(sysmeta_pyxb):
        if _is_action_accept():
            # Configured to accept oversized objects without validation.
            return
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Science Metadata file is above size limit for validation and this '
            'node has been configured to reject unvalidated Science Metadata '
            'files. For more information, see the SCIMETA_VALIDATE* settings. '
            'size={} size_limit={}'.format(
                sysmeta_pyxb.size, django.conf.settings.SCIMETA_VALIDATION_MAX_SIZE
            ),
        )
    with d1_gmn.app.sciobj_store.open_sciobj_file_by_pid_ctx(pid) as sciobj_file:
        try:
            d1_scimeta.xml_schema.validate(sysmeta_pyxb.formatId, sciobj_file.read())
        except d1_scimeta.xml_schema.SciMetaValidationError as e:
            raise d1_common.types.exceptions.InvalidRequest(0, str(e))
def ready(self):
    """Perform application initialization."""
    # Initialize the type extension composer.
    from . composer import composer
    composer.discover_extensions()

    if sys.argv[1:2] == ['migrate']:
        # Do not register signals and ES indices when migrating: model
        # instances used during migrations do not contain the full
        # functionality of models, things like content types don't work
        # correctly, and signals are not versioned, so they are guaranteed
        # to work only with the last version of the model.
        return

    # Connect all signals.
    from . import signals  # pylint: disable=unused-import
    # Register ES indices.
    from . builder import index_builder
def to_internal_value(self, data):
    """Convert a slug to the latest matching object the user may view."""
    user = getattr(self.context.get('request'), 'user')
    queryset = self.get_queryset()
    permission = get_full_perm('view', queryset.model)
    try:
        candidates = queryset.filter(**{self.slug_field: data})
        return get_objects_for_user(user, permission, candidates).latest()
    except ObjectDoesNotExist:
        self.fail(
            'does_not_exist',
            slug_name=self.slug_field,
            value=smart_text(data),
            model_name=queryset.model._meta.model_name,  # pylint: disable=protected-access
        )
    except (TypeError, ValueError):
        self.fail('invalid')
def dumps(dct, as_dict=False, **kwargs):
    """Dump the dict to json in DynamoDB format.

    You can use any other simplejson or json options.

    :param dct: the dict to dump
    :param as_dict: return the result as a python dict (useful for the
        DynamoDB boto3 library) instead of a json string
    :returns: DynamoDB json format.
    """
    # Round-trip through json first to normalize types (dates etc.), then
    # serialize into the DynamoDB wire format.
    normalized = json.loads(json.dumps(dct, default=json_serial), use_decimal=True)
    serialized = TypeSerializer().serialize(normalized)
    _, payload = next(six.iteritems(serialized))
    if as_dict:
        return payload
    return json.dumps(payload, **kwargs)
def object_hook(dct):
    """DynamoDB object hook to return python values.

    First tries to interpret ``dct`` as a DynamoDB-typed value (e.g.
    ``{'N': '1'}``). When that fails, ``dct`` is treated as a regular python
    dict whose values are normalized (ISO datetime strings and ``Decimal``
    instances are converted).
    """
    try:
        # First - try to parse the dct as a DynamoDB-typed value.
        if 'BOOL' in dct:
            return dct['BOOL']
        if 'S' in dct:
            val = dct['S']
            try:
                return datetime.strptime(val, '%Y-%m-%dT%H:%M:%S.%f')
            except (ValueError, TypeError):
                return str(val)
        if 'SS' in dct:
            return list(dct['SS'])
        if 'N' in dct:
            # Fix: raw string avoids invalid escape sequence warnings.
            if re.match(r"^-?\d+?\.\d+?$", dct['N']) is not None:
                return float(dct['N'])
            # Fix: the original wrapped this in a try/except whose handler
            # re-ran the identical int() call; a single call behaves the
            # same (a failure propagates to the outer handler either way).
            return int(dct['N'])
        if 'B' in dct:
            return str(dct['B'])
        if 'NS' in dct:
            return set(dct['NS'])
        if 'BS' in dct:
            return set(dct['BS'])
        if 'M' in dct:
            return dct['M']
        if 'L' in dct:
            return dct['L']
        if 'NULL' in dct and dct['NULL'] is True:
            return None
    except Exception:
        # Not a DynamoDB-typed value; treat it as a regular dict below.
        return dct
    # In the case of a regular python dict, normalize the values.
    for key, val in six.iteritems(dct):
        if isinstance(val, six.string_types):
            try:
                dct[key] = datetime.strptime(val, '%Y-%m-%dT%H:%M:%S.%f')
            except ValueError:
                # This is a regular string.
                pass
        if isinstance(val, Decimal):
            # Convert Decimal into float or int as appropriate.
            if val % 1 > 0:
                dct[key] = float(val)
            elif six.PY3:
                dct[key] = int(val)
            elif val < sys.maxsize:
                dct[key] = int(val)
            else:
                dct[key] = long(val)  # noqa: F821 - Python 2 only
    return dct
def loads(s, as_dict=False, *args, **kwargs):
    """Load DynamoDB json format into a python dict.

    :param s: the json string or dict (with the as_dict variable set to
        True) to convert
    :returns: python dict object
    """
    if as_dict or not isinstance(s, six.string_types):
        # Normalize dict input to a json string first.
        s = json.dumps(s)
    kwargs['object_hook'] = object_hook
    return json.loads(s, *args, **kwargs)
def tileSize(self, zoom):
    """Return the size (in meters) of a tile at the given zoom level."""
    assert zoom in range(0, len(self.RESOLUTIONS))
    resolution = self.RESOLUTIONS[int(zoom)]
    return self.tileSizePx * resolution
def tileBounds(self, zoom, tileCol, tileRow):
    """Return the bounds of a tile in LV03 (EPSG:21781)."""
    assert zoom in range(0, len(self.RESOLUTIONS))
    # 0,0 at top left: y axis down and x axis right
    step = self.tileSize(zoom)
    minX = self.MINX + tileCol * step
    maxX = self.MINX + (tileCol + 1) * step
    if self.originCorner == 'bottom-left':
        minY = self.MINY + tileRow * step
        maxY = self.MINY + (tileRow + 1) * step
    elif self.originCorner == 'top-left':
        minY = self.MAXY - (tileRow + 1) * step
        maxY = self.MAXY - tileRow * step
    return [minX, minY, maxX, maxY]
def tileAddress(self, zoom, point):
    """Return the tile address [col, row] containing *point* at *zoom*.

    Parameters:
    zoom -- the zoom level
    point -- [x, y] coordinate inside the grid extent
    """
    x, y = point
    assert self.MINX <= x <= self.MAXX
    assert self.MINY <= y <= self.MAXY
    assert zoom in range(0, len(self.RESOLUTIONS))
    tileS = self.tileSize(zoom)
    offsetX = abs(x - self.MINX)
    if self.originCorner == 'bottom-left':
        offsetY = abs(y - self.MINY)
    elif self.originCorner == 'top-left':
        offsetY = abs(self.MAXY - y)
    col = offsetX / tileS
    row = offsetY / tileS
    # A point exactly on the extent border that also falls on a tile
    # boundary belongs to the previous tile, not to a tile outside the grid.
    if x in (self.MINX, self.MAXX) and col.is_integer():
        col = max(0, col - 1)
    if y in (self.MINY, self.MAXY) and row.is_integer():
        row = max(0, row - 1)
    return [int(math.floor(col)), int(math.floor(row))]
def intersectsExtent(self, extent):
    """Return True if *extent* [minX, minY, maxX, maxY] overlaps this
    instance's extent (touching borders count as intersecting)."""
    sMinX, sMinY, sMaxX, sMaxY = self.extent
    overlapsX = sMinX <= extent[2] and sMaxX >= extent[0]
    overlapsY = sMinY <= extent[3] and sMaxY >= extent[1]
    return overlapsX and overlapsY
def iterGrid(self, minZoom, maxZoom):
    """Generator yielding (tileBounds, zoom, tileCol, tileRow) for every
    tile of the instance extent between minZoom and maxZoom (inclusive).

    Parameters:
    minZoom -- first zoom level to iterate
    maxZoom -- last zoom level to iterate (inclusive)
    """
    assert minZoom in range(0, len(self.RESOLUTIONS))
    assert maxZoom in range(0, len(self.RESOLUTIONS))
    assert minZoom <= maxZoom
    # range() instead of the Python 2-only xrange(): iteration behaviour is
    # identical and the module stays importable under Python 3.
    for zoom in range(minZoom, maxZoom + 1):
        minRow, minCol, maxRow, maxCol = self.getExtentAddress(zoom)
        for row in range(minRow, maxRow + 1):
            for col in range(minCol, maxCol + 1):
                tileBounds = self.tileBounds(zoom, col, row)
                yield (tileBounds, zoom, col, row)
def numberOfXTilesAtZoom(self, zoom):
    """Return the number of tile columns (x direction) at the given zoom level."""
    # getExtentAddress returns [minRow, minCol, maxRow, maxCol]
    address = self.getExtentAddress(zoom)
    return address[3] - address[1] + 1
def numberOfYTilesAtZoom(self, zoom):
    """Return the number of tile rows (y direction) at the given zoom level."""
    # getExtentAddress returns [minRow, minCol, maxRow, maxCol]
    address = self.getExtentAddress(zoom)
    return address[2] - address[0] + 1
def numberOfTilesAtZoom(self, zoom):
    """Return the total number of tiles covering the extent at the given zoom level."""
    minRow, minCol, maxRow, maxCol = self.getExtentAddress(zoom)
    nbCols = maxCol - minCol + 1
    nbRows = maxRow - minRow + 1
    return nbCols * nbRows
def totalNumberOfTiles(self, minZoom=None, maxZoom=None):
    """Return the total number of tiles for this instance's extent.

    Parameters:
    minZoom (optional) -- first zoom level to count (defaults to 0)
    maxZoom (optional) -- last zoom level to count, inclusive
                          (defaults to the deepest zoom level)
    """
    if minZoom is None:
        minZoom = 0
    # Test for None explicitly: the previous 'if maxZoom:' treated a
    # requested maxZoom of 0 as "not provided" and counted every zoom level.
    if maxZoom is None:
        stop = len(self.RESOLUTIONS)
    else:
        stop = maxZoom + 1
    return sum(self.numberOfTilesAtZoom(zoom)
               for zoom in range(minZoom, stop))
def getZoom(self, resolution):
    """Return the zoom level whose resolution equals *resolution* exactly.
    Raises AssertionError if the resolution is not part of the grid."""
    resolutions = self.RESOLUTIONS
    assert resolution in resolutions
    return resolutions.index(resolution)
def _getZoomLevelRange(self, resolution, unit='meters'):
"Return lower and higher zoom level given a resolution"
assert unit in ('meters', 'degrees')
if unit == 'meters' and self.unit == 'degrees':
resolution = resolution / self.metersPerUnit
elif unit == 'degrees' and self.unit == 'meters':
resolution = resolution * EPSG4326_METERS_PER_UNIT
lo = 0
hi = len(self.RESOLUTIONS)
while lo < hi:
mid = (lo + hi) // 2
if resolution > self.RESOLUTIONS[mid]:
hi = mid
else:
lo = mid + 1
return lo, hi | Return lower and higher zoom level given a resolution | entailment |
def getClosestZoom(self, resolution, unit='meters'):
    """Return the zoom level whose resolution is closest to *resolution*.

    Parameters:
    resolution -- max. resolution
    unit -- unit for output (default='meters')
    """
    lower, upper = self._getZoomLevelRange(resolution, unit)
    if lower == 0:
        return 0
    if upper == len(self.RESOLUTIONS):
        return upper - 1
    # Pick whichever neighbouring resolution is nearer to the request.
    previous = self.RESOLUTIONS[lower - 1]
    current = self.RESOLUTIONS[lower]
    if abs(current - resolution) < abs(previous - resolution):
        return lower
    return lower - 1
def getCeilingZoom(self, resolution, unit='meters'):
    """
    Return the maximized zoom level for a given resolution
    Parameters:
    resolution -- max. resolution
    unit -- unit for output (default='meters')
    """
    # Exact match: resolve it to its index directly.
    if resolution in self.RESOLUTIONS:
        return self.getZoom(resolution)
    lo, hi = self._getZoomLevelRange(resolution, unit)
    # NOTE(review): _getZoomLevelRange's binary-search loop runs while
    # lo < hi, so it always returns lo == hi. The 'lo == hi' test below
    # therefore appears to be always true, which would make the two
    # branches after it unreachable -- confirm before refactoring.
    if lo == 0 or lo == hi:
        return lo
    if hi == len(self.RESOLUTIONS):
        return hi - 1
    return lo + 1 | Return the maximized zoom level for a given resolution
Parameters:
resolution -- max. resolution
unit -- unit for output (default='meters') | entailment |
def getScale(self, zoom):
    """Return the scale at a given zoom level."""
    resolution = self.getResolution(zoom)
    # Degree-based grids are converted to meters before dividing by the
    # standard pixel size.
    if self.unit == 'degrees':
        resolution *= EPSG4326_METERS_PER_UNIT
    return resolution / STANDARD_PIXEL_SIZE
def getExtentAddress(self, zoom, extent=None, contained=False):
    """
    Return the bounding addresses [minRow, minCol, maxRow, maxCol] based
    on the instance's extent or a user defined extent. Generic method
    that works with regular and irregular pyramids.
    Parameters:
    zoom -- the zoom for which we want the bounding addresses
    extent (optional) -- the extent ([minX, minY, maxX, maxY])
                         defaults to the instance extent
    contained (optional) -- get only tile addresses that contain
                            a coordinate of the extent. For instance if
                            the extent only intersects a tile border,
                            if this option is set to True, this tile
                            will be ignored. defaults to False
    """
    bbox = extent if extent else self.extent
    minX = bbox[0]
    maxX = bbox[2]
    # (minX, maxY) maps to the lowest col/row address and (maxX, minY) to
    # the highest, so which bbox values play minY/maxY depends on the origin.
    if self.originCorner == 'bottom-left':
        minY = bbox[3]
        maxY = bbox[1]
    elif self.originCorner == 'top-left':
        minY = bbox[1]
        maxY = bbox[3]
    [minCol, minRow] = self.tileAddress(zoom, [minX, maxY])
    [maxCol, maxRow] = self.tileAddress(zoom, [maxX, minY])
    # Bug fix: the trimming below must only run when contained=True. The old
    # condition 'contained and minCol != maxCol or minRow != maxRow' parsed
    # as '(contained and minCol != maxCol) or (minRow != maxRow)', so the
    # contained-only trimming also ran for plain queries spanning rows.
    if contained and (minCol != maxCol or minRow != maxRow):
        parentBoundsMin = self.tileBounds(zoom, minCol, minRow)
        if self.originCorner == 'bottom-left':
            if parentBoundsMin[2] == maxX:
                maxCol -= 1
            if parentBoundsMin[3] == minY:
                maxRow -= 1
        elif self.originCorner == 'top-left':
            if parentBoundsMin[2] == maxX:
                maxCol -= 1
            if parentBoundsMin[1] == minY:
                maxRow -= 1
    return [minRow, minCol, maxRow, maxCol]
def getParentTiles(self, zoom, col, row, zoomParent):
    """
    Return the parent tile(s) [zoom, col, row] covering the given child
    tile, for both irregular (not following quadindex) and regular
    tiling schemes.
    Parameters:
    zoom -- the zoom level a the child tile
    row -- the row of the child tile
    col -- the col of the child tile
    zoomParent -- the target zoom of the parent tile
    """
    assert zoomParent <= zoom
    if zoomParent == zoom:
        return [[zoom, col, row]]
    childBounds = self.tileBounds(zoom, col, row)
    minRow, minCol, maxRow, maxCol = self.getExtentAddress(
        zoomParent, extent=childBounds, contained=True)
    return [[zoomParent, c, r]
            for c in range(minCol, maxCol + 1)
            for r in range(minRow, maxRow + 1)]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.