sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def serialize_gen(
    obj_pyxb, encoding='utf-8', pretty=False, strip_prolog=False, xslt_url=None
):
    """Serialize a PyXB object to an XML document.

    Args:
        obj_pyxb: PyXB object
            PyXB object to serialize.
        encoding: str
            Encoding to use for the XML doc bytes. If None, a ``str`` is
            returned instead of ``bytes``.
        pretty: bool
            True: Use pretty print formatting for human readability.
        strip_prolog: bool
            True: Remove any XML prolog (e.g.,
            ``<?xml version="1.0" encoding="utf-8"?>``) from the resulting XML doc.
        xslt_url: str
            If specified, add a processing instruction to the XML doc that specifies
            the download location for an XSLT stylesheet.

    Returns:
        XML document as ``bytes``, or ``str`` if ``encoding`` is None.

    Raises:
        ValueError: If the PyXB object cannot be serialized.
    """
    assert d1_common.type_conversions.is_pyxb(obj_pyxb)
    assert encoding in (None, 'utf-8', 'UTF-8')
    try:
        obj_dom = obj_pyxb.toDOM()
    except pyxb.ValidationError as e:
        # Chain the original exception so the underlying cause is preserved.
        raise ValueError(
            'Unable to serialize PyXB to XML. error="{}"'.format(e.details())
        ) from e
    except pyxb.PyXBException as e:
        raise ValueError(
            'Unable to serialize PyXB to XML. error="{}"'.format(str(e))
        ) from e
    if xslt_url:
        xslt_processing_instruction = obj_dom.createProcessingInstruction(
            'xml-stylesheet', 'type="text/xsl" href="{}"'.format(xslt_url)
        )
        root = obj_dom.firstChild
        obj_dom.insertBefore(xslt_processing_instruction, root)
    if pretty:
        xml_str = obj_dom.toprettyxml(indent='  ', encoding=encoding)
        # Remove empty lines in the result caused by a bug in toprettyxml()
        if encoding is None:
            xml_str = re.sub(r'^\s*$\n', '', xml_str, flags=re.MULTILINE)
        else:
            # Fixed: b'^\s*$\n' contained invalid escape sequences in a
            # non-raw bytes literal; use raw bytes patterns instead.
            xml_str = re.sub(rb'^\s*$\n', b'', xml_str, flags=re.MULTILINE)
    else:
        xml_str = obj_dom.toxml(encoding)
    if strip_prolog:
        if encoding is None:
            xml_str = re.sub(r'^<\?(.*)\?>', '', xml_str)
        else:
            xml_str = re.sub(rb'^<\?(.*)\?>', b'', xml_str)
    return xml_str.strip()
def serialize_for_transport(obj_pyxb, pretty=False, strip_prolog=False, xslt_url=None):
    """Serialize a PyXB object to UTF-8 encoded XML ``bytes``.

    Intended for transport over the network, filesystem storage and other machine
    usage.

    Args:
        obj_pyxb: PyXB object
            PyXB object to serialize.
        pretty: bool
            True: Use pretty print formatting for human readability.
        strip_prolog: bool
            True: Remove any XML prolog (e.g.,
            ``<?xml version="1.0" encoding="utf-8"?>``) from the resulting XML doc.
        xslt_url: str
            If specified, add a processing instruction to the XML doc that specifies
            the download location for an XSLT stylesheet.

    Returns:
        bytes: UTF-8 encoded XML document

    See Also:
        ``serialize_for_display()``
    """
    return serialize_gen(
        obj_pyxb,
        encoding='utf-8',
        pretty=pretty,
        strip_prolog=strip_prolog,
        xslt_url=xslt_url,
    )
def serialize_to_xml_str(obj_pyxb, pretty=True, strip_prolog=False, xslt_url=None):
    """Serialize a PyXB object to a pretty printed XML ``str`` for display.

    Args:
        obj_pyxb: PyXB object
            PyXB object to serialize.
        pretty: bool
            False: Disable pretty print formatting. XML will not have line breaks.
        strip_prolog: bool
            True: Remove any XML prolog (e.g.,
            ``<?xml version="1.0" encoding="utf-8"?>``) from the resulting XML doc.
        xslt_url: str
            If specified, add a processing instruction to the XML doc that specifies
            the download location for an XSLT stylesheet.

    Returns:
        str: Pretty printed XML document
    """
    return serialize_gen(
        obj_pyxb,
        encoding=None,
        pretty=pretty,
        strip_prolog=strip_prolog,
        xslt_url=xslt_url,
    )
def reformat_to_pretty_xml(doc_xml):
    """Pretty print an XML doc.

    Args:
        doc_xml : str
            Well formed XML doc

    Returns:
        str: Pretty printed XML doc
    """
    assert isinstance(doc_xml, str)
    pretty_xml = xml.dom.minidom.parseString(doc_xml).toprettyxml(indent='  ')
    # toprettyxml() inserts spurious blank lines; strip them out.
    return re.sub(r'^\s*$\n', '', pretty_xml, flags=re.MULTILINE)
def are_equivalent(a_xml, b_xml, encoding=None):
    """Return True if two XML docs are semantically equivalent, else False.

    - TODO: Include test for tails. Skipped for now because tails are not used in any
      D1 types.
    """
    assert isinstance(a_xml, str)
    assert isinstance(b_xml, str)
    a_tree = str_to_etree(a_xml, encoding)
    b_tree = str_to_etree(b_xml, encoding)
    # Equivalence is a mutual superset relationship.
    return all(
        are_equal_or_superset(x, y) for x, y in ((a_tree, b_tree), (b_tree, a_tree))
    )
def are_equal_or_superset(superset_tree, base_tree):
    """Return True if ``superset_tree`` is equal to or a superset of ``base_tree``.

    - Checks that all elements and attributes in ``superset_tree`` are present and
      contain the same values as in ``base_tree``. For elements, also checks that the
      order is the same.
    - Can be used for checking if one XML document is based on another, as long as all
      the information in ``base_tree`` is also present and unmodified in
      ``superset_tree``.
    """
    is_superset = True
    try:
        _compare_attr(superset_tree, base_tree)
        _compare_text(superset_tree, base_tree)
    except CompareError as e:
        # The compare helpers signal a mismatch by raising CompareError.
        logger.debug(str(e))
        is_superset = False
    return is_superset
def are_equal_xml(a_xml, b_xml):
    """Normalize and compare two XML documents for equality.

    The documents may or may not be DataONE types.

    Args:
        a_xml: str
        b_xml: str
            XML documents to compare for equality.

    Returns:
        bool: ``True`` if the XML documents are semantically equivalent.
    """
    parse = xml.dom.minidom.parseString
    return are_equal_elements(parse(a_xml).documentElement, parse(b_xml).documentElement)
def are_equal_elements(a_el, b_el):
    """Normalize and compare two DOM elements for equality.

    Args:
        a_el: Element
        b_el: Element
            ``xml.dom`` elements to compare for equality.

    Returns:
        bool: ``True`` if the elements are semantically equivalent.
    """
    if a_el.tagName != b_el.tagName:
        return False
    # Attribute order is not significant, so compare sorted items.
    if sorted(a_el.attributes.items()) != sorted(b_el.attributes.items()):
        return False
    a_children = a_el.childNodes
    b_children = b_el.childNodes
    if len(a_children) != len(b_children):
        return False
    for a_child, b_child in zip(a_children, b_children):
        if a_child.nodeType != b_child.nodeType:
            return False
        if a_child.nodeType == a_child.TEXT_NODE:
            if a_child.data != b_child.data:
                return False
        elif a_child.nodeType == a_child.ELEMENT_NODE:
            # Recurse into child elements.
            if not are_equal_elements(a_child, b_child):
                return False
    return True
def sort_elements_by_child_values(obj_pyxb, child_name_list):
    """In-place sort simple or complex elements in a PyXB object by values they contain
    in child elements.

    Args:
        obj_pyxb: PyXB object
        child_name_list: list of str
            List of element names that are direct children of the PyXB object.
    """

    def child_value_key(el):
        # Build a sort key from the values of the named child elements.
        return [get_auto(getattr(el, child_name)) for child_name in child_name_list]

    obj_pyxb.sort(key=child_value_key)
def format_diff_pyxb(a_pyxb, b_pyxb):
    """Create a diff between two PyXB objects.

    Args:
        a_pyxb: PyXB object
        b_pyxb: PyXB object

    Returns:
        str : `Differ`-style delta
    """
    a_line_list = serialize_to_xml_str(a_pyxb).splitlines()
    b_line_list = serialize_to_xml_str(b_pyxb).splitlines()
    return '\n'.join(difflib.ndiff(a_line_list, b_line_list))
def format_diff_xml(a_xml, b_xml):
    """Create a diff between two XML documents.

    Args:
        a_xml: str
        b_xml: str

    Returns:
        str : `Differ`-style delta
    """
    a_line_list = reformat_to_pretty_xml(a_xml).splitlines()
    b_line_list = reformat_to_pretty_xml(b_xml).splitlines()
    return '\n'.join(difflib.ndiff(a_line_list, b_line_list))
def get_opt_attr(obj_pyxb, attr_str, default_val=None):
    """Get an optional attribute value from a PyXB element.

    The attributes for elements that are optional according to the schema and
    not set in the PyXB object are present and set to None.

    PyXB validation will fail if required elements are missing.

    Args:
        obj_pyxb: PyXB object
        attr_str: str
            Name of an attribute that the PyXB object may contain.
        default_val: any object
            Value to return if the attribute is not present.

    Returns:
        str : Value of the attribute if present, else ``default_val``.
    """
    attr_val = getattr(obj_pyxb, attr_str, default_val)
    if attr_val is None:
        # A present-but-unset optional attribute is None; map it to the default.
        return default_val
    return attr_val
def get_opt_val(obj_pyxb, attr_str, default_val=None):
    """Get an optional Simple Content value from a PyXB element.

    The attributes for elements that are optional according to the schema and
    not set in the PyXB object are present and set to None.

    PyXB validation will fail if required elements are missing.

    Args:
        obj_pyxb: PyXB object
        attr_str: str
            Name of an attribute that the PyXB object may contain.
        default_val: any object
            Value to return if the attribute is not present.

    Returns:
        str : Value of the attribute if present, else ``default_val``.
    """
    try:
        child_pyxb = getattr(obj_pyxb, attr_str)
        return get_req_val(child_pyxb)
    except (ValueError, AttributeError):
        # Missing or unset attribute: fall back to the default.
        return default_val
def resolwe_exception_handler(exc, context):
    """Handle exceptions raised in API and make them nicer.

    To enable this, you have to add it to the settings:

    .. code:: python

        REST_FRAMEWORK = {
            'EXCEPTION_HANDLER': 'resolwe.flow.utils.exceptions.resolwe_exception_handler',
        }
    """
    # Let the default DRF handler produce the base response first.
    response = exception_handler(exc, context)
    if isinstance(exc, ValidationError):
        # The default handler returns None for exceptions it does not
        # recognize, so build a response from scratch in that case.
        if response is None:
            response = Response({})
        response.status_code = 400
        # NOTE(review): assumes ``exc`` exposes a ``.message`` attribute;
        # Django's ValidationError only has one when constructed with a single
        # message -- TODO confirm multi-message errors cannot reach this path.
        response.data['error'] = exc.message
    return response
def validate_and_decode(jwt_bu64, cert_obj):
    """Example for validating the signature of a JWT using only the cryptography
    library.

    Note that this does NOT validate the claims in the claim set.

    Args:
        jwt_bu64: JWT, encoded using URL-safe Base64.
        cert_obj: cryptography certificate object holding the public key with
            which the JWT signature is validated.

    Returns:
        dict: The decoded JWT.

    Raises:
        Exception: If the signature is invalid.
    """
    public_key = cert_obj.public_key()
    # The signed message is the "header.payload" portion of the JWT.
    message = '.'.join(d1_common.cert.jwt.get_bu64_tup(jwt_bu64)[:2])
    signature = d1_common.cert.jwt.get_jwt_tup(jwt_bu64)[2]
    try:
        public_key.verify(
            signature,
            message,
            cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15(),
            cryptography.hazmat.primitives.hashes.SHA256(),
        )
    except cryptography.exceptions.InvalidSignature as e:
        # Chain the original exception so the underlying cause is preserved.
        raise Exception('Signature is invalid. error="{}"'.format(str(e))) from e
    return d1_common.cert.jwt.get_jwt_dict(jwt_bu64)
def find_valid_combinations(cert_file_name_list, jwt_file_name_list):
    """Given a list of cert and JWT file names, print a list showing each combination
    along with indicators for combinations where the JWT signature was successfully
    validated with the cert."""
    for cert_file_name in cert_file_name_list:
        cert_pem = ''  # self.test_files.load_utf8_to_str(cert_file_name)
        cert_obj = d1_common.cert.x509.deserialize_pem(cert_pem)
        # d1_common.cert.x509.log_cert_info(logging.info, 'CERT', cert_obj)
        for jwt_file_name in jwt_file_name_list:
            jwt_bu64 = ''  # self.test_files.load_utf8_to_str(jwt_file_name)
            # d1_common.cert.jwt.log_jwt_bu64_info(logging.info, 'JWT', jwt_bu64)
            try:
                d1_common.cert.jwt.validate_and_decode(jwt_bu64, cert_obj)
            except d1_common.cert.jwt.JwtException as e:
                logging.info('Invalid. msg="{}"'.format(str(e)))
                is_valid = False
            else:
                is_valid = True
            # Mark validated combinations with "***".
            marker = '***' if is_valid else '   '
            logging.info('{} {} {}'.format(marker, cert_file_name, jwt_file_name))
def parseUrl(url):
    """Split a URL into its component parts.

    Args:
        url: str
            URL to parse.

    Returns:
        dict: Contains ``scheme``, ``netloc``, ``url``, ``params``, ``query`` and
        ``fragment`` keys.

        ``query`` is a dict of the query parameters. If a query key appears only
        once in the URL, its value is a single string. If it appears multiple
        times, its value is a sorted list of the values.
    """
    scheme, netloc, url, params, query, fragment = urllib.parse.urlparse(url)
    query_dict = {
        # Unwrap single-value parameters; keep multi-value parameters as
        # sorted lists.
        k: sorted(v) if len(v) > 1 else v[0]
        for k, v in list(urllib.parse.parse_qs(query).items())
    }
    return {
        'scheme': scheme,
        'netloc': netloc,
        'url': url,
        'params': params,
        'query': query_dict,
        'fragment': fragment,
    }
def encodePathElement(element):
    """Encode a URL path element according to RFC3986."""
    if isinstance(element, str):
        element = element.encode('utf-8')
    elif isinstance(element, int):
        element = str(element)
    return urllib.parse.quote(element, safe=d1_common.const.URL_PATHELEMENT_SAFE_CHARS)
def encodeQueryElement(element):
    """Encode a URL query element according to RFC3986."""
    if isinstance(element, str):
        element = element.encode('utf-8')
    elif isinstance(element, int):
        element = str(element)
    return urllib.parse.quote(element, safe=d1_common.const.URL_QUERYELEMENT_SAFE_CHARS)
def urlencode(query, doseq=0):
    """Modified version of the standard urllib.urlencode that conforms to RFC3986.

    The urllib version encodes spaces as '+' which can lead to inconsistency. This
    version will always encode spaces as '%20'.

    Encode a sequence of two-element tuples or dictionary into a URL query string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.

    Args:
        query: dict or sequence of two-element tuples
            Parameters to encode. Parameters whose value is None are dropped.
        doseq: bool
            True: Convert each element of sequence values to a separate parameter.

    Returns:
        str: Encoded query string with parameters sorted.

    Raises:
        TypeError: If ``query`` is not a mapping or a sequence of two-element
            tuples.
    """
    if hasattr(query, "items"):
        # Remove None parameters from query. Dictionaries are mutable, so we can
        # remove the items directly. list(dict.keys()) creates a copy of the
        # dictionary keys, making it safe to remove elements from the dictionary
        # while iterating.
        for k in list(query.keys()):
            if query[k] is None:
                del query[k]
        # mapping objects
        query = list(query.items())
    else:
        # Remove None parameters from query. Tuples are immutable, so we have to
        # build a new version that does not contain the elements we want to remove,
        # and replace the original with it.
        query = [kv for kv in query if kv[1] is not None]
    # it's a bother at times that strings and string-like objects are
    # sequences...
    try:
        # non-sequence items should not work with len()
        # non-empty strings will fail this
        if len(query) and not isinstance(query[0], tuple):
            raise TypeError
        # zero-length sequences of all types will get here and succeed,
        # but that's a minor nit - since the original implementation
        # allowed empty dicts that type of behavior probably should be
        # preserved for consistency
    except TypeError as e:
        raise TypeError("not a valid non-string sequence or mapping object") from e
    pair_list = []
    if not doseq:
        # preserve old behavior
        for k, v in query:
            k = encodeQueryElement(str(k))
            v = encodeQueryElement(str(v))
            pair_list.append(k + '=' + v)
    else:
        for k, v in query:
            k = encodeQueryElement(str(k))
            if isinstance(v, str):
                v = encodeQueryElement(v)
                pair_list.append(k + '=' + v)
            else:
                # NOTE: A second, unreachable `isinstance(v, str)` branch (a
                # Python 2 str/unicode leftover) was removed here.
                try:
                    # is this a sufficient test for sequence-ness?
                    len(v)
                except TypeError:
                    # not a sequence
                    v = encodeQueryElement(str(v))
                    pair_list.append(k + '=' + v)
                else:
                    # loop over the sequence
                    for elt in v:
                        pair_list.append(k + '=' + encodeQueryElement(str(elt)))
    return '&'.join(sorted(pair_list))
def makeCNBaseURL(url):
    """Attempt to create a valid CN BaseURL when one or more sections of the URL are
    missing."""
    parts = urllib.parse.urlparse(url, scheme=d1_common.const.DEFAULT_CN_PROTOCOL)
    if parts.netloc:
        # Host is present; fall back to the default path if needed.
        netloc = parts.netloc
        path = parts.path or d1_common.const.DEFAULT_CN_PATH
    elif parts.path:
        # No host: treat the first path segment as the host.
        netloc, sep, path = parts.path.partition('/')
        if not sep:
            path = d1_common.const.DEFAULT_CN_PATH
    else:
        # Nothing usable: fall back to all defaults.
        netloc = d1_common.const.DEFAULT_CN_HOST
        path = d1_common.const.DEFAULT_CN_PATH
    return urllib.parse.urlunparse(
        (parts.scheme, netloc, path, parts.params, parts.query, parts.fragment)
    )
def makeMNBaseURL(url):
    """Attempt to create a valid MN BaseURL when one or more sections of the URL are
    missing."""
    parts = urllib.parse.urlparse(url, scheme=d1_common.const.DEFAULT_MN_PROTOCOL)
    if parts.netloc:
        # Host is present; fall back to the default path if needed.
        netloc = parts.netloc
        path = parts.path or d1_common.const.DEFAULT_MN_PATH
    elif parts.path:
        # No host: treat the first path segment as the host.
        netloc, sep, path = parts.path.partition('/')
        if not sep:
            path = d1_common.const.DEFAULT_MN_PATH
    else:
        # Nothing usable: fall back to all defaults.
        netloc = d1_common.const.DEFAULT_MN_HOST
        path = d1_common.const.DEFAULT_MN_PATH
    return urllib.parse.urlunparse(
        (parts.scheme, netloc, path, parts.params, parts.query, parts.fragment)
    )
def find_url_mismatches(a_url, b_url):
    """Given two URLs, return a list of any mismatches.

    If the list is empty, the URLs are equivalent. Implemented by parsing and comparing
    the elements. See RFC 1738 for details.

    Args:
        a_url: str
        b_url: str
            URLs to compare.

    Returns:
        list of str: Descriptions of the mismatches. Empty if the URLs are
        equivalent.
    """
    diff_list = []
    a_parts = urllib.parse.urlparse(a_url)
    b_parts = urllib.parse.urlparse(b_url)
    # scheme (case insensitive)
    if a_parts.scheme.lower() != b_parts.scheme.lower():
        diff_list.append(
            # Fixed: message had a stray trailing " differ".
            'Schemes differ. a="{}" b="{}"'.format(
                a_parts.scheme.lower(), b_parts.scheme.lower()
            )
        )
    # netloc (case insensitive)
    if a_parts.netloc.lower() != b_parts.netloc.lower():
        diff_list.append(
            # Fixed: previously formatted the bound method (missing "()" on
            # b_parts.netloc.lower).
            'Network locations differ. a="{}" b="{}"'.format(
                a_parts.netloc.lower(), b_parts.netloc.lower()
            )
        )
    # path (case sensitive)
    if a_parts.path != b_parts.path:
        diff_list.append(
            'Paths differ: a="{}" b="{}"'.format(a_parts.path, b_parts.path)
        )
    # fragment
    if a_parts.fragment != b_parts.fragment:
        diff_list.append(
            'Fragments differ. a="{}" b="{}"'.format(a_parts.fragment, b_parts.fragment)
        )
    # param (order insensitive)
    a_param_list = sorted(a_parts.params.split(";"))
    b_param_list = sorted(b_parts.params.split(";"))
    if a_param_list != b_param_list:
        diff_list.append(
            'Parameters differ. a="{}" b="{}"'.format(
                ', '.join(a_param_list), ', '.join(b_param_list)
            )
        )
    # query (order insensitive)
    a_query_dict = urllib.parse.parse_qs(a_parts.query)
    b_query_dict = urllib.parse.parse_qs(b_parts.query)
    if len(a_query_dict) != len(b_query_dict):
        diff_list.append(
            'Number of query keys differs. a={} b={}'.format(
                len(a_query_dict), len(b_query_dict)
            )
        )
    # Fixed: this loop previously iterated b_query_dict, so keys present only
    # in the first URL were never reported.
    for a_key in a_query_dict:
        if a_key not in b_query_dict:
            diff_list.append(
                'Query key in first missing in second. a_key="{}"'.format(a_key)
            )
        elif sorted(a_query_dict[a_key]) != sorted(b_query_dict[a_key]):
            diff_list.append(
                'Query values differ. key="{}" a_value="{}" b_value="{}"'.format(
                    a_key, sorted(a_query_dict[a_key]), sorted(b_query_dict[a_key])
                )
            )
    for b_key in b_query_dict:
        if b_key not in a_query_dict:
            diff_list.append(
                'Query key in second missing in first. b_key="{}"'.format(b_key)
            )
    return diff_list
def main():
    """Run the program.

    Convert the input file (selected by extension) to a gzipped TSV file.
    """

    def write_delimited(input_path, delimiter):
        # Split each input line on the delimiter and re-emit it as TSV.
        with open(input_path) as infile:
            for line in infile:
                csvwriter.writerow(line.strip().split(delimiter))

    args = parse_arguments()
    ext = os.path.splitext(args.input_file)[-1].lower()
    with gzip.open(args.output_file, mode='wt') as outfile:
        csvwriter = csv.writer(outfile, delimiter='\t', lineterminator='\n')
        try:
            if ext in ('.tab', '.txt', '.tsv'):
                write_delimited(args.input_file, '\t')
            elif ext == '.csv':
                write_delimited(args.input_file, ',')
            elif ext in ('.xls', '.xlsx'):
                workbook = xlrd.open_workbook(args.input_file)
                worksheet = workbook.sheets()[0]
                for rownum in range(worksheet.nrows):
                    csvwriter.writerow(worksheet.row_values(rownum))
            else:
                print('{"proc.error":"File extension not recognized."}')
        except Exception:
            print('{"proc.error":"Corrupt or unrecognized file."}')
            raise
def search(self, line):
    """CN search."""
    query_engine = self._session.get(d1_cli.impl.session.QUERY_ENGINE_NAME)
    if query_engine == "solr":
        return self._search_solr(line)
    raise d1_cli.impl.exceptions.InvalidArguments(
        "Unsupported query engine: {}".format(query_engine)
    )
def resolve(self, pid):
    """Get Object Locations for Object."""
    connect_params = self._cn_client_connect_params_from_session()
    client = d1_cli.impl.client.CLICNClient(**connect_params)
    object_location_list_pyxb = client.resolve(pid)
    # Print the URL of each known replica of the object.
    for location_pyxb in object_location_list_pyxb.objectLocation:
        d1_cli.impl.util.print_info(location_pyxb.url)
def science_object_get(self, pid, path):
    """First try the MN set in the session.

    Then try to resolve via the CN set in the session.
    """
    mn_client = d1_cli.impl.client.CLIMNClient(
        **self._mn_client_connect_params_from_session()
    )
    try:
        response = mn_client.get(pid)
    except d1_common.types.exceptions.DataONEException:
        pass
    else:
        self._output(response, path)
        return
    # The session MN did not have the object. Resolve it via the CN and try
    # each known location in turn.
    cn_client = d1_cli.impl.client.CLICNClient(
        **self._cn_client_connect_params_from_session()
    )
    object_location_list_pyxb = cn_client.resolve(pid)
    for location_pyxb in object_location_list_pyxb.objectLocation:
        connect_params = self._mn_client_connect_params_from_session()
        connect_params["base_url"] = location_pyxb.baseURL
        try:
            resolved_mn_client = d1_cli.impl.client.CLIMNClient(**connect_params)
            response = resolved_mn_client.get(pid)
        except d1_common.types.exceptions.DataONEException:
            continue
        self._output(response, path)
        return
    raise d1_cli.impl.exceptions.CLIError("Could not find object: {}".format(pid))
def science_object_create(self, pid, path, format_id=None):
    """Create a new Science Object on a Member Node.

    The operation is added to the write operation queue rather than performed
    immediately.
    """
    self._queue_science_object_create(pid, path, format_id)
def science_object_update(self, pid_old, path, pid_new, format_id=None):
    """Obsolete a Science Object on a Member Node with a different one.

    The operation is added to the write operation queue rather than performed
    immediately.
    """
    self._queue_science_object_update(pid_old, path, pid_new, format_id)
def _output(self, file_like_object, path=None):
    """Display or save file like object."""
    if path:
        self._output_to_file(file_like_object, path)
    else:
        self._output_to_display(file_like_object)
def _search_solr(self, line):
    """Perform a SOLR search.

    Prints the pretty printed search result, or a warning/error message if the
    query fails.
    """
    try:
        query_str = self._create_solr_query(line)
        client = d1_cli.impl.client.CLICNClient(
            **self._cn_client_connect_params_from_session()
        )
        object_list_pyxb = client.search(
            queryType=d1_common.const.DEFAULT_SEARCH_ENGINE,
            query=query_str,
            start=self._session.get(d1_cli.impl.session.START_NAME),
            rows=self._session.get(d1_cli.impl.session.COUNT_NAME),
        )
        d1_cli.impl.util.print_info(self._pretty(object_list_pyxb.toxml("utf-8")))
    except d1_common.types.exceptions.ServiceFailure as e:
        e = "%".join(str(e).splitlines())  # Flatten line
        # Pull the DataONE errorCode and HTTP status code out of the flattened
        # exception message.
        regexp = re.compile(
            r"errorCode: (?P<error_code>\d+)%.*%Status code: (?P<status_code>\d+)"
        )
        result = regexp.search(e)
        # errorCode 500 with status code 400 carries an HTML payload with a
        # human readable description; surface it as a warning.
        if (
            (result is not None)
            and (result.group("error_code") == "500")
            and (result.group("status_code") == "400")
        ):  # noqa: E129
            result = re.search(
                r"<b>description</b> <u>(?P<description>[^<]+)</u>", e
            )
            # Unescape HTML entities (e.g. "&amp;") in the description.
            msg = re.sub(
                "&([^;]+);",
                lambda m: chr(html.entities.name2codepoint[m.group(1)]),
                result.group("description"),
            )
            d1_cli.impl.util.print_info("Warning: %s" % msg)
        else:
            d1_cli.impl.util.print_error("Unexpected error:\n%s" % str(e))
def _create_solr_query(self, line):
    """Assemble the complete SOLR query string.

    Combines the command line query with the filters derived from the session.
    """
    fragment_list = [
        line.strip() if line else "",
        self._query_string_to_solr_filter(line),
        self._object_format_to_solr_filter(line),
        self._time_span_to_solr_filter(),
    ]
    return "".join(fragment_list).strip()
def apply_filter_list(func, obj):
    """Apply `func` to list or tuple `obj` element-wise and directly otherwise."""
    if not isinstance(obj, (list, tuple)):
        return func(obj)
    # Sequences always come back as a plain list.
    return list(map(func, obj))
def _get_data_attr(data, attr):
    """Get data object field."""
    # `Data` object's id is hydrated as `__id` in expression engine
    data_id = data['__id'] if isinstance(data, dict) else data
    return getattr(Data.objects.get(id=data_id), attr)
def input_(data, field_path):
    """Return a hydrated value of the ``input`` field."""
    data_obj = Data.objects.get(id=data['__id'])
    hydrated_inputs = copy.deepcopy(data_obj.input)
    # XXX: Optimize by hydrating only the required field (major refactoring).
    hydrate_input_references(hydrated_inputs, data_obj.process.input_schema)
    hydrate_input_uploads(hydrated_inputs, data_obj.process.input_schema)
    return dict_dot(hydrated_inputs, field_path)
def _get_hydrated_path(field):
    """Return HydratedPath object for file-type field."""
    # Get only file path if whole file object is given.
    if isinstance(field, str) and hasattr(field, 'file_name'):
        # field is already actually a HydratedPath object
        # NOTE(review): this branch only matches if HydratedPath is a str
        # subclass carrying a `file_name` attribute (a plain str never has
        # one) -- TODO confirm.
        return field
    if isinstance(field, dict) and 'file' in field:
        hydrated_path = field['file']
        if not hasattr(hydrated_path, 'file_name'):
            raise TypeError("Filter argument must be a valid file-type field.")
        return hydrated_path
    # NOTE(review): any other input falls through and implicitly returns None;
    # verify callers never pass such values.
def get_url(field):
    """Return the file's URL, based on the base URL set in settings."""
    hydrated = _get_hydrated_path(field)
    host = getattr(settings, 'RESOLWE_HOST_URL', 'localhost')
    return "{}/data/{}/{}".format(host, hydrated.data_id, hydrated.file_name)
def descriptor(obj, path=''):
    """Return the descriptor of the given object.

    If ``path`` is specified, only the content on that path is returned.

    Args:
        obj: Data object, or its hydrated dict representation.
        path: Dot-separated path into the descriptor.

    Returns:
        The requested (part of the) descriptor. Lists and dicts are returned
        JSON-serialized.
    """
    if isinstance(obj, dict):
        # Current object is hydrated, so we need to get descriptor from
        # dict representation.
        desc = obj['__descriptor']
    else:
        desc = obj.descriptor
    resp = dict_dot(desc, path)
    # Serialize containers so the caller receives a plain string.
    # (Consolidated the redundant `isinstance(...) or isinstance(...)` check.)
    if isinstance(resp, (list, dict)):
        return json.dumps(resp)
    return resp
def _close_open_date_ranges(self, record):
"""If a date range is missing the start or end date, close it by copying the
date from the existing value."""
date_ranges = (('beginDate', 'endDate'),)
for begin, end in date_ranges:
if begin in record and end in record:
return
elif begin in record:
record[end] = record[begin]
elif end in record:
record[begin] = record[end] | If a date range is missing the start or end date, close it by copying the
date from the existing value. | entailment |
def escapeQueryTerm(self, term):
    """Escape a SOLR query term.

    Backslash-escapes the characters that are reserved in the SOLR/Lucene
    query syntax:

        + - && || ! ( ) { } [ ] ^ " ~ * ? : \\

    Args:
        term: str
            Query term to escape.

    Returns:
        str: The escaped term.
    """
    reserved = [
        '+', '-', '&', '|', '!', '(', ')', '{', '}',
        '[', ']', '^', '"', '~', '*', '?', ':',
    ]
    # Escape backslashes first so the escapes added below are not doubled.
    term = term.replace('\\', '\\\\')
    for c in reserved:
        # Fixed: "\%s" relied on an invalid escape sequence (SyntaxWarning on
        # modern Python); use an explicit escaped backslash instead.
        term = term.replace(c, '\\' + c)
    return term
def prepareQueryTerm(self, field, term):
    """Prepare a query term for inclusion in a query.

    This escapes the term and if necessary, wraps the term in quotes.

    Args:
        field: str
            Name of the SOLR field the term targets.
        term: str
            Query term.

    Returns:
        str: Escaped and, for text-like fields, quoted term.
    """
    if term == "*":
        return term
    # Preserve a trailing wildcard through the escaping step. endswith() also
    # avoids an IndexError on an empty term, unlike the previous index math.
    add_star = term.endswith('*')
    if add_star:
        term = term[:-1]
    term = self.escapeQueryTerm(term)
    if add_star:
        term = '%s*' % term
    if self.getSolrType(field) in ['string', 'text', 'text_ws']:
        return '"%s"' % term
    return term
This escapes the term and if necessary, wraps the term in quotes. | entailment |
def coerceType(self, ftype, value):
    """Return str(value) after trying to coerce it into the SOLR field type.

    Args:
        ftype: The SOLR field type name ('string', 'text', 'int', 'float',
            'date', ...).
        value: The value that is to be represented as text.

    Returns:
        The text representation, or None when value is None or cannot be
        coerced into the requested type.
    """
    if value is None:
        return None
    if ftype in ('string', 'text'):
        return str(value)
    if ftype == 'int':
        try:
            return str(int(value))
        except Exception:
            return None
    if ftype == 'float':
        try:
            return str(float(value))
        except Exception:
            return None
    if ftype == 'date':
        try:
            # Dates are expected in the form 'Jun 01 2005 1:33PM'.
            parsed = datetime.datetime.strptime(value, '%b %d %Y %I:%M%p')
            return parsed.isoformat()
        except Exception:
            return None
    # Unknown types fall back to their plain text representation.
    return str(value)
def getSolrType(self, field):
    """Return the SOLR type of the specified field name.

    The field is first looked up directly in the known field types.
    Failing that, the convention of dynamic fields using an underscore +
    type character code suffix (e.g. 'genus_s') is used. Defaults to
    'string' when no type can be determined.
    """
    # Narrowed from a broad 'except Exception' - only a failed dict
    # lookup should trigger the fallback.
    try:
        return self.fieldtypes[field]
    except (KeyError, TypeError):
        pass
    ftype = 'string'
    name_parts = field.split('_')
    if len(name_parts) > 1:
        try:
            ftype = self.fieldtypes[name_parts[-1]]
            # Cache the resolved type so it is found directly next time.
            self.fieldtypes[field] = ftype
        except (KeyError, TypeError):
            pass
    return ftype
def addDocs(self, docs):
    """Add multiple records in a single update request.

    Args:
        docs: A list of dicts, each mapping field name -> value for one
            record.

    Returns:
        The result of posting the combined update document.
    """
    parts = ['<add>']
    for doc_fields in docs:
        self.__add(parts, doc_fields)
    parts.append('</add>')
    return self.doUpdateXML(''.join(parts))
def count(self, q='*:*', fq=None):
    """Return the number of entries that match the query.

    Args:
        q: SOLR query string (defaults to matching everything).
        fq: Optional filter query restricting the match.
    """
    query_params = {'q': q, 'rows': '0'}
    if fq is not None:
        query_params['fq'] = fq
    return self.search(query_params)['response']['numFound']
def getIds(self, query='*:*', fq=None, start=0, rows=1000):
    """Return the identifiers of records matching the query.

    Args:
        query: SOLR query string.
        fq: Optional filter query.
        start: Index of the first match to return.
        rows: Maximum number of identifiers to return.

    Returns:
        dict with keys:
            matches: total number of matches
            failed: True if the request raised an exception
            start: the starting index that was requested
            ids: [id, id, ...]

    See also the SOLRSearchResponseIterator class.
    """
    params = {'q': query, 'start': str(start), 'rows': str(rows), 'wt': 'python'}
    if fq is not None:
        params['fq'] = fq
    request = urllib.parse.urlencode(params, doseq=True)
    data = None
    response = {'matches': 0, 'start': start, 'failed': True, 'ids': []}
    try:
        rsp = self.doPost(self.solrBase + '', request, self.formheaders)
        # HACK: the 'wt=python' response is parsed with eval(); this is
        # only safe against a trusted SOLR instance. Consider
        # ast.literal_eval instead.
        data = eval(rsp.read())
    except Exception:
        # Best effort: any failure leaves 'failed': True in the response.
        pass
    if data is None:
        return response
    response['failed'] = False
    response['matches'] = data['response']['numFound']
    for doc in data['response']['docs']:
        # The 'id' field value is indexed, so it appears to be
        # multi-valued; only the first value is kept.
        response['ids'].append(doc['id'][0])
    return response
def get(self, id):
    """Retrieve the specified document.

    Args:
        id: Identifier of the document to retrieve.

    Returns:
        The first matching document dict, or None when the document does
        not exist or the request failed.
    """
    params = {'q': 'id:%s' % str(id), 'wt': 'python'}
    request = urllib.parse.urlencode(params, doseq=True)
    data = None
    try:
        rsp = self.doPost(self.solrBase + '', request, self.formheaders)
        # HACK: the 'wt=python' response is parsed with eval(); this is
        # only safe against a trusted SOLR instance.
        data = eval(rsp.read())
    except Exception:
        pass
    # BUG FIX: the original dereferenced data['response'] even when the
    # request had failed and data was still None, raising TypeError
    # instead of returning None.
    if data is None:
        return None
    if data['response']['numFound'] > 0:
        return data['response']['docs'][0]
    return None
def getFields(self, numTerms=1):
    """Retrieve the SOLR field descriptions via the Luke request handler.

    The response is cached on the instance, so repeated calls do not hit
    the service again.

    Args:
        numTerms: Number of top terms to request per field.

    Returns:
        dict containing (among others) 'index' statistics and a 'fields'
        mapping of field name -> field description, e.g.::

            {'responseHeader': {'status': 0, 'QTime': 44},
             'index': {'numDocs': 2000, 'numTerms': 23791, ...},
             'fields': {
                 'species_s': {
                     'type': 'string',
                     'schema': 'I-SM---O----l',
                     'dynamicBase': '*_s',
                     'docs': 1924,
                     'distinct': 209,
                     'topTerms': ['cepedianum', 352],
                     'histogram': ['2', 34, '4', 34, ...]},
                 ...}}
    """
    if self._fields is not None:
        # Serve the cached copy from a previous call.
        return self._fields
    params = {'numTerms': str(numTerms), 'wt': 'python'}
    request = urllib.parse.urlencode(params, doseq=True)
    rsp = self.doPost(self.solrBase + '/admin/luke', request, self.formheaders)
    # HACK: the 'wt=python' response is parsed with eval(); this is only
    # safe against a trusted SOLR instance.
    data = eval(rsp.read())
    self._fields = data
    return data
def fieldValues(self, name, q="*:*", fq=None, maxvalues=-1):
    """Retrieve the unique values for a field, along with their usage counts.

    Implemented as a facet query, e.g.:
    .../select/?q=*:*&rows=0&facet=true&facet.field=<name>&facet.limit=...

    Args:
        name: Name of field to retrieve values for.
        q: Query identifying the records from which values are retrieved.
        fq: Optional filter query restricting the operation of the query.
        maxvalues: Maximum number of values to retrieve. The default of -1
            causes retrieval of all values.

    Returns:
        dict of {fieldname: [value, count, value, count, ...], ...}
        (a flat, alternating value/count list per field).
    """
    params = {
        'q': q,
        'rows': '0',
        'facet': 'true',
        'facet.field': name,
        'facet.limit': str(maxvalues),
        'facet.zeros': 'false',
        'wt': 'python',
        'facet.sort': 'false',
    }
    if fq is not None:
        params['fq'] = fq
    request = urllib.parse.urlencode(params, doseq=True)
    rsp = self.doPost(self.solrBase + '', request, self.formheaders)
    # HACK: the 'wt=python' response is parsed with eval(); this is only
    # safe against a trusted SOLR instance.
    data = eval(rsp.read())
    return data['facet_counts']['facet_fields']
def fieldMinMax(self, name, q='*:*', fq=None):
    """Return the minimum and maximum values of the specified field.

    Requires two search calls to the service, each requesting a single
    record sorted on the field (ascending for the minimum, descending for
    the maximum).

    Args:
        name: Name of the field.
        q: Query identifying the range of records for min and max values.
        fq: Optional filter restricting the range of the query.

    Returns:
        list of [min, max]; an entry remains None when its lookup failed.
    """
    minmax = [None, None]
    # Keep the connection open across the two consecutive searches.
    oldpersist = self.persistent
    self.persistent = True
    params = {
        'q': q,
        'rows': 1,
        'fl': name,
        'sort': '%s asc' % name,
        'wt': 'python',
    }
    if fq is not None:
        params['fq'] = fq
    try:
        data = self.search(params)
        minmax[0] = data['response']['docs'][0][name][0]
        # Flip the sort order to fetch the maximum.
        params['sort'] = '%s desc' % name
        data = self.search(params)
        minmax[1] = data['response']['docs'][0][name][0]
    except Exception as e:
        self.logger.debug('Exception in MinMax: %s' % str(e))
        pass
    finally:
        # Restore the previous persistence setting and close the
        # connection if it is no longer wanted.
        self.persistent = oldpersist
        if not self.persistent:
            self.conn.close()
    return minmax
def getftype(self, name):
    """Return the Python type corresponding to a SOLR field.

    The field list is cached by getFields(), so repeated calls do not
    invoke a service request each time.

    Args:
        name: The name of the SOLR field.

    Returns:
        The Python type (str, int, float or bool). Unknown fields default
        to str; SOLR types with no known mapping are returned as the raw
        type string.
    """
    fields = self.getFields()
    try:
        solr_type = fields['fields'][name]['type']
    except Exception:
        return str
    type_map = {
        'string': str, 'text': str, 'stext': str, 'text_ws': str,
        'sint': int, 'integer': int, 'long': int, 'slong': int,
        'sdouble': float, 'double': float, 'sfloat': float, 'float': float,
        'boolean': bool,
    }
    return type_map.get(solr_type, solr_type)
def fieldAlphaHistogram(
    self, name, q='*:*', fq=None, nbins=10, includequeries=True
):
    """Generate a histogram of values from a string field.

    Bin edges are determined by equal division of the field's distinct
    values.

    Args:
        name: Name of the field to compute.
        q: Query identifying the set of records for the histogram.
        fq: Optional filter query restricting the operation of the query.
        nbins: Requested number of bins; reduced when the field has fewer
            distinct values.
        includequeries: When True, each bin also carries the facet query
            that produced its count.

    Returns:
        list of [low, high, count(, query)] entries, one per bin.
    """
    # Keep the connection open across the multiple consecutive requests.
    oldpersist = self.persistent
    self.persistent = True
    bins = []
    qbin = []
    fvals = []
    try:
        # Get the total number of values for the field.
        # TODO: this is a slow mechanism to retrieve the number of
        # distinct values. Can probably be replaced with a range of alpha
        # chars - need to check on case sensitivity.
        fvals = self.fieldValues(name, q, fq, maxvalues=-1)
        # fvals[name] is a flat [value, count, value, count, ...] list.
        # BUG FIX: use integer division; under Python 3 the original '/'
        # produced a float, which breaks range(0, nbins) below after
        # 'nbins = nvalues'.
        nvalues = len(fvals[name]) // 2
        if nvalues < nbins:
            nbins = nvalues
        if nvalues == nbins:
            # One value per bin: use equivalence instead of range queries.
            for i in range(0, nbins):
                bin = [fvals[name][i * 2], fvals[name][i * 2], 0]
                binq = '%s:%s' % (name, self.prepareQueryTerm(name, bin[0]))
                qbin.append(binq)
                bins.append(bin)
        else:
            # BUG FIX: integer division, as above; the 'delta == 1' test
            # relies on integer semantics (the float case is handled
            # explicitly further down).
            delta = nvalues // nbins
            if delta == 1:
                # Use equivalence queries, except the last one which
                # includes the remainder of terms.
                # NOTE(review): this produces nbins - 1 bins in total; the
                # 'nbins - 2' loop bound looks like an off-by-one - confirm
                # the intent before changing it.
                for i in range(0, nbins - 2):
                    bin = [fvals[name][i * 2], fvals[name][i * 2], 0]
                    binq = '%s:%s' % (name, self.prepareQueryTerm(name, bin[0]))
                    qbin.append(binq)
                    bins.append(bin)
                term = fvals[name][(nbins - 1) * 2]
                bin = [term, fvals[name][((nvalues - 1) * 2)], 0]
                binq = '%s:[%s TO *]' % (name, self.prepareQueryTerm(name, term))
                qbin.append(binq)
                bins.append(bin)
            else:
                # Use ranges for all terms: walk through the value list and
                # pick the values that fall on the bin edges.
                coffset = 0.0
                delta = float(nvalues) / float(nbins)
                for i in range(0, nbins):
                    idxl = int(coffset) * 2
                    idxu = (int(coffset + delta) * 2) - 2
                    bin = [fvals[name][idxl], fvals[name][idxu], 0]
                    binq = ''
                    try:
                        if i == 0:
                            # First bin is open at the bottom.
                            binq = '%s:[* TO %s]' % (
                                name,
                                self.prepareQueryTerm(name, bin[1]),
                            )
                        elif i == nbins - 1:
                            # Last bin is open at the top.
                            binq = '%s:[%s TO *]' % (
                                name,
                                self.prepareQueryTerm(name, bin[0]),
                            )
                        else:
                            binq = '%s:[%s TO %s]' % (
                                name,
                                self.prepareQueryTerm(name, bin[0]),
                                self.prepareQueryTerm(name, bin[1]),
                            )
                    except Exception:
                        self.logger.exception('Exception 1 in fieldAlphaHistogram:')
                    qbin.append(binq)
                    bins.append(bin)
                    coffset = coffset + delta
        # Execute a single facet request covering all bin queries.
        params = {
            'q': q,
            'rows': '0',
            'facet': 'true',
            'facet.field': name,
            'facet.limit': '1',
            'facet.mincount': 1,
            'wt': 'python',
        }
        request = urllib.parse.urlencode(params, doseq=True)
        for sq in qbin:
            try:
                request = request + '&%s' % urllib.parse.urlencode(
                    {'facet.query': self.encoder(sq)[0]}
                )
            except Exception:
                self.logger.exception('Exception 2 in fieldAlphaHistogram')
        rsp = self.doPost(self.solrBase + '', request, self.formheaders)
        # HACK: the 'wt=python' response is parsed with eval(); this is
        # only safe against a trusted SOLR instance.
        data = eval(rsp.read())
        # Copy each facet query count into its bin.
        for i in range(0, len(bins)):
            v = data['facet_counts']['facet_queries'][qbin[i]]
            bins[i][2] = v
            if includequeries:
                bins[i].append(qbin[i])
    finally:
        # Restore the previous persistence setting.
        self.persistent = oldpersist
        if not self.persistent:
            self.conn.close()
    return bins
def fieldHistogram(
    self, name, q="*:*", fq=None, nbins=10, minmax=None, includequeries=True
):
    """Generate a histogram of values for a numeric field.

    Expects the field to be integer or floating point; string fields are
    delegated to fieldAlphaHistogram().

    Args:
        name: Name of the field to compute.
        q: The query identifying the set of records for the histogram.
        fq: Filter query to restrict application of query.
        nbins: Number of bins in resulting histogram.
        minmax: Optional [min, max] override; computed from the data when
            None.
        includequeries: When True, each bin also carries the facet query
            that produced its count.

    Returns:
        list of [binmin, binmax, n(, binquery)] entries.
    """
    # Keep the connection open across the multiple consecutive requests.
    oldpersist = self.persistent
    self.persistent = True
    ftype = self.getftype(name)
    if ftype == str:
        # Handle text histograms separately.
        bins = self.fieldAlphaHistogram(
            name, q=q, fq=fq, nbins=nbins, includequeries=includequeries
        )
        self.persistent = oldpersist
        if not self.persistent:
            self.conn.close()
        return bins
    bins = []
    qbin = []
    fvals = self.fieldValues(name, q, fq, maxvalues=nbins + 1)
    if len(fvals[name]) < 3:
        # Not enough distinct values for a histogram.
        # BUG FIX: restore the persistence setting before this early
        # return; the original left self.persistent stuck at True here.
        self.persistent = oldpersist
        if not self.persistent:
            self.conn.close()
        return bins
    # fvals[name] is a flat [value, count, value, count, ...] list.
    # BUG FIX: use integer division; under Python 3 the original '/'
    # produced a float, which breaks range(0, nbins) below after
    # 'nbins = nvalues'.
    nvalues = len(fvals[name]) // 2
    if nvalues < nbins:
        nbins = nvalues
    # Offset used to make the lower edge of interior bins exclusive.
    minoffset = 1
    if ftype == float:
        minoffset = 0.00001
    try:
        if minmax is None:
            minmax = self.fieldMinMax(name, q=q, fq=fq)
        minmax[0] = float(minmax[0])
        minmax[1] = float(minmax[1])
        delta = (minmax[1] - minmax[0]) / nbins
        for i in range(0, nbins):
            binmin = minmax[0] + (i * delta)
            bin = [binmin, binmin + delta, 0]
            if ftype == int:
                bin[0] = int(bin[0])
                bin[1] = int(bin[1])
                if i == 0:
                    # First bin is open at the bottom.
                    binq = '%s:[* TO %d]' % (name, bin[1])
                elif i == nbins - 1:
                    # Last bin is open at the top.
                    binq = '%s:[%d TO *]' % (name, bin[0] + minoffset)
                    bin[0] = bin[0] + minoffset
                    if bin[1] < bin[0]:
                        bin[1] = bin[0]
                else:
                    binq = '%s:[%d TO %d]' % (name, bin[0] + minoffset, bin[1])
                    bin[0] = bin[0] + minoffset
            else:
                if i == 0:
                    binq = '%s:[* TO %f]' % (name, bin[1])
                elif i == nbins - 1:
                    binq = '%s:[%f TO *]' % (name, bin[0] + minoffset)
                else:
                    binq = '%s:[%f TO %f]' % (name, bin[0] + minoffset, bin[1])
            qbin.append(binq)
            bins.append(bin)
        # Execute a single facet request covering all bin queries.
        params = {
            'q': q,
            'rows': '0',
            'facet': 'true',
            'facet.field': name,
            'facet.limit': '1',
            'facet.mincount': 1,
            'wt': 'python',
        }
        request = urllib.parse.urlencode(params, doseq=True)
        for sq in qbin:
            request = request + '&%s' % urllib.parse.urlencode({'facet.query': sq})
        rsp = self.doPost(self.solrBase + '', request, self.formheaders)
        # HACK: the 'wt=python' response is parsed with eval(); this is
        # only safe against a trusted SOLR instance.
        data = eval(rsp.read())
        # Copy each facet query count into its bin.
        for i in range(0, len(bins)):
            v = data['facet_counts']['facet_queries'][qbin[i]]
            bins[i][2] = v
            if includequeries:
                bins[i].append(qbin[i])
    finally:
        self.persistent = oldpersist
        if not self.persistent:
            self.conn.close()
    return bins
def fieldHistogram2d(self, colname, rowname, q="*:*", fq=None, ncols=10, nrows=10):
    """Generate a 2D histogram of values for two numeric fields.

    Expects both fields to be integer or floating point.

    Args:
        colname: Name of the field providing the columns.
        rowname: Name of the field providing the rows.
        q: The query identifying the set of records for the histogram.
        fq: Filter query to restrict application of query.
        ncols: Number of columns in the resulting histogram.
        nrows: Number of rows in the resulting histogram.

    Returns:
        dict of {colname: name of column index,
                 rowname: name of row index,
                 cols: [] list of min values for each column bin,
                 rows: [] list of min values for each row bin,
                 z: [[...], ...] counts, one inner list per row}
    """

    def _mkQterm(name, minv, maxv, isint, isfirst, islast):
        # Build the SOLR range query for a single bin. The first bin is
        # open at the bottom and the last bin is open at the top.
        if isint:
            minv = int(minv)
            maxv = int(maxv)
            if isfirst:
                return '%s:[* TO %d]' % (name, maxv)
            if islast:
                # BUG FIX: the original used maxv here, which started the
                # open-ended last bin at its upper edge and silently
                # dropped all records in [minv, maxv).
                return '%s:[%d TO *]' % (name, minv)
            return '%s:[%d TO %d]' % (name, minv, maxv)
        if isfirst:
            return '%s:[* TO %f]' % (name, maxv)
        if islast:
            # BUG FIX: see the integer branch above.
            return '%s:[%f TO *]' % (name, minv)
        return '%s:[%f TO %f]' % (name, minv, maxv)

    # Keep the connection open across the many consecutive requests.
    oldpersist = self.persistent
    self.persistent = True
    ftype_col = self.getftype(colname)
    ftype_row = self.getftype(rowname)
    result = {
        'colname': colname,
        'rowname': rowname,
        'cols': [],
        'rows': [],
        'z': [],
    }
    try:
        rowminmax = self.fieldMinMax(rowname, q=q, fq=fq)
        rowminmax[0] = float(rowminmax[0])
        rowminmax[1] = float(rowminmax[1])
        colminmax = self.fieldMinMax(colname, q=q, fq=fq)
        colminmax[0] = float(colminmax[0])
        colminmax[1] = float(colminmax[1])
        rowdelta = (rowminmax[1] - rowminmax[0]) / nrows
        coldelta = (colminmax[1] - colminmax[0]) / ncols
        for rowidx in range(0, nrows):
            rmin = rowminmax[0] + (rowidx * rowdelta)
            result['rows'].append(rmin)
            rmax = rmin + rowdelta
            rowq = _mkQterm(
                rowname,
                rmin,
                rmax,
                (ftype_row == int),
                (rowidx == 0),
                (rowidx == nrows - 1),
            )
            # Restrict this row's facet queries to the row's value range.
            qq = "%s AND %s" % (q, rowq)
            logging.debug("row=%d, q= %s" % (rowidx, qq))
            bins = []
            cline = []
            for colidx in range(0, ncols):
                cmin = colminmax[0] + (colidx * coldelta)
                if rowidx == 0:
                    # BUG FIX: the original appended to 'cols' on every
                    # row, producing nrows * ncols entries instead of
                    # ncols.
                    result['cols'].append(cmin)
                cmax = cmin + coldelta
                colq = _mkQterm(
                    colname,
                    cmin,
                    cmax,
                    (ftype_col == int),
                    (colidx == 0),
                    (colidx == ncols - 1),
                )
                bins.append([colidx, rowidx, cmin, rmin, cmax, rmax, 0, colq])
            # Execute a single facet request covering all column bins of
            # this row.
            params = {
                'q': qq,
                'rows': '0',
                'facet': 'true',
                'facet.field': colname,
                'facet.limit': '1',
                'facet.mincount': 1,
                'wt': 'python',
            }
            if fq is not None:
                params['fq'] = fq
            request = urllib.parse.urlencode(params, doseq=True)
            for bin in bins:
                request = request + '&%s' % urllib.parse.urlencode(
                    {'facet.query': bin[7]}
                )
            rsp = self.doPost(self.solrBase + '', request, self.formheaders)
            # HACK: the 'wt=python' response is parsed with eval(); this
            # is only safe against a trusted SOLR instance.
            data = eval(rsp.read())
            for bin in bins:
                cline.append(data['facet_counts']['facet_queries'][bin[7]])
            result['z'].append(cline)
    finally:
        self.persistent = oldpersist
        if not self.persistent:
            self.conn.close()
    return result
def _nextPage(self, offset):
"""Retrieves the next set of results from the service."""
self.logger.debug("Iterator crecord=%s" % str(self.crecord))
params = {
'q': self.q,
'start': str(offset),
'rows': str(self.pagesize),
'fl': self.fields,
'explainOther': '',
'hl.fl': '',
}
if self.fq is not None:
params['fq'] = self.fq
self.res = self.client.search(params)
self._numhits = int(self.res['response']['numFound']) | Retrieves the next set of results from the service. | entailment |
def _nextPage(self, offset):
    """Retrieve the next page of facet values from the service.

    Populates self.res with the facet value/count list for self.field
    (an empty list when the expected structure is missing) and resets
    self.index for iteration.
    """
    self.logger.debug("Iterator crecord=%s" % str(self.crecord))
    params = {
        'q': self.q,
        'rows': '0',
        'facet': 'true',
        'facet.field': self.field,
        'facet.limit': str(self.pagesize),
        'facet.offset': str(offset),
        'facet.zeros': 'false',
        'wt': 'python',
    }
    if self.fq is not None:
        params['fq'] = self.fq
    request = urllib.parse.urlencode(params, doseq=True)
    rsp = self.client.doPost(
        self.client.solrBase + '', request, self.client.formheaders
    )
    # HACK: the 'wt=python' response is parsed with eval(); this is only
    # safe against a trusted SOLR instance.
    data = eval(rsp.read())
    try:
        self.res = data['facet_counts']['facet_fields'][self.field]
        self.logger.debug(self.res)
    except Exception:
        self.res = []
    self.index = 0
def get_monitor_ping(request):
    """MNCore.ping() → Boolean."""
    ping_response = d1_gmn.app.views.util.http_response_with_boolean_true_type()
    # Responses to ping must carry an HTTP Date header.
    d1_gmn.app.views.headers.add_http_date_header(ping_response)
    return ping_response
def get_log(request):
    """MNCore.getLogRecords(session[, fromDate][, toDate][, idFilter][, event]
    [, start=0][, count=1000]) → Log

    Build a dict describing the requested slice of event log records,
    sorted by timestamp, id.
    """
    query = d1_gmn.app.models.EventLog.objects.all().order_by('timestamp', 'id')
    if not d1_gmn.app.auth.is_trusted_subject(request):
        # Untrusted callers only see records for objects their subjects
        # can access, with redaction annotations applied.
        query = d1_gmn.app.db_filter.add_access_policy_filter(
            request, query, 'sciobj__id'
        )
        query = d1_gmn.app.db_filter.add_redact_annotation(request, query)
    query = d1_gmn.app.db_filter.add_datetime_filter(
        request, query, 'timestamp', 'fromDate', 'gte'
    )
    query = d1_gmn.app.db_filter.add_datetime_filter(
        request, query, 'timestamp', 'toDate', 'lt'
    )
    query = d1_gmn.app.db_filter.add_string_filter(
        request, query, 'event__event', 'event'
    )
    # v1 filters by PID prefix ('pidFilter'); v2 also matches SIDs
    # ('idFilter').
    if d1_gmn.app.views.util.is_v1_api(request):
        query = d1_gmn.app.db_filter.add_string_begins_with_filter(
            request, query, 'sciobj__pid__did', 'pidFilter'
        )
    elif d1_gmn.app.views.util.is_v2_api(request):
        query = d1_gmn.app.db_filter.add_sid_or_string_begins_with_filter(
            request, query, 'sciobj__pid__did', 'idFilter'
        )
    else:
        assert False, 'Unable to determine API version'
    # Count before slicing so 'total' reflects all matching records.
    total_int = query.count()
    query, start, count = d1_gmn.app.views.slice.add_slice_filter(
        request, query, total_int
    )
    return {
        'query': query,
        'start': start,
        'count': count,
        'total': total_int,
        'type': 'log',
    }
def get_node(request):
    """MNCore.getCapabilities() → Node."""
    # v2 callers receive the v2 Node document; all others receive v1.
    major_version = 2 if d1_gmn.app.views.util.is_v2_api(request) else 1
    node_xml = d1_gmn.app.node.get_pretty_xml(major_version)
    return django.http.HttpResponse(node_xml, d1_common.const.CONTENT_TYPE_XML)
def get_object(request, pid):
    """MNRead.get(session, did) → OctetStream."""
    sciobj_model = d1_gmn.app.models.ScienceObject.objects.get(pid__did=pid)
    content_type = d1_gmn.app.object_format_cache.get_content_type(
        sciobj_model.format.format
    )
    # Stream the local or proxied SciObj bytes back to the caller.
    response = django.http.StreamingHttpResponse(
        _get_sciobj_iter(sciobj_model), content_type
    )
    d1_gmn.app.views.headers.add_sciobj_properties_headers_to_response(
        response, sciobj_model
    )
    d1_gmn.app.event_log.log_read_event(pid, request)
    return response
def get_meta(request, pid):
    """MNRead.getSystemMetadata(session, pid) → SystemMetadata."""
    d1_gmn.app.event_log.log_read_event(pid, request)
    # The SysMeta doc is generated to match the API version of the request.
    sysmeta_xml = d1_gmn.app.views.util.generate_sysmeta_xml_matching_api_version(
        request, pid
    )
    return django.http.HttpResponse(sysmeta_xml, d1_common.const.CONTENT_TYPE_XML)
def head_object(request, pid):
    """MNRead.describe(session, did) → DescribeResponse."""
    sciobj_model = d1_gmn.app.models.ScienceObject.objects.get(pid__did=pid)
    # A HEAD response carries the object's properties only as headers.
    response = django.http.HttpResponse()
    d1_gmn.app.views.headers.add_sciobj_properties_headers_to_response(
        response, sciobj_model
    )
    d1_gmn.app.event_log.log_read_event(pid, request)
    return response
def get_checksum(request, pid):
    """MNRead.getChecksum(session, did[, checksumAlgorithm]) → Checksum."""
    # MNRead.getChecksum() requires that a new checksum be calculated; the
    # checksum stored in the System Metadata cannot simply be returned.
    # When no checksumAlgorithm argument is provided, the system wide
    # default checksum algorithm is used.
    algorithm_str = request.GET.get(
        'checksumAlgorithm', d1_common.const.DEFAULT_CHECKSUM_ALGORITHM
    )
    if not d1_common.checksum.is_supported_algorithm(algorithm_str):
        supported_str = ', '.join(
            list(d1_common.checksum.DATAONE_TO_PYTHON_CHECKSUM_ALGORITHM_MAP.keys())
        )
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Invalid checksum algorithm. invalid="{}", supported="{}"'.format(
                algorithm_str, supported_str
            ),
        )
    sciobj_model = d1_gmn.app.models.ScienceObject.objects.get(pid__did=pid)
    with _get_sciobj_iter(sciobj_model) as sciobj_iter:
        checksum_pyxb = d1_common.checksum.create_checksum_object_from_iterator(
            sciobj_iter, algorithm_str
        )
    # Log the access of this object.
    # TODO: look into log type other than 'read'
    d1_gmn.app.event_log.log_read_event(pid, request)
    return django.http.HttpResponse(
        checksum_pyxb.toxml('utf-8'), d1_common.const.CONTENT_TYPE_XML
    )
def post_error(request):
    """MNRead.synchronizationFailed(session, message)

    Accept a DataONE Exception document describing a synchronization
    failure on the CN. The error is only logged; the call always returns
    a Boolean 200 OK response.
    """
    d1_gmn.app.views.assert_db.post_has_mime_parts(request, (('file', 'message'),))
    try:
        synchronization_failed = d1_gmn.app.views.util.deserialize(
            request.FILES['message']
        )
    except d1_common.types.exceptions.DataONEException as e:
        # In v1, MNRead.synchronizationFailed() cannot return an InvalidRequest
        # to the CN. Can only log the issue and return a 200 OK.
        logging.error(
            'Received notification of synchronization error from CN but was unable '
            'to deserialize the DataONE Exception passed by the CN. '
            'message="{}" error="{}"'.format(
                d1_gmn.app.views.util.read_utf8_xml(request.FILES['message']), str(e)
            )
        )
    else:
        logging.error(
            'Received notification of synchronization error from CN:\n{}'.format(
                str(synchronization_failed)
            )
        )
    return d1_gmn.app.views.util.http_response_with_boolean_true_type()
def get_replica(request, pid):
    """MNReplication.getReplica(session, did) → OctetStream."""
    # Only nodes authorized by the CN may retrieve replicas.
    _assert_node_is_authorized(request, pid)
    sciobj_model = d1_gmn.app.models.ScienceObject.objects.get(pid__did=pid)
    replica_content_type = d1_gmn.app.object_format_cache.get_content_type(
        sciobj_model.format.format
    )
    # A replica is always a local file, so it can be served with
    # FileResponse().
    response = django.http.FileResponse(
        d1_gmn.app.sciobj_store.open_sciobj_file_by_pid(pid), replica_content_type
    )
    d1_gmn.app.views.headers.add_sciobj_properties_headers_to_response(
        response, sciobj_model
    )
    # Log the replication of this object.
    d1_gmn.app.event_log.log_replicate_event(pid, request)
    return response
def put_meta(request):
    """MNStorage.updateSystemMetadata(session, pid, sysmeta) → boolean.

    TODO: Currently, this call allows making breaking changes to SysMeta. We need to
    clarify what can be modified and what the behavior should be when working with SIDs
    and chains.
    """
    if django.conf.settings.REQUIRE_WHITELIST_FOR_UPDATE:
        d1_gmn.app.auth.assert_create_update_delete_permission(request)
    # Internally, this PUT is processed as a POST.
    d1_gmn.app.util.coerce_put_post(request)
    d1_gmn.app.views.assert_db.post_has_mime_parts(
        request, (('field', 'pid'), ('file', 'sysmeta'))
    )
    pid = request.POST['pid']
    d1_gmn.app.auth.assert_allowed(request, d1_gmn.app.auth.WRITE_LEVEL, pid)
    new_sysmeta_pyxb = d1_gmn.app.sysmeta.deserialize(request.FILES['sysmeta'])
    # The submitted SysMeta's modified timestamp must match the currently
    # stored one.
    d1_gmn.app.views.assert_sysmeta.has_matching_modified_timestamp(new_sysmeta_pyxb)
    # Overwrite the values that the MN controls before storing.
    d1_gmn.app.views.create.set_mn_controlled_values(
        request, new_sysmeta_pyxb, is_modification=True
    )
    d1_gmn.app.sysmeta.create_or_update(new_sysmeta_pyxb)
    d1_gmn.app.event_log.log_update_event(
        pid,
        request,
        timestamp=d1_common.date_time.normalize_datetime_to_utc(
            new_sysmeta_pyxb.dateUploaded
        ),
    )
    return d1_gmn.app.views.util.http_response_with_boolean_true_type()
def get_is_authorized(request, pid):
    """MNAuthorization.isAuthorized(did, action) -> Boolean."""
    if 'action' not in request.GET:
        raise d1_common.types.exceptions.InvalidRequest(
            0, 'Missing required parameter. required="action"'
        )
    # action_to_level() raises InvalidRequest for unknown action strings.
    requested_level = d1_gmn.app.auth.action_to_level(request.GET['action'])
    d1_gmn.app.auth.assert_allowed(request, requested_level, pid)
    return d1_gmn.app.views.util.http_response_with_boolean_true_type()
def post_refresh_system_metadata(request):
    """MNStorage.systemMetadataChanged(session, did, serialVersion,
    dateSysMetaLastModified) → boolean."""
    required_parts = (
        ('field', 'pid'),
        ('field', 'serialVersion'),
        ('field', 'dateSysMetaLastModified'),
    )
    d1_gmn.app.views.assert_db.post_has_mime_parts(request, required_parts)
    pid = request.POST['pid']
    d1_gmn.app.views.assert_db.is_existing_object(pid)
    # Record the refresh request in the queue with the 'queued' state.
    refresh_model = d1_gmn.app.models.sysmeta_refresh_queue(
        pid,
        request.POST['serialVersion'],
        request.POST['dateSysMetaLastModified'],
        'queued',
    )
    refresh_model.save()
    return d1_gmn.app.views.util.http_response_with_boolean_true_type()
def post_object_list(request):
    """MNStorage.create(session, did, object, sysmeta) → Identifier."""
    d1_gmn.app.views.assert_db.post_has_mime_parts(
        request, (('field', 'pid'), ('file', 'object'), ('file', 'sysmeta'))
    )
    pid = request.POST['pid']
    sysmeta_pyxb = d1_gmn.app.sysmeta.deserialize(request.FILES['sysmeta'])
    # A freshly created object cannot obsolete anything, must match the
    # PID given in the URL, and may only use a SID that starts a new,
    # standalone chain.
    d1_gmn.app.views.assert_sysmeta.obsoletes_not_specified(sysmeta_pyxb)
    d1_gmn.app.views.assert_sysmeta.matches_url_pid(sysmeta_pyxb, pid)
    d1_gmn.app.views.assert_sysmeta.is_valid_sid_for_new_standalone(sysmeta_pyxb)
    d1_gmn.app.views.create.create_sciobj(request, sysmeta_pyxb)
    return pid
def put_object(request, old_pid):
    """MNStorage.update(session, pid, object, newPid, sysmeta) → Identifier.

    Replace the existing object (old_pid) with a new object (newPid),
    chaining the two via the obsoletes relationship.
    """
    if django.conf.settings.REQUIRE_WHITELIST_FOR_UPDATE:
        d1_gmn.app.auth.assert_create_update_delete_permission(request)
    # Internally, this PUT is processed as a POST.
    d1_gmn.app.util.coerce_put_post(request)
    d1_gmn.app.views.assert_db.post_has_mime_parts(
        request, (('field', 'newPid'), ('file', 'object'), ('file', 'sysmeta'))
    )
    d1_gmn.app.views.assert_db.is_valid_pid_to_be_updated(old_pid)
    sysmeta_pyxb = d1_gmn.app.sysmeta.deserialize(request.FILES['sysmeta'])
    new_pid = request.POST['newPid']
    d1_gmn.app.views.assert_sysmeta.matches_url_pid(sysmeta_pyxb, new_pid)
    d1_gmn.app.views.assert_sysmeta.obsoletes_matches_pid_if_specified(
        sysmeta_pyxb, old_pid
    )
    # The new object obsoletes the old one, extending the revision chain.
    sysmeta_pyxb.obsoletes = old_pid
    sid = d1_common.xml.get_opt_val(sysmeta_pyxb, 'seriesId')
    d1_gmn.app.views.assert_sysmeta.is_valid_sid_for_chain(old_pid, sid)
    d1_gmn.app.views.create.create_sciobj(request, sysmeta_pyxb)
    # The create event for the new object is added in create_sciobj(). The update
    # event on the old object is added here.
    d1_gmn.app.event_log.log_update_event(
        old_pid,
        request,
        timestamp=d1_common.date_time.normalize_datetime_to_utc(
            sysmeta_pyxb.dateUploaded
        ),
    )
    d1_gmn.app.sysmeta.update_modified_timestamp(old_pid)
    return new_pid
def post_generate_identifier(request):
    """MNStorage.generateIdentifier(session, scheme[, fragment]) β Identifier."""
    d1_gmn.app.views.assert_db.post_has_mime_parts(request, (('field', 'scheme'),))
    if request.POST['scheme'] != 'UUID':
        raise d1_common.types.exceptions.InvalidRequest(
            0, 'Only the UUID scheme is currently supported'
        )
    prefix = request.POST.get('fragment', None) or ''
    # Keep generating until a PID that is not already registered is found. UUID
    # collisions are practically impossible, so this rarely loops more than once.
    while True:
        candidate_pid = prefix + uuid.uuid4().hex
        if not d1_gmn.app.models.ScienceObject.objects.filter(
            pid__did=candidate_pid
        ).exists():
            return candidate_pid
def put_archive(request, pid):
    """MNStorage.archive(session, did) β Identifier."""
    # Replicas cannot be archived through this endpoint.
    d1_gmn.app.views.assert_db.is_not_replica(pid)
    # Reject the request if the object has already been archived.
    d1_gmn.app.views.assert_db.is_not_archived(pid)
    d1_gmn.app.sysmeta.archive_sciobj(pid)
    return pid
def post_replicate(request):
    """MNReplication.replicate(session, sysmeta, sourceNode) β boolean."""
    d1_gmn.app.views.assert_db.post_has_mime_parts(
        request, (('field', 'sourceNode'), ('file', 'sysmeta'))
    )
    sysmeta_pyxb = d1_gmn.app.sysmeta.deserialize(request.FILES['sysmeta'])
    # Reject the request early if it does not comply with this node's
    # replication policy.
    d1_gmn.app.local_replica.assert_request_complies_with_replication_policy(
        sysmeta_pyxb
    )
    pid = d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
    d1_gmn.app.views.assert_db.is_valid_pid_for_create(pid)
    # The replica is transferred asynchronously; queue the request and
    # acknowledge with a boolean "true" response.
    d1_gmn.app.local_replica.add_to_replication_queue(
        request.POST['sourceNode'], sysmeta_pyxb
    )
    return d1_gmn.app.views.util.http_response_with_boolean_true_type()
def contribute_to_class(self, process, fields, name):
    """Attach this field to a process under the given name.

    :param process: Process descriptor instance
    :param fields: Fields registry to use
    :param name: Field name
    """
    fields[name] = self
    self.process = process
    self.name = name
:param process: Process descriptor instance
:param fields: Fields registry to use
:param name: Field name | entailment |
def to_schema(self):
    """Return the schema dictionary describing this field.

    Raises ValueError if the field has not yet been registered with a
    process via contribute_to_class().
    """
    if not self.name or not self.process:
        raise ValueError("field is not registered with process")
    schema = {
        'name': self.name,
        'type': self.get_field_type(),
    }
    # Optional attributes are emitted only when explicitly set.
    for key in ('required', 'label', 'description', 'default', 'hidden'):
        attribute = getattr(self, key)
        if attribute is not None:
            schema[key] = attribute
    # An empty choices list produces no 'choices' key, matching the
    # setdefault-based construction.
    if self.choices:
        schema['choices'] = [
            {'label': label, 'value': choice} for choice, label in self.choices
        ]
    return schema
def validate(self, value):
    """Check that the value satisfies the required/choices constraints."""
    if value is None:
        if self.required:
            raise ValidationError("field is required")
        return
    if self.choices is not None:
        allowed = [choice for choice, _ in self.choices]
        if value not in allowed:
            raise ValidationError("field must be one of: {}".format(
                ", ".join(allowed),
            ))
def clean(self, value):
    """Apply the default, convert the value and validate it.

    Returns the converted value; re-raises validation failures with the
    field name prepended.
    """
    if value is None:
        value = self.default
    try:
        cleaned = self.to_python(value)
        self.validate(cleaned)
    except ValidationError as error:
        raise ValidationError("invalid value for {}: {}".format(
            self.name,
            error.args[0]
        ))
    return cleaned
def validate(self, value):
    """Validate that a present value is a string, then run base checks."""
    # Requiredness is handled by the base class; only check the type here.
    if not (value is None or isinstance(value, str)):
        raise ValidationError("field must be a string")
    super().validate(value)
def validate(self, value):
    """Validate that a present value is a boolean, then run base checks."""
    # Requiredness is handled by the base class; only check the type here.
    if not (value is None or isinstance(value, bool)):
        raise ValidationError("field must be a boolean")
    super().validate(value)
def to_output(self, value):
    """Convert value to process output format."""
    serialized = resolwe_runtime_utils.save(self.name, str(value))
    return json.loads(serialized)
def to_python(self, value):
    """Convert *value* to a URL string.

    Accepts a plain string, a dict with an 'url' element, or None (which
    is passed through unchanged). Raises ValidationError otherwise.
    """
    if isinstance(value, str):
        return value
    elif isinstance(value, dict):
        try:
            value = value['url']
        except KeyError:
            raise ValidationError("dictionary must contain an 'url' element")
        if not isinstance(value, str):
            raise ValidationError("field's url element must be a string")
        return value
    elif value is not None:
        # BUG FIX: was `not isinstance(value, None)`, which raises TypeError
        # for any unhandled type; the intent is to let None fall through.
        raise ValidationError("field must be a string or a dict")
def import_file(self, imported_format=None, progress_from=0.0, progress_to=None):
    """Import field source file to working directory.

    Delegates to ``resolwe_runtime_utils.import_file``, passing this
    descriptor's ``file_temp`` as the source and ``path`` as the
    destination file name.

    :param imported_format: Import file format (extracted, compressed or both);
        defaults to ``ImportedFormat.BOTH`` when not given
    :param progress_from: Initial progress value
    :param progress_to: Final progress value
    :return: Destination file path (if extracted and compressed, extracted path given)
    :raises RuntimeError: if the installed resolwe-runtime-utils is too old
        to provide ``import_file``
    """
    # Feature-detect instead of version-checking: import_file() only exists
    # in resolwe-runtime-utils >= 2.0.0.
    if not hasattr(resolwe_runtime_utils, 'import_file'):
        raise RuntimeError('Requires resolwe-runtime-utils >= 2.0.0')
    if imported_format is None:
        imported_format = resolwe_runtime_utils.ImportedFormat.BOTH
    return resolwe_runtime_utils.import_file(
        src=self.file_temp,
        file_name=self.path,
        imported_format=imported_format,
        progress_from=progress_from,
        progress_to=progress_to
    )
:param imported_format: Import file format (extracted, compressed or both)
:param progress_from: Initial progress value
:param progress_to: Final progress value
:return: Destination file path (if extracted and compressed, extracted path given) | entailment |
def to_python(self, value):
"""Convert value if needed."""
if isinstance(value, FileDescriptor):
return value
elif isinstance(value, str):
return FileDescriptor(value)
elif isinstance(value, dict):
try:
path = value['file']
except KeyError:
raise ValidationError("dictionary must contain a 'file' element")
if not isinstance(path, str):
raise ValidationError("field's file element must be a string")
size = value.get('size', None)
if size is not None and not isinstance(size, int):
raise ValidationError("field's size element must be an integer")
total_size = value.get('total_size', None)
if total_size is not None and not isinstance(total_size, int):
raise ValidationError("field's total_size element must be an integer")
is_remote = value.get('is_remote', None)
if is_remote is not None and not isinstance(is_remote, bool):
raise ValidationError("field's is_remote element must be a boolean")
file_temp = value.get('file_temp', None)
if file_temp is not None and not isinstance(file_temp, str):
raise ValidationError("field's file_temp element must be a string")
refs = value.get('refs', None)
if refs is not None and not isinstance(refs, list):
# TODO: Validate that all refs are strings.
raise ValidationError("field's refs element must be a list of strings")
return FileDescriptor(
path,
size=size,
total_size=total_size,
is_remote=is_remote,
file_temp=file_temp,
refs=refs,
)
elif not isinstance(value, None):
raise ValidationError("field must be a FileDescriptor, string or a dict") | Convert value if needed. | entailment |
def to_output(self, value):
    """Convert value to process output format."""
    serialized = resolwe_runtime_utils.save_file(self.name, value.path, *value.refs)
    return json.loads(serialized)
def to_python(self, value):
"""Convert value if needed."""
if isinstance(value, DirDescriptor):
return value
elif isinstance(value, str):
return DirDescriptor(value)
elif isinstance(value, dict):
try:
path = value['dir']
except KeyError:
raise ValidationError("dictionary must contain a 'dir' element")
if not isinstance(path, str):
raise ValidationError("field's dir element must be a string")
size = value.get('size', None)
if size is not None and not isinstance(size, int):
raise ValidationError("field's size element must be an integer")
total_size = value.get('total_size', None)
if total_size is not None and not isinstance(total_size, int):
raise ValidationError("field's total_size element must be an integer")
refs = value.get('refs', None)
if refs is not None and not isinstance(refs, list):
# TODO: Validate that all refs are strings.
raise ValidationError("field's refs element must be a list of strings")
return DirDescriptor(
path,
size=size,
total_size=total_size,
refs=refs,
)
elif not isinstance(value, None):
raise ValidationError("field must be a DirDescriptor, string or a dict") | Convert value if needed. | entailment |
def contribute_to_class(self, process, fields, name):
    """Register this list field, and its inner field, with a process.

    :param process: Process descriptor instance
    :param fields: Fields registry to use
    :param name: Field name
    """
    super().contribute_to_class(process, fields, name)
    # The inner field shares the list field's registration.
    self.inner.process = process
    self.inner.name = name
:param process: Process descriptor instance
:param fields: Fields registry to use
:param name: Field name | entailment |
def to_output(self, value):
    """Convert a list value by delegating each element to the inner field."""
    outputs = [self.inner.to_output(item)[self.name] for item in value]
    return {self.name: outputs}
def validate(self, value):
    """Validate the list itself and each contained element."""
    if value is not None:
        if not isinstance(value, list):
            raise ValidationError("field must be a list")
        for position, item in enumerate(value):
            try:
                self.inner.validate(item)
            except ValidationError as error:
                # Re-raise with the offending element's index for context.
                raise ValidationError("invalid element {}: {}".format(
                    position,
                    error.args[0],
                ))
    super().validate(value)
def _get(self, key):
"""Return given key from cache."""
self._populate_cache()
if key not in self._cache:
raise AttributeError("DataField has no member {}".format(key))
return self._cache[key] | Return given key from cache. | entailment |
def to_python(self, value):
    """Convert *value* to a DataDescriptor, hydrating from a dict if given."""
    if value is None:
        return None
    if isinstance(value, DataDescriptor):
        return value
    prehydrated = None
    if isinstance(value, dict):
        # Allow pre-hydrated data objects.
        prehydrated = value
        if '__id' not in prehydrated:
            raise ValidationError("dictionary must contain an '__id' element")
        value = prehydrated['__id']
    elif not isinstance(value, int):
        raise ValidationError("field must be a DataDescriptor, data object primary key or dict")
    return DataDescriptor(value, self, prehydrated)
def contribute_to_class(self, process, fields, name):
    """Register this field with a specific process.

    Also registers every field defined on the group class into this
    group's own fields registry.

    :param process: Process descriptor instance
    :param fields: Fields registry to use
    :param name: Field name
    """
    # Use order-preserving definition namespace (__dict__) to respect the
    # order of GroupField's fields definition.
    for field_name in self.field_group.__dict__:
        # Skip private/dunder names; only public attributes are fields.
        if field_name.startswith('_'):
            continue
        field = getattr(self.field_group, field_name)
        # Note: inner fields register into self.fields, not the outer registry.
        field.contribute_to_class(process, self.fields, field_name)
    super().contribute_to_class(process, fields, name)
:param process: Process descriptor instance
:param fields: Fields registry to use
:param name: Field name | entailment |
def to_python(self, value):
    """Hydrate a group value into a GroupDescriptor."""
    if isinstance(value, GroupDescriptor):
        value = value._value  # pylint: disable=protected-access
    converted = {
        name: field.to_python(value.get(name, None))
        for name, field in self.fields.items()
    }
    return GroupDescriptor(converted)
def to_schema(self):
    """Return field schema including the nested group fields."""
    schema = super().to_schema()
    for key in ('disabled', 'collapsed'):
        attribute = getattr(self, key)
        if attribute is not None:
            schema[key] = attribute
    schema['group'] = [field.to_schema() for field in self.fields.values()]
    return schema
def update_constants():
    """Recreate channel name constants with changed settings.

    This kludge is mostly needed due to the way Django settings are
    patched for testing and how modules need to be imported throughout
    the project. On import time, settings are not patched yet, but some
    of the code needs static values immediately. Updating functions such
    as this one are then needed to fix dummy values.
    """
    global MANAGER_CONTROL_CHANNEL, MANAGER_EXECUTOR_CHANNELS  # pylint: disable=global-statement
    global MANAGER_LISTENER_STATS, MANAGER_STATE_PREFIX  # pylint: disable=global-statement
    # FLOW_MANAGER (and its REDIS_PREFIX key) may be absent, e.g. before
    # settings are fully configured; fall back to an empty prefix.
    redis_prefix = getattr(settings, 'FLOW_MANAGER', {}).get('REDIS_PREFIX', '')
    MANAGER_CONTROL_CHANNEL = '{}.control'.format(redis_prefix)
    MANAGER_EXECUTOR_CHANNELS = ManagerChannelPair(
        '{}.result_queue'.format(redis_prefix),
        '{}.result_queue_response'.format(redis_prefix),
    )
    MANAGER_STATE_PREFIX = '{}.state'.format(redis_prefix)
    MANAGER_LISTENER_STATS = '{}.listener_stats'.format(redis_prefix)
This kludge is mostly needed due to the way Django settings are
patched for testing and how modules need to be imported throughout
the project. On import time, settings are not patched yet, but some
of the code needs static values immediately. Updating functions such
as this one are then needed to fix dummy values. | entailment |
def destroy_channels(self):
    """Destroy Redis channels managed by this state instance."""
    # Scan all attributes for atomic Redis-backed items and delete each
    # one's underlying Redis key.
    for attr_name in dir(self):
        candidate = getattr(self, attr_name)
        if isinstance(candidate, self.RedisAtomicBase):
            self.redis.delete(candidate.item_name)
def render_to_image_file(
    self, image_out_path, width_pixels=None, height_pixels=None, dpi=90
):
    """Render the SubjectInfo to an image file.

    Args:
      image_out_path : str
        Path to where the image file will be written. Valid extensions are
        ``.svg,`` ``.pdf``, and ``.png``.

      width_pixels : int
        Width of image to write, in pixels.

      height_pixels : int
        Height of image to write, in pixels.

      dpi:
        Dots Per Inch to declare in image file. This does not change the
        resolution of the image but may change the size of the image when
        rendered.

    Returns:
      None
    """
    # Record the render mode before delegating to ETE's render().
    self._render_type = "file"
    self._tree.render(
        file_name=image_out_path,
        w=width_pixels,
        h=height_pixels,
        dpi=dpi,
        units="px",
        tree_style=self._get_tree_style(),
    )
Args:
image_out_path : str
Path to where image image will be written. Valid extensions are
``.svg,`` ``.pdf``, and ``.png``.
width_pixels : int
Width of image to write.
height_pixels : int
Height of image to write, in pixels.
dpi:
Dots Per Inch to declare in image file. This does not change the
resolution of the image but may change the size of the image when
rendered.
Returns:
None | entailment |
def browse_in_qt5_ui(self):
    """Open an interactive Qt5 window for browsing and editing the SubjectInfo."""
    self._render_type = "browse"
    style = self._get_tree_style()
    self._tree.show(tree_style=style)
def _gen_etetoolkit_tree(self, node, subject_info_tree):
"""Copy SubjectInfoTree to a ETE Tree."""
for si_node in subject_info_tree.child_list:
if si_node.type_str == TYPE_NODE_TAG:
child = self._add_type_node(node, si_node.label_str)
elif si_node.type_str == SUBJECT_NODE_TAG:
child = self._add_subject_node(node, si_node.label_str)
else:
raise AssertionError(
'Unknown node type. type_str="{}"'.format(si_node.type_str)
)
self._gen_etetoolkit_tree(child, si_node) | Copy SubjectInfoTree to a ETE Tree. | entailment |
def _add_type_node(self, node, label):
    """Append a child node tagged as a SubjectInfo type and return it."""
    type_node = node.add_child(name=label)
    type_node.add_feature(TYPE_NODE_TAG, True)
    return type_node
def _add_subject_node(self, node, subj_str):
    """Append a child node holding a subject string and return it."""
    subject_node = node.add_child(name=subj_str)
    subject_node.add_feature(SUBJECT_NODE_TAG, True)
    return subject_node
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.