sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def get_contributor_sort_value(self, obj):
"""Generate display name for contributor."""
user = obj.contributor
if user.first_name or user.last_name:
contributor = user.get_full_name()
else:
contributor = user.username
return contributor.strip().lower() | Generate display name for contributor. | entailment |
def _get_user(self, user):
"""Generate user filtering tokens."""
return ' '.join([user.username, user.first_name, user.last_name]) | Generate user filtering tokens. | entailment |
def get_owner_ids_value(self, obj):
"""Extract owners' ids."""
return [
user.pk
for user in get_users_with_permission(obj, get_full_perm('owner', obj))
] | Extract owners' ids. | entailment |
def get_owner_names_value(self, obj):
"""Extract owners' names."""
return [
self._get_user(user)
for user in get_users_with_permission(obj, get_full_perm('owner', obj))
] | Extract owners' names. | entailment |
def _get_and_assert_slice_param(url_dict, param_name, default_int):
"""Return ``param_str`` converted to an int.
If str cannot be converted to int or int is not zero or positive, raise
InvalidRequest.
"""
param_str = url_dict['query'].get(param_name, default_int)
try:
n = int(param_str)
except ValueError:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Slice parameter is not a valid integer. {}="{}"'.format(
param_name, param_str
),
)
if n < 0:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Slice parameter cannot be a negative number. {}="{}"'.format(
param_name, param_str
),
)
return n | Return ``param_str`` converted to an int.
If str cannot be converted to int or int is not zero or positive, raise
InvalidRequest. | entailment |
def _assert_valid_start(start_int, count_int, total_int):
"""Assert that the number of objects visible to the active subject is higher than
the requested start position for the slice.
This ensures that it's possible to create a valid slice.
"""
if total_int and start_int >= total_int:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Requested a non-existing slice. start={} count={} total={}'.format(
start_int, count_int, total_int
),
) | Assert that the number of objects visible to the active subject is higher than
the requested start position for the slice.
This ensures that it's possible to create a valid slice. | entailment |
def _adjust_count_if_required(start_int, count_int, total_int):
"""Adjust requested object count down if there are not enough objects visible to the
active subjects to cover the requested slice start and count.
Preconditions: start is verified to be lower than the number of visible objects,
making it possible to create a valid slice by adjusting count.
"""
if start_int + count_int > total_int:
count_int = total_int - start_int
count_int = min(count_int, django.conf.settings.MAX_SLICE_ITEMS)
return count_int | Adjust requested object count down if there are not enough objects visible to the
active subjects to cover the requested slice start and count.
Preconditions: start is verified to be lower than the number of visible objects,
making it possible to create a valid slice by adjusting count. | entailment |
def _add_fallback_slice_filter(query, start_int, count_int, total_int):
"""Create a slice of a query based on request start and count parameters.
This adds `OFFSET <start> LIMIT <count>` to the SQL query, which causes slicing to
run very slowly on large result sets.
"""
logging.debug(
'Adding fallback slice filter. start={} count={} total={} '.format(
start_int, count_int, total_int
)
)
if not count_int:
return query.none()
else:
return query[start_int : start_int + count_int] | Create a slice of a query based on request start and count parameters.
This adds `OFFSET <start> LIMIT <count>` to the SQL query, which causes slicing to
run very slowly on large result sets. | entailment |
def _cache_get_last_in_slice(url_dict, start_int, total_int, authn_subj_list):
"""Return None if cache entry does not exist."""
key_str = _gen_cache_key_for_slice(url_dict, start_int, total_int, authn_subj_list)
# TODO: Django docs state that cache.get() should return None on unknown key.
try:
last_ts_tup = django.core.cache.cache.get(key_str)
except KeyError:
last_ts_tup = None
logging.debug('Cache get. key="{}" -> last_ts_tup={}'.format(key_str, last_ts_tup))
return last_ts_tup | Return None if cache entry does not exist. | entailment |
def _gen_cache_key_for_slice(url_dict, start_int, total_int, authn_subj_list):
"""Generate cache key for the REST URL the client is currently accessing or is
expected to access in order to get the slice starting at the given ``start_int`` of
a multi-slice result set.
When used for finding the key to check in the current call, ``start_int`` is
0, or the start that was passed in the current call.
When used for finding the key to set for the anticipated call, ``start_int`` is
current ``start_int`` + ``count_int``, the number of objects the current call will
return.
The URL for the slice is the same as for the current slice, except that the
`start` query parameter has been increased by the number of items returned in
the current slice.
Except for advancing the start value and potentially adjusting the desired
slice size, it doesn't make sense for the client to change the REST URL during
slicing, but such queries are supported. They will, however, trigger
potentially expensive database queries to find the current slice position.
To support adjustments in desired slice size during slicing, the count is not
used when generating the key.
The active subjects are used in the key in order to prevent potential security
issues if authenticated subjects change during slicing.
The url_dict is normalized by encoding it to a JSON string with sorted keys. A
hash of the JSON is used for better distribution in a hash map and to avoid
the 256 bytes limit on keys in some caches.
"""
# logging.debug('Gen key. result_record_count={}'.format(result_record_count))
key_url_dict = copy.deepcopy(url_dict)
key_url_dict['query'].pop('start', None)
key_url_dict['query'].pop('count', None)
key_json = d1_common.util.serialize_to_normalized_compact_json(
{
'url_dict': key_url_dict,
'start': start_int,
'total': total_int,
'subject': authn_subj_list,
}
)
logging.debug('key_json={}'.format(key_json))
return hashlib.sha256(key_json.encode('utf-8')).hexdigest() | Generate cache key for the REST URL the client is currently accessing or is
expected to access in order to get the slice starting at the given ``start_int`` of
a multi-slice result set.
When used for finding the key to check in the current call, ``start_int`` is
0, or the start that was passed in the current call.
When used for finding the key to set for the anticipated call, ``start_int`` is
current ``start_int`` + ``count_int``, the number of objects the current call will
return.
The URL for the slice is the same as for the current slice, except that the
`start` query parameter has been increased by the number of items returned in
the current slice.
Except for advancing the start value and potentially adjusting the desired
slice size, it doesn't make sense for the client to change the REST URL during
slicing, but such queries are supported. They will, however, trigger
potentially expensive database queries to find the current slice position.
To support adjustments in desired slice size during slicing, the count is not
used when generating the key.
The active subjects are used in the key in order to prevent potential security
issues if authenticated subjects change during slicing.
The url_dict is normalized by encoding it to a JSON string with sorted keys. A
hash of the JSON is used for better distribution in a hash map and to avoid
the 256 bytes limit on keys in some caches. | entailment |
def smeft_toarray(wc_name, wc_dict):
"""Construct a numpy array with Wilson coefficient values from a
dictionary of label-value pairs corresponding to the non-redundant
elements."""
shape = smeftutil.C_keys_shape[wc_name]
C = np.zeros(shape, dtype=complex)
for k, v in wc_dict.items():
if k.split('_')[0] != wc_name:
continue
indices = k.split('_')[-1] # e.g. '1213'
indices = tuple(int(s) - 1 for s in indices) # e.g. (1, 2, 1, 3)
C[indices] = v
C = smeftutil.symmetrize({wc_name: C})[wc_name]
return C | Construct a numpy array with Wilson coefficient values from a
dictionary of label-value pairs corresponding to the non-redundant
elements. | entailment |
def warsaw_to_warsawmass(C, parameters=None, sectors=None):
"""Translate from the Warsaw basis to the 'Warsaw mass' basis.
Parameters used:
- `Vus`, `Vub`, `Vcb`, `gamma`: elements of the unitary CKM matrix (defined
as the mismatch between left-handed quark mass matrix diagonalization
matrices).
"""
p = default_parameters.copy()
if parameters is not None:
# if parameters are passed in, overwrite the default values
p.update(parameters)
# start out with a 1:1 copy
C_out = C.copy()
# rotate left-handed up-type quark fields in uL-uR operator WCs
C_rotate_u = ['uphi', 'uG', 'uW', 'uB']
for name in C_rotate_u:
_array = smeft_toarray(name, C)
V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"])
UuL = V.conj().T
_array = UuL.conj().T @ _array
_dict = smeft_fromarray(name, _array)
C_out.update(_dict)
# diagonalize dimension-5 Weinberg operator
_array = smeft_toarray('llphiphi', C)
_array = np.diag(ckmutil.diag.msvd(_array)[1])
_dict = smeft_fromarray('llphiphi', _array)
C_out.update(_dict)
return C_out | Translate from the Warsaw basis to the 'Warsaw mass' basis.
Parameters used:
- `Vus`, `Vub`, `Vcb`, `gamma`: elements of the unitary CKM matrix (defined
as the mismatch between left-handed quark mass matrix diagonalization
matrices). | entailment |
def warsaw_up_to_warsaw(C, parameters=None, sectors=None):
"""Translate from the 'Warsaw up' basis to the Warsaw basis.
Parameters used:
- `Vus`, `Vub`, `Vcb`, `gamma`: elements of the unitary CKM matrix (defined
as the mismatch between left-handed quark mass matrix diagonalization
matrices).
"""
C_in = smeftutil.wcxf2arrays_symmetrized(C)
p = default_parameters.copy()
if parameters is not None:
# if parameters are passed in, overwrite the default values
p.update(parameters)
Uu = Ud = Ul = Ue = np.eye(3)
V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"])
Uq = V
C_out = smeftutil.flavor_rotation(C_in, Uq, Uu, Ud, Ul, Ue)
C_out = smeftutil.arrays2wcxf_nonred(C_out)
warsaw = wcxf.Basis['SMEFT', 'Warsaw']
all_wcs = set(warsaw.all_wcs) # to speed up lookup
return {k: v for k, v in C_out.items() if k in all_wcs} | Translate from the 'Warsaw up' basis to the Warsaw basis.
Parameters used:
- `Vus`, `Vub`, `Vcb`, `gamma`: elements of the unitary CKM matrix (defined
as the mismatch between left-handed quark mass matrix diagonalization
matrices). | entailment |
def sysmeta_add_preferred(sysmeta_pyxb, node_urn):
"""Add a remote Member Node to the list of preferred replication targets to this
System Metadata object.
Also remove the target MN from the list of blocked Member Nodes if present.
If the target MN is already in the preferred list and not in the blocked list, this
function is a no-op.
Args:
sysmeta_pyxb : SystemMetadata PyXB object.
System Metadata in which to add the preferred replication target.
If the System Metadata does not already have a Replication Policy, a default
replication policy which enables replication is added and populated with the
preferred replication target.
node_urn : str
Node URN of the remote MN that will be added. On the form
``urn:node:MyMemberNode``.
"""
if not has_replication_policy(sysmeta_pyxb):
sysmeta_set_default_rp(sysmeta_pyxb)
rp_pyxb = sysmeta_pyxb.replicationPolicy
_add_node(rp_pyxb, 'pref', node_urn)
_remove_node(rp_pyxb, 'block', node_urn) | Add a remote Member Node to the list of preferred replication targets to this
System Metadata object.
Also remove the target MN from the list of blocked Member Nodes if present.
If the target MN is already in the preferred list and not in the blocked list, this
function is a no-op.
Args:
sysmeta_pyxb : SystemMetadata PyXB object.
System Metadata in which to add the preferred replication target.
If the System Metadata does not already have a Replication Policy, a default
replication policy which enables replication is added and populated with the
preferred replication target.
node_urn : str
Node URN of the remote MN that will be added. On the form
``urn:node:MyMemberNode``. | entailment |
def normalize(rp_pyxb):
"""Normalize a ReplicationPolicy PyXB type in place.
The preferred and blocked lists are sorted alphabetically. As blocked nodes
override preferred nodes, and any node present in both lists is removed from the
preferred list.
Args:
rp_pyxb : ReplicationPolicy PyXB object
The object will be normalized in place.
"""
# noinspection PyMissingOrEmptyDocstring
def sort(r, a):
d1_common.xml.sort_value_list_pyxb(_get_attr_or_list(r, a))
rp_pyxb.preferredMemberNode = set(_get_attr_or_list(rp_pyxb, 'pref')) - set(
_get_attr_or_list(rp_pyxb, 'block')
)
sort(rp_pyxb, 'block')
sort(rp_pyxb, 'pref') | Normalize a ReplicationPolicy PyXB type in place.
The preferred and blocked lists are sorted alphabetically. As blocked nodes
override preferred nodes, and any node present in both lists is removed from the
preferred list.
Args:
rp_pyxb : ReplicationPolicy PyXB object
The object will be normalized in place. | entailment |
def are_equivalent_xml(a_xml, b_xml):
"""Check if two ReplicationPolicy XML docs are semantically equivalent.
The ReplicationPolicy XML docs are normalized before comparison.
Args:
a_xml, b_xml: ReplicationPolicy XML docs to compare
Returns:
bool: ``True`` if the resulting policies for the two objects are semantically
equivalent.
"""
return are_equivalent_pyxb(
d1_common.xml.deserialize(a_xml), d1_common.xml.deserialize(b_xml)
) | Check if two ReplicationPolicy XML docs are semantically equivalent.
The ReplicationPolicy XML docs are normalized before comparison.
Args:
a_xml, b_xml: ReplicationPolicy XML docs to compare
Returns:
bool: ``True`` if the resulting policies for the two objects are semantically
equivalent. | entailment |
def pyxb_to_dict(rp_pyxb):
"""Convert ReplicationPolicy PyXB object to a normalized dict.
Args:
rp_pyxb: ReplicationPolicy to convert.
Returns:
dict : Replication Policy as normalized dict.
Example::
{
'allowed': True,
'num': 3,
'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'},
'preferredMemberNode': {'urn:node:NODE4', 'urn:node:NODE5'},
}
"""
return {
'allowed': bool(_get_attr_or_list(rp_pyxb, 'allowed')),
'num': _get_as_int(rp_pyxb),
'block': _get_as_set(rp_pyxb, 'block'),
'pref': _get_as_set(rp_pyxb, 'pref'),
} | Convert ReplicationPolicy PyXB object to a normalized dict.
Args:
rp_pyxb: ReplicationPolicy to convert.
Returns:
dict : Replication Policy as normalized dict.
Example::
{
'allowed': True,
'num': 3,
'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'},
'preferredMemberNode': {'urn:node:NODE4', 'urn:node:NODE5'},
} | entailment |
def dict_to_pyxb(rp_dict):
"""Convert dict to ReplicationPolicy PyXB object.
Args:
rp_dict: Native Python structure representing a Replication Policy.
Example::
{
'allowed': True,
'num': 3,
'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'},
'preferredMemberNode': {'urn:node:NODE4', 'urn:node:NODE5'},
}
Returns:
ReplicationPolicy PyXB object.
"""
rp_pyxb = d1_common.types.dataoneTypes.replicationPolicy()
rp_pyxb.replicationAllowed = rp_dict['allowed']
rp_pyxb.numberReplicas = rp_dict['num']
rp_pyxb.blockedMemberNode = rp_dict['block']
rp_pyxb.preferredMemberNode = rp_dict['pref']
normalize(rp_pyxb)
return rp_pyxb | Convert dict to ReplicationPolicy PyXB object.
Args:
rp_dict: Native Python structure representing a Replication Policy.
Example::
{
'allowed': True,
'num': 3,
'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'},
'preferredMemberNode': {'urn:node:NODE4', 'urn:node:NODE5'},
}
Returns:
ReplicationPolicy PyXB object. | entailment |
def _ensure_allow_rp(rp_pyxb):
"""Ensure that RP allows replication."""
if not rp_pyxb.replicationAllowed:
rp_pyxb.replicationAllowed = True
if not rp_pyxb.numberReplicas:
rp_pyxb.numberReplicas = 3 | Ensure that RP allows replication. | entailment |
def _display_interval(i):
"""Convert a time interval into a human-readable string.
:param i: The interval to convert, in seconds.
"""
sigils = ["d", "h", "m", "s"]
factors = [24 * 60 * 60, 60 * 60, 60, 1]
remain = int(i)
result = ""
for fac, sig in zip(factors, sigils):
if remain < fac:
continue
result += "{}{}".format(remain // fac, sig)
remain = remain % fac
return result | Convert a time interval into a human-readable string.
:param i: The interval to convert, in seconds. | entailment |
def update(self, num):
"""Update metrics with the new number."""
num = float(num)
self.count += 1
self.low = min(self.low, num)
self.high = max(self.high, num)
# Welford's online mean and variance algorithm.
delta = num - self.mean
self.mean = self.mean + delta / self.count
delta2 = num - self.mean
self._rolling_variance = self._rolling_variance + delta * delta2
if self.count > 1:
self.deviation = math.sqrt(self._rolling_variance / (self.count - 1))
else:
self.deviation = 0.0 | Update metrics with the new number. | entailment |
def to_dict(self):
"""Pack the stats computed into a dictionary."""
return {
'high': self.high,
'low': self.low,
'mean': self.mean,
'count': self.count,
'deviation': self.deviation,
} | Pack the stats computed into a dictionary. | entailment |
def add(self, count, timestamp=None):
"""Add a value at the specified time to the series.
:param count: The number of work items ready at the specified
time.
:param timestamp: The timestamp to add. Defaults to None,
meaning current time. It should be strictly greater (newer)
than the last added timestamp.
"""
if timestamp is None:
timestamp = time.time()
if self.last_data >= timestamp:
raise ValueError("Time {} >= {} in load average calculation".format(self.last_data, timestamp))
self.last_data = timestamp
for meta in self.intervals.values():
meta.push(count, timestamp) | Add a value at the specified time to the series.
:param count: The number of work items ready at the specified
time.
:param timestamp: The timestamp to add. Defaults to None,
meaning current time. It should be strictly greater (newer)
than the last added timestamp. | entailment |
def to_dict(self):
"""Pack the load averages into a nicely-keyed dictionary."""
result = {}
for meta in self.intervals.values():
result[meta.display] = meta.value
return result | Pack the load averages into a nicely-keyed dictionary. | entailment |
def listFormats(self, vendorSpecific=None):
"""See Also: listFormatsResponse()
Args:
vendorSpecific:
Returns:
"""
response = self.listFormatsResponse(vendorSpecific)
return self._read_dataone_type_response(response, 'ObjectFormatList') | See Also: listFormatsResponse()
Args:
vendorSpecific:
Returns: | entailment |
def getFormat(self, formatId, vendorSpecific=None):
"""See Also: getFormatResponse()
Args:
formatId:
vendorSpecific:
Returns:
"""
response = self.getFormatResponse(formatId, vendorSpecific)
return self._read_dataone_type_response(response, 'ObjectFormat') | See Also: getFormatResponse()
Args:
formatId:
vendorSpecific:
Returns: | entailment |
def reserveIdentifierResponse(self, pid, vendorSpecific=None):
"""CNCore.getLogRecords(session[, fromDate][, toDate][, event][, start][,
count]) β Log https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNCore.getLogRecords Implemented in
d1_client.baseclient.py.
CNCore.reserveIdentifier(session, pid) β Identifier
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNCore.reserveIdentifier
Args:
pid:
vendorSpecific:
Returns:
"""
mmp_dict = {'pid': pid}
return self.POST(['reserve', pid], fields=mmp_dict, headers=vendorSpecific) | CNCore.getLogRecords(session[, fromDate][, toDate][, event][, start][,
count]) β Log https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNCore.getLogRecords Implemented in
d1_client.baseclient.py.
CNCore.reserveIdentifier(session, pid) β Identifier
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNCore.reserveIdentifier
Args:
pid:
vendorSpecific:
Returns: | entailment |
def reserveIdentifier(self, pid, vendorSpecific=None):
"""See Also: reserveIdentifierResponse()
Args:
pid:
vendorSpecific:
Returns:
"""
response = self.reserveIdentifierResponse(pid, vendorSpecific)
return self._read_dataone_type_response(response, 'Identifier', vendorSpecific) | See Also: reserveIdentifierResponse()
Args:
pid:
vendorSpecific:
Returns: | entailment |
def listChecksumAlgorithms(self, vendorSpecific=None):
"""See Also: listChecksumAlgorithmsResponse()
Args:
vendorSpecific:
Returns:
"""
response = self.listChecksumAlgorithmsResponse(vendorSpecific)
return self._read_dataone_type_response(response, 'ChecksumAlgorithmList') | See Also: listChecksumAlgorithmsResponse()
Args:
vendorSpecific:
Returns: | entailment |
def setObsoletedByResponse(
self, pid, obsoletedByPid, serialVersion, vendorSpecific=None
):
"""CNCore.setObsoletedBy(session, pid, obsoletedByPid, serialVersion) β boolean
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNCore.setObsoletedBy.
Args:
pid:
obsoletedByPid:
serialVersion:
vendorSpecific:
Returns:
"""
mmp_dict = {
'obsoletedByPid': obsoletedByPid,
'serialVersion': str(serialVersion),
}
return self.PUT(['obsoletedBy', pid], fields=mmp_dict, headers=vendorSpecific) | CNCore.setObsoletedBy(session, pid, obsoletedByPid, serialVersion) β boolean
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNCore.setObsoletedBy.
Args:
pid:
obsoletedByPid:
serialVersion:
vendorSpecific:
Returns: | entailment |
def setObsoletedBy(self, pid, obsoletedByPid, serialVersion, vendorSpecific=None):
"""See Also: setObsoletedByResponse()
Args:
pid:
obsoletedByPid:
serialVersion:
vendorSpecific:
Returns:
"""
response = self.setObsoletedByResponse(
pid, obsoletedByPid, serialVersion, vendorSpecific
)
return self._read_boolean_response(response) | See Also: setObsoletedByResponse()
Args:
pid:
obsoletedByPid:
serialVersion:
vendorSpecific:
Returns: | entailment |
def listNodes(self, vendorSpecific=None):
"""See Also: listNodesResponse()
Args:
vendorSpecific:
Returns:
"""
response = self.listNodesResponse(vendorSpecific)
return self._read_dataone_type_response(response, 'NodeList') | See Also: listNodesResponse()
Args:
vendorSpecific:
Returns: | entailment |
def hasReservationResponse(self, pid, subject, vendorSpecific=None):
"""CNCore.registerSystemMetadata(session, pid, sysmeta) β Identifier CN
INTERNAL.
CNCore.hasReservation(session, pid) β boolean
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNCore.hasReservation
Args:
pid:
subject:
vendorSpecific:
Returns:
"""
return self.GET(['reserve', pid, subject], headers=vendorSpecific) | CNCore.registerSystemMetadata(session, pid, sysmeta) β Identifier CN
INTERNAL.
CNCore.hasReservation(session, pid) β boolean
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNCore.hasReservation
Args:
pid:
subject:
vendorSpecific:
Returns: | entailment |
def hasReservation(self, pid, subject, vendorSpecific=None):
"""See Also: hasReservationResponse()
Args:
pid:
subject:
vendorSpecific:
Returns:
"""
response = self.hasReservationResponse(pid, subject, vendorSpecific)
return self._read_boolean_404_response(response) | See Also: hasReservationResponse()
Args:
pid:
subject:
vendorSpecific:
Returns: | entailment |
def resolve(self, pid, vendorSpecific=None):
"""See Also: resolveResponse()
Args:
pid:
vendorSpecific:
Returns:
"""
response = self.resolveResponse(pid, vendorSpecific)
return self._read_dataone_type_response(
response, 'ObjectLocationList', response_is_303_redirect=True
) | See Also: resolveResponse()
Args:
pid:
vendorSpecific:
Returns: | entailment |
def searchResponse(self, queryType, query, vendorSpecific=None, **kwargs):
"""CNRead.search(session, queryType, query) β ObjectList
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNRead.search.
Args:
queryType:
query:
vendorSpecific:
**kwargs:
Returns:
"""
return self.GET(
['search', queryType, query], headers=vendorSpecific, query=kwargs
) | CNRead.search(session, queryType, query) β ObjectList
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNRead.search.
Args:
queryType:
query:
vendorSpecific:
**kwargs:
Returns: | entailment |
def search(self, queryType, query=None, vendorSpecific=None, **kwargs):
"""See Also: searchResponse()
Args:
queryType:
query:
vendorSpecific:
**kwargs:
Returns:
"""
response = self.searchResponse(queryType, query, vendorSpecific, **kwargs)
return self._read_dataone_type_response(response, 'ObjectList') | See Also: searchResponse()
Args:
queryType:
query:
vendorSpecific:
**kwargs:
Returns: | entailment |
def queryResponse(self, queryEngine, query=None, vendorSpecific=None, **kwargs):
"""CNRead.query(session, queryEngine, query) β OctetStream
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNRead.query.
Args:
queryEngine:
query:
vendorSpecific:
**kwargs:
Returns:
"""
return self.GET(
['query', queryEngine, query], headers=vendorSpecific, query=kwargs
) | CNRead.query(session, queryEngine, query) β OctetStream
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNRead.query.
Args:
queryEngine:
query:
vendorSpecific:
**kwargs:
Returns: | entailment |
def query(self, queryEngine, query=None, vendorSpecific=None, **kwargs):
"""See Also: queryResponse()
Args:
queryEngine:
query:
vendorSpecific:
**kwargs:
Returns:
"""
response = self.queryResponse(queryEngine, query, vendorSpecific, **kwargs)
return self._read_stream_response(response) | See Also: queryResponse()
Args:
queryEngine:
query:
vendorSpecific:
**kwargs:
Returns: | entailment |
def setRightsHolderResponse(self, pid, userId, serialVersion, vendorSpecific=None):
"""CNAuthorization.setRightsHolder(session, pid, userId, serialVersion)
β Identifier https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNAuthorization.setRightsHolder.
Args:
pid:
userId:
serialVersion:
vendorSpecific:
Returns:
"""
mmp_dict = {'userId': userId, 'serialVersion': str(serialVersion)}
return self.PUT(['owner', pid], headers=vendorSpecific, fields=mmp_dict) | CNAuthorization.setRightsHolder(session, pid, userId, serialVersion)
β Identifier https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNAuthorization.setRightsHolder.
Args:
pid:
userId:
serialVersion:
vendorSpecific:
Returns: | entailment |
def setRightsHolder(self, pid, userId, serialVersion, vendorSpecific=None):
"""See Also: setRightsHolderResponse()
Args:
pid:
userId:
serialVersion:
vendorSpecific:
Returns:
"""
response = self.setRightsHolderResponse(
pid, userId, serialVersion, vendorSpecific
)
return self._read_boolean_response(response) | See Also: setRightsHolderResponse()
Args:
pid:
userId:
serialVersion:
vendorSpecific:
Returns: | entailment |
def setAccessPolicyResponse(
self, pid, accessPolicy, serialVersion, vendorSpecific=None
):
"""CNAuthorization.setAccessPolicy(session, pid, accessPolicy, serialVersion) β
boolean https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNAuthorization.setAccessPolicy.
Args:
pid:
accessPolicy:
serialVersion:
vendorSpecific:
Returns:
"""
mmp_dict = {
'serialVersion': str(serialVersion),
'accessPolicy': ('accessPolicy.xml', accessPolicy.toxml('utf-8')),
}
return self.PUT(['accessRules', pid], fields=mmp_dict, headers=vendorSpecific) | CNAuthorization.setAccessPolicy(session, pid, accessPolicy, serialVersion) β
boolean https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNAuthorization.setAccessPolicy.
Args:
pid:
accessPolicy:
serialVersion:
vendorSpecific:
Returns: | entailment |
def setAccessPolicy(self, pid, accessPolicy, serialVersion, vendorSpecific=None):
"""See Also: setAccessPolicyResponse()
Args:
pid:
accessPolicy:
serialVersion:
vendorSpecific:
Returns:
"""
response = self.setAccessPolicyResponse(
pid, accessPolicy, serialVersion, vendorSpecific
)
return self._read_boolean_response(response) | See Also: setAccessPolicyResponse()
Args:
pid:
accessPolicy:
serialVersion:
vendorSpecific:
Returns: | entailment |
def registerAccountResponse(self, person, vendorSpecific=None):
"""CNIdentity.registerAccount(session, person) β Subject
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.registerAccount.
Args:
person:
vendorSpecific:
Returns:
"""
mmp_dict = {'person': ('person.xml', person.toxml('utf-8'))}
return self.POST('accounts', fields=mmp_dict, headers=vendorSpecific) | CNIdentity.registerAccount(session, person) β Subject
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.registerAccount.
Args:
person:
vendorSpecific:
Returns: | entailment |
def registerAccount(self, person, vendorSpecific=None):
"""See Also: registerAccountResponse()
Args:
person:
vendorSpecific:
Returns:
"""
response = self.registerAccountResponse(person, vendorSpecific)
return self._read_boolean_response(response) | See Also: registerAccountResponse()
Args:
person:
vendorSpecific:
Returns: | entailment |
def updateAccountResponse(self, subject, person, vendorSpecific=None):
"""CNIdentity.updateAccount(session, person) β Subject
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.updateAccount.
Args:
subject:
person:
vendorSpecific:
Returns:
"""
mmp_dict = {'person': ('person.xml', person.toxml('utf-8'))}
return self.PUT(['accounts', subject], fields=mmp_dict, headers=vendorSpecific) | CNIdentity.updateAccount(session, person) β Subject
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.updateAccount.
Args:
subject:
person:
vendorSpecific:
Returns: | entailment |
def updateAccount(self, subject, person, vendorSpecific=None):
"""See Also: updateAccountResponse()
Args:
subject:
person:
vendorSpecific:
Returns:
"""
response = self.updateAccountResponse(subject, person, vendorSpecific)
return self._read_boolean_response(response) | See Also: updateAccountResponse()
Args:
subject:
person:
vendorSpecific:
Returns: | entailment |
def verifyAccount(self, subject, vendorSpecific=None):
"""See Also: verifyAccountResponse()
Args:
subject:
vendorSpecific:
Returns:
"""
response = self.verifyAccountResponse(subject, vendorSpecific)
return self._read_boolean_response(response) | See Also: verifyAccountResponse()
Args:
subject:
vendorSpecific:
Returns: | entailment |
def getSubjectInfo(self, subject, vendorSpecific=None):
"""See Also: getSubjectInfoResponse()
Args:
subject:
vendorSpecific:
Returns:
"""
response = self.getSubjectInfoResponse(subject, vendorSpecific)
return self._read_dataone_type_response(response, 'SubjectInfo') | See Also: getSubjectInfoResponse()
Args:
subject:
vendorSpecific:
Returns: | entailment |
def listSubjectsResponse(
self, query, status=None, start=None, count=None, vendorSpecific=None
):
"""CNIdentity.listSubjects(session, query, status, start, count) β SubjectList
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.listSubjects.
Args:
query:
status:
start:
count:
vendorSpecific:
Returns:
"""
url_query = {'status': status, 'start': start, 'count': count, 'query': query}
return self.GET('accounts', query=url_query, headers=vendorSpecific) | CNIdentity.listSubjects(session, query, status, start, count) β SubjectList
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.listSubjects.
Args:
query:
status:
start:
count:
vendorSpecific:
Returns: | entailment |
def listSubjects(
self, query, status=None, start=None, count=None, vendorSpecific=None
):
"""See Also: listSubjectsResponse()
Args:
query:
status:
start:
count:
vendorSpecific:
Returns:
"""
response = self.listSubjectsResponse(
query, status, start, count, vendorSpecific
)
return self._read_dataone_type_response(response, 'SubjectInfo') | See Also: listSubjectsResponse()
Args:
query:
status:
start:
count:
vendorSpecific:
Returns: | entailment |
def mapIdentityResponse(
self, primarySubject, secondarySubject, vendorSpecific=None
):
"""CNIdentity.mapIdentity(session, subject) β boolean
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.mapIdentity.
Args:
primarySubject:
secondarySubject:
vendorSpecific:
Returns:
"""
mmp_dict = {
'primarySubject': primarySubject.toxml('utf-8'),
'secondarySubject': secondarySubject.toxml('utf-8'),
}
return self.POST(['accounts', 'map'], fields=mmp_dict, headers=vendorSpecific) | CNIdentity.mapIdentity(session, subject) β boolean
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.mapIdentity.
Args:
primarySubject:
secondarySubject:
vendorSpecific:
Returns: | entailment |
def mapIdentity(self, primarySubject, secondarySubject, vendorSpecific=None):
"""See Also: mapIdentityResponse()
Args:
primarySubject:
secondarySubject:
vendorSpecific:
Returns:
"""
response = self.mapIdentityResponse(
primarySubject, secondarySubject, vendorSpecific
)
return self._read_boolean_response(response) | See Also: mapIdentityResponse()
Args:
primarySubject:
secondarySubject:
vendorSpecific:
Returns: | entailment |
def removeMapIdentity(self, subject, vendorSpecific=None):
"""See Also: removeMapIdentityResponse()
Args:
subject:
vendorSpecific:
Returns:
"""
response = self.removeMapIdentityResponse(subject, vendorSpecific)
return self._read_boolean_response(response) | See Also: removeMapIdentityResponse()
Args:
subject:
vendorSpecific:
Returns: | entailment |
def denyMapIdentity(self, subject, vendorSpecific=None):
"""See Also: denyMapIdentityResponse()
Args:
subject:
vendorSpecific:
Returns:
"""
response = self.denyMapIdentityResponse(subject, vendorSpecific)
return self._read_boolean_response(response) | See Also: denyMapIdentityResponse()
Args:
subject:
vendorSpecific:
Returns: | entailment |
def requestMapIdentityResponse(self, subject, vendorSpecific=None):
"""CNIdentity.requestMapIdentity(session, subject) β boolean
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.requestMapIdentity.
Args:
subject:
vendorSpecific:
Returns:
"""
mmp_dict = {'subject': subject.toxml('utf-8')}
return self.POST('accounts', fields=mmp_dict, headers=vendorSpecific) | CNIdentity.requestMapIdentity(session, subject) β boolean
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.requestMapIdentity.
Args:
subject:
vendorSpecific:
Returns: | entailment |
def requestMapIdentity(self, subject, vendorSpecific=None):
"""See Also: requestMapIdentityResponse()
Args:
subject:
vendorSpecific:
Returns:
"""
response = self.requestMapIdentityResponse(subject, vendorSpecific)
return self._read_boolean_response(response) | See Also: requestMapIdentityResponse()
Args:
subject:
vendorSpecific:
Returns: | entailment |
def confirmMapIdentity(self, subject, vendorSpecific=None):
"""See Also: confirmMapIdentityResponse()
Args:
subject:
vendorSpecific:
Returns:
"""
response = self.confirmMapIdentityResponse(subject, vendorSpecific)
return self._read_boolean_response(response) | See Also: confirmMapIdentityResponse()
Args:
subject:
vendorSpecific:
Returns: | entailment |
def createGroupResponse(self, group, vendorSpecific=None):
"""CNIdentity.createGroup(session, groupName) β Subject
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.createGroup.
Args:
group:
vendorSpecific:
Returns:
"""
mmp_dict = {'group': ('group.xml', group.toxml('utf-8'))}
return self.POST('groups', fields=mmp_dict, headers=vendorSpecific) | CNIdentity.createGroup(session, groupName) β Subject
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.createGroup.
Args:
group:
vendorSpecific:
Returns: | entailment |
def createGroup(self, group, vendorSpecific=None):
"""See Also: createGroupResponse()
Args:
group:
vendorSpecific:
Returns:
"""
response = self.createGroupResponse(group, vendorSpecific)
return self._read_boolean_response(response) | See Also: createGroupResponse()
Args:
group:
vendorSpecific:
Returns: | entailment |
def updateGroupResponse(self, group, vendorSpecific=None):
"""CNIdentity.addGroupMembers(session, groupName, members) β boolean
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.addGroupMembers.
Args:
group:
vendorSpecific:
Returns:
"""
mmp_dict = {'group': ('group.xml', group.toxml('utf-8'))}
return self.PUT('groups', fields=mmp_dict, headers=vendorSpecific) | CNIdentity.addGroupMembers(session, groupName, members) β boolean
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.addGroupMembers.
Args:
group:
vendorSpecific:
Returns: | entailment |
def updateGroup(self, group, vendorSpecific=None):
"""See Also: updateGroupResponse()
Args:
group:
vendorSpecific:
Returns:
"""
response = self.updateGroupResponse(group, vendorSpecific)
return self._read_boolean_response(response) | See Also: updateGroupResponse()
Args:
group:
vendorSpecific:
Returns: | entailment |
def setReplicationStatusResponse(
self, pid, nodeRef, status, dataoneError=None, vendorSpecific=None
):
"""CNReplication.setReplicationStatus(session, pid, nodeRef, status, failure) β
boolean https://releases.dataone.org/online/api-documentatio
n-v2.0.1/apis/CN_APIs.html#CNReplication.setReplicationStatus.
Args:
pid:
nodeRef:
status:
dataoneError:
vendorSpecific:
Returns:
"""
mmp_dict = {'nodeRef': nodeRef, 'status': status} # .toxml('utf-8'),
if dataoneError is not None:
mmp_dict['failure'] = ('failure.xml', dataoneError.serialize_to_transport())
return self.PUT(
['replicaNotifications', pid], fields=mmp_dict, headers=vendorSpecific
) | CNReplication.setReplicationStatus(session, pid, nodeRef, status, failure) β
boolean https://releases.dataone.org/online/api-documentatio
n-v2.0.1/apis/CN_APIs.html#CNReplication.setReplicationStatus.
Args:
pid:
nodeRef:
status:
dataoneError:
vendorSpecific:
Returns: | entailment |
def setReplicationStatus(
self, pid, nodeRef, status, dataoneError=None, vendorSpecific=None
):
"""See Also: setReplicationStatusResponse()
Args:
pid:
nodeRef:
status:
dataoneError:
vendorSpecific:
Returns:
"""
response = self.setReplicationStatusResponse(
pid, nodeRef, status, dataoneError, vendorSpecific
)
return self._read_boolean_response(response) | See Also: setReplicationStatusResponse()
Args:
pid:
nodeRef:
status:
dataoneError:
vendorSpecific:
Returns: | entailment |
def updateReplicationMetadataResponse(
self, pid, replicaMetadata, serialVersion, vendorSpecific=None
):
"""CNReplication.updateReplicationMetadata(session, pid, replicaMetadata,
serialVersion) β boolean https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_AP Is.html#CNReplication.updateReplicationMetadata
Not implemented.
Args:
pid:
replicaMetadata:
serialVersion:
vendorSpecific:
Returns:
"""
mmp_dict = {
'replicaMetadata': ('replicaMetadata.xml', replicaMetadata.toxml('utf-8')),
'serialVersion': str(serialVersion),
}
return self.PUT(
['replicaMetadata', pid], fields=mmp_dict, headers=vendorSpecific
) | CNReplication.updateReplicationMetadata(session, pid, replicaMetadata,
serialVersion) β boolean https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_AP Is.html#CNReplication.updateReplicationMetadata
Not implemented.
Args:
pid:
replicaMetadata:
serialVersion:
vendorSpecific:
Returns: | entailment |
def updateReplicationMetadata(
self, pid, replicaMetadata, serialVersion, vendorSpecific=None
):
"""See Also: updateReplicationMetadataResponse()
Args:
pid:
replicaMetadata:
serialVersion:
vendorSpecific:
Returns:
"""
response = self.updateReplicationMetadataResponse(
pid, replicaMetadata, serialVersion, vendorSpecific
)
return self._read_boolean_response(response) | See Also: updateReplicationMetadataResponse()
Args:
pid:
replicaMetadata:
serialVersion:
vendorSpecific:
Returns: | entailment |
def setReplicationPolicyResponse(
self, pid, policy, serialVersion, vendorSpecific=None
):
"""CNReplication.setReplicationPolicy(session, pid, policy, serialVersion) β
boolean https://releases.dataone.org/online/api-docume
ntation-v2.0.1/apis/CN_APIs.html#CNReplication.setReplicationPolicy.
Args:
pid:
policy:
serialVersion:
vendorSpecific:
Returns:
"""
mmp_dict = {
'policy': ('policy.xml', policy.toxml('utf-8')),
'serialVersion': (str(serialVersion)),
}
return self.PUT(
['replicaPolicies', pid], fields=mmp_dict, headers=vendorSpecific
) | CNReplication.setReplicationPolicy(session, pid, policy, serialVersion) β
boolean https://releases.dataone.org/online/api-docume
ntation-v2.0.1/apis/CN_APIs.html#CNReplication.setReplicationPolicy.
Args:
pid:
policy:
serialVersion:
vendorSpecific:
Returns: | entailment |
def setReplicationPolicy(self, pid, policy, serialVersion, vendorSpecific=None):
"""See Also: setReplicationPolicyResponse()
Args:
pid:
policy:
serialVersion:
vendorSpecific:
Returns:
"""
response = self.setReplicationPolicyResponse(
pid, policy, serialVersion, vendorSpecific
)
return self._read_boolean_response(response) | See Also: setReplicationPolicyResponse()
Args:
pid:
policy:
serialVersion:
vendorSpecific:
Returns: | entailment |
def isNodeAuthorizedResponse(self, targetNodeSubject, pid, vendorSpecific=None):
"""CNReplication.isNodeAuthorized(session, targetNodeSubject, pid,
replicatePermission) β boolean() https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNReplication.isNodeAuthorized.
Args:
targetNodeSubject:
pid:
vendorSpecific:
Returns:
"""
query_dict = {'targetNodeSubject': targetNodeSubject}
return self.GET(
['replicaAuthorizations', pid], query=query_dict, headers=vendorSpecific
) | CNReplication.isNodeAuthorized(session, targetNodeSubject, pid,
replicatePermission) β boolean() https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNReplication.isNodeAuthorized.
Args:
targetNodeSubject:
pid:
vendorSpecific:
Returns: | entailment |
def isNodeAuthorized(self, targetNodeSubject, pid, vendorSpecific=None):
"""See Also: isNodeAuthorizedResponse()
Args:
targetNodeSubject:
pid:
vendorSpecific:
Returns:
"""
response = self.isNodeAuthorizedResponse(targetNodeSubject, pid, vendorSpecific)
return self._read_boolean_401_response(response) | See Also: isNodeAuthorizedResponse()
Args:
targetNodeSubject:
pid:
vendorSpecific:
Returns: | entailment |
def deleteReplicationMetadataResponse(
self, pid, nodeId, serialVersion, vendorSpecific=None
):
"""CNReplication.deleteReplicationMetadata(session, pid, policy, serialVersion)
β boolean https://releases.dataone.org/online/api-docume
ntation-v2.0.1/apis/CN_APIs.html#CNReplication.deleteReplicationMetadat a.
Args:
pid:
nodeId:
serialVersion:
vendorSpecific:
Returns:
"""
mmp_dict = {'nodeId': nodeId, 'serialVersion': str(serialVersion)}
return self.PUT(
['removeReplicaMetadata', pid], fields=mmp_dict, headers=vendorSpecific
) | CNReplication.deleteReplicationMetadata(session, pid, policy, serialVersion)
β boolean https://releases.dataone.org/online/api-docume
ntation-v2.0.1/apis/CN_APIs.html#CNReplication.deleteReplicationMetadat a.
Args:
pid:
nodeId:
serialVersion:
vendorSpecific:
Returns: | entailment |
def deleteReplicationMetadata(
self, pid, nodeId, serialVersion, vendorSpecific=None
):
"""See Also: deleteReplicationMetadataResponse()
Args:
pid:
nodeId:
serialVersion:
vendorSpecific:
Returns:
"""
response = self.deleteReplicationMetadataResponse(
pid, nodeId, serialVersion, vendorSpecific
)
return self._read_boolean_response(response) | See Also: deleteReplicationMetadataResponse()
Args:
pid:
nodeId:
serialVersion:
vendorSpecific:
Returns: | entailment |
def updateNodeCapabilitiesResponse(self, nodeId, node, vendorSpecific=None):
"""CNRegister.updateNodeCapabilities(session, nodeId, node) β boolean
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_AP
Is.html#CNRegister.updateNodeCapabilities.
Args:
nodeId:
node:
vendorSpecific:
Returns:
"""
mmp_dict = {'node': ('node.xml', node.toxml('utf-8'))}
return self.PUT(['node', nodeId], fields=mmp_dict, headers=vendorSpecific) | CNRegister.updateNodeCapabilities(session, nodeId, node) β boolean
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_AP
Is.html#CNRegister.updateNodeCapabilities.
Args:
nodeId:
node:
vendorSpecific:
Returns: | entailment |
def updateNodeCapabilities(self, nodeId, node, vendorSpecific=None):
"""See Also: updateNodeCapabilitiesResponse()
Args:
nodeId:
node:
vendorSpecific:
Returns:
"""
response = self.updateNodeCapabilitiesResponse(nodeId, node, vendorSpecific)
return self._read_boolean_response(response) | See Also: updateNodeCapabilitiesResponse()
Args:
nodeId:
node:
vendorSpecific:
Returns: | entailment |
def registerResponse(self, node, vendorSpecific=None):
"""CNRegister.register(session, node) β NodeReference
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNRegister.register.
Args:
node:
vendorSpecific:
Returns:
"""
mmp_dict = {'node': ('node.xml', node.toxml('utf-8'))}
return self.POST('node', fields=mmp_dict, headers=vendorSpecific) | CNRegister.register(session, node) β NodeReference
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNRegister.register.
Args:
node:
vendorSpecific:
Returns: | entailment |
def register(self, node, vendorSpecific=None):
"""See Also: registerResponse()
Args:
node:
vendorSpecific:
Returns:
"""
response = self.registerResponse(node, vendorSpecific)
return self._read_boolean_response(response) | See Also: registerResponse()
Args:
node:
vendorSpecific:
Returns: | entailment |
def valid(self, instance, schema):
"""Validate schema."""
try:
jsonschema.validate(instance, schema)
except jsonschema.exceptions.ValidationError as ex:
self.stderr.write(" VALIDATION ERROR: {}".format(instance['name'] if 'name' in instance else ''))
self.stderr.write(" path: {}".format(ex.path))
self.stderr.write(" message: {}".format(ex.message))
self.stderr.write(" validator: {}".format(ex.validator))
self.stderr.write(" val. value: {}".format(ex.validator_value))
return False
try:
# Check that default values fit field schema.
for field in ['input', 'output', 'schema']:
for schema, _, path in iterate_schema({}, instance.get(field, {})):
if 'default' in schema:
validate_schema({schema['name']: schema['default']}, [schema])
except ValidationError:
self.stderr.write(" VALIDATION ERROR: {}".format(instance['name']))
self.stderr.write(" Default value of field '{}' is not valid.". format(path))
return False
return True | Validate schema. | entailment |
def find_descriptor_schemas(self, schema_file):
"""Find descriptor schemas in given path."""
if not schema_file.lower().endswith(('.yml', '.yaml')):
return []
with open(schema_file) as fn:
schemas = yaml.load(fn, Loader=yaml.FullLoader)
if not schemas:
self.stderr.write("Could not read YAML file {}".format(schema_file))
return []
descriptor_schemas = []
for schema in schemas:
if 'schema' not in schema:
continue
descriptor_schemas.append(schema)
return descriptor_schemas | Find descriptor schemas in given path. | entailment |
def find_schemas(self, schema_path, schema_type=SCHEMA_TYPE_PROCESS, verbosity=1):
"""Find schemas in packages that match filters."""
schema_matches = []
if not os.path.isdir(schema_path):
if verbosity > 0:
self.stdout.write("Invalid path {}".format(schema_path))
return
if schema_type not in [SCHEMA_TYPE_PROCESS, SCHEMA_TYPE_DESCRIPTOR]:
raise ValueError('Invalid schema type')
for root, _, files in os.walk(schema_path):
for schema_file in [os.path.join(root, fn) for fn in files]:
schemas = None
if schema_type == SCHEMA_TYPE_DESCRIPTOR:
# Discover descriptors.
schemas = self.find_descriptor_schemas(schema_file)
elif schema_type == SCHEMA_TYPE_PROCESS:
# Perform process discovery for all supported execution engines.
schemas = []
for execution_engine in manager.execution_engines.values():
schemas.extend(execution_engine.discover_process(schema_file))
for schema in schemas:
schema_matches.append(schema)
return schema_matches | Find schemas in packages that match filters. | entailment |
def register_processes(self, process_schemas, user, force=False, verbosity=1):
"""Read and register processors."""
log_processors = []
log_templates = []
for p in process_schemas:
# TODO: Remove this when all processes are migrated to the
# new syntax.
if 'flow_collection' in p:
if 'entity' in p:
self.stderr.write(
"Skip processor {}: only one of 'flow_collection' and 'entity' fields "
"allowed".format(p['slug'])
)
continue
p['entity'] = {'type': p.pop('flow_collection')}
if p['type'][-1] != ':':
p['type'] += ':'
if 'category' in p and not p['category'].endswith(':'):
p['category'] += ':'
for field in ['input', 'output']:
for schema, _, _ in iterate_schema({}, p[field] if field in p else {}):
if not schema['type'][-1].endswith(':'):
schema['type'] += ':'
# TODO: Check if schemas validate with our JSON meta schema and Processor model docs.
if not self.valid(p, PROCESSOR_SCHEMA):
continue
if 'entity' in p:
if 'type' not in p['entity']:
self.stderr.write(
"Skip process {}: 'entity.type' required if 'entity' defined".format(p['slug'])
)
continue
p['entity_type'] = p['entity']['type']
p['entity_descriptor_schema'] = p['entity'].get('descriptor_schema', p['entity_type'])
p['entity_input'] = p['entity'].get('input', None)
p.pop('entity')
if not DescriptorSchema.objects.filter(slug=p['entity_descriptor_schema']).exists():
self.stderr.write(
"Skip processor {}: Unknown descriptor schema '{}' used in 'entity' "
"field.".format(p['slug'], p['entity_descriptor_schema'])
)
continue
if 'persistence' in p:
persistence_mapping = {
'RAW': Process.PERSISTENCE_RAW,
'CACHED': Process.PERSISTENCE_CACHED,
'TEMP': Process.PERSISTENCE_TEMP,
}
p['persistence'] = persistence_mapping[p['persistence']]
if 'scheduling_class' in p:
scheduling_class_mapping = {
'interactive': Process.SCHEDULING_CLASS_INTERACTIVE,
'batch': Process.SCHEDULING_CLASS_BATCH
}
p['scheduling_class'] = scheduling_class_mapping[p['scheduling_class']]
if 'input' in p:
p['input_schema'] = p.pop('input')
if 'output' in p:
p['output_schema'] = p.pop('output')
slug = p['slug']
if 'run' in p:
# Set default language to 'bash' if not set.
p['run'].setdefault('language', 'bash')
# Transform output schema using the execution engine.
try:
execution_engine = manager.get_execution_engine(p['run']['language'])
extra_output_schema = execution_engine.get_output_schema(p)
if extra_output_schema:
p.setdefault('output_schema', []).extend(extra_output_schema)
except InvalidEngineError:
self.stderr.write("Skip processor {}: execution engine '{}' not supported".format(
slug, p['run']['language']
))
continue
# Validate if container image is allowed based on the configured pattern.
# NOTE: This validation happens here and is not deferred to executors because the idea
# is that this will be moved to a "container" requirement independent of the
# executor.
if hasattr(settings, 'FLOW_CONTAINER_VALIDATE_IMAGE'):
try:
container_image = dict_dot(p, 'requirements.executor.docker.image')
if not re.match(settings.FLOW_CONTAINER_VALIDATE_IMAGE, container_image):
self.stderr.write("Skip processor {}: container image does not match '{}'".format(
slug, settings.FLOW_CONTAINER_VALIDATE_IMAGE,
))
continue
except KeyError:
pass
version = p['version']
int_version = convert_version_string_to_int(version, VERSION_NUMBER_BITS)
# `latest version` is returned as `int` so it has to be compared to `int_version`
latest_version = Process.objects.filter(slug=slug).aggregate(Max('version'))['version__max']
if latest_version is not None and latest_version > int_version:
self.stderr.write("Skip processor {}: newer version installed".format(slug))
continue
previous_process_qs = Process.objects.filter(slug=slug)
if previous_process_qs.exists():
previous_process = previous_process_qs.latest()
else:
previous_process = None
process_query = Process.objects.filter(slug=slug, version=version)
if process_query.exists():
if not force:
if verbosity > 0:
self.stdout.write("Skip processor {}: same version installed".format(slug))
continue
process_query.update(**p)
log_processors.append("Updated {}".format(slug))
else:
process = Process.objects.create(contributor=user, **p)
assign_contributor_permissions(process)
if previous_process:
copy_permissions(previous_process, process)
log_processors.append("Inserted {}".format(slug))
if verbosity > 0:
if log_processors:
self.stdout.write("Processor Updates:")
for log in log_processors:
self.stdout.write(" {}".format(log))
if log_templates:
self.stdout.write("Default Template Updates:")
for log in log_templates:
self.stdout.write(" {}".format(log)) | Read and register processors. | entailment |
def register_descriptors(self, descriptor_schemas, user, force=False, verbosity=1):
"""Read and register descriptors."""
log_descriptors = []
for descriptor_schema in descriptor_schemas:
for schema, _, _ in iterate_schema({}, descriptor_schema.get('schema', {})):
if not schema['type'][-1].endswith(':'):
schema['type'] += ':'
if 'schema' not in descriptor_schema:
descriptor_schema['schema'] = []
if not self.valid(descriptor_schema, DESCRIPTOR_SCHEMA):
continue
slug = descriptor_schema['slug']
version = descriptor_schema.get('version', '0.0.0')
int_version = convert_version_string_to_int(version, VERSION_NUMBER_BITS)
# `latest version` is returned as `int` so it has to be compared to `int_version`
latest_version = DescriptorSchema.objects.filter(slug=slug).aggregate(Max('version'))['version__max']
if latest_version is not None and latest_version > int_version:
self.stderr.write("Skip descriptor schema {}: newer version installed".format(slug))
continue
previous_descriptor_qs = DescriptorSchema.objects.filter(slug=slug)
if previous_descriptor_qs.exists():
previous_descriptor = previous_descriptor_qs.latest()
else:
previous_descriptor = None
descriptor_query = DescriptorSchema.objects.filter(slug=slug, version=version)
if descriptor_query.exists():
if not force:
if verbosity > 0:
self.stdout.write("Skip descriptor schema {}: same version installed".format(slug))
continue
descriptor_query.update(**descriptor_schema)
log_descriptors.append("Updated {}".format(slug))
else:
descriptor = DescriptorSchema.objects.create(contributor=user, **descriptor_schema)
assign_contributor_permissions(descriptor)
if previous_descriptor:
copy_permissions(previous_descriptor, descriptor)
log_descriptors.append("Inserted {}".format(slug))
if log_descriptors and verbosity > 0:
self.stdout.write("Descriptor schemas Updates:")
for log in log_descriptors:
self.stdout.write(" {}".format(log)) | Read and register descriptors. | entailment |
def retire(self, process_schemas):
"""Retire obsolete processes.
Remove old process versions without data. Find processes that have been
registered but do not exist in the code anymore, then:
- If they do not have data: remove them
- If they have data: flag them not active (``is_active=False``)
"""
process_slugs = set(ps['slug'] for ps in process_schemas)
# Processes that are in DB but not in the code
retired_processes = Process.objects.filter(~Q(slug__in=process_slugs))
# Remove retired processes which do not have data
retired_processes.filter(data__exact=None).delete()
# Remove non-latest processes which do not have data
latest_version_processes = Process.objects.order_by('slug', '-version').distinct('slug')
Process.objects.filter(data__exact=None).difference(latest_version_processes).delete()
# Deactivate retired processes which have data
retired_processes.update(is_active=False) | Retire obsolete processes.
Remove old process versions without data. Find processes that have been
registered but do not exist in the code anymore, then:
- If they do not have data: remove them
- If they have data: flag them not active (``is_active=False``) | entailment |
def handle(self, *args, **options):
"""Register processes."""
force = options.get('force')
retire = options.get('retire')
verbosity = int(options.get('verbosity'))
users = get_user_model().objects.filter(is_superuser=True).order_by('date_joined')
if not users.exists():
self.stderr.write("Admin does not exist: create a superuser")
exit(1)
process_paths, descriptor_paths = [], []
process_schemas, descriptor_schemas = [], []
for finder in get_finders():
process_paths.extend(finder.find_processes())
descriptor_paths.extend(finder.find_descriptors())
for proc_path in process_paths:
process_schemas.extend(
self.find_schemas(proc_path, schema_type=SCHEMA_TYPE_PROCESS, verbosity=verbosity))
for desc_path in descriptor_paths:
descriptor_schemas.extend(
self.find_schemas(desc_path, schema_type=SCHEMA_TYPE_DESCRIPTOR, verbosity=verbosity))
user_admin = users.first()
self.register_descriptors(descriptor_schemas, user_admin, force, verbosity=verbosity)
# NOTE: Descriptor schemas must be registered first, so
# processes can validate 'entity_descriptor_schema' field.
self.register_processes(process_schemas, user_admin, force, verbosity=verbosity)
if retire:
self.retire(process_schemas)
if verbosity > 0:
self.stdout.write("Running executor post-registration hook...")
manager.get_executor().post_register_hook(verbosity=verbosity) | Register processes. | entailment |
def is_valid_pid_for_create(did):
"""Return True if ``did`` is the PID of an object that can be created with
MNStorage.create() or MNStorage.update().
To be valid for create() and update(), the DID:
- Must not be the PID of an object that exists on this MN
- Must not be a known SID known to this MN
- Must not have been accepted for replication by this MN.
- Must not be referenced as obsoletes or obsoletedBy in an object that exists on
this MN
In addition, if the DID exists in a resource map:
- If RESOURCE_MAP_CREATE = 'reserve':
- The DataONE subject that is making the call must have write or changePermission
on the resource map.
"""
# logger.debug('existing: {}'.format(is_existing_object(did)))
# logger.debug('sid: {}'.format(is_sid(did)))
# logger.debug('local_replica: {}'.format(is_local_replica(did)))
# logger.debug('revision: {}'.format(d1_gmn.app.revision.is_revision(did)))
return (
not is_existing_object(did)
and not is_sid(did)
and not is_local_replica(did)
and not d1_gmn.app.revision.is_revision(did)
and d1_gmn.app.resource_map.is_sciobj_valid_for_create()
) | Return True if ``did`` is the PID of an object that can be created with
MNStorage.create() or MNStorage.update().
To be valid for create() and update(), the DID:
- Must not be the PID of an object that exists on this MN
- Must not be a known SID known to this MN
- Must not have been accepted for replication by this MN.
- Must not be referenced as obsoletes or obsoletedBy in an object that exists on
this MN
In addition, if the DID exists in a resource map:
- If RESOURCE_MAP_CREATE = 'reserve':
- The DataONE subject that is making the call must have write or changePermission
on the resource map. | entailment |
def is_valid_pid_to_be_updated(did):
"""Return True if ``did`` is the PID of an object that can be updated (obsoleted)
with MNStorage.update()"""
return (
is_existing_object(did)
and not is_local_replica(did)
and not is_archived(did)
and not is_obsoleted(did)
) | Return True if ``did`` is the PID of an object that can be updated (obsoleted)
with MNStorage.update() | entailment |
def is_valid_sid_for_chain(pid, sid):
"""Return True if ``sid`` can be assigned to the single object ``pid`` or to the
chain to which ``pid`` belongs.
- If the chain does not have a SID, the new SID must be previously unused.
- If the chain already has a SID, the new SID must match the existing SID.
All known PIDs are associated with a chain.
Preconditions:
- ``pid`` is verified to exist. E.g., with
d1_gmn.app.views.asserts.is_existing_object().
- ``sid`` is None or verified to be a SID
"""
if _is_unused_did(sid):
return True
existing_sid = d1_gmn.app.revision.get_sid_by_pid(pid)
if existing_sid is None:
return False
return existing_sid == sid | Return True if ``sid`` can be assigned to the single object ``pid`` or to the
chain to which ``pid`` belongs.
- If the chain does not have a SID, the new SID must be previously unused.
- If the chain already has a SID, the new SID must match the existing SID.
All known PIDs are associated with a chain.
Preconditions:
- ``pid`` is verified to exist. E.g., with
d1_gmn.app.views.asserts.is_existing_object().
- ``sid`` is None or verified to be a SID | entailment |
def is_existing_object(did):
"""Return True if PID is for an object for which science bytes are stored locally.
This excludes SIDs and PIDs for unprocessed replica requests, remote or non-existing
revisions of local replicas and objects aggregated in Resource Maps.
"""
return d1_gmn.app.models.ScienceObject.objects.filter(pid__did=did).exists() | Return True if PID is for an object for which science bytes are stored locally.
This excludes SIDs and PIDs for unprocessed replica requests, remote or non-existing
revisions of local replicas and objects aggregated in Resource Maps. | entailment |
def classify_identifier(did):
"""Return a text fragment classifying the ``did``
Return <UNKNOWN> if the DID could not be classified. This should not normally happen
and may indicate that the DID was orphaned in the database.
"""
if _is_unused_did(did):
return 'unused on this Member Node'
elif is_sid(did):
return 'a Series ID (SID) of a revision chain'
elif is_local_replica(did):
return 'a Persistent ID (PID) of a local replica'
elif is_unprocessed_local_replica(did):
return (
'a Persistent ID (PID) of an accepted but not yet processed local replica'
)
elif is_archived(did):
return 'a Persistent ID (PID) of a previously archived local object'
elif is_obsoleted(did):
return 'a Persistent ID (PID) of a previously updated (obsoleted) local object'
elif is_resource_map_db(did):
return 'a Persistent ID (PID) of a local resource map'
elif is_existing_object(did):
return 'a Persistent ID (PID) of an existing local object'
elif is_revision_chain_placeholder(did):
return (
'a Persistent ID (PID) of a remote or non-existing revision of a local '
'replica'
)
elif is_resource_map_member(did):
return (
'a Persistent ID (PID) of a remote or non-existing object aggregated in '
'a local Resource Map'
)
logger.warning('Unable to classify known identifier. did="{}"'.format(did))
return '<UNKNOWN>' | Return a text fragment classifying the ``did``
Return <UNKNOWN> if the DID could not be classified. This should not normally happen
and may indicate that the DID was orphaned in the database. | entailment |
def is_local_replica(pid):
"""Includes unprocessed replication requests."""
return d1_gmn.app.models.LocalReplica.objects.filter(pid__did=pid).exists() | Includes unprocessed replication requests. | entailment |
def is_unprocessed_local_replica(pid):
"""Is local replica with status "queued"."""
return d1_gmn.app.models.LocalReplica.objects.filter(
pid__did=pid, info__status__status='queued'
).exists() | Is local replica with status "queued". | entailment |
def is_revision_chain_placeholder(pid):
"""For replicas, the PIDs referenced in revision chains are reserved for use by
other replicas."""
return d1_gmn.app.models.ReplicaRevisionChainReference.objects.filter(
pid__did=pid
).exists() | For replicas, the PIDs referenced in revision chains are reserved for use by
other replicas. | entailment |
def _is_did(did):
"""Return True if ``did`` is recorded in a local context.
``did``=None is supported and returns False.
A DID can be classified with classify_identifier().
"""
return d1_gmn.app.models.IdNamespace.objects.filter(did=did).exists() | Return True if ``did`` is recorded in a local context.
``did``=None is supported and returns False.
A DID can be classified with classify_identifier(). | entailment |
def prepare_connection():
"""Set dafault connection for ElasticSearch.
.. warning::
In case of using multiprocessing/multithreading, connection will
be probably initialized in the main process/thread and the same
connection (socket) will be used in all processes/threads. This
will cause some unexpected timeouts of pushes to Elasticsearch.
So make sure that this function is called again in each
process/thread to make sure that unique connection will be used.
"""
elasticsearch_host = getattr(settings, 'ELASTICSEARCH_HOST', 'localhost')
elasticsearch_port = getattr(settings, 'ELASTICSEARCH_PORT', 9200)
connections.create_connection(hosts=['{}:{}'.format(elasticsearch_host, elasticsearch_port)]) | Set dafault connection for ElasticSearch.
.. warning::
In case of using multiprocessing/multithreading, connection will
be probably initialized in the main process/thread and the same
connection (socket) will be used in all processes/threads. This
will cause some unexpected timeouts of pushes to Elasticsearch.
So make sure that this function is called again in each
process/thread to make sure that unique connection will be used. | entailment |
def _log(pid, request, event, timestamp=None):
"""Log an operation that was performed on a sciobj."""
# Support logging events that are not associated with an object.
sciobj_model = None
if pid is not None:
try:
sciobj_model = d1_gmn.app.models.ScienceObject.objects.filter(pid__did=pid)[
0
]
except IndexError:
raise d1_common.types.exceptions.ServiceFailure(
0,
'Attempted to create event log for non-existing object. pid="{}"'.format(
pid
),
)
event_log_model = create_log_entry(
sciobj_model,
event,
request.META['REMOTE_ADDR'],
request.META.get('HTTP_USER_AGENT', '<not provided>'),
request.primary_subject_str,
)
# The datetime is an optional parameter. If it is not provided, a
# "auto_now_add=True" value in the the model defaults it to Now. The
# disadvantage to this approach is that we have to update the timestamp in a
# separate step if we want to set it to anything other than Now.
if timestamp is not None:
event_log_model.timestamp = timestamp
event_log_model.save() | Log an operation that was performed on a sciobj. | entailment |
def _is_ignored_read_event(request):
"""Return True if this read event was generated by an automated process, as
indicated by the user configurable LOG_IGNORE* settings.
See settings_site.py for description and rationale for the settings.
"""
if (
django.conf.settings.LOG_IGNORE_TRUSTED_SUBJECT
and d1_gmn.app.auth.is_trusted_subject(request)
):
return True
if (
django.conf.settings.LOG_IGNORE_NODE_SUBJECT
and d1_gmn.app.auth.is_client_side_cert_subject(request)
):
return True
if _has_regex_match(
request.META['REMOTE_ADDR'], django.conf.settings.LOG_IGNORE_IP_ADDRESS
):
return True
if _has_regex_match(
request.META.get('HTTP_USER_AGENT', '<not provided>'),
django.conf.settings.LOG_IGNORE_USER_AGENT,
):
return True
if _has_regex_match(
request.primary_subject_str, django.conf.settings.LOG_IGNORE_SUBJECT
):
return True
return False | Return True if this read event was generated by an automated process, as
indicated by the user configurable LOG_IGNORE* settings.
See settings_site.py for description and rationale for the settings. | entailment |
def action_to_level(action):
"""Map action name to action level."""
try:
return ACTION_LEVEL_MAP[action]
except LookupError:
raise d1_common.types.exceptions.InvalidRequest(
0, 'Unknown action. action="{}"'.format(action)
) | Map action name to action level. | entailment |
def level_to_action(level):
"""Map action level to action name."""
try:
return LEVEL_ACTION_MAP[level]
except LookupError:
raise d1_common.types.exceptions.InvalidRequest(
0, 'Unknown action level. level="{}"'.format(level)
) | Map action level to action name. | entailment |
def get_trusted_subjects():
"""Get set of subjects that have unlimited access to all SciObj and APIs on this
node."""
cert_subj = _get_client_side_certificate_subject()
return (
d1_gmn.app.node_registry.get_cn_subjects()
| django.conf.settings.DATAONE_TRUSTED_SUBJECTS
| {cert_subj}
if cert_subj is not None
else set()
) | Get set of subjects that have unlimited access to all SciObj and APIs on this
node. | entailment |
def is_trusted_subject(request):
"""Determine if calling subject is fully trusted."""
logging.debug('Active subjects: {}'.format(', '.join(request.all_subjects_set)))
logging.debug('Trusted subjects: {}'.format(', '.join(get_trusted_subjects())))
return not request.all_subjects_set.isdisjoint(get_trusted_subjects()) | Determine if calling subject is fully trusted. | entailment |
def _get_client_side_certificate_subject():
"""Return the DN from the client side certificate as a D1 subject if a client side
cert has been configured.
Else return None.
"""
subject = django.core.cache.cache.get('client_side_certificate_subject')
if subject is not None:
return subject
cert_pem = _get_client_side_certificate_pem()
if cert_pem is None:
return None
subject = _extract_subject_from_pem(cert_pem)
django.core.cache.cache.set('client_side_certificate_subject', subject)
return subject | Return the DN from the client side certificate as a D1 subject if a client side
cert has been configured.
Else return None. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.