sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def do_save(self, line):
"""save [config_file] Save session variables to file save (without parameters):
Save session to default file ~/.dataone_cli.conf save.
<file>: Save session to specified file.
"""
config_file = self._split_args(line, 0, 1)[0]
self._command_processor.get_session().save(config_file)
if config_file is None:
config_file = (
self._command_processor.get_session().get_default_pickle_file_path()
)
self._print_info_if_verbose("Saved session to file: {}".format(config_file)) | save [config_file] Save session variables to file save (without parameters):
Save session to default file ~/.dataone_cli.conf save.
<file>: Save session to specified file. | entailment |
def do_reset(self, line):
"""reset Set all session variables to their default values."""
self._split_args(line, 0, 0)
self._command_processor.get_session().reset()
self._print_info_if_verbose("Successfully reset session variables") | reset Set all session variables to their default values. | entailment |
def do_allowaccess(self, line):
"""allowaccess <subject> [access-level] Set the access level for subject Access
level is "read", "write" or "changePermission".
Access level defaults to "read" if not specified. Special subjects: public:
Any subject, authenticated and not authenticated authenticatedUser: Any
subject that has authenticated with CILogon verifiedUser: Any subject that has
authenticated with CILogon and has been verified by DataONE
"""
subject, permission = self._split_args(line, 1, 1)
self._command_processor.get_session().get_access_control().add_allowed_subject(
subject, permission
)
self._print_info_if_verbose(
'Set {} access for subject "{}"'.format(permission, subject)
) | allowaccess <subject> [access-level] Set the access level for subject Access
level is "read", "write" or "changePermission".
Access level defaults to "read" if not specified. Special subjects: public:
Any subject, authenticated and not authenticated authenticatedUser: Any
subject that has authenticated with CILogon verifiedUser: Any subject that has
authenticated with CILogon and has been verified by DataONE | entailment |
def do_denyaccess(self, line):
"""denyaccess <subject> Remove subject from access policy."""
subject, = self._split_args(line, 1, 0)
self._command_processor.get_session().get_access_control().remove_allowed_subject(
subject
)
self._print_info_if_verbose(
'Removed subject "{}" from access policy'.format(subject)
) | denyaccess <subject> Remove subject from access policy. | entailment |
def do_clearaccess(self, line):
"""clearaccess Remove all subjects from access policy Only the submitter will
have access to the object."""
self._split_args(line, 0, 0)
self._command_processor.get_session().get_access_control().clear()
self._print_info_if_verbose("Removed all subjects from access policy") | clearaccess Remove all subjects from access policy Only the submitter will
have access to the object. | entailment |
def do_allowrep(self, line):
"""allowrep Allow new objects to be replicated."""
self._split_args(line, 0, 0)
self._command_processor.get_session().get_replication_policy().set_replication_allowed(
True
)
self._print_info_if_verbose("Set replication policy to allow replication") | allowrep Allow new objects to be replicated. | entailment |
def do_denyrep(self, line):
"""denyrep Prevent new objects from being replicated."""
self._split_args(line, 0, 0)
self._command_processor.get_session().get_replication_policy().set_replication_allowed(
False
)
self._print_info_if_verbose("Set replication policy to deny replication") | denyrep Prevent new objects from being replicated. | entailment |
def do_preferrep(self, line):
"""preferrep <member node> [member node ...] Add one or more preferred Member
Nodes to replication policy."""
mns = self._split_args(line, 1, -1)
self._command_processor.get_session().get_replication_policy().add_preferred(
mns
)
self._print_info_if_verbose(
"Set {} as preferred replication target(s)".format(", ".join(mns))
) | preferrep <member node> [member node ...] Add one or more preferred Member
Nodes to replication policy. | entailment |
def do_blockrep(self, line):
"""blockrep <member node> [member node ...] Add one or more blocked Member Node
to replication policy."""
mns = self._split_args(line, 1, -1)
self._command_processor.get_session().get_replication_policy().add_blocked(mns)
self._print_info_if_verbose(
"Set {} as blocked replication target(s)".format(", ".join(mns))
) | blockrep <member node> [member node ...] Add one or more blocked Member Node
to replication policy. | entailment |
def do_removerep(self, line):
"""removerep <member node> [member node ...] Remove one or more Member Nodes
from replication policy."""
mns = self._split_args(line, 1, -1)
self._command_processor.get_session().get_replication_policy().repremove(mns)
self._print_info_if_verbose(
"Removed {} from replication policy".format(", ".join(mns))
) | removerep <member node> [member node ...] Remove one or more Member Nodes
from replication policy. | entailment |
def do_numberrep(self, line):
"""numberrep <number of replicas> Set preferred number of replicas for new
objects If the preferred number of replicas is set to zero, replication is also
disallowed."""
n_replicas = self._split_args(line, 1, 0)[0]
self._command_processor.get_session().get_replication_policy().set_number_of_replicas(
n_replicas
)
self._print_info_if_verbose("Set number of replicas to {}".format(n_replicas)) | numberrep <number of replicas> Set preferred number of replicas for new
objects If the preferred number of replicas is set to zero, replication is also
disallowed. | entailment |
def do_clearrep(self, line):
"""clearrep Set the replication policy to default.
The default replication policy has no preferred or blocked member nodes, allows
replication and sets the preferred number of replicas to 3.
"""
self._split_args(line, 0, 0)
self._command_processor.get_session().get_replication_policy().clear()
self._print_info_if_verbose("Cleared the replication policy") | clearrep Set the replication policy to default.
The default replication policy has no preferred or blocked member nodes, allows
replication and sets the preferred number of replicas to 3. | entailment |
def do_get(self, line):
"""get <identifier> <file> Get an object from a Member Node.
The object is saved to <file>.
"""
pid, output_file = self._split_args(line, 2, 0)
self._command_processor.science_object_get(pid, output_file)
self._print_info_if_verbose(
'Downloaded "{}" to file: {}'.format(pid, output_file)
) | get <identifier> <file> Get an object from a Member Node.
The object is saved to <file>. | entailment |
def do_meta(self, line):
"""meta <identifier> [file] Get the System Metadata that is associated with a
Science Object.
If the metadata is not on the Coordinating Node, the Member Node is checked.
Provide ``file`` to save the System Metada to disk instead of displaying it.
"""
pid, output_file = self._split_args(line, 1, 1)
self._command_processor.system_metadata_get(pid, output_file)
if output_file is not None:
self._print_info_if_verbose(
'Downloaded system metadata for "{}" to file: {}'.format(
pid, output_file
)
) | meta <identifier> [file] Get the System Metadata that is associated with a
Science Object.
If the metadata is not on the Coordinating Node, the Member Node is checked.
Provide ``file`` to save the System Metada to disk instead of displaying it. | entailment |
def do_list(self, line):
"""list [path] Retrieve a list of available Science Data Objects from Member
Node The response is filtered by the from-date, to-date, search, start and count
session variables.
See also: search
"""
path = self._split_args(line, 0, 1, pad=False)
if len(path):
path = path[0]
self._command_processor.list_objects(path) | list [path] Retrieve a list of available Science Data Objects from Member
Node The response is filtered by the from-date, to-date, search, start and count
session variables.
See also: search | entailment |
def do_log(self, line):
"""log [path] Retrieve event log from Member Node The response is filtered by
the from-date, to-date, start and count session parameters."""
path = self._split_args(line, 0, 1, pad=False)
if len(path):
path = path[0]
self._command_processor.log(path) | log [path] Retrieve event log from Member Node The response is filtered by
the from-date, to-date, start and count session parameters. | entailment |
def do_resolve(self, line):
"""resolve <identifier> Find all locations from which the given Science Object
can be downloaded."""
pid, = self._split_args(line, 1, 0)
self._command_processor.resolve(pid) | resolve <identifier> Find all locations from which the given Science Object
can be downloaded. | entailment |
def do_create(self, line):
"""create <identifier> <file> Create a new Science Object on a Member Node.
The System Metadata that becomes associated with the new Science Object is
generated from the session variables.
"""
pid, sciobj_path = self._split_args(line, 2, 0)
self._command_processor.science_object_create(pid, sciobj_path)
self._print_info_if_verbose(
'Added create operation for identifier "{}" to write queue'.format(pid)
) | create <identifier> <file> Create a new Science Object on a Member Node.
The System Metadata that becomes associated with the new Science Object is
generated from the session variables. | entailment |
def do_update(self, line):
"""update <old-pid> <new-pid> <file> Replace an existing Science Object in a.
:term:`MN` with another.
"""
curr_pid, pid_new, input_file = self._split_args(line, 3, 0)
self._command_processor.science_object_update(curr_pid, input_file, pid_new)
self._print_info_if_verbose(
'Added update operation for identifier "{}" to write queue'.format(curr_pid)
) | update <old-pid> <new-pid> <file> Replace an existing Science Object in a.
:term:`MN` with another. | entailment |
def do_package(self, line):
"""package <package-pid> <science-metadata-pid> <science-pid> [science- pid.
...] Create a simple OAI-ORE Resource Map on a Member Node.
"""
pids = self._split_args(line, 3, -1, pad=False)
self._command_processor.create_package(pids)
self._print_info_if_verbose(
'Added package create operation for identifier "{}" to write queue'.format(
pids[0]
)
) | package <package-pid> <science-metadata-pid> <science-pid> [science- pid.
...] Create a simple OAI-ORE Resource Map on a Member Node. | entailment |
def do_archive(self, line):
"""archive <identifier> [identifier ...] Mark one or more existing Science
Objects as archived."""
pids = self._split_args(line, 1, -1)
self._command_processor.science_object_archive(pids)
self._print_info_if_verbose(
"Added archive operation for identifier(s) {} to write queue".format(
", ".join(pids)
)
) | archive <identifier> [identifier ...] Mark one or more existing Science
Objects as archived. | entailment |
def do_updateaccess(self, line):
"""updateaccess <identifier> [identifier ...] Update the Access Policy on one or
more existing Science Data Objects."""
pids = self._split_args(line, 1, -1)
self._command_processor.update_access_policy(pids)
self._print_info_if_verbose(
"Added access policy update operation for identifiers {} to write queue".format(
", ".join(pids)
)
) | updateaccess <identifier> [identifier ...] Update the Access Policy on one or
more existing Science Data Objects. | entailment |
def do_updatereplication(self, line):
"""updatereplication <identifier> [identifier ...] Update the Replication Policy
on one or more existing Science Data Objects."""
pids = self._split_args(line, 1, -1)
self._command_processor.update_replication_policy(pids)
self._print_info_if_verbose(
"Added replication policy update operation for identifiers {} to write queue".format(
", ".join(pids)
)
) | updatereplication <identifier> [identifier ...] Update the Replication Policy
on one or more existing Science Data Objects. | entailment |
def do_search(self, line):
"""search [query] Comprehensive search for Science Data Objects across all
available MNs.
See https://releases.dataone.org/online/api-
documentation-v2.0.1/design/SearchMetadata.html for the available search terms.
"""
args = self._split_args(line, 0, -1)
query = " ".join([_f for _f in args if _f])
self._command_processor.search(query) | search [query] Comprehensive search for Science Data Objects across all
available MNs.
See https://releases.dataone.org/online/api-
documentation-v2.0.1/design/SearchMetadata.html for the available search terms. | entailment |
def do_ping(self, line):
"""ping [base-url ...] Check if a server responds to the DataONE ping() API
method ping (no arguments): Ping the CN and MN that is specified in the session
ping <base-url> [base-url ...]: Ping each CN or MN.
If an incomplete base-url is provided, default CN and MN base URLs at the given
url are pinged.
"""
hosts = self._split_args(line, 0, 99, pad=False)
self._command_processor.ping(hosts) | ping [base-url ...] Check if a server responds to the DataONE ping() API
method ping (no arguments): Ping the CN and MN that is specified in the session
ping <base-url> [base-url ...]: Ping each CN or MN.
If an incomplete base-url is provided, default CN and MN base URLs at the given
url are pinged. | entailment |
def do_queue(self, line):
"""queue Print the queue of write operations."""
self._split_args(line, 0, 0)
self._command_processor.get_operation_queue().display() | queue Print the queue of write operations. | entailment |
def do_run(self, line):
"""run Perform each operation in the queue of write operations."""
self._split_args(line, 0, 0)
self._command_processor.get_operation_queue().execute()
self._print_info_if_verbose(
"All operations in the write queue were successfully executed"
) | run Perform each operation in the queue of write operations. | entailment |
def do_edit(self, line):
"""edit Edit the queue of write operations."""
self._split_args(line, 0, 0)
self._command_processor.get_operation_queue().edit()
self._print_info_if_verbose("The write operation queue was successfully edited") | edit Edit the queue of write operations. | entailment |
def do_clearqueue(self, line):
"""clearqueue Remove the operations in the queue of write operations without
performing them."""
self._split_args(line, 0, 0)
self._command_processor.get_operation_queue().clear()
self._print_info_if_verbose("All operations in the write queue were cleared") | clearqueue Remove the operations in the queue of write operations without
performing them. | entailment |
def _print_help(self):
"""Custom help message to group commands by functionality."""
msg = """Commands (type help <command> for details)
CLI: help history exit quit
Session, General: set load save reset
Session, Access Control: allowaccess denyaccess clearaccess
Session, Replication: allowrep denyrep preferrep blockrep
removerep numberrep clearrep
Read Operations: get meta list log resolve
Write Operations: update create package archive
updateaccess updatereplication
Utilities: listformats listnodes search ping
Write Operation Queue: queue run edit clearqueue
Command History: Arrow Up, Arrow Down
Command Editing: Arrow Left, Arrow Right, Delete
"""
if platform.system() != "Windows":
msg += """Command Completion: Single Tab: Complete unique command
Double Tab: Display possible commands
"""
d1_cli.impl.util.print_info(msg) | Custom help message to group commands by functionality. | entailment |
def get_groups_with_perms(obj, attach_perms=False):
"""Return queryset of all ``Group`` objects with *any* object permissions for the given ``obj``."""
ctype = get_content_type(obj)
group_model = get_group_obj_perms_model(obj)
if not attach_perms:
# It's much easier without attached perms so we do it first if that is the case
group_rel_name = group_model.group.field.related_query_name()
if group_model.objects.is_generic():
group_filters = {
'%s__content_type' % group_rel_name: ctype,
'%s__object_pk' % group_rel_name: obj.pk,
}
else:
group_filters = {'%s__content_object' % group_rel_name: obj}
return Group.objects.filter(**group_filters).distinct()
else:
group_perms_mapping = defaultdict(list)
groups_with_perms = get_groups_with_perms(obj)
queryset = group_model.objects.filter(group__in=groups_with_perms).prefetch_related('group', 'permission')
if group_model is GroupObjectPermission:
queryset = queryset.filter(object_pk=obj.pk, content_type=ctype)
else:
queryset = queryset.filter(content_object_id=obj.pk)
for group_perm in queryset:
group_perms_mapping[group_perm.group].append(group_perm.permission.codename)
return dict(group_perms_mapping) | Return queryset of all ``Group`` objects with *any* object permissions for the given ``obj``. | entailment |
def _group_groups(perm_list):
"""Group permissions by group.
Input is list of tuples of length 3, where each tuple is in
following format::
(<group_id>, <group_name>, <single_permission>)
Permissions are regrouped and returned in such way that there is
only one tuple for each group::
(<group_id>, <group_name>, [<first_permission>, <second_permission>,...])
:param list perm_list: list of touples of length 3
:return: list tuples with grouped permissions
:rtype: list
"""
perm_list = sorted(perm_list, key=lambda tup: tup[0])
grouped_perms = []
for key, group in groupby(perm_list, lambda tup: (tup[0], tup[1])):
grouped_perms.append((key[0], key[1], [g[2] for g in group]))
return grouped_perms | Group permissions by group.
Input is list of tuples of length 3, where each tuple is in
following format::
(<group_id>, <group_name>, <single_permission>)
Permissions are regrouped and returned in such way that there is
only one tuple for each group::
(<group_id>, <group_name>, [<first_permission>, <second_permission>,...])
:param list perm_list: list of touples of length 3
:return: list tuples with grouped permissions
:rtype: list | entailment |
def get_user_group_perms(user_or_group, obj):
"""Get permissins for user groups.
Based on guardian.core.ObjectPermissionChecker.
"""
user, group = get_identity(user_or_group)
if user and not user.is_active:
return [], []
user_model = get_user_model()
ctype = ContentType.objects.get_for_model(obj)
group_model = get_group_obj_perms_model(obj)
group_rel_name = group_model.permission.field.related_query_name()
if user:
user_rel_name = user_model.groups.field.related_query_name()
group_filters = {user_rel_name: user}
else:
group_filters = {'pk': group.pk}
if group_model.objects.is_generic():
group_filters.update({
'{}__content_type'.format(group_rel_name): ctype,
'{}__object_pk'.format(group_rel_name): obj.pk,
})
else:
group_filters['{}__content_object'.format(group_rel_name)] = obj
user_perms, group_perms = [], []
if user:
perms_qs = Permission.objects.filter(content_type=ctype)
if user.is_superuser:
user_perms = list(chain(perms_qs.values_list("codename", flat=True)))
else:
model = get_user_obj_perms_model(obj)
related_name = model.permission.field.related_query_name()
user_filters = {'{}__user'.format(related_name): user}
if model.objects.is_generic():
user_filters.update({
'{}__content_type'.format(related_name): ctype,
'{}__object_pk'.format(related_name): obj.pk,
})
else:
user_filters['{}__content_object'.format(related_name)] = obj
user_perms_qs = perms_qs.filter(**user_filters)
user_perms = list(chain(user_perms_qs.values_list("codename", flat=True)))
group_perms_qs = Group.objects.filter(**group_filters)
group_perms = list(chain(group_perms_qs.order_by("pk").values_list(
"pk", "name", "{}__permission__codename".format(group_rel_name))))
group_perms = _group_groups(group_perms)
return user_perms, group_perms | Get permissins for user groups.
Based on guardian.core.ObjectPermissionChecker. | entailment |
def get_object_perms(obj, user=None):
"""Return permissions for given object in Resolwe specific format.
Function returns permissions for given object ``obj`` in following
format::
{
"type": "group"/"user"/"public",
"id": <group_or_user_id>,
"name": <group_or_user_name>,
"permissions": [<first_permission>, <second_permission>,...]
}
For ``public`` type ``id`` and ``name`` keys are omitted.
If ``user`` parameter is given, permissions are limited only to
given user, groups he belongs to and public permissions.
:param obj: Resolwe's DB model's instance
:type obj: a subclass of :class:`~resolwe.flow.models.base.BaseModel`
:param user: Django user
:type user: :class:`~django.contrib.auth.models.User` or :data:`None`
:return: list of permissions object in described format
:rtype: list
"""
def format_permissions(perms):
"""Remove model name from permission."""
ctype = ContentType.objects.get_for_model(obj)
return [perm.replace('_{}'.format(ctype.name), '') for perm in perms]
perms_list = []
if user:
if user.is_authenticated:
user_perms, group_perms = get_user_group_perms(user, obj)
else:
user_perms, group_perms = [], []
if user_perms != []:
perms_list.append({
'type': 'user',
'id': user.pk,
'name': user.get_full_name() or user.username,
'permissions': format_permissions(user_perms),
})
if group_perms != []:
for group_id, group_name, perms in group_perms:
perms_list.append({
'type': 'group',
'id': group_id,
'name': group_name,
'permissions': format_permissions(perms),
})
else:
user_options = {
'attach_perms': True,
'with_group_users': False
}
for user, perms in get_users_with_perms(obj, **user_options).items():
if user.username == settings.ANONYMOUS_USER_NAME:
# public user is treated separately
continue
perms_list.append({
'type': 'user',
'id': user.pk,
'name': user.get_full_name() or user.username,
'permissions': format_permissions(perms),
})
group_options = {
'attach_perms': True,
}
for group, perms in get_groups_with_perms(obj, **group_options).items():
perms_list.append({
'type': 'group',
'id': group.pk,
'name': group.name,
'permissions': format_permissions(perms),
})
public_perms = get_perms(AnonymousUser(), obj)
if public_perms != []:
perms_list.append({
'type': 'public',
'permissions': format_permissions(public_perms),
})
return perms_list | Return permissions for given object in Resolwe specific format.
Function returns permissions for given object ``obj`` in following
format::
{
"type": "group"/"user"/"public",
"id": <group_or_user_id>,
"name": <group_or_user_name>,
"permissions": [<first_permission>, <second_permission>,...]
}
For ``public`` type ``id`` and ``name`` keys are omitted.
If ``user`` parameter is given, permissions are limited only to
given user, groups he belongs to and public permissions.
:param obj: Resolwe's DB model's instance
:type obj: a subclass of :class:`~resolwe.flow.models.base.BaseModel`
:param user: Django user
:type user: :class:`~django.contrib.auth.models.User` or :data:`None`
:return: list of permissions object in described format
:rtype: list | entailment |
def get_objects_for_user(user, perms, klass=None, use_groups=True, any_perm=False,
with_superuser=True, accept_global_perms=True, perms_filter='pk__in'):
"""Return queryset with required permissions."""
if isinstance(perms, str):
perms = [perms]
ctype = None
app_label = None
codenames = set()
# Compute codenames set and ctype if possible
for perm in perms:
if '.' in perm:
new_app_label, codename = perm.split('.', 1)
if app_label is not None and app_label != new_app_label:
raise MixedContentTypeError(
"Given perms must have same app label "
"({} != {})".format(app_label, new_app_label))
else:
app_label = new_app_label
else:
codename = perm
codenames.add(codename)
if app_label is not None:
new_ctype = ContentType.objects.get(app_label=app_label,
permission__codename=codename)
if ctype is not None and ctype != new_ctype:
raise MixedContentTypeError(
"ContentType was once computed to be {} and another "
"one {}".format(ctype, new_ctype))
else:
ctype = new_ctype
# Compute queryset and ctype if still missing
if ctype is None and klass is not None:
queryset = _get_queryset(klass)
ctype = ContentType.objects.get_for_model(queryset.model)
elif ctype is not None and klass is None:
queryset = _get_queryset(ctype.model_class())
elif klass is None:
raise WrongAppError("Cannot determine content type")
else:
queryset = _get_queryset(klass)
if ctype.model_class() != queryset.model and perms_filter == 'pk__in':
raise MixedContentTypeError("Content type for given perms and "
"klass differs")
# At this point, we should have both ctype and queryset and they should
# match which means: ctype.model_class() == queryset.model
# we should also have `codenames` list
# First check if user is superuser and if so, return queryset immediately
if with_superuser and user.is_superuser:
return queryset
# Check if the user is anonymous. The
# django.contrib.auth.models.AnonymousUser object doesn't work for queries
# and it's nice to be able to pass in request.user blindly.
if user.is_anonymous:
user = get_anonymous_user()
global_perms = set()
has_global_perms = False
# a superuser has by default assigned global perms for any
if accept_global_perms and with_superuser:
for code in codenames:
if user.has_perm(ctype.app_label + '.' + code):
global_perms.add(code)
for code in global_perms:
codenames.remove(code)
# prerequisite: there must be elements in global_perms otherwise just
# follow the procedure for object based permissions only AND
# 1. codenames is empty, which means that permissions are ONLY set
# globally, therefore return the full queryset.
# OR
# 2. any_perm is True, then the global permission beats the object
# based permission anyway, therefore return full queryset
if global_perms and (not codenames or any_perm):
return queryset
# if we have global perms and still some object based perms differing
# from global perms and any_perm is set to false, then we have to flag
# that global perms exist in order to merge object based permissions by
# user and by group correctly. Scenario: global perm change_xx and
# object based perm delete_xx on object A for user, and object based
# permission delete_xx on object B for group, to which user is
# assigned.
# get_objects_for_user(user, [change_xx, delete_xx], use_groups=True,
# any_perm=False, accept_global_perms=True) must retrieve object A and
# B.
elif global_perms and codenames:
has_global_perms = True
# Now we should extract list of pk values for which we would filter
# queryset
user_model = get_user_obj_perms_model(queryset.model)
user_obj_perms_queryset = (user_model.objects
.filter(Q(user=user) | Q(user=get_anonymous_user()))
.filter(permission__content_type=ctype))
if codenames:
user_obj_perms_queryset = user_obj_perms_queryset.filter(
permission__codename__in=codenames)
direct_fields = ['content_object__pk', 'permission__codename']
generic_fields = ['object_pk', 'permission__codename']
if user_model.objects.is_generic():
user_fields = generic_fields
else:
user_fields = direct_fields
if use_groups:
group_model = get_group_obj_perms_model(queryset.model)
group_filters = {
'permission__content_type': ctype,
'group__{}'.format(get_user_model().groups.field.related_query_name()): user,
}
if codenames:
group_filters.update({
'permission__codename__in': codenames,
})
groups_obj_perms_queryset = group_model.objects.filter(**group_filters)
if group_model.objects.is_generic():
group_fields = generic_fields
else:
group_fields = direct_fields
if not any_perm and codenames and not has_global_perms:
user_obj_perms = user_obj_perms_queryset.values_list(*user_fields)
groups_obj_perms = groups_obj_perms_queryset.values_list(*group_fields)
data = list(user_obj_perms) + list(groups_obj_perms)
# sorting/grouping by pk (first in result tuple)
data = sorted(data, key=lambda t: t[0])
pk_list = []
for pk, group in groupby(data, lambda t: t[0]):
obj_codenames = set((e[1] for e in group))
if codenames.issubset(obj_codenames):
pk_list.append(pk)
objects = queryset.filter(**{perms_filter: pk_list})
return objects
if not any_perm and len(codenames) > 1:
counts = user_obj_perms_queryset.values(
user_fields[0]).annotate(object_pk_count=Count(user_fields[0]))
user_obj_perms_queryset = counts.filter(
object_pk_count__gte=len(codenames))
values = user_obj_perms_queryset.values_list(user_fields[0], flat=True)
if user_model.objects.is_generic():
values = list(values)
query = Q(**{perms_filter: values})
if use_groups:
values = groups_obj_perms_queryset.values_list(group_fields[0], flat=True)
if group_model.objects.is_generic():
values = list(values)
query |= Q(**{perms_filter: values})
return queryset.filter(query) | Return queryset with required permissions. | entailment |
def get_users_with_permission(obj, permission):
"""Return users with specific permission on object.
:param obj: Object to return users for
:param permission: Permission codename
"""
user_model = get_user_model()
return user_model.objects.filter(
userobjectpermission__object_pk=obj.pk,
userobjectpermission__permission__codename=permission,
).distinct() | Return users with specific permission on object.
:param obj: Object to return users for
:param permission: Permission codename | entailment |
def purge_run(self, event):
"""Run purge for the object with ``location_id`` specified in ``event`` argument."""
location_id = event['location_id']
verbosity = event['verbosity']
try:
logger.info(__("Running purge for location id {}.", location_id))
location_purge(location_id=location_id, delete=True, verbosity=verbosity)
except Exception: # pylint: disable=broad-except
logger.exception("Error while purging location.", extra={'location_id': location_id}) | Run purge for the object with ``location_id`` specified in ``event`` argument. | entailment |
def get_resource_limits(self):
"""Get the core count and memory usage limits for this process.
:return: A dictionary with the resource limits, containing the
following keys:
- ``memory``: Memory usage limit, in MB. Defaults to 4096 if
not otherwise specified in the resource requirements.
- ``cores``: Core count limit. Defaults to 1.
:rtype: dict
"""
# Get limit defaults and overrides.
limit_defaults = getattr(settings, 'FLOW_PROCESS_RESOURCE_DEFAULTS', {})
limit_overrides = getattr(settings, 'FLOW_PROCESS_RESOURCE_OVERRIDES', {})
limits = {}
resources = self.requirements.get('resources', {}) # pylint: disable=no-member
limits['cores'] = int(resources.get('cores', 1))
max_cores = getattr(settings, 'FLOW_PROCESS_MAX_CORES', None)
if max_cores:
limits['cores'] = min(limits['cores'], max_cores)
memory = limit_overrides.get('memory', {}).get(self.slug, None)
if memory is None:
memory = int(resources.get(
'memory',
# If no memory resource is configured, check settings.
limit_defaults.get('memory', 4096)
))
limits['memory'] = memory
return limits | Get the core count and memory usage limits for this process.
:return: A dictionary with the resource limits, containing the
following keys:
- ``memory``: Memory usage limit, in MB. Defaults to 4096 if
not otherwise specified in the resource requirements.
- ``cores``: Core count limit. Defaults to 1.
:rtype: dict | entailment |
def validate_query_params(self):
"""Ensure no unsupported query params were used."""
allowed_params = set(self.get_filters().keys())
allowed_params.update(self.get_always_allowed_arguments())
unallowed = set(self.request.query_params.keys()) - allowed_params
if unallowed:
msg = 'Unsupported parameter(s): {}. Please use a combination of: {}.'.format(
', '.join(unallowed),
', '.join(allowed_params),
)
self.form.add_error(field=None, error=ParseError(msg)) | Ensure no unsupported query params were used. | entailment |
def set_default_option(cls, key, value):
"""Class method. Set the default value of the option `key` (string)
to `value` for all future instances of the class.
Note that this does not affect existing instances or the instance
called from."""
cls._default_options.update(cls._option_schema({key: value})) | Class method. Set the default value of the option `key` (string)
to `value` for all future instances of the class.
Note that this does not affect existing instances or the instance
called from. | entailment |
def set_option(self, key, value):
    """Set the option `key` (string) to `value` on this instance only.

    Clears the instance cache as a side effect.
    """
    validated = self._option_schema({key: value})
    self._options.update(validated)
    self.clear_cache()
def get_option(self, key):
    """Return the current value of the option `key` (string) for this instance.

    Falls back to the class default when the instance has no override.
    """
    # Look up the default first so an unknown key raises KeyError even when
    # an instance override exists (matches the eager-default semantics).
    fallback = self._default_options[key]
    return self._options.get(key, fallback)
def from_wc(cls, wc):
    """Return a `Wilson` instance initialized by a `wcxf.WC` instance"""
    kwargs = {'wcdict': wc.dict, 'scale': wc.scale, 'eft': wc.eft, 'basis': wc.basis}
    return cls(**kwargs)
def load_wc(cls, stream):
    """Return a `Wilson` instance initialized by a WCxf file-like object"""
    # Parse the stream into a wcxf.WC, then delegate to the WC constructor.
    return cls.from_wc(wcxf.WC.load(stream))
def match_run(self, scale, eft, basis, sectors='all'):
    """Run the Wilson coefficients to a different scale
    (and possibly different EFT) and return them as a `wcxf.WC` instance.

    Parameters:

    - `scale`: output scale in GeV
    - `eft`: output EFT
    - `basis`: output basis
    - `sectors`: in the case of WET (or WET-4 or WET-3), a tuple of sector
      names can be optionally provided. In this case, only the Wilson
      coefficients from these sectors will be returned and all others
      discarded. This can speed up the computation significantly if only a
      small number of sectors is of interest. The sector names are defined
      in the WCxf basis file.

    Raises:
        ValueError: if the input EFT is unknown, or the requested
            EFT transition is not implemented.
    """
    cached = self._get_from_cache(sector=sectors, scale=scale, eft=eft, basis=basis)
    if cached is not None:
        return cached
    if sectors == 'all':
        # the default value for sectors is "None" for translators
        translate_sectors = None
    else:
        translate_sectors = sectors
    scale_ew = self.get_option('smeft_matchingscale')
    mb = self.get_option('mb_matchingscale')
    mc = self.get_option('mc_matchingscale')
    if self.wc.basis == basis and self.wc.eft == eft and scale == self.wc.scale:
        return self.wc  # nothing to do
    if self.wc.eft == 'SMEFT':
        smeft_accuracy = self.get_option('smeft_accuracy')
        if eft == 'SMEFT':
            smeft = SMEFT(self.wc.translate('Warsaw', sectors=translate_sectors, parameters=self.parameters))
            # if input and output EFT is SMEFT, just run.
            wc_out = smeft.run(scale, accuracy=smeft_accuracy)
            self._set_cache('all', scale, 'SMEFT', wc_out.basis, wc_out)
            return wc_out
        else:
            # if SMEFT -> WET-x: match to WET at the EW scale
            wc_ew = self._get_from_cache(sector='all', scale=scale_ew, eft='WET', basis='JMS')
            if wc_ew is None:
                if self.wc.scale == scale_ew:
                    wc_ew = self.wc.match('WET', 'JMS', parameters=self.parameters)  # no need to run
                else:
                    smeft = SMEFT(self.wc.translate('Warsaw', parameters=self.parameters))
                    wc_ew = smeft.run(scale_ew, accuracy=smeft_accuracy).match('WET', 'JMS', parameters=self.parameters)
            self._set_cache('all', scale_ew, wc_ew.eft, wc_ew.basis, wc_ew)
            wet = WETrunner(wc_ew, **self._wetrun_opt())
    elif self.wc.eft in ['WET', 'WET-4', 'WET-3']:
        wet = WETrunner(self.wc.translate('JMS', parameters=self.parameters, sectors=translate_sectors), **self._wetrun_opt())
    else:
        raise ValueError("Input EFT {} unknown or not supported".format(self.wc.eft))
    if eft == wet.eft:  # just run
        wc_out = wet.run(scale, sectors=sectors).translate(basis, sectors=translate_sectors, parameters=self.parameters)
        self._set_cache(sectors, scale, eft, basis, wc_out)
        return wc_out
    elif eft == 'WET-4' and wet.eft == 'WET':  # match at mb
        wc_mb = wet.run(mb, sectors=sectors).match('WET-4', 'JMS', parameters=self.parameters)
        wet4 = WETrunner(wc_mb, **self._wetrun_opt())
        wc_out = wet4.run(scale, sectors=sectors).translate(basis, sectors=translate_sectors, parameters=self.parameters)
        self._set_cache(sectors, scale, 'WET-4', basis, wc_out)
        return wc_out
    elif eft == 'WET-3' and wet.eft == 'WET-4':  # match at mc
        wc_mc = wet.run(mc, sectors=sectors).match('WET-3', 'JMS', parameters=self.parameters)
        wet3 = WETrunner(wc_mc, **self._wetrun_opt())
        wc_out = wet3.run(scale, sectors=sectors).translate(basis, sectors=translate_sectors, parameters=self.parameters)
        # BUG FIX: populate the cache *before* returning; the original
        # returned first, leaving this unreachable so WET-4 -> WET-3
        # results were never cached.
        self._set_cache(sectors, scale, 'WET-3', basis, wc_out)
        return wc_out
    elif eft == 'WET-3' and wet.eft == 'WET':  # match at mb and mc
        wc_mb = wet.run(mb, sectors=sectors).match('WET-4', 'JMS', parameters=self.parameters)
        wet4 = WETrunner(wc_mb, **self._wetrun_opt())
        wc_mc = wet4.run(mc, sectors=sectors).match('WET-3', 'JMS', parameters=self.parameters)
        wet3 = WETrunner(wc_mc, **self._wetrun_opt())
        wc_out = wet3.run(scale, sectors=sectors).translate(basis, sectors=translate_sectors, parameters=self.parameters)
        self._set_cache(sectors, scale, 'WET-3', basis, wc_out)
        return wc_out
    else:
        raise ValueError("Running from {} to {} not implemented".format(wet.eft, eft))
def _get_from_cache(self, sector, scale, eft, basis):
"""Try to load a set of Wilson coefficients from the cache, else return
None."""
try:
return self._cache[eft][scale][basis][sector]
except KeyError:
return None | Try to load a set of Wilson coefficients from the cache, else return
None. | entailment |
def plotdata(self, key, part='re', scale='log', steps=50):
    """Return a tuple of arrays x, y that can be fed to plt.plot,
    where x is the scale in GeV and y is the parameter of interest.

    Parameters:

    - key: dictionary key of the parameter to be plotted (e.g. a WCxf
      coefficient name or a SM parameter like 'g')
    - part: plot the real part 're' (default) or the imaginary part 'im'
    - scale: 'log' (default) makes the x steps logarithmically distributed;
      'linear' makes them linearly distributed
    - steps: number of x points to take (default: 50)

    Raises:
        ValueError: if `part` or `scale` is not one of the allowed values
            (the original silently returned None / crashed with an
            UnboundLocalError in these cases).
    """
    if scale == 'log':
        # geomspace(a, b, n) == logspace(log(a), log(b), n, base=e)
        x = np.geomspace(self.scale_min, self.scale_max, steps)
    elif scale == 'linear':
        x = np.linspace(self.scale_min, self.scale_max, steps)
    else:
        raise ValueError("scale must be 'log' or 'linear', got {!r}".format(scale))
    # self.fun returns one dict per x point; pick out the requested key.
    y = np.array([d[key] for d in self.fun(x)])
    if part == 're':
        return x, y.real
    elif part == 'im':
        return x, y.imag
    raise ValueError("part must be 're' or 'im', got {!r}".format(part))
def plot(self, key, part='re', scale='log', steps=50, legend=True, plotargs={}):
    """Plot the RG evolution of parameter `key`.

    Parameters:

    - part, scale, steps: see `plotdata`
    - legend: boolean, show the legend (default: True)
    - plotargs: dictionary of arguments to be passed to plt.plot
    """
    # matplotlib is an optional dependency; import lazily.
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError("Please install matplotlib if you want to use the plot method")
    x, y = self.plotdata(key, part=part, scale=scale, steps=steps)
    plt.plot(x, y, label=key, **plotargs)
    if scale == 'log':
        plt.xscale('log')
    if legend:
        plt.legend()
async def run_consumer(timeout=None, dry_run=False):
    """Run the consumer until it finishes processing.

    :param timeout: Set maximum execution time before cancellation, or
        ``None`` (default) for unlimited.
    :param dry_run: If ``True``, don't actually dispatch messages, just
        dequeue them. Defaults to ``False``.
    """
    channel = state.MANAGER_CONTROL_CHANNEL
    # Every message forwarded to the consumer is tagged with this scope.
    scope = {
        'type': 'control_event',
        'channel': channel,
    }
    app = ApplicationCommunicator(ManagerConsumer, scope)
    channel_layer = get_channel_layer()
    async def _consume_loop():
        """Run a loop to consume messages off the channels layer."""
        while True:
            message = await channel_layer.receive(channel)
            if dry_run:
                # Dequeue only; do not dispatch to the consumer.
                continue
            # Sentinel message requesting loop shutdown.
            # NOTE(review): the {} default for .get('type') can never equal
            # the sentinel string; presumably just a harmless default choice.
            if message.get('type', {}) == '_resolwe_manager_quit':
                break
            message.update(scope)
            await app.send_input(message)
    if timeout is None:
        # Unlimited mode: consume until the quit sentinel arrives, then fall
        # through to the 1-second grace pass below.
        await _consume_loop()
    try:
        # A further grace period to catch late messages.
        async with async_timeout.timeout(timeout or 1):
            await _consume_loop()
    except asyncio.TimeoutError:
        pass
    await app.wait()
def discover_extensions(self):
    """Discover available extensions."""
    if self._discovery_done:
        return
    # Snapshot current state so a failing discovery pass can be rolled back.
    snapshot = self._extensions.copy()
    try:
        for app_config in apps.get_app_configs():
            module_path = '{}.extensions'.format(app_config.name)
            try:
                import_module(module_path)
            except ImportError:
                # Apps without an `extensions` module are simply skipped.
                pass
        self._discovery_done = True
    except Exception:
        # Rollback state to prevent corrupted state on exceptions during import.
        self._extensions = snapshot
        raise
def _get_class_path(self, klass_or_instance):
"""Return class path for a given class.
:param klass_or_instance: Class or instance of given class
:return: String containing the class path
"""
if inspect.isclass(klass_or_instance):
klass = '{}.{}'.format(klass_or_instance.__module__, klass_or_instance.__name__)
elif not isinstance(klass_or_instance, str):
klass = klass_or_instance.__class__
klass = '{}.{}'.format(klass.__module__, klass.__name__)
else:
klass = klass_or_instance
return klass | Return class path for a given class.
:param klass_or_instance: Class or instance of given class
:return: String containing the class path | entailment |
def add_extension(self, klass, extension):
    """Register an extension for a class.

    :param klass: Class to register an extension for
    :param extension: Extension (arbitrary type)
    """
    path = self._get_class_path(klass)
    # TODO: Take order into account.
    bucket = self._extensions.setdefault(path, [])
    bucket.append(extension)
def get_extensions(self, klass):
    """Return all registered extensions of a class.

    :param klass: Class to get registered extensions for
    :return: All registered extensions for given class (empty list if none)
    """
    # Ensure app extension modules have been imported before lookup.
    self.discover_extensions()
    path = self._get_class_path(klass)
    return self._extensions.get(path, [])
def _get_running_parameters(self, scale, f, loop=3):
    """Get the running parameters (e.g. quark masses and the strong
    coupling) at a given scale.

    NOTE(review): the `f` parameter is never used; the body reads `self.f`
    instead — confirm whether `f` should be passed through or removed.
    """
    p = {}
    p['alpha_s'] = qcd.alpha_s(scale, self.f, self.parameters['alpha_s'], loop=loop)
    p['m_b'] = qcd.m_b(self.parameters['m_b'], scale, self.f, self.parameters['alpha_s'], loop=loop)
    p['m_c'] = qcd.m_c(self.parameters['m_c'], scale, self.f, self.parameters['alpha_s'], loop=loop)
    p['m_s'] = qcd.m_s(self.parameters['m_s'], scale, self.f, self.parameters['alpha_s'], loop=loop)
    # NOTE(review): m_u and m_d are computed via qcd.m_s — confirm this is
    # intentional (same running for all light quarks) and not a copy-paste slip.
    p['m_u'] = qcd.m_s(self.parameters['m_u'], scale, self.f, self.parameters['alpha_s'], loop=loop)
    p['m_d'] = qcd.m_s(self.parameters['m_d'], scale, self.f, self.parameters['alpha_s'], loop=loop)
    # running ignored for alpha_e and lepton mass
    p['alpha_e'] = self.parameters['alpha_e']
    p['m_e'] = self.parameters['m_e']
    p['m_mu'] = self.parameters['m_mu']
    p['m_tau'] = self.parameters['m_tau']
    return p
def run(self, scale_out, sectors='all'):
    """Evolve the Wilson coefficients to the scale `scale_out`.

    Parameters:

    - scale_out: output scale
    - sectors: optional. If provided, must be a tuple of strings
      corresponding to WCxf sector names. Only Wilson coefficients
      belonging to these sectors will be present in the output.

    Returns an instance of `wcxf.WC`.
    """
    evolved = self._run_dict(scale_out, sectors=sectors)
    # Set membership test is O(1); keep only non-zero, known coefficients.
    known_wcs = set(wcxf.Basis[self.eft, 'JMS'].all_wcs)
    filtered = {name: val for name, val in evolved.items()
                if val != 0 and name in known_wcs}
    return wcxf.WC(eft=self.eft, basis='JMS',
                   scale=scale_out,
                   values=wcxf.WC.dict2values(filtered))
def size(self):
    """Returns:
      int : The total number of bytes that will be returned by the iterator.
    """
    stream = self._stream
    if hasattr(stream, 'len'):
        # e.g. objects exposing an explicit byte length.
        return len(stream)
    if hasattr(stream, 'fileno'):
        # Real files: ask the OS.
        return os.fstat(stream.fileno()).st_size
    # Fall back to seeking to the end, then restoring the position.
    saved_pos = stream.tell()
    total = stream.seek(0, os.SEEK_END)
    stream.seek(saved_pos)
    return total
def _get_cn_cert():
    """Get the public TLS/SSL X.509 certificate from the root CN of the DataONE
    environment. The certificate is used for validating the signature of the JWTs.

    If certificate retrieval fails, a new attempt to retrieve the certificate
    is performed after the cache expires (settings.CACHES.default.TIMEOUT).

    If successful, returns a cryptography.Certificate().
    """
    try:
        # EAFP: a missing attribute on the Django cache means "not cached yet".
        cert_obj = django.core.cache.cache.cn_cert_obj
        d1_common.cert.x509.log_cert_info(
            logging.debug, 'Using cached CN cert for JWT validation', cert_obj
        )
        return cert_obj
    except AttributeError:
        # Cache miss: download, decode, and memoize on the cache object.
        cn_cert_obj = _download_and_decode_cn_cert()
        django.core.cache.cache.cn_cert_obj = cn_cert_obj
        return cn_cert_obj
def key(self, key=""):
    """
    Takes a user's API key string which applies content settings. API keys can
    be found at <https://derpibooru.org/users/edit>.
    """
    return self.__class__(**join_params(self.parameters, {"key": key}))
def query(self, *q):
    """
    Takes one or more strings for searching by tag and/or metadata.
    """
    return self.__class__(**join_params(self.parameters, {"q": q}))
def sort_by(self, sf):
    """
    Determines how to sort search results. Available sorting methods are
    sort.SCORE, sort.COMMENTS, sort.HEIGHT, sort.RELEVANCE, sort.CREATED_AT,
    and sort.RANDOM; default is sort.CREATED_AT.
    """
    return self.__class__(**join_params(self.parameters, {"sf": sf}))
def limit(self, limit):
    """
    Set absolute limit on number of images to return, or set to None to return
    as many results as needed; default 50 posts.
    """
    return self.__class__(**join_params(self.parameters, {"limit": limit}))
def filter(self, filter_id=""):
    """
    Takes a filter's ID to be used in the current search context. Filter IDs can
    be found at <https://derpibooru.org/filters/> by inspecting the URL parameters.
    If no filter is provided, the user's current filter will be used.
    """
    return self.__class__(**join_params(self.parameters, {"filter_id": filter_id}))
def faves(self, option):
    """
    Set whether to filter by a user's faves list. Options available are
    user.ONLY, user.NOT, and None; default is None.
    """
    return self.__class__(**join_params(self.parameters, {"faves": option}))
def upvotes(self, option):
    """
    Set whether to filter by a user's upvoted list. Options available are
    user.ONLY, user.NOT, and None; default is None.
    """
    return self.__class__(**join_params(self.parameters, {"upvotes": option}))
def uploads(self, option):
    """
    Set whether to filter by a user's uploads list. Options available are
    user.ONLY, user.NOT, and None; default is None.
    """
    return self.__class__(**join_params(self.parameters, {"uploads": option}))
def watched(self, option):
    """
    Set whether to filter by a user's watchlist. Options available are
    user.ONLY, user.NOT, and None; default is None.
    """
    return self.__class__(**join_params(self.parameters, {"watched": option}))
def define_contributor(self, request):
    """Define contributor by adding it to request.data."""
    contributor = self.resolve_user(request.user)
    request.data['contributor'] = contributor.pk
def create(self, request, *args, **kwargs):
    """Create a resource."""
    self.define_contributor(request)
    try:
        response = super().create(request, *args, **kwargs)
    except IntegrityError as ex:
        # Duplicate / constraint violations are reported as a conflict.
        response = Response({'error': str(ex)}, status=status.HTTP_409_CONFLICT)
    return response
def perform_create(self, serializer):
    """Create a resource."""
    with transaction.atomic():
        created = serializer.save()
        # Grant the contributor full permissions on the new object.
        assign_contributor_permissions(created)
def update(self, request, *args, **kwargs):
    """Update a resource."""
    # NOTE: Use the original method instead when support for locking is added:
    # https://github.com/encode/django-rest-framework/issues/4675
    # return super().update(request, *args, **kwargs)
    with transaction.atomic():
        result = self._update(request, *args, **kwargs)
    return result
def _update(self, request, *args, **kwargs):
    """Update a resource.

    Copy of DRF's ``UpdateModelMixin.update`` with one change: the object is
    fetched with a row lock so concurrent updates serialize.
    """
    partial = kwargs.pop('partial', False)
    # NOTE: The line below was changed (locking variant of get_object).
    instance = self.get_object_with_lock()
    serializer = self.get_serializer(instance, data=request.data, partial=partial)
    serializer.is_valid(raise_exception=True)
    self.perform_update(serializer)
    if getattr(instance, '_prefetched_objects_cache', None):
        # If 'prefetch_related' has been applied to a queryset, we need to
        # forcibly invalidate the prefetch cache on the instance.
        instance._prefetched_objects_cache = {}  # pylint: disable=protected-access
    return Response(serializer.data)
def slug_exists(self, request):
    """Check if given url slug exists.

    Check if slug given in query parameter ``name`` exists. Return
    ``True`` if slug already exists and ``False`` otherwise.
    """
    if not request.user.is_authenticated:
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    if 'name' not in request.query_params:
        return Response({'error': 'Query parameter `name` must be given.'},
                        status=status.HTTP_400_BAD_REQUEST)
    slug_name = request.query_params['name']
    exists = self.get_queryset().filter(slug__iexact=slug_name).exists()
    return Response(exists)
def get_ids(self, request_data, parameter_name='ids'):
    """Extract a list of integers from request data."""
    if parameter_name not in request_data:
        raise ParseError("`{}` parameter is required".format(parameter_name))
    values = request_data.get(parameter_name)
    if not isinstance(values, list):
        raise ParseError("`{}` parameter not a list".format(parameter_name))
    if not values:
        raise ParseError("`{}` parameter is empty".format(parameter_name))
    # Avoid shadowing the builtin `id` inside the membership test.
    if not all(isinstance(item, int) for item in values):
        raise ParseError("`{}` parameter contains non-integers".format(parameter_name))
    return values
def get_id(self, request_data, parameter_name='id'):
    """Extract an integer from request data."""
    if parameter_name not in request_data:
        raise ParseError("`{}` parameter is required".format(parameter_name))
    value = request_data.get(parameter_name, None)
    if isinstance(value, int):
        return value
    raise ParseError("`{}` parameter not an integer".format(parameter_name))
def deserialize(dataone_exception_xml):
    """Deserialize a DataONE Exception XML doc.

    Returns a DataONEException-derived instance matching the ``name`` element
    of the document. A document that cannot be parsed is itself reported as a
    ServiceFailure carrying the parse error and the offending document.
    """
    # logging.debug('dataone_exception_xml="{}"'
    #   .format(d1_common.xml.pretty_xml(dataone_exception_xml)))
    try:
        dataone_exception_pyxb = d1_common.xml.deserialize_d1_exception(
            dataone_exception_xml
        )
    except ValueError as e:
        # Parse failure: wrap the error and the raw document in a
        # ServiceFailure so callers always receive a DataONE exception type.
        raise ServiceFailure(
            detailCode='0',
            description='Deserialization failed. error="{}" doc="{}"'.format(
                str(e),
                '<empty response>'
                if not dataone_exception_xml
                else dataone_exception_xml,
            ),
            traceInformation=traceback.format_exc(),
        )
    else:
        # Successful parse: rebuild the concrete exception type by name.
        x = create_exception_by_name(
            dataone_exception_pyxb.name,
            dataone_exception_pyxb.detailCode,
            dataone_exception_pyxb.description,
            _get_trace_information_content(dataone_exception_pyxb),
            dataone_exception_pyxb.identifier,
            dataone_exception_pyxb.nodeId,
        )
        return x
def deserialize_from_headers(headers):
    """Deserialize a DataONE Exception that is stored in a map of HTTP headers
    (used in responses to HTTP HEAD requests)."""
    header_names = (
        'DataONE-Exception-Name',
        'DataONE-Exception-DetailCode',
        'DataONE-Exception-Description',
        'DataONE-Exception-TraceInformation',
        'DataONE-Exception-Identifier',
        'DataONE-Exception-NodeId',
    )
    return create_exception_by_name(
        *(_get_header(headers, name) for name in header_names)
    )
def create_exception_by_name(
    name,
    detailCode='0',
    description='',
    traceInformation=None,
    identifier=None,
    nodeId=None,
):
    """Create a DataONEException based object by name.

    Args:
      name: str
        The type name of a DataONE Exception. E.g. NotFound.

        If an unknown type name is used, it is automatically set to ServiceFailure.
        As the XML Schema for DataONE Exceptions does not restrict the type names,
        this may occur when deserializing an exception not defined by DataONE.

      detailCode: int
        Optional index into a table of predefined error conditions.

    See Also:
      For remaining args, see: ``DataONEException()``
    """
    # Unknown names fall back to ServiceFailure.
    exception_class = globals().get(name, ServiceFailure)
    return exception_class(
        detailCode, description, traceInformation, identifier, nodeId
    )
def create_exception_by_error_code(
    errorCode,
    detailCode='0',
    description='',
    traceInformation=None,
    identifier=None,
    nodeId=None,
):
    """Create a DataONE Exception object by errorCode.

    Unknown error codes fall back to ServiceFailure.

    See Also: For args, see: ``DataONEException()``
    """
    exception_class = ERROR_CODE_TO_EXCEPTION_DICT.get(errorCode, ServiceFailure)
    return exception_class(
        detailCode, description, traceInformation, identifier, nodeId
    )
def _fmt(self, tag, msg):
    """Format a string for inclusion in the exception's string representation.

    A None/empty msg is replaced with '<unset>'. A msg that strips to
    whitespace-only returns None. A msg with at most one embedded newline
    formats inline as: tag: msg
    A msg with two or more newlines formats as a block:
      tag:
       line 1
       line 2
    Messages longer than 2048 chars are truncated to 1024 chars plus '...'
    (the original docstring claimed truncation at 1024).
    """
    msg = msg or '<unset>'
    msg = str(msg)
    msg = msg.strip()
    if not msg:
        # Whitespace-only input: implicitly returns None.
        return
    if len(msg) > 2048:
        msg = msg[:1024] + '...'
    if msg.count('\n') <= 1:
        return '{}: {}\n'.format(tag, msg.strip())
    else:
        return '{}:\n {}\n'.format(tag, msg.replace('\n', '\n ').strip())
def friendly_format(self):
    """Serialize to a format more suitable for displaying to end users."""
    if self.description is None:
        # Fall back to the numeric codes when no human-readable text exists.
        msg = 'errorCode: {} / detailCode: {}'.format(
            self.errorCode, self.detailCode
        )
    else:
        msg = self.description
    return self._fmt(self.name, msg)
def serialize_to_transport(self, encoding='utf-8', xslt_url=None):
    """Serialize to XML ``bytes`` with prolog.

    Args:
      encoding: str
        Encoding to use for XML doc bytes. Only UTF-8 is supported.

      xslt_url: str
        If specified, add a processing instruction to the XML doc that specifies
        the download location for an XSLT stylesheet.

    Returns:
      bytes: XML holding a DataONEError based type.

    Raises:
      ValueError: If ``encoding`` is not UTF-8.
    """
    # BUG FIX: the original used `assert` for input validation, which is
    # silently stripped under `python -O`. Raise explicitly instead.
    if encoding not in ('utf-8', 'UTF-8'):
        raise ValueError(
            'Only UTF-8 encoding is supported. encoding="{}"'.format(encoding)
        )
    dataone_exception_pyxb = self.get_pyxb()
    return d1_common.xml.serialize_for_transport(
        dataone_exception_pyxb, xslt_url=xslt_url
    )
def serialize_to_display(self, xslt_url=None):
    """Serialize to a pretty printed Unicode str, suitable for display.

    Args:
      xslt_url: url
        Optional link to an XSLT stylesheet. If provided, a processing
        instruction for the stylesheet is included in the XML prolog.
    """
    pyxb_obj = self.get_pyxb()
    return d1_common.xml.serialize_to_xml_str(pyxb_obj, pretty=True, xslt_url=xslt_url)
def serialize_to_headers(self):
    """Serialize to a dict of HTTP headers.

    Used in responses to HTTP HEAD requests. As with regular HTTP GET requests,
    HEAD requests may return DataONE Exceptions. Since a response to a HEAD
    request cannot include a body, the error is returned as a set of HTTP
    headers instead of an XML document.
    """
    fmt = self._format_header
    return {
        'DataONE-Exception-Name': self.__class__.__name__,
        'DataONE-Exception-ErrorCode': fmt(self.errorCode),
        'DataONE-Exception-DetailCode': fmt(self.detailCode),
        'DataONE-Exception-Description': fmt(self.description),
        'DataONE-Exception-TraceInformation': fmt(self.traceInformation),
        'DataONE-Exception-Identifier': fmt(self.identifier),
        'DataONE-Exception-NodeID': fmt(self.nodeId),
    }
def get_pyxb(self):
    """Generate a DataONE Exception PyXB object.

    The PyXB object supports directly reading and writing the individual
    values that may be included in a DataONE Exception.
    """
    exc_pyxb = dataoneErrors.error()
    exc_pyxb.name = self.__class__.__name__
    exc_pyxb.errorCode = self.errorCode
    exc_pyxb.detailCode = self.detailCode
    # Optional elements are only set when present; traceInformation may be
    # None and is always assigned, matching the original behavior.
    if self.description is not None:
        exc_pyxb.description = self.description
    exc_pyxb.traceInformation = self.traceInformation
    if self.identifier is not None:
        exc_pyxb.identifier = self.identifier
    if self.nodeId is not None:
        exc_pyxb.nodeId = self.nodeId
    return exc_pyxb
def recreate_parent_dependencies(apps, schema_editor):
    """Create empty dependency relation if parent has been deleted.

    Data migration: for every Data object, walk its data-typed inputs and,
    for each referenced parent that no longer exists, create a DataDependency
    row with ``parent=None`` so the broken reference is recorded explicitly.
    """
    Data = apps.get_model('flow', 'Data')
    DataDependency = apps.get_model('flow', 'DataDependency')

    def process_dependency(data, parent):
        # Only create the placeholder when the referenced parent is gone.
        if not Data.objects.filter(pk=parent).exists():
            DataDependency.objects.create(
                child=data, parent=None, kind='io'
            )

    for data in Data.objects.all():
        for field_schema, fields in iterate_fields(data.input, data.process.input_schema):
            name = field_schema['name']
            value = fields[name]
            # Single data reference vs. a list of references.
            if field_schema.get('type', '').startswith('data:'):
                process_dependency(data, value)
            elif field_schema.get('type', '').startswith('list:data:'):
                for parent in value:
                    process_dependency(data, parent)
def add_access_policy_filter(request, query, column_name):
    """Filter records that do not have ``read`` or better access for one or
    more of the active subjects.

    Since ``read`` is the lowest access level that a subject can have, this
    method only has to filter on the presence of the subject.
    """
    subject_qs = (
        d1_gmn.app.models.Subject.objects
        .filter(subject__in=request.all_subjects_set)
        .values('permission__sciobj')
    )
    return query.filter(**{'{}__in'.format(column_name): subject_qs})
def add_redact_annotation(request, query):
    """Flag LogEntry records that require ``ipAddress`` and ``subject`` fields to be
    redacted before being returned to the client.

    Only trusted subjects and subjects with ``write`` or ``changePermission`` on a
    SciObj receive unredacted ``ipAddress`` and ``subject`` in LogEntry records for the
    associated SciObj. Subjects with only ``read`` access receive redacted records.
    """
    # Permissions of write level or better held by any active subject on the
    # SciObj referenced by each LogEntry row.
    writable_perm_qs = d1_gmn.app.models.Permission.objects.filter(
        sciobj=django.db.models.OuterRef('sciobj'),
        subject__subject__in=request.all_subjects_set,
        level__gte=d1_gmn.app.auth.WRITE_LEVEL,
    )
    # redact=True exactly when no such permission exists (negated Exists).
    return query.annotate(
        redact=django.db.models.Exists(writable_perm_qs, negated=True)
    )
redacted before being returned to the client.
Only trusted subjects and subjects with ``write`` or ``changePermission`` on a
SciObj receive unredacted ``ipAddress`` and ``subject`` in LogEntry records for the
associated SciObj.
Subjects with only ``read`` access receive redacted records. | entailment |
def calculate_total_size(apps, schema_editor):
    """Add ``total_size`` field to all file/dir-type outputs."""
    data_model = apps.get_model('flow', 'Data')
    for data_obj in data_model.objects.all():
        # force=True recomputes sizes even when already present.
        hydrate_size(data_obj, force=True)
        data_obj.save()
def remove_total_size(apps, schema_editor):
    """Remove ``total_size`` field from all file/dir-type outputs.

    Reverse migration for ``calculate_total_size``: strips the ``total_size``
    key from every ``basic:file:``/``basic:dir:`` output, scalar or list.
    """
    Data = apps.get_model('flow', 'Data')
    for data in Data.objects.all():
        for field_schema, fields in iterate_fields(data.output, data.process.output_schema):
            field_type = field_schema.get('type', '')
            value = fields[field_schema['name']]
            if field_type.startswith(('basic:file:', 'basic:dir:')):
                # pop() tolerates outputs that were never annotated with
                # total_size; the original `del` raised KeyError on those.
                value.pop('total_size', None)
            elif field_type.startswith(('list:basic:file:', 'list:basic:dir:')):
                for obj in value:
                    obj.pop('total_size', None)
        data.save()
def is_valid_pid_for_create(did):
    """Assert that ``did`` can be used as a PID for creating a new object with
    MNStorage.create() or MNStorage.update()."""
    # Guard clause: a usable PID needs no further work.
    if d1_gmn.app.did.is_valid_pid_for_create(did):
        return
    raise d1_common.types.exceptions.IdentifierNotUnique(
        0,
        'Identifier is already in use as {}. did="{}"'.format(
            d1_gmn.app.did.classify_identifier(did), did
        ),
        identifier=did,
    )
MNStorage.create() or MNStorage.update(). | entailment |
def is_valid_pid_to_be_updated(did):
    """Assert that ``did`` is the PID of an object that can be updated (obsoleted) with
    MNStorage.update()"""
    # Guard clause: an updatable PID needs no further work.
    if d1_gmn.app.did.is_valid_pid_to_be_updated(did):
        return
    raise d1_common.types.exceptions.InvalidRequest(
        0,
        'Object cannot be updated because the identifier for the object to be '
        'updated is {}. did="{}"'.format(
            d1_gmn.app.did.classify_identifier(did), did
        ),
        identifier=did,
    )
MNStorage.update() | entailment |
def post_has_mime_parts(request, parts):
    """Validate that a MMP POST contains all required sections.

    :param request: Django Request
    :param parts: [(part_type, part_name), ...]
    :return: None or raises exception.

    Where information is stored in the request:
    part_type header: request.META['HTTP_<UPPER CASE NAME>']
    part_type file: request.FILES['<name>']
    part_type field: request.POST['<name>']
    """
    missing = []
    for part_type, part_name in parts:
        if part_type == 'header':
            if 'HTTP_' + part_name.upper() not in request.META:
                missing.append('{}: {}'.format(part_type, part_name))
        elif part_type == 'file':
            # Membership test directly on the mapping; no need to materialize
            # the key list as in `part_name not in list(request.FILES.keys())`.
            if part_name not in request.FILES:
                missing.append('{}: {}'.format(part_type, part_name))
        elif part_type == 'field':
            if part_name not in request.POST:
                missing.append('{}: {}'.format(part_type, part_name))
        else:
            raise d1_common.types.exceptions.ServiceFailure(
                0, 'Invalid part_type. part_type="{}"'.format(part_type)
            )
    if missing:
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Missing part(s) in MIME Multipart document. missing="{}"'.format(
                ', '.join(missing)
            ),
        )
:param request: Django Request
:param parts: [(part_type, part_name), ...]
:return: None or raises exception.
Where information is stored in the request:
part_type header: request.META['HTTP_<UPPER CASE NAME>']
part_type file: request.FILES['<name>']
part_type field: request.POST['<name>'] | entailment |
def _decade_ranges_in_date_range(self, begin_date, end_date):
    """Return a list of "YYYY-YYYY" decade strings covered by the date range.

    :param begin_date: start of the range (object with a ``.year`` attribute)
    :param end_date: end of the range, inclusive
    :return: list of strings such as "1990-1999", one per decade touched
    """
    # Floor division is required: under Python 3, "/" yields a float and the
    # range() call below would raise TypeError on non-integer endpoints.
    first_decade = begin_date.year // 10
    last_decade = end_date.year // 10
    return [
        '{}-{}'.format(d * 10, d * 10 + 9)
        for d in range(first_decade, last_decade + 1)
    ]
def _years_in_date_range_within_decade(self, decade, begin_date, end_date):
    """Return a list of years in one decade which is covered by date range."""
    # Clamp both endpoints to the decade window [decade, decade + 9].
    first_year = max(begin_date.year, decade)
    last_year = min(end_date.year, decade + 9)
    return list(range(first_year, last_year + 1))
def smeft_evolve_leadinglog(C_in, scale_in, scale_out, newphys=True):
    """Solve the SMEFT RGEs in the leading log approximation.

    Input C_in and output C_out are dictionaries of arrays."""
    evolved = deepcopy(C_in)
    beta_dict = beta.beta(evolved, newphys=newphys)
    # Single leading-log step: C(out) = C(in) + beta/(16 pi^2) * log(out/in)
    for key in evolved:
        evolved[key] = evolved[key] + beta_dict[key] / (16 * pi**2) * log(scale_out / scale_in)
    return evolved
Input C_in and output C_out are dictionaries of arrays. | entailment |
def _smeft_evolve(C_in, scale_in, scale_out, newphys=True, **kwargs):
    """Auxiliary function used in `smeft_evolve` and `smeft_evolve_continuous`"""
    def rhs(t0, y):
        # RGE right-hand side in t = log(scale); the flat real vector is
        # reinterpreted as complex, converted to a coefficient dict, and the
        # resulting beta array flattened back to real.
        coeff_dict = C_array2dict(y.view(complex))
        return beta.beta_array(C=coeff_dict, newphys=newphys).view(float) / (16 * pi**2)

    return solve_ivp(fun=rhs,
                     t_span=(log(scale_in), log(scale_out)),
                     y0=C_dict2array(C_in).view(float),
                     **kwargs)
def smeft_evolve(C_in, scale_in, scale_out, newphys=True, **kwargs):
    """Solve the SMEFT RGEs by numeric integration.

    Input C_in and output C_out are dictionaries of arrays."""
    solution = _smeft_evolve(C_in, scale_in, scale_out, newphys=newphys, **kwargs)
    # Last column of the ODE solution is the state at scale_out.
    final_state = solution.y[:, -1].view(complex)
    return C_array2dict(final_state)
Input C_in and output C_out are dictionaries of arrays. | entailment |
def smeft_evolve_continuous(C_in, scale_in, scale_out, newphys=True, **kwargs):
    """Solve the SMEFT RGEs by numeric integration, returning a function that
    allows to compute an interpolated solution at arbitrary intermediate
    scales."""
    sol = _smeft_evolve(C_in, scale_in, scale_out, newphys=newphys,
                        dense_output=True, **kwargs)

    @np.vectorize
    def _interpolated(scale):
        # Evaluate the dense ODE solution at t = log(scale) and convert the
        # flat complex vector back to a WCxf-style dictionary.
        coeff_dict = C_array2dict(sol.sol(log(scale)).view(complex))
        return arrays2wcxf_nonred(coeff_dict)

    def rge_solution(scale):
        # Index with () so a scalar input yields a scalar result instead of
        # a 0-d array (np.vectorize always returns an array).
        return _interpolated(scale)[()]

    return rge_solution
allows to compute an interpolated solution at arbitrary intermediate
scales. | entailment |
def invalid_index(self, name):
    """Show an invalid index error message."""
    self.stderr.write("Unknown index: {}".format(name))
    self.stderr.write("Supported indices are:")
    # List every registered index class by name.
    for index_name in (idx.__class__.__name__ for idx in index_builder.indexes):
        self.stderr.write(" * {}".format(index_name))
def filter_indices(self, options, verbosity, *args, **kwargs):
    """Filter indices and execute an action for each index."""
    available = {idx.__class__.__name__: idx for idx in index_builder.indexes}

    # Start from the explicit include list, or everything when none given.
    selected = set(options['index']) if options['index'] else set(available)

    # Drop excluded indices; an unknown exclude name aborts the command.
    for name in options['exclude']:
        if name not in available:
            self.invalid_index(name)
            return
        selected.discard(name)

    # Run the action on whatever remains; unknown include names abort here.
    for name in selected:
        if name not in available:
            self.invalid_index(name)
            return
        if verbosity > 0:
            self.stdout.write("Processing index '{}'...".format(name))
        self.handle_index(available[name], *args, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.