sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def is_allowed(request, level, pid):
    """Return True when the active subjects may perform ``level`` on object ``pid``.

    Holding a permission at one level implies all lower levels (``level__gte``).
    Unknown subjects simply fail to match any ACL row. Returns False when the PID
    does not exist or the level is invalid, since the filter then matches nothing.
    """
    if is_trusted_subject(request):
        # Fully trusted DataONE infrastructure subjects bypass per-object ACLs.
        return True
    acl_match = d1_gmn.app.models.Permission.objects.filter(
        sciobj__pid__did=pid,
        subject__subject__in=request.all_subjects_set,
        level__gte=level,
    )
    return acl_match.exists()
Check if one or more subjects are allowed to perform action level on object. If a subject holds permissions for one action level on object, all lower action levels are also allowed. Any included subject that is unknown to this MN is treated as a subject without permissions. Returns: bool True: - The active subjects include one or more subjects that: - are fully trusted DataONE infrastructure subjects, causing all rights to be granted regardless of requested access level and SciObj - OR are in the object's ACL for the requested access level. The ACL contains the subjects from the object's allow rules and the object's rightsHolder, which has all rights. - OR object is public, which always yields a match on the "public" symbolic subject. False: - None of the active subjects are in the object's ACL for the requested access level or for lower levels. - OR PID does not exist - OR access level is invalid
entailment
def assert_create_update_delete_permission(request):
    """Raise NotAuthorized unless the caller holds Create/Update/Delete permission
    or is trusted infrastructure (a CN)."""
    if has_create_update_delete_permission(request):
        return
    raise d1_common.types.exceptions.NotAuthorized(
        0,
        'Access allowed only for subjects with Create/Update/Delete '
        'permission. active_subjects="{}"'.format(format_active_subjects(request)),
    )
Access only by subjects with Create/Update/Delete permission and by trusted infrastructure (CNs).
entailment
def assert_allowed(request, level, pid):
    """Assert that the active subjects may perform ``level`` on object ``pid``.

    Raises NotFound when the object does not exist, NotAuthorized when it exists
    but access is denied.
    """
    sciobj_exists = d1_gmn.app.models.ScienceObject.objects.filter(
        pid__did=pid
    ).exists()
    if not sciobj_exists:
        raise d1_common.types.exceptions.NotFound(
            0,
            'Attempted to perform operation on non-existing object. pid="{}"'.format(
                pid
            ),
        )
    if is_allowed(request, level, pid):
        return
    raise d1_common.types.exceptions.NotAuthorized(
        0,
        'Operation is denied. level="{}", pid="{}", active_subjects="{}"'.format(
            level_to_action(level), pid, format_active_subjects(request)
        ),
    )
Assert that one or more subjects are allowed to perform action on object. Raise NotAuthorized if object exists and subject is not allowed. Raise NotFound if object does not exist.
entailment
def format_active_subjects(request):
    """Return a comma-separated listing of the connection's active subjects.

    The primary subject is tagged "(primary)"; suitable for appending to
    authentication error messages.
    """
    primary = request.primary_subject_str
    parts = [primary + ' (primary)']
    parts.extend(s for s in request.all_subjects_set if s != primary)
    return ', '.join(parts)
Create a string listing active subjects for this connection, suitable for appending to authentication error messages.
entailment
def main(): """Remove unused imports Unsafe! Only tested on our codebase, which uses simple absolute imports on the form, "import a.b.c". """ parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument("path", nargs="+", help="File or directory path") parser.add_argument("--exclude", nargs="+", help="Exclude glob patterns") parser.add_argument( "--no-recursive", dest="recursive", action="store_false", help="Search directories recursively", ) parser.add_argument( "--ignore-invalid", action="store_true", help="Ignore invalid paths" ) parser.add_argument( "--pycharm", action="store_true", help="Enable PyCharm integration" ) parser.add_argument( "--diff", dest="show_diff", action="store_true", help="Show diff and do not modify any files", ) parser.add_argument( "--dry-run", action="store_true", help="Process files but do not write results" ) parser.add_argument("--debug", action="store_true", help="Debug level logging") args = parser.parse_args() d1_common.util.log_setup(args.debug) repo_path = d1_dev.util.find_repo_root_by_path(__file__) repo = git.Repo(repo_path) specified_file_path_list = get_specified_file_path_list(args) # tracked_path_list = list(d1_dev.util.get_tracked_files(repo)) # format_path_list = sorted( # set(specified_file_path_list).intersection(tracked_path_list) # ) format_path_list = specified_file_path_list for format_path in format_path_list: comment_unused_imports(args, format_path)
Remove unused imports Unsafe! Only tested on our codebase, which uses simple absolute imports on the form, "import a.b.c".
entailment
def comment_import(r, unused_dot_list):
    """Comment out the (first) import node matching ``unused_dot_list``."""
    target = ".".join(unused_dot_list)
    for node in r("ImportNode"):
        if node.names()[0] != target:
            continue
        # The "!" is inserted so that this line doesn't show up when searching for
        # the comment pattern in code.
        node.replace("#{}# {}".format("!", str(node)))
        break
Comment out import for {dot_str}.
entailment
def get_atomtrailer_list(r):
    """Capture only the leading dotted name list of each atomtrailers node.

    A full sequence typically includes function calls and parameters:
    pkga.pkgb.pkgc.one_call(arg1, arg2, arg3=4)
    """
    dotted = set()
    for node in r.find_all(("atomtrailers",)):
        prefix = []
        for part in node.value:
            if part.type == "name":
                prefix.append(part.value)
            else:
                # Stop at the first non-name element (call, subscript, ...).
                break
        if prefix:
            dotted.add(tuple(prefix))
    return sorted(dotted)
Capture only the leading dotted name list. A full sequence typically includes function calls and parameters. pkga.pkgb.pkgc.one_call(arg1, arg2, arg3=4)
entailment
def custom_filter_tags(self, value, search):
    """Support tags query."""
    tag_list = value if isinstance(value, list) else value.split(',')
    must_filters = [Q('match', **{'tags': tag}) for tag in tag_list]
    return search.query('bool', must=must_filters)
Support tags query.
entailment
def custom_filter_text(self, value, search):
    """Support general query using the 'text' attribute."""
    if isinstance(value, list):
        # Multiple values are joined into a single free-text query string.
        value = ' '.join(value)
    # Field boosts: exact slug/name matches rank highest (10.0), their ngram
    # variants lower (5.0/2.0); descriptor_data matches carry no boost.
    should = [
        Q('match', slug={'query': value, 'operator': 'and', 'boost': 10.0}),
        Q('match', **{'slug.ngrams': {'query': value, 'operator': 'and', 'boost': 5.0}}),
        Q('match', name={'query': value, 'operator': 'and', 'boost': 10.0}),
        Q('match', **{'name.ngrams': {'query': value, 'operator': 'and', 'boost': 5.0}}),
        Q('match', contributor_name={'query': value, 'operator': 'and', 'boost': 5.0}),
        Q('match', **{'contributor_name.ngrams': {'query': value, 'operator': 'and', 'boost': 2.0}}),
        Q('match', owner_names={'query': value, 'operator': 'and', 'boost': 5.0}),
        Q('match', **{'owner_names.ngrams': {'query': value, 'operator': 'and', 'boost': 2.0}}),
        Q('match', descriptor_data={'query': value, 'operator': 'and'}),
    ]
    # Add registered text extensions.
    for extension in composer.get_extensions(self):
        if hasattr(extension, 'text_filter'):
            should += extension.text_filter(value)
    search = search.query('bool', should=should)
    return search
Support general query using the 'text' attribute.
entailment
def set_content_permissions(self, user, obj, payload):
    """Apply permissions to data objects and entities in ``Collection``."""
    for entity in obj.entity_set.all():
        if not user.has_perm('share_entity', entity):
            continue
        update_permission(entity, payload)
    # Data doesn't have "ADD" permission, so it has to be removed
    payload = remove_permission(payload, 'add')
    for data in obj.data.all():
        if not user.has_perm('share_data', data):
            continue
        update_permission(data, payload)
Apply permissions to data objects and entities in ``Collection``.
entailment
def create(self, request, *args, **kwargs):
    """Only authenticated users can create new collections.

    NOTE(review): raises NotFound rather than a 401/403 — presumably to avoid
    revealing the endpoint to anonymous callers; confirm this is intentional.
    """
    if not request.user.is_authenticated:
        raise exceptions.NotFound
    return super().create(request, *args, **kwargs)
Only authenticated users can create new collections.
entailment
def destroy(self, request, *args, **kwargs):
    """Destroy a model instance.

    If ``delete_content`` flag is set in query parameters, also all Data objects
    and Entities, on which user has ``EDIT`` permission, contained in collection
    will be deleted.
    """
    obj = self.get_object()
    user = request.user
    # Query parameter is a string; strtobool accepts "1"/"true"/"yes"/... .
    # NOTE(review): strtobool comes from distutils, removed in Python 3.12 —
    # confirm a replacement is in place before upgrading.
    if strtobool(request.query_params.get('delete_content', 'false')):
        for entity in obj.entity_set.all():
            if user.has_perm('edit_entity', entity):
                entity.delete()
        for data in obj.data.all():
            if user.has_perm('edit_data', data):
                data.delete()
    return super().destroy(request, *args, **kwargs)
Destroy a model instance. If ``delete_content`` flag is set in query parameters, also all Data objects and Entities, on which user has ``EDIT`` permission, contained in collection will be deleted.
entailment
def add_data(self, request, pk=None):
    """Add data to collection.

    Expects an ``ids`` list in the request body. All referenced Data objects must
    exist; otherwise the request fails with 400 and the collection is unchanged.
    """
    collection = self.get_object()
    if 'ids' not in request.data:
        # Bug fix: message previously read "`ids`parameter" (missing space).
        return Response({"error": "`ids` parameter is required"},
                        status=status.HTTP_400_BAD_REQUEST)
    missing = [data_id for data_id in request.data['ids']
               if not Data.objects.filter(pk=data_id).exists()]
    if missing:
        return Response(
            # Bug fix: ids may arrive as ints; str.join rejects non-strings.
            {"error": "Data objects with following ids are missing: {}".format(
                ', '.join(map(str, missing)))},
            status=status.HTTP_400_BAD_REQUEST)
    for data_id in request.data['ids']:
        collection.data.add(data_id)
    return Response()
Add data to collection.
entailment
def remove_data(self, request, pk=None):
    """Remove data from collection.

    Expects an ``ids`` list in the request body; ids not present in the
    collection are silently ignored by the m2m ``remove``.
    """
    collection = self.get_object()
    if 'ids' not in request.data:
        # Bug fix: message previously read "`ids`parameter" (missing space).
        return Response({"error": "`ids` parameter is required"},
                        status=status.HTTP_400_BAD_REQUEST)
    for data_id in request.data['ids']:
        collection.data.remove(data_id)
    return Response()
Remove data from collection.
entailment
def duplicate(self, request, *args, **kwargs):
    """Duplicate (make copy of) ``Collection`` models."""
    if not request.user.is_authenticated:
        raise exceptions.NotFound
    ids = self.get_ids(request.data)
    # Only collections the user may view can be duplicated.
    queryset = get_objects_for_user(
        request.user, 'view_collection', Collection.objects.filter(id__in=ids)
    )
    actual_ids = queryset.values_list('id', flat=True)
    missing_ids = list(set(ids) - set(actual_ids))
    if missing_ids:
        # "not found" covers both non-existent and non-viewable collections.
        raise exceptions.ParseError(
            "Collections with the following ids not found: {}".format(
                ', '.join(map(str, missing_ids))
            )
        )
    duplicated = queryset.duplicate(contributor=request.user)
    serializer = self.get_serializer(duplicated, many=True)
    return Response(serializer.data)
Duplicate (make copy of) ``Collection`` models.
entailment
async def is_object_synced_to_cn(self, client, pid):
    """Check if object with {pid} has successfully synced to the CN.

    CNRead.describe() is used as it's a light-weight HTTP HEAD request. This
    assumes that the call is being made over a connection that has been
    authenticated and has read or better access on the given object if it exists.
    """
    try:
        await client.describe(pid)
    except d1_common.types.exceptions.DataONEException:
        return False
    else:
        return True
Check if object with {pid} has successfully synced to the CN. CNRead.describe() is used as it's a light-weight HTTP HEAD request. This assumes that the call is being made over a connection that has been authenticated and has read or better access on the given object if it exists.
entailment
def apply(self, search, field, value):
    """Apply lookup expression to search query."""
    lookup_body = {field: {self.operator: self.get_value_query(value)}}
    query = self.query_type(**lookup_body)  # pylint: disable=not-callable
    return search.query(query)
Apply lookup expression to search query.
entailment
def apply(self, search, field, value):
    """Apply lookup expression to search query."""
    if not isinstance(value, list):
        # Comma-separated string form; drop empty items.
        value = [item for item in value.strip().split(',') if item]
    match_filters = [Q('match', **{field: item}) for item in value]
    return search.query('bool', should=match_filters)
Apply lookup expression to search query.
entailment
def apply(self, search, field, value):
    """Apply lookup expression to search query."""
    # We assume that the field in question has a "raw" counterpart.
    raw_field = '{}.raw'.format(field)
    return search.query('match', **{raw_field: value})
Apply lookup expression to search query.
entailment
def register_lookup(self, lookup):
    """Register lookup class; refuse duplicate operators."""
    op = lookup.operator
    if op in self._lookups:
        raise KeyError("Lookup for operator '{}' is already registered".format(op))
    # Stored as an instance, keyed by operator name.
    self._lookups[op] = lookup()
Register lookup.
entailment
def get_lookup(self, operator):
    """Look up a lookup.

    :param operator: Name of the lookup operator
    """
    try:
        lookup = self._lookups[operator]
    except KeyError:
        raise NotImplementedError("Lookup operator '{}' is not supported".format(operator))
    return lookup
Look up a lookup. :param operator: Name of the lookup operator
entailment
def build(self, search, raw_query):
    """Build query.

    :param search: Search query instance
    :param raw_query: Raw query arguments dictionary
    :return: (search, unmatched_items) — the augmented search plus the raw-query
        entries whose field was not recognized
    """
    unmatched_items = {}
    for expression, value in raw_query.items():
        # Parse query expression into tokens.
        tokens = expression.split(TOKEN_SEPARATOR)
        field = tokens[0]
        tail = tokens[1:]
        if field not in self.fields:
            # Unknown fields are returned to the caller, not treated as errors.
            unmatched_items[expression] = value
            continue
        # Map field alias to final field.
        field = self.fields_map.get(field, field)
        # Parse lookup expression. Currently only no token or a single token is allowed.
        if tail:
            if len(tail) > 1:
                raise NotImplementedError("Nested lookup expressions are not supported")
            lookup = self.get_lookup(tail[0])
            search = lookup.apply(search, field, value)
        else:
            # Default lookup.
            custom_filter = getattr(self.custom_filter_object, 'custom_filter_{}'.format(field), None)
            if custom_filter is not None:
                search = custom_filter(value, search)
            elif isinstance(value, list):
                # Default is 'should' between matches. If you need anything else,
                # a custom filter for this field should be implemented.
                filters = [Q('match', **{field: item}) for item in value]
                search = search.query('bool', should=filters)
            else:
                search = search.query('match', **{field: {'query': value, 'operator': 'and'}})
    return (search, unmatched_items)
Build query. :param search: Search query instance :param raw_query: Raw query arguments dictionary
entailment
def resolve_sid(f):
    """View handler decorator that adds SID resolve and PID validation.

    - For v1 calls, assume that ``did`` is a pid and raise NotFound exception if
      it's not valid.
    - For v2 calls, if DID is a valid PID, return it. If not, try to resolve it as
      a SID and, if successful, return the new PID. Else, raise NotFound exception.
    """
    @functools.wraps(f)
    def wrapped(request, did, *args, **kwargs):
        resolved_pid = resolve_sid_func(request, did)
        return f(request, resolved_pid, *args, **kwargs)
    return wrapped
View handler decorator that adds SID resolve and PID validation. - For v1 calls, assume that ``did`` is a pid and raise NotFound exception if it's not valid. - For v2 calls, if DID is a valid PID, return it. If not, try to resolve it as a SID and, if successful, return the new PID. Else, raise NotFound exception.
entailment
def decode_did(f):
    """View handler decorator that decodes "%2f" ("/") in SID or PID extracted
    from URL path segment by Django."""
    @functools.wraps(f)
    def wrapped(request, did, *args, **kwargs):
        decoded_did = decode_path_segment(did)
        return f(request, decoded_did, *args, **kwargs)
    return wrapped
View handler decorator that decodes "%2f" ("/") in SID or PID extracted from URL path segment by Django.
entailment
def trusted_permission(f):
    """Access only by D1 infrastructure."""
    @functools.wraps(f)
    def wrapped(request, *args, **kwargs):
        # trusted() is expected to raise if the caller is not trusted
        # infrastructure; the view body only runs if it returns.
        trusted(request)
        return f(request, *args, **kwargs)
    return wrapped
Access only by D1 infrastructure.
entailment
def list_objects_access(f):
    """Access to listObjects() controlled by settings.PUBLIC_OBJECT_LIST."""
    @functools.wraps(f)
    def wrapped(request, *args, **kwargs):
        # When the object list is not public, restrict to trusted infrastructure.
        if not django.conf.settings.PUBLIC_OBJECT_LIST:
            trusted(request)
        return f(request, *args, **kwargs)
    return wrapped
Access to listObjects() controlled by settings.PUBLIC_OBJECT_LIST.
entailment
def get_log_records_access(f):
    """Access to getLogRecords() controlled by settings.PUBLIC_LOG_RECORDS."""
    @functools.wraps(f)
    def wrapped(request, *args, **kwargs):
        # When log records are not public, restrict to trusted infrastructure.
        if not django.conf.settings.PUBLIC_LOG_RECORDS:
            trusted(request)
        return f(request, *args, **kwargs)
    return wrapped
Access to getLogRecords() controlled by settings.PUBLIC_LOG_RECORDS.
entailment
def assert_create_update_delete_permission(f):
    """Access only by subjects with Create/Update/Delete permission and by trusted
    infrastructure (CNs)."""
    @functools.wraps(f)
    def wrapped(request, *args, **kwargs):
        # Raises before the view body runs if the caller lacks permission.
        d1_gmn.app.auth.assert_create_update_delete_permission(request)
        return f(request, *args, **kwargs)
    return wrapped
Access only by subjects with Create/Update/Delete permission and by trusted infrastructure (CNs).
entailment
def authenticated(f):
    """Access only with a valid session."""
    @functools.wraps(f)
    def wrapped(request, *args, **kwargs):
        if d1_common.const.SUBJECT_AUTHENTICATED in request.all_subjects_set:
            return f(request, *args, **kwargs)
        raise d1_common.types.exceptions.NotAuthorized(
            0,
            'Access allowed only for authenticated subjects. Please reconnect with '
            'a valid DataONE session certificate. active_subjects="{}"'.format(
                d1_gmn.app.auth.format_active_subjects(request)
            ),
        )
    return wrapped
Access only with a valid session.
entailment
def required_permission(f, level):
    """Assert that subject has access at given level or higher for object."""
    @functools.wraps(f)
    def wrapped(request, pid, *args, **kwargs):
        # Raises NotFound/NotAuthorized before the view body runs.
        d1_gmn.app.auth.assert_allowed(request, level, pid)
        return f(request, pid, *args, **kwargs)
    return wrapped
Assert that subject has access at given level or higher for object.
entailment
def _read_and_deserialize_dataone_type(self, response):
    """Given a response body, try to create an instance of a DataONE type.

    The return value will be either an instance of a type or a DataONE exception.
    """
    try:
        return d1_common.xml.deserialize(response.content)
    except ValueError as e:
        # NOTE(review): the helper presumably raises (per its name), making the
        # implicit ``return None`` after it unreachable — confirm.
        self._raise_service_failure_invalid_dataone_type(response, e)
Given a response body, try to create an instance of a DataONE type. The return value will be either an instance of a type or a DataONE exception.
entailment
def get(self, pid, stream=False, vendorSpecific=None):
    """Initiate a MNRead.get(). Return a Requests Response object from which the
    object bytes can be retrieved.

    When ``stream`` is False, Requests buffers the entire object in memory before
    returning the Response. This can exhaust available memory on the local machine
    when retrieving large science objects. The solution is to set ``stream`` to
    True, which causes the returned Response object to contain a stream. However,
    see note below.

    When ``stream`` = True, the Response object will contain a stream which can be
    processed without buffering the entire science object in memory. However,
    failure to read all data from the stream can cause connections to be blocked.
    Due to this, the ``stream`` parameter is False by default.

    Also see:

    - http://docs.python-requests.org/en/master/user/advanced/body-content-workflow
    - get_and_save() in this module.
    """
    response = self.getResponse(pid, stream, vendorSpecific)
    return self._read_stream_response(response)
Initiate a MNRead.get(). Return a Requests Response object from which the object bytes can be retrieved. When ``stream`` is False, Requests buffers the entire object in memory before returning the Response. This can exhaust available memory on the local machine when retrieving large science objects. The solution is to set ``stream`` to True, which causes the returned Response object to contain a a stream. However, see note below. When ``stream`` = True, the Response object will contain a stream which can be processed without buffering the entire science object in memory. However, failure to read all data from the stream can cause connections to be blocked. Due to this, the ``stream`` parameter is False by default. Also see: - http://docs.python-requests.org/en/master/user/advanced/body-content-workflow - get_and_save() in this module.
entailment
def get_and_save(
    self, pid, sciobj_stream, create_missing_dirs=False, vendorSpecific=None
):
    """Like MNRead.get(), but also retrieve the object bytes and store them in a
    stream.

    This method does not have the potential issue with excessive memory usage that
    get() with ``stream``=False has.

    Also see MNRead.get().
    """
    response = self.get(pid, stream=True, vendorSpecific=vendorSpecific)
    try:
        if create_missing_dirs:
            # NOTE(review): the stream object, not a path string, is passed here —
            # confirm the helper accepts file-like objects.
            d1_common.utils.filesystem.create_missing_directories_for_file(
                sciobj_stream
            )
        for chunk_str in response.iter_content(
            chunk_size=d1_common.const.DEFAULT_CHUNK_SIZE
        ):
            # Skip keep-alive chunks, which iter_content may yield as empty.
            if chunk_str:
                sciobj_stream.write(chunk_str)
    finally:
        # Always release the connection, even if a write fails mid-stream.
        response.close()
    return response
Like MNRead.get(), but also retrieve the object bytes and store them in a stream. This method does not have the potential issue with excessive memory usage that get() with ``stream``=False has. Also see MNRead.get().
entailment
def describe(self, pid, vendorSpecific=None):
    """Note: If the server returns a status code other than 200 OK, a
    ServiceFailure will be raised, as this method is based on a HEAD request,
    which cannot carry exception information."""
    head_response = self.describeResponse(pid, vendorSpecific=vendorSpecific)
    return self._read_header_response(head_response)
Note: If the server returns a status code other than 200 OK, a ServiceFailure will be raised, as this method is based on a HEAD request, which cannot carry exception information.
entailment
def isAuthorized(self, pid, action, vendorSpecific=None):
    """Return True if user is allowed to perform ``action`` on ``pid``, else
    False."""
    auth_response = self.isAuthorizedResponse(pid, action, vendorSpecific)
    return self._read_boolean_401_response(auth_response)
Return True if user is allowed to perform ``action`` on ``pid``, else False.
entailment
def fields(self):
    """Filter fields based on request query parameters."""
    fields = super().fields
    # Shallow-copied before projecting — presumably so the base class's field
    # mapping is not mutated; confirm apply_subfield_projection mutates its input.
    return apply_subfield_projection(self, copy.copy(fields))
Filter fields based on request query parameters.
entailment
def iterate_fields(fields, schema, path_prefix=None):
    """Iterate over all field values sub-fields.

    This will iterate over all field values. Some fields defined in the schema
    might not be visited.

    :param fields: field values to iterate over
    :type fields: dict
    :param schema: schema to iterate over
    :type schema: dict
    :return: (field schema, field value) — or (schema, value, path) 3-tuples when
        ``path_prefix`` is given
    :rtype: tuple
    """
    if path_prefix is not None and path_prefix != '' and path_prefix[-1] != '.':
        path_prefix += '.'
    schema_by_name = {entry['name']: entry for entry in schema}
    for field_id, properties in fields.items():
        path = '{}{}'.format(path_prefix, field_id) if path_prefix is not None else None
        if field_id not in schema_by_name:
            raise KeyError("Field definition ({}) missing in schema".format(field_id))
        entry = schema_by_name[field_id]
        if 'group' in entry:
            # Recurse into grouped sub-fields.
            for rvals in iterate_fields(properties, entry['group'], path):
                yield rvals if path_prefix is not None else rvals[:2]
        else:
            rvals = (entry, fields, path)
            yield rvals if path_prefix is not None else rvals[:2]
Iterate over all field values sub-fields. This will iterate over all field values. Some fields defined in the schema might not be visited. :param fields: field values to iterate over :type fields: dict :param schema: schema to iterate over :type schema: dict :return: (field schema, field value) :rtype: tuple
entailment
def iterate_schema(fields, schema, path_prefix=''):
    """Iterate over all schema sub-fields.

    This will iterate over all field definitions in the schema. Some field values
    might be None.

    :param fields: field values to iterate over
    :type fields: dict
    :param schema: schema to iterate over
    :type schema: dict
    :param path_prefix: dot separated path prefix
    :type path_prefix: str
    :return: (field schema, field value, field path)
    :rtype: tuple
    """
    if path_prefix and not path_prefix.endswith('.'):
        path_prefix += '.'
    for field_schema in schema:
        name = field_schema['name']
        path = '{}{}'.format(path_prefix, name)
        if 'group' in field_schema:
            # Missing groups are iterated with an empty value dict.
            subfields = fields[name] if name in fields else {}
            for rvals in iterate_schema(subfields, field_schema['group'], path):
                yield rvals
        else:
            yield (field_schema, fields, path)
Iterate over all schema sub-fields. This will iterate over all field definitions in the schema. Some field v alues might be None. :param fields: field values to iterate over :type fields: dict :param schema: schema to iterate over :type schema: dict :param path_prefix: dot separated path prefix :type path_prefix: str :return: (field schema, field value, field path) :rtype: tuple
entailment
def iterate_dict(container, exclude=None, path=None):
    """Iterate over a nested dictionary.

    The dictionary is iterated over in a depth first manner; nested mappings are
    yielded after their children, each item as a (path, key, value) tuple.

    :param container: Dictionary to iterate over
    :param exclude: Optional callable, which is given key and value as arguments
        and may return True to stop iteration of that branch
    :return: (path, key, value) tuple
    """
    # Bug fix: ``collections.Mapping`` was removed in Python 3.10; the ABC lives
    # in ``collections.abc``. Imported locally so the fix is self-contained.
    import collections.abc
    if path is None:
        path = []
    for key, value in container.items():
        if callable(exclude) and exclude(key, value):
            continue
        if isinstance(value, collections.abc.Mapping):
            for inner_path, inner_key, inner_value in iterate_dict(
                value, exclude=exclude, path=path + [key]
            ):
                yield inner_path, inner_key, inner_value
        yield path, key, value
Iterate over a nested dictionary. The dictionary is iterated over in a depth first manner. :param container: Dictionary to iterate over :param exclude: Optional callable, which is given key and value as arguments and may return True to stop iteration of that branch :return: (path, key, value) tuple
entailment
def get_subjects(request):
    """Get all subjects in the certificate.

    - Returns: primary_str (primary subject), equivalent_set (equivalent
      identities, groups and group memberships)
    - The primary subject is the certificate subject DN, serialized to a DataONE
      compliant subject string.
    """
    if _is_certificate_provided(request):
        try:
            return get_authenticated_subjects(request.META['SSL_CLIENT_CERT'])
        except Exception as e:
            # Any extraction/parse failure surfaces to the client as InvalidToken.
            raise d1_common.types.exceptions.InvalidToken(
                0,
                'Error extracting session from certificate. error="{}"'.format(str(e)),
            )
    else:
        # No client certificate: anonymous connection, "public" symbolic subject.
        return d1_common.const.SUBJECT_PUBLIC, set()
Get all subjects in the certificate. - Returns: primary_str (primary subject), equivalent_set (equivalent identities, groups and group memberships) - The primary subject is the certificate subject DN, serialized to a DataONE compliant subject string.
entailment
def get_authenticated_subjects(cert_pem):
    """Return primary subject and set of equivalents authenticated by certificate.

    - ``cert_pem`` can be str or bytes
    """
    pem_bytes = cert_pem.encode('utf-8') if isinstance(cert_pem, str) else cert_pem
    return d1_common.cert.subjects.extract_subjects(pem_bytes)
Return primary subject and set of equivalents authenticated by certificate. - ``cert_pem`` can be str or bytes
entailment
def get_serializer_class(self):
    """Augment base serializer class.

    Include permissions information with objects.
    """
    base_class = super().get_serializer_class()

    class SerializerWithPermissions(base_class):
        """Augment serializer class."""

        # ``serializer_self`` is used for the serializer instance so that the
        # closed-over ``self`` (the view) stays accessible inside these methods.
        def get_fields(serializer_self):  # pylint: disable=no-self-argument
            """Return serializer's fields."""
            fields = super().get_fields()
            fields['current_user_permissions'] = CurrentUserPermissionsSerializer(read_only=True)
            return fields

        def to_representation(serializer_self, instance):  # pylint: disable=no-self-argument
            """Object serializer."""
            data = super().to_representation(instance)
            # Include permissions unless an explicit ``fields`` projection
            # excludes them.
            if ('fields' not in self.request.query_params or
                    'current_user_permissions' in self.request.query_params['fields']):
                data['current_user_permissions'] = get_object_perms(instance, self.request.user)
            return data

    return SerializerWithPermissions
Augment base serializer class. Include permissions information with objects.
entailment
def detail_permissions(self, request, pk=None):
    """Get or set permissions API endpoint."""
    obj = self.get_object()
    if request.method == 'POST':
        content_type = ContentType.objects.get_for_model(obj)
        payload = request.data
        # ``share_content`` is popped so it is not treated as a permission entry.
        share_content = strtobool(payload.pop('share_content', 'false'))
        user = request.user
        is_owner = user.has_perm('owner_{}'.format(content_type), obj=obj)
        allow_owner = is_owner or user.is_superuser
        check_owner_permission(payload, allow_owner)
        check_public_permissions(payload)
        check_user_permissions(payload, request.user.pk)
        # Atomic so that a failed "at least one owner" check rolls back the
        # permission update.
        with transaction.atomic():
            update_permission(obj, payload)
            owner_count = UserObjectPermission.objects.filter(
                object_pk=obj.id,
                content_type=content_type,
                permission__codename__startswith='owner_'
            ).count()
            if not owner_count:
                raise exceptions.ParseError('Object must have at least one owner.')
            if share_content:
                self.set_content_permissions(user, obj, payload)
    # GET (and successful POST) both return the current permission set.
    return Response(get_object_perms(obj))
Get or set permissions API endpoint.
entailment
def save(self, *args, **kwargs):
    """Perform descriptor validation and save object.

    Sets ``descriptor_dirty`` according to whether the descriptor validates
    against the attached schema. Raises ValueError when a descriptor is present
    without a schema.
    """
    if self.descriptor_schema:
        try:
            validate_schema(self.descriptor, self.descriptor_schema.schema)  # pylint: disable=no-member
            self.descriptor_dirty = False
        except DirtyError:
            self.descriptor_dirty = True
    elif self.descriptor:
        # (``self.descriptor != {}`` was redundant: an empty dict is falsy.)
        raise ValueError("`descriptor_schema` must be defined if `descriptor` is given")
    # Bug fix: forward args/kwargs (e.g. ``force_insert=True``) to the base
    # implementation; they were previously accepted but silently dropped.
    super().save(*args, **kwargs)
Perform descriptor validation and save object.
entailment
def duplicate(self, contributor=None):
    """Duplicate (make a copy)."""
    # Re-fetch to get a detached instance, then clear pk/slug so save() inserts.
    duplicate = Collection.objects.get(id=self.id)
    duplicate.pk = None
    duplicate.slug = None
    duplicate.name = 'Copy of {}'.format(self.name)
    duplicate.duplicated = now()
    if contributor:
        duplicate.contributor = contributor
    duplicate.save(force_insert=True)
    assign_contributor_permissions(duplicate)
    # Fields to inherit from original data object.
    duplicate.created = self.created
    # Second save persists the inherited ``created`` timestamp after the insert.
    duplicate.save()
    # Duplicate collection's entities (only those the contributor may view).
    entities = get_objects_for_user(contributor, 'view_entity', self.entity_set.all())  # pylint: disable=no-member
    duplicated_entities = entities.duplicate(contributor=contributor)
    duplicate.entity_set.add(*duplicated_entities)
    # Add duplicated data objects to collection.
    for duplicated_entity in duplicate.entity_set.all():
        duplicate.data.add(*duplicated_entity.data.all())
    return duplicate
Duplicate (make a copy).
entailment
def _scalar2array(d): """Convert a dictionary with scalar elements and string indices '_1234' to a dictionary of arrays. Unspecified entries are np.nan.""" da = {} for k, v in d.items(): if '_' not in k: da[k] = v else: name = ''.join(k.split('_')[:-1]) ind = k.split('_')[-1] dim = len(ind) if name not in da: shape = tuple(3 for i in range(dim)) da[name] = np.empty(shape, dtype=complex) da[name][:] = np.nan da[name][tuple(int(i) - 1 for i in ind)] = v return da
Convert a dictionary with scalar elements and string indices '_1234' to a dictionary of arrays. Unspecified entries are np.nan.
entailment
def _symm_herm(C): """To get rid of NaNs produced by _scalar2array, symmetrize operators where C_ijkl = C_jilk*""" nans = np.isnan(C) C[nans] = np.einsum('jilk', C)[nans].conj() return C
To get rid of NaNs produced by _scalar2array, symmetrize operators where C_ijkl = C_jilk*
entailment
def _symm_current(C): """To get rid of NaNs produced by _scalar2array, symmetrize operators where C_ijkl = C_klij""" nans = np.isnan(C) C[nans] = np.einsum('klij', C)[nans] return C
To get rid of NaNs produced by _scalar2array, symmetrize operators where C_ijkl = C_klij
entailment
def _antisymm_12(C): """To get rid of NaNs produced by _scalar2array, antisymmetrize the first two indices of operators where C_ijkl = -C_jikl""" nans = np.isnan(C) C[nans] = -np.einsum('jikl', C)[nans] return C
To get rid of NaNs produced by _scalar2array, antisymmetrize the first two indices of operators where C_ijkl = -C_jikl
entailment
def JMS_to_array(C, sectors=None): """For a dictionary with JMS Wilson coefficients, return a dictionary of arrays.""" if sectors is None: wc_keys = wcxf.Basis['WET', 'JMS'].all_wcs else: try: wc_keys = [k for s in sectors for k in wcxf.Basis['WET', 'JMS'].sectors[s]] except KeyError: print(sectors) # fill in zeros for missing coefficients C_complete = {k: C.get(k, 0) for k in wc_keys} Ca = _scalar2array(C_complete) for k in Ca: if k in C_symm_keys[5]: Ca[k] = _symm_herm(Ca[k]) if k in C_symm_keys[41]: Ca[k] = _symm_current(Ca[k]) if k in C_symm_keys[4]: Ca[k] = _symm_herm(_symm_current(Ca[k])) if k in C_symm_keys[9]: Ca[k] = _antisymm_12(Ca[k]) return Ca
For a dictionary with JMS Wilson coefficients, return a dictionary of arrays.
entailment
def symmetrize_JMS_dict(C): """For a dictionary with JMS Wilson coefficients but keys that might not be in the non-redundant basis, return a dictionary with keys from the basis and values conjugated if necessary.""" wc_keys = set(wcxf.Basis['WET', 'JMS'].all_wcs) Cs = {} for op, v in C.items(): if '_' not in op or op in wc_keys: Cs[op] = v continue name, ind = op.split('_') if name in C_symm_keys[5]: i, j, k, l = ind indnew = ''.join([j, i, l, k]) Cs['_'.join([name, indnew])] = v.conjugate() elif name in C_symm_keys[41]: i, j, k, l = ind indnew = ''.join([k, l, i, j]) Cs['_'.join([name, indnew])] = v elif name in C_symm_keys[4]: i, j, k, l = ind indnew = ''.join([l, k, j, i]) newname = '_'.join([name, indnew]) if newname in wc_keys: Cs[newname] = v.conjugate() else: indnew = ''.join([j, i, l, k]) newname = '_'.join([name, indnew]) if newname in wc_keys: Cs[newname] = v.conjugate() else: indnew = ''.join([k, l, i, j]) newname = '_'.join([name, indnew]) Cs[newname] = v elif name in C_symm_keys[9]: i, j, k, l = ind indnew = ''.join([j, i, k, l]) Cs['_'.join([name, indnew])] = -v return Cs
For a dictionary with JMS Wilson coefficients but keys that might not be in the non-redundant basis, return a dictionary with keys from the basis and values conjugated if necessary.
entailment
def rotate_down(C_in, p): """Redefinition of all Wilson coefficients in the JMS basis when rotating down-type quark fields from the flavour to the mass basis. C_in is expected to be an array-valued dictionary containg a key for all Wilson coefficient matrices.""" C = C_in.copy() V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"]) UdL = V ## B conserving operators # type dL dL dL dL for k in ['VddLL']: C[k] = np.einsum('ia,jb,kc,ld,ijkl->abcd', UdL.conj(), UdL, UdL.conj(), UdL, C_in[k]) # type X X dL dL for k in ['V1udLL', 'V8udLL', 'VedLL', 'VnudLL']: C[k] = np.einsum('kc,ld,ijkl->ijcd', UdL.conj(), UdL, C_in[k]) # type dL dL X X for k in ['V1ddLR', 'V1duLR', 'V8ddLR', 'V8duLR', 'VdeLR']: C[k] = np.einsum('ia,jb,ijkl->abkl', UdL.conj(), UdL, C_in[k]) # type dL X dL X for k in ['S1ddRR', 'S8ddRR']: C[k] = np.einsum('ia,kc,ijkl->ajcl', UdL.conj(), UdL.conj(), C_in[k]) # type X dL X X for k in ['V1udduLR', 'V8udduLR']: C[k] = np.einsum('jb,ijkl->ibkl', UdL, C_in[k]) # type X X dL X for k in ['VnueduLL', 'SedRR', 'TedRR', 'SnueduRR', 'TnueduRR', 'S1udRR', 'S8udRR', 'S1udduRR', 'S8udduRR', ]: C[k] = np.einsum('kc,ijkl->ijcl', UdL.conj(), C_in[k]) # type X X X dL for k in ['SedRL', ]: C[k] = np.einsum('ld,ijkl->ijkd', UdL, C_in[k]) ## DeltaB=DeltaL=1 operators # type dL X X X for k in ['SduuLL', 'SduuLR']: C[k] = np.einsum('ia,ijkl->ajkl', UdL, C_in[k]) # type X X dL X for k in ['SuudRL', 'SdudRL']: C[k] = np.einsum('kc,ijkl->ijcl', UdL, C_in[k]) # type X dL dL X for k in ['SuddLL']: C[k] = np.einsum('jb,kc,ijkl->ibcl', UdL, UdL, C_in[k]) return C
Redefinition of all Wilson coefficients in the JMS basis when rotating down-type quark fields from the flavour to the mass basis. C_in is expected to be an array-valued dictionary containg a key for all Wilson coefficient matrices.
entailment
def scale_dict_wet(C): """To account for the fact that arXiv:Jenkins:2017jig uses a flavour non-redundant basis in contrast to WCxf, symmetry factors of two have to be introduced in several places for operators that are symmetric under the interchange of two currents.""" return {k: v / _scale_dict[k] for k, v in C.items()}
To account for the fact that arXiv:Jenkins:2017jig uses a flavour non-redundant basis in contrast to WCxf, symmetry factors of two have to be introduced in several places for operators that are symmetric under the interchange of two currents.
entailment
def unscale_dict_wet(C): """Undo the scaling applied in `scale_dict_wet`.""" return {k: _scale_dict[k] * v for k, v in C.items()}
Undo the scaling applied in `scale_dict_wet`.
entailment
def write_reqs(req_path, req_list): """ Args: req_path: req_list: """ with open(req_path, 'w') as f: f.write('\n'.join(req_list) + "\n")
Args: req_path: req_list:
entailment
def submit(self, data, runtime_dir, argv): """Run process locally. For details, see :meth:`~resolwe.flow.managers.workload_connectors.base.BaseConnector.submit`. """ logger.debug(__( "Connector '{}' running for Data with id {} ({}).", self.__class__.__module__, data.id, repr(argv) )) subprocess.Popen( argv, cwd=runtime_dir, stdin=subprocess.DEVNULL ).wait()
Run process locally. For details, see :meth:`~resolwe.flow.managers.workload_connectors.base.BaseConnector.submit`.
entailment
def create_checksum_object_from_stream( f, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM ): """Calculate the checksum of a stream. Args: f: file-like object Only requirement is a ``read()`` method that returns ``bytes``. algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: Populated Checksum PyXB object. """ checksum_str = calculate_checksum_on_stream(f, algorithm) checksum_pyxb = d1_common.types.dataoneTypes.checksum(checksum_str) checksum_pyxb.algorithm = algorithm return checksum_pyxb
Calculate the checksum of a stream. Args: f: file-like object Only requirement is a ``read()`` method that returns ``bytes``. algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: Populated Checksum PyXB object.
entailment
def create_checksum_object_from_iterator( itr, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM ): """Calculate the checksum of an iterator. Args: itr: iterable Object which supports the iterator protocol. algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: Populated Checksum PyXB object. """ checksum_str = calculate_checksum_on_iterator(itr, algorithm) checksum_pyxb = d1_common.types.dataoneTypes.checksum(checksum_str) checksum_pyxb.algorithm = algorithm return checksum_pyxb
Calculate the checksum of an iterator. Args: itr: iterable Object which supports the iterator protocol. algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: Populated Checksum PyXB object.
entailment
def create_checksum_object_from_bytes( b, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM ): """Calculate the checksum of ``bytes``. Warning: This method requires the entire object to be buffered in (virtual) memory, which should normally be avoided in production code. Args: b: bytes Raw bytes algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: Populated PyXB Checksum object. """ checksum_str = calculate_checksum_on_bytes(b, algorithm) checksum_pyxb = d1_common.types.dataoneTypes.checksum(checksum_str) checksum_pyxb.algorithm = algorithm return checksum_pyxb
Calculate the checksum of ``bytes``. Warning: This method requires the entire object to be buffered in (virtual) memory, which should normally be avoided in production code. Args: b: bytes Raw bytes algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: Populated PyXB Checksum object.
entailment
def calculate_checksum_on_stream( f, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM, chunk_size=DEFAULT_CHUNK_SIZE, ): """Calculate the checksum of a stream. Args: f: file-like object Only requirement is a ``read()`` method that returns ``bytes``. algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. chunk_size : int Number of bytes to read from the file and add to the checksum at a time. Returns: str : Checksum as a hexadecimal string, with length decided by the algorithm. """ checksum_calc = get_checksum_calculator_by_dataone_designator(algorithm) while True: chunk = f.read(chunk_size) if not chunk: break checksum_calc.update(chunk) return checksum_calc.hexdigest()
Calculate the checksum of a stream. Args: f: file-like object Only requirement is a ``read()`` method that returns ``bytes``. algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. chunk_size : int Number of bytes to read from the file and add to the checksum at a time. Returns: str : Checksum as a hexadecimal string, with length decided by the algorithm.
entailment
def calculate_checksum_on_iterator( itr, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM ): """Calculate the checksum of an iterator. Args: itr: iterable Object which supports the iterator protocol. algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: str : Checksum as a hexadecimal string, with length decided by the algorithm. """ checksum_calc = get_checksum_calculator_by_dataone_designator(algorithm) for chunk in itr: checksum_calc.update(chunk) return checksum_calc.hexdigest()
Calculate the checksum of an iterator. Args: itr: iterable Object which supports the iterator protocol. algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: str : Checksum as a hexadecimal string, with length decided by the algorithm.
entailment
def calculate_checksum_on_bytes( b, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM ): """Calculate the checksum of ``bytes``. Warning: This method requires the entire object to be buffered in (virtual) memory, which should normally be avoided in production code. Args: b: bytes Raw bytes algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: str : Checksum as a hexadecimal string, with length decided by the algorithm. """ checksum_calc = get_checksum_calculator_by_dataone_designator(algorithm) checksum_calc.update(b) return checksum_calc.hexdigest()
Calculate the checksum of ``bytes``. Warning: This method requires the entire object to be buffered in (virtual) memory, which should normally be avoided in production code. Args: b: bytes Raw bytes algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: str : Checksum as a hexadecimal string, with length decided by the algorithm.
entailment
def are_checksums_equal(checksum_a_pyxb, checksum_b_pyxb): """Determine if checksums are equal. Args: checksum_a_pyxb, checksum_b_pyxb: PyXB Checksum objects to compare. Returns: bool - **True**: The checksums contain the same hexadecimal values calculated with the same algorithm. Identical checksums guarantee (for all practical purposes) that the checksums were calculated from the same sequence of bytes. - **False**: The checksums were calculated with the same algorithm but the hexadecimal values are different. Raises: ValueError The checksums were calculated with different algorithms, hence cannot be compared. """ if checksum_a_pyxb.algorithm != checksum_b_pyxb.algorithm: raise ValueError( 'Cannot compare checksums calculated with different algorithms. ' 'a="{}" b="{}"'.format(checksum_a_pyxb.algorithm, checksum_b_pyxb.algorithm) ) return checksum_a_pyxb.value().lower() == checksum_b_pyxb.value().lower()
Determine if checksums are equal. Args: checksum_a_pyxb, checksum_b_pyxb: PyXB Checksum objects to compare. Returns: bool - **True**: The checksums contain the same hexadecimal values calculated with the same algorithm. Identical checksums guarantee (for all practical purposes) that the checksums were calculated from the same sequence of bytes. - **False**: The checksums were calculated with the same algorithm but the hexadecimal values are different. Raises: ValueError The checksums were calculated with different algorithms, hence cannot be compared.
entailment
def format_checksum(checksum_pyxb): """Create string representation of a PyXB Checksum object. Args: PyXB Checksum object Returns: str : Combined hexadecimal value and algorithm name. """ return '{}/{}'.format( checksum_pyxb.algorithm.upper().replace('-', ''), checksum_pyxb.value().lower() )
Create string representation of a PyXB Checksum object. Args: PyXB Checksum object Returns: str : Combined hexadecimal value and algorithm name.
entailment
def handle(self, *args, **kwargs): """Run the executor listener. This method never returns.""" listener = ExecutorListener(redis_params=getattr(settings, 'FLOW_MANAGER', {}).get('REDIS_CONNECTION', {})) def _killer(signum, frame): """Kill the listener on receipt of a signal.""" listener.terminate() signal(SIGINT, _killer) signal(SIGTERM, _killer) async def _runner(): """Run the listener instance.""" if kwargs['clear_queue']: await listener.clear_queue() async with listener: pass loop = asyncio.new_event_loop() loop.run_until_complete(_runner()) loop.close()
Run the executor listener. This method never returns.
entailment
def apply_subfield_projection(field, value, deep=False): """Apply projection from request context. The passed dictionary may be mutated. :param field: An instance of `Field` or `Serializer` :type field: `Field` or `Serializer` :param value: Dictionary to apply the projection to :type value: dict :param deep: Also process all deep projections :type deep: bool """ # Discover the root manually. We cannot use either `self.root` or `self.context` # due to a bug with incorrect caching (see DRF issue #5087). prefix = [] root = field while root.parent is not None: # Skip anonymous serializers (e.g., intermediate ListSerializers). if root.field_name: prefix.append(root.field_name) root = root.parent prefix = prefix[::-1] context = getattr(root, '_context', {}) # If there is no request, we cannot perform filtering. request = context.get('request') if request is None: return value filtered = set(request.query_params.get('fields', '').split(FIELD_SEPARATOR)) filtered.discard('') if not filtered: # If there are no fields specified in the filter, return all fields. return value # Extract projection for current and deeper levels. current_level = len(prefix) current_projection = [] for item in filtered: item = item.split(FIELD_DEREFERENCE) if len(item) <= current_level: continue if item[:current_level] == prefix: if deep: current_projection.append(item[current_level:]) else: current_projection.append([item[current_level]]) if deep and not current_projection: # For deep projections, an empty projection means that all fields should # be returned without any projection. return value # Apply projection. return apply_projection(current_projection, value)
Apply projection from request context. The passed dictionary may be mutated. :param field: An instance of `Field` or `Serializer` :type field: `Field` or `Serializer` :param value: Dictionary to apply the projection to :type value: dict :param deep: Also process all deep projections :type deep: bool
entailment
def apply_projection(projection, value): """Apply projection.""" if isinstance(value, Sequence): # Apply projection to each item in the list. return [ apply_projection(projection, item) for item in value ] elif not isinstance(value, Mapping): # Non-dictionary values are simply ignored. return value # Extract projection for current level. try: current_projection = [p[0] for p in projection] except IndexError: return value # Apply projection. for name in list(value.keys()): if name not in current_projection: value.pop(name) elif isinstance(value[name], dict): # Apply projection recursively. value[name] = apply_projection( [p[1:] for p in projection if p[0] == name], value[name] ) return value
Apply projection.
entailment
def _create_partitions(self, instance, partitions): """Create partitions.""" for partition in partitions: RelationPartition.objects.create( relation=instance, entity=partition['entity'], label=partition.get('label', None), position=partition.get('position', None), )
Create partitions.
entailment
def create(self, validated_data): """Create ``Relation`` object and add partitions of ``Entities``.""" # `partitions` field is renamed to `relationpartition_set` based on source of nested serializer partitions = validated_data.pop('relationpartition_set') with transaction.atomic(): instance = Relation.objects.create(**validated_data) self._create_partitions(instance, partitions) return instance
Create ``Relation`` object and add partitions of ``Entities``.
entailment
def update(self, instance, validated_data): """Update ``Relation``.""" # `partitions` field is renamed to `relationpartition_set` based on source of nested serializer partitions = validated_data.pop('relationpartition_set', None) with transaction.atomic(): instance = super().update(instance, validated_data) if partitions is not None: # TODO: Apply the diff instead of recreating all objects. instance.relationpartition_set.all().delete() self._create_partitions(instance, partitions) return instance
Update ``Relation``.
entailment
def output(file_like_object, path, verbose=False): """Display or save file like object.""" if not path: for line in file_like_object: if verbose: print_info(line.rstrip()) else: print(line.rstrip()) else: try: object_file = open(os.path.expanduser(path), "w", encoding="utf-8") shutil.copyfileobj(file_like_object, object_file) object_file.close() except EnvironmentError as xxx_todo_changeme: (errno, strerror) = xxx_todo_changeme.args error_line_list = [ "Could not write to object_file: {}".format(path), "I/O error({}): {}".format(errno, strerror), ] error_message = "\n".join(error_line_list) raise d1_cli.impl.exceptions.CLIError(error_message)
Display or save file like object.
entailment
def _print_level(level, msg): """Print the information in Unicode safe manner.""" for l in str(msg.rstrip()).split("\n"): print("{0:<9s}{1}".format(level, str(l)))
Print the information in Unicode safe manner.
entailment
def get_process_definition_start(fname, slug): """Find the first line of process definition. The first line of process definition is the line with a slug. :param str fname: Path to filename with processes :param string slug: process slug :return: line where the process definiton starts :rtype: int """ with open(fname) as file_: for i, line in enumerate(file_): if re.search(r'slug:\s*{}'.format(slug), line): return i + 1 # In case starting line is not found just return first line return 1
Find the first line of process definition. The first line of process definition is the line with a slug. :param str fname: Path to filename with processes :param string slug: process slug :return: line where the process definiton starts :rtype: int
entailment
def get_processes(process_dir, base_source_uri): """Find processes in path. :param str process_dir: Path to the directory where to search for processes :param str base_source_uri: Base URL of the source code repository with process definitions :return: Dictionary of processes where keys are URLs pointing to processes' source code and values are processes' definitions parsed from YAML files :rtype: dict :raises: ValueError: if multiple processes with the same slug are found """ global PROCESS_CACHE # pylint: disable=global-statement if PROCESS_CACHE is not None: return PROCESS_CACHE all_process_files = [] process_file_extensions = ['*.yaml', '*.yml'] for root, _, filenames in os.walk(process_dir): for extension in process_file_extensions: for filename in fnmatch.filter(filenames, extension): all_process_files.append(os.path.join(root, filename)) def read_yaml_file(fname): """Read the yaml file.""" with open(fname) as f: return yaml.load(f, Loader=yaml.FullLoader) processes = [] for process_file in all_process_files: processes_in_file = read_yaml_file(process_file) for process in processes_in_file: # This section finds the line in file where the # defintion of the process starts. (there are # multiple process definition in some files). startline = get_process_definition_start(process_file, process['slug']) # Put together URL to starting line of process definition. process['source_uri'] = base_source_uri + process_file[len(process_dir) + 1:] + '#L' + str(startline) if 'category' not in process: process['category'] = 'uncategorized' processes.append(process) PROCESS_CACHE = processes return processes
Find processes in path. :param str process_dir: Path to the directory where to search for processes :param str base_source_uri: Base URL of the source code repository with process definitions :return: Dictionary of processes where keys are URLs pointing to processes' source code and values are processes' definitions parsed from YAML files :rtype: dict :raises: ValueError: if multiple processes with the same slug are found
entailment
def setup(app): """Register directives. When sphinx loads the extension (= imports the extension module) it also executes the setup() function. Setup is the way extension informs Sphinx about everything that the extension enables: which config_values are introduced, which custom nodes/directives/roles and which events are defined in extension. In this case, only one new directive is created. All used nodes are constructed from already existing nodes in docutils.nodes package. """ app.add_config_value('autoprocess_process_dir', '', 'env') app.add_config_value('autoprocess_source_base_url', '', 'env') app.add_config_value('autoprocess_definitions_uri', '', 'env') app.add_directive('autoprocess', AutoProcessDirective) app.add_directive('autoprocesscategory', AutoProcessCategoryDirective) app.add_directive('autoprocesstype', AutoProcessTypesDirective) # The setup() function can return a dictionary. This is treated by # Sphinx as metadata of the extension: return {'version': '0.2'}
Register directives. When sphinx loads the extension (= imports the extension module) it also executes the setup() function. Setup is the way extension informs Sphinx about everything that the extension enables: which config_values are introduced, which custom nodes/directives/roles and which events are defined in extension. In this case, only one new directive is created. All used nodes are constructed from already existing nodes in docutils.nodes package.
entailment
def make_field(self, field_name, field_body): """Fill content into nodes. :param string field_name: Field name of the field :param field_name: Field body if the field :type field_name: str or instance of docutils.nodes :return: field instance filled with given name and body :rtype: nodes.field """ name = nodes.field_name() name += nodes.Text(field_name) paragraph = nodes.paragraph() if isinstance(field_body, str): # This is the case when field_body is just a string: paragraph += nodes.Text(field_body) else: # This is the case when field_body is a complex node: # useful when constructing nested field lists paragraph += field_body body = nodes.field_body() body += paragraph field = nodes.field() field.extend([name, body]) return field
Fill content into nodes. :param string field_name: Field name of the field :param field_name: Field body if the field :type field_name: str or instance of docutils.nodes :return: field instance filled with given name and body :rtype: nodes.field
entailment
def make_properties_list(self, field): """Fill the ``field`` into a properties list and return it. :param dict field: the content of the property list to make :return: field_list instance filled with given field :rtype: nodes.field_list """ properties_list = nodes.field_list() # changing the order of elements in this list affects # the order in which they are displayed property_names = ['label', 'type', 'description', 'required', 'disabled', 'hidden', 'default', 'placeholder', 'validate_regex', 'choices', 'collapse', 'group'] for name in property_names: if name not in field: continue value = field[name] # Value should be formatted in code-style (=literal) mode if name in ['type', 'default', 'placeholder', 'validate_regex']: literal_node = nodes.literal(str(value), str(value)) properties_list += self.make_field(name, literal_node) # Special formating of ``value`` is needed if name == 'choices' elif name == 'choices': bullet_list = nodes.bullet_list() for choice in value: label = nodes.Text(choice['label'] + ': ') val = nodes.literal(choice['value'], choice['value']) paragraph = nodes.paragraph() paragraph += label paragraph += val list_item = nodes.list_item() list_item += paragraph bullet_list += list_item properties_list += self.make_field(name, bullet_list) else: properties_list += self.make_field(name, str(value)) return properties_list
Fill the ``field`` into a properties list and return it. :param dict field: the content of the property list to make :return: field_list instance filled with given field :rtype: nodes.field_list
entailment
def make_process_header(self, slug, typ, version, source_uri, description, inputs): """Generate a process definition header. :param str slug: process' slug :param str typ: process' type :param str version: process' version :param str source_uri: url to the process definition :param str description: process' description :param dict inputs: process' inputs """ node = addnodes.desc() signode = addnodes.desc_signature(slug, '') node.append(signode) node['objtype'] = node['desctype'] = typ signode += addnodes.desc_annotation(typ, typ, classes=['process-type']) signode += addnodes.desc_addname('', '') signode += addnodes.desc_name(slug + ' ', slug + ' ') paramlist = addnodes.desc_parameterlist() for field_schema, _, _ in iterate_schema({}, inputs, ''): field_type = field_schema['type'] field_name = field_schema['name'] field_default = field_schema.get('default', None) field_default = '' if field_default is None else '={}'.format(field_default) param = addnodes.desc_parameter('', '', noemph=True) param += nodes.emphasis(field_type, field_type, classes=['process-type']) # separate by non-breaking space in the output param += nodes.strong(text='\xa0\xa0' + field_name) paramlist += param signode += paramlist signode += nodes.reference('', nodes.Text('[Source: v{}]'.format(version)), refuri=source_uri, classes=['viewcode-link']) desc = nodes.paragraph() desc += nodes.Text(description, description) return [node, desc]
Generate a process definition header. :param str slug: process' slug :param str typ: process' type :param str version: process' version :param str source_uri: url to the process definition :param str description: process' description :param dict inputs: process' inputs
entailment
def make_process_node(self, process): """Fill the content of process definiton node. :param dict process: process data as given from yaml.load function :return: process node """ name = process['name'] slug = process['slug'] typ = process['type'] version = process['version'] description = process.get('description', '') source_uri = process['source_uri'] inputs = process.get('input', []) outputs = process.get('output', []) # Make process name a section title: section = nodes.section(ids=['process-' + slug]) section += nodes.title(name, name) # Make process header: section += self.make_process_header(slug, typ, version, source_uri, description, inputs) # Make inputs section: container_node = nodes.container(classes=['toggle']) container_header = nodes.paragraph(classes=['header']) container_header += nodes.strong(text='Input arguments') container_node += container_header container_body = nodes.container() for field_schema, _, path in iterate_schema({}, inputs, ''): container_body += nodes.strong(text=path) container_body += self.make_properties_list(field_schema) container_node += container_body section += container_node # Make outputs section: container_node = nodes.container(classes=['toggle']) container_header = nodes.paragraph(classes=['header']) container_header += nodes.strong(text='Output results') container_node += container_header container_body = nodes.container() for field_schema, _, path in iterate_schema({}, outputs, ''): container_body += nodes.strong(text=path) container_body += self.make_properties_list(field_schema) container_node += container_body section += container_node return [section, addnodes.index(entries=[('single', name, 'process-' + slug, '', None)])]
Fill the content of process definiton node. :param dict process: process data as given from yaml.load function :return: process node
entailment
def run(self): """Create a list of process definitions.""" config = self.state.document.settings.env.config # Get all processes: processes = get_processes(config.autoprocess_process_dir, config.autoprocess_source_base_url) process_nodes = [] for process in sorted(processes, key=itemgetter('name')): process_nodes.extend(self.make_process_node(process)) return process_nodes
Create a list of process definitions.
entailment
def run(self): """Create a category tree.""" config = self.state.document.settings.env.config # Group processes by category processes = get_processes(config.autoprocess_process_dir, config.autoprocess_source_base_url) processes.sort(key=itemgetter('category')) categorized_processes = {k: list(g) for k, g in groupby(processes, itemgetter('category'))} # Build category tree category_sections = {'': nodes.container(ids=['categories'])} top_categories = [] for category in sorted(categorized_processes.keys()): category_path = '' for category_node in category.split(':'): parent_category_path = category_path category_path += '{}:'.format(category_node) if category_path in category_sections: continue category_name = category_node.capitalize() section = nodes.section(ids=['category-' + category_node]) section += nodes.title(category_name, category_name) # Add process list category_key = category_path[:-1] if category_key in categorized_processes: listnode = nodes.bullet_list() section += listnode for process in categorized_processes[category_key]: par = nodes.paragraph() node = nodes.reference('', process['name'], internal=True) node['refuri'] = config.autoprocess_definitions_uri + '#process-' + process['slug'] node['reftitle'] = process['name'] par += node listnode += nodes.list_item('', par) category_sections[parent_category_path] += section category_sections[category_path] = section if parent_category_path == '': top_categories.append(section) # Return top sections only return top_categories
Create a category tree.
entailment
def run(self): """Create a type list.""" config = self.state.document.settings.env.config # Group processes by category processes = get_processes(config.autoprocess_process_dir, config.autoprocess_source_base_url) processes.sort(key=itemgetter('type')) processes_by_types = {k: list(g) for k, g in groupby(processes, itemgetter('type'))} listnode = nodes.bullet_list() for typ in sorted(processes_by_types.keys()): par = nodes.paragraph() par += nodes.literal(typ, typ) par += nodes.Text(' - ') processes = sorted(processes_by_types[typ], key=itemgetter('name')) last_process = processes[-1] for process in processes: node = nodes.reference('', process['name'], internal=True) node['refuri'] = config.autoprocess_definitions_uri + '#process-' + process['slug'] node['reftitle'] = process['name'] par += node if process != last_process: par += nodes.Text(', ') listnode += nodes.list_item('', par) return [listnode]
Create a type list.
entailment
async def get(self, file_stream, pid, vendor_specific=None):
    """MNRead.get()

    Stream the bytes of the Science Object identified by ``pid`` into
    ``file_stream``.

    Args:
        file_stream: Open file-like object to which the SciObj bytes are written.
        pid: str identifier of the object to retrieve.
        vendor_specific: dict of custom HTTP headers to include in the request.

    See also: MNRead.get().
    """
    request_ctx = await self._retry_request(
        "get", ["object", pid], vendor_specific=vendor_specific
    )
    async with request_ctx as response:
        self._assert_valid_response(response)
        # Copy the response body chunk by chunk to avoid buffering the whole object.
        async for chunk, _ in response.content.iter_chunks():
            file_stream.write(chunk)
MNRead.get() Retrieve the SciObj bytes and write them to a file or other stream. Args: file_stream: Open file-like object Stream to which the SciObj bytes will be written. pid: str vendor_specific: dict Custom HTTP headers to include in the request See also: MNRead.get().
entailment
async def synchronize(self, pid, vendor_specific=None):
    """Ask the CN to schedule synchronization of the object with the given PID."""
    mmp_dict = {"pid": pid}
    return await self._request_pyxb(
        "post",
        ["synchronize", pid],
        {},
        mmp_dict=mmp_dict,
        vendor_specific=vendor_specific,
    )
Send an object synchronization request to the CN.
entailment
def _datetime_to_iso8601(self, query_dict): """Encode any datetime query parameters to ISO8601.""" return { k: v if not isinstance(v, datetime.datetime) else v.isoformat() for k, v in list(query_dict.items()) }
Encode any datetime query parameters to ISO8601.
entailment
def to_representation(self, value):
    """Apply subfield projection to the outgoing native value, then serialize."""
    projected = apply_subfield_projection(self, value, deep=True)
    return super().to_representation(projected)
Project outgoing native value.
entailment
def synchronizeResponse(self, pid, vendorSpecific=None):
    """CNRead.synchronize(session, pid) → boolean POST /synchronize.

    Args:
        pid: Identifier of the object to synchronize.
        vendorSpecific: dict of custom headers forwarded with the request.
    """
    return self.POST(
        ['synchronize'], fields={'pid': pid}, headers=vendorSpecific
    )
CNRead.synchronize(session, pid) → boolean POST /synchronize. Args: pid: vendorSpecific:
entailment
def synchronize(self, pid, vendorSpecific=None):
    """Call synchronizeResponse() and deserialize the response to a boolean.

    See Also: synchronizeResponse()

    Args:
        pid: Identifier of the object to synchronize.
        vendorSpecific: dict of custom headers forwarded with the request.

    Returns:
        bool: Deserialized boolean response.
    """
    return self._read_boolean_response(
        self.synchronizeResponse(pid, vendorSpecific)
    )
See Also: synchronizeResponse() Args: pid: vendorSpecific: Returns:
entailment
def handle(self, *args, **options):
    """Entry point for the management command."""
    verbosity = int(options['verbosity'])
    if not self.has_filter(options):
        # No filter given: rebuild every index.
        index_builder.build()
    else:
        self.filter_indices(options, verbosity)
Command handle.
entailment
def validate_bagit_file(bagit_path):
    """Check if a BagIt file is valid.

    Args:
        bagit_path: Path to the BagIt zip archive to validate.

    Returns:
        bool: True if the archive passed all checks.

    Raises:
        ServiceFailure
            If the BagIt zip archive file fails any of the following checks:

            - Is a valid zip file.
            - The tag and manifest files are correctly formatted.
            - Contains all the files listed in the manifests.
            - The file checksums match the manifests.
    """
    _assert_zip_file(bagit_path)
    # BUG FIX: the original never closed the ZipFile, leaking the file handle.
    with zipfile.ZipFile(bagit_path) as bagit_zip:
        manifest_info_list = _get_manifest_info_list(bagit_zip)
        _validate_checksums(bagit_zip, manifest_info_list)
    return True
Check if a BagIt file is valid. Raises: ServiceFailure If the BagIt zip archive file fails any of the following checks: - Is a valid zip file. - The tag and manifest files are correctly formatted. - Contains all the files listed in the manifests. - The file checksums match the manifests.
entailment
def create_bagit_stream(dir_name, payload_info_list):
    """Create a stream containing a BagIt zip archive.

    Args:
        dir_name : str
            Name of the root directory in the zip file, under which all
            files are placed (avoids "zip bombs").
        payload_info_list: list
            List of payload_info_dict, each dict describing one file.

            - keys: pid, filename, iter, checksum, checksum_algorithm
            - If the filename is None, the pid is used for the filename.
    """
    zip_file = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
    # Resolve each payload file's in-zip path before anything references it.
    _add_path(dir_name, payload_info_list)
    byte_count, file_count = _add_payload_files(zip_file, payload_info_list)
    tag_info_list = _add_tag_files(
        zip_file, dir_name, payload_info_list, byte_count, file_count
    )
    _add_manifest_files(zip_file, dir_name, payload_info_list, tag_info_list)
    _add_tag_manifest_file(zip_file, dir_name, tag_info_list)
    return zip_file
Create a stream containing a BagIt zip archive. Args: dir_name : str The name of the root directory in the zip file, under which all the files are placed (avoids "zip bombs"). payload_info_list: list List of payload_info_dict, each dict describing a file. - keys: pid, filename, iter, checksum, checksum_algorithm - If the filename is None, the pid is used for the filename.
entailment
def _add_path(dir_name, payload_info_list):
    """Store the in-zip path of each payload file on its payload_info_dict."""
    for info_dict in payload_info_list:
        # Fall back to the PID when no filename was supplied.
        name = info_dict['filename'] if info_dict['filename'] else info_dict['pid']
        info_dict['path'] = d1_common.utils.filesystem.gen_safe_path(
            dir_name, 'data', name
        )
Add a key with the path to each payload_info_dict.
entailment
def _add_payload_files(zip_file, payload_info_list): """Add the payload files to the zip.""" payload_byte_count = 0 payload_file_count = 0 for payload_info_dict in payload_info_list: zip_file.write_iter(payload_info_dict['path'], payload_info_dict['iter']) payload_byte_count += payload_info_dict['iter'].size payload_file_count += 1 return payload_byte_count, payload_file_count
Add the payload files to the zip.
entailment
def _add_tag_files(
    zip_file, dir_name, payload_info_list, payload_byte_count, payload_file_count
):
    """Generate the tag files and register each with the zip stream.

    Returns:
        list: tag_info_list with one entry per tag file added.
    """
    tag_info_list = []
    tag_tup_list = [
        _gen_bagit_text_file_tup(),
        _gen_bag_info_file_tup(payload_byte_count, payload_file_count),
        _gen_pid_mapping_file_tup(payload_info_list),
    ]
    for tag_tup in tag_tup_list:
        _add_tag_file(zip_file, dir_name, tag_info_list, tag_tup)
    return tag_info_list
Generate the tag files and add them to the zip.
entailment
def _add_manifest_files(zip_file, dir_name, payload_info_list, tag_info_list):
    """Generate one manifest file per checksum algorithm and add each to the zip."""
    for algorithm in _get_checksum_algorithm_set(payload_info_list):
        manifest_tup = _gen_manifest_file_tup(payload_info_list, algorithm)
        _add_tag_file(zip_file, dir_name, tag_info_list, manifest_tup)
Generate the manifest files and add them to the zip.
entailment
def _add_tag_manifest_file(zip_file, dir_name, tag_info_list):
    """Generate the tag manifest file and register it with the zip stream."""
    manifest_tup = _gen_tag_manifest_file_tup(tag_info_list)
    _add_tag_file(zip_file, dir_name, tag_info_list, manifest_tup)
Generate the tag manifest file and add it to the zip.
entailment
def _add_tag_file(zip_file, dir_name, tag_info_list, tag_tup):
    """Add one tag file to the zip and record its path and checksum.

    The recorded info is later used to build the tag manifest file.
    """
    tag_name, tag_str = tag_tup
    tag_path = d1_common.utils.filesystem.gen_safe_path(dir_name, tag_name)
    tag_iter = _create_and_add_tag_iter(zip_file, tag_path, tag_str)
    tag_checksum = d1_common.checksum.calculate_checksum_on_iterator(
        tag_iter, TAG_CHECKSUM_ALGO
    )
    tag_info_list.append({'path': tag_path, 'checksum': tag_checksum})
Add a tag file to zip_file and record info for the tag manifest file.
entailment
def handle(self, *args, **options):
    """Entry point for the management command."""
    verbosity = int(options['verbosity'])
    skip_mapping = options['skip_mapping']
    if not self.has_filter(options):
        # No filter given: delete all indices.
        index_builder.delete(skip_mapping=skip_mapping)
    else:
        self.filter_indices(options, verbosity, skip_mapping=skip_mapping)
Command handle.
entailment
def pyxb_to_dict(node_list_pyxb):
    """Convert a NodeList PyXB object into a plain dict.

    Returns:
      dict : Representation of ``node_list_pyxb``, keyed on the Node
      identifier (``urn:node:*``).

    Example::

      {
        u'urn:node:ARCTIC': {
          'base_url': u'https://arcticdata.io/metacat/d1/mn',
          'description': u'The US National Science Foundation...',
          'name': u'Arctic Data Center',
          'ping': None,
          'replicate': 0,
          'state': u'up',
          'synchronize': 1,
          'type': u'mn'
        },
      }
    """

    def _node_to_dict(n):
        # Flatten the scalar attributes of a single Node element.
        return {
            'name': n.name,
            'description': n.description,
            'base_url': n.baseURL,
            'ping': n.ping,
            'replicate': n.replicate,
            'synchronize': n.synchronize,
            'type': n.type,
            'state': n.state,
        }

    # TODO: also map services, synchronization, subject, contactSubject and
    # nodeReplicationPolicy.
    return {
        n.identifier.value(): _node_to_dict(n)
        for n in sorted(node_list_pyxb.node, key=lambda n: n.identifier.value())
    }
Returns: dict : Representation of ``node_list_pyxb``, keyed on the Node identifier (``urn:node:*``). Example:: { u'urn:node:ARCTIC': { 'base_url': u'https://arcticdata.io/metacat/d1/mn', 'description': u'The US National Science Foundation...', 'name': u'Arctic Data Center', 'ping': None, 'replicate': 0, 'state': u'up', 'synchronize': 1, 'type': u'mn' }, u'urn:node:BCODMO': { 'base_url': u'https://www.bco-dmo.org/d1/mn', 'description': u'Biological and Chemical Oceanography Data...', 'name': u'Biological and Chemical Oceanography Data...', 'ping': None, 'replicate': 0, 'state': u'up', 'synchronize': 1, 'type': u'mn' }, }
entailment
def filter_queryset(self, request, queryset, view):
    """Restrict ``queryset`` to the objects the requesting user may view."""
    opts = queryset.model._meta  # pylint: disable=protected-access
    model_name = opts.model_name
    kwargs = {}
    # Storage and Relation carry no permissions of their own; permissions are
    # checked on the related Data / Collection objects instead.
    if model_name == 'storage':
        model_name = 'data'
        kwargs['perms_filter'] = 'data__pk__in'
    elif model_name == 'relation':
        model_name = 'collection'
        kwargs['perms_filter'] = 'collection__pk__in'
    permission = '{}.view_{}'.format(opts.app_label, model_name)
    return get_objects_for_user(request.user, permission, queryset, **kwargs)
Filter permissions queryset.
entailment