sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def has_object_permission(self, request, view, obj): """Check object permissions.""" # admins can do anything if request.user.is_superuser: return True # `share` permission is required for editing permissions if 'permissions' in view.action: self.perms_map['POST'] = ['%(app_label)s.share_%(model_name)s'] if view.action in ['add_data', 'remove_data']: self.perms_map['POST'] = ['%(app_label)s.add_%(model_name)s'] if hasattr(view, 'get_queryset'): queryset = view.get_queryset() else: queryset = getattr(view, 'queryset', None) assert queryset is not None, ( 'Cannot apply DjangoObjectPermissions on a view that ' 'does not set `.queryset` or have a `.get_queryset()` method.' ) model_cls = queryset.model user = request.user perms = self.get_required_object_permissions(request.method, model_cls) if not user.has_perms(perms, obj) and not AnonymousUser().has_perms(perms, obj): # If the user does not have permissions we need to determine if # they have read permissions to see 403, or not, and simply see # a 404 response. if request.method in permissions.SAFE_METHODS: # Read permissions already checked and failed, no need # to make another lookup. raise Http404 read_perms = self.get_required_object_permissions('GET', model_cls) if not user.has_perms(read_perms, obj): raise Http404 # Has read permissions. return False return True
Check object permissions.
entailment
def get_active_subject_set(self, request):
    """Get a set containing all subjects for which the current connection has
    been successfully authenticated.

    Returns:
        tuple: (primary_subject_str, all_subjects_set)
    """
    # Handle complete certificate in vendor specific extension.
    if django.conf.settings.DEBUG_GMN:
        if 'HTTP_VENDOR_INCLUDE_CERTIFICATE' in request.META:
            request.META[
                'SSL_CLIENT_CERT'
            ] = self.pem_in_http_header_to_pem_in_string(
                request.META['HTTP_VENDOR_INCLUDE_CERTIFICATE']
            )
    # Add subjects from any provided certificate and JWT and store them in
    # the Django request obj.
    cert_primary_str, cert_equivalent_set = d1_gmn.app.middleware.session_cert.get_subjects(
        request
    )
    jwt_subject_list = d1_gmn.app.middleware.session_jwt.validate_jwt_and_get_subject_list(
        request
    )
    primary_subject_str = cert_primary_str
    all_subjects_set = (
        cert_equivalent_set | {cert_primary_str} | set(jwt_subject_list)
    )
    if len(jwt_subject_list) == 1:
        jwt_primary_str = jwt_subject_list[0]
        if jwt_primary_str != cert_primary_str:
            if cert_primary_str == d1_common.const.SUBJECT_PUBLIC:
                # Only the JWT authenticated a non-public subject, so it
                # becomes the primary subject.
                primary_subject_str = jwt_primary_str
            else:
                # Fixed: added missing space between 'and' and 'the' in the
                # concatenated message.
                logging.warning(
                    'Both a certificate and a JWT were provided and the primary '
                    'subjects differ. Using the certificate for primary subject and '
                    'the JWT as equivalent.'
                )
    logging.info('Primary active subject: {}'.format(primary_subject_str))
    logging.info(
        'All active subjects: {}'.format(', '.join(sorted(all_subjects_set)))
    )
    # Handle list of subjects in vendor specific extension:
    if django.conf.settings.DEBUG_GMN:
        # This is added to any subjects obtained from cert and/or JWT.
        if 'HTTP_VENDOR_INCLUDE_SUBJECTS' in request.META:
            # Fixed: update the local set. The original referenced
            # ``request.all_subjects_set``, an attribute that is never
            # assigned, which would raise AttributeError here.
            all_subjects_set.update(
                request.META['HTTP_VENDOR_INCLUDE_SUBJECTS'].split('\t')
            )
    return primary_subject_str, all_subjects_set
Get a set containing all subjects for which the current connection has been successfully authenticated.
entailment
def log_setup(debug_bool):
    """Set up logging.

    We output only to stdout. Instead of also writing to a log file, redirect
    stdout to a log file when the script is executed from cron.

    Args:
        debug_bool: bool
            ``True``: log at DEBUG level. ``False``: log at INFO level.
    """
    level = logging.DEBUG if debug_bool else logging.INFO
    logging.config.dictConfig(
        {
            "version": 1,
            "disable_existing_loggers": False,
            "formatters": {
                "verbose": {
                    "format": "%(asctime)s %(levelname)-8s %(name)s %(module)s "
                    "%(process)d %(thread)d %(message)s",
                    "datefmt": "%Y-%m-%d %H:%M:%S",
                }
            },
            "handlers": {
                "console": {
                    "class": "logging.StreamHandler",
                    "formatter": "verbose",
                    "level": level,
                    "stream": "ext://sys.stdout",
                }
            },
            "loggers": {
                # Root logger. Fixed: removed the invalid "class" key that
                # was present here -- "class" is only valid in handler
                # configuration, not logger configuration.
                "": {
                    "handlers": ["console"],
                    "level": level,
                }
            },
        }
    )
Set up logging. We output only to stdout. Instead of also writing to a log file, redirect stdout to a log file when the script is executed from cron.
entailment
def connect(self, dsn):
    """Open a database connection and a dict-style cursor.

    The DSN may specify: dbname (the database name), user (name used to
    authenticate), password, host (defaults to UNIX socket if not provided)
    and port (defaults to 5432 if not provided).
    """
    self.con = psycopg2.connect(dsn)
    self.cur = self.con.cursor(cursor_factory=psycopg2.extras.DictCursor)
    # Autocommit: statements take effect immediately instead of being
    # wrapped in implicit transactions.
    self.con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
Connect to DB. dbname: the database name user: user name used to authenticate password: password used to authenticate host: database host address (defaults to UNIX socket if not provided) port: connection port number (defaults to 5432 if not provided)
entailment
def get_query_param(self, key, default=None):
    """Get query parameter uniformly for GET and POST requests.

    The query string is consulted first, then the request body; ``default``
    is returned when neither contains ``key``.
    """
    for source in (self.request.query_params, self.request.data):
        value = source.get(key, None)
        if value is not None:
            return value
    return default
Get query parameter uniformly for GET and POST requests.
entailment
def get_query_params(self):
    """Get combined query parameters (GET and POST).

    POST body values override query-string values on key collisions.
    """
    combined = self.request.query_params.copy()
    combined.update(self.request.data)
    return combined
Get combined query parameters (GET and POST).
entailment
def order_search(self, search):
    """Order given search by the ordering parameter given in request.

    :param search: ElasticSearch query object
    """
    ordering = self.get_query_param('ordering', self.ordering)
    if not ordering:
        return search
    sort_fields = []
    for raw_field in ordering.split(','):
        field_name = raw_field.lstrip('-')
        if field_name not in self.ordering_fields:
            raise ParseError('Ordering by `{}` is not supported.'.format(field_name))
        mapped_name = self.ordering_map.get(field_name, field_name)
        prefix = '-' if raw_field[0] == '-' else ''
        sort_fields.append('{}{}'.format(prefix, mapped_name))
    return search.sort(*sort_fields)
Order given search by the ordering parameter given in request. :param search: ElasticSearch query object
entailment
def filter_search(self, search):
    """Filter given search by the filter parameter given in request.

    :param search: ElasticSearch query object
    """
    builder = QueryBuilder(self.filtering_fields, self.filtering_map, self)
    search, unmatched = builder.build(search, self.get_query_params())
    # Always-allowed arguments never count as unsupported.
    for allowed in self.get_always_allowed_arguments():
        unmatched.pop(allowed, None)
    if unmatched:
        raise ParseError(
            'Unsupported parameter(s): {}. Please use a combination of: {}.'.format(
                ', '.join(unmatched),
                ', '.join(self.filtering_fields),
            )
        )
    return search
Filter given search by the filter parameter given in request. :param search: ElasticSearch query object
entailment
def filter_permissions(self, search):
    """Filter given query based on permissions of the user in the request.

    :param search: ElasticSearch query object
    """
    user = self.request.user
    if user.is_superuser:
        return search
    if user.is_anonymous:
        user = get_anonymous_user()
    group_filters = [
        Q('match', groups_with_permissions=group.pk)
        for group in user.groups.all()
    ]
    filters = (
        [Q('match', users_with_permissions=user.pk)]
        + group_filters
        + [Q('match', public_permission=True)]
    )
    # `minimum_should_match` is set to 1 by default
    return search.query('bool', should=filters)
Filter given query based on permissions of the user in the request. :param search: ElasticSearch query object
entailment
def paginate_response(self, queryset, serializers_kwargs=None):
    """Optionally return paginated response.

    If pagination parameters are provided in the request, then paginated
    response is returned, otherwise response is not paginated.

    :param queryset: queryset (or list) to serialize
    :param serializers_kwargs: optional extra keyword arguments passed to
        the serializer
    """
    # Fixed: the original used a mutable default argument (``{}``), which
    # is shared across all calls of the method.
    if serializers_kwargs is None:
        serializers_kwargs = {}
    page = self.paginate_queryset(queryset)
    if page is not None:
        serializer = self.get_serializer(page, many=True, **serializers_kwargs)
        return self.get_paginated_response(serializer.data)
    serializer = self.get_serializer(queryset, many=True, **serializers_kwargs)
    return Response(serializer.data)
Optionally return paginated response. If pagination parameters are provided in the request, then paginated response is returned, otherwise response is not paginated.
entailment
def search(self):
    """Handle the search request.

    Builds the ElasticSearch query, applies filtering, ordering and
    permission restrictions, and caps the result size.
    """
    search = self.document_class().search()  # pylint: disable=not-callable
    for transform in (
        self.custom_filter,
        self.filter_search,
        self.order_search,
        self.filter_permissions,
    ):
        search = transform(search)
    if search.count() > ELASTICSEARCH_SIZE:
        limit = self.paginator.get_limit(self.request)
        if not limit or limit > ELASTICSEARCH_SIZE:
            raise TooManyResults()
        search = search.extra(size=ELASTICSEARCH_SIZE)
    return search
Handle the search request.
entailment
def list_with_post(self, request):
    """Endpoint handler.

    For search requests, runs the ElasticSearch query, maps the hits back to
    database objects (preserving the search ordering), and returns the
    serialized (optionally paginated) result. Otherwise falls back to plain
    queryset filtering and pagination.
    """
    if self.is_search_request():
        search = self.search()
        page = self.paginate_queryset(search)
        if page is None:
            items = search
        else:
            items = page
        try:
            primary_keys = []
            order_map_cases = []
            # Preserve the ElasticSearch result order in the database query
            # by building a CASE expression that maps each pk to its rank.
            for order, item in enumerate(items):
                pk = item[self.primary_key_field]
                primary_keys.append(pk)
                order_map_cases.append(When(pk=pk, then=Value(order)))
            queryset = self.get_queryset().filter(
                pk__in=primary_keys
            ).order_by(
                Case(*order_map_cases, output_field=IntegerField()).asc()
            )
        except KeyError:
            # Raised when the index document has no primary-key field.
            raise KeyError("Combined viewset requires that your index contains a field with "
                           "the primary key. By default this field is called 'id', but you "
                           "can change it by setting primary_key_field.")
        # Pagination must be handled differently.
        serializer = self.get_serializer(queryset, many=True)
        if page is not None:
            return self.get_paginated_response(serializer.data)
        return Response(serializer.data)
    else:
        queryset = self.filter_queryset(self.get_queryset())
        return self.paginate_response(queryset)
Endpoint handler.
entailment
def getattr(self, path, fh):
    """Called by FUSE when the attributes for a file or directory are
    required.

    Returns a dictionary with keys identical to the stat C structure of
    stat(2). st_atime, st_mtime and st_ctime should be floats. On OSX,
    st_nlink should count all files inside the directory. On Linux, only the
    subdirectories are counted. The 'st_dev' and 'st_blksize' fields are
    ignored. The 'st_ino' field is ignored except if the 'use_ino' mount
    option is given. This method gets very heavy traffic.
    """
    self._raise_error_if_os_special_file(path)
    attrs = self._get_attributes_through_cache(path)
    return self._stat_from_attributes(attrs)
Called by FUSE when the attributes for a file or directory are required. Returns a dictionary with keys identical to the stat C structure of stat(2). st_atime, st_mtime and st_ctime should be floats. On OSX, st_nlink should count all files inside the directory. On Linux, only the subdirectories are counted. The 'st_dev' and 'st_blksize' fields are ignored. The 'st_ino' field is ignored except if the 'use_ino' mount option is given. This method gets very heavy traffic.
entailment
def readdir(self, path, fh):
    """Called by FUSE when a directory is opened.

    Returns a list of file and directory names for the directory, cached
    per path.
    """
    log.debug('readdir(): {}'.format(path))
    if path in self._directory_cache:
        return self._directory_cache[path]
    listing = self._get_directory(path)
    self._directory_cache[path] = listing
    return listing
Called by FUSE when a directory is opened. Returns a list of file and directory names for the directory.
entailment
def open(self, path, flags):
    """Called by FUSE when a file is opened.

    Determines if the provided path and open flags are valid.
    """
    log.debug('open(): {}'.format(path))
    # ONEDrive is currently read only. Anything but read access is denied.
    if (flags & self._READ_ONLY_ACCESS_MODE) != os.O_RDONLY:
        self._raise_error_permission_denied(path)
    # Any file in the filesystem can be opened.
    attribute = self._get_attributes_through_cache(path)
    # NOTE(review): FUSE open() conventionally returns a file handle; this
    # returns the result of ``is_dir()`` instead -- confirm this is
    # intentional (e.g. the handle is unused by this read-only filesystem).
    return attribute.is_dir()
Called by FUSE when a file is opened. Determines if the provided path and open flags are valid.
entailment
def get_inline_expression(self, text):
    """Extract an inline expression from the given text.

    Returns the text between the configured inline tags, or ``None`` when
    the text is not wrapped in them.
    """
    text = text.strip()
    start_tag, end_tag = self.inline_tags
    if not text.startswith(start_tag) or not text.endswith(end_tag):
        return
    # Generalized: slice off the tags by their actual lengths instead of the
    # previous hard-coded two-character assumption ([2:-2]).
    return text[len(start_tag):len(text) - len(end_tag)]
Extract an inline expression from the given text.
entailment
def _filter_queryset(self, queryset):
    """Filter queryset by entity, label and position.

    Due to a bug in django-filter these filters have to be applied manually:

    https://github.com/carltongibson/django-filter/issues/883
    """
    # Multi-value query parameters; `label` and `position` entries are
    # matched to `entity` entries by list position.
    entities = self.request.query_params.getlist('entity')
    labels = self.request.query_params.getlist('label')
    positions = self.request.query_params.getlist('position')
    # NOTE(review): the error messages say `labels`/`positions`/`entities`
    # while the actual query parameters are `label`/`position`/`entity` --
    # confirm which naming the API documents.
    if labels and len(labels) != len(entities):
        raise exceptions.ParseError(
            'If `labels` query parameter is given, also `entities` '
            'must be given and they must be of the same length.'
        )
    if positions and len(positions) != len(entities):
        raise exceptions.ParseError(
            'If `positions` query parameter is given, also `entities` '
            'must be given and they must be of the same length.'
        )
    if entities:
        # zip_longest pads missing labels/positions with None, so filters
        # are only added for values that were actually supplied.
        for entity, label, position in zip_longest(entities, labels, positions):
            filter_params = {'entities__pk': entity}
            if label:
                filter_params['relationpartition__label'] = label
            if position:
                filter_params['relationpartition__position'] = position
            queryset = queryset.filter(**filter_params)
    return queryset
Filter queryset by entity, label and position. Due to a bug in django-filter these filters have to be applied manually: https://github.com/carltongibson/django-filter/issues/883
entailment
def update(self, request, *args, **kwargs):
    """Update the ``Relation`` object.

    Reject the update if user doesn't have ``EDIT`` permission on the
    collection referenced in the ``Relation``.
    """
    instance = self.get_object()
    user = request.user
    allowed = user.is_superuser or user.has_perm('edit_collection', instance.collection)
    if not allowed:
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    return super().update(request, *args, **kwargs)
Update the ``Relation`` object. Reject the update if user doesn't have ``EDIT`` permission on the collection referenced in the ``Relation``.
entailment
def _process_permission(perm):
    """Rebuild indexes affected by the given permission."""
    # XXX: Optimize: rebuild only permissions, not whole document
    codename = perm.permission.codename
    # Only `view*` and `owner*` permissions affect index visibility.
    if codename.startswith(('view', 'owner')):
        index_builder.build(perm.content_object)
Rebuild indexes affected by the given permission.
entailment
def parse_response(response, encoding='utf-8'):
    """Parse a multipart Requests.Response into a tuple of BodyPart objects.

    Args:
        response: Requests.Response
        encoding: The parser will assume that any text in the HTML body is
            encoded with this encoding when decoding it for use in the
            ``text`` attribute.

    Returns:
        tuple of BodyPart
            Members: headers (CaseInsensitiveDict), content (bytes),
            text (Unicode), encoding (str).
    """
    decoder = requests_toolbelt.multipart.decoder.MultipartDecoder.from_response(
        response, encoding
    )
    return decoder.parts
Parse a multipart Requests.Response into a tuple of BodyPart objects. Args: response: Requests.Response encoding: The parser will assume that any text in the HTML body is encoded with this encoding when decoding it for use in the ``text`` attribute. Returns: tuple of BodyPart Members: headers (CaseInsensitiveDict), content (bytes), text (Unicode), encoding (str).
entailment
def parse_str(mmp_bytes, content_type, encoding='utf-8'):
    """Parse multipart document bytes into a tuple of BodyPart objects.

    Args:
        mmp_bytes: bytes
            Multipart document.
        content_type : str
            Must be on the form, ``multipart/form-data; boundary=<BOUNDARY>``,
            where ``<BOUNDARY>`` is the string that separates the parts of the
            multipart document in ``mmp_bytes``. In HTTP requests and
            responses, it is passed in the Content-Type header.
        encoding : str
            The coding used for the text in the HTML body.

    Returns:
        tuple of BodyPart
            Members: headers (CaseInsensitiveDict), content (bytes),
            text (Unicode), encoding (str).
    """
    decoder = requests_toolbelt.multipart.decoder.MultipartDecoder(
        mmp_bytes, content_type, encoding
    )
    return decoder.parts
Parse multipart document bytes into a tuple of BodyPart objects. Args: mmp_bytes: bytes Multipart document. content_type : str Must be on the form, ``multipart/form-data; boundary=<BOUNDARY>``, where ``<BOUNDARY>`` is the string that separates the parts of the multipart document in ``mmp_bytes``. In HTTP requests and responses, it is passed in the Content-Type header. encoding : str The coding used for the text in the HTML body. Returns: tuple of BodyPart Members: headers (CaseInsensitiveDict), content (bytes), text (Unicode), encoding (str).
entailment
def normalize(body_part_tup):
    """Normalize a tuple of BodyPart objects to a string.

    Normalization is done by sorting the body_parts by the Content-
    Disposition headers, which is typically on the form, ``form-data;
    name="name_of_part``.
    """
    def _disposition(part):
        return part.headers[b'Content-Disposition']

    ordered = sorted(body_part_tup, key=_disposition)
    sections = [
        '{}\n\n{}'.format(str(_disposition(part), part.encoding), part.text)
        for part in ordered
    ]
    return '\n\n'.join(sections)
Normalize a tuple of BodyPart objects to a string. Normalization is done by sorting the body_parts by the Content- Disposition headers, which is typically on the form, ``form-data; name="name_of_part``.
entailment
def is_multipart(header_dict):
    """
    Args:
        header_dict : CaseInsensitiveDict

    Returns:
        bool: ``True`` if ``header_dict`` has a Content-Type key (case
        insensitive) with value that begins with 'multipart'.
    """
    lowered = {key.lower(): value for key, value in header_dict.items()}
    content_type = lowered.get('content-type', '')
    return content_type.startswith('multipart')
Args: header_dict : CaseInsensitiveDict Returns: bool: ``True`` if ``header_dict`` has a Content-Type key (case insensitive) with value that begins with 'multipart'.
entailment
def _split_path_by_reserved_name(self, path):
    """Return: object_tree_path, resolver, controlled_path."""
    for idx, element in enumerate(path):
        is_reserved = (
            element in self._resolvers or element == self._get_readme_filename()
        )
        if is_reserved:
            return path[:idx], path[idx], path[idx + 1:]
    raise d1_onedrive.impl.onedrive_exceptions.PathException(
        'Invalid folder: %s' % str(path)
    )
Return: object_tree_path, resolver, controlled_path.
entailment
def _generate_readme_text(self, object_tree_path): """Generate a human readable description of the folder in text format.""" wdef_folder = self._object_tree.get_source_tree_folder(object_tree_path) res = StringIO() if len(object_tree_path): folder_name = object_tree_path[-1] else: folder_name = 'root' header = 'ObjectTree Folder "{}"'.format(folder_name) res.write(header + '\n') res.write('{}\n\n'.format('=' * len(header))) res.write( 'The content present in object_tree folders is determined by a list\n' ) res.write( 'of specific identifiers and by queries applied against the DataONE\n' ) res.write('search index.\n\n') res.write('Queries:\n\n') if len(wdef_folder['queries']): for query in wdef_folder['queries']: res.write('- {}\n'.format(query)) else: res.write('No queries specified at this level.\n') res.write('\n\n') res.write('Identifiers:\n\n') if len(wdef_folder['identifiers']): for pid in wdef_folder['identifiers']: res.write('- {}\n'.format(pid)) else: res.write('No individual identifiers selected at this level.\n') res.write('\n\n') res.write('Sub-folders:\n\n') if len(wdef_folder['collections']): for f in wdef_folder['collections']: res.write('- {}\n'.format(f)) else: res.write('No object_tree sub-folders are specified at this level.\n') return res.getvalue().encode('utf-8')
Generate a human readable description of the folder in text format.
entailment
def _serialize_data(self, data):
    """Return serialized data or list of ids, depending on `hydrate_data`
    query param."""
    hydrate = self.request and self.request.query_params.get('hydrate_data', False)
    if not hydrate:
        return [item.id for item in data]
    serializer = DataSerializer(data, many=True, read_only=True)
    serializer.bind('data', self)
    return serializer.data
Return serialized data or list of ids, depending on `hydrate_data` query param.
entailment
def _filter_queryset(self, perms, queryset):
    """Filter objects by the permissions of the user in the request."""
    if self.request:
        user = self.request.user
    else:
        user = AnonymousUser()
    return get_objects_for_user(user, perms, queryset)
Filter object objects by permissions of user in request.
entailment
def get_data(self, collection):
    """Return serialized list of data objects on collection that user has
    `view` permission on."""
    visible = self._filter_queryset('view_data', collection.data.all())
    return self._serialize_data(visible)
Return serialized list of data objects on collection that user has `view` permission on.
entailment
def get_fields(self):
    """Dynamically adapt fields based on the current request.

    GET requests get a computed `data` field; all other methods get a
    read-only primary-key field.
    """
    fields = super(CollectionSerializer, self).get_fields()
    if self.request.method == "GET":
        data_field = serializers.SerializerMethodField()
    else:
        data_field = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
    fields['data'] = data_field
    return fields
Dynamically adapt fields based on the current request.
entailment
def load_engines(manager, class_name, base_module, engines, class_key='ENGINE', engine_type='engine'):
    """Load engines.

    Each entry in ``engines`` is either a module path string or a settings
    dict whose ``class_key`` entry is the module path. The module must
    expose a ``class_name`` class extending ``BaseEngine``. Returns a dict
    mapping engine name to engine instance; raises ``ImproperlyConfigured``
    on any misconfiguration.
    """
    loaded_engines = {}
    for module_name_or_dict in engines:
        # Normalize plain module-path strings into the dict form.
        if not isinstance(module_name_or_dict, dict):
            module_name_or_dict = {
                class_key: module_name_or_dict
            }
        try:
            module_name = module_name_or_dict[class_key]
            engine_settings = module_name_or_dict
        except KeyError:
            raise ImproperlyConfigured("If {} specification is a dictionary, it must define {}".format(
                engine_type, class_key))
        try:
            engine_module = import_module(module_name)
            try:
                engine = getattr(engine_module, class_name)(manager=manager, settings=engine_settings)
                if not isinstance(engine, BaseEngine):
                    raise ImproperlyConfigured("{} module {} class {} must extend BaseEngine".format(
                        engine_type.capitalize(), module_name, class_name))
            except AttributeError:
                # Module imported fine but does not define the expected class.
                raise ImproperlyConfigured("{} module {} is missing a {} class".format(
                    engine_type.capitalize(), module_name, class_name))
            if engine.get_name() in loaded_engines:
                raise ImproperlyConfigured("Duplicated {} {}".format(engine_type, engine.get_name()))
            loaded_engines[engine.get_name()] = engine
        except ImportError as ex:
            # The engine wasn't found. Display a helpful error message listing all possible
            # (built-in) engines.
            engine_dir = os.path.join(os.path.dirname(upath(__file__)), base_module)
            try:
                builtin_engines = [name for _, name, _ in pkgutil.iter_modules([engine_dir])]
            except EnvironmentError:
                builtin_engines = []
            if module_name not in ['resolwe.flow.{}.{}'.format(base_module, builtin_engine)
                                   for builtin_engine in builtin_engines]:
                engine_reprs = map(repr, sorted(builtin_engines))
                error_msg = ("{} isn't an available dataflow {}.\n"
                             "Try using 'resolwe.flow.{}.XXX', where XXX is one of:\n"
                             "    {}\n"
                             "Error was: {}".format(
                                 module_name, engine_type, base_module, ", ".join(engine_reprs), ex
                             ))
                raise ImproperlyConfigured(error_msg)
            else:
                # If there's some other error, this must be an error in Django
                raise
    return loaded_engines
Load engines.
entailment
def is_sysmeta_pyxb(sysmeta_pyxb):
    """Args:
        sysmeta_pyxb: Object that may or may not be a SystemMetadata PyXB
        object.

    Returns:
        bool:
            - ``True`` if ``sysmeta_pyxb`` is a SystemMetadata PyXB object.
            - ``False`` if ``sysmeta_pyxb`` is not a PyXB object or is a PyXB
              object of a type other than SystemMetadata.
    """
    if not d1_common.type_conversions.is_pyxb_d1_type(sysmeta_pyxb):
        return False
    type_name = d1_common.type_conversions.pyxb_get_type_name(sysmeta_pyxb)
    return type_name == 'SystemMetadata'
Args: sysmeta_pyxb: Object that may or may not be a SystemMetadata PyXB object. Returns: bool: - ``True`` if ``sysmeta_pyxb`` is a SystemMetadata PyXB object. - ``False`` if ``sysmeta_pyxb`` is not a PyXB object or is a PyXB object of a type other than SystemMetadata.
entailment
def normalize_in_place(sysmeta_pyxb, reset_timestamps=False):
    """Normalize SystemMetadata PyXB object in-place.

    Args:
        sysmeta_pyxb: SystemMetadata PyXB object to normalize.

        reset_timestamps: bool
            ``True``: Timestamps in the SystemMetadata are set to a standard
            value so that objects that are compared after normalization
            register as equivalent if only their timestamps differ.

    Notes:
        The SystemMetadata is normalized by removing any redundant
        information and ordering all sections where there are no semantics
        associated with the order. The normalized SystemMetadata is intended
        to be semantically equivalent to the un-normalized one.
    """
    if sysmeta_pyxb.accessPolicy is not None:
        sysmeta_pyxb.accessPolicy = d1_common.wrap.access_policy.get_normalized_pyxb(
            sysmeta_pyxb.accessPolicy
        )
    # Sort order-insensitive list sections so equivalent docs compare equal.
    if getattr(sysmeta_pyxb, 'mediaType', False):
        d1_common.xml.sort_value_list_pyxb(sysmeta_pyxb.mediaType.property_)
    if getattr(sysmeta_pyxb, 'replicationPolicy', False):
        d1_common.xml.sort_value_list_pyxb(
            sysmeta_pyxb.replicationPolicy.preferredMemberNode
        )
        d1_common.xml.sort_value_list_pyxb(
            sysmeta_pyxb.replicationPolicy.blockedMemberNode
        )
    d1_common.xml.sort_elements_by_child_values(
        sysmeta_pyxb.replica,
        ['replicaVerified', 'replicaMemberNode', 'replicationStatus'],
    )
    # Coerce archived to a plain bool (may be None or a PyXB boolean).
    sysmeta_pyxb.archived = bool(sysmeta_pyxb.archived)
    if reset_timestamps:
        # Replace all timestamps with the Unix epoch so timestamp-only
        # differences disappear after normalization.
        epoch_dt = datetime.datetime(1970, 1, 1, tzinfo=d1_common.date_time.UTC())
        sysmeta_pyxb.dateUploaded = epoch_dt
        sysmeta_pyxb.dateSysMetadataModified = epoch_dt
        for replica_pyxb in getattr(sysmeta_pyxb, 'replica', []):
            replica_pyxb.replicaVerified = epoch_dt
    else:
        # Keep the timestamps but round them so sub-precision differences
        # don't break equivalence checks.
        sysmeta_pyxb.dateUploaded = d1_common.date_time.round_to_nearest(
            sysmeta_pyxb.dateUploaded
        )
        sysmeta_pyxb.dateSysMetadataModified = d1_common.date_time.round_to_nearest(
            sysmeta_pyxb.dateSysMetadataModified
        )
        for replica_pyxb in getattr(sysmeta_pyxb, 'replica', []):
            replica_pyxb.replicaVerified = d1_common.date_time.round_to_nearest(
                replica_pyxb.replicaVerified
            )
Normalize SystemMetadata PyXB object in-place. Args: sysmeta_pyxb: SystemMetadata PyXB object to normalize. reset_timestamps: bool ``True``: Timestamps in the SystemMetadata are set to a standard value so that objects that are compared after normalization register as equivalent if only their timestamps differ. Notes: The SystemMetadata is normalized by removing any redundant information and ordering all sections where there are no semantics associated with the order. The normalized SystemMetadata is intended to be semantically equivalent to the un-normalized one.
entailment
def are_equivalent_pyxb(a_pyxb, b_pyxb, ignore_timestamps=False):
    """Determine if SystemMetadata PyXB objects are semantically equivalent.

    Normalize then compare SystemMetadata PyXB objects for equivalency.

    Warning: both input objects are normalized in-place as a side effect.

    Args:
        a_pyxb, b_pyxb : SystemMetadata PyXB objects to compare

        ignore_timestamps: bool
            ``True``: Timestamps in the SystemMetadata are set to a standard
            value so that objects that are compared after normalization
            register as equivalent if only their timestamps differ.

    Returns:
        bool: **True** if SystemMetadata PyXB objects are semantically
        equivalent.

    Notes:
        The SystemMetadata is normalized by removing any redundant
        information and ordering all sections where there are no semantics
        associated with the order. The normalized SystemMetadata is intended
        to be semantically equivalent to the un-normalized one.
    """
    normalize_in_place(a_pyxb, ignore_timestamps)
    normalize_in_place(b_pyxb, ignore_timestamps)
    a_xml = d1_common.xml.serialize_to_xml_str(a_pyxb)
    b_xml = d1_common.xml.serialize_to_xml_str(b_pyxb)
    are_equivalent = d1_common.xml.are_equivalent(a_xml, b_xml)
    if not are_equivalent:
        logger.debug('XML documents not equivalent:')
        logger.debug(d1_common.xml.format_diff_xml(a_xml, b_xml))
    return are_equivalent
Determine if SystemMetadata PyXB objects are semantically equivalent. Normalize then compare SystemMetadata PyXB objects for equivalency. Args: a_pyxb, b_pyxb : SystemMetadata PyXB objects to compare reset_timestamps: bool ``True``: Timestamps in the SystemMetadata are set to a standard value so that objects that are compared after normalization register as equivalent if only their timestamps differ. Returns: bool: **True** if SystemMetadata PyXB objects are semantically equivalent. Notes: The SystemMetadata is normalized by removing any redundant information and ordering all sections where there are no semantics associated with the order. The normalized SystemMetadata is intended to be semantically equivalent to the un-normalized one.
entailment
def are_equivalent_xml(a_xml, b_xml, ignore_timestamps=False):
    """Determine if two SystemMetadata XML docs are semantically equivalent.

    Normalize then compare SystemMetadata XML docs for equivalency.

    Args:
        a_xml, b_xml: bytes
            UTF-8 encoded SystemMetadata XML docs to compare

        ignore_timestamps: bool
            ``True``: Timestamps in the SystemMetadata are ignored so that
            objects that are compared register as equivalent if only their
            timestamps differ.

    Returns:
        bool: **True** if SystemMetadata XML docs are semantically
        equivalent.

    Notes:
        The SystemMetadata is normalized by removing any redundant
        information and ordering all sections where there are no semantics
        associated with the order. The normalized SystemMetadata is intended
        to be semantically equivalent to the un-normalized one.
    """
    # Fixed: removed a stray second docstring (a no-op string statement
    # left over after the real docstring) that duplicated this description.
    return are_equivalent_pyxb(
        d1_common.xml.deserialize(a_xml),
        d1_common.xml.deserialize(b_xml),
        ignore_timestamps,
    )
Determine if two SystemMetadata XML docs are semantically equivalent. Normalize then compare SystemMetadata XML docs for equivalency. Args: a_xml, b_xml: bytes UTF-8 encoded SystemMetadata XML docs to compare ignore_timestamps: bool ``True``: Timestamps in the SystemMetadata are ignored so that objects that are compared register as equivalent if only their timestamps differ. Returns: bool: **True** if SystemMetadata XML docs are semantically equivalent. Notes: The SystemMetadata is normalized by removing any redundant information and ordering all sections where there are no semantics associated with the order. The normalized SystemMetadata is intended to be semantically equivalent to the un-normalized one.
entailment
def clear_elements(sysmeta_pyxb, clear_replica=True, clear_serial_version=True):
    """Remove selected elements from a SystemMetadata PyXB object in-place.

    ``clear_replica`` causes any replica information to be removed from the
    object, since replica information is often different between MN and CN.
    ``clear_serial_version`` removes the serial version. The replication
    policy is always removed.
    """
    for enabled, attr_name in (
        (clear_replica, 'replica'),
        (clear_serial_version, 'serialVersion'),
    ):
        if enabled:
            setattr(sysmeta_pyxb, attr_name, None)
    sysmeta_pyxb.replicationPolicy = None
{clear_replica} causes any replica information to be removed from the object. {clear_replica} ignores any differences in replica information, as this information is often different between MN and CN.
entailment
def update_elements(dst_pyxb, src_pyxb, el_list):
    """Copy elements specified in ``el_list`` from ``src_pyxb`` to
    ``dst_pyxb``.

    Only elements that are children of root are supported. See
    SYSMETA_ROOT_CHILD_LIST. If an element in ``el_list`` does not exist in
    ``src_pyxb``, it is removed from ``dst_pyxb``.
    """
    allowed_names = set(SYSMETA_ROOT_CHILD_LIST)
    invalid_names = sorted(set(el_list) - allowed_names)
    if invalid_names:
        raise ValueError(
            'Passed one or more invalid elements. invalid="{}"'.format(
                ', '.join(invalid_names)
            )
        )
    for name in el_list:
        setattr(dst_pyxb, name, getattr(src_pyxb, name, None))
Copy elements specified in ``el_list`` from ``src_pyxb`` to ``dst_pyxb`` Only elements that are children of root are supported. See SYSMETA_ROOT_CHILD_LIST. If an element in ``el_list`` does not exist in ``src_pyxb``, it is removed from ``dst_pyxb``.
entailment
def get_full_perm(perm, obj): """Join action with the content type of ``obj``. Permission is returned in the format of ``<action>_<object_type>``. """ ctype = ContentType.objects.get_for_model(obj) # Camel case class names are converted into a space-separated # content types, so spaces have to be removed. ctype = str(ctype).replace(' ', '') return '{}_{}'.format(perm.lower(), ctype)
Join action with the content type of ``obj``. Permission is returned in the format of ``<action>_<object_type>``.
entailment
def copy_permissions(src_obj, dest_obj): """Copy permissions form ``src_obj`` to ``dest_obj``.""" def _process_permission(codename, user_or_group, dest_obj, relabel): """Process single permission.""" if relabel: codename = change_perm_ctype(codename, dest_obj) if codename not in dest_all_perms: return # dest object doesn't have matching permission assign_perm(codename, user_or_group, dest_obj) src_obj_ctype = ContentType.objects.get_for_model(src_obj) dest_obj_ctype = ContentType.objects.get_for_model(dest_obj) dest_all_perms = get_all_perms(dest_obj) relabel = (src_obj_ctype != dest_obj_ctype) for perm in UserObjectPermission.objects.filter(object_pk=src_obj.pk, content_type=src_obj_ctype): _process_permission(perm.permission.codename, perm.user, dest_obj, relabel) for perm in GroupObjectPermission.objects.filter(object_pk=src_obj.pk, content_type=src_obj_ctype): _process_permission(perm.permission.codename, perm.group, dest_obj, relabel)
Copy permissions form ``src_obj`` to ``dest_obj``.
entailment
def fetch_user(query): """Get user by ``pk`` or ``username``. Raise error if it doesn't exist.""" user_filter = {'pk': query} if query.isdigit() else {'username': query} user_model = get_user_model() try: return user_model.objects.get(**user_filter) except user_model.DoesNotExist: raise exceptions.ParseError("Unknown user: {}".format(query))
Get user by ``pk`` or ``username``. Raise error if it doesn't exist.
entailment
def fetch_group(query): """Get group by ``pk`` or ``name``. Raise error if it doesn't exist.""" group_filter = {'pk': query} if query.isdigit() else {'name': query} try: return Group.objects.get(**group_filter) except Group.DoesNotExist: raise exceptions.ParseError("Unknown group: {}".format(query))
Get group by ``pk`` or ``name``. Raise error if it doesn't exist.
entailment
def check_owner_permission(payload, allow_user_owner): """Raise ``PermissionDenied``if ``owner`` found in ``data``.""" for entity_type in ['users', 'groups']: for perm_type in ['add', 'remove']: for perms in payload.get(entity_type, {}).get(perm_type, {}).values(): if 'owner' in perms: if entity_type == 'users' and allow_user_owner: continue if entity_type == 'groups': raise exceptions.ParseError("Owner permission cannot be assigned to a group") raise exceptions.PermissionDenied("Only owners can grant/revoke owner permission")
Raise ``PermissionDenied``if ``owner`` found in ``data``.
entailment
def check_public_permissions(payload): """Raise ``PermissionDenied`` if public permissions are too open.""" allowed_public_permissions = ['view', 'add', 'download'] for perm_type in ['add', 'remove']: for perm in payload.get('public', {}).get(perm_type, []): if perm not in allowed_public_permissions: raise exceptions.PermissionDenied("Permissions for public users are too open")
Raise ``PermissionDenied`` if public permissions are too open.
entailment
def check_user_permissions(payload, user_pk): """Raise ``PermissionDenied`` if ``payload`` includes ``user_pk``.""" for perm_type in ['add', 'remove']: user_pks = payload.get('users', {}).get(perm_type, {}).keys() if user_pk in user_pks: raise exceptions.PermissionDenied("You cannot change your own permissions")
Raise ``PermissionDenied`` if ``payload`` includes ``user_pk``.
entailment
def remove_permission(payload, permission): """Remove all occurrences of ``permission`` from ``payload``.""" payload = copy.deepcopy(payload) for entity_type in ['users', 'groups']: for perm_type in ['add', 'remove']: for perms in payload.get(entity_type, {}).get(perm_type, {}).values(): if permission in perms: perms.remove(permission) for perm_type in ['add', 'remove']: perms = payload.get('public', {}).get(perm_type, []) if permission in perms: perms.remove(permission) return payload
Remove all occurrences of ``permission`` from ``payload``.
entailment
def update_permission(obj, data): """Update object permissions.""" full_permissions = get_all_perms(obj) def apply_perm(perm_func, perms, entity): """Apply permissions using given ``perm_func``. ``perm_func`` is intended to be ``assign_perms`` or ``remove_perms`` shortcut function from ``django-guardian``, but can be any function that accepts permission codename, user/group and object parameters (in this order). If given permission does not exist, ``exceptions.ParseError`` is raised. "ALL" passed as ``perms`` parameter, will call ``perm_function`` with ``full_permissions`` list. :param func perm_func: Permissions function to be applied :param list params: list of params to be allpied :param entity: user or group to be passed to ``perm_func`` :type entity: `~django.contrib.auth.models.User` or `~django.contrib.auth.models.Group` """ if perms == 'ALL': perms = full_permissions for perm in perms: perm_codename = get_full_perm(perm, obj) if perm_codename not in full_permissions: raise exceptions.ParseError("Unknown permission: {}".format(perm)) perm_func(perm_codename, entity, obj) def set_permissions(entity_type, perm_type): """Set object permissions.""" perm_func = assign_perm if perm_type == 'add' else remove_perm fetch_fn = fetch_user if entity_type == 'users' else fetch_group for entity_id in data.get(entity_type, {}).get(perm_type, []): entity = fetch_fn(entity_id) if entity: perms = data[entity_type][perm_type][entity_id] apply_perm(perm_func, perms, entity) def set_public_permissions(perm_type): """Set public permissions.""" perm_func = assign_perm if perm_type == 'add' else remove_perm user = AnonymousUser() perms = data.get('public', {}).get(perm_type, []) apply_perm(perm_func, perms, user) with transaction.atomic(): set_permissions('users', 'add') set_permissions('users', 'remove') set_permissions('groups', 'add') set_permissions('groups', 'remove') set_public_permissions('add') set_public_permissions('remove')
Update object permissions.
entailment
def assign_contributor_permissions(obj, contributor=None): """Assign all permissions to object's contributor.""" for permission in get_all_perms(obj): assign_perm(permission, contributor if contributor else obj.contributor, obj)
Assign all permissions to object's contributor.
entailment
async def start(self): """Start process execution.""" # Workaround for pylint issue #1469 # (https://github.com/PyCQA/pylint/issues/1469). self.proc = await subprocess.create_subprocess_exec( # pylint: disable=no-member *shlex.split(self.command), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) self.stdout = self.proc.stdout return self.proc.pid
Start process execution.
entailment
async def run_script(self, script): """Execute the script and save results.""" script = os.linesep.join(['set -x', 'set +B', script, 'exit']) + os.linesep self.proc.stdin.write(script.encode('utf-8')) await self.proc.stdin.drain() self.proc.stdin.close()
Execute the script and save results.
entailment
async def terminate(self): """Terminate a running script.""" self.proc.terminate() await asyncio.wait_for(self.proc.wait(), self.kill_delay) if self.proc.returncode is None: self.proc.kill() await self.proc.wait() await super().terminate()
Terminate a running script.
entailment
async def _make_connection(self): """Construct a connection to Redis.""" return await aioredis.create_redis( 'redis://{}:{}'.format( self._redis_params.get('host', 'localhost'), self._redis_params.get('port', 6379) ), db=int(self._redis_params.get('db', 1)) )
Construct a connection to Redis.
entailment
async def _call_redis(self, meth, *args, **kwargs): """Perform a Redis call and handle connection dropping.""" while True: try: if not self._redis: self._redis = await self._make_connection() return await meth(self._redis, *args, **kwargs) except aioredis.RedisError: logger.exception("Redis connection error") if self._redis: self._redis.close() await self._redis.wait_closed() self._redis = None await asyncio.sleep(3)
Perform a Redis call and handle connection dropping.
entailment
async def clear_queue(self): """Reset the executor queue channel to an empty state.""" conn = await self._make_connection() try: script = """ local keys = redis.call('KEYS', ARGV[1]) redis.call('DEL', unpack(keys)) """ await conn.eval( script, keys=[], args=['*{}*'.format(settings.FLOW_MANAGER['REDIS_PREFIX'])], ) finally: conn.close()
Reset the executor queue channel to an empty state.
entailment
def terminate(self): """Stop the standalone manager.""" logger.info(__( "Terminating Resolwe listener on channel '{}'.", state.MANAGER_EXECUTOR_CHANNELS.queue )) self._should_stop = True
Stop the standalone manager.
entailment
def _queue_response_channel(self, obj): """Generate the feedback channel name from the object's id. :param obj: The Channels message object. """ return '{}.{}'.format(state.MANAGER_EXECUTOR_CHANNELS.queue_response, obj[ExecutorProtocol.DATA_ID])
Generate the feedback channel name from the object's id. :param obj: The Channels message object.
entailment
async def _send_reply(self, obj, reply): """Send a reply with added standard fields back to executor. :param obj: The original Channels message object to which we're replying. :param reply: The message contents dictionary. The data id is added automatically (``reply`` is modified in place). """ reply.update({ ExecutorProtocol.DATA_ID: obj[ExecutorProtocol.DATA_ID], }) await self._call_redis(aioredis.Redis.rpush, self._queue_response_channel(obj), json.dumps(reply))
Send a reply with added standard fields back to executor. :param obj: The original Channels message object to which we're replying. :param reply: The message contents dictionary. The data id is added automatically (``reply`` is modified in place).
entailment
def hydrate_spawned_files(self, exported_files_mapper, filename, data_id): """Pop the given file's map from the exported files mapping. :param exported_files_mapper: The dict of file mappings this process produced. :param filename: The filename to format and remove from the mapping. :param data_id: The id of the :meth:`~resolwe.flow.models.Data` object owning the mapping. :return: The formatted mapping between the filename and temporary file path. :rtype: dict """ # JSON only has string dictionary keys, so the Data object id # needs to be stringified first. data_id = str(data_id) if filename not in exported_files_mapper[data_id]: raise KeyError("Use 're-export' to prepare the file for spawned process: {}".format(filename)) export_fn = exported_files_mapper[data_id].pop(filename) if exported_files_mapper[data_id] == {}: exported_files_mapper.pop(data_id) return {'file_temp': export_fn, 'file': filename}
Pop the given file's map from the exported files mapping. :param exported_files_mapper: The dict of file mappings this process produced. :param filename: The filename to format and remove from the mapping. :param data_id: The id of the :meth:`~resolwe.flow.models.Data` object owning the mapping. :return: The formatted mapping between the filename and temporary file path. :rtype: dict
entailment
def handle_update(self, obj, internal_call=False): """Handle an incoming ``Data`` object update request. :param obj: The Channels message object. Command object format: .. code-block:: none { 'command': 'update', 'data_id': [id of the :class:`~resolwe.flow.models.Data` object this command changes], 'changeset': { [keys to be changed] } } :param internal_call: If ``True``, this is an internal delegate call, so a reply to the executor won't be sent. """ data_id = obj[ExecutorProtocol.DATA_ID] changeset = obj[ExecutorProtocol.UPDATE_CHANGESET] if not internal_call: logger.debug( __("Handling update for Data with id {} (handle_update).", data_id), extra={ 'data_id': data_id, 'packet': obj } ) try: d = Data.objects.get(pk=data_id) except Data.DoesNotExist: logger.warning( "Data object does not exist (handle_update).", extra={ 'data_id': data_id, } ) if not internal_call: async_to_sync(self._send_reply)(obj, {ExecutorProtocol.RESULT: ExecutorProtocol.RESULT_ERROR}) async_to_sync(consumer.send_event)({ WorkerProtocol.COMMAND: WorkerProtocol.ABORT, WorkerProtocol.DATA_ID: obj[ExecutorProtocol.DATA_ID], WorkerProtocol.FINISH_COMMUNICATE_EXTRA: { 'executor': getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local'), }, }) return if changeset.get('status', None) == Data.STATUS_ERROR: logger.error( __("Error occured while running process '{}' (handle_update).", d.process.slug), extra={ 'data_id': data_id, 'api_url': '{}{}'.format( getattr(settings, 'RESOLWE_HOST_URL', ''), reverse('resolwe-api:data-detail', kwargs={'pk': data_id}) ), } ) if d.status == Data.STATUS_ERROR: changeset['status'] = Data.STATUS_ERROR if not d.started: changeset['started'] = now() changeset['modified'] = now() for key, val in changeset.items(): if key in ['process_error', 'process_warning', 'process_info']: # Trim process_* fields to not exceed max length of the database field. 
for i, entry in enumerate(val): max_length = Data._meta.get_field(key).base_field.max_length # pylint: disable=protected-access if len(entry) > max_length: val[i] = entry[:max_length - 3] + '...' getattr(d, key).extend(val) elif key != 'output': setattr(d, key, val) if 'output' in changeset: if not isinstance(d.output, dict): d.output = {} for key, val in changeset['output'].items(): dict_dot(d.output, key, val) try: d.save(update_fields=list(changeset.keys())) except ValidationError as exc: logger.error( __( "Validation error when saving Data object of process '{}' (handle_update):\n\n{}", d.process.slug, traceback.format_exc() ), extra={ 'data_id': data_id } ) d.refresh_from_db() d.process_error.append(exc.message) d.status = Data.STATUS_ERROR try: d.save(update_fields=['process_error', 'status']) except Exception: # pylint: disable=broad-except pass except Exception: # pylint: disable=broad-except logger.error( __( "Error when saving Data object of process '{}' (handle_update):\n\n{}", d.process.slug, traceback.format_exc() ), extra={ 'data_id': data_id } ) if not internal_call: async_to_sync(self._send_reply)(obj, {ExecutorProtocol.RESULT: ExecutorProtocol.RESULT_OK})
Handle an incoming ``Data`` object update request. :param obj: The Channels message object. Command object format: .. code-block:: none { 'command': 'update', 'data_id': [id of the :class:`~resolwe.flow.models.Data` object this command changes], 'changeset': { [keys to be changed] } } :param internal_call: If ``True``, this is an internal delegate call, so a reply to the executor won't be sent.
entailment
def handle_finish(self, obj): """Handle an incoming ``Data`` finished processing request. :param obj: The Channels message object. Command object format: .. code-block:: none { 'command': 'finish', 'data_id': [id of the :class:`~resolwe.flow.models.Data` object this command changes], 'process_rc': [exit status of the processing] 'spawn_processes': [optional; list of spawn dictionaries], 'exported_files_mapper': [if spawn_processes present] } """ data_id = obj[ExecutorProtocol.DATA_ID] logger.debug( __("Finishing Data with id {} (handle_finish).", data_id), extra={ 'data_id': data_id, 'packet': obj } ) spawning_failed = False with transaction.atomic(): # Spawn any new jobs in the request. spawned = False if ExecutorProtocol.FINISH_SPAWN_PROCESSES in obj: if is_testing(): # NOTE: This is a work-around for Django issue #10827 # (https://code.djangoproject.com/ticket/10827), same as in # TestCaseHelpers._pre_setup(). Because the listener is running # independently, it must clear the cache on its own. ContentType.objects.clear_cache() spawned = True exported_files_mapper = obj[ExecutorProtocol.FINISH_EXPORTED_FILES] logger.debug( __("Spawning new Data objects for Data with id {} (handle_finish).", data_id), extra={ 'data_id': data_id } ) try: # This transaction is needed because we're running # asynchronously with respect to the main Django code # here; the manager can get nudged from elsewhere. with transaction.atomic(): parent_data = Data.objects.get(pk=data_id) # Spawn processes. 
for d in obj[ExecutorProtocol.FINISH_SPAWN_PROCESSES]: d['contributor'] = parent_data.contributor d['process'] = Process.objects.filter(slug=d['process']).latest() d['tags'] = parent_data.tags for field_schema, fields in iterate_fields(d.get('input', {}), d['process'].input_schema): type_ = field_schema['type'] name = field_schema['name'] value = fields[name] if type_ == 'basic:file:': fields[name] = self.hydrate_spawned_files( exported_files_mapper, value, data_id ) elif type_ == 'list:basic:file:': fields[name] = [self.hydrate_spawned_files(exported_files_mapper, fn, data_id) for fn in value] with transaction.atomic(): d = Data.objects.create(**d) DataDependency.objects.create( parent=parent_data, child=d, kind=DataDependency.KIND_SUBPROCESS, ) # Copy permissions. copy_permissions(parent_data, d) # Entity is added to the collection only when it is # created - when it only contains 1 Data object. entities = Entity.objects.filter(data=d).annotate(num_data=Count('data')).filter( num_data=1) # Copy collections. for collection in parent_data.collection_set.all(): collection.data.add(d) # Add entities to which data belongs to the collection. for entity in entities: entity.collections.add(collection) except Exception: # pylint: disable=broad-except logger.error( __( "Error while preparing spawned Data objects of process '{}' (handle_finish):\n\n{}", parent_data.process.slug, traceback.format_exc() ), extra={ 'data_id': data_id } ) spawning_failed = True # Data wrap up happens last, so that any triggered signals # already see the spawned children. What the children themselves # see is guaranteed by the transaction we're in. 
if ExecutorProtocol.FINISH_PROCESS_RC in obj: process_rc = obj[ExecutorProtocol.FINISH_PROCESS_RC] try: d = Data.objects.get(pk=data_id) except Data.DoesNotExist: logger.warning( "Data object does not exist (handle_finish).", extra={ 'data_id': data_id, } ) async_to_sync(self._send_reply)(obj, {ExecutorProtocol.RESULT: ExecutorProtocol.RESULT_ERROR}) return changeset = { 'process_progress': 100, 'finished': now(), } if spawning_failed: changeset['status'] = Data.STATUS_ERROR changeset['process_error'] = ["Error while preparing spawned Data objects"] elif process_rc == 0 and not d.status == Data.STATUS_ERROR: changeset['status'] = Data.STATUS_DONE else: changeset['status'] = Data.STATUS_ERROR changeset['process_rc'] = process_rc obj[ExecutorProtocol.UPDATE_CHANGESET] = changeset self.handle_update(obj, internal_call=True) if not getattr(settings, 'FLOW_MANAGER_KEEP_DATA', False): # Purge worker is not running in test runner, so we should skip triggering it. if not is_testing(): channel_layer = get_channel_layer() try: async_to_sync(channel_layer.send)( CHANNEL_PURGE_WORKER, { 'type': TYPE_PURGE_RUN, 'location_id': d.location.id, 'verbosity': self._verbosity, } ) except ChannelFull: logger.warning( "Cannot trigger purge because channel is full.", extra={'data_id': data_id} ) # Notify the executor that we're done. async_to_sync(self._send_reply)(obj, {ExecutorProtocol.RESULT: ExecutorProtocol.RESULT_OK}) # Now nudge the main manager to perform final cleanup. This is # needed even if there was no spawn baggage, since the manager # may need to know when executors have finished, to keep count # of them and manage synchronization. async_to_sync(consumer.send_event)({ WorkerProtocol.COMMAND: WorkerProtocol.FINISH, WorkerProtocol.DATA_ID: data_id, WorkerProtocol.FINISH_SPAWNED: spawned, WorkerProtocol.FINISH_COMMUNICATE_EXTRA: { 'executor': getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local'), }, })
Handle an incoming ``Data`` finished processing request. :param obj: The Channels message object. Command object format: .. code-block:: none { 'command': 'finish', 'data_id': [id of the :class:`~resolwe.flow.models.Data` object this command changes], 'process_rc': [exit status of the processing] 'spawn_processes': [optional; list of spawn dictionaries], 'exported_files_mapper': [if spawn_processes present] }
entailment
def handle_abort(self, obj): """Handle an incoming ``Data`` abort processing request. .. IMPORTANT:: This only makes manager's state consistent and doesn't affect Data object in any way. Any changes to the Data must be applied over ``handle_update`` method. :param obj: The Channels message object. Command object format: .. code-block:: none { 'command': 'abort', 'data_id': [id of the :class:`~resolwe.flow.models.Data` object this command was triggered by], } """ async_to_sync(consumer.send_event)({ WorkerProtocol.COMMAND: WorkerProtocol.ABORT, WorkerProtocol.DATA_ID: obj[ExecutorProtocol.DATA_ID], WorkerProtocol.FINISH_COMMUNICATE_EXTRA: { 'executor': getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local'), }, })
Handle an incoming ``Data`` abort processing request. .. IMPORTANT:: This only makes manager's state consistent and doesn't affect Data object in any way. Any changes to the Data must be applied over ``handle_update`` method. :param obj: The Channels message object. Command object format: .. code-block:: none { 'command': 'abort', 'data_id': [id of the :class:`~resolwe.flow.models.Data` object this command was triggered by], }
entailment
def handle_log(self, obj): """Handle an incoming log processing request. :param obj: The Channels message object. Command object format: .. code-block:: none { 'command': 'log', 'message': [log message] } """ record_dict = json.loads(obj[ExecutorProtocol.LOG_MESSAGE]) record_dict['msg'] = record_dict['msg'] executors_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'executors') record_dict['pathname'] = os.path.join(executors_dir, record_dict['pathname']) logger.handle(logging.makeLogRecord(record_dict))
Handle an incoming log processing request. :param obj: The Channels message object. Command object format: .. code-block:: none { 'command': 'log', 'message': [log message] }
entailment
async def push_stats(self): """Push current stats to Redis.""" snapshot = self._make_stats() try: serialized = json.dumps(snapshot) await self._call_redis(aioredis.Redis.set, state.MANAGER_LISTENER_STATS, serialized) await self._call_redis(aioredis.Redis.expire, state.MANAGER_LISTENER_STATS, 3600) except TypeError: logger.error(__( "Listener can't serialize statistics:\n\n{}", traceback.format_exc() )) except aioredis.RedisError: logger.error(__( "Listener can't store updated statistics:\n\n{}", traceback.format_exc() ))
Push current stats to Redis.
entailment
def check_critical_load(self): """Check for critical load and log an error if necessary.""" if self.load_avg.intervals['1m'].value > 1: if self.last_load_level == 1 and time.time() - self.last_load_log < 30: return self.last_load_log = time.time() self.last_load_level = 1 logger.error( "Listener load limit exceeded, the system can't handle this!", extra=self._make_stats() ) elif self.load_avg.intervals['1m'].value > 0.8: if self.last_load_level == 0.8 and time.time() - self.last_load_log < 30: return self.last_load_log = time.time() self.last_load_level = 0.8 logger.warning( "Listener load approaching critical!", extra=self._make_stats() ) else: self.last_load_log = -math.inf self.last_load_level = 0
Check for critical load and log an error if necessary.
entailment
async def run(self): """Run the main listener run loop. Doesn't return until :meth:`terminate` is called. """ logger.info(__( "Starting Resolwe listener on channel '{}'.", state.MANAGER_EXECUTOR_CHANNELS.queue )) while not self._should_stop: await self.push_stats() ret = await self._call_redis(aioredis.Redis.blpop, state.MANAGER_EXECUTOR_CHANNELS.queue, timeout=1) if ret is None: self.load_avg.add(0) continue remaining = await self._call_redis(aioredis.Redis.llen, state.MANAGER_EXECUTOR_CHANNELS.queue) self.load_avg.add(remaining + 1) self.check_critical_load() _, item = ret try: item = item.decode('utf-8') logger.debug(__("Got command from executor: {}", item)) obj = json.loads(item) except json.JSONDecodeError: logger.error( __("Undecodable command packet:\n\n{}"), traceback.format_exc() ) continue command = obj.get(ExecutorProtocol.COMMAND, None) if command is None: continue service_start = time.perf_counter() handler = getattr(self, 'handle_' + command, None) if handler: try: with PrioritizedBatcher.global_instance(): await database_sync_to_async(handler)(obj) except Exception: # pylint: disable=broad-except logger.error(__( "Executor command handling error:\n\n{}", traceback.format_exc() )) else: logger.error( __("Unknown executor command '{}'.", command), extra={'decoded_packet': obj} ) # We do want to measure wall-clock time elapsed, because # system load will impact event handling performance. On # a lagging system, good internal performance is meaningless. service_end = time.perf_counter() self.service_time.update(service_end - service_start) logger.info(__( "Stopping Resolwe listener on channel '{}'.", state.MANAGER_EXECUTOR_CHANNELS.queue ))
Run the main listener run loop. Doesn't return until :meth:`terminate` is called.
entailment
def dokanMain(self, dokanOptions, dokanOperations): """Issue callback to start dokan drive. :param DokanOptions: drive options :type DokanOptions: DOKAN_OPTIONS :param DokanOperations: pointers implemented file system calls :type DokanOperations: DokanOperations :return: error code :rtype: int """ return int( self.dokanDLL.DokanMain( PDOKAN_OPTIONS(dokanOptions), PDOKAN_OPERATIONS(dokanOperations) ) )
Issue callback to start dokan drive. :param DokanOptions: drive options :type DokanOptions: DOKAN_OPTIONS :param DokanOperations: pointers implemented file system calls :type DokanOperations: DokanOperations :return: error code :rtype: int
entailment
def createFile( self, fileName, desiredAccess, shareMode, creationDisposition, flagsAndAttributes, dokanFileInfo, ): """Creates a file. :param fileName: name of file to create :type fileName: ctypes.c_wchar_p :param desiredAccess: desired access flags :type desiredAccess: ctypes.c_ulong :param shareMode: share mode flags :type shareMode: ctypes.c_ulong :param creationDisposition: creation disposition flags :type creationDisposition: ctypes.c_ulong :param flagsAndAttributes: creation flags and attributes :type flagsAndAttributes: ctypes.c_ulong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int """ return self.operations('createFile', fileName)
Creates a file. :param fileName: name of file to create :type fileName: ctypes.c_wchar_p :param desiredAccess: desired access flags :type desiredAccess: ctypes.c_ulong :param shareMode: share mode flags :type shareMode: ctypes.c_ulong :param creationDisposition: creation disposition flags :type creationDisposition: ctypes.c_ulong :param flagsAndAttributes: creation flags and attributes :type flagsAndAttributes: ctypes.c_ulong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
entailment
def readFile( self, fileName, buffer, numberOfBytesToRead, numberOfBytesRead, offset, dokanFileInfo, ): """Read a file. :param fileName: name of file to read :type fileName: ctypes.c_wchar_p :param buffer: buffer for content read :type buffer: ctypes.c_void_p :param numberOfBytesToRead: number of bytes to read :type numberOfBytesToRead: ctypes.c_ulong :param numberOfBytesRead: number of bytes read :type numberOfBytesRead: ctypes.POINTER(ctypes.c_ulong) :param offset: byte offset :type offset: ctypes.c_longlong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int """ try: ret = self.operations('readFile', fileName, numberOfBytesToRead, offset) data = ctypes.create_string_buffer( ret[:numberOfBytesToRead], numberOfBytesToRead ) ctypes.memmove(buffer, data, numberOfBytesToRead) sizeRead = ctypes.c_ulong(len(ret)) ctypes.memmove( numberOfBytesRead, ctypes.byref(sizeRead), ctypes.sizeof(ctypes.c_ulong) ) return d1_onedrive.impl.drivers.dokan.const.DOKAN_SUCCESS except Exception: # logging.error('%s', e) return d1_onedrive.impl.drivers.dokan.const.DOKAN_ERROR
Read a file. :param fileName: name of file to read :type fileName: ctypes.c_wchar_p :param buffer: buffer for content read :type buffer: ctypes.c_void_p :param numberOfBytesToRead: number of bytes to read :type numberOfBytesToRead: ctypes.c_ulong :param numberOfBytesRead: number of bytes read :type numberOfBytesRead: ctypes.POINTER(ctypes.c_ulong) :param offset: byte offset :type offset: ctypes.c_longlong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
entailment
def writeFile( self, fileName, buffer, numberOfBytesToWrite, numberOfBytesWritten, offset, dokanFileInfo, ): """Read a file. :param fileName: name of file to write :type fileName: ctypes.c_wchar_p :param buffer: buffer to write :type buffer: ctypes.c_void_p :param numberOfBytesToWrite: number of bytes to write :type numberOfBytesToWrite: ctypes.c_ulong :param numberOfBytesWritten: number of bytes written :type numberOfBytesWritten: ctypes.POINTER(ctypes.c_ulong) :param offset: byte offset :type offset: ctypes.c_longlong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int """ return self.operations( 'writeFile', fileName, buffer, numberOfBytesToWrite, offset )
Read a file. :param fileName: name of file to write :type fileName: ctypes.c_wchar_p :param buffer: buffer to write :type buffer: ctypes.c_void_p :param numberOfBytesToWrite: number of bytes to write :type numberOfBytesToWrite: ctypes.c_ulong :param numberOfBytesWritten: number of bytes written :type numberOfBytesWritten: ctypes.POINTER(ctypes.c_ulong) :param offset: byte offset :type offset: ctypes.c_longlong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
entailment
def findFilesWithPattern( self, fileName, searchPattern, fillFindData, dokanFileInfo ): """Find files in a certain path that match the search pattern. :param fileName: path to search :type fileName: ctypes.c_wchar_p :param searchPattern: pattern to search for :type searchPattern: ctypes.c_wchar_p :param fillFindData: function pointer for populating search results :type fillFindData: PFillFindData :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int """ try: ret = self.operations('findFilesWithPattern', fileName, searchPattern) if ret is None: return d1_onedrive.impl.drivers.dokan.const.DOKAN_ERROR for r in ret: create_ft = self.python_timestamp_to_win32_filetime(r['ctime']) last_access_ft = self.python_timestamp_to_win32_filetime(r['atime']) last_write_ft = self.python_timestamp_to_win32_filetime(r['wtime']) cft = ctypes.wintypes.FILETIME(create_ft[0], create_ft[1]) laft = ctypes.wintypes.FILETIME(last_access_ft[0], last_access_ft[1]) lwft = ctypes.wintypes.FILETIME(last_write_ft[0], last_write_ft[1]) size = self.pyint_to_double_dwords(r['size']) File = ctypes.wintypes.WIN32_FIND_DATAW( ctypes.c_ulong(r['attr']), # attributes cft, # creation time laft, # last access time lwft, # last write time size[1], # upper bits of size size[0], # lower bits of size ctypes.c_ulong(0), # reserved for FS ctypes.c_ulong(0), # reserved for FS r['name'], # file name '', ) # alternate name pFile = ctypes.wintypes.PWIN32_FIND_DATAW(File) fillFindData(pFile, dokanFileInfo) return d1_onedrive.impl.drivers.dokan.const.DOKAN_SUCCESS except Exception as e: logging.error('%s', e) return d1_onedrive.impl.drivers.dokan.const.DOKAN_ERROR
Find files in a certain path that match the search pattern. :param fileName: path to search :type fileName: ctypes.c_wchar_p :param searchPattern: pattern to search for :type searchPattern: ctypes.c_wchar_p :param fillFindData: function pointer for populating search results :type fillFindData: PFillFindData :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
entailment
def setFileTime( self, fileName, creationTime, lastAccessTime, lastWriteTime, dokanFileInfo ): """Set time values for a file. :param fileName: name of file to set time values for :type fileName: ctypes.c_wchar_p :param creationTime: creation time of file :type creationTime: ctypes.POINTER(ctypes.wintypes.FILETIME) :param lastAccessTime: last access time of file :type lastAccessTime: ctypes.POINTER(ctypes.wintypes.FILETIME) :param lastWriteTime: last write time of file :type lastWriteTime: ctypes.POINTER(ctypes.wintypes.FILETIME) :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int """ return self.operations('setFileTime', fileName)
Set time values for a file. :param fileName: name of file to set time values for :type fileName: ctypes.c_wchar_p :param creationTime: creation time of file :type creationTime: ctypes.POINTER(ctypes.wintypes.FILETIME) :param lastAccessTime: last access time of file :type lastAccessTime: ctypes.POINTER(ctypes.wintypes.FILETIME) :param lastWriteTime: last write time of file :type lastWriteTime: ctypes.POINTER(ctypes.wintypes.FILETIME) :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
entailment
def moveFile(self, existingFileName, newFileName, replaceExisiting, dokanFileInfo): """Move a file. :param existingFileName: name of file to move :type existingFileName: ctypes.c_wchar_p :param newFileName: new name of file :type newFileName: ctypes.c_wchar_p :param replaceExisting: flag to indicate replacement of existing file :type replaceExisting: ctypes.c_bool :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int """ return self.operations('moveFile', existingFileName, newFileName)
Move a file. :param existingFileName: name of file to move :type existingFileName: ctypes.c_wchar_p :param newFileName: new name of file :type newFileName: ctypes.c_wchar_p :param replaceExisting: flag to indicate replacement of existing file :type replaceExisting: ctypes.c_bool :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
entailment
def lockFile(self, fileName, byteOffset, length, dokanFileInfo): """Lock a file. :param fileName: name of file to lock :type fileName: ctypes.c_wchar_p :param byteOffset: location to start lock :type byteOffset: ctypes.c_longlong :param length: number of bytes to lock :type length: ctypes.c_longlong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int """ return self.operations('lockFile', fileName, byteOffset, length)
Lock a file. :param fileName: name of file to lock :type fileName: ctypes.c_wchar_p :param byteOffset: location to start lock :type byteOffset: ctypes.c_longlong :param length: number of bytes to lock :type length: ctypes.c_longlong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
entailment
def unlockFile(self, fileName, byteOffset, length, dokanFileInfo): """Unlock a file. :param fileName: name of file to unlock :type fileName: ctypes.c_wchar_p :param byteOffset: location to start unlock :type byteOffset: ctypes.c_longlong :param length: number of bytes to unlock :type length: ctypes.c_longlong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int """ return self.operations('unlockFile', fileName, byteOffset, length)
Unlock a file. :param fileName: name of file to unlock :type fileName: ctypes.c_wchar_p :param byteOffset: location to start unlock :type byteOffset: ctypes.c_longlong :param length: number of bytes to unlock :type length: ctypes.c_longlong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
entailment
def getDiskFreeSpace( self, freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes, dokanFileInfo, ): """Get the amount of free space on this volume. :param freeBytesAvailable: pointer for free bytes available :type freeBytesAvailable: ctypes.c_void_p :param totalNumberOfBytes: pointer for total number of bytes :type totalNumberOfBytes: ctypes.c_void_p :param totalNumberOfFreeBytes: pointer for total number of free bytes :type totalNumberOfFreeBytes: ctypes.c_void_p :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int """ ret = self.operations('getDiskFreeSpace') ctypes.memmove( freeBytesAvailable, ctypes.byref(ctypes.c_longlong(ret['freeBytesAvailable'])), ctypes.sizeof(ctypes.c_longlong), ) ctypes.memmove( totalNumberOfBytes, ctypes.byref(ctypes.c_longlong(ret['totalNumberOfBytes'])), ctypes.sizeof(ctypes.c_longlong), ) ctypes.memmove( totalNumberOfFreeBytes, ctypes.byref(ctypes.c_longlong(ret['totalNumberOfFreeBytes'])), ctypes.sizeof(ctypes.c_longlong), ) return d1_onedrive.impl.drivers.dokan.const.DOKAN_SUCCESS
Get the amount of free space on this volume. :param freeBytesAvailable: pointer for free bytes available :type freeBytesAvailable: ctypes.c_void_p :param totalNumberOfBytes: pointer for total number of bytes :type totalNumberOfBytes: ctypes.c_void_p :param totalNumberOfFreeBytes: pointer for total number of free bytes :type totalNumberOfFreeBytes: ctypes.c_void_p :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
entailment
def getVolumeInformation( self, volumeNameBuffer, volumeNameSize, volumeSerialNumber, maximumComponentLength, fileSystemFlags, fileSystemNameBuffer, fileSystemNameSize, dokanFileInfo, ): """Get information about the volume. :param volumeNameBuffer: buffer for volume name :type volumeNameBuffer: ctypes.c_void_p :param volumeNameSize: volume name buffer size :type volumeNameSize: ctypes.c_ulong :param volumeSerialNumber: buffer for volume serial number :type volumeSerialNumber: ctypes.c_void_p :param maximumComponentLength: buffer for maximum component length :type maximumComponentLength: ctypes.c_void_p :param fileSystemFlags: buffer for file system flags :type fileSystemFlags: ctypes.c_void_p :param fileSystemNameBuffer: buffer for file system name :type fileSystemNameBuffer: ctypes.c_void_p :param fileSystemNameSize: file system name buffer size :type fileSystemNameSize: ctypes.c_ulong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int """ ret = self.operations('getVolumeInformation') # populate volume name buffer ctypes.memmove( volumeNameBuffer, ret['volumeNameBuffer'], min( ctypes.sizeof(ctypes.c_wchar) * len(ret['volumeNameBuffer']), volumeNameSize, ), ) # populate serial number buffer serialNum = ctypes.c_ulong(self.serialNumber) ctypes.memmove( volumeSerialNumber, ctypes.byref(serialNum), ctypes.sizeof(ctypes.c_ulong) ) # populate max component length maxCompLen = ctypes.c_ulong(ret['maximumComponentLength']) ctypes.memmove( maximumComponentLength, ctypes.byref(maxCompLen), ctypes.sizeof(ctypes.c_ulong), ) # populate filesystem flags buffer fsFlags = ctypes.c_ulong(ret['fileSystemFlags']) ctypes.memmove( fileSystemFlags, ctypes.byref(fsFlags), ctypes.sizeof(ctypes.c_ulong) ) # populate filesystem name ctypes.memmove( fileSystemNameBuffer, ret['fileSystemNameBuffer'], min( ctypes.sizeof(ctypes.c_wchar) * len(ret['fileSystemNameBuffer']), fileSystemNameSize, ), ) return 
d1_onedrive.impl.drivers.dokan.const.DOKAN_SUCCESS
Get information about the volume. :param volumeNameBuffer: buffer for volume name :type volumeNameBuffer: ctypes.c_void_p :param volumeNameSize: volume name buffer size :type volumeNameSize: ctypes.c_ulong :param volumeSerialNumber: buffer for volume serial number :type volumeSerialNumber: ctypes.c_void_p :param maximumComponentLength: buffer for maximum component length :type maximumComponentLength: ctypes.c_void_p :param fileSystemFlags: buffer for file system flags :type fileSystemFlags: ctypes.c_void_p :param fileSystemNameBuffer: buffer for file system name :type fileSystemNameBuffer: ctypes.c_void_p :param fileSystemNameSize: file system name buffer size :type fileSystemNameSize: ctypes.c_ulong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
entailment
def getFileSecurity( self, fileName, securityInformation, securityDescriptor, lengthSecurityDescriptorBuffer, lengthNeeded, dokanFileInfo, ): """Get security attributes of a file. :param fileName: name of file to get security for :type fileName: ctypes.c_wchar_p :param securityInformation: buffer for security information :type securityInformation: PSECURITY_INFORMATION :param securityDescriptor: buffer for security descriptor :type securityDescriptor: PSECURITY_DESCRIPTOR :param lengthSecurityDescriptorBuffer: length of descriptor buffer :type lengthSecurityDescriptorBuffer: ctypes.c_ulong :param lengthNeeded: length needed for the buffer :type lengthNeeded: ctypes.POINTER(ctypes.c_ulong) :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int """ return self.operations('getFileSecurity', fileName)
Get security attributes of a file. :param fileName: name of file to get security for :type fileName: ctypes.c_wchar_p :param securityInformation: buffer for security information :type securityInformation: PSECURITY_INFORMATION :param securityDescriptor: buffer for security descriptor :type securityDescriptor: PSECURITY_DESCRIPTOR :param lengthSecurityDescriptorBuffer: length of descriptor buffer :type lengthSecurityDescriptorBuffer: ctypes.c_ulong :param lengthNeeded: length needed for the buffer :type lengthNeeded: ctypes.POINTER(ctypes.c_ulong) :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
entailment
def setFileSecurity( self, fileName, securityInformation, securityDescriptor, lengthSecurityDescriptorBuffer, dokanFileInfo, ): """Set security attributes of a file. :param fileName: name of file to set security for :type fileName: ctypes.c_wchar_p :param securityInformation: new security information :type securityInformation: PSECURITY_INFORMATION :param securityDescriptor: newsecurity descriptor :type securityDescriptor: PSECURITY_DESCRIPTOR :param lengthSecurityDescriptorBuffer: length of descriptor buffer :type lengthSecurityDescriptorBuffer: ctypes.c_ulong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int """ return self.operations('setFileSecurity', fileName)
Set security attributes of a file. :param fileName: name of file to set security for :type fileName: ctypes.c_wchar_p :param securityInformation: new security information :type securityInformation: PSECURITY_INFORMATION :param securityDescriptor: newsecurity descriptor :type securityDescriptor: PSECURITY_DESCRIPTOR :param lengthSecurityDescriptorBuffer: length of descriptor buffer :type lengthSecurityDescriptorBuffer: ctypes.c_ulong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
entailment
def createSimpleResourceMap(ore_pid, sci_meta_pid, data_pids): """Create a simple resource map with one metadata document and n data objects.""" ore = ResourceMap() ore.initialize(ore_pid) ore.addMetadataDocument(sci_meta_pid) ore.addDataDocuments(data_pids, sci_meta_pid) return ore
Create a simple resource map with one metadata document and n data objects.
entailment
def pids2ore(in_stream, fmt='xml', base_url='https://cn.dataone.org/cn'): """read pids from in_stream and generate a resource map. first pid is the ore_pid second is the sci meta pid remainder are data pids """ pids = [] for line in in_stream: pid = line.strip() if len(pid) > 0: if not pid.startswith("# "): pids.append(pid) if (len(pids)) < 2: raise ValueError("Insufficient identifiers provided.") logging.info("Read %d identifiers", len(pids)) ore = ResourceMap(base_url=base_url) logging.info("ORE PID = %s", pids[0]) ore.initialize(pids[0]) logging.info("Metadata PID = %s", pids[1]) ore.addMetadataDocument(pids[1]) ore.addDataDocuments(pids[2:], pids[1]) return ore.serialize_to_display(doc_format=fmt)
read pids from in_stream and generate a resource map. first pid is the ore_pid second is the sci meta pid remainder are data pids
entailment
def submit(self, data, runtime_dir, argv): """Run process. For details, see :meth:`~resolwe.flow.managers.workload_connectors.base.BaseConnector.submit`. """ queue = 'ordinary' if data.process.scheduling_class == Process.SCHEDULING_CLASS_INTERACTIVE: queue = 'hipri' logger.debug(__( "Connector '{}' running for Data with id {} ({}) in celery queue {}, EAGER is {}.", self.__class__.__module__, data.id, repr(argv), queue, getattr(settings, 'CELERY_ALWAYS_EAGER', None) )) celery_run.apply_async((data.id, runtime_dir, argv), queue=queue)
Run process. For details, see :meth:`~resolwe.flow.managers.workload_connectors.base.BaseConnector.submit`.
entailment
def refresh(self): """Synchronize the local tree of Solr records for DataONE identifiers and queries with the reference tree.""" if self._source_tree.cache_is_stale(): self._source_tree.refresh() logging.info('Refreshing object tree') self._init_cache() self.sync_cache_with_source_tree()
Synchronize the local tree of Solr records for DataONE identifiers and queries with the reference tree.
entailment
def get_object_record(self, pid): """Get an object that has already been cached in the object tree. Caching happens when the object tree is refreshed. """ try: return self._cache['records'][pid] except KeyError: raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException('Unknown PID')
Get an object that has already been cached in the object tree. Caching happens when the object tree is refreshed.
entailment
def get_object_record_with_sync(self, pid): """Get an object that may not currently be in the cache. If the object is not in the cache, an attempt is made to retrieve the record from a CN on the fly. If the object is found, it is cached before being returned to the user. This allows the object tree caching system to be used for objects that are not in the object tree. ONEDrive uses this functionality for the FlatSpace folder. """ try: return self._cache['records'][pid] except KeyError: return self._get_uncached_object_record(pid)
Get an object that may not currently be in the cache. If the object is not in the cache, an attempt is made to retrieve the record from a CN on the fly. If the object is found, it is cached before being returned to the user. This allows the object tree caching system to be used for objects that are not in the object tree. ONEDrive uses this functionality for the FlatSpace folder.
entailment
def _create_cache_item_for_pid(self, cache_folder, pid): """The source tree can contain identifiers that are no longer valid (or were never valid). Any items for which a Solr record cannot be retrieved are silently skipped. """ try: record = self._solr_client.get_solr_record(pid) except d1_onedrive.impl.onedrive_exceptions.ONEDriveException: pass else: self._create_cache_item(cache_folder, record)
The source tree can contain identifiers that are no longer valid (or were never valid). Any items for which a Solr record cannot be retrieved are silently skipped.
entailment
def dependency_status(data): """Return abstracted status of dependencies. - ``STATUS_ERROR`` .. one dependency has error status or was deleted - ``STATUS_DONE`` .. all dependencies have done status - ``None`` .. other """ parents_statuses = set( DataDependency.objects.filter( child=data, kind=DataDependency.KIND_IO ).distinct('parent__status').values_list('parent__status', flat=True) ) if not parents_statuses: return Data.STATUS_DONE if None in parents_statuses: # Some parents have been deleted. return Data.STATUS_ERROR if Data.STATUS_ERROR in parents_statuses: return Data.STATUS_ERROR if len(parents_statuses) == 1 and Data.STATUS_DONE in parents_statuses: return Data.STATUS_DONE return None
Return abstracted status of dependencies. - ``STATUS_ERROR`` .. one dependency has error status or was deleted - ``STATUS_DONE`` .. all dependencies have done status - ``None`` .. other
entailment
def discover_engines(self, executor=None): """Discover configured engines. :param executor: Optional executor module override """ if executor is None: executor = getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local') self.executor = self.load_executor(executor) logger.info( __("Loaded '{}' executor.", str(self.executor.__class__.__module__).replace('.prepare', '')) ) expression_engines = getattr(settings, 'FLOW_EXPRESSION_ENGINES', ['resolwe.flow.expression_engines.jinja']) self.expression_engines = self.load_expression_engines(expression_engines) logger.info(__( "Found {} expression engines: {}", len(self.expression_engines), ', '.join(self.expression_engines.keys()) )) execution_engines = getattr(settings, 'FLOW_EXECUTION_ENGINES', ['resolwe.flow.execution_engines.bash']) self.execution_engines = self.load_execution_engines(execution_engines) logger.info(__( "Found {} execution engines: {}", len(self.execution_engines), ', '.join(self.execution_engines.keys()) ))
Discover configured engines. :param executor: Optional executor module override
entailment
def reset(self, keep_state=False): """Reset the shared state and drain Django Channels. :param keep_state: If ``True``, do not reset the shared manager state (useful in tests, where the settings overrides need to be kept). Defaults to ``False``. """ if not keep_state: self.state = state.ManagerState(state.MANAGER_STATE_PREFIX) self.state.reset() async_to_sync(consumer.run_consumer)(timeout=1) async_to_sync(self.sync_counter.reset)()
Reset the shared state and drain Django Channels. :param keep_state: If ``True``, do not reset the shared manager state (useful in tests, where the settings overrides need to be kept). Defaults to ``False``.
entailment
def _marshal_settings(self): """Marshal Django settings into a serializable object. :return: The serialized settings. :rtype: dict """ result = {} for key in dir(settings): if any(map(key.startswith, ['FLOW_', 'RESOLWE_', 'CELERY_'])): result[key] = getattr(settings, key) return result
Marshal Django settings into a serializable object. :return: The serialized settings. :rtype: dict
entailment
def _include_environment_variables(self, program, executor_vars): """Define environment variables.""" env_vars = { 'RESOLWE_HOST_URL': self.settings_actual.get('RESOLWE_HOST_URL', 'localhost'), } set_env = self.settings_actual.get('FLOW_EXECUTOR', {}).get('SET_ENV', {}) env_vars.update(executor_vars) env_vars.update(set_env) export_commands = ['export {}={}'.format(key, shlex.quote(value)) for key, value in env_vars.items()] return os.linesep.join(export_commands) + os.linesep + program
Define environment variables.
entailment
def run(self, data, runtime_dir, argv): """Select a concrete connector and run the process through it. :param data: The :class:`~resolwe.flow.models.Data` object that is to be run. :param runtime_dir: The directory the executor is run from. :param argv: The argument vector used to spawn the executor. """ process_scheduling = self.scheduling_class_map[data.process.scheduling_class] if 'DISPATCHER_MAPPING' in getattr(settings, 'FLOW_MANAGER', {}): class_name = settings.FLOW_MANAGER['DISPATCHER_MAPPING'][process_scheduling] else: class_name = getattr(settings, 'FLOW_MANAGER', {}).get('NAME', DEFAULT_CONNECTOR) data.scheduled = now() data.save(update_fields=['scheduled']) async_to_sync(self.sync_counter.inc)('executor') return self.connectors[class_name].submit(data, runtime_dir, argv)
Select a concrete connector and run the process through it. :param data: The :class:`~resolwe.flow.models.Data` object that is to be run. :param runtime_dir: The directory the executor is run from. :param argv: The argument vector used to spawn the executor.
entailment
def _get_per_data_dir(self, dir_base, subpath): """Extend the given base directory with a per-data component. The method creates a private path for the :class:`~resolwe.flow.models.Data` object, such as:: ./test_data/1/ if ``base_dir`` is ``'./test_data'`` and ``subpath`` is ``1``. :param dir_base: The base path to be extended. This will usually be one of the directories configured in the ``FLOW_EXECUTOR`` setting. :param subpath: Objects's subpath used for the extending. :return: The new path for the :class:`~resolwe.flow.models.Data` object. :rtype: str """ # Use Django settings here, because the state must be preserved # across events. This also implies the directory settings can't # be patched outside the manager and then just sent along in the # command packets. result = self.settings_actual.get('FLOW_EXECUTOR', {}).get(dir_base, '') return os.path.join(result, subpath)
Extend the given base directory with a per-data component. The method creates a private path for the :class:`~resolwe.flow.models.Data` object, such as:: ./test_data/1/ if ``base_dir`` is ``'./test_data'`` and ``subpath`` is ``1``. :param dir_base: The base path to be extended. This will usually be one of the directories configured in the ``FLOW_EXECUTOR`` setting. :param subpath: Objects's subpath used for the extending. :return: The new path for the :class:`~resolwe.flow.models.Data` object. :rtype: str
entailment
def _prepare_data_dir(self, data): """Prepare destination directory where the data will live. :param data: The :class:`~resolwe.flow.models.Data` object for which to prepare the private execution directory. :return: The prepared data directory path. :rtype: str """ logger.debug(__("Preparing data directory for Data with id {}.", data.id)) with transaction.atomic(): # Create a temporary random location and then override it with data # location id since object has to be created first. # TODO Find a better solution, e.g. defer the database constraint. temporary_location_string = uuid.uuid4().hex[:10] data_location = DataLocation.objects.create(subpath=temporary_location_string) data_location.subpath = str(data_location.id) data_location.save() data_location.data.add(data) output_path = self._get_per_data_dir('DATA_DIR', data_location.subpath) dir_mode = self.settings_actual.get('FLOW_EXECUTOR', {}).get('DATA_DIR_MODE', 0o755) os.mkdir(output_path, mode=dir_mode) # os.mkdir is not guaranteed to set the given mode os.chmod(output_path, dir_mode) return output_path
Prepare destination directory where the data will live. :param data: The :class:`~resolwe.flow.models.Data` object for which to prepare the private execution directory. :return: The prepared data directory path. :rtype: str
entailment
def _prepare_context(self, data_id, data_dir, runtime_dir, **kwargs): """Prepare settings and constants JSONs for the executor. Settings and constants provided by other ``resolwe`` modules and :class:`~django.conf.settings` are all inaccessible in the executor once it is deployed, so they need to be serialized into the runtime directory. :param data_id: The :class:`~resolwe.flow.models.Data` object id being prepared for. :param data_dir: The target execution directory for this :class:`~resolwe.flow.models.Data` object. :param runtime_dir: The target runtime support directory for this :class:`~resolwe.flow.models.Data` object; this is where the environment is serialized into. :param kwargs: Extra settings to include in the main settings file. """ files = {} secrets = {} settings_dict = {} settings_dict['DATA_DIR'] = data_dir settings_dict['REDIS_CHANNEL_PAIR'] = state.MANAGER_EXECUTOR_CHANNELS files[ExecutorFiles.EXECUTOR_SETTINGS] = settings_dict django_settings = {} django_settings.update(self.settings_actual) django_settings.update(kwargs) files[ExecutorFiles.DJANGO_SETTINGS] = django_settings # Add scheduling classes. files[ExecutorFiles.PROCESS_META] = { k: getattr(Process, k) for k in dir(Process) if k.startswith('SCHEDULING_CLASS_') and isinstance(getattr(Process, k), str) } # Add Data status constants. files[ExecutorFiles.DATA_META] = { k: getattr(Data, k) for k in dir(Data) if k.startswith('STATUS_') and isinstance(getattr(Data, k), str) } # Extend the settings with whatever the executor wants. self.executor.extend_settings(data_id, files, secrets) # Save the settings into the various files in the runtime dir. settings_dict[ExecutorFiles.FILE_LIST_KEY] = list(files.keys()) for file_name in files: file_path = os.path.join(runtime_dir, file_name) with open(file_path, 'wt') as json_file: json.dump(files[file_name], json_file, cls=SettingsJSONifier) # Save the secrets in the runtime dir, with permissions to prevent listing the given # directory. 
secrets_dir = os.path.join(runtime_dir, ExecutorFiles.SECRETS_DIR) os.makedirs(secrets_dir, mode=0o300) for file_name, value in secrets.items(): file_path = os.path.join(secrets_dir, file_name) # Set umask to 0 to ensure that we set the correct permissions. old_umask = os.umask(0) try: # We need to use os.open in order to correctly enforce file creation. Otherwise, # there is a race condition which can be used to create the file with different # ownership/permissions. file_descriptor = os.open(file_path, os.O_WRONLY | os.O_CREAT, mode=0o600) with os.fdopen(file_descriptor, 'w') as raw_file: raw_file.write(value) finally: os.umask(old_umask)
Prepare settings and constants JSONs for the executor. Settings and constants provided by other ``resolwe`` modules and :class:`~django.conf.settings` are all inaccessible in the executor once it is deployed, so they need to be serialized into the runtime directory. :param data_id: The :class:`~resolwe.flow.models.Data` object id being prepared for. :param data_dir: The target execution directory for this :class:`~resolwe.flow.models.Data` object. :param runtime_dir: The target runtime support directory for this :class:`~resolwe.flow.models.Data` object; this is where the environment is serialized into. :param kwargs: Extra settings to include in the main settings file.
entailment
def _prepare_executor(self, data, executor): """Copy executor sources into the destination directory. :param data: The :class:`~resolwe.flow.models.Data` object being prepared for. :param executor: The fully qualified name of the executor that is to be used for this data object. :return: Tuple containing the relative fully qualified name of the executor class ('relative' to how the executor will be run) and the path to the directory where the executor will be deployed. :rtype: (str, str) """ logger.debug(__("Preparing executor for Data with id {}", data.id)) # Both of these imports are here only to get the packages' paths. import resolwe.flow.executors as executor_package exec_dir = os.path.dirname(inspect.getsourcefile(executor_package)) dest_dir = self._get_per_data_dir('RUNTIME_DIR', data.location.subpath) dest_package_dir = os.path.join(dest_dir, 'executors') shutil.copytree(exec_dir, dest_package_dir) dir_mode = self.settings_actual.get('FLOW_EXECUTOR', {}).get('RUNTIME_DIR_MODE', 0o755) os.chmod(dest_dir, dir_mode) class_name = executor.rpartition('.executors.')[-1] return '.{}'.format(class_name), dest_dir
Copy executor sources into the destination directory. :param data: The :class:`~resolwe.flow.models.Data` object being prepared for. :param executor: The fully qualified name of the executor that is to be used for this data object. :return: Tuple containing the relative fully qualified name of the executor class ('relative' to how the executor will be run) and the path to the directory where the executor will be deployed. :rtype: (str, str)
entailment
def _prepare_script(self, dest_dir, program): """Copy the script into the destination directory. :param dest_dir: The target directory where the script will be saved. :param program: The script text to be saved. :return: The name of the script file. :rtype: str """ script_name = ExecutorFiles.PROCESS_SCRIPT dest_file = os.path.join(dest_dir, script_name) with open(dest_file, 'wt') as dest_file_obj: dest_file_obj.write(program) os.chmod(dest_file, 0o700) return script_name
Copy the script into the destination directory. :param dest_dir: The target directory where the script will be saved. :param program: The script text to be saved. :return: The name of the script file. :rtype: str
entailment
async def handle_control_event(self, message):
    """Handle an event from the Channels layer.

    Channels layer callback, do not call directly.

    :param message: The control message dictionary; its
        ``WorkerProtocol.COMMAND`` key selects the action taken.
    """
    cmd = message[WorkerProtocol.COMMAND]
    logger.debug(__("Manager worker got channel command '{}'.", cmd))

    # Prepare settings for use; Django overlaid by state overlaid by
    # anything immediate in the current packet.
    immediates = {}
    if cmd == WorkerProtocol.COMMUNICATE:
        immediates = message.get(WorkerProtocol.COMMUNICATE_SETTINGS, {}) or {}
    override = self.state.settings_override or {}
    override.update(immediates)
    self.settings_actual = self._marshal_settings()
    self.settings_actual.update(override)

    if cmd == WorkerProtocol.COMMUNICATE:
        try:
            await database_sync_to_async(self._data_scan)(**message[WorkerProtocol.COMMUNICATE_EXTRA])
        except Exception:
            logger.exception("Unknown error occured while processing communicate control command.")
            raise
        finally:
            await self.sync_counter.dec('communicate')

    elif cmd == WorkerProtocol.FINISH:
        # Pre-initialize so that the exception handler below can safely log
        # it; previously, if the DATA_ID lookup itself raised, the handler's
        # 'extra' reference hit an unbound local and the resulting NameError
        # masked the original exception.
        data_id = None
        try:
            data_id = message[WorkerProtocol.DATA_ID]
            data_location = DataLocation.objects.get(data__id=data_id)
            if not getattr(settings, 'FLOW_MANAGER_KEEP_DATA', False):
                try:
                    def handle_error(func, path, exc_info):
                        """Handle permission errors while removing data directories."""
                        if isinstance(exc_info[1], PermissionError):
                            os.chmod(path, 0o700)
                            shutil.rmtree(path)

                    # Remove secrets directory, but leave the rest of the
                    # runtime directory intact. Runtime directory will be
                    # removed during data purge, when the data object is
                    # removed.
                    secrets_dir = os.path.join(
                        self._get_per_data_dir('RUNTIME_DIR', data_location.subpath),
                        ExecutorFiles.SECRETS_DIR
                    )
                    shutil.rmtree(secrets_dir, onerror=handle_error)
                except OSError:
                    logger.exception("Manager exception while removing data runtime directory.")

            if message[WorkerProtocol.FINISH_SPAWNED]:
                await database_sync_to_async(self._data_scan)(**message[WorkerProtocol.FINISH_COMMUNICATE_EXTRA])
        except Exception:
            logger.exception(
                "Unknown error occured while processing finish control command.",
                extra={'data_id': data_id}
            )
            raise
        finally:
            await self.sync_counter.dec('executor')

    elif cmd == WorkerProtocol.ABORT:
        await self.sync_counter.dec('executor')

    else:
        logger.error(__("Ignoring unknown manager control command '{}'.", cmd))
Handle an event from the Channels layer. Channels layer callback, do not call directly.
entailment
def _ensure_counter(self):
    """Ensure the sync counter is a valid non-dummy object."""
    if isinstance(self.sync_counter, self._SynchronizationManager):
        return
    self.sync_counter = self._SynchronizationManager()
Ensure the sync counter is a valid non-dummy object.
entailment
async def execution_barrier(self):
    """Wait for executors to finish.

    At least one must finish after this point to avoid a deadlock.
    """
    async def _wait_then_exit():
        """Block on the sync counter, then shut the consumer down."""
        async with self.sync_counter:
            pass
        await consumer.exit_consumer()

    self._ensure_counter()
    await asyncio.wait([
        consumer.run_consumer(),
        _wait_then_exit(),
    ])
    self.sync_counter = self._SynchronizationManagerDummy()
Wait for executors to finish. At least one must finish after this point to avoid a deadlock.
entailment
async def communicate(self, data_id=None, run_sync=False, save_settings=True):
    """Scan database for resolving Data objects and process them.

    This is submitted as a task to the manager's channel workers.

    :param data_id: Optional id of Data object which (+ its children)
        should be processed. If it is not given, all resolving objects
        are processed.
    :param run_sync: If ``True``, wait until all processes spawned from
        this point on have finished processing. If no processes are
        spawned, this results in a deadlock, since counts are handled on
        process finish.
    :param save_settings: If ``True``, save the current Django settings
        context to the global state. This should never be ``True`` for
        "automatic" calls, such as from Django signals, which can be
        invoked from inappropriate contexts (such as in the listener).
        For user code, it should be left at the default value. The saved
        settings are in effect until the next such call.
    """
    executor = getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local')
    logger.debug(__(
        "Manager sending communicate command on '{}' triggered by Data with id {}.",
        state.MANAGER_CONTROL_CHANNEL,
        data_id,
    ))

    # Either persist the current settings as the new override, or reuse the
    # previously saved override; either way it is shipped with the message.
    saved_settings = self.state.settings_override
    if save_settings:
        saved_settings = self._marshal_settings()
        self.state.settings_override = saved_settings

    if run_sync:
        # Count this communicate so the execution barrier below has
        # something to wait on.
        self._ensure_counter()
        await self.sync_counter.inc('communicate')
    try:
        await consumer.send_event({
            WorkerProtocol.COMMAND: WorkerProtocol.COMMUNICATE,
            WorkerProtocol.COMMUNICATE_SETTINGS: saved_settings,
            WorkerProtocol.COMMUNICATE_EXTRA: {
                'data_id': data_id,
                'executor': executor,
            },
        })
    except ChannelFull:
        # The message never made it out, so undo the increment above.
        # NOTE(review): this decrements even when run_sync is False —
        # presumably safe against the dummy counter; confirm.
        logger.exception("ChannelFull error occurred while sending communicate message.")
        await self.sync_counter.dec('communicate')

    # NOTE(review): the barrier is entered only when the counter reports
    # inactive — confirm intended semantics of `active` here.
    if run_sync and not self.sync_counter.active:
        logger.debug(__(
            "Manager on channel '{}' entering synchronization block.",
            state.MANAGER_CONTROL_CHANNEL
        ))
        await self.execution_barrier()
        logger.debug(__(
            "Manager on channel '{}' exiting synchronization block.",
            state.MANAGER_CONTROL_CHANNEL
        ))
Scan database for resolving Data objects and process them. This is submitted as a task to the manager's channel workers. :param data_id: Optional id of Data object which (+ its children) should be processes. If it is not given, all resolving objects are processed. :param run_sync: If ``True``, wait until all processes spawned from this point on have finished processing. If no processes are spawned, this results in a deadlock, since counts are handled on process finish. :param save_settings: If ``True``, save the current Django settings context to the global state. This should never be ``True`` for "automatic" calls, such as from Django signals, which can be invoked from inappropriate contexts (such as in the listener). For user code, it should be left at the default value. The saved settings are in effect until the next such call.
entailment
def _data_execute(self, data, program, executor):
    """Execute the Data object.

    The activities carried out here include target directory preparation,
    executor copying, setting serialization and actual execution of the
    object.

    :param data: The :class:`~resolwe.flow.models.Data` object to execute.
    :param program: The process text the manager got out of execution
        engine evaluation.
    :param executor: The executor to use for this object.
    """
    # An empty program means there is nothing to run.
    if not program:
        return

    logger.debug(__("Manager preparing Data with id {} for processing.", data.id))

    # Prepare the executor's environment.
    try:
        executor_env_vars = self.get_executor().get_environment_variables()
        program = self._include_environment_variables(program, executor_env_vars)
        data_dir = self._prepare_data_dir(data)
        executor_module, runtime_dir = self._prepare_executor(data, executor)

        # Execute execution engine specific runtime preparation.
        execution_engine = data.process.run.get('language', None)
        volume_maps = self.get_execution_engine(execution_engine).prepare_runtime(runtime_dir, data)

        self._prepare_context(data.id, data_dir, runtime_dir, RUNTIME_VOLUME_MAPS=volume_maps)
        self._prepare_script(runtime_dir, program)

        # Command line that launches the deployed executor package.
        argv = [
            '/bin/bash',
            '-c',
            self.settings_actual.get('FLOW_EXECUTOR', {}).get('PYTHON', '/usr/bin/env python')
            + ' -m executors ' + executor_module
        ]
    except PermissionDenied as error:
        # Record the failure on the object itself so it is not retried.
        data.status = Data.STATUS_ERROR
        data.process_error.append("Permission denied for process: {}".format(error))
        data.save()
        return
    except OSError as err:
        # Filesystem-level preparation failure; skip this object for now.
        logger.error(__(
            "OSError occurred while preparing data {} (will skip): {}",
            data.id, err
        ))
        return

    # Hand off to the run() method for execution.
    logger.info(__("Running {}", runtime_dir))
    self.run(data, runtime_dir, argv)
Execute the Data object. The activities carried out here include target directory preparation, executor copying, setting serialization and actual execution of the object. :param data: The :class:`~resolwe.flow.models.Data` object to execute. :param program: The process text the manager got out of execution engine evaluation. :param executor: The executor to use for this object.
entailment
def _data_scan(self, data_id=None, executor='resolwe.flow.executors.local', **kwargs):
    """Scan for new Data objects and execute them.

    :param data_id: Optional id of Data object which (+ its children)
        should be scanned. If it is not given, all resolving objects are
        processed.
    :param executor: The fully qualified name of the executor to use for
        all :class:`~resolwe.flow.models.Data` objects discovered in this
        pass.
    """
    def process_data_object(data):
        """Process a single data object."""
        # Lock for update. Note that we want this transaction to be as short as possible in
        # order to reduce contention and avoid deadlocks. This is why we do not lock all
        # resolving objects for update, but instead only lock one object at a time. This
        # allows managers running in parallel to process different objects.
        data = Data.objects.select_for_update().get(pk=data.pk)
        if data.status != Data.STATUS_RESOLVING:
            # The object might have already been processed while waiting for the lock to be
            # obtained. In this case, skip the object.
            return

        dep_status = dependency_status(data)

        if dep_status == Data.STATUS_ERROR:
            data.status = Data.STATUS_ERROR
            data.process_error.append("One or more inputs have status ERROR")
            data.process_rc = 1
            data.save()
            return
        elif dep_status != Data.STATUS_DONE:
            # Dependencies are still being computed; try again on a later scan.
            return

        if data.process.run:
            try:
                execution_engine = data.process.run.get('language', None)
                # Evaluation by the execution engine may spawn additional data objects and
                # perform other queries on the database. Queries of all possible execution
                # engines need to be audited for possibilities of deadlocks in case any
                # additional locks are introduced. Currently, we only take an explicit lock on
                # the currently processing object.
                program = self.get_execution_engine(execution_engine).evaluate(data)
            except (ExecutionError, InvalidEngineError) as error:
                data.status = Data.STATUS_ERROR
                data.process_error.append("Error in process script: {}".format(error))
                data.save()
                return

            # Set allocated resources:
            resource_limits = data.process.get_resource_limits()
            data.process_memory = resource_limits['memory']
            data.process_cores = resource_limits['cores']
        else:
            # If there is no run section, then we should not try to run anything. But the
            # program must not be set to None as then the process will be stuck in waiting
            # state.
            program = ''

        if data.status != Data.STATUS_DONE:
            # The data object may already be marked as done by the execution engine. In this
            # case we must not revert the status to STATUS_WAITING.
            data.status = Data.STATUS_WAITING
        data.save(render_name=True)

        # Actually run the object only if there was nothing with the transaction.
        transaction.on_commit(
            # Make sure the closure gets the right values here, since they're
            # changed in the loop.
            lambda d=data, p=program: self._data_execute(d, p, executor)
        )

    logger.debug(__("Manager processing communicate command triggered by Data with id {}.", data_id))

    if is_testing():
        # NOTE: This is a work-around for Django issue #10827
        # (https://code.djangoproject.com/ticket/10827), same as in
        # TestCaseHelpers._pre_setup(). Because the worker is running
        # independently, it must clear the cache on its own.
        ContentType.objects.clear_cache()

    # Ensure settings overrides apply
    self.discover_engines(executor=executor)

    try:
        queryset = Data.objects.filter(status=Data.STATUS_RESOLVING)
        if data_id is not None:
            # Scan only given data object and its children.
            queryset = queryset.filter(Q(parents=data_id) | Q(id=data_id)).distinct()

        for data in queryset:
            try:
                with transaction.atomic():
                    process_data_object(data)

                    # All data objects created by the execution engine are commited after this
                    # point and may be processed by other managers running in parallel. At the
                    # same time, the lock for the current data object is released.
            except Exception as error:  # pylint: disable=broad-except
                logger.exception(__(
                    "Unhandled exception in _data_scan while processing data object {}.",
                    data.pk
                ))

                # Unhandled error while processing a data object. We must set its
                # status to STATUS_ERROR to prevent the object from being retried
                # on next _data_scan run. We must perform this operation without
                # using the Django ORM as using the ORM may be the reason the error
                # occurred in the first place.
                error_msg = "Internal error: {}".format(error)
                process_error_field = Data._meta.get_field('process_error')  # pylint: disable=protected-access
                max_length = process_error_field.base_field.max_length
                if len(error_msg) > max_length:
                    # Truncate to fit the database column, with an ellipsis marker.
                    error_msg = error_msg[:max_length - 3] + '...'

                try:
                    with connection.cursor() as cursor:
                        cursor.execute(
                            """
                                UPDATE {table}
                                SET
                                    status = %(status)s,
                                    process_error = process_error || (%(error)s)::varchar[]
                                WHERE id = %(id)s
                            """.format(
                                table=Data._meta.db_table  # pylint: disable=protected-access
                            ),
                            {
                                'status': Data.STATUS_ERROR,
                                'error': [error_msg],
                                'id': data.pk
                            }
                        )
                except Exception as error:  # pylint: disable=broad-except
                    # If object's state cannot be changed due to some database-related
                    # issue, at least skip the object for this run.
                    logger.exception(__(
                        "Unhandled exception in _data_scan while trying to emit error for {}.",
                        data.pk
                    ))
    except IntegrityError as exp:
        logger.error(__("IntegrityError in manager {}", exp))
        return
Scan for new Data objects and execute them. :param data_id: Optional id of Data object which (+ its children) should be scanned. If it is not given, all resolving objects are processed. :param executor: The fully qualified name of the executor to use for all :class:`~resolwe.flow.models.Data` objects discovered in this pass.
entailment