Columns: sentence1 (string, lengths 52 to 3.87M), sentence2 (string, lengths 1 to 47.2k), label (string class, 1 value: "entailment")
def get_expression_engine(self, name): """Return an expression engine instance.""" try: return self.expression_engines[name] except KeyError: raise InvalidEngineError("Unsupported expression engine: {}".format(name))
Return an expression engine instance.
entailment
def get_execution_engine(self, name): """Return an execution engine instance.""" try: return self.execution_engines[name] except KeyError: raise InvalidEngineError("Unsupported execution engine: {}".format(name))
Return an execution engine instance.
entailment
def load_executor(self, executor_name): """Load process executor.""" executor_name = executor_name + '.prepare' module = import_module(executor_name) return module.FlowExecutorPreparer()
Load process executor.
entailment
def extract_subjects(cert_pem): """Extract subjects from a DataONE PEM (Base64) encoded X.509 v3 certificate. Args: cert_pem: str or bytes PEM (Base64) encoded X.509 v3 certificate Returns: 2-tuple: - The primary subject string, extracted from the certificate DN. - A set of equivalent identities, group memberships and inferred symbolic subjects extracted from the SubjectInfo (if present). - All returned subjects are DataONE compliant serializations. - A copy of the primary subject is always included in the set of equivalent identities. """ primary_str, subject_info_xml = d1_common.cert.x509.extract_subjects(cert_pem) equivalent_set = { primary_str, d1_common.const.SUBJECT_AUTHENTICATED, d1_common.const.SUBJECT_PUBLIC, } if subject_info_xml is not None: equivalent_set |= d1_common.cert.subject_info.extract_subjects( subject_info_xml, primary_str ) return primary_str, equivalent_set
Extract subjects from a DataONE PEM (Base64) encoded X.509 v3 certificate. Args: cert_pem: str or bytes PEM (Base64) encoded X.509 v3 certificate Returns: 2-tuple: - The primary subject string, extracted from the certificate DN. - A set of equivalent identities, group memberships and inferred symbolic subjects extracted from the SubjectInfo (if present). - All returned subjects are DataONE compliant serializations. - A copy of the primary subject is always included in the set of equivalent identities.
entailment
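A minimal usage sketch for ``extract_subjects()`` above; the certificate path is hypothetical, and the d1_common modules are assumed to be importable as in the function body::

    # Hedged sketch: read a PEM certificate from a hypothetical path and
    # list the DataONE subjects it authenticates.
    with open('client_cert.pem', 'rb') as f:
        cert_pem = f.read()
    primary_str, equivalent_set = extract_subjects(cert_pem)
    print('Primary subject: {}'.format(primary_str))
    for subject_str in sorted(equivalent_set):
        print('  {}'.format(subject_str))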
def get_queryset(self): # pylint: disable=method-hidden """Return queryset.""" if self.request and self.request.query_params.get('hydrate_data', False): return self.queryset.prefetch_related('data__entity_set') return self.queryset
Return queryset.
entailment
def _get_collection_for_user(self, collection_id, user): """Check that collection exists and user has `add` permission.""" collection_query = Collection.objects.filter(pk=collection_id) if not collection_query.exists(): raise exceptions.ValidationError('Collection id does not exist') collection = collection_query.first() if not user.has_perm('add_collection', obj=collection): if user.is_authenticated: raise exceptions.PermissionDenied() else: raise exceptions.NotFound() return collection
Check that collection exists and user has `add` permission.
entailment
def _get_entities(self, user, ids): """Return entities queryset based on provided entity ids.""" queryset = get_objects_for_user(user, 'view_entity', Entity.objects.filter(id__in=ids)) actual_ids = queryset.values_list('id', flat=True) missing_ids = list(set(ids) - set(actual_ids)) if missing_ids: raise exceptions.ParseError( "Entities with the following ids not found: {}" .format(', '.join(map(str, missing_ids))) ) return queryset
Return entities queryset based on provided entity ids.
entailment
def set_content_permissions(self, user, obj, payload): """Apply permissions to data objects in ``Entity``.""" # Data doesn't have "ADD" permission, so it has to be removed payload = remove_permission(payload, 'add') for data in obj.data.all(): if user.has_perm('share_data', data): update_permission(data, payload)
Apply permissions to data objects in ``Entity``.
entailment
def destroy(self, request, *args, **kwargs): """Destroy a model instance. If the ``delete_content`` flag is set in the query parameters, all Data objects contained in the entity will also be deleted. """ obj = self.get_object() user = request.user if strtobool(request.query_params.get('delete_content', 'false')): for data in obj.data.all(): if user.has_perm('edit_data', data): data.delete() # If all data objects in an entity are removed, the entity may # have already been removed, so there is no need to call destroy. if not Entity.objects.filter(pk=obj.pk).exists(): return Response(status=status.HTTP_204_NO_CONTENT) # NOTE: Collection's ``destroy`` method should be skipped, so we # intentionally call its parent. return super(CollectionViewSet, self).destroy( # pylint: disable=no-member,bad-super-call request, *args, **kwargs )
Destroy a model instance. If the ``delete_content`` flag is set in the query parameters, all Data objects contained in the entity will also be deleted.
entailment
def add_to_collection(self, request, pk=None): """Add Entity to a collection.""" entity = self.get_object() # TODO: use `self.get_ids` (and elsewhere). (Backwards # incompatible because the raised error's response contains # ``detail`` instead of ``error``.) if 'ids' not in request.data: return Response({"error": "`ids` parameter is required"}, status=status.HTTP_400_BAD_REQUEST) for collection_id in request.data['ids']: self._get_collection_for_user(collection_id, request.user) for collection_id in request.data['ids']: entity.collections.add(collection_id) collection = Collection.objects.get(pk=collection_id) for data in entity.data.all(): collection.data.add(data) return Response()
Add Entity to a collection.
entailment
def add_data(self, request, pk=None): """Add data to Entity and its collections.""" # add data to entity resp = super().add_data(request, pk) # add data to collections the entity is in entity = self.get_object() for collection in entity.collections.all(): collection.data.add(*request.data['ids']) return resp
Add data to Entity and its collections.
entailment
def move_to_collection(self, request, *args, **kwargs): """Move samples from source to destination collection.""" ids = self.get_ids(request.data) src_collection_id = self.get_id(request.data, 'source_collection') dst_collection_id = self.get_id(request.data, 'destination_collection') src_collection = self._get_collection_for_user(src_collection_id, request.user) dst_collection = self._get_collection_for_user(dst_collection_id, request.user) entity_qs = self._get_entities(request.user, ids) entity_qs.move_to_collection(src_collection, dst_collection) return Response()
Move samples from source to destination collection.
entailment
def update(self, request, *args, **kwargs): """Update an entity. The original queryset produces a temporary database table whose rows cannot be selected for an update. As a workaround, we patch the get_queryset method to return only Entity objects, without the additional data that is not needed for the update. """ orig_get_queryset = self.get_queryset def patched_get_queryset(): """Patched get_queryset method.""" entity_ids = orig_get_queryset().values_list('id', flat=True) return Entity.objects.filter(id__in=entity_ids) self.get_queryset = patched_get_queryset resp = super().update(request, *args, **kwargs) self.get_queryset = orig_get_queryset return resp
Update an entity. The original queryset produces a temporary database table whose rows cannot be selected for an update. As a workaround, we patch the get_queryset method to return only Entity objects, without the additional data that is not needed for the update.
entailment
async def start(self): """Start process execution.""" # arguments passed to the Docker command command_args = { 'command': self.command, 'container_image': self.requirements.get('image', constants.DEFAULT_CONTAINER_IMAGE), } # Get limit defaults. limit_defaults = SETTINGS.get('FLOW_PROCESS_RESOURCE_DEFAULTS', {}) # Set resource limits. limits = [] # Each core is equivalent to 1024 CPU shares. The default for Docker containers # is 1024 shares (we don't need to explicitly set that). limits.append('--cpu-shares={}'.format(int(self.process['resource_limits']['cores']) * 1024)) # Some SWAP is needed to avoid OOM signal. Swappiness is low to prevent # extensive usage of SWAP (this would reduce the performance). memory = self.process['resource_limits']['memory'] + DOCKER_MEMORY_HARD_LIMIT_BUFFER memory_swap = int(memory * DOCKER_MEMORY_SWAP_RATIO) limits.append('--memory={}m'.format(memory)) limits.append('--memory-swap={}m'.format(memory_swap)) limits.append('--memory-reservation={}m'.format(self.process['resource_limits']['memory'])) limits.append('--memory-swappiness={}'.format(DOCKER_MEMORY_SWAPPINESS)) # Set ulimits for interactive processes to prevent them from running too long. if self.process['scheduling_class'] == PROCESS_META['SCHEDULING_CLASS_INTERACTIVE']: # TODO: This is not very good as each child gets the same limit. limits.append('--ulimit cpu={}'.format(limit_defaults.get('cpu_time_interactive', 30))) command_args['limits'] = ' '.join(limits) # set container name self.container_name_prefix = SETTINGS.get('FLOW_EXECUTOR', {}).get('CONTAINER_NAME_PREFIX', 'resolwe') command_args['container_name'] = '--name={}'.format(self._generate_container_name()) if 'network' in self.resources: # Configure Docker network mode for the container (if specified). # By default, current Docker versions use the 'bridge' mode which # creates a network stack on the default Docker bridge. network = SETTINGS.get('FLOW_EXECUTOR', {}).get('NETWORK', '') command_args['network'] = '--net={}'.format(network) if network else '' else: # No network if not specified. command_args['network'] = '--net=none' # Security options. security = [] # Generate and set seccomp policy to limit syscalls. policy_file = tempfile.NamedTemporaryFile(mode='w') json.dump(SECCOMP_POLICY, policy_file) policy_file.file.flush() if not SETTINGS.get('FLOW_DOCKER_DISABLE_SECCOMP', False): security.append('--security-opt seccomp={}'.format(policy_file.name)) self.temporary_files.append(policy_file) # Drop all capabilities and only add ones that are needed. security.append('--cap-drop=all') command_args['security'] = ' '.join(security) # Setup Docker volumes. def new_volume(kind, base_dir_name, volume, path=None, read_only=True): """Generate a new volume entry. :param kind: Kind of volume, which is used for getting extra options from settings (the ``FLOW_DOCKER_VOLUME_EXTRA_OPTIONS`` setting) :param base_dir_name: Name of base directory setting for volume source path :param volume: Destination volume mount point :param path: Optional additional path atoms appended to source path :param read_only: True to make the volume read-only """ if path is None: path = [] path = [str(atom) for atom in path] options = set(SETTINGS.get('FLOW_DOCKER_VOLUME_EXTRA_OPTIONS', {}).get(kind, '').split(',')) options.discard('') # Do not allow modification of read-only option. 
options.discard('ro') options.discard('rw') if read_only: options.add('ro') else: options.add('rw') return { 'src': os.path.join(SETTINGS['FLOW_EXECUTOR'].get(base_dir_name, ''), *path), 'dest': volume, 'options': ','.join(options), } volumes = [ new_volume( 'data', 'DATA_DIR', constants.DATA_VOLUME, [DATA_LOCATION['subpath']], read_only=False ), new_volume('data_all', 'DATA_DIR', constants.DATA_ALL_VOLUME), new_volume('upload', 'UPLOAD_DIR', constants.UPLOAD_VOLUME, read_only=False), new_volume( 'secrets', 'RUNTIME_DIR', constants.SECRETS_VOLUME, [DATA_LOCATION['subpath'], ExecutorFiles.SECRETS_DIR] ), ] # Generate dummy passwd and create mappings for it. This is required because some tools # inside the container may try to lookup the given UID/GID and will crash if they don't # exist. So we create minimal user/group files. passwd_file = tempfile.NamedTemporaryFile(mode='w') passwd_file.write('root:x:0:0:root:/root:/bin/bash\n') passwd_file.write('user:x:{}:{}:user:/:/bin/bash\n'.format(os.getuid(), os.getgid())) passwd_file.file.flush() self.temporary_files.append(passwd_file) group_file = tempfile.NamedTemporaryFile(mode='w') group_file.write('root:x:0:\n') group_file.write('user:x:{}:user\n'.format(os.getgid())) group_file.file.flush() self.temporary_files.append(group_file) volumes += [ new_volume('users', None, '/etc/passwd', [passwd_file.name]), new_volume('users', None, '/etc/group', [group_file.name]), ] # Create volumes for tools. # NOTE: To prevent processes tampering with tools, all tools are mounted read-only self.tools_volumes = [] for index, tool in enumerate(self.get_tools_paths()): self.tools_volumes.append(new_volume( 'tools', None, os.path.join('/usr/local/bin/resolwe', str(index)), [tool] )) volumes += self.tools_volumes # Create volumes for runtime (all read-only). runtime_volume_maps = SETTINGS.get('RUNTIME_VOLUME_MAPS', None) if runtime_volume_maps: for src, dst in runtime_volume_maps.items(): volumes.append(new_volume( 'runtime', 'RUNTIME_DIR', dst, [DATA_LOCATION['subpath'], src], )) # Add any extra volumes verbatim. volumes += SETTINGS.get('FLOW_DOCKER_EXTRA_VOLUMES', []) # Make sure that tmp dir exists. os.makedirs(constants.TMPDIR, mode=0o755, exist_ok=True) # Create Docker --volume parameters from volumes. command_args['volumes'] = ' '.join(['--volume="{src}":"{dest}":{options}'.format(**volume) for volume in volumes]) # Set working directory to the data volume. command_args['workdir'] = '--workdir={}'.format(constants.DATA_VOLUME) # Change user inside the container. command_args['user'] = '--user={}:{}'.format(os.getuid(), os.getgid()) # A non-login Bash shell should be used here (a subshell will be spawned later). command_args['shell'] = '/bin/bash' # Check if image exists locally. 
If not, command will exit with non-zero returncode check_command = '{command} image inspect {container_image}'.format(**command_args) logger.debug("Checking existence of docker image: {}".format(command_args['container_image'])) check_proc = await subprocess.create_subprocess_exec( # pylint: disable=no-member *shlex.split(check_command), stdout=subprocess.PIPE, stderr=subprocess.PIPE ) await check_proc.communicate() if check_proc.returncode != 0: pull_command = '{command} pull {container_image}'.format(**command_args) logger.info("Pulling docker image: {}".format(command_args['container_image'])) pull_proc = await subprocess.create_subprocess_exec( # pylint: disable=no-member *shlex.split(pull_command), stdout=subprocess.PIPE, stderr=subprocess.PIPE ) _, stderr = await pull_proc.communicate() if pull_proc.returncode != 0: error_msg = "Docker failed to pull {} image.".format(command_args['container_image']) if stderr: error_msg = '\n'.join([error_msg, stderr.decode('utf-8')]) raise RuntimeError(error_msg) docker_command = ( '{command} run --rm --interactive {container_name} {network} {volumes} {limits} ' '{security} {workdir} {user} {container_image} {shell}'.format(**command_args) ) logger.info("Starting docker container with command: {}".format(docker_command)) start_time = time.time() # Workaround for pylint issue #1469 # (https://github.com/PyCQA/pylint/issues/1469). self.proc = await subprocess.create_subprocess_exec( # pylint: disable=no-member *shlex.split(docker_command), limit=4 * (2 ** 20), # 4MB buffer size for line buffering stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) stdout = [] async def wait_for_container(): """Wait for Docker container to start to avoid blocking the code that uses it.""" self.proc.stdin.write(('echo PING' + os.linesep).encode('utf-8')) await self.proc.stdin.drain() while True: line = await self.proc.stdout.readline() stdout.append(line) if line.rstrip() == b'PING': break if self.proc.stdout.at_eof(): raise RuntimeError() try: await asyncio.wait_for(wait_for_container(), timeout=DOCKER_START_TIMEOUT) except (asyncio.TimeoutError, RuntimeError): error_msg = "Docker container has not started for {} seconds.".format(DOCKER_START_TIMEOUT) stdout = ''.join([line.decode('utf-8') for line in stdout if line]) if stdout: error_msg = '\n'.join([error_msg, stdout]) raise RuntimeError(error_msg) end_time = time.time() logger.info("It took {:.2f}s for Docker container to start".format(end_time - start_time)) self.stdout = self.proc.stdout
Start process execution.
entailment
async def run_script(self, script): """Execute the script and save results.""" # Create a Bash command to add all the tools to PATH. tools_paths = ':'.join([map_["dest"] for map_ in self.tools_volumes]) add_tools_path = 'export PATH=$PATH:{}'.format(tools_paths) # Spawn another child bash, to avoid running anything as PID 1, which has special # signal handling (e.g., cannot be SIGKILL-ed from inside). # A login Bash shell is needed to source /etc/profile. bash_line = '/bin/bash --login; exit $?' + os.linesep script = os.linesep.join(['set -x', 'set +B', add_tools_path, script]) + os.linesep self.proc.stdin.write(bash_line.encode('utf-8')) await self.proc.stdin.drain() self.proc.stdin.write(script.encode('utf-8')) await self.proc.stdin.drain() self.proc.stdin.close()
Execute the script and save results.
entailment
async def end(self): """End process execution.""" try: await self.proc.wait() finally: # Cleanup temporary files. for temporary_file in self.temporary_files: temporary_file.close() self.temporary_files = [] return self.proc.returncode
End process execution.
entailment
async def terminate(self): """Terminate a running script.""" # Workaround for pylint issue #1469 # (https://github.com/PyCQA/pylint/issues/1469). cmd = await subprocess.create_subprocess_exec( # pylint: disable=no-member *shlex.split('{} rm -f {}'.format(self.command, self._generate_container_name())) ) await cmd.wait() await self.proc.wait() await super().terminate()
Terminate a running script.
entailment
def iterjson(text): """Decode JSON stream.""" decoder = json.JSONDecoder() while text: obj, ndx = decoder.raw_decode(text) if not isinstance(obj, dict): raise ValueError() text = text[ndx:].lstrip('\r\n') yield obj
Decode JSON stream.
entailment
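``iterjson()`` consumes a string holding any number of concatenated JSON objects, such as the ``proc.*`` updates a process writes to stdout. An illustrative run::

    # Two JSON objects back to back, separated by a newline.
    text = '{"proc.progress": 0.5}\n{"proc.rc": 0}\n'
    for obj in iterjson(text):
        print(obj)
    # {'proc.progress': 0.5}
    # {'proc.rc': 0}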
async def _send_manager_command(self, *args, **kwargs): """Send an update to manager and terminate the process if it fails.""" resp = await send_manager_command(*args, **kwargs) if resp is False: await self.terminate()
Send an update to manager and terminate the process if it fails.
entailment
async def update_data_status(self, **kwargs): """Update (PATCH) Data object. :param kwargs: The dictionary of :class:`~resolwe.flow.models.Data` attributes to be changed. """ await self._send_manager_command(ExecutorProtocol.UPDATE, extra_fields={ ExecutorProtocol.UPDATE_CHANGESET: kwargs })
Update (PATCH) Data object. :param kwargs: The dictionary of :class:`~resolwe.flow.models.Data` attributes to be changed.
entailment
async def run(self, data_id, script): """Execute the script and save results.""" logger.debug("Executor for Data with id {} has started.".format(data_id)) try: finish_fields = await self._run(data_id, script) except SystemExit as ex: raise ex except Exception as error: # pylint: disable=broad-except logger.exception("Unhandled exception in executor") # Send error report. await self.update_data_status(process_error=[str(error)], status=DATA_META['STATUS_ERROR']) finish_fields = { ExecutorProtocol.FINISH_PROCESS_RC: 1, } if finish_fields is not None: await self._send_manager_command(ExecutorProtocol.FINISH, extra_fields=finish_fields)
Execute the script and save results.
entailment
def _create_file(self, filename): """Ensure a new file is created and opened for writing.""" file_descriptor = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_EXCL) return os.fdopen(file_descriptor, 'w')
Ensure a new file is created and opened for writing.
entailment
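Because of ``os.O_EXCL``, a second call with the same filename raises ``FileExistsError`` instead of silently truncating earlier output; this is what ``_run()`` below relies on to detect a duplicate executor run. A hedged sketch (``executor`` is a hypothetical instance)::

    log_file = executor._create_file('stdout.txt')   # creates the file
    log_file.close()
    executor._create_file('stdout.txt')              # raises FileExistsError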
async def _run(self, data_id, script): """Execute the script and save results.""" self.data_id = data_id # Fetch data instance to get any executor requirements. self.process = PROCESS requirements = self.process['requirements'] self.requirements = requirements.get('executor', {}).get(self.name, {}) # pylint: disable=no-member self.resources = requirements.get('resources', {}) logger.debug("Preparing output files for Data with id {}".format(data_id)) os.chdir(EXECUTOR_SETTINGS['DATA_DIR']) try: log_file = self._create_file('stdout.txt') json_file = self._create_file('jsonout.txt') except FileExistsError: logger.error("Stdout or jsonout file already exists.") # Looks like the executor was already run for this Data object, # so don't raise the error to prevent setting status to error. await self._send_manager_command(ExecutorProtocol.ABORT, expect_reply=False) return proc_pid = await self.start() await self.update_data_status( status=DATA_META['STATUS_PROCESSING'], process_pid=proc_pid ) # Run process and handle intermediate results logger.info("Running program for Data with id {}".format(data_id)) logger.debug("The program for Data with id {} is: \n{}".format(data_id, script)) await self.run_script(script) spawn_processes = [] output = {} process_error, process_warning, process_info = [], [], [] process_progress, process_rc = 0, 0 # read process output try: stdout = self.get_stdout() while True: line = await stdout.readline() logger.debug("Process's output: {}".format(line.strip())) if not line: break line = line.decode('utf-8') try: if line.strip().startswith('run'): # Save process and spawn if no errors log_file.write(line) log_file.flush() for obj in iterjson(line[3:].strip()): spawn_processes.append(obj) elif line.strip().startswith('export'): file_name = line[6:].strip() export_folder = SETTINGS['FLOW_EXECUTOR']['UPLOAD_DIR'] unique_name = 'export_{}'.format(uuid.uuid4().hex) export_path = os.path.join(export_folder, unique_name) self.exported_files_mapper[self.data_id][file_name] = unique_name shutil.move(file_name, export_path) else: # If JSON, save to MongoDB updates = {} for obj in iterjson(line): for key, val in obj.items(): if key.startswith('proc.'): if key == 'proc.error': process_error.append(val) if not process_rc: process_rc = 1 updates['process_rc'] = process_rc updates['process_error'] = process_error updates['status'] = DATA_META['STATUS_ERROR'] elif key == 'proc.warning': process_warning.append(val) updates['process_warning'] = process_warning elif key == 'proc.info': process_info.append(val) updates['process_info'] = process_info elif key == 'proc.rc': process_rc = int(val) updates['process_rc'] = process_rc if process_rc != 0: updates['status'] = DATA_META['STATUS_ERROR'] elif key == 'proc.progress': process_progress = int(float(val) * 100) updates['process_progress'] = process_progress else: output[key] = val updates['output'] = output if updates: await self.update_data_status(**updates) # Process meta fields are collected in listener, so we can clear them. 
process_error, process_warning, process_info = [], [], [] if process_rc > 0: log_file.close() json_file.close() await self._send_manager_command(ExecutorProtocol.FINISH, extra_fields={ ExecutorProtocol.FINISH_PROCESS_RC: process_rc }) return # Debug output # Not referenced in Data object json_file.write(line) json_file.flush() except ValueError as ex: # Ignore if not JSON log_file.write(line) log_file.flush() except MemoryError as ex: logger.error("Out of memory:\n\n{}".format(ex)) except IOError as ex: # TODO: if ex.errno == 28: no more free space raise ex finally: # Store results log_file.close() json_file.close() return_code = await self.end() if process_rc < return_code: process_rc = return_code # send a notification to the executor listener that we're done finish_fields = { ExecutorProtocol.FINISH_PROCESS_RC: process_rc } if spawn_processes and process_rc == 0: finish_fields[ExecutorProtocol.FINISH_SPAWN_PROCESSES] = spawn_processes finish_fields[ExecutorProtocol.FINISH_EXPORTED_FILES] = self.exported_files_mapper return finish_fields
Execute the script and save results.
entailment
def get_total_size_of_queued_replicas(): """Return the total number of bytes of requested, unprocessed replicas.""" return ( d1_gmn.app.models.ReplicationQueue.objects.filter( local_replica__info__status__status='queued' ).aggregate(Sum('size'))['size__sum'] or 0 )
Return the total number of bytes of requested, unprocessed replicas.
entailment
def add_to_replication_queue(source_node_urn, sysmeta_pyxb): """Add a replication request issued by a CN to a queue that is processed asynchronously. Preconditions: - sysmeta_pyxb.identifier is verified to be available for create. E.g., with d1_gmn.app.views.is_valid_pid_for_create(pid). Postconditions: - The database is set up to track a new replica, with initial status, "queued". - The PID provided in the sysmeta_pyxb is reserved for the replica. """ replica_info_model = d1_gmn.app.models.replica_info( status_str='queued', source_node_urn=source_node_urn ) local_replica_model = d1_gmn.app.models.local_replica( pid=d1_common.xml.get_req_val(sysmeta_pyxb.identifier), replica_info_model=replica_info_model, ) d1_gmn.app.models.replication_queue( local_replica_model=local_replica_model, size=sysmeta_pyxb.size )
Add a replication request issued by a CN to a queue that is processed asynchronously. Preconditions: - sysmeta_pyxb.identifier is verified to be available for create. E.g., with d1_gmn.app.views.is_valid_pid_for_create(pid). Postconditions: - The database is set up to track a new replica, with initial status, "queued". - The PID provided in the sysmeta_pyxb is reserved for the replica.
entailment
def add_arguments(parser, doc_str, add_base_url=True): """Add standard arguments for DataONE utilities to a command line parser.""" parser.description = doc_str parser.formatter_class = argparse.RawDescriptionHelpFormatter parser.add_argument("--debug", action="store_true", help="Debug level logging") parser.add_argument( "--cert-pub", dest="cert_pem_path", action="store", default=django.conf.settings.CLIENT_CERT_PATH, help="Path to PEM formatted public key of certificate", ) parser.add_argument( "--cert-key", dest="cert_key_path", action="store", default=django.conf.settings.CLIENT_CERT_PRIVATE_KEY_PATH, help="Path to PEM formatted private key of certificate", ) parser.add_argument( "--public", action="store_true", help="Do not use certificate even if available" ) parser.add_argument( "--disable-server-cert-validation", action="store_true", help="Do not validate the TLS/SSL server side certificate of the source node (insecure)", ) parser.add_argument( "--timeout", type=float, action="store", default=DEFAULT_TIMEOUT_SEC, help="Timeout for DataONE API calls to the source MN", ) parser.add_argument( "--retries", type=int, action="store", default=DEFAULT_RETRY_COUNT, help="Retry DataONE API calls that raise HTTP level exceptions", ) parser.add_argument( "--page-size", type=int, action="store", default=DEFAULT_PAGE_SIZE, help="Number of objects to retrieve in each list method API call to source MN", ) parser.add_argument( "--major", type=int, action="store", help="Skip automatic detection of API major version and use the provided version", ) parser.add_argument( "--max-concurrent", type=int, action="store", default=DEFAULT_MAX_CONCURRENT_TASK_COUNT, help="Max number of concurrent DataONE API calls", ) if not add_base_url: parser.add_argument( "--baseurl", action="store", default=django.conf.settings.DATAONE_ROOT, help="Remote MN or CN BaseURL", ) else: parser.add_argument("baseurl", help="Remote MN or CN BaseURL")
Add standard arguments for DataONE utilities to a command line parser.
entailment
def path_generator( path_list, include_glob_list=None, exclude_glob_list=None, recursive=True, ignore_invalid=False, default_excludes=True, return_dir_paths=False, ): """# language=rst. Args: path_list: list of str List of file- and dir paths. File paths are used directly and dirs are searched for files. ``path_list`` does not accept glob patterns, as it's more convenient to let the shell expand glob patterns to directly specified files and dirs. E.g., to use a glob to select all .py files in a subdir, the command may be called with sub/dir/*.py, which the shell expands to a list of files, which are then passed to this function. The paths should be Unicode or utf-8 strings. Tilde ("~") to home expansion is performed on the paths. The shell can also expand glob patterns to dir paths or a mix of file and dir paths. include_glob_list: list of str exclude_glob_list: list of str Patterns ending with "/" are matched only against dir names. All other patterns are matched only against file names. If the include list contains any file patterns, files must match one or more of the patterns in order to be returned. If the include list contains any dir patterns, dirs must match one or more of the patterns in order for the recursive search to descend into them. The exclude list works in the same way except that matching files and dirs are excluded instead of included. If both include and exclude lists are specified, files and dirs must both match the include and not match the exclude patterns in order to be returned or descended into. recursive: bool - **True** (default): Search subdirectories - **False**: Do not search subdirectories ignore_invalid: bool - **True**: Invalid paths in path_list are ignored. - **False** (default): EnvironmentError is raised if any of the paths in ``path_list`` do not reference an existing file or dir. default_excludes: bool - **True**: A list of glob patterns for files and dirs that should typically be ignored is added to any exclude patterns passed to the function. These include dirs such as .git and backup files, such as files appended with "~". - **False**: No files or dirs are excluded by default. return_dir_paths: bool - **False**: Only file paths are returned. - **True**: Directory paths are also returned. Returns: File path iterator Notes: During iteration, the iterator can be prevented from descending into a directory by sending a "skip" flag when the iterator yields the directory path. This allows the client to determine if directories should be iterated by, for instance, which files are present in the directory. This can be used in conjunction with the include and exclude glob lists. Note that, in order to receive directory paths that can be skipped, ``return_dir_paths`` must be set to True. The regular ``for...in`` syntax does not support sending the "skip" flag back to the iterator. Instead, use a pattern like: .. highlight: python :: itr = file_iterator.file_iter(..., return_dir_paths=True) try: path = itr.next() while True: skip_dir = determine_if_dir_should_be_skipped(path) file_path = itr.send(skip_dir) except KeyboardInterrupt: raise StopIteration except StopIteration: pass Glob patterns are matched only against file and directory names, not the full paths. Paths passed directly in ``path_list`` are not filtered. The same file can be returned multiple times if ``path_list`` contains duplicated file paths or dir paths, or dir paths that implicitly include the same subdirs. 
``include_glob_list`` and ``exclude_glob_list`` are handy for filtering the files found in dir searches. Remember to escape the include and exclude glob patterns on the command line so that they're not expanded by the shell. """ include_glob_list = include_glob_list or [] exclude_glob_list = exclude_glob_list or [] if default_excludes: exclude_glob_list += DEFAULT_EXCLUDE_GLOB_LIST logging.debug('file_iter():') logging.debug(' paths: {}'.format(', '.join(path_list))) logging.debug(' include: {}'.format(', '.join(include_glob_list))) logging.debug(' exclude: {}'.format(', '.join(exclude_glob_list))) logging.debug(' recursive: {}'.format(recursive)) logging.debug(' ignore_invalid: {}'.format(ignore_invalid)) logging.debug(' default_excludes: {}'.format(default_excludes)) logging.debug(' return_dir_paths: {}'.format(return_dir_paths)) logging.debug('') include_file_glob_list = [ p for p in include_glob_list if not p.endswith(os.path.sep) ] exclude_file_glob_list = [ p for p in exclude_glob_list if not p.endswith(os.path.sep) ] include_dir_glob_list = [p for p in include_glob_list if p.endswith(os.path.sep)] exclude_dir_glob_list = [p for p in exclude_glob_list if p.endswith(os.path.sep)] for path in path_list: path = os.path.expanduser(path) # Return file if os.path.isfile(path): file_name = os.path.split(path)[1] if not _is_filtered( file_name, include_file_glob_list, exclude_file_glob_list ): yield path # Search directory elif os.path.isdir(path): yield from _filtered_walk( path, include_dir_glob_list, exclude_dir_glob_list, include_file_glob_list, exclude_file_glob_list, return_dir_paths, recursive, ) # else: # # Single directory search # file_path_iter = os.listdir(path) # # skip_dir = None # # while True: # file_or_dir_path = file_path_iter.send(skip_dir) # file_or_dir_name = os.path.split(file_or_dir_path)[1] # skip_dir = False # skip_dir = yield file_or_dir_path else: if not ignore_invalid: raise EnvironmentError(0, 'Not a valid file or dir path', path)
# language=rst. Args: path_list: list of str List of file- and dir paths. File paths are used directly and dirs are searched for files. ``path_list`` does not accept glob patterns, as it's more convenient to let the shell expand glob patterns to directly specified files and dirs. E.g., to use a glob to select all .py files in a subdir, the command may be called with sub/dir/*.py, which the shell expands to a list of files, which are then passed to this function. The paths should be Unicode or utf-8 strings. Tilde ("~") to home expansion is performed on the paths. The shell can also expand glob patterns to dir paths or a mix of file and dir paths. include_glob_list: list of str exclude_glob_list: list of str Patterns ending with "/" are matched only against dir names. All other patterns are matched only against file names. If the include list contains any file patterns, files must match one or more of the patterns in order to be returned. If the include list contains any dir patterns, dirs must match one or more of the patterns in order for the recursive search to descend into them. The exclude list works in the same way except that matching files and dirs are excluded instead of included. If both include and exclude lists are specified, files and dirs must both match the include and not match the exclude patterns in order to be returned or descended into. recursive: bool - **True** (default): Search subdirectories - **False**: Do not search subdirectories ignore_invalid: bool - **True**: Invalid paths in path_list are ignored. - **False** (default): EnvironmentError is raised if any of the paths in ``path_list`` do not reference an existing file or dir. default_excludes: bool - **True**: A list of glob patterns for files and dirs that should typically be ignored is added to any exclude patterns passed to the function. These include dirs such as .git and backup files, such as files appended with "~". - **False**: No files or dirs are excluded by default. return_dir_paths: bool - **False**: Only file paths are returned. - **True**: Directory paths are also returned. Returns: File path iterator Notes: During iteration, the iterator can be prevented from descending into a directory by sending a "skip" flag when the iterator yields the directory path. This allows the client to determine if directories should be iterated by, for instance, which files are present in the directory. This can be used in conjunction with the include and exclude glob lists. Note that, in order to receive directory paths that can be skipped, ``return_dir_paths`` must be set to True. The regular ``for...in`` syntax does not support sending the "skip" flag back to the iterator. Instead, use a pattern like: .. highlight: python :: itr = file_iterator.file_iter(..., return_dir_paths=True) try: path = itr.next() while True: skip_dir = determine_if_dir_should_be_skipped(path) file_path = itr.send(skip_dir) except KeyboardInterrupt: raise StopIteration except StopIteration: pass Glob patterns are matched only against file and directory names, not the full paths. Paths passed directly in ``path_list`` are not filtered. The same file can be returned multiple times if ``path_list`` contains duplicated file paths or dir paths, or dir paths that implicitly include the same subdirs. ``include_glob_list`` and ``exclude_glob_list`` are handy for filtering the files found in dir searches. Remember to escape the include and exclude glob patterns on the command line so that they're not expanded by the shell.
entailment
def get_resource_map_members(pid): """``pid`` is the PID of a Resource Map or the PID of a member of a Resource Map.""" if d1_gmn.app.did.is_resource_map_db(pid): return get_resource_map_members_by_map(pid) elif d1_gmn.app.did.is_resource_map_member(pid): return get_resource_map_members_by_member(pid) else: raise d1_common.types.exceptions.InvalidRequest( 0, 'Not a Resource Map or Resource Map member. pid="{}"'.format(pid) )
``pid`` is the PID of a Resource Map or the PID of a member of a Resource Map.
entailment
def set_with_conversion(self, variable, value_string): """Convert user supplied string to Python type. Lets user use values such as True, False and integers. All variables can be set to None, regardless of type. Handle the case where a string is typed by the user and is not quoted, as a string literal. """ self._assert_valid_variable(variable) try: v = ast.literal_eval(value_string) except (ValueError, SyntaxError): v = value_string if v is None or v == "none": self._variables[variable] = None else: try: type_converter = variable_type_map[variable] value_string = self._validate_variable_type( value_string, type_converter ) value = type_converter(value_string) self._variables[variable] = value except ValueError: raise d1_cli.impl.exceptions.InvalidArguments( "Invalid value for {}: {}".format(variable, value_string) )
Convert user supplied string to Python type. Lets user use values such as True, False and integers. All variables can be set to None, regardless of type. Handle the case where a string is typed by the user and is not quoted, as a string literal.
entailment
def log_setup(is_debug=False, is_multiprocess=False): """Set up a standardized log format for the DataONE Python stack. All Python components should use this function. If ``is_multiprocess`` is True, include the process ID in the log so that logs can be separated for each process. Output goes only to stdout. """ format_str = ( '%(asctime)s %(name)s %(module)s:%(lineno)d %(process)4d %(levelname)-8s %(message)s' if is_multiprocess else '%(asctime)s %(name)s %(module)s:%(lineno)d %(levelname)-8s %(message)s' ) formatter = logging.Formatter(format_str, '%Y-%m-%d %H:%M:%S') console_logger = logging.StreamHandler(sys.stdout) console_logger.setFormatter(formatter) logging.getLogger('').addHandler(console_logger) if is_debug: logging.getLogger('').setLevel(logging.DEBUG) else: logging.getLogger('').setLevel(logging.INFO)
Set up a standardized log format for the DataONE Python stack. All Python components should use this function. If ``is_multiprocess`` is True, include the process ID in the log so that logs can be separated for each process. Output goes only to stdout.
entailment
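A sketch of typical use at a script entry point; the output line in the comment is illustrative::

    import logging

    log_setup(is_debug=True)
    logging.info('Connected to node')
    # 2019-01-01 12:00:00 root <module>:4 INFO     Connected to node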
def get_content_type(content_type): """Extract the MIME type value from a content type string. Removes any subtype and parameter values that may be present in the string. Args: content_type: str String with content type and optional subtype and parameter fields. Returns: str: String with only content type Example: :: Input: multipart/form-data; boundary=aBoundaryString Returns: multipart/form-data """ m = email.message.Message() m['Content-Type'] = content_type return m.get_content_type()
Extract the MIME type value from a content type string. Removes any subtype and parameter values that may be present in the string. Args: content_type: str String with content type and optional subtype and parameter fields. Returns: str: String with only content type Example: :: Input: multipart/form-data; boundary=aBoundaryString Returns: multipart/form-data
entailment
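Two illustrative calls::

    get_content_type('multipart/form-data; boundary=aBoundaryString')
    # 'multipart/form-data'
    get_content_type('text/xml; charset=utf-8')
    # 'text/xml'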
def nested_update(d, u): """Merge two nested dicts. Nested dicts are sometimes used for representing various recursive structures. When updating such a structure, it may be convenient to present the updated data as a corresponding recursive structure. This function will then apply the update. Args: d: dict dict that will be updated in-place. May or may not contain nested dicts. u: dict dict with contents that will be merged into ``d``. May or may not contain nested dicts. """ for k, v in list(u.items()): if isinstance(v, collections.abc.Mapping): r = nested_update(d.get(k, {}), v) d[k] = r else: d[k] = u[k] return d
Merge two nested dicts. Nested dicts are sometimes used for representing various recursive structures. When updating such a structure, it may be convenient to present the updated data as a corresponding recursive structure. This function will then apply the update. Args: d: dict dict that will be updated in-place. May or may not contain nested dicts. u: dict dict with contents that will be merged into ``d``. May or may not contain nested dicts.
entailment
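For example, updating a single leaf keeps its sibling keys instead of replacing the whole branch, which a plain ``dict.update()`` would do::

    d = {'a': {'x': 1, 'y': 2}, 'b': 3}
    nested_update(d, {'a': {'y': 20}, 'c': 4})
    # d == {'a': {'x': 1, 'y': 20}, 'b': 3, 'c': 4}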
def print_logging(): """Context manager to temporarily suppress additional information such as timestamps when writing to loggers. This makes logging look like ``print()``. The main use case is in scripts that mix logging and ``print()``, as Python uses separate streams for those, and output can and does end up getting shuffled if ``print()`` and logging is used interchangeably. When entering the context, the logging levels on the current handlers are saved then modified to WARNING levels. A new DEBUG level handler with a formatter that does not write timestamps, etc, is then created. When leaving the context, the DEBUG handler is removed and existing loggers are restored to their previous levels. By modifying the log levels to WARNING instead of completely disabling the loggers, it is ensured that potentially serious issues can still be logged while the context manager is in effect. """ root_logger = logging.getLogger() old_level_list = [h.level for h in root_logger.handlers] for h in root_logger.handlers: h.setLevel(logging.WARN) log_format = logging.Formatter('%(message)s') stream_handler = logging.StreamHandler(sys.stdout) stream_handler.setFormatter(log_format) stream_handler.setLevel(logging.DEBUG) root_logger.addHandler(stream_handler) yield root_logger.removeHandler(stream_handler) for h, level in zip(root_logger.handlers, old_level_list): h.setLevel(level)
Context manager to temporarily suppress additional information such as timestamps when writing to loggers. This makes logging look like ``print()``. The main use case is in scripts that mix logging and ``print()``, as Python uses separate streams for those, and output can and does end up getting shuffled if ``print()`` and logging is used interchangeably. When entering the context, the logging levels on the current handlers are saved then modified to WARNING levels. A new DEBUG level handler with a formatter that does not write timestamps, etc, is then created. When leaving the context, the DEBUG handler is removed and existing loggers are restored to their previous levels. By modifying the log levels to WARNING instead of completely disabling the loggers, it is ensured that potentially serious issues can still be logged while the context manager is in effect.
entailment
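A sketch of use; the ``yield`` implies the function is wrapped with something like ``contextlib.contextmanager`` (an assumption, as the decorator is not shown above)::

    import logging

    logging.getLogger().setLevel(logging.INFO)  # assume INFO records are enabled
    with print_logging():
        logging.info('looks like print() output, no timestamp prefix')
        print('interleaves cleanly with the log line above')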
def save_json(py_obj, json_path): """Serialize a native object to JSON and save it normalized, pretty printed to a file. The JSON string is normalized by sorting any dictionary keys. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. json_path: str File path to which to write the JSON file. The path must exist. The filename will normally end with ".json". See Also: ToJsonCompatibleTypes() """ with open(json_path, 'w', encoding='utf-8') as f: f.write(serialize_to_normalized_pretty_json(py_obj))
Serialize a native object to JSON and save it normalized, pretty printed to a file. The JSON string is normalized by sorting any dictionary keys. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. json_path: str File path to which to write the JSON file. The path must exist. The filename will normally end with ".json". See Also: ToJsonCompatibleTypes()
entailment
def serialize_to_normalized_pretty_json(py_obj): """Serialize a native object to normalized, pretty printed JSON. The JSON string is normalized by sorting any dictionary keys. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. Returns: str: normalized, pretty printed JSON string. """ return json.dumps(py_obj, sort_keys=True, indent=2, cls=ToJsonCompatibleTypes)
Serialize a native object to normalized, pretty printed JSON. The JSON string is normalized by sorting any dictionary keys. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. Returns: str: normalized, pretty printed JSON string.
entailment
def serialize_to_normalized_compact_json(py_obj): """Serialize a native object to normalized, compact JSON. The JSON string is normalized by sorting any dictionary keys. It will be on a single line without whitespace between elements. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. Returns: str: normalized, compact JSON string. """ return json.dumps( py_obj, sort_keys=True, separators=(',', ':'), cls=ToJsonCompatibleTypes )
Serialize a native object to normalized, compact JSON. The JSON string is normalized by sorting any dictionary keys. It will be on a single line without whitespace between elements. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. Returns: str: normalized, compact JSON string.
entailment
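The two serializers above differ only in layout; both sort keys so that equal objects always produce identical strings::

    obj = {'b': 1, 'a': [2, 3]}
    serialize_to_normalized_compact_json(obj)
    # '{"a":[2,3],"b":1}'
    print(serialize_to_normalized_pretty_json(obj))
    # {
    #   "a": [
    #     2,
    #     3
    #   ],
    #   "b": 1
    # }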
def format_sec_to_dhm(sec): """Format seconds to days, hours, minutes. Args: sec: float or int Number of seconds in a period of time Returns: Period of time represented as a string on the form ``0d00h00m``. """ rem_int, s_int = divmod(int(sec), 60) rem_int, m_int = divmod(rem_int, 60) d_int, h_int = divmod(rem_int, 24) return '{}d{:02d}h{:02d}m'.format(d_int, h_int, m_int)
Format seconds to days, hours, minutes. Args: sec: float or int Number of seconds in a period of time Returns: Period of time represented as a string on the form ``0d00h00m``.
entailment
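For example, 93784 seconds is 1 day, 2 hours, 3 minutes and 4 seconds; the seconds are discarded::

    format_sec_to_dhm(93784)
    # '1d02h03m'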
def count(self, event_str, inc_int=1): """Count an event. Args: event_str: str The name of an event to count. Used as a key in the event dict. The same name will also be used in the summary. inc_int: int Optional argument to increase the count for the event by more than 1. """ self._event_dict.setdefault(event_str, 0) self._event_dict[event_str] += inc_int
Count an event. Args: event_str: str The name of an event to count. Used as a key in the event dict. The same name will also be used in the summary. inc_int: int Optional argument to increase the count for the event by more than 1.
entailment
def log_and_count(self, event_str, msg_str=None, inc_int=None): """Count an event and write a message to a logger. Args: event_str: str The name of an event to count. Used as a key in the event dict. The same name will be used in the summary. This also becomes a part of the message logged by this function. msg_str: str Optional message with details about the events. The message is only written to the log. While the ``event_str`` functions as a key and must remain the same for the same type of event, ``msg_str`` may change between calls. inc_int: int Optional argument to increase the count for the event by more than 1. """ logger.info( ' - '.join(map(str, [v for v in (event_str, msg_str, inc_int) if v])) ) self.count(event_str, inc_int or 1)
Count an event and write a message to a logger. Args: event_str: str The name of an event to count. Used as a key in the event dict. The same name will be used in the summary. This also becomes a part of the message logged by this function. msg_str: str Optional message with details about the events. The message is only written to the log. While the ``event_str`` functions as a key and must remain the same for the same type of event, ``msg_str`` may change between calls. inc_int: int Optional argument to increase the count for the event by more than 1.
entailment
def dump_to_log(self): """Write summary to logger with the name and number of times each event has been counted. This function may be called at any point in the process. Counts are not zeroed. """ if self._event_dict: logger.info('Events:') for event_str, count_int in sorted(self._event_dict.items()): logger.info(' {}: {}'.format(event_str, count_int)) else: logger.info('No Events')
Write summary to logger with the name and number of times each event has been counted. This function may be called at any point in the process. Counts are not zeroed.
entailment
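A sketch of how the three methods above work together, assuming they live on one progress-logger class (the class and helper names here are hypothetical)::

    plog = EventCounter()                     # hypothetical class with _event_dict
    for pid in pid_list:                      # hypothetical iterable of identifiers
        if already_synced(pid):               # hypothetical predicate
            plog.count('already synced')
        else:
            plog.log_and_count('sync requested', 'pid="{}"'.format(pid))
    plog.dump_to_log()
    # Events:
    #   already synced: 40
    #   sync requested: 2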
def delete_all_from_db(): """Clear the database. Used for testing and debugging. """ # The models.CASCADE property is set on all ForeignKey fields, so tables can # be deleted in any order without breaking constraints. for model in django.apps.apps.get_models(): model.objects.all().delete()
Clear the database. Used for testing and debugging.
entailment
def get_query_param(request, key): """Get query parameter uniformly for GET and POST requests.""" value = request.query_params.get(key) or request.data.get(key) if value is None: raise KeyError() return value
Get query parameter uniformly for GET and POST requests.
entailment
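A hedged sketch of use in a DRF view method, where the client may send ``ids`` either in the query string or in the request body::

    try:
        ids = get_query_param(request, 'ids')
    except KeyError:
        return Response({'error': '`ids` parameter is required'},
                        status=status.HTTP_400_BAD_REQUEST)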
def get_limit(self, request): """Return limit parameter.""" if self.limit_query_param: try: return _positive_int( get_query_param(request, self.limit_query_param), strict=True, cutoff=self.max_limit ) except (KeyError, ValueError): pass return self.default_limit
Return limit parameter.
entailment
def get_offset(self, request): """Return offset parameter.""" try: return _positive_int( get_query_param(request, self.offset_query_param), ) except (KeyError, ValueError): return 0
Return offset parameter.
entailment
def futurize_module(module_path, show_diff, write_update): """2to3 uses AST, not Baron.""" logging.info('Futurizing module... path="{}"'.format(module_path)) ast_tree = back_to_the_futurize(module_path) return d1_dev.util.update_module_file_ast( ast_tree, module_path, show_diff, write_update )
2to3 uses AST, not Baron.
entailment
def _remove_single_line_import_comments(r): """We previously used more groups for the import statements and named each group.""" logging.info('Removing single line import comments') import_r, remaining_r = split_by_last_import(r) new_import_r = redbaron.NodeList() for i, v in enumerate(import_r): if 1 < i < len(import_r) - 2: if not ( import_r[i - 2].type != 'comment' and v.type == 'comment' and import_r[i + 2].type != 'comment' ) or _is_keep_comment(v): new_import_r.append(v) else: new_import_r.append(v) return new_import_r + remaining_r
We previously used more groups for the import statements and named each group.
entailment
def _update_init_all(module_path, r): """Add or update __all__ in __init__.py file.""" module_dir_path = os.path.split(module_path)[0] module_list = [] for item_name in os.listdir(module_dir_path): item_path = os.path.join(module_dir_path, item_name) if os.path.isfile(item_path) and item_name in ('__init__.py', 'setup.py'): continue if os.path.isfile(item_path) and not item_name.endswith('.py'): continue # if os.path.isdir(item_path) and not os.path.isfile( # os.path.join(item_path, '__init__.py') # ): # continue if os.path.isdir(item_path): continue module_list.append(re.sub(r'.py$', '', item_name).encode('utf-8')) module_literal_str = str(sorted(module_list)) assignment_node_list = r('AssignmentNode', recursive=False) for n in assignment_node_list: if n.type == 'assignment' and n.target.value == '__all__': n.value = module_literal_str break else: r.node_list.append( redbaron.RedBaron('__all__ = {}\n'.format(module_literal_str)) ) return r
Add or update __all__ in __init__.py file.
entailment
def _remove_init_all(r): """Remove any __all__ in __init__.py file.""" new_r = redbaron.NodeList() for n in r.node_list: if n.type == 'assignment' and n.target.value == '__all__': pass else: new_r.append(n) return new_r
Remove any __all__ in __init__.py file.
entailment
def get_object_list_json(request): """gmn.listObjects(session[, fromDate][, toDate][, formatId] [, identifier][, replicaStatus][, start=0][, count=1000] [, f=sysmetaField ...]) → ObjectListJson GMN specific API for fast retrieval of object sysmeta elements. """ # TODO: Add to documentation if "f" in request.GET: field_list = request.GET.getlist("f") else: field_list = None result_dict = d1_gmn.app.views.util.query_object_list(request, "object_list_json") result_dict["fields"] = field_list result_dict["objects"] = d1_gmn.app.sysmeta_extract.extract_values_query( result_dict["query"], field_list ) del result_dict["query"] return django.http.HttpResponse( d1_common.util.serialize_to_normalized_pretty_json(result_dict), d1_common.const.CONTENT_TYPE_JSON, )
gmn.listObjects(session[, fromDate][, toDate][, formatId] [, identifier][, replicaStatus][, start=0][, count=1000] [, f=sysmetaField ...]) → ObjectListJson GMN specific API for fast retrieval of object sysmeta elements.
entailment
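A hedged client-side example; the base URL is hypothetical and the endpoint path is assumed to match the view name above::

    import requests

    resp = requests.get(
        'https://gmn.example.edu/object_list_json',
        params={'count': 10, 'f': ['pid', 'size']},  # repeated f= params
    )
    print(resp.json()['objects'])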
def configure_logging(emit_list): """Configure logging to send log records to the master.""" if 'sphinx' in sys.modules: module_base = 'resolwe.flow.executors' else: module_base = 'executors' logging_config = dict( version=1, formatters={ 'json_formatter': { '()': JSONFormatter }, }, handlers={ 'redis': { 'class': module_base + '.logger.RedisHandler', 'formatter': 'json_formatter', 'level': logging.INFO, 'emit_list': emit_list }, 'console': { 'class': 'logging.StreamHandler', 'level': logging.WARNING }, }, root={ 'handlers': ['redis', 'console'], 'level': logging.DEBUG, }, loggers={ # Don't use redis logger to prevent circular dependency. module_base + '.manager_comm': { 'level': 'INFO', 'handlers': ['console'], 'propagate': False, }, }, ) dictConfig(logging_config)
Configure logging to send log records to the master.
entailment
def format(self, record): """Dump the record to JSON.""" data = record.__dict__.copy() data['data_id'] = DATA['id'] data['data_location_id'] = DATA_LOCATION['id'] data['hostname'] = socket.gethostname() # Get relative path, so listener can reconstruct the path to the actual code. data['pathname'] = os.path.relpath(data['pathname'], os.path.dirname(__file__)) # Exception and Traceback cannot be serialized. data['exc_info'] = None # Ensure logging message is instantiated to a string. data['msg'] = str(data['msg']) return json.dumps(data)
Dump the record to JSON.
entailment
def emit(self, record): """Send log message to the listener.""" future = asyncio.ensure_future(send_manager_command( ExecutorProtocol.LOG, extra_fields={ ExecutorProtocol.LOG_MESSAGE: self.format(record), }, expect_reply=False )) self.emit_list.append(future)
Send log message to the listener.
entailment
async def is_object_synced_to_cn(self, client, pid): """Check if object with {pid} has successfully synced to the CN. CNRead.describe() is used as it's a light-weight HTTP HEAD request. This assumes that the call is being made over a connection that has been authenticated and has read or better access on the given object if it exists. """ status = await client.describe(pid) if status == 200: self.progress_logger.event("SciObj already synced on CN") return True elif status == 404: self.progress_logger.event("SciObj has not synced to CN") return False self.progress_logger.event( "CNRead.describe() returned unexpected status code. " 'pid="{}" status="{}"'.format(pid, status) ) return True
Check if object with {pid} has successfully synced to the CN. CNRead.describe() is used as it's a light-weight HTTP HEAD request. This assumes that the call is being made over a connection that has been authenticated and has read or better access on the given object if it exists.
entailment
async def send_synchronization_request(self, client, pid): """Issue a notification and request for sync for object with {pid} to the CN.""" # Skip CN call for debugging # status = 200 status = await client.synchronize(pid) if status == 200: self.progress_logger.event("Issued sync request, CN accepted") else: self.progress_logger.event( "CNRead.synchronize() returned unexpected status code. " 'pid="{}" status="{}"'.format(pid, status) )
Issue a notification and request for sync for object with {pid} to the CN.
entailment
def create_secret(self, value, contributor, metadata=None, expires=None): """Create a new secret, returning its handle. :param value: Secret value to store :param contributor: User owning the secret :param metadata: Optional metadata dictionary (must be JSON serializable) :param expires: Optional date/time of expiry (defaults to None, which means that the secret never expires) :return: Secret handle """ if metadata is None: metadata = {} secret = self.create( value=value, contributor=contributor, metadata=metadata, expires=expires, ) return str(secret.handle)
Create a new secret, returning its handle. :param value: Secret value to store :param contributor: User owning the secret :param metadata: Optional metadata dictionary (must be JSON serializable) :param expires: Optional date/time of expiry (defaults to None, which means that the secret never expires) :return: Secret handle
entailment
def get_secret(self, handle, contributor): """Retrieve an existing secret's value. :param handle: Secret handle :param contributor: User instance to perform contributor validation, which means that only secrets for the given contributor will be looked up. """ queryset = self.all() if contributor is not None: queryset = queryset.filter(contributor=contributor) secret = queryset.get(handle=handle) return secret.value
Retrieve an existing secret's value. :param handle: Secret handle :param contributor: User instance to perform contributor validation, which means that only secrets for the given contributor will be looked up.
entailment
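A hedged round-trip sketch for the two manager methods above; the Secret model name and the manager attachment are assumptions, since only the manager methods are shown here:

    # Hypothetical usage, assuming these methods live on Secret.objects:
    handle = Secret.objects.create_secret(value='api-token-123', contributor=user)
    assert Secret.objects.get_secret(handle, contributor=user) == 'api-token-123'
    # Looking the handle up with a different contributor raises Secret.DoesNotExist,
    # because the queryset is filtered by contributor before the get().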
def validation_schema(name): """Return json schema for json validation.""" schemas = { 'processor': 'processSchema.json', 'descriptor': 'descriptorSchema.json', 'field': 'fieldSchema.json', 'type': 'typeSchema.json', } if name not in schemas: raise ValueError("Unknown schema name: {}".format(name)) field_schema_file = finders.find('flow/{}'.format(schemas['field']), all=True)[0] with open(field_schema_file, 'r') as fn: field_schema = fn.read() if name == 'field': return json.loads(field_schema.replace('{{PARENT}}', '')) schema_file = finders.find('flow/{}'.format(schemas[name]), all=True)[0] with open(schema_file, 'r') as fn: schema = fn.read() return json.loads(schema.replace('{{FIELD}}', field_schema).replace('{{PARENT}}', '/field'))
Return json schema for json validation.
entailment
def validate_schema(instance, schema, test_required=True, data_location=None, skip_missing_data=False): """Check if DictField values are consistent with our data types. Perform basic JSON schema validation and our custom validations: * check that required fields are given (if `test_required` is set to ``True``) * check if ``basic:file:`` and ``list:basic:file:`` fields match the regex given in the schema (only if ``validate_regex`` is defined in the schema for the corresponding fields) and exist (only if ``data_location`` is given) * check if directories referenced in ``basic:dir:`` and ``list:basic:dir:`` fields exist (only if ``data_location`` is given) * check that referenced ``Data`` objects (in ``data:<data_type>`` and ``list:data:<data_type>`` fields) exist and are of type ``<data_type>`` * check that referenced ``Storage`` objects (in ``basic:json:`` fields) exist :param list instance: Instance to be validated :param list schema: Schema for validation :param bool test_required: Flag for testing if all required fields are present. It is useful if validation is run before the ``Data`` object is finished and some fields are still missing (default: ``True``) :param :class:`~resolwe.flow.models.data.DataLocation` data_location: data location used for checking if files and directories exist (default: ``None``) :param bool skip_missing_data: Don't raise an error if a referenced ``Data`` object does not exist :rtype: None :raises ValidationError: if ``instance`` doesn't match the schema defined in ``schema`` """ from .storage import Storage # Prevent circular import. path_prefix = None if data_location: path_prefix = data_location.get_path() def validate_refs(field): """Validate reference paths.""" for ref_filename in field.get('refs', []): ref_path = os.path.join(path_prefix, ref_filename) if not os.path.exists(ref_path): raise ValidationError("Path referenced in `refs` ({}) does not exist.".format(ref_path)) if not (os.path.isfile(ref_path) or os.path.isdir(ref_path)): raise ValidationError( "Path referenced in `refs` ({}) is neither a file nor a directory.".format(ref_path)) def validate_file(field, regex): """Validate file name (and check that it exists).""" filename = field['file'] if regex and not re.search(regex, filename): raise ValidationError( "File name {} does not match regex {}".format(filename, regex)) if path_prefix: path = os.path.join(path_prefix, filename) if not os.path.exists(path): raise ValidationError("Referenced path ({}) does not exist.".format(path)) if not os.path.isfile(path): raise ValidationError("Referenced path ({}) is not a file.".format(path)) validate_refs(field) def validate_dir(field): """Check that the directory and the files it references exist.""" dirname = field['dir'] if path_prefix: path = os.path.join(path_prefix, dirname) if not os.path.exists(path): raise ValidationError("Referenced path ({}) does not exist.".format(path)) if not os.path.isdir(path): raise ValidationError("Referenced path ({}) is not a directory.".format(path)) validate_refs(field) def validate_data(data_pk, type_): """Check that the `Data` object exists and is of the right type.""" from .data import Data # prevent circular import data_qs = Data.objects.filter(pk=data_pk).values('process__type') if not data_qs.exists(): if skip_missing_data: return raise ValidationError( "Referenced `Data` object does not exist (id:{})".format(data_pk)) data = data_qs.first() if not data['process__type'].startswith(type_): raise ValidationError( "Data object of type `{}` is required, but type `{}` is given. " "(id:{})".format(type_, data['process__type'], data_pk)) def validate_range(value, interval, name): """Check that the given value is inside the specified range.""" if not interval: return if value < interval[0] or value > interval[1]: raise ValidationError( "Value of field '{}' is out of range. It should be between {} and {}.".format( name, interval[0], interval[1] ) ) is_dirty = False dirty_fields = [] for _schema, _fields, _ in iterate_schema(instance, schema): name = _schema['name'] is_required = _schema.get('required', True) if test_required and is_required and name not in _fields: is_dirty = True dirty_fields.append(name) if name in _fields: field = _fields[name] type_ = _schema.get('type', "") # Treat None as if the field is missing. if not is_required and field is None: continue try: jsonschema.validate([{"type": type_, "value": field}], TYPE_SCHEMA) except jsonschema.exceptions.ValidationError as ex: raise ValidationError(ex.message) choices = [choice['value'] for choice in _schema.get('choices', [])] allow_custom_choice = _schema.get('allow_custom_choice', False) if choices and not allow_custom_choice and field not in choices: raise ValidationError( "Value of field '{}' must match one of predefined choices. " "Current value: {}".format(name, field) ) if type_ == 'basic:file:': validate_file(field, _schema.get('validate_regex')) elif type_ == 'list:basic:file:': for obj in field: validate_file(obj, _schema.get('validate_regex')) elif type_ == 'basic:dir:': validate_dir(field) elif type_ == 'list:basic:dir:': for obj in field: validate_dir(obj) elif type_ == 'basic:json:' and not Storage.objects.filter(pk=field).exists(): raise ValidationError( "Referenced `Storage` object does not exist (id:{})".format(field)) elif type_.startswith('data:'): validate_data(field, type_) elif type_.startswith('list:data:'): for data_id in field: validate_data(data_id, type_[5:]) # remove `list:` from type elif type_ == 'basic:integer:' or type_ == 'basic:decimal:': validate_range(field, _schema.get('range'), name) elif type_ == 'list:basic:integer:' or type_ == 'list:basic:decimal:': for obj in field: validate_range(obj, _schema.get('range'), name) try: # Check that schema definitions exist for all fields for _, _ in iterate_fields(instance, schema): pass except KeyError as ex: raise ValidationError(str(ex)) if is_dirty: dirty_fields = ['"{}"'.format(field) for field in dirty_fields] raise DirtyError("Required fields {} not given.".format(', '.join(dirty_fields)))
Check if DictField values are consistent with our data types. Perform basic JSON schema validation and our custom validations: * check that required fields are given (if `test_required` is set to ``True``) * check if ``basic:file:`` and ``list:basic:file:`` fields match the regex given in the schema (only if ``validate_regex`` is defined in the schema for the corresponding fields) and exist (only if ``data_location`` is given) * check if directories referenced in ``basic:dir:`` and ``list:basic:dir:`` fields exist (only if ``data_location`` is given) * check that referenced ``Data`` objects (in ``data:<data_type>`` and ``list:data:<data_type>`` fields) exist and are of type ``<data_type>`` * check that referenced ``Storage`` objects (in ``basic:json:`` fields) exist :param list instance: Instance to be validated :param list schema: Schema for validation :param bool test_required: Flag for testing if all required fields are present. It is useful if validation is run before the ``Data`` object is finished and some fields are still missing (default: ``True``) :param :class:`~resolwe.flow.models.data.DataLocation` data_location: data location used for checking if files and directories exist (default: ``None``) :param bool skip_missing_data: Don't raise an error if a referenced ``Data`` object does not exist :rtype: None :raises ValidationError: if ``instance`` doesn't match the schema defined in ``schema``
entailment
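A minimal sketch of a validate_schema call, assuming a configured Resolwe environment; the schema and instance below are illustrative and follow the field-schema conventions the function checks (required flag, choices, range):

    schema = [
        {'name': 'threads', 'type': 'basic:integer:', 'range': [1, 16]},
        {'name': 'genome', 'type': 'basic:string:', 'required': False},
    ]
    instance = {'threads': 32}

    # Raises ValidationError: value of field 'threads' is out of range.
    validate_schema(instance, schema)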
def _hydrate_values(output, output_schema, data): """Hydrate basic:file and basic:json values. Find fields with basic:file type and assign a full path to the file. Find fields with basic:json type and assign a JSON object from storage. """ def hydrate_path(file_name): """Hydrate file paths.""" from resolwe.flow.managers import manager class HydratedPath(str): """String wrapper, which also stores the original filename.""" __slots__ = ('data_id', 'file_name') def __new__(cls, value=''): """Initialize hydrated path.""" hydrated = str.__new__(cls, value) hydrated.data_id = data.id hydrated.file_name = file_name return hydrated return HydratedPath(manager.get_executor().resolve_data_path(data, file_name)) def hydrate_storage(storage_id): """Hydrate storage fields.""" from .storage import LazyStorageJSON # Prevent circular import. return LazyStorageJSON(pk=storage_id) for field_schema, fields in iterate_fields(output, output_schema): name = field_schema['name'] value = fields[name] if 'type' in field_schema: if field_schema['type'].startswith('basic:file:'): value['file'] = hydrate_path(value['file']) value['refs'] = [hydrate_path(ref) for ref in value.get('refs', [])] elif field_schema['type'].startswith('list:basic:file:'): for obj in value: obj['file'] = hydrate_path(obj['file']) obj['refs'] = [hydrate_path(ref) for ref in obj.get('refs', [])] if field_schema['type'].startswith('basic:dir:'): value['dir'] = hydrate_path(value['dir']) value['refs'] = [hydrate_path(ref) for ref in value.get('refs', [])] elif field_schema['type'].startswith('list:basic:dir:'): for obj in value: obj['dir'] = hydrate_path(obj['dir']) obj['refs'] = [hydrate_path(ref) for ref in obj.get('refs', [])] elif field_schema['type'].startswith('basic:json:'): fields[name] = hydrate_storage(value) elif field_schema['type'].startswith('list:basic:json:'): fields[name] = [hydrate_storage(storage_id) for storage_id in value]
Hydrate basic:file and basic:json values. Find fields with basic:file type and assign a full path to the file. Find fields with basic:json type and assign a JSON object from storage.
entailment
def hydrate_input_references(input_, input_schema, hydrate_values=True): """Hydrate ``input_`` with linked data. Find fields with complex data:<...> types in ``input_``. Assign an output of corresponding data object to those fields. """ from .data import Data # prevent circular import for field_schema, fields in iterate_fields(input_, input_schema): name = field_schema['name'] value = fields[name] if 'type' in field_schema: if field_schema['type'].startswith('data:'): if value is None: continue try: data = Data.objects.get(id=value) except Data.DoesNotExist: fields[name] = {} continue output = copy.deepcopy(data.output) if hydrate_values: _hydrate_values(output, data.process.output_schema, data) output["__id"] = data.id output["__type"] = data.process.type output["__descriptor"] = data.descriptor output["__entity_name"] = None output["__output_schema"] = data.process.output_schema entity = data.entity_set.values('name').first() if entity: output["__entity_name"] = entity['name'] fields[name] = output elif field_schema['type'].startswith('list:data:'): outputs = [] for val in value: if val is None: continue try: data = Data.objects.get(id=val) except Data.DoesNotExist: outputs.append({}) continue output = copy.deepcopy(data.output) if hydrate_values: _hydrate_values(output, data.process.output_schema, data) output["__id"] = data.id output["__type"] = data.process.type output["__descriptor"] = data.descriptor output["__output_schema"] = data.process.output_schema entity = data.entity_set.values('name').first() if entity: output["__entity_name"] = entity['name'] outputs.append(output) fields[name] = outputs
Hydrate ``input_`` with linked data. Find fields with complex data:<...> types in ``input_``. Assign an output of corresponding data object to those fields.
entailment
def hydrate_input_uploads(input_, input_schema, hydrate_values=True): """Hydrate input basic:upload types with upload location. Find basic:upload fields in input. Add the upload location for relative paths. """ from resolwe.flow.managers import manager files = [] for field_schema, fields in iterate_fields(input_, input_schema): name = field_schema['name'] value = fields[name] if 'type' in field_schema: if field_schema['type'] == 'basic:file:': files.append(value) elif field_schema['type'] == 'list:basic:file:': files.extend(value) urlregex = re.compile(r'^(https?|ftp)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]') for value in files: if 'file_temp' in value: if isinstance(value['file_temp'], str): # If file_temp not url, hydrate path. if not urlregex.search(value['file_temp']): value['file_temp'] = manager.get_executor().resolve_upload_path(value['file_temp']) else: # Something very strange happened. value['file_temp'] = 'Invalid value for file_temp in DB'
Hydrate input basic:upload types with upload location. Find basic:upload fields in input. Add the upload location for relative paths.
entailment
def hydrate_size(data, force=False): """Add file and dir sizes. Add sizes to ``basic:file:``, ``list:basic:file``, ``basic:dir:`` and ``list:basic:dir:`` fields. ``force`` parameter is used to recompute file sizes also on objects that already have these values, e.g. in migrations. """ from .data import Data # prevent circular import def get_dir_size(path): """Get directory size.""" total_size = 0 for dirpath, _, filenames in os.walk(path): for file_name in filenames: file_path = os.path.join(dirpath, file_name) if not os.path.isfile(file_path): # Skip all "not normal" files (links, ...) continue total_size += os.path.getsize(file_path) return total_size def get_refs_size(obj, obj_path): """Calculate size of all references of ``obj``. :param dict obj: Data object's output field (of type file/dir). :param str obj_path: Path to ``obj``. """ total_size = 0 for ref in obj.get('refs', []): ref_path = data.location.get_path(filename=ref) if ref_path in obj_path: # It is a common case that ``obj['file']`` is also contained in # one of obj['refs']. In that case, we need to make sure that its # size is not counted twice: continue if os.path.isfile(ref_path): total_size += os.path.getsize(ref_path) elif os.path.isdir(ref_path): total_size += get_dir_size(ref_path) return total_size def add_file_size(obj): """Add file size to the basic:file field.""" if data.status in [Data.STATUS_DONE, Data.STATUS_ERROR] and 'size' in obj and not force: return path = data.location.get_path(filename=obj['file']) if not os.path.isfile(path): raise ValidationError("Referenced file does not exist ({})".format(path)) obj['size'] = os.path.getsize(path) obj['total_size'] = obj['size'] + get_refs_size(obj, path) def add_dir_size(obj): """Add directory size to the basic:dir field.""" if data.status in [Data.STATUS_DONE, Data.STATUS_ERROR] and 'size' in obj and not force: return path = data.location.get_path(filename=obj['dir']) if not os.path.isdir(path): raise ValidationError("Referenced dir does not exist ({})".format(path)) obj['size'] = get_dir_size(path) obj['total_size'] = obj['size'] + get_refs_size(obj, path) data_size = 0 for field_schema, fields in iterate_fields(data.output, data.process.output_schema): name = field_schema['name'] value = fields[name] if 'type' in field_schema: if field_schema['type'].startswith('basic:file:'): add_file_size(value) data_size += value.get('total_size', 0) elif field_schema['type'].startswith('list:basic:file:'): for obj in value: add_file_size(obj) data_size += obj.get('total_size', 0) elif field_schema['type'].startswith('basic:dir:'): add_dir_size(value) data_size += value.get('total_size', 0) elif field_schema['type'].startswith('list:basic:dir:'): for obj in value: add_dir_size(obj) data_size += obj.get('total_size', 0) data.size = data_size
Add file and dir sizes. Add sizes to ``basic:file:``, ``list:basic:file``, ``basic:dir:`` and ``list:basic:dir:`` fields. ``force`` parameter is used to recompute file sizes also on objects that already have these values, e.g. in migrations.
entailment
def render_descriptor(data): """Render data descriptor. The rendering is based on descriptor schema and input context. :param data: data instance :type data: :class:`resolwe.flow.models.Data` or :class:`dict` """ if not data.descriptor_schema: return # Set default values for field_schema, field, path in iterate_schema(data.descriptor, data.descriptor_schema.schema, 'descriptor'): if 'default' in field_schema and field_schema['name'] not in field: dict_dot(data, path, field_schema['default'])
Render data descriptor. The rendering is based on descriptor schema and input context. :param data: data instance :type data: :class:`resolwe.flow.models.Data` or :class:`dict`
entailment
def render_template(process, template_string, context): """Render template using the specified expression engine.""" from resolwe.flow.managers import manager # Get the appropriate expression engine. If none is defined, do not evaluate # any expressions. expression_engine = process.requirements.get('expression-engine', None) if not expression_engine: return template_string return manager.get_expression_engine(expression_engine).evaluate_block(template_string, context)
Render template using the specified expression engine.
entailment
def json_path_components(path): """Convert JSON path to individual path components. :param path: JSON path, which can be either an iterable of path components or a dot-separated string :return: A list of path components """ if isinstance(path, str): path = path.split('.') return list(path)
Convert JSON path to individual path components. :param path: JSON path, which can be either an iterable of path components or a dot-separated string :return: A list of path components
entailment
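json_path_components is a pure function, so its behavior is easy to pin down with a couple of examples:

    assert json_path_components('output.bam.file') == ['output', 'bam', 'file']
    assert json_path_components(['output', 'bam']) == ['output', 'bam']
    assert json_path_components('size') == ['size']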
def validate_process_subtype(supertype_name, supertype, subtype_name, subtype): """Perform process subtype validation. :param supertype_name: Supertype name :param supertype: Supertype schema :param subtype_name: Subtype name :param subtype: Subtype schema :return: A list of validation error strings """ errors = [] for item in supertype: # Ensure that the item exists in subtype and has the same schema. for subitem in subtype: if item['name'] != subitem['name']: continue for key in set(item.keys()) | set(subitem.keys()): if key in ('label', 'description'): # Label and description can differ. continue elif key == 'required': # A non-required item can be made required in subtype, but not the # other way around. item_required = item.get('required', True) subitem_required = subitem.get('required', False) if item_required and not subitem_required: errors.append("Field '{}' is marked as required in '{}' and optional in '{}'.".format( item['name'], supertype_name, subtype_name, )) elif item.get(key, None) != subitem.get(key, None): errors.append("Schema for field '{}' in type '{}' does not match supertype '{}'.".format( item['name'], subtype_name, supertype_name )) break else: errors.append("Schema for type '{}' is missing supertype '{}' field '{}'.".format( subtype_name, supertype_name, item['name'] )) return errors
Perform process subtype validation. :param supertype_name: Supertype name :param supertype: Supertype schema :param subtype_name: Subtype name :param subtype: Subtype schema :return: A list of validation error strings
entailment
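The subtype check operates on plain dictionaries, so a self-contained example is possible; here a field that the supertype marks as required is relaxed in the subtype, which the function reports as an error (the type names are made up for illustration):

    supertype = [{'name': 'src', 'type': 'basic:string:', 'required': True}]
    subtype = [{'name': 'src', 'type': 'basic:string:', 'required': False}]

    errors = validate_process_subtype('data:alignment', supertype, 'data:alignment:bam', subtype)
    # ["Field 'src' is marked as required in 'data:alignment' and optional in 'data:alignment:bam'."]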
def validate_process_types(queryset=None): """Perform process type validation. :param queryset: Optional process queryset to validate :return: A list of validation error strings """ if not queryset: from .process import Process queryset = Process.objects.all() processes = {} for process in queryset: dict_dot( processes, process.type.replace(':', '.') + '__schema__', process.output_schema ) errors = [] for path, key, value in iterate_dict(processes, exclude=lambda key, value: key == '__schema__'): if '__schema__' not in value: continue # Validate with any parent types. for length in range(len(path), 0, -1): parent_type = '.'.join(path[:length] + ['__schema__']) try: parent_schema = dict_dot(processes, parent_type) except KeyError: continue errors += validate_process_subtype( supertype_name=':'.join(path[:length]), supertype=parent_schema, subtype_name=':'.join(path + [key]), subtype=value['__schema__'] ) return errors
Perform process type validation. :param queryset: Optional process queryset to validate :return: A list of validation error strings
entailment
def fill_with_defaults(process_input, input_schema): """Fill empty optional fields in input with default values.""" for field_schema, fields, path in iterate_schema(process_input, input_schema): if 'default' in field_schema and field_schema['name'] not in fields: dict_dot(process_input, path, field_schema['default'])
Fill empty optional fields in input with default values.
entailment
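A small sketch of the default-filling behavior, assuming iterate_schema and dict_dot work as in resolwe.flow.utils; values that are already given are left alone, and only missing fields with a default are set:

    input_schema = [
        {'name': 'threads', 'type': 'basic:integer:', 'default': 1},
        {'name': 'genome', 'type': 'basic:string:', 'default': 'hg38'},
    ]
    process_input = {'genome': 'mm10'}

    fill_with_defaults(process_input, input_schema)
    # process_input == {'threads': 1, 'genome': 'mm10'}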
def to_internal_value(self, data): """Format the internal value.""" # When setting the contributor, it may be passed as an integer. if isinstance(data, dict) and isinstance(data.get('id', None), int): data = data['id'] elif isinstance(data, int): pass else: raise ValidationError("Contributor must be an integer or a dictionary with key 'id'") return self.Meta.model.objects.get(pk=data)
Format the internal value.
entailment
def extract_descriptor(self, obj): """Extract data from the descriptor.""" descriptor = [] def flatten(current): """Flatten descriptor.""" if isinstance(current, dict): for key in current: flatten(current[key]) elif isinstance(current, list): for val in current: flatten(val) elif isinstance(current, (int, bool, float, str)): descriptor.append(str(current)) flatten(obj.descriptor) return descriptor
Extract data from the descriptor.
entailment
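The flatten walk above stringifies every scalar it meets, depth-first; a sketch of the expected result for a small descriptor (dict iteration follows insertion order in Python 3.7+):

    # Given obj.descriptor == {'sample': {'organism': 'Homo sapiens', 'replicate': 2}},
    # the method would return ['Homo sapiens', '2'].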
def _serialize_items(self, serializer, kind, items): """Return serialized items or list of ids, depending on `hydrate_XXX` query param.""" if self.request and self.request.query_params.get('hydrate_{}'.format(kind), False): serializer = serializer(items, many=True, read_only=True) serializer.bind(kind, self) return serializer.data else: return [item.id for item in items]
Return serialized items or list of ids, depending on `hydrate_XXX` query param.
entailment
def get_entity_names(self, data): """Return serialized list of entity names on data that user has `view` permission on.""" entities = self._filter_queryset('view_entity', data.entity_set.all()) return list(entities.values_list('name', flat=True))
Return serialized list of entity names on data that user has `view` permission on.
entailment
def get_collections(self, data): """Return serialized list of collection objects on data that user has `view` permission on.""" collections = self._filter_queryset('view_collection', data.collection_set.all()) from .collection import CollectionSerializer class CollectionWithoutDataSerializer(WithoutDataSerializerMixin, CollectionSerializer): """Collection without data field serializer.""" return self._serialize_items(CollectionWithoutDataSerializer, 'collections', collections)
Return serialized list of collection objects on data that user has `view` permission on.
entailment
def get_entities(self, data): """Return serialized list of entity objects on data that user has `view` permission on.""" entities = self._filter_queryset('view_entity', data.entity_set.all()) from .entity import EntitySerializer class EntityWithoutDataSerializer(WithoutDataSerializerMixin, EntitySerializer): """Entity without data field serializer.""" return self._serialize_items(EntityWithoutDataSerializer, 'entities', entities)
Return serialized list of entity objects on data that user has `view` permission on.
entailment
def get_fields(self): """Dynamically adapt fields based on the current request.""" fields = super(DataSerializer, self).get_fields() # Hide collections/entities fields on list views as fetching them may be expensive. if self.parent is not None: del fields['collections'] del fields['entities'] return fields
Dynamically adapt fields based on the current request.
entailment
def handle(self, *args, **options): """Handle command list_docker_images.""" verbosity = int(options.get('verbosity')) # Check that the specified output format is valid if options['format'] != 'plain' and options['format'] != 'yaml': raise CommandError("Unknown output format: %s" % options['format']) # Gather only unique latest custom Docker requirements that the processes are using # The 'image' field is optional, so be careful about that as well unique_docker_images = set( p.requirements['executor']['docker']['image'] for p in Process.objects.filter(is_active=True).order_by( 'slug', '-version' ).distinct( 'slug' ).only( 'requirements' ).filter( requirements__icontains='docker' ) if 'image' in p.requirements.get('executor', {}).get('docker', {}) ) # Add the default image. unique_docker_images.add(DEFAULT_CONTAINER_IMAGE) # Pull images if requested or just output the list in specified format if options['pull']: # Remove already pulled images from the set. with PULLED_IMAGES_LOCK: unique_docker_images.difference_update(PULLED_IMAGES) # Get the desired 'docker' command from settings or use the default docker = getattr(settings, 'FLOW_DOCKER_COMMAND', 'docker') # Pull each image for img in unique_docker_images: ret = subprocess.call( shlex.split('{} pull {}'.format(docker, img)), stdout=None if verbosity > 0 else subprocess.DEVNULL, stderr=None if verbosity > 0 else subprocess.DEVNULL, ) if ret != 0: errmsg = "Failed to pull Docker image '{}'!".format(img) if not options['ignore_pull_errors']: # Print error and stop execution raise CommandError(errmsg) else: # Print error, but keep going logger.error(errmsg) if verbosity > 0: self.stderr.write(errmsg) else: # Record the image as pulled only after a successful pull. with PULLED_IMAGES_LOCK: PULLED_IMAGES.add(img) msg = "Docker image '{}' pulled successfully!".format(img) logger.info(msg) if verbosity > 0: self.stdout.write(msg) else: # Sort the set of unique Docker images for nicer output. unique_docker_images = sorted(unique_docker_images) # Convert the set of unique Docker images into a list of dicts for easier output imgs = [ dict(name=s[0], tag=s[1] if len(s) == 2 else 'latest') for s in (img.split(':') for img in unique_docker_images) ] # Output in YAML or plaintext (one image per line), as requested if options['format'] == 'yaml': out = yaml.safe_dump(imgs, default_flow_style=True, default_style="'") else: out = functools.reduce(operator.add, ('{name}:{tag}\n'.format(**i) for i in imgs), '') self.stdout.write(out, ending='')
Handle command list_docker_images.
entailment
def create(self, request, *args, **kwargs): """Create a resource.""" collections = request.data.get('collections', []) # check that user has permissions on all collections that Data # object will be added to for collection_id in collections: try: collection = Collection.objects.get(pk=collection_id) except Collection.DoesNotExist: return Response({'collections': ['Invalid pk "{}" - object does not exist.'.format(collection_id)]}, status=status.HTTP_400_BAD_REQUEST) if not request.user.has_perm('add_collection', obj=collection): if request.user.has_perm('view_collection', obj=collection): raise exceptions.PermissionDenied( "You don't have `ADD` permission on collection (id: {}).".format(collection_id) ) else: raise exceptions.NotFound( "Collection not found (id: {}).".format(collection_id) ) self.define_contributor(request) if kwargs.pop('get_or_create', False): response = self.perform_get_or_create(request, *args, **kwargs) if response: return response return super().create(request, *args, **kwargs)
Create a resource.
entailment
def get_or_create(self, request, *args, **kwargs): """Get ``Data`` object if similar already exists, otherwise create it.""" kwargs['get_or_create'] = True return self.create(request, *args, **kwargs)
Get ``Data`` object if similar already exists, otherwise create it.
entailment
def perform_get_or_create(self, request, *args, **kwargs): """Perform "get_or_create" - return existing object if found.""" serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) process = serializer.validated_data.get('process') process_input = request.data.get('input', {}) fill_with_defaults(process_input, process.input_schema) checksum = get_data_checksum(process_input, process.slug, process.version) data_qs = Data.objects.filter( checksum=checksum, process__persistence__in=[Process.PERSISTENCE_CACHED, Process.PERSISTENCE_TEMP], ) data_qs = get_objects_for_user(request.user, 'view_data', data_qs) if data_qs.exists(): data = data_qs.order_by('created').last() serializer = self.get_serializer(data) return Response(serializer.data)
Perform "get_or_create" - return existing object if found.
entailment
def perform_create(self, serializer): """Create a resource.""" process = serializer.validated_data.get('process') if not process.is_active: raise exceptions.ParseError( 'Process retired (id: {}, slug: {}/{}).'.format(process.id, process.slug, process.version) ) with transaction.atomic(): instance = serializer.save() assign_contributor_permissions(instance) # Entity is added to the collection only when it is # created - when it only contains 1 Data object. entities = Entity.objects.annotate(num_data=Count('data')).filter(data=instance, num_data=1) # Assign data object to all specified collections. collection_pks = self.request.data.get('collections', []) for collection in Collection.objects.filter(pk__in=collection_pks): collection.data.add(instance) copy_permissions(collection, instance) # Add entities to which data belongs to the collection. for entity in entities: entity.collections.add(collection) copy_permissions(collection, entity)
Create a resource.
entailment
def ready(self): """Called once per Django process instance. If the filesystem setup fails or if an error is found in settings.py, django.core.exceptions.ImproperlyConfigured is raised, causing Django not to launch the main GMN app. """ # Stop the startup code from running automatically from pytest unit tests. # When running tests in parallel with xdist, an instance of GMN is launched # before thread specific settings have been applied. # if hasattr(sys, '_launched_by_pytest'): # return self._assert_readable_file_if_set('CLIENT_CERT_PATH') self._assert_readable_file_if_set('CLIENT_CERT_PRIVATE_KEY_PATH') self._assert_dirs_exist('OBJECT_FORMAT_CACHE_PATH') self._assert_is_type('SCIMETA_VALIDATION_ENABLED', bool) self._assert_is_type('SCIMETA_VALIDATION_MAX_SIZE', int) self._assert_is_in('SCIMETA_VALIDATION_OVER_SIZE_ACTION', ('reject', 'accept')) self._warn_unsafe_for_prod() self._check_resource_map_create() if not d1_gmn.app.sciobj_store.is_existing_store(): self._create_sciobj_store_root() self._add_xslt_mimetype()
Called once per Django process instance. If the filesystem setup fails or if an error is found in settings.py, django.core.exceptions.ImproperlyConfigured is raised, causing Django not to launch the main GMN app.
entailment
def _assert_dirs_exist(self, setting_name): """Check that the dirs leading up to the given file path exist. Does not check if the file exists. """ v = self._get_setting(setting_name) if (not os.path.isdir(os.path.split(v)[0])) or os.path.isdir(v): self.raise_config_error( setting_name, v, str, 'a file path in an existing directory', is_none_allowed=False, )
Check that the dirs leading up to the given file path exist. Does not check if the file exists.
entailment
def _warn_unsafe_for_prod(self): """Warn on settings that are not safe for production.""" safe_settings_list = [ ('DEBUG', False), ('DEBUG_GMN', False), ('STAND_ALONE', False), ('DATABASES.default.ATOMIC_REQUESTS', True), ('SECRET_KEY', '<Do not modify this placeholder value>'), ('STATIC_SERVER', False), ] for setting_str, setting_safe in safe_settings_list: setting_current = self._get_setting(setting_str) if setting_current != setting_safe: logger.warning( 'Setting is unsafe for use in production. setting="{}" current="{}" ' 'safe="{}"'.format(setting_str, setting_current, setting_safe) )
Warn on settings that are not safe for production.
entailment
def _get_setting(self, setting_dotted_name, default=None): """Return the value of a potentially nested dict setting. E.g., 'DATABASES.default.NAME'. """ name_list = setting_dotted_name.split('.') setting_obj = getattr(django.conf.settings, name_list[0], default) return functools.reduce( lambda o, a: o.get(a, default), [setting_obj] + name_list[1:] )
Return the value of a potentially nested dict setting. E.g., 'DATABASES.default.NAME'.
entailment
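Stripped of Django, the reduce-based traversal is just a chained dict lookup with a shared default; a minimal standalone sketch (the settings dict is a stand-in):

    import functools

    settings_obj = {'default': {'NAME': 'gmn'}}  # stand-in for django.conf.settings.DATABASES

    value = functools.reduce(
        lambda o, a: o.get(a, None),  # each step descends one key, falling back to the default
        [settings_obj] + ['default', 'NAME'],
    )
    assert value == 'gmn'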
def _refresh_connection(self): """Refresh the connection to Elasticsearch when a worker is started. File descriptors (sockets) can be shared between multiple threads. If the same connection is used by multiple threads at the same time, this can cause timeouts in some of the pushes, so the connection needs to be re-established in each thread to make sure that it is unique per thread. """ # A thread with the same id can be created when another one terminates, but that # is fine, as we are only concerned about concurrent pushes. current_thread_id = threading.current_thread().ident if current_thread_id != self.connection_thread_id: prepare_connection() self.connection_thread_id = current_thread_id
Refresh the connection to Elasticsearch when a worker is started. File descriptors (sockets) can be shared between multiple threads. If the same connection is used by multiple threads at the same time, this can cause timeouts in some of the pushes, so the connection needs to be re-established in each thread to make sure that it is unique per thread.
entailment
def generate_id(self, obj): """Generate unique document id for ElasticSearch.""" object_type = type(obj).__name__.lower() return '{}_{}'.format(object_type, self.get_object_id(obj))
Generate unique document id for ElasticSearch.
entailment
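Assuming get_object_id returns the model's primary key (it is not shown here), the generated document ids look like this:

    # A Data instance with pk 42 yields 'data_42'; an Entity with pk 7 yields 'entity_7'.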
def process_object(self, obj): """Process current object and push it to the ElasticSearch.""" document = self.document_class(meta={'id': self.generate_id(obj)}) for field in document._doc_type.mapping: # pylint: disable=protected-access if field in ['users_with_permissions', 'groups_with_permissions', 'public_permission']: continue # These fields are handled separately try: # use get_X_value function get_value_function = getattr(self, 'get_{}_value'.format(field), None) if get_value_function: setattr(document, field, get_value_function(obj)) # pylint: disable=not-callable continue # use `mapping` dict if field in self.mapping: if callable(self.mapping[field]): setattr(document, field, self.mapping[field](obj)) continue try: object_attr = dict_dot(obj, self.mapping[field]) except (KeyError, AttributeError): object_attr = None if callable(object_attr): # use method on object setattr(document, field, object_attr(obj)) else: # use attribute on object setattr(document, field, object_attr) continue # get value from the object try: object_value = dict_dot(obj, field) setattr(document, field, object_value) continue except KeyError: pass raise AttributeError("Cannot determine mapping for field {}".format(field)) except Exception: # pylint: disable=broad-except logger.exception( "Error occurred while setting value of field '%s' in '%s' Elasticsearch index.", field, self.__class__.__name__, extra={'object_type': self.object_type, 'obj_id': obj.pk} ) permissions = self.get_permissions(obj) document.users_with_permissions = permissions['users'] document.groups_with_permissions = permissions['groups'] document.public_permission = permissions['public'] self.push_queue.append(document)
Process current object and push it to the ElasticSearch.
entailment
def create_mapping(self): """Create the mappings in elasticsearch.""" try: self.document_class.init() self._mapping_created = True except IllegalOperation as error: if error.args[0].startswith('You cannot update analysis configuration'): # Ignore mapping update errors, which are thrown even when the analysis # configuration stays the same. # TODO: Remove this when https://github.com/elastic/elasticsearch-dsl-py/pull/272 is merged. return raise
Create the mappings in elasticsearch.
entailment
def build(self, obj=None, queryset=None, push=True): """Build indexes.""" if obj is not None and queryset is not None: raise ValueError( "Only one of 'obj' and 'queryset' parameters can be passed to the build method." ) if obj is not None: if self.queryset.model != obj._meta.model: # pylint: disable=protected-access logger.debug( "Object type mismatch, skipping build of '%s' Elasticsearch index.", self.__class__.__name__ ) return if not self.queryset.filter(pk=self.get_object_id(obj)).exists(): logger.debug( "Object not in predefined queryset, skipping build of '%s' Elasticsearch index.", self.__class__.__name__ ) return elif queryset is not None: if self.queryset.model != queryset.model: logger.debug( "Queryset type mismatch, skipping build of '%s' Elasticsearch index.", self.__class__.__name__ ) return FULL_REBUILD = 'full' # pylint: disable=invalid-name def handler(agg=None): """Index build handler.""" if agg == FULL_REBUILD: queryset = self.queryset.all() else: queryset = self.queryset.none().union(*agg) self._build(queryset=queryset, push=push) def aggregator(agg=None): """Index build aggregator.""" if agg == FULL_REBUILD: # A full rebuild is required, ignore any other builds. pass else: if agg is None: agg = [] if obj is not None: # Build of a single object. agg.append(self.queryset.filter(pk=obj.pk)) elif queryset is not None: # Build of multiple objects. agg.append(queryset) else: # Full rebuild, ignore any other builds. agg = FULL_REBUILD return agg batcher = PrioritizedBatcher.global_instance() if batcher.is_started: batcher.add('resolwe.elastic', handler, group_by=(self._index_name, push), aggregator=aggregator) else: self._build(obj=obj, queryset=queryset, push=push)
Build indexes.
entailment
def _build(self, obj=None, queryset=None, push=True): """Build indexes.""" logger.debug("Building '%s' Elasticsearch index...", self.__class__.__name__) if obj is not None: build_list = [obj] elif queryset is not None: build_list = self.queryset.intersection(queryset) logger.debug("Found %s elements to build.", build_list.count()) else: build_list = self.queryset.all() logger.debug("Found %s elements to build.", build_list.count()) for obj in build_list: if self.filter(obj) is False: continue try: obj = self.preprocess_object(obj) except Exception: # pylint: disable=broad-except logger.exception( "Error occurred while preprocessing '%s' Elasticsearch index.", self.__class__.__name__, extra={'object_type': self.object_type, 'obj_id': obj.pk} ) try: self.process_object(obj) except Exception: # pylint: disable=broad-except logger.exception( "Error occurred while processing '%s' Elasticsearch index.", self.__class__.__name__, extra={'object_type': self.object_type, 'obj_id': obj.pk} ) logger.debug("Finished building '%s' Elasticsearch index.", self.__class__.__name__) if push: self.push()
Build indexes.
entailment
def push(self): """Push built documents to ElasticSearch.""" self._refresh_connection() # Check if we need to update mappings as this needs to be done # before we push anything to the Elasticsearch server. # This must be done even if the queue is empty, as otherwise ES # will fail when retrieving data. if not self._mapping_created: logger.debug("Pushing mapping for Elasticsearch index '%s'.", self.__class__.__name__) self.create_mapping() if not self.push_queue: logger.debug("No documents to push, skipping push.") return logger.debug("Found %s documents to push to Elasticsearch.", len(self.push_queue)) bulk(connections.get_connection(), (doc.to_dict(True) for doc in self.push_queue), refresh=True) self.push_queue = [] logger.debug("Finished pushing builded documents to Elasticsearch server.")
Push built documents to ElasticSearch.
entailment
def destroy(self): """Destroy an index.""" self._refresh_connection() self.push_queue = [] index_name = self.document_class()._get_index() # pylint: disable=protected-access connections.get_connection().indices.delete(index_name, ignore=404) self._mapping_created = False
Destroy an index.
entailment
def get_permissions(self, obj): """Return users and groups with ``view`` permission on the current object. Return a dict with two keys - ``users`` and ``groups`` - which contain list of ids of users/groups with ``view`` permission. """ # TODO: Optimize this for bulk running filters = { 'object_pk': obj.id, 'content_type': ContentType.objects.get_for_model(obj), 'permission__codename__startswith': 'view', } return { 'users': list( UserObjectPermission.objects.filter(**filters).distinct('user').values_list('user_id', flat=True) ), 'groups': list( GroupObjectPermission.objects.filter(**filters).distinct('group').values_list('group', flat=True) ), 'public': UserObjectPermission.objects.filter(user__username=ANONYMOUS_USER_NAME, **filters).exists(), }
Return users and groups with ``view`` permission on the current object. Return a dict with two keys - ``users`` and ``groups`` - which contain list of ids of users/groups with ``view`` permission.
entailment
def remove_object(self, obj): """Remove current object from the ElasticSearch.""" obj_id = self.generate_id(obj) es_obj = self.document_class.get(obj_id, ignore=[404]) # Object may not exist in this index. if es_obj: es_obj.delete(refresh=True)
Remove current object from the ElasticSearch.
entailment
def generate_pyxb_binding(self, args): """Args: args: """ pyxbgen_args = [] pyxbgen_args.append('--schema-root=\'{}\''.format(self.schema_dir)) pyxbgen_args.append('--binding-root=\'{}\''.format(self.binding_dir)) pyxbgen_args.append( '--schema-stripped-prefix=' '\'https://repository.dataone.org/software/cicore/branches/D1_SCHEMA_v1.1/\'' ) pyxbgen_args.extend(args) self.run_pyxbgen(pyxbgen_args)
Args: args:
entailment
def run_pyxbgen(self, args): """Args: args: """ cmd = 'pyxbgen {}'.format(' '.join(args)) print(cmd) os.system(cmd)
Args: args:
entailment
def generate_version_file(self, schema_filename, binding_filename): """Given a DataONE schema, generates a file that contains version information about the schema.""" version_filename = binding_filename + '_version.txt' version_path = os.path.join(self.binding_dir, version_filename) schema_path = os.path.join(self.schema_dir, schema_filename) try: tstamp, svnpath, svnrev, version = self.get_version_info_from_svn( schema_path ) except TypeError: pass else: self.write_version_file(version_path, tstamp, svnpath, svnrev, version)
Given a DataONE schema, generates a file that contains version information about the schema.
entailment
def write_version_file(self, version_file_path, tstamp, svnpath, svnrev, version): """Args: version_file_path: tstamp: svnpath: svnrev: version: """ txt = """# This file is automatically generated. Manual edits will be erased. # When this file was generated TIMESTAMP="{0}" # Path of the schema used in the repository SVNPATH="{1}" # SVN revision of the schema that was used SVNREVISION="{2}" # The version tag of the schema VERSION="{3}" """.format( tstamp, svnpath, svnrev, version ) with open(version_file_path, 'w') as f: f.write(txt)
Args: version_file_path: tstamp: svnpath: svnrev: version:
entailment
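The {0} in the template above matters because str.format does not allow switching between automatic and manual field numbering; a quick illustration:

    '{} {1}'.format('a', 'b')   # ValueError: cannot switch from automatic field numbering to manual field specification
    '{0} {1}'.format('a', 'b')  # 'a b'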
def updateSystemMetadataResponse(self, pid, sysmeta_pyxb, vendorSpecific=None): """MNStorage.updateSystemMetadata(session, pid, sysmeta) → boolean http://jenkins-1.dataone.org/documentation/unstable/API-Documentation-development/apis/MN_APIs.html#MNStorage.updateSystemMetadata. Args: pid: sysmeta_pyxb: vendorSpecific: Returns: """ mmp_dict = { 'pid': pid.encode('utf-8'), 'sysmeta': ('sysmeta.xml', sysmeta_pyxb.toxml('utf-8')), } return self.PUT('meta', fields=mmp_dict, headers=vendorSpecific)
MNStorage.updateSystemMetadata(session, pid, sysmeta) → boolean http://jenkins-1.dataone.org/documentation/unstable/API-Documentation-development/apis/MN_APIs.html#MNStorage.updateSystemMetadata. Args: pid: sysmeta_pyxb: vendorSpecific: Returns:
entailment
def set_data_location(apps, schema_editor): """Create DataLocation for each Data.""" Data = apps.get_model('flow', 'Data') DataLocation = apps.get_model('flow', 'DataLocation') for data in Data.objects.all(): if os.path.isdir(os.path.join(settings.FLOW_EXECUTOR['DATA_DIR'], str(data.id))): with transaction.atomic(): # Manually set DataLocation id to preserve data directory. data_location = DataLocation.objects.create(id=data.id, subpath=str(data.id)) data_location.data.add(data) # Increment DataLocation id's sequence if DataLocation.objects.exists(): max_id = DataLocation.objects.order_by('id').last().id with connection.cursor() as cursor: cursor.execute( "ALTER SEQUENCE flow_datalocation_id_seq RESTART WITH {};".format(max_id + 1) )
Create DataLocation for each Data.
entailment