_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q28700
distVersion
train
def distVersion():
    """
    The distribution version identifying a published release on PyPI.

    Appends a '.dev<buildNumber>' suffix to the base version when a build number
    is available and the base version is a prerelease; otherwise returns the
    base version unchanged.
    """
    # Imported lazily so importing this module does not itself require
    # pkg_resources.
    from pkg_resources import parse_version
    build_number = buildNumber()
    parsedBaseVersion = parse_version(baseVersion)
    if isinstance(parsedBaseVersion, tuple):
        # Setuptools < 8.0 returned a plain tuple from parse_version instead of
        # a Version object carrying the .is_prerelease attribute used below.
        raise RuntimeError("Setuptools version 8.0 or newer required. Update by running "
                           "'pip install setuptools --upgrade'")
    if build_number is not None and parsedBaseVersion.is_prerelease:
        return baseVersion + '.dev' + build_number
    else:
        return baseVersion
python
{ "resource": "" }
q28701
GlobalThrottle.throttle
train
def throttle( self, wait=True ): """ If the wait parameter is True, this method returns True after suspending the current thread as necessary to ensure that no less than the configured minimum interval passed since the most recent time an invocation of this method returned True in any thread. If the wait parameter is False, this method immediatly returns True if at least the configured minimum interval has passed since the most recent time this method returned True in any thread, or False otherwise. """ # I think there is a race in Thread.start(), hence the lock with self.thread_start_lock: if not self.thread_started: self.thread.start( ) self.thread_started = True return self.semaphore.acquire( blocking=wait )
python
{ "resource": "" }
q28702
LocalThrottle.throttle
train
def throttle( self, wait=True ):
    """
    If the wait parameter is True, this method returns True after suspending the
    current thread as necessary to ensure that no less than the configured minimum
    interval has passed since the last invocation of this method in the current
    thread returned True.

    If the wait parameter is False, this method immediately returns True (if at
    least the configured minimum interval has passed since the last time this
    method returned True in the current thread) or False otherwise.
    """
    now = time.time( )
    # Per-thread state: timestamp of the last invocation that returned True.
    last_invocation = self.per_thread.last_invocation
    if last_invocation is not None:
        interval = now - last_invocation
        if interval < self.min_interval:
            if wait:
                # Sleep off the remainder of the minimum interval.
                remainder = self.min_interval - interval
                time.sleep( remainder )
            else:
                return False
    # NOTE(review): 'now' was captured before any sleep, so after waiting the
    # recorded timestamp slightly predates the actual return time.
    self.per_thread.last_invocation = now
    return True
python
{ "resource": "" }
q28703
ParasolBatchSystem._runParasol
train
def _runParasol(self, command, autoRetry=True):
    """
    Issues a parasol command using Popen to capture the output.

    If the command fails and autoRetry is True, it is retried every 10 seconds
    until it succeeds. If autoRetry is False, a single failure is final.

    :param list command: the parasol subcommand and arguments, appended to
        self.parasolCommand.
    :param bool autoRetry: whether to keep retrying on a non-zero exit status.
    :return: tuple (exitStatus, output) where output is the list of stdout
        lines on success, or None when autoRetry is False and the command
        failed.
    """
    command = list(concat(self.parasolCommand, command))
    while True:
        logger.debug('Running %r', command)
        process = subprocess.Popen(command,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   bufsize=-1)
        stdout, stderr = process.communicate()
        status = process.wait()
        # Surface everything parasol wrote to stderr as warnings.
        for line in stderr.decode('utf-8').split('\n'):
            if line:
                logger.warn(line)
        if status == 0:
            return 0, stdout.decode('utf-8').split('\n')
        message = 'Command %r failed with exit status %i' % (command, status)
        if autoRetry:
            logger.warn(message)
        else:
            logger.error(message)
            return status, None
        logger.warn('Waiting for a 10s, before trying again')
        time.sleep(10)
python
{ "resource": "" }
q28704
ParasolBatchSystem.issueBatchJob
train
def issueBatchJob(self, jobNode):
    """
    Issues parasol with job commands.

    :param jobNode: the job to issue, carrying memory/cores/disk requirements
        and the command string to run.
    :return: the integer job ID parasol assigned to the job.
    """
    self.checkResourceRequest(jobNode.memory, jobNode.cores, jobNode.disk)
    MiB = 1 << 20
    truncatedMemory = (old_div(jobNode.memory, MiB)) * MiB
    # Look for a batch for jobs with these resource requirements, with
    # the memory rounded down to the nearest megabyte. Rounding down
    # means the new job can't ever decrease the memory requirements
    # of jobs already in the batch.
    if len(self.resultsFiles) >= self.maxBatches:
        raise RuntimeError( 'Number of batches reached limit of %i' % self.maxBatches)
    try:
        results = self.resultsFiles[(truncatedMemory, jobNode.cores)]
    except KeyError:
        # First job with this (memory, cores) combination: start a new batch
        # backed by a fresh parasol results file.
        results = getTempFile(rootDir=self.parasolResultsDir)
        self.resultsFiles[(truncatedMemory, jobNode.cores)] = results
    # Prefix the command with environment overrides, optionally looking them up from the
    # current environment if the value is None
    command = ' '.join(concat('env', self.__environment(), jobNode.command))
    parasolCommand = ['-verbose',
                      '-ram=%i' % jobNode.memory,
                      '-cpu=%i' % jobNode.cores,
                      '-results=' + results,
                      'add', 'job', command]
    # Deal with the cpus
    self.usedCpus += jobNode.cores
    while True:  # Process finished results with no wait
        try:
            jobID = self.cpuUsageQueue.get_nowait()
        except Empty:
            break
        if jobID in list(self.jobIDsToCpu.keys()):
            self.usedCpus -= self.jobIDsToCpu.pop(jobID)
        assert self.usedCpus >= 0
    while self.usedCpus > self.maxCores:  # If we are still waiting
        # Block until a running job finishes and frees its CPUs.
        jobID = self.cpuUsageQueue.get()
        if jobID in list(self.jobIDsToCpu.keys()):
            self.usedCpus -= self.jobIDsToCpu.pop(jobID)
        assert self.usedCpus >= 0
    # Now keep going
    while True:
        line = self._runParasol(parasolCommand)[1][0]
        match = self.parasolOutputPattern.match(line)
        if match is None:
            # This is because parasol add job will return success, even if the job was not
            # properly issued!
            logger.debug('We failed to properly add the job, we will try again after a 5s.')
            time.sleep(5)
        else:
            jobID = int(match.group(1))
            self.jobIDsToCpu[jobID] = jobNode.cores
            self.runningJobs.add(jobID)
            logger.debug("Got the parasol job id: %s from line: %s" % (jobID, line))
            return jobID
python
{ "resource": "" }
q28705
ParasolBatchSystem.getJobIDsForResultsFile
train
def getJobIDsForResultsFile(self, resultsFile):
    """
    Get all queued and running jobs for a results file.

    :param str resultsFile: path of the parasol results file to filter on.
    :return: set of integer job IDs whose listing ends with resultsFile.
    """
    matching = set()
    for line in self._runParasol(['-extended', 'list', 'jobs'])[1]:
        fields = line.strip().split()
        # The last field of an extended listing is the results file path;
        # skip blank lines and jobs belonging to other results files.
        if fields and fields[-1] == resultsFile:
            matching.add(int(fields[0]))
    return matching
python
{ "resource": "" }
q28706
ParasolBatchSystem.getIssuedBatchJobIDs
train
def getIssuedBatchJobIDs(self):
    """
    Gets the list of jobs issued to parasol in all results files, but
    not including jobs created by other users.
    """
    allIssued = set()
    # Union the job IDs recorded in every batch's results file.
    for resultsFile in itervalues(self.resultsFiles):
        allIssued |= self.getJobIDsForResultsFile(resultsFile)
    return list(allIssued)
python
{ "resource": "" }
q28707
ParasolBatchSystem.getRunningBatchJobIDs
train
def getRunningBatchJobIDs(self): """ Returns map of running jobIDs and the time they have been running. """ # Example lines.. # r 5410186 benedictpaten worker 1247029663 localhost # r 5410324 benedictpaten worker 1247030076 localhost runningJobs = {} issuedJobs = self.getIssuedBatchJobIDs() for line in self._runParasol(['pstat2'])[1]: if line != '': match = self.runningPattern.match(line) if match is not None: jobID = int(match.group(1)) startTime = int(match.group(2)) if jobID in issuedJobs: # It's one of our jobs runningJobs[jobID] = time.time() - startTime return runningJobs
python
{ "resource": "" }
q28708
ParasolBatchSystem.updatedJobWorker
train
def updatedJobWorker(self):
    """
    We use the parasol results to update the status of jobs, adding them to the
    list of updated jobs.

    Results have the following structure.. (thanks Mark D!)

        int status;          /* Job status - wait() return format. 0 is good. */
        char *host;          /* Machine job ran on. */
        char *jobId;         /* Job queuing system job ID */
        char *exe;           /* Job executable file (no path) */
        int usrTicks;        /* 'User' CPU time in ticks. */
        int sysTicks;        /* 'System' CPU time in ticks. */
        unsigned submitTime; /* Job submission time in seconds since 1/1/1970 */
        unsigned startTime;  /* Job start time in seconds since 1/1/1970 */
        unsigned endTime;    /* Job end time in seconds since 1/1/1970 */
        char *user;          /* User who ran job */
        char *errFile;       /* Location of stderr file on host */

    Plus you finally have the command name.
    """
    resultsFiles = set()
    resultsFileHandles = []
    try:
        while self.running:
            # Look for any new results files that have been created, and open them
            newResultsFiles = set(os.listdir(self.parasolResultsDir)).difference(resultsFiles)
            for newFile in newResultsFiles:
                newFilePath = os.path.join(self.parasolResultsDir, newFile)
                resultsFileHandles.append(open(newFilePath, 'r'))
                resultsFiles.add(newFile)
            # Drain any new lines appended to each open results file.
            for fileHandle in resultsFileHandles:
                while self.running:
                    line = fileHandle.readline()
                    if not line:
                        break
                    assert line[-1] == '\n'
                    (status, host, jobId, exe, usrTicks, sysTicks, submitTime, startTime,
                     endTime, user, errFile, command) = line[:-1].split(None, 11)
                    status = int(status)
                    jobId = int(jobId)
                    # Translate the wait()-style status word into a plain exit
                    # code (negated if the job was killed by a signal).
                    if os.WIFEXITED(status):
                        status = os.WEXITSTATUS(status)
                    else:
                        status = -status
                    self.cpuUsageQueue.put(jobId)
                    startTime = int(startTime)
                    endTime = int(endTime)
                    if endTime == startTime:
                        # Both, start and end time is an integer so to get sub-second
                        # accuracy we use the ticks reported by Parasol as an approximation.
                        # This isn't documented but what Parasol calls "ticks" is actually a
                        # hundredth of a second. Parasol does the unit conversion early on
                        # after a job finished. Search paraNode.c for ticksToHundreths. We
                        # also cheat a little by always reporting at least one hundredth of a
                        # second.
                        usrTicks = int(usrTicks)
                        sysTicks = int(sysTicks)
                        wallTime = float( max( 1, usrTicks + sysTicks) ) * 0.01
                    else:
                        wallTime = float(endTime - startTime)
                    self.updatedJobsQueue.put((jobId, status, wallTime))
            time.sleep(1)
    except:
        # Log for visibility, then re-raise so the failure is not swallowed.
        logger.warn("Error occurred while parsing parasol results files.")
        raise
    finally:
        for fileHandle in resultsFileHandles:
            fileHandle.close()
python
{ "resource": "" }
q28709
process_infile
train
def process_infile(f, fileStore):
    """
    Takes an array of files or a single file and imports into the jobstore.

    This returns a tuple or an array of tuples replacing all previous path
    strings. Toil does not preserve a file's original name upon import and so
    the tuple keeps track of this with the format:
    '(filepath, preserveThisFilename)'

    :param f: String or an Array. The smallest element must be a string, so:
        an array of strings, an array of arrays of strings... etc.
    :param fileStore: The filestore object that is called to load files into
        the filestore.
    :return: A tuple or an array of tuples.
    :raises RuntimeError: if f is not a tuple, list, or string.
    """
    # check if this has already been processed
    if isinstance(f, tuple):
        return f
    elif isinstance(f, list):
        return process_array_infile(f, fileStore)
    elif isinstance(f, basestring):
        return process_single_infile(f, fileStore)
    else:
        # Bug fix: the original used 'Error processing file: '.format(str(f))
        # — a format string with no placeholder — which silently dropped the
        # offending value from the error message.
        raise RuntimeError('Error processing file: {}'.format(f))
python
{ "resource": "" }
q28710
GCEProvisioner._readCredentials
train
def _readCredentials(self): """ Get the credentials from the file specified by GOOGLE_APPLICATION_CREDENTIALS. """ self._googleJson = os.getenv('GOOGLE_APPLICATION_CREDENTIALS') if not self._googleJson: raise RuntimeError('GOOGLE_APPLICATION_CREDENTIALS not set.') try: with open(self._googleJson) as jsonFile: self.googleConnectionParams = json.loads(jsonFile.read()) except: raise RuntimeError('GCEProvisioner: Could not parse the Google service account json file %s' % self._googleJson) self._projectId = self.googleConnectionParams['project_id'] self._clientEmail = self.googleConnectionParams['client_email'] self._credentialsPath = self._googleJson self._masterPublicKey = None self._gceDriver = self._getDriver()
python
{ "resource": "" }
q28711
GCEProvisioner.destroyCluster
train
def destroyCluster(self):
    """
    Try a few times to terminate all of the instances in the group,
    then destroy the instance group itself.
    """
    logger.debug("Destroying cluster %s" % self.clusterName)
    instancesToTerminate = self._getNodesInCluster()
    attempts = 0
    # Re-query after each termination pass; give up after 3 attempts so a
    # stuck instance cannot hang the teardown forever.
    while instancesToTerminate and attempts < 3:
        self._terminateInstances(instances=instancesToTerminate)
        instancesToTerminate = self._getNodesInCluster()
        attempts += 1
    # remove group
    instanceGroup = self._gceDriver.ex_get_instancegroup(self.clusterName, zone=self._zone)
    instanceGroup.destroy()
python
{ "resource": "" }
q28712
GCEProvisioner._injectWorkerFiles
train
def _injectWorkerFiles(self, node, botoExists):
    """
    Set up the credentials on the worker.

    Waits for the worker appliance to come up, copies the SSH keys, and then
    injects the Google service-account JSON plus the optional SSE key and
    boto config.
    """
    role = 'toil_worker'
    node.waitForNode(role, keyName=self._keyName)
    node.copySshKeys(self._keyName)
    # Service-account credentials the Google job store needs on the worker.
    node.injectFile(self._credentialsPath, GoogleJobStore.nodeServiceAccountJson, role)
    if self._sseKey:
        node.injectFile(self._sseKey, self._sseKey, role)
    if botoExists:
        node.injectFile(self._botoPath, self.NODE_BOTO_PATH, role)
python
{ "resource": "" }
q28713
GCEProvisioner._getDriver
train
def _getDriver(self):
    """Create an authenticated libcloud connection to GCE."""
    gceCls = get_driver(Provider.GCE)
    driver = gceCls(self._clientEmail,
                    self._googleJson,
                    project=self._projectId,
                    datacenter=self._zone)
    return driver
python
{ "resource": "" }
q28714
GCEProvisioner.ex_create_multiple_nodes
train
def ex_create_multiple_nodes(
        self, base_name, size, image, number, location=None,
        ex_network='default', ex_subnetwork=None, ex_tags=None,
        ex_metadata=None, ignore_errors=True, use_existing_disk=True,
        poll_interval=2, external_ip='ephemeral',
        ex_disk_type='pd-standard', ex_disk_auto_delete=True,
        ex_service_accounts=None, timeout=DEFAULT_TASK_COMPLETION_TIMEOUT,
        description=None, ex_can_ip_forward=None, ex_disks_gce_struct=None,
        ex_nic_gce_struct=None, ex_on_host_maintenance=None,
        ex_automatic_restart=None, ex_image_family=None,
        ex_preemptible=None):
    """
    Monkey patch to gce.py in libcloud to allow disk and images to be
    specified. Also changed name to a uuid below. The prefix 'wp' identifies
    preemptable nodes and 'wn' non-preemptable nodes.

    :param int number: how many nodes to create.
    :param timeout: seconds to wait for all nodes before raising.
    :return: list of created node objects (entries may be None on failure
        when ignore_errors is set).
    :raises ValueError: if both 'image' and 'ex_image_family' are given.
    :raises Exception: on timeout waiting for node creation.
    """
    driver = self._getDriver()
    if image and ex_image_family:
        raise ValueError("Cannot specify both 'image' and "
                         "'ex_image_family'")
    location = location or driver.zone
    # Resolve string identifiers into libcloud objects where necessary; an
    # object passed in by the caller (anything with a .name) is used as-is.
    if not hasattr(location, 'name'):
        location = driver.ex_get_zone(location)
    if not hasattr(size, 'name'):
        size = driver.ex_get_size(size, location)
    if not hasattr(ex_network, 'name'):
        ex_network = driver.ex_get_network(ex_network)
    if ex_subnetwork and not hasattr(ex_subnetwork, 'name'):
        ex_subnetwork = \
            driver.ex_get_subnetwork(ex_subnetwork,
                                     region=driver._get_region_from_zone(location))
    if ex_image_family:
        image = driver.ex_get_image_from_family(ex_image_family)
    if image and not hasattr(image, 'name'):
        image = driver.ex_get_image(image)
    if not hasattr(ex_disk_type, 'name'):
        ex_disk_type = driver.ex_get_disktype(ex_disk_type, zone=location)
    node_attrs = {'size': size,
                  'image': image,
                  'location': location,
                  'network': ex_network,
                  'subnetwork': ex_subnetwork,
                  'tags': ex_tags,
                  'metadata': ex_metadata,
                  'ignore_errors': ignore_errors,
                  'use_existing_disk': use_existing_disk,
                  'external_ip': external_ip,
                  'ex_disk_type': ex_disk_type,
                  'ex_disk_auto_delete': ex_disk_auto_delete,
                  'ex_service_accounts': ex_service_accounts,
                  'description': description,
                  'ex_can_ip_forward': ex_can_ip_forward,
                  'ex_disks_gce_struct': ex_disks_gce_struct,
                  'ex_nic_gce_struct': ex_nic_gce_struct,
                  'ex_on_host_maintenance': ex_on_host_maintenance,
                  'ex_automatic_restart': ex_automatic_restart,
                  'ex_preemptible': ex_preemptible}
    # List for holding the status information for disk/node creation.
    status_list = []
    for i in range(number):
        # Name each node with a uuid; 'wp' = preemptable, 'wn' = not.
        name = 'wp' if ex_preemptible else 'wn'
        name += str(uuid.uuid4())
        status = {'name': name, 'node_response': None, 'node': None}
        status_list.append(status)
    start_time = time.time()
    complete = False
    while not complete:
        if (time.time() - start_time >= timeout):
            # Bug fix: the original never applied the '%' operator, so the
            # message always showed a literal '%s' instead of the timeout.
            raise Exception("Timeout (%s sec) while waiting for multiple "
                            "instances" % timeout)
        complete = True
        time.sleep(poll_interval)
        for status in status_list:
            # Create the node or check status if already in progress.
            if not status['node']:
                if not status['node_response']:
                    driver._multi_create_node(status, node_attrs)
                else:
                    driver._multi_check_node(status, node_attrs)
            # If any of the nodes have not been created (or failed) we are
            # not done yet.
            if not status['node']:
                complete = False
    # Return list of nodes
    node_list = []
    for status in status_list:
        node_list.append(status['node'])
    return node_list
python
{ "resource": "" }
q28715
fetch_parent_dir
train
def fetch_parent_dir(filepath, n=1):
    '''Returns a parent directory, n places above the input filepath.

    Equivalent to something like: '/home/user/dir'.split('/')[-2] if n=2.
    '''
    # Resolve symlinks first so the ancestry reflects the real location.
    resolved = os.path.realpath(filepath)
    remaining = n
    while remaining > 0:
        resolved = os.path.dirname(resolved)
        remaining -= 1
    return os.path.basename(resolved)
python
{ "resource": "" }
q28716
ToilStatus.print_dot_chart
train
def print_dot_chart(self):
    """
    Print a dot (graphviz) graph representing the workflow to stdout.

    Nodes are the jobs in self.jobsToReport, labelled with job name and
    jobStoreID; edges point from each job to its successors, labelled with
    the successor's precedence level.
    """
    print("digraph toil_graph {")
    print("# This graph was created from job-store: %s" % self.jobStoreName)
    # Map each distinct jobStoreID to a small integer node name. Bug fix: the
    # original built the inverse mapping ({index: jobStoreID}) via
    # dict(enumerate(...)) and then indexed it by jobStoreID, which raised
    # KeyError for every job.
    jobsToNodeNames = {}
    for job in self.jobsToReport:
        if job.jobStoreID not in jobsToNodeNames:
            jobsToNodeNames[job.jobStoreID] = len(jobsToNodeNames)
    # Print the nodes, once per distinct jobStoreID (deduplicating by ID also
    # avoids requiring job objects to be hashable).
    printed = set()
    for job in self.jobsToReport:
        if job.jobStoreID in printed:
            continue
        printed.add(job.jobStoreID)
        print('%s [label="%s %s"];' % (
            jobsToNodeNames[job.jobStoreID], job.jobName, job.jobStoreID))
    # Print the edges
    emitted = set()
    for job in self.jobsToReport:
        if job.jobStoreID in emitted:
            continue
        emitted.add(job.jobStoreID)
        for level, jobList in enumerate(job.stack):
            for childJob in jobList:
                # Check, b/c successor may be finished / not in the set of jobs
                if childJob.jobStoreID in jobsToNodeNames:
                    print('%s -> %s [label="%i"];' % (
                        jobsToNodeNames[job.jobStoreID],
                        jobsToNodeNames[childJob.jobStoreID], level))
    print("}")
python
{ "resource": "" }
q28717
ToilStatus.printJobLog
train
def printJobLog(self):
    """Takes a list of jobs, finds their log files, and prints them to the terminal."""
    for job in self.jobsToReport:
        if job.logJobStoreFileID is not None:
            msg = "LOG_FILE_OF_JOB:%s LOG: =======>\n" % job
            # Read the whole log from the job store and append it between
            # the marker lines.
            with job.getLogFileHandle(self.jobStore) as fH:
                msg += fH.read()
            msg += "<========="
        else:
            msg = "LOG_FILE_OF_JOB:%s LOG: Job has no log file" % job
        print(msg)
python
{ "resource": "" }
q28718
ToilStatus.printJobChildren
train
def printJobChildren(self):
    """Takes a list of jobs, and prints their successors."""
    for job in self.jobsToReport:
        # Collect one "(CHILD_JOB:...,PRECEDENCE:...)" entry per successor,
        # tagged with its precedence level, then print a single line per job.
        parts = ["CHILDREN_OF_JOB:%s " % job]
        for level, jobList in enumerate(job.stack):
            parts.extend("\t(CHILD_JOB:%s,PRECEDENCE:%i)" % (childJob, level)
                         for childJob in jobList)
        print("".join(parts))
python
{ "resource": "" }
q28719
ToilStatus.printAggregateJobStats
train
def printAggregateJobStats(self, properties, childNumber):
    """
    Prints a job's ID, log file, remaining tries, and other properties.

    :param set properties: property names that apply to the reported jobs;
        each known flag is printed with True/False membership.
    :param childNumber: successor count to report for each job.
    """
    for job in self.jobsToReport:
        flag = lambda name: "%s:%s" % (name, str(name in properties))
        fields = ("JOB:%s" % job,
                  "LOG_FILE:%s" % job.logJobStoreFileID,
                  "TRYS_REMAINING:%i" % job.remainingRetryCount,
                  "CHILD_NUMBER:%s" % childNumber,
                  flag("READY_TO_RUN"),
                  flag("IS_ZOMBIE"),
                  flag("HAS_SERVICES"),
                  flag("IS_SERVICE"))
        print("\t".join(fields))
python
{ "resource": "" }
q28720
ToilStatus.report_on_jobs
train
def report_on_jobs(self):
    """
    Gathers information about jobs such as its child jobs and status.

    :returns jobStats: Pairings of a useful category and a list of jobs which
        fall into it. Also contains 'properties', the set of category names
        seen, and 'childNumber', the successor count of the last job examined
        (0 when there are no jobs to report).
    :rtype dict:
    """
    hasChildren = []
    readyToRun = []
    zombies = []
    hasLogFile = []
    hasServices = []
    services = []
    properties = set()
    # Initialised so the returned dict is well-defined even when
    # jobsToReport is empty (the original raised NameError in that case).
    childNumber = 0
    for job in self.jobsToReport:
        if job.logJobStoreFileID is not None:
            hasLogFile.append(job)
        # Bug fix: the original used reduce(..., map(len, job.stack) + [0]),
        # which raises TypeError on Python 3 (map returns an iterator, not a
        # list). Summing the per-level lengths is the same total.
        childNumber = sum(len(level) for level in job.stack)
        if childNumber > 0:  # Total number of successors > 0
            hasChildren.append(job)
            properties.add("HAS_CHILDREN")
        elif job.command is not None:
            # Job has no children and a command to run. Indicates job could be run.
            readyToRun.append(job)
            properties.add("READY_TO_RUN")
        else:
            # Job has no successors and no command, so is a zombie job.
            zombies.append(job)
            properties.add("IS_ZOMBIE")
        if job.services:
            hasServices.append(job)
            properties.add("HAS_SERVICES")
        if job.startJobStoreID or job.terminateJobStoreID or job.errorJobStoreID:
            # These attributes are only set in service jobs
            services.append(job)
            properties.add("IS_SERVICE")
    jobStats = {'hasChildren': hasChildren,
                'readyToRun': readyToRun,
                'zombies': zombies,
                'hasServices': hasServices,
                'services': services,
                'hasLogFile': hasLogFile,
                'properties': properties,
                'childNumber': childNumber}
    return jobStats
python
{ "resource": "" }
q28721
ToilStatus.getPIDStatus
train
def getPIDStatus(jobStoreName):
    """
    Determine the status of a process with a particular pid.

    Checks to see if a process exists or not.

    :return: A string indicating the status of the PID of the workflow as
        stored in the jobstore: 'QUEUED', 'RUNNING' or 'COMPLETED'.
    :rtype: str
    """
    try:
        jobstore = Toil.resumeJobStore(jobStoreName)
    except NoSuchJobStoreException:
        return 'QUEUED'
    except NoSuchFileException:
        return 'QUEUED'
    try:
        with jobstore.readSharedFileStream('pid.log') as pidFile:
            pid = int(pidFile.read())
            try:
                os.kill(pid, 0)  # Does not kill process when 0 is passed.
            except OSError:  # Process not found, must be done.
                return 'COMPLETED'
            else:
                return 'RUNNING'
    except NoSuchFileException:
        # pid.log has not been written yet: leader has not started.
        pass
    return 'QUEUED'
python
{ "resource": "" }
q28722
ToilStatus.getStatus
train
def getStatus(jobStoreName):
    """
    Determine the status of a workflow.

    If the jobstore does not exist, this returns 'QUEUED', assuming it has not
    been created yet.

    Checks for the existence of files created in toil.Leader.run(). In
    toil.Leader.run(), if a workflow completes with failed jobs, 'failed.log'
    is created, otherwise 'succeeded.log' is written. If neither of these
    exist, the leader is still running jobs.

    :return: A string indicating the status of the workflow.
        ['COMPLETED', 'RUNNING', 'ERROR', 'QUEUED']
    :rtype: str
    """
    try:
        jobstore = Toil.resumeJobStore(jobStoreName)
    except NoSuchJobStoreException:
        return 'QUEUED'
    except NoSuchFileException:
        return 'QUEUED'
    try:
        # Only the file's existence matters, not its contents.
        with jobstore.readSharedFileStream('succeeded.log') as successful:
            pass
        return 'COMPLETED'
    except NoSuchFileException:
        try:
            with jobstore.readSharedFileStream('failed.log') as failed:
                pass
            return 'ERROR'
        except NoSuchFileException:
            pass
    return 'RUNNING'
python
{ "resource": "" }
q28723
ToilStatus.fetchRootJob
train
def fetchRootJob(self):
    """
    Fetches the root job from the jobStore that provides context for all
    other jobs.

    Exactly the same as the jobStore.loadRootJob() function, but with a
    different exit message if the root job is not found (indicating the
    workflow ran successfully to completion and certain stats cannot be
    gathered from it meaningfully such as which jobs are left to run).

    :raises JobException: if the root job does not exist.
    """
    try:
        rootJob = self.jobStore.loadRootJob()
    except JobException:
        print('Root job is absent. The workflow may have completed successfully.',
              file=sys.stderr)
        raise
    else:
        return rootJob
python
{ "resource": "" }
q28724
ToilStatus.fetchUserJobs
train
def fetchUserJobs(self, jobs):
    """
    Takes a user input array of jobs, verifies that they are in the jobStore
    and returns the array of jobsToReport.

    :param list jobs: A list of jobs to be verified.
    :returns jobsToReport: A list of jobs which are verified to be in the
        jobStore.
    """
    verified = []
    for jobID in jobs:
        try:
            loaded = self.jobStore.load(jobID)
        except JobException:
            # Report which ID failed before propagating the error.
            print('The job %s could not be found.' % jobID, file=sys.stderr)
            raise
        verified.append(loaded)
    return verified
python
{ "resource": "" }
q28725
ToilStatus.traverseJobGraph
train
def traverseJobGraph(self, rootJob, jobsToReport=None, foundJobStoreIDs=None):
    """
    Find all current jobs in the jobStore and return them as an Array.

    :param jobNode rootJob: The root job of the workflow.
    :param list jobsToReport: A list of jobNodes to be added to and returned.
    :param set foundJobStoreIDs: A set of jobStoreIDs used to keep track of
        jobStoreIDs encountered in traversal.
    :returns jobsToReport: The list of jobs currently in the job graph.
    """
    # Mutable defaults are created fresh per top-level call.
    if jobsToReport is None:
        jobsToReport = []
    if foundJobStoreIDs is None:
        foundJobStoreIDs = set()
    if rootJob.jobStoreID in foundJobStoreIDs:
        return jobsToReport
    foundJobStoreIDs.add(rootJob.jobStoreID)
    jobsToReport.append(rootJob)
    # Traverse jobs in stack
    for jobs in rootJob.stack:
        for successorJobStoreID in [x.jobStoreID for x in jobs]:
            if successorJobStoreID not in foundJobStoreIDs and self.jobStore.exists(successorJobStoreID):
                self.traverseJobGraph(self.jobStore.load(successorJobStoreID), jobsToReport, foundJobStoreIDs)
    # Traverse service jobs
    for jobs in rootJob.services:
        for serviceJobStoreID in [x.jobStoreID for x in jobs]:
            if self.jobStore.exists(serviceJobStoreID):
                # Service jobs are expected to be reachable only via their
                # host job, so seeing one twice indicates a corrupt graph.
                if serviceJobStoreID in foundJobStoreIDs:
                    raise RuntimeError('Service job was unexpectedly found while traversing ')
                foundJobStoreIDs.add(serviceJobStoreID)
                jobsToReport.append(self.jobStore.load(serviceJobStoreID))
    return jobsToReport
python
{ "resource": "" }
q28726
lookupEnvVar
train
def lookupEnvVar(name, envName, defaultValue):
    """
    Use this for looking up environment variables that control Toil and are
    important enough to log the result of that lookup.

    :param str name: the human readable name of the variable
    :param str envName: the name of the environment variable to lookup
    :param str defaultValue: the fall-back value
    :return: the value of the environment variable or the default value the
        variable is not set
    :rtype: str
    """
    if envName not in os.environ:
        log.info('Using default %s of %s as %s is not set.', name, defaultValue, envName)
        return defaultValue
    value = os.environ[envName]
    log.info('Overriding %s of %s with %s from %s.', name, defaultValue, value, envName)
    return value
python
{ "resource": "" }
q28727
checkDockerImageExists
train
def checkDockerImageExists(appliance):
    """
    Attempts to check a url registryName for the existence of a docker image
    with a given tag.

    :param str appliance: The url of a docker image's registry (with a tag) of
        the form: 'quay.io/<repo_path>:<tag>' or '<repo_path>:<tag>'.
        Examples: 'quay.io/ucsc_cgl/toil:latest', 'ubuntu:latest', or
        'broadinstitute/genomes-in-the-cloud:2.0.0'.
    :return: Raises an exception if the docker image cannot be found or is
        invalid. Otherwise, it will return the appliance string.
    :rtype: str
    """
    # Appliances built from the current commit are trusted without a
    # registry round-trip.
    if currentCommit in appliance:
        return appliance
    registryName, imageName, tag = parseDockerAppliance(appliance)
    # docker.io needs an OAuth token, so it gets a dedicated check.
    if registryName == 'docker.io':
        return requestCheckDockerIo(origAppliance=appliance, imageName=imageName, tag=tag)
    else:
        return requestCheckRegularDocker(origAppliance=appliance, registryName=registryName, imageName=imageName, tag=tag)
python
{ "resource": "" }
q28728
parseDockerAppliance
train
def parseDockerAppliance(appliance):
    """
    Takes string describing a docker image and returns the parsed registry,
    image reference, and tag for that image.

    Example: "quay.io/ucsc_cgl/toil:latest"
    Should return: "quay.io", "ucsc_cgl/toil", "latest"

    If a registry is not defined, the default is: "docker.io"
    If a tag is not defined, the default is: "latest"

    :param appliance: The full url of the docker image originally specified
        by the user (or the default). e.g. "quay.io/ucsc_cgl/toil:latest"
    :return: registryName, imageName, tag
    """
    appliance = appliance.lower()
    # Split off the tag after the last ':'; default to 'latest' when absent.
    if ':' in appliance:
        appliance, _, tag = appliance.rpartition(':')
    else:
        tag = 'latest'
    registryName = 'docker.io'  # default if not specified
    imageName = appliance      # will be true if not specified
    # A leading path component containing a dot (e.g. 'quay.io') is treated
    # as an explicit registry host.
    firstComponent = appliance.split('/')[0]
    if '/' in appliance and '.' in firstComponent:
        registryName = firstComponent
        imageName = appliance[len(registryName):]
    return registryName.strip('/'), imageName.strip('/'), tag
python
{ "resource": "" }
q28729
requestCheckRegularDocker
train
def requestCheckRegularDocker(origAppliance, registryName, imageName, tag):
    """
    Checks to see if an image exists using the requests library.

    URL is based on the docker v2 schema described here:
    https://docs.docker.com/registry/spec/manifest-v2-2/

    This has the following format:
    https://{websitehostname}.io/v2/{repo}/manifests/{tag}

    Does not work with the official (docker.io) site, because they require an
    OAuth token, so a separate check is done for docker.io images.

    :param str origAppliance: The full url of the docker image originally
        specified by the user (or the default).
        e.g. "quay.io/ucsc_cgl/toil:latest"
    :param str registryName: The url of a docker image's registry.
        e.g. "quay.io"
    :param str imageName: The image, including path and excluding the tag.
        e.g. "ucsc_cgl/toil"
    :param str tag: The tag used at that docker image's registry.
        e.g. "latest"
    :return: Return True if match found. Raise otherwise.
    """
    ioURL = 'https://{webhost}/v2/{pathName}/manifests/{tag}' \
            ''.format(webhost=registryName, pathName=imageName, tag=tag)
    # A HEAD request is enough: only the status code matters.
    response = requests.head(ioURL)
    if not response.ok:
        raise ApplianceImageNotFound(origAppliance, ioURL, response.status_code)
    else:
        return origAppliance
python
{ "resource": "" }
q28730
requestCheckDockerIo
train
def requestCheckDockerIo(origAppliance, imageName, tag): """ Checks docker.io to see if an image exists using the requests library. URL is based on the docker v2 schema. Requires that an access token be fetched first. :param str origAppliance: The full url of the docker image originally specified by the user (or the default). e.g. "ubuntu:latest" :param str imageName: The image, including path and excluding the tag. e.g. "ubuntu" :param str tag: The tag used at that docker image's registry. e.g. "latest" :return: Return True if match found. Raise otherwise. """ # only official images like 'busybox' or 'ubuntu' if '/' not in imageName: imageName = 'library/' + imageName token_url = 'https://auth.docker.io/token?service=registry.docker.io&scope=repository:{repo}:pull'.format(repo=imageName) requests_url = 'https://registry-1.docker.io/v2/{repo}/manifests/{tag}'.format(repo=imageName, tag=tag) token = requests.get(token_url) jsonToken = token.json() bearer = jsonToken["token"] response = requests.head(requests_url, headers={'Authorization': 'Bearer {}'.format(bearer)}) if not response.ok: raise ApplianceImageNotFound(origAppliance, requests_url, response.status_code) else: return origAppliance
python
{ "resource": "" }
q28731
JobGraph.restartCheckpoint
train
def restartCheckpoint(self, jobStore):
    """Restart a checkpoint after the total failure of jobs in its subtree.

    Writes the changes to the jobStore immediately. All the checkpoint's
    successors will be deleted, but its retry count will *not* be decreased.

    Returns a list with the IDs of any successors deleted.
    """
    assert self.checkpoint is not None
    successorsDeleted = []
    if self.stack or self.services or self.command != None:
        if self.command != None:
            # The command was already restored (e.g. by a previous partial
            # restart); it must be the checkpoint command.
            assert self.command == self.checkpoint
            logger.debug("Checkpoint job already has command set to run")
        else:
            self.command = self.checkpoint
        jobStore.update(self)  # Update immediately to ensure that checkpoint
        # is made before deleting any remaining successors
        if self.stack or self.services:
            # If the subtree of successors is not complete restart everything
            logger.debug("Checkpoint job has unfinished successor jobs, deleting the jobs on the stack: %s, services: %s " %
                         (self.stack, self.services))
            # Delete everything on the stack, as these represent successors to clean
            # up as we restart the queue
            def recursiveDelete(jobGraph2):
                # Recursive walk the stack to delete all remaining jobs
                for jobs in jobGraph2.stack + jobGraph2.services:
                    for jobNode in jobs:
                        if jobStore.exists(jobNode.jobStoreID):
                            recursiveDelete(jobStore.load(jobNode.jobStoreID))
                        else:
                            logger.debug("Job %s has already been deleted", jobNode)
                if jobGraph2 != self:
                    # Children are deleted on the way back up; the checkpoint
                    # job itself is kept.
                    logger.debug("Checkpoint is deleting old successor job: %s", jobGraph2.jobStoreID)
                    jobStore.delete(jobGraph2.jobStoreID)
                    successorsDeleted.append(jobGraph2.jobStoreID)
            recursiveDelete(self)
            self.stack = [ [], [] ]  # Initialise the job to mimic the state of a job
            # that has been previously serialised but which as yet has no successors
            self.services = []  # Empty the services
            # Update the jobStore to avoid doing this twice on failure and make this clean.
            jobStore.update(self)
    return successorsDeleted
python
{ "resource": "" }
q28732
Context.absolute_name
train
def absolute_name(self, name): """ Returns the absolute form of the specified resource name. If the specified name is already absolute, that name will be returned unchanged, otherwise the given name will be prefixed with the namespace this object was configured with. Relative names starting with underscores are disallowed. >>> ctx = Context( 'us-west-1b', namespace='/' ) >>> ctx.absolute_name('bar') '/bar' >>> ctx.absolute_name('/bar') '/bar' >>> ctx.absolute_name('') '/' >>> ctx.absolute_name('/') '/' >>> ctx.absolute_name('_bar') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... InvalidPathError: Invalid path '/_bar' >>> ctx.absolute_name('/_bar') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... InvalidPathError: Invalid path '/_bar' >>> ctx = Context( 'us-west-1b', namespace='/foo/' ) >>> ctx.absolute_name('bar') '/foo/bar' >>> ctx.absolute_name('bar/') '/foo/bar/' >>> ctx.absolute_name('bar1/bar2') '/foo/bar1/bar2' >>> ctx.absolute_name('/bar') '/bar' >>> ctx.absolute_name('') '/foo/' >>> ctx.absolute_name('/') '/' >>> ctx.absolute_name('_bar') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... InvalidPathError: Invalid path '/foo/_bar' >>> ctx.absolute_name('/_bar') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... InvalidPathError: Invalid path '/_bar' """ if self.is_absolute_name(name): result = name else: result = self.namespace + name if not self.name_re.match(result): raise self.InvalidPathError(result) return result
python
{ "resource": "" }
q28733
Context.to_aws_name
train
def to_aws_name(self, name): """ Returns a transliteration of the name that safe to use for resource names on AWS. If the given name is relative, it converted to its absolute form before the transliteration. The transliteration uses two consequitive '_' to encode a single '_' and a single '_' to separate the name components. AWS-safe names are by definition absolute such that the leading separator can be removed. This leads to fairly readable AWS-safe names, especially for names in the root namespace, where the transliteration is the identity function if the input does not contain any '_'. This scheme only works if name components don't start with '_'. Without that condition, '/_' would become '___' the inverse of which is '_/'. >>> ctx = Context( 'us-west-1b', namespace='/' ) >>> ctx.to_aws_name( 'foo' ) 'foo' >>> ctx.from_aws_name( 'foo' ) 'foo' Illegal paths that would introduce ambiguity need to raise an exception >>> ctx.to_aws_name('/_') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... InvalidPathError: Invalid path '/_' >>> ctx.to_aws_name('/_/') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... InvalidPathError: Invalid path '/_/' >>> ctx.from_aws_name('___') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... 
InvalidPathError: Invalid path '/_/' >>> ctx.to_aws_name( 'foo_bar') 'foo__bar' >>> ctx.from_aws_name( 'foo__bar') 'foo_bar' >>> ctx.to_aws_name( '/sub_ns/foo_bar') 'sub__ns_foo__bar' >>> ctx.to_aws_name( 'sub_ns/foo_bar') 'sub__ns_foo__bar' >>> ctx.from_aws_name( 'sub__ns_foo__bar' ) 'sub_ns/foo_bar' >>> ctx.to_aws_name( 'g_/' ) 'g___' >>> ctx.from_aws_name( 'g___' ) 'g_/' >>> ctx = Context( 'us-west-1b', namespace='/this_ns/' ) >>> ctx.to_aws_name( 'foo' ) 'this__ns_foo' >>> ctx.from_aws_name( 'this__ns_foo' ) 'foo' >>> ctx.to_aws_name( 'foo_bar') 'this__ns_foo__bar' >>> ctx.from_aws_name( 'this__ns_foo__bar') 'foo_bar' >>> ctx.to_aws_name( '/other_ns/foo_bar' ) 'other__ns_foo__bar' >>> ctx.from_aws_name( 'other__ns_foo__bar' ) '/other_ns/foo_bar' >>> ctx.to_aws_name( 'other_ns/foo_bar' ) 'this__ns_other__ns_foo__bar' >>> ctx.from_aws_name( 'this__ns_other__ns_foo__bar' ) 'other_ns/foo_bar' >>> ctx.to_aws_name( '/this_ns/foo_bar' ) 'this__ns_foo__bar' >>> ctx.from_aws_name( 'this__ns_foo__bar' ) 'foo_bar' """ name = self.absolute_name(name) assert name.startswith('/') return name[1:].replace('_', '__').replace('/', '_')
python
{ "resource": "" }
q28734
Resource.create
train
def create(cls, jobStore, leaderPath): """ Saves the content of the file or directory at the given path to the given job store and returns a resource object representing that content for the purpose of obtaining it again at a generic, public URL. This method should be invoked on the leader node. :param toil.jobStores.abstractJobStore.AbstractJobStore jobStore: :param str leaderPath: :rtype: Resource """ pathHash = cls._pathHash(leaderPath) contentHash = hashlib.md5() # noinspection PyProtectedMember with cls._load(leaderPath) as src: with jobStore.writeSharedFileStream(sharedFileName=pathHash, isProtected=False) as dst: userScript = src.read() contentHash.update(userScript) dst.write(userScript) return cls(name=os.path.basename(leaderPath), pathHash=pathHash, url=jobStore.getSharedPublicUrl(sharedFileName=pathHash), contentHash=contentHash.hexdigest())
python
{ "resource": "" }
q28735
Resource.prepareSystem
train
def prepareSystem(cls): """ Prepares this system for the downloading and lookup of resources. This method should only be invoked on a worker node. It is idempotent but not thread-safe. """ try: resourceRootDirPath = os.environ[cls.rootDirPathEnvName] except KeyError: # Create directory holding local copies of requested resources ... resourceRootDirPath = mkdtemp() # .. and register its location in an environment variable such that child processes # can find it. os.environ[cls.rootDirPathEnvName] = resourceRootDirPath assert os.path.isdir(resourceRootDirPath)
python
{ "resource": "" }
q28736
Resource.cleanSystem
train
def cleanSystem(cls): """ Removes all downloaded, localized resources """ resourceRootDirPath = os.environ[cls.rootDirPathEnvName] os.environ.pop(cls.rootDirPathEnvName) shutil.rmtree(resourceRootDirPath) for k, v in list(os.environ.items()): if k.startswith(cls.resourceEnvNamePrefix): os.environ.pop(k)
python
{ "resource": "" }
q28737
Resource.lookup
train
def lookup(cls, leaderPath): """ Returns a resource object representing a resource created from a file or directory at the given path on the leader. This method should be invoked on the worker. The given path does not need to refer to an existing file or directory on the worker, it only identifies the resource within an instance of toil. This method returns None if no resource for the given path exists. :rtype: Resource """ pathHash = cls._pathHash(leaderPath) try: path_key = cls.resourceEnvNamePrefix + pathHash s = os.environ[path_key] except KeyError: log.warn("'%s' may exist, but is not yet referenced by the worker (KeyError from os.environ[]).", str(path_key)) return None else: self = cls.unpickle(s) assert self.pathHash == pathHash return self
python
{ "resource": "" }
q28738
Resource.localDirPath
train
def localDirPath(self): """ The path to the directory containing the resource on the worker. """ rootDirPath = os.environ[self.rootDirPathEnvName] return os.path.join(rootDirPath, self.contentHash)
python
{ "resource": "" }
q28739
Resource._download
train
def _download(self, dstFile): """ Download this resource from its URL to the given file object. :type dstFile: io.BytesIO|io.FileIO """ for attempt in retry(predicate=lambda e: isinstance(e, HTTPError) and e.code == 400): with attempt: with closing(urlopen(self.url)) as content: buf = content.read() contentHash = hashlib.md5(buf) assert contentHash.hexdigest() == self.contentHash dstFile.write(buf)
python
{ "resource": "" }
q28740
ModuleDescriptor._check_conflict
train
def _check_conflict(cls, dirPath, name): """ Check whether the module of the given name conflicts with another module on the sys.path. :param dirPath: the directory from which the module was originally loaded :param name: the mpdule name """ old_sys_path = sys.path try: sys.path = [d for d in old_sys_path if os.path.realpath(d) != os.path.realpath(dirPath)] try: colliding_module = importlib.import_module(name) except ImportError: pass else: raise ResourceException( "The user module '%s' collides with module '%s from '%s'." % ( name, colliding_module.__name__, colliding_module.__file__)) finally: sys.path = old_sys_path
python
{ "resource": "" }
q28741
ModuleDescriptor._getResourceClass
train
def _getResourceClass(self): """ Return the concrete subclass of Resource that's appropriate for auto-deploying this module. """ if self.fromVirtualEnv: subcls = VirtualEnvResource elif os.path.isdir(self._resourcePath): subcls = DirectoryResource elif os.path.isfile(self._resourcePath): subcls = FileResource elif os.path.exists(self._resourcePath): raise AssertionError("Neither a file or a directory: '%s'" % self._resourcePath) else: raise AssertionError("No such file or directory: '%s'" % self._resourcePath) return subcls
python
{ "resource": "" }
q28742
ModuleDescriptor.localize
train
def localize(self): """ Check if this module was saved as a resource. If it was, return a new module descriptor that points to a local copy of that resource. Should only be called on a worker node. On the leader, this method returns this resource, i.e. self. :rtype: toil.resource.Resource """ if not self._runningOnWorker(): log.warn('The localize() method should only be invoked on a worker.') resource = Resource.lookup(self._resourcePath) if resource is None: return self else: def stash(tmpDirPath): # Save the original dirPath such that we can restore it in globalize() with open(os.path.join(tmpDirPath, '.stash'), 'w') as f: f.write('1' if self.fromVirtualEnv else '0') f.write(self.dirPath) resource.download(callback=stash) return self.__class__(dirPath=resource.localDirPath, name=self.name, fromVirtualEnv=self.fromVirtualEnv)
python
{ "resource": "" }
q28743
ModuleDescriptor._resourcePath
train
def _resourcePath(self): """ The path to the directory that should be used when shipping this module and its siblings around as a resource. """ if self.fromVirtualEnv: return self.dirPath elif '.' in self.name: return os.path.join(self.dirPath, self._rootPackage()) else: initName = self._initModuleName(self.dirPath) if initName: raise ResourceException( "Toil does not support loading a user script from a package directory. You " "may want to remove %s from %s or invoke the user script as a module via " "'PYTHONPATH=\"%s\" python -m %s.%s'." % tuple(concat(initName, self.dirPath, os.path.split(self.dirPath), self.name))) return self.dirPath
python
{ "resource": "" }
q28744
fetchJobStoreFiles
train
def fetchJobStoreFiles(jobStore, options): """ Takes a list of file names as glob patterns, searches for these within a given directory, and attempts to take all of the files found and copy them into options.localFilePath. :param jobStore: A fileJobStore object. :param options.fetch: List of file glob patterns to search for in the jobStore and copy into options.localFilePath. :param options.localFilePath: Local directory to copy files into. :param options.jobStore: The path to the jobStore directory. """ for jobStoreFile in options.fetch: jobStoreHits = recursiveGlob(directoryname=options.jobStore, glob_pattern=jobStoreFile) for jobStoreFileID in jobStoreHits: logger.debug("Copying job store file: %s to %s", jobStoreFileID, options.localFilePath[0]) jobStore.readFile(jobStoreFileID, os.path.join(options.localFilePath[0], os.path.basename(jobStoreFileID)), symlink=options.useSymlinks)
python
{ "resource": "" }
q28745
printContentsOfJobStore
train
def printContentsOfJobStore(jobStorePath, nameOfJob=None): """ Fetch a list of all files contained in the jobStore directory input if nameOfJob is not declared, otherwise it only prints out the names of files for that specific job for which it can find a match. Also creates a logFile containing this same record of job files in the working directory. :param jobStorePath: Directory path to recursively look for files. :param nameOfJob: Default is None, which prints out all files in the jobStore. If specified, it will print all jobStore files that have been written to the jobStore by that job. """ if nameOfJob: glob = "*" + nameOfJob + "*" logFile = nameOfJob + "_fileset.txt" else: glob = "*" logFile = "jobstore_files.txt" nameOfJob = "" list_of_files = recursiveGlob(directoryname=jobStorePath, glob_pattern=glob) if os.path.exists(logFile): os.remove(logFile) for gfile in sorted(list_of_files): if not gfile.endswith('.new'): logger.debug(nameOfJob + "File: %s", os.path.basename(gfile)) with open(logFile, "a+") as f: f.write(os.path.basename(gfile)) f.write("\n")
python
{ "resource": "" }
q28746
BaseJob.disk
train
def disk(self): """ The maximum number of bytes of disk the job will require to run. """ if self._disk is not None: return self._disk elif self._config is not None: return self._config.defaultDisk else: raise AttributeError("Default value for 'disk' cannot be determined")
python
{ "resource": "" }
q28747
BaseJob.memory
train
def memory(self): """ The maximum number of bytes of memory the job will require to run. """ if self._memory is not None: return self._memory elif self._config is not None: return self._config.defaultMemory else: raise AttributeError("Default value for 'memory' cannot be determined")
python
{ "resource": "" }
q28748
BaseJob.cores
train
def cores(self): """ The number of CPU cores required. """ if self._cores is not None: return self._cores elif self._config is not None: return self._config.defaultCores else: raise AttributeError("Default value for 'cores' cannot be determined")
python
{ "resource": "" }
q28749
BaseJob.preemptable
train
def preemptable(self): """ Whether the job can be run on a preemptable node. """ if self._preemptable is not None: return self._preemptable elif self._config is not None: return self._config.defaultPreemptable else: raise AttributeError("Default value for 'preemptable' cannot be determined")
python
{ "resource": "" }
q28750
BaseJob._requirements
train
def _requirements(self): """ Gets a dictionary of all the object's resource requirements. Unset values are defaulted to None """ return {'memory': getattr(self, 'memory', None), 'cores': getattr(self, 'cores', None), 'disk': getattr(self, 'disk', None), 'preemptable': getattr(self, 'preemptable', None)}
python
{ "resource": "" }
q28751
BaseJob._parseResource
train
def _parseResource(name, value): """ Parse a Toil job's resource requirement value and apply resource-specific type checks. If the value is a string, a binary or metric unit prefix in it will be evaluated and the corresponding integral value will be returned. :param str name: The name of the resource :param None|str|float|int value: The resource value :rtype: int|float|None >>> Job._parseResource('cores', None) >>> Job._parseResource('cores', 1), Job._parseResource('disk', 1), \ Job._parseResource('memory', 1) (1, 1, 1) >>> Job._parseResource('cores', '1G'), Job._parseResource('disk', '1G'), \ Job._parseResource('memory', '1G') (1073741824, 1073741824, 1073741824) >>> Job._parseResource('cores', 1.1) 1.1 >>> Job._parseResource('disk', 1.1) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: The 'disk' requirement does not accept values that are of <type 'float'> >>> Job._parseResource('memory', object()) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError: The 'memory' requirement does not accept values that are of ... """ assert name in ('memory', 'disk', 'cores') if value is None: return value elif isinstance(value, (str, bytes)): value = human2bytes(value) if isinstance(value, int): return value elif isinstance(value, float) and name == 'cores': return value else: raise TypeError("The '%s' requirement does not accept values that are of %s" % (name, type(value)))
python
{ "resource": "" }
q28752
Job.addFollowOn
train
def addFollowOn(self, followOnJob): """ Adds a follow-on job, follow-on jobs will be run after the child jobs and \ their successors have been run. :param toil.job.Job followOnJob: :return: followOnJob :rtype: toil.job.Job """ self._followOns.append(followOnJob) followOnJob._addPredecessor(self) return followOnJob
python
{ "resource": "" }
q28753
Job.addService
train
def addService(self, service, parentService=None): """ Add a service. The :func:`toil.job.Job.Service.start` method of the service will be called after the run method has completed but before any successors are run. The service's :func:`toil.job.Job.Service.stop` method will be called once the successors of the job have been run. Services allow things like databases and servers to be started and accessed by jobs in a workflow. :raises toil.job.JobException: If service has already been made the child of a job or another service. :param toil.job.Job.Service service: Service to add. :param toil.job.Job.Service parentService: Service that will be started before 'service' is started. Allows trees of services to be established. parentService must be a service of this job. :return: a promise that will be replaced with the return value from :func:`toil.job.Job.Service.start` of service in any successor of the job. :rtype: toil.job.Promise """ if parentService is not None: # Do check to ensure that parentService is a service of this job def check(services): for jS in services: if jS.service == parentService or check(jS.service._childServices): return True return False if not check(self._services): raise JobException("Parent service is not a service of the given job") return parentService._addChild(service) else: if service._hasParent: raise JobException("The service already has a parent service") service._hasParent = True jobService = ServiceJob(service) self._services.append(jobService) return jobService.rv()
python
{ "resource": "" }
q28754
Job.addChildFn
train
def addChildFn(self, fn, *args, **kwargs): """ Adds a function as a child job. :param fn: Function to be run as a child job with ``*args`` and ``**kwargs`` as \ arguments to this function. See toil.job.FunctionWrappingJob for reserved \ keyword arguments used to specify resource requirements. :return: The new child job that wraps fn. :rtype: toil.job.FunctionWrappingJob """ if PromisedRequirement.convertPromises(kwargs): return self.addChild(PromisedRequirementFunctionWrappingJob.create(fn, *args, **kwargs)) else: return self.addChild(FunctionWrappingJob(fn, *args, **kwargs))
python
{ "resource": "" }
q28755
Job.addFollowOnFn
train
def addFollowOnFn(self, fn, *args, **kwargs): """ Adds a function as a follow-on job. :param fn: Function to be run as a follow-on job with ``*args`` and ``**kwargs`` as \ arguments to this function. See toil.job.FunctionWrappingJob for reserved \ keyword arguments used to specify resource requirements. :return: The new follow-on job that wraps fn. :rtype: toil.job.FunctionWrappingJob """ if PromisedRequirement.convertPromises(kwargs): return self.addFollowOn(PromisedRequirementFunctionWrappingJob.create(fn, *args, **kwargs)) else: return self.addFollowOn(FunctionWrappingJob(fn, *args, **kwargs))
python
{ "resource": "" }
q28756
Job.checkNewCheckpointsAreLeafVertices
train
def checkNewCheckpointsAreLeafVertices(self): """ A checkpoint job is a job that is restarted if either it fails, or if any of \ its successors completely fails, exhausting their retries. A job is a leaf it is has no successors. A checkpoint job must be a leaf when initially added to the job graph. When its \ run method is invoked it can then create direct successors. This restriction is made to simplify implementation. :raises toil.job.JobGraphDeadlockException: if there exists a job being added to the graph for which \ checkpoint=True and which is not a leaf. """ roots = self.getRootJobs() # Roots jobs of component, these are preexisting jobs in the graph # All jobs in the component of the job graph containing self jobs = set() list(map(lambda x : x._dfs(jobs), roots)) # Check for each job for which checkpoint is true that it is a cut vertex or leaf for y in [x for x in jobs if x.checkpoint]: if y not in roots: # The roots are the prexisting jobs if not Job._isLeafVertex(y): raise JobGraphDeadlockException("New checkpoint job %s is not a leaf in the job graph" % y)
python
{ "resource": "" }
q28757
Job._addPredecessor
train
def _addPredecessor(self, predecessorJob): """ Adds a predecessor job to the set of predecessor jobs. Raises a \ RuntimeError if the job is already a predecessor. """ if predecessorJob in self._directPredecessors: raise RuntimeError("The given job is already a predecessor of this job") self._directPredecessors.add(predecessorJob)
python
{ "resource": "" }
q28758
Job._dfs
train
def _dfs(self, visited): """ Adds the job and all jobs reachable on a directed path from current node to the given set. """ if self not in visited: visited.add(self) for successor in self._children + self._followOns: successor._dfs(visited)
python
{ "resource": "" }
q28759
Job._checkJobGraphAcylicDFS
train
def _checkJobGraphAcylicDFS(self, stack, visited, extraEdges): """ DFS traversal to detect cycles in augmented job graph. """ if self not in visited: visited.add(self) stack.append(self) for successor in self._children + self._followOns + extraEdges[self]: successor._checkJobGraphAcylicDFS(stack, visited, extraEdges) assert stack.pop() == self if self in stack: stack.append(self) raise JobGraphDeadlockException("A cycle of job dependencies has been detected '%s'" % stack)
python
{ "resource": "" }
q28760
Job._getImpliedEdges
train
def _getImpliedEdges(roots): """ Gets the set of implied edges. See Job.checkJobGraphAcylic """ #Get nodes in job graph nodes = set() for root in roots: root._dfs(nodes) ##For each follow-on edge calculate the extra implied edges #Adjacency list of implied edges, i.e. map of jobs to lists of jobs #connected by an implied edge extraEdges = dict([(n, []) for n in nodes]) for job in nodes: if len(job._followOns) > 0: #Get set of jobs connected by a directed path to job, starting #with a child edge reacheable = set() for child in job._children: child._dfs(reacheable) #Now add extra edges for descendant in reacheable: extraEdges[descendant] += job._followOns[:] return extraEdges
python
{ "resource": "" }
q28761
Job._createEmptyJobGraphForJob
train
def _createEmptyJobGraphForJob(self, jobStore, command=None, predecessorNumber=0): """ Create an empty job for the job. """ # set _config to determine user determined default values for resource requirements self._config = jobStore.config return jobStore.create(JobNode.fromJob(self, command=command, predecessorNumber=predecessorNumber))
python
{ "resource": "" }
q28762
Job._makeJobGraphs
train
def _makeJobGraphs(self, jobGraph, jobStore): """ Creates a jobGraph for each job in the job graph, recursively. """ jobsToJobGraphs = {self:jobGraph} for successors in (self._followOns, self._children): jobs = [successor._makeJobGraphs2(jobStore, jobsToJobGraphs) for successor in successors] jobGraph.stack.append(jobs) return jobsToJobGraphs
python
{ "resource": "" }
q28763
Job._serialiseJob
train
def _serialiseJob(self, jobStore, jobsToJobGraphs, rootJobGraph): """ Pickle a job and its jobGraph to disk. """ # Pickle the job so that its run method can be run at a later time. # Drop out the children/followOns/predecessors/services - which are # all recorded within the jobStore and do not need to be stored within # the job self._children, self._followOns, self._services = [], [], [] self._directPredecessors, self._promiseJobStore = set(), None # The pickled job is "run" as the command of the job, see worker # for the mechanism which unpickles the job and executes the Job.run # method. with jobStore.writeFileStream(rootJobGraph.jobStoreID) as (fileHandle, fileStoreID): pickle.dump(self, fileHandle, pickle.HIGHEST_PROTOCOL) # Note that getUserScript() may have been overridden. This is intended. If we used # self.userModule directly, we'd be getting a reference to job.py if the job was # specified as a function (as opposed to a class) since that is where FunctionWrappingJob # is defined. What we really want is the module that was loaded as __main__, # and FunctionWrappingJob overrides getUserScript() to give us just that. Only then can # filter_main() in _unpickle( ) do its job of resolving any user-defined type or function. userScript = self.getUserScript().globalize() jobsToJobGraphs[self].command = ' '.join(('_toil', fileStoreID) + userScript.toCommand()) #Update the status of the jobGraph on disk jobStore.update(jobsToJobGraphs[self])
python
{ "resource": "" }
q28764
Job._serialiseServices
train
def _serialiseServices(self, jobStore, jobGraph, rootJobGraph): """ Serialises the services for a job. """ def processService(serviceJob, depth): # Extend the depth of the services if necessary if depth == len(jobGraph.services): jobGraph.services.append([]) # Recursively call to process child services for childServiceJob in serviceJob.service._childServices: processService(childServiceJob, depth+1) # Make a job wrapper serviceJobGraph = serviceJob._createEmptyJobGraphForJob(jobStore, predecessorNumber=1) # Create the start and terminate flags serviceJobGraph.startJobStoreID = jobStore.getEmptyFileStoreID() serviceJobGraph.terminateJobStoreID = jobStore.getEmptyFileStoreID() serviceJobGraph.errorJobStoreID = jobStore.getEmptyFileStoreID() assert jobStore.fileExists(serviceJobGraph.startJobStoreID) assert jobStore.fileExists(serviceJobGraph.terminateJobStoreID) assert jobStore.fileExists(serviceJobGraph.errorJobStoreID) # Create the service job tuple j = ServiceJobNode(jobStoreID=serviceJobGraph.jobStoreID, memory=serviceJobGraph.memory, cores=serviceJobGraph.cores, disk=serviceJobGraph.disk, preemptable=serviceJobGraph.preemptable, startJobStoreID=serviceJobGraph.startJobStoreID, terminateJobStoreID=serviceJobGraph.terminateJobStoreID, errorJobStoreID=serviceJobGraph.errorJobStoreID, jobName=serviceJobGraph.jobName, unitName=serviceJobGraph.unitName, command=serviceJobGraph.command, predecessorNumber=serviceJobGraph.predecessorNumber) # Add the service job tuple to the list of services to run jobGraph.services[depth].append(j) # Break the links between the services to stop them being serialised together #childServices = serviceJob.service._childServices serviceJob.service._childServices = None assert serviceJob._services == [] #service = serviceJob.service # Pickle the job serviceJob.pickledService = pickle.dumps(serviceJob.service, protocol=pickle.HIGHEST_PROTOCOL) serviceJob.service = None # Serialise the service job and job wrapper 
serviceJob._serialiseJob(jobStore, { serviceJob:serviceJobGraph }, rootJobGraph) # Restore values #serviceJob.service = service #serviceJob.service._childServices = childServices for serviceJob in self._services: processService(serviceJob, 0) self._services = []
python
{ "resource": "" }
q28765
Job._serialiseJobGraph
train
def _serialiseJobGraph(self, jobGraph, jobStore, returnValues, firstJob): """ Pickle the graph of jobs in the jobStore. The graph is not fully serialised \ until the jobGraph itself is written to disk, this is not performed by this \ function because of the need to coordinate this operation with other updates. \ """ #Check if the job graph has created #any cycles of dependencies or has multiple roots self.checkJobGraphForDeadlocks() #Create the jobGraphs for followOns/children with jobStore.batch(): jobsToJobGraphs = self._makeJobGraphs(jobGraph, jobStore) #Get an ordering on the jobs which we use for pickling the jobs in the #correct order to ensure the promises are properly established ordering = self.getTopologicalOrderingOfJobs() assert len(ordering) == len(jobsToJobGraphs) with jobStore.batch(): # Temporarily set the jobStore locators for the promise call back functions for job in ordering: job.prepareForPromiseRegistration(jobStore) def setForServices(serviceJob): serviceJob.prepareForPromiseRegistration(jobStore) for childServiceJob in serviceJob.service._childServices: setForServices(childServiceJob) for serviceJob in job._services: setForServices(serviceJob) ordering.reverse() assert self == ordering[-1] if firstJob: #If the first job we serialise all the jobs, including the root job for job in ordering: # Pickle the services for the job job._serialiseServices(jobStore, jobsToJobGraphs[job], jobGraph) # Now pickle the job job._serialiseJob(jobStore, jobsToJobGraphs, jobGraph) else: #We store the return values at this point, because if a return value #is a promise from another job, we need to register the promise #before we serialise the other jobs self._fulfillPromises(returnValues, jobStore) #Pickle the non-root jobs for job in ordering[:-1]: # Pickle the services for the job job._serialiseServices(jobStore, jobsToJobGraphs[job], jobGraph) # Pickle the job itself job._serialiseJob(jobStore, jobsToJobGraphs, jobGraph) # Pickle any services for the job 
self._serialiseServices(jobStore, jobGraph, jobGraph)
python
{ "resource": "" }
q28766
Job._serialiseFirstJob
train
def _serialiseFirstJob(self, jobStore): """ Serialises the root job. Returns the wrapping job. :param toil.jobStores.abstractJobStore.AbstractJobStore jobStore: """ # Check if the workflow root is a checkpoint but not a leaf vertex. # All other job vertices in the graph are checked by checkNewCheckpointsAreLeafVertices if self.checkpoint and not Job._isLeafVertex(self): raise JobGraphDeadlockException( 'New checkpoint job %s is not a leaf in the job graph' % self) # Create first jobGraph jobGraph = self._createEmptyJobGraphForJob(jobStore=jobStore, predecessorNumber=0) # Write the graph of jobs to disk self._serialiseJobGraph(jobGraph, jobStore, None, True) jobStore.update(jobGraph) # Store the name of the first job in a file in case of restart. Up to this point the # root job is not recoverable. FIXME: "root job" or "first job", which one is it? jobStore.setRootJob(jobGraph.jobStoreID) return jobGraph
python
{ "resource": "" }
q28767
Job._serialiseExistingJob
train
def _serialiseExistingJob(self, jobGraph, jobStore, returnValues): """ Serialise an existing job. """ self._serialiseJobGraph(jobGraph, jobStore, returnValues, False) #Drop the completed command, if not dropped already jobGraph.command = None #Merge any children (follow-ons) created in the initial serialisation #with children (follow-ons) created in the subsequent scale-up. assert len(jobGraph.stack) >= 4 combinedChildren = jobGraph.stack[-1] + jobGraph.stack[-3] combinedFollowOns = jobGraph.stack[-2] + jobGraph.stack[-4] jobGraph.stack = jobGraph.stack[:-4] if len(combinedFollowOns) > 0: jobGraph.stack.append(combinedFollowOns) if len(combinedChildren) > 0: jobGraph.stack.append(combinedChildren)
python
{ "resource": "" }
q28768
Job._executor
train
def _executor(self, jobGraph, stats, fileStore): """ This is the core wrapping method for running the job within a worker. It sets up the stats and logging before yielding. After completion of the body, the function will finish up the stats and logging, and starts the async update process for the job. """ if stats is not None: startTime = time.time() startClock = getTotalCpuTime() baseDir = os.getcwd() yield # If the job is not a checkpoint job, add the promise files to delete # to the list of jobStoreFileIDs to delete if not self.checkpoint: for jobStoreFileID in Promise.filesToDelete: fileStore.deleteGlobalFile(jobStoreFileID) else: # Else copy them to the job wrapper to delete later jobGraph.checkpointFilesToDelete = list(Promise.filesToDelete) Promise.filesToDelete.clear() # Now indicate the asynchronous update of the job can happen fileStore._updateJobWhenDone() # Change dir back to cwd dir, if changed by job (this is a safety issue) if os.getcwd() != baseDir: os.chdir(baseDir) # Finish up the stats if stats is not None: totalCpuTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage() stats.jobs.append( Expando( time=str(time.time() - startTime), clock=str(totalCpuTime - startClock), class_name=self._jobName(), memory=str(totalMemoryUsage) ) )
python
{ "resource": "" }
q28769
Job._runner
train
def _runner(self, jobGraph, jobStore, fileStore):
    """
    Run the job's user code and then serialise the successor jobs it defined.

    :param class jobGraph: Instance of a jobGraph object
    :param class jobStore: Instance of the job store
    :param toil.fileStore.FileStore fileStore: Instance of a Cached on uncached
           filestore
    :return:
    """
    # Expose the file store as an attribute for the duration of run() ...
    self._fileStore = fileStore
    # ... while still passing it positionally for backwards compatibility.
    results = self._run(jobGraph, fileStore)
    # Persist any successor jobs created by the run method to the job store.
    self._serialiseExistingJob(jobGraph, jobStore, results)
python
{ "resource": "" }
q28770
PromisedRequirementFunctionWrappingJob.create
train
def create(cls, userFunction, *args, **kwargs):
    """
    Creates an encapsulated Toil job function with unfulfilled promised resource
    requirements. Once the promises resolve, a child job function is created with
    the updated resource values. Encapsulating the subgraph guarantees this child
    runs before any other children in the workflow, so no sibling can observe an
    unresolved promise return value from the parent.
    """
    wrapper = cls(userFunction, *args, **kwargs)
    return EncapsulatedJob(wrapper)
python
{ "resource": "" }
q28771
PromisedRequirement.getValue
train
def getValue(self):
    """Resolve and return the PromisedRequirement's value."""
    # The callable was pickled with dill; rebuild it and apply the stored args.
    fn = dill.loads(self._func)
    arguments = self._args
    return fn(*arguments)
python
{ "resource": "" }
q28772
PromisedRequirement.convertPromises
train
def convertPromises(kwargs):
    """
    Convert any Promise instances among the reserved resource keywords
    ('disk', 'memory', 'cores') into PromisedRequirement instances, in place.

    :param kwargs: function keyword arguments
    :return: True if any reserved resource keyword held a Promise or
        PromisedRequirement, else False.
    :rtype: bool
    """
    # Previously this returned on the first promised resource found, which
    # left any later Promise kwargs unconverted; scan all three instead.
    found = False
    for r in ["disk", "memory", "cores"]:
        if isinstance(kwargs.get(r), Promise):
            kwargs[r] = PromisedRequirement(kwargs[r])
            found = True
        elif isinstance(kwargs.get(r), PromisedRequirement):
            found = True
    return found
python
{ "resource": "" }
q28773
getPublicIP
train
def getPublicIP():
    """Get the IP that this machine uses to contact the internet.

    If behind a NAT, this will still be this computer's IP, and not the
    router's. Falls back to the loopback address if anything goes wrong,
    since this is often called just to provide a default argument.
    """
    try:
        # Try to get the internet-facing IP by attempting a connection
        # to a non-existent server and reading what IP was used.
        with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:
            # 203.0.113.0/24 is reserved as TEST-NET-3 by RFC 5737, so
            # there is guaranteed to be no one listening on the other
            # end (and we won't accidentally DOS anyone). Connecting a UDP
            # socket sends no packets; it only selects a local address.
            sock.connect(('203.0.113.1', 1))
            ip = sock.getsockname()[0]
        return ip
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; catch only real errors and fall back to loopback.
        return '127.0.0.1'
python
{ "resource": "" }
q28774
setDefaultOptions
train
def setDefaultOptions(config):
    """
    Set default options for builtin batch systems. This is required if a Config
    object is not constructed from an Options object.

    :param config: the Config object to populate with defaults.
    """
    # Generic defaults.
    config.batchSystem = "singleMachine"
    config.disableAutoDeployment = False
    config.environment = {}
    # When left unset, getWaitDuration() supplies the poll interval in seconds.
    config.statePollingWait = None
    config.maxLocalJobs = multiprocessing.cpu_count()
    # Single-machine batch system.
    config.manualMemArgs = False
    config.scale = 1
    config.linkImports = False
    # Mesos batch system.
    config.mesosMasterAddress = '%s:5050' % getPublicIP()
    # Parasol batch system.
    config.parasolCommand = 'parasol'
    config.parasolMaxBatches = 10000
python
{ "resource": "" }
q28775
googleRetry
train
def googleRetry(f):
    """
    Decorator that retries the wrapped function whenever Google throws angry
    service errors. Wrap any function that uses the Google Client API with it.
    """
    @wraps(f)
    def retryingWrapper(*args, **kwargs):
        attempts = retry(delays=truncExpBackoff(),
                         timeout=300,
                         predicate=googleRetryPredicate)
        for attempt in attempts:
            with attempt:
                return f(*args, **kwargs)
    return retryingWrapper
python
{ "resource": "" }
q28776
GoogleJobStore._getBlobFromURL
train
def _getBlobFromURL(cls, url, exists=False):
    """
    Gets the blob specified by the url.

    caution: makes no api request. blob may not ACTUALLY exist

    :param urlparse.ParseResult url: the URL
    :param bool exists: if True, then syncs local blob object with cloud and
        raises exceptions if it doesn't exist remotely
    :return: the blob requested
    :rtype: :class:`~google.cloud.storage.blob.Blob`
    """
    bucketName = url.netloc
    fileName = url.path

    # remove leading '/', which can cause problems if fileName is a path
    if fileName.startswith('/'):
        fileName = fileName[1:]

    storageClient = storage.Client()
    bucket = storageClient.get_bucket(bucketName)
    # Pass the blob name as text: the old bytes(fileName) raises TypeError on
    # Python 3 (converting str to bytes requires an encoding) and the client
    # accepts str.
    blob = bucket.blob(fileName)

    if exists:
        if not blob.exists():
            # NOTE(review): raising the bare class relies on a no-arg
            # constructor — confirm NoSuchFileException's signature.
            raise NoSuchFileException
        # sync with cloud so info like size is available
        blob.reload()
    return blob
python
{ "resource": "" }
q28777
mean
train
def mean(xs):
    """
    Return the mean value of a sequence of values.

    >>> mean([2,4,4,4,5,5,7,9])
    5.0
    >>> mean([9,10,11,7,13])
    10.0
    >>> mean([10,10,10,10,10])
    10.0
    >>> mean([1,"b"])
    Traceback (most recent call last):
    ...
    ValueError: Input can't have non-numeric elements
    >>> mean([])
    Traceback (most recent call last):
    ...
    ValueError: Input can't be empty
    """
    try:
        total, count = sum(xs), len(xs)
        return total / float(count)
    except TypeError:
        # sum() over mixed/non-numeric elements raises TypeError.
        raise ValueError("Input can't have non-numeric elements")
    except ZeroDivisionError:
        # count == 0 means the input was empty.
        raise ValueError("Input can't be empty")
python
{ "resource": "" }
q28778
std_dev
train
def std_dev(xs):
    """
    Returns the population standard deviation of the given iterable of numbers.

    From http://rosettacode.org/wiki/Standard_deviation#Python

    >>> std_dev([2,4,4,4,5,5,7,9])
    2.0
    >>> std_dev([9,10,11,7,13])
    2.0
    >>> std_dev([10,10,10,10,10])
    0.0
    >>> std_dev([])
    Traceback (most recent call last):
    ...
    ValueError: Input can't be empty
    """
    mu = mean(xs)  # also validates the input (non-empty, numeric)
    variance = sum((x - mu) ** 2 for x in xs) / float(len(xs))
    return sqrt(variance)
python
{ "resource": "" }
q28779
partition_seq
train
def partition_seq(seq, size):
    """
    Splits a sequence into an iterable of subsequences. All subsequences are of
    the given size, except the last one, which may be smaller.

    If the input list is modified while the returned generator is consumed, the
    behavior of the program is undefined.

    :param seq: the sequence to split
    :param size: the desired size of the sublists, must be > 0
    :type size: int
    :return: an iterable of sublists

    >>> list(partition_seq("",1))
    []
    >>> list(partition_seq("abcde",2))
    ['ab', 'cd', 'e']
    >>> list(partition_seq("abcd",2))
    ['ab', 'cd']
    >>> list(partition_seq("abcde",0))
    Traceback (most recent call last):
    ...
    ValueError: Size must be greater than 0
    """
    if size < 1:
        raise ValueError('Size must be greater than 0')
    # xrange was Python-2-only; range is equivalent here and works on both.
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))
python
{ "resource": "" }
q28780
filter
train
def filter(names, pat):
    """Return the subset of the list NAMES that match PAT"""
    import os, posixpath
    pat = os.path.normcase(pat)
    if pat not in _cache:
        res = translate(pat)
        # Bound the cache so it cannot grow without limit.
        if len(_cache) >= _MAXCACHE:
            _cache.clear()
        _cache[pat] = re.compile(res)
    match = _cache[pat].match
    if os.path is posixpath:
        # normcase is a no-op on posix, so skip it in the loop.
        return [name for name in names if match(name)]
    return [name for name in names if match(os.path.normcase(name))]
python
{ "resource": "" }
q28781
AzureProvisioner._readClusterSettings
train
def _readClusterSettings(self):
    """
    Read the current instance's meta-data to get the cluster settings.
    """
    # get the leader metadata
    mdUrl = "http://169.254.169.254/metadata/instance?api-version=2017-08-01"
    header = {'Metadata': 'True'}
    request = urllib.request.Request(url=mdUrl, headers=header)
    response = urllib.request.urlopen(request)
    data = response.read()
    dataStr = data.decode("utf-8")
    metadata = json.loads(dataStr)

    # set values from the leader meta-data
    self._zone = metadata['compute']['location']
    self.clusterName = metadata['compute']['resourceGroupName']
    tagsStr = metadata['compute']['tags']
    tags = dict(item.split(":") for item in tagsStr.split(";"))
    self._owner = tags.get('owner', 'no-owner')
    leader = self.getLeader()
    self._leaderPrivateIP = leader.privateIP
    self._setSSH()  # create id_rsa.pub file on the leader if it is not there
    self._masterPublicKeyFile = self.LEADER_HOME_DIR + '.ssh/id_rsa.pub'

    # Add static nodes to /etc/hosts since Azure sometimes fails to find
    # them with DNS. This was map(lambda ...), whose lazy result was
    # discarded on Python 3 so _addToHosts never actually ran.
    for node in self.getProvisionedWorkers(None):
        self._addToHosts(node)
python
{ "resource": "" }
q28782
AzureProvisioner.launchCluster
train
def launchCluster(self, leaderNodeType, leaderStorage, owner, **kwargs):
    """
    Launches an Azure cluster using Ansible.
    A resource group is created for the cluster. All the virtual machines are created
    within this resource group. Cloud-config is called during vm creation to create
    directories and launch the appliance.

    The azureStorageCredentials must be passed in kwargs. These credentials allow
    access to Azure jobStores.

    :param leaderNodeType: Azure VM size for the leader.
    :param leaderStorage: disk size for the leader (currently not implemented).
    :param owner: owner tag applied to the created resources.
    """
    self._owner = owner
    self._masterPublicKeyFile = kwargs['publicKeyFile']
    if not self._masterPublicKeyFile:
        raise RuntimeError("The Azure provisioner requires a public key file.")
    storageCredentials = kwargs['azureStorageCredentials']
    if not storageCredentials:
        raise RuntimeError("azureStorageCredentials must be given.")
    self._checkValidClusterName()
    self._checkIfClusterExists()

    # Create the cluster.
    clusterArgs = {
        'resgrp': self.clusterName,  # The resource group, which represents the cluster.
        'region': self._zone
    }
    self.callPlaybook(self.playbook['create-cluster'], clusterArgs, wait=True)

    ansibleArgs = {
        'vmsize': leaderNodeType,
        'resgrp': self.clusterName,  # The resource group, which represents the cluster.
        'region': self._zone,
        'role': "leader",
        'owner': self._owner,  # Just a tag.
        'diskSize': str(leaderStorage),  # TODO: not implemented
        'publickeyfile': self._masterPublicKeyFile  # The users public key to be added to authorized_keys
    }

    # Ansible reads the cloud-config script from a file.
    with tempfile.NamedTemporaryFile(delete=False) as t:
        userData = self._getCloudConfigUserData('leader')
        t.write(userData)
    ansibleArgs['cloudconfig'] = t.name

    # Launch the leader VM, retrying up to three times on appliance failure.
    retries = 0
    while True:
        instanceName = 'l' + str(uuid.uuid4())
        ansibleArgs['vmname'] = instanceName
        # Azure limits the name to 24 characters, no dashes.
        ansibleArgs['storagename'] = instanceName.replace('-', '')[:24]
        self.callPlaybook(self.playbook['create'], ansibleArgs, wait=True)
        try:
            leaderNode = self.getLeader()
        except IndexError:
            raise RuntimeError("Failed to launcher leader")
        self._leaderPrivateIP = leaderNode.privateIP  # IP available as soon as the playbook finishes
        try:
            # Fix for DNS failure.
            self._addToHosts(leaderNode, leaderNode.publicIP)
            leaderNode.waitForNode('toil_leader')  # Make sure leader appliance is up.
            # Transfer credentials
            if storageCredentials is not None:
                fullPathCredentials = os.path.expanduser(storageCredentials)
                if os.path.isfile(fullPathCredentials):
                    leaderNode.injectFile(fullPathCredentials, self.LEADER_HOME_DIR, 'toil_leader')
            ansibleCredentials = '.azure/credentials'
            fullPathAnsibleCredentials = os.path.expanduser('~/' + ansibleCredentials)
            if os.path.isfile(fullPathAnsibleCredentials):
                leaderNode.sshAppliance('mkdir', '-p', self.LEADER_HOME_DIR + '.azure')
                leaderNode.injectFile(fullPathAnsibleCredentials,
                                      self.LEADER_HOME_DIR + ansibleCredentials,
                                      'toil_leader')
            break  # success!
        except RuntimeError as e:
            self._terminateNode(instanceName, False)  # remove failed leader
            retries += 1
            if retries == 3:
                logger.debug("Leader appliance failed to start. Giving up.")
                raise e
            logger.debug("Leader appliance failed to start, retrying. (Error %s)" % e)
    logger.debug('Launched leader')
python
{ "resource": "" }
q28783
AzureProvisioner._checkIfClusterExists
train
def _checkIfClusterExists(self):
    """
    Run the check-cluster playbook against the resource group; this raises if
    the cluster already exists.

    :raises RuntimeError: if the resource group check fails.
    """
    ansibleArgs = {
        'resgrp': self.clusterName,
        'region': self._zone
    }
    try:
        self.callPlaybook(self.playbook['check-cluster'], ansibleArgs, wait=True)
    except RuntimeError:
        # Fixed typo in the user-facing message: "exits" -> "exists".
        logger.info("The cluster could not be created. "
                    "Try deleting the cluster if it already exists.")
        raise
python
{ "resource": "" }
q28784
encrypt
train
def encrypt(message, keyPath):
    """
    Encrypts a message given a path to a local file containing a key.

    :param message: The message to be encrypted.
    :param keyPath: A path to a file containing a 256-bit key (and nothing else).
    :type message: bytes
    :type keyPath: str
    :rtype: bytes

    A constant overhead is added to every encrypted message (for the nonce and MAC).

    >>> import tempfile
    >>> k = tempfile.mktemp()
    >>> with open(k, 'wb') as f:
    ...     _ = f.write(nacl.utils.random(SecretBox.KEY_SIZE))
    >>> message = 'test'.encode('utf-8')
    >>> len(encrypt(message, k)) == overhead + len(message)
    True
    >>> import os
    >>> os.remove(k)
    """
    with open(keyPath, 'rb') as f:
        key = f.read()
    if len(key) != SecretBox.KEY_SIZE:
        raise ValueError("Key is %d bytes, but must be exactly %d bytes" % (len(key),
                                                                            SecretBox.KEY_SIZE))
    # We generate the nonce from secure random bits. At 192 bits the chance of
    # a random nonce collision is astronomically small -- much smaller than the
    # chance of a subtle coding error causing nonce reuse. This approach is the
    # one recommended in the libsodium documentation.
    nonce = nacl.utils.random(SecretBox.NONCE_SIZE)
    assert len(nonce) == SecretBox.NONCE_SIZE
    box = SecretBox(key)
    return bytes(box.encrypt(message, nonce))
python
{ "resource": "" }
q28785
memoize
train
def memoize(f):
    """
    A decorator that memoizes a function result based on its parameters. For
    example, this can be used in place of lazy initialization. If the decorating
    function is invoked by multiple threads, the decorated function may be called
    more than once with the same arguments.
    """
    # TODO: Recommend that f's arguments be immutable
    cache = {}

    @wraps(f)
    def memoized(*args):
        # Compute and store on a miss; serve from the cache otherwise.
        if args not in cache:
            cache[args] = f(*args)
        return cache[args]

    return memoized
python
{ "resource": "" }
q28786
sync_memoize
train
def sync_memoize(f):
    """
    Like memoize, but guarantees that the decorated function is only called
    once, even when multiple threads are calling the decorating function with
    multiple parameters.
    """
    # TODO: Think about an f that is recursive
    cache = {}
    lock = Lock()

    @wraps(f)
    def synchronized(*args):
        try:
            # Fast path: no locking on a cache hit.
            return cache[args]
        except KeyError:
            with lock:
                # Double-check inside the lock: another thread may have
                # populated the entry while we waited.
                if args not in cache:
                    cache[args] = f(*args)
                return cache[args]

    return synchronized
python
{ "resource": "" }
q28787
less_strict_bool
train
def less_strict_bool(x):
    """Idempotent and None-safe version of strict_bool."""
    if x is None:
        return False
    if isinstance(x, bool):
        # Already a bool: pass it through unchanged (idempotence).
        return x
    return strict_bool(x)
python
{ "resource": "" }
q28788
setup
train
def setup(job, input_file_id, n, down_checkpoints):
    """Sets up the sort. Returns (a promise of) the FileID of the sorted file."""
    job.fileStore.logToMaster("Starting the merge sort")
    # Kick off the recursive split/sort as a child job and promise its result.
    child = job.addChildJobFn(down,
                              input_file_id, n,
                              down_checkpoints=down_checkpoints,
                              memory='600M')
    return child.rv()
python
{ "resource": "" }
q28789
down
train
def down(job, input_file_id, n, down_checkpoints): """Input is a file and a range into that file to sort and an output location in which to write the sorted file. If the range is larger than a threshold N the range is divided recursively and a follow on job is then created which merges back the results. Otherwise, the file is sorted and placed in the output. """ # Read the file input_file = job.fileStore.readGlobalFile(input_file_id, cache=False) length = os.path.getsize(input_file) if length > n: # We will subdivide the file job.fileStore.logToMaster("Splitting file: %s of size: %s" % (input_file_id, length), level=logging.CRITICAL) # Split the file into two copies mid_point = get_midpoint(input_file, 0, length) t1 = job.fileStore.getLocalTempFile() with open(t1, 'w') as fH: copy_subrange_of_file(input_file, 0, mid_point + 1, fH) t2 = job.fileStore.getLocalTempFile() with open(t2, 'w') as fH: copy_subrange_of_file(input_file, mid_point + 1, length, fH) # Call the down function recursively return job.addFollowOnJobFn(up, job.addChildJobFn(down, job.fileStore.writeGlobalFile(t1), n, down_checkpoints=down_checkpoints, memory='600M').rv(), job.addChildJobFn(down, job.fileStore.writeGlobalFile(t2), n, down_checkpoints=down_checkpoints, memory='600M').rv()).rv() else: # We can sort this bit of the file job.fileStore.logToMaster("Sorting file: %s of size: %s" % (input_file_id, length), level=logging.CRITICAL) # Sort the copy and write back to the fileStore output_file = job.fileStore.getLocalTempFile() sort(input_file, output_file) return job.fileStore.writeGlobalFile(output_file)
python
{ "resource": "" }
q28790
up
train
def up(job, input_file_id_1, input_file_id_2):
    """Merges the two sorted input files and places the result in the output.

    :param job: the Toil job running this function.
    :param input_file_id_1: FileID of the first sorted input.
    :param input_file_id_2: FileID of the second sorted input.
    :return: FileID of the merged output file.
    """
    with job.fileStore.writeGlobalFileStream() as (fileHandle, output_id):
        with job.fileStore.readGlobalFileStream(input_file_id_1) as inputFileHandle1:
            with job.fileStore.readGlobalFileStream(input_file_id_2) as inputFileHandle2:
                job.fileStore.logToMaster("Merging %s and %s to %s"
                                          % (input_file_id_1, input_file_id_2, output_id))
                merge(inputFileHandle1, inputFileHandle2, fileHandle)
        # Cleanup up the input files - these deletes will occur after the
        # completion is successful.
        job.fileStore.deleteGlobalFile(input_file_id_1)
        job.fileStore.deleteGlobalFile(input_file_id_2)
        return output_id
python
{ "resource": "" }
q28791
sort
train
def sort(in_file, out_file):
    """Sorts the given file, line by line, writing the result to out_file.

    :param in_file: path of the file to sort.
    :param out_file: path to write the sorted lines to.
    """
    # Use context managers so the handles are closed even if an
    # exception occurs (the originals leaked on error paths).
    with open(in_file, 'r') as fh:
        lines = fh.readlines()
    lines.sort()
    with open(out_file, 'w') as fh:
        fh.writelines(lines)
python
{ "resource": "" }
q28792
merge
train
def merge(filehandle_1, filehandle_2, output_filehandle):
    """Merges together two sorted files, maintaining sorted order in the output."""
    pending = filehandle_2.readline()
    for current in filehandle_1.readlines():
        # Drain everything from file 2 that sorts at or before this line.
        while pending != '' and pending <= current:
            output_filehandle.write(pending)
            pending = filehandle_2.readline()
        output_filehandle.write(current)
    # File 1 is exhausted; flush whatever remains of file 2.
    while pending != '':
        output_filehandle.write(pending)
        pending = filehandle_2.readline()
python
{ "resource": "" }
q28793
get_midpoint
train
def get_midpoint(file, file_start, file_end):
    """Finds the point in the file to split.
    Returns an int i such that fileStart <= i < fileEnd, positioned at the end
    of a line so that splitting there keeps lines whole.
    """
    # `with` guarantees the handle is closed (the original never closed it).
    with open(file, 'r') as filehandle:
        # Integer division: `/` yields a float on Python 3 and seek()
        # rejects non-integer offsets.
        mid_point = (file_start + file_end) // 2
        assert mid_point >= file_start
        filehandle.seek(mid_point)
        line = filehandle.readline()
        assert len(line) >= 1
        if len(line) + mid_point < file_end:
            return mid_point + len(line) - 1
        # The remainder of the first line reached EOF; split after the
        # first whole line from file_start instead.
        filehandle.seek(file_start)
        line = filehandle.readline()
        assert len(line) >= 1
        assert len(line) + file_start <= file_end
        return len(line) + file_start - 1
python
{ "resource": "" }
q28794
FileJobStore.robust_rmtree
train
def robust_rmtree(self, path, max_retries=3):
    """Robustly tries to delete paths.

    Retries several times (with increasing delays) if an OSError occurs.
    If the final attempt fails, the Exception is propagated to the caller.

    Borrowing patterns from: https://github.com/hashdist/hashdist
    """
    delay = 1
    failures = 0
    while failures < max_retries:
        try:
            shutil.rmtree(path)
            break
        except OSError:
            logger.debug('Unable to remove path: {}. Retrying in {} seconds.'.format(path, delay))
            time.sleep(delay)
            # Exponential backoff between attempts.
            delay *= 2
            failures += 1
    if os.path.exists(path):
        # Final attempt; any exception propagates to the caller.
        shutil.rmtree(path)
python
{ "resource": "" }
q28795
FileJobStore._getUniqueName
train
def _getUniqueName(self, fileName, jobStoreID=None, sourceFunctionName="x"):
    """
    Create unique file name within a jobStore directory or tmp directory.

    :param fileName: A file name, which can be a full path as only the
           basename will be used.
    :param jobStoreID: If given, the path returned will be in the jobStore
           directory. Otherwise, the tmp directory will be used.
    :param sourceFunctionName: This name is the name of the function that
           generated this file. Defaults to x if that name was not a normal
           name. Used for tracking files.
    :return: The full path with a unique file name.
    """
    # Reserve a unique temp name, then free it so we can decorate it below.
    fd, absPath = self._getTempFile(jobStoreID)
    os.close(fd)
    os.unlink(absPath)
    # remove the .tmp extension and add the file name
    (noExt, ext) = os.path.splitext(absPath)
    uniquePath = noExt + '-' + sourceFunctionName + '-' + os.path.basename(fileName)
    # NOTE(review): absPath was unlinked just above, so this check is normally
    # False; it looks like `uniquePath` may have been intended here — confirm
    # against callers before changing.
    if os.path.exists(absPath):
        return absPath  # give up, just return temp name to avoid conflicts
    return uniquePath
python
{ "resource": "" }
q28796
_fetchAzureAccountKey
train
def _fetchAzureAccountKey(accountName): """ Find the account key for a given Azure storage account. The account key is taken from the AZURE_ACCOUNT_KEY_<account> environment variable if it exists, then from plain AZURE_ACCOUNT_KEY, and then from looking in the file ~/.toilAzureCredentials. That file has format: [AzureStorageCredentials] accountName1=ACCOUNTKEY1== accountName2=ACCOUNTKEY2== """ try: return os.environ['AZURE_ACCOUNT_KEY_' + accountName] except KeyError: try: return os.environ['AZURE_ACCOUNT_KEY'] except KeyError: configParser = RawConfigParser() configParser.read(os.path.expanduser(credential_file_path)) try: return configParser.get('AzureStorageCredentials', accountName) except NoOptionError: raise RuntimeError("No account key found for '%s', please provide it in '%s'" % (accountName, credential_file_path))
python
{ "resource": "" }
q28797
dockerPredicate
train
def dockerPredicate(e):
    """
    Used to ensure Docker exceptions are retried if appropriate.

    :param e: Exception
    :return: True if e is retriable, else False
    """
    if not isinstance(e, subprocess.CalledProcessError):
        return False
    # Exit status 125 means the docker command itself failed (e.g. a transient
    # daemon error), which is worth retrying. Return an explicit bool instead
    # of the previous implicit None fall-through.
    return e.returncode == 125
python
{ "resource": "" }
q28798
getContainerName
train
def getContainerName(job):
    """Create a random string including the job name, and return it."""
    # URL-safe base64 of 9 random bytes gives a short random suffix.
    suffix = base64.b64encode(os.urandom(9), b'-_').decode('utf-8')
    raw = '--'.join([str(job), suffix])
    # Strip characters that are unsafe in container names.
    for ch in ("'", '"', '_'):
        raw = raw.replace(ch, '')
    return raw
python
{ "resource": "" }
q28799
AnsibleDriver.callPlaybook
train
def callPlaybook(self, playbook, ansibleArgs, wait=True, tags=None):
    """
    Run a playbook.

    :param playbook: An Ansible playbook to run.
    :param ansibleArgs: Arguments to pass to the playbook.
    :param wait: Wait for the play to finish if true.
    :param tags: Control tags for the play; defaults to ["all"].
    :raises RuntimeError: if wait is True and ansible-playbook exits non-zero.
    """
    # Avoid a mutable default argument; None stands in for ["all"].
    if tags is None:
        tags = ["all"]
    playbook = os.path.join(self.playbooks, playbook)  # Path to playbook being executed
    verbosity = "-vvvvv" if logger.isEnabledFor(logging.DEBUG) else "-v"
    command = ["ansible-playbook", verbosity, "--tags", ",".join(tags), "--extra-vars"]
    command.append(" ".join(["=".join(i) for i in ansibleArgs.items()]))  # Arguments being passed to playbook
    command.append(playbook)
    logger.debug("Executing Ansible call `%s`", " ".join(command))
    p = subprocess.Popen(command)
    if wait:
        p.communicate()
        if p.returncode != 0:
            # FIXME: parse error codes
            raise RuntimeError("Ansible reported an error when executing playbook %s" % playbook)
python
{ "resource": "" }