_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q28800
Leader.run
train
def run(self): """ This runs the leader process to issue and manage jobs. :raises: toil.leader.FailedJobsException if at the end of function their remain \ failed jobs :return: The return value of the root job's run function. :rtype: Any """ # Start the stats/logging aggregation thread self.statsAndLogging.start() if self.config.metrics: self.toilMetrics = ToilMetrics(provisioner=self.provisioner) try: # Start service manager thread self.serviceManager.start() try: # Create cluster scaling processes if not None if self.clusterScaler is not None: self.clusterScaler.start() try: # Run the main loop self.innerLoop() finally: if self.clusterScaler is not None: logger.debug('Waiting for workers to shutdown.') startTime = time.time() self.clusterScaler.shutdown() logger.debug('Worker shutdown complete in %s seconds.', time.time() - startTime) finally: # Ensure service manager thread is properly shutdown self.serviceManager.shutdown() finally: # Ensure the stats and logging thread is properly shutdown self.statsAndLogging.shutdown() if self.toilMetrics: self.toilMetrics.shutdown() # Filter the failed jobs self.toilState.totalFailedJobs = [j for j in self.toilState.totalFailedJobs if self.jobStore.exists(j.jobStoreID)] try: self.create_status_sentinel_file(self.toilState.totalFailedJobs) except IOError as e: logger.debug('Error from importFile with hardlink=True: {}'.format(e)) logger.info("Finished toil run %s" % ("successfully." if not self.toilState.totalFailedJobs \ else ("with %s failed jobs." % len(self.toilState.totalFailedJobs)))) if len(self.toilState.totalFailedJobs): logger.info("Failed jobs at end of the run: %s", ' '.join(str(job) for job in self.toilState.totalFailedJobs)) # Cleanup if len(self.toilState.totalFailedJobs) > 0: raise FailedJobsException(self.config.jobStore, self.toilState.totalFailedJobs, self.jobStore) return self.jobStore.getRootJobReturnValue()
python
{ "resource": "" }
q28801
Leader.create_status_sentinel_file
train
def create_status_sentinel_file(self, fail):
    """Drop an empty marker file into the job store recording whether the run failed or succeeded."""
    if fail:
        sentinelName = 'failed.log'
    else:
        sentinelName = 'succeeded.log'
    sentinelPath = os.path.join(os.getcwd(), sentinelName)
    # Create an empty local file to import.
    with open(sentinelPath, 'w'):
        pass
    self.jobStore.importFile('file://' + sentinelPath, sentinelName, hardlink=True)
    # Bandaid for Jenkins tests failing stochastically and unexplainably.
    if os.path.exists(sentinelPath):
        os.remove(sentinelPath)
python
{ "resource": "" }
q28802
Leader._checkSuccessorReadyToRunMultiplePredecessors
train
def _checkSuccessorReadyToRunMultiplePredecessors(self, jobGraph, jobNode, successorJobStoreID): """Handle the special cases of checking if a successor job is ready to run when there are multiple predecessors""" # See implementation note at the top of this file for discussion of multiple predecessors logger.debug("Successor job: %s of job: %s has multiple " "predecessors", jobNode, jobGraph) # Get the successor job graph, which is caches if successorJobStoreID not in self.toilState.jobsToBeScheduledWithMultiplePredecessors: self.toilState.jobsToBeScheduledWithMultiplePredecessors[successorJobStoreID] = self.jobStore.load(successorJobStoreID) successorJobGraph = self.toilState.jobsToBeScheduledWithMultiplePredecessors[successorJobStoreID] # Add the jobGraph as a finished predecessor to the successor successorJobGraph.predecessorsFinished.add(jobGraph.jobStoreID) # If the successor is in the set of successors of failed jobs if successorJobStoreID in self.toilState.failedSuccessors: if not self._handledFailedSuccessor(jobNode, jobGraph, successorJobStoreID): return False # If the successor job's predecessors have all not all completed then # ignore the jobGraph as is not yet ready to run assert len(successorJobGraph.predecessorsFinished) <= successorJobGraph.predecessorNumber if len(successorJobGraph.predecessorsFinished) < successorJobGraph.predecessorNumber: return False else: # Remove the successor job from the cache self.toilState.jobsToBeScheduledWithMultiplePredecessors.pop(successorJobStoreID) return True
python
{ "resource": "" }
q28803
Leader._makeJobSuccessorReadyToRun
train
def _makeJobSuccessorReadyToRun(self, jobGraph, jobNode): """make a successor job ready to run, returning False if they should not yet be run""" successorJobStoreID = jobNode.jobStoreID #Build map from successor to predecessors. if successorJobStoreID not in self.toilState.successorJobStoreIDToPredecessorJobs: self.toilState.successorJobStoreIDToPredecessorJobs[successorJobStoreID] = [] self.toilState.successorJobStoreIDToPredecessorJobs[successorJobStoreID].append(jobGraph) if jobNode.predecessorNumber > 1: return self._checkSuccessorReadyToRunMultiplePredecessors(jobGraph, jobNode, successorJobStoreID) else: return True
python
{ "resource": "" }
q28804
Leader._processFailedSuccessors
train
def _processFailedSuccessors(self, jobGraph):
    """
    Some of the job's successors failed: depending on state, kill its
    services, wait for running successors, restart it (checkpoint with
    retries left), or mark it totally failed.
    """
    if jobGraph.jobStoreID in self.toilState.servicesIssued:
        # The job has services running, signal for them to be killed
        # once they are killed then the jobGraph will be re-added to
        # the updatedJobs set and then scheduled to be removed
        logger.debug("Telling job: %s to terminate its services due to successor failure",
                     jobGraph.jobStoreID)
        self.serviceManager.killServices(self.toilState.servicesIssued[jobGraph.jobStoreID],
                                         error=True)
    elif jobGraph.jobStoreID in self.toilState.successorCounts:
        # The job has non-service jobs running wait for them to finish
        # the job will be re-added to the updated jobs when these jobs
        # are done
        logger.debug("Job %s with ID: %s with failed successors still has successor jobs running",
                     jobGraph, jobGraph.jobStoreID)
    elif jobGraph.checkpoint is not None and jobGraph.remainingRetryCount > 1:
        # If the job is a checkpoint and has remaining retries then reissue it.
        # The logic behind using > 1 rather than > 0 here: Since this job has
        # been tried once (without decreasing its retry count as the job
        # itself was successful), and its subtree failed, it shouldn't be retried
        # unless it has more than 1 try.
        logger.warn('Job: %s is being restarted as a checkpoint after the total '
                    'failure of jobs in its subtree.', jobGraph.jobStoreID)
        self.issueJob(JobNode.fromJobGraph(jobGraph))
    else:
        # Mark it totally failed
        logger.debug("Job %s is being processed as completely failed", jobGraph.jobStoreID)
        self.processTotallyFailedJob(jobGraph)
python
{ "resource": "" }
q28805
Leader._startServiceJobs
train
def _startServiceJobs(self):
    """Drain the service manager's start queue, issuing every service job it hands back."""
    self.issueQueingServiceJobs()
    # iter() with a None sentinel keeps polling until the manager has no
    # more service jobs ready to start.
    for serviceJob in iter(lambda: self.serviceManager.getServiceJobsToStart(0), None):
        logger.debug('Launching service job: %s', serviceJob)
        self.issueServiceJob(serviceJob)
python
{ "resource": "" }
q28806
Leader._processJobsWithRunningServices
train
def _processJobsWithRunningServices(self):
    """Collect jobs whose services have all come up and mark them as updated."""
    # Poll until the service manager reports no more such jobs (None).
    for jobGraph in iter(lambda: self.serviceManager.getJobGraphWhoseServicesAreRunning(0), None):
        logger.debug('Job: %s has established its services.', jobGraph.jobStoreID)
        # Services are up, so the job itself no longer carries them.
        jobGraph.services = []
        self.toilState.updatedJobs.add((jobGraph, 0))
python
{ "resource": "" }
q28807
Leader._gatherUpdatedJobs
train
def _gatherUpdatedJobs(self, updatedJobTuple):
    """
    Gather any new, updated jobGraph from the batch system.

    :param updatedJobTuple: (batch system job ID, exit code, wall time)
        as returned by the batch system.
    """
    jobID, result, wallTime = updatedJobTuple
    # A job no longer in the issued-job map has already been processed
    # (e.g. a duplicate notification from the batch system).
    try:
        updatedJob = self.jobBatchSystemIDToIssuedJob[jobID]
    except KeyError:
        logger.warn("A result seems to already have been processed "
                    "for job %s", jobID)
    else:
        if result == 0:
            # Keep CWL-internal bookkeeping jobs quiet at info level.
            cur_logger = (logger.debug if str(updatedJob.jobName).startswith(CWL_INTERNAL_JOBS)
                          else logger.info)
            cur_logger('Job ended successfully: %s', updatedJob)
            if self.toilMetrics:
                self.toilMetrics.logCompletedJob(updatedJob)
        else:
            logger.warn('Job failed with exit value %i: %s',
                        result, updatedJob)
        self.processFinishedJob(jobID, result, wallTime=wallTime)
python
{ "resource": "" }
q28808
Leader._processLostJobs
train
def _processLostJobs(self): """Process jobs that have gone awry""" # In the case that there is nothing happening (no updated jobs to # gather for rescueJobsFrequency seconds) check if there are any jobs # that have run too long (see self.reissueOverLongJobs) or which have # gone missing from the batch system (see self.reissueMissingJobs) if ((time.time() - self.timeSinceJobsLastRescued) >= self.config.rescueJobsFrequency): # We only rescue jobs every N seconds, and when we have apparently # exhausted the current jobGraph supply self.reissueOverLongJobs() logger.info("Reissued any over long jobs") hasNoMissingJobs = self.reissueMissingJobs() if hasNoMissingJobs: self.timeSinceJobsLastRescued = time.time() else: # This means we'll try again in a minute, providing things are quiet self.timeSinceJobsLastRescued += 60 logger.debug("Rescued any (long) missing jobs")
python
{ "resource": "" }
q28809
Leader.innerLoop
train
def innerLoop(self):
    """
    The main loop for processing jobs by the leader.

    Loops until there are no updated jobs, no issued jobs, and nothing
    pending in the service manager, then consistency-checks the toil state.
    """
    self.timeSinceJobsLastRescued = time.time()
    while self.toilState.updatedJobs or \
          self.getNumberOfJobsIssued() or \
          self.serviceManager.jobsIssuedToServiceManager:
        if self.toilState.updatedJobs:
            self._processReadyJobs()
        # deal with service-related jobs
        self._startServiceJobs()
        self._processJobsWithRunningServices()
        # check in with the batch system
        updatedJobTuple = self.batchSystem.getUpdatedBatchJob(maxWait=2)
        if updatedJobTuple is not None:
            self._gatherUpdatedJobs(updatedJobTuple)
        else:
            self._processLostJobs()
        # Check on the associated threads and exit if a failure is detected
        self.statsAndLogging.check()
        self.serviceManager.check()
        # the cluster scaler object will only be instantiated if autoscaling is enabled
        if self.clusterScaler is not None:
            self.clusterScaler.check()
        if len(self.toilState.updatedJobs) == 0 and self.deadlockThrottler.throttle(wait=False):
            # Nothing happened this round and it's been long
            # enough since we last checked. Check for deadlocks.
            self.checkForDeadlocks()
    logger.debug("Finished the main loop: no jobs left to run.")
    # Consistency check the toil state
    assert self.toilState.updatedJobs == set()
    assert self.toilState.successorCounts == {}
    assert self.toilState.successorJobStoreIDToPredecessorJobs == {}
    assert self.toilState.serviceJobStoreIDToPredecessorJob == {}
    assert self.toilState.servicesIssued == {}
python
{ "resource": "" }
q28810
Leader.checkForDeadlocks
train
def checkForDeadlocks(self):
    """
    Checks if the system is deadlocked running service jobs.

    :raises DeadlockException: if every running job has been an active
        service for at least self.config.deadlockWait seconds.
    """
    totalRunningJobs = len(self.batchSystem.getRunningBatchJobIDs())
    totalServicesIssued = self.serviceJobsIssued + self.preemptableServiceJobsIssued
    # If there are no updated jobs and at least some jobs running
    if totalServicesIssued >= totalRunningJobs and totalRunningJobs > 0:
        serviceJobs = [x for x in list(self.jobBatchSystemIDToIssuedJob.keys()) if isinstance(self.jobBatchSystemIDToIssuedJob[x], ServiceJobNode)]
        runningServiceJobs = set([x for x in serviceJobs if self.serviceManager.isRunning(self.jobBatchSystemIDToIssuedJob[x])])
        assert len(runningServiceJobs) <= totalRunningJobs
        # If all the running jobs are active services then we have a potential deadlock
        if len(runningServiceJobs) == totalRunningJobs:
            # We wait self.config.deadlockWait seconds before declaring the system deadlocked
            if self.potentialDeadlockedJobs != runningServiceJobs:
                # New potential deadlock set: (re)start the timer.
                self.potentialDeadlockedJobs = runningServiceJobs
                self.potentialDeadlockTime = time.time()
            elif time.time() - self.potentialDeadlockTime >= self.config.deadlockWait:
                raise DeadlockException("The system is service deadlocked - all %d running jobs are active services" % totalRunningJobs)
        else:
            # We have observed non-service jobs running, so reset the potential deadlock
            self.potentialDeadlockedJobs = set()
            self.potentialDeadlockTime = 0
    else:
        # We have observed non-service jobs running, so reset the potential deadlock
        self.potentialDeadlockedJobs = set()
        self.potentialDeadlockTime = 0
python
{ "resource": "" }
q28811
Leader.issueJob
train
def issueJob(self, jobNode):
    """
    Add a job to the queue of jobs.

    Rewrites the job's command to re-enter through the _toil_worker entry
    point, submits it to the batch system, and records it in the
    issued-job map.
    """
    jobNode.command = ' '.join((resolveEntryPoint('_toil_worker'),
                                jobNode.jobName, self.jobStoreLocator, jobNode.jobStoreID))
    # jobBatchSystemID is an int that is an incremented counter for each job
    jobBatchSystemID = self.batchSystem.issueBatchJob(jobNode)
    self.jobBatchSystemIDToIssuedJob[jobBatchSystemID] = jobNode
    if jobNode.preemptable:
        # len(jobBatchSystemIDToIssuedJob) should always be greater than or equal to preemptableJobsIssued,
        # so increment this value after the job is added to the issuedJob dict
        self.preemptableJobsIssued += 1
    # CWL-internal bookkeeping jobs are logged at debug level only.
    cur_logger = logger.debug if jobNode.jobName.startswith(CWL_INTERNAL_JOBS) else logger.info
    cur_logger("Issued job %s with job batch system ID: "
               "%s and cores: %s, disk: %s, and memory: %s",
               jobNode, str(jobBatchSystemID), int(jobNode.cores),
               bytes2human(jobNode.disk), bytes2human(jobNode.memory))
    if self.toilMetrics:
        self.toilMetrics.logIssuedJob(jobNode)
        self.toilMetrics.logQueueSize(self.getNumberOfJobsIssued())
python
{ "resource": "" }
q28812
Leader.issueServiceJob
train
def issueServiceJob(self, jobNode):
    """
    Issue a service job, putting it on a queue if the maximum number of
    service jobs to be scheduled has been reached.
    """
    # Route the job to the queue matching its preemptability, then try to
    # issue whatever the caps currently allow.
    targetQueue = (self.preemptableServiceJobsToBeIssued
                   if jobNode.preemptable else self.serviceJobsToBeIssued)
    targetQueue.append(jobNode)
    self.issueQueingServiceJobs()
python
{ "resource": "" }
q28813
Leader.issueQueingServiceJobs
train
def issueQueingServiceJobs(self):
    """Issues any queuing service jobs up to the limit of the maximum allowed."""
    # Drain the non-preemptable queue until it is empty or its cap is hit,
    # then do the same for the preemptable queue. Jobs are taken from the
    # end of each queue (pop()).
    while self.serviceJobsToBeIssued and self.serviceJobsIssued < self.config.maxServiceJobs:
        self.issueJob(self.serviceJobsToBeIssued.pop())
        self.serviceJobsIssued += 1
    while self.preemptableServiceJobsToBeIssued and self.preemptableServiceJobsIssued < self.config.maxPreemptableServiceJobs:
        self.issueJob(self.preemptableServiceJobsToBeIssued.pop())
        self.preemptableServiceJobsIssued += 1
python
{ "resource": "" }
q28814
Leader.removeJob
train
def removeJob(self, jobBatchSystemID):
    """Remove an issued job from tracking, maintaining the preemptable and service counters; return its node."""
    assert jobBatchSystemID in self.jobBatchSystemIDToIssuedJob
    jobNode = self.jobBatchSystemIDToIssuedJob[jobBatchSystemID]
    if jobNode.preemptable:
        # Keep the invariant preemptableJobsIssued <= len(jobBatchSystemIDToIssuedJob):
        # decrement before dropping the entry from the map.
        assert self.preemptableJobsIssued > 0
        self.preemptableJobsIssued -= 1
    del self.jobBatchSystemIDToIssuedJob[jobBatchSystemID]
    if jobNode.jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob:
        # The job was a service: release one slot from the matching counter.
        if jobNode.preemptable:
            self.preemptableServiceJobsIssued -= 1
        else:
            self.serviceJobsIssued -= 1
    return jobNode
python
{ "resource": "" }
q28815
Leader.killJobs
train
def killJobs(self, jobsToKill):
    """
    Kill the given batch jobs, then route each through finished-job
    processing with exit status 1.
    """
    if not jobsToKill:
        return
    self.batchSystem.killBatchJobs(jobsToKill)
    for batchID in jobsToKill:
        self.processFinishedJob(batchID, 1)
python
{ "resource": "" }
q28816
Leader.reissueOverLongJobs
train
def reissueOverLongJobs(self):
    """
    Check each issued job - if it is running for longer than desirable
    issue a kill instruction.
    Wait for the job to die then we pass the job to processFinishedJob.
    """
    maxJobDuration = self.config.maxJobDuration
    jobsToKill = []
    if maxJobDuration < 10000000:  # We won't bother doing anything if rescue time > 16 weeks.
        runningJobs = self.batchSystem.getRunningBatchJobIDs()
        for jobBatchSystemID in list(runningJobs.keys()):
            if runningJobs[jobBatchSystemID] > maxJobDuration:
                logger.warn("The job: %s has been running for: %s seconds, more than the "
                            "max job duration: %s, we'll kill it",
                            str(self.jobBatchSystemIDToIssuedJob[jobBatchSystemID].jobStoreID),
                            str(runningJobs[jobBatchSystemID]),
                            str(maxJobDuration))
                jobsToKill.append(jobBatchSystemID)
        self.killJobs(jobsToKill)
python
{ "resource": "" }
q28817
Leader.processFinishedJob
train
def processFinishedJob(self, batchSystemID, resultStatus, wallTime=None):
    """
    Function reads a processed jobGraph file and updates its state.

    :param batchSystemID: the batch system's ID for the finished job.
    :param resultStatus: the job's exit code; non-zero indicates failure.
    :param wallTime: optional wall-clock runtime, fed to the cluster scaler.
    """
    jobNode = self.removeJob(batchSystemID)
    jobStoreID = jobNode.jobStoreID
    if wallTime is not None and self.clusterScaler is not None:
        self.clusterScaler.addCompletedJob(jobNode, wallTime)
    if self.jobStore.exists(jobStoreID):
        logger.debug("Job %s continues to exist (i.e. has more to do)", jobNode)
        try:
            jobGraph = self.jobStore.load(jobStoreID)
        except NoSuchJobException:
            # Avoid importing AWSJobStore as the corresponding extra might be missing
            if self.jobStore.__class__.__name__ == 'AWSJobStore':
                # We have a ghost job - the job has been deleted but a stale read from
                # SDB gave us a false positive when we checked for its existence.
                # Process the job from here as any other job removed from the job store.
                # This is a temporary work around until https://github.com/BD2KGenomics/toil/issues/1091
                # is completed
                logger.warn('Got a stale read from SDB for job %s', jobNode)
                self.processRemovedJob(jobNode, resultStatus)
                return
            else:
                raise
        if jobGraph.logJobStoreFileID is not None:
            with jobGraph.getLogFileHandle(self.jobStore) as logFileStream:
                # more memory efficient than read().striplines() while leaving off the
                # trailing \n left when using readlines()
                # http://stackoverflow.com/a/15233739
                StatsAndLogging.logWithFormatting(jobStoreID, logFileStream, method=logger.warn,
                                                  message='The job seems to have left a log file, indicating failure: %s' % jobGraph)
            if self.config.writeLogs or self.config.writeLogsGzip:
                with jobGraph.getLogFileHandle(self.jobStore) as logFileStream:
                    StatsAndLogging.writeLogFiles(jobGraph.chainedJobs, logFileStream, self.config)
        if resultStatus != 0:
            # If the batch system returned a non-zero exit code then the worker
            # is assumed not to have captured the failure of the job, so we
            # reduce the retry count here.
            if jobGraph.logJobStoreFileID is None:
                logger.warn("No log file is present, despite job failing: %s", jobNode)
            jobGraph.setupJobAfterFailure(self.config)
            self.jobStore.update(jobGraph)
        elif jobStoreID in self.toilState.hasFailedSuccessors:
            # If the job has completed okay, we can remove it from the list of jobs with failed successors
            self.toilState.hasFailedSuccessors.remove(jobStoreID)
        self.toilState.updatedJobs.add((jobGraph, resultStatus))  # Now we know the
        # jobGraph is done we can add it to the list of updated jobGraph files
        logger.debug("Added job: %s to active jobs", jobGraph)
    else:
        # The jobGraph is done
        self.processRemovedJob(jobNode, resultStatus)
python
{ "resource": "" }
q28818
Leader.getSuccessors
train
def getSuccessors(jobGraph, alreadySeenSuccessors, jobStore):
    """
    Walk the successor graph of jobGraph, collecting the job store IDs of
    every successor not already in alreadySeenSuccessors.

    The discovered IDs are added to alreadySeenSuccessors as a side effect
    and also returned as a new set.
    """
    found = set()
    # Explicit worklist replaces recursion; each entry is a job graph whose
    # successor lists still need scanning.
    pending = [jobGraph]
    while pending:
        current = pending.pop()
        for successorList in current.stack:
            for successorNode in successorList:
                storeID = successorNode.jobStoreID
                if storeID in alreadySeenSuccessors:
                    continue
                found.add(storeID)
                alreadySeenSuccessors.add(storeID)
                # Only descend into jobs that still exist; a job may already
                # have completed and been removed from the store.
                if jobStore.exists(storeID):
                    pending.append(jobStore.load(storeID))
    return found
python
{ "resource": "" }
q28819
Leader.processTotallyFailedJob
train
def processTotallyFailedJob(self, jobGraph):
    """
    Processes a totally failed job: records it, unwinds service state if it
    was a service, and propagates the failure to its predecessors.
    """
    # Mark job as a totally failed job
    self.toilState.totalFailedJobs.add(JobNode.fromJobGraph(jobGraph))
    if self.toilMetrics:
        self.toilMetrics.logFailedJob(jobGraph)
    if jobGraph.jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob:
        # Is a service job
        logger.debug("Service job is being processed as a totally failed job: %s", jobGraph)
        predecesssorJobGraph = self.toilState.serviceJobStoreIDToPredecessorJob[jobGraph.jobStoreID]
        # This removes the service job as a service of the predecessor
        # and potentially makes the predecessor active
        self._updatePredecessorStatus(jobGraph.jobStoreID)
        # Remove the start flag, if it still exists. This indicates
        # to the service manager that the job has "started", this prevents
        # the service manager from deadlocking while waiting
        self.jobStore.deleteFile(jobGraph.startJobStoreID)
        # Signal to any other services in the group that they should
        # terminate. We do this to prevent other services in the set
        # of services from deadlocking waiting for this service to start properly
        if predecesssorJobGraph.jobStoreID in self.toilState.servicesIssued:
            self.serviceManager.killServices(self.toilState.servicesIssued[predecesssorJobGraph.jobStoreID], error=True)
            logger.debug("Job: %s is instructing all the services of its parent job to quit", jobGraph)
        # This ensures that the job will not attempt to run any of it's
        # successors on the stack
        self.toilState.hasFailedSuccessors.add(predecesssorJobGraph.jobStoreID)
    else:
        # Is a non-service job
        assert jobGraph.jobStoreID not in self.toilState.servicesIssued
        # Traverse failed job's successor graph and get the jobStoreID of new successors.
        # Any successor already in toilState.failedSuccessors will not be traversed
        # All successors traversed will be added to toilState.failedSuccessors and returned
        # as a set (unseenSuccessors).
        unseenSuccessors = self.getSuccessors(jobGraph, self.toilState.failedSuccessors,
                                              self.jobStore)
        logger.debug("Found new failed successors: %s of job: %s", " ".join(unseenSuccessors), jobGraph)
        # For each newly found successor
        for successorJobStoreID in unseenSuccessors:
            # If the successor is a successor of other jobs that have already tried to schedule it
            if successorJobStoreID in self.toilState.successorJobStoreIDToPredecessorJobs:
                # For each such predecessor job
                # (we remove the successor from toilState.successorJobStoreIDToPredecessorJobs to avoid doing
                # this multiple times for each failed predecessor)
                for predecessorJob in self.toilState.successorJobStoreIDToPredecessorJobs.pop(successorJobStoreID):
                    # Reduce the predecessor job's successor count.
                    self.toilState.successorCounts[predecessorJob.jobStoreID] -= 1
                    # Indicate that it has failed jobs.
                    self.toilState.hasFailedSuccessors.add(predecessorJob.jobStoreID)
                    logger.debug("Marking job: %s as having failed successors (found by "
                                 "reading successors failed job)", predecessorJob)
                    # If the predecessor has no remaining successors, add to list of active jobs
                    assert self.toilState.successorCounts[predecessorJob.jobStoreID] >= 0
                    if self.toilState.successorCounts[predecessorJob.jobStoreID] == 0:
                        self.toilState.updatedJobs.add((predecessorJob, 0))
                        # Remove the predecessor job from the set of jobs with successors.
                        self.toilState.successorCounts.pop(predecessorJob.jobStoreID)
        # If the job has predecessor(s)
        if jobGraph.jobStoreID in self.toilState.successorJobStoreIDToPredecessorJobs:
            # For each predecessor of the job
            for predecessorJobGraph in self.toilState.successorJobStoreIDToPredecessorJobs[jobGraph.jobStoreID]:
                # Mark the predecessor as failed
                self.toilState.hasFailedSuccessors.add(predecessorJobGraph.jobStoreID)
                logger.debug("Totally failed job: %s is marking direct predecessor: %s "
                             "as having failed jobs", jobGraph, predecessorJobGraph)
            self._updatePredecessorStatus(jobGraph.jobStoreID)
python
{ "resource": "" }
q28820
Leader._updatePredecessorStatus
train
def _updatePredecessorStatus(self, jobStoreID):
    """
    Update status of predecessors for finished successor job.

    :param jobStoreID: job store ID of the successor that just finished.
    """
    if jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob:
        # Is a service job
        predecessorJob = self.toilState.serviceJobStoreIDToPredecessorJob.pop(jobStoreID)
        self.toilState.servicesIssued[predecessorJob.jobStoreID].pop(jobStoreID)
        if len(self.toilState.servicesIssued[predecessorJob.jobStoreID]) == 0:
            # Predecessor job has all its services terminated
            self.toilState.servicesIssued.pop(predecessorJob.jobStoreID)
            # The job has no running services; now we know the job is done
            # we can add it to the list of updated job files
            self.toilState.updatedJobs.add((predecessorJob, 0))
    elif jobStoreID not in self.toilState.successorJobStoreIDToPredecessorJobs:
        # We have reached the root job
        assert len(self.toilState.updatedJobs) == 0
        assert len(self.toilState.successorJobStoreIDToPredecessorJobs) == 0
        assert len(self.toilState.successorCounts) == 0
        logger.debug("Reached root job %s so no predecessors to clean up" % jobStoreID)
    else:
        # Is a non-root, non-service job
        logger.debug("Cleaning the predecessors of %s" % jobStoreID)
        # For each predecessor
        for predecessorJob in self.toilState.successorJobStoreIDToPredecessorJobs.pop(jobStoreID):
            # Reduce the predecessor's number of successors by one to indicate the
            # completion of the jobStoreID job
            self.toilState.successorCounts[predecessorJob.jobStoreID] -= 1
            # If the predecessor job is done and all the successors are complete
            if self.toilState.successorCounts[predecessorJob.jobStoreID] == 0:
                # Remove it from the set of jobs with active successors
                self.toilState.successorCounts.pop(predecessorJob.jobStoreID)
                if predecessorJob.jobStoreID not in self.toilState.hasFailedSuccessors:
                    # Pop stack at this point, as we can get rid of its successors
                    predecessorJob.stack.pop()
                # Now we know the job is done we can add it to the list of updated job files
                assert predecessorJob not in self.toilState.updatedJobs
                self.toilState.updatedJobs.add((predecessorJob, 0))
python
{ "resource": "" }
q28821
simplify_list
train
def simplify_list(maybe_list):
    """
    Collapse a single-element list (as loaded by cwltool) to its sole
    element. Anything else is returned unchanged, by reference.
    """
    if not isinstance(maybe_list, MutableSequence):
        return maybe_list
    as_seq = aslist(maybe_list)
    return as_seq[0] if len(as_seq) == 1 else maybe_list
python
{ "resource": "" }
q28822
toil_get_file
train
def toil_get_file(file_store, index, existing, file_store_id):
    """Resolve a Toil file-store reference to a local path, returned as a file:// URI."""
    if not file_store_id.startswith("toilfs:"):
        # Not a file-store reference: import it and hand back a public URL.
        imported = file_store.jobStore.importFile(file_store_id)
        return file_store.jobStore.getPublicUrl(imported)
    # Strip the "toilfs:" prefix and materialize the file locally, recording
    # the mapping in both directions for later re-upload.
    local_path = file_store.readGlobalFile(file_store_id[len("toilfs:"):])
    index[local_path] = file_store_id
    existing[file_store_id] = local_path
    return schema_salad.ref_resolver.file_uri(local_path)
python
{ "resource": "" }
q28823
write_file
train
def write_file(writeFunc, index, existing, x): """Write a file into the Toil jobstore. 'existing' is a set of files retrieved as inputs from toil_get_file. This ensures they are mapped back as the same name if passed through. """ # Toil fileStore reference if x.startswith("toilfs:"): return x # File literal outputs with no path, we don't write these and will fail # with unsupportedRequirement when retrieving later with getFile elif x.startswith("_:"): return x else: x = existing.get(x, x) if x not in index: if not urlparse.urlparse(x).scheme: rp = os.path.realpath(x) else: rp = x try: index[x] = "toilfs:" + writeFunc(rp) existing[index[x]] = x except Exception as e: cwllogger.error("Got exception '%s' while copying '%s'", e, x) raise return index[x]
python
{ "resource": "" }
q28824
uploadFile
train
def uploadFile(uploadfunc, fileindex, existing, uf, skip_broken=False):
    """
    Update a file object so that the location is a reference to the toil
    file store, writing it to the file store if necessary.

    :param uf: a CWL File object (dict with "location"/"path"); mutated in place.
    :param skip_broken: when True, silently leave missing local files alone
        instead of raising WorkflowException.
    """
    # Already a file-store reference or a literal with no physical file: nothing to do.
    if uf["location"].startswith("toilfs:") or uf["location"].startswith("_:"):
        return
    if uf["location"] in fileindex:
        # Previously uploaded: reuse the recorded reference.
        uf["location"] = fileindex[uf["location"]]
        return
    if not uf["location"] and uf["path"]:
        uf["location"] = schema_salad.ref_resolver.file_uri(uf["path"])
    if uf["location"].startswith("file://") and not os.path.isfile(uf["location"][7:]):
        # The referenced local file is gone: skip quietly or fail loudly.
        if skip_broken:
            return
        else:
            raise cwltool.errors.WorkflowException(
                "File is missing: %s" % uf["location"])
    uf["location"] = write_file(
        uploadfunc, fileindex, existing, uf["location"])
python
{ "resource": "" }
q28825
toilStageFiles
train
def toilStageFiles(file_store, cwljob, outdir, index, existing, export, destBucket=None):
    """
    Copy input files out of the global file store and update location and path.

    :param cwljob: CWL job structure; its File/Directory entries are mutated
        in place to point at the staged locations.
    :param destBucket: when set, files are exported to this bucket URL
        instead of being staged on the local filesystem.
    """
    def _collectDirEntries(obj):
        # type: (Union[Dict[Text, Any], List[Dict[Text, Any]]]) -> Iterator[Dict[Text, Any]]
        # Yield every File/Directory dict nested anywhere inside obj.
        if isinstance(obj, dict):
            if obj.get("class") in ("File", "Directory"):
                yield obj
            else:
                for sub_obj in obj.values():
                    for dir_entry in _collectDirEntries(sub_obj):
                        yield dir_entry
        elif isinstance(obj, list):
            for sub_obj in obj:
                for dir_entry in _collectDirEntries(sub_obj):
                    yield dir_entry

    jobfiles = list(_collectDirEntries(cwljob))
    pm = ToilPathMapper(
        jobfiles, "", outdir, separateDirs=False, stage_listing=True)
    for f, p in pm.items():
        if not p.staged:
            continue
        # Deal with bucket exports
        if destBucket:
            # Directories don't need to be created if we're exporting to
            # a bucket
            if p.type == "File":
                # Remove the staging directory from the filepath and
                # form the destination URL
                unstageTargetPath = p.target[len(outdir):]
                destUrl = '/'.join(s.strip('/') for s in [destBucket, unstageTargetPath])
                # p.resolved[7:] strips the "file://" prefix.
                file_store.exportFile(p.resolved[7:], destUrl)
            continue
        if not os.path.exists(os.path.dirname(p.target)):
            os.makedirs(os.path.dirname(p.target), 0o0755)
        if p.type == "File":
            file_store.exportFile(p.resolved[7:], "file://" + p.target)
        elif p.type == "Directory" and not os.path.exists(p.target):
            os.makedirs(p.target, 0o0755)
        elif p.type == "CreateFile":
            # A file literal: p.resolved holds its contents, not a path.
            with open(p.target, "wb") as n:
                n.write(p.resolved.encode("utf-8"))

    def _check_adjust(f):
        # Rewrite each File/Directory location to its staged target URI and
        # drop inline contents now that the file exists on disk.
        f["location"] = schema_salad.ref_resolver.file_uri(
            pm.mapper(f["location"])[1])
        if "contents" in f:
            del f["contents"]
        return f

    visit_class(cwljob, ("File", "Directory"), _check_adjust)
python
{ "resource": "" }
q28826
_makeNestedTempDir
train
def _makeNestedTempDir(top, seed, levels=2): """ Gets a temporary directory in the hierarchy of directories under a given top directory. This exists to avoid placing too many temporary directories under a single top in a flat structure, which can slow down metadata updates such as deletes on the local file system. The seed parameter allows for deterministic placement of the created directory. The seed is hashed into hex digest and the directory structure is created from the initial letters of the digest. :param top : string, top directory for the hierarchy :param seed : string, the hierarchy will be generated from this seed string :rtype : string, path to temporary directory - will be created when necessary. """ # Valid chars for the creation of temporary directories validDirs = hashlib.md5(six.b(str(seed))).hexdigest() tempDir = top for i in range(max(min(levels, len(validDirs)), 1)): tempDir = os.path.join(tempDir, validDirs[i]) if not os.path.exists(tempDir): try: os.makedirs(tempDir) except os.error: if not os.path.exists(tempDir): # In the case that a collision occurs and # it is created while we wait then we ignore raise return tempDir
python
{ "resource": "" }
q28827
remove_pickle_problems
train
def remove_pickle_problems(obj):
    """
    Strip attributes that break pickling (cwltool's doc_loader), recursing
    through embedded tools and workflow steps. Mutates obj and returns it.
    """
    if hasattr(obj, "doc_loader"):
        obj.doc_loader = None
    if hasattr(obj, "embedded_tool"):
        obj.embedded_tool = remove_pickle_problems(obj.embedded_tool)
    if hasattr(obj, "steps"):
        cleaned_steps = []
        for step in obj.steps:
            cleaned_steps.append(remove_pickle_problems(step))
        obj.steps = cleaned_steps
    return obj
python
{ "resource": "" }
q28828
cleanTempDirs
train
def cleanTempDirs(job):
    """Remove temporary directories created by a succeeded CWLJob.

    Bug fix: the original condition was ``job is CWLJob``, an identity
    comparison between an *instance* and the *class*, which is always False,
    so temp directories were never cleaned. ``isinstance`` is the intended
    check.

    :param job: a job object; only CWLJob instances carry ``openTempDirs``.
    """
    if isinstance(job, CWLJob) and job._succeeded:  # Only CWLJobs have this attribute.
        for tempDir in job.openTempDirs:
            if os.path.exists(tempDir):
                shutil.rmtree(tempDir)
        job.openTempDirs = []
python
{ "resource": "" }
q28829
StepValueFrom.do_eval
train
def do_eval(self, inputs, ctx):
    """Evaluate the stored valueFrom expression.

    Delegates to cwltool's expression evaluator, passing *inputs* as the job
    inputs, the requirements captured at construction (self.req), and *ctx*
    as the expression's context value.
    """
    return cwltool.expression.do_eval(
        self.expr, inputs, self.req, None, None, {}, context=ctx)
python
{ "resource": "" }
q28830
DefaultWithSource.resolve
train
def resolve(self):
    """Return the value from the linked source when it is truthy, else the default."""
    source = self.source
    if source:
        key, promises = source[0], source[1]
        value = promises[key]
        if value:
            return value
    return self.default
python
{ "resource": "" }
q28831
find
train
def find(basedir, string):
    """
    Walk basedir recursively and return the paths of all files whose
    basename matches the fnmatch pattern *string*.
    """
    hits = []
    for dirpath, _dirnames, filenames in os.walk(basedir):
        hits.extend(os.path.join(dirpath, name)
                    for name in fnmatch.filter(filenames, string))
    return hits
python
{ "resource": "" }
q28832
find_first_match
train
def find_first_match(basedir, string):
    """
    Return the first file under basedir matching *string*, or the empty
    list when nothing matches (mirrors find()'s empty result).
    """
    hits = find(basedir, string)
    if hits:
        return hits[0]
    return hits
python
{ "resource": "" }
q28833
tokenize_conf_stream
train
def tokenize_conf_stream(conf_handle):
    """
    Yield (key, value) tuples, stripped of whitespace, for each KEY=VALUE
    line in an LSF config stream. Comment lines (leading '#') and lines
    without exactly one '=' are skipped.
    """
    for raw_line in conf_handle:
        if raw_line.startswith("#"):
            continue
        parts = raw_line.split("=")
        if len(parts) == 2:
            yield (parts[0].strip(), parts[1].strip())
python
{ "resource": "" }
q28834
apply_bparams
train
def apply_bparams(fn):
    """
    Run ``bparams -a`` and apply *fn* to its output lines.

    :param fn: callable taking a list of output lines.
    :return: fn's result, or None when bparams is unavailable or fails.

    Fix: the original used a bare ``except:``, which also swallowed
    SystemExit/KeyboardInterrupt; catch only command-failure errors.
    """
    cmd = ["bparams", "-a"]
    try:
        output = subprocess.check_output(cmd).decode('utf-8')
    except (OSError, subprocess.CalledProcessError):
        # bparams missing from PATH, or it exited non-zero: caller falls back.
        return None
    return fn(output.split("\n"))
python
{ "resource": "" }
q28835
apply_lsadmin
train
def apply_lsadmin(fn):
    """
    Run ``lsadmin showconf lim`` and apply *fn* to its output lines.

    :param fn: callable taking a list of output lines.
    :return: fn's result, or None when lsadmin is unavailable or fails.

    Fix: the original used a bare ``except:``, which also swallowed
    SystemExit/KeyboardInterrupt; catch only command-failure errors.
    """
    cmd = ["lsadmin", "showconf", "lim"]
    try:
        output = subprocess.check_output(cmd).decode('utf-8')
    except (OSError, subprocess.CalledProcessError):
        # lsadmin missing from PATH, or it exited non-zero: caller falls back.
        return None
    return fn(output.split("\n"))
python
{ "resource": "" }
q28836
get_lsf_units
train
def get_lsf_units(resource=False):
    """
    Look up LSF_UNITS_FOR_LIMITS, preferring the value from bparams, then
    lsadmin, then the lsf.conf file. Falls back to a default when no source
    provides a value.
    """
    lookups = (
        lambda: apply_bparams(get_lsf_units_from_stream),
        lambda: apply_lsadmin(get_lsf_units_from_stream),
        lambda: apply_conf_file(get_lsf_units_from_stream, LSF_CONF_FILENAME),
    )
    for lookup in lookups:
        units = lookup()
        if units:
            return units

    # -R usage units are in MB, not KB by default
    return DEFAULT_RESOURCE_UNITS if resource else DEFAULT_LSF_UNITS
python
{ "resource": "" }
q28837
parse_memory
train
def parse_memory(mem, resource):
    """
    Convert a memory request *mem* into the cluster's configured LSF unit.

    :param mem: memory amount (interpreted in MB before conversion)
    :param resource: True when formatting a -R resource string
    """
    unit = get_lsf_units(resource=resource)
    megabytes = float(mem) * 1024
    return convert_mb(megabytes, unit)
python
{ "resource": "" }
q28838
per_core_reservation
train
def per_core_reservation():
    """
    Return True if the cluster is configured for reservations to be per core,
    False if per job.

    The setting is looked up via bparams first, then lsadmin, then the
    lsb.params file; a missing or non-'Y' value means per-job reservation.

    Fix: deduplicated the three identical branch bodies and removed the
    unreachable trailing ``return False`` the original carried after an
    if/else that already returned on both paths.
    """
    for lookup in (apply_bparams, apply_lsadmin):
        per_core = lookup(per_core_reserve_from_stream)
        if per_core:
            return per_core.upper() == "Y"

    per_core = apply_conf_file(per_core_reserve_from_stream, LSB_PARAMS_FILENAME)
    return bool(per_core and per_core.upper() == "Y")
python
{ "resource": "" }
q28839
addOptions
train
def addOptions(parser, config=Config()):
    """
    Adds toil options to a parser object. Only argparse.ArgumentParser
    instances are supported; anything else raises RuntimeError.
    """
    addLoggingOptions(parser)  # This adds the logging stuff.
    if isinstance(parser, ArgumentParser):
        def addGroup(headingString, bodyString):
            group = parser.add_argument_group(headingString, bodyString)
            return group.add_argument
        # Custom type so arg=True/False parses as a real bool.
        parser.register("type", "bool", lambda v: v.lower() == "true")
        _addOptions(addGroup, config)
    else:
        raise RuntimeError("Unanticipated class passed to addOptions(), %s. Expecting "
                           "argparse.ArgumentParser" % parser.__class__)
python
{ "resource": "" }
q28840
parseSetEnv
train
def parseSetEnv(l):
    """
    Parses a list of "NAME=VALUE" or bare "NAME" strings into a dictionary.
    Bare names map to None; later entries overwrite earlier ones; only the
    first '=' splits, so values may themselves contain '='.

    :type l: list[str]
    :rtype: dict[str,str]
    :raises ValueError: if any entry has an empty name.

    >>> parseSetEnv(['a=b', 'c'])
    {'a': 'b', 'c': None}
    >>> parseSetEnv(['a=b=c'])
    {'a': 'b=c'}
    """
    result = dict()
    for entry in l:
        if '=' in entry:
            name, _, value = entry.partition('=')
        else:
            name, value = entry, None
        if not name:
            raise ValueError('Empty name')
        result[name] = value
    return result
python
{ "resource": "" }
q28841
getDirSizeRecursively
train
def getDirSizeRecursively(dirPath):
    """
    Return the cumulative number of bytes occupied on disk by the files under
    dirPath, computed with ``du``.

    Raises subprocess.CalledProcessError if a folder or file cannot be
    accessed due to insufficient permissions, so this should only be called
    on the jobStore (whose contents have appropriate permissions).

    BLOCKSIZE='512' is used instead of the cleaner --block-size=1 because
    macOS's du does not support the latter; the 512-byte block count is then
    multiplied back up to bytes.

    :param str dirPath: A valid path to a directory or file.
    :return: Total size, in bytes, of the file or directory at dirPath.
    """
    # du is often faster than os.lstat(), sometimes significantly so.
    env = dict(os.environ, BLOCKSIZE='512')
    duOutput = subprocess.check_output(['du', '-s', dirPath], env=env).decode('utf-8')
    blocks = int(duOutput.split()[0])
    return blocks * 512
python
{ "resource": "" }
q28842
getFileSystemSize
train
def getFileSystemSize(dirPath):
    """
    Return the free space and total size of the file system hosting dirPath.

    :param str dirPath: A valid path to a directory.
    :return: (freeSpace, diskSize) in bytes
    :rtype: tuple
    """
    assert os.path.exists(dirPath)
    stats = os.statvfs(dirPath)
    blockSize = stats.f_frsize
    freeSpace = blockSize * stats.f_bavail
    diskSize = blockSize * stats.f_blocks
    return freeSpace, diskSize
python
{ "resource": "" }
q28843
Toil.restart
train
def restart(self):
    """
    Restarts a workflow that has been interrupted.

    :return: The root job's return value
    :raises ToilRestartException: if the workflow was not started with
        the --restart option.
    """
    self._assertContextManagerUsed()
    self.writePIDFile()
    if not self.config.restart:
        raise ToilRestartException('A Toil workflow must be initiated with Toil.start(), '
                                   'not restart().')

    from toil.job import JobException
    try:
        self._jobStore.loadRootJob()
    except JobException:
        # The root job is gone, so the workflow already ran to completion;
        # return the cached result rather than re-running anything.
        logger.warning(
            'Requested restart but the workflow has already been completed; allowing exports to rerun.')
        return self._jobStore.getRootJobReturnValue()

    self._batchSystem = self.createBatchSystem(self.config)
    self._setupAutoDeployment()
    try:
        # Prepare the environment for the workers, then resume the main
        # loop from the cleaned-up job graph.
        self._setBatchSystemEnvVars()
        self._serialiseEnv()
        self._cacheAllJobs()
        self._setProvisioner()
        rootJobGraph = self._jobStore.clean(jobCache=self._jobCache)
        return self._runMainLoop(rootJobGraph)
    finally:
        self._shutdownBatchSystem()
python
{ "resource": "" }
q28844
Toil.getJobStore
train
def getJobStore(cls, locator):
    """
    Create an instance of the concrete job store implementation that matches the given
    locator.

    :param str locator: The location of the job store to be represent by the instance

    :return: an instance of a concrete subclass of AbstractJobStore
    :rtype: toil.jobStores.abstractJobStore.AbstractJobStore
    """
    name, rest = cls.parseLocator(locator)
    # Imports are deliberately local so only the selected backend's
    # dependencies are loaded.
    if name == 'file':
        from toil.jobStores.fileJobStore import FileJobStore as jobStoreClass
    elif name == 'aws':
        from toil.jobStores.aws.jobStore import AWSJobStore as jobStoreClass
    elif name == 'azure':
        from toil.jobStores.azureJobStore import AzureJobStore as jobStoreClass
    elif name == 'google':
        from toil.jobStores.googleJobStore import GoogleJobStore as jobStoreClass
    else:
        raise RuntimeError("Unknown job store implementation '%s'" % name)
    return jobStoreClass(rest)
python
{ "resource": "" }
q28845
Toil.createBatchSystem
train
def createBatchSystem(config):
    """
    Creates an instance of the batch system specified in the given config.

    :param toil.common.Config config: the current configuration
    :rtype: batchSystems.abstractBatchSystem.AbstractBatchSystem
    :return: an instance of a concrete subclass of AbstractBatchSystem
    :raises RuntimeError: if the batch system name is unknown, or if it does
        not support worker cleanup while caching is enabled.
    """
    kwargs = dict(config=config,
                  maxCores=config.maxCores,
                  maxMemory=config.maxMemory,
                  maxDisk=config.maxDisk)

    from toil.batchSystems.registry import batchSystemFactoryFor

    try:
        factory = batchSystemFactoryFor(config.batchSystem)
        batchSystemClass = factory()
    except Exception:
        # Fix: was a bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt and masked unrelated errors.
        raise RuntimeError('Unrecognised batch system: %s' % config.batchSystem)

    if not config.disableCaching and not batchSystemClass.supportsWorkerCleanup():
        raise RuntimeError('%s currently does not support shared caching.  Set the '
                           '--disableCaching flag if you want to '
                           'use this batch system.' % config.batchSystem)
    # Insert a space between camelCase words for a readable log message.
    # Fix: raw strings for the regex replacement (\g<1> is an invalid escape
    # in a plain string literal).
    logger.debug('Using the %s' %
                 re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", batchSystemClass.__name__).lower())
    return batchSystemClass(**kwargs)
python
{ "resource": "" }
q28846
Toil._setupAutoDeployment
train
def _setupAutoDeployment(self, userScript=None):
    """
    Determine the user script, save it to the job store and inject a reference to the
    saved copy into the batch system such that it can auto-deploy the resource on the
    worker nodes.

    :param toil.resource.ModuleDescriptor userScript: the module descriptor referencing
           the user script. If None, it will be looked up in the job store.
    """
    if userScript is not None:
        # This branch is hit when a workflow is being started
        if userScript.belongsToToil:
            # A script shipped with Toil itself is already on every worker.
            logger.debug('User script %s belongs to Toil. No need to auto-deploy it.', userScript)
            userScript = None
        else:
            if (self._batchSystem.supportsAutoDeployment() and
                    not self.config.disableAutoDeployment):
                # Note that by saving the ModuleDescriptor, and not the Resource we allow for
                # redeploying a potentially modified user script on workflow restarts.
                with self._jobStore.writeSharedFileStream('userScript') as f:
                    pickle.dump(userScript, f, protocol=pickle.HIGHEST_PROTOCOL)
            else:
                from toil.batchSystems.singleMachine import SingleMachineBatchSystem
                if not isinstance(self._batchSystem, SingleMachineBatchSystem):
                    # Without auto-deployment the script must already exist on
                    # every worker node; warn the user about that requirement.
                    logger.warn('Batch system does not support auto-deployment. The user '
                                'script %s will have to be present at the same location on '
                                'every worker.', userScript)
                userScript = None
    else:
        # This branch is hit on restarts
        from toil.jobStores.abstractJobStore import NoSuchFileException
        try:
            with self._jobStore.readSharedFileStream('userScript') as f:
                userScript = safeUnpickleFromStream(f)
        except NoSuchFileException:
            logger.debug('User script neither set explicitly nor present in the job store.')
            userScript = None
    if userScript is None:
        logger.debug('No user script to auto-deploy.')
    else:
        logger.debug('Saving user script %s as a resource', userScript)
        userScriptResource = userScript.saveAsResourceTo(self._jobStore)
        logger.debug('Injecting user script %s into batch system.', userScriptResource)
        self._batchSystem.setUserScript(userScriptResource)
python
{ "resource": "" }
q28847
Toil.importFile
train
def importFile(self, srcUrl, sharedFileName=None):
    """
    Import the file at the given URL into the job store.

    See :func:`toil.jobStores.abstractJobStore.AbstractJobStore.importFile` for a
    full description
    """
    self._assertContextManagerUsed()
    jobStore = self._jobStore
    return jobStore.importFile(srcUrl, sharedFileName=sharedFileName)
python
{ "resource": "" }
q28848
Toil._setBatchSystemEnvVars
train
def _setBatchSystemEnvVars(self):
    """
    Propagate the environment variables required by the job store, plus those
    given on the command line, to the batch system.
    """
    for envDict in (self._jobStore.getEnv(), self.config.environment):
        for name, value in iteritems(envDict):
            self._batchSystem.setEnv(name, value)
python
{ "resource": "" }
q28849
Toil._serialiseEnv
train
def _serialiseEnv(self):
    """
    Pickle this process's environment into a globally accessible shared file
    so workers can reproduce it.
    """
    environment = dict(os.environ)
    with self._jobStore.writeSharedFileStream("environment.pickle") as envFile:
        pickle.dump(environment, envFile, pickle.HIGHEST_PROTOCOL)
    logger.debug("Written the environment for the jobs to the environment file")
python
{ "resource": "" }
q28850
Toil._cacheAllJobs
train
def _cacheAllJobs(self):
    """
    Download every job in the current job store into self._jobCache,
    keyed by job store ID.
    """
    logger.debug('Caching all jobs in job store')
    cache = {}
    for jobGraph in self._jobStore.jobs():
        cache[jobGraph.jobStoreID] = jobGraph
    self._jobCache = cache
    logger.debug('{} jobs downloaded.'.format(len(cache)))
python
{ "resource": "" }
q28851
Toil.getWorkflowDir
train
def getWorkflowDir(workflowID, configWorkDir=None):
    """
    Returns a path to the directory where worker directories and the cache will be located
    for this workflow.

    :param str workflowID: Unique identifier for the workflow
    :param str configWorkDir: Value passed to the program using the --workDir flag
    :return: Path to the workflow directory
    :rtype: str
    :raises RuntimeError: if the chosen work directory does not exist.
    """
    import errno
    workDir = configWorkDir or os.getenv('TOIL_WORKDIR') or tempfile.gettempdir()
    if not os.path.exists(workDir):
        raise RuntimeError("The directory specified by --workDir or TOIL_WORKDIR (%s) does not "
                           "exist." % workDir)
    # Create the workflow dir, make it unique to each host in case workDir is on a shared FS.
    # This prevents workers on different nodes from erasing each other's directories.
    workflowDir = os.path.join(workDir, 'toil-%s-%s' % (workflowID, getNodeID()))
    try:
        # Directory creation is atomic
        os.mkdir(workflowDir)
    except OSError as err:
        # Fix: compare against errno.EEXIST instead of the magic number 17.
        if err.errno != errno.EEXIST:
            # The directory exists if a previous worker set it up.
            raise
    else:
        logger.debug('Created the workflow directory at %s' % workflowDir)
    return workflowDir
python
{ "resource": "" }
q28852
Toil._shutdownBatchSystem
train
def _shutdownBatchSystem(self):
    """
    Shut down the current batch system, logging how long the shutdown took.
    """
    assert self._batchSystem is not None
    startTime = time.time()
    logger.debug('Shutting down batch system ...')
    self._batchSystem.shutdown()
    elapsed = time.time() - startTime
    logger.debug('... finished shutting down the batch system in %s seconds.' % elapsed)
python
{ "resource": "" }
q28853
Toil.writePIDFile
train
def writePIDFile(self):
    """
    Write the pid of this process to a file in the jobstore.

    Overwriting the current contents of pid.log is a feature, not a bug of
    this method. Other methods rely on always having the most current pid
    available, so there is no reason to store any old pids.
    """
    pidBytes = str(os.getpid()).encode('utf-8')
    with self._jobStore.writeSharedFileStream('pid.log') as pidFile:
        pidFile.write(pidBytes)
python
{ "resource": "" }
q28854
AnalyzeWDL.find_asts
train
def find_asts(self, ast_root, name):
    '''
    Collect every AST node with the given name, searching the entire subtree
    under ast_root.

    A function borrowed from scottfrazer. Thank you Scott Frazer!

    :param ast_root: The WDL AST (or any portion of it) to search.
    :param name: The name of the subtree you're looking for, like "Task".
    :return: nodes representing the AST subtrees matching the "name" given.
    '''
    found = []
    if isinstance(ast_root, wdl_parser.AstList):
        for child in ast_root:
            found.extend(self.find_asts(child, name))
    elif isinstance(ast_root, wdl_parser.Ast):
        if ast_root.name == name:
            found.append(ast_root)
        for attr in ast_root.attributes.values():
            found.extend(self.find_asts(attr, name))
    return found
python
{ "resource": "" }
q28855
AnalyzeWDL.dict_from_JSON
train
def dict_from_JSON(self, JSON_file):
    '''
    Load a WDL-mapped JSON file of variable bindings into self.json_dict.

    String values are wrapped in double quotes so they round-trip as WDL
    string literals; everything else is stored as-is.

    :param JSON_file: A required JSON file containing WDL variable bindings.
    :return: self.json_dict, purely for unittests.
    '''
    # TODO: Add context support for variables within multiple wdl files
    with open(JSON_file) as data_file:
        bindings = json.load(data_file)

    for key in bindings:
        value = bindings[key]
        if isinstance(value, basestring):
            self.json_dict[key] = '"' + value + '"'
        else:
            self.json_dict[key] = value
    return self.json_dict
python
{ "resource": "" }
q28856
AnalyzeWDL.create_tasks_dict
train
def create_tasks_dict(self, ast):
    '''
    Parse every "Task" node in the AST into self.tasks_dictionary, keyed by
    task name.

    :return: self.tasks_dictionary; returning it is only necessary for
             unittests.
    '''
    for task_ast in self.find_asts(ast, 'Task'):
        self.parse_task(task_ast)
    return self.tasks_dictionary
python
{ "resource": "" }
q28857
AnalyzeWDL.parse_task
train
def parse_task(self, task):
    '''
    Parses a WDL task AST subtree into self.tasks_dictionary.

    Four sections are examined:

    1. Declarations (e.g. string x = 'helloworld')
    2. Commandline (a bash command with dynamic variables inserted)
    3. Runtime (docker image; disk; CPU; RAM; etc.)
    4. Outputs (expected return values/files)

    :param task: An AST subtree of a WDL "Task".
    :return: Returns nothing but adds a task to the self.tasks_dictionary
             necessary for much of the parser.
    '''
    task_name = task.attributes["name"].source_string
    task_entry = self.tasks_dictionary.setdefault(task_name, OrderedDict())

    # task declarations
    task_entry['inputs'] = [self.parse_task_declaration(d)
                            for d in task.attr("declarations")]

    for section in task.attr("sections"):
        # task commandline entries section [command(s) to run]
        if section.name == "RawCommand":
            task_entry['raw_commandline'] = self.parse_task_rawcommand(section)
        # task runtime section (docker image; disk; CPU; RAM; etc.)
        if section.name == "Runtime":
            task_entry['runtime'] = self.parse_task_runtime(section.attr("map"))
        # task output filenames section (expected return values/files)
        if section.name == "Outputs":
            task_entry['outputs'] = self.parse_task_outputs(section)
python
{ "resource": "" }
q28858
AnalyzeWDL.parse_task_declaration
train
def parse_task_declaration(self, declaration_subAST):
    '''
    Parses the declaration section of the WDL task AST subtree.

    Examples:  'String my_name', 'Int two_chains_i_mean_names = 0'

    :param declaration_subAST: Some subAST representing a task declaration
                               like: 'String file_name'
    :return: (var_name, var_type, var_expressn), e.g. for 'String file_name':
             ('file_name', 'String', None)
    '''
    name = self.parse_declaration_name(declaration_subAST.attr("name"))
    declared_type = self.parse_declaration_type(declaration_subAST.attr("type"))
    expression = self.parse_declaration_expressn(declaration_subAST.attr("expression"), es='')
    return (name, declared_type, expression)
python
{ "resource": "" }
q28859
AnalyzeWDL.parse_task_rawcommand
train
def parse_task_rawcommand(self, rawcommand_subAST):
    '''
    Parses the rawcommand section of the WDL task AST subtree.

    Task "rawcommands" are divided into many parts.  There are 2 types of
    parts: normal strings, & variables that can serve as changeable inputs.

    The following example command:
        'echo ${variable1} ${variable2} > output_file.txt'

    Has 5 parts:
                 Normal  String: 'echo '
                 Variable Input: variable1
                 Normal  String: ' '
                 Variable Input: variable2
                 Normal  String: ' > output_file.txt'

    Variables can also have additional conditions, like 'sep', which is like
    the python ''.join() function and in WDL looks like: ${sep=" -V " GVCFs}
    and would be translated as: ' -V '.join(GVCFs).

    :param rawcommand_subAST: A subAST representing some bash command.
    :return: A list=[] of tuples=() representing the parts of the command:
         e.g. [(command_var, command_type, additional_conditions_list), ...]
              Where: command_var = 'GVCFs'
                     command_type = 'variable'
                     command_actions = {'sep': ' -V '}
    '''
    command_array = []
    for code_snippet in rawcommand_subAST.attributes["parts"]:

        # normal string; wrapped in a raw triple-quoted literal so shell
        # escapes survive verbatim in the generated python.
        if isinstance(code_snippet, wdl_parser.Terminal):
            command_var = "r'''" + code_snippet.source_string + "'''"
        # a variable like ${dinosaurDNA}
        if isinstance(code_snippet, wdl_parser.Ast):
            if code_snippet.name == 'CommandParameter':
                # change in the future?  seems to be a different parameter but works for all cases it seems?
                code_expr = self.parse_declaration_expressn(code_snippet.attr('expr'), es='')
                code_attributes = self.parse_task_rawcommand_attributes(code_snippet.attr('attributes'))
                command_var = self.modify_cmd_expr_w_attributes(code_expr, code_attributes)
            # NOTE(review): an Ast snippet that is not a 'CommandParameter'
            # falls through and re-appends the previous command_var — confirm
            # whether that case can occur in practice.
        if isinstance(code_snippet, wdl_parser.AstList):
            raise NotImplementedError
        command_array.append(command_var)
    return command_array
python
{ "resource": "" }
q28860
AnalyzeWDL.parse_task_runtime
train
def parse_task_runtime(self, runtime_subAST):
    '''
    Parses the runtime section of the WDL task AST subtree.

    The task "runtime" section currently supports context fields for a
    docker container, CPU resources, RAM resources, and disk resources.

    :param runtime_subAST: A subAST representing runtime parameters.
    :return: An OrderedDict of runtime attributes, for example:
             {'docker': 'quay.io/encode-dcc/map:v1.0',
              'cpu': '2',
              'memory': '17.1 GB',
              'disks': 'local-disk 420 HDD'}
    '''
    runtime_attributes = OrderedDict()
    if isinstance(runtime_subAST, (wdl_parser.Terminal, wdl_parser.Ast)):
        raise NotImplementedError
    elif isinstance(runtime_subAST, wdl_parser.AstList):
        for pair_ast in runtime_subAST:
            key = self.parse_task_runtime_key(pair_ast.attr('key'))
            value = self.parse_declaration_expressn(pair_ast.attr('value'), es='')
            if value.startswith('"'):
                # Strip the surrounding quotes and convert any embedded
                # WDL interpolation to a python format string.
                value = self.translate_wdl_string_to_python_string(value[1:-1])
            runtime_attributes[key] = value
    return runtime_attributes
python
{ "resource": "" }
q28861
AnalyzeWDL.parse_task_outputs
train
def parse_task_outputs(self, i):
    '''
    Parse the WDL output section.

    Outputs are like declarations, with a type, name, and value, e.g.
    ``Int num = 7`` -> ('num', 'Int', '7').  Expressions that are not
    already parenthesized are run through the WDL-to-python string
    translator.

    :return: A list of (var_name, var_type, var_expressn) tuples
             representing the outputs generated by the job/task.
    '''
    outputs = []
    for node in i.attributes['attributes']:
        if node.name != 'Output':
            raise NotImplementedError
        name = self.parse_declaration_name(node.attr("name"))
        output_type = self.parse_declaration_type(node.attr("type"))
        expression = self.parse_declaration_expressn(node.attr("expression"),
                                                     es='',
                                                     output_expressn=True)
        if not (expression.startswith('(') and expression.endswith(')')):
            expression = self.translate_wdl_string_to_python_string(expression)
        outputs.append((name, output_type, expression))
    return outputs
python
{ "resource": "" }
q28862
AnalyzeWDL.parse_workflow
train
def parse_workflow(self, workflow):
    '''
    Parses a WDL workflow AST subtree into self.workflows_dictionary.

    Three kinds of sections are handled: declarations, calls (similar to a
    python def), scatters (which map to one or more calls), plus "if"
    blocks.  Scatter/call/if entries are numbered in the order encountered
    using the instance counters.

    :param workflow: An AST subtree of a WDL "Workflow".
    :return: Returns nothing but adds a workflow to the
             self.workflows_dictionary necessary for much of the parser.
    '''
    workflow_name = workflow.attr('name').source_string

    # Lazily fetch/create the workflow's entry so a body with no matching
    # sections leaves workflows_dictionary untouched.
    def entry():
        return self.workflows_dictionary.setdefault(workflow_name, OrderedDict())

    declared = OrderedDict()
    for section in workflow.attr("body"):

        if section.name == "Declaration":
            var_name, var_map = self.parse_workflow_declaration(section)
            declared[var_name] = var_map
            entry()['wf_declarations'] = declared

        if section.name == "Scatter":
            entry()['scatter' + str(self.scatter_number)] = self.parse_workflow_scatter(section)
            self.scatter_number += 1

        if section.name == "Call":
            entry()['call' + str(self.call_number)] = self.parse_workflow_call(section)
            self.call_number += 1

        if section.name == "If":
            entry()['if' + str(self.if_number)] = self.parse_workflow_if(section)
            self.if_number += 1
python
{ "resource": "" }
q28863
AnalyzeWDL.parse_declaration_expressn_ternaryif
train
def parse_declaration_expressn_ternaryif(self, cond, iftrue, iffalse, es):
    """
    Render a WDL ternary as python syntax.

    WDL:    if <condition> then <iftrue> else <iffalse>
    python: <iftrue> if <condition> else <iffalse>

    :param cond: condition AST
    :param iftrue: true-branch AST
    :param iffalse: false-branch AST
    :param es: expression string built so far
    :return: es with the rendered conditional appended
    """
    rendered_true = self.parse_declaration_expressn(iftrue, es='')
    rendered_cond = self.parse_declaration_expressn(cond, es='')
    rendered_false = self.parse_declaration_expressn(iffalse, es='')
    return es + rendered_true + ' if ' + rendered_cond + ' else ' + rendered_false
python
{ "resource": "" }
q28864
AnalyzeWDL.parse_declaration_expressn_tupleliteral
train
def parse_declaration_expressn_tupleliteral(self, values, es):
    """
    Render a WDL tuple literal; identical in python: a parenthesis
    enclosed, comma separated tuple.

    :param values: ASTs of the tuple's elements
    :param es: expression string built so far
    :return: es with the rendered tuple appended
    """
    rendered = [self.parse_declaration_expressn(ast, es='') for ast in values]
    return es + '(' + ', '.join(rendered) + ')'
python
{ "resource": "" }
q28865
AnalyzeWDL.parse_declaration_expressn_arrayliteral
train
def parse_declaration_expressn_arrayliteral(self, values, es):
    """
    Render a WDL array literal; identical in python: a square bracket
    enclosed, comma separated list.

    :param values: ASTs of the array's elements
    :param es: expression string built so far
    :return: es with the rendered list appended
    """
    rendered = [self.parse_declaration_expressn(ast, es='') for ast in values]
    return es + '[' + ', '.join(rendered) + ']'
python
{ "resource": "" }
q28866
AnalyzeWDL.parse_declaration_expressn_operator
train
def parse_declaration_expressn_operator(self, lhsAST, rhsAST, es, operator):
    """
    Join the rendered left- and right-hand expressions with *operator*.

    :param lhsAST: left-hand side AST
    :param rhsAST: right-hand side AST
    :param es: expression string built so far
    :param operator: operator string to place between the two sides
    :return: es with 'lhs operator rhs' appended
    """
    def render(side):
        # Terminals render to (possibly quoted) literals; Asts recurse.
        if isinstance(side, wdl_parser.Terminal):
            if side.str == 'string':
                return '"{string}"'.format(string=side.source_string)
            return '{string}'.format(string=side.source_string)
        elif isinstance(side, wdl_parser.Ast):
            return self.parse_declaration_expressn(side, es='')
        elif isinstance(side, wdl_parser.AstList):
            raise NotImplementedError
        # Any other node type contributes nothing (matches prior behavior).
        return ''

    return es + render(lhsAST) + operator + render(rhsAST)
python
{ "resource": "" }
q28867
AnalyzeWDL.parse_declaration_expressn_fncall
train
def parse_declaration_expressn_fncall(self, name, params, es): """ Parses out cromwell's built-in function calls. Some of these are special and need minor adjustments, for example length(), which is equivalent to python's len() function. Or sub, which is equivalent to re.sub(), but needs a rearrangement of input variables. Known to be supported: sub, size, read_tsv, length, select_first. :param name: :param params: :param es: :return: """ # name of the function if isinstance(name, wdl_parser.Terminal): if name.str: # use python's built-in for length() if name.source_string == 'length': es = es + 'len(' elif name.source_string == 'stdout': return es + 'stdout' else: es = es + name.source_string + '(' else: raise NotImplementedError elif isinstance(name, wdl_parser.Ast): raise NotImplementedError elif isinstance(name, wdl_parser.AstList): raise NotImplementedError # use python's re.sub() for sub() if name.source_string == 'sub': es_params = self.parse_declaration_expressn_fncall_SUBparams(params) else: es_params = self.parse_declaration_expressn_fncall_normalparams(params) if name.source_string == 'glob': return es + es_params + ', tempDir)' elif name.source_string == 'size': return es + es_params + ', fileStore=fileStore)' else: return es + es_params + ')'
python
{ "resource": "" }
q28868
AnalyzeWDL.parse_workflow_declaration
train
def parse_workflow_declaration(self, wf_declaration_subAST):
    '''
    Parses a WDL declaration AST subtree into a name plus a dict holding its
    name, type and value.

    For example:
        var_name = refIndex
        var_map  = {'name': refIndex, 'type': File, 'value': bamIndex}

    :param wf_declaration_subAST: An AST subtree of a workflow declaration.
    :return: var_name, the name of the declared variable
    :return: var_map, an OrderedDict with keys 'name', 'type' and 'value'.
    '''
    name = self.parse_declaration_name(wf_declaration_subAST.attr("name"))
    declared_type = self.parse_declaration_type(wf_declaration_subAST.attr("type"))
    expression = self.parse_declaration_expressn(wf_declaration_subAST.attr("expression"), es='')

    var_map = OrderedDict()
    var_map['name'] = name
    var_map['type'] = declared_type
    var_map['value'] = expression
    return name, var_map
python
{ "resource": "" }
q28869
AWSJobStore._registered
train
def _registered(self): """ A optional boolean property indidcating whether this job store is registered. The registry is the authority on deciding if a job store exists or not. If True, this job store exists, if None the job store is transitioning from True to False or vice versa, if False the job store doesn't exist. :type: bool|None """ # The weird mapping of the SDB item attribute value to the property value is due to # backwards compatibility. 'True' becomes True, that's easy. Toil < 3.3.0 writes this at # the end of job store creation. Absence of either the registry, the item or the # attribute becomes False, representing a truly absent, non-existing job store. An # attribute value of 'False', which is what Toil < 3.3.0 writes at the *beginning* of job # store destruction, indicates a job store in transition, reflecting the fact that 3.3.0 # may leak buckets or domains even though the registry reports 'False' for them. We # can't handle job stores that were partially created by 3.3.0, though. registry_domain = self._bindDomain(domain_name='toil-registry', create=False, block=False) if registry_domain is None: return False else: for attempt in retry_sdb(): with attempt: attributes = registry_domain.get_attributes(item_name=self.namePrefix, attribute_name='exists', consistent_read=True) try: exists = attributes['exists'] except KeyError: return False else: if exists == 'True': return True elif exists == 'False': return None else: assert False
python
{ "resource": "" }
q28870
AWSJobStore._bindBucket
train
def _bindBucket(self, bucket_name, create=False, block=True, versioning=False):
    """
    Return the Boto Bucket object representing the S3 bucket with the given name. If the
    bucket does not exist and `create` is True, it will be created.

    :param str bucket_name: the name of the bucket to bind to

    :param bool create: Whether to create bucket the if it doesn't exist

    :param bool block: If False, return None if the bucket doesn't exist. If True, wait
           until bucket appears. Ignored if `create` is True.

    :param bool versioning: Whether to enable versioning on a newly created bucket.

    :rtype: Bucket|None
    :raises S3ResponseError: If `block` is True and the bucket still doesn't exist after
            the retry timeout expires.
    """
    assert self.minBucketNameLen <= len(bucket_name) <= self.maxBucketNameLen
    assert self.bucketNameRe.match(bucket_name)
    log.debug("Binding to job store bucket '%s'.", bucket_name)

    def bucket_creation_pending(e):
        # Retry on errors caused by S3's eventually-consistent bucket creation:
        # https://github.com/BD2KGenomics/toil/issues/955
        # https://github.com/BD2KGenomics/toil/issues/995
        # https://github.com/BD2KGenomics/toil/issues/1093
        return (isinstance(e, (S3CreateError, S3ResponseError))
                and e.error_code in ('BucketAlreadyOwnedByYou', 'OperationAborted'))

    bucketExisted = True
    for attempt in retry_s3(predicate=bucket_creation_pending):
        with attempt:
            try:
                bucket = self.s3.get_bucket(bucket_name, validate=True)
            except S3ResponseError as e:
                if e.error_code == 'NoSuchBucket':
                    bucketExisted = False
                    log.debug("Bucket '%s' does not exist.", bucket_name)
                    if create:
                        log.debug("Creating bucket '%s'.", bucket_name)
                        location = region_to_bucket_location(self.region)
                        bucket = self.s3.create_bucket(bucket_name, location=location)
                        # The new bucket must live in the region we are configured for.
                        assert self.__getBucketRegion(bucket) == self.region
                    elif block:
                        raise
                    else:
                        return None
                elif e.status == 301:
                    # This is raised if the user attempts to get a bucket in a region outside
                    # the specified one, if the specified one is not `us-east-1`.  The us-east-1
                    # server allows a user to use buckets from any region.
                    bucket = self.s3.get_bucket(bucket_name, validate=False)
                    raise BucketLocationConflictException(self.__getBucketRegion(bucket))
                else:
                    raise
            else:
                # Bucket exists; still reject it if it lives in the wrong region.
                if self.__getBucketRegion(bucket) != self.region:
                    raise BucketLocationConflictException(self.__getBucketRegion(bucket))
            if versioning and not bucketExisted:
                # only call this method on bucket creation
                bucket.configure_versioning(True)
            else:
                # now test for versioning consistency
                # we should never see any of these errors since 'versioning' should always be true
                bucket_versioning = self.__getBucketVersioning(bucket)
                if bucket_versioning != versioning:
                    assert False, 'Cannot modify versioning on existing bucket'
                elif bucket_versioning is None:
                    assert False, 'Cannot use a bucket with versioning suspended'
            if bucketExisted:
                log.debug("Using pre-existing job store bucket '%s'.", bucket_name)
            else:
                log.debug("Created new job store bucket '%s'.", bucket_name)
            return bucket
python
{ "resource": "" }
q28871
AWSJobStore._bindDomain
train
def _bindDomain(self, domain_name, create=False, block=True):
    """
    Return the Boto Domain object representing the SDB domain of the given name. If the
    domain does not exist and `create` is True, it will be created.

    :param str domain_name: the name of the domain to bind to

    :param bool create: True if domain should be created if it doesn't exist

    :param bool block: If False, return None if the domain doesn't exist. If True, wait
           until domain appears. This parameter is ignored if create is True.

    :rtype: Domain|None
    :raises SDBResponseError: If `block` is True and the domain still doesn't exist after
            the retry timeout expires.
    """
    log.debug("Binding to job store domain '%s'.", domain_name)
    # Retry while the domain is reported missing or while SDB itself is unavailable.
    retryargs = dict(predicate=lambda e: no_such_sdb_domain(e) or sdb_unavailable(e))
    if not block:
        # Shorten the retry window when the caller doesn't want to wait for the domain.
        retryargs['timeout'] = 15
    for attempt in retry_sdb(**retryargs):
        with attempt:
            try:
                return self.db.get_domain(domain_name)
            except SDBResponseError as e:
                if no_such_sdb_domain(e):
                    if create:
                        return self.db.create_domain(domain_name)
                    elif block:
                        # Let retry_sdb keep waiting for the domain to appear.
                        raise
                    else:
                        return None
                else:
                    raise
python
{ "resource": "" }
q28872
AWSJobStore.__getBucketVersioning
train
def __getBucketVersioning(self, bucket):
    """
    Map the bucket's versioning status onto True ('Enabled'), None ('Suspended')
    or False (empty status / 'Disabled').

    For newly created buckets get_versioning_status returns an empty dict. In the past
    we've seen None in this case. We map both to a return value of False. Otherwise, the
    'Versioning' entry in the dictionary returned by get_versioning_status can be
    'Enabled', 'Suspended' or 'Disabled' which we map to True, None and False
    respectively. Note that we've never seen a versioning status of 'Disabled', only the
    empty dictionary. Calling configure_versioning with False on a bucket will cause
    get_versioning_status to then return 'Suspended' even on a new bucket that never had
    versioning enabled.
    """
    for attempt in retry_s3():
        with attempt:
            versioning_status = bucket.get_versioning_status()
            if not versioning_status:
                # Empty dict (or falsy value): versioning never touched.
                return False
            return self.versionings[versioning_status['Versioning']]
python
{ "resource": "" }
q28873
flatten
train
def flatten( iterables ):
    """
    Lazily flatten *iterables* by one level, except that string elements are
    yielded whole rather than character by character.
    """
    for element in iterables:
        if not isinstance(element, str):
            for sub_element in element:
                yield sub_element
        else:
            yield element
python
{ "resource": "" }
q28874
BatchSystemSupport.checkResourceRequest
train
def checkResourceRequest(self, memory, cores, disk):
    """
    Verify that a resource request does not exceed this batch system's maxima.

    :param int memory: amount of memory being requested, in bytes
    :param float cores: number of cores being requested
    :param int disk: amount of disk space being requested, in bytes

    :raise InsufficientSystemResources: raised when a resource is requested in an
           amount greater than allowed
    """
    # All three requirements must be specified.
    assert memory is not None
    assert disk is not None
    assert cores is not None
    # Check each resource against its limit, in a fixed order.
    for resource, requested, limit in (('cores', cores, self.maxCores),
                                       ('memory', memory, self.maxMemory),
                                       ('disk', disk, self.maxDisk)):
        if requested > limit:
            raise InsufficientSystemResources(resource, requested, limit)
python
{ "resource": "" }
q28875
BatchSystemLocalSupport.handleLocalJob
train
def handleLocalJob(self, jobNode):  # type: (JobNode) -> Optional[int]
    """
    To be called by issueBatchJobs. Submits the job to the local queue when it
    is a CWL internal job and those are not sent to the workers; returns the
    resulting jobID, or None when the job should go to the main batch system.
    """
    if self.config.runCwlInternalJobsOnWorkers:
        # Internal jobs run on workers like any other job.
        return None
    if not jobNode.jobName.startswith(CWL_INTERNAL_JOBS):
        # Not a CWL internal job; not ours to run locally.
        return None
    return self.localBatch.issueBatchJob(jobNode)
python
{ "resource": "" }
q28876
BatchSystemLocalSupport.getNextJobID
train
def getNextJobID(self):  # type: () -> int
    """
    Atomically reserve and return the next job ID. All IDs are drawn from the
    local batch system's counter so that local and main batch jobs never clash.
    """
    with self.localBatch.jobIndexLock:
        allocated = self.localBatch.jobIndex
        self.localBatch.jobIndex = allocated + 1
    return allocated
python
{ "resource": "" }
q28877
shutdownFileStore
train
def shutdownFileStore(workflowDir, workflowID):
    """
    Run the deferred functions from any prematurely terminated jobs still lingering
    on the system and carry out any necessary filestore-specific cleanup.

    This is a destructive operation and it is important to ensure that there are no
    other running processes on the system that are modifying or using the file store
    for this workflow.

    This is intended to be the last call to the file store in a Toil run, made by the
    batch system cleanup function upon batch system shutdown.

    :param str workflowDir: The path to the cache directory
    :param str workflowID: The workflow ID for this invocation of the workflow
    """
    cacheDir = os.path.join(workflowDir, cacheDirName(workflowID))
    if not os.path.exists(cacheDir):
        # No cache directory: this run used the non-caching file store.
        NonCachingFileStore.shutdown(workflowDir)
        return
    # A cache directory implies a caching run. No cache lock is needed here: this is
    # the final cleanup of the run and no other process should be using the cache.
    CachingFileStore.shutdown(cacheDir)
python
{ "resource": "" }
q28878
DeferredFunction.create
train
def create(cls, function, *args, **kwargs):
    """
    Capture the given callable and arguments as an instance of this class.

    :param callable function: The deferred action to take in the form of a function
    :param tuple args: Non-keyword arguments to the function
    :param dict kwargs: Keyword arguments to the function
    """
    # Serialize as early as possible; deserialization is deferred until invocation.
    # That avoids redundantly deserializing deferred functions for concurrently
    # running jobs whenever the cache state is loaded from disk.
    serialized_function = dill.dumps(function)
    serialized_args = dill.dumps(args)
    serialized_kwargs = dill.dumps(kwargs)
    return cls(serialized_function, serialized_args, serialized_kwargs,
               name=function.__name__,
               module=ModuleDescriptor.forModule(function.__module__).globalize())
python
{ "resource": "" }
q28879
DeferredFunction.invoke
train
def invoke(self):
    """
    Deserialize the captured function and arguments, then call the function.
    """
    logger.debug('Running deferred function %s.', self)
    # The defining module must be importable before dill can rebuild the function.
    self.module.makeLoadable()
    function = dill.loads(self.function)
    positional = dill.loads(self.args)
    keyword = dill.loads(self.kwargs)
    return function(*positional, **keyword)
python
{ "resource": "" }
q28880
FileStore.getLocalTempDir
train
def getLocalTempDir(self):
    """
    Create a new local temporary directory in which to write files that persist
    for the duration of the job.

    :return: The absolute path to a new local temporary directory. This directory
             will exist for the duration of the job only, and is guaranteed to be
             deleted once the job terminates, removing all files it contains
             recursively.
    :rtype: str
    """
    newDir = tempfile.mkdtemp(prefix="t", dir=self.localTempDir)
    return os.path.abspath(newDir)
python
{ "resource": "" }
q28881
FileStore.getLocalTempFile
train
def getLocalTempFile(self):
    """
    Create a new local temporary file that will persist for the duration of the job.

    :return: The absolute path to a local temporary file. This file will exist for
             the duration of the job only, and is guaranteed to be deleted once the
             job terminates.
    :rtype: str
    """
    fd, filePath = tempfile.mkstemp(prefix="tmp", suffix=".tmp", dir=self.localTempDir)
    # Close the OS-level handle right away; callers open the file by path.
    os.close(fd)
    return os.path.abspath(filePath)
python
{ "resource": "" }
q28882
FileStore.writeGlobalFileStream
train
def writeGlobalFileStream(self, cleanup=False): """ Similar to writeGlobalFile, but allows the writing of a stream to the job store. The yielded file handle does not need to and should not be closed explicitly. :param bool cleanup: is as in :func:`toil.fileStore.FileStore.writeGlobalFile`. :return: A context manager yielding a tuple of 1) a file handle which can be written to and 2) the toil.fileStore.FileID of the resulting file in the job store. """ # TODO: Make this work with FileID with self.jobStore.writeFileStream(None if not cleanup else self.jobGraph.jobStoreID) as (backingStream, fileStoreID): # We have a string version of the file ID, and the backing stream. # We need to yield a stream the caller can write to, and a FileID # that accurately reflects the size of the data written to the # stream. We assume the stream is not seekable. # Make and keep a reference to the file ID, which is currently empty fileID = FileID(fileStoreID, 0) # Wrap the stream to increment the file ID's size for each byte written wrappedStream = WriteWatchingStream(backingStream) # When the stream is written to, count the bytes def handle(numBytes): fileID.size += numBytes wrappedStream.onWrite(handle) yield wrappedStream, fileID
python
{ "resource": "" }
q28883
FileStore.readGlobalFile
train
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
    """
    Makes the file associated with fileStoreID available locally. If mutable is True,
    then a copy of the file will be created locally so that the original is not modified
    and does not change the file for other jobs. If mutable is False, then a link can
    be created to the file, saving disk resources.

    If a user path is specified, it is used as the destination. If a user path isn't
    specified, the file is stored in the local temp directory with an encoded name.

    :param toil.fileStore.FileID fileStoreID: job store id for the file
    :param string userPath: a path to the name of file to which the global file will be
           copied or hard-linked (see below).
    :param bool cache: Described in :func:`toil.fileStore.CachingFileStore.readGlobalFile`
    :param bool mutable: Described in :func:`toil.fileStore.CachingFileStore.readGlobalFile`
    :param bool symlink: whether a symlink may be used instead of a hard link or copy.
    :return: An absolute path to a local, temporary copy of the file keyed by fileStoreID.
    :rtype: str
    :raises NotImplementedError: always; subclasses must override this abstract method.
    """
    # Abstract method: concrete file stores (caching and non-caching) implement this.
    raise NotImplementedError()
python
{ "resource": "" }
q28884
FileStore._runDeferredFunctions
train
def _runDeferredFunctions(deferredFunctions): """ Invoke the specified deferred functions and return a list of names of functions that raised an exception while being invoked. :param list[DeferredFunction] deferredFunctions: the DeferredFunctions to run :rtype: list[str] """ failures = [] for deferredFunction in deferredFunctions: try: deferredFunction.invoke() except: failures.append(deferredFunction.name) logger.exception('%s failed.', deferredFunction) return failures
python
{ "resource": "" }
q28885
FileStore.logToMaster
train
def logToMaster(self, text, level=logging.INFO):
    """
    Queue a logging message for the leader; the message is also logged by the
    worker itself at the same level.

    :param text: The string to log.
    :param int level: The logging level.
    """
    message = "LOG-TO-MASTER: " + text
    logger.log(level=level, msg=message)
    self.loggingMessages.append(dict(text=text, level=level))
python
{ "resource": "" }
q28886
FileStore._pidExists
train
def _pidExists(pid): """ This will return True if the process associated with pid is still running on the machine. This is based on stackoverflow question 568271. :param int pid: ID of the process to check for :return: True/False :rtype: bool """ assert pid > 0 try: os.kill(pid, 0) except OSError as err: if err.errno == errno.ESRCH: # ESRCH == No such process return False else: raise else: return True
python
{ "resource": "" }
q28887
CachingFileStore.open
train
def open(self, job):
    """
    This context manager decorated method allows cache-specific operations to be
    conducted before and after the execution of a job in worker.py

    :param job: the job about to be run, whose `disk` attribute gives its disk
                requirement in bytes.
    """
    # Create a working directory for the job
    startingDir = os.getcwd()
    self.localTempDir = makePublicDir(os.path.join(self.localTempDir, str(uuid.uuid4())))
    # Check the status of all jobs on this node. If there are jobs that started and died before
    # cleaning up their presence from the cache state file, restore the cache file to a state
    # where the jobs don't exist.
    with self._CacheState.open(self) as cacheInfo:
        self.findAndHandleDeadJobs(cacheInfo)
        # While we have a lock on the cache file, run a naive check to see if jobs on this node
        # have greatly gone over their requested limits.
        if cacheInfo.sigmaJob < 0:
            logger.warning('Detecting that one or more jobs on this node have used more '
                           'resources than requested. Turn on debug logs to see more'
                           'information on cache usage.')
    # Get the requirements for the job and clean the cache if necessary. cleanCache will
    # ensure that the requirements for this job are stored in the state file.
    jobReqs = job.disk
    # Cleanup the cache to free up enough space for this job (if needed)
    self.cleanCache(jobReqs)
    try:
        os.chdir(self.localTempDir)
        yield
    finally:
        # Report this job's final disk usage relative to what it requested.
        diskUsed = getDirSizeRecursively(self.localTempDir)
        logString = ("Job {jobName} used {percent:.2f}% ({humanDisk}B [{disk}B] used, "
                     "{humanRequestedDisk}B [{requestedDisk}B] requested) at the end of "
                     "its run.".format(jobName=self.jobName,
                                       percent=(float(diskUsed) / jobReqs * 100 if
                                                jobReqs > 0 else 0.0),
                                       humanDisk=bytes2human(diskUsed),
                                       disk=diskUsed,
                                       humanRequestedDisk=bytes2human(jobReqs),
                                       requestedDisk=jobReqs))
        self.logToMaster(logString, level=logging.DEBUG)
        if diskUsed > jobReqs:
            self.logToMaster("Job used more disk than requested. Please reconsider modifying "
                             "the user script to avoid the chance of failure due to "
                             "incorrectly requested resources. " + logString,
                             level=logging.WARNING)
        os.chdir(startingDir)
        self.cleanupInProgress = True
        # Delete all the job specific files and return sizes to jobReqs
        self.returnJobReqs(jobReqs)
        with self._CacheState.open(self) as cacheInfo:
            # Carry out any user-defined cleanup actions
            deferredFunctions = cacheInfo.jobState[self.jobID]['deferredFunctions']
            failures = self._runDeferredFunctions(deferredFunctions)
            for failure in failures:
                self.logToMaster('Deferred function "%s" failed.' % failure, logging.WARN)
            # Finally delete the job from the cache state file
            cacheInfo.jobState.pop(self.jobID)
python
{ "resource": "" }
q28888
CachingFileStore._setupCache
train
def _setupCache(self): """ Setup the cache based on the provided values for localCacheDir. """ # we first check whether the cache directory exists. If it doesn't, create it. if not os.path.exists(self.localCacheDir): # Create a temporary directory as this worker's private cache. If all goes well, it # will be renamed into the cache for this node. personalCacheDir = ''.join([os.path.dirname(self.localCacheDir), '/.ctmp-', str(uuid.uuid4())]) os.mkdir(personalCacheDir, 0o755) self._createCacheLockFile(personalCacheDir) try: os.rename(personalCacheDir, self.localCacheDir) except OSError as err: # The only acceptable FAIL case is that the destination is a non-empty directory # directory. Assuming (it's ambiguous) atomic renaming of directories, if the # dst is non-empty, it only means that another worker has beaten this one to the # rename. if err.errno == errno.ENOTEMPTY: # Cleanup your own mess. It's only polite. shutil.rmtree(personalCacheDir) else: raise # You can't reach here unless a local cache directory has been created successfully with self._CacheState.open(self) as cacheInfo: # Ensure this cache is from the correct attempt at the workflow! If it isn't, we # need to reset the cache lock file if cacheInfo.attemptNumber != self.workflowAttemptNumber: if cacheInfo.nlink == 2: cacheInfo.cached = 0 # cached file sizes are accounted for by job store else: allCachedFiles = [os.path.join(self.localCacheDir, x) for x in os.listdir(self.localCacheDir) if not self._isHidden(x)] cacheInfo.cached = sum([os.stat(cachedFile).st_size for cachedFile in allCachedFiles]) # TODO: Delete the working directories cacheInfo.sigmaJob = 0 cacheInfo.attemptNumber = self.workflowAttemptNumber self.nlinkThreshold = cacheInfo.nlink
python
{ "resource": "" }
q28889
CachingFileStore._createCacheLockFile
train
def _createCacheLockFile(self, tempCacheDir): """ Create the cache lock file file to contain the state of the cache on the node. :param str tempCacheDir: Temporary directory to use for setting up a cache lock file the first time. """ # The nlink threshold is setup along with the first instance of the cache class on the # node. It needs the cache dir to sniff link count for files form the job store. self.setNlinkThreshold(tempCacheDir) # Get the free space on the device freeSpace, _ = getFileSystemSize(tempCacheDir) # Create the cache lock file. open(os.path.join(tempCacheDir, os.path.basename(self.cacheLockFile)), 'w').close() # Setup the cache state file personalCacheStateFile = os.path.join(tempCacheDir, os.path.basename(self.cacheStateFile)) # Setup the initial values for the cache state file in a dict cacheInfo = self._CacheState({ 'nlink': self.nlinkThreshold, 'attemptNumber': self.workflowAttemptNumber, 'total': freeSpace, 'cached': 0, 'sigmaJob': 0, 'cacheDir': self.localCacheDir, 'jobState': {}}) cacheInfo.write(personalCacheStateFile)
python
{ "resource": "" }
q28890
CachingFileStore.decodedFileID
train
def decodedFileID(self, cachedFilePath):
    """
    Decode the name of a cached file back into the job store file ID it encodes.

    :param str cachedFilePath: Path to the cached file
    :return: The jobstore file ID associated with the file
    :rtype: str
    """
    containingDir, encodedName = os.path.split(cachedFilePath)
    assert containingDir == self.localCacheDir, 'Can\'t decode uncached file names'
    # base64 operates on bytes, so round-trip the name through utf-8. It is
    # probably worth, later, converting all file name variables to bytes and
    # not text.
    return base64.urlsafe_b64decode(encodedName.encode('utf-8')).decode('utf-8')
python
{ "resource": "" }
q28891
CachingFileStore.addToCache
train
def addToCache(self, localFilePath, jobStoreFileID, callingFunc, mutable=False):
    """
    Used to process the caching of a file. This depends on whether a file is being
    written to file store, or read from it.
    WRITING
    The file is in localTempDir. It needs to be linked into cache if possible.
    READING
    The file is already in the cache dir. Depending on whether it is modifiable or not,
    does it need to be linked to the required location, or copied. If it is copied, can
    the file still be retained in cache?

    :param str localFilePath: Path to the Source file
    :param jobStoreFileID: jobStoreID for the file
    :param str callingFunc: Who called this function, 'write' or 'read'
    :param bool mutable: See modifiable in readGlobalFile
    """
    assert callingFunc in ('read', 'write')
    # All cache mutation happens under the cache lock.
    with self.cacheLock() as lockFileHandle:
        cachedFile = self.encodedFileID(jobStoreFileID)
        # The file to be cached MUST originate in the environment of the TOIL temp directory
        if (os.stat(self.localCacheDir).st_dev !=
                os.stat(os.path.dirname(localFilePath)).st_dev):
            raise InvalidSourceCacheError('Attempting to cache a file across file systems '
                                          'cachedir = %s, file = %s.' % (self.localCacheDir,
                                                                         localFilePath))
        if not localFilePath.startswith(self.localTempDir):
            raise InvalidSourceCacheError('Attempting a cache operation on a non-local file '
                                          '%s.' % localFilePath)
        if callingFunc == 'read' and mutable:
            # Mutable read: hand the caller a private copy of the cached file.
            shutil.copyfile(cachedFile, localFilePath)
            fileSize = os.stat(cachedFile).st_size
            cacheInfo = self._CacheState._load(self.cacheStateFile)
            cacheInfo.cached += fileSize if cacheInfo.nlink != 2 else 0
            if not cacheInfo.isBalanced():
                # Not enough room to keep both the copy and the cached original;
                # drop the cached original.
                os.remove(cachedFile)
                cacheInfo.cached -= fileSize if cacheInfo.nlink != 2 else 0
                logger.debug('Could not download both download ' +
                             '%s as mutable and add to ' % os.path.basename(localFilePath) +
                             'cache. Hence only mutable copy retained.')
            else:
                logger.debug('CACHE: Added file with ID \'%s\' to the cache.' %
                             jobStoreFileID)
            jobState = self._JobState(cacheInfo.jobState[self.jobID])
            jobState.addToJobSpecFiles(jobStoreFileID, localFilePath, -1, False)
            cacheInfo.jobState[self.jobID] = jobState.__dict__
            cacheInfo.write(self.cacheStateFile)
        else:
            # There are two possibilities, read and immutable, and write. both cases do
            # almost the same thing except for the direction of the os.link hence we're
            # writing them together.
            if callingFunc == 'read':  # and mutable is inherently False
                src = cachedFile
                dest = localFilePath
                # To mirror behaviour of shutil.copyfile
                if os.path.exists(dest):
                    os.remove(dest)
            else:  # write
                src = localFilePath
                dest = cachedFile
            try:
                os.link(src, dest)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
                # If we get the EEXIST error, it can only be from write since in read we are
                # explicitly deleting the file.  This shouldn't happen with the .partial
                # logic hence we raise a cache error.
                raise CacheError('Attempting to recache a file %s.' % src)
            else:
                # Chmod the cached file. Cached files can never be modified.
                os.chmod(cachedFile, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
                # Return the filesize of cachedFile to the job and increase the cached size
                # The values passed here don't matter since rFS looks at the file only for
                # the stat
                self.returnFileSize(jobStoreFileID, localFilePath, lockFileHandle,
                                    fileAlreadyCached=False)
            if callingFunc == 'read':
                logger.debug('CACHE: Read file with ID \'%s\' from the cache.' %
                             jobStoreFileID)
            else:
                logger.debug('CACHE: Added file with ID \'%s\' to the cache.' %
                             jobStoreFileID)
python
{ "resource": "" }
q28892
CachingFileStore.cleanCache
train
def cleanCache(self, newJobReqs):
    """
    Cleanup all files in the cache directory to ensure that at least newJobReqs bytes
    are available for use.

    :param float newJobReqs: the total number of bytes of files allowed in the cache.
    """
    with self._CacheState.open(self) as cacheInfo:
        # Add the new job's disk requirements to the sigmaJobDisk variable
        cacheInfo.sigmaJob += newJobReqs
        # Initialize the job state here. we use a partial in the jobSpecificFiles call so
        # that this entire thing is pickleable. Based on answer by user Nathaniel Gentile at
        # http://stackoverflow.com/questions/2600790
        assert self.jobID not in cacheInfo.jobState
        cacheInfo.jobState[self.jobID] = {
            'jobName': self.jobName,
            'jobReqs': newJobReqs,
            'jobDir': self.localTempDir,
            'jobSpecificFiles': defaultdict(partial(defaultdict, int)),
            'filesToFSIDs': defaultdict(set),
            'pid': os.getpid(),
            'deferredFunctions': []}
        # If the caching equation is balanced, do nothing.
        if cacheInfo.isBalanced():
            return None

        # List of deletable cached files.  A deletable cache file is one
        # that is not in use by any other worker (identified by the number of symlinks to
        # the file)
        allCacheFiles = [os.path.join(self.localCacheDir, x)
                         for x in os.listdir(self.localCacheDir)
                         if not self._isHidden(x)]
        allCacheFiles = [(path, os.stat(path)) for path in allCacheFiles]
        # TODO mtime vs ctime
        deletableCacheFiles = {(path, inode.st_mtime, inode.st_size)
                               for path, inode in allCacheFiles
                               if inode.st_nlink == self.nlinkThreshold}

        # Sort in descending order of mtime so the first items to be popped from the list
        # are the least recently created.
        deletableCacheFiles = sorted(deletableCacheFiles, key=lambda x: (-x[1], -x[2]))
        logger.debug('CACHE: Need %s bytes for new job. Detecting an estimated %s (out of a '
                     'total %s) bytes available for running the new job. The size of the cache '
                     'is %s bytes.', newJobReqs,
                     (cacheInfo.total - (cacheInfo.cached + cacheInfo.sigmaJob - newJobReqs)),
                     cacheInfo.total, cacheInfo.cached)
        logger.debug('CACHE: Evicting files to make room for the new job.')

        # Now do the actual file removal
        totalEvicted = 0
        while not cacheInfo.isBalanced() and len(deletableCacheFiles) > 0:
            # Evict the least-recently-created deletable file first.
            cachedFile, fileCreateTime, cachedFileSize = deletableCacheFiles.pop()
            os.remove(cachedFile)
            cacheInfo.cached -= cachedFileSize if self.nlinkThreshold != 2 else 0
            totalEvicted += cachedFileSize
            assert cacheInfo.cached >= 0
            logger.debug('CACHE: Evicted file with ID \'%s\' (%s bytes)' %
                         (self.decodedFileID(cachedFile), cachedFileSize))
        logger.debug('CACHE: Evicted a total of %s bytes. Available space is now %s bytes.',
                     totalEvicted,
                     (cacheInfo.total - (cacheInfo.cached + cacheInfo.sigmaJob - newJobReqs)))
        # Eviction may still not have freed enough space; that's a hard failure.
        if not cacheInfo.isBalanced():
            raise CacheUnbalancedError()
python
{ "resource": "" }
q28893
CachingFileStore.removeSingleCachedFile
train
def removeSingleCachedFile(self, fileStoreID):
    """
    Removes a single file described by the fileStoreID from the cache forcibly.

    :param fileStoreID: job store ID of the cached file to remove.
    """
    with self._CacheState.open(self) as cacheInfo:
        cachedFile = self.encodedFileID(fileStoreID)
        cachedFileStats = os.stat(cachedFile)
        # We know the file exists because this function was called in the if block.  So we
        # have to ensure nothing has changed since then.
        # The two asserts together require st_nlink == nlinkThreshold exactly.
        assert cachedFileStats.st_nlink <= self.nlinkThreshold, \
            'Attempting to delete a global file that is in use by another job.'

        assert cachedFileStats.st_nlink >= self.nlinkThreshold, \
            'A global file has too FEW links at deletion time. Our link threshold is incorrect!'

        # Remove the file size from the cached file size if the jobstore is not fileJobStore
        # and then delete the file
        os.remove(cachedFile)
        if self.nlinkThreshold != 2:
            cacheInfo.cached -= cachedFileStats.st_size
        if not cacheInfo.isBalanced():
            self.logToMaster('CACHE: The cache was not balanced on removing single file',
                             logging.WARN)
        self.logToMaster('CACHE: Successfully removed file with ID \'%s\'.' % fileStoreID)
        return None
python
{ "resource": "" }
q28894
CachingFileStore._accountForNlinkEquals2
train
def _accountForNlinkEquals2(self, localFilePath):
    """
    This is a utility function that accounts for the fact that if nlinkThreshold == 2, the
    size of the file is accounted for by the file store copy of the file and thus the file
    size shouldn't be added to the cached file sizes.

    :param str localFilePath: Path to the local file that was linked to the file store copy.
    """
    fileStats = os.stat(localFilePath)
    assert fileStats.st_nlink >= self.nlinkThreshold
    with self._CacheState.open(self) as cacheInfo:
        # The bytes are backed by the job store copy, so give them back to the pool.
        cacheInfo.sigmaJob -= fileStats.st_size
        jobState = self._JobState(cacheInfo.jobState[self.jobID])
        jobState.updateJobReqs(fileStats.st_size, 'remove')
python
{ "resource": "" }
q28895
CachingFileStore.returnJobReqs
train
def returnJobReqs(self, jobReqs): """ This function returns the effective job requirements back to the pool after the job completes. It also deletes the local copies of files with the cache lock held. :param float jobReqs: Original size requirement of the job """ # Since we are only reading this job's specific values from the state file, we don't # need a lock jobState = self._JobState(self._CacheState._load(self.cacheStateFile ).jobState[self.jobID]) for x in list(jobState.jobSpecificFiles.keys()): self.deleteLocalFile(x) with self._CacheState.open(self) as cacheInfo: cacheInfo.sigmaJob -= jobReqs
python
{ "resource": "" }
q28896
CachingFileStore.asyncWrite
train
def asyncWrite(self):
    """
    A function to write files asynchronously to the job store such that subsequent jobs
    are not delayed by a long write operation. Runs as a worker-thread loop, consuming
    (fileHandle, jobStoreFileID) work items from self.queue until it receives None.
    """
    try:
        while True:
            try:
                # Block for up to two seconds waiting for a file
                args = self.queue.get(timeout=2)
            except Empty:
                # Check if termination event is signaled
                # (set in the event of an exception in the worker)
                if self._terminateEvent.isSet():
                    raise RuntimeError("The termination flag is set, exiting")
                continue
            # Normal termination condition is getting None from queue
            if args is None:
                break
            inputFileHandle, jobStoreFileID = args
            cachedFileName = self.encodedFileID(jobStoreFileID)
            # Ensure that the harbinger exists in the cache directory and that the PID
            # matches that of this writing thread.
            # If asyncWrite is ported to subprocesses instead of threads in the future,
            # insert logic here to securely overwrite the harbinger file.
            harbingerFile = self.HarbingerFile(self, cachedFileName=cachedFileName)
            assert harbingerFile.exists()
            assert harbingerFile.read() == int(os.getpid())
            # We pass in a fileHandle, rather than the file-name, in case
            # the file itself is deleted. The fileHandle itself should persist
            # while we maintain the open file handle
            with self.jobStore.updateFileStream(jobStoreFileID) as outputFileHandle:
                shutil.copyfileobj(inputFileHandle, outputFileHandle)
            inputFileHandle.close()
            # Remove the file from the lock files
            with self._pendingFileWritesLock:
                self._pendingFileWrites.remove(jobStoreFileID)
            # Remove the harbinger file
            harbingerFile.delete()
    except:
        # Flag the failure so sibling threads and the main thread can bail out too.
        self._terminateEvent.set()
        raise
python
{ "resource": "" }
q28897
CachingFileStore._updateJobWhenDone
train
def _updateJobWhenDone(self):
    """
    Asynchronously update the status of the job on the disk, first waiting \
    until the writing threads have finished and the input blockFn has stopped \
    blocking.

    Acquires ``self.updateSemaphore`` before spawning the updater thread; the
    thread's ``finally`` clause releases it, so ``_blockFn`` (which waits on
    the semaphore) can only return once the update has finished or failed.
    """
    def asyncUpdate():
        # Runs in a separate thread; performs the actual job-store update.
        try:
            # Wait till all file writes have completed:
            # one None sentinel per worker tells each asyncWrite loop to exit.
            for i in range(len(self.workers)):
                self.queue.put(None)
            for thread in self.workers:
                thread.join()
            # Wait till input block-fn returns - in the event of an exception
            # this will eventually terminate
            self.inputBlockFn()
            # Check the terminate event, if set we can not guarantee
            # that the workers ended correctly, therefore we exit without
            # completing the update
            if self._terminateEvent.isSet():
                raise RuntimeError("The termination flag is set, exiting before update")
            # Indicate any files that should be deleted once the update of
            # the job wrapper is completed.
            self.jobGraph.filesToDelete = list(self.filesToDelete)
            # Complete the job
            self.jobStore.update(self.jobGraph)
            # Delete any remnant jobs
            list(map(self.jobStore.delete, self.jobsToDelete))
            # Delete any remnant files
            list(map(self.jobStore.deleteFile, self.filesToDelete))
            # Remove the files to delete list, having successfully removed the files
            if len(self.filesToDelete) > 0:
                self.jobGraph.filesToDelete = []
                # Update, removing emptying files to delete
                self.jobStore.update(self.jobGraph)
        except:
            # Deliberately bare: any failure must set the shared termination
            # flag before propagating so other threads stop too.
            self._terminateEvent.set()
            raise
        finally:
            # Indicate that _blockFn can return
            # This code will always run
            self.updateSemaphore.release()
    # The update semaphore is held while the job is written to the job store
    try:
        self.updateSemaphore.acquire()
        t = Thread(target=asyncUpdate)
        t.start()
    except:
        # This is to ensure that the semaphore is released in a crash to stop a deadlock
        # scenario
        self.updateSemaphore.release()
        raise
python
{ "resource": "" }
q28898
NonCachingFileStore._getAllJobStates
train
def _getAllJobStates(workflowDir):
    """
    Generator function that deserializes and yields the job state for every job on the node,
    one at a time.

    :param str workflowDir: The location of the workflow directory on the node.
    :return: dict with keys (jobName, jobPID, jobDir, deferredFunctions)
    :rtype: dict
    """
    # Local import keeps this static helper self-contained.
    import errno
    # Collect every job state file up front so that jobs which finish (and
    # delete their .jobState file) while we deserialize don't disturb the walk.
    jobStateFiles = []
    for root, dirs, files in os.walk(workflowDir):
        for filename in files:
            if filename == '.jobState':
                jobStateFiles.append(os.path.join(root, filename))
    for filename in jobStateFiles:
        try:
            yield NonCachingFileStore._readJobState(filename)
        except IOError as e:
            # Use the symbolic constant rather than the magic number 2.
            if e.errno == errno.ENOENT:
                # job finished & deleted its jobState file since the jobState
                # files were discovered
                continue
            else:
                raise
python
{ "resource": "" }
q28899
NonCachingFileStore._createJobStateFile
train
def _createJobStateFile(self):
    """
    Create the job state file for the current job and fill in the required values.

    :return: Path to the job state file
    :rtype: str
    """
    statePath = os.path.join(self.localTempDir, '.jobState')
    tempPath = statePath + '.tmp'
    stateInfo = {'jobPID': os.getpid(),
                 'jobName': self.jobName,
                 'jobDir': self.localTempDir,
                 'deferredFunctions': []}
    # Write to a temporary file and rename into place so a reader never
    # observes a partially written state file (rename is atomic on POSIX).
    with open(tempPath, 'wb') as stateFileHandle:
        dill.dump(stateInfo, stateFileHandle)
    os.rename(tempPath, statePath)
    return statePath
python
{ "resource": "" }