_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q28900
WriteWatchingStream.write
train
def write(self, data):
    """Forward *data* to the wrapped stream, then notify listeners.

    Every registered write listener is called with the number of
    bytes/characters that were just written.
    """
    self.backingStream.write(data)
    written = len(data)
    # Send out notifications after the write has happened.
    for callback in self.writeListeners:
        callback(written)
python
{ "resource": "" }
q28901
ServiceManager.scheduleServices
train
def scheduleServices(self, jobGraph):
    """Schedule the services of a job asynchronously.

    When the job's services are running the jobGraph for the job will be
    returned by toil.leader.ServiceManager.getJobGraphsWhoseServicesAreRunning.

    :param toil.jobGraph.JobGraph jobGraph: wrapper of job with services to schedule.
    """
    # Track this jobGraph as one whose services are being brought up.
    self.jobGraphsWithServicesBeingStarted.add(jobGraph)

    # Count every service job plus one for the root job itself.
    self.jobsIssuedToServiceManager += 1 + sum(len(level) for level in jobGraph.services)

    # Hand the jobGraph to the service-starting thread.
    self._jobGraphsWithServicesToStart.put(jobGraph)
python
{ "resource": "" }
q28902
ServiceManager.shutdown
train
def shutdown(self):
    """Cleanly terminate worker threads starting and killing services.

    Will block until all services are started and blocked.
    """
    logger.debug('Waiting for service manager thread to finish ...')
    started = time.time()
    # Tell the starter thread to stop, then wait for it to exit.
    self._terminate.set()
    self._serviceStarter.join()
    # Kill any services still running to avoid deadlock.
    for services in list(self.toilState.servicesIssued.values()):
        self.killServices(services, error=True)
    logger.debug('... finished shutting down the service manager. Took %s seconds',
                 time.time() - started)
python
{ "resource": "" }
q28903
ServiceManager._startServices
train
def _startServices(jobGraphsWithServicesToStart, jobGraphsWithServicesThatHaveStarted,
                   serviceJobsToStart, terminate, jobStore):
    """
    Thread used to schedule services.
    """
    servicesThatAreStarting = set()   # service jobs issued but not yet confirmed started
    remainingForJob = {}              # jobGraph -> count of its services still starting
    serviceToParent = {}              # service job -> the jobGraph that owns it
    while True:
        with throttle(1.0):
            if terminate.is_set():
                logger.debug('Received signal to quit starting services.')
                break
            try:
                jobGraph = jobGraphsWithServicesToStart.get_nowait()
                if len(jobGraph.services) > 1:
                    # Have to fall back to the old blocking behavior to
                    # ensure entire service "groups" are issued as a whole.
                    blockUntilServiceGroupIsStarted(jobGraph,
                                                    jobGraphsWithServicesThatHaveStarted,
                                                    serviceJobsToStart, terminate, jobStore)
                    continue

                # Found a new job that needs to schedule its services.
                for serviceJob in jobGraph.services[0]:
                    serviceToParent[serviceJob] = jobGraph
                remainingForJob[jobGraph] = len(jobGraph.services[0])

                # Issue the service jobs all at once.
                for serviceJob in jobGraph.services[0]:
                    logger.debug("Service manager is starting service job: %s, start ID: %s",
                                 serviceJob, serviceJob.startJobStoreID)
                    serviceJobsToStart.put(serviceJob)

                # We should now start to monitor these services to see if
                # they've started yet.
                servicesThatAreStarting.update(jobGraph.services[0])
            except Empty:
                # No new jobs that need services scheduled.
                pass

            for serviceJob in list(servicesThatAreStarting):
                if not jobStore.fileExists(serviceJob.startJobStoreID):
                    # The start flag file is gone, so the service has started.
                    servicesThatAreStarting.remove(serviceJob)
                    parentJob = serviceToParent.pop(serviceJob)
                    remainingForJob[parentJob] -= 1
                    assert remainingForJob[parentJob] >= 0

            # Find if any jobGraphs have had *all* their services started.
            finished = [jg for jg, left in remainingForJob.items() if left == 0]
            for jg in finished:
                jobGraphsWithServicesThatHaveStarted.put(jg)
                del remainingForJob[jg]
python
{ "resource": "" }
q28904
optimize_spot_bid
train
def optimize_spot_bid(ctx, instance_type, spot_bid):
    """
    Check whether the bid is sane and make an effort to place the instance
    in a sensible (historically stable) zone.
    """
    history = _get_spot_history(ctx, instance_type)
    if history:
        # Warn the user if the bid looks excessive relative to recent prices.
        _check_spot_bid(spot_bid, history)
    zones = ctx.ec2.get_all_zones()
    best_zone = choose_spot_zone(zones, spot_bid, history)
    logger.debug("Placing spot instances in zone %s.", best_zone)
    return best_zone
python
{ "resource": "" }
q28905
_check_spot_bid
train
def _check_spot_bid(spot_bid, spot_history):
    """
    Warn users who are potentially over-paying for instances.

    Note: this checks over the whole region, not a particular zone.
    Logs a warning (it does not raise) if the bid is more than double the
    average spot price over the supplied history.

    :param spot_bid: float
    :type spot_history: list[SpotPriceHistory]

    >>> from collections import namedtuple
    >>> FauxHistory = namedtuple("FauxHistory", ["price", "availability_zone"])
    >>> spot_data = [FauxHistory(0.1, "us-west-2a"),
    ...              FauxHistory(0.2, "us-west-2a"),
    ...              FauxHistory(0.3, "us-west-2b"),
    ...              FauxHistory(0.6, "us-west-2b")]
    >>> _check_spot_bid(0.1, spot_data)
    """
    average = mean([datum.price for datum in spot_history])
    if spot_bid > average * 2:
        # logger.warn is a deprecated alias; logger.warning is the supported API.
        logger.warning("Your bid $ %f is more than double this instance type's average "
                       "spot price ($ %f) over the last week", spot_bid, average)
python
{ "resource": "" }
q28906
checkValidNodeTypes
train
def checkValidNodeTypes(provisioner, nodeTypes):
    """
    Raises if an invalid nodeType is specified for aws, azure, or gce.

    :param str provisioner: 'aws', 'gce', or 'azure' to specify which cloud provisioner used.
    :param nodeTypes: A list of node types.  Example: ['t2.micro', 't2.medium']
    :return: Nothing.  Raises RuntimeError if an invalid nodeType is found.
    """
    if not nodeTypes:
        return
    if not isinstance(nodeTypes, list):
        nodeTypes = [nodeTypes]
    if not isinstance(nodeTypes[0], string_types):
        return
    # check if a valid node type for aws
    from toil.lib.generatedEC2Lists import E2Instances, regionDict
    if provisioner == 'aws':
        from toil.provisioners.aws import getCurrentAWSZone
        currentZone = getCurrentAWSZone()
        if not currentZone:
            currentZone = 'us-west-2'
        else:
            # Strip the trailing zone letter (e.g. 'a' or 'b') to get the region.
            currentZone = currentZone[:-1]
        # check if instance type exists in this region
        for nodeType in nodeTypes:
            if nodeType and ':' in nodeType:
                nodeType = nodeType.split(':')[0]
            if nodeType not in regionDict[currentZone]:
                # They probably misspelled it and can't tell.
                close = get_close_matches(nodeType, regionDict[currentZone], 1)
                helpText = ' Did you mean ' + close[0] + '?' if close else ''
                raise RuntimeError('Invalid nodeType (%s) specified for AWS in region: %s.%s'
                                   '' % (nodeType, currentZone, helpText))
    # Only checks if an AWS nodeType was mistakenly given to the gce/azure provisioners.
    if provisioner in ('gce', 'azure'):
        for nodeType in nodeTypes:
            if nodeType and ':' in nodeType:
                nodeType = nodeType.split(':')[0]
            # A plain membership test replaces the original pattern of raising
            # RuntimeError *inside* a try whose `except KeyError` was meant only
            # for the E2Instances lookup -- clearer and less fragile.
            if nodeType in E2Instances:
                raise RuntimeError("It looks like you've specified an AWS nodeType with the "
                                   "{} provisioner. Please specify an {} nodeType."
                                   "".format(provisioner, provisioner))
python
{ "resource": "" }
q28907
padStr
train
def padStr(s, field=None):
    """Pad the beginning of a string with spaces, if necessary.

    :param str s: the string to pad
    :param field: minimum width, or None for no padding
    :return: *s* left-padded with spaces to at least ``field`` characters
    """
    # str.rjust is a no-op when the string is already wide enough,
    # matching the original manual-padding behavior exactly.
    return s if field is None else s.rjust(field)
python
{ "resource": "" }
q28908
prettyMemory
train
def prettyMemory(k, field=None, isBytes=False):
    """Given input k as kilobytes, return a nicely formatted string.

    :param k: number of kilobytes (or bytes if *isBytes* is True)
    :param field: minimum width to pad the result to, or None
    :param bool isBytes: if True, *k* is in bytes rather than kilobytes
    :return: human-readable size string (K/M/G/T/P suffix)
    """
    if isBytes:
        k /= 1024
    if k < 1024:
        return padStr("%gK" % k, field)
    if k < (1024 * 1024):
        return padStr("%.1fM" % (k / 1024.0), field)
    if k < (1024 * 1024 * 1024):
        return padStr("%.1fG" % (k / 1024.0 / 1024.0), field)
    if k < (1024 * 1024 * 1024 * 1024):
        return padStr("%.1fT" % (k / 1024.0 / 1024.0 / 1024.0), field)
    # Fall through to petabytes for anything larger: the original guarded this
    # case with another `if` and silently returned None for >= 1 EiB inputs.
    return padStr("%.1fP" % (k / 1024.0 / 1024.0 / 1024.0 / 1024.0), field)
python
{ "resource": "" }
q28909
prettyTime
train
def prettyTime(t, field=None):
    """Given input t as seconds, return a nicely formatted string.

    :param t: duration in seconds
    :param field: minimum width to pad the result to, or None
    """
    # old_div(x, 60.) with a float divisor is plain true division, so the
    # Python-2 compatibility shim is dropped without changing any result.
    from math import floor
    pluralDict = {True: "s", False: ""}
    if t < 120:
        return padStr("%ds" % t, field)
    if t < 120 * 60:
        m = floor(t / 60.)
        s = t % 60
        return padStr("%dm%ds" % (m, s), field)
    if t < 25 * 60 * 60:
        h = floor(t / 60. / 60.)
        m = floor((t - (h * 60. * 60.)) / 60.)
        s = t % 60
        return padStr("%dh%gm%ds" % (h, m, s), field)
    if t < 7 * 24 * 60 * 60:
        d = floor(t / 24. / 60. / 60.)
        h = floor((t - (d * 24. * 60. * 60.)) / 60. / 60.)
        m = floor((t - (d * 24. * 60. * 60.) - (h * 60. * 60.)) / 60.)
        s = t % 60
        dPlural = pluralDict[d > 1]
        return padStr("%dday%s%dh%dm%ds" % (d, dPlural, h, m, s), field)
    w = floor(t / 7. / 24. / 60. / 60.)
    d = floor((t - (w * 7 * 24 * 60 * 60)) / 24. / 60. / 60.)
    h = floor((t - (w * 7. * 24. * 60. * 60.) - (d * 24. * 60. * 60.)) / 60. / 60.)
    m = floor((t - (w * 7. * 24. * 60. * 60.) - (d * 24. * 60. * 60.) - (h * 60. * 60.)) / 60.)
    s = t % 60
    wPlural = pluralDict[w > 1]
    dPlural = pluralDict[d > 1]
    return padStr("%dweek%s%dday%s%dh%dm%ds" % (w, wPlural, d, dPlural, h, m, s), field)
python
{ "resource": "" }
q28910
reportTime
train
def reportTime(t, options, field=None):
    """Given t seconds, report back the correct format as string."""
    if options.pretty:
        return prettyTime(t, field=field)
    # Plain fixed-point seconds, optionally right-aligned to *field* columns.
    if field is not None:
        return "%*.2f" % (field, t)
    return "%.2f" % t
python
{ "resource": "" }
q28911
reportMemory
train
def reportMemory(k, options, field=None, isBytes=False):
    """Given k kilobytes, report back the correct format as string."""
    if options.pretty:
        return prettyMemory(int(k), field=field, isBytes=isBytes)
    if isBytes:
        k /= 1024.
    if field is None:
        return "%dK" % int(k)
    return "%*dK" % (field - 1, k)  # -1 for the "K"
python
{ "resource": "" }
q28912
refineData
train
def refineData(root, options):
    """Walk down from the root and gather up the important bits.

    :return: tuple of (root, worker stats, job stats, list of job types)
    """
    tree = root.job_types
    jobTypes = [tree[childName] for childName in tree]
    return root, root.worker, root.jobs, jobTypes
python
{ "resource": "" }
q28913
decorateSubHeader
train
def decorateSubHeader(title, columnWidths, options):
    """Add a marker (*) to the active sort field if TITLE is the sorted-on category."""
    title = title.lower()
    fields = ["min", "med", "ave", "max", "total"]
    widths = [columnWidths.getWidth(title, f) for f in fields]
    if title != options.sortCategory:
        # Not the sorted category: plain right-aligned field names.
        return "| " + "".join("%*s" % (w, f) for w, f in zip(widths, fields)) + " "
    # Sorted category: star the active sort field, borrowing one column for the '*'.
    parts = []
    for f, w in zip(fields, widths):
        if options.sortField == f:
            parts.append("%*s*" % (w - 1, f))
        else:
            parts.append("%*s" % (w, f))
    return "| " + "".join(parts) + " "
python
{ "resource": "" }
q28914
get
train
def get(tree, name):
    """Return a float value attribute NAME from TREE.

    Missing or unparsable values come back as NaN.
    """
    try:
        return float(tree[name])
    except (KeyError, ValueError):
        return float("nan")
python
{ "resource": "" }
q28915
sortJobs
train
def sortJobs(jobTypes, options):
    """Return the jobTypes sorted according to the options' sort settings."""
    longforms = {"med": "median", "ave": "average", "min": "min",
                 "total": "total", "max": "max"}
    sortField = longforms[options.sortField]
    category = options.sortCategory
    if category in ("time", "clock", "wait", "memory"):
        # Sort on the e.g. median_time / average_memory attribute.
        keyFn = lambda tag: getattr(tag, "%s_%s" % (sortField, category))
    elif category == "alpha":
        keyFn = lambda tag: tag.name
    elif category == "count":
        keyFn = lambda tag: tag.total_number
    else:
        # Unknown category: the original implicitly returned None here too.
        return None
    return sorted(jobTypes, key=keyFn, reverse=options.sortReverse)
python
{ "resource": "" }
q28916
reportPrettyData
train
def reportPrettyData(root, worker, job, job_types, options):
    """Print the important bits out."""
    parts = ["Batch System: %s\n" % root.batch_system]
    parts.append("Default Cores: %s Default Memory: %s\n"
                 "Max Cores: %s\n" % (
                     reportNumber(get(root, "default_cores"), options),
                     reportMemory(get(root, "default_memory"), options, isBytes=True),
                     reportNumber(get(root, "max_cores"), options),
                 ))
    parts.append("Total Clock: %s Total Runtime: %s\n" % (
        reportTime(get(root, "total_clock"), options),
        reportTime(get(root, "total_run_time"), options),
    ))
    # Sort the job types first so the column widths reflect the final order.
    sortedTypes = sortJobs(job_types, options)
    columnWidths = computeColumnWidths(sortedTypes, worker, job, options)
    parts.append("Worker\n")
    parts.append(sprintTag("worker", worker, options, columnWidths=columnWidths))
    parts.append("Job\n")
    parts.append(sprintTag("job", job, options, columnWidths=columnWidths))
    for jobType in sortedTypes:
        parts.append(" %s\n" % jobType.name)
        parts.append(sprintTag(jobType.name, jobType, options, columnWidths=columnWidths))
    return "".join(parts)
python
{ "resource": "" }
q28917
updateColumnWidths
train
def updateColumnWidths(tag, cw, options):
    """Update the column width attributes for this tag's fields."""
    longforms = {"med": "median", "ave": "average", "min": "min",
                 "total": "total", "max": "max"}
    for category in ("time", "clock", "wait", "memory"):
        if category not in options.categories:
            continue
        for field in ("min", "med", "ave", "max", "total"):
            width = cw.getWidth(category, field)
            value = getattr(tag, "%s_%s" % (longforms[field], category))
            if category == "memory":
                rendered = reportMemory(value, options, field=width, isBytes=True).strip()
            else:
                rendered = reportTime(value, options, field=width).strip()
            if len(rendered) >= width:
                # The rendered string no longer fits; widen the column.
                cw.setWidth(category, field, len(rendered) + 1)
python
{ "resource": "" }
q28918
buildElement
train
def buildElement(element, items, itemName):
    """Create an element for output."""
    def assertNonnegative(i, name):
        # Guard against corrupted stats: all measurements must be >= 0.
        if i < 0:
            raise RuntimeError("Negative value %s reported for %s" % (i, name))
        return float(i)

    itemTimes = []
    itemClocks = []
    itemMemory = []
    for item in items:
        itemTimes.append(assertNonnegative(float(item["time"]), "time"))
        itemClocks.append(assertNonnegative(float(item["clock"]), "clock"))
        itemMemory.append(assertNonnegative(float(item["memory"]), "memory"))
    assert len(itemClocks) == len(itemTimes) == len(itemMemory)

    # Wait time is wall time minus CPU time, per item.
    itemWaits = [t - c for t, c in zip(itemTimes, itemClocks)]

    itemWaits.sort()
    itemTimes.sort()
    itemClocks.sort()
    itemMemory.sort()

    if not itemTimes:
        # Avoid division by zero below when there were no items.
        itemTimes.append(0)
        itemClocks.append(0)
        itemWaits.append(0)
        itemMemory.append(0)

    def summarize(prefix, values):
        # total/median/average/min/max stats for one sorted measurement list.
        return {
            'total_%s' % prefix: float(sum(values)),
            'median_%s' % prefix: float(values[len(values) // 2]),
            'average_%s' % prefix: float(sum(values) / len(values)),
            'min_%s' % prefix: float(min(values)),
            'max_%s' % prefix: float(max(values)),
        }

    stats = dict(total_number=float(len(items)), name=itemName)
    stats.update(summarize('time', itemTimes))
    stats.update(summarize('clock', itemClocks))
    stats.update(summarize('wait', itemWaits))
    stats.update(summarize('memory', itemMemory))
    element[itemName] = Expando(**stats)
    return element[itemName]
python
{ "resource": "" }
q28919
getStats
train
def getStats(jobStore):
    """Collect and return the stats and config data."""
    aggregateObject = Expando()

    def aggregateStats(fileHandle, aggregateObject):
        # Merge one stats file into the aggregate, keyed by stat name.
        try:
            stats = json.load(fileHandle, object_hook=Expando)
            for key in list(stats.keys()):
                if key in aggregateObject:
                    aggregateObject[key].append(stats[key])
                else:
                    aggregateObject[key] = [stats[key]]
        except ValueError:
            # The file is corrupted; log and skip it.
            logger.critical("File %s contains corrupted json. Skipping file." % fileHandle)

    callBack = partial(aggregateStats, aggregateObject=aggregateObject)
    jobStore.readStatsAndLogging(callBack, readAll=True)
    return aggregateObject
python
{ "resource": "" }
q28920
processData
train
def processData(config, stats):
    """Collate the stats and report."""
    if 'total_time' not in stats or 'total_clock' not in stats:
        # The Toil workflow has not finished yet; supply zero totals.
        stats.total_time = [0.0]
        stats.total_clock = [0.0]
    stats.total_time = sum(float(number) for number in stats.total_time)
    stats.total_clock = sum(float(number) for number in stats.total_clock)

    collatedStatsTag = Expando(total_run_time=stats.total_time,
                               total_clock=stats.total_clock,
                               batch_system=config.batchSystem,
                               default_memory=str(config.defaultMemory),
                               default_cores=str(config.defaultCores),
                               max_cores=str(config.maxCores))

    # Add worker info
    worker = [_f for _f in getattr(stats, 'workers', []) if _f]
    jobs = [_f for _f in getattr(stats, 'jobs', []) if _f]
    jobs = [item for sublist in jobs for item in sublist]

    def fn4(job):
        # NOTE(review): the *job* argument is ignored and the full job list is
        # returned for every worker -- looks suspicious, but preserved as-is.
        try:
            return list(jobs)
        except TypeError:
            return []

    buildElement(collatedStatsTag, worker, "worker")
    createSummary(buildElement(collatedStatsTag, jobs, "jobs"),
                  getattr(stats, 'workers', []), "worker", fn4)

    # Get info for each job type.
    jobNames = {job.class_name for job in jobs}
    jobTypesTag = Expando()
    collatedStatsTag.job_types = jobTypesTag
    for jobName in jobNames:
        jobTypes = [job for job in jobs if job.class_name == jobName]
        buildElement(jobTypesTag, jobTypes, jobName)
    collatedStatsTag.name = "collatedStatsTag"
    return collatedStatsTag
python
{ "resource": "" }
q28921
ColumnWidths.title
train
def title(self, category):
    """Return the total printed length of this category item."""
    total = 0
    for fieldName in self.fields:
        total += self.getWidth(category, fieldName)
    return total
python
{ "resource": "" }
q28922
nextChainableJobGraph
train
def nextChainableJobGraph(jobGraph, jobStore):
    """Returns the next chainable jobGraph after this jobGraph if one exists,
    or None if the chain must terminate.

    :param jobGraph: the job that just finished on this worker
    :param jobStore: the store used to load the candidate successor
    """
    # If no more jobs to run or services not finished, quit
    # (uses `is not None` instead of the original `!= None` per PEP 8).
    if len(jobGraph.stack) == 0 or len(jobGraph.services) > 0 or jobGraph.checkpoint is not None:
        logger.debug("Stopping running chain of jobs: length of stack: %s, services: %s, checkpoint: %s",
                     len(jobGraph.stack), len(jobGraph.services), jobGraph.checkpoint is not None)
        return None

    # Get the next set of jobs to run
    jobs = jobGraph.stack[-1]
    assert len(jobs) > 0

    # If there are 2 or more jobs to run in parallel we quit
    if len(jobs) >= 2:
        logger.debug("No more jobs can run in series by this worker,"
                     " it's got %i children", len(jobs) - 1)
        return None

    # We check the requirements of the jobGraph to see if we can run it
    # within the current worker
    successorJobNode = jobs[0]
    if successorJobNode.memory > jobGraph.memory:
        logger.debug("We need more memory for the next job, so finishing")
        return None
    if successorJobNode.cores > jobGraph.cores:
        logger.debug("We need more cores for the next job, so finishing")
        return None
    if successorJobNode.disk > jobGraph.disk:
        logger.debug("We need more disk for the next job, so finishing")
        return None
    if successorJobNode.preemptable != jobGraph.preemptable:
        logger.debug("Preemptability is different for the next job, returning to the leader")
        return None
    if successorJobNode.predecessorNumber > 1:
        logger.debug("The jobGraph has multiple predecessors, we must return to the leader.")
        return None

    # Load the successor jobGraph
    successorJobGraph = jobStore.load(successorJobNode.jobStoreID)

    # Somewhat ugly, but check if job is a checkpoint job and quit if so
    if successorJobGraph.command.startswith("_toil "):
        # Load the job
        successorJob = Job._loadJob(successorJobGraph.command, jobStore)
        # Check it is not a checkpoint
        if successorJob.checkpoint:
            logger.debug("Next job is checkpoint, so finishing")
            return None

    # Made it through! This job is chainable.
    return successorJobGraph
python
{ "resource": "" }
q28923
LoggingDatagramHandler.handle
train
def handle(self):
    """Handle a single message. SocketServer takes care of splitting out the messages.

    Messages are JSON-encoded logging module records.
    """
    # Unpack the data from the request
    data, socket = self.request
    try:
        # Parse it as JSON
        message_attrs = json.loads(data.decode('utf-8'))
        # Fluff it up into a proper logging record
        record = logging.makeLogRecord(message_attrs)
    except Exception:
        # The original bare `except:` would also swallow SystemExit and
        # KeyboardInterrupt; `except Exception` still catches any bad
        # logging data while letting those through.
        logging.error("Malformed log message from {}".format(self.client_address[0]))
    else:
        # Log level filtering should have been done on the remote end. The handle() method
        # skips it on this end.
        log.handle(record)
python
{ "resource": "" }
q28924
RealtimeLogger._stopLeader
train
def _stopLeader(cls): """ Stop the server on the leader. """ with cls.lock: assert cls.initialized > 0 cls.initialized -= 1 if cls.initialized == 0: if cls.loggingServer: log.info('Stopping real-time logging server.') cls.loggingServer.shutdown() cls.loggingServer = None if cls.serverThread: log.info('Joining real-time logging server thread.') cls.serverThread.join() cls.serverThread = None for k in list(os.environ.keys()): if k.startswith(cls.envPrefix): os.environ.pop(k)
python
{ "resource": "" }
q28925
RealtimeLogger.getLogger
train
def getLogger(cls):
    """Get the logger that logs real-time to the leader.

    Note that if the returned logger is used on the leader, you will see the
    message twice, since it still goes to the normal log handlers, too.
    """
    # Set up only once; double-checked locking keeps the common fast path
    # lock-free while staying safe when called from multiple threads.
    if cls.logger is None:
        with cls.lock:
            if cls.logger is None:
                cls.logger = logging.getLogger('toil-rt')
                try:
                    level = os.environ[cls.envPrefix + 'LEVEL']
                except KeyError:
                    # There is no server running on the leader, so suppress
                    # most log messages and skip the UDP stuff.
                    cls.logger.setLevel(logging.CRITICAL)
                else:
                    # Adopt the logging level set on the leader.
                    toil.lib.bioio.setLogLevel(level, cls.logger)
                    try:
                        address = os.environ[cls.envPrefix + 'ADDRESS']
                    except KeyError:
                        pass
                    else:
                        # We know where to send messages to, so send them.
                        host, port = address.split(':')
                        cls.logger.addHandler(JSONDatagramHandler(host, int(port)))
    return cls.logger
python
{ "resource": "" }
q28926
uploadFromPath
train
def uploadFromPath(localFilePath, partSize, bucket, fileID, headers):
    """
    Uploads a file to s3, using multipart uploading if applicable

    :param str localFilePath: Path of the file to upload to s3
    :param int partSize: max size of each part in the multipart upload, in bytes
    :param boto.s3.Bucket bucket: the s3 bucket to upload to
    :param str fileID: the name of the file to upload to
    :param headers: http headers to use when uploading - generally used for encryption purposes
    :return: version of the newly uploaded file
    """
    file_size, file_time = fileSizeAndTime(localFilePath)
    if file_size <= partSize:
        # Small enough for a single PUT.
        key = bucket.new_key(key_name=bytes(fileID))
        key.name = fileID
        for attempt in retry_s3():
            with attempt:
                key.set_contents_from_filename(localFilePath, headers=headers)
        version = key.version_id
    else:
        # Too big for one request: stream it up in parts.
        with open(localFilePath, 'rb') as f:
            version = chunkedFileUpload(f, bucket, fileID, file_size, headers, partSize)

    for attempt in retry_s3():
        with attempt:
            key = bucket.get_key(bytes(fileID), headers=headers, version_id=version)
    assert key.size == file_size
    # Make reasonably sure that the file wasn't touched during the upload
    assert fileSizeAndTime(localFilePath) == (file_size, file_time)
    return version
python
{ "resource": "" }
q28927
copyKeyMultipart
train
def copyKeyMultipart(srcBucketName, srcKeyName, srcKeyVersion, dstBucketName, dstKeyName,
                     sseAlgorithm=None, sseKey=None, copySourceSseAlgorithm=None,
                     copySourceSseKey=None):
    """
    Copies a key from a source key to a destination key in multiple parts. Note that if the
    destination key exists it will be overwritten implicitly, and if it does not exist a new
    key will be created. If the destination bucket does not exist an error will be raised.

    :param str srcBucketName: The name of the bucket to be copied from.
    :param str srcKeyName: The name of the key to be copied from.
    :param str srcKeyVersion: The version of the key to be copied from.
    :param str dstBucketName: The name of the destination bucket for the copy.
    :param str dstKeyName: The name of the destination key that will be created or overwritten.
    :param str sseAlgorithm: Server-side encryption algorithm for the destination.
    :param str sseKey: Server-side encryption key for the destination.
    :param str copySourceSseAlgorithm: Server-side encryption algorithm for the source.
    :param str copySourceSseKey: Server-side encryption key for the source.

    :rtype: str
    :return: The version of the copied file (or None if versioning is not enabled for dstBucket).
    """
    s3 = boto3.resource('s3')
    dstBucket = s3.Bucket(oldstr(dstBucketName))
    dstObject = dstBucket.Object(oldstr(dstKeyName))
    copySource = {'Bucket': oldstr(srcBucketName), 'Key': oldstr(srcKeyName)}
    if srcKeyVersion is not None:
        copySource['VersionId'] = oldstr(srcKeyVersion)

    # The boto3 functions don't allow passing parameters as None to indicate
    # they weren't provided, so only include the encryption arguments that
    # were actually supplied.
    destEncryptionArgs = {}
    if sseKey is not None:
        destEncryptionArgs.update({'SSECustomerAlgorithm': sseAlgorithm,
                                   'SSECustomerKey': sseKey})
    copyEncryptionArgs = {}
    if copySourceSseKey is not None:
        copyEncryptionArgs.update({'CopySourceSSECustomerAlgorithm': copySourceSseAlgorithm,
                                   'CopySourceSSECustomerKey': copySourceSseKey})
    copyEncryptionArgs.update(destEncryptionArgs)

    dstObject.copy(copySource, ExtraArgs=copyEncryptionArgs)

    # Unfortunately, boto3's managed copy doesn't return the version that it
    # actually copied to. So we have to check immediately after, leaving open
    # the possibility that it may have been modified again in the few seconds
    # since the copy finished. There isn't much we can do about it.
    info = boto3.client('s3').head_object(Bucket=dstObject.bucket_name,
                                          Key=dstObject.key,
                                          **destEncryptionArgs)
    return info.get('VersionId', None)
python
{ "resource": "" }
q28928
_put_attributes_using_post
train
def _put_attributes_using_post(self, domain_or_name, item_name, attributes,
                               replace=True, expected_value=None):
    """
    Monkey-patched version of SDBConnection.put_attributes that uses POST instead of GET

    The GET version is subject to the URL length limit which kicks in before the 256 x 1024
    limit for attribute values. Using POST prevents that.

    https://github.com/BD2KGenomics/toil/issues/502
    """
    domain, domain_name = self.get_domain_and_name(domain_or_name)
    params = {'DomainName': domain_name, 'ItemName': item_name}
    self._build_name_value_list(params, attributes, replace)
    if expected_value:
        self._build_expected_value(params, expected_value)
    # Requesting with verb='POST' is the only difference to stock put_attributes.
    return self.get_status('PutAttributes', params, verb='POST')
python
{ "resource": "" }
q28929
logFile
train
def logFile(fileName, printFunction=logger.info):
    """Writes out a formatted version of the given log file

    :param str fileName: path of the log file to report
    :param printFunction: callable used to emit each formatted line
    """
    printFunction("Reporting file: %s" % fileName)
    shortName = fileName.split("/")[-1]
    # A context manager guarantees the handle is closed even if printFunction
    # raises; the original leaked the handle on any exception.
    with open(fileName, 'r') as fileHandle:
        for line in fileHandle:
            if line and line[-1] == '\n':
                line = line[:-1]
            printFunction("%s:\t%s" % (shortName, line))
python
{ "resource": "" }
q28930
logStream
train
def logStream(fileHandle, shortName, printFunction=logger.info):
    """Writes out a formatted version of the given log stream."""
    printFunction("Reporting file: %s" % shortName)
    # Read until readline() returns the empty-string sentinel (EOF).
    for line in iter(fileHandle.readline, ''):
        if line[-1] == '\n':
            line = line[:-1]
        printFunction("%s:\t%s" % (shortName, line))
    fileHandle.close()
python
{ "resource": "" }
q28931
system
train
def system(command):
    """
    A convenience wrapper around subprocess.check_call that logs the command before passing it on.
    The command can be either a string or a sequence of strings. If it is a string shell=True will be
    passed to subprocess.check_call.

    :type command: str | sequence[string]
    """
    logger.debug('Running: %r', command)
    # Strings go through the shell; sequences are exec'd directly.
    useShell = isinstance(command, string_types)
    subprocess.check_call(command, shell=useShell, bufsize=-1)
python
{ "resource": "" }
q28932
absSymPath
train
def absSymPath(path):
    """like os.path.abspath except it doesn't dereference symlinks"""
    # os.path.join ignores the cwd prefix when *path* is already absolute.
    return os.path.normpath(os.path.join(os.getcwd(), path))
python
{ "resource": "" }
q28933
makePublicDir
train
def makePublicDir(dirName):
    """Makes a given subdirectory if it doesn't already exist, making sure it is public.

    :param str dirName: path of the directory to create
    :return: dirName, for convenience
    """
    try:
        # Attempt creation directly instead of checking os.path.exists() first,
        # which is racy if another process creates the directory in between
        # the check and the mkdir.
        os.mkdir(dirName)
    except FileExistsError:
        # Already present (possibly made by someone else); leave its mode
        # alone, matching the original behavior of only chmod-ing on creation.
        pass
    else:
        os.chmod(dirName, 0o777)
    return dirName
python
{ "resource": "" }
q28934
write_AST
train
def write_AST(wdl_file, outdir=None):
    '''
    Writes a file with the AST for a wdl file in the outdir.
    '''
    if outdir is None:
        outdir = os.getcwd()
    out_path = os.path.join(outdir, 'AST.out')
    with open(out_path, 'w') as out_file, open(wdl_file, 'r') as wdl:
        # Parse the whole WDL document and dump its AST, pretty-printed.
        ast = wdl_parser.parse(wdl.read()).ast()
        out_file.write(ast.dumps(indent=2))
python
{ "resource": "" }
q28935
SynthesizeWDL.write_main
train
def write_main(self):
    '''
    Writes out a huge string representing the main section of the python
    compiled toil script.

    Currently looks at and writes 5 sections:
    1. JSON Variables (includes importing and preparing files as tuples)
    2. TSV Variables (includes importing and preparing files as tuples)
    3. CSV Variables (includes importing and preparing files as tuples)
    4. Wrapping each WDL "task" function as a toil job
    5. List out children and encapsulated jobs by priority, then start job0.

    This should create variable declarations necessary for function calls.
    Map file paths appropriately and store them in the toil fileStore so
    that they are persistent from job to job.  Create job wrappers for toil.
    And finally write out, and run the jobs in order of priority using the
    addChild and encapsulate commands provided by toil.

    :return: giant string containing the main def for the toil script.
    '''
    # write out the main header
    sections = [self.write_main_header()]
    # write out the workflow declarations
    sections.append(' # WF Declarations\n')
    sections.append(self.write_main_wfdeclarations())
    # write toil job wrappers with input vars
    sections.append(self.write_main_jobwrappers())
    # loop to export all outputs to a cloud bucket
    if self.destBucket:
        sections.append(self.write_main_destbucket())
    return ''.join(sections)
python
{ "resource": "" }
q28936
SynthesizeWDL.write_main_jobwrappers
train
def write_main_jobwrappers(self):
    '''
    Writes out 'jobs' as wrapped toil objects in preparation for calling.

    :return: A string representing this.
    '''
    main_section = ''

    # toil cannot technically start with multiple jobs, so an empty
    # 'initialize_jobs' function is always called first to get around this
    main_section = main_section + '\n job0 = Job.wrapJobFn(initialize_jobs)\n'

    # declare each job in main as a wrapped toil function in order of priority
    for wf in self.workflows_dictionary:
        for assignment in self.workflows_dictionary[wf]:
            if assignment.startswith('call'):
                # encapsulate() forces this call to run after everything added so far
                main_section += ' job0 = job0.encapsulate()\n'
                main_section += self.write_main_jobwrappers_call(self.workflows_dictionary[wf][assignment])
            if assignment.startswith('scatter'):
                main_section += ' job0 = job0.encapsulate()\n'
                main_section += self.write_main_jobwrappers_scatter(self.workflows_dictionary[wf][assignment], assignment)
            if assignment.startswith('if'):
                # emits a conditional block in the generated script; the body is
                # written indented underneath it
                main_section += ' if {}:\n'.format(self.workflows_dictionary[wf][assignment]['expression'])
                main_section += self.write_main_jobwrappers_if(self.workflows_dictionary[wf][assignment]['body'])

    main_section += '\n fileStore.start(job0)\n'

    return main_section
python
{ "resource": "" }
q28937
SynthesizeWDL.write_scatterfunction
train
def write_scatterfunction(self, job, scattername):
    '''
    Writes out a python function for each WDL "scatter" object.

    :param job: The parsed scatter body from the workflows dictionary.
    :param scattername: Name used for the generated scatter function.
    :return: A string containing the full generated function.
    '''
    scatter_outputs = self.fetch_scatter_outputs(job)

    # write the function header
    fn_section = self.write_scatterfunction_header(scattername)

    # write the scatter definitions
    fn_section += self.write_scatterfunction_lists(scatter_outputs)

    # write the loop performing the scatter itself
    fn_section += self.write_scatterfunction_loop(job, scatter_outputs)

    # write the outputs for the task to return
    fn_section += self.write_scatterfunction_outputreturn(scatter_outputs)

    return fn_section
python
{ "resource": "" }
q28938
SynthesizeWDL.write_function_bashscriptline
train
def write_function_bashscriptline(self, job):
    '''
    Writes a function to create a bashscript for injection into the docker
    container.

    :param job: The job (WDL task) whose bashscript-generation call is written.
    :return: A string writing all of this.
    '''
    fn_section = " generate_docker_bashscript_file(temp_dir=tempDir, docker_dir=tempDir, globs=["
    # TODO: Add glob
    # if 'outputs' in self.tasks_dictionary[job]:
    #     for output in self.tasks_dictionary[job]['outputs']:
    #         fn_section += '({}), '.format(output[2])

    # NOTE(review): this trim is dead code until the glob TODO above is
    # implemented -- at this point fn_section always ends with 'globs=['.
    if fn_section.endswith(', '):
        fn_section = fn_section[:-2]
    fn_section += "], cmd=cmd, job_name='{}')\n\n".format(str(job))

    return fn_section
python
{ "resource": "" }
q28939
SynthesizeWDL.write_python_file
train
def write_python_file(self, module_section, fn_section, main_section, output_file):
    '''
    Just takes three strings and writes them to output_file.

    :param module_section: A string of 'import modules'.
    :param fn_section: A string of python 'def functions()'.
    :param main_section: A string declaring toil options and main's header.
    :param output_file: The file to write the compiled toil script to.
    '''
    # Emit the sections in fixed order: imports, functions, then main.
    sections = (module_section, fn_section, main_section)
    with open(output_file, 'w') as script:
        for chunk in sections:
            script.write(chunk)
python
{ "resource": "" }
q28940
MesosBatchSystem._buildExecutor
train
def _buildExecutor(self): """ Creates and returns an ExecutorInfo-shaped object representing our executor implementation. """ # The executor program is installed as a setuptools entry point by setup.py info = addict.Dict() info.name = "toil" info.command.value = resolveEntryPoint('_toil_mesos_executor') info.executor_id.value = "toil-%i" % os.getpid() info.source = pwd.getpwuid(os.getuid()).pw_name return info
python
{ "resource": "" }
q28941
MesosBatchSystem._startDriver
train
def _startDriver(self):
    """
    The Mesos driver thread which handles the scheduler's communication with the Mesos master
    """
    framework = addict.Dict()
    framework.user = getpass.getuser()  # We must determine the user name ourselves with pymesos
    framework.name = "toil"
    framework.principal = framework.name

    # Make the driver which implements most of the scheduler logic and calls back to us for the user-defined parts.
    # Make sure it will call us with nice namespace-y addicts
    self.driver = MesosSchedulerDriver(self,
                                       framework,
                                       self._resolveAddress(self.mesosMasterAddress),
                                       use_addict=True,
                                       implicit_acknowledgements=True)
    self.driver.start()
python
{ "resource": "" }
q28942
MesosBatchSystem.registered
train
def registered(self, driver, frameworkId, masterInfo):
    """
    Invoked when the scheduler successfully registers with a Mesos master

    :param driver: the scheduler driver (unused here).
    :param frameworkId: object whose .value is the framework ID string.
    :param masterInfo: information about the master (unused here).
    """
    log.debug("Registered with framework ID %s", frameworkId.value)
    # Save the framework ID
    self.frameworkId = frameworkId.value
python
{ "resource": "" }
q28943
MesosBatchSystem._newMesosTask
train
def _newMesosTask(self, job, offer):
    """
    Build the Mesos task object for a given the Toil job and Mesos offer

    :param job: the Toil job to run; its resources attribute supplies
           cores/memory/disk requirements.
    :param offer: the Mesos offer the task will be launched against.
    :return: an addict.Dict shaped like a Mesos TaskInfo.
    """
    task = addict.Dict()
    task.task_id.value = str(job.jobID)
    task.agent_id.value = offer.agent_id.value
    task.name = job.name
    # The Toil job itself is pickled and shipped as the task payload.
    task.data = encode_data(pickle.dumps(job))
    task.executor = addict.Dict(self.executor)

    task.resources = []

    task.resources.append(addict.Dict())
    cpus = task.resources[-1]
    cpus.name = 'cpus'
    cpus.type = 'SCALAR'
    cpus.scalar.value = job.resources.cores

    task.resources.append(addict.Dict())
    disk = task.resources[-1]
    disk.name = 'disk'
    disk.type = 'SCALAR'
    # Mesos resources are in MiB; requests below 1 MiB are rounded up.
    if toMiB(job.resources.disk) > 1:
        disk.scalar.value = toMiB(job.resources.disk)
    else:
        log.warning("Job %s uses less disk than Mesos requires. Rounding %s up to 1 MiB.",
                    job.jobID, job.resources.disk)
        disk.scalar.value = 1

    task.resources.append(addict.Dict())
    mem = task.resources[-1]
    mem.name = 'mem'
    mem.type = 'SCALAR'
    # Same 1 MiB floor applies to memory.
    if toMiB(job.resources.memory) > 1:
        mem.scalar.value = toMiB(job.resources.memory)
    else:
        log.warning("Job %s uses less memory than Mesos requires. Rounding %s up to 1 MiB.",
                    job.jobID, job.resources.memory)
        mem.scalar.value = 1
    return task
python
{ "resource": "" }
q28944
MesosBatchSystem.frameworkMessage
train
def frameworkMessage(self, driver, executorId, agentId, message): """ Invoked when an executor sends a message. """ # Take it out of base 64 encoding from Protobuf message = decode_data(message) log.debug('Got framework message from executor %s running on agent %s: %s', executorId.value, agentId.value, message) message = ast.literal_eval(message) assert isinstance(message, dict) # Handle the mandatory fields of a message nodeAddress = message.pop('address') executor = self._registerNode(nodeAddress, agentId.value) # Handle optional message fields for k, v in iteritems(message): if k == 'nodeInfo': assert isinstance(v, dict) resources = [taskData for taskData in itervalues(self.runningJobMap) if taskData.executorID == executorId.value] requestedCores = sum(taskData.cores for taskData in resources) requestedMemory = sum(taskData.memory for taskData in resources) executor.nodeInfo = NodeInfo(requestedCores=requestedCores, requestedMemory=requestedMemory, **v) self.executors[nodeAddress] = executor else: raise RuntimeError("Unknown message field '%s'." % k)
python
{ "resource": "" }
q28945
MesosBatchSystem._registerNode
train
def _registerNode(self, nodeAddress, agentId, nodePort=5051):
    """
    Called when we get communication from an agent. Remembers the
    information about the agent by address, and the agent address by agent ID.

    :param nodeAddress: address of the agent's node.
    :param agentId: Mesos agent ID.
    :param nodePort: NOTE(review): this parameter is never read in this
           body -- confirm whether callers rely on it elsewhere.
    :return: the (possibly new) ExecutorInfo record for the node.
    """
    executor = self.executors.get(nodeAddress)
    # Replace the record if the node is new or the agent was restarted
    # under a different ID.
    if executor is None or executor.agentId != agentId:
        executor = self.ExecutorInfo(nodeAddress=nodeAddress,
                                     agentId=agentId,
                                     nodeInfo=None,
                                     lastSeen=time.time())
        self.executors[nodeAddress] = executor
    else:
        executor.lastSeen = time.time()

    # Record the IP under the agent id
    self.agentsByID[agentId] = nodeAddress

    return executor
python
{ "resource": "" }
q28946
Node.remainingBillingInterval
train
def remainingBillingInterval(self):
    """
    If the node has a launch time, this function returns a floating point value
    between 0 and 1.0 representing how much of the current billing cycle for the
    given instance remains. If the return value is .25, we are three quarters
    into the billing cycle, with one quarter of the pre-paid time remaining
    before we will be charged again for that instance.

    Assumes a billing cycle of one hour.

    :return: Float from 0 -> 1.0 representing percentage of pre-paid time left in cycle.
    """
    if self.launchTime:
        now = datetime.datetime.utcnow()
        delta = now - parse_iso_utc(self.launchTime)
        # (elapsed hours) % 1.0 is the fraction of the current hour consumed;
        # the remainder of the hour is what's left to use for free.
        return 1 - delta.total_seconds() / 3600.0 % 1.0
    else:
        # No launch time known: treat the whole cycle as remaining.
        return 1
python
{ "resource": "" }
q28947
Node.copySshKeys
train
def copySshKeys(self, keyName):
    """ Copy authorized_keys file to the core user from the keyName user.

    :param keyName: user whose authorized_keys should be copied to 'core'.
    """
    if keyName == 'core':
        return  # No point.

    # Make sure that keys are there.
    self._waitForSSHKeys(keyName=keyName)

    # copy keys to core user so that the ssh calls will work
    # - normal mechanism failed unless public key was in the google-ssh format
    # - even so, the key wasn't copied correctly to the core account
    keyFile = '/home/%s/.ssh/authorized_keys' % keyName
    self.sshInstance('/usr/bin/sudo', '/usr/bin/cp', keyFile, '/home/core/.ssh', user=keyName)
    # Fix ownership so the core user can actually read the copied keys.
    self.sshInstance('/usr/bin/sudo', '/usr/bin/chown', 'core', '/home/core/.ssh/authorized_keys', user=keyName)
python
{ "resource": "" }
q28948
Node.injectFile
train
def injectFile(self, fromFile, toFile, role):
    """
    rysnc a file to the vm with the given role

    :param fromFile: local path to copy from.
    :param toFile: remote path to copy to.
    :param role: appliance name identifying the target VM.
    :return: True on success.
    :raises RuntimeError: after all retries are exhausted.
    """
    maxRetries = 10
    for retry in range(maxRetries):
        try:
            self.coreRsync([fromFile, ":" + toFile], applianceName=role)
            return True
        except Exception as e:
            logger.debug("Rsync to new node failed, trying again. Error message: %s" % e)
            # Linear backoff; note the first retry sleeps 0 seconds.
            time.sleep(10*retry)
    raise RuntimeError("Failed to inject file %s to %s with ip %s" % (fromFile, role, self.effectiveIP) )
python
{ "resource": "" }
q28949
Node._waitForSSHPort
train
def _waitForSSHPort(self):
    """
    Wait until the instance represented by this box is accessible via SSH.

    :return: the number of unsuccessful attempts to connect to the port before
             the first success
    """
    logger.debug('Waiting for ssh port to open...')
    # Retry forever; i counts the failed attempts.
    for i in count():
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.settimeout(a_short_time)
            s.connect((self.effectiveIP, 22))
            logger.debug('...ssh port open')
            return i
        except socket.error:
            pass
        finally:
            # Always release the socket, whether the connect succeeded or not.
            s.close()
python
{ "resource": "" }
q28950
Node.sshInstance
train
def sshInstance(self, *args, **kwargs):
    """
    Run a command on the instance.
    Returns the binary output of the command.
    """
    # Delegate to coreSSH, forcing stdout capture so the output is returned.
    kwargs['collectStdout'] = True
    return self.coreSSH(*args, **kwargs)
python
{ "resource": "" }
q28951
adjustEndingReservationForJob
train
def adjustEndingReservationForJob(reservation, jobShape, wallTime):
    """
    Add a job to an ending reservation that ends at wallTime,
    splitting the reservation if the job doesn't fill the entire timeslice.

    :param reservation: NodeReservation slice the job is being placed into.
    :param jobShape: resource requirements (Shape) of the job.
    :param wallTime: wall time already consumed before this slice begins.
    """
    if jobShape.wallTime - wallTime < reservation.shape.wallTime:
        # This job only partially fills one of the slices. Create a new slice.
        reservation.shape, nS = split(reservation.shape, jobShape, jobShape.wallTime - wallTime)
        # Link the new remainder slice into the reservation chain.
        nS.nReservation = reservation.nReservation
        reservation.nReservation = nS
    else:
        # This job perfectly fits within the boundaries of the slices.
        reservation.subtract(jobShape)
python
{ "resource": "" }
q28952
split
train
def split(nodeShape, jobShape, wallTime):
    """
    Partition a node allocation into two to fit the job, returning the
    modified shape of the node and a new node reservation for the extra time
    that the job didn't fill.

    :return: tuple of (Shape covering the first wallTime with the job's
             resources subtracted, NodeReservation covering the remaining
             time at the node's full capacity).
    """
    return (Shape(wallTime,
                  nodeShape.memory - jobShape.memory,
                  nodeShape.cores - jobShape.cores,
                  nodeShape.disk - jobShape.disk,
                  nodeShape.preemptable),
            NodeReservation(Shape(nodeShape.wallTime - wallTime,
                                  nodeShape.memory,
                                  nodeShape.cores,
                                  nodeShape.disk,
                                  nodeShape.preemptable)))
python
{ "resource": "" }
q28953
BinPackedFit.binPack
train
def binPack(self, jobShapes): """Pack a list of jobShapes into the fewest nodes reasonable. Can be run multiple times.""" # TODO: Check for redundancy with batchsystems.mesos.JobQueue() sorting logger.debug('Running bin packing for node shapes %s and %s job(s).', self.nodeShapes, len(jobShapes)) # Sort in descending order from largest to smallest. The FFD like-strategy will pack the # jobs in order from longest to shortest. jobShapes.sort() jobShapes.reverse() assert len(jobShapes) == 0 or jobShapes[0] >= jobShapes[-1] for jS in jobShapes: self.addJobShape(jS)
python
{ "resource": "" }
q28954
BinPackedFit.getRequiredNodes
train
def getRequiredNodes(self):
    """
    Returns a dict from node shape to number of nodes required to run the packed jobs.
    """
    # One reservation per node: the count of reservations is the node count.
    counts = {}
    for shape in self.nodeShapes:
        counts[shape] = len(self.nodeReservations[shape])
    return counts
python
{ "resource": "" }
q28955
NodeReservation.fits
train
def fits(self, jobShape):
    """Check if a job shape's resource requirements will fit within this allocation."""
    available = self.shape
    # Each resource must fit individually; fail fast on the first overrun.
    if jobShape.memory > available.memory:
        return False
    if jobShape.cores > available.cores:
        return False
    if jobShape.disk > available.disk:
        return False
    # A preemptable slot may only host preemptable jobs; a non-preemptable
    # slot may host either kind.
    return jobShape.preemptable or not available.preemptable
python
{ "resource": "" }
q28956
NodeReservation.shapes
train
def shapes(self):
    """Get all time-slice shapes, in order, from this reservation on."""
    def _walk(node):
        # Follow the singly-linked chain of reservations to its end.
        while node is not None:
            yield node.shape
            node = node.nReservation
    return list(_walk(self))
python
{ "resource": "" }
q28957
NodeReservation.subtract
train
def subtract(self, jobShape):
    """
    Subtracts the resources necessary to run a jobShape from the reservation.
    """
    # wallTime and preemptable are unchanged; only the capacities shrink.
    self.shape = Shape(self.shape.wallTime,
                       self.shape.memory - jobShape.memory,
                       self.shape.cores - jobShape.cores,
                       self.shape.disk - jobShape.disk,
                       self.shape.preemptable)
python
{ "resource": "" }
q28958
ClusterScaler.setStaticNodes
train
def setStaticNodes(self, nodes, preemptable):
    """
    Used to track statically provisioned nodes. This method must be called
    before any auto-scaled nodes are provisioned.

    These nodes are treated differently than auto-scaled nodes in that they
    should not be automatically terminated.

    :param nodes: list of Node objects
    :param bool preemptable: whether the given nodes are preemptable
    """
    prefix = 'non-' if not preemptable else ''
    logger.debug("Adding %s to %spreemptable static nodes", nodes, prefix)
    if nodes is not None:
        # Index the static nodes by private IP so they can be recognized later.
        self.static[preemptable] = {node.privateIP : node for node in nodes}
python
{ "resource": "" }
q28959
ClusterScaler.smoothEstimate
train
def smoothEstimate(self, nodeShape, estimatedNodeCount):
    """
    Smooth out fluctuations in the estimate for this node compared to previous runs.
    Returns an integer.

    Implements an exponentially weighted moving average whose weight is
    controlled by self.betaInertia; the smoothed value is remembered for
    the next call.
    """
    weightedEstimate = (1 - self.betaInertia) * estimatedNodeCount + \
                       self.betaInertia * self.previousWeightedEstimate[nodeShape]
    self.previousWeightedEstimate[nodeShape] = weightedEstimate
    return self._round(weightedEstimate)
python
{ "resource": "" }
q28960
ClusterScaler.getEstimatedNodeCounts
train
def getEstimatedNodeCounts(self, queuedJobShapes, currentNodeCounts):
    """
    Given the resource requirements of queued jobs and the current size of the cluster,
    returns a dict mapping from nodeShape to the number of nodes we want in the cluster
    right now.

    :param queuedJobShapes: Shapes (resource requirements) of currently queued jobs.
    :param currentNodeCounts: dict mapping nodeShape to the current count of that shape.
    :return: dict mapping nodeShape to the desired node count.
    """
    nodesToRunQueuedJobs = binPacking(jobShapes=queuedJobShapes,
                                      nodeShapes=self.nodeShapes,
                                      goalTime=self.targetTime)
    estimatedNodeCounts = {}
    for nodeShape in self.nodeShapes:
        nodeType = self.nodeShapeToType[nodeShape]
        logger.debug("Nodes of type %s to run queued jobs = "
                     "%s" % (nodeType, nodesToRunQueuedJobs[nodeShape]))

        # Actual calculation of the estimated number of nodes required
        estimatedNodeCount = 0 if nodesToRunQueuedJobs[nodeShape] == 0 \
            else max(1, self._round(nodesToRunQueuedJobs[nodeShape]))
        logger.debug("Estimating %i nodes of shape %s" % (estimatedNodeCount, nodeShape))

        # Use inertia parameter to smooth out fluctuations according to an exponentially
        # weighted moving average.
        estimatedNodeCount = self.smoothEstimate(nodeShape, estimatedNodeCount)

        # If we're scaling a non-preemptable node type, we need to see if we have a
        # deficit of preemptable nodes of this type that we should compensate for.
        if not nodeShape.preemptable:
            compensation = self.config.preemptableCompensation
            assert 0.0 <= compensation <= 1.0
            # The number of nodes we provision as compensation for missing preemptable
            # nodes is the product of the deficit (the number of preemptable nodes we did
            # _not_ allocate) and configuration preference.
            compensationNodes = self._round(self.preemptableNodeDeficit[nodeType] * compensation)
            if compensationNodes > 0:
                logger.debug('Adding %d non-preemptable nodes of type %s to compensate for a '
                             'deficit of %d preemptable ones.', compensationNodes,
                             nodeType,
                             self.preemptableNodeDeficit[nodeType])
                estimatedNodeCount += compensationNodes

        logger.debug("Currently %i nodes of type %s in cluster" % (currentNodeCounts[nodeShape],
                                                                   nodeType))
        if self.leader.toilMetrics:
            self.leader.toilMetrics.logClusterSize(nodeType=nodeType,
                                                   currentSize=currentNodeCounts[nodeShape],
                                                   desiredSize=estimatedNodeCount)

        # Bound number using the max and min node parameters
        if estimatedNodeCount > self.maxNodes[nodeShape]:
            logger.debug('Limiting the estimated number of necessary %s (%s) to the '
                         'configured maximum (%s).', nodeType, estimatedNodeCount,
                         self.maxNodes[nodeShape])
            estimatedNodeCount = self.maxNodes[nodeShape]
        elif estimatedNodeCount < self.minNodes[nodeShape]:
            logger.debug('Raising the estimated number of necessary %s (%s) to the '
                         'configured minimum (%s).', nodeType, estimatedNodeCount,
                         self.minNodes[nodeShape])
            estimatedNodeCount = self.minNodes[nodeShape]

        estimatedNodeCounts[nodeShape] = estimatedNodeCount
    return estimatedNodeCounts
python
{ "resource": "" }
q28961
ClusterScaler.setNodeCount
train
def setNodeCount(self, nodeType, numNodes, preemptable=False, force=False):
    """
    Attempt to grow or shrink the number of preemptable or non-preemptable worker nodes in
    the cluster to the given value, or as close a value as possible, and, after performing
    the necessary additions or removals of worker nodes, return the resulting number of
    preemptable or non-preemptable nodes currently in the cluster.

    :param str nodeType: The node type to add or remove.

    :param int numNodes: Desired size of the cluster

    :param bool preemptable: whether the added nodes will be preemptable, i.e. whether they
           may be removed spontaneously by the underlying platform at any time.

    :param bool force: If False, the provisioner is allowed to deviate from the given number
           of nodes. For example, when downsizing a cluster, a provisioner might leave nodes
           running if they have active jobs running on them.

    :rtype: int
    :return: the number of worker nodes in the cluster after making the necessary
            adjustments. This value should be, but is not guaranteed to be, close or equal to
            the `numNodes` argument. It represents the closest possible approximation of the
            actual cluster size at the time this method returns.
    """
    for attempt in retry(predicate=self.provisioner.retryPredicate):
        with attempt:
            workerInstances = self.getNodes(preemptable=preemptable)
            logger.debug("Cluster contains %i instances" % len(workerInstances))
            # Reduce to nodes of the correct type
            workerInstances = {node:workerInstances[node] for node in workerInstances if node.nodeType == nodeType}
            ignoredNodes = [node for node in workerInstances if node.privateIP in self.ignoredNodes]
            numIgnoredNodes = len(ignoredNodes)
            numCurrentNodes = len(workerInstances)
            logger.debug("Cluster contains %i instances of type %s (%i ignored and draining jobs until "
                         "they can be safely terminated)" % (numCurrentNodes, nodeType, numIgnoredNodes))
            # Unless forced, ignored (draining) nodes don't count toward the current size.
            if not force:
                delta = numNodes - (numCurrentNodes - numIgnoredNodes)
            else:
                delta = numNodes - numCurrentNodes
            if delta > 0 and numIgnoredNodes > 0:
                # We can un-ignore a few nodes to compensate for the additional nodes we want.
                numNodesToUnignore = min(delta, numIgnoredNodes)
                logger.debug('Unignoring %i nodes because we want to scale back up again.' % numNodesToUnignore)
                delta -= numNodesToUnignore
                for node in ignoredNodes[:numNodesToUnignore]:
                    self.ignoredNodes.remove(node.privateIP)
                    self.leader.batchSystem.unignoreNode(node.privateIP)
            if delta > 0:
                logger.info('Adding %i %s nodes to get to desired cluster size of %i.',
                            delta,
                            'preemptable' if preemptable else 'non-preemptable',
                            numNodes)
                numNodes = numCurrentNodes + self._addNodes(nodeType, numNodes=delta,
                                                            preemptable=preemptable)
            elif delta < 0:
                logger.info('Removing %i %s nodes to get to desired cluster size of %i.',
                            -delta,
                            'preemptable' if preemptable else 'non-preemptable',
                            numNodes)
                numNodes = numCurrentNodes - self._removeNodes(workerInstances,
                                                               nodeType = nodeType,
                                                               numNodes=-delta,
                                                               preemptable=preemptable,
                                                               force=force)
            else:
                if not force:
                    logger.debug('Cluster (minus ignored nodes) already at desired size of %i. Nothing to do.', numNodes)
                else:
                    logger.debug('Cluster already at desired size of %i. Nothing to do.', numNodes)
    return numNodes
python
{ "resource": "" }
q28962
ClusterScaler.getNodes
train
def getNodes(self, preemptable):
    """
    Returns a dictionary mapping node identifiers of preemptable or non-preemptable nodes to
    NodeInfo objects, one for each node.

    This method is the definitive source on nodes in cluster, & is responsible for
    consolidating cluster state between the provisioner & batch system.

    :param bool preemptable: If True (False) only (non-)preemptable nodes will be returned.
           If None, all nodes will be returned.

    :rtype: dict[Node, NodeInfo]
    """
    def _getInfo(allMesosNodes, ip):
        # Build a NodeInfo for a node the batch system hasn't reported recently.
        info = None
        try:
            info = allMesosNodes[ip]
        except KeyError:
            # never seen by mesos - 1 of 3 possibilities:
            # 1) node is still launching mesos & will come online soon
            # 2) no jobs have been assigned to this worker. This means the executor was never
            #    launched, so we don't even get an executorInfo back indicating 0 workers running
            # 3) mesos crashed before launching, worker will never come online
            # In all 3 situations it's safe to fake executor info with 0 workers, since in all
            # cases there are no workers running.
            info = NodeInfo(coresTotal=1, coresUsed=0, requestedCores=0,
                            memoryTotal=1, memoryUsed=0, requestedMemory=0,
                            workers=0)
        else:
            # Node was tracked but we haven't seen this in the last 10 minutes
            inUse = self.leader.batchSystem.nodeInUse(ip)
            if not inUse and info:
                # The node hasn't reported in the last 10 minutes & last we know
                # there weren't any tasks running. We will fake executorInfo with no
                # worker to reflect this, since otherwise this node will never
                # be considered for termination
                info.workers = 0
            else:
                pass
                # despite the node not reporting to mesos jobs may still be running
                # so we can't terminate the node
        return info

    allMesosNodes = self.leader.batchSystem.getNodes(preemptable, timeout=None)
    recentMesosNodes = self.leader.batchSystem.getNodes(preemptable)
    provisionerNodes = self.provisioner.getProvisionedWorkers(nodeType=None,
                                                              preemptable=preemptable)

    if len(recentMesosNodes) != len(provisionerNodes):
        logger.debug("Consolidating state between mesos and provisioner")
    nodeToInfo = {}
    # fixme: what happens if awsFilterImpairedNodes is used?
    # if this assertion is false it means that user-managed nodes are being
    # used that are outside the provisioner's control
    # this would violate many basic assumptions in autoscaling so it currently not allowed
    for node, ip in ((node, node.privateIP) for node in provisionerNodes):
        info = None
        if ip not in recentMesosNodes:
            logger.debug("Worker node at %s is not reporting executor information", ip)
            # we don't have up to date information about the node
            info = _getInfo(allMesosNodes, ip)
        else:
            # mesos knows about the ip & we have up to date information - easy!
            info = recentMesosNodes[ip]
        # add info to dict to return
        nodeToInfo[node] = info
    return nodeToInfo
python
{ "resource": "" }
q28963
ScalerThread.check
train
def check(self):
    """
    Attempt to join any existing scaler threads that may have died or finished.

    This insures any exceptions raised in the threads are propagated in a timely fashion.
    """
    try:
        # timeout=0 makes this a non-blocking poll of the thread.
        self.join(timeout=0)
    except Exception as e:
        logger.exception(e)
        raise
python
{ "resource": "" }
q28964
ScalerThread.shutdown
train
def shutdown(self):
    """
    Shutdown the cluster.

    Signals the scaler loop to stop, flushes stats if enabled, then waits
    for the thread to finish.
    """
    self.stop = True
    if self.stats:
        self.stats.shutDownStats()
    self.join()
python
{ "resource": "" }
q28965
AbstractJobStore.initialize
train
def initialize(self, config):
    """
    Create the physical storage for this job store, allocate a workflow ID and persist the
    given Toil configuration to the store.

    :param toil.common.Config config: the Toil configuration to initialize this job store
           with. The given configuration will be updated with the newly allocated workflow ID.

    :raises JobStoreExistsException: if the physical storage for this job store already exists
    """
    # A workflow may only be initialized once.
    assert config.workflowID is None
    config.workflowID = str(uuid4())
    logger.debug("The workflow ID is: '%s'" % config.workflowID)
    self.__config = config
    self.writeConfig()
python
{ "resource": "" }
q28966
AbstractJobStore.setRootJob
train
def setRootJob(self, rootJobStoreID):
    """
    Set the root job of the workflow backed by this job store

    :param str rootJobStoreID: The ID of the job to set as root
    """
    # The root job ID is stored UTF-8 encoded in a well-known shared file.
    with self.writeSharedFileStream(self.rootJobStoreIDFileName) as f:
        f.write(rootJobStoreID.encode('utf-8'))
python
{ "resource": "" }
q28967
AbstractJobStore.loadRootJob
train
def loadRootJob(self):
    """
    Loads the root job in the current job store.

    :raises toil.job.JobException: If no root job is set or if the root job doesn't exist in
            this job store

    :return: The root job.

    :rtype: toil.jobGraph.JobGraph
    """
    try:
        with self.readSharedFileStream(self.rootJobStoreIDFileName) as f:
            rootJobStoreID = f.read().decode('utf-8')
    except NoSuchFileException:
        # The shared file holding the root job ID was never written.
        raise JobException('No job has been set as the root in this job store')
    if not self.exists(rootJobStoreID):
        raise JobException("The root job '%s' doesn't exist. Either the Toil workflow "
                           "is finished or has never been started" % rootJobStoreID)
    return self.load(rootJobStoreID)
python
{ "resource": "" }
q28968
AbstractJobStore.createRootJob
train
def createRootJob(self, *args, **kwargs):
    """
    Create a new job and set it as the root job in this job store

    All arguments are forwarded verbatim to :meth:`create`.

    :rtype: toil.jobGraph.JobGraph
    """
    rootJob = self.create(*args, **kwargs)
    self.setRootJob(rootJob.jobStoreID)
    return rootJob
python
{ "resource": "" }
q28969
AbstractJobStore._jobStoreClasses
train
def _jobStoreClasses(self):
    """
    A list of concrete AbstractJobStore implementations whose dependencies are installed.

    Implementations whose module fails to import (e.g. because the
    corresponding installation "extra" was omitted) are silently skipped.

    :rtype: list[AbstractJobStore]
    """
    jobStoreClassNames = (
        "toil.jobStores.azureJobStore.AzureJobStore",
        "toil.jobStores.fileJobStore.FileJobStore",
        "toil.jobStores.googleJobStore.GoogleJobStore",
        "toil.jobStores.aws.jobStore.AWSJobStore",
        "toil.jobStores.abstractJobStore.JobStoreSupport")
    jobStoreClasses = []
    # Hoisted out of the loop: the import statement is loop-invariant.
    from importlib import import_module
    for className in jobStoreClassNames:
        moduleName, className = className.rsplit('.', 1)
        try:
            module = import_module(moduleName)
        except ImportError:
            # Missing optional dependency ("extra") -- skip that implementation.
            logger.debug("Unable to import '%s' as is expected if the corresponding extra was "
                         "omitted at installation time.", moduleName)
        else:
            jobStoreClass = getattr(module, className)
            jobStoreClasses.append(jobStoreClass)
    return jobStoreClasses
python
{ "resource": "" }
q28970
AbstractJobStore._findJobStoreForUrl
train
def _findJobStoreForUrl(self, url, export=False):
    """
    Returns the AbstractJobStore subclass that supports the given URL.

    :param urlparse.ParseResult url: The given URL
    :param bool export: whether export (True) or import (False) support is required
    :rtype: toil.jobStore.AbstractJobStore
    :raises RuntimeError: if no registered implementation supports the URL
    """
    # First registered class claiming support for the URL wins.
    for jobStoreCls in self._jobStoreClasses:
        if jobStoreCls._supportsUrl(url, export):
            return jobStoreCls
    raise RuntimeError("No job store implementation supports %sporting for URL '%s'" %
                       ('ex' if export else 'im', url.geturl()))
python
{ "resource": "" }
q28971
AbstractJobStore.importFile
train
def importFile(self, srcUrl, sharedFileName=None, hardlink=False): """ Imports the file at the given URL into job store. The ID of the newly imported file is returned. If the name of a shared file name is provided, the file will be imported as such and None is returned. Currently supported schemes are: - 's3' for objects in Amazon S3 e.g. s3://bucket/key - 'wasb' for blobs in Azure Blob Storage e.g. wasb://container/blob - 'file' for local files e.g. file:///local/file/path - 'http' e.g. http://someurl.com/path - 'gs' e.g. gs://bucket/file :param str srcUrl: URL that points to a file or object in the storage mechanism of a supported URL scheme e.g. a blob in an Azure Blob Storage container. :param str sharedFileName: Optional name to assign to the imported file within the job store :return: The jobStoreFileId of the imported file or None if sharedFileName was given :rtype: toil.fileStore.FileID or None """ # Note that the helper method _importFile is used to read from the source and write to # destination (which is the current job store in this case). To implement any # optimizations that circumvent this, the _importFile method should be overridden by # subclasses of AbstractJobStore. srcUrl = urlparse.urlparse(srcUrl) otherCls = self._findJobStoreForUrl(srcUrl) return self._importFile(otherCls, srcUrl, sharedFileName=sharedFileName, hardlink=hardlink)
python
{ "resource": "" }
q28972
AbstractJobStore._exportFile
train
def _exportFile(self, otherCls, jobStoreFileID, url):
    """
    Refer to exportFile docstring for information about this method.

    :param AbstractJobStore otherCls: The concrete subclass of AbstractJobStore that supports
           exporting to the given URL. Note that the type annotation here is not completely
           accurate. This is not an instance, it's a class, but there is no way to reflect
           that in :pep:`484` type hints.

    :param str jobStoreFileID: The id of the file that will be exported.

    :param urlparse.ParseResult url: The parsed URL of the file to export to.
    """
    # Stream the file out of this job store into the destination handled by otherCls.
    with self.readFileStream(jobStoreFileID) as readable:
        otherCls._writeToUrl(readable, url)
python
{ "resource": "" }
q28973
parseStorage
train
def parseStorage(storageData):
    """
    Parses EC2 JSON storage param string into a number.

    Examples:
        "2 x 160 SSD"
        "3 x 2000 HDD"
        "EBS only"
        "1 x 410"
        "8 x 1.9 NVMe SSD"

    :param str storageData: EC2 JSON storage param string.
    :return: Tuple of two floats representing: (# of disks), and
             (disk_capacity in GiB of each disk).
    :raises RuntimeError: if the string doesn't match the expected format.
    """
    if storageData == "EBS only":
        # No instance storage. Return a tuple of floats for consistency with
        # the parsed branch below (previously a list of ints, which made the
        # return type depend on the input).
        return 0.0, 0.0
    specs = storageData.strip().split()
    # Expected shape: "<count> x <size> [...]"; Amazon puts commas in numbers.
    if isNumber(specs[0]) and specs[1] == 'x' and isNumber(specs[2]):
        return float(specs[0].replace(',', '')), float(specs[2].replace(',', ''))
    raise RuntimeError('EC2 JSON format has likely changed. Error parsing disk specs.')
python
{ "resource": "" }
q28974
parseMemory
train
def parseMemory(memAttribute):
    """
    Returns EC2 'memory' string as a float.

    Format should always be '#' GiB (example: '244 GiB' or '1,952 GiB').
    Amazon loves to put commas in their numbers, so we have to accommodate that.
    If the syntax ever changes, this will raise.

    :param memAttribute: EC2 JSON memory param string.
    :return: A float representing memory in GiB.
    """
    # Strip thousands separators before tokenizing.
    tokens = memAttribute.replace(',', '').split()
    if tokens[1] != 'GiB':
        raise RuntimeError('EC2 JSON format has likely changed. Error parsing memory.')
    return float(tokens[0])
python
{ "resource": "" }
q28975
fetchEC2InstanceDict
train
def fetchEC2InstanceDict(regionNickname=None):
    """
    Fetches EC2 instances types by region programmatically using the AWS pricing API.

    See: https://aws.amazon.com/blogs/aws/new-aws-price-list-api/

    Falls back to the statically generated lists shipped with Toil when the API
    request fails or yields no instances for the region.

    :return: A dict of InstanceType objects, where the key is the string:
             aws instance name (example: 't2.micro'), and the value is an
             InstanceType object representing that aws instance name.
    """
    ec2Source = 'https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/index.json'
    if regionNickname is None:
        regionNickname = 'us-west-2'
    # The pricing JSON keys offerings by the verbose region name.
    region = EC2Regions[regionNickname]

    # Summon the API to grab the latest instance types/prices/specs.
    response = requests.get(ec2Source)
    instances = parseEC2Json2List(jsontext=response.text, region=region) if response.ok else []

    if instances:
        return {instance.name: instance for instance in instances}

    # Nothing fetched; fall back to the pregenerated static lists.
    from toil.lib import generatedEC2Lists as defaultEC2
    return {instance.name: instance for instance in defaultEC2.ec2InstancesByRegion[regionNickname]}
python
{ "resource": "" }
q28976
parseEC2Json2List
train
def parseEC2Json2List(jsontext, region):
    """
    Takes a JSON and returns a list of InstanceType objects representing EC2 instance params.

    Keeps only Linux, shared-tenancy, plain 'RunInstances' offerings:
    'Host' entries are always $0.00 templates, 'Dedicated' tenancy is not
    supported by Toil, and 'RunInstances:<code>' operations denote bundled
    software (e.g. Linux with MS SQL Server) rather than a bare instance.

    :param jsontext: Raw JSON text from the AWS pricing API.
    :param region: Verbose region name to filter on.
    :return: List of InstanceType objects.
    """
    products = json.loads(jsontext)["products"]
    ec2InstanceList = []
    for _, product in iteritems(products):
        attributes = product["attributes"]
        # Guard clauses replace the original nested conditionals.
        if attributes.get("location") != region:
            continue
        if attributes.get("tenancy") != "Shared":
            continue
        if attributes["operatingSystem"] != "Linux":
            continue
        # The same instance can appear with multiple "operation" values;
        # only plain "RunInstances" is a bare Linux instance.
        if attributes["operation"] != "RunInstances":
            continue
        disks, disk_capacity = parseStorage(attributes["storage"])
        instance = InstanceType(name=attributes["instanceType"],
                                cores=attributes["vcpu"],
                                memory=parseMemory(attributes["memory"]),
                                disks=disks,
                                disk_capacity=disk_capacity)
        if instance in ec2InstanceList:
            raise RuntimeError('EC2 JSON format has likely changed. '
                               'Duplicate instance {} found.'.format(instance))
        ec2InstanceList.append(instance)
    return ec2InstanceList
python
{ "resource": "" }
q28977
updateStaticEC2Instances
train
def updateStaticEC2Instances():
    """
    Generates a new python file of fetchable EC2 Instances by region with current prices and specs.

    Takes a few (~3+) minutes to run (you'll need decent internet).

    :return: Nothing. Writes a new 'generatedEC2Lists.py' file.
    """
    logger.info("Updating Toil's EC2 lists to the most current version from AWS's bulk API. "
                "This may take a while, depending on your internet connection.")

    dirname = os.path.dirname(__file__)
    # the file Toil uses to get info about EC2 instance types
    origFile = os.path.join(dirname, 'generatedEC2Lists.py')
    assert os.path.exists(origFile)
    # use a temporary file until all info is fetched
    genFile = os.path.join(dirname, 'generatedEC2Lists_tmp.py')
    assert not os.path.exists(genFile)
    # will be used to save a copy of the original when finished
    oldFile = os.path.join(dirname, 'generatedEC2Lists_old.py')

    # provenance note, copyright and imports
    # NOTE(review): the [1:] drops the leading newline introduced by the
    # triple-quoted string; the trailing \n\n\n leaves two blank lines before
    # the generated content.
    with open(genFile, 'w') as f:
        f.write(textwrap.dedent('''
        # !!! AUTOGENERATED FILE !!!
        # Update with: src/toil/utils/toilUpdateEC2Instances.py
        #
        # Copyright (C) 2015-{year} UCSC Computational Genomics Lab
        #
        # Licensed under the Apache License, Version 2.0 (the "License");
        # you may not use this file except in compliance with the License.
        # You may obtain a copy of the License at
        #
        # http://www.apache.org/licenses/LICENSE-2.0
        #
        # Unless required by applicable law or agreed to in writing, software
        # distributed under the License is distributed on an "AS IS" BASIS,
        # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
        # See the License for the specific language governing permissions and
        # limitations under the License.
        from six import iteritems
        from toil.lib.ec2nodes import InstanceType\n\n\n''').format(year=datetime.date.today().strftime("%Y"))[1:])

    # Gather every instance type across all regions, deduplicating the
    # InstanceType objects but remembering which region offers which name.
    currentEC2List = []
    instancesByRegion = {}
    for regionNickname, _ in iteritems(EC2Regions):
        currentEC2Dict = fetchEC2InstanceDict(regionNickname=regionNickname)
        for instanceName, instanceTypeObj in iteritems(currentEC2Dict):
            if instanceTypeObj not in currentEC2List:
                currentEC2List.append(instanceTypeObj)
            instancesByRegion.setdefault(regionNickname, []).append(instanceName)

    # write header of total EC2 instance type list
    genString = "# {num} Instance Types. Generated {date}.\n".format(
        num=str(len(currentEC2List)), date=str(datetime.datetime.now()))
    genString = genString + "E2Instances = {\n"
    sortedCurrentEC2List = sorted(currentEC2List, key=lambda x: x.name)

    # write the list of all instances types
    for i in sortedCurrentEC2List:
        z = " '{name}': InstanceType(name='{name}', cores={cores}, memory={memory}, disks={disks}, disk_capacity={disk_capacity})," \
            "\n".format(name=i.name, cores=i.cores, memory=i.memory, disks=i.disks, disk_capacity=i.disk_capacity)
        genString = genString + z
    genString = genString + '}\n\n'

    genString = genString + 'regionDict = {\n'
    for regionName, instanceList in iteritems(instancesByRegion):
        genString = genString + " '{regionName}': [".format(regionName=regionName)
        for instance in sorted(instanceList):
            genString = genString + "'{instance}', ".format(instance=instance)
        # Trim the trailing ", " after the last instance name in this region.
        if genString.endswith(', '):
            genString = genString[:-2]
        genString = genString + '],\n'
    # Trim the trailing ",\n" after the last region before closing the dict.
    if genString.endswith(',\n'):
        genString = genString[:-len(',\n')]
    genString = genString + '}\n'
    with open(genFile, 'a+') as f:
        f.write(genString)

    # append key for fetching at the end
    regionKey = '\nec2InstancesByRegion = dict((region, [E2Instances[i] for i in instances]) for region, instances in iteritems(regionDict))\n'

    with open(genFile, 'a+') as f:
        f.write(regionKey)

    # preserve the original file unless it already exists
    if not os.path.exists(oldFile):
        os.rename(origFile, oldFile)

    # delete the original file if it's still there
    if os.path.exists(origFile):
        os.remove(origFile)

    # replace the instance list with a current list
    os.rename(genFile, origFile)
python
{ "resource": "" }
q28978
SingleMachineBatchSystem._runWorker
train
def _runWorker(self, jobCommand, jobID, environment):
    """
    Run the jobCommand using the worker and wait for it to finish.

    The worker is forked unless it is a '_toil_worker' job and
    debugWorker is True.

    :param str jobCommand: shell command line to execute.
    :param int jobID: locally unique identifier for this job.
    :param dict environment: extra environment variables for the child process.
    """
    startTime = time.time()  # Time job is started
    if self.debugWorker and "_toil_worker" in jobCommand:
        # Run the worker without forking
        jobName, jobStoreLocator, jobStoreID = jobCommand.split()[1:]  # Parse command
        jobStore = Toil.resumeJobStore(jobStoreLocator)
        # TODO: The following does not yet properly populate self.runningJobs so it is not possible to kill
        # running jobs in forkless mode - see the "None" value in place of popen
        info = Info(time.time(), None, killIntended=False)
        try:
            self.runningJobs[jobID] = info
            try:
                # Call the worker in-process; exit status is assumed 0 if it returns.
                toil_worker.workerScript(jobStore, jobStore.config, jobName, jobStoreID,
                                         redirectOutputToLogFile=not self.debugWorker)  # Call the worker
            finally:
                # Always unregister the job, even if the worker raised.
                self.runningJobs.pop(jobID)
        finally:
            if not info.killIntended:
                # Report completion (status 0) and wall time to the leader.
                self.outputQueue.put((jobID, 0, time.time() - startTime))
    else:
        # Fork a shell subprocess for the job; popenLock serializes Popen calls.
        with self.popenLock:
            popen = subprocess.Popen(jobCommand,
                                     shell=True,
                                     env=dict(os.environ, **environment))
        info = Info(time.time(), popen, killIntended=False)
        try:
            self.runningJobs[jobID] = info
            try:
                statusCode = popen.wait()
                if statusCode != 0 and not info.killIntended:
                    log.error("Got exit code %i (indicating failure) "
                              "from job %s.", statusCode, self.jobs[jobID])
            finally:
                # Always unregister the job once the child has been reaped.
                self.runningJobs.pop(jobID)
        finally:
            if not info.killIntended:
                # Report the child's real exit status and wall time.
                self.outputQueue.put((jobID, statusCode, time.time() - startTime))
python
{ "resource": "" }
q28979
SingleMachineBatchSystem.issueBatchJob
train
def issueBatchJob(self, jobNode): """Adds the command and resources to a queue to be run.""" # Round cores to minCores and apply scale cores = math.ceil(jobNode.cores * self.scale / self.minCores) * self.minCores assert cores <= self.maxCores, ('The job {} is requesting {} cores, more than the maximum of ' '{} cores this batch system was configured with. Scale is ' 'set to {}.'.format(jobNode.jobName, cores, self.maxCores, self.scale)) assert cores >= self.minCores assert jobNode.memory <= self.maxMemory, ('The job {} is requesting {} bytes of memory, more than ' 'the maximum of {} this batch system was configured ' 'with.'.format(jobNode.jobName, jobNode.memory, self.maxMemory)) self.checkResourceRequest(jobNode.memory, cores, jobNode.disk) log.debug("Issuing the command: %s with memory: %i, cores: %i, disk: %i" % ( jobNode.command, jobNode.memory, cores, jobNode.disk)) with self.jobIndexLock: jobID = self.jobIndex self.jobIndex += 1 self.jobs[jobID] = jobNode.command self.inputQueue.put((jobNode.command, jobID, cores, jobNode.memory, jobNode.disk, self.environment.copy())) if self.debugWorker: # then run immediately, blocking for return self.worker(self.inputQueue) return jobID
python
{ "resource": "" }
q28980
SingleMachineBatchSystem.killBatchJobs
train
def killBatchJobs(self, jobIDs):
    """Kills jobs by ID.

    Marks each job as intentionally killed, signals its process (when one
    exists), and blocks until the worker thread has removed the job from
    self.runningJobs.

    :param jobIDs: iterable of batch job IDs to kill.
    """
    log.debug('Killing jobs: {}'.format(jobIDs))
    for jobID in jobIDs:
        if jobID in self.runningJobs:
            info = self.runningJobs[jobID]
            info.killIntended = True
            if info.popen is not None:
                os.kill(info.popen.pid, 9)
            else:
                # No popen if running in forkless mode currently
                assert self.debugWorker
                log.critical("Can't kill job: %s in debug mode" % jobID)
        while jobID in self.runningJobs:
            # Previously this was a bare busy-wait that pinned a CPU core;
            # sleep briefly while the worker thread reaps the job.
            time.sleep(0.01)
python
{ "resource": "" }
q28981
SingleMachineBatchSystem.shutdown
train
def shutdown(self):
    """
    Cleanly terminate worker threads.

    Pushes one sentinel (None) per worker onto the input queue so every worker
    thread exits its loop, joins them all, then runs the cleanup hook.
    """
    # Drop our reference first so any later use of self.inputQueue raises.
    queue, self.inputQueue = self.inputQueue, None
    for _ in range(self.numWorkers):
        queue.put(None)
    for workerThread in self.workerThreads:
        workerThread.join()
    BatchSystemSupport.workerCleanup(self.workerCleanupInfo)
python
{ "resource": "" }
q28982
SingleMachineBatchSystem.getUpdatedBatchJob
train
def getUpdatedBatchJob(self, maxWait):
    """Returns a map of the run jobs and the return value of their processes."""
    try:
        jobID, exitValue, wallTime = self.outputQueue.get(timeout=maxWait)
    except Empty:
        # Nothing finished within maxWait.
        return None
    # Forget the job's command now that it has completed.
    self.jobs.pop(jobID)
    log.debug("Ran jobID: %s with exit value: %i", jobID, exitValue)
    return jobID, exitValue, wallTime
python
{ "resource": "" }
q28983
MesosExecutor.registered
train
def registered(self, driver, executorInfo, frameworkInfo, agentInfo):
    """
    Invoked once the executor driver has been able to successfully connect with Mesos.
    """
    # Remember the executor ID Mesos assigned us, if any.
    self.id = executorInfo.executor_id.get('value', None)
    log.debug("Registered executor %s with framework", self.id)
    self.address = socket.gethostbyname(agentInfo.hostname)
    # Report node info back to the framework from a background daemon thread.
    reporterThread = threading.Thread(target=self._sendFrameworkMessage, args=[driver])
    reporterThread.daemon = True
    reporterThread.start()
python
{ "resource": "" }
q28984
MesosExecutor.killTask
train
def killTask(self, driver, taskId):
    """
    Kill parent task process and all its spawned children
    """
    pid = self.runningTasks.get(taskId)
    if pid is not None:
        # Signal the whole process group so spawned children die as well.
        os.killpg(os.getpgid(pid), signal.SIGKILL)
python
{ "resource": "" }
q28985
retry
train
def retry( delays=(0, 1, 1, 4, 16, 64), timeout=300, predicate=never ):
    """
    Retry an operation while the failure matches a given predicate and until a given timeout
    expires, waiting a given amount of time in between attempts. This function is a generator
    that yields contextmanagers. See doctests below for example usage.

    :param Iterable[float] delays: an interable yielding the time in seconds to wait before each
           retried attempt, the last element of the iterable will be repeated.

    :param float timeout: a overall timeout that should not be exceeded for all attempts together.
           This is a best-effort mechanism only and it won't abort an ongoing attempt, even if the
           timeout expires during that attempt.

    :param Callable[[Exception],bool] predicate: a unary callable returning True if another
           attempt should be made to recover from the given exception. The default value for this
           parameter will prevent any retries!

    :return: a generator yielding context managers, one per attempt
    :rtype: Iterator

    Retry for a limited amount of time:

    >>> true = lambda _:True
    >>> false = lambda _:False
    >>> i = 0
    >>> for attempt in retry( delays=[0], timeout=.1, predicate=true ):
    ...     with attempt:
    ...         i += 1
    ...         raise RuntimeError('foo')
    Traceback (most recent call last):
    ...
    RuntimeError: foo
    >>> i > 1
    True

    If timeout is 0, do exactly one attempt:

    >>> i = 0
    >>> for attempt in retry( timeout=0 ):
    ...     with attempt:
    ...         i += 1
    ...         raise RuntimeError( 'foo' )
    Traceback (most recent call last):
    ...
    RuntimeError: foo
    >>> i
    1

    Don't retry on success:

    >>> i = 0
    >>> for attempt in retry( delays=[0], timeout=.1, predicate=true ):
    ...     with attempt:
    ...         i += 1
    >>> i
    1

    Don't retry on unless predicate returns True:

    >>> i = 0
    >>> for attempt in retry( delays=[0], timeout=.1, predicate=false):
    ...     with attempt:
    ...         i += 1
    ...         raise RuntimeError( 'foo' )
    Traceback (most recent call last):
    ...
    RuntimeError: foo
    >>> i
    1
    """
    if timeout > 0:
        # 'go' acts as a sentinel: repeated_attempt() empties it on success,
        # which ends the while loop below and stops yielding attempts.
        go = [ None ]

        @contextmanager
        def repeated_attempt( delay ):
            try:
                yield
            except Exception as e:
                # Retry only while the overall deadline allows another wait
                # AND the caller's predicate approves this exception.
                if time.time( ) + delay < expiration and predicate( e ):
                    log.info( 'Got %s, trying again in %is.', e, delay )
                    time.sleep( delay )
                else:
                    # Out of time or non-retryable: propagate to the caller.
                    raise
            else:
                # Success: clear the sentinel so no further attempts are yielded.
                go.pop( )

        delays = iter( delays )
        expiration = time.time( ) + timeout
        delay = next( delays )
        while go:
            yield repeated_attempt( delay )
            # After the delays iterable is exhausted, keep repeating its
            # last element.
            delay = next( delays, delay )
    else:
        # timeout <= 0: exactly one attempt, no retry machinery at all.
        @contextmanager
        def single_attempt( ):
            yield

        yield single_attempt( )
python
{ "resource": "" }
q28986
retryable_http_error
train
def retryable_http_error( e ):
    """
    Determine if an error encountered during an HTTP download is likely to go away if we try again.

    :param BaseException e: the exception raised by the download attempt.
    :return: True if the request should be retried, False otherwise.
    :rtype: bool
    """
    # HTTPError.code is an int; the previous comparison against the string
    # tuple ('503', '408', '500') could never match, so these transient
    # server errors were never retried.
    if isinstance( e, urllib.error.HTTPError ) and e.code in (503, 408, 500):
        # The server returned one of:
        # 503 Service Unavailable
        # 408 Request Timeout
        # 500 Internal Server Error
        return True
    if isinstance( e, BadStatusLine ):
        # The server didn't return a valid response at all
        return True
    return False
python
{ "resource": "" }
q28987
AbstractGridEngineBatchSystem.getRunningBatchJobIDs
train
def getRunningBatchJobIDs(self):
    """
    Retrieve running job IDs from local and batch scheduler.

    Respects statePollingWait and will return cached results if not within
    time period to talk with the scheduler.
    """
    cacheAge = None
    if self._getRunningBatchJobIDsTimestamp:
        cacheAge = (datetime.now() - self._getRunningBatchJobIDsTimestamp).total_seconds()

    if cacheAge is not None and cacheAge < self.config.statePollingWait:
        # Too soon to bother the scheduler again; serve the cached answer.
        batchIds = self._getRunningBatchJobIDsCache
    else:
        batchIds = with_retries(self.worker.getRunningJobIDs)
        self._getRunningBatchJobIDsCache = batchIds
        self._getRunningBatchJobIDsTimestamp = datetime.now()

    # Locally tracked jobs are always merged in, cached or not.
    batchIds.update(self.getRunningLocalJobIDs())
    return batchIds
python
{ "resource": "" }
q28988
GaussianMultivariate.get_lower_bound
train
def get_lower_bound(self):
    """Compute the lower bound to integrate cumulative density.

    Returns:
        float: lower bound for cumulative density integral.
    """
    # Probe each marginal far into its left tail and keep the finite results.
    candidates = [
        distribution.percent_point(distribution.mean / 10000)
        for distribution in self.distribs.values()
    ]
    return min(bound for bound in candidates if not pd.isnull(bound))
python
{ "resource": "" }
q28989
GaussianMultivariate.get_column_names
train
def get_column_names(self, X):
    """Return iterable containing columns for the given array X.

    Args:
        X: `numpy.ndarray` or `pandas.DataFrame`.

    Returns:
        iterable: columns for the given matrix.
    """
    if not isinstance(X, pd.DataFrame):
        # Plain arrays are addressed by positional index.
        return range(X.shape[1])
    return X.columns
python
{ "resource": "" }
q28990
GaussianMultivariate.get_column
train
def get_column(self, X, column):
    """Return a column of the given matrix.

    Args:
        X: `numpy.ndarray` or `pandas.DataFrame`.
        column: `int` or `str`.

    Returns:
        np.ndarray: Selected column.
    """
    if not isinstance(X, pd.DataFrame):
        # Positional slice for plain 2-D arrays.
        return X[:, column]
    return X[column].values
python
{ "resource": "" }
q28991
GaussianMultivariate.set_column
train
def set_column(self, X, column, value):
    """Sets a column on the matrix X with the given value.

    Args:
        X: `numpy.ndarray` or `pandas.DataFrame`.
        column: `int` or `str`.
        value: `np.ndarray` with shape (1,)

    Returns:
        `np.ndarray` or `pandas.DataFrame` with the inserted column.
    """
    if not isinstance(X, pd.DataFrame):
        X[:, column] = value
        return X
    # .loc avoids pandas' chained-assignment warnings.
    X.loc[:, column] = value
    return X
python
{ "resource": "" }
q28992
GaussianMultivariate._get_covariance
train
def _get_covariance(self, X):
    """Compute covariance matrix with transformed data.

    Each column is mapped through its fitted marginal's cdf and then through
    the standard normal inverse cdf, so the covariance is computed in
    gaussian space.

    Args:
        X: `numpy.ndarray` or `pandas.DataFrame`.

    Returns:
        np.ndarray
    """
    result = pd.DataFrame(index=range(len(X)))
    column_names = self.get_column_names(X)
    for column_name in column_names:
        column = self.get_column(X, column_name)
        distrib = self.distribs[column_name]

        # get original distrib's cdf of the column
        cdf = distrib.cumulative_distribution(column)

        if distrib.constant_value is not None:
            # This is to avoid np.inf in the case the column is constant.
            # EPSILON is a module-level constant defined elsewhere in this file.
            cdf = np.ones(column.shape) - EPSILON

        # get inverse cdf using standard normal
        result = self.set_column(result, column_name, stats.norm.ppf(cdf))

    # remove any rows that have infinite values
    # (norm.ppf maps cdf values of exactly 1.0 to +inf)
    result = result[(result != np.inf).all(axis=1)]
    return pd.DataFrame(data=result).cov().values
python
{ "resource": "" }
q28993
GaussianMultivariate.fit
train
def fit(self, X):
    """Compute the distribution for each variable and then its covariance matrix.

    Args:
        X(numpy.ndarray or pandas.DataFrame): Data to model.

    Returns:
        None
    """
    LOGGER.debug('Fitting Gaussian Copula')
    distribution_class = import_object(self.distribution)

    # Fit one univariate marginal per column.
    for column_name in self.get_column_names(X):
        marginal = distribution_class()
        marginal.fit(self.get_column(X, column_name))
        self.distribs[column_name] = marginal

    self.covariance = self._get_covariance(X)
    self.fitted = True
python
{ "resource": "" }
q28994
GaussianMultivariate.cumulative_distribution
train
def cumulative_distribution(self, X):
    """Computes the cumulative distribution function for the copula

    Args:
        X: `numpy.ndarray` or `pandas.DataFrame`

    Returns:
        np.array: cumulative probability
    """
    self.check_fit()

    # integrate.nquad passes scalar positional args; adapt them into the
    # single vector our pdf expects.
    def integrand(*args):
        return self.probability_density(list(args))

    # Integrate the density from a shared lower bound (which splits the
    # significant mass from the negligible tail) up to each coordinate of X.
    lower_bound = self.get_lower_bound()
    ranges = [[lower_bound, value] for value in X]

    return integrate.nquad(integrand, ranges)[0]
python
{ "resource": "" }
q28995
GaussianMultivariate.sample
train
def sample(self, num_rows=1):
    """Creates synthetic values statistically similar to the original dataset.

    Args:
        num_rows: `int` amount of samples to generate.

    Returns:
        np.ndarray: Sampled data.
    """
    self.check_fit()

    # Draw from a zero-mean multivariate normal with the fitted covariance;
    # NaNs in the covariance are replaced by zeros first.
    means = np.zeros(self.covariance.shape[0])
    clean_cov = np.nan_to_num(self.covariance)
    samples = np.random.multivariate_normal(means, clean_cov, size=(num_rows,))

    # Map each gaussian column back through its marginal's inverse cdf.
    res = {}
    for index, (label, distrib) in enumerate(self.distribs.items()):
        res[label] = distrib.percent_point(stats.norm.cdf(samples[:, index]))

    return pd.DataFrame(data=res)
python
{ "resource": "" }
q28996
GaussianUnivariate.probability_density
train
def probability_density(self, X):
    """Compute probability density.

    Arguments:
        X: `np.ndarray` of shape (n, 1).

    Returns:
        np.ndarray
    """
    self.check_fit()
    # Gaussian pdf parameterized by the fitted mean and standard deviation.
    density = norm.pdf(X, self.mean, self.std)
    return density
python
{ "resource": "" }
q28997
GaussianUnivariate.cumulative_distribution
train
def cumulative_distribution(self, X):
    """Cumulative distribution function for gaussian distribution.

    Arguments:
        X: `np.ndarray` of shape (n, 1).

    Returns:
        np.ndarray: Cumulative density for X.
    """
    self.check_fit()
    # Gaussian cdf at X with the fitted parameters.
    cumulative = norm.cdf(X, self.mean, self.std)
    return cumulative
python
{ "resource": "" }
q28998
GaussianUnivariate.percent_point
train
def percent_point(self, U):
    """Given a cumulated distribution value, returns a value in original space.

    Arguments:
        U: `np.ndarray` of shape (n, 1) and values in [0,1]

    Returns:
        `np.ndarray`: Estimated values in original space.
    """
    self.check_fit()
    # Inverse of the gaussian cdf with the fitted parameters.
    return norm.ppf(U, self.mean, self.std)
python
{ "resource": "" }
q28999
GaussianUnivariate.sample
train
def sample(self, num_samples=1):
    """Returns new data point based on model.

    Arguments:
        n_samples: `int`

    Returns:
        np.ndarray: Generated samples
    """
    self.check_fit()
    # Draw directly from N(mean, std).
    return np.random.normal(loc=self.mean, scale=self.std, size=num_samples)
python
{ "resource": "" }