Search is not available for this dataset
text
stringlengths
75
104k
def recommend_k_items_slow(self, test, top_k=10, remove_seen=True): """Recommend top K items for all users which are in the test set. Args: test: test Spark dataframe top_k: top n items to return remove_seen: remove items test users have already seen in the past from the recommended set. """ # TODO: remove seen if remove_seen: raise ValueError("Not implemented") self.get_user_affinity(test)\ .write.mode("overwrite")\ .saveAsTable(self.f("{prefix}user_affinity")) # user_affinity * item_similarity # filter top-k query = self.f( """ SELECT {col_user}, {col_item}, score FROM ( SELECT df.{col_user}, S.i2 {col_item}, SUM(df.{col_rating} * S.value) AS score, row_number() OVER(PARTITION BY {col_user} ORDER BY SUM(df.{col_rating} * S.value) DESC) rank FROM {prefix}user_affinity df, {prefix}item_similarity S WHERE df.{col_item} = S.i1 GROUP BY df.{col_user}, S.i2 ) WHERE rank <= {top_k} """, top_k=top_k, ) return self.spark.sql(query)
def setauth(self, basic_auth):
    """Reset the stored authentication headers at runtime.

    Call this after changing passwords/apikeys so that subsequent
    reconnects authenticate with the new credentials.
    """
    self.headers = []
    if basic_auth is None:
        return
    # Cheap hack: requests-style auth objects mutate whatever object they
    # are handed, so give them a throwaway carrier and harvest the headers
    # it collects.
    class _HeaderCarrier():
        def __init__(self):
            self.headers = {}

    carrier = _HeaderCarrier()
    basic_auth(carrier)
    for name in carrier.headers:
        self.headers.append("%s: %s" % (name, carrier.headers[name]))
def send(self, cmd):
    """Serialize ``cmd`` to JSON and transmit it over the websocket.

    The send lock guarantees only one thread writes to the socket at a time.
    """
    payload = json.dumps(cmd)
    with self.ws_sendlock:
        self.ws.send(payload)
def subscribe(self, stream, callback, transform=""):
    """Subscribe to ``stream`` with ``callback`` and an optional transform.

    Reconnects first if the websocket is not up. Returns True on success,
    False if the connection could not be established.
    """
    if self.status in ("disconnected", "disconnecting", "connecting"):
        self.connect()
    # BUG FIX: the original used `self.status is not "connected"`, an
    # identity comparison that only works by accident of CPython string
    # interning; use equality instead.
    if self.status != "connected":
        return False
    logging.debug("Subscribing to %s", stream)
    self.send({"cmd": "subscribe", "arg": stream, "transform": transform})
    with self.subscription_lock:
        # Subscriptions are keyed by "<stream>:<transform>"
        self.subscriptions[stream + ":" + transform] = callback
    return True
def unsubscribe(self, stream, transform=""):
    """Unsubscribe from ``stream`` (with the optional transform).

    Returns False if not connected. Disconnects entirely once the last
    subscription is removed.
    """
    # BUG FIX: replaced `self.status is not "connected"` (identity check on a
    # string literal) and `len(...) is 0` with proper equality comparisons.
    if self.status != "connected":
        return False
    logging.debug("Unsubscribing from %s", stream)
    self.send({"cmd": "unsubscribe", "arg": stream, "transform": transform})
    self.subscription_lock.acquire()
    del self.subscriptions[stream + ":" + transform]
    no_subscriptions_left = len(self.subscriptions) == 0
    # Release before disconnect(), matching the original lock ordering so
    # disconnect never runs while the subscription lock is held.
    self.subscription_lock.release()
    if no_subscriptions_left:
        self.disconnect()
def connect(self): """Attempt to connect to the websocket - and returns either True or False depending on if the connection was successful or not""" # Wait for the lock to be available (ie, the websocket is not being used (yet)) self.ws_openlock.acquire() self.ws_openlock.release() if self.status == "connected": return True # Already connected if self.status == "disconnecting": # If currently disconnecting, wait a moment, and retry connect time.sleep(0.1) return self.connect() if self.status == "disconnected" or self.status == "reconnecting": self.ws = websocket.WebSocketApp(self.ws_url, header=self.headers, on_message=self.__on_message, on_ping=self.__on_ping, on_open=self.__on_open, on_close=self.__on_close, on_error=self.__on_error) self.ws_thread = threading.Thread(target=self.ws.run_forever) self.ws_thread.daemon = True self.status = "connecting" self.ws_openlock.acquire() self.ws_thread.start() self.ws_openlock.acquire() self.ws_openlock.release() return self.status == "connected"
def __reconnect(self):
    """Schedule a reconnection attempt after a lost connection.

    Uses exponential backoff with +/-20% jitter, clamped between
    reconnect_time_starting_seconds and reconnect_time_max_seconds. The
    backoff resets if the previous connection survived at least 15 minutes.
    """
    self.status = "reconnecting"

    # Reset the disconnect time after 15 minutes of healthy connection
    if self.disconnected_time - self.connected_time > 15 * 60:
        self.reconnect_time = self.reconnect_time_starting_seconds
    else:
        self.reconnect_time *= self.reconnect_time_backoff_multiplier

    if self.reconnect_time > self.reconnect_time_max_seconds:
        self.reconnect_time = self.reconnect_time_max_seconds

    # We want to add some randomness to the reconnect rate - necessary so
    # that we don't pound the server if it goes down
    self.reconnect_time *= 1 + random.uniform(-0.2, 0.2)
    if self.reconnect_time < self.reconnect_time_starting_seconds:
        self.reconnect_time = self.reconnect_time_starting_seconds

    # BUG FIX: logging.warn is a deprecated alias; use logging.warning
    logging.warning("ConnectorDB:WS: Attempting to reconnect in %fs",
                    self.reconnect_time)

    self.reconnector = threading.Timer(self.reconnect_time,
                                       self.__reconnect_fnc)
    self.reconnector.daemon = True
    self.reconnector.start()
def __resubscribe(self):
    """Re-issue a subscribe command for every stored subscription so that a
    connection which dropped can resume where it left off."""
    with self.subscription_lock:
        for key in self.subscriptions:
            logging.debug("Resubscribing to %s", key)
            # Keys have the form "<stream>:<transform>"; split only on the
            # first colon since the transform itself may contain colons.
            stream, transform = key.split(":", 1)
            self.send({
                "cmd": "subscribe",
                "arg": stream,
                "transform": transform
            })
def __on_open(self, ws):
    """Called when the websocket is opened"""
    logging.debug("ConnectorDB: Websocket opened")
    # Connection success - decrease the wait time for next connection
    self.reconnect_time /= self.reconnect_time_backoff_multiplier
    self.status = "connected"
    # Seed the ping watchdog before arming it, so the first check passes
    self.lastpingtime = time.time()
    self.__ensure_ping()
    self.connected_time = time.time()
    # Release the lock that connect called - this unblocks connect()
    self.ws_openlock.release()
def __on_close(self, ws):
    """Called when the websocket is closed"""
    if self.status == "disconnected":
        return  # This can be double-called on disconnect
    logging.debug("ConnectorDB:WS: Websocket closed")

    # Turn off the ping timer
    if self.pingtimer is not None:
        self.pingtimer.cancel()

    self.disconnected_time = time.time()

    # Intentional close ("disconnecting") finalizes the disconnect; an
    # unexpected close while "connected" triggers the reconnect path.
    if self.status == "disconnecting":
        self.status = "disconnected"
    elif self.status == "connected":
        self.__reconnect()
def __on_error(self, ws, err):
    """Called when there is an error in the websocket"""
    logging.debug("ConnectorDB:WS: Connection Error")
    # If connect() is still blocked waiting on ws_openlock, mark the attempt
    # as failed and release the lock so connect() can return False.
    if self.status == "connecting":
        self.status = "errored"
        self.ws_openlock.release()
def __on_message(self, ws, msg):
    """Dispatch a message received from the server to its subscription.

    Downlink acknowledgment: if the callback returns True, the original
    datapoints are re-inserted into the non-downlink stream; any other
    non-False/None return value for a downlink stream is inserted instead.
    """
    msg = json.loads(msg)
    logging.debug("ConnectorDB:WS: Msg '%s'", msg["stream"])

    # Build the subscription key: "<stream>:<transform>"
    stream_key = msg["stream"] + ":"
    if "transform" in msg:
        stream_key += msg["transform"]

    self.subscription_lock.acquire()
    if stream_key in self.subscriptions:
        subscription_function = self.subscriptions[stream_key]
        # Release before calling user code, so the callback may itself
        # subscribe/unsubscribe without deadlocking.
        self.subscription_lock.release()
        fresult = subscription_function(msg["stream"], msg["data"])

        if fresult is True:
            # This is a special result - if the subscription function of a
            # downlink returns True, then the datapoint is acknowledged
            # automatically (ie, reinserted in non-downlink stream)
            fresult = msg["data"]
        if fresult is not False and fresult is not None and msg["stream"].endswith(
                "/downlink") and msg["stream"].count("/") == 3:
            # The datapoints came from a downlink and the subscriber chose to
            # acknowledge them, so we reinsert them ([:-9] strips "/downlink").
            self.insert(msg["stream"][:-9], fresult)
    else:
        self.subscription_lock.release()
        # BUG FIX: logging.warn is a deprecated alias; use logging.warning
        logging.warning(
            "ConnectorDB:WS: Msg '%s' not subscribed! Subscriptions: %s",
            msg["stream"], list(self.subscriptions.keys()))
def __on_ping(self, ws, data):
    """The server periodically sends us websocket ping messages to keep the
    connection alive. To ensure that the connection to the server is still
    active, we memorize the most recent ping's time and we periodically
    ensure that a ping was received in __ensure_ping"""
    logging.debug("ConnectorDB:WS: ping")
    # Record receipt time; __ensure_ping treats a stale value as a dead link
    self.lastpingtime = time.time()
def __ensure_ping(self):
    """Watchdog: verify that a server ping arrived within
    connection_ping_timeout.

    If no ping was seen in time, assume the connection is dead, close the
    websocket and run the close handler (which triggers reconnection);
    otherwise re-arm the timer for the next check.
    """
    logging.debug("ConnectorDB:WS: pingcheck")
    if (time.time() - self.lastpingtime > self.connection_ping_timeout):
        # BUG FIX: logging.warn is a deprecated alias; use logging.warning
        logging.warning("ConnectorDB:WS: Websocket ping timed out!")
        if self.ws is not None:
            self.ws.close()
        # Run the close handler even if ws was None so the reconnect logic
        # still fires.
        self.__on_close(self.ws)
    else:
        # reset the ping timer
        self.pingtimer = threading.Timer(self.connection_ping_timeout,
                                         self.__ensure_ping)
        self.pingtimer.daemon = True
        self.pingtimer.start()
def gatk_select_variants(job, mode, vcf_id, ref_fasta, ref_fai, ref_dict):
    """
    Isolates a particular variant type from a VCF file using GATK SelectVariants

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str mode: variant type (i.e. SNP or INDEL)
    :param str vcf_id: FileStoreID for input VCF file
    :param str ref_fasta: FileStoreID for reference genome fasta
    :param str ref_fai: FileStoreID for reference genome index file
    :param str ref_dict: FileStoreID for reference genome sequence dictionary file
    :return: FileStoreID for filtered VCF
    :rtype: str
    """
    job.fileStore.logToMaster('Running GATK SelectVariants to select %ss' % mode)
    inputs = {'genome.fa': ref_fasta,
              'genome.fa.fai': ref_fai,
              'genome.dict': ref_dict,
              'input.vcf': vcf_id}
    work_dir = job.fileStore.getLocalTempDir()
    for name, file_store_id in inputs.iteritems():
        job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
    command = ['-T', 'SelectVariants',
               '-R', 'genome.fa',
               '-V', 'input.vcf',
               '-o', 'output.vcf',
               '-selectType', mode]
    # BUG FIX: 'log-driver' was missing its '--' prefix, so docker received it
    # as a positional argument instead of the --log-driver option.
    docker_parameters = ['--rm', '--log-driver', 'none',
                         '-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
    dockerCall(job=job, workDir=work_dir, parameters=command,
               tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
               dockerParameters=docker_parameters)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.vcf'))
def gatk_variant_filtration(job, vcf_id, filter_name, filter_expression, ref_fasta, ref_fai, ref_dict):
    """
    Filters VCF file using GATK VariantFiltration. Fixes extra pair of quotation marks in VCF header that
    may interfere with other VCF tools.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str vcf_id: FileStoreID for input VCF file
    :param str filter_name: Name of filter for VCF header
    :param str filter_expression: JEXL filter expression
    :param str ref_fasta: FileStoreID for reference genome fasta
    :param str ref_fai: FileStoreID for reference genome index file
    :param str ref_dict: FileStoreID for reference genome sequence dictionary file
    :return: FileStoreID for filtered VCF file
    :rtype: str
    """
    inputs = {'genome.fa': ref_fasta,
              'genome.fa.fai': ref_fai,
              'genome.dict': ref_dict,
              'input.vcf': vcf_id}
    work_dir = job.fileStore.getLocalTempDir()
    for name, file_store_id in inputs.iteritems():
        job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
    command = ['-T', 'VariantFiltration',
               '-R', 'genome.fa',
               '-V', 'input.vcf',
               '--filterName', filter_name,  # Documents filter name in header
               '--filterExpression', filter_expression,
               '-o', 'filtered_variants.vcf']
    job.fileStore.logToMaster('Running GATK VariantFiltration using {name}: '
                              '{expression}'.format(name=filter_name, expression=filter_expression))
    # BUG FIX: 'log-driver' was missing its '--' prefix, so docker received it
    # as a positional argument instead of the --log-driver option.
    docker_parameters = ['--rm', '--log-driver', 'none',
                         '-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
    dockerCall(job=job, workDir=work_dir, parameters=command,
               tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
               dockerParameters=docker_parameters)
    # Remove extra quotation marks around filter expression.
    malformed_header = os.path.join(work_dir, 'filtered_variants.vcf')
    fixed_header = os.path.join(work_dir, 'fixed_header.vcf')
    filter_regex = re.escape('"%s"' % filter_expression)
    with open(malformed_header, 'r') as f, open(fixed_header, 'w') as g:
        for line in f:
            g.write(re.sub(filter_regex, filter_expression, line))
    return job.fileStore.writeGlobalFile(fixed_header)
def gatk_variant_recalibrator(job, mode, vcf, ref_fasta, ref_fai, ref_dict, annotations,
                              hapmap=None, omni=None, phase=None, dbsnp=None, mills=None,
                              max_gaussians=4, unsafe_mode=False):
    """
    Runs either SNP or INDEL variant quality score recalibration using GATK VariantRecalibrator. Because the
    VQSR method models SNPs and INDELs differently, VQSR must be run separately for these variant types.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str mode: Determines variant recalibration mode (SNP or INDEL)
    :param str vcf: FileStoreID for input VCF file
    :param str ref_fasta: FileStoreID for reference genome fasta
    :param str ref_fai: FileStoreID for reference genome index file
    :param str ref_dict: FileStoreID for reference genome sequence dictionary file
    :param list[str] annotations: List of GATK variant annotations to filter on
    :param str hapmap: FileStoreID for HapMap resource file, required for SNP VQSR
    :param str omni: FileStoreID for Omni resource file, required for SNP VQSR
    :param str phase: FileStoreID for 1000G resource file, required for SNP VQSR
    :param str dbsnp: FilesStoreID for dbSNP resource file, required for SNP and INDEL VQSR
    :param str mills: FileStoreID for Mills resource file, required for INDEL VQSR
    :param int max_gaussians: Number of Gaussians used during training, default is 4
    :param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
    :return: FileStoreID for the variant recalibration table, tranche file, and plots file
    :rtype: tuple
    """
    mode = mode.upper()
    inputs = {'genome.fa': ref_fasta,
              'genome.fa.fai': ref_fai,
              'genome.dict': ref_dict,
              'input.vcf': vcf}
    # Refer to GATK documentation for description of recommended parameters:
    # https://software.broadinstitute.org/gatk/documentation/article?id=1259
    # https://software.broadinstitute.org/gatk/documentation/article?id=2805
    # This base command includes parameters for both INDEL and SNP VQSR.
    command = ['-T', 'VariantRecalibrator',
               '-R', 'genome.fa',
               '-input', 'input.vcf',
               '-tranche', '100.0',
               '-tranche', '99.9',
               '-tranche', '99.0',
               '-tranche', '90.0',
               '--maxGaussians', str(max_gaussians),
               '-recalFile', 'output.recal',
               '-tranchesFile', 'output.tranches',
               '-rscriptFile', 'output.plots.R']
    # Parameters and resource files for SNP VQSR.
    if mode == 'SNP':
        command.extend(
            ['-resource:hapmap,known=false,training=true,truth=true,prior=15.0', 'hapmap.vcf',
             '-resource:omni,known=false,training=true,truth=true,prior=12.0', 'omni.vcf',
             '-resource:dbsnp,known=true,training=false,truth=false,prior=2.0', 'dbsnp.vcf',
             '-resource:1000G,known=false,training=true,truth=false,prior=10.0', '1000G.vcf',
             '-mode', 'SNP'])
        inputs['hapmap.vcf'] = hapmap
        inputs['omni.vcf'] = omni
        inputs['dbsnp.vcf'] = dbsnp
        inputs['1000G.vcf'] = phase
    # Parameters and resource files for INDEL VQSR
    elif mode == 'INDEL':
        command.extend(
            ['-resource:mills,known=false,training=true,truth=true,prior=12.0', 'mills.vcf',
             '-resource:dbsnp,known=true,training=false,truth=false,prior=2.0', 'dbsnp.vcf',
             '-mode', 'INDEL'])
        inputs['mills.vcf'] = mills
        inputs['dbsnp.vcf'] = dbsnp
    else:
        raise ValueError('Variant filter modes can be SNP or INDEL, got %s' % mode)
    for annotation in annotations:
        command.extend(['-an', annotation])
    if unsafe_mode:
        command.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
    # Delay reading in files until function is configured
    work_dir = job.fileStore.getLocalTempDir()
    for name, file_store_id in inputs.iteritems():
        job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
    job.fileStore.logToMaster('Running GATK VariantRecalibrator on {mode}s using the following annotations:\n'
                              '{annotations}'.format(mode=mode, annotations='\n'.join(annotations)))
    # BUG FIX: 'log-driver' was missing its '--' prefix, so docker received it
    # as a positional argument instead of the --log-driver option.
    docker_parameters = ['--rm', '--log-driver', 'none',
                         '-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
    dockerCall(job=job, workDir=work_dir, parameters=command,
               tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
               dockerParameters=docker_parameters)
    recal_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.recal'))
    tranches_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.tranches'))
    plots_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.plots.R'))
    return recal_id, tranches_id, plots_id
def gatk_apply_variant_recalibration(job, mode, vcf, recal_table, tranches,
                                     ref_fasta, ref_fai, ref_dict,
                                     ts_filter_level=99.0, unsafe_mode=False):
    """
    Applies variant quality score recalibration to VCF file using GATK ApplyRecalibration

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str mode: Determines variant recalibration mode (SNP or INDEL)
    :param str vcf: FileStoreID for input VCF file
    :param str recal_table: FileStoreID for recalibration table file
    :param str tranches: FileStoreID for tranches file
    :param str ref_fasta: FileStoreID for reference genome fasta
    :param str ref_fai: FileStoreID for reference genome index file
    :param str ref_dict: FileStoreID for reference genome sequence dictionary file
    :param float ts_filter_level: Sensitivity expressed as a percentage, default is 99.0
    :param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
    :return: FileStoreID for recalibrated VCF file
    :rtype: str
    """
    inputs = {'genome.fa': ref_fasta,
              'genome.fa.fai': ref_fai,
              'genome.dict': ref_dict,
              'input.vcf': vcf,
              'recal': recal_table,
              'tranches': tranches}
    work_dir = job.fileStore.getLocalTempDir()
    for name, file_store_id in inputs.iteritems():
        job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
    mode = mode.upper()
    # GATK recommended parameters:
    # https://software.broadinstitute.org/gatk/documentation/article?id=2805
    command = ['-T', 'ApplyRecalibration',
               '-mode', mode,
               '-R', 'genome.fa',
               '-input', 'input.vcf',
               '-o', 'vqsr.vcf',
               '-ts_filter_level', str(ts_filter_level),
               '-recalFile', 'recal',
               '-tranchesFile', 'tranches']
    if unsafe_mode:
        command.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
    job.fileStore.logToMaster('Running GATK ApplyRecalibration on {mode}s '
                              'with a sensitivity of {sensitivity}%'.format(mode=mode,
                                                                            sensitivity=ts_filter_level))
    # BUG FIX: 'log-driver' was missing its '--' prefix, so docker received it
    # as a positional argument instead of the --log-driver option.
    docker_parameters = ['--rm', '--log-driver', 'none',
                         '-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
    dockerCall(job=job,
               workDir=work_dir,
               parameters=command,
               tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
               dockerParameters=docker_parameters)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'vqsr.vcf'))
def gatk_combine_variants(job, vcfs, ref_fasta, ref_fai, ref_dict, merge_option='UNIQUIFY'):
    """
    Merges VCF files using GATK CombineVariants

    :param JobFunctionWrappingJob job: Toil Job instance
    :param dict vcfs: Dictionary of VCF FileStoreIDs {sample identifier: FileStoreID}
    :param str ref_fasta: FileStoreID for reference genome fasta
    :param str ref_fai: FileStoreID for reference genome index file
    :param str ref_dict: FileStoreID for reference genome sequence dictionary file
    :param str merge_option: Value for --genotypemergeoption flag (Default: 'UNIQUIFY')
                             'UNIQUIFY': Multiple variants at a single site are merged into a
                                         single variant record.
                             'UNSORTED': Used to merge VCFs from the same sample
    :return: FileStoreID for merged VCF file
    :rtype: str
    """
    job.fileStore.logToMaster('Running GATK CombineVariants')
    inputs = {'genome.fa': ref_fasta,
              'genome.fa.fai': ref_fai,
              'genome.dict': ref_dict}
    inputs.update(vcfs)
    work_dir = job.fileStore.getLocalTempDir()
    for name, file_store_id in inputs.iteritems():
        job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
    command = ['-T', 'CombineVariants',
               '-R', '/data/genome.fa',
               '-o', '/data/merged.vcf',
               '--genotypemergeoption', merge_option]
    for uuid, vcf_id in vcfs.iteritems():
        command.extend(['--variant', os.path.join('/data', uuid)])
    # BUG FIX: 'log-driver' was missing its '--' prefix, so docker received it
    # as a positional argument instead of the --log-driver option.
    docker_parameters = ['--rm', '--log-driver', 'none',
                         '-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
    dockerCall(job=job, workDir=work_dir, parameters=command,
               tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
               dockerParameters=docker_parameters)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'merged.vcf'))
def bam_quickcheck(bam_path):
    """
    Perform a quick check on a BAM via `samtools quickcheck`. This will detect
    obvious BAM errors such as truncation.

    :param str bam_path: path to BAM file to checked
    :rtype: boolean
    :return: True if the BAM is valid, False is BAM is invalid or something related
             to the call went wrong
    """
    directory, bam_name = os.path.split(bam_path)
    # Mount the BAM's directory into the samtools container and run quickcheck
    exit_code = subprocess.call(
        ['docker', 'run',
         '-v', directory + ':/data',
         'quay.io/ucsc_cgl/samtools:1.3--256539928ea162949d8a65ca5c79a72ef557ce7c',
         'quickcheck', '-vv', '/data/' + bam_name])
    return exit_code == 0
def load_handlers(handler_mapping):
    """
    Resolve a {packet: handler} mapping into imported objects.

    Keys and values given as dotted-path strings are imported; non-string
    objects are assumed to already be the packet class / handler and are used
    as-is. The special key '*' is a wildcard and is passed through untouched.

    ::

        {
            'rfxcom.protocol.Status': 'home.collect.logging_handler',
            'rfxcom.protocol.Elec': 'home.collect.elec_handler',
            'rfxcom.protocol.TempHumidity': 'home.collect.temp_humidity_handler',
            '*': 'home.collect.logging_handler'
        }
    """
    handlers = {}
    for packet_spec, handler_spec in handler_mapping.items():
        if packet_spec == '*':
            packet = packet_spec  # wildcard: pass through as-is
        elif isinstance(packet_spec, str):
            packet = importer(packet_spec)
        else:
            packet = packet_spec

        handler = importer(handler_spec) if isinstance(handler_spec, str) else handler_spec

        if packet in handlers:
            raise HandlerConfigError(
                "Handler already provided for packet %s" % packet)
        handlers[packet] = handler
    return handlers
def write_config(configuration):
    """Serialize *configuration* to CONFIG_PATH as pretty-printed JSON
    (indented, keys sorted for stable diffs)."""
    with open(CONFIG_PATH, 'w') as config_file:
        json.dump(configuration, config_file, indent=2, sort_keys=True)
def get_config():
    """Load this project's configuration from the default JSON file,
    creating an empty one first if it doesn't exist.

    :rtype: dict
    """
    if not os.path.exists(CONFIG_PATH):
        write_config({})
    with open(CONFIG_PATH) as config_file:
        return json.load(config_file)
def get_ontology(self, ontology):
    """Gets the metadata for a given ontology

    :param str ontology: The name of the ontology
    :return: The dictionary representing the JSON from the OLS
    :rtype: dict
    """
    metadata_url = self.ontology_metadata_fmt.format(ontology=ontology)
    return requests.get(metadata_url).json()
def get_term(self, ontology, iri):
    """Gets the data for a given term

    :param str ontology: The name of the ontology
    :param str iri: The IRI of a term
    :rtype: dict
    """
    # NOTE(review): unlike the other endpoints this format call is
    # positional — confirm ontology_term_fmt uses positional placeholders.
    term_url = self.ontology_term_fmt.format(ontology, iri)
    return requests.get(term_url).json()
def search(self, name, query_fields=None):
    """Searches the OLS with the given term

    :param str name: the search term
    :param list[str] query_fields: Fields to query
    :return: dict
    """
    query_params = {'q': name}
    if query_fields is not None:
        # The OLS expects a brace-wrapped comma-separated field list
        query_params['queryFields'] = '{{{}}}'.format(','.join(query_fields))
    return requests.get(self.ontology_search, params=query_params).json()
def suggest(self, name, ontology=None):
    """Suggest terms from an optional list of ontologies

    :param str name: the term to get suggestions for
    :param list[str] ontology: restrict suggestions to these ontologies

    :rtype: dict

    .. seealso:: https://www.ebi.ac.uk/ols/docs/api#_suggest_term
    """
    query_params = {'q': name}
    if ontology:
        query_params['ontology'] = ','.join(ontology)
    return requests.get(self.ontology_suggest, params=query_params).json()
def _iter_terms_helper(url, size=None, sleep=None):
    """Iterates over all terms, lazily with paging

    :param str url: The url to query
    :param int size: The size of each page. Defaults to 500, which is the maximum allowed by the EBI.
    :param int sleep: The amount of time to sleep between pages. Defaults to none.
    :rtype: iter[dict]
    """
    if size is None:
        size = 500
    elif size > 500:
        raise ValueError('Maximum size is 500. Given: {}'.format(size))

    # Fetch and yield the first page, timing it to estimate total runtime
    t = time.time()
    response = requests.get(url, params={'size': size}).json()
    links = response['_links']
    for response_term in _iterate_response_terms(response):
        yield response_term
    t = time.time() - t
    log.info(
        'Page %s/%s done in %.2f seconds',
        response['page']['number'] + 1,
        response['page']['totalPages'],
        t
    )
    log.info('Estimated time until done: %.2f minutes', t * response['page']['totalPages'] / 60)

    # Follow the HAL 'next' links until the last page is reached
    while 'next' in links:
        if sleep:
            time.sleep(sleep)

        t = time.time()
        response = requests.get(links['next']['href'], params={'size': size}).json()
        links = response['_links']

        for response_term in _iterate_response_terms(response):
            yield response_term

        # NOTE(review): the first page logs number+1 but this loop logs the
        # raw (0-indexed?) page number — confirm whether +1 is missing here.
        log.info(
            'Page %s/%s done in %.2f seconds',
            response['page']['number'],
            response['page']['totalPages'],
            time.time() - t
        )
def iter_terms(self, ontology, size=None, sleep=None):
    """Lazily iterate over every term in *ontology*, page by page.

    :param str ontology: The name of the ontology
    :param int size: The size of each page. Defaults to 500, which is the
        maximum allowed by the EBI.
    :param int sleep: The amount of time to sleep between pages. Defaults
        to 0 seconds.
    :rtype: iter[dict]
    """
    terms_url = self.ontology_terms_fmt.format(ontology=ontology)
    for term in self._iter_terms_helper(terms_url, size=size, sleep=sleep):
        yield term
def iter_descendants(self, ontology, iri, size=None, sleep=None):
    """Lazily iterate over the descendants of the term *iri*.

    :param str ontology: The name of the ontology
    :param str iri: The IRI of a term
    :param int size: The size of each page. Defaults to 500, which is the
        maximum allowed by the EBI.
    :param int sleep: The amount of time to sleep between pages. Defaults
        to 0 seconds.
    :rtype: iter[dict]
    """
    descendants_url = self.ontology_term_descendants_fmt.format(ontology=ontology, iri=iri)
    log.info('getting %s', descendants_url)
    for term in self._iter_terms_helper(descendants_url, size=size, sleep=sleep):
        yield term
def iter_descendants_labels(self, ontology, iri, size=None, sleep=None):
    """Lazily iterate over the labels of the descendants of the term *iri*.

    :param str ontology: The name of the ontology
    :param str iri: The IRI of a term
    :param int size: The size of each page. Defaults to 500, which is the
        maximum allowed by the EBI.
    :param int sleep: The amount of time to sleep between pages. Defaults
        to 0 seconds.
    :rtype: iter[str]
    """
    descendants = self.iter_descendants(ontology, iri, size=size, sleep=sleep)
    for label in _help_iterate_labels(descendants):
        yield label
def iter_labels(self, ontology, size=None, sleep=None):
    """Lazily iterate over the labels of all terms in *ontology*,
    automatically wrapping the pager returned by the OLS.

    :param str ontology: The name of the ontology
    :param int size: The size of each page. Defaults to 500, which is the
        maximum allowed by the EBI.
    :param int sleep: The amount of time to sleep between pages. Defaults
        to 0 seconds.
    :rtype: iter[str]
    """
    terms = self.iter_terms(ontology=ontology, size=size, sleep=sleep)
    for label in _help_iterate_labels(terms):
        yield label
def iter_hierarchy(self, ontology, size=None, sleep=None):
    """Yield (parent_label, child_label) pairs for *ontology*.

    :param str ontology: The name of the ontology
    :param int size: The size of each page. Defaults to 500, which is the
        maximum allowed by the EBI.
    :param int sleep: The amount of time to sleep between pages. Defaults
        to 0 seconds.
    :rtype: iter[tuple[str,str]]
    """
    for parent in self.iter_terms(ontology=ontology, size=size, sleep=sleep):
        try:
            children_url = parent['_links'][HIERARCHICAL_CHILDREN]['href']
        except KeyError:
            continue  # leaf term: no children link present

        children_response = requests.get(children_url).json()
        for child in children_response['_embedded']['terms']:
            yield parent['label'], child['label']
def run_fastqc(job, r1_id, r2_id):
    """
    Run Fastqc on the input reads

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str r1_id: FileStoreID of fastq read 1
    :param str r2_id: FileStoreID of fastq read 2 (falsy for single-end runs)
    :return: FileStoreID of fastQC output (tarball)
    :rtype: str
    """
    work_dir = job.fileStore.getLocalTempDir()
    job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, 'R1.fastq'))
    parameters = ['/data/R1.fastq']
    report_names = ['R1_fastqc.html', 'R1_fastqc.zip']
    if r2_id:
        # Paired-end: pull down R2 and let FastQC use two threads
        job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, 'R2.fastq'))
        parameters.extend(['-t', '2', '/data/R2.fastq'])
        report_names.extend(['R2_fastqc.html', 'R2_fastqc.zip'])
    dockerCall(job=job,
               tool='quay.io/ucsc_cgl/fastqc:0.11.5--be13567d00cd4c586edf8ae47d991815c8c72a49',
               workDir=work_dir,
               parameters=parameters)
    report_paths = [os.path.join(work_dir, name) for name in report_names]
    tarball_files(tar_name='fastqc.tar.gz', file_paths=report_paths, output_dir=work_dir)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'fastqc.tar.gz'))
def addStream(self, stream, t1=None, t2=None, limit=None, i1=None, i2=None, transform=None):
    """Add the given stream to the query under construction.

    Accepts either a stream name or a Stream object.
    """
    stream_params = query_maker(t1, t2, limit, i1, i2, transform)
    stream_params["stream"] = get_stream(self.cdb, stream)
    # Queue the per-stream parameters for later execution
    self.query.append(stream_params)
def create_app(config=None): """ This needs some tidying up. To avoid circular imports we import everything here but it makes this method a bit more gross. """ # Initialise the app from home.config import TEMPLATE_FOLDER, STATIC_FOLDER app = Flask(__name__, static_folder=STATIC_FOLDER, template_folder=TEMPLATE_FOLDER) app.config['SECRET_KEY'] = 'ssh, its a secret.' # Load the default config, the specified config file and then any # overwrites that are manually passed in. app.config.from_object('home.config') if 'HOME_SETTINGS' in environ: app.config.from_envvar('HOME_SETTINGS') app.config.from_object(config) # Register the web front end and the API. from home.dash.web import web from home.dash.api import api app.register_blueprint(web) app.register_blueprint(api, url_prefix='/api') login_manager.init_app(app) login_manager.login_view = 'Dashboard Web.login' from home.dash.models import User @login_manager.user_loader def load_user(user_id): return User.query.get(int(user_id)) # Initialise the migrations app, we want to store all migrations within # the project directory for easier packaging. Migrate(app, db, directory=app.config['MIGRATE_DIRECTORY']) admin = Admin(app) from home.dash.admin import setup_admin setup_admin(admin) # Wire up the database to the app so it gets the config. db.init_app(app) return app
def spawn_spark_cluster(job, numWorkers, cores=None, memory=None, disk=None, overrideLeaderIP=None):
    '''
    Launches a Spark leader service and *numWorkers* worker services.

    :param numWorkers: The number of worker nodes to have in the cluster. \
    Must be greater than or equal to 1.
    :param cores: Optional parameter to set the number of cores per node. \
    If not provided, we use the number of cores on the node that launches \
    the service.
    :param memory: Optional parameter to set the memory requested per node.
    :param disk: Optional parameter to set the disk requested per node.

    :type numWorkers: int
    :type cores: int
    :type memory: int or string convertable by bd2k.util.humanize.human2bytes to an int
    :type disk: int or string convertable by bd2k.util.humanize.human2bytes to an int

    :return: the IP of the Spark leader service
    '''
    if numWorkers < 1:
        # BUG FIX: the message claimed "more than one worker" although the
        # check accepts exactly one worker.
        raise ValueError("Must have at least one worker. %d given." % numWorkers)
    leaderService = SparkService(cores=cores,
                                 memory=memory,
                                 disk=disk,
                                 overrideLeaderIP=overrideLeaderIP)
    leaderIP = job.addService(leaderService)
    # Workers are children of the leader so they stop when the leader does
    for _ in range(numWorkers):
        job.addService(WorkerService(leaderIP, cores=cores, disk=disk, memory=memory),
                       parentService=leaderService)
    return leaderIP
def start(self, job):
    """
    Start spark and hdfs master containers

    :param job: The underlying job.
    """
    if self.hostname is None:
        # check_output appends a trailing newline; strip it with [:-1]
        self.hostname = subprocess.check_output(["hostname", "-f",])[:-1]

    _log.info("Started Spark master container.")
    master_docker_parameters = ["--net=host",
                                "-d",
                                "-v", "/mnt/ephemeral/:/ephemeral/:rw",
                                "-e", "SPARK_MASTER_IP=" + self.hostname,
                                "-e", "SPARK_LOCAL_DIRS=/ephemeral/spark/local",
                                "-e", "SPARK_WORKER_DIR=/ephemeral/spark/work"]
    self.sparkContainerID = dockerCheckOutput(
        job=job,
        defer=STOP,
        workDir=os.getcwd(),
        tool="quay.io/ucsc_cgl/apache-spark-master:1.5.2",
        dockerParameters=master_docker_parameters,
        parameters=[self.hostname])[:-1]

    _log.info("Started HDFS Datanode.")
    self.hdfsContainerID = dockerCheckOutput(
        job=job,
        defer=STOP,
        workDir=os.getcwd(),
        tool="quay.io/ucsc_cgl/apache-hadoop-master:2.6.2",
        dockerParameters=["--net=host", "-d"],
        parameters=[self.hostname])[:-1]
    return self.hostname
def start(self, job): """ Start spark and hdfs worker containers :param job: The underlying job. """ # start spark and our datanode self.sparkContainerID = dockerCheckOutput(job=job, defer=STOP, workDir=os.getcwd(), tool="quay.io/ucsc_cgl/apache-spark-worker:1.5.2", dockerParameters=["--net=host", "-d", "-v", "/mnt/ephemeral/:/ephemeral/:rw", "-e", "\"SPARK_MASTER_IP=" + self.masterIP + ":" + _SPARK_MASTER_PORT + "\"", "-e", "SPARK_LOCAL_DIRS=/ephemeral/spark/local", "-e", "SPARK_WORKER_DIR=/ephemeral/spark/work"], parameters=[self.masterIP + ":" + _SPARK_MASTER_PORT])[:-1] self.__start_datanode(job) # fake do/while to check if HDFS is up hdfs_down = True retries = 0 while hdfs_down and (retries < 5): _log.info("Sleeping 30 seconds before checking HDFS startup.") time.sleep(30) clusterID = "" try: clusterID = subprocess.check_output(["docker", "exec", self.hdfsContainerID, "grep", "clusterID", "-R", "/opt/apache-hadoop/logs"]) except: # grep returns a non-zero exit code if the pattern is not found # we expect to not find the pattern, so a non-zero code is OK pass if "Incompatible" in clusterID: _log.warning("Hadoop Datanode failed to start with: %s", clusterID) _log.warning("Retrying container startup, retry #%d.", retries) retries += 1 _log.warning("Removing ephemeral hdfs directory.") subprocess.check_call(["docker", "exec", self.hdfsContainerID, "rm", "-rf", "/ephemeral/hdfs"]) _log.warning("Killing container %s.", self.hdfsContainerID) subprocess.check_call(["docker", "kill", self.hdfsContainerID]) # todo: this is copied code. clean up! _log.info("Restarting datanode.") self.__start_datanode(job) else: _log.info("HDFS datanode started up OK!") hdfs_down = False if retries >= 5: raise RuntimeError("Failed %d times trying to start HDFS datanode." % retries) return
def __start_datanode(self, job):
    """
    Launches the Hadoop datanode.

    :param job: The underlying job.
    """
    # Run the worker datanode image detached on the host network with the
    # node's ephemeral storage mounted; the docker output carries a
    # trailing newline, hence the slice before storing the container id.
    raw_container_id = dockerCheckOutput(job=job,
                                         defer=STOP,
                                         workDir=os.getcwd(),
                                         tool="quay.io/ucsc_cgl/apache-hadoop-worker:2.6.2",
                                         dockerParameters=["--net=host",
                                                           "-d",
                                                           "-v", "/mnt/ephemeral/:/ephemeral/:rw"],
                                         parameters=[self.masterIP])
    self.hdfsContainerID = raw_container_id[:-1]
def stop(self, fileStore):
    """
    Stop spark and hdfs worker containers

    :param job: The underlying job.
    """
    def _teardown(container_id, scratch_dir):
        # best-effort cleanup: wipe the container's scratch directory,
        # then stop and remove the container (errors are ignored)
        subprocess.call(["docker", "exec", container_id, "rm", "-r", scratch_dir])
        subprocess.call(["docker", "stop", container_id])
        subprocess.call(["docker", "rm", container_id])

    _teardown(self.sparkContainerID, "/ephemeral/spark")
    _log.info("Stopped Spark worker.")

    _teardown(self.hdfsContainerID, "/ephemeral/hdfs")
    _log.info("Stopped HDFS datanode.")
    return
def check(self):
    """
    Checks to see if Spark worker and HDFS datanode are still running.
    """
    # Delegate to the shared container-status helper; the noun arguments
    # only affect diagnostic wording.
    return _checkContainerStatus(self.sparkContainerID,
                                 self.hdfsContainerID,
                                 sparkNoun='worker',
                                 hdfsNoun='datanode')
def base_tokenizer(fp):
    """Tokenizer. Generates tokens stream from text.

    Yields 4-tuples ``(token, value, lineno, pos)``.  Accepts either a
    StringIO instance or a real file object (which is mmap'd read-only).
    """
    if isinstance(fp, StringIO):
        template_file = fp
        size = template_file.len
    else:
        #empty file check
        if os.fstat(fp.fileno()).st_size == 0:
            yield TOKEN_EOF, 'EOF', 0, 0
            return
        template_file = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
        size = template_file.size()
    lineno = 0
    while 1:
        lineno += 1
        # pos is a 1-based column counter within the current line
        pos = 1

        # end of file
        if template_file.tell() == size:
            yield TOKEN_EOF, 'EOF', lineno, 0
            break

        # now we tokinize line by line
        line = template_file.readline().decode('utf-8')
        line = line.replace('\r\n', '')
        line = line.replace('\n', '')

        # ignoring non XML comments
        if re_comment.match(line):
            continue

        # characters that matched no token regex accumulate here until a
        # token (or end of line) flushes them as a single TOKEN_TEXT
        last_text = deque()
        while line:
            line_len = len(line)
            for token in tokens:
                m = token.regex.match(line)
                if m:
                    if last_text:
                        yield TOKEN_TEXT, ''.join(last_text), lineno, pos
                        pos += len(last_text)
                        last_text.clear()
                    offset, value = m.end(), m.group()
                    line = line[offset:]
                    yield token, value, lineno, pos
                    pos += offset
                    break

            # we did not get right in tokens list, so next char is text
            if line_len == len(line):
                last_text.append(line[0])
                line = line[1:]

        if last_text:
            yield TOKEN_TEXT, ''.join(last_text), lineno, pos
            pos += len(last_text)
            last_text.clear()
        yield TOKEN_NEWLINE, '\n', lineno, pos

    # all work is done
    template_file.close()
def get_mint_tree(tokens_stream):
    '''
    This function is wrapper to normal parsers (tag_parser,
    block_parser, etc.). Returns mint tree.
    '''
    parse_stack = RecursiveStack()
    block_parser.parse(tokens_stream, parse_stack)
    return MintTemplate(body=parse_stack.stack)
def lookup_zone(conn, zone):
    """Look up a zone ID for a zone string.

    Args:
        conn: boto.route53.Route53Connection
        zone: string eg. foursquare.com
    Returns:
        zone ID eg. ZE2DYFZDWGSL4.
    Raises:
        ZoneNotFoundError if zone not found."""
    # compare names with the trailing dot stripped from both sides, so
    # 'example.com' matches the API's 'example.com.'
    wanted = zone.rstrip('.')
    listing = conn.get_all_hosted_zones()
    for hosted_zone in listing['ListHostedZonesResponse']['HostedZones']:
        if hosted_zone['Name'].rstrip('.') == wanted:
            # the API returns ids as '/hostedzone/<id>'; strip the prefix
            return hosted_zone['Id'].replace('/hostedzone/', '')
    raise ZoneNotFoundError('zone %s not found in response' % zone)
def fetch_config(zone, conn):
    """Fetch all pieces of a Route 53 config from Amazon.

    Route 53 pages its record sets, so this follows the pagination cursor
    (NextRecordName/NextRecordType/NextRecordIdentifier) until a response
    is no longer truncated.

    Args:
        zone: string, hosted zone id.
        conn: boto.route53.Route53Connection
    Returns:
        list of ElementTrees, one for each piece of config."""
    more_to_fetch = True
    cfg_chunks = []
    next_name = None
    next_type = None
    next_identifier = None
    while more_to_fetch == True:
        more_to_fetch = False
        getstr = '/%s/hostedzone/%s/rrset' % (R53_API_VERSION, zone)
        # resume from the pagination cursor returned by the previous page
        if next_name is not None:
            getstr += '?name=%s&type=%s' % (next_name, next_type)
            if next_identifier is not None:
                getstr += '&identifier=%s' % next_identifier
        log.debug('requesting %s' % getstr)
        resp = conn.make_request('GET', getstr)
        etree = lxml.etree.parse(resp)
        cfg_chunks.append(etree)
        root = etree.getroot()
        truncated = root.find('{%s}IsTruncated' % R53_XMLNS)
        if truncated is not None and truncated.text == 'true':
            # more pages remain: record the cursor for the next request
            more_to_fetch = True
            next_name = root.find('{%s}NextRecordName' % R53_XMLNS).text
            next_type = root.find('{%s}NextRecordType' % R53_XMLNS).text
            try:
                next_identifier = root.find('{%s}NextRecordIdentifier' % R53_XMLNS).text
            except AttributeError:
                # may not have next_identifier
                next_identifier = None
    return cfg_chunks
def merge_config(cfg_chunks):
    """Merge a set of fetched Route 53 config Etrees into a canonical form.

    Args:
        cfg_chunks: [ lxml.etree.ETree ]
    Returns:
        lxml.etree.Element"""
    # accumulate every ResourceRecordSet from every paged chunk under a
    # single fresh ResourceRecordSets root
    merged = lxml.etree.XML(
        '<ResourceRecordSets xmlns="%s"></ResourceRecordSets>' % R53_XMLNS,
        parser=XML_PARSER)
    rrset_path = './/{%s}ResourceRecordSet' % R53_XMLNS
    for chunk in cfg_chunks:
        for rrset in chunk.iterfind(rrset_path):
            merged.append(rrset)
    return merged
def normalize_rrs(rrsets):
    """Lexically sort the order of every ResourceRecord in a
    ResourceRecords element so we don't generate spurious changes: ordering
    of e.g. NS records is irrelevant to the DNS line protocol, but XML sees
    it differently.

    Also rewrite any wildcard records to use the ascii octal escape:
    amazon's API will always display wildcard records as
    "\\052.example.com".

    Args:
        rrsets: lxml.etree.Element (<ResourceRecordSets>)

    Returns:
        the same element, normalized in place.
    """
    for rrset in rrsets:
        if rrset.tag == '{%s}ResourceRecordSet' % R53_XMLNS:
            for rrs in rrset:
                # preformat wildcard records
                if rrs.tag == '{%s}Name' % R53_XMLNS:
                    if rrs.text.startswith('*.'):
                        new_text = '\\052.%s' % rrs.text[2:]
                        # use the module logger instead of a py2-only
                        # `print` statement (a syntax error under py3),
                        # consistent with this module's other diagnostics
                        log.info('Found wildcard record, rewriting to %s' % new_text)
                        # the old replace() swapped the entire text for
                        # new_text anyway; assign directly
                        rrs.text = new_text
                # sort ResourceRecord elements by Value
                if rrs.tag == '{%s}ResourceRecords' % R53_XMLNS:
                    # 0th value of ResourceRecord is always the Value element
                    sorted_rrs = sorted(rrs, key=lambda x: x[0].text)
                    rrs[:] = sorted_rrs
    return rrsets
def generate_changeset(old, new, comment=None):
    """Diff two XML configs and return an object with changes to be written.

    Record sets are compared as serialized strings (after normalize_rrs
    has canonicalized both sides), so any textual difference yields a
    DELETE of the old rrset and/or a CREATE of the new one.

    Args:
        old, new: lxml.etree.Element (<ResourceRecordSets>).
    Returns:
        lxml.etree.ETree (<ChangeResourceRecordSetsRequest>) or None"""
    rrsets_tag = '{%s}ResourceRecordSets' % R53_XMLNS
    if rrsets_tag not in (old.tag, new.tag):
        log.error('both configs must be ResourceRecordSets tags. old: %s, new: %s' % (old.tag, new.tag))
        raise InvalidArgumentException()
    if comment is None:
        # record who generated this change and when
        comment = 'Generated by %s for %s@%s at %s.' % (
            __file__, os.environ['USER'], socket.gethostname(),
            time.strftime('%Y-%m-%d %H:%M:%S'))
    root = lxml.etree.XML("""<ChangeResourceRecordSetsRequest xmlns="%s">
  <ChangeBatch>
    <Comment>%s</Comment>
    <Changes/>
  </ChangeBatch>
</ChangeResourceRecordSetsRequest>""" % (
        R53_XMLNS, comment), parser=XML_PARSER)
    changesroot = root.find('.//{%s}Changes' % R53_XMLNS)
    old = normalize_rrs(old)
    new = normalize_rrs(new)
    # compare serialized rrsets as opaque strings
    oldset = set([lxml.etree.tostring(x).rstrip() for x in old])
    newset = set([lxml.etree.tostring(x).rstrip() for x in new])
    if oldset == newset:
        # no differences at all: signal "nothing to do"
        return None
    # look for removed elements
    for rrs in old:
        rrsst = lxml.etree.tostring(rrs).rstrip()
        if rrsst not in newset:
            log.debug("REMOVED:")
            log.debug(rrsst)
            change = lxml.etree.XML('<Change xmlns="%s"><Action>DELETE</Action></Change>' % R53_XMLNS, parser=XML_PARSER)
            change.append(rrs)
            changesroot.append(change)
    # look for added elements
    for rrs in new:
        rrsst = lxml.etree.tostring(rrs).rstrip()
        if rrsst not in oldset:
            log.debug("ADDED:")
            log.debug(rrsst)
            change = lxml.etree.XML('<Change xmlns="%s"><Action>CREATE</Action></Change>' % R53_XMLNS, parser=XML_PARSER)
            change.append(rrs)
            changesroot.append(change)
    return root
def validate_changeset(changeset):
    """Validate a changeset is compatible with Amazon's API spec.

    Args:
        changeset: lxml.etree.Element (<ChangeResourceRecordSetsRequest>)
    Returns:
        [ errors ] list of error strings or []."""
    errors = []

    # the API bounds the number of <Change> elements per request
    change_count = len(changeset.findall('.//{%s}Change' % R53_XMLNS))
    if change_count == 0:
        errors.append('changeset must have at least one <Change> element')
    if change_count > 100:
        errors.append('changeset has %d <Change> elements: max is 100' %
                      change_count)

    rr_count = len(changeset.findall('.//{%s}ResourceRecord' % R53_XMLNS))
    if rr_count > 1000:
        errors.append('changeset has %d ResourceRecord elements: max is 1000' %
                      rr_count)

    # total characters across all <Value> elements is also capped
    value_chars = sum(len(value.text) for value in
                      changeset.findall('.//{%s}Value' % R53_XMLNS))
    if value_chars > 10000:
        errors.append('changeset has %d chars in <Value> text: max is 10000' %
                      value_chars)
    return errors
def minimize_best_n(Members):
    '''Orders population members from highest fitness score to lowest
    (presumably a higher fitness score corresponds to a lower cost when
    minimizing -- confirm against the fitness assignment)

    Args:
        Members (list): list of PyGenetics Member objects

    Returns:
        list: ordered list of Members, from highest fitness to lowest fitness
    '''
    # sort ascending by fitness score, then flip in place; this matches
    # the previous reversed(sorted(...)) ordering exactly, ties included
    ordered = sorted(Members, key=lambda member: member.fitness_score)
    ordered.reverse()
    return ordered
def fitness(self):
    '''Population fitness == average member fitness score'''
    if len(self.__members) == 0:
        return None
    if self.__num_processes > 1:
        # multiprocessing: entries are AsyncResults, resolve each one
        resolved = [task.get() for task in self.__members]
    else:
        resolved = self.__members
    return sum(member.fitness_score for member in resolved) / len(resolved)
def ave_cost_fn_val(self):
    '''Returns average cost function return value for all members'''
    if len(self.__members) == 0:
        return None
    # resolve AsyncResults when running with a process pool
    resolved = ([task.get() for task in self.__members]
                if self.__num_processes > 1 else self.__members)
    return sum(member.cost_fn_val for member in resolved) / len(resolved)
def med_cost_fn_val(self):
    '''Returns median cost function return value for all members'''
    if len(self.__members) == 0:
        return None
    # resolve AsyncResults when running with a process pool
    resolved = ([task.get() for task in self.__members]
                if self.__num_processes > 1 else self.__members)
    return median([member.cost_fn_val for member in resolved])
def parameters(self):
    '''Population parameter vals == average member parameter vals'''
    if len(self.__members) == 0:
        return None
    # resolve AsyncResults when running with a process pool
    resolved = ([task.get() for task in self.__members]
                if self.__num_processes > 1 else self.__members)
    count = len(resolved)
    averages = {}
    for param in self.__parameters:
        averages[param.name] = sum(
            member.parameters[param.name] for member in resolved
        ) / count
    return averages
def members(self):
    '''Returns Member objects of population'''
    if self.__num_processes > 1:
        # multiprocessing: resolve each AsyncResult to its Member
        return [task.get() for task in self.__members]
    return self.__members
def add_parameter(self, name, min_val, max_val):
    '''Adds a parameter to the Population

    Args:
        name (str): name of the parameter
        min_val (int or float): minimum value for the parameter
        max_val (int or float): maximum value for the parameter
    '''
    # Parameter presumably derives its dtype from min_val/max_val --
    # confirm against the Parameter definition
    self.__parameters.append(Parameter(name, min_val, max_val))
def generate_population(self):
    '''Generates self.__pop_size Members with randomly initialized values
    for each parameter added with add_parameter(), evaluates their fitness
    '''
    if self.__num_processes > 1:
        process_pool = Pool(processes=self.__num_processes)
    self.__members = []
    for _ in range(self.__pop_size):
        # draw a random value for every registered parameter
        feed_dict = {}
        for param in self.__parameters:
            feed_dict[param.name] = self.__random_param_val(
                param.min_val, param.max_val, param.dtype
            )
        if self.__num_processes > 1:
            # evaluate the cost function in a worker process; the list
            # holds AsyncResult objects that property accessors resolve
            # with .get()
            self.__members.append(process_pool.apply_async(
                self._start_process,
                [self.__cost_fn, feed_dict, self.__cost_fn_args])
            )
        else:
            # evaluate synchronously and store the finished Member
            self.__members.append(
                Member(
                    feed_dict,
                    self.__cost_fn(feed_dict, self.__cost_fn_args)
                )
            )
    if self.__num_processes > 1:
        # wait for all cost-function evaluations to finish
        process_pool.close()
        process_pool.join()
    self.__determine_best_member()
def next_generation(self, mut_rate=0, max_mut_amt=0, log_base=10):
    '''Generates the next population from a previously evaluated generation

    Args:
        mut_rate (float): mutation rate for new members (0.0 - 1.0)
        max_mut_amt (float): how much the member is allowed to mutate
            (0.0 - 1.0, proportion change of mutated parameter)
        log_base (int): the higher this number, the more likely the first
            Members (chosen with supplied selection function) are chosen
            as parents for the next generation
    '''
    if self.__num_processes > 1:
        process_pool = Pool(processes=self.__num_processes)
        members = [m.get() for m in self.__members]
    else:
        members = self.__members
    if len(members) == 0:
        raise Exception(
            'Generation 0 not found: use generate_population() first'
        )
    selected_members = self.__select_fn(members)
    # logarithmically decaying selection weights: members earlier in the
    # selection order get a higher probability of becoming a parent
    reproduction_probs = list(reversed(logspace(0.0, 1.0,
                                                num=len(selected_members),
                                                base=log_base)))
    reproduction_probs = reproduction_probs / sum(reproduction_probs)
    self.__members = []
    for _ in range(self.__pop_size):
        parent_1 = nrandom.choice(selected_members, p=reproduction_probs)
        parent_2 = nrandom.choice(selected_members, p=reproduction_probs)
        feed_dict = {}
        for param in self.__parameters:
            # uniform crossover: each parameter comes from either parent
            # with equal probability, then may be mutated
            which_parent = uniform(0, 1)
            if which_parent < 0.5:
                feed_dict[param.name] = parent_1.parameters[param.name]
            else:
                feed_dict[param.name] = parent_2.parameters[param.name]
            feed_dict[param.name] = self.__mutate_parameter(
                feed_dict[param.name], param, mut_rate, max_mut_amt
            )
        if self.__num_processes > 1:
            # evaluate the cost function asynchronously in the pool
            self.__members.append(process_pool.apply_async(
                self._start_process,
                [self.__cost_fn, feed_dict, self.__cost_fn_args])
            )
        else:
            self.__members.append(
                Member(
                    feed_dict,
                    self.__cost_fn(feed_dict, self.__cost_fn_args)
                )
            )
    if self.__num_processes > 1:
        # wait for all evaluations before picking the best member
        process_pool.close()
        process_pool.join()
    self.__determine_best_member()
def __mutate_parameter(value, param, mut_rate, max_mut_amt):
    '''Private, static method: mutates parameter

    Args:
        value (int or float): current value for Member's parameter
        param (Parameter): parameter object
        mut_rate (float): mutation rate of the value
        max_mut_amt (float): maximum mutation amount of the value

    Returns:
        int or float: mutated value
    '''
    # roll for mutation first; most calls return the value untouched
    if uniform(0, 1) >= mut_rate:
        return value
    # mutate by a random fraction (up to max_mut_amt) of the parameter's
    # full range, in a randomly chosen direction
    delta = param.dtype((param.max_val - param.min_val) * uniform(0, max_mut_amt))
    mutated = choice((add, sub))(value, delta)
    # clamp the result to the parameter's legal range
    return min(param.max_val, max(param.min_val, mutated))
def __determine_best_member(self):
    '''Private method: determines if any current population members have a
    fitness score better than the current best; records that member's
    fitness score, cost function value and parameter values
    '''
    if self.__num_processes > 1:
        # multiprocessing: resolve each AsyncResult to its Member
        members = [task.get() for task in self.__members]
    else:
        members = self.__members
    for member in members:
        # the first member seen seeds the running best when none has been
        # recorded yet; afterwards only a strictly greater score updates it
        # (this folds the old duplicated members[0] initialization into the
        # loop and drops the unused enumerate() index)
        if self.__best_fitness is None or \
                member.fitness_score > self.__best_fitness:
            self.__best_fitness = member.fitness_score
            self.__best_cost_fn_val = member.cost_fn_val
            self.__best_parameters = {
                p.name: member.parameters[p.name] for p in self.__parameters
            }
def update_defaults(self, defaults):
    """Updates the given defaults with values from the config files and
    the environ. Does a little special handling for certain types of
    options (lists)."""
    # Gather raw values from every configuration source; later sources
    # overwrite earlier ones.
    config = {}
    # 1. config files: the 'global' section first, then this command's own
    for section in ('global', self.name):
        config.update(
            self.normalize_keys(self.get_config_section(section))
        )
    # 2. environmental variables (skipped entirely in isolated mode)
    if not self.isolated:
        config.update(self.normalize_keys(self.get_environ_vars()))
    # Apply the collected values onto the defaults.
    for key, val in config.items():
        option = self.get_option(key)
        # skip keys this command has no option for
        if option is None:
            continue
        # ignore empty values
        if not val:
            continue
        if option.action in ('store_true', 'store_false', 'count'):
            val = strtobool(val)
        if option.action == 'append':
            # list-valued options: split on whitespace, check each item
            val = val.split()
            val = [self.check_default(option, key, v) for v in val]
        else:
            val = self.check_default(option, key, val)
        defaults[option.dest] = val
    return defaults
def normalize_keys(self, items):
    """Return a config dictionary with normalized keys regardless of
    whether the keys were specified in environment variables or in config
    files"""
    def _canonical(key):
        # unify word separators and ensure a long-option prefix; only
        # long opts are preferred
        key = key.replace('_', '-')
        return key if key.startswith('--') else '--%s' % key
    return dict((_canonical(key), val) for key, val in items)
def get_environ_vars(self):
    """Returns a generator with all environmental vars with prefix PIP_"""
    for name, value in os.environ.items():
        if not _environ_prefix_re.search(name):
            continue
        # strip the prefix and lowercase the remaining option name
        yield (_environ_prefix_re.sub("", name).lower(), value)
def throws_exception(callable, *exceptions):
    """
    Return True if the callable throws the specified exception

    >>> throws_exception(lambda: int('3'))
    False
    >>> throws_exception(lambda: int('a'))
    True
    >>> throws_exception(lambda: int('a'), KeyError)
    False
    """
    # The inner trap records only the exceptions of interest; the outer
    # trap swallows anything else, so an unrelated exception yields False
    # rather than propagating.
    outer_trap = context.ExceptionTrap()
    with outer_trap:
        inner_trap = context.ExceptionTrap(*exceptions)
        with inner_trap:
            callable()
    return bool(inner_trap)
def transform_hits(hits):
    """
    The list from pypi is really a list of versions. We want a list of
    packages with the list of versions stored inline. This converts the
    list from pypi into one we can use.
    """
    packages = {}
    for hit in hits:
        name = hit['name']
        summary = hit['summary']
        version = hit['version']
        # a missing ordering counts as the lowest possible score
        score = hit['_pypi_ordering']
        if score is None:
            score = 0

        entry = packages.get(name)
        if entry is None:
            packages[name] = {
                'name': name,
                'summary': summary,
                'versions': [version],
                'score': score,
            }
        else:
            entry['versions'].append(version)

            # if this is the highest version, replace summary and score
            if version == highest_version(entry['versions']):
                entry['summary'] = summary
                entry['score'] = score

    # each record has a unique name now, so we will convert the dict into a
    # list sorted by score
    return sorted(packages.values(), key=lambda pkg: pkg['score'],
                  reverse=True)
def _transform_result(typ, result):
    """Convert the result back into the input type.
    """
    # serialize back to whichever string flavour the caller passed in;
    # anything else is returned untouched
    for base_type, encoding in ((bytes, 'utf-8'), (unicode, 'unicode')):
        if issubclass(typ, base_type):
            return tostring(result, encoding=encoding)
    return result
def fragments_fromstring(html, no_leading_text=False, base_url=None,
                         parser=None, **kw):
    """
    Parses several HTML elements, returning a list of elements.

    The first item in the list may be a string (though leading
    whitespace is removed).  If no_leading_text is true, then it will
    be an error if there is leading text, and it will always be a list
    of only elements.

    base_url will set the document's base_url attribute (and the tree's docinfo.URL)
    """
    if parser is None:
        parser = html_parser
    # FIXME: check what happens when you give html with a body, head, etc.
    # wrap bare fragments in a full document so the parser accepts them
    if isinstance(html, bytes):
        if not _looks_like_full_html_bytes(html):
            # can't use %-formatting in early Py3 versions
            html = ('<html><body>'.encode('ascii') + html +
                    '</body></html>'.encode('ascii'))
    else:
        if not _looks_like_full_html_unicode(html):
            html = '<html><body>%s</body></html>' % html
    doc = document_fromstring(html, parser=parser, base_url=base_url, **kw)
    assert _nons(doc.tag) == 'html'
    bodies = [e for e in doc if _nons(e.tag) == 'body']
    assert len(bodies) == 1, ("too many bodies: %r in %r" % (bodies, html))
    body = bodies[0]
    elements = []
    # leading text is either an error or the first list item, depending
    # on no_leading_text
    if no_leading_text and body.text and body.text.strip():
        raise etree.ParserError(
            "There is leading text: %r" % body.text)
    if body.text and body.text.strip():
        elements.append(body.text)
    elements.extend(body)
    # FIXME: removing the reference to the parent artificial document
    # would be nice
    return elements
def fragment_fromstring(html, create_parent=False, base_url=None,
                        parser=None, **kw):
    """
    Parses a single HTML element; it is an error if there is more than
    one element, or if anything but whitespace precedes or follows the
    element.

    If ``create_parent`` is true (or is a tag name) then a parent node
    will be created to encapsulate the HTML in a single element.  In this
    case, leading or trailing text is also allowed, as are multiple elements
    as result of the parsing.

    Passing a ``base_url`` will set the document's ``base_url`` attribute
    (and the tree's docinfo.URL).
    """
    if parser is None:
        parser = html_parser

    accept_leading_text = bool(create_parent)

    elements = fragments_fromstring(
        html, parser=parser, no_leading_text=not accept_leading_text,
        base_url=base_url, **kw)

    if create_parent:
        # a truthy non-string means "use the default wrapper tag"
        if not isinstance(create_parent, basestring):
            create_parent = 'div'
        new_root = Element(create_parent)
        if elements:
            # leading text becomes the wrapper element's text
            if isinstance(elements[0], basestring):
                new_root.text = elements[0]
                del elements[0]
            new_root.extend(elements)
        return new_root

    if not elements:
        raise etree.ParserError('No elements found')
    if len(elements) > 1:
        raise etree.ParserError(
            "Multiple elements found (%s)"
            % ', '.join([_element_name(e) for e in elements]))
    el = elements[0]
    # trailing whitespace is tolerated (and dropped); real text is an error
    if el.tail and el.tail.strip():
        raise etree.ParserError(
            "Element followed by text: %r" % el.tail)
    el.tail = None
    return el
def fromstring(html, base_url=None, parser=None, **kw):
    """
    Parse the html, returning a single element/document.

    This tries to minimally parse the chunk of text, without knowing if it
    is a fragment or a document.

    base_url will set the document's base_url attribute (and the tree's docinfo.URL)
    """
    if parser is None:
        parser = html_parser
    if isinstance(html, bytes):
        is_full_html = _looks_like_full_html_bytes(html)
    else:
        is_full_html = _looks_like_full_html_unicode(html)
    doc = document_fromstring(html, parser=parser, base_url=base_url, **kw)
    if is_full_html:
        return doc
    # otherwise, lets parse it out...
    bodies = doc.findall('body')
    if not bodies:
        # fall back to the namespaced lookup for XHTML input
        bodies = doc.findall('{%s}body' % XHTML_NAMESPACE)
    if bodies:
        body = bodies[0]
        if len(bodies) > 1:
            # Somehow there are multiple bodies, which is bad, but just
            # smash them into one body
            for other_body in bodies[1:]:
                if other_body.text:
                    if len(body):
                        body[-1].tail = (body[-1].tail or '') + other_body.text
                    else:
                        body.text = (body.text or '') + other_body.text
                body.extend(other_body)
                # We'll ignore tail
                # I guess we are ignoring attributes too
                other_body.drop_tree()
    else:
        body = None
    heads = doc.findall('head')
    if not heads:
        heads = doc.findall('{%s}head' % XHTML_NAMESPACE)
    if heads:
        # Well, we have some sort of structure, so lets keep it all
        head = heads[0]
        if len(heads) > 1:
            # merge every extra head's children into the first one
            for other_head in heads[1:]:
                head.extend(other_head)
                # We don't care about text or tail in a head
                other_head.drop_tree()
        return doc
    if body is None:
        return doc
    if (len(body) == 1 and (not body.text or not body.text.strip())
            and (not body[-1].tail or not body[-1].tail.strip())):
        # The body has just one element, so it was probably a single
        # element passed in
        return body[0]
    # Now we have a body which represents a bunch of tags which have the
    # content that was passed in.  We will create a fake container, which
    # is the body tag, except <body> implies too much structure.
    if _contains_block_level_tag(body):
        body.tag = 'div'
    else:
        body.tag = 'span'
    return body
def parse(filename_or_url, parser=None, base_url=None, **kw):
    """
    Parse a filename, URL, or file-like object into an HTML document
    tree.  Note: this returns a tree, not an element.  Use
    ``parse(...).getroot()`` to get the document root.

    You can override the base URL with the ``base_url`` keyword.  This
    is most useful when parsing from a file-like object.
    """
    # fall back to the module-level HTML parser unless one was supplied
    chosen_parser = html_parser if parser is None else parser
    return etree.parse(filename_or_url, chosen_parser,
                       base_url=base_url, **kw)
def submit_form(form, extra_values=None, open_http=None):
    """
    Helper function to submit a form.  Returns a file-like object, as from
    ``urllib.urlopen()``.  This object also has a ``.geturl()`` function,
    which shows the URL if there were any redirects.

    You can use this like::

        form = doc.forms[0]
        form.inputs['foo'].value = 'bar' # etc
        response = form.submit()
        doc = parse(response)
        doc.make_links_absolute(response.geturl())

    To change the HTTP requester, pass a function as ``open_http`` keyword
    argument that opens the URL for you.  The function must have the following
    signature::

        open_http(method, URL, values)

    The action is one of 'GET' or 'POST', the URL is the target URL as a
    string, and the values are a sequence of ``(name, value)`` tuples with the
    form data.
    """
    values = form.form_values()
    if extra_values:
        # accept both mappings and sequences of (name, value) pairs
        if hasattr(extra_values, 'items'):
            extra_values = extra_values.items()
        values.extend(extra_values)
    opener = open_http if open_http is not None else open_http_urllib
    # prefer the form's explicit action URL; fall back to its base URL
    url = form.action or form.base_url
    return opener(form.method, url, values)
def html_to_xhtml(html):
    """Convert all tags in an HTML tree to XHTML by moving them to the
    XHTML namespace.
    """
    try:
        # accept an ElementTree as well as a bare element
        html = html.getroot()
    except AttributeError:
        pass
    prefix = "{%s}" % XHTML_NAMESPACE
    for el in html.iter(etree.Element):
        tag = el.tag
        # only rewrite tags that are not already namespaced
        if not tag.startswith('{'):
            el.tag = prefix + tag
def xhtml_to_html(xhtml):
    """Convert all tags in an XHTML tree to HTML by removing their
    XHTML namespace.
    """
    try:
        # accept an ElementTree as well as a bare element
        xhtml = xhtml.getroot()
    except AttributeError:
        pass
    prefix = "{%s}" % XHTML_NAMESPACE
    strip_len = len(prefix)
    # iterate only elements carrying the XHTML namespace and strip it
    for el in xhtml.iter(prefix + "*"):
        el.tag = el.tag[strip_len:]
def tostring(doc, pretty_print=False, include_meta_content_type=False,
             encoding=None, method="html", with_tail=True, doctype=None):
    """Return an HTML string representation of the document.

    Note: if include_meta_content_type is true this will create a
    ``<meta http-equiv="Content-Type" ...>`` tag in the head;
    regardless of the value of include_meta_content_type any existing
    ``<meta http-equiv="Content-Type" ...>`` tag will be removed

    The ``encoding`` argument controls the output encoding (defaults to
    ASCII, with &#...; character references for any characters outside
    of ASCII).  Note that you can pass the name ``'unicode'`` as
    ``encoding`` argument to serialise to a Unicode string.

    The ``method`` argument defines the output method.  It defaults to
    'html', but can also be 'xml' for xhtml output, or 'text' to
    serialise to plain text without markup.

    To leave out the tail text of the top-level element that is being
    serialised, pass ``with_tail=False``.

    The ``doctype`` option allows passing in a plain string that will
    be serialised before the XML tree.  Note that passing in non
    well-formed content here will make the XML output non well-formed.
    Also, an existing doctype in the document tree will not be removed
    when serialising an ElementTree instance.

    Example::

        >>> from lxml import html
        >>> root = html.fragment_fromstring('<p>Hello<br>world!</p>')

        >>> html.tostring(root)
        b'<p>Hello<br>world!</p>'
        >>> html.tostring(root, method='html')
        b'<p>Hello<br>world!</p>'

        >>> html.tostring(root, method='xml')
        b'<p>Hello<br/>world!</p>'

        >>> html.tostring(root, method='text')
        b'Helloworld!'

        >>> html.tostring(root, method='text', encoding='unicode')
        u'Helloworld!'

        >>> root = html.fragment_fromstring('<div><p>Hello<br>world!</p>TAIL</div>')
        >>> html.tostring(root[0], method='text', encoding='unicode')
        u'Helloworld!TAIL'

        >>> html.tostring(root[0], method='text', encoding='unicode', with_tail=False)
        u'Helloworld!'

        >>> doc = html.document_fromstring('<p>Hello<br>world!</p>')
        >>> html.tostring(doc, method='html', encoding='unicode')
        u'<html><body><p>Hello<br>world!</p></body></html>'

        >>> print(html.tostring(doc, method='html', encoding='unicode',
        ...          doctype='<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"'
        ...                  ' "http://www.w3.org/TR/html4/strict.dtd">'))
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
        <html><body><p>Hello<br>world!</p></body></html>
    """
    html = etree.tostring(doc, method=method, pretty_print=pretty_print,
                          encoding=encoding, with_tail=with_tail,
                          doctype=doctype)
    if method == 'html' and not include_meta_content_type:
        # strip any <meta http-equiv="Content-Type"> tag the serializer
        # kept; the replacement helper depends on the output string type
        if isinstance(html, str):
            html = __str_replace_meta_content_type('', html)
        else:
            html = __bytes_replace_meta_content_type(bytes(), html)
    return html
def open_in_browser(doc, encoding=None):
    """
    Open the HTML document in a web browser, saving it to a temporary
    file to open it.  Note that this does not delete the file after
    use.  This is mainly meant for debugging.
    """
    import os
    import webbrowser
    import tempfile
    if not isinstance(doc, etree._ElementTree):
        doc = etree.ElementTree(doc)
    handle, fn = tempfile.mkstemp(suffix='.html')
    # the temp file itself is deliberately leaked; we only make sure the
    # handle is closed once the document has been written
    with os.fdopen(handle, 'wb') as fileobj:
        doc.write(fileobj, method="html",
                  encoding=encoding or doc.docinfo.encoding or "UTF-8")
    url = 'file://' + fn.replace(os.path.sep, '/')
    print(url)
    webbrowser.open(url)
def _label__get(self):
    """
    Get or set any <label> element associated with this element.
    """
    element_id = self.get('id')
    # labels reference their target by id, so no id means no label
    if not element_id:
        return None
    matches = _label_xpath(self, id=element_id)
    return matches[0] if matches else None
def drop_tree(self):
    """
    Removes this element from the tree, including its children and
    text.  The tail text is joined to the previous element or
    parent.
    """
    parent = self.getparent()
    assert parent is not None
    if self.tail:
        # splice our tail text onto whatever precedes us: the previous
        # sibling's tail, or the parent's text if we are the first child
        sibling = self.getprevious()
        if sibling is not None:
            sibling.tail = (sibling.tail or '') + self.tail
        else:
            parent.text = (parent.text or '') + self.tail
    parent.remove(self)
def drop_tag(self):
    """
    Remove the tag, but not its children or text.  The children and text
    are merged into the parent.

    Example::

        >>> h = fragment_fromstring('<div>Hello <b>World!</b></div>')
        >>> h.find('.//b').drop_tag()
        >>> print(tostring(h, encoding='unicode'))
        <div>Hello World!</div>
    """
    parent = self.getparent()
    assert parent is not None
    previous = self.getprevious()
    if self.text and isinstance(self.tag, basestring):
        # not a Comment, etc.
        # our leading text attaches to the previous sibling's tail, or
        # to the parent's text if we are the first child
        if previous is None:
            parent.text = (parent.text or '') + self.text
        else:
            previous.tail = (previous.tail or '') + self.text
    if self.tail:
        # the tail follows our last child if we have any; otherwise it
        # merges into the preceding text just like the leading text above
        if len(self):
            last = self[-1]
            last.tail = (last.tail or '') + self.tail
        elif previous is None:
            parent.text = (parent.text or '') + self.tail
        else:
            previous.tail = (previous.tail or '') + self.tail
    index = parent.index(self)
    # slice-assignment splices our children into our former position
    parent[index:index+1] = self[:]
def find_rel_links(self, rel):
    """
    Find any links like ``<a rel="{rel}">...</a>``; returns a list of elements.
    """
    # rel values are matched case-insensitively
    wanted = rel.lower()
    return [link for link in _rel_links_xpath(self)
            if link.get('rel').lower() == wanted]
def get_element_by_id(self, id, *default):
    """
    Get the first element in a document with the given id.  If none is
    found, return the default argument if provided or raise KeyError
    otherwise.

    Note that there can be more than one element with the same id,
    and this isn't uncommon in HTML documents found in the wild.
    Browsers return only the first match, and this function does the
    same.
    """
    # browsers return the first match, so we do too; any additional
    # matches are deliberately ignored
    matches = _id_xpath(self, id=id)
    if matches:
        return matches[0]
    if default:
        return default[0]
    raise KeyError(id)
def cssselect(self, expr, translator='html'):
    """
    Run the CSS expression on this element and its children,
    returning a list of the results.

    Equivalent to lxml.cssselect.CSSSelect(expr, translator='html')(self)
    -- note that pre-compiling the expression can provide a substantial
    speedup.
    """
    # Imported lazily so cssselect remains an optional dependency.
    from lxml.cssselect import CSSSelector
    selector = CSSSelector(expr, translator=translator)
    return selector(self)
def make_links_absolute(self, base_url=None, resolve_base_href=True,
                        handle_failures=None):
    """
    Make all links in the document absolute, given the
    ``base_url`` for the document (the full URL where the document
    came from), or if no ``base_url`` is given, then the ``.base_url``
    of the document.

    If ``resolve_base_href`` is true, then any ``<base href>``
    tags in the document are used *and* removed from the document.
    If it is false then any such tag is ignored.

    If ``handle_failures`` is None (default), a failure to process
    a URL will abort the processing.  If set to 'ignore', errors
    are ignored.  If set to 'discard', failing URLs will be removed.
    """
    if base_url is None:
        base_url = self.base_url
        if base_url is None:
            raise TypeError(
                "No base_url given, and the document has no base_url")
    if resolve_base_href:
        self.resolve_base_href()

    if handle_failures not in (None, 'ignore', 'discard'):
        raise ValueError(
            "unexpected value for handle_failures: %r" % handle_failures)

    def link_repl(href):
        if handle_failures is None:
            # Let urljoin's ValueError propagate and abort processing.
            return urljoin(base_url, href)
        try:
            return urljoin(base_url, href)
        except ValueError:
            # 'ignore' keeps the original href; 'discard' drops it
            # (rewrite_links removes links mapped to None).
            return href if handle_failures == 'ignore' else None

    self.rewrite_links(link_repl)
def resolve_base_href(self, handle_failures=None):
    """
    Find any ``<base href>`` tag in the document, and apply its
    values to all links found in the document.  Also remove the
    tag once it has been applied.

    If ``handle_failures`` is None (default), a failure to process
    a URL will abort the processing.  If set to 'ignore', errors
    are ignored.  If set to 'discard', failing URLs will be removed.
    """
    base_href = None
    # Check both the plain and XHTML-namespaced spelling of <base>.
    basetags = self.xpath('//base[@href]|//x:base[@href]',
                          namespaces={'x': XHTML_NAMESPACE})
    for basetag in basetags:
        # Last <base href> wins; every one is removed from the tree.
        base_href = basetag.get('href')
        basetag.drop_tree()
    if base_href:
        self.make_links_absolute(base_href, resolve_base_href=False,
                                 handle_failures=handle_failures)
def iterlinks(self):
    """
    Yield (element, attribute, link, pos), where attribute may be None
    (indicating the link is in the text).  ``pos`` is the position
    where the link occurs; often 0, but sometimes something else in
    the case of links in stylesheets or style tags.

    Note: <base href> is *not* taken into account in any way.  The
    link you get is exactly the link in the document.

    Note: multiple links inside of a single text string or
    attribute value are returned in reversed order.  This makes it
    possible to replace or delete them from the text string value
    based on their reported text positions.  Otherwise, a
    modification at one text position can change the positions of
    links reported later on.
    """
    # Attribute names that can carry URLs (href, src, ...), as
    # declared in the defs module.
    link_attrs = defs.link_attrs
    # Iterate only real elements, skipping comments/PIs.
    for el in self.iter(etree.Element):
        attribs = el.attrib
        tag = _nons(el.tag)
        if tag == 'object':
            codebase = None
            ## <object> tags have attributes that are relative to
            ## codebase
            if 'codebase' in attribs:
                codebase = el.get('codebase')
                yield (el, 'codebase', codebase, 0)
            for attrib in ('classid', 'data'):
                if attrib in attribs:
                    value = el.get(attrib)
                    if codebase is not None:
                        # classid/data resolve against codebase first.
                        value = urljoin(codebase, value)
                    yield (el, attrib, value, 0)
            if 'archive' in attribs:
                # 'archive' is a whitespace-separated URL list; report
                # each entry with its offset within the attribute value.
                for match in _archive_re.finditer(el.get('archive')):
                    value = match.group(0)
                    if codebase is not None:
                        value = urljoin(codebase, value)
                    yield (el, 'archive', value, match.start())
        else:
            for attrib in link_attrs:
                if attrib in attribs:
                    yield (el, attrib, attribs[attrib], 0)
        if tag == 'meta':
            http_equiv = attribs.get('http-equiv', '').lower()
            if http_equiv == 'refresh':
                content = attribs.get('content', '')
                match = _parse_meta_refresh_url(content)
                url = (match.group('url') if match else content).strip()
                # unexpected content means the redirect won't work, but we might
                # as well be permissive and return the entire string.
                if url:
                    url, pos = _unquote_match(
                        url, match.start('url') if match else content.find(url))
                    yield (el, 'content', url, pos)
        elif tag == 'param':
            valuetype = el.get('valuetype') or ''
            if valuetype.lower() == 'ref':
                ## FIXME: while it's fine we *find* this link, according
                ## to the spec we aren't supposed to actually change the
                ## value, including resolving it.
                ## It can also still be a link, even if it doesn't have a
                ## valuetype="ref" (which seems to be the norm)
                ## http://www.w3.org/TR/html401/struct/objects.html#adef-valuetype
                yield (el, 'value', el.get('value'), 0)
        elif tag == 'style' and el.text:
            # Collect url(...) references and @import targets from the
            # stylesheet text, each as a (start_pos, url) pair.
            urls = [
                # (start_pos, url)
                _unquote_match(match.group(1), match.start(1))[::-1]
                for match in _iter_css_urls(el.text)
            ] + [
                (match.start(1), match.group(1))
                for match in _iter_css_imports(el.text)
            ]
            if urls:
                # sort by start pos to bring both match sets back into order
                # and reverse the list to report correct positions despite
                # modifications
                urls.sort(reverse=True)
                for start, url in urls:
                    yield (el, None, url, start)
        if 'style' in attribs:
            # Inline style="..." attributes can also contain url(...)
            # references.
            urls = list(_iter_css_urls(attribs['style']))
            if urls:
                # return in reversed order to simplify in-place modifications
                for match in urls[::-1]:
                    url, start = _unquote_match(match.group(1), match.start(1))
                    yield (el, 'style', url, start)
def rewrite_links(self, link_repl_func, resolve_base_href=True,
                  base_href=None):
    """
    Rewrite all the links in the document.  For each link
    ``link_repl_func(link)`` will be called, and the return value
    will replace the old link.

    Note that links may not be absolute (unless you first called
    ``make_links_absolute()``), and may be internal (e.g.,
    ``'#anchor'``).  They can also be values like
    ``'mailto:email'`` or ``'javascript:expr'``.

    If you give ``base_href`` then all links passed to
    ``link_repl_func()`` will take that into account.

    If the ``link_repl_func`` returns None, the attribute or
    tag text will be removed completely.
    """
    if base_href is not None:
        # FIXME: this can be done in one pass with a wrapper
        # around link_repl_func
        self.make_links_absolute(
            base_href, resolve_base_href=resolve_base_href)
    elif resolve_base_href:
        self.resolve_base_href()
    # iterlinks() reports multiple links within one text/attribute
    # value in reversed positional order, so the in-place splicing
    # below never shifts the positions of links yet to be processed.
    for el, attrib, link, pos in self.iterlinks():
        new_link = link_repl_func(link.strip())
        if new_link == link:
            # Unchanged -- avoid touching the tree at all.
            continue
        if new_link is None:
            # Remove the attribute or element content
            if attrib is None:
                el.text = ''
            else:
                del el.attrib[attrib]
            continue
        if attrib is None:
            # Link lives in the element's text (e.g. <style> content):
            # splice the replacement in at the reported position.
            new = el.text[:pos] + new_link + el.text[pos+len(link):]
            el.text = new
        else:
            cur = el.get(attrib)
            if not pos and len(cur) == len(link):
                new = new_link  # most common case
            else:
                # Link is embedded in a larger attribute value
                # (e.g. a style="...url(...)..." attribute).
                new = cur[:pos] + new_link + cur[pos+len(link):]
            el.set(attrib, new)
def form_values(self):
    """
    Return a list of tuples of the field values for the form.
    This is suitable to be passed to ``urllib.urlencode()``.
    """
    results = []
    for field in self.inputs:
        name = field.name
        if not name:
            # Unnamed fields are never submitted.
            continue
        tag = _nons(field.tag)
        if tag == 'textarea':
            results.append((name, field.value))
        elif tag == 'select':
            value = field.value
            if field.multiple:
                # Multi-selects contribute one pair per selected option.
                for selected in value:
                    results.append((name, selected))
            elif value is not None:
                results.append((name, field.value))
        else:
            assert tag == 'input', (
                "Unexpected tag: %r" % field)
            # Unchecked checkboxes/radios and button-like inputs are
            # not part of the submitted data.
            if field.checkable and not field.checked:
                continue
            if field.type in ('submit', 'image', 'reset'):
                continue
            if field.value is not None:
                results.append((name, field.value))
    return results
def _action__get(self):
    """
    Get/set the form's ``action`` attribute.
    """
    action = self.get('action')
    base_url = self.base_url
    if action is None or not base_url:
        # No action attribute, or no base to resolve against.
        return action
    return urljoin(base_url, action)
def _value__get(self):
    """
    Get/set the value (which is the contents of this element)
    """
    # XHTML elements must be serialized as XML; plain HTML as HTML.
    if self.tag.startswith("{%s}" % XHTML_NAMESPACE):
        method = 'xml'
    else:
        method = 'html'
    parts = [self.text or '']
    for child in self:
        parts.append(etree.tostring(
            child, method=method, encoding='unicode'))
    return ''.join(parts)
def _value__get(self):
    """
    Get/set the value of this select (the selected option).

    If this is a multi-select, this is a set-like object that
    represents all the selected options.
    """
    if self.multiple:
        return MultipleSelectOptions(self)
    for option in _options_xpath(self):
        if option.get('selected') is None:
            continue
        # First selected option wins.
        value = option.get('value')
        if value is None:
            # No explicit value attribute: fall back to the option text.
            value = option.text or ''
        if value:
            value = value.strip()
        return value
    return None
def value_options(self):
    """
    All the possible values this select can have (the ``value``
    attribute of all the ``<option>`` elements.
    """
    options = []
    for option in _options_xpath(self):
        value = option.get('value')
        if value is None:
            # No explicit value attribute: fall back to the option text.
            value = option.text or ''
        options.append(value.strip() if value else value)
    return options
def _value__get(self):
    """
    Get/set the value of this element, using the ``value`` attribute.

    Also, if this is a checkbox and it has no value, this defaults
    to ``'on'``.  If it is a checkbox or radio that is not checked,
    this returns None.
    """
    if not self.checkable:
        return self.get('value')
    if not self.checked:
        # Unchecked checkable inputs have no submitted value.
        return None
    return self.get('value') or 'on'
def _for_element__get(self):
    """
    Get/set the element this label points to.  Return None if it
    can't be found.
    """
    target_id = self.get('for')
    if not target_id:
        return None
    return self.body.get_element_by_id(target_id)
def classpath(v):
    """given a class/instance return the full class path (eg, prefix.module.Classname)

    :param v: class or instance
    :returns: string, the full classpath of v
    """
    # Normalize instances to their class before formatting.
    klass = v if isinstance(v, type) else v.__class__
    return strclass(klass)
def loghandler_members():
    """iterate through the attributes of every logger's handler

    this is used to switch out stderr and stdout in tests when buffer is True

    :returns: generator of tuples, each tuple has (name, handler,
        member_name, member_val)
    """
    Members = namedtuple("Members", ["name", "handler", "member_name", "member"])

    manager = logging.Logger.manager
    # Skip this module's own logger so we don't inspect ourselves.
    ignore = set([modname()])

    loggers = []
    if manager.root:
        loggers = list(manager.loggerDict.items())
        # The root logger is not in loggerDict; add it explicitly.
        loggers.append(("root", manager.root))

    for logger_name, logger in loggers:
        if logger_name in ignore:
            continue
        # Placeholder entries in loggerDict have no handlers attribute.
        for handler in getattr(logger, "handlers", []):
            for member_name, member in inspect.getmembers(handler):
                yield Members(logger_name, handler, member_name, member)
def get_counts():
    """return test counts that are set via pyt environment variables when pyt
    runs the test

    :returns: dict, 3 keys (classes, tests, modules) and how many tests of
        each were found by pyt
    """
    # Map each pyt environment variable to its key in the result dict;
    # missing variables count as 0.
    env_to_key = {
        'PYT_TEST_CLASS_COUNT': "classes",
        'PYT_TEST_COUNT': "tests",
        'PYT_TEST_MODULE_COUNT': "modules",
    }
    return {
        key: int(os.environ.get(env_name, 0))
        for env_name, key in env_to_key.items()
    }
def is_single_class():
    """Returns True if only a single class is being run or some tests within a
    single class"""
    counts = get_counts()
    class_count = counts["classes"]
    module_count = counts["modules"]
    if class_count < 1 and module_count < 1:
        # No classes or modules found: single-class iff individual
        # tests were found.
        return counts["tests"] > 0
    return class_count <= 1 and module_count <= 1
def is_single_module():
    """Returns True if only a module is being run"""
    module_count = get_counts()["modules"]
    if module_count == 1:
        return True
    if module_count < 1:
        # No module count at all: fall back to the class-level check.
        return is_single_class()
    return False
def validate_params(request):
    """Validate the optional ``params`` member of a request.

    :param request: dict-like request object
    :raises AssertionError: if ``params`` is present but is neither a
        list nor a dict
    """
    if 'params' in request:
        # Raise explicitly instead of using the ``assert`` statement:
        # ``assert`` is stripped when Python runs with -O, which would
        # silently disable this validation.  AssertionError is kept so
        # existing callers that catch it keep working.
        if not isinstance(request['params'], (list, dict)):
            raise AssertionError('Incorrect parameter values')
def validate_id(request):
    """Validate the optional ``id`` member of a request.

    :param request: dict-like request object
    :raises AssertionError: if ``id`` is present but is not a string,
        an integer, or None
    """
    if 'id' in request:
        # BUG FIX: the type tuple previously contained ``None`` itself,
        # which makes isinstance() raise TypeError for any id that is
        # not a string or int -- i.e. exactly when validation should
        # report the error.  ``type(None)`` is the correct spelling.
        # Also raise explicitly instead of using ``assert`` so the
        # check survives Python's -O mode; AssertionError is kept so
        # existing callers that catch it keep working.
        if not isinstance(request['id'], (string_types, int, type(None))):
            raise AssertionError('Incorrect identifier')