_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q37600
IdentifierSQLiteIndex.list_documents
train
def list_documents(self, limit=None):
    """Yield the vid of every indexed identifier.

    Args:
        limit (int, optional): if truthy, the maximum number of results
            to return.

    Yields:
        str: vid of the document.
    """
    suffix = ''
    if limit:
        try:
            suffix = 'LIMIT {}'.format(int(limit))
        except (TypeError, ValueError):
            # A non-numeric limit is silently ignored.
            pass
    query = 'SELECT identifier FROM identifier_index ' + suffix
    connection = self.backend.library.database.connection
    for record in connection.execute(query).fetchall():
        yield record['identifier']
python
{ "resource": "" }
q37601
IdentifierSQLiteIndex.reset
train
def reset(self):
    """Drop the index table backing this index."""
    sql = """ DROP TABLE identifier_index; """
    conn = self.backend.library.database.connection
    conn.execute(sql)
python
{ "resource": "" }
q37602
timezone
train
def timezone(utcoffset):
    '''Return a string representing the timezone offset.

    Remaining seconds are rounded to the nearest minute; when rounding
    produces a full 60 minutes the extra minute is carried into the hours,
    so the result is always a valid "+HH:MM"/"-HH:MM" offset.

    >>> timezone(3600)
    '+01:00'
    >>> timezone(5400)
    '+01:30'
    >>> timezone(-28800)
    '-08:00'
    >>> timezone(3599)
    '+01:00'
    '''
    hours, seconds = divmod(abs(utcoffset), 3600)
    minutes = int(round(float(seconds) / 60))
    # Bug fix: the old code could emit strings like '+00:60' when the
    # seconds rounded up to a whole hour (e.g. utcoffset=3599).
    hours += minutes // 60
    minutes %= 60
    sign = '+' if utcoffset >= 0 else '-'
    return '{0}{1:02d}:{2:02d}'.format(sign, int(hours), minutes)
python
{ "resource": "" }
q37603
BuildConfigGroupAccessor.build_duration
train
def build_duration(self):
    """Return the difference between build and build_done states"""
    finished = int(self.state.build_done)
    started = int(self.state.build)
    return finished - started
python
{ "resource": "" }
q37604
BuildConfigGroupAccessor.build_duration_pretty
train
def build_duration_pretty(self):
    """Return the difference between build and build_done states, in a
    human readable format"""
    from ambry.util import pretty_time
    from time import time
    if not self.state.building:
        return None
    finished = self.state.built or time()
    try:
        return pretty_time(int(finished) - int(self.state.building))
    except TypeError:
        # one of the values is None or not a number
        return None
python
{ "resource": "" }
q37605
BuildConfigGroupAccessor.built_datetime
train
def built_datetime(self): """Return the built time as a datetime object""" from datetime import datetime try: return datetime.fromtimestamp(self.state.build_done) except TypeError: # build_done is null return None
python
{ "resource": "" }
q37606
BuildConfigGroupAccessor.new_datetime
train
def new_datetime(self): """Return the time the bundle was created as a datetime object""" from datetime import datetime try: return datetime.fromtimestamp(self.state.new) except TypeError: return None
python
{ "resource": "" }
q37607
BuildConfigGroupAccessor.last_datetime
train
def last_datetime(self): """Return the time of the last operation on the bundle as a datetime object""" from datetime import datetime try: return datetime.fromtimestamp(self.state.lasttime) except TypeError: return None
python
{ "resource": "" }
q37608
list_product_versions
train
def list_product_versions(page_size=200, page_index=0, sort="", q=""):
    """ List all ProductVersions """
    content = list_product_versions_raw(page_size, page_index, sort, q)
    if not content:
        return None
    return utils.format_json_list(content)
python
{ "resource": "" }
q37609
create_product_version
train
def create_product_version(product_id, version, **kwargs):
    """
    Create a new ProductVersion.

    Each ProductVersion represents a supported product release stream,
    which includes milestones and releases typically associated with a
    single major.minor version of a Product. Follows the Red Hat product
    support cycle, and typically includes Alpha, Beta, GA, and CP releases
    with the same major.minor version.

    Example: ProductVersion 1.0 includes the following releases:
    1.0.Beta1, 1.0.GA, 1.0.1, etc.
    """
    data = create_product_version_raw(product_id, version, **kwargs)
    if not data:
        return None
    return utils.format_json(data)
python
{ "resource": "" }
q37610
update_product_version
train
def update_product_version(id, **kwargs):
    """ Update the ProductVersion with ID id with new values. """
    content = update_product_version_raw(id, **kwargs)
    if not content:
        return None
    return utils.format_json(content)
python
{ "resource": "" }
q37611
Widget.prepare_data
train
def prepare_data(self):
    '''Method returning data passed to template. Subclasses can
    override it.'''
    return dict(
        widget=self,
        field=self.field,
        value=self.get_raw_value(),
        readonly=not self.field.writable,
    )
python
{ "resource": "" }
q37612
Widget.render
train
def render(self):
    '''Renders widget to template'''
    # prepare_data() is always called first (it may have side effects),
    # matching the original call order.
    data = self.prepare_data()
    if not self.field.readable:
        return ''
    return self.env.template.render(self.template, **data)
python
{ "resource": "" }
q37613
AsyncRequestEngine._request
train
def _request(self, url, *, method='GET', headers=None, data=None,
             result_callback=None):
    """Perform asynchronous request.

    :param str url: request URL.
    :param str method: request method.
    :param dict headers: request headers.
    :param object data: JSON-encodable object.
    :param object -> object result_callback: result callback.

    :rtype: dict
    :raise: APIError
    """
    request = self._prepare_request(url, method, headers, data)
    # Countdown of remaining retries; when self._conn_retries is None,
    # connection errors are not retried at all (see the 599 branch).
    retries_left = self._conn_retries
    while True:
        try:
            response = yield self._client.fetch(request)
            try:
                if result_callback:
                    return result_callback(response.body)
            except (ValueError, TypeError) as err:
                # The callback failed to decode/convert the payload.
                raise MalformedResponse(err) from None
            return response.body
        except httpclient.HTTPError as err:
            # err.response may be None for connection-level failures.
            resp_body = err.response.body \
                if err.response is not None else None
            if err.code == 599:
                # 599 is Tornado's synthetic code for connection errors
                # (timeout, refused connection, etc.).
                if self._conn_retries is None or retries_left <= 0:
                    raise CommunicationError(err) from None
                else:
                    retries_left -= 1
                    # Linear backoff: 2s, 4s, 6s, ...
                    retry_in = (self._conn_retries - retries_left) * 2
                    self._log.warning('Server communication error: %s. '
                                      'Retrying in %s seconds.',
                                      err, retry_in)
                    yield gen.sleep(retry_in)
                    continue
            elif 400 <= err.code < 500:
                raise ClientError(err.code, resp_body) from None
            raise ServerError(err.code, resp_body) from None
python
{ "resource": "" }
q37614
BlastReadsAlignments.adjustHspsForPlotting
train
def adjustHspsForPlotting(self, titleAlignments): """ Our HSPs are about to be plotted. If we are using e-values, these need to be adjusted. @param titleAlignments: An instance of L{TitleAlignment}. """ # If we're using bit scores, there's nothing to do. if self.scoreClass is HigherIsBetterScore: return # Convert all e-values to high positive values, and keep track of the # maximum converted value. maxConvertedEValue = None zeroHsps = [] # Note: don't call self.hsps() here because that will read them # from disk again, which is not what's wanted. for hsp in titleAlignments.hsps(): if hsp.score.score == 0.0: zeroHsps.append(hsp) else: convertedEValue = -1.0 * log10(hsp.score.score) hsp.score.score = convertedEValue if (maxConvertedEValue is None or convertedEValue > maxConvertedEValue): maxConvertedEValue = convertedEValue if zeroHsps: # Save values so that we can use them in self.adjustPlot self._maxConvertedEValue = maxConvertedEValue self._zeroEValueFound = True # Adjust all zero e-value HSPs to have numerically high values. if self.randomizeZeroEValues: for hsp in zeroHsps: hsp.score.score = (maxConvertedEValue + 2 + uniform( 0, ZERO_EVALUE_UPPER_RANDOM_INCREMENT)) else: for count, hsp in enumerate(zeroHsps, start=1): hsp.score.score = maxConvertedEValue + count else: self._zeroEValueFound = False
python
{ "resource": "" }
q37615
BlastReadsAlignments.adjustPlot
train
def adjustPlot(self, readsAx):
    """
    Add a horizontal line to the plotted reads if we're plotting e-values
    and a zero e-value was found.

    @param readsAx: A Matplotlib sub-plot instance, as returned by
        matplotlib.pyplot.subplot.
    """
    if self.scoreClass is HigherIsBetterScore:
        # Bit scores need no adjustment.
        return
    if self._zeroEValueFound:
        # Separate the randomized/offset zero e-value HSPs from the rest.
        readsAx.axhline(y=self._maxConvertedEValue + 0.5,
                        color='#cccccc', linewidth=0.5)
python
{ "resource": "" }
q37616
ExpMatrix.get_figure
train
def get_figure(self, heatmap_kw=None, **kwargs):
    """Generate a plotly figure showing the matrix as a heatmap.

    This is a shortcut for ``ExpMatrix.get_heatmap(...).get_figure(...)``.
    See :func:`ExpHeatmap.get_figure` for keyword arguments.

    Parameters
    ----------
    heatmap_kw : dict or None
        If not None, dictionary containing keyword arguments to be passed
        to the `ExpHeatmap` constructor.

    Returns
    -------
    `plotly.graph_objs.Figure`
        The plotly figure.
    """
    if heatmap_kw is None:
        heatmap_kw = {}
    else:
        assert isinstance(heatmap_kw, dict)
    heatmap = self.get_heatmap(**heatmap_kw)
    return heatmap.get_figure(**kwargs)
python
{ "resource": "" }
q37617
ExpMatrix.sort_genes
train
def sort_genes(self, stable=True, inplace=False, ascending=True):
    """Sort the rows of the matrix alphabetically by gene name.

    Parameters
    ----------
    stable: bool, optional
        Whether to use a stable sorting algorithm. [True]
    inplace: bool, optional
        Whether to perform the operation in place. [False]
    ascending: bool, optional
        Whether to sort in ascending order. [True]

    Returns
    -------
    `ExpMatrix`
        The sorted matrix.
    """
    # mergesort is pandas' stable sort; quicksort is not stable.
    kind = 'mergesort' if stable else 'quicksort'
    return self.sort_index(kind=kind, inplace=inplace, ascending=ascending)
python
{ "resource": "" }
q37618
ExpMatrix.sort_samples
train
def sort_samples(self, stable=True, inplace=False, ascending=True):
    """Sort the columns of the matrix alphabetically by sample name.

    Parameters
    ----------
    stable: bool, optional
        Whether to use a stable sorting algorithm. [True]
    inplace: bool, optional
        Whether to perform the operation in place. [False]
    ascending: bool, optional
        Whether to sort in ascending order. [True]

    Returns
    -------
    `ExpMatrix`
        The sorted matrix.
    """
    # mergesort is pandas' stable sort; quicksort is not stable.
    kind = 'mergesort' if stable else 'quicksort'
    return self.sort_index(axis=1, kind=kind, inplace=inplace,
                           ascending=ascending)
python
{ "resource": "" }
q37619
ExpMatrix.sample_correlations
train
def sample_correlations(self):
    """Returns an `ExpMatrix` containing all pairwise sample correlations.

    Returns
    -------
    `ExpMatrix`
        The sample correlation matrix.
    """
    # np.corrcoef works row-wise, so transpose to correlate samples.
    corr = np.corrcoef(self.X.T)
    return ExpMatrix(genes=self.samples, samples=self.samples, X=corr)
python
{ "resource": "" }
q37620
ExpMatrix.read_tsv
train
def read_tsv(cls, file_path: str, gene_table: ExpGeneTable = None, encoding: str = 'UTF-8', sep: str = '\t'): """Read expression matrix from a tab-delimited text file. Parameters ---------- file_path: str The path of the text file. gene_table: `ExpGeneTable` object, optional The set of valid genes. If given, the genes in the text file will be filtered against this set of genes. (None) encoding: str, optional The file encoding. ("UTF-8") sep: str, optional The separator. ("\t") Returns ------- `ExpMatrix` The expression matrix. """ # use pd.read_csv to parse the tsv file into a DataFrame matrix = cls(pd.read_csv(file_path, sep=sep, index_col=0, header=0, encoding=encoding)) # parse index column separately # (this seems to be the only way we can prevent pandas from converting # "nan" or "NaN" to floats in the index)['1_cell_306.120', '1_cell_086.024', '1_cell_168.103'] #ind = pd.read_csv(file_path, sep=sep, usecols=[0, ], header=0, # encoding=encoding, na_filter=False) ind = pd.read_csv(file_path, sep=sep, usecols=[0, ], header=None, skiprows=1, encoding=encoding, na_filter=False) matrix.index = ind.iloc[:, 0] matrix.index.name = 'Genes' if gene_table is not None: # filter genes matrix = matrix.filter_genes(gene_table.gene_names) return matrix
python
{ "resource": "" }
q37621
check_chain
train
def check_chain(chain):
    """Verify a merkle chain to see if the Merkle root can be reproduced.
    """
    # Start from the leaf's own hash and fold in each sibling.
    link = chain[0][0]
    for idx in range(1, len(chain) - 1):
        node_val, side = chain[idx][0], chain[idx][1]
        if side == 'R':
            link = hash_function(link + node_val).digest()
        elif side == 'L':
            link = hash_function(node_val + link).digest()
        else:
            raise MerkleError('Link %s has no side value: %s' % (
                str(idx), str(codecs.encode(node_val, 'hex_codec'))))
    if link != chain[-1][0]:
        raise MerkleError('The Merkle Chain is not valid.')
    return link
python
{ "resource": "" }
q37622
check_hex_chain
train
def check_hex_chain(chain):
    """Verify a merkle chain, with hashes hex encoded, to see if the
    Merkle root can be reproduced.
    """
    decoded = [(codecs.decode(entry[0], 'hex_codec'), entry[1])
               for entry in chain]
    return codecs.encode(check_chain(decoded), 'hex_codec')
python
{ "resource": "" }
q37623
MerkleTree.add_hash
train
def add_hash(self, value):
    """Add a Node based on a precomputed, hex encoded, hash value."""
    raw = codecs.decode(value, 'hex_codec')
    self.leaves.append(Node(raw, prehashed=True))
python
{ "resource": "" }
q37624
MerkleTree.clear
train
def clear(self):
    """Clears the Merkle Tree by releasing the Merkle root and each
    leaf's references; the rest should be garbage collected.

    Useful when you want to keep an existing tree, change its leaves, and
    leave it uncalculated for a while without stale node references
    hanging around. Usually it is better just to make a new tree.
    """
    self.root = None
    for leaf in self.leaves:
        leaf.p = None
        leaf.sib = None
        leaf.side = None
python
{ "resource": "" }
q37625
MerkleTree.build_fun
train
def build_fun(self, layer=None):
    """Calculate the merkle root and make references between nodes in the
    tree (iterative rewrite of the original recursive version).
    """
    if not layer:
        if not self.leaves:
            raise MerkleError('The tree has no leaves and cannot be calculated.')
        layer = self.leaves[::]
    # Aggregate level by level until a single node (the root) remains.
    layer = self._build(layer)
    while len(layer) > 1:
        layer = self._build(layer)
    self.root = layer[0]
    return self.root.val
python
{ "resource": "" }
q37626
MerkleTree._build
train
def _build(self, leaves):
    """Private helper function to create the next aggregation level and
    put all references in place.

    NOTE: mutates the passed-in list (pops the odd trailing leaf).
    """
    new, odd = [], None
    # check if even number of leaves, promote odd leaf to next level, if not
    if len(leaves) % 2 == 1:
        odd = leaves.pop(-1)
    # Pair up adjacent leaves; each pair gets a freshly hashed parent.
    for i in range(0, len(leaves), 2):
        newnode = Node(leaves[i].val + leaves[i + 1].val)
        # Wire parent/child, sibling and side references in place.
        newnode.l, newnode.r = leaves[i], leaves[i + 1]
        leaves[i].side, leaves[i + 1].side, leaves[i].p, leaves[i + 1].p = 'L', 'R', newnode, newnode
        leaves[i].sib, leaves[i + 1].sib = leaves[i + 1], leaves[i]
        new.append(newnode)
    if odd:
        # The unpaired leaf is promoted unchanged to the next level.
        new.append(odd)
    return new
python
{ "resource": "" }
q37627
MerkleTree.get_chain
train
def get_chain(self, index):
    """Assemble and return the chain leading from a given node to the
    merkle root of this tree.
    """
    node = self.leaves[index]
    chain = [(node.val, 'SELF')]
    # Walk upward, recording each sibling hash and its side.
    while node.p:
        chain.append((node.sib.val, node.sib.side))
        node = node.p
    chain.append((node.val, 'ROOT'))
    return chain
python
{ "resource": "" }
q37628
MerkleTree.get_all_chains
train
def get_all_chains(self):
    """Assemble and return a list of all chains for all leaf nodes to the
    merkle root.
    """
    return [self.get_chain(pos) for pos, _ in enumerate(self.leaves)]
python
{ "resource": "" }
q37629
MerkleTree.get_hex_chain
train
def get_hex_chain(self, index):
    """Assemble and return the chain leading from a given node to the
    merkle root of this tree, with hash values in hex form.
    """
    hex_chain = []
    for val, side in self.get_chain(index):
        hex_chain.append((codecs.encode(val, 'hex_codec'), side))
    return hex_chain
python
{ "resource": "" }
q37630
MerkleTree.get_all_hex_chains
train
def get_all_hex_chains(self):
    """Assemble and return a list of all chains for all nodes to the
    merkle root, hex encoded.
    """
    return [
        [(codecs.encode(val, 'hex_codec'), side) for val, side in chain]
        for chain in self.get_all_chains()
    ]
python
{ "resource": "" }
q37631
MerkleTree._get_whole_subtrees
train
def _get_whole_subtrees(self): """Returns an array of nodes in the tree that have balanced subtrees beneath them, moving from left to right. """ subtrees = [] loose_leaves = len(self.leaves) - 2**int(log(len(self.leaves), 2)) the_node = self.root while loose_leaves: subtrees.append(the_node.l) the_node = the_node.r loose_leaves = loose_leaves - 2**int(log(loose_leaves, 2)) subtrees.append(the_node) return subtrees
python
{ "resource": "" }
q37632
MerkleTree.add_adjust
train
def add_adjust(self, data, prehashed=False):
    """Add a new leaf, and adjust the tree, without rebuilding the whole
    thing.
    """
    # The roots of the balanced subtrees, left to right.
    subtrees = self._get_whole_subtrees()
    new_node = Node(data, prehashed=prehashed)
    self.leaves.append(new_node)
    # Merge the new node upward with each subtree root, right to left;
    # after each merge new_node becomes the freshly created parent.
    for node in reversed(subtrees):
        new_parent = Node(node.val + new_node.val)
        node.p, new_node.p = new_parent, new_parent
        new_parent.l, new_parent.r = node, new_node
        node.sib, new_node.sib = new_node, node
        node.side, new_node.side = 'L', 'R'
        new_node = new_node.p
    # The final parent is the new root of the tree.
    self.root = new_node
python
{ "resource": "" }
q37633
StaticGSEResult.fold_enrichment
train
def fold_enrichment(self):
    """Returns the fold enrichment of the gene set.

    Fold enrichment is defined as the ratio between the observed (k) and
    the expected (K * n / N) number of gene set genes present.
    """
    expected_count = self.K * (self.n / float(self.N))
    return self.k / expected_count
python
{ "resource": "" }
q37634
StaticGSEResult.get_pretty_format
train
def get_pretty_format(self, max_name_length=0):
    """Returns a nicely formatted string describing the result.

    Parameters
    ----------
    max_name_length: int [0]
        The maximum length of the gene set name (in characters). If the
        gene set name is longer than this number, it will be truncated and
        "..." will be appended, so that the final string exactly meets the
        length requirement. If 0 (default), no truncation is performed.
        If not 0, must be at least 3.

    Returns
    -------
    str
        The formatted string.

    Raises
    ------
    ValueError
        If an invalid length value is specified.
    """
    assert isinstance(max_name_length, (int, np.integer))
    if max_name_length < 0 or (1 <= max_name_length <= 2):
        raise ValueError('max_name_length must be 0 or >= 3.')

    name = self.gene_set._name
    if 0 < max_name_length < len(name):
        # Reserve three characters for the "..." suffix.
        name = name[:max_name_length - 3] + '...'

    stats = '(%d/%d @ %d/%d, pval=%.1e, fe=%.1fx)' \
        % (self.k, self.K, self.n, self.N, self.pval, self.fold_enrichment)
    return '%s %s' % (name, stats)
python
{ "resource": "" }
q37635
main
train
def main(recordFilenames, fastaFilename, title, xRange, bitRange):
    """
    Print reads that match in a specified X-axis and bit score range.

    @param recordFilenames: A C{list} of C{str} file names contain results
        of a BLAST run, in JSON format.
    @param fastaFilename: The C{str} name of the FASTA file that was
        originally BLASTed.
    @param title: The C{str} title of the subject sequence, as output by
        BLAST.
    @param xRange: A (start, end) list of C{int}s, giving an X-axis range
        or C{None} if the entire X axis range should be printed.
    @param bitRange: A (start, end) list of C{int}s, giving a bit score
        range or C{None} if the entire bit score range should be printed.
    """
    reads = FastaReads(fastaFilename)
    blastReadsAlignments = BlastReadsAlignments(reads, recordFilenames)
    # Keep only alignments against the wanted title; the '.' negative
    # regex discards every other title.
    filtered = blastReadsAlignments.filter(whitelist=set([title]),
                                           negativeTitleRegex='.')
    titlesAlignments = TitlesAlignments(filtered)

    if title not in titlesAlignments:
        print('%s: Title %r not found in BLAST output' % (sys.argv[0], title))
        sys.exit(3)

    for titleAlignment in titlesAlignments[title]:
        for hsp in titleAlignment.hsps:
            # Print the HSP when it overlaps the X range (if given) and
            # its score falls within the bit range (if given).
            if ((xRange is None or
                    (xRange[0] <= hsp.subjectEnd and
                     xRange[1] >= hsp.subjectStart)) and
                    (bitRange is None or
                     (bitRange[0] <= hsp.score.score <= bitRange[1]))):
                print(('query: %s, start: %d, end: %d, score: %d' % (
                    titleAlignment.read.id, hsp.subjectStart,
                    hsp.subjectEnd, hsp.score.score)))
python
{ "resource": "" }
q37636
fetch_seq
train
def fetch_seq(ac, start_i=None, end_i=None):
    """Fetches sequences and subsequences from NCBI eutils and Ensembl
    REST interfaces.

    :param string ac: accession of sequence to fetch
    :param int start_i: start position of *interbase* interval
    :param int end_i: end position of *interbase* interval

    **IMPORTANT** start_i and end_i specify 0-based interbase coordinates,
    which refer to junctions between nucleotides (numerically equivalent
    to 0-based, right-open nucleotide coordinates). Without an interval,
    the full sequence is returned — so prefer passing the interval over
    slicing the returned sequence, especially for chromosome-scale
    sequences (the remote service then returns only the subsequence).

    Essentially any RefSeq, Genbank, BIC, or Ensembl sequence may be
    fetched:

    >> fetch_seq('NP_056374.2', 0, 10)
    'MESRETLSSS'

    :raises RuntimeError: if no fetcher matches the accession, or the
        underlying HTTP fetch fails.
    """
    ac_dispatch = [
        {
            "re": re.compile(r"^(?:AC|N[CGMPRTW])_|^[A-L]\w\d|^U\d"),
            "fetcher": _fetch_seq_ncbi
        },
        {
            "re": re.compile(r"^ENS[TP]\d+"),
            "fetcher": _fetch_seq_ensembl
        },
    ]

    eligible_fetchers = [
        dr["fetcher"] for dr in ac_dispatch if dr["re"].match(ac)
    ]

    if len(eligible_fetchers) == 0:
        raise RuntimeError("No sequence fetcher for {ac}".format(ac=ac))

    # Bug fix: the original test was `>= 1`, which logged a spurious
    # "multiple fetchers" message even when exactly one fetcher matched.
    if len(eligible_fetchers) > 1:    # pragma: nocover (no way to test)
        _logger.debug("Multiple sequence fetchers found for "
                      "{ac}; using first".format(ac=ac))

    fetcher = eligible_fetchers[0]
    _logger.debug("fetching {ac} with {f}".format(ac=ac, f=fetcher))
    try:
        return fetcher(ac, start_i, end_i)
    except requests.RequestException as ex:
        raise RuntimeError("Failed to fetch {ac} ({ex})".format(ac=ac, ex=ex))
python
{ "resource": "" }
q37637
_fetch_seq_ensembl
train
def _fetch_seq_ensembl(ac, start_i=None, end_i=None):
    """Fetch the specified sequence slice from Ensembl using the public
    REST interface.

    An interbase interval may be optionally provided with start_i and
    end_i. The Ensembl REST interface does not currently accept
    intervals, so the entire sequence is fetched and sliced locally.
    """
    url = "http://rest.ensembl.org/sequence/id/{ac}".format(ac=ac)
    r = requests.get(url, headers={"Content-Type": "application/json"})
    r.raise_for_status()
    seq = r.json()["seq"]
    if start_i is None or end_i is None:
        return seq
    return seq[start_i:end_i]
python
{ "resource": "" }
q37638
_fetch_seq_ncbi
train
def _fetch_seq_ncbi(ac, start_i=None, end_i=None):
    """Fetch sequences from NCBI using the eutils interface.

    An interbase interval may be optionally provided with start_i and
    end_i. NCBI eutils will return just the requested subsequence, which
    might greatly reduce payload sizes (especially with chromosome-scale
    sequences).

    The request includes `tool` and `email` arguments to identify the
    caller as the bioutils package; callers may set
    `bioutils.seqfetcher.ncbi_tool` / `ncbi_email` to custom values.
    """
    # Accessions whose second letter is 'P' (NP_, XP_, ...) are proteins.
    db = "protein" if ac[1] == "P" else "nucleotide"
    url_fmt = ("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
               "db={db}&id={ac}&rettype=fasta")
    if start_i is None or end_i is None:
        url = url_fmt.format(db=db, ac=ac)
    else:
        # eutils uses 1-based, fully-closed coordinates; convert from the
        # 0-based interbase interval.
        url_fmt += "&seq_start={start}&seq_stop={stop}"
        url = url_fmt.format(db=db, ac=ac, start=start_i + 1, stop=end_i)
    url += "&tool={tool}&email={email}".format(tool=ncbi_tool,
                                               email=ncbi_email)
    url = _add_eutils_api_key(url)
    n_retries = 0
    while True:
        resp = requests.get(url)
        if resp.ok:
            # Drop the FASTA header line and rejoin the sequence lines.
            seq = "".join(resp.text.splitlines()[1:])
            return seq
        if n_retries >= retry_limit:
            break
        if n_retries == 0:
            _logger.warning("Failed to fetch {}".format(url))
        # Randomized backoff.
        # NOTE(review): on the first attempt (n_retries == 0) this is
        # always 1 second (x ** 0) — confirm that schedule is intended.
        sleeptime = random.randint(n_retries, 3) ** n_retries
        _logger.warning("Failure {}/{}; retry in {} seconds".format(
            n_retries, retry_limit, sleeptime))
        time.sleep(sleeptime)
        n_retries += 1

    # Falls through only on failure
    resp.raise_for_status()
python
{ "resource": "" }
q37639
_add_eutils_api_key
train
def _add_eutils_api_key(url): """Adds eutils api key to the query :param url: eutils url with a query string :return: url with api_key parameter set to the value of environment variable 'NCBI_API_KEY' if available """ apikey = os.environ.get("NCBI_API_KEY") if apikey: url += "&api_key={apikey}".format(apikey=apikey) return url
python
{ "resource": "" }
q37640
rom
train
def rom(addr, dout, CONTENT):
    '''Combinational ROM model.

    CONTENT == tuple of non-sparse values
    '''
    @always_comb
    def logic():
        # Drive the output with the word addressed by `addr`.
        dout.next = CONTENT[int(addr)]

    return logic
python
{ "resource": "" }
q37641
Event.post_to_twitter
train
def post_to_twitter(self, message=None):
    """Update twitter status, i.e., post a tweet.

    The message is truncated, if necessary, so that the permalink (when
    conf.BASE_URL is set) and the '#status' hash tag still fit within
    Twitter's 140-character limit.
    """
    consumer = oauth2.Consumer(key=conf.TWITTER_CONSUMER_KEY,
                               secret=conf.TWITTER_CONSUMER_SECRET)
    token = oauth2.Token(key=conf.TWITTER_ACCESS_TOKEN,
                         secret=conf.TWITTER_ACCESS_SECRET)
    client = SimpleTwitterClient(consumer=consumer, token=token)

    if not message:
        message = self.get_message()

    hash_tag = '#status'

    if conf.BASE_URL:
        # Append a short permalink to this event.
        permalink = urlparse.urljoin(
            conf.BASE_URL,
            reverse('overseer:event_short', args=[self.pk]))
        # 138 = 140 minus the two separating spaces.
        if len(message) + len(permalink) + len(hash_tag) > 138:
            # Truncate and mark the truncation with '..'.
            message = '%s.. %s %s' % (
                message[:140-4-len(hash_tag)-len(permalink)],
                permalink, hash_tag)
        else:
            message = '%s %s %s' % (message, permalink, hash_tag)
    else:
        # 139 = 140 minus the single separating space.
        if len(message) + len(hash_tag) > 139:
            message = '%s.. %s' % (message[:140-3-len(hash_tag)], hash_tag)
        else:
            message = '%s %s' % (message, hash_tag)

    return client.update_status(message)
python
{ "resource": "" }
q37642
get_gaf_gene_ontology_file
train
def get_gaf_gene_ontology_file(path):
    """Extract the gene ontology file URL from a GO annotation (GAF) file.

    Parameters
    ----------
    path: str
        The path name of the GO annotation file.

    Returns
    -------
    str or None
        The URL of the associated gene ontology file, or None if the
        file header contains no "!GO-version:" line.
    """
    assert isinstance(path, str)
    version = None
    with misc.smart_open_read(path, encoding='UTF-8', try_gzip=True) as fh:
        for l in fh:
            if l[0] != '!':
                # The '!'-prefixed header section is over; stop looking.
                break
            if l.startswith('!GO-version:'):
                # Bug fix: strip the trailing newline (and surrounding
                # whitespace) that the old code left attached to the URL.
                version = l.split(' ')[1].strip()
                break
    return version
python
{ "resource": "" }
q37643
Displacement.apply
train
def apply(self, im): """ Apply an n-dimensional displacement by shifting an image or volume. Parameters ---------- im : ndarray The image or volume to shift """ from scipy.ndimage.interpolation import shift return shift(im, map(lambda x: -x, self.delta), mode='nearest')
python
{ "resource": "" }
q37644
Displacement.compute
train
def compute(a, b):
    """
    Compute an optimal displacement between two ndarrays.

    Finds the displacement between two ndimensional arrays. Arrays must
    be of the same size. Algorithm uses a cross correlation, computed
    efficiently through an n-dimensional fft.

    Parameters
    ----------
    a : ndarray
        The first array
    b : ndarray
        The second array
    """
    from numpy.fft import rfftn, irfftn
    from numpy import unravel_index, argmax

    # compute real-valued cross-correlation in fourier domain
    s = a.shape
    f = rfftn(a)
    f *= rfftn(b).conjugate()
    c = abs(irfftn(f, s))

    # find location of maximum
    inds = unravel_index(argmax(c), s)

    # fix displacements that are greater than half the total size:
    # cross-correlation peaks wrap around, so such values correspond to
    # negative displacements.
    pairs = zip(inds, a.shape)
    # cast to basic python int for serialization
    adjusted = [int(d - n) if d > n // 2 else int(d) for (d, n) in pairs]

    return Displacement(adjusted)
python
{ "resource": "" }
q37645
LocalDisplacement.compute
train
def compute(a, b, axis):
    """Finds optimal displacements localized along an axis"""
    # Compute one global displacement per slice along the chosen axis.
    deltas = [
        Displacement.compute(slice_a, slice_b).delta
        for slice_a, slice_b in zip(rollaxis(a, axis, 0),
                                    rollaxis(b, axis, 0))
    ]
    return LocalDisplacement(deltas, axis=axis)
python
{ "resource": "" }
q37646
LocalDisplacement.apply
train
def apply(self, im): """ Apply axis-localized displacements. Parameters ---------- im : ndarray The image or volume to shift """ from scipy.ndimage.interpolation import shift im = rollaxis(im, self.axis) im.setflags(write=True) for ind in range(0, im.shape[0]): im[ind] = shift(im[ind], map(lambda x: -x, self.delta[ind]), mode='nearest') im = rollaxis(im, 0, self.axis+1) return im
python
{ "resource": "" }
q37647
write_sample_sheet
train
def write_sample_sheet(output_file, accessions, names, celfile_urls,
                       sel=None):
    """Generate a sample sheet in tab-separated text format.

    The columns contain the following sample attributes:
    1) accession, 2) name, 3) CEL file name, 4) CEL file URL.

    Parameters
    ----------
    output_file: str
        The path of the output file.
    accessions: list or tuple of str
        The sample accessions.
    names: list or tuple of str
        The sample names.
    celfile_urls: list or tuple of str
        The sample CEL file URLs.
    sel: Iterable, optional
        A list of sample indices to include. If None, all samples are
        included. [None]

    Returns
    -------
    None
    """
    assert isinstance(output_file, str)
    assert isinstance(accessions, (list, tuple))
    for acc in accessions:
        assert isinstance(acc, str)
    assert isinstance(names, (list, tuple))
    for n in names:
        assert isinstance(n, str)
    assert isinstance(celfile_urls, (list, tuple))
    for u in celfile_urls:
        assert isinstance(u, str)
    if sel is not None:
        assert isinstance(sel, Iterable)
        for i in sel:
            assert isinstance(i, (int, np.integer))

    # Bug fix: the file was previously opened in binary mode ('wb'),
    # which breaks csv.writer under Python 3; open in text mode with
    # newline='' so only `lineterminator` controls line endings.
    with open(output_file, 'w', newline='') as ofh:
        writer = csv.writer(ofh, dialect='excel-tab',
                            lineterminator=os.linesep,
                            quoting=csv.QUOTE_NONE)
        # write header
        writer.writerow(['Accession', 'Name', 'CEL file name',
                         'CEL file URL'])
        if sel is None:
            sel = range(len(names))
        for i in sel:
            # The CEL file name is the last component of its URL.
            cel_file_name = celfile_urls[i].split('/')[-1]
            writer.writerow([accessions[i], names[i],
                             cel_file_name, celfile_urls[i]])
python
{ "resource": "" }
q37648
NCBISequenceLinkURL
train
def NCBISequenceLinkURL(title, default=None):
    """
    Given a sequence title, like "gi|42768646|gb|AY516849.1| Homo sapiens",
    return the URL of a link to the info page at NCBI.

    title: the sequence title to produce a link URL for.
    default: the value to return if the title cannot be parsed.
    """
    try:
        # The accession is the fourth '|'-separated field, version dropped.
        accession = title.split('|')[3].split('.')[0]
    except IndexError:
        return default
    return 'http://www.ncbi.nlm.nih.gov/nuccore/%s' % (accession,)
python
{ "resource": "" }
q37649
NCBISequenceLink
train
def NCBISequenceLink(title, default=None):
    """
    Given a sequence title, like "gi|42768646|gb|AY516849.1| Homo sapiens",
    return an HTML A tag displaying a link to the info page at NCBI.

    title: the sequence title to produce an HTML link for.
    default: the value to return if the title cannot be parsed.
    """
    url = NCBISequenceLinkURL(title)
    if url is None:
        return default
    return '<a href="%s" target="_blank">%s</a>' % (url, title)
python
{ "resource": "" }
q37650
AlignmentPanelHTMLWriter._writeFASTA
train
def _writeFASTA(self, i, image):
    """
    Write a FASTA (or FASTQ) file containing the set of reads that hit a
    sequence.

    @param i: The C{int} number of the image in self._images.
    @param image: A member of self._images.
    @return: A C{str}, either 'fasta' or 'fastq' indicating the format
        of the reads in C{self._titlesAlignments}.
    """
    # Preserve the input read format: if the original reads were FASTQ,
    # write FASTQ (keeping quality information), otherwise write FASTA.
    if isinstance(self._titlesAlignments.readsAlignments.reads,
                  FastqReads):
        format_ = 'fastq'
    else:
        format_ = 'fasta'
    # The output file is named after the image index so it can be linked
    # from the corresponding panel image.
    filename = '%s/%d.%s' % (self._outputDir, i, format_)
    titleAlignments = self._titlesAlignments[image['title']]
    with open(filename, 'w') as fp:
        for titleAlignment in titleAlignments:
            fp.write(titleAlignment.read.toString(format_))
    return format_
python
{ "resource": "" }
q37651
AlignmentPanelHTMLWriter._writeFeatures
train
def _writeFeatures(self, i, image):
    """
    Write a text file containing the features as a table.

    @param i: The C{int} number of the image in self._images.
    @param image: A member of self._images.
    @return: The C{str} features file name - just the base name, not
        including the path to the file.
    """
    basename = 'features-%d.txt' % i
    filename = '%s/%s' % (self._outputDir, basename)
    featureList = image['graphInfo']['features']
    with open(filename, 'w') as fp:
        for feature in featureList:
            # Blank line between features so the file reads as a table
            # of double-spaced entries.
            fp.write('%s\n\n' % feature.feature)
    return basename
python
{ "resource": "" }
q37652
DosDateTimeToTimeTuple
train
def DosDateTimeToTimeTuple(dosDateTime):
    """Convert an MS-DOS format date time to a Python time tuple.

    The high 16 bits hold the packed date (year since 1980, month, day)
    and the low 16 bits hold the packed time (hour, minute, second / 2).
    """
    datePart = dosDateTime >> 16
    timePart = dosDateTime & 0xffff
    fields = (
        1980 + (datePart >> 9),   # year
        (datePart >> 5) & 0xf,    # month
        datePart & 0x1f,          # day
        timePart >> 11,           # hour
        (timePart >> 5) & 0x3f,   # minute
        2 * (timePart & 0x1f),    # second (stored in 2-second units)
        0, 1, -1,                 # weekday, yearday, isdst (recomputed)
    )
    return time.localtime(time.mktime(fields))
python
{ "resource": "" }
q37653
bitScoreToEValue
train
def bitScoreToEValue(bitScore, dbSize, dbSequenceCount, queryLength,
                     lengthAdjustment):
    """
    Convert a bit score to an e-value.

    @param bitScore: The C{float} bit score to convert.
    @param dbSize: The C{int} total size of the database (i.e., the sum of
        the lengths of all sequences in the BLAST database).
    @param dbSequenceCount: The C{int} number of sequences in the database.
    @param queryLength: The C{int} length of the query.
    @param lengthAdjustment: The C{int} length adjustment (BLAST XML output
        calls this the Statistics_hsp-len).
    @return: A C{float} e-value.
    """
    # Effective search space: adjusted database length times adjusted
    # query length.
    effectiveDbLength = dbSize - dbSequenceCount * lengthAdjustment
    effectiveQueryLength = queryLength - lengthAdjustment
    searchSpace = effectiveDbLength * effectiveQueryLength
    return searchSpace * (2.0 ** (-1.0 * bitScore))
python
{ "resource": "" }
q37654
eValueToBitScore
train
def eValueToBitScore(eValue, dbSize, dbSequenceCount, queryLength,
                     lengthAdjustment):
    """
    Convert an e-value to a bit score (the inverse of bitScoreToEValue).

    @param eValue: The C{float} e-value to convert.
    @param dbSize: The C{int} total size of the database (i.e., the sum of
        the lengths of all sequences in the BLAST database).
    @param dbSequenceCount: The C{int} number of sequences in the database.
    @param queryLength: The C{int} length of the query.
    @param lengthAdjustment: The C{int} length adjustment (BLAST XML output
        calls this the Statistics_hsp-len).
    @return: A C{float} bit score.
    """
    searchSpace = ((dbSize - dbSequenceCount * lengthAdjustment) *
                   (queryLength - lengthAdjustment))
    # Dividing a natural log by _LOG2 converts it to a base-2 log.
    return -1.0 * (log(eValue / searchSpace) / _LOG2)
python
{ "resource": "" }
q37655
parseBtop
train
def parseBtop(btopString):
    """
    Parse a BTOP string.

    The format is described at https://www.ncbi.nlm.nih.gov/books/NBK279682/

    @param btopString: A C{str} BTOP sequence.
    @raise ValueError: If C{btopString} is not valid BTOP.
    @return: A generator that yields a series of integers and 2-tuples of
        letters, as found in the BTOP string C{btopString}.
    """
    isdigit = str.isdigit
    # value accumulates the current run-length integer (count of identical
    # matches); queryLetter holds the first letter of an in-progress
    # (query, subject) mismatch pair.
    value = None
    queryLetter = None
    for offset, char in enumerate(btopString):
        if isdigit(char):
            if queryLetter is not None:
                # A digit may not interrupt a half-finished letter pair.
                raise ValueError(
                    'BTOP string %r has a query letter %r at offset %d with '
                    'no corresponding subject letter'
                    % (btopString, queryLetter, offset - 1))
            value = int(char) if value is None else value * 10 + int(char)
        else:
            if value is not None:
                # A digit run just ended: yield the match count, then
                # start collecting a letter pair.
                yield value
                value = None
                queryLetter = char
            else:
                if queryLetter is None:
                    queryLetter = char
                else:
                    if queryLetter == '-' and char == '-':
                        raise ValueError(
                            'BTOP string %r has two consecutive gaps at '
                            'offset %d' % (btopString, offset - 1))
                    elif queryLetter == char:
                        raise ValueError(
                            'BTOP string %r has two consecutive identical '
                            '%r letters at offset %d'
                            % (btopString, char, offset - 1))
                    yield (queryLetter, char)
                    queryLetter = None

    # Emit any trailing count; a trailing unpaired letter is an error.
    if value is not None:
        yield value
    elif queryLetter is not None:
        raise ValueError(
            'BTOP string %r has a trailing query letter %r with '
            'no corresponding subject letter' % (btopString, queryLetter))
python
{ "resource": "" }
q37656
countGaps
train
def countGaps(btopString):
    """
    Count the query and subject gaps in a BTOP string.

    @param btopString: A C{str} BTOP sequence.
    @raise ValueError: If L{parseBtop} finds an error in the BTOP string
        C{btopString}.
    @return: A 2-tuple of C{int}s, with the (query, subject) gaps counts as
        found in C{btopString}.
    """
    queryGaps = subjectGaps = 0
    for item in parseBtop(btopString):
        # Integers are match counts; only letter pairs can contain gaps.
        if not isinstance(item, tuple):
            continue
        queryChar, subjectChar = item
        if queryChar == '-':
            queryGaps += 1
        if subjectChar == '-':
            subjectGaps += 1
    return (queryGaps, subjectGaps)
python
{ "resource": "" }
q37657
btop2cigar
train
def btop2cigar(btopString, concise=False, aa=False):
    """
    Convert a BTOP string to a CIGAR string.

    @param btopString: A C{str} BTOP sequence.
    @param concise: If C{True}, use 'M' for matches and mismatches instead
        of the more specific 'X' and '='.
    @param aa: If C{True}, C{btopString} will be interpreted as though it
        refers to amino acids (as in the BTOP string produced by DIAMOND).
        In that case, it is not possible to use the 'precise' CIGAR
        characters because amino acids have multiple codons so we cannot
        know whether an amino acid match is due to an exact nucleotide
        matches or not. Also, the numbers in the BTOP string will be
        multiplied by 3 since they refer to a number of amino acids
        matching.
    @raise ValueError: If L{parseBtop} finds an error in C{btopString} or
        if C{aa} and C{concise} are both C{True}.
    @return: A C{str} CIGAR string.
    """
    if aa and concise:
        raise ValueError('aa and concise cannot both be True')

    result = []
    thisLength = thisOperation = currentLength = currentOperation = None

    for item in parseBtop(btopString):
        if isinstance(item, int):
            thisLength = item
            # Bug fix: the branches of this conditional (and the CDIFF one
            # below) were inverted. Per the docstring, concise=True must
            # emit the generic 'M' (CMATCH); only the non-concise form uses
            # the specific '=' (CEQUAL) / 'X' (CDIFF) operations.
            thisOperation = CMATCH if concise else CEQUAL
        else:
            thisLength = 1
            query, reference = item
            if query == '-':
                # The query has a gap. That means that in matching the
                # query to the reference a deletion is needed in the
                # reference.
                assert reference != '-'
                thisOperation = CDEL
            elif reference == '-':
                # The reference has a gap. That means that in matching the
                # query to the reference an insertion is needed in the
                # reference.
                thisOperation = CINS
            else:
                # A substitution was needed.
                assert query != reference
                thisOperation = CMATCH if concise else CDIFF

        if thisOperation == currentOperation:
            currentLength += thisLength
        else:
            if currentOperation:
                result.append(
                    '%d%s' % ((3 * currentLength) if aa else currentLength,
                              currentOperation))
            currentLength, currentOperation = thisLength, thisOperation

    # We reached the end of the BTOP string. If there was an operation
    # underway, emit it. The 'if' here should only be needed to catch the
    # case where btopString was empty.
    assert currentOperation or btopString == ''
    if currentOperation:
        result.append(
            '%d%s' % ((3 * currentLength) if aa else currentLength,
                      currentOperation))

    return ''.join(result)
python
{ "resource": "" }
q37658
Command.progress_callback
train
def progress_callback(self, action, node, elapsed_time=None):
    """Report fixture-loading progress on the command's stdout.

    :param str action: either ``'load_start'`` or ``'load_success'``
    :param list node: two items: app label and module name
    :param int | None elapsed_time: seconds taken, shown on success if set
    """
    out = self.stdout
    if action == 'load_start':
        # No trailing newline: the SUCCESS message is appended later.
        out.write('Loading fixture {}.{}...'.format(*node), ending='')
        out.flush()
        return
    if action == 'load_success':
        if elapsed_time:
            out.write('SUCCESS ({:.03} seconds) '.format(elapsed_time))
        else:
            out.write('SUCCESS')
python
{ "resource": "" }
q37659
GOTerm.get_pretty_format
train
def get_pretty_format(self, include_id=True, max_name_length=0,
                      abbreviate=True):
    """Returns a nicely formatted string with the GO term information.

    Parameters
    ----------
    include_id: bool, optional
        Include the GO term ID.
    max_name_length: int, optional
        Truncate the name so that the formatted string does not exceed
        this length.
    abbreviate: bool, optional
        Apply the abbreviations in ``_abbrev`` to shorten the GO term name.

    Returns
    -------
    str
        The formatted string.
    """
    name = self.name
    if abbreviate:
        # Apply every (pattern, replacement) substitution in order.
        for abb in self._abbrev:
            name = re.sub(abb[0], abb[1], name)
    if 3 <= max_name_length < len(name):
        # Leave room for the '...' ellipsis.
        name = name[:(max_name_length - 3)] + '...'
    parts = [self.domain_short, ': ', name]
    if include_id:
        parts.append(' (%s)' % self.id)
    return ''.join(parts)
python
{ "resource": "" }
q37660
HSP.toDict
train
def toDict(self):
    """
    Get information about the HSP as a dictionary.

    @return: A C{dict} representation of the HSP, extending the base
        representation with the numeric score.
    """
    d = _Base.toDict(self)
    d['score'] = self.score.score
    return d
python
{ "resource": "" }
q37661
get_argument_parser
train
def get_argument_parser():
    """Returns an argument parser object for the script.

    Returns
    -------
    argparse.ArgumentParser
        The parser, with input/output/species options and the shared
        reporting (logging/verbosity) arguments added.
    """
    desc = 'Filter FASTA file by chromosome names.'
    parser = cli.get_argument_parser(desc=desc)

    parser.add_argument(
        '-f', '--fasta-file', default='-', type=str,
        help=textwrap.dedent("""\
            Path of the FASTA file. The file may be gzip'ed.
            If set to ``-``, read from ``stdin``."""))

    parser.add_argument(
        '-s', '--species', type=str,
        choices=sorted(ensembl.SPECIES_CHROMPAT.keys()), default='human',
        help=textwrap.dedent("""\
            Species for which to extract genes. (This parameter is
            ignored if ``--chromosome-pattern`` is specified.)""")
    )

    parser.add_argument(
        '-c', '--chromosome-pattern', type=str, required=False, default=None,
        help=textwrap.dedent("""\
            Regular expression that chromosome names have to match.
            If not specified, determine pattern based on the setting of
            ``--species``.""")
    )

    parser.add_argument(
        '-o', '--output-file', type=str, required=True,
        help=textwrap.dedent("""\
            Path of output file. If set to ``-``, print to ``stdout``,
            and redirect logging messages to ``stderr``."""))

    # add the shared logging/verbosity options
    parser = cli.add_reporting_args(parser)

    return parser
python
{ "resource": "" }
q37662
main
train
def main(args=None):
    """Script body.

    Filters a FASTA file, keeping only the sequences whose chromosome
    name matches the configured pattern, and writes them to the output.

    Parameters
    ----------
    args: argparse.Namespace, optional
        Pre-parsed arguments; if None, they are parsed from sys.argv.

    Returns
    -------
    int
        Exit code (0 on success).
    """
    if args is None:
        # parse command-line arguments
        parser = get_argument_parser()
        args = parser.parse_args()

    fasta_file = args.fasta_file
    species = args.species
    chrom_pat = args.chromosome_pattern
    output_file = args.output_file
    log_file = args.log_file
    quiet = args.quiet
    verbose = args.verbose

    # configure root logger
    log_stream = sys.stdout
    if output_file == '-':
        # if we print output to stdout, redirect log messages to stderr
        log_stream = sys.stderr

    logger = misc.get_logger(log_stream=log_stream, log_file=log_file,
                             quiet=quiet, verbose=verbose)

    # generate regular expression object from the chromosome pattern
    if chrom_pat is None:
        chrom_pat = ensembl.SPECIES_CHROMPAT[species]
    chrom_re = re.compile(chrom_pat)

    # filter the FASTA file
    # note: each chromosome sequence is temporarily read into memory,
    #       so this script has a large memory footprint
    with misc.smart_open_read(
            fasta_file, mode='r', encoding='ascii', try_gzip=True) as fh, \
            misc.smart_open_write(
                output_file, mode='w', encoding='ascii') as ofh:
        reader = FastaReader(fh)
        for seq in reader:
            # the chromosome name is the first whitespace-separated token
            # of the FASTA header
            chrom = seq.name.split(' ', 1)[0]
            if chrom_re.match(chrom) is None:
                logger.info('Ignoring chromosome "%s"...', chrom)
                continue
            # keep only the bare chromosome name in the output header
            seq.name = chrom
            seq.append_fasta(ofh)

    return 0
python
{ "resource": "" }
q37663
make_app
train
def make_app(global_conf, **app_conf): """Create a WSGI application and return it ``global_conf`` The inherited configuration for this application. Normally from the [DEFAULT] section of the Paste ini file. ``app_conf`` The application's local configuration. Normally specified in the [app:<name>] section of the Paste ini file (where <name> defaults to main). """ # Configure the environment and fill conf dictionary. environment.load_environment(global_conf, app_conf) # Dispatch request to controllers. app = controllers.make_router() # Init request-dependant environment app = set_application_url(app) # CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares) # Handle Python exceptions if not conf['debug']: def json_error_template(head_html, exception, extra): error_json = { 'code': 500, 'hint': u'See the HTTP server log to see the exception traceback.', 'message': exception, } if head_html: error_json['head_html'] = head_html if extra: error_json['extra'] = extra return json.dumps({'error': error_json}) weberror.errormiddleware.error_template = json_error_template app = weberror.errormiddleware.ErrorMiddleware(app, global_conf, **conf['errorware']) app = ensure_json_content_type(app) app = add_x_api_version_header(app) if conf['debug'] and ipdb is not None: app = launch_debugger_on_exception(app) return app
python
{ "resource": "" }
q37664
dimensionalIterator
train
def dimensionalIterator(dimensions, maxItems=-1):
    """
    Given a list of n positive integers, return a generator that yields
    n-tuples of coordinates to 'fill' the dimensions. This is like an
    odometer in a car, but the dimensions do not each have to be 10.

    For example: dimensionalIterator((2, 3)) will yield in order
    (0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2).

    See the tests in test_dimension.py for many more examples.

    A dimension may also be given as '*', to provide a dimension that is
    never exhausted. For example, dimensionalIterator(('*', 2)) yields the
    infinite series (0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1), ....

    maxItems can be used to limit the number of tuples yielded.

    @param dimensions: A sequence of positive C{int} dimension sizes, any
        of which may instead be the string '*' (inexhaustible).
    @param maxItems: The maximum C{int} number of tuples to yield, or a
        negative number to yield without limit.
    @raise ValueError: If any dimension is neither '*' nor a positive int.
    """
    nDimensions = len(dimensions)
    if nDimensions == 0 or maxItems == 0:
        return
    # Idiom fix: a generator expression instead of any(map(lambda ...)),
    # and a ValueError that says what was wrong (it was raised bare).
    if any(dim != '*' and dim <= 0 for dim in dimensions):
        raise ValueError(
            'Dimensions not all positive or %r: %r' % ('*', dimensions))
    odometer = [0] * nDimensions
    while maxItems != 0:
        yield tuple(odometer)
        maxItems -= 1
        # Advance the rightmost wheel, carrying leftwards past any wheel
        # that has reached its maximum. A '*' wheel never carries.
        wheel = nDimensions - 1
        # Robustness fix: test wheel >= 0 first so we never rely on
        # Python's negative indexing when all wheels have wrapped (the
        # original tested it last).
        while (wheel >= 0 and dimensions[wheel] != '*' and
               odometer[wheel] == dimensions[wheel] - 1):
            odometer[wheel] = 0
            wheel -= 1
            if wheel < 0:
                # Every wheel wrapped: all coordinates have been yielded.
                return
        odometer[wheel] += 1
python
{ "resource": "" }
q37665
matchToString
train
def matchToString(dnaMatch, read1, read2, matchAmbiguous=True, indent='',
                  offsets=None):
    """
    Format a DNA match as a string.

    @param dnaMatch: A C{dict} returned by C{compareDNAReads}.
    @param read1: A C{Read} instance or an instance of one of its subclasses.
    @param read2: A C{Read} instance or an instance of one of its subclasses.
    @param matchAmbiguous: If C{True}, ambiguous nucleotides that are
        possibly correct were counted as actually being correct. Otherwise,
        the match was done strictly, insisting that only non-ambiguous
        nucleotides could contribute to the matching nucleotide count.
    @param indent: A C{str} to indent all returned lines with.
    @param offsets: If not C{None}, a C{set} of offsets of interest that were
        only considered when making C{match}.
    @return: A C{str} describing the match, one statistic per line.
    """
    match = dnaMatch['match']
    identicalMatchCount = match['identicalMatchCount']
    ambiguousMatchCount = match['ambiguousMatchCount']
    gapMismatchCount = match['gapMismatchCount']
    gapGapMismatchCount = match['gapGapMismatchCount']
    nonGapMismatchCount = match['nonGapMismatchCount']
    # When a restricted offsets set was used, percentages are computed
    # against the number of considered offsets, not the read lengths.
    if offsets:
        len1 = len2 = len(offsets)
    else:
        len1, len2 = map(len, (read1, read2))
    result = []
    append = result.append
    append(countPrint('%sExact matches' % indent, identicalMatchCount,
                      len1, len2))
    append(countPrint('%sAmbiguous matches' % indent, ambiguousMatchCount,
                      len1, len2))
    if ambiguousMatchCount and identicalMatchCount:
        anyMatchCount = identicalMatchCount + ambiguousMatchCount
        append(countPrint('%sExact or ambiguous matches' % indent,
                          anyMatchCount, len1, len2))
    mismatchCount = (gapMismatchCount + gapGapMismatchCount +
                     nonGapMismatchCount)
    append(countPrint('%sMismatches' % indent, mismatchCount, len1, len2))
    conflicts = 'conflicts' if matchAmbiguous else 'conflicts or ambiguities'
    append(countPrint('%s Not involving gaps (i.e., %s)' % (indent,
                      conflicts), nonGapMismatchCount, len1, len2))
    append(countPrint('%s Involving a gap in one sequence' % indent,
                      gapMismatchCount, len1, len2))
    append(countPrint('%s Involving a gap in both sequences' % indent,
                      gapGapMismatchCount, len1, len2))
    # Per-read details (lengths, gap and ambiguous offsets, extra
    # trailing nucleotides).
    for read, key in zip((read1, read2), ('read1', 'read2')):
        append('%s Id: %s' % (indent, read.id))
        length = len(read)
        append('%s Length: %d' % (indent, length))
        gapCount = len(dnaMatch[key]['gapOffsets'])
        append(countPrint('%s Gaps' % indent, gapCount, length))
        if gapCount:
            append(
                '%s Gap locations (1-based): %s' %
                (indent,
                 ', '.join(map(lambda offset: str(offset + 1),
                               sorted(dnaMatch[key]['gapOffsets'])))))
        ambiguousCount = len(dnaMatch[key]['ambiguousOffsets'])
        append(countPrint('%s Ambiguous' % indent, ambiguousCount, length))
        extraCount = dnaMatch[key]['extraCount']
        if extraCount:
            append(countPrint('%s Extra nucleotides at end' % indent,
                              extraCount, length))
    return '\n'.join(result)
python
{ "resource": "" }
q37666
compareDNAReads
train
def compareDNAReads(read1, read2, matchAmbiguous=True, gapChars='-',
                    offsets=None):
    """
    Compare two DNA sequences.

    @param read1: A C{Read} instance or an instance of one of its subclasses.
    @param read2: A C{Read} instance or an instance of one of its subclasses.
    @param matchAmbiguous: If C{True}, count ambiguous nucleotides that are
        possibly correct as actually being correct, and score these in the
        ambiguousMatchCount. Otherwise, we are strict and insist that only
        non-ambiguous nucleotides can contribute to the matching nucleotide
        count.
    @param gapChars: An object supporting __contains__ with characters that
        should be considered to be gaps.
    @param offsets: If not C{None}, a C{set} of offsets of interest. Offsets
        not in the set will not be considered.
    @return: A C{dict} with information about the match and the individual
        sequences (see below).
    """
    identicalMatchCount = ambiguousMatchCount = 0
    gapMismatchCount = nonGapMismatchCount = gapGapMismatchCount = 0
    read1ExtraCount = read2ExtraCount = 0
    read1GapOffsets = []
    read2GapOffsets = []
    read1AmbiguousOffsets = []
    read2AmbiguousOffsets = []
    empty = set()

    def _identicalMatch(a, b):
        # NOTE: AMBIGUOUS is defined elsewhere in this module; it
        # presumably maps a nucleotide code to the set of bases it can
        # represent, so a code with a 1-element set is unambiguous.
        return a == b and len(AMBIGUOUS[a]) == 1

    def _ambiguousMatch(a, b, matchAmbiguous):
        """
        Checks if two characters match ambiguously if matchAmbiguous is
        True. A match is an ambiguous match if it is not an identical
        match, but the sets of ambiguous characters overlap.
        """
        return (matchAmbiguous and
                not _identicalMatch(a, b) and
                AMBIGUOUS.get(a, empty) & AMBIGUOUS.get(b, empty))

    # zip_longest pads the shorter sequence with None, which is how the
    # 'extra nucleotides at the end' counts below are detected.
    for offset, (a, b) in enumerate(zip_longest(read1.sequence.upper(),
                                                read2.sequence.upper())):
        # Use 'is not None' in the following to allow an empty offsets set
        # to be passed.
        if offsets is not None and offset not in offsets:
            continue
        if len(AMBIGUOUS.get(a, '')) > 1:
            read1AmbiguousOffsets.append(offset)
        if len(AMBIGUOUS.get(b, '')) > 1:
            read2AmbiguousOffsets.append(offset)
        if a is None:
            # b has an extra character at its end (it cannot be None).
            assert b is not None
            read2ExtraCount += 1
            if b in gapChars:
                read2GapOffsets.append(offset)
        elif b is None:
            # a has an extra character at its end.
            read1ExtraCount += 1
            if a in gapChars:
                read1GapOffsets.append(offset)
        else:
            # We have a character from both sequences (they could still be
            # gap characters).
            if a in gapChars:
                read1GapOffsets.append(offset)
                if b in gapChars:
                    # Both are gaps. This can happen (though hopefully not
                    # if the sequences were pairwise aligned).
                    gapGapMismatchCount += 1
                    read2GapOffsets.append(offset)
                else:
                    # a is a gap, b is not.
                    gapMismatchCount += 1
            else:
                if b in gapChars:
                    # b is a gap, a is not.
                    gapMismatchCount += 1
                    read2GapOffsets.append(offset)
                else:
                    # Neither is a gap character.
                    if _identicalMatch(a, b):
                        identicalMatchCount += 1
                    elif _ambiguousMatch(a, b, matchAmbiguous):
                        ambiguousMatchCount += 1
                    else:
                        nonGapMismatchCount += 1

    return {
        'match': {
            'identicalMatchCount': identicalMatchCount,
            'ambiguousMatchCount': ambiguousMatchCount,
            'gapMismatchCount': gapMismatchCount,
            'gapGapMismatchCount': gapGapMismatchCount,
            'nonGapMismatchCount': nonGapMismatchCount,
        },
        'read1': {
            'ambiguousOffsets': read1AmbiguousOffsets,
            'extraCount': read1ExtraCount,
            'gapOffsets': read1GapOffsets,
        },
        'read2': {
            'ambiguousOffsets': read2AmbiguousOffsets,
            'extraCount': read2ExtraCount,
            'gapOffsets': read2GapOffsets,
        },
    }
python
{ "resource": "" }
q37667
check
train
def check(fastaFile, jsonFiles):
    """
    Check for simple consistency between the FASTA file and the JSON files.

    Note that some checking is already performed by the BlastReadsAlignments
    class. That includes checking the number of reads matches the number of
    BLAST records and that read ids and BLAST record read ids match.

    @param jsonFiles: A C{list} of names of our BLAST JSON. These may
        may be compressed (as bz2).
    @param fastaFile: The C{str} name of a FASTA-containing file.
    @raise AssertionError: If an inconsistency between the FASTA file and
        the BLAST JSON is found.
    """
    reads = FastaReads(fastaFile)
    readsAlignments = BlastReadsAlignments(reads, jsonFiles)
    for index, readAlignments in enumerate(readsAlignments):
        # Check that all the alignments in the BLAST JSON do not have query
        # sequences or query offsets that are greater than the length of
        # the sequence given in the FASTA file.
        fastaLen = len(readAlignments.read)
        for readAlignment in readAlignments:
            for hsp in readAlignment.hsps:
                # The FASTA sequence should be at least as long as the
                # query in the JSON BLAST record (minus any gaps).
                assert (fastaLen >=
                        len(hsp.query) - hsp.query.count('-')), (
                    'record %d: FASTA len %d < HSP query len %d.\n'
                    'FASTA: %s\nQuery match: %s' % (
                        index, fastaLen, len(hsp.query),
                        readAlignments.read.sequence, hsp.query))
                # The FASTA sequence length should be larger than either of
                # the query offsets mentioned in the JSON BLAST
                # record. That's because readStart and readEnd are offsets
                # into the read - so they can't be bigger than the read
                # length.
                #
                # TODO: These asserts should be more informative when they
                # fail.
                assert fastaLen >= hsp.readEnd >= hsp.readStart, (
                    'record %d: FASTA len %d not greater than both read '
                    'offsets (%d - %d), or read offsets are non-increasing. '
                    'FASTA: %s\nQuery match: %s' % (
                        index, fastaLen, hsp.readStart, hsp.readEnd,
                        readAlignments.read.sequence, hsp.query))
python
{ "resource": "" }
q37668
thresholdForIdentity
train
def thresholdForIdentity(identity, colors):
    """
    Get the best identity threshold for a specific identity value.

    @param identity: A C{float} nucleotide identity.
    @param colors: A C{list} of (threshold, color) tuples, where threshold
        is a C{float} and color is a C{str} to be used as a cell background.
        This is as returned by C{parseColors}.
    @return: The first C{float} threshold that the given identity is at
        least as big as.
    """
    # colors is sorted by decreasing threshold, so the first hit is the
    # highest threshold the identity reaches.
    best = next((threshold for threshold, _ in colors
                 if identity >= threshold), None)
    if best is None:
        raise ValueError(
            'This should never happen! Last threshold is not 0.0?')
    return best
python
{ "resource": "" }
q37669
parseColors
train
def parseColors(colors, defaultColor):
    """
    Parse command line color information.

    @param colors: A C{list} of space separated "value color" strings, such as
        ["0.9 red", "0.75 rgb(23, 190, 207)", "0.1 #CF3CF3"].
    @param defaultColor: The C{str} color to use for cells that do not reach
        the identity fraction threshold of any color in C{colors}.
    @return: A C{list} of (threshold, color) tuples, where threshold is a
        C{float} (from C{colors}) and color is a C{str} (from C{colors}). The
        list will be sorted by decreasing threshold values.
    """
    result = []
    if colors:
        for colorInfo in colors:
            fields = colorInfo.split(maxsplit=1)
            if len(fields) == 2:
                threshold, color = fields
                try:
                    threshold = float(threshold)
                except ValueError:
                    print('--color arguments must be given as space-separated '
                          'pairs of "value color" where the value is a '
                          'numeric identity threshold. Your value %r is not '
                          'numeric.' % threshold, file=sys.stderr)
                    sys.exit(1)
                # Bug fix: the range test was '0.0 > threshold > 1.0',
                # which can never be true, so out-of-range thresholds were
                # silently accepted. Reject anything outside [0.0, 1.0], as
                # the error message has always claimed we do.
                if not 0.0 <= threshold <= 1.0:
                    print('--color arguments must be given as space-separated '
                          'pairs of "value color" where the value is a '
                          'numeric identity threshold from 0.0 to 1.0. Your '
                          'value %r is not in that range.' % threshold,
                          file=sys.stderr)
                    sys.exit(1)
                result.append((threshold, color))
            else:
                print('--color arguments must be given as space-separated '
                      'pairs of "value color". You have given %r, which does '
                      'not contain a space.' % colorInfo, file=sys.stderr)
                sys.exit(1)

    result.sort(key=itemgetter(0), reverse=True)

    # Guarantee a 0.0 catch-all entry so thresholdForIdentity always finds
    # a threshold.
    if not result or result[-1][0] > 0.0:
        result.append((0.0, defaultColor))

    return result
python
{ "resource": "" }
q37670
getReadLengths
train
def getReadLengths(reads, gapChars):
    """
    Get all read lengths, excluding gap characters.

    @param reads: A C{Reads} instance.
    @param gapChars: A C{str} of sequence characters considered to be gaps.
    @return: A C{dict} keyed by read id, with C{int} length values.
    """
    # Build the gap set once; membership tests are then O(1).
    gaps = frozenset(gapChars)
    return {
        read.id: len(read) - sum(c in gaps for c in read.sequence)
        for read in reads
    }
python
{ "resource": "" }
q37671
explanation
train
def explanation(matchAmbiguous, concise, showLengths, showGaps, showNs):
    """
    Make an explanation of the output HTML table.

    @param matchAmbiguous: If C{True}, count ambiguous nucleotides that are
        possibly correct as actually being correct. Otherwise, we are strict
        and insist that only non-ambiguous nucleotides can contribute to the
        matching nucleotide count.
    @param concise: If C{True}, do not show match detail abbreviations.
    @param showLengths: If C{True}, include the lengths of sequences.
    @param showGaps: If C{True}, include the number of gaps in sequences.
    @param showNs: If C{True}, include the number of N characters in
        sequences.
    @return: A C{str} of HTML.
    """
    # The HTML is assembled from fragments; they are joined with newlines
    # at the end.
    result = ["""
<h1>Sequence versus sequence identity table</h1>

<p>

The table cells below show the nucleotide identity fraction for the
sequences (<span class="best">like this</span> for the best value in each
row). The identity fraction numerator is the sum of the number of identical
"""]

    if matchAmbiguous:
        result.append('nucleotides plus the number of ambiguously matching '
                      'nucleotides.')
    else:
        result.append('nucleotides.')

    result.append("""The denominator
is the length of the sequence <em>for the row</em>. Sequence gaps
are not included when calculating their lengths.

</p>
""")

    # Only emit the abbreviation key if at least one abbreviation can
    # actually appear in the table.
    if showLengths or showGaps or showNs or matchAmbiguous or not concise:
        result.append("""
<p>

Key to abbreviations:
  <ul>
""")
        if showLengths:
            result.append('<li>L: sequence Length.</li>')

        if showGaps:
            result.append('<li>G: number of Gaps in sequence.</li>')

        if showNs:
            result.append('<li>N: number of N characters in sequence.</li>')

        if not concise:
            result.append('<li>IM: Identical nucleotide Matches.</li>')

        if matchAmbiguous:
            result.append('<li>AM: Ambiguous nucleotide Matches.</li>')

        result.append("""
    <li>GG: Gap/Gap matches (both sequences have gaps).</li>
    <li>G?: Gap/Non-gap mismatches (one sequence has a gap).</li>
    <li>NE: Non-equal nucleotide mismatches.</li>
  </ul>
</p>
""")

    return '\n'.join(result)
python
{ "resource": "" }
q37672
collectData
train
def collectData(reads1, reads2, square, matchAmbiguous):
    """
    Get pairwise matching statistics for two sets of reads.

    @param reads1: An C{OrderedDict} of C{str} read ids whose values are
        C{Read} instances. These will be the rows of the table.
    @param reads2: An C{OrderedDict} of C{str} read ids whose values are
        C{Read} instances. These will be the columns of the table.
    @param square: If C{True} we are making a square table of a set of
        sequences against themselves (in which case we show nothing on the
        diagonal).
    @param matchAmbiguous: If C{True}, count ambiguous nucleotides that are
        possibly correct as actually being correct. Otherwise, we are strict
        and insist that only non-ambiguous nucleotides can contribute to the
        matching nucleotide count.
    @return: A C{defaultdict} of C{dict}s, keyed by read id then read id,
        whose values are the 'match' C{dict}s from C{compareDNAReads}.
    """
    result = defaultdict(dict)
    for id1, read1 in reads1.items():
        for id2, read2 in reads2.items():
            if id1 != id2 or not square:
                match = compareDNAReads(
                    read1, read2, matchAmbiguous=matchAmbiguous)['match']
                if not matchAmbiguous:
                    assert match['ambiguousMatchCount'] == 0
                # The match counts are symmetric, so store the result
                # under both orderings to avoid recomputation.
                result[id1][id2] = result[id2][id1] = match
    return result
python
{ "resource": "" }
q37673
simpleTable
train
def simpleTable(tableData, reads1, reads2, square, matchAmbiguous, gapChars):
    """
    Make a text table showing inter-sequence distances and print it to
    standard output.

    @param tableData: A C{defaultdict(dict)} keyed by read ids, whose values
        are the dictionaries returned by compareDNAReads.
    @param reads1: An C{OrderedDict} of C{str} read ids whose values are
        C{Read} instances. These will be the rows of the table.
    @param reads2: An C{OrderedDict} of C{str} read ids whose values are
        C{Read} instances. These will be the columns of the table.
    @param square: If C{True} we are making a square table of a set of
        sequences against themselves (in which case we show nothing on the
        diagonal).
    @param matchAmbiguous: If C{True}, count ambiguous nucleotides that are
        possibly correct as actually being correct. Otherwise, we are strict
        and insist that only non-ambiguous nucleotides can contribute to the
        matching nucleotide count.
    @param gapChars: A C{str} of sequence characters considered to be gaps.
    """
    readLengths1 = getReadLengths(reads1.values(), gapChars)
    print('ID\t' + '\t'.join(reads2))
    for id1, read1 in reads1.items():
        # The identity denominator is the (gap-free) length of the row's
        # read, so the table need not be symmetric.
        read1Len = readLengths1[id1]
        print(id1, end='')
        for id2, read2 in reads2.items():
            if id1 == id2 and square:
                # Leave the diagonal cell empty.
                print('\t', end='')
            else:
                stats = tableData[id1][id2]
                identity = (
                    stats['identicalMatchCount'] +
                    (stats['ambiguousMatchCount']
                     if matchAmbiguous else 0)) / read1Len
                print('\t%.4f' % identity, end='')
        print()
python
{ "resource": "" }
q37674
get_file_md5sum
train
def get_file_md5sum(path):
    """Calculate the MD5 hash for a file.

    Parameters
    ----------
    path: str
        The path of the file.

    Returns
    -------
    str
        The hex digest of the file's MD5 hash.
    """
    digest = hashlib.md5()
    with open(path, 'rb') as fh:
        digest.update(fh.read())
    return str(digest.hexdigest())
python
{ "resource": "" }
q37675
smart_open_read
train
def smart_open_read(path=None, mode='rb', encoding=None, try_gzip=False):
    """Open a file for reading or return ``stdin``.

    Intended to be used as a context manager (the function body is a
    generator that yields the open file handle and closes it afterwards).

    Adapted from StackOverflow user "Wolph"
    (http://stackoverflow.com/a/17603000).

    Parameters
    ----------
    path: str or None
        The path of the file, or None to read from ``stdin``.
    mode: str
        Either 'r' (text) or 'rb' (binary).
    encoding: str or None
        The text encoding to use (text mode only).
    try_gzip: bool
        If True, first try to open the file as a gzip file and fall back
        to a plain open if that fails.
    """
    assert mode in ('r', 'rb')
    assert path is None or isinstance(path, (str, _oldstr))
    assert isinstance(mode, (str, _oldstr))
    assert encoding is None or isinstance(encoding, (str, _oldstr))
    assert isinstance(try_gzip, bool)

    # Up to three nested handles may be opened (gzip -> buffered ->
    # text); all of them are closed in the finally block below.
    fh = None
    binfh = None
    gzfh = None
    if path is None:
        # open stdin
        fh = io.open(sys.stdin.fileno(), mode=mode, encoding=encoding)
    else:
        # open an actual file
        if try_gzip:
            # gzip.open defaults to mode 'rb'
            gzfh = try_open_gzip(path)
            if gzfh is not None:
                logger.debug('Opening gzip''ed file.')
                # wrap gzip stream
                binfh = io.BufferedReader(gzfh)
                if 'b' not in mode:
                    # add a text wrapper on top
                    logger.debug('Adding text wrapper.')
                    fh = io.TextIOWrapper(binfh, encoding=encoding)
        else:
            # NOTE(review): passing a non-None encoding together with
            # mode='rb' makes io.open raise ValueError here — callers
            # appear to pass encoding only with mode='r'; confirm.
            fh = io.open(path, mode=mode, encoding=encoding)

    # For a binary gzip stream, fh stays None and the buffered reader is
    # what gets yielded.
    yield_fh = fh
    if fh is None:
        yield_fh = binfh

    try:
        yield yield_fh
    finally:
        # close all open files
        if fh is not None:
            # make sure we don't close stdin
            if fh.fileno() != sys.stdin.fileno():
                fh.close()
        if binfh is not None:
            binfh.close()
        if gzfh is not None:
            gzfh.close()
python
{ "resource": "" }
q37676
smart_open_write
train
def smart_open_write(path=None, mode='wb', encoding=None):
    """Open a file for writing or return ``stdout``.

    Intended for use as a context manager: the generator yields the open
    handle and closes it afterwards (unless it is stdout).

    Adapted from StackOverflow user "Wolph"
    (http://stackoverflow.com/a/17603000).
    """
    if path is None:
        # No path given: hand back stdout, re-opened so the requested
        # mode/encoding are honoured.
        fh = io.open(sys.stdout.fileno(), mode=mode, encoding=encoding)
    else:
        fh = io.open(path, mode=mode, encoding=encoding)
    try:
        yield fh
    finally:
        # make sure we don't close stdout
        if fh.fileno() != sys.stdout.fileno():
            fh.close()
python
{ "resource": "" }
q37677
get_url_size
train
def get_url_size(url): """Get the size of a URL. Note: Uses requests, so it does not work for FTP URLs. Source: StackOverflow user "Burhan Khalid". (http://stackoverflow.com/a/24585314/5651021) Parameters ---------- url : str The URL. Returns ------- int The size of the URL in bytes. """ r = requests.head(url, headers={'Accept-Encoding': 'identity'}) size = int(r.headers['content-length']) return size
python
{ "resource": "" }
q37678
make_sure_dir_exists
train
def make_sure_dir_exists(dir_, create_subfolders=False): """Ensures that a directory exists. Adapted from StackOverflow users "Bengt" and "Heikki Toivonen" (http://stackoverflow.com/a/5032238). Parameters ---------- dir_: str The directory path. create_subfolders: bool, optional Whether to create any inexistent subfolders. [False] Returns ------- None Raises ------ OSError If a file system error occurs. """ assert isinstance(dir_, (str, _oldstr)) assert isinstance(create_subfolders, bool) try: if create_subfolders: os.makedirs(dir_) else: os.mkdir(dir_) except OSError as exception: if exception.errno != errno.EEXIST: raise
python
{ "resource": "" }
q37679
get_file_size
train
def get_file_size(path): """The the size of a file in bytes. Parameters ---------- path: str The path of the file. Returns ------- int The size of the file in bytes. Raises ------ IOError If the file does not exist. OSError If a file system error occurs. """ assert isinstance(path, (str, _oldstr)) if not os.path.isfile(path): raise IOError('File "%s" does not exist.', path) return os.path.getsize(path)
python
{ "resource": "" }
q37680
gzip_open_text
train
def gzip_open_text(path, encoding=None): """Opens a plain-text file that may be gzip'ed. Parameters ---------- path : str The file. encoding : str, optional The encoding to use. Returns ------- file-like A file-like object. Notes ----- Generally, reading gzip'ed files with gzip.open is very slow, and it is preferable to pipe the file into the python script using ``gunzip -c``. The script then reads the file from stdin. """ if encoding is None: encoding = sys.getdefaultencoding() assert os.path.isfile(path) is_compressed = False try: gzip.open(path, mode='rb').read(1) except IOError: pass else: is_compressed = True if is_compressed: if six.PY2: import codecs zf = gzip.open(path, 'rb') reader = codecs.getreader(encoding) fh = reader(zf) else: fh = gzip.open(path, mode='rt', encoding=encoding) else: # the following works in Python 2.7, thanks to future fh = open(path, mode='r', encoding=encoding) return fh
python
{ "resource": "" }
q37681
bisect_index
train
def bisect_index(a, x): """ Find the leftmost index of an element in a list using binary search. Parameters ---------- a: list A sorted list. x: arbitrary The element. Returns ------- int The index. """ i = bisect.bisect_left(a, x) if i != len(a) and a[i] == x: return i raise ValueError
python
{ "resource": "" }
q37682
read_single
train
def read_single(path, encoding = 'UTF-8'): """ Reads the first column of a tab-delimited text file. The file can either be uncompressed or gzip'ed. Parameters ---------- path: str The path of the file. enc: str The file encoding. Returns ------- List of str A list containing the elements in the first column. """ assert isinstance(path, (str, _oldstr)) data = [] with smart_open_read(path, mode='rb', try_gzip=True) as fh: reader = csv.reader(fh, dialect='excel-tab', encoding=encoding) for l in reader: data.append(l[0]) return data
python
{ "resource": "" }
q37683
add_tcp_firewall_rule
train
def add_tcp_firewall_rule(project, access_token, name, tag, port): """Adds a TCP firewall rule. TODO: docstring""" headers = { 'Authorization': 'Bearer %s' % access_token.access_token } payload = { "name": name, "kind": "compute#firewall", "sourceRanges": [ "0.0.0.0/0" ], "targetTags": [ tag ], "allowed": [ { "IPProtocol": "tcp", "ports": [ str(port), ] } ], "network": "projects/%s/global/networks/default" % project } r = requests.post('https://www.googleapis.com/compute/v1/' 'projects/%s/global/firewalls' % project, headers=headers, json=payload) r.raise_for_status() op = r.json()['name'] LOGGER.info('Requested to add firewall rule for tag "%s" ' '(operation "%s")...', tag, op) return op
python
{ "resource": "" }
q37684
update
train
def update(taxids, conn, force_download, silent): """Update local UniProt database""" if not silent: click.secho("WARNING: Update is very time consuming and can take several " "hours depending which organisms you are importing!", fg="yellow") if not taxids: click.echo("Please note that you can restrict import to organisms by " "NCBI taxonomy IDs") click.echo("Example (human, mouse, rat):\n") click.secho("\tpyuniprot update --taxids 9606,10090,10116\n\n", fg="green") if taxids: taxids = [int(taxid.strip()) for taxid in taxids.strip().split(',') if re.search('^ *\d+ *$', taxid)] database.update(taxids=taxids, connection=conn, force_download=force_download, silent=silent)
python
{ "resource": "" }
q37685
web
train
def web(host, port): """Start web application""" from .webserver.web import get_app get_app().run(host=host, port=port)
python
{ "resource": "" }
q37686
checkCompatibleParams
train
def checkCompatibleParams(initialParams, laterParams): """ Check a later set of BLAST parameters against those originally found. @param initialParams: A C{dict} with the originally encountered BLAST parameter settings. @param laterParams: A C{dict} with BLAST parameter settings encountered later. @return: A C{str} summary of the parameter differences if the parameter sets differ, else C{None}. """ # Note that although the params contains a 'date', its value is empty # (as far as I've seen). This could become an issue one day if it # becomes non-empty and differs between JSON files that we cat # together. In that case we may need to be more specific in our params # compatible checking. err = [] for param in initialParams: if param in laterParams: if (param not in VARIABLE_PARAMS and initialParams[param] != laterParams[param]): err.append( '\tParam %r initial value %r differs from ' 'later value %r' % (param, initialParams[param], laterParams[param])) else: err.append('\t%r found in initial parameters, not found ' 'in later parameters' % param) for param in laterParams: if param not in initialParams: err.append('\t%r found in later parameters, not seen in ' 'initial parameters' % param) return 'Summary of differences:\n%s' % '\n'.join(err) if err else None
python
{ "resource": "" }
q37687
GeneSet.to_list
train
def to_list(self): """Converts the GeneSet object to a flat list of strings. Note: see also :meth:`from_list`. Parameters ---------- Returns ------- list of str The data from the GeneSet object as a flat list. """ src = self._source or '' coll = self._collection or '' desc = self._description or '' l = [self._id, src, coll, self._name, ','.join(sorted(self._genes)), desc] return l
python
{ "resource": "" }
q37688
GeneSet.from_list
train
def from_list(cls, l): """Generate an GeneSet object from a list of strings. Note: See also :meth:`to_list`. Parameters ---------- l: list or tuple of str A list of strings representing gene set ID, name, genes, source, collection, and description. The genes must be comma-separated. See also :meth:`to_list`. Returns ------- `genometools.basic.GeneSet` The gene set. """ id_ = l[0] name = l[3] genes = l[4].split(',') src = l[1] or None coll = l[2] or None desc = l[5] or None return cls(id_, name, genes, src, coll, desc)
python
{ "resource": "" }
q37689
findPrimer
train
def findPrimer(primer, seq): """ Look for a primer sequence. @param primer: A C{str} primer sequence. @param seq: A BioPython C{Bio.Seq} sequence. @return: A C{list} of zero-based offsets into the sequence at which the primer can be found. If no instances are found, return an empty C{list}. """ offsets = [] seq = seq.upper() primer = primer.upper() primerLen = len(primer) discarded = 0 offset = seq.find(primer) while offset > -1: offsets.append(discarded + offset) seq = seq[offset + primerLen:] discarded += offset + primerLen offset = seq.find(primer) return offsets
python
{ "resource": "" }
q37690
findPrimerBidi
train
def findPrimerBidi(primer, seq): """ Look for a primer in a sequence and its reverse complement. @param primer: A C{str} primer sequence. @param seq: A BioPython C{Bio.Seq} sequence. @return: A C{tuple} of two lists. The first contains (zero-based) ascending offsets into the sequence at which the primer can be found. The second is a similar list ascending offsets into the original sequence where the primer matches the reverse complemented of the sequence. If no instances are found, the corresponding list in the returned tuple must be empty. """ # Note that we reverse complement the primer to find the reverse # matches. This is much simpler than reverse complementing the sequence # because it allows us to use findPrimer and to deal with overlapping # matches correctly. forward = findPrimer(primer, seq) reverse = findPrimer(reverse_complement(primer), seq) return forward, reverse
python
{ "resource": "" }
q37691
parseRangeString
train
def parseRangeString(s, convertToZeroBased=False): """ Parse a range string of the form 1-5,12,100-200. @param s: A C{str} specifiying a set of numbers, given in the form of comma separated numeric ranges or individual indices. @param convertToZeroBased: If C{True} all indices will have one subtracted from them. @return: A C{set} of all C{int}s in the specified set. """ result = set() for _range in s.split(','): match = _rangeRegex.match(_range) if match: start, end = match.groups() start = int(start) if end is None: end = start else: end = int(end) if start > end: start, end = end, start if convertToZeroBased: result.update(range(start - 1, end)) else: result.update(range(start, end + 1)) else: raise ValueError( 'Illegal range %r. Ranges must single numbers or ' 'number-number.' % _range) return result
python
{ "resource": "" }
q37692
nucleotidesToStr
train
def nucleotidesToStr(nucleotides, prefix=''): """ Convert offsets and base counts to a string. @param nucleotides: A C{defaultdict(Counter)} instance, keyed by C{int} offset, with nucleotides keying the Counters. @param prefix: A C{str} to put at the start of each line. @return: A C{str} representation of the offsets and nucleotide counts for each. """ result = [] for offset in sorted(nucleotides): result.append( '%s%d: %s' % (prefix, offset, baseCountsToStr(nucleotides[offset]))) return '\n'.join(result)
python
{ "resource": "" }
q37693
DiamondReadsAlignments._getReader
train
def _getReader(self, filename, scoreClass): """ Obtain a JSON record reader for DIAMOND records. @param filename: The C{str} file name holding the JSON. @param scoreClass: A class to hold and compare scores (see scores.py). """ if filename.endswith('.json') or filename.endswith('.json.bz2'): return JSONRecordsReader(filename, scoreClass) else: raise ValueError( 'Unknown DIAMOND record file suffix for file %r.' % filename)
python
{ "resource": "" }
q37694
get_argument_parser
train
def get_argument_parser(): """Creates the argument parser for the extract_entrez2gene.py script. Returns ------- A fully configured `argparse.ArgumentParser` object. Notes ----- This function is used by the `sphinx-argparse` extension for sphinx. """ desc = 'Generate a mapping of Entrez IDs to gene symbols.' parser = cli.get_argument_parser(desc=desc) parser.add_argument( '-f', '--gene2acc-file', type=str, required=True, help=textwrap.dedent("""\ Path of gene2accession.gz file (from ftp://ftp.ncbi.nlm.nih.gov/gene/DATA), or a filtered version thereof.""") ) parser.add_argument( '-o', '--output-file', type=str, required=True, help=textwrap.dedent("""\ Path of output file. If set to ``-``, print to ``stdout``, and redirect logging messages to ``stderr``.""") ) parser.add_argument( '-l', '--log-file', type=str, default=None, help='Path of log file. If not specified, print to stdout.' ) parser.add_argument( '-q', '--quiet', action='store_true', help='Suppress all output except warnings and errors.' ) parser.add_argument( '-v', '--verbose', action='store_true', help='Enable verbose output. Ignored if ``--quiet`` is specified.' ) return parser
python
{ "resource": "" }
q37695
read_gene2acc
train
def read_gene2acc(file_path, logger): """Extracts Entrez ID -> gene symbol mapping from gene2accession.gz file. Parameters ---------- file_path: str The path of the gene2accession.gz file (or a filtered version thereof). The file may be gzip'ed. Returns ------- dict A mapping of Entrez IDs to gene symbols. """ gene2acc = {} with misc.smart_open_read(file_path, mode='rb', try_gzip=True) as fh: reader = csv.reader(fh, dialect='excel-tab') next(reader) # skip header for i, l in enumerate(reader): id_ = int(l[1]) symbol = l[15] try: gene2acc[id_].append(symbol) except KeyError: gene2acc[id_] = [symbol] # print (l[0],l[15]) # make sure all EntrezIDs map to a unique gene symbol n = len(gene2acc) for k, v in gene2acc.items(): symbols = sorted(set(v)) assert len(symbols) == 1 gene2acc[k] = symbols[0] all_symbols = sorted(set(gene2acc.values())) m = len(all_symbols) logger.info('Found %d Entrez Gene IDs associated with %d gene symbols.', n, m) return gene2acc
python
{ "resource": "" }
q37696
write_entrez2gene
train
def write_entrez2gene(file_path, entrez2gene, logger): """Writes Entrez ID -> gene symbol mapping to a tab-delimited text file. Parameters ---------- file_path: str The path of the output file. entrez2gene: dict The mapping of Entrez IDs to gene symbols. Returns ------- None """ with misc.smart_open_write(file_path, mode='wb') as ofh: writer = csv.writer(ofh, dialect='excel-tab', lineterminator=os.linesep) for k in sorted(entrez2gene.keys(), key=lambda x: int(x)): writer.writerow([k, entrez2gene[k]]) logger.info('Output written to file "%s".', file_path)
python
{ "resource": "" }
q37697
main
train
def main(args=None): """Extracts Entrez ID -> gene symbol mapping and writes it to a text file. Parameters ---------- args: argparse.Namespace object, optional The argument values. If not specified, the values will be obtained by parsing the command line arguments using the `argparse` module. Returns ------- int Exit code (0 if no error occurred). Raises ------ SystemError If the version of the Python interpreter is not >= 2.7. """ vinfo = sys.version_info if not (vinfo >= (2, 7)): raise SystemError('Python interpreter version >= 2.7 required, ' 'found %d.%d instead.' % (vinfo.major, vinfo.minor)) if args is None: parser = get_argument_parser() args = parser.parse_args() gene2acc_file = args.gene2acc_file output_file = args.output_file log_file = args.log_file quiet = args.quiet verbose = args.verbose # configure logger log_stream = sys.stdout if output_file == '-': log_stream = sys.stderr logger = misc.get_logger(log_stream=log_stream, log_file=log_file, quiet=quiet, verbose=verbose) entrez2gene = read_gene2acc(gene2acc_file, logger) write_entrez2gene(output_file, entrez2gene, logger) return 0
python
{ "resource": "" }
q37698
find
train
def find(s): """ Find an amino acid whose name or abbreviation is s. @param s: A C{str} amino acid specifier. This may be a full name, a 3-letter abbreviation or a 1-letter abbreviation. Case is ignored. return: An L{AminoAcid} instance or C{None} if no matching amino acid can be located. """ abbrev1 = None origS = s if ' ' in s: # Convert first word to title case, others to lower. first, rest = s.split(' ', 1) s = first.title() + ' ' + rest.lower() else: s = s.title() if s in NAMES: abbrev1 = s elif s in ABBREV3_TO_ABBREV1: abbrev1 = ABBREV3_TO_ABBREV1[s] elif s in NAMES_TO_ABBREV1: abbrev1 = NAMES_TO_ABBREV1[s] else: # Look for a 3-letter codon. def findCodon(target): for abbrev1, codons in CODONS.items(): for codon in codons: if codon == target: return abbrev1 abbrev1 = findCodon(origS.upper()) if abbrev1: return AminoAcid( NAMES[abbrev1], ABBREV3[abbrev1], abbrev1, CODONS[abbrev1], PROPERTIES[abbrev1], PROPERTY_DETAILS[abbrev1], PROPERTY_CLUSTERS[abbrev1])
python
{ "resource": "" }
q37699
_propertiesOrClustersForSequence
train
def _propertiesOrClustersForSequence(sequence, propertyNames, propertyValues, missingAAValue): """ Extract amino acid property values or cluster numbers for a sequence. @param sequence: An C{AARead} (or a subclass) instance. @param propertyNames: An iterable of C{str} property names (each of which must be a key of a key in the C{propertyValues} C{dict}). @param propertyValues: A C{dict} in the form of C{PROPERTY_DETAILS} or C{PROPERTY_CLUSTERS} (see above). @param missingAAValue: A C{float} value to use for properties when an AA (e.g., 'X') is not known. @raise ValueError: If an unknown property is given in C{propertyNames}. @return: A C{dict} keyed by (lowercase) property name, with values that are C{list}s of the corresponding property value in C{propertyValues} in order of sequence position. """ propertyNames = sorted(map(str.lower, set(propertyNames))) # Make sure all mentioned property names exist for at least one AA. knownProperties = set() for names in propertyValues.values(): knownProperties.update(names) unknown = set(propertyNames) - knownProperties if unknown: raise ValueError( 'Unknown propert%s: %s.' % ('y' if len(unknown) == 1 else 'ies', ', '.join(unknown))) aas = sequence.sequence.upper() result = {} for propertyName in propertyNames: result[propertyName] = values = [] append = values.append for aa in aas: try: properties = propertyValues[aa] except KeyError: # No such AA. append(missingAAValue) else: append(properties[propertyName]) return result
python
{ "resource": "" }