_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q37900
_stage_ctrl
train
def _stage_ctrl(rst, clk, rx_rdy, rx_vld, tx_rdy, tx_vld, stage_en, stop_rx=None, stop_tx=None, BC=False):
    ''' Single stage control

    Handshake controller for one elastic-pipeline stage (MyHDL).

    rst, clk        -- reset and clock signals for the sequential process
    rx_rdy, rx_vld  -- ready/valid handshake towards the upstream (receive) side
    tx_rdy, tx_vld  -- ready/valid handshake towards the downstream (transmit) side
    stage_en        -- asserted when the stage register may capture new data
    stop_rx/stop_tx -- optional stall controls; default to constant False
    BC              -- enable bubble compression
    '''
    # Optional stall inputs default to constant False when not wired up.
    if stop_rx==None: stop_rx = False
    if stop_tx==None: stop_tx = False
    state = Signal(bool(0))  # stage occupancy: True while the stage holds valid data
    a = Signal(bool(0))      # NOTE(review): appears to mean "stage may advance/accept" — confirm
    b = Signal(bool(0))      # NOTE(review): appears to mean "input data available" — confirm
    # Without bubble compression the link term is constant True; with BC the
    # stage may also accept while empty (state low), squeezing out bubbles.
    bc_link = state if BC else True
    @always_comb
    def _comb1():
        a.next = tx_rdy or stop_tx or not bc_link
        b.next = rx_vld or stop_rx
    @always_comb
    def _comb2():
        rx_rdy.next = a and not stop_rx
        tx_vld.next = state and not stop_tx
        stage_en.next = a and b
    @always_seq(clk.posedge, reset=rst)
    def _state():
        # Capture new occupancy only when the stage is allowed to advance.
        if a:
            state.next = b
    return _comb1, _comb2, _state
python
{ "resource": "" }
q37901
_sanityCheck
train
def _sanityCheck(subjectStart, subjectEnd, queryStart, queryEnd,
                 queryStartInSubject, queryEndInSubject, hsp, queryLen,
                 subjectGaps, queryGaps, localDict):
    """
    Perform some sanity checks on an HSP. Call _debugPrint on any error.

    @param subjectStart: The 0-based C{int} start offset of the match in
        the subject.
    @param subjectEnd: The 0-based C{int} end offset of the match in the
        subject.
    @param queryStart: The 0-based C{int} start offset of the match in the
        query.
    @param queryEnd: The 0-based C{int} end offset of the match in the query.
    @param queryStartInSubject: The 0-based C{int} offset of where the query
        starts in the subject.
    @param queryEndInSubject: The 0-based C{int} offset of where the query
        ends in the subject.
    @param hsp: The HSP C{dict} passed to normalizeHSP.
    @param queryLen: the C{int} length of the query sequence.
    @param subjectGaps: the C{int} number of gaps in the subject.
    @param queryGaps: the C{int} number of gaps in the query.
    @param localDict: A C{dict} of local variables from our caller (as
        produced by locals()).
    """
    # Subject indices must always be ascending.
    if subjectStart >= subjectEnd:
        _debugPrint(hsp, queryLen, localDict, 'subjectStart >= subjectEnd')
    subjectMatchLength = subjectEnd - subjectStart
    queryMatchLength = queryEnd - queryStart
    # Sanity check that the length of the matches in the subject and query
    # are identical, taking into account gaps in both.
    subjectMatchLengthWithGaps = subjectMatchLength + subjectGaps
    queryMatchLengthWithGaps = queryMatchLength + queryGaps
    if subjectMatchLengthWithGaps != queryMatchLengthWithGaps:
        _debugPrint(hsp, queryLen, localDict,
                    'Including gaps, subject match length (%d) != Query match '
                    'length (%d)' % (subjectMatchLengthWithGaps,
                                     queryMatchLengthWithGaps))
    # The query (extended to full length) must fully contain the match
    # region in the subject.
    if queryStartInSubject > subjectStart:
        _debugPrint(hsp, queryLen, localDict,
                    'queryStartInSubject (%d) > subjectStart (%d)' %
                    (queryStartInSubject, subjectStart))
    if queryEndInSubject < subjectEnd:
        _debugPrint(hsp, queryLen, localDict,
                    'queryEndInSubject (%d) < subjectEnd (%d)' %
                    (queryEndInSubject, subjectEnd))
python
{ "resource": "" }
q37902
get_compute_credentials
train
def get_compute_credentials(key):
    """Authenticate a service account for the compute engine.

    Uses `oauth2client.service_account` rather than the `google` package,
    because the latter does not (yet?) support the compute engine and
    obtaining raw HTTP auth tokens through it is far more cumbersome.

    See:
    - https://cloud.google.com/iap/docs/authentication-howto
    - https://developers.google.com/identity/protocols/OAuth2ServiceAccount

    TODO: docstring"""
    compute_scopes = ['https://www.googleapis.com/auth/compute']
    return ServiceAccountCredentials.from_json_keyfile_dict(
        key, scopes=compute_scopes)
python
{ "resource": "" }
q37903
ExpGeneTable.hash
train
def hash(self):
    """Return an MD5 hex digest of the table contents (index included)."""
    row_hashes = hash_pandas_object(self, index=True)
    return hashlib.md5(row_hashes.values.tobytes()).hexdigest()
python
{ "resource": "" }
q37904
ExpGeneTable.genes
train
def genes(self):
    """Return every gene in the table as a list of `ExpGene` objects."""
    flat = self.reset_index()
    return [ExpGene.from_series(row) for _, row in flat.iterrows()]
python
{ "resource": "" }
q37905
ExpGeneTable.read_tsv
train
def read_tsv(cls, file_or_buffer: str):
    """Load a gene table from a tab-delimited text file.

    The first column becomes the index.  Missing values are converted
    from NaN to None, which coerces all column dtypes to `object`.
    """
    frame = pd.read_csv(file_or_buffer, sep='\t', index_col=0)
    frame = frame.where(pd.notnull(frame), None)
    return cls(frame)
python
{ "resource": "" }
q37906
ExpGeneTable.from_genes
train
def from_genes(cls, genes: List[ExpGene]):
    """Build a table from `ExpGene` objects, indexed by Ensembl ID."""
    records = [g.to_dict() for g in genes]
    ids = [rec.pop('ensembl_id') for rec in records]
    return cls(records, index=ids)
python
{ "resource": "" }
q37907
ExpGeneTable.from_gene_ids
train
def from_gene_ids(cls, gene_ids: List[str]):
    """Build a table containing just the given gene IDs (no extra data)."""
    wrapped = [ExpGene(gene_id) for gene_id in gene_ids]
    return cls.from_genes(wrapped)
python
{ "resource": "" }
q37908
ExpGeneTable.from_gene_ids_and_names
train
def from_gene_ids_and_names(cls, gene_names: Dict[str, str]):
    """Build a table from a mapping of gene ID -> gene name."""
    wrapped = [ExpGene(gene_id, name=label)
               for gene_id, label in gene_names.items()]
    return cls.from_genes(wrapped)
python
{ "resource": "" }
q37909
copy
train
def copy(src, trg, transform=None):
    ''' copy items with optional fields transformation

    src/trg are (item class, uri) pairs understood by dblite's open().
    The '_id' field is dropped so targets assign fresh rowids.
    '''
    source = open(src[0], src[1])
    target = open(trg[0], trg[1], autocommit=1000)
    for item in source.get():
        record = dict(item)
        record.pop('_id', None)
        if transform:
            record = transform(record)
        target.put(trg[0](record))
    source.close()
    target.commit()
    target.close()
python
{ "resource": "" }
q37910
_regexp
train
def _regexp(expr, item): ''' REGEXP function for Sqlite ''' reg = re.compile(expr) return reg.search(item) is not None
python
{ "resource": "" }
q37911
Storage._dict_factory
train
def _dict_factory(cursor, row): ''' factory for sqlite3 to return results as dict ''' d = {} for idx, col in enumerate(cursor.description): if col[0] == 'rowid': d['_id'] = row[idx] else: d[col[0]] = row[idx] return d
python
{ "resource": "" }
q37912
Storage._create_table
train
def _create_table(self, table_name): ''' create sqlite's table for storing simple dictionaries ''' if self.fieldnames: sql_fields = [] for field in self._fields: if field != '_id': if 'dblite' in self._fields[field]: sql_fields.append(' '.join([field, self._fields[field]['dblite']])) else: sql_fields.append(field) sql_fields = ','.join(sql_fields) SQL = 'CREATE TABLE IF NOT EXISTS %s (%s);' % (table_name, sql_fields) try: self._cursor.execute(SQL) except sqlite3.OperationalError, err: raise RuntimeError('Create table error, %s, SQL: %s' % (err, SQL))
python
{ "resource": "" }
q37913
Storage._make_item
train
def _make_item(self, item): ''' make Item class ''' for field in self._item_class.fields: if (field in item) and ('dblite_serializer' in self._item_class.fields[field]): serializer = self._item_class.fields[field]['dblite_serializer'] item[field] = serializer.loads(item[field]) return self._item_class(item)
python
{ "resource": "" }
q37914
Storage._get_all
train
def _get_all(self): ''' return all items ''' rowid = 0 while True: SQL_SELECT_MANY = 'SELECT rowid, * FROM %s WHERE rowid > ? LIMIT ?;' % self._table self._cursor.execute(SQL_SELECT_MANY, (rowid, ITEMS_PER_REQUEST)) items = self._cursor.fetchall() if len(items) == 0: break for item in items: rowid = item['_id'] yield self._make_item(item)
python
{ "resource": "" }
q37915
Storage.get_one
train
def get_one(self, criteria):
    '''Return the first item matching `criteria`, or None.

    The lookup is best-effort: any error raised while evaluating the
    criteria results in None.  Unlike the previous bare `except:`, this
    no longer traps SystemExit/KeyboardInterrupt.
    '''
    try:
        for item in self._get_with_criteria(criteria, limit=1):
            return item
    except Exception:
        return None
    return None
python
{ "resource": "" }
q37916
Storage.put
train
def put(self, item):
    ''' store a single item, or a list/tuple of items, in the database '''
    if isinstance(item, self._item_class):
        self._put_one(item)
        return
    if isinstance(item, (list, tuple)):
        self._put_many(item)
        return
    raise RuntimeError('Unknown item(s) type, %s' % type(item))
python
{ "resource": "" }
q37917
Storage._put_one
train
def _put_one(self, item): ''' store one item in database ''' # prepare values values = [] for k, v in item.items(): if k == '_id': continue if 'dblite_serializer' in item.fields[k]: serializer = item.fields[k]['dblite_serializer'] v = serializer.dumps(v) if v is not None: v = sqlite3.Binary(buffer(v)) values.append(v) # check if Item is new => update it if '_id' in item: fieldnames = ','.join(['%s=?' % f for f in item if f != '_id']) values.append(item['_id']) SQL = 'UPDATE %s SET %s WHERE rowid=?;' % (self._table, fieldnames) # new Item else: fieldnames = ','.join([f for f in item if f != '_id']) fieldnames_template = ','.join(['?' for f in item if f != '_id']) SQL = 'INSERT INTO %s (%s) VALUES (%s);' % (self._table, fieldnames, fieldnames_template) try: self._cursor.execute(SQL, values) except sqlite3.OperationalError, err: raise RuntimeError('Item put() error, %s, SQL: %s, values: %s' % (err, SQL, values) ) except sqlite3.IntegrityError: raise DuplicateItem('Duplicate item, %s' % item) self._do_autocommit()
python
{ "resource": "" }
q37918
Storage._put_many
train
def _put_many(self, items): ''' store items in sqlite database ''' for item in items: if not isinstance(item, self._item_class): raise RuntimeError('Items mismatch for %s and %s' % (self._item_class, type(item))) self._put_one(item)
python
{ "resource": "" }
q37919
Storage.sql
train
def sql(self, sql, params=()):
    '''Execute a raw SQL statement.

    @return: a generator of items for SELECT statements, None otherwise.
    @raise SQLError: on sqlite operational errors.
    @raise DuplicateItem: on integrity (uniqueness) violations.
    '''
    def _items(rows):
        for row in rows:
            yield self._item_class(row)
    sql = sql.strip()
    try:
        self._cursor.execute(sql, params)
    # `except E, err` was Python 2-only syntax; `as` is 2.6+/3.x safe.
    except sqlite3.OperationalError as err:
        raise SQLError('%s, SQL: %s, params: %s' % (err, sql, params))
    except sqlite3.IntegrityError:
        # The old message referenced an undefined name `item` (it only
        # existed inside the _items generator), raising NameError instead
        # of DuplicateItem; report the offending statement instead.
        raise DuplicateItem('Duplicate item, SQL: %s, params: %s' % (sql, params))
    if sql.upper().startswith('SELECT'):
        return _items(self._cursor.fetchall())
    return None
python
{ "resource": "" }
q37920
run_tasks
train
def run_tasks(tasks, max_workers=None, use_processes=False):
    """
    Run an iterable of tasks.

    tasks: The iterable of tasks
    max_workers: (optional, None) The maximum number of workers to use. As of
        Python 3.5, if None is passed to the thread executor will default to
        5 * the number of processors and the process executor will default to
        the number of processors. If you are running an older version,
        consider this a non-optional value.
    use_processes: (optional, False) use a process pool instead of a thread
        pool.
    """
    futures = set()  # in-flight futures, shared by the two closures below
    if use_processes:
        get_executor = concurrent.futures.ProcessPoolExecutor
    else:
        get_executor = concurrent.futures.ThreadPoolExecutor
    with get_executor(max_workers) as executor:
        def execute(function, name):
            """ Submit a task to the pool """
            future = executor.submit(function)
            # Tag the future so its result can be attributed to the task.
            future.name = name
            futures.add(future)
        def wait():
            """ Wait for at least one task to complete """
            results = []
            waited = concurrent.futures.wait(
                futures, return_when=concurrent.futures.FIRST_COMPLETED
            )
            for future in waited.done:
                exc = future.exception()
                if exc is None:
                    results.append(
                        TaskResult(
                            future.name, True, None, future.result()
                        )
                    )
                else:
                    results.append(TaskResult(future.name, False, exc, None))
                # Drop completed futures so wait() never reports one twice.
                futures.remove(future)
            return results
        # task_loop drives submission/consumption until tasks are exhausted.
        return task_loop(tasks, execute, wait)
python
{ "resource": "" }
q37921
wait_for_zone_op
train
def wait_for_zone_op(access_token, project, zone, name, interval=1.0):
    """Wait until a zone operation is finished.

    Polls the GCE operations endpoint until the operation's status is
    'DONE', sleeping `interval` seconds between polls.

    @param access_token: credentials object exposing an `access_token`
        attribute.
    @param project: the GCP project ID.
    @param zone: the compute zone the operation runs in.
    @param name: the operation name.
    @param interval: polling interval in seconds (must be >= 0.1).
    @raise TypeError/ValueError: on an invalid `interval` (previously
        these were `assert`s, which vanish under `python -O`).
    @raise requests.HTTPError: if a polling request fails.

    TODO: docstring"""
    if not isinstance(interval, (int, float)):
        raise TypeError('interval must be a number, got %r' % (interval,))
    if interval < 0.1:
        raise ValueError('interval must be >= 0.1 seconds')
    status = 'RUNNING'
    LOGGER.info('Waiting for zone operation "%s" to finish...', name)
    while status != 'DONE':
        r = requests.get('https://www.googleapis.com/compute/v1/'
                         'projects/%s/zones/%s/operations/%s?access_token=%s'
                         % (project, zone, name, access_token.access_token))
        r.raise_for_status()
        result = r.json()
        status = result['status']
        # NOTE: sleeps once more after DONE is observed? No — the loop
        # condition is re-checked after this sleep, preserving the
        # original timing behaviour.
        time.sleep(interval)
    LOGGER.info('Zone operation "%s" done!', name)
python
{ "resource": "" }
q37922
LocalAlignment._initialise
train
def _initialise(self): """ Initialises table with dictionary. """ d = {'score': 0, 'pointer': None, 'ins': 0, 'del': 0} cols = len(self.seq1Seq) + 1 rows = len(self.seq2Seq) + 1 # Note that this puts a ref to the same dict (d) into each cell of # the table. Hopefully that is what was intended. Eyeballing the # code below that uses the table it looks like table entries are # entirely replaced, so this seems ok. Terry. table = [[d for _ in range(cols)] for _ in range(rows)] return table
python
{ "resource": "" }
q37923
LocalAlignment._cigarString
train
def _cigarString(self, output): """ Return a cigar string of aligned sequences. @param output: a C{tup} of strings (align1, align, align2) @return: a C{str} containing the cigar string. Eg with input: 'GGCCCGCA' and 'GG-CTGCA', return 2=1D1=1X3= """ cigar = [] count = 0 align1 = output[0] align2 = output[2] for nt1, nt2 in zip(align1, align2): if nt1 == nt2: cigar.append('=') elif nt1 == '-': cigar.append('I') elif nt2 == '-': cigar.append('D') else: cigar.append('X') # Initially create a list of characters, # eg ['=', '=', 'D', '=', 'X', '=', '=', '='] cigar.append('*') # Append an arbitrary character to ensure parsing below functions cigarString = '' previousCharacter = '' count = 0 first = True for character in cigar: if first: previousCharacter = character count += 1 first = False else: if character == previousCharacter: count += 1 else: cigarString += (str(count) + str(previousCharacter)) count = 1 previousCharacter = character return cigarString
python
{ "resource": "" }
q37924
LocalAlignment._alignmentToStr
train
def _alignmentToStr(self, result): """ Make a textual representation of an alignment result. @param result: A C{dict}, as returned by C{self.createAlignment}. @return: A C{str} desription of a result. For every three lines the first and third contain the input sequences, possibly padded with '-'. The second contains '|' where the two sequences match, and ' ' where not. Format of the output is as follows: Cigar: (Cigar string) Evalue: Bitscore: Id1 Match start: (int) Match end: (int) Id2 Match start: (int) Match end: (int) Id1: 1 (seq) 50 [lines to show matches] Id2: 1 (seq) 50 """ if result is None: return ('\nNo alignment between %s and %s\n' % ( self.seq1ID, self.seq2ID)) else: header = ( '\nCigar string of aligned region: %s\n' '%s Match start: %d Match end: %d\n' '%s Match start: %d Match end: %d\n' % (result['cigar'], self.seq1ID, result['sequence1Start'], result['sequence1End'], self.seq2ID, result['sequence2Start'], result['sequence2End']) ) text = '\n'.join(result['text']) return header + text
python
{ "resource": "" }
q37925
LocalAlignment.createAlignment
train
def createAlignment(self, resultFormat=dict):
    """
    Run the alignment algorithm.

    @param resultFormat: Either C{dict} or C{str}, giving the desired
        result format.
    @return: If C{resultFormat} is C{dict}, a C{dict} containing
        information about the match (or C{None}) if there is no match.
        If C{resultFormat} is C{str}, a C{str} containing a readable
        version of the match info (see _alignmentToStr above for the
        exact format).
    """
    table = self._initialise()
    alignment = self._fillAndTraceback(table)
    output = alignment[0]
    # An empty aligned string on either side means there was no match.
    if output[0] == '' or output[2] == '':
        result = None
    else:
        indexes = alignment[1]
        result = {
            'cigar': self._cigarString(output),
            'sequence1Start': indexes['min_col'],
            'sequence1End': indexes['max_col'],
            'sequence2Start': indexes['min_row'],
            'sequence2End': indexes['max_row'],
            'text': self._formatAlignment(output, indexes),
        }
    return self._alignmentToStr(result) if resultFormat is str else result
python
{ "resource": "" }
q37926
SQLBuilder.select
train
def select(self, fields=('rowid', '*'), offset=None, limit=None):
    '''Build and return a SELECT statement.

    @param fields: sequence of column names (default: rowid plus all
        columns).  The default was changed from a mutable list to a
        tuple — sharing one list across calls invited accidental
        mutation; callers passing their own sequence are unaffected.
    @param offset: optional C{int} OFFSET (only emitted together with
        a limit, as sqlite requires LIMIT before OFFSET).
    @param limit: optional C{int} LIMIT.
    '''
    # base SQL
    SQL = 'SELECT %s FROM %s' % (','.join(fields), self._table)
    # selectors
    if self._selectors:
        SQL = ' '.join([SQL, 'WHERE', self._selectors]).strip()
    # modifiers
    if self._modifiers:
        SQL = ' '.join([SQL, self._modifiers])
    # limit (silently ignored when not an int — legacy behaviour)
    if limit is not None and isinstance(limit, int):
        SQL = ' '.join((SQL, 'LIMIT %s' % limit))
    # offset
    if (limit is not None) and (offset is not None) and isinstance(offset, int):
        SQL = ' '.join((SQL, 'OFFSET %s' % offset))
    return ''.join((SQL, ';'))
python
{ "resource": "" }
q37927
SQLBuilder.delete
train
def delete(self):
    '''Build and return a DELETE statement (note: no trailing semicolon,
    unlike select()).'''
    statement = 'DELETE FROM %s' % self._table
    if not self._selectors:
        return statement
    return ' '.join([statement, 'WHERE', self._selectors]).strip()
python
{ "resource": "" }
q37928
SQLBuilder._parse
train
def _parse(self, params): ''' parse parameters and return SQL ''' if not isinstance(params, dict): return None, None if len(params) == 0: return None, None selectors = list() modifiers = list() for k in params.keys(): if k in LOGICAL_OPERATORS: selectors.append(self._logical(k, params[k])) elif k in QUERY_MODIFIERS: modifiers.append(self._modifier(k, params[k])) else: if k == '_id': selectors.append("rowid%s" % (self._value_wrapper(params[k]))) else: selectors.append("%s%s" % (k, self._value_wrapper(params[k]))) _selectors = ' AND '.join(selectors).strip() _modifiers = ' '.join(modifiers).strip() return _selectors, _modifiers
python
{ "resource": "" }
q37929
SQLBuilder._value_wrapper
train
def _value_wrapper(self, value): ''' wrapper for values ''' if isinstance(value, (int, float,)): return '=%s' % value elif isinstance(value, (str, unicode)): value = value.strip() # LIKE if RE_LIKE.match(value): return ' LIKE %s' % repr(RE_LIKE.match(value).group('RE_LIKE')) # REGEXP elif RE_REGEXP.match(value): return ' REGEXP %s' % repr(RE_REGEXP.search(value).group('RE_REGEXP')) else: return '=%s' % repr(value) elif value is None: return ' ISNULL'
python
{ "resource": "" }
q37930
make_router
train
def make_router(*routings):
    """Return a WSGI application that dispatches requests to controllers

    Each routing is a (methods, regex, app[, vars]) tuple: `methods` is a
    method name or sequence of names (None = any), `regex` is matched
    against PATH_INFO, and the optional `vars` dict is merged into
    `req.urlvars` on a match.
    """
    routes = []
    for routing in routings:
        methods, regex, app = routing[:3]
        if isinstance(methods, basestring):
            methods = (methods,)
        vars = routing[3] if len(routing) >= 4 else {}
        routes.append((methods, re.compile(unicode(regex)), app, vars))
    def router(environ, start_response):
        """Dispatch request to controllers."""
        req = webob.Request(environ)
        split_path_info = req.path_info.split('/')
        if split_path_info[0]:
            # When path_info doesn't start with a "/" this is an error or a attack => Reject request.
            # An example of an URL with such a invalid path_info: http://127.0.0.1http%3A//127.0.0.1%3A80/result?...
            ctx = contexts.Ctx(req)
            headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
            return wsgihelpers.respond_json(ctx,
                dict(
                    apiVersion = 1,
                    error = dict(
                        code = 400,  # Bad Request
                        message = ctx._(u"Invalid path: {0}").format(req.path_info),
                        ),
                    ),
                headers = headers,
                )(environ, start_response)
        for methods, regex, app, vars in routes:
            match = regex.match(req.path_info)
            if match is not None:
                if methods is not None and req.method not in methods:
                    ctx = contexts.Ctx(req)
                    headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
                    return wsgihelpers.respond_json(ctx,
                        dict(
                            apiVersion = 1,
                            error = dict(
                                code = 405,  # Method Not Allowed
                                message = ctx._(u"You cannot use HTTP {} to access this URL. \nUse one of {}.").format(
                                    req.method, methods),
                                ),
                            ),
                        headers = headers,
                        )(environ, start_response)
                if getattr(req, 'urlvars', None) is None:
                    req.urlvars = {}
                req.urlvars.update(match.groupdict())
                req.urlvars.update(vars)
                # Shift the matched prefix from path_info to script_name so
                # nested apps see only the remaining path.
                req.script_name += req.path_info[:match.end()]
                req.path_info = req.path_info[match.end():]
                return app(req.environ, start_response)
        ctx = contexts.Ctx(req)
        headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
        return wsgihelpers.respond_json(ctx,
            dict(
                apiVersion = 1,
                error = dict(
                    code = 404,  # Not Found
                    message = ctx._(u"Path not found: {0}").format(req.path_info),
                    ),
                ),
            headers = headers,
            )(environ, start_response)
    return router
python
{ "resource": "" }
q37931
respond_json
train
def respond_json(ctx, data, code = None, headers = [], json_dumps_default = None, jsonp = None):
    """Return a JSON response.

    This function is optimized for JSON following `Google JSON Style Guide
    <http://google-styleguide.googlecode.com/svn/trunk/jsoncstyleguide.xml>`_,
    but will handle any JSON except for HTTP errors.
    """
    if isinstance(data, collections.Mapping):
        # Remove null properties as recommended by Google JSON Style Guide.
        data = type(data)(
            (name, value)
            for name, value in data.iteritems()
            if value is not None
            )
        error = data.get('error')
        if isinstance(error, collections.Mapping):
            error = data['error'] = type(error)(
                (name, value)
                for name, value in error.iteritems()
                if value is not None
                )
    else:
        error = None
    if jsonp:
        content_type = 'application/javascript; charset=utf-8'
    else:
        content_type = 'application/json; charset=utf-8'
    if error:
        code = code or error['code']
        # NOTE(review): `assert` disappears under `python -O`; an explicit
        # check would be safer here.
        assert isinstance(code, int)
        response = webob.exc.status_map[code](headers = headers)
        response.content_type = content_type
        if code == 204:  # No content
            return response
        if error.get('code') is None:
            error['code'] = code
        if error.get('message') is None:
            title = errors_title.get(code)
            title = ctx._(title) if title is not None else response.status
            error['message'] = title
    else:
        response = ctx.req.response
        response.content_type = content_type
        if code is not None:
            response.status = code
        response.headers.update(headers)
    # try:
    #     text = json.dumps(data, encoding = 'utf-8', ensure_ascii = False, indent = 2)
    # except UnicodeDecodeError:
    #     text = json.dumps(data, ensure_ascii = True, indent = 2)
    if json_dumps_default is None:
        text = json.dumps(data)
    else:
        text = json.dumps(data, default = json_dumps_default)
    text = unicode(text)
    if jsonp:
        text = u'{0}({1})'.format(jsonp, text)
    response.text = text
    return response
python
{ "resource": "" }
q37932
get_assembly
train
def get_assembly(name):
    """read a single assembly by name, returning a dictionary of assembly data

    >>> assy = get_assembly('GRCh37.p13')

    >>> assy['name']
    'GRCh37.p13'

    >>> assy['description']
    'Genome Reference Consortium Human Build 37 patch release 13 (GRCh37.p13)'

    >>> assy['refseq_ac']
    'GCF_000001405.25'

    >>> assy['genbank_ac']
    'GCA_000001405.14'

    >>> len(assy['sequences'])
    297

    >>> import pprint
    >>> pprint.pprint(assy['sequences'][0])
    {'aliases': ['chr1'],
     'assembly_unit': 'Primary Assembly',
     'genbank_ac': 'CM000663.1',
     'length': 249250621,
     'name': '1',
     'refseq_ac': 'NC_000001.10',
     'relationship': '=',
     'sequence_role': 'assembled-molecule'}

    """
    # The assembly data ships as gzipped JSON inside this package; open
    # in text mode ("rt") so json.load receives str, not bytes.
    fn = pkg_resources.resource_filename(
        __name__, _assy_path_fmt.format(name=name))
    return json.load(gzip.open(fn, mode="rt", encoding="utf-8"))
python
{ "resource": "" }
q37933
make_name_ac_map
train
def make_name_ac_map(assy_name, primary_only=False):
    """make map from sequence name to accession for given assembly name

    >>> grch38p5_name_ac_map = make_name_ac_map('GRCh38.p5')
    >>> grch38p5_name_ac_map['1']
    'NC_000001.11'
    """
    mapping = {}
    for seq in get_assembly(assy_name)['sequences']:
        # Optionally restrict to primary-assembly sequences.
        if primary_only and not _is_primary(seq):
            continue
        mapping[seq['name']] = seq['refseq_ac']
    return mapping
python
{ "resource": "" }
q37934
main
train
def main(args=None):
    """Extract all exon annotations of protein-coding genes.

    Reads a (possibly gzipped) Ensembl GTF annotation file, keeps the
    rows whose feature field matches `field_name` and whose gene biotype
    is protein_coding or polymorphic_pseudogene, filters by chromosome
    name pattern, and writes the surviving rows as tab-separated output.
    Returns 0 on success.
    """
    if args is None:
        parser = get_argument_parser()
        args = parser.parse_args()
    input_file = args.annotation_file
    output_file = args.output_file
    species = args.species
    chrom_pat = args.chromosome_pattern
    field_name = args.field_name
    log_file = args.log_file
    quiet = args.quiet
    verbose = args.verbose
    # configure root logger
    log_stream = sys.stdout
    if output_file == '-':
        # if we print output to stdout, redirect log messages to stderr
        log_stream = sys.stderr
    logger = misc.get_logger(log_stream = log_stream, log_file = log_file,
            quiet = quiet, verbose = verbose)
    # Fall back to the species' default chromosome pattern when none given.
    if chrom_pat is None:
        chrom_pat = re.compile(ensembl.SPECIES_CHROMPAT[species])
    else:
        chrom_pat = re.compile(chrom_pat)
    logger.info('Regular expression used for filtering chromosome names: "%s"',
            chrom_pat.pattern)
    chromosomes = set()
    excluded_chromosomes = set()
    i = 0      # lines parsed
    exons = 0  # rows written
    logger.info('Parsing data...')
    if input_file == '-':
        input_file = None
    with misc.smart_open_read(input_file, mode = 'rb', try_gzip = True) as fh, \
            misc.smart_open_write(output_file) as ofh:
        #if i >= 500000: break
        reader = csv.reader(fh, dialect = 'excel-tab')
        writer = csv.writer(ofh, dialect = 'excel-tab',
                lineterminator = os.linesep,
                quoting = csv.QUOTE_NONE, quotechar = '|')
        for l in reader:
            i += 1
            #if i % int(1e5) == 0:
            #    print '\r%d...' %(i), ; sys.stdout.flush() # report progress
            if len(l) > 1 and l[2] == field_name:
                attr = parse_attributes(l[8])
                type_ = attr['gene_biotype']
                if type_ in ['protein_coding','polymorphic_pseudogene']:
                    # test whether chromosome is valid
                    chrom = l[0]
                    m = chrom_pat.match(chrom)
                    if m is None:
                        excluded_chromosomes.add(chrom)
                        continue
                    chromosomes.add(chrom)
                    writer.writerow(l)
                    exons += 1
    logger.info('Done! '
            '(Parsed %d lines.)', i)
    logger.info('')
    logger.info('Gene chromosomes (%d):', len(chromosomes))
    logger.info('\t' + ', '.join(sorted(chromosomes)))
    logger.info('')
    logger.info('Excluded chromosomes (%d):', len(excluded_chromosomes))
    logger.info('\t' + ', '.join(sorted(excluded_chromosomes)))
    logger.info('')
    logger.info('Total no. of exons: %d' %(exons))
    return 0
python
{ "resource": "" }
q37935
titleCounts
train
def titleCounts(readsAlignments):
    """
    Count how many times each title is matched in a readsAlignments
    instance — a quick way to see which titles matched and how often.

    @param readsAlignments: A L{dark.alignments.ReadsAlignments} instance.
    @return: A C{dict} mapping each title to the C{int} number of reads
        that matched it.
    """
    counts = defaultdict(int)
    for alignment in (a for readAlignments in readsAlignments
                      for a in readAlignments):
        counts[alignment.subjectTitle] += 1
    return counts
python
{ "resource": "" }
q37936
TitleAlignment.toDict
train
def toDict(self):
    """
    Serialize this title alignment to a plain dictionary.

    @return: A C{dict} with the read and all HSPs in dict form.
    """
    hspDicts = [hsp.toDict() for hsp in self.hsps]
    return {'hsps': hspDicts, 'read': self.read.toDict()}
python
{ "resource": "" }
q37937
TitleAlignments.hasScoreBetterThan
train
def hasScoreBetterThan(self, score):
    """
    Is there an HSP with a score better than a given value?

    Note: HSPs in an alignment cannot be assumed to be sorted in
    decreasing order (as they are in BLAST output), so every HSP must
    be examined, not just the first per alignment.

    @return: A C{bool}, C{True} if at least one HSP in this title's
        alignments has a score better than C{score}.
    """
    return any(hsp.betterThan(score) for hsp in self.hsps())
python
{ "resource": "" }
q37938
TitleAlignments.coverage
train
def coverage(self):
    """
    Compute the fraction of this title's sequence matched by its reads.

    @return: The C{float} fraction of the title sequence covered by at
        least one matching read.
    """
    matched = ReadIntervals(self.subjectLength)
    for region in self.hsps():
        matched.add(region.subjectStart, region.subjectEnd)
    return matched.coverage()
python
{ "resource": "" }
q37939
TitleAlignments.coverageInfo
train
def coverageInfo(self):
    """
    Collect the base evidence for every matched location of the title
    sequence.

    @return: A C{dict} mapping C{int} subject offsets to unsorted lists
        of (score, base) 2-tuples, one per read base matching the
        subject at that offset (whisker regions excluded).
    """
    evidence = defaultdict(list)
    for titleAlignment in self:
        read = titleAlignment.read
        for hsp in titleAlignment.hsps:
            hspScore = hsp.score.score
            for subjectOffset, base, _ in read.walkHSP(
                    hsp, includeWhiskers=False):
                evidence[subjectOffset].append((hspScore, base))
    return evidence
python
{ "resource": "" }
q37940
TitleAlignments.residueCounts
train
def residueCounts(self, convertCaseTo='upper'):
    """
    Count residue frequencies at all sequence locations matched by reads.

    @param convertCaseTo: A C{str}, 'upper', 'lower', or 'none'. If
        'none', case is preserved, so both cases of a residue may appear
        in the result (usually due to low complexity masking).
    @raise ValueError: for any other C{convertCaseTo} value.
    @return: A C{dict} mapping C{int} offsets into the title sequence to
        C{Counter}s of residues seen at that offset.
    """
    if convertCaseTo == 'none':
        convert = lambda residue: residue
    elif convertCaseTo == 'lower':
        convert = str.lower
    elif convertCaseTo == 'upper':
        convert = str.upper
    else:
        raise ValueError(
            "convertCaseTo must be one of 'none', 'lower', or 'upper'")
    counts = defaultdict(Counter)
    for titleAlignment in self:
        read = titleAlignment.read
        for hsp in titleAlignment.hsps:
            for subjectOffset, residue, _ in read.walkHSP(hsp):
                counts[subjectOffset][convert(residue)] += 1
    return counts
python
{ "resource": "" }
q37941
TitleAlignments.summary
train
def summary(self):
    """
    Summarize the alignments for this subject.

    @return: A C{dict} with C{str} keys:
        bestScore: the C{float} best score of the matching reads.
        coverage: the C{float} fraction of the subject genome matched
            by at least one read.
        hspCount: the C{int} number of HSPs matching the subject.
        medianScore: the C{float} median score of the matching reads.
        readCount: the C{int} number of reads matching the subject.
        subjectLength: the C{int} length of the subject.
        subjectTitle: the C{str} title of the subject.
    """
    best = self.bestHsp()
    return {
        'bestScore': best.score.score,
        'coverage': self.coverage(),
        'hspCount': self.hspCount(),
        'medianScore': self.medianScore(),
        'readCount': self.readCount(),
        'subjectLength': self.subjectLength,
        'subjectTitle': self.subjectTitle,
    }
python
{ "resource": "" }
q37942
TitleAlignments.toDict
train
def toDict(self):
    """
    Serialize this title's alignments to a plain dictionary.

    @return: A C{dict} with the subject title/length and every title
        alignment in dict form.
    """
    serialized = [titleAlignment.toDict() for titleAlignment in self]
    return {
        'titleAlignments': serialized,
        'subjectTitle': self.subjectTitle,
        'subjectLength': self.subjectLength,
    }
python
{ "resource": "" }
q37943
TitlesAlignments.addTitle
train
def addTitle(self, title, titleAlignments):
    """
    Add a new title to self.

    @param title: A C{str} title.
    @param titleAlignments: An instance of L{TitleAlignments}.
    @raises KeyError: If the title is already present.
    """
    if title in self:
        raise KeyError('Title %r already present in TitlesAlignments '
                       'instance.' % title)
    self[title] = titleAlignments
python
{ "resource": "" }
q37944
TitlesAlignments.filter
train
def filter(self, minMatchingReads=None, minMedianScore=None,
           withScoreBetterThan=None, minNewReads=None, minCoverage=None,
           maxTitles=None, sortOn='maxScore'):
    """
    Filter the titles in self to create another TitlesAlignments.

    @param minMatchingReads: titles that are matched by fewer reads
        are unacceptable.
    @param minMedianScore: sequences that are matched with a median
        bit score that is less are unacceptable.
    @param withScoreBetterThan: if the best score for a title is not
        as good as this value, the title is not acceptable.
    @param minNewReads: The C{float} fraction of its reads by which a new
        title's read set must differ from the read sets of all previously
        seen titles in order for this title to be considered acceptably
        different (and therefore interesting).
    @param minCoverage: The C{float} minimum fraction of the title sequence
        that must be matched by at least one read.
    @param maxTitles: A non-negative C{int} maximum number of titles to
        keep. If more titles than this are present, titles will be sorted
        (according to C{sortOn}) and only the best will be retained.
    @param sortOn: A C{str} attribute to sort on, used only if C{maxTitles}
        is not C{None}. See the C{sortTitles} method below for the legal
        values.
    @raise: C{ValueError} if C{maxTitles} is less than zero or the value of
        C{sortOn} is unknown.
    @return: A new L{TitlesAlignments} instance containing only the
        matching titles.
    """
    # Use a ReadSetFilter only if we're checking that read sets are
    # sufficiently new. The filter is created lazily and cached on self so
    # successive filter() calls share the record of previously seen reads.
    if minNewReads is None:
        readSetFilter = None
    else:
        if self.readSetFilter is None:
            self.readSetFilter = ReadSetFilter(minNewReads)
        readSetFilter = self.readSetFilter

    # Start from an empty result; accepted titles are copied into it below.
    result = TitlesAlignments(
        self.readsAlignments, self.scoreClass, self.readSetFilter,
        importReadsAlignmentsTitles=False)

    if maxTitles is not None and len(self) > maxTitles:
        if maxTitles < 0:
            raise ValueError('maxTitles (%r) cannot be negative.' %
                             maxTitles)
        else:
            # There are too many titles. Make a sorted list of them so
            # we loop through them (below) in the desired order and can
            # break when/if we've reached the maximum. We can't just
            # take the first maxTitles titles from the sorted list now,
            # as some of those titles might later be discarded by the
            # filter and then we'd return a result with fewer titles
            # than we should.
            titles = self.sortTitles(sortOn)
    else:
        titles = self.keys()

    for title in titles:
        # Test max titles up front, as it may be zero.
        if maxTitles is not None and len(result) == maxTitles:
            break
        titleAlignments = self[title]
        if (minMatchingReads is not None and
                titleAlignments.readCount() < minMatchingReads):
            continue

        # To compare the median score with another score, we must
        # convert both values to instances of the score class used in
        # this data set so they can be compared without us needing to
        # know if numerically greater scores are considered better or
        # not.
        if (minMedianScore is not None and
                self.scoreClass(titleAlignments.medianScore()) <
                self.scoreClass(minMedianScore)):
            continue

        if (withScoreBetterThan is not None and not
                titleAlignments.hasScoreBetterThan(withScoreBetterThan)):
            continue

        if (minCoverage is not None and
                titleAlignments.coverage() < minCoverage):
            continue

        # The read-set novelty test runs last because it records state
        # (accepted read sets) as a side effect.
        if (readSetFilter and not
                readSetFilter.accept(title, titleAlignments)):
            continue

        result.addTitle(title, titleAlignments)
    return result
python
{ "resource": "" }
q37945
TitlesAlignments.hsps
train
def hsps(self):
    """
    Get all HSPs for all the alignments for all titles.

    @return: A generator yielding L{dark.hsp.HSP} instances.
    """
    # Walk titles -> alignments -> HSPs, yielding lazily.
    for titleAlignments in self.values():
        for alignment in titleAlignments:
            for hsp in alignment.hsps:
                yield hsp
python
{ "resource": "" }
q37946
TitlesAlignments.sortTitles
train
def sortTitles(self, by):
    """
    Sort titles by a given attribute and then by title.

    @param by: A C{str}, one of 'length', 'maxScore', 'medianScore',
        'readCount', or 'title'.
    @raise ValueError: If an unknown C{by} value is given.
    @return: A sorted C{list} of titles.
    """
    # Sort by the secondary key (the title) first; the primary sort below
    # is stable, so ties remain in title order.
    titles = sorted(iter(self))

    if by == 'title':
        return titles

    # Primary sort keys, all sorted in descending order.
    keyFunctions = {
        'length': lambda title: self[title].subjectLength,
        'maxScore': lambda title: self[title].bestHsp(),
        'medianScore': lambda title: self.scoreClass(
            self[title].medianScore()),
        'readCount': lambda title: self[title].readCount(),
    }

    keyFunction = keyFunctions.get(by)
    if keyFunction is None:
        raise ValueError('Sort attribute must be one of "length", '
                         '"maxScore", "medianScore", "readCount", "title".')
    return sorted(titles, reverse=True, key=keyFunction)
python
{ "resource": "" }
q37947
TitlesAlignments.summary
train
def summary(self, sortOn=None):
    """
    Summarize all the alignments for this title.

    @param sortOn: A C{str} attribute to sort titles on. One of 'length',
        'maxScore', 'medianScore', 'readCount', or 'title'.
    @raise ValueError: If an unknown C{sortOn} value is given.
    @return: A generator that yields C{dict} instances as produced by
        C{TitleAlignments} (see class earlier in this file), sorted by
        C{sortOn}.
    """
    # Without a sort attribute, iterate titles in their natural order.
    if sortOn is None:
        titles = self
    else:
        titles = self.sortTitles(sortOn)
    for title in titles:
        yield self[title].summary()
python
{ "resource": "" }
q37948
TitlesAlignments.tabSeparatedSummary
train
def tabSeparatedSummary(self, sortOn=None):
    """
    Summarize all the alignments for this title as multi-line string with
    TAB-separated values on each line.

    @param sortOn: A C{str} attribute to sort titles on. One of 'length',
        'maxScore', 'medianScore', 'readCount', or 'title'.
    @raise ValueError: If an unknown C{sortOn} value is given.
    @return: A newline-separated C{str}, each line with a summary of a
        title. Each summary line is TAB-separated.
    """
    # The order of the fields below is somewhat arbitrary. The subject
    # titles are last because they are so variable in length. Putting them
    # last makes it more likely that the initial columns in printed output
    # will be easier to read down.
    #
    # Note that post-processing scripts rely on this field ordering, so
    # fields must not be reordered or inserted (appending is probably
    # safe, but be careful / think).
    #
    # A TAB-separated file can easily be read by awk using e.g.,
    # awk 'BEGIN {FS = "\t"} ...'
    template = '\t'.join((
        '%(coverage)f',
        '%(medianScore)f',
        '%(bestScore)f',
        '%(readCount)d',
        '%(hspCount)d',
        '%(subjectLength)d',
        '%(subjectTitle)s',
    ))
    return '\n'.join(template % titleSummary
                     for titleSummary in self.summary(sortOn))
python
{ "resource": "" }
q37949
TitlesAlignments.toDict
train
def toDict(self):
    """
    Get information about the titles alignments as a dictionary.

    @return: A C{dict} representation of the titles aligments.
    """
    # Serialize every title's alignments; self is a mapping from title to
    # a TitleAlignments instance.
    titles = {title: titleAlignments.toDict()
              for title, titleAlignments in self.items()}
    return {
        'scoreClass': self.scoreClass.__name__,
        'titles': titles,
    }
python
{ "resource": "" }
q37950
addFASTAFilteringCommandLineOptions
train
def addFASTAFilteringCommandLineOptions(parser):
    """
    Add standard FASTA filtering command-line options to an argparse parser.

    These are options that can be used to select or omit entire FASTA
    records, NOT options that change them (for that see
    addFASTAEditingCommandLineOptions).

    @param parser: An C{argparse.ArgumentParser} instance.
    """
    parser.add_argument(
        '--minLength', type=int,
        help='The minimum sequence length')

    parser.add_argument(
        '--maxLength', type=int,
        help='The maximum sequence length')

    parser.add_argument(
        '--whitelist', action='append',
        help='Sequence titles (ids) that should be whitelisted')

    parser.add_argument(
        '--blacklist', action='append',
        help='Sequence titles (ids) that should be blacklisted')

    parser.add_argument(
        '--whitelistFile',
        help=('The name of a file that contains sequence titles (ids) that '
              'should be whitelisted, one per line'))

    parser.add_argument(
        '--blacklistFile',
        help=('The name of a file that contains sequence titles (ids) that '
              'should be blacklisted, one per line'))

    parser.add_argument(
        '--titleRegex', help='A regex that sequence titles (ids) must match.')

    parser.add_argument(
        '--negativeTitleRegex',
        help='A regex that sequence titles (ids) must not match.')

    # A mutually exclusive group for --keepSequences and --removeSequences.
    group = parser.add_mutually_exclusive_group()

    group.add_argument(
        '--keepSequences',
        help=('Specify (1-based) ranges of sequence numbers that should be '
              'kept. E.g., --keepSequences 1-3,5 will output just the 1st, '
              '2nd, 3rd, and 5th sequences. All others will be omitted.'))

    group.add_argument(
        '--removeSequences',
        help=('Specify (1-based) ranges of sequence numbers that should be '
              'removed. E.g., --removeSequences 1-3,5 will output all but '
              'the 1st, 2nd, 3rd, and 5th sequences. All others will be '
              # Fixed typo: was 'ouput'.
              'output.'))

    parser.add_argument(
        '--head', type=int, metavar='N',
        help='Only the first N sequences will be printed.')

    parser.add_argument(
        '--removeDuplicates', action='store_true', default=False,
        help=('Duplicate reads will be removed, based only on '
              'sequence identity. The first occurrence is kept.'))

    parser.add_argument(
        '--removeDuplicatesById', action='store_true', default=False,
        help=('Duplicate reads will be removed, based only on '
              'read id. The first occurrence is kept.'))

    # See the docstring for dark.reads.Reads.filter for more detail on
    # randomSubset.
    parser.add_argument(
        '--randomSubset', type=int,
        help=('An integer giving the number of sequences that should be '
              'kept. These will be selected at random.'))

    # See the docstring for dark.reads.Reads.filter for more detail on
    # trueLength.
    parser.add_argument(
        '--trueLength', type=int,
        help=('The number of reads in the FASTA input. Only to be used with '
              'randomSubset'))

    parser.add_argument(
        '--sampleFraction', type=float,
        help=('A [0.0, 1.0] C{float} indicating a fraction of the reads that '
              'should be allowed to pass through the filter. The sample size '
              'will only be approximately the product of the sample fraction '
              'and the number of reads. The sample is taken at random.'))

    parser.add_argument(
        '--sequenceNumbersFile',
        help=('A file of (1-based) sequence numbers to retain. Numbers must '
              'be one per line.'))
python
{ "resource": "" }
q37951
parseFASTAFilteringCommandLineOptions
train
def parseFASTAFilteringCommandLineOptions(args, reads):
    """
    Examine parsed FASTA filtering command-line options and return filtered
    reads.

    @param args: An argparse namespace, as returned by the argparse
        C{parse_args} function.
    @param reads: A C{Reads} instance to filter.
    @return: The filtered C{Reads} instance.
    """
    # Convert 1-based range strings (when given) into 0-based index sets.
    if args.keepSequences:
        keepSequences = parseRangeString(args.keepSequences,
                                         convertToZeroBased=True)
    else:
        keepSequences = None

    if args.removeSequences:
        removeSequences = parseRangeString(args.removeSequences,
                                           convertToZeroBased=True)
    else:
        removeSequences = None

    whitelist = set(args.whitelist) if args.whitelist else None
    blacklist = set(args.blacklist) if args.blacklist else None

    return reads.filter(
        minLength=args.minLength, maxLength=args.maxLength,
        whitelist=whitelist, blacklist=blacklist,
        whitelistFile=args.whitelistFile, blacklistFile=args.blacklistFile,
        titleRegex=args.titleRegex,
        negativeTitleRegex=args.negativeTitleRegex,
        keepSequences=keepSequences, removeSequences=removeSequences,
        head=args.head, removeDuplicates=args.removeDuplicates,
        removeDuplicatesById=args.removeDuplicatesById,
        randomSubset=args.randomSubset, trueLength=args.trueLength,
        sampleFraction=args.sampleFraction,
        sequenceNumbersFile=args.sequenceNumbersFile)
python
{ "resource": "" }
q37952
addFASTAEditingCommandLineOptions
train
def addFASTAEditingCommandLineOptions(parser):
    """
    Add standard FASTA editing command-line options to an argparse parser.

    These are options that can be used to alter FASTA records, NOT options
    that simply select or reject those things (for those see
    addFASTAFilteringCommandLineOptions).

    @param parser: An C{argparse.ArgumentParser} instance.
    """
    # A mutually exclusive group for --keepSites, --keepSitesFile,
    # --removeSites, and --removeSitesFile.
    group = parser.add_mutually_exclusive_group()

    # In the 4 options below, the 'indices' alternate names are kept for
    # backwards compatibility.
    group.add_argument(
        '--keepSites', '--keepIndices',
        help=('Specify 1-based sequence sites to keep. All other sites will '
              'be removed. The sites must be given in the form e.g., '
              '24,100-200,260. Note that the requested sites will be taken '
              'from the input sequences in order, not in the order given by '
              '--keepSites. I.e., --keepSites 5,8-10 will get you the same '
              'result as --keepSites 8-10,5.'))

    group.add_argument(
        '--keepSitesFile', '--keepIndicesFile',
        help=('Specify a file containing 1-based sites to keep. All other '
              'sequence sites will be removed. Lines in the file must be '
              'given in the form e.g., 24,100-200,260. See --keepSites for '
              'more detail.'))

    group.add_argument(
        '--removeSites', '--removeIndices',
        help=('Specify 1-based sites to remove. All other sequence sites '
              'will be kept. The sites must be given in the form e.g., '
              '24,100-200,260. See --keepSites for more detail.'))

    group.add_argument(
        '--removeSitesFile', '--removeIndicesFile',
        help=('Specify a file containing 1-based sites to remove. All other '
              'sequence sites will be kept. Lines in the file must be given '
              'in the form e.g., 24,100-200,260. See --keepSites for more '
              'detail.'))

    parser.add_argument(
        '--removeGaps', action='store_true', default=False,
        help="If True, gap ('-') characters in sequences will be removed.")

    parser.add_argument(
        '--truncateTitlesAfter',
        help=('A string that sequence titles (ids) will be truncated beyond. '
              'If the truncated version of a title has already been seen, '
              'that title will be skipped.'))

    parser.add_argument(
        '--removeDescriptions', action='store_true', default=False,
        help=('Read id descriptions will be removed. The '
              'description is the part of a sequence id after the '
              'first whitespace (if any).'))

    # The two lambda options below are passed (as strings) to eval by the
    # filtering code; see the help text for the names in scope there.
    parser.add_argument(
        '--idLambda', metavar='LAMBDA-FUNCTION',
        help=('A one-argument function taking and returning a read id. '
              'E.g., --idLambda "lambda id: id.split(\'_\')[0]" or '
              '--idLambda "lambda id: id[:10]". If the function returns None, '
              'the read will be filtered out.'))

    parser.add_argument(
        '--readLambda', metavar='LAMBDA-FUNCTION',
        help=('A one-argument function taking and returning a read. '
              'E.g., --readLambda "lambda r: Read(r.id.split(\'_\')[0], '
              'r.sequence.strip(\'-\')". Make sure to also modify the quality '
              'string if you change the length of a FASTQ sequence. If the '
              'function returns None, the read will be filtered out. The '
              'function will be passed to eval with the dark.reads classes '
              'Read, DNARead, AARead, etc. all in scope.'))

    parser.add_argument(
        '--reverse', action='store_true', default=False,
        help=('Reverse the sequences. Note that this is NOT reverse '
              'complementing.'))

    parser.add_argument(
        '--reverseComplement', action='store_true', default=False,
        help='Reverse complement the sequences.')
python
{ "resource": "" }
q37953
parseFASTAEditingCommandLineOptions
train
def parseFASTAEditingCommandLineOptions(args, reads):
    """
    Examine parsed FASTA editing command-line options and return information
    about kept sites and sequences.

    @param args: An argparse namespace, as returned by the argparse
        C{parse_args} function.
    @param reads: A C{Reads} instance to filter.
    @return: The filtered C{Reads} instance.
    """
    removeGaps = args.removeGaps
    removeDescriptions = args.removeDescriptions
    truncateTitlesAfter = args.truncateTitlesAfter
    # Convert 1-based site range strings into 0-based site sets.
    keepSites = (
        parseRangeString(args.keepSites, convertToZeroBased=True)
        if args.keepSites else None)

    if args.keepSitesFile:
        keepSites = keepSites or set()
        with open(args.keepSitesFile) as fp:
            # NOTE(review): enumerate(fp) starts at 0, so the error message
            # below reports line numbers one lower than the actual file
            # line — confirm whether this off-by-one is intended.
            for lineNumber, line in enumerate(fp):
                try:
                    keepSites.update(
                        parseRangeString(line, convertToZeroBased=True))
                except ValueError as e:
                    raise ValueError(
                        'Keep sites file %r line %d could not be parsed: '
                        '%s' % (args.keepSitesFile, lineNumber, e))

    removeSites = (
        parseRangeString(args.removeSites, convertToZeroBased=True)
        if args.removeSites else None)

    if args.removeSitesFile:
        removeSites = removeSites or set()
        with open(args.removeSitesFile) as fp:
            # Same 0-based line numbering caveat as for --keepSitesFile.
            for lineNumber, line in enumerate(fp):
                try:
                    removeSites.update(
                        parseRangeString(line, convertToZeroBased=True))
                except ValueError as e:
                    raise ValueError(
                        'Remove sites file %r line %d parse error: %s'
                        % (args.removeSitesFile, lineNumber, e))

    return reads.filter(
        removeGaps=removeGaps,
        truncateTitlesAfter=truncateTitlesAfter,
        removeDescriptions=removeDescriptions,
        idLambda=args.idLambda,
        readLambda=args.readLambda,
        keepSites=keepSites,
        removeSites=removeSites,
        reverse=args.reverse,
        reverseComplement=args.reverseComplement)
python
{ "resource": "" }
q37954
XMLRecordsReader.records
train
def records(self):
    """
    Yield BLAST records, as read by the BioPython NCBIXML.parse method.
    Set self.params from data in the first record.
    """
    with as_handle(self._filename) as fp:
        for index, record in enumerate(NCBIXML.parse(fp)):
            # The global BLAST parameters are taken from the first record
            # only; every record (including the first) is yielded.
            if index == 0:
                self.params = self._convertBlastParamsToDict(record)
            yield record
python
{ "resource": "" }
q37955
XMLRecordsReader.saveAsJSON
train
def saveAsJSON(self, fp):
    """
    Write the records out as JSON. The first JSON object saved contains
    the BLAST parameters.

    @param fp: A C{str} file pointer to write to.
    """
    for count, record in enumerate(self.records()):
        # The parameters line precedes the first record.
        if count == 0:
            print(dumps(self.params, separators=(',', ':')), file=fp)
        print(dumps(self._convertBlastRecordToDict(record),
                    separators=(',', ':')), file=fp)
python
{ "resource": "" }
q37956
JSONRecordsReader._open
train
def _open(self, filename):
    """
    Open the input file. Set self._fp to point to it. Read the first
    line of parameters.

    @param filename: A C{str} filename containing JSON BLAST records.
    @raise ValueError: if the first line of the file isn't valid JSON,
        if the input file is empty, or if the JSON does not contain an
        'application' key.
    """
    if filename.endswith('.bz2'):
        # Python 3's bz2.open can decode text directly; under Python 2 we
        # fall back to BZ2File (bytes mode).
        if six.PY3:
            self._fp = bz2.open(filename, mode='rt', encoding='UTF-8')
        else:
            self._fp = bz2.BZ2File(filename)
    else:
        self._fp = open(filename)

    # The first line of the file holds the global BLAST parameters.
    line = self._fp.readline()
    if not line:
        raise ValueError('JSON file %r was empty.' % self._filename)

    try:
        # line[:-1] drops the trailing newline before JSON parsing.
        self.params = loads(line[:-1])
    except ValueError as e:
        raise ValueError(
            'Could not convert first line of %r to JSON (%s). '
            'Line is %r.' % (self._filename, e, line[:-1]))
    else:
        # Old-format files lacked the parameters line; detect them by the
        # missing 'application' key and ask the user to re-convert.
        if 'application' not in self.params:
            raise ValueError(
                '%r appears to be an old JSON file with no BLAST global '
                'parameters. Please re-run convert-blast-xml-to-json.py '
                'to convert it to the newest format.' % self._filename)
python
{ "resource": "" }
q37957
JSONRecordsReader.readAlignments
train
def readAlignments(self, reads):
    """
    Read lines of JSON from self._filename, convert them to read alignments
    and yield them.

    @param reads: An iterable of L{Read} instances, corresponding to the
        reads that were given to BLAST.
    @raise ValueError: If any of the lines in the file cannot be converted
        to JSON.
    @return: A generator that yields C{dark.alignments.ReadAlignments}
        instances.
    """
    if self._fp is None:
        self._open(self._filename)

    reads = iter(reads)

    try:
        # start=2 because _open already consumed file line 1 (the BLAST
        # parameters), so the first record line is file line 2.
        for lineNumber, line in enumerate(self._fp, start=2):
            try:
                # line[:-1] drops the trailing newline before parsing.
                record = loads(line[:-1])
            except ValueError as e:
                raise ValueError(
                    'Could not convert line %d of %r to JSON (%s). '
                    'Line is %r.' %
                    (lineNumber, self._filename, e, line[:-1]))
            else:
                # Each record line must be matched by a corresponding
                # input read; the reads iterable running dry is an error.
                try:
                    read = next(reads)
                except StopIteration:
                    raise ValueError(
                        'Read generator failed to yield read number %d '
                        'during parsing of BLAST file %r.' %
                        (lineNumber - 1, self._filename))
                else:
                    alignments = self._dictToAlignments(record, read)
                    yield ReadAlignments(read, alignments)
    finally:
        # Always close and reset the file pointer, even if the consumer
        # abandons the generator early.
        self._fp.close()
        self._fp = None
python
{ "resource": "" }
q37958
_makeComplementTable
train
def _makeComplementTable(complementData): """ Make a sequence complement table. @param complementData: A C{dict} whose keys and values are strings of length one. A key, value pair indicates a substitution that should be performed during complementation. @return: A 256 character string that can be used as a translation table by the C{translate} method of a Python string. """ table = list(range(256)) for _from, to in complementData.items(): table[ord(_from[0].lower())] = ord(to[0].lower()) table[ord(_from[0].upper())] = ord(to[0].upper()) return ''.join(map(chr, table))
python
{ "resource": "" }
q37959
addFASTACommandLineOptions
train
def addFASTACommandLineOptions(parser):
    """
    Add standard command-line options to an argparse parser.

    @param parser: An C{argparse.ArgumentParser} instance.
    """
    parser.add_argument(
        '--fastaFile', type=open, default=sys.stdin, metavar='FILENAME',
        help=('The name of the FASTA input file. Standard input will be read '
              'if no file name is given.'))

    # readClassNameToClass maps class-name strings to the read classes;
    # its keys double as the valid --readClass choices.
    parser.add_argument(
        '--readClass', default='DNARead', choices=readClassNameToClass,
        metavar='CLASSNAME',
        help=('If specified, give the type of the reads in the input. '
              'Possible choices: %s.' % ', '.join(readClassNameToClass)))

    # A mutually exclusive group for either --fasta, --fastq, or --fasta-ss
    group = parser.add_mutually_exclusive_group()

    group.add_argument(
        '--fasta', default=False, action='store_true',
        help=('If specified, input will be treated as FASTA. This is the '
              'default.'))

    group.add_argument(
        '--fastq', default=False, action='store_true',
        help='If specified, input will be treated as FASTQ.')

    group.add_argument(
        '--fasta-ss', dest='fasta_ss', default=False, action='store_true',
        help=('If specified, input will be treated as PDB FASTA '
              '(i.e., regular FASTA with each sequence followed by its '
              'structure).'))
python
{ "resource": "" }
q37960
parseFASTACommandLineOptions
train
def parseFASTACommandLineOptions(args):
    """
    Examine parsed command-line options and return a Reads instance.

    @param args: An argparse namespace, as returned by the argparse
        C{parse_args} function.
    @return: A C{Reads} subclass instance, depending on the type of FASTA
        file given.
    """
    # When no input type flag was given, fall back to plain FASTA.
    if not any((args.fasta, args.fastq, args.fasta_ss)):
        args.fasta = True

    readClass = readClassNameToClass[args.readClass]

    # The reader classes are imported lazily so only the needed module is
    # loaded.
    if args.fasta:
        from dark.fasta import FastaReads
        return FastaReads(args.fastaFile, readClass=readClass)
    if args.fastq:
        from dark.fastq import FastqReads
        return FastqReads(args.fastaFile, readClass=readClass)
    from dark.fasta_ss import SSFastaReads
    return SSFastaReads(args.fastaFile, readClass=readClass)
python
{ "resource": "" }
q37961
_NucleotideRead.translations
train
def translations(self):
    """
    Yield all six translations of a nucleotide sequence.

    @return: A generator that produces six L{TranslatedRead} instances.
    """
    # Index 0 is the forward sequence, index 1 its reverse complement.
    sequences = (self.sequence, self.reverseComplement().sequence)
    for reverseComplemented in (False, True):
        seq = sequences[reverseComplemented]
        for frame in range(3):
            # Skip 0, 1, or 2 initial bases according to the frame. The
            # slice makes a copy, so padding below is safe.
            suffix = seq[frame:]
            # Pad with 'N' so the translated length is a multiple of 3.
            remainder = len(suffix) % 3
            if remainder:
                suffix += 'N' * (3 - remainder)
            yield TranslatedRead(self, translate(suffix), frame,
                                 reverseComplemented)
python
{ "resource": "" }
q37962
_NucleotideRead.reverseComplement
train
def reverseComplement(self):
    """
    Reverse complement a nucleotide sequence.

    @return: The reverse complemented sequence as an instance of the
        current class.
    """
    # Reverse the quality string too (when present) so qualities stay
    # aligned with their bases.
    if self.quality is None:
        quality = None
    else:
        quality = self.quality[::-1]
    complemented = self.sequence.translate(self.COMPLEMENT_TABLE)
    return self.__class__(self.id, complemented[::-1], quality)
python
{ "resource": "" }
q37963
AARead.checkAlphabet
train
def checkAlphabet(self, count=10):
    """
    A function which checks if an AA read really contains amino acids. This
    additional testing is needed, because the letters in the DNA alphabet
    are also in the AA alphabet.

    @param count: An C{int}, indicating how many bases or amino acids at
        the start of the sequence should be considered. If C{None}, all
        bases are checked.
    @return: C{True} if the alphabet characters in the first C{count}
        positions of sequence is a subset of the allowed alphabet for this
        read class, or if the read class has a C{None} alphabet.
    @raise ValueError: If a DNA sequence has been passed to AARead().
    """
    # Delegate the generic alphabet check to the base class (the Python 2
    # branch cannot use no-argument super()).
    if six.PY3:
        readLetters = super().checkAlphabet(count)
    else:
        readLetters = Read.checkAlphabet(self, count)
    # NOTE(review): this guard uses a hard-coded 10 rather than C{count} —
    # presumably deliberate (very short reads are too ambiguous to call as
    # DNA), but worth confirming.
    if len(self) > 10 and readLetters.issubset(set('ACGT')):
        raise ValueError('It looks like a DNA sequence has been passed to '
                         'AARead().')
    return readLetters
python
{ "resource": "" }
q37964
AARead.ORFs
train
def ORFs(self, openORFs=False): """ Find all ORFs in our sequence. @param openORFs: If C{True} allow ORFs that do not have a start codon and/or do not have a stop codon. @return: A generator that yields AAReadORF instances that correspond to the ORFs found in the AA sequence. """ # Return open ORFs to the left and right and closed ORFs within the # sequence. if openORFs: ORFStart = 0 inOpenORF = True # open on the left inORF = False for index, residue in enumerate(self.sequence): if residue == '*': if inOpenORF: if index: yield AAReadORF(self, ORFStart, index, True, False) inOpenORF = False elif inORF: if ORFStart != index: yield AAReadORF(self, ORFStart, index, False, False) inORF = False elif residue == 'M': if not inOpenORF and not inORF: ORFStart = index + 1 inORF = True # End of sequence. Yield the final ORF, open to the right, if # there is one and it has non-zero length. length = len(self.sequence) if inOpenORF and length > 0: yield AAReadORF(self, ORFStart, length, True, True) elif inORF and ORFStart < length: yield AAReadORF(self, ORFStart, length, False, True) # Return only closed ORFs. else: inORF = False for index, residue in enumerate(self.sequence): if residue == 'M': if not inORF: inORF = True ORFStart = index + 1 elif residue == '*': if inORF: if not ORFStart == index: yield AAReadORF(self, ORFStart, index, False, False) inORF = False
python
{ "resource": "" }
q37965
SSAARead.newFromSites
train
def newFromSites(self, sites=None, exclude=False):
    """
    Create a new read from self, with only certain sites.

    @param sites: A set of C{int} 0-based sites (i.e., indices) in
        sequences that should be kept. If C{None} (the default), all sites
        are kept.
    @param exclude: If C{True} the C{sites} will be excluded, not included.
    """
    # The docstring has always promised that sites=None keeps everything,
    # but there was previously no default and a None value crashed. Treat
    # None as "all sites" before applying exclude.
    if sites is None:
        sites = set(range(len(self)))
    if exclude:
        sites = set(range(len(self))) - sites

    newSequence = []
    newStructure = []
    # Keep sequence and structure characters in lockstep so the new read's
    # structure still lines up with its sequence.
    for index, (base, structure) in enumerate(zip(self.sequence,
                                                  self.structure)):
        if index in sites:
            newSequence.append(base)
            newStructure.append(structure)
    read = self.__class__(self.id, ''.join(newSequence),
                          ''.join(newStructure))
    return read
python
{ "resource": "" }
q37966
Reads.filterRead
train
def filterRead(self, read):
    """
    Filter a read, according to our set of filters.

    @param read: A C{Read} instance or one of its subclasses.
    @return: C{False} if the read fails any of our filters, else the
        C{Read} instance returned by our list of filters.
    """
    # Thread the read through each filter in turn; a False result from
    # any filter rejects the read outright.
    result = read
    for filterFunc in self._filters:
        result = filterFunc(result)
        if result is False:
            return False
    return result
python
{ "resource": "" }
q37967
Reads.summarizePosition
train
def summarizePosition(self, index):
    """
    Compute residue counts at a specific sequence index.

    @param index: an C{int} index into the sequence.
    @return: A C{dict} with the count of too-short (excluded) sequences,
        and a Counter instance giving the residue counts.
    """
    excludedCount = 0
    countAtPosition = Counter()

    # Reads too short to have a residue at this index raise IndexError
    # and are counted as excluded rather than contributing a residue.
    for read in self:
        try:
            residue = read.sequence[index]
        except IndexError:
            excludedCount += 1
        else:
            countAtPosition[residue] += 1

    return {
        'excludedCount': excludedCount,
        'countAtPosition': countAtPosition,
    }
python
{ "resource": "" }
q37968
condition2checker
train
def condition2checker(condition):
    """Converts different condition types to callback.

    A string is treated as an fnmatch glob against C{info.filename}, a
    non-empty list/tuple of integers matches C{info.index} membership, and
    a callable is returned unchanged.

    @raise TypeError: If C{condition} is none of the supported types
        (including an empty list/tuple, which previously crashed with
        IndexError).
    """
    if isinstance(condition, string_types):
        def smatcher(info):
            return fnmatch.fnmatch(info.filename, condition)
        return smatcher
    # Guard against an empty sequence: condition[0] would raise IndexError.
    elif (isinstance(condition, (list, tuple)) and condition and
          isinstance(condition[0], integer_types)):
        def imatcher(info):
            return info.index in condition
        return imatcher
    elif callable(condition):
        return condition
    else:
        # Previously a bare TypeError with no message; say what was wrong.
        raise TypeError(
            'condition must be a glob string, a non-empty sequence of '
            'integers, or a callable, not %r' % (condition,))
python
{ "resource": "" }
q37969
GeneSetEnrichmentAnalysis.get_static_enrichment
train
def get_static_enrichment(
        self, genes: Iterable[str], pval_thresh: float,
        adjust_pval_thresh: bool = True, K_min: int = 3,
        gene_set_ids: Iterable[str] = None) -> StaticGSEResult:
    """Find enriched gene sets in a set of genes.

    Parameters
    ----------
    genes : set of str
        The set of genes to test for gene set enrichment.
    pval_thresh : float
        The significance level (p-value threshold) to use in the analysis.
    adjust_pval_thresh : bool, optional
        Whether to adjust the p-value threshold using a Bonferroni
        correction. (Warning: This is a very conservative correction!)
        [True]
    K_min : int, optional
        The minimum number of gene set genes present in the analysis. [3]
    gene_set_ids : Iterable or None
        A list of gene set IDs to test. If ``None``, all gene sets are
        tested that meet the :attr:`K_min` criterion.

    Returns
    -------
    list of `StaticGSEResult`
        A list of all significantly enriched gene sets.
    """
    # NOTE(review): the return annotation says StaticGSEResult but the
    # function returns a list of them (as the docstring states).
    genes = set(genes)
    gene_set_coll = self._gene_set_coll
    gene_sets = self._gene_set_coll.gene_sets
    gene_memberships = self._gene_memberships

    sorted_genes = sorted(genes)

    # test only some terms?
    if gene_set_ids is not None:
        gs_indices = np.int64([self._gene_set_coll.index(id_)
                               for id_ in gene_set_ids])
        gene_sets = [gene_set_coll[id_] for id_ in gene_set_ids]
        # gene_set_coll = GeneSetCollection(gene_sets)
        gene_memberships = gene_memberships[:, gs_indices]  # not a view!

    # determine K's (per-gene-set membership totals over all genes)
    K_vec = np.sum(gene_memberships, axis=0, dtype=np.int64)

    # exclude terms with too few genes
    sel = np.nonzero(K_vec >= K_min)[0]
    K_vec = K_vec[sel]
    gene_sets = [gene_sets[j] for j in sel]
    gene_memberships = gene_memberships[:, sel]

    # determine k's, ignoring unknown genes
    unknown = 0
    sel = []
    filtered_genes = []
    logger.debug('Looking up indices for %d genes...', len(sorted_genes))
    for i, g in enumerate(sorted_genes):
        try:
            idx = self._gene_indices[g]
        except KeyError:
            # Gene not in the analysis universe; count it and skip it.
            unknown += 1
        else:
            sel.append(idx)
            filtered_genes.append(g)

    sel = np.int64(sel)
    gene_indices = np.int64(sel)
    # gene_memberships = gene_memberships[sel, :]
    k_vec = np.sum(gene_memberships[sel, :], axis=0, dtype=np.int64)

    if unknown > 0:
        logger.warn('%d / %d unknown genes (%.1f %%), will be ignored.',
                    unknown, len(genes),
                    100 * (unknown / float(len(genes))))

    # determine n and N
    n = len(filtered_genes)
    N, m = gene_memberships.shape
    logger.info('Conducting %d tests.', m)

    # correct p-value threshold, if specified
    final_pval_thresh = pval_thresh
    if adjust_pval_thresh:
        # Bonferroni: divide the threshold by the number of tests.
        final_pval_thresh /= float(m)
        logger.info('Using Bonferroni-corrected p-value threshold: %.1e',
                    final_pval_thresh)

    # calculate p-values and get significantly enriched gene sets
    enriched = []
    logger.debug('N=%d, n=%d', N, n)
    sys.stdout.flush()
    genes = self._valid_genes
    for j in range(m):
        # Hypergeometric tail probability of seeing >= k_vec[j] hits.
        pval = hypergeom.sf(k_vec[j] - 1, N, K_vec[j], n)
        if pval <= final_pval_thresh:
            # found significant enrichment
            # sel_genes = [filtered_genes[i] for i in
            #              np.nonzero(gene_memberships[:, j])[0]]
            sel_genes = [genes[i] for i in
                         np.nonzero(gene_memberships[gene_indices, j])[0]]
            enriched.append(
                StaticGSEResult(gene_sets[j], N, n, set(sel_genes), pval))

    return enriched
python
{ "resource": "" }
q37970
get_connection_string
train
def get_connection_string(connection=None):
    """Return the SQLAlchemy connection string.

    When *connection* is falsy, the string is loaded from the configuration
    file; if no configuration file exists yet, one is created containing
    the default connection string.

    :param connection: an explicit SQLAlchemy connection string (optional)
    :rtype: str
    """
    if connection:
        return connection

    parser = configparser.ConfigParser()
    config_path = defaults.config_file_path

    if os.path.exists(config_path):
        # read back the previously stored connection string
        log.info('fetch database configuration from %s', config_path)
        parser.read(config_path)
        connection = parser['database']['sqlalchemy_connection_string']
        log.info('load connection string from %s: %s', config_path, connection)
    else:
        # first run: persist the default connection string
        connection = defaults.sqlalchemy_connection_string_default
        parser['database'] = {'sqlalchemy_connection_string': connection}
        with open(config_path, 'w') as config_file:
            parser.write(config_file)
            log.info('create configuration file %s', config_path)

    return connection
python
{ "resource": "" }
q37971
export_obo
train
def export_obo(path_to_file, connection=None):
    """Export the database to an OBO file.

    :param path_to_file: path to the export file
    :param connection: connection string (optional)
    :return:
    """
    db = DbManager(connection)
    try:
        db.export_obo(path_to_export_file=path_to_file)
    finally:
        # Bug fix: the session previously leaked when export_obo raised.
        db.session.close()
python
{ "resource": "" }
q37972
DbManager.db_import_xml
train
def db_import_xml(self, url=None, force_download=False, taxids=None, silent=False):
    """Update the UniProt database from a gzipped XML download.

    Steps performed:

    1. download the gzipped XML (may be skipped unless forced)
    2. drop all tables in the database
    3. (re)create all tables in the database
    4. import the version information and the XML content
    5. close the session

    :param Optional[list[int]] taxids: restrict the import to these NCBI
        taxonomy identifiers (passed through to :meth:`import_xml`)
    :param str url: URL to download the XML from
    :param bool force_download: force the method to download
    :param bool silent: passed through to :meth:`import_xml`
    """
    log.info('Update UniProt database from {}'.format(url))
    # NOTE(review): tables are dropped *before* the download runs; a
    # failed download therefore leaves the database empty — confirm this
    # ordering is intentional.
    self._drop_tables()
    xml_gzipped_file_path, version_file_path = self.download(url, force_download)
    self._create_tables()
    self.import_version(version_file_path)
    self.import_xml(xml_gzipped_file_path, taxids, silent)
    self.session.close()
python
{ "resource": "" }
q37973
DbManager.insert_entries
train
def insert_entries(self, entries_xml, taxids=None):
    """Insert UniProt entries from an XML string and commit the session.

    :param str entries_xml: XML string containing ``entry`` elements
    :param Optional[list[int]] taxids: NCBI taxonomy IDs used as a filter
        inside :meth:`insert_entry` (``None`` means no restriction)
    """
    entries = etree.fromstring(entries_xml)
    # free the (potentially very large) raw XML string as early as possible
    del entries_xml

    for entry in entries:
        self.insert_entry(entry, taxids)
        # clear each processed element immediately to keep memory usage low
        entry.clear()
        del entry

    entries.clear()
    del entries

    self.session.commit()
python
{ "resource": "" }
q37974
DbManager.insert_entry
train
def insert_entry(self, entry, taxids):
    """Insert a single UniProt ``entry`` XML node into the session.

    Entries whose taxonomy ID is not contained in *taxids* are skipped.

    :param entry: XML node entry
    :param taxids: Optional[iter[int]] taxids: NCBI taxonomy IDs
    """
    entry_dict = entry.attrib

    # parse the ISO date attributes into datetime objects
    for date_key in ('created', 'modified'):
        entry_dict[date_key] = datetime.strptime(entry_dict[date_key], '%Y-%m-%d')

    taxid = self.get_taxid(entry)

    # skip entries outside the requested taxonomy IDs (if a filter is set)
    if taxids is not None and taxid not in taxids:
        return

    entry_dict = self.update_entry_dict(entry, entry_dict, taxid)
    entry_obj = models.Entry(**entry_dict)
    del entry_dict
    self.session.add(entry_obj)
python
{ "resource": "" }
q37975
DbManager.get_sequence
train
def get_sequence(cls, entry):
    """Build a :class:`pyuniprot.manager.models.Sequence` from an entry node.

    The ``sequence`` element is cleared after its text has been extracted
    to release memory.

    :param entry: XML node entry
    :return: :class:`pyuniprot.manager.models.Sequence` object
    """
    sequence_element = entry.find("./sequence")
    sequence_text = sequence_element.text
    sequence_element.clear()
    return models.Sequence(sequence=sequence_text)
python
{ "resource": "" }
q37976
DbManager.get_tissue_in_references
train
def get_tissue_in_references(self, entry):
    """Collect :class:`pyuniprot.manager.models.TissueInReference` objects.

    Tissue objects are cached on the manager (``self.tissues``) so that
    identical tissue names share a single model instance.

    :param entry: XML node entry
    :return: list of :class:`pyuniprot.manager.models.TissueInReference` objects
    """
    unique_tissues = {node.text for node in
                      entry.iterfind("./reference/source/tissue")}

    tissue_in_references = []
    for tissue_name in unique_tissues:
        if tissue_name not in self.tissues:
            self.tissues[tissue_name] = models.TissueInReference(tissue=tissue_name)
        tissue_in_references.append(self.tissues[tissue_name])

    return tissue_in_references
python
{ "resource": "" }
q37977
DbManager.get_subcellular_locations
train
def get_subcellular_locations(self, entry):
    """Collect :class:`pyuniprot.manager.models.SubcellularLocation` objects.

    Location objects are cached on the manager
    (``self.subcellular_locations``) so equal names share one instance.

    :param entry: XML node entry
    :return: list of :class:`pyuniprot.manager.models.SubcellularLocation` object
    """
    unique_locations = {node.text for node in
                        entry.iterfind('./comment/subcellularLocation/location')}

    subcellular_locations = []
    for location_name in unique_locations:
        if location_name not in self.subcellular_locations:
            self.subcellular_locations[location_name] = \
                models.SubcellularLocation(location=location_name)
        subcellular_locations.append(self.subcellular_locations[location_name])

    return subcellular_locations
python
{ "resource": "" }
q37978
DbManager.get_keywords
train
def get_keywords(self, entry):
    """Collect :class:`pyuniprot.manager.models.Keyword` objects for an entry.

    Keyword objects are cached on the manager (``self.keywords``) so the
    same keyword identifier always maps to a single model instance.

    :param entry: XML node entry
    :return: list of :class:`pyuniprot.manager.models.Keyword` objects
    """
    keyword_objects = []

    for keyword in entry.iterfind("./keyword"):
        identifier = keyword.get('id')
        name = keyword.text

        # Bug fix: the cache was previously keyed by hash(identifier);
        # hashes of distinct identifiers can collide, which would silently
        # reuse the wrong Keyword object. Key by the identifier itself.
        # NOTE(review): assumes self.keywords is used only as this
        # method's cache — confirm no other code relies on hash keys.
        if identifier not in self.keywords:
            self.keywords[identifier] = models.Keyword(**{'identifier': identifier, 'name': name})

        keyword_objects.append(self.keywords[identifier])

    return keyword_objects
python
{ "resource": "" }
q37979
DbManager.get_disease_comments
train
def get_disease_comments(self, entry):
    """Collect :class:`pyuniprot.manager.models.DiseaseComment` objects.

    For every ``comment[@type='disease']`` element the comment text is
    stored; if the comment carries a ``disease`` child, the corresponding
    :class:`pyuniprot.manager.models.Disease` row is fetched or created
    and its primary key is attached to the comment.

    :param entry: XML node entry
    :return: list of :class:`pyuniprot.manager.models.DiseaseComment` objects
    """
    disease_comments = []

    query = "./comment[@type='disease']"
    for disease_comment in entry.iterfind(query):

        # NOTE(review): assumes every disease comment has a <text> child;
        # a missing <text> element would raise AttributeError here.
        value_dict = {'comment': disease_comment.find('./text').text}

        disease = disease_comment.find("./disease")
        if disease is not None:

            disease_dict = {'identifier': disease.get('id')}

            for element in disease:
                key = element.tag

                if key in ['acronym', 'description', 'name']:
                    disease_dict[key] = element.text

                if key == 'dbReference':
                    disease_dict['ref_id'] = element.get('id')
                    disease_dict['ref_type'] = element.get('type')

            disease_obj = models.get_or_create(self.session, models.Disease, **disease_dict)
            self.session.add(disease_obj)
            # flush so disease_obj.id is populated before it is referenced
            self.session.flush()

            value_dict['disease_id'] = disease_obj.id

        disease_comments.append(models.DiseaseComment(**value_dict))

    return disease_comments
python
{ "resource": "" }
q37980
DbManager.get_alternative_full_names
train
def get_alternative_full_names(cls, entry):
    """Collect alternative full protein names of an entry.

    :param entry: XML node entry
    :return: list of :class:`pyuniprot.manager.models.AlternativeFullName` objects
    """
    query = "./protein/alternativeName/fullName"
    return [models.AlternativeFullName(name=node.text)
            for node in entry.iterfind(query)]
python
{ "resource": "" }
q37981
DbManager.get_alternative_short_names
train
def get_alternative_short_names(cls, entry):
    """Collect alternative short protein names of an entry.

    :param entry: XML node entry
    :return: list of :class:`pyuniprot.manager.models.AlternativeShortName` objects
    """
    query = "./protein/alternativeName/shortName"
    return [models.AlternativeShortName(name=node.text)
            for node in entry.iterfind(query)]
python
{ "resource": "" }
q37982
DbManager.get_ec_numbers
train
def get_ec_numbers(cls, entry):
    """Collect EC numbers attached to the recommended protein name.

    :param entry: XML node entry
    :return: list of models.ECNumber objects
    """
    return [models.ECNumber(ec_number=node.text)
            for node in entry.iterfind("./protein/recommendedName/ecNumber")]
python
{ "resource": "" }
q37983
DbManager.get_gene_name
train
def get_gene_name(cls, entry):
    """Return the primary gene name of an entry, if present.

    :param entry: XML node entry
    :return: the primary gene name as ``str``, or ``None`` when the entry
        has no primary gene name or the name element is empty/whitespace
    """
    gene_name = entry.find("./gene/name[@type='primary']")
    # Bug fix: an empty <name> element has text == None, and the previous
    # unconditional .strip() call raised AttributeError in that case.
    if gene_name is None or not gene_name.text or not gene_name.text.strip():
        return None
    return gene_name.text
python
{ "resource": "" }
q37984
DbManager.get_other_gene_names
train
def get_other_gene_names(cls, entry):
    """Collect all non-primary gene names of an entry.

    :param entry: XML node entry
    :return: list of :class:`pyuniprot.manager.models.models.OtherGeneName` objects
    """
    return [
        models.OtherGeneName(type_=node.attrib['type'], name=node.text)
        for node in entry.iterfind("./gene/name")
        if node.attrib['type'] != 'primary'
    ]
python
{ "resource": "" }
q37985
DbManager.get_accessions
train
def get_accessions(cls, entry):
    """Collect all accession numbers of an entry.

    :param entry: XML node entry
    :return: list of :class:`pyuniprot.manager.models.Accession` objects
    """
    accessions = []
    for accession_node in entry.iterfind("./accession"):
        accessions.append(models.Accession(accession=accession_node.text))
    return accessions
python
{ "resource": "" }
q37986
DbManager.get_db_references
train
def get_db_references(cls, entry):
    """Collect all cross-references (``dbReference`` elements) of an entry.

    :param entry: XML node entry
    :return: list of :class:`pyuniprot.manager.models.DbReference`
    """
    return [
        models.DbReference(identifier=node.attrib['id'], type_=node.attrib['type'])
        for node in entry.iterfind("./dbReference")
    ]
python
{ "resource": "" }
q37987
DbManager.get_features
train
def get_features(cls, entry):
    """Collect all sequence features of an entry.

    ``description`` and ``id`` are optional XML attributes and may be
    absent (stored as ``None``).

    :param entry: XML node entry
    :return: list of :class:`pyuniprot.manager.models.Feature`
    """
    features = []
    for node in entry.iterfind("./feature"):
        attrs = node.attrib
        features.append(models.Feature(
            description=attrs.get('description'),
            type_=attrs['type'],
            identifier=attrs.get('id'),
        ))
    return features
python
{ "resource": "" }
q37988
DbManager.get_recommended_protein_name
train
def get_recommended_protein_name(cls, entry):
    """Return the recommended protein name as a ``(full, short)`` tuple.

    The short name is ``None`` when the entry provides no ``shortName``
    element.

    :param entry: XML node entry
    :return: (str, str) => (full, short)
    """
    full_name = entry.find("./protein/recommendedName/fullName").text

    short_name_element = entry.find("./protein/recommendedName/shortName")
    short_name = None if short_name_element is None else short_name_element.text

    return full_name, short_name
python
{ "resource": "" }
q37989
DbManager.get_organism_hosts
train
def get_organism_hosts(cls, entry):
    """Collect the NCBI taxonomy IDs of all organism hosts of an entry.

    :param entry: XML node entry
    :return: list of :class:`pyuniprot.manager.models.OrganismHost` objects
    """
    query = "./organismHost/dbReference[@type='NCBI Taxonomy']"
    hosts = []
    for reference in entry.iterfind(query):
        hosts.append(models.OrganismHost(taxid=reference.get('id')))
    return hosts
python
{ "resource": "" }
q37990
DbManager.get_pmids
train
def get_pmids(self, entry):
    """Collect :class:`pyuniprot.manager.models.Pmid` objects for an entry.

    PubMed identifiers already imported (tracked in the ``self.pmids``
    cache) are loaded from the database; new ones are created from the
    citation attributes, added to the session and cached.

    :param entry: XML node entry
    :return: list of :class:`pyuniprot.manager.models.Pmid` objects
    """
    pmids = []

    for citation in entry.iterfind("./reference/citation"):

        for pubmed_ref in citation.iterfind('dbReference[@type="PubMed"]'):
            pmid_number = pubmed_ref.get('id')
            if pmid_number in self.pmids:
                # already imported: load the existing row from the database
                pmid_sqlalchemy_obj = self.session.query(models.Pmid)\
                    .filter(models.Pmid.pmid == pmid_number).one()
                pmids.append(pmid_sqlalchemy_obj)
            else:
                # NOTE: this mutates citation.attrib in place
                pmid_dict = citation.attrib

                # non-numeric volume values are stored as -1
                # NOTE(review): assumes the 'volume' attribute is always
                # present on the citation element — KeyError otherwise.
                if not re.search('^\d+$', pmid_dict['volume']):
                    pmid_dict['volume'] = -1

                del pmid_dict['type']  # not needed because already filtered for PubMed

                pmid_dict.update(pmid=pmid_number)

                title_tag = citation.find('./title')
                if title_tag is not None:
                    pmid_dict.update(title=title_tag.text)

                pmid_sqlalchemy_obj = models.Pmid(**pmid_dict)
                self.session.add(pmid_sqlalchemy_obj)
                # flush so the new row is visible to later queries
                self.session.flush()

                pmids.append(pmid_sqlalchemy_obj)

                self.pmids |= set([pmid_number, ])  # extend the cache of identifiers

    return pmids
python
{ "resource": "" }
q37991
DbManager.get_functions
train
def get_functions(cls, entry):
    """Collect the function comments of an entry.

    :param entry: XML node entry
    :return: list of :class:`pyuniprot.manager.models.Function` objects
    """
    query = "./comment[@type='function']"
    return [models.Function(text=comment.find('./text').text)
            for comment in entry.iterfind(query)]
python
{ "resource": "" }
q37992
Graph.resolve_nodes
train
def resolve_nodes(self, nodes):
    """Resolve the given nodes in dependency order.

    Dependencies of the nodes are resolved as well, even when they are
    not contained in *nodes*.

    :param list nodes: List of nodes to be resolved
    :return: A list of resolved nodes
    """
    resolved = []
    for node in nodes or []:
        if node not in resolved:
            self.resolve_node(node, resolved)
    return resolved
python
{ "resource": "" }
q37993
Graph.resolve_node
train
def resolve_node(self, node=None, resolved=None, seen=None):
    """Resolve a single node, or all nodes when *node* is omitted.

    Performs a depth-first traversal of the dependency graph, appending
    nodes to *resolved* in dependency order.

    :param node: the node to resolve; ``None`` resolves every node
    :param list resolved: accumulator of already-resolved nodes
    :param list seen: nodes on the current traversal path, used to detect
        circular dependencies
    :return: the list of resolved nodes
    :raises Exception: when a circular dependency is detected
    """
    if seen is None:
        seen = []
    if resolved is None:
        resolved = []

    if node is None:
        dependencies = sorted(self._nodes.keys())
    else:
        dependencies = self._nodes[node]

    seen.append(node)

    for dependency in dependencies:
        if dependency in resolved:
            continue
        if dependency in seen:
            # Bug fix: the message was previously passed logging-style
            # (format string plus separate args), so it was never formatted.
            raise Exception(
                'Circular dependency %s > %s' % (node, dependency))
        self.resolve_node(dependency, resolved, seen)

    if node is not None:
        resolved.append(node)

    return resolved
python
{ "resource": "" }
q37994
findCodons
train
def findCodons(seq, codons):
    """Find all instances of the codons in 'codons' in the given sequence.

    seq: A Bio.Seq.Seq instance.
    codons: A set of codon strings.

    Return: a generator yielding matching codon offsets.
    """
    # walk the sequence in reading-frame steps of three
    for offset in range(0, len(seq), 3):
        if str(seq[offset:offset + 3]) in codons:
            yield offset
python
{ "resource": "" }
q37995
needle
train
def needle(reads):
    """Run a Needleman-Wunsch alignment and return the two sequences.

    @param reads: An iterable of two reads.
    @return: A C{Reads} instance with the two aligned sequences.
    """
    from tempfile import mkdtemp
    from shutil import rmtree

    dir = mkdtemp()
    try:
        file1 = join(dir, 'file1.fasta')
        with open(file1, 'w') as fp:
            print(reads[0].toString('fasta'), end='', file=fp)

        file2 = join(dir, 'file2.fasta')
        with open(file2, 'w') as fp:
            print(reads[1].toString('fasta'), end='', file=fp)

        out = join(dir, 'result.fasta')

        Executor().execute("needle -asequence '%s' -bsequence '%s' -auto "
                           "-outfile '%s' -aformat fasta" % (
                               file1, file2, out))

        # Use 'list' in the following to force reading the FASTA from disk
        # before the temporary directory is removed.
        result = Reads(list(FastaReads(out)))
    finally:
        # Bug fix: the temporary directory previously leaked whenever the
        # alignment or any file I/O raised an exception.
        rmtree(dir)

    return result
python
{ "resource": "" }
q37996
read_until
train
def read_until(stream, delimiter, max_bytes=16):
    """Read from *stream* one byte at a time until *delimiter* appears.

    :param file stream: readable file-like object.
    :param bytes delimiter: delimiter string.
    :param int max_bytes: maximum bytes to read.
    :rtype: bytes|None
    :return: everything read before the delimiter, or ``None`` when the
        delimiter was not found within *max_bytes* bytes or the stream
        was exhausted first.
    """
    collected = bytearray()
    delimiter_length = len(delimiter)

    while len(collected) < max_bytes:
        byte = stream.read(1)
        if not byte:
            # stream exhausted before the delimiter appeared
            return None
        collected.extend(byte)
        if collected[-delimiter_length:] == delimiter:
            return bytes(collected[:-delimiter_length])

    return None
python
{ "resource": "" }
q37997
dechunk
train
def dechunk(stream):
    """De-chunk an HTTP "chunked" transfer-encoded body stream.

    :param file stream: readable file-like object.
    :rtype: __generator[bytes]
    :raise: DechunkError
    """
    # TODO(vovan): Add support for chunk extensions:
    # TODO(vovan): http://tools.ietf.org/html/rfc2616#section-3.6.1
    while True:
        chunk_len = read_until(stream, b'\r\n')

        if chunk_len is None:
            raise DechunkError(
                'Could not extract chunk size: unexpected end of data.')

        try:
            chunk_len = int(chunk_len.strip(), 16)
        except (ValueError, TypeError) as err:
            raise DechunkError('Could not parse chunk size: %s' % (err,))

        if chunk_len == 0:
            break

        bytes_to_read = chunk_len
        while bytes_to_read:
            chunk = stream.read(bytes_to_read)
            # Bug fix: read() returns b'' at EOF; previously this caused an
            # infinite loop when the stream ended in the middle of a chunk.
            if not chunk:
                raise DechunkError(
                    'Could not read chunk: unexpected end of data.')
            bytes_to_read -= len(chunk)
            yield chunk

        # chunk ends with \r\n
        crlf = stream.read(2)
        if crlf != b'\r\n':
            raise DechunkError('No CR+LF at the end of chunk!')
python
{ "resource": "" }
q37998
to_chunks
train
def to_chunks(stream_or_generator):
    """Yield chunks of bytes from a readable stream or a generator.

    :param file|__generator[bytes] stream_or_generator: readable stream or
        generator.
    :rtype: __generator[bytes]
    :raise: TypeError
    """
    if isinstance(stream_or_generator, types.GeneratorType):
        # already a generator: pass its chunks through untouched
        yield from stream_or_generator
        return

    if not hasattr(stream_or_generator, 'read'):
        raise TypeError('Input must be either readable or generator.')

    while True:
        data = stream_or_generator.read(CHUNK_SIZE)
        if not data:
            break  # no more data
        yield data
python
{ "resource": "" }
q37999
read_body_stream
train
def read_body_stream(stream, chunked=False, compression=None):
    """Read an HTTP body stream, yielding blocks of bytes.

    The data is de-chunked and/or decompressed as required.

    :param file stream: readable stream.
    :param bool chunked: whether stream is chunked.
    :param str|None compression: compression type is stream is compressed,
        otherwise None.
    :rtype: __generator[bytes]
    :raise: TypeError, BodyStreamError
    """
    if not chunked and not compression:
        # plain body: just chunk it up for the caller
        return to_chunks(stream)

    pipeline = stream
    if chunked:
        pipeline = dechunk(pipeline)
    if compression:
        pipeline = decompress(to_chunks(pipeline), compression)
    return pipeline
python
{ "resource": "" }