_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q37700
matchToString
train
def matchToString(aaMatch, read1, read2, indent='', offsets=None):
    """
    Format amino acid sequence match as a string.

    @param aaMatch: A C{dict} returned by C{compareAaReads}.
    @param read1: A C{Read} instance or an instance of one of its subclasses.
    @param read2: A C{Read} instance or an instance of one of its subclasses.
    @param indent: A C{str} to indent all returned lines with.
    @param offsets: If not C{None}, a C{set} of offsets of interest that were
        only considered when making C{match}.
    @return: A C{str} describing the match.
    """
    match = aaMatch['match']
    matchCount = match['matchCount']
    gapMismatchCount = match['gapMismatchCount']
    gapGapMismatchCount = match['gapGapMismatchCount']
    nonGapMismatchCount = match['nonGapMismatchCount']

    # If a set of offsets was used in the comparison, the effective length
    # of both reads is the number of offsets considered.
    if offsets:
        len1 = len2 = len(offsets)
    else:
        len1, len2 = map(len, (read1, read2))

    result = []
    append = result.append

    append(countPrint('%sMatches' % indent, matchCount, len1, len2))
    # Total mismatches is the sum of the three mismatch categories.
    mismatchCount = (gapMismatchCount + gapGapMismatchCount +
                     nonGapMismatchCount)
    append(countPrint('%sMismatches' % indent, mismatchCount, len1, len2))
    append(countPrint('%s Not involving gaps (i.e., conflicts)' % (indent),
                      nonGapMismatchCount, len1, len2))
    append(countPrint('%s Involving a gap in one sequence' % indent,
                      gapMismatchCount, len1, len2))
    append(countPrint('%s Involving a gap in both sequences' % indent,
                      gapGapMismatchCount, len1, len2))

    # Per-read details: id, length, gap locations, and any extra residues
    # beyond the end of the other read.
    for read, key in zip((read1, read2), ('read1', 'read2')):
        append('%s Id: %s' % (indent, read.id))
        length = len(read)
        append('%s Length: %d' % (indent, length))
        gapCount = len(aaMatch[key]['gapOffsets'])
        append(countPrint('%s Gaps' % indent, gapCount, length))
        if gapCount:
            # Report gap positions 1-based for human readability.
            append(
                '%s Gap locations (1-based): %s' %
                (indent,
                 ', '.join(map(lambda offset: str(offset + 1),
                               sorted(aaMatch[key]['gapOffsets'])))))
        extraCount = aaMatch[key]['extraCount']
        if extraCount:
            append(countPrint('%s Extra nucleotides at end' % indent,
                              extraCount, length))

    return '\n'.join(result)
python
{ "resource": "" }
q37701
compareAaReads
train
def compareAaReads(read1, read2, gapChars='-', offsets=None):
    """
    Compare two amino acid sequences.

    @param read1: A C{Read} instance or an instance of one of its subclasses.
    @param read2: A C{Read} instance or an instance of one of its subclasses.
    @param gapChars: An object supporting __contains__ with characters that
        should be considered to be gaps.
    @param offsets: If not C{None}, a C{set} of offsets of interest. Offsets
        not in the set will not be considered.
    @return: A C{dict} with information about the match and the individual
        sequences (see below).
    """
    identical = 0
    gapOne = gapGap = conflict = 0
    extra1 = extra2 = 0
    gaps1 = []
    gaps2 = []

    seq1 = read1.sequence.upper()
    seq2 = read2.sequence.upper()

    for site, (char1, char2) in enumerate(zip_longest(seq1, seq2)):
        # Use 'is not None' so that an empty offsets set can be passed.
        if offsets is not None and site not in offsets:
            continue

        if char1 is None:
            # read2 has an extra character at its end (it cannot be None).
            assert char2 is not None
            extra2 += 1
            if char2 in gapChars:
                gaps2.append(site)
            continue

        if char2 is None:
            # read1 has an extra character at its end.
            extra1 += 1
            if char1 in gapChars:
                gaps1.append(site)
            continue

        # A character is present in both sequences (either may be a gap).
        isGap1 = char1 in gapChars
        isGap2 = char2 in gapChars
        if isGap1:
            gaps1.append(site)
        if isGap2:
            gaps2.append(site)

        if isGap1 and isGap2:
            # Both are gaps. This can happen (though hopefully not if the
            # sequences were pairwise aligned).
            gapGap += 1
        elif isGap1 or isGap2:
            # Exactly one of the two characters is a gap.
            gapOne += 1
        elif char1 == char2:
            identical += 1
        else:
            conflict += 1

    return {
        'match': {
            'matchCount': identical,
            'gapMismatchCount': gapOne,
            'gapGapMismatchCount': gapGap,
            'nonGapMismatchCount': conflict,
        },
        'read1': {
            'extraCount': extra1,
            'gapOffsets': gaps1,
        },
        'read2': {
            'extraCount': extra2,
            'gapOffsets': gaps2,
        },
    }
python
{ "resource": "" }
q37702
parseColors
train
def parseColors(colors, args):
    """
    Parse read id color specification.

    @param colors: A C{list} of C{str}s. Each item is of the form, e.g.,
        'green X Y Z...', where each of X, Y, Z, ... etc. is either a read
        id or the name of a FASTA or FASTQ file containing reads whose ids
        should be displayed with the corresponding color. Note that if read
        ids contain spaces you will need to use the latter (i.e. FASTA/Q
        file name) approach because C{args.colors} is split on whitespace.
    @param args: The argparse C{Namespace} instance holding the other parsed
        command line arguments.
    @return: A C{dict} whose keys are colors and whose values are sets of
        read ids.
    """
    colorToIds = defaultdict(set)

    for spec in colors:
        fields = spec.split()
        # The first whitespace-separated field is the color name.
        color = fields[0]
        for token in fields[1:]:
            if not os.path.isfile(token):
                # A plain read id.
                colorToIds[color].add(token)
                continue
            # A FASTA or FASTQ file of reads: add every read id it holds.
            readsClass = FastaReads if args.fasta else FastqReads
            for read in readsClass(token):
                colorToIds[color].add(read.id)

    return colorToIds
python
{ "resource": "" }
q37703
download_release
train
def download_release(download_file, release=None):
    """Downloads the "go-basic.obo" file for the specified release.

    Parameters
    ----------
    download_file: str
        Local path to write the downloaded file to.
    release: str, optional
        The release identifier. If ``None``, the latest release (as
        reported by ``get_latest_release()``) is used.
    """
    if release is None:
        release = get_latest_release()
    # Release files are hosted in the Gene Ontology SVN web view.
    url = 'http://viewvc.geneontology.org/viewvc/GO-SVN/ontology-releases/%s/go-basic.obo' % release
    #download_file = 'go-basic_%s.obo' % release
    misc.http_download(url, download_file)
python
{ "resource": "" }
q37704
get_current_ontology_date
train
def get_current_ontology_date():
    """Get the release date of the current Gene Ontology release.

    Returns
    -------
    str
        The date portion of the ``data-version`` header of the current
        "go-basic.obo" file (e.g. ``'2023-01-01'``).
    """
    # Stream the file so we only fetch the first couple of lines, and use
    # closing() to make sure the connection is released.
    with closing(requests.get(
            'http://geneontology.org/ontology/go-basic.obo',
            stream=True)) as r:
        for i, l in enumerate(r.iter_lines(decode_unicode=True)):
            if i == 1:
                # The second line looks like "data-version: releases/YYYY-MM-DD".
                assert l.split(':')[0] == 'data-version'
                date = l.split('/')[-1]
                break
    return date
python
{ "resource": "" }
q37705
execute
train
def execute(function, name):
    """
    Execute a task, returning a TaskResult
    """
    try:
        value = function()
    except Exception as error:
        # The task failed: record the exception, no value.
        return TaskResult(name, False, error, None)
    # The task succeeded: no exception, record the returned value.
    return TaskResult(name, True, None, value)
python
{ "resource": "" }
q37706
truncatechars
train
def truncatechars(value, arg):
    """
    Truncates a string after a certain number of chars.

    Argument: Number of chars to truncate after.
    """
    try:
        limit = int(arg)
    except ValueError:
        # Invalid literal for int(): fail silently and return the value
        # unchanged.
        return value
    if len(value) <= limit:
        return value
    # Note: '...' is appended after the cut, so the result may be longer
    # than the requested limit.
    return value[:limit] + '...'
python
{ "resource": "" }
q37707
get_gtf_argument_parser
train
def get_gtf_argument_parser(desc, default_field_name='gene'):
    """Return an argument parser with basic options for reading GTF files.

    Parameters
    ----------
    desc: str
        Description of the ArgumentParser
    default_field_name: str, optional
        Name of field in GTF file to look for.
        NOTE(review): currently unused — the '--field-name' option that
        consumed it is commented out below.

    Returns
    -------
    `argparse.ArgumentParser` object
        The argument parser.
    """
    parser = cli.get_argument_parser(desc=desc)

    parser.add_argument(
        '-a', '--annotation-file', default='-', type=str,
        help=textwrap.dedent("""\
            Path of Ensembl gene annotation file (in GTF format). The file
            may be gzip'ed. If set to ``-``, read from ``stdin``.""")
    )

    parser.add_argument(
        '-o', '--output-file', required=True, type=str,
        help=textwrap.dedent("""\
            Path of output file. If set to ``-``, print to ``stdout``,
            and redirect logging messages to ``stderr``.""")
    )

    #parser.add_argument(
    #    '-s', '--species', type=str,
    #    choices=sorted(ensembl.SPECIES_CHROMPAT.keys()), default='human',
    #    help=textwrap.dedent("""\
    #        Species for which to extract genes. (This parameter is ignored
    #        if ``--chromosome-pattern`` is specified.)""")
    #)

    parser.add_argument(
        '-c', '--chromosome-pattern', type=str, required=False,
        default=None, help=textwrap.dedent("""\
            Regular expression that chromosome names have to match.
            [None]
            """)
    )

    #parser.add_argument(
    #    '-f', '--field-name', type=str, default=default_field_name,
    #    help=textwrap.dedent("""\
    #        Rows in the GTF file that do not contain this value
    #        in the third column are ignored.""")
    #)

    # Add the shared --log-file/--quiet/--verbose reporting options.
    cli.add_reporting_args(parser)

    return parser
python
{ "resource": "" }
q37708
jsonresolver_loader
train
def jsonresolver_loader(url_map):
    """JSON resolver plugin that loads the schema endpoint.

    Injected into Invenio-Records JSON resolver.

    :param url_map: A werkzeug ``Map`` to which the schema rule is added.
    """
    # Imported locally to avoid requiring an application context at
    # module-import time.
    from flask import current_app
    from . import current_jsonschemas
    # Route "<JSONSCHEMAS_ENDPOINT>/<path>" on the configured host to the
    # schema retrieval function.
    url_map.add(Rule(
        "{0}/<path:path>".format(current_app.config['JSONSCHEMAS_ENDPOINT']),
        endpoint=current_jsonschemas.get_schema,
        host=current_app.config['JSONSCHEMAS_HOST']))
python
{ "resource": "" }
q37709
merge_layouts
train
def merge_layouts(layouts):
    ''' Utility function for merging multiple layouts.

    Args:
        layouts (list): A list of BIDSLayout instances to merge.

    Returns:
        A BIDSLayout containing merged files and entities.

    Notes:
        Layouts will be merged in the order of the elements in the list.
        I.e., the first Layout will be updated with all values in the 2nd
        Layout, then the result will be updated with values from the 3rd
        Layout, etc. This means that order matters: in the event of entity
        or filename conflicts, later layouts will take precedence.
    '''
    merged = layouts[0].clone()

    for other in layouts[1:]:
        merged.files.update(other.files)
        merged.domains.update(other.domains)
        for name, entity in other.entities.items():
            if name in merged.entities:
                # Entity already known: merge in the new file mappings.
                merged.entities[name].files.update(entity.files)
            else:
                # New entity: adopt it wholesale.
                merged.entities[name] = entity

    return merged
python
{ "resource": "" }
q37710
File.copy
train
def copy(self, path_patterns, symbolic_link=False, root=None,
         conflicts='fail'):
    ''' Copy the contents of a file to a new location, with target
    filename defined by the current File's entities and the specified
    path_patterns.

    Args:
        path_patterns: Pattern(s) passed to build_path() together with this
            File's entities to construct the target filename.
        symbolic_link (bool): If True, create a symlink instead of copying
            the file contents.
        root (str): Optional root directory; prepended to this File's path
            when that path is relative, and passed through to
            write_contents_to_file().
        conflicts (str): Conflict-resolution policy passed to
            write_contents_to_file() (e.g. 'fail').

    Returns:
        None. Returns early (None) if no target filename could be built.
    '''
    new_filename = build_path(self.entities, path_patterns)
    if not new_filename:
        return None

    # A pattern that ends in the path separator names a directory; keep
    # the current basename in that case.
    if new_filename[-1] == os.sep:
        new_filename += self.filename

    # Resolve the source path, honoring `root` for relative paths.
    if isabs(self.path) or root is None:
        path = self.path
    else:
        path = join(root, self.path)

    if not exists(path):
        raise ValueError("Target filename to copy/symlink (%s) doesn't "
                         "exist." % path)

    if symbolic_link:
        # Symlink: pass the source path along; no contents to write.
        contents = None
        link_to = path
    else:
        # Copy: read the source contents as text.
        with open(path, 'r') as f:
            contents = f.read()
        link_to = None

    write_contents_to_file(new_filename, contents=contents,
                           link_to=link_to, content_mode='text', root=root,
                           conflicts=conflicts)
python
{ "resource": "" }
q37711
Entity.match_file
train
def match_file(self, f, update_file=False):
    """
    Determine whether the passed file matches the Entity.

    Args:
        f (File): The File instance to match against.
        update_file (bool): Unused here; retained for interface
            compatibility.

    Returns: the matched value if a match was found, otherwise None.
    """
    if self.map_func is None:
        # No mapping function: extract the value from the path via regex.
        found = self.regex.search(f.path)
        value = None if found is None else found.group(1)
    else:
        # A custom mapping function takes precedence over the regex.
        value = self.map_func(f)
    # Coerce to the Entity's declared type (None passes through).
    return self._astype(value)
python
{ "resource": "" }
q37712
Layout._get_or_load_domain
train
def _get_or_load_domain(self, domain):
    ''' Return a domain if one already exists, or create a new one if not.

    Args:
        domain (str, dict): Can be one of:
            - The name of the Domain to return (fails if none exists)
            - A path to the Domain configuration file
            - A dictionary containing configuration information

    Returns:
        The existing or newly created Domain instance.
    '''
    if isinstance(domain, six.string_types):
        # A string is either the name of an existing Domain or a path to
        # a JSON configuration file.
        if domain in self.domains:
            return self.domains[domain]
        elif exists(domain):
            with open(domain, 'r') as fobj:
                domain = json.load(fobj)
        else:
            raise ValueError("No domain could be found/loaded from input "
                             "'{}'; value must be either the name of an "
                             "existing Domain, or a valid path to a "
                             "configuration file.".format(domain))

    # At this point, domain is a dict
    name = domain['name']

    # Re-registering a name is a no-op with a warning, not an error.
    if name in self.domains:
        msg = ("Domain with name '{}' already exists; returning existing "
               "Domain configuration.".format(name))
        warnings.warn(msg)
        return self.domains[name]

    # Pull out the entity definitions before constructing the Domain, then
    # register each entity against the new Domain.
    entities = domain.get('entities', [])
    domain = Domain(domain)
    for e in entities:
        self.add_entity(domain=domain, **e)

    self.domains[name] = domain
    return self.domains[name]
python
{ "resource": "" }
q37713
Layout._check_inclusions
train
def _check_inclusions(self, f, domains=None):
    ''' Check file or directory against regexes in config to determine if
    it should be included in the index.

    NOTE(review): the first domain checked (always the Layout itself,
    inserted at position 0) returns from both branches, so later domains
    in the list are never consulted — confirm this is intended.
    '''
    filename = f if isinstance(f, six.string_types) else f.path

    search_domains = (list(self.domains.values()) if domains is None
                      else list(domains))
    # Inject the Layout at the first position for global include/exclude.
    search_domains.insert(0, self)

    for dom in search_domains:
        if dom.include:
            # With include patterns, a file is kept only if at least one
            # pattern matches.
            return any(re.search(pattern, filename)
                       for pattern in dom.include)
        # Otherwise the file is kept unless an exclude pattern matches.
        for pattern in dom.exclude:
            if re.search(pattern, filename, flags=re.UNICODE):
                return False
        return True
python
{ "resource": "" }
q37714
Layout._find_entity
train
def _find_entity(self, entity): ''' Find an Entity instance by name. Checks both name and id fields.''' if entity in self.entities: return self.entities[entity] _ent = [e for e in self.entities.values() if e.name == entity] if len(_ent) > 1: raise ValueError("Entity name '%s' matches %d entities. To " "avoid ambiguity, please prefix the entity " "name with its domain (e.g., 'bids.%s'." % (entity, len(_ent), entity)) if _ent: return _ent[0] raise ValueError("No entity '%s' found." % entity)
python
{ "resource": "" }
q37715
Layout.save_index
train
def save_index(self, filename):
    ''' Save the current Layout's index to a .json file.

    Args:
        filename (str): Filename to write to.

    Note: At the moment, this won't serialize directory-specific config
    files. This means reconstructed indexes will only work properly in
    cases where there aren't multiple layout specs within a project.
    '''
    data = {}
    for f in self.files.values():
        # Serialize each file as its entity values keyed by entity id.
        entities = {v.entity.id: v.value for k, v in f.tags.items()}
        data[f.path] = {'domains': f.domains, 'entities': entities}
    with open(filename, 'w') as outfile:
        json.dump(data, outfile)
python
{ "resource": "" }
q37716
Layout.load_index
train
def load_index(self, filename, reindex=False):
    ''' Load the Layout's index from a plaintext file.

    Args:
        filename (str): Path to the plaintext index file.
        reindex (bool): If True, discards entity values provided in the
            loaded index and instead re-indexes every file in the loaded
            index against the entities defined in the config. Default is
            False, in which case it is assumed that all entity definitions
            in the loaded index are correct and do not need any further
            validation.

    Note: At the moment, directory-specific config files aren't serialized.
    This means reconstructed indexes will only work properly in cases
    where there aren't multiple layout specs within a project.
    '''
    # Start from a clean slate before repopulating from the saved index.
    self._reset_index()
    with open(filename, 'r') as fobj:
        data = json.load(fobj)
    for path, file in data.items():
        ents, domains = file['entities'], file['domains']
        root, f = dirname(path), basename(path)
        if reindex:
            # Re-derive entity values from scratch.
            self._index_file(root, f, domains)
        else:
            # Trust the saved entity values: rebuild the File object and
            # reattach its tags directly.
            f = self._make_file_object(root, f)
            tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
            f.tags = tags
            self.files[f.path] = f
            for ent, val in f.entities.items():
                self.entities[ent].add_file(f.path, val)
python
{ "resource": "" }
q37717
Layout.add_entity
train
def add_entity(self, domain, **kwargs): ''' Add a new Entity to tracking. ''' # Set the entity's mapping func if one was specified map_func = kwargs.get('map_func', None) if map_func is not None and not callable(kwargs['map_func']): if self.entity_mapper is None: raise ValueError("Mapping function '%s' specified for Entity " "'%s', but no entity mapper was passed when " "initializing the current Layout. Please make" " sure the 'entity_mapper' argument is set." % (map_func, kwargs['name'])) map_func = getattr(self.entity_mapper, kwargs['map_func']) kwargs['map_func'] = map_func ent = Entity(domain=domain, **kwargs) domain.add_entity(ent) if ent.mandatory: self.mandatory.add(ent.id) if ent.directory is not None: ent.directory = ent.directory.replace('{{root}}', self.root) self.entities[ent.id] = ent for alias in ent.aliases: self.entities[alias] = ent if self.dynamic_getters: func = partial(getattr(self, 'get'), target=ent.name, return_type='id') func_name = inflect.engine().plural(ent.name) setattr(self, 'get_%s' % func_name, func)
python
{ "resource": "" }
q37718
Layout.count
train
def count(self, entity, files=False):
    """
    Return the count of unique values or files for the named entity.

    Args:
        entity (str): The name of the entity.
        files (bool): If True, counts the number of filenames that contain
            at least one value of the entity, rather than the number of
            unique values of the entity.
    """
    # Resolve the entity (by id or name), then delegate the counting.
    target = self._find_entity(entity)
    return target.count(files)
python
{ "resource": "" }
q37719
Layout.as_data_frame
train
def as_data_frame(self, **kwargs):
    """
    Return information for all Files tracked in the Layout as a pandas
    DataFrame.

    Args:
        kwargs: Optional keyword arguments passed on to get(). This allows
            one to easily select only a subset of files for export.

    Returns:
        A pandas DataFrame, where each row is a file, and each column is
            a tracked entity. NaNs are injected whenever a file has no
            value for a given attribute.
    """
    # Import lazily so pandas is only required when exporting.
    try:
        import pandas as pd
    except ImportError:
        raise ImportError("What are you doing trying to export a Layout "
                          "as a pandas DataFrame when you don't have "
                          "pandas installed? Eh? Eh?")
    # With filters, delegate the selection to get(); otherwise use all files.
    if kwargs:
        files = self.get(return_type='obj', **kwargs)
    else:
        files = self.files.values()
    frame = pd.DataFrame.from_records([f.entities for f in files])
    frame.insert(0, 'path', [f.path for f in files])
    return frame
python
{ "resource": "" }
q37720
configure_logger
train
def configure_logger(name, log_stream=sys.stdout, log_file=None,
                     log_level=logging.INFO, keep_old_handlers=False,
                     propagate=False):
    """Configures and returns a logger.

    This function serves to simplify the configuration of a logger that
    writes to a file and/or to a stream (e.g., stdout).

    Parameters
    ----------
    name: str
        The name of the logger. Typically set to ``__name__``.
    log_stream: a stream object, optional
        The stream to write log messages to. If ``None``, do not write to
        any stream. The default value is `sys.stdout`.
    log_file: str, optional
        The path of a file to write log messages to. If None, do not
        write to any file. The default value is ``None``.
    log_level: int, optional
        A logging level as defined in Python's logging module. The default
        value is `logging.INFO`.
    keep_old_handlers: bool, optional
        If set to ``True``, keep any pre-existing handlers that are
        attached to the logger. The default value is ``False``.
    propagate: bool, optional
        If set to ``True``, propagate the loggers messages to the parent
        logger. The default value is ``False``.

    Returns
    -------
    `logging.Logger`
        The logger.

    Notes
    -----
    If ``log_stream`` and ``log_file`` are both ``None``, a NullHandler
    is attached so logging calls are silently discarded.
    """
    logger = logging.getLogger(name)
    logger.setLevel(log_level)
    logger.propagate = propagate

    if not keep_old_handlers:
        # Drop any handlers attached by a previous configuration.
        logger.handlers = []

    formatter = logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s',
                                  '%Y-%m-%d %H:%M:%S')

    # Build the requested handlers (stream first, then file).
    new_handlers = []
    if log_stream is not None:
        new_handlers.append(logging.StreamHandler(log_stream))
    if log_file is not None:
        new_handlers.append(logging.FileHandler(log_file))

    if new_handlers:
        for handler in new_handlers:
            handler.setFormatter(formatter)
            logger.addHandler(handler)
    else:
        # No stream and no file: attach a NullHandler.
        logger.addHandler(logging.NullHandler())

    return logger
python
{ "resource": "" }
q37721
get_logger
train
def get_logger(name='', log_stream=None, log_file=None, quiet=False,
               verbose=False):
    """Convenience function for getting a logger.

    Maps the quiet/verbose flags to a logging level (quiet wins over
    verbose) and delegates to configure_logger().
    """
    if quiet:
        level = logging.WARNING
    elif verbose:
        level = logging.DEBUG
    else:
        level = logging.INFO

    stream = sys.stdout if log_stream is None else log_stream

    return configure_logger(name, log_stream=stream, log_file=log_file,
                            log_level=level)
python
{ "resource": "" }
q37722
start
train
def start(milliseconds, func, *args, **kwargs):
    """
    Call ``func`` every ``milliseconds`` interval, starting the timer at
    call time.

    Arguments following ``func`` are passed to ``func`` on every tick.
    Note that these args are part of the defining state, and unless an
    argument is a mutable object it will reset each interval.

    Exceptions raised by ``func`` are logged and do not stop the timer.

    :param milliseconds: Interval between calls, in milliseconds.
    :param func: The callable to invoke on each tick.
    :return: A ``threading.Event``; call ``.set()`` on it to stop the
        timer (``func`` may also set it from inside a tick to shut
        itself off).
    """
    stopper = threading.Event()

    def interval(seconds, func, *args, **kwargs):
        """Arm a one-shot daemon Timer that re-arms itself each tick."""

        def wrapper():
            """Runs on the timer thread: re-arm, then invoke the callback."""
            # Fixed: Event.isSet() is a deprecated alias; use is_set().
            if stopper.is_set():
                return
            # Re-arm before calling func so a slow/failing callback does
            # not delay or kill the schedule.
            interval(seconds, func, *args, **kwargs)
            try:
                func(*args, **kwargs)
            except Exception:
                # Fixed: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Log and keep ticking.
                logging.error("Error during interval")
                logging.error(traceback.format_exc())

        thread = threading.Timer(seconds, wrapper)
        thread.daemon = True
        thread.start()

    # Timer wants seconds, the public API takes milliseconds.
    interval(milliseconds / 1000, func, *args, **kwargs)
    return stopper
python
{ "resource": "" }
q37723
example_async_client
train
def example_async_client(api_client):
    """Example async client.

    Calls the client's echo() coroutine, prints the result, and then stops
    the Tornado IOLoop.

    NOTE(review): this is a generator function mixing ``yield from`` with
    Tornado's ``gen.Task`` — presumably it is run via a coroutine runner
    (e.g. ``gen.coroutine``) by the caller; confirm.
    """
    try:
        pprint((yield from api_client.echo()))
    except errors.RequestError as exc:
        log.exception('Exception occurred: %s', exc)
    # Stop the IOLoop once the echo call has completed (the callback
    # arguments from gen.Task are ignored).
    yield gen.Task(lambda *args, **kwargs: ioloop.IOLoop.current().stop())
python
{ "resource": "" }
q37724
example_sync_client
train
def example_sync_client(api_client):
    """Example sync client use.

    Calls the client's echo() method synchronously and prints the result;
    request failures are logged rather than propagated.
    """
    try:
        pprint(api_client.echo())
    except errors.RequestError as exc:
        log.exception('Exception occurred: %s', exc)
python
{ "resource": "" }
q37725
main
train
def main():
    """Run the examples.

    Configures logging, runs the synchronous client example, schedules the
    asynchronous example, then starts the Tornado IOLoop (the async example
    stops the loop when it finishes).
    """
    logging.basicConfig(level=logging.INFO)
    example_sync_client(SyncAPIClient())
    example_async_client(AsyncAPIClient())
    # Block until the async example stops the loop.
    io_loop = ioloop.IOLoop.current()
    io_loop.start()
python
{ "resource": "" }
q37726
main
train
def main(args=None):
    """Extract protein-coding genes and store in tab-delimited text file.

    Parameters
    ----------
    args: argparse.Namespace object, optional
        The argument values. If not specified, the values will be obtained
        by parsing the command line arguments using the `argparse` module.

    Returns
    -------
    int
        Exit code (0 if no error occurred).

    Raises
    ------
    SystemError
        If the version of the Python interpreter is not >= 2.7.
    """
    vinfo = sys.version_info
    if not vinfo >= (2, 7):
        raise SystemError('Python interpreter version >= 2.7 required, '
                          'found %d.%d instead.' %(vinfo.major, vinfo.minor))

    if args is None:
        # parse command-line arguments
        parser = get_argument_parser()
        args = parser.parse_args()

    input_file = args.annotation_file
    output_file = args.output_file
    # species = args.species
    chrom_pat = args.chromosome_pattern
    log_file = args.log_file
    quiet = args.quiet
    verbose = args.verbose

    # configure root logger
    log_stream = sys.stdout
    if output_file == '-':
        # if we print output to stdout, redirect log messages to stderr
        log_stream = sys.stderr

    logger = misc.get_logger(log_stream=log_stream, log_file=log_file,
                             quiet=quiet, verbose=verbose)

    #if chrom_pat is None:
    #    chrom_pat = ensembl.SPECIES_CHROMPAT[species]
    if chrom_pat is not None:
        logger.info('Regular expression used for filtering chromosome names: '
                    '"%s"', chrom_pat)

    # '-' means read from stdin / write to stdout.
    if input_file == '-':
        input_file = sys.stdin
    if output_file == '-':
        output_file = sys.stdout

    genes = ensembl.get_protein_coding_genes(
        input_file, chromosome_pattern=chrom_pat)
    genes.to_csv(output_file, sep='\t', index=False)

    return 0
python
{ "resource": "" }
q37727
IxePortsStats.read_stats
train
def read_stats(self, *stats):
    """ Read port statistics from chassis.

    :param stats: list of requested statistics to read, if empty - read all
        statistics.
    :return: OrderedDict mapping port name (str) to its statistics dict.
    """
    self.statistics = OrderedDict()
    for port in self.ports:
        # Absolute (total) counters for the port.
        port_stats = IxeStatTotal(port).get_attributes(FLAG_RDONLY, *stats)
        # Add per-second rate counters under '<name>_rate' keys.
        port_stats.update({c + '_rate': v for c, v in
                           IxeStatRate(port).get_attributes(FLAG_RDONLY,
                                                            *stats).items()})
        self.statistics[str(port)] = port_stats
    return self.statistics
python
{ "resource": "" }
q37728
IxeStreamsStats.read_stats
train
def read_stats(self, *stats):
    """ Read stream statistics from chassis.

    :param stats: list of requested statistics to read, if empty - read all
        statistics.
    :return: OrderedDict mapping stream name to {'tx': ..., 'rx': ...}
        statistics dicts.
    """
    # Local import to avoid a circular dependency with ixe_stream.
    from ixexplorer.ixe_stream import IxePacketGroupStream

    sleep_time = 0.1
    # in cases we only want few counters but very fast we need a smaller
    # sleep time
    if not stats:
        # No explicit request: read all read-only packet-group counters,
        # and wait longer so rate counters can settle.
        stats = [m.attrname for m in IxePgStats.__tcl_members__
                 if m.flags & FLAG_RDONLY]
        sleep_time = 1

    # Read twice to refresh rate statistics.
    for port in self.tx_ports_streams:
        port.api.call_rc('streamTransmitStats get {} 1 4096'.format(port.uri))
    for rx_port in self.rx_ports:
        rx_port.api.call_rc('packetGroupStats get {} 0 65536'.format(rx_port.uri))
    time.sleep(sleep_time)

    self.statistics = OrderedDict()
    for tx_port, streams in self.tx_ports_streams.items():
        for stream in streams:
            stream_stats = OrderedDict()

            # TX side: per-stream transmit counters from the owning port.
            tx_port.api.call_rc('streamTransmitStats get {} 1 4096'.format(tx_port.uri))
            stream_tx_stats = IxeStreamTxStats(tx_port, stream.index)
            stream_stats_tx = {c: v for c, v in
                               stream_tx_stats.get_attributes(FLAG_RDONLY).items()}
            stream_stats['tx'] = stream_stats_tx

            # RX side: packet-group counters keyed by the stream's group id.
            stream_stat_pgid = IxePacketGroupStream(stream).groupId
            stream_stats_pg = pg_stats_dict()
            # Pre-fill all session ports with -1 so ports that did not
            # receive the stream still appear in the result.
            for port in self.session.ports.values():
                stream_stats_pg[str(port)] = OrderedDict(zip(stats, [-1] * len(stats)))
            for rx_port in self.rx_ports:
                # Only query ports the stream is expected on (or all of
                # them if the stream has no explicit rx_ports).
                if not stream.rx_ports or rx_port in stream.rx_ports:
                    rx_port.api.call_rc('packetGroupStats get {} 0 65536'.format(rx_port.uri))
                    pg_stats = IxePgStats(rx_port, stream_stat_pgid)
                    stream_stats_pg[str(rx_port)] = pg_stats.read_stats(*stats)
            stream_stats['rx'] = stream_stats_pg

            self.statistics[str(stream)] = stream_stats
    return self.statistics
python
{ "resource": "" }
q37729
arbiter
train
def arbiter(rst, clk, req_vec, gnt_vec=None, gnt_idx=None, gnt_vld=None,
            gnt_rdy=None, ARBITER_TYPE="priority"):
    ''' Wrapper that provides common interface to all arbiters.

    :param rst: reset signal (used by the round-robin arbiter only).
    :param clk: clock signal (used by the round-robin arbiter only).
    :param req_vec: request vector.
    :param gnt_vec: grant vector output.
    :param gnt_idx: granted index output.
    :param gnt_vld: grant-valid output.
    :param gnt_rdy: grant-ready input (round-robin only).
    :param ARBITER_TYPE: "priority" or "roundrobin".
    :raises ValueError: for an unknown ARBITER_TYPE. (Fixed: the original
        used ``assert "message"``, which asserts a non-empty string and is
        therefore always true, silently falling through to an
        UnboundLocalError on the return.)
    '''
    if ARBITER_TYPE == "priority":
        return arbiter_priority(req_vec, gnt_vec, gnt_idx, gnt_vld)
    elif ARBITER_TYPE == "roundrobin":
        return arbiter_roundrobin(rst, clk, req_vec, gnt_vec, gnt_idx,
                                  gnt_vld, gnt_rdy)
    raise ValueError("Arbiter: Unknown arbiter type: {}".format(ARBITER_TYPE))
python
{ "resource": "" }
q37730
seq_seqhash
train
def seq_seqhash(seq, normalize=True):
    """returns 24-byte Truncated Digest for sequence `seq`

    :param seq: the sequence to digest.
    :param normalize: if True, canonicalize the sequence with
        normalize_sequence() before hashing so that equivalent sequences
        (e.g. differing only in case) hash identically.

    >>> seq_seqhash("")
    'z4PhNX7vuL3xVChQ1m2AB9Yg5AULVxXc'

    >>> seq_seqhash("ACGT")
    'aKF498dAxcJAqme6QYQ7EZ07-fiw8Kw2'

    >>> seq_seqhash("acgt")
    'aKF498dAxcJAqme6QYQ7EZ07-fiw8Kw2'

    >>> seq_seqhash("acgt", normalize=False)
    'eFwawHHdibaZBDcs9kW3gm31h1NNJcQe'
    """
    seq = normalize_sequence(seq) if normalize else seq
    return str(vmc_digest(seq, digest_size=24))
python
{ "resource": "" }
q37731
seq_seguid
train
def seq_seguid(seq, normalize=True):
    """returns seguid for sequence `seq`

    This seguid is compatible with BioPython's seguid.

    >>> seq_seguid('')
    '2jmj7l5rSw0yVb/vlWAYkK/YBwk'

    >>> seq_seguid('ACGT')
    'IQiZThf2zKn/I1KtqStlEdsHYDQ'

    >>> seq_seguid('acgt')
    'IQiZThf2zKn/I1KtqStlEdsHYDQ'

    >>> seq_seguid('acgt', normalize=False)
    'lII0AoG1/I8qKY271rgv5CFZtsU'
    """
    if normalize:
        seq = normalize_sequence(seq)
    # seguid = base64(sha1(seq)) with the trailing '=' padding removed.
    digest = hashlib.sha1(seq.encode("ascii")).digest()
    return base64.b64encode(digest).decode("ascii").rstrip('=')
python
{ "resource": "" }
q37732
seq_md5
train
def seq_md5(seq, normalize=True):
    """returns unicode md5 as hex digest for sequence `seq`.

    >>> seq_md5('')
    'd41d8cd98f00b204e9800998ecf8427e'

    >>> seq_md5('ACGT')
    'f1f8f4bf413b16ad135722aa4591043e'

    >>> seq_md5('ACGT*')
    'f1f8f4bf413b16ad135722aa4591043e'

    >>> seq_md5(' A C G T ')
    'f1f8f4bf413b16ad135722aa4591043e'

    >>> seq_md5('acgt')
    'f1f8f4bf413b16ad135722aa4591043e'

    >>> seq_md5('acgt', normalize=False)
    'db516c3913e179338b162b2476d1c23f'
    """
    if normalize:
        seq = normalize_sequence(seq)
    return hashlib.md5(seq.encode("ascii")).hexdigest()
python
{ "resource": "" }
q37733
seq_sha1
train
def seq_sha1(seq, normalize=True):
    """returns unicode sha1 hexdigest for sequence `seq`.

    >>> seq_sha1('')
    'da39a3ee5e6b4b0d3255bfef95601890afd80709'

    >>> seq_sha1('ACGT')
    '2108994e17f6cca9ff2352ada92b6511db076034'

    >>> seq_sha1('acgt')
    '2108994e17f6cca9ff2352ada92b6511db076034'

    >>> seq_sha1('acgt', normalize=False)
    '9482340281b5fc8f2a298dbbd6b82fe42159b6c5'
    """
    if normalize:
        seq = normalize_sequence(seq)
    return hashlib.sha1(seq.encode("ascii")).hexdigest()
python
{ "resource": "" }
q37734
seq_sha512
train
def seq_sha512(seq, normalize=True):
    """returns unicode sequence sha512 hexdigest for sequence `seq`.

    >>> seq_sha512('')
    'cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e'

    >>> seq_sha512('ACGT')
    '68a178f7c740c5c240aa67ba41843b119d3bf9f8b0f0ac36cf701d26672964efbd536d197f51ce634fc70634d1eefe575bec34c83247abc52010f6e2bbdb8253'

    >>> seq_sha512('acgt')
    '68a178f7c740c5c240aa67ba41843b119d3bf9f8b0f0ac36cf701d26672964efbd536d197f51ce634fc70634d1eefe575bec34c83247abc52010f6e2bbdb8253'

    >>> seq_sha512('acgt', normalize=False)
    '785c1ac071dd89b69904372cf645b7826df587534d25c41edb2862e54fb2940d697218f2883d2bf1a11cdaee658c7f7ab945a1cfd08eb26cbce57ee88790250a'
    """
    if normalize:
        seq = normalize_sequence(seq)
    return hashlib.sha512(seq.encode("ascii")).hexdigest()
python
{ "resource": "" }
q37735
map_single_end
train
def map_single_end(credentials, instance_config, instance_name, script_dir,
                   index_dir, fastq_file, output_dir, num_threads=None,
                   seed_start_lmax=None, mismatch_nmax=None,
                   multimap_nmax=None, splice_min_overhang=None,
                   out_mult_nmax=None, sort_bam=True, keep_unmapped=False,
                   self_destruct=True, compressed=True, **kwargs):
    """Maps single-end reads using STAR.

    Reads are expected in FASTQ format. By default, they are also expected
    to be compressed with gzip.

    - recommended machine type: "n1-standard-16" (60 GB of RAM, 16 vCPUs).
    - recommended disk size: depends on size of FASTQ files, at least
      128 GB.

    Renders a startup shell script from a template and launches a compute
    instance that runs STAR with it; extra keyword arguments are forwarded
    to ``instance_config.create_instance``. Returns the operation name
    returned by ``create_instance``.

    TODO: docstring"""
    # STAR output mode: coordinate-sorted or unsorted BAM.
    if sort_bam:
        out_sam_type = 'BAM SortedByCoordinate'
    else:
        out_sam_type = 'BAM Unsorted'

    # template expects a list of FASTQ files
    fastq_files = fastq_file
    if isinstance(fastq_files, (str, _oldstr)):
        fastq_files = [fastq_file]

    template = _TEMPLATE_ENV.get_template(
        os.path.join('map_single-end.sh'))
    startup_script = template.render(
        script_dir=script_dir,
        index_dir=index_dir,
        fastq_files=fastq_files,
        output_dir=output_dir,
        num_threads=num_threads,
        seed_start_lmax=seed_start_lmax,
        self_destruct=self_destruct,
        mismatch_nmax=mismatch_nmax,
        multimap_nmax=multimap_nmax,
        splice_min_overhang=splice_min_overhang,
        out_mult_nmax=out_mult_nmax,
        keep_unmapped=keep_unmapped,
        compressed=compressed,
        out_sam_type=out_sam_type)

    # GCE caps instance metadata (startup scripts) at 32,768 bytes.
    if len(startup_script) > 32768:
        raise ValueError('Startup script larger than 32,768 bytes!')
    #print(startup_script)

    op_name = instance_config.create_instance(
        credentials, instance_name, startup_script=startup_script, **kwargs)

    return op_name
python
{ "resource": "" }
q37736
generate_index
train
def generate_index(credentials, instance_config, instance_name, script_dir,
                   genome_file, output_dir, annotation_file=None,
                   splice_overhang=100, num_threads=8,
                   chromosome_bin_bits=18,
                   genome_memory_limit=31000000000,
                   self_destruct=True, **kwargs):
    """Generates a STAR index on a Google Compute Engine instance.

    Recommended machine type: "n1-highmem-8" (52 GB of RAM, 8 vCPUs).

    Renders the "generate_index.sh" startup-script template and boots an
    instance that runs it. Returns the name of the instance-creation
    operation (from ``instance_config.create_instance``).
    """
    template = _TEMPLATE_ENV.get_template(
        os.path.join('generate_index.sh'))

    startup_script = template.render(
        script_dir=script_dir,
        genome_file=genome_file,
        annotation_file=annotation_file,
        splice_overhang=splice_overhang,
        output_dir=output_dir,
        num_threads=num_threads,
        chromosome_bin_bits=chromosome_bin_bits,
        genome_memory_limit=genome_memory_limit,
        self_destruct=self_destruct)

    op_name = instance_config.create_instance(
        credentials, instance_name, startup_script=startup_script, **kwargs)

    return op_name
python
{ "resource": "" }
q37737
get_file_checksums
train
def get_file_checksums(url, ftp=None):
    """Download and parse an Ensembl CHECKSUMS file and obtain checksums.

    Parameters
    ----------
    url : str
        The URL of the CHECKSUM file.
    ftp : `ftplib.FTP` or `None`, optional
        An FTP connection. If `None`, a temporary anonymous connection to
        ftp.ensembl.org is opened and closed by this function.

    Returns
    -------
    `collections.OrderedDict`
        An ordered dictionary containing file names as keys and checksums
        as values.

    Notes
    -----
    The checksums contained in Ensembl CHECKSUM files are obtained with the
    UNIX `sum` command.
    """
    assert isinstance(url, (str, _oldstr))
    if ftp is not None:
        assert isinstance(ftp, ftplib.FTP)

    # open FTP connection if necessary; remember to close it if we did
    close_connection = False
    ftp_server = 'ftp.ensembl.org'
    ftp_user = 'anonymous'
    if ftp is None:
        ftp = ftplib.FTP(ftp_server)
        ftp.login(ftp_user)
        close_connection = True

    # download and parse CHECKSUM file
    data = []
    ftp.retrbinary('RETR %s' % url, data.append)
    # join the binary chunks, decode, and split into lines
    # (drop the trailing empty element after the final newline)
    data = ''.join(d.decode('utf-8') for d in data).split('\n')[:-1]
    file_checksums = OrderedDict()
    for d in data:
        # each line is "<checksum> <blocks> <file name>"; the file name
        # follows the last space, the checksum precedes the first space
        file_name = d[(d.rindex(' ') + 1):]
        sum_ = int(d[:d.index(' ')])
        file_checksums[file_name] = sum_
    logger.debug('Obtained checksums for %d files', len(file_checksums))

    # close FTP connection if we opened it
    if close_connection:
        ftp.close()

    return file_checksums
python
{ "resource": "" }
q37738
listify
train
def listify(obj, ignore=(list, tuple, type(None))):
    """Return `obj` wrapped in a one-element list, unless it already is an
    instance of one of the types in `ignore` (lists, tuples and None by
    default), in which case it is returned untouched. Provides a simple
    way to accept flexible arguments.
    """
    if isinstance(obj, ignore):
        return obj
    return [obj]
python
{ "resource": "" }
q37739
_get_divisions
train
def _get_divisions(taxdump_file):
    """Returns a dictionary mapping division names to division IDs.

    @param taxdump_file: Path to an NCBI "taxdump" tar archive that
        contains a 'division.dmp' member (pipe-separated).
    """
    with tarfile.open(taxdump_file) as tf:
        with tf.extractfile('division.dmp') as fh:
            df = pd.read_csv(fh, header=None, sep='|', encoding='ascii')

    # only keep division ids (col 0) and names (col 2)
    df = df.iloc[:, [0, 2]]

    # remove tab characters flanking each division name
    # (dmp fields are "\tvalue\t" between the '|' separators)
    df.iloc[:, 1] = df.iloc[:, 1].str.strip('\t')

    # generate dictionary: name -> id
    divisions = {}
    for _, row in df.iterrows():
        divisions[row.iloc[1]] = row.iloc[0]

    return divisions
python
{ "resource": "" }
q37740
get_species
train
def get_species(taxdump_file, select_divisions=None, exclude_divisions=None,
                nrows=None):
    """Get a dataframe with species information.

    Reads 'names.dmp' from the NCBI taxdump archive, keeps only species
    whose taxon IDs match the requested divisions, and returns a dataframe
    indexed by taxon_id with columns 'scientific_name' and 'common_names'
    (common names joined with '|').

    @param select_divisions / exclude_divisions: mutually exclusive
        division filters, forwarded to _get_species_taxon_ids().
    @param nrows: optionally limit the number of rows read from names.dmp
        (useful for testing).
    """
    if select_divisions and exclude_divisions:
        raise ValueError('Cannot specify "select_divisions" and '
                         '"exclude_divisions" at the same time.')

    select_taxon_ids = _get_species_taxon_ids(
        taxdump_file, select_divisions=select_divisions,
        exclude_divisions=exclude_divisions)
    # set for O(1) membership tests below
    select_taxon_ids = set(select_taxon_ids)

    with tarfile.open(taxdump_file) as tf:
        with tf.extractfile('names.dmp') as fh:
            df = pd.read_csv(fh, header=None, sep='|', encoding='ascii',
                             nrows=nrows)

    # only keep information we need: taxon id, name, name class
    df = df.iloc[:, [0, 1, 3]]

    # only select selected species
    df = df.loc[df.iloc[:, 0].isin(select_taxon_ids)]

    # remove tab characters flanking each "name class" entry
    df.iloc[:, 2] = df.iloc[:, 2].str.strip('\t')

    # select only "scientific name" and "common name" rows
    df = df.loc[df.iloc[:, 2].isin(['scientific name', 'common name'])]

    # remove tab characters flanking each "name" entry
    df.iloc[:, 1] = df.iloc[:, 1].str.strip('\t')

    # collapse common names for each scientific name (taxon id -> [names])
    common_names = defaultdict(list)
    cn = df.loc[df.iloc[:, 2] == 'common name']
    for _, row in cn.iterrows():
        common_names[row.iloc[0]].append(row.iloc[1])

    # build final dataframe (this is very slow)
    sn = df.loc[df.iloc[:, 2] == 'scientific name']
    species = []
    for i, row in sn.iterrows():
        species.append([row.iloc[0], row.iloc[1],
                        '|'.join(common_names[row.iloc[0]])])

    species_df = pd.DataFrame(species).set_index(0)
    species_df.columns = ['scientific_name', 'common_names']
    species_df.index.name = 'taxon_id'
    return species_df
python
{ "resource": "" }
q37741
IxeObject.set_attributes
train
def set_attributes(self, **attributes):
    """Assign a group of attributes without triggering a set operation
    between individual assignments, regardless of the global auto_set
    flag. If auto_set is enabled, ix_set() is called once after all
    attributes have been assigned.

    :param attributes: dictionary of <attribute, value> to set.
    """
    previous_auto_set = IxeObject.get_auto_set()
    # temporarily disable auto-set so each setattr below is cheap
    IxeObject.set_auto_set(False)
    for attribute, value in attributes.items():
        setattr(self, attribute, value)
    # honour the original auto_set policy with a single set call
    if previous_auto_set:
        self.ix_set()
    IxeObject.set_auto_set(previous_auto_set)
python
{ "resource": "" }
q37742
Correios.consulta_faixa
train
def consulta_faixa(self, localidade, uf):
    """Query the Correios site and return the CEP (postal code) range for
    the given locality and state (UF).
    """
    url = 'consultaFaixaCepAction.do'
    data = {
        'UF': uf,
        # the site expects Windows-1252 encoded form data
        'Localidade': localidade.encode('cp1252'),
        'cfm': '1',
        'Metodo': 'listaFaixaCEP',
        'TipoConsulta': 'faixaCep',
        'StartRow': '1',
        'EndRow': '10',
    }
    html = self._url_open(url, data).read()
    return self._parse_faixa(html)
python
{ "resource": "" }
q37743
Correios.consulta
train
def consulta(self, endereco, primeiro=False, uf=None, localidade=None,
             tipo=None, numero=None):
    """Query the Correios site and return a list of results.

    Two endpoints are used: a free-text address search when no UF is
    given, and a structured street search when UF (and locality etc.)
    are provided. When `primeiro` is true, only the first result's
    details are returned (via self.detalhe()).
    """
    if uf is None:
        # free-text search
        url = 'consultaEnderecoAction.do'
        data = {
            # the site expects Latin-1 encoded form data
            'relaxation': endereco.encode('ISO-8859-1'),
            'TipoCep': 'ALL',
            'semelhante': 'N',
            'cfm': 1,
            'Metodo': 'listaLogradouro',
            'TipoConsulta': 'relaxation',
            'StartRow': '1',
            'EndRow': '10'
        }
    else:
        # structured street-level search
        url = 'consultaLogradouroAction.do'
        data = {
            'Logradouro': endereco.encode('ISO-8859-1'),
            'UF': uf,
            'TIPO': tipo,
            'Localidade': localidade.encode('ISO-8859-1'),
            'Numero': numero,
            'cfm': 1,
            'Metodo': 'listaLogradouro',
            'TipoConsulta': 'logradouro',
            'StartRow': '1',
            'EndRow': '10'
        }
    h = self._url_open(url, data)
    html = h.read()
    if primeiro:
        # NOTE(review): self.detalhe() is called without the parsed html;
        # presumably it relies on state set by _url_open — confirm.
        return self.detalhe()
    else:
        return self._parse_tabela(html)
python
{ "resource": "" }
q37744
SyncRequestEngine._request
train
def _request(self, url, *, method='GET', headers=None, data=None,
             result_callback=None):
    """Perform synchronous request.

    Retries on connection-level errors up to self._conn_retries times,
    with a linearly growing back-off (2s, 4s, ...). HTTP 4xx raises
    ClientError, 5xx raises ServerError.

    :param str url: request URL.
    :param str method: request method.
    :param object data: JSON-encodable object.
    :param object -> object result_callback: result callback.
    :rtype: dict
    :raise: APIError
    """
    retries_left = self._conn_retries
    while True:
        # a fresh session per attempt; closed in the finally block
        s = self._make_session()
        try:
            # client certificate: (cert, key) pair or single cert file
            cert = None
            if self._client_cert and self._client_key:
                cert = (self._client_cert, self._client_key)
            elif self._client_cert:
                cert = self._client_cert

            # server verification: custom CA bundle path, True, or off
            if self._verify_cert:
                verify = True
                if self._ca_certs:
                    verify = self._ca_certs
            else:
                verify = False

            # HTTP basic auth only when both parts are configured
            auth = None
            if self._username and self._password:
                auth = (self._username, self._password)

            response = s.request(method, url, data=data,
                                 timeout=self._connect_timeout, cert=cert,
                                 headers=headers, verify=verify, auth=auth)
            """:type: requests.models.Response
            """

            if 400 <= response.status_code < 500:
                raise ClientError(
                    response.status_code, response.content)
            elif response.status_code >= 500:
                raise ServerError(
                    response.status_code, response.content)

            # let the callback shape the result; translate its failures
            # into MalformedResponse
            try:
                if result_callback:
                    return result_callback(response.content)
            except (ValueError, TypeError) as err:
                raise MalformedResponse(err) from None

            return response.content
        except (requests.exceptions.RequestException,
                requests.exceptions.BaseHTTPError) as exc:
            # connection-level failure: retry unless retries exhausted
            # (or retrying is disabled entirely)
            if self._conn_retries is None or retries_left <= 0:
                raise CommunicationError(exc) from None
            else:
                retries_left -= 1
                # back-off grows with the number of attempts made
                retry_in = (self._conn_retries - retries_left) * 2
                self._log.warning('Server communication error: %s. '
                                  'Retrying in %s seconds.', exc, retry_in)
                time.sleep(retry_in)
                continue
        finally:
            s.close()
python
{ "resource": "" }
q37745
SyncRequestEngine._make_session
train
def _make_session():
    """Build a new requests.Session with urllib3-level retries disabled
    for both HTTP and HTTPS (retrying is handled by the caller).

    :rtype: requests.Session
    """
    session = requests.Session()
    for scheme in ('http://', 'https://'):
        session.mount(
            scheme, requests.adapters.HTTPAdapter(max_retries=False))
    return session
python
{ "resource": "" }
q37746
fastaSubtract
train
def fastaSubtract(fastaFiles):
    """
    Given a list of open file descriptors, each with FASTA content,
    remove the reads found in the 2nd, 3rd, etc files from the first file
    in the list.

    @param fastaFiles: a C{list} of FASTA filenames. Note: the list is
        mutated (its first element is popped).
    @raises IndexError: if passed an empty list.
    @return: An iterator producing C{Bio.SeqRecord} instances suitable for
        writing to a file using C{Bio.SeqIO.write}.
    """
    reads = {}
    firstFile = fastaFiles.pop(0)
    for seq in SeqIO.parse(firstFile, 'fasta'):
        reads[seq.id] = seq

    for fastaFile in fastaFiles:
        for seq in SeqIO.parse(fastaFile, 'fasta'):
            # Make sure that reads with the same id have the same sequence.
            # (NOTE: assert is skipped under 'python -O'.)
            if seq.id in reads:
                assert str(seq.seq) == str(reads[seq.id].seq)
            # remove regardless of whether it was present
            reads.pop(seq.id, None)

    return iter(reads.values())
python
{ "resource": "" }
q37747
SqliteIndex._addFilename
train
def _addFilename(self, filename):
    """
    Add a new file name.

    @param filename: A C{str} file name.
    @raise ValueError: If a file with this name has already been added.
    @return: The C{int} id of the newly added file.
    """
    cur = self._connection.cursor()
    try:
        cur.execute('INSERT INTO files(name) VALUES (?)', (filename,))
    except sqlite3.IntegrityError as e:
        # a UNIQUE violation means the name is already in the table;
        # any other integrity error is unexpected and re-raised
        if str(e).find('UNIQUE constraint failed') > -1:
            raise ValueError('Duplicate file name: %r' % filename)
        else:
            raise
    else:
        fileNumber = cur.lastrowid
        self._connection.commit()
    return fileNumber
python
{ "resource": "" }
q37748
SqliteIndex.addFile
train
def addFile(self, filename):
    """
    Add a new FASTA file of sequences, indexing the byte offset of each
    sequence id in the sqlite database.

    @param filename: A C{str} file name, with the file in FASTA format.
        This file must (obviously) exist at indexing time. When
        __getitem__ is used to access sequences, it is possible to provide
        a C{fastaDirectory} argument to our C{__init__} to indicate the
        directory containing the original FASTA files, in which case the
        basename of the file here provided is used to find the file in the
        given directory. This allows the construction of a sqlite database
        from the shell in one directory and its use programmatically from
        another directory.
    @raise ValueError: If a file with this name has already been added or
        if the file contains a sequence whose id has already been seen,
        or if the file is compressed but not in BGZF format.
    @return: The C{int} number of sequences added from the file.
    """
    # decide on the reader based on the file extension: .bgz/.gz must be
    # BGZF (seekable blocked gzip); plain bz2/gzip cannot be indexed
    endswith = filename.lower().endswith
    if endswith('.bgz') or endswith('.gz'):
        useBgzf = True
    elif endswith('.bz2'):
        raise ValueError(
            'Compressed FASTA is only supported in BGZF format. Use '
            'bgzip to compresss your FASTA.')
    else:
        useBgzf = False

    fileNumber = self._addFilename(filename)
    connection = self._connection
    count = 0
    try:
        # one transaction for the whole file
        with connection:
            if useBgzf:
                try:
                    fp = bgzf.open(filename, 'rb')
                except ValueError as e:
                    # bgzf.open raises ValueError mentioning BGZF when
                    # the file is plain gzip rather than blocked gzip
                    if str(e).find('BGZF') > -1:
                        raise ValueError(
                            'Compressed FASTA is only supported in BGZF '
                            'format. Use the samtools bgzip utility '
                            '(instead of gzip) to compresss your FASTA.')
                    else:
                        raise
                else:
                    try:
                        # NOTE(review): lines from a BGZF reader opened
                        # 'rb' may be bytes, in which case line[0] == '>'
                        # and rstrip(' \t\n\r') assume str — presumably
                        # the reader yields text here; confirm with the
                        # Bio.bgzf version in use.
                        for line in fp:
                            if line[0] == '>':
                                count += 1
                                id_ = line[1:].rstrip(' \t\n\r')
                                # fp.tell() is a BGZF virtual offset
                                # taken after reading the header line
                                connection.execute(
                                    'INSERT INTO sequences(id, '
                                    'fileNumber, offset) VALUES (?, ?, ?)',
                                    (id_, fileNumber, fp.tell()))
                    finally:
                        fp.close()
            else:
                with open(filename) as fp:
                    # track the byte offset just past each header line
                    offset = 0
                    for line in fp:
                        offset += len(line)
                        if line[0] == '>':
                            count += 1
                            id_ = line[1:].rstrip(' \t\n\r')
                            connection.execute(
                                'INSERT INTO sequences(id, fileNumber, '
                                'offset) VALUES (?, ?, ?)',
                                (id_, fileNumber, offset))
    except sqlite3.IntegrityError as e:
        if str(e).find('UNIQUE constraint failed') > -1:
            # duplicate sequence id: work out whether the duplicate came
            # from this file or from an earlier one
            original = self._find(id_)
            if original is None:
                # The id must have appeared twice in the current file,
                # because we could not look it up in the database
                # (i.e., it was INSERTed but not committed).
                raise ValueError(
                    "FASTA sequence id '%s' found twice in file '%s'." %
                    (id_, filename))
            else:
                origFilename, _ = original
                raise ValueError(
                    "FASTA sequence id '%s', found in file '%s', was "
                    "previously added from file '%s'." %
                    (id_, filename, origFilename))
        else:
            raise
    else:
        return count
python
{ "resource": "" }
q37749
SqliteIndex._find
train
def _find(self, id_): """ Find the filename and offset of a sequence, given its id. @param id_: A C{str} sequence id. @return: A 2-tuple, containing the C{str} file name and C{int} offset within that file of the sequence. """ cur = self._connection.cursor() cur.execute( 'SELECT fileNumber, offset FROM sequences WHERE id = ?', (id_,)) row = cur.fetchone() if row is None: return None else: return self._getFilename(row[0]), row[1]
python
{ "resource": "" }
q37750
PathogenSampleFiles.writeSampleIndex
train
def writeSampleIndex(self, fp):
    """
    Write one "<index> <name>" line per sample to C{fp}, ordered by
    sample index.

    @param fp: A file-like object, opened for writing.
    """
    byIndex = sorted(
        (index, name) for name, index in self._samples.items())
    lines = ['%d %s' % (index, name) for index, name in byIndex]
    print('\n'.join(lines), file=fp)
python
{ "resource": "" }
q37751
PathogenSampleFiles.writePathogenIndex
train
def writePathogenIndex(self, fp):
    """
    Write one "<index> <name>" line per pathogen to C{fp}, ordered by
    pathogen index.

    @param fp: A file-like object, opened for writing.
    """
    byIndex = sorted(
        (index, name) for name, index in self._pathogens.items())
    lines = ['%d %s' % (index, name) for index, name in byIndex]
    print('\n'.join(lines), file=fp)
python
{ "resource": "" }
q37752
ProteinGrouper._title
train
def _title(self): """ Create a title summarizing the pathogens and samples. @return: A C{str} title. """ return ( 'Overall, proteins from %d pathogen%s were found in %d sample%s.' % (len(self.pathogenNames), '' if len(self.pathogenNames) == 1 else 's', len(self.sampleNames), '' if len(self.sampleNames) == 1 else 's'))
python
{ "resource": "" }
q37753
ProteinGrouper.addFile
train
def addFile(self, filename, fp):
    """
    Read and record protein information for a sample.

    @param filename: A C{str} file name.
    @param fp: An open file pointer to read the file's data from. Each
        line holds whitespace-separated coverage, median score, best
        score, read count, HSP count, protein length, and the combined
        protein/pathogen names.
    @raise ValueError: If information for a pathogen/protein/sample
        combination is given more than once.
    """
    # Determine the sample name: explicit, regex-extracted from the
    # file name, or the file name itself.
    if self._sampleName:
        sampleName = self._sampleName
    elif self._sampleNameRegex:
        match = self._sampleNameRegex.search(filename)
        if match:
            sampleName = match.group(1)
        else:
            sampleName = filename
    else:
        sampleName = filename

    # assets (plots, read files) live in a subdirectory next to the input
    outDir = join(dirname(filename), self._assetDir)

    self.sampleNames[sampleName] = join(outDir, 'index.html')

    for index, proteinLine in enumerate(fp):
        # strip the trailing newline
        proteinLine = proteinLine[:-1]
        # names may themselves contain whitespace, hence maxsplit=6
        (coverage, medianScore, bestScore, readCount, hspCount,
         proteinLength, names) = proteinLine.split(None, 6)
        proteinName, pathogenName = splitNames(names)

        if pathogenName not in self.pathogenNames:
            self.pathogenNames[pathogenName] = {}

        if sampleName not in self.pathogenNames[pathogenName]:
            self.pathogenNames[pathogenName][sampleName] = {
                'proteins': {},
                # filled in later, when de-duplicated reads are counted
                'uniqueReadCount': None,
            }

        proteins = self.pathogenNames[pathogenName][sampleName]['proteins']

        # We should only receive one line of information for a given
        # pathogen/sample/protein combination.
        if proteinName in proteins:
            raise ValueError(
                'Protein %r already seen for pathogen %r sample %r.'
                % (proteinName, pathogenName, sampleName))

        readsFilename = join(outDir, '%d.%s' % (index, self._format))

        proteins[proteinName] = {
            'bestScore': float(bestScore),
            'bluePlotFilename': join(outDir, '%d.png' % index),
            'coverage': float(coverage),
            'readsFilename': readsFilename,
            'hspCount': int(hspCount),
            'index': index,
            'medianScore': float(medianScore),
            'outDir': outDir,
            'proteinLength': int(proteinLength),
            'proteinName': proteinName,
            'proteinURL': NCBISequenceLinkURL(proteinName),
            'readCount': int(readCount),
        }

        if self._saveReadLengths:
            # read the per-protein reads file to record read lengths
            readsClass = (FastaReads if self._format == 'fasta'
                          else FastqReads)
            proteins[proteinName]['readLengths'] = tuple(
                len(read) for read in readsClass(readsFilename))
python
{ "resource": "" }
q37754
ProteinGrouper.toStr
train
def toStr(self): """ Produce a string representation of the pathogen summary. @return: A C{str} suitable for printing. """ # Note that the string representation contains much less # information than the HTML summary. E.g., it does not contain the # unique (de-duplicated, by id) read count, since that is only computed # when we are making combined FASTA files of reads matching a # pathogen. readCountGetter = itemgetter('readCount') result = [] append = result.append append(self._title()) append('') for pathogenName in sorted(self.pathogenNames): samples = self.pathogenNames[pathogenName] sampleCount = len(samples) append('%s (in %d sample%s)' % (pathogenName, sampleCount, '' if sampleCount == 1 else 's')) for sampleName in sorted(samples): proteins = samples[sampleName]['proteins'] proteinCount = len(proteins) totalReads = sum(readCountGetter(p) for p in proteins.values()) append(' %s (%d protein%s, %d read%s)' % (sampleName, proteinCount, '' if proteinCount == 1 else 's', totalReads, '' if totalReads == 1 else 's')) for proteinName in sorted(proteins): append( ' %(coverage).2f\t%(medianScore).2f\t' '%(bestScore).2f\t%(readCount)4d\t%(hspCount)4d\t' '%(index)3d\t%(proteinName)s' % proteins[proteinName]) append('') return '\n'.join(result)
python
{ "resource": "" }
q37755
SSFastaReads.iter
train
def iter(self):
    """
    Iterate over the sequences in self._files, yielding each as an
    instance of the desired read class. Records are consumed in pairs:
    a sequence record followed by its predicted secondary structure.

    @raise ValueError: If the input file has an odd number of records or
        if any sequence has a different length than its predicted
        secondary structure.
    """
    upperCase = self._upperCase
    for _file in self._files:
        with asHandle(_file) as fp:
            records = SeqIO.parse(fp, 'fasta')
            while True:
                try:
                    record = next(records)
                except StopIteration:
                    break
                # every sequence record must be followed by a
                # structure record
                try:
                    structureRecord = next(records)
                except StopIteration:
                    raise ValueError('Structure file %r has an odd number '
                                     'of records.' % _file)
                if len(structureRecord) != len(record):
                    raise ValueError(
                        'Sequence %r length (%d) is not equal to '
                        'structure %r length (%d) in input file %r.' % (
                            record.description, len(record),
                            structureRecord.description,
                            len(structureRecord), _file))
                if upperCase:
                    read = self._readClass(
                        record.description,
                        str(record.seq.upper()),
                        str(structureRecord.seq.upper()))
                else:
                    read = self._readClass(record.description,
                                           str(record.seq),
                                           str(structureRecord.seq))
                yield read
python
{ "resource": "" }
q37756
npartial
train
def npartial(func, *args, **kwargs):
    """
    Build a node-visitor method from `func` with `args`/`kwargs` frozen
    in. The returned function has the (self, node) visitor signature but
    ignores the node, calling func(self, *args, **kwargs) instead.
    """
    def visit(self, node):
        func(self, *args, **kwargs)
    return visit
python
{ "resource": "" }
q37757
aa3_to_aa1
train
def aa3_to_aa1(seq):
    """convert string of 3-letter amino acids to 1-letter amino acids

    Returns None when given None.

    >>> aa3_to_aa1("CysAlaThrSerAlaArgGluLeuAlaMetGlu")
    'CATSARELAME'

    >>> aa3_to_aa1(None)

    """
    if seq is None:
        return None
    # walk the string three characters at a time and map each triplet
    triplets = (seq[i:i + 3] for i in range(0, len(seq), 3))
    return "".join(aa3_to_aa1_lut[triplet] for triplet in triplets)
python
{ "resource": "" }
q37758
elide_sequence
train
def elide_sequence(s, flank=5, elision="..."):
    """trim a sequence to include the left and right flanking sequences of
    size `flank`, with the intervening sequence elided by `elision`.
    Strings no longer than the elided form are returned unchanged.

    >>> elide_sequence("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    'ABCDE...VWXYZ'

    >>> elide_sequence("ABCDEFGHIJKLMNOPQRSTUVWXYZ", flank=3)
    'ABC...XYZ'
    """
    # eliding only pays off when the result would actually be shorter
    shortest_elided_len = 2 * flank + len(elision)
    if len(s) <= shortest_elided_len:
        return s
    return "".join([s[:flank], elision, s[-flank:]])
python
{ "resource": "" }
q37759
normalize_sequence
train
def normalize_sequence(seq):
    """return normalized representation of sequence for hashing

    Normalization removes whitespace and asterisks and uppercases the
    sequence. A RuntimeError is raised if any non-alphabetic character
    remains afterwards.

    >>> normalize_sequence(" A C G T * ")
    'ACGT'
    """
    normalized = re.sub(r"[\s\*]", "", seq).upper()
    bad = re.search("[^A-Z]", normalized)
    if bad:
        _logger.debug("Original sequence: " + seq)
        _logger.debug("Normalized sequence: " + normalized)
        _logger.debug("First non-[A-Z] at {}".format(bad.start()))
        raise RuntimeError("Normalized sequence contains non-alphabetic characters")
    return normalized
python
{ "resource": "" }
q37760
translate_cds
train
def translate_cds(seq, full_codons=True, ter_symbol="*"):
    """translate a DNA or RNA sequence into a single-letter amino acid
    sequence using the standard translation table

    If full_codons is True, a sequence whose length isn't a multiple of
    three generates a ValueError; else `ter_symbol` will be appended for
    the trailing partial codon.

    >>> translate_cds("ATGCGA")
    'MR'

    >>> translate_cds("")
    ''
    """
    if seq is None:
        return None
    if not seq:
        return ""
    if full_codons and len(seq) % 3 != 0:
        raise ValueError("Sequence length must be a multiple of three")

    # work on uppercased DNA (U -> T) from here on
    seq = replace_u_to_t(seq).upper()
    tail = len(seq) % 3

    amino_acids = []
    for pos in range(0, len(seq) - tail, 3):
        codon = seq[pos:pos + 3]
        try:
            amino_acids.append(dna_to_aa1_lut[codon])
        except KeyError:
            raise ValueError(
                "Codon {} at position {}..{} is undefined in codon table".format(
                    codon, pos + 1, pos + 3))

    # represent a trailing partial codon with the terminal symbol
    if not full_codons and tail != 0:
        amino_acids.append(ter_symbol)

    return ''.join(amino_acids)
python
{ "resource": "" }
q37761
BaseRequestEngine.request
train
def request(self, url, *, method='GET', headers=None, data=None,
            result_callback=None):
    """Perform request against the API.

    Builds the absolute URL from the engine's base URL and delegates to
    the engine-specific _request() implementation.

    :param str url: relative request URL.
    :param str method: request method.
    :param dict headers: request headers.
    :param object data: request data.
    :param object -> object result_callback: result callback.
    :rtype: dict
    :raise: APIError
    """
    full_url = self._make_full_url(url)
    self._log.debug('Performing %s request to %s', method, full_url)
    return self._request(full_url, method=method, headers=headers,
                         data=data, result_callback=result_callback)
python
{ "resource": "" }
q37762
BaseRequestEngine._make_full_url
train
def _make_full_url(self, url):
    """Given base and relative URL, construct the full URL.

    Strips any leading slashes from the relative part so that exactly
    one separator joins the two pieces.

    :param str url: relative URL.
    :return: full URL.
    :rtype: str
    """
    relative = url.lstrip(SLASH)
    return SLASH.join([self._api_base_url, relative])
python
{ "resource": "" }
q37763
merge_dictionaries
train
def merge_dictionaries(current, new, only_defaults=False,
                       template_special_case=False):
    '''
    Merge two settings dictionaries, recording how many changes were needed.

    @param current: the settings dict being built up (mutated in place).
    @param new: the dict of settings to merge in.
    @param only_defaults: if True, only fill in keys that are missing from
        `current`; never modify existing values.
    @param template_special_case: merge TEMPLATES entries by BACKEND
        instead of appending duplicates. Note: the recursive calls below
        do not propagate this flag, so it only applies at the top level.
    @return: the C{int} number of changes made.
    '''
    changes = 0
    for key, value in new.items():
        if key not in current:
            # prefer Django's global default (if any) over the new value
            # when introducing a previously-unseen key
            if hasattr(global_settings, key):
                current[key] = getattr(global_settings, key)
                LOGGER.debug("Set %r to global default %r.",
                             key, current[key])
            else:
                current[key] = copy.copy(value)
                LOGGER.debug("Set %r to %r.", key, current[key])
            changes += 1
            continue
        elif only_defaults:
            continue
        current_value = current[key]
        if hasattr(current_value, 'items'):
            # nested dict: merge recursively
            changes += merge_dictionaries(current_value, value)
        elif isinstance(current_value, (list, tuple)):
            # list/tuple: append elements not already present
            for element in value:
                if element not in current_value:
                    if template_special_case and key == 'TEMPLATES':
                        # merge TEMPLATES entries with a matching BACKEND
                        # rather than adding a second entry
                        existing_matches = [
                            template for template in current_value
                            if template['BACKEND'] == element['BACKEND']
                        ]
                        if existing_matches:
                            changes += merge_dictionaries(
                                existing_matches[0], element)
                        else:
                            current[key] = list(current_value) + [element]
                            LOGGER.debug("Added %r to %r.", element, key)
                            changes += 1
                    else:
                        current[key] = list(current_value) + [element]
                        LOGGER.debug("Added %r to %r.", element, key)
                        changes += 1
        elif isinstance(current_value, Promise) or isinstance(value, Promise):
            # lazy translation objects can't be compared reliably with !=
            # If we don't know what to do with it, replace it.
            if current_value is not value:
                current[key] = value
                LOGGER.debug("Set %r to %r.", key, current[key])
                changes += 1
        else:
            # If we don't know what to do with it, replace it.
            if current_value != value:
                current[key] = value
                LOGGER.debug("Set %r to %r.", key, current[key])
                changes += 1
    return changes
python
{ "resource": "" }
q37764
configure_settings
train
def configure_settings(settings, environment_settings=True):
    '''
    Given a settings object, run automatic configuration of all the apps
    in INSTALLED_APPS.

    Repeatedly merges each app's autoconfig SETTINGS / DEFAULT_SETTINGS and
    applies RELATIONSHIPS until no change is made (a fixed point), raising
    ImproperlyConfigured if MAX_ITERATIONS is exceeded.

    @param settings: a dict-like settings object (mutated in place).
    @param environment_settings: also apply
        django_autoconfig.environment_settings last.
    '''
    # start at 1 so the loop body runs at least once
    changes = 1
    iterations = 0
    while changes:
        changes = 0
        # our own settings are applied first, the environment's last
        app_names = ['django_autoconfig'] + list(settings['INSTALLED_APPS'])
        if environment_settings:
            app_names.append('django_autoconfig.environment_settings')
        for app_name in app_names:
            import django_autoconfig.contrib
            # an app provides autoconfig either via its own
            # <app>.autoconfig module or via a bundled contrib config
            if autoconfig_module_exists(app_name):
                module = importlib.import_module(
                    "%s.autoconfig" % (app_name,))
            elif app_name in django_autoconfig.contrib.CONTRIB_CONFIGS:
                module = django_autoconfig.contrib.CONTRIB_CONFIGS[app_name]
            else:
                continue
            changes += merge_dictionaries(
                settings,
                getattr(module, 'SETTINGS', {}),
                template_special_case=True,
            )
            changes += merge_dictionaries(
                settings,
                getattr(module, 'DEFAULT_SETTINGS', {}),
                only_defaults=True,
            )
            for relationship in getattr(module, 'RELATIONSHIPS', []):
                changes += relationship.apply_changes(settings)
        if iterations >= MAX_ITERATIONS:
            raise ImproperlyConfigured(
                'Autoconfiguration could not reach a consistent state'
            )
        iterations += 1
    LOGGER.debug("Autoconfiguration took %d iterations.", iterations)
python
{ "resource": "" }
q37765
configure_urls
train
def configure_urls(apps, index_view=None, prefixes=None):
    '''
    Configure urls from a list of apps.

    Each app with a urls submodule is included under a prefix taken from
    `prefixes` (defaulting to the app name with underscores replaced by
    hyphens). Optionally adds a root redirect to `index_view`.

    @param apps: iterable of app module names.
    @param index_view: optional URL pattern name for a '/' redirect.
    @param prefixes: optional dict mapping app name to URL prefix; an
        empty-string prefix mounts the app at the root.
    @return: a Django urlpatterns sequence.
    '''
    prefixes = prefixes or {}
    urlpatterns = patterns('')
    if index_view:
        from django.views.generic.base import RedirectView
        urlpatterns += patterns('',
            url(r'^$',
                RedirectView.as_view(pattern_name=index_view,
                                     permanent=False)),
        )
    for app_name in apps:
        app_module = importlib.import_module(app_name)
        # skip apps that ship no urls.py
        if module_has_submodule(app_module, 'urls'):
            module = importlib.import_module("%s.urls" % app_name)
            if not hasattr(module, 'urlpatterns'):
                # Resolver will break if the urls.py file is completely
                # blank.
                continue
            app_prefix = prefixes.get(app_name, app_name.replace("_", "-"))
            urlpatterns += patterns(
                '',
                url(
                    r'^%s/' % app_prefix if app_prefix else '',
                    include("%s.urls" % app_name),
                ),
            )
    return urlpatterns
python
{ "resource": "" }
q37766
check_images
train
def check_images(data):
    """
    Check and reformat input images if needed, returning an Images object.

    Plain ndarrays (or anything convertible via asarray) are wrapped with
    fromarray().
    """
    if isinstance(data, ndarray):
        data = fromarray(data)

    if not isinstance(data, Images):
        data = fromarray(asarray(data))

    # NOTE(review): the check accepts 3- or 4-dimensional arrays (the
    # leading axis presumably indexing images), while the message says
    # "2 or 3" and reports the full len(data.shape) — probably meant to
    # describe per-image dimensionality; confirm intended wording.
    if len(data.shape) not in set([3, 4]):
        raise Exception('Number of image dimensions %s must be 2 or 3'
                        % (len(data.shape)))

    return data
python
{ "resource": "" }
q37767
check_reference
train
def check_reference(images, reference):
    """
    Ensure the reference matches the per-image dimensions of `images`
    (i.e. images.shape without its leading axis), raising otherwise.
    Returns the reference unchanged.
    """
    if images.shape[1:] != reference.shape:
        raise Exception('Image shape %s and reference shape %s must match'
                        % (images.shape[1:], reference.shape))
    return reference
python
{ "resource": "" }
q37768
ExpGene.from_dict
train
def from_dict(cls, data: Dict[str, Union[str, int]]):
    """Generate an `ExpGene` object from a dictionary.

    Parameters
    ----------
    data : dict
        A dictionary with keys corresponding to attribute names.
        Attributes with missing keys will be assigned `None`.

    Returns
    -------
    `ExpGene`
        The gene.

    Raises
    ------
    ValueError
        If the "ensembl_id" key is missing.
    """
    assert isinstance(data, dict)

    if 'ensembl_id' not in data:
        raise ValueError('An "ensembl_id" key is missing!')

    # make a copy, so the caller's dictionary is not modified
    data = dict(data)
    # treat empty strings as missing values
    for attr in ['name', 'chromosome', 'position', 'length',
                 'type', 'source']:
        if attr in data and data[attr] == '':
            data[attr] = None
    # the constructor parameter is named "type_" ("type" would shadow the
    # builtin). Bug fix: use pop() with a default so a missing "type" key
    # yields None instead of raising KeyError, as the docstring promises.
    data['type_'] = data.pop('type', None)
    return cls(**data)
python
{ "resource": "" }
q37769
ReadIntervals.add
train
def add(self, start, end):
    """
    Record a read match against the subject as a half-open interval.

    @param start: The C{int} start offset of the read match in the subject.
    @param end: The C{int} end offset of the read match in the subject.
        This is Python-style: the end offset is not included in the match.
    """
    assert start <= end
    interval = (start, end)
    self._intervals.append(interval)
python
{ "resource": "" }
q37770
ReadIntervals.walk
train
def walk(self):
    """
    Get the non-overlapping read intervals that match the subject.

    @return: A generator that produces (TYPE, (START, END)) tuples, where
        where TYPE is either self.EMPTY or self.FULL and (START, STOP) is
        the interval. The endpoint (STOP) of the interval is not considered
        to be in the interval. I.e., the interval is really [START, STOP).
    """
    # work on a sorted copy; nextFull consumes it from the front
    intervals = sorted(self._intervals)

    def nextFull():
        # pop the next interval and merge in all subsequent intervals
        # that overlap (or abut) it, extending stop as needed
        start, stop = intervals.pop(0)
        while intervals:
            if intervals[0][0] <= stop:
                _, thisStop = intervals.pop(0)
                if thisStop > stop:
                    stop = thisStop
            else:
                break
        return (start, stop)

    if intervals:
        # If the first interval (read) starts after zero, yield an
        # initial empty section to get us to the first interval.
        if intervals[0][0] > 0:
            yield (self.EMPTY, (0, intervals[0][0]))
        while intervals:
            # Yield a full interval followed by an empty one (if there
            # is another interval pending).
            lastFull = nextFull()
            yield (self.FULL, lastFull)
            if intervals:
                yield (self.EMPTY, (lastFull[1], intervals[0][0]))

        # Yield the final empty section, if any.
        if lastFull[1] < self._targetLength:
            yield (self.EMPTY, (lastFull[1], self._targetLength))
    else:
        # no reads at all: the whole target is one empty interval
        yield (self.EMPTY, (0, self._targetLength))
python
{ "resource": "" }
q37771
ReadIntervals.coverage
train
def coverage(self):
    """
    Get the fraction of the subject that is matched by its set of reads.

    @return: The C{float} fraction of the subject matched by its reads
        (0.0 for a zero-length subject).
    """
    if self._targetLength == 0:
        return 0.0
    # sum the covered widths, clamping intervals that extend beyond
    # the subject on either side
    covered = sum(
        min(end, self._targetLength) - max(0, start)
        for intervalType, (start, end) in self.walk()
        if intervalType == self.FULL)
    return float(covered) / self._targetLength
python
{ "resource": "" }
q37772
ReadIntervals.coverageCounts
train
def coverageCounts(self):
    """
    For each location in the subject, count how many reads cover it.

    @return: a C{Counter} keyed by C{int} subject offset, whose values
        are the number of reads covering that offset.
    """
    counts = Counter()
    targetLength = self._targetLength
    for start, end in self._intervals:
        # clamp each interval to the subject before counting
        counts.update(range(max(0, start), min(targetLength, end)))
    return counts
python
{ "resource": "" }
q37773
OffsetAdjuster._reductionForOffset
train
def _reductionForOffset(self, offset): """ Calculate the total reduction for a given X axis offset. @param offset: The C{int} offset. @return: The total C{float} reduction that should be made for this offset. """ reduction = 0 for (thisOffset, thisReduction) in self._adjustments: if offset >= thisOffset: reduction += thisReduction else: break return reduction
python
{ "resource": "" }
q37774
OffsetAdjuster.adjustHSP
train
def adjustHSP(self, hsp):
    """
    Shift the read and subject start/end offsets of an HSP left by the
    reduction that applies at its leftmost offset.

    @param hsp: a L{dark.hsp.HSP} or L{dark.hsp.LSP} instance (mutated
        in place).
    """
    leftmost = min(hsp.readStartInSubject, hsp.subjectStart)
    reduction = self._reductionForOffset(leftmost)
    hsp.readEndInSubject -= reduction
    hsp.readStartInSubject -= reduction
    hsp.subjectEnd -= reduction
    hsp.subjectStart -= reduction
python
{ "resource": "" }
q37775
GeneSetCollection.get_by_index
train
def get_by_index(self, i):
    """Look up a gene set by its index.

    Parameters
    ----------
    i: int
        The index of the gene set.

    Returns
    -------
    GeneSet
        The gene set.

    Raises
    ------
    ValueError
        If the given index is out of bounds (including negative indices).
    """
    # Reject negative indices too: Python's negative indexing into
    # self._gene_set_ids would otherwise silently "succeed" for
    # -self.n <= i < 0 instead of reporting an out-of-bounds error.
    if not 0 <= i < self.n:
        raise ValueError('Index %d out of bounds ' % i +
                         'for database with %d gene sets.' % self.n)
    return self._gene_sets[self._gene_set_ids[i]]
python
{ "resource": "" }
q37776
GeneSetCollection.read_tsv
train
def read_tsv(cls, path, encoding='utf-8'):
    """Read a gene set database from a tab-delimited text file.

    Parameters
    ----------
    path: str
        The path name of the file.
    encoding: str
        The encoding of the text file.

    Returns
    -------
    GeneSetCollection
        The collection of gene sets read from the file.
    """
    # NOTE(review): the file is opened in binary mode and ``encoding``
    # is passed to csv.reader, which suggests a unicodecsv-style module
    # is imported as ``csv`` — the stdlib csv module takes no such
    # argument. Confirm against the file's imports.
    gene_sets = []
    with open(path, 'rb') as fh:
        reader = csv.reader(fh, dialect='excel-tab', encoding=encoding)
        for row in reader:
            gene_sets.append(GeneSet.from_list(row))
    logger.debug('Read %d gene sets.', len(gene_sets))
    logger.debug('Size of gene set list: %d', len(gene_sets))
    return cls(gene_sets)
python
{ "resource": "" }
q37777
GeneSetCollection.write_tsv
train
def write_tsv(self, path):
    """Write the database to a tab-delimited text file.

    Parameters
    ----------
    path: str
        The path name of the file.

    Returns
    -------
    None
    """
    # NOTE(review): binary mode plus a csv writer suggests a
    # unicodecsv-style module imported as ``csv`` (stdlib csv on
    # Python 3 expects a text-mode file). Confirm against the imports.
    with open(path, 'wb') as ofh:
        writer = csv.writer(ofh, dialect='excel-tab',
                            quoting=csv.QUOTE_NONE,
                            lineterminator=os.linesep)
        writer.writerows(gs.to_list() for gs in self._gene_sets.values())
python
{ "resource": "" }
q37778
GeneSetCollection.read_msigdb_xml
train
def read_msigdb_xml(cls, path, entrez2gene, species=None): # pragma: no cover
    """Read the complete MSigDB database from an XML file.

    The XML file can be downloaded from here:
    http://software.broadinstitute.org/gsea/msigdb/download_file.jsp?filePath=/resources/msigdb/5.0/msigdb_v5.0.xml

    Parameters
    ----------
    path: str
        The path name of the XML file.
    entrez2gene: dict or OrderedDict (str: str)
        A dictionary mapping Entrez Gene IDs to gene symbols (names).
    species: str, optional
        A species name (e.g., "Homo_sapiens"). Only gene sets for that
        species will be retained. (None)

    Returns
    -------
    GeneSetCollection
        The gene set database containing the MSigDB gene sets.
    """
    # note: is XML file really encoded in UTF-8?
    assert isinstance(path, (str, _oldstr))
    assert isinstance(entrez2gene, (dict, OrderedDict))
    assert species is None or isinstance(species, (str, _oldstr))

    logger.debug('Path: %s', path)
    logger.debug('entrez2gene type: %s', str(type(entrez2gene)))

    # Single-element lists are used as mutable counters so that the
    # nested callback below can update them (this file apparently
    # predates / avoids Python 3's ``nonlocal``).
    i = [0]
    gene_sets = []
    total_gs = [0]
    total_genes = [0]
    species_excl = [0]
    unknown_entrezid = [0]
    src = 'MSigDB'

    def handle_item(pth, item):
        # callback function for xmltodict.parse(); invoked once per
        # GENESET element (item_depth=2). Returning True tells
        # xmltodict to continue parsing.
        total_gs[0] += 1
        # The element's attributes live on the second component of the
        # path tuple provided by xmltodict.
        data = pth[1][1]
        spec = data['ORGANISM']
        # filter by species
        if species is not None and spec != species:
            species_excl[0] += 1
            return True
        id_ = data['SYSTEMATIC_NAME']
        name = data['STANDARD_NAME']
        coll = data['CATEGORY_CODE']
        desc = data['DESCRIPTION_BRIEF']
        entrez = data['MEMBERS_EZID'].split(',')
        # Map each member's Entrez ID to a gene symbol, counting (and
        # skipping) the IDs that entrez2gene does not know about.
        genes = []
        for e in entrez:
            total_genes[0] += 1
            try:
                genes.append(entrez2gene[e])
            except KeyError:
                unknown_entrezid[0] += 1
        if not genes:
            # A gene set with no mappable genes is dropped entirely.
            logger.warning('Gene set "%s" (%s) has no known genes!',
                           name, id_)
            return True
        gs = GeneSet(id_, name, genes, source=src,
                     collection=coll, description=desc)
        gene_sets.append(gs)
        i[0] += 1
        return True

    # parse the XML file using the xmltodict package
    with io.open(path, 'rb') as fh:
        xmltodict.parse(fh.read(), encoding='UTF-8', item_depth=2,
                        item_callback=handle_item)

    # report some statistics
    if species_excl[0] > 0:
        kept = total_gs[0] - species_excl[0]
        perc = 100 * (kept / float(total_gs[0]))
        logger.info('%d of all %d gene sets (%.1f %%) belonged to the '
                    'specified species.', kept, total_gs[0], perc)
    if unknown_entrezid[0] > 0:
        unkn = unknown_entrezid[0]
        # known = total_genes[0] - unknown_entrezid[0]
        perc = 100 * (unkn / float(total_genes[0]))
        logger.warning('%d of a total of %d genes (%.1f %%) had an ' +
                       'unknown Entrez ID.', unkn, total_genes[0], perc)

    logger.info('Parsed %d entries, resulting in %d gene sets.',
                total_gs[0], len(gene_sets))

    return cls(gene_sets)
python
{ "resource": "" }
q37779
Feature.legendLabel
train
def legendLabel(self):
    """
    Provide a textual description of the feature and its qualifiers to
    be used as a label in a plot legend.

    @return: A C{str} description of the feature.
    """
    # Qualifiers that are never informative in a legend.
    skip = set((
        'codon_start', 'db_xref', 'protein_id', 'region_name',
        'ribosomal_slippage', 'rpt_type', 'translation', 'transl_except',
        'transl_table'))
    maxLen = 30
    parts = []
    qualifiers = self.feature.qualifiers
    if qualifiers:
        for name in sorted(qualifiers):
            if name in skip:
                continue
            value = ', '.join(qualifiers[name])
            # 'site_type: other' conveys nothing useful.
            if name == 'site_type' and value == 'other':
                continue
            if len(value) > maxLen:
                value = value[:maxLen - 3] + '...'
            parts.append('%s: %s' % (name, value))
    return '%d-%d %s%s.%s' % (
        int(self.feature.location.start),
        int(self.feature.location.end),
        self.feature.type,
        ' (subfeature)' if self.subfeature else '',
        ' ' + ', '.join(parts) if parts else '')
python
{ "resource": "" }
q37780
recent_articles
train
def recent_articles(limit=10, exclude=None):
    """Returns list of latest article"""
    # NOTE(review): ``limit`` is accepted but never applied here;
    # presumably callers slice the returned queryset themselves —
    # confirm before relying on it.
    queryset = Article.objects.filter(published=True).order_by('-modified')
    if exclude:
        # A single pk or any iterable of pks may be excluded.
        if hasattr(exclude, '__iter__'):
            return queryset.exclude(pk__in=exclude)
        return queryset.exclude(pk=exclude)
    return queryset
python
{ "resource": "" }
q37781
_flatten
train
def _flatten(n): """Recursively flatten a mixed sequence of sub-sequences and items""" if isinstance(n, collections.Sequence): for x in n: for y in _flatten(x): yield y else: yield n
python
{ "resource": "" }
q37782
wrap
train
def wrap(stream, unicode=False, window=1024, echo=False, close_stream=True):
    """Wrap a stream to implement expect functionality.

    Any Python file-like object (one with a ``read`` method) or socket
    (one with a ``recv`` method) is wrapped in the appropriate
    :class:`Expecter` subclass for its type. The returned object adds an
    :func:`Expect.expect` method to the stream while passing normal
    stream operations like *read*/*recv* and *write*/*send* through to
    the underlying stream.

    Here's an example of opening and wrapping a pair of network
    sockets::

        import socket
        import streamexpect

        source, drain = socket.socketpair()
        expecter = streamexpect.wrap(drain)

        source.sendall(b'this is a test')
        match = expecter.expect_bytes(b'test', timeout=5)

        assert match is not None

    :param stream: The stream/socket to wrap.
    :param bool unicode: If ``True``, configure the wrapper for Unicode
        matching; otherwise matching is done on binary.
    :param int window: Historical characters to buffer.
    :param bool echo: If ``True``, echo received characters to stdout.
    :param bool close_stream: If ``True``, and the wrapper is used as a
        context manager, close the stream at the end of the context
        manager.
    """
    # Select the adapter via duck typing on the stream's interface.
    if hasattr(stream, 'read'):
        adapter = PollingStreamAdapter(stream)
    elif hasattr(stream, 'recv'):
        adapter = PollingSocketStreamAdapter(stream)
    else:
        raise TypeError('stream must have either read or recv method')

    # The echo callback must match the data type being buffered.
    if not echo:
        callback = None
    elif unicode:
        callback = _echo_text
    else:
        callback = _echo_bytes

    expecter_class = TextExpecter if unicode else BytesExpecter
    return expecter_class(adapter, input_callback=callback, window=window,
                          close_adapter=close_stream)
python
{ "resource": "" }
q37783
BytesSearcher.search
train
def search(self, buf):
    """Search the provided buffer for matching bytes.

    If the byte sequence is found, a :class:`SequenceMatch` describing
    its first occurrence is returned; otherwise ``None`` is returned.

    :param buf: Buffer to search for a match.
    :return: :class:`SequenceMatch` if matched, None if no match was found.
    """
    start = self._check_type(buf).find(self._bytes)
    if start < 0:
        return None
    end = start + len(self._bytes)
    return SequenceMatch(self, buf[start:end], start, end)
python
{ "resource": "" }
q37784
TextSearcher.search
train
def search(self, buf):
    """Search the provided buffer for matching text.

    The buffer is first normalized to the searcher's Unicode
    normalization form. If the text is found, a :class:`SequenceMatch`
    describing its first occurrence in the normalized buffer is
    returned; otherwise ``None`` is returned.

    :param buf: Buffer to search for a match.
    :return: :class:`SequenceMatch` if matched, None if no match was found.
    """
    self._check_type(buf)
    canonical = unicodedata.normalize(self.FORM, buf)
    start = canonical.find(self._text)
    if start < 0:
        return None
    end = start + len(self._text)
    return SequenceMatch(self, canonical[start:end], start, end)
python
{ "resource": "" }
q37785
RegexSearcher.search
train
def search(self, buf):
    """Search the provided buffer for a match to the object's regex.

    If the regex matches, a :class:`RegexMatch` is returned; otherwise
    ``None`` is returned.

    :param buf: Buffer to search for a match.
    :return: :class:`RegexMatch` if matched, None if no match was found.
    """
    found = self._regex.search(self._check_type(buf))
    if found is None:
        return None
    start, end = found.start(), found.end()
    return RegexMatch(self, buf[start:end], start, end, found.groups())
python
{ "resource": "" }
q37786
SearcherCollection.search
train
def search(self, buf):
    """Search the provided buffer for a match to any sub-searchers.

    Every sub-searcher in this collection is tried; if several match,
    the match that starts earliest in the buffer wins. Returns ``None``
    when no sub-searcher matches.

    :param buf: Buffer to search for a match.
    :return: the winning sub-searcher's match object, or None.
    """
    self._check_type(buf)
    best = None
    best_start = sys.maxsize
    for searcher in self:
        candidate = searcher.search(buf)
        if candidate and candidate.start < best_start:
            best = candidate
            best_start = candidate.start
    return best
python
{ "resource": "" }
q37787
LoadFixtureRunner.init_graph
train
def init_graph(self):
    """
    Initialize graph

    Load all nodes and set dependencies. To avoid errors about missing
    nodes all nodes get loaded first before setting the dependencies.
    """
    self._graph = Graph()

    fixtures = self.loader.disk_fixtures

    # Pass 1: register every fixture as a node, so that pass 2 can never
    # reference a node that does not exist yet.
    for name in fixtures.keys():
        self.graph.add_node(name)

    # Pass 2: wire up the dependency edges.
    for name, fixture in fixtures.items():
        for dependency in fixture.dependencies:
            self.graph.add_dependency(name, dependency)
python
{ "resource": "" }
q37788
LoadFixtureRunner.load_fixtures
train
def load_fixtures(self, nodes=None, progress_callback=None, dry_run=False):
    """Load all fixtures for given nodes.

    If no nodes are given all fixtures will be loaded.

    :param list nodes: list of nodes to be loaded.
    :param callable progress_callback: Callback which will be called
        while handling the nodes.
    """
    if progress_callback and not callable(progress_callback):
        raise Exception('Callback should be callable')

    plan = self.get_plan(nodes=nodes)

    try:
        with transaction.atomic():
            self.load_plan(plan=plan, progress_callback=progress_callback)
            if dry_run:
                # Raising inside the atomic block forces a rollback.
                raise DryRun
    except DryRun:
        # Expected when dry_run is True; the transaction was rolled back.
        pass

    return len(plan)
python
{ "resource": "" }
q37789
getAPOBECFrequencies
train
def getAPOBECFrequencies(dotAlignment, orig, new, pattern):
    """
    Gets mutation frequencies if they are in a certain pattern.

    @param dotAlignment: result from calling basePlotter
    @param orig: A C{str}, naming the original base
    @param new: A C{str}, what orig was mutated to
    @param pattern: A C{str}, which pattern we're looking for (must be
        one of 'cPattern', 'tPattern')
    @return: A C{defaultdict} mapping each three-base motif of the
        chosen pattern to the number of times an orig->new mutation was
        seen with that motif surrounding it in the subject.
    """
    cPattern = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG', 'CCT',
                'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC', 'TCG', 'TCT']
    tPattern = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG', 'CTT',
                'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC', 'TTG', 'TTT']
    # choose the right pattern
    if pattern == 'cPattern':
        patterns = cPattern
        middleBase = 'C'
    else:
        patterns = tPattern
        middleBase = 'T'
    # generate the freqs dict pre-populated with every motif of the
    # chosen pattern (so motifs seen zero times are still reported).
    freqs = defaultdict(int)
    for motifPattern in patterns:
        freqs[motifPattern] = 0
    # get the subject sequence from dotAlignment; the remaining lines
    # are the queries.
    subject = dotAlignment[0].split('\t')[3]
    queries = dotAlignment[1:]
    for item in queries:
        query = item.split('\t')[1]
        for index, qBase in enumerate(query):
            sBase = subject[index]
            if qBase == new and sBase == orig:
                # Use sentinels at the sequence boundaries. (The
                # previous code indexed subject[index - 1], which for
                # index 0 silently wrapped around to the *last* base of
                # the subject and miscounted motifs.)
                minusSb = subject[index - 1] if index > 0 else 'start'
                try:
                    plusSb = subject[index + 1]
                except IndexError:
                    plusSb = 'end'
                motif = '%s%s%s' % (minusSb, middleBase, plusSb)
                if motif in freqs:
                    freqs[motif] += 1
    return freqs
python
{ "resource": "" }
q37790
getCompleteFreqs
train
def getCompleteFreqs(blastHits):
    """
    Make a dictionary which collects all mutation frequencies from
    all reads.

    Calls basePlotter to get dotAlignment, which is passed to
    getAPOBECFrequencies with the respective parameter, to collect
    the frequencies.

    @param blastHits: A L{dark.blast.BlastHits} instance.
    """
    mutations = ('C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G')
    allFreqs = {}
    for title in blastHits.titles:
        allFreqs[title] = {}
        dotAlignment = basePlotter(blastHits, title)
        for mutation in mutations:
            origBase, newBase = mutation[0], mutation[2]
            # The pattern name depends on the base being mutated.
            patternName = 'cPattern' if origBase == 'C' else 'tPattern'
            allFreqs[title][mutation] = getAPOBECFrequencies(
                dotAlignment, origBase, newBase, patternName)
        plotInfo = blastHits.titles[title]['plotInfo']
        allFreqs[title]['numberOfReads'] = len(plotInfo['items'])
        allFreqs[title]['bitScoreMax'] = plotInfo['bitScoreMax']
    return allFreqs
python
{ "resource": "" }
q37791
writeDetails
train
def writeDetails(accept, readId, taxonomy, fp):
    """
    Write read and taxonomy details.

    @param accept: A C{bool} indicating whether the read was accepted,
        according to its taxonomy.
    @param readId: The C{str} id of the read.
    @taxonomy: A C{list} of taxonomy C{str} levels.
    @fp: An open file pointer to write to.
    """
    # 'MISS: ' is padded so read ids line up with the 'MATCH:' lines.
    status = 'MATCH:' if accept else 'MISS: '
    details = ' | '.join(taxonomy) if taxonomy else 'No taxonomy found.'
    fp.write('%s %s\n %s\n\n' % (status, readId, details))
python
{ "resource": "" }
q37792
Graph.add
train
def add(self, name, parents=None):
    """
    add a node to the graph.

    Raises an exception if the node cannot be added (i.e., if a node
    with that name already exists, or if it would create a cycle).

    NOTE: A node can be added before its parents are added.

    name: The name of the node to add to the graph. Name can be any
        unique Hashable value.

    parents: (optional, None) The names of the node's parents.
    """
    if not isinstance(name, Hashable):
        raise TypeError(name)
    parents = set(parents or ())
    is_stub = False
    if name in self._nodes:
        if name in self._stubs:
            # The node was created implicitly as a placeholder parent
            # by an earlier add(); upgrade it, keeping its known
            # children but replacing its (empty) parent set.
            node = Node(name, self._nodes[name].children, parents)
            is_stub = True
        else:
            # A real (non-stub) node with this name already exists.
            raise ValueError(name)
    else:
        node = Node(name, set(), parents)
    # cycle detection: name must not already be an ancestor of any of
    # its would-be parents. ``visited`` is shared across the checks so
    # each node is traversed at most once overall.
    visited = set()
    for parent in parents:
        if self.ancestor_of(parent, name, visited=visited):
            raise ValueError(parent)
        elif parent == name:
            # A self-loop is also a cycle.
            raise ValueError(parent)
    # Node safe to add
    if is_stub:
        self._stubs.remove(name)
    if parents:
        for parent_name in parents:
            parent_node = self._nodes.get(parent_name)
            if parent_node is not None:
                parent_node.children.add(name)
            else:
                # add stub: the parent hasn't been added yet, so record
                # a placeholder node that knows about this child.
                self._nodes[parent_name] = Node(
                    name=parent_name,
                    children=set((name,)),
                    parents=frozenset(),
                )
                self._stubs.add(parent_name)
    else:
        # No parents means the node is a root of the graph.
        self._roots.add(name)
    self._nodes[name] = node
python
{ "resource": "" }
q37793
Graph.remove
train
def remove(self, name, strategy=Strategy.promote):
    """
    Remove a node from the graph. Returns the set of nodes that were
    removed.

    If the node doesn't exist, an exception will be raised.

    name: The name of the node to remove.

    strategy: (Optional, Strategy.promote) What to do with children of
        removed nodes. The options are:

        orphan: remove the node from the child's set of parents.
        promote: replace the node with the node's parents in the
            child's set of parents.
        remove: recursively remove all children of the node.
    """
    removed = set()
    stack = [name]
    while stack:
        current = stack.pop()
        # Raises KeyError if the node doesn't exist.
        node = self._nodes.pop(current)
        if strategy == Strategy.remove:
            # Children are removed too: detach each child from the
            # current node and queue it for removal.
            for child_name in node.children:
                child_node = self._nodes[child_name]
                child_node.parents.remove(current)
                stack.append(child_name)
        else:
            for child_name in node.children:
                child_node = self._nodes[child_name]
                child_node.parents.remove(current)
                if strategy == Strategy.promote:
                    # The removed node's parents adopt its children.
                    for parent_name in node.parents:
                        parent_node = self._nodes[parent_name]
                        parent_node.children.add(child_name)
                        child_node.parents.add(parent_name)
                if not child_node.parents:
                    # The child lost its last parent; it is now a root.
                    self._roots.add(child_name)
        if current in self._stubs:
            self._stubs.remove(current)
        elif current in self._roots:
            self._roots.remove(current)
        else:
            # stubs and roots (by definition) don't have parents
            for parent_name in node.parents:
                parent_node = self._nodes[parent_name]
                parent_node.children.remove(current)
                # A stub exists only to track children; once its last
                # child is gone it serves no purpose, so remove it too.
                if parent_name in self._stubs and not parent_node.children:
                    stack.append(parent_name)
        removed.add(current)
    return removed
python
{ "resource": "" }
q37794
Graph.ancestor_of
train
def ancestor_of(self, name, ancestor, visited=None):
    """
    Check whether a node has another node as an ancestor.

    name: The name of the node being checked.

    ancestor: The name of the (possible) ancestor node.

    visited: (optional, None) If given, a set of nodes that have
        already been traversed. NOTE: The set will be updated with any
        new nodes that are visited.

    NOTE: If node doesn't exist, the method will return False.
    """
    visited = set() if visited is None else visited
    start = self._nodes.get(name)
    if start is None or name not in self._nodes:
        return False
    # Iterative depth-first traversal up the parent links.
    pending = list(start.parents)
    while pending:
        current = pending.pop()
        if current == ancestor:
            return True
        if current in visited:
            continue
        visited.add(current)
        node = self._nodes.get(current)
        if node is not None:
            pending.extend(node.parents)
    return False
python
{ "resource": "" }
q37795
respond
train
def respond(template, context=None, request=None, **kwargs):
    """Render the template to an HttpResponse, using a RequestContext
    (and the default context processors) when a request is given."""
    from django.http import HttpResponse
    from django.template import RequestContext
    from django.template.loader import render_to_string

    # Default to None rather than a mutable ``{}`` default argument,
    # which would be a single dict shared across all calls.
    if context is None:
        context = {}

    if request:
        # Start from the default request context and let the caller's
        # context entries override it.
        final_context = context_processors.default(request)
        final_context.update(context)
    else:
        final_context = context.copy()

    rendered = render_to_string(
        template, final_context,
        context_instance=request and RequestContext(request) or None)
    return HttpResponse(rendered, **kwargs)
python
{ "resource": "" }
q37796
update_subscription
train
def update_subscription(request, ident):
    "Shows subscriptions options for a verified subscriber."
    try:
        subscription = Subscription.objects.get(ident=ident)
    except Subscription.DoesNotExist:
        return respond('overseer/invalid_subscription_token.html', {}, request)

    if request.POST:
        form = UpdateSubscriptionForm(request.POST, instance=subscription)
        if form.is_valid():
            if form.cleaned_data['unsubscribe']:
                # Unsubscribing removes the subscription entirely.
                subscription.delete()
                return respond('overseer/unsubscribe_confirmed.html', {
                    'email': subscription.email,
                })
            form.save()
            # Redirect back to the same page after a successful update.
            return HttpResponseRedirect(request.get_full_path())
    else:
        form = UpdateSubscriptionForm(instance=subscription)

    context = csrf(request)
    context.update({
        'form': form,
        'subscription': subscription,
        'service_list': Service.objects.all(),
    })

    return respond('overseer/update_subscription.html', context, request)
python
{ "resource": "" }
q37797
verify_subscription
train
def verify_subscription(request, ident):
    """
    Verifies an unverified subscription and create or appends to an
    existing subscription.
    """
    try:
        unverified = UnverifiedSubscription.objects.get(ident=ident)
    except UnverifiedSubscription.DoesNotExist:
        return respond('overseer/invalid_subscription_token.html', {}, request)

    # Reuse an existing verified subscription for this email address if
    # there is one; otherwise create it, keeping the unverified ident.
    subscription, _ = Subscription.objects.get_or_create(
        email=unverified.email,
        defaults={'ident': unverified.ident},
    )
    subscription.services = unverified.services.all()
    unverified.delete()

    return respond('overseer/subscription_confirmed.html', {
        'subscription': subscription,
    }, request)
python
{ "resource": "" }
q37798
ReadsAlignments.hsps
train
def hsps(self):
    """
    Provide access to all HSPs for all alignments of all reads.

    @return: A generator that yields HSPs (or LSPs).
    """
    # Flatten the reads -> alignments -> HSPs hierarchy lazily.
    for readAlignments in self:
        for hsp in (h for alignment in readAlignments
                    for h in alignment.hsps):
            yield hsp
python
{ "resource": "" }
q37799
getSequence
train
def getSequence(title, db='nucleotide'):
    """
    Get information about a sequence from Genbank.

    @param title: A C{str} sequence title from a BLAST hit. Of the form
        'gi|63148399|gb|DQ011818.1| Description...'.
    @param db: The C{str} name of the Entrez database to consult.

    NOTE: this uses the network! Also, there is a 3 requests/second
    limit imposed by NCBI on these requests so be careful or your IP
    will be banned.
    """
    titleId = title.split(' ', 1)[0]
    fields = titleId.split('|')
    if len(fields) > 1:
        # 'gi|63148399|...' style title: the gi number is field 1.
        gi = fields[1]
    else:
        # Assume we have a gi number directly, and make sure it's a string.
        gi = str(titleId)
    try:
        client = Entrez.efetch(db=db, rettype='gb', retmode='text', id=gi)
    except URLError:
        return None
    else:
        record = SeqIO.read(client, 'gb')
        client.close()
        return record
python
{ "resource": "" }