code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
if random_seed is not None:
random.seed(random_seed)
result = []
for i, record in enumerate(records):
if len(result) < k:
result.append(record)
else:
r = random.randint(0, i)
if r < k:
result[r] = record
return result | def sample(records, k, random_seed=None) | Choose a length-``k`` subset of ``records``, retaining the input
order. If k > len(records), all are returned. If an integer
``random_seed`` is provided, it is passed to ``random.seed()``. | 1.782074 | 1.902716 | 0.936595 |
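The function above is textbook reservoir sampling, so every record survives with probability `k/n`. A minimal usage sketch (the function is re-defined so the snippet runs standalone):

```python
import random

def sample(records, k, random_seed=None):
    # Reservoir sampling: fill the reservoir with the first k records,
    # then replace a random slot with probability k/(i+1) thereafter.
    if random_seed is not None:
        random.seed(random_seed)
    result = []
    for i, record in enumerate(records):
        if len(result) < k:
            result.append(record)
        else:
            r = random.randint(0, i)
            if r < k:
                result[r] = record
    return result

# Reproducible 3-of-10 sample; with fewer than k records, all are kept.
print(sample(range(10), 3, random_seed=42))
print(sample(['a', 'b'], 5))  # ['a', 'b']
```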
logging.info('Applying _head generator: '
'limiting results to top ' + head + ' records.')
if head == '-0':
for record in records:
yield record
elif '-' in head:
with _record_buffer(records) as r:
record_count = sum(1 for record in r())
end_index = max(record_count + int(head), 0)
for record in itertools.islice(r(), end_index):
yield record
else:
for record in itertools.islice(records, int(head)):
yield record | def head(records, head) | Limit results to the top N records.
With a leading '-', print all but the last N records. | 4.752141 | 4.244349 | 1.119639 |
logging.info('Applying _tail generator: '
'limiting results to bottom ' + tail + ' records.')
if tail == '+0':
for record in records:
yield record
elif '+' in tail:
tail = int(tail) - 1
for record in itertools.islice(records, tail, None):
yield record
else:
with _record_buffer(records) as r:
record_count = sum(1 for record in r())
start_index = max(record_count - int(tail), 0)
for record in itertools.islice(r(), start_index, None):
yield record | def tail(records, tail) | Limit results to the bottom N records.
Use +N to output records starting with the Nth. | 4.37566 | 3.899229 | 1.122186 |
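The `-N`/`+N` semantics of `head` and `tail` reduce to `itertools.islice` index arithmetic; the `_record_buffer` helper (not shown) exists only so a one-shot record iterator can be consumed twice. A sketch of the arithmetic over a plain list:

```python
import itertools

records = list('ABCDEFG')

# head 3: the first three records.
print(list(itertools.islice(records, 3)))                  # ['A', 'B', 'C']

# head -2: everything but the last two records.
end_index = max(len(records) + int('-2'), 0)
print(list(itertools.islice(records, end_index)))          # ['A'..'E']

# tail 3: the last three records.
start_index = max(len(records) - 3, 0)
print(list(itertools.islice(records, start_index, None)))  # ['E', 'F', 'G']

# tail +3: records starting with the 3rd (1-indexed).
print(list(itertools.islice(records, 3 - 1, None)))        # ['C'..'G']
```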
aln_len = None
gaps = []
for i, sequence in enumerate(sequences):
if aln_len is None:
aln_len = len(sequence)
gaps = [0] * aln_len
else:
if len(sequence) != aln_len:
raise ValueError(("Unexpected sequence length {0}. Is this "
"an alignment?").format(len(sequence)))
# Update any gap positions in gap list
for j, char in enumerate(sequence.seq):
if char in gap_chars:
gaps[j] += 1
sequence_count = float(i + 1)
gap_props = [g / sequence_count for g in gaps]
return gap_props | def gap_proportion(sequences, gap_chars='-') | Generates a list with the proportion of gaps by index in a set of
sequences. | 3.036359 | 3.181043 | 0.954517 |
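A worked example of the column-wise arithmetic, using plain strings in place of the SeqRecord-like objects the function expects:

```python
# Three aligned sequences, four columns.
aln = ['AC-T',
       'A--T',
       'ACGT']
n = float(len(aln))
# Count gaps per column, then divide by the sequence count.
gap_props = [sum(seq[j] == '-' for seq in aln) / n for j in range(4)]
print(gap_props)  # [0.0, 0.333..., 0.666..., 0.0]
```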
with _record_buffer(records) as r:
gap_proportions = gap_proportion(r())
keep_columns = [g < gap_threshold for g in gap_proportions]
for record in r():
sequence = str(record.seq)
# Trim
squeezed = itertools.compress(sequence, keep_columns)
yield SeqRecord(Seq(''.join(squeezed)), id=record.id,
description=record.description) | def squeeze(records, gap_threshold=1.0) | Remove columns from an alignment whose proportion of gaps is at least
``gap_threshold`` (by default, columns that are gaps in every sequence).
Records are buffered so gap positions can be determined in a first pass. | 5.263414 | 4.854981 | 1.084127 |
logging.info('Applying _strip_range generator: '
'removing /<start>-<stop> ranges from IDs')
# Split up and be greedy.
cut_regex = re.compile(r"(?P<id>.*)\/(?P<start>\d+)\-(?P<stop>\d+)")
for record in records:
name = record.id
match = cut_regex.match(str(record.id))
if match:
sequence_id = match.group('id')
start = int(match.group('start'))
stop = int(match.group('stop'))
if start > 0 and start <= stop:
name = sequence_id
yield SeqRecord(record.seq, id=name,
description='') | def strip_range(records) | Cut off trailing /<start>-<stop> ranges from IDs. Ranges must be 1-indexed and
the stop integer must not be less than the start integer. | 4.287136 | 3.541526 | 1.210533 |
logging.info('Applying _transcribe generator: '
'operation to perform is ' + transcribe + '.')
for record in records:
sequence = str(record.seq)
description = record.description
name = record.id
if transcribe == 'dna2rna':
dna = Seq(sequence, IUPAC.ambiguous_dna)
rna = dna.transcribe()
yield SeqRecord(rna, id=name, description=description)
elif transcribe == 'rna2dna':
rna = Seq(sequence, IUPAC.ambiguous_rna)
dna = rna.back_transcribe()
yield SeqRecord(dna, id=name, description=description) | def transcribe(records, transcribe) | Perform transcription or back-transcription.
transcribe must be one of the following:
dna2rna
rna2dna | 2.774122 | 2.532181 | 1.095546 |
logging.info('Applying translation generator: '
'operation to perform is ' + translate + '.')
to_stop = translate.endswith('stop')
source_type = translate[:3]
alphabet = {'dna': IUPAC.ambiguous_dna, 'rna': IUPAC.ambiguous_rna}[source_type]
# Get a translation table
table = {'dna': CodonTable.ambiguous_dna_by_name["Standard"],
'rna': CodonTable.ambiguous_rna_by_name["Standard"]}[source_type]
# Handle ambiguities by replacing ambiguous codons with 'X'
# TODO: this copy operation causes infinite recursion with python3.6 -
# not sure why it was here to begin with.
# table = copy.deepcopy(table)
table.forward_table = CodonWarningTable(table.forward_table)
for record in records:
sequence = str(record.seq)
seq = Seq(sequence, alphabet)
protein = seq.translate(table, to_stop=to_stop)
yield SeqRecord(protein, id=record.id, description=record.description) | def translate(records, translate) | Perform translation from generic DNA/RNA to proteins. Bio.Seq
does not perform back-translation because the codons would
more-or-less be arbitrary. Option to translate only up until
reaching a stop codon. translate must be one of the following:
dna2protein
dna2proteinstop
rna2protein
rna2proteinstop | 4.783607 | 4.401067 | 1.08692 |
logging.info('Applying _max_length_discard generator: '
'discarding records longer than %d.', max_length)
for record in records:
if len(record) > max_length:
# Discard
logging.debug('Discarding long sequence: %s, length=%d',
record.id, len(record))
else:
yield record | def max_length_discard(records, max_length) | Discard any records that are longer than max_length. | 4.976514 | 5.064428 | 0.982641 |
logging.info('Applying _min_length_discard generator: '
'discarding records shorter than %d.', min_length)
for record in records:
if len(record) < min_length:
logging.debug('Discarding short sequence: %s, length=%d',
record.id, len(record))
else:
yield record | def min_length_discard(records, min_length) | Discard any records that are shorter than min_length. | 3.636918 | 3.704447 | 0.981771 |
for record in records:
if len(ungap_all(record)) >= min_length:
yield record | def min_ungap_length_discard(records, min_length) | Discard any records that are shorter than min_length after removing gaps. | 4.384673 | 5.256048 | 0.834215 |
direction_text = 'ascending' if direction == 1 else 'descending'
logging.info('Indexing sequences by length: %s', direction_text)
# Adapted from the Biopython tutorial example.
# Get the lengths and ids, and sort on length
len_and_ids = sorted((len(rec), rec.id)
for rec in SeqIO.parse(source_file, source_file_type))
if direction == 0:
ids = reversed([seq_id for (length, seq_id) in len_and_ids])
else:
ids = [seq_id for (length, seq_id) in len_and_ids]
del len_and_ids # free this memory
# SeqIO.index does not handle gzip instances
if isinstance(source_file, gzip.GzipFile):
tmpfile = tempfile.NamedTemporaryFile()
source_file.seek(0)
tmpfile.write(source_file.read())
tmpfile.seek(0)
source_file = tmpfile
record_index = SeqIO.index(source_file.name, source_file_type)
for seq_id in ids:
yield record_index[seq_id] | def sort_length(source_file, source_file_type, direction=1) | Sort sequences by length. 1 is ascending (default) and 0 is descending. | 2.976954 | 2.835616 | 1.049844 |
i = iter(iterable)
while True:
r = list(itertools.islice(i, chunk_size))
if not r:
break
yield r | def batch(iterable, chunk_size) | Return items from iterable in chunks of chunk_size.
If len(iterable) % chunk_size > 0, the last item returned will be shorter. | 2.188579 | 2.46357 | 0.888377 |
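Usage sketch; because all chunks are drawn from one shared iterator, the generator makes a single pass, and a non-divisible length simply yields a short final chunk:

```python
import itertools

def batch(iterable, chunk_size):
    # Draw chunk_size items at a time from a single shared iterator.
    i = iter(iterable)
    while True:
        r = list(itertools.islice(i, chunk_size))
        if not r:
            break
        yield r

print(list(batch(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]
```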
# Ignore SIGPIPE, for head support
common.exit_on_sigpipe()
logging.basicConfig()
prot_sequences = SeqIO.parse(arguments.protein_align,
fileformat.from_handle(arguments.protein_align))
nucl_sequences = SeqIO.parse(arguments.nucl_align,
fileformat.from_handle(arguments.nucl_align))
instance = AlignmentMapper(TRANSLATION_TABLES[arguments.translation_table],
arguments.fail_action)
SeqIO.write(instance.map_all(prot_sequences, nucl_sequences),
arguments.out_file, fileformat.from_filename(arguments.out_file.name)) | def action(arguments) | Run | 5.390335 | 5.49285 | 0.981337 |
codons = [''.join(i) for i in batch(str(aligned_nucl), 3)]
for codon, aa in zip(codons, str(aligned_prot)):
# Check gaps
if codon == '---' and aa == '-':
continue
try:
trans = self.translation_table.forward_table[codon]
if trans != aa:
raise ValueError("Codon {0} translates to {1}, not {2}".format(
codon, trans, aa))
except (KeyError, CodonTable.TranslationError):
if aa != 'X':
if self.unknown_action == 'fail':
raise ValueError("Unknown codon: {0} mapped to {1}".format(
codon, aa))
elif self.unknown_action == 'warn':
logging.warning('Cannot verify that unknown codon %s '
'maps to %s', codon, aa)
return True | def _validate_translation(self, aligned_prot, aligned_nucl) | Given a seq for protein and nucleotide, ensure that the translation holds | 3.831625 | 3.723269 | 1.029102 |
if prot_seq.id != nucl_seq.id:
logging.warning(
'ID mismatch: %s != %s. Are the sequences in the same order?',
prot_seq.id, nucl_seq.id)
# Ungap nucleotides
codons = batch(str(nucl_seq.seq.ungap('-')), 3)
codons = [''.join(i) for i in codons]
codon_iter = iter(codons)
ungapped_prot = str(prot_seq.seq).replace('-', '')
if len(ungapped_prot) != len(codons):
table = self.translation_table.forward_table
prot_str = ' '.join(' ' + p + ' ' for p in ungapped_prot)
codon_str = ' '.join(codons)
trans_str = ' '.join(' ' + table.get(codon, 'X') + ' '
for codon in codons)
raise ValueError(
"Length mismatch: {0} codons vs {1} amino acids for {2}\n"
"{3}\n{4}\n{5}".format(len(codons), len(ungapped_prot), nucl_seq.id, prot_str,
codon_str, trans_str))
try:
nucl_align = ['---' if p == '-' else next(codon_iter)
for p in str(prot_seq.seq)]
except StopIteration:
assert False # Should be checked above
result = SeqRecord(Seq(''.join(nucl_align)), id=nucl_seq.id,
description=nucl_seq.description)
# Validate
self._validate_translation(prot_seq.seq.upper(), result.seq.upper())
return result | def map_alignment(self, prot_seq, nucl_seq) | Use aligned prot_seq to align nucl_seq | 3.31528 | 3.301343 | 1.004222 |
zipped = itertools.zip_longest(prot_alignment, nucl_sequences)
for p, n in zipped:
if p is None:
raise ValueError("Exhausted protein sequences")
elif n is None:
raise ValueError("Exhausted nucleotide sequences")
yield self.map_alignment(p, n) | def map_all(self, prot_alignment, nucl_sequences) | Convert protein sequences to nucleotide alignment | 2.915451 | 2.798032 | 1.041965 |
if not extension.startswith('.'):
raise ValueError("Extensions must begin with a period.")
try:
return EXTENSION_TO_TYPE[extension.lower()]
except KeyError:
raise UnknownExtensionError(
"seqmagick does not know how to handle " +
"files with extensions like this: " + extension) | def from_extension(extension) | Look up the BioPython file type corresponding with input extension.
Look up is case insensitive. | 4.836839 | 4.729134 | 1.022775 |
base, extension = os.path.splitext(file_name)
if extension in COMPRESS_EXT:
# Compressed file
extension = os.path.splitext(base)[1]
return from_extension(extension) | def from_filename(file_name) | Look up the BioPython file type corresponding to an input file name. | 4.25771 | 4.222269 | 1.008394 |
if fh in (sys.stdin, sys.stdout, sys.stderr):
return stream_default
return from_filename(fh.name) | def from_handle(fh, stream_default='fasta') | Look up the BioPython file type corresponding to a file-like object.
For stdin, stdout, and stderr, ``stream_default`` is used. | 3.538938 | 3.683815 | 0.960672 |
parser = argparse.ArgumentParser(description='seqmagick - Manipulate '
'sequence files.', prog='seqmagick')
parser.add_argument('-V', '--version', action='version',
version='seqmagick v' + version,
help="Print the version number and exit")
parser.add_argument('-v', '--verbose', dest='verbosity',
action='count', default=1,
help="Be more verbose. Specify -vv or -vvv for even more")
parser.add_argument('-q', '--quiet', action='store_const', const=0,
dest='verbosity', help="Suppress output")
# Subparsers
subparsers = parser.add_subparsers(dest='subparser_name')
parser_help = subparsers.add_parser('help',
help='Detailed help for actions using help <action>')
parser_help.add_argument('action')
# Add actions
actions = {}
for name, mod in subcommands.itermodules():
subparser = subparsers.add_parser(name, help=mod.__doc__,
description=mod.__doc__)
mod.build_parser(subparser)
actions[name] = mod.action
arguments = parser.parse_args(argv)
arguments.argv = argv
action = arguments.subparser_name
if action == 'help':
return parse_arguments([str(arguments.action), '-h'])
return actions[action], arguments | def parse_arguments(argv) | Extract command-line arguments for different actions. | 3.094478 | 3.068791 | 1.00837 |
counter = itertools.count(0).__next__
ungap_indexes = [
counter() if c not in gap_chars else None for c in iter(sequence)
]
return dict(
(ungapped, gapped)
for ungapped, gapped in zip(ungap_indexes, range(len(sequence)))
if ungapped is not None) | def ungap_index_map(sequence, gap_chars='-') | Returns a dict mapping from an index in the ungapped sequence to an index
in the gapped sequence.
>>> ungap_index_map('AC-TG-')
{0: 0, 1: 1, 2: 3, 3: 4} | 4.031763 | 4.443757 | 0.907287 |
return dict(
(v, k) for k, v in ungap_index_map(sequence, gap_chars).items()) | def gap_index_map(sequence, gap_chars='-') | Opposite of ungap_index_map: returns mapping from gapped index to ungapped
index.
>>> gap_index_map('AC-TG-')
{0: 0, 1: 1, 3: 2, 4: 3} | 5.898921 | 6.86094 | 0.859783 |
iupac_translation = {
'A': 'A',
'C': 'C',
'G': 'G',
'T': 'T',
'U': 'U',
'R': 'AG',
'Y': 'CT',
'S': 'GC',
'W': 'AT',
'K': 'GT',
'M': 'AC',
'B': 'CGT',
'D': 'AGT',
'H': 'ACT',
'V': 'ACG',
'N': 'ACGT',
'-': '-'
}
for i in (ambig_base, unambig_base):
if len(i) != 1:
raise ValueError("only one base may be passed.")
return unambig_base.upper() in iupac_translation[ambig_base.upper()] | def _iupac_ambiguous_equal(ambig_base, unambig_base) | Tests two bases for equality, accounting for IUPAC ambiguous DNA
ambiguous base may be IUPAC ambiguous, unambiguous must be one of ACGT | 1.792578 | 1.770711 | 1.012349 |
if len(s1) != len(s2):
raise ValueError("String lengths are not equal")
# Number of non-matching characters:
return sum(not equality_function(c1, c2) for c1, c2 in zip(s1, s2)) | def hamming_distance(s1, s2, equality_function=operator.eq) | Returns the hamming distance between two strings. | 2.67173 | 2.535858 | 1.05358 |
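Usage sketch; with the default comparator this is plain Hamming distance, while passing `_iupac_ambiguous_equal` (defined above) makes IUPAC ambiguity codes count as matches:

```python
import operator

def hamming_distance(s1, s2, equality_function=operator.eq):
    # Count positions where the comparator reports a mismatch.
    if len(s1) != len(s2):
        raise ValueError("String lengths are not equal")
    return sum(not equality_function(c1, c2) for c1, c2 in zip(s1, s2))

print(hamming_distance('ACGT', 'ACCT'))  # 1
# With _iupac_ambiguous_equal from above, 'N' matches any base:
# hamming_distance('ACNT', 'ACGT', _iupac_ambiguous_equal)  -> 0
```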
forward_loc = None
reverse_loc = None
seq_length = None
# Reverse complement the reverse primer, if appropriate
if reverse_complement:
reverse_primer = reverse_primer.reverse_complement()
forward_aligner = PrimerAligner(forward_primer)
reverse_aligner = PrimerAligner(reverse_primer)
for i, sequence in enumerate(sequences):
if seq_length is None:
seq_length = len(sequence)
elif len(sequence) != seq_length:
raise ValueError(("Sequence Length Heterogeneity: {0} != {1}. "
"Is this an alignment?").format(
len(sequence), seq_length))
index_map = ungap_index_map(sequence.seq)
if forward_loc is None:
ham_dist, start, end = forward_aligner.align(sequence.seq.ungap())
if ham_dist <= max_hamming_distance:
forward_loc = index_map[start], index_map[end]
logging.info("Forward in sequence %d: indexes %d to %d", i + 1,
*forward_loc)
if reverse_loc is None:
ham_dist, start, end = reverse_aligner.align(sequence.seq.ungap())
if ham_dist <= max_hamming_distance:
reverse_loc = index_map[start], index_map[end]
logging.info("Reverse in sequence %d: indexes %d to %d", i + 1,
*reverse_loc)
if forward_loc and reverse_loc:
# Both found
# Check order
if forward_loc[0] > reverse_loc[0]:
raise PrimerOrderError(forward_loc[0], reverse_loc[0])
return forward_loc, reverse_loc
else:
logging.debug(
"Sequence %d: %d/2 primers found", i + 1,
sum(j is not None for j in (forward_loc, reverse_loc)))
# Did not find either the forward or reverse primer:
if not forward_loc:
raise PrimerNotFound(forward_primer)
else:
raise PrimerNotFound(reverse_primer) | def locate_primers(sequences, forward_primer, reverse_primer,
reverse_complement, max_hamming_distance) | Find forward and reverse primers in a set of sequences, return two tuples:
(forward_start, forward_end), (reverse_start, reverse_end) | 2.461906 | 2.43716 | 1.010154 |
logging.info("Trimming from %d to %d", start, end)
return (sequence[start:end] for sequence in sequences) | def trim(sequences, start, end) | Slice the input sequences from start to end | 3.604483 | 3.825739 | 0.942166 |
# Determine file format for input and output
source_format = (arguments.source_format or
fileformat.from_handle(arguments.source_file))
output_format = (arguments.output_format or
fileformat.from_handle(arguments.output_file))
# Load the alignment
with arguments.source_file:
sequences = SeqIO.parse(
arguments.source_file,
source_format,
alphabet=Alphabet.Gapped(Alphabet.single_letter_alphabet))
# Locate primers
(forward_start, forward_end), (reverse_start, reverse_end) = locate_primers(
sequences, arguments.forward_primer,
arguments.reverse_primer, arguments.reverse_complement,
arguments.max_hamming_distance)
# Generate slice indexes
if arguments.include_primers:
start = forward_start
end = reverse_end + 1
else:
start = forward_end + 1
end = reverse_start
# Rewind the input file
arguments.source_file.seek(0)
sequences = SeqIO.parse(
arguments.source_file,
source_format,
alphabet=Alphabet.Gapped(Alphabet.single_letter_alphabet))
# Apply the transformation
prune_action = _ACTIONS[arguments.prune_action]
transformed_sequences = prune_action(sequences, start, end)
with arguments.output_file:
SeqIO.write(transformed_sequences, arguments.output_file,
output_format) | def action(arguments) | Trim the alignment as specified | 2.94055 | 2.921592 | 1.006489 |
seq_aln, primer_aln, score, start, end = pairwise2.align.globalms(
str(sequence).upper(), str(self.primer).upper(),
self.match, self.difference, self.gap_open,
self.gap_extend, one_alignment_only=True,
penalize_end_gaps=self.penalize_end_gaps)[0]
# Get an ungapped mapping on the sequence
index_map = gap_index_map(seq_aln)
ungap_map = ungap_index_map(primer_aln)
# Trim to primer
start = ungap_map[0]
end = ungap_map[len(self.primer) - 1]
trimmed = seq_aln[start:end + 1]
ham_dist = hamming_distance(primer_aln[start:end + 1], trimmed,
_iupac_ambiguous_equal)
# assert primer_aln[start:end].replace('-', '') == str(self.primer)
# TODO: handle start or end being gap better. For now, just give up
# and return maxint for the hamming distance
if trimmed.endswith('-'):
tail = len(trimmed) - len(trimmed.rstrip('-'))
end = index_map[end - tail] + 1
ham_dist = sys.maxsize
else:
end = index_map[end]
if trimmed.startswith('-'):
start = 0
ham_dist = sys.maxsize
else:
start = index_map[start]
return ham_dist, start, end | def align(self, sequence) | Aligns the primer to the given query sequence, returning a tuple of:
hamming_distance, start, end
Where hamming distance is the distance between the primer and aligned
sequence, and start and end give the start and end index of the primer
relative to the input sequence. | 3.918909 | 3.855601 | 1.01642 |
for record in records:
seq_hash = hashlib.sha1(str(record.seq).encode('utf-8')).hexdigest()
if seq_hash[0].isdigit():
yield record | def hash_starts_numeric(records) | Yield only those records whose sequence's SHA-1 hex digest starts with a
numeric character. | 3.546213 | 3.137775 | 1.130168 |
if permissions is None:
permissions = apply_umask()
# Handle stdout:
if path == '-':
yield sys.stdout
else:
base_dir = os.path.dirname(path)
kwargs['suffix'] = os.path.basename(path)
tf = tempfile.NamedTemporaryFile(
dir=base_dir, mode=mode, delete=False, **kwargs)
# If a file_factory is given, close, and re-open a handle using the
# file_factory
if file_factory is not None:
tf.close()
tf = file_factory(tf.name)
try:
with tf:
yield tf
# Move
os.rename(tf.name, path)
os.chmod(path, permissions)
except:
os.remove(tf.name)
raise | def atomic_write(path, mode='wt', permissions=None, file_factory=None, **kwargs) | Open a file for atomic writing.
Generates a temp file, renames to value of ``path``.
Arguments:
``permissions``: Permissions to set (default: umask)
``file_factory``: If given, the handle yielded will be the result of
calling file_factory(path)
Additional arguments are passed to tempfile.NamedTemporaryFile | 3.179289 | 3.265848 | 0.973496 |
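The body above is a generator, so in the original it is presumably wrapped with `contextlib.contextmanager`; a minimal standalone sketch of the same write-to-temp-then-rename pattern (POSIX rename semantics assumed):

```python
import os
import tempfile
from contextlib import contextmanager

@contextmanager
def atomic_write_sketch(path, mode='wt'):
    # Write to a sibling temp file, then rename over the target only if
    # the caller's block completed without raising.
    tf = tempfile.NamedTemporaryFile(
        dir=os.path.dirname(path) or '.', mode=mode, delete=False)
    try:
        with tf:
            yield tf
        os.rename(tf.name, path)
    except BaseException:
        os.remove(tf.name)
        raise

with atomic_write_sketch('demo.fasta') as fh:
    fh.write('>seq1\nACGT\n')
print(open('demo.fasta').read())
```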
value_range = string.split(':')
if len(value_range) == 1:
start = int(value_range[0])
stop = start
elif len(value_range) == 2:
start, stop = tuple(int(i) if i else None for i in value_range)
else:
msg = "{0} is not a valid, 1-indexed range.".format(string)
raise argparse.ArgumentTypeError(msg)
if start == 0 or (stop or sys.maxsize) < (start or 0):
msg = "{0} is not a valid, 1-indexed range.".format(string)
raise argparse.ArgumentTypeError(msg)
# Convert from 1-indexed to 0-indexed
if start is not None and start > 0:
start -= 1
return slice(start, stop) | def cut_range(string) | A custom argparse 'type' to deal with sequences ranges such as 5:500.
Returns a 0-based slice corresponding to the selection defined by the slice | 2.430252 | 2.368044 | 1.02627 |
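A worked example of the 1-indexed-to-slice conversion:

```python
# cut_range maps 1-indexed, inclusive ranges onto 0-based slices:
#   '5:500' -> slice(4, 500)    start shifted to 0-based, stop kept
#   '5'     -> slice(4, 5)      a single 1-indexed position
#   ':3'    -> slice(None, 3)   open-ended start
seq = 'ABCDEFGHIJ'
print(seq[slice(4, 5)])     # 'E' -- the 5th character, 1-indexed
print(seq[slice(None, 3)])  # 'ABC'
```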
@functools.wraps(type_func)
def inner(string):
result = type_func(string)
if not (minimum <= result <= maximum):
raise argparse.ArgumentTypeError(
"Please provide a value between {0} and {1}".format(
minimum, maximum))
return result
return inner | def typed_range(type_func, minimum, maximum) | Require variables to be of the specified type, between minimum and maximum | 2.526406 | 2.554895 | 0.988849 |
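Usage sketch as an argparse `type` (the option name is illustrative), with the bounds check written as a chained comparison:

```python
import argparse
import functools

def typed_range(type_func, minimum, maximum):
    # argparse 'type' factory: parse with type_func, then bounds-check.
    @functools.wraps(type_func)
    def inner(string):
        result = type_func(string)
        if not (minimum <= result <= maximum):
            raise argparse.ArgumentTypeError(
                "Please provide a value between {0} and {1}".format(
                    minimum, maximum))
        return result
    return inner

parser = argparse.ArgumentParser()
parser.add_argument('--gap-threshold', type=typed_range(float, 0.0, 1.0))
print(parser.parse_args(['--gap-threshold', '0.5']).gap_threshold)  # 0.5
```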
if isinstance(argument_keys, str):
argument_keys = [argument_keys]
argument_keys = argument_keys or []
class PartialAppendAction(argparse.Action):
def __init__(self,
option_strings,
dest,
const=None,
default=None,
required=False,
help=None,
type=None,
metavar=None,
nargs=None,
**kwargs):
super(PartialAppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=len(argument_keys),
const=const,
default=default,
required=required,
metavar=metavar,
type=type,
help=help, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
items = copy.copy(getattr(namespace, self.dest, None)) or []
# If no value was set default to empty list
if values is None:
values = []
elif not isinstance(values, list):
values = [values]
if len(argument_keys) != len(values):
raise ValueError("Unexpected number of values")
# Generate keyword arguments for the input function
kwargs = dict(list(zip(argument_keys, values)))
f = functools.partial(fn, **kwargs)
items.append(f)
setattr(namespace, self.dest, items)
return PartialAppendAction | def partial_append_action(fn, argument_keys=None) | Creates a new class extending argparse.Action, which appends a
partially-applied function to dest.
The optional argument_keys argument should either be None (no additional
arguments to fn) or an iterable of function keys to partially apply. | 1.998098 | 2.026181 | 0.98614 |
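A usage sketch (the transform function and option names here are illustrative, and `partial_append_action` from above is assumed to be in scope): each occurrence of the option appends a partially-applied function to `dest`, so a pipeline of transforms can be assembled directly from the command line:

```python
import argparse

# Hypothetical transform; partial_append_action is assumed from above.
def cut_seq(records, start, end):
    return (r[start:end] for r in records)

parser = argparse.ArgumentParser()
parser.add_argument(
    '--cut', dest='transforms', type=int,
    action=partial_append_action(cut_seq, ['start', 'end']))

args = parser.parse_args(['--cut', '0', '10', '--cut', '5', '8'])
# args.transforms == [functools.partial(cut_seq, start=0, end=10),
#                     functools.partial(cut_seq, start=5, end=8)]
```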
def inner(string):
value = target_type(string)
if not value >= 0:
raise argparse.ArgumentTypeError("Invalid positive number: " +
string)
return value
return inner | def positive_value(target_type) | Wraps target_type in a function that requires the parsed argument
be >= 0 | 5.721087 | 5.145907 | 1.111774 |
add_options(parser)
parser.add_argument('source_file', type=common.FileType('rt'),
help="Input sequence file")
parser.add_argument('dest_file', help="Output file")
return parser | def build_parser(parser) | Add shared arguments to the convert or mogrify parser. | 4.564107 | 4.133883 | 1.104073 |
parts = string.split(':', 2)
if len(parts) < 2:
raise ValueError(
"Illegal specification. Should be module:function[:parameter]")
module_path, function_name = parts[:2]
# Import the module
module_vars = {}
with open(module_path) as fh:
exec(compile(fh.read(), module_path, 'exec'), module_vars)
try:
function = module_vars[function_name]
except KeyError:
raise argparse.ArgumentTypeError("{0} has no attribute '{1}'".format(
module_path, function_name))
if len(parts) == 3:
old_function = function
function = lambda r: old_function(r, parts[2])
return function | def module_function(string) | Load a function from a python module using a file name, function name
specification of format:
/path/to/x.py:function_name[:parameter] | 2.874858 | 2.700205 | 1.064681 |
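A usage sketch of the `path:function[:parameter]` spec, assuming `module_function` above is in scope (the throwaway module and function names are illustrative; POSIX paths assumed, since a drive-letter colon would confuse the split):

```python
import os
import tempfile

# Write a throwaway module, then load a function from it by spec.
src = ("def keep_long(records, min_len='0'):\n"
       "    return [r for r in records if len(r) >= int(min_len)]\n")
with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as tf:
    tf.write(src)

# module_function (above) binds the trailing ':3' as the second argument.
f = module_function('{0}:keep_long:3'.format(tf.name))
print(f(['AC', 'ACGT', 'ACG']))  # ['ACGT', 'ACG']
os.remove(tf.name)
```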
out = defaultdict(list)
for header, seq in parse_fasta(cgmlst_fasta):
if '|' not in header:
raise Exception('Unexpected format for cgMLST fasta file header. No "|" (pipe) delimiter present! Header="{}"'.format(header))
marker_name, allele_name = header.split('|')
out[marker_name].append(seq)
return out | def parse_cgmlst_alleles(cgmlst_fasta) | Parse cgMLST alleles from fasta file
cgMLST FASTA file must have a header format of ">{marker name}|{allele name}"
Args:
cgmlst_fasta (str): cgMLST fasta file path
Returns:
dict of list: Marker name to list of allele sequences | 4.274293 | 3.646484 | 1.172169 |
with open(filepath, 'r') as f:
seqs = []
header = ''
for line in f:
line = line.strip()
if line == '':
continue
if line[0] == '>':
if header == '':
header = line.replace('>','')
else:
yield header, ''.join(seqs)
seqs = []
header = line.replace('>','')
else:
seqs.append(line)
yield header, ''.join(seqs) | def parse_fasta(filepath) | Parse a fasta file returning a generator yielding tuples of fasta headers to sequences.
Note:
This function should give equivalent results to SeqIO from BioPython
.. code-block:: python
from Bio import SeqIO
# biopython to dict of header-seq
hseqs_bio = {r.description:str(r.seq) for r in SeqIO.parse(fasta_path, 'fasta')}
# this func to dict of header-seq
hseqs = {header:seq for header, seq in parse_fasta(fasta_path)}
# both methods should return the same dict
assert hseqs == hseqs_bio
Args:
filepath (str): Fasta file path
Returns:
generator: yields tuples of (<fasta header>, <fasta sequence>) | 3.043956 | 1.338936 | 2.273415 |
header_count = 0
line_count = 1
nt_count = 0
with open(fasta_path) as f:
for l in f:
l = l.strip()
if l == '':
continue
if l[0] == '>':
header_count += 1
continue
if header_count == 0 and l[0] != '>':
error_msg = 'First non-blank line (L:{line_count}) does not contain FASTA header. Line beginning with ">" expected.' \
.format(line_count=line_count)
logger.error(error_msg)
raise Exception(error_msg)
non_nucleotide_chars_in_line = set(l) - VALID_NUCLEOTIDES
if len(non_nucleotide_chars_in_line) > 0:
error_msg = 'Line {line} contains the following non-nucleotide characters: {non_nt_chars}' \
.format(line=line_count,
non_nt_chars=', '.join([x for x in non_nucleotide_chars_in_line]))
logger.error(error_msg)
raise Exception(error_msg)
nt_count += len(l)
line_count += 1
if nt_count == 0:
error_msg = 'File "{}" does not contain any nucleotide sequence.'.format(fasta_path)
logger.error(error_msg)
raise Exception(error_msg)
logger.info('Valid FASTA format "{}" ({} bp)'.format(fasta_path, nt_count)) | def fasta_format_check(fasta_path, logger) | Check that a file is valid FASTA format.
- First non-blank line needs to begin with a '>' header character.
- Sequence can only contain valid IUPAC nucleotide characters
Args:
fasta_str (str): FASTA file contents string
Raises:
Exception: If invalid FASTA format | 2.283862 | 2.189877 | 1.042918 |
return np.array([[NT_TO_INT[c] for c in x.upper()] for x in seqs]) | def seq_int_arr(seqs) | Convert list of ACGT strings to matix of 1-4 ints
Args:
seqs (list of str): nucleotide sequences with only 'ACGT' characters
Returns:
numpy.array of int: matrix of integers from 1 to 4 inclusive representing A, C, G, and T | 6.460312 | 9.066488 | 0.712548 |
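A sketch with `NT_TO_INT` stubbed to the mapping the docstring implies (the real module-level constant is not shown above):

```python
import numpy as np

# Assumed mapping, per the docstring: A, C, G, T -> 1..4.
NT_TO_INT = {'A': 1, 'C': 2, 'G': 3, 'T': 4}

def seq_int_arr(seqs):
    return np.array([[NT_TO_INT[c] for c in x.upper()] for x in seqs])

print(seq_int_arr(['ACGT', 'tgca']))
# [[1 2 3 4]
#  [4 3 2 1]]
```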
starts = arr[:,0:bp]
ends = arr[:,-bp:]
starts_ends_idxs = defaultdict(list)
l, seq_len = arr.shape
for i in range(l):
start_i = starts[i]
end_i = ends[i]
start_i_str = ''.join([str(x) for x in start_i])
end_i_str = ''.join([str(x) for x in end_i])
starts_ends_idxs[start_i_str + end_i_str].append(i)
return starts_ends_idxs | def group_alleles_by_start_end_Xbp(arr, bp=28) | Group alleles by matching ends
Args:
arr (numpy.array): 2D int matrix of alleles
bp (int): length of ends to group by
Returns:
dict of lists: key of start + end strings to list of indices of alleles with matching ends | 2.270187 | 2.348312 | 0.966731 |
clusters = fcluster(linkage(dists), t, criterion='distance')
cluster_idx = defaultdict(list)
for idx, cl in enumerate(clusters):
cluster_idx[cl].append(idx)
return cluster_idx | def allele_clusters(dists, t=0.025) | Flat clusters from distance matrix
Args:
dists (numpy.array): pdist distance matrix
t (float): fcluster (tree cutting) distance threshold
Returns:
dict of lists: cluster number to list of indices of distances in cluster | 2.611364 | 3.383235 | 0.771854 |
row_sums = np.apply_along_axis(arr=dists, axis=0, func1d=np.sum)
return row_sums.argmin() | def min_row_dist_sum_idx(dists) | Find the index of the row with the minimum row distance sum
This should return the index of the row index with the least distance overall
to all other rows.
Args:
dists (np.array): must be square distance matrix
Returns:
int: index of row with min dist row sum | 2.862901 | 3.9465 | 0.725428 |
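A worked example; for a symmetric distance matrix the column sums computed along axis 0 equal the row sums, so `argmin` picks the most central row:

```python
import numpy as np

dists = np.array([[0., 1., 4.],
                  [1., 0., 2.],
                  [4., 2., 0.]])
row_sums = np.apply_along_axis(arr=dists, axis=0, func1d=np.sum)
print(row_sums)           # [5. 3. 6.]
print(row_sums.argmin())  # 1 -- the middle row is closest to all others
```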
centroid_alleles = set()
len_allele = group_alleles_by_size(alleles)
for length, seqs in len_allele.items():
# if only one allele of a particular size, add as centroid, move on to next size group
if len(seqs) == 1:
centroid_alleles.add(seqs[0])
continue
# convert allele nucleotide sequences to integer matrix
seq_arr = seq_int_arr(seqs)
# group alleles by matching ends
starts_ends_idxs = group_alleles_by_start_end_Xbp(seq_arr, bp=bp)
for k, idxs in starts_ends_idxs.items():
# if only one allele for a particular matching ends group, then add as centroid and move onto next ends group
if len(idxs) == 1:
centroid_alleles.add(seqs[idxs[0]])
continue
# fetch subset of int allele sequences for a matching ends group
seq_arr_subset = seq_arr[idxs]
# Hamming distances between alleles
dists = pdist(seq_arr_subset, 'hamming')
# create flat clusters (tree cut) at t threshold
cl = allele_clusters(dists, t=t)
# for each allele cluster
dm_sq = squareform(dists)
for cl_key, cl_idxs in cl.items():
# if only 1 or 2 alleles in cluster then return first
if len(cl_idxs) == 1 or len(cl_idxs) == 2:
# get first cluster index and get nt seq for that index
centroid_alleles.add(seq_int_arr_to_nt(seq_arr_subset[cl_idxs[0]]))
continue
# else find allele with min distances to all other alleles in cluster
dm_sub = dm_subset(dm_sq, cl_idxs)
min_idx = min_row_dist_sum_idx(dm_sub)
# add nucleotide seq for cluster centroid allele to centroids set
centroid_alleles.add(seq_int_arr_to_nt(seq_arr_subset[min_idx]))
#end for cl_key, cl_idxs in cl.iteritems():
#end for k, idxs in starts_ends_idxs.iteritems():
#end for length, seqs in alleles.iteritems():
return centroid_alleles | def find_centroid_alleles(alleles, bp=28, t=0.025) | Reduce list of alleles to set of centroid alleles based on size grouping, ends matching and hierarchical clustering
Workflow for finding centroid alleles:
- grouping by size (e.g. 100bp, 101bp, 103bp, etc)
- then grouped by `bp` nucleotides at ends matching
- size and ends grouped alleles hierarchically clustered (Hamming distance, complete linkage)
- tree cutting at threshold `t`
- select allele with minimum distance to other alleles in cluster as centroid
Args:
alleles (iterable): collection of allele nucleotide sequences
bp (int): number of bp matching at allele ends for size grouping (default=28 due to default blastn megablast word size)
t (float): cluster generation (tree cutting) distance threshold for size grouped alleles
Returns:
set of str: centroid alleles | 4.141847 | 3.90484 | 1.060696 |
args = [MASH_BIN,
'dist',
MASH_SKETCH_FILE,
fasta_path]
p = Popen(args, stderr=PIPE, stdout=PIPE)
(stdout, stderr) = p.communicate()
retcode = p.returncode
if retcode != 0:
raise Exception('Could not run Mash dist {}'.format(stderr))
return stdout | def mash_dist_trusted(fasta_path) | Compute Mash distances of sketch file of genome fasta to RefSeq sketch DB.
Args:
fasta_path (str): genome fasta file path
Returns:
(str): Mash STDOUT string | 3.119581 | 3.305265 | 0.943822 |
gs_collapse = []
genome_idx_dict = {}
indices = []
patt_dict = {}
for i, g in enumerate(genomes):
p = arr[i, :].tobytes()
if p in patt_dict:
parent = patt_dict[p]
idx = genome_idx_dict[parent]
gs_collapse[idx].append(g)
else:
indices.append(i)
patt_dict[p] = g
genome_idx_dict[g] = len(gs_collapse)
gs_collapse.append([g])
return arr[indices, :], gs_collapse | def nr_profiles(arr, genomes) | Get a condensed cgMLST pairwise distance matrix for specified Genomes_
where condensed means redundant cgMLST profiles are only represented once in the distance matrix.
Args:
arr (numpy.array): cgMLST allelic profile matrix, one row per genome
genomes (list): List of Genome_ names corresponding to the rows of arr
Returns:
(numpy.array, list): tuple of condensed cgMLST distance matrix and list of grouped Genomes_ | 2.824436 | 3.048773 | 0.926417 |
assert isinstance(df, pd.DataFrame)
markers = []
alleles = []
for x in df['qseqid']:
marker, allele = x.split('|')
markers.append(marker)
alleles.append(int(allele))
df.loc[:, 'marker'] = markers
df.loc[:, 'allele'] = alleles
df.loc[:, 'is_match'] = (df['coverage'] >= 1.0) & (df['pident'] >= 90.0) & ~(df['is_trunc'])
df.loc[:, 'allele_name'] = df.apply(lambda x: allele_name(x.sseq.replace('-', '')), axis=1)
df.loc[:, 'is_perfect'] = (df['coverage'] == 1.0) & (df['pident'] == 100.0)
df_perf = df[df['is_perfect']]
perf_markers = df_perf['marker'].unique()
df.loc[:, 'has_perfect_match'] = df['marker'].isin(perf_markers)
start_idxs, end_idxs, needs_revcomps, trunc, is_extended = extend_subj_match_vec(df)
df.loc[:, 'start_idx'] = start_idxs
df.loc[:, 'end_idx'] = end_idxs
df.loc[:, 'needs_revcomp'] = needs_revcomps
df.loc[:, 'trunc'] = trunc
df.loc[:, 'is_extended'] = is_extended
df.loc[:, 'sseq_msa_gaps'] = np.zeros(df.shape[0], dtype=np.int64)
df.loc[:, 'sseq_msa_p_gaps'] = np.zeros(df.shape[0], dtype=np.float64)
df.loc[:, 'too_many_gaps'] = trunc
return df | def process_cgmlst_results(df) | Append informative fields to cgMLST330 BLAST results DataFrame
The `qseqid` column must contain cgMLST330 query IDs with `{marker name}|{allele number}` format.
The `qseqid` parsed allele numbers and marker names are appended as new fields.
`is_perfect` column contains boolean values for whether an allele result is 100% identity and coverage.
`has_perfect_match` denotes if a cgMLST330 marker has a perfect allele match.
The top result with the largest bitscore for a marker with no perfect match is used to retrieve the allele present
at that marker locus.
Args:
df (pandas.DataFrame): DataFrame of cgMLST330 BLAST results
Returns:
pandas.DataFrame: cgMLST330 BLAST results DataFrame with extra fields (`marker`, `allele`, `is_perfect`, `has_perfect_match`) | 2.720761 | 2.604042 | 1.044822 |
contig_blastn_records = defaultdict(list)
markers = df.marker.unique()
for m in markers:
dfsub = df[df.marker == m]
for i, r in dfsub.iterrows():
if r.coverage < 1.0:
contig_blastn_records[r.stitle].append(r)
break
return contig_blastn_records | def alleles_to_retrieve(df) | Alleles to retrieve from genome fasta
Get a dict of the genome fasta contig title to a list of blastn results of the allele sequences that must be
retrieved from the genome contig.
Args:
df (pandas.DataFrame): blastn results dataframe
Returns:
{str:[pandas.Series]}: dict of contig title (header name) to list of top blastn result records for each marker
for which the allele sequence must be retrieved from the original sequence. | 4.06161 | 3.568126 | 1.138303 |
assert isinstance(df, pd.DataFrame)
from collections import defaultdict
d = defaultdict(list)
for idx, row in df.iterrows():
marker = row['marker']
d[marker].append(row)
marker_results = {}
for k,v in d.items():
if len(v) > 1:
logging.debug('Multiple potential cgMLST allele matches (n=%s) found for marker %s. Selecting match on longest contig.', len(v), k)
df_marker = pd.DataFrame(v)
df_marker.sort_values('slen', ascending=False, inplace=True)
for i,r in df_marker.iterrows():
allele = r['allele_name']
slen = r['slen']
logging.debug('Selecting allele %s from contig with length %s', allele, slen)
seq = r['sseq']
if '-' in seq:
logging.warning('Gaps found in allele. Removing gaps. %s', r)
seq = seq.replace('-', '').upper()
allele = allele_name(seq)
marker_results[k] = allele_result_dict(allele, seq, r.to_dict())
break
elif len(v) == 1:
row = v[0]
seq = row['sseq']
if '-' in seq:
logging.warning('Gaps found in allele. Removing gaps. %s', row)
seq = seq.replace('-', '').upper()
allele = allele_name(seq)
marker_results[k] = allele_result_dict(allele, seq, row.to_dict())
else:
err_msg = 'Empty list of matches for marker {}'.format(k)
logging.error(err_msg)
raise Exception(err_msg)
return marker_results | def matches_to_marker_results(df) | Perfect BLAST matches to marker results dict
Parse perfect BLAST matches to marker results dict.
Args:
df (pandas.DataFrame): DataFrame of perfect BLAST matches
Returns:
dict: cgMLST330 marker names to matching allele numbers | 2.579875 | 2.562669 | 1.006714 |
closest_distance = df_relatives['distance'].min()
if closest_distance > CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD:
logging.warning('Min cgMLST distance (%s) above subspeciation distance threshold (%s)',
closest_distance,
CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD)
return None
else:
df_relatives = df_relatives.loc[df_relatives.distance <= CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD, :]
df_relatives = df_relatives.sort_values('distance', ascending=True)
logging.debug('df_relatives by cgmlst %s', df_relatives.head())
genome_spp = genomes_to_subspecies()
subspecies_below_threshold = [genome_spp[member_genome] if member_genome in genome_spp else None for member_genome in df_relatives.index]
subspecies_below_threshold = filter(None, subspecies_below_threshold)
subspecies_counter = Counter(subspecies_below_threshold)
logging.debug('Subspecies counter: %s', subspecies_counter)
return (subspecies_counter.most_common(1)[0][0], closest_distance, dict(subspecies_counter)) | def cgmlst_subspecies_call(df_relatives) | Call Salmonella subspecies based on cgMLST results
This method attempts to find the majority subspecies type within curated
public genomes above a cgMLST allelic profile distance threshold.
Note:
``CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD`` is the cgMLST distance
threshold used to determine the subspecies by cgMLST. It is set at a
distance of 0.9 which translates to a cgMLST allelic similarity of 10%.
A threshold of 0.9 is generous and reasonable given the congruence
between subspecies designations and 10% cgMLST clusters by Adjusted
Rand (~0.850) and Adjusted Wallace metrics (~0.850 both ways).
Args:
df_relatives (pandas.DataFrame): Table of genomes related by cgMLST to input genome
Returns:
None: if no curated public genomes found to have a cgMLST profile similarity of 10% or greater
(string, float, dict): most common subspecies, closest related public genome distance, subspecies frequencies | 2.867805 | 2.621683 | 1.093879 |
filename = os.path.basename(fasta_path)
return re.sub(r'(\.fa$)|(\.fas$)|(\.fasta$)|(\.fna$)|(\.\w{1,}$)', '', filename) | def genome_name_from_fasta_path(fasta_path) | Extract genome name from fasta filename
Get the filename without directory and remove the file extension.
Example:
With fasta file path ``/path/to/genome_1.fasta``::
fasta_path = '/path/to/genome_1.fasta'
genome_name = genome_name_from_fasta_path(fasta_path)
print(genome_name)
# => "genome_1"
Args:
fasta_path (str): fasta file path
Returns:
str: genome name | 3.154746 | 3.942099 | 0.800271 |
needs_revcomp = df.sstart > df.send
add_to_end = df.qlen - df.qend
add_to_start = df.qstart - 1
ssum2 = (df.send + df.sstart) / 2.0
sabs2 = np.abs(df.send - df.sstart) / 2.0
end_idx = ssum2 + sabs2 - 1
start_idx = ssum2 - sabs2 - 1
start_idx[needs_revcomp] -= add_to_end
start_idx[~needs_revcomp] -= add_to_start
end_idx[needs_revcomp] += add_to_start
end_idx[~needs_revcomp] += add_to_end
clipped_start_idx = np.clip(start_idx, 0, (df.slen - 1))
clipped_end_idx = np.clip(end_idx, 0, (df.slen - 1))
trunc = (clipped_start_idx != start_idx) | (clipped_end_idx != end_idx)
is_extended = (add_to_start > 0) | (add_to_end > 0)
return clipped_start_idx, clipped_end_idx, needs_revcomp, trunc, is_extended | def extend_subj_match_vec(df) | Get the extended clipped (clamped) start and end subject sequence indices
Also get whether each match needs to be reverse complemented and whether each extended match would be truncated by
the end of the subject sequence.
Args:
df (pandas.DataFrame): blastn results dataframe
Returns:
int pandas.Series: extended and clipped start indices
int pandas.Series: extended and clipped end indices
bool pandas.Series: does extracted seq need to be reverse complemented?
bool pandas.Series: would the extended seq be truncated by the ends of the subject sequence?
bool pandas.Series: was the subject seq extended? | 2.39604 | 2.166208 | 1.106099 |
return [attr for attr in dir(x) if not attr.startswith("__") and not callable(getattr(x, attr))] | def listattrs(x) | Get all instance and class attributes for an object
Get all instance and class attributes for an object except those that start
with "__" (double underscore).
__dict__ of an object only reports the instance attributes while dir()
reports all of the attributes of an object including private ones.
Callable attrs are filtered out.
Args:
x (object): Some object
Returns:
list str: List of non-callable non-private attributes of object x | 2.358208 | 3.389309 | 0.695778 |
if x is None or isinstance(x, (str, int, float, bool)):
return x
if isinstance(x, np.int_):
return int(x)
if isinstance(x, np.int64):
return int(x)
if isinstance(x, np.float_):
return float(x)
if isinstance(x, np.float64):
return float(x)
if isinstance(x, np.bool_):
return bool(x)
if depth + 1 > depth_threshold: return {}
if isinstance(x, list):
out = []
for v in x:
tmp = to_dict(v, depth + 1, exclude_keys, depth_threshold)
if tmp == {}: continue
out.append(tmp)
return out
out = {}
if isinstance(x, dict):
for k, v in x.items():
if k in exclude_keys: continue
if not isinstance(k, (str,)):
k = str(k)
tmp = to_dict(v, depth + 1, exclude_keys, depth_threshold)
if tmp == {}: continue
out[k] = tmp
return out
for attr in listattrs(x):
if attr in exclude_keys: continue
v = getattr(x, attr)
tmp = to_dict(v, depth + 1, exclude_keys, depth_threshold)
if tmp == {}: continue
out[attr] = tmp
return out | def to_dict(x, depth, exclude_keys=set(), depth_threshold=8) | Transform a nested object/dict/list into a regular dict
json.dump(s) and pickle don't like to un/serialize regular Python objects so
this function should handle arbitrarily nested objects to be serialized to
regular string, float, int, bool, None values.
This is a recursive function so by default it will exit at a certain depth (depth_threshold=8).
Args:
x (object): Some object to dict-ify unless x is a scalar/literal then return x as is
depth (int): Starting depth must be 0 (cannot supply default value due to weird Pythonisms)
exclude_keys (set): Keys to avoid adding to the output dict
depth_threshold (int): object/dict nesting depth to stop at
Returns:
dict: dict with only scalar/literal leaf values | 1.61829 | 1.66375 | 0.972677 |
if x is None or isinstance(x, (str, int, float, bool)):
out[key] = x
return out
if isinstance(x, list):
for i, v in enumerate(x):
new_key = '{}{}{}'.format(key, sep, i)
out = _recur_flatten(new_key, v, out, sep)
if isinstance(x, dict):
for k, v in x.items():
new_key = '{}{}{}'.format(key, sep, k)
out = _recur_flatten(new_key, v, out, sep)
return out | def _recur_flatten(key, x, out, sep='.') | Helper function to flatten_dict
Recursively flatten all nested values within a dict
Args:
key (str): parent key
x (object): object to flatten or add to out dict
out (dict): 1D output dict
sep (str): flattened key separator string
Returns:
dict: flattened 1D dict | 1.586797 | 1.550283 | 1.023553 |
out = {}
for k, v in x.items():
out = _recur_flatten(k, v, out)
return out | def flatten_dict(x) | Flatten a dict
Flatten an arbitrarily nested dict as output by to_dict
.. note::
Keys in the flattened dict may get very long.
Args:
x (dict): Arbitrarily nested dict (maybe resembling a tree) with literal/scalar leaf values
Returns:
dict: flattened 1D dict | 3.374915 | 4.662932 | 0.723775 |
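A usage sketch, assuming `_recur_flatten` and `flatten_dict` above are in scope: nested keys are joined with '.', and list items contribute their index as a key component:

```python
nested = {'blast': {'pident': 99.5,
                    'hits': [{'id': 'a'}, {'id': 'b'}]}}
print(flatten_dict(nested))
# {'blast.pident': 99.5, 'blast.hits.0.id': 'a', 'blast.hits.1.id': 'b'}
```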
if df is not None:
return [dict(r) for i, r in df.head(1).iterrows()][0] | def df_first_row_to_dict(df) | First DataFrame row to dict
Args:
df (pandas.DataFrame): A DataFrame with at least one row
Returns:
A dict that looks like:
{'C1': 'x', 'C2': 'y', 'C3': 'z'}
from a DataFrame that looks like:
C1 C2 C3
1 x y z
Else if `df` is `None`, returns `None` | 4.125495 | 5.812114 | 0.70981 |
q_match_len = abs(qstart - qend) + 1
s_max = max(sstart, send)
s_min = min(sstart, send)
return (q_match_len < qlen) and (s_max >= slen or s_min <= 1) | def is_blast_result_trunc(qstart, qend, sstart, send, qlen, slen) | Check if a query sequence is truncated by the end of a subject sequence
Args:
qstart (int): Query sequence start index
qend (int): Query sequence end index
sstart (int): Subject sequence start index
send (int): Subject sequence end index
qlen (int): Query sequence length
slen (int): Subject sequence length
Returns:
bool: Result truncated by subject sequence end? | 2.862886 | 3.345784 | 0.85567 |
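A self-contained worked example of the truncation test:

```python
def is_blast_result_trunc(qstart, qend, sstart, send, qlen, slen):
    # Truncated when the query matched only partially AND the match
    # runs into either end of the subject sequence.
    q_match_len = abs(qstart - qend) + 1
    s_max = max(sstart, send)
    s_min = min(sstart, send)
    return (q_match_len < qlen) and (s_max >= slen or s_min <= 1)

# A 90 bp hit of a 100 bp query ending exactly at a 500 bp contig edge
# is truncated; the same partial hit in the middle of the contig is not.
print(is_blast_result_trunc(1, 90, 411, 500, qlen=100, slen=500))  # True
print(is_blast_result_trunc(1, 90, 200, 289, qlen=100, slen=500))  # False
```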
ssum2 = (send + sstart) / 2.0
sabs2 = np.abs(send - sstart) / 2.0
smax = ssum2 + sabs2
smin = ssum2 - sabs2
q_match_len = np.abs(qstart - qend) + 1
return (q_match_len < qlen) & ((smax >= slen) | (smin <= 1)) | def trunc(qstart, qend, sstart, send, qlen, slen) | Check if a query sequence is truncated by the end of a subject sequence
Args:
qstart (int pandas.Series): Query sequence start index
qend (int pandas.Series): Query sequence end index
sstart (int pandas.Series): Subject sequence start index
send (int pandas.Series): Subject sequence end index
qlen (int pandas.Series): Query sequence length
slen (int pandas.Series): Subject sequence length
Returns:
Boolean pandas.Series: Result truncated by subject sequence end? | 3.865307 | 4.119784 | 0.938231 |
if self.is_missing:
return None
df_perfect_matches = self.df[(self.df['coverage'] == 1.0) & (self.df['pident'] == 100.0)]
if df_perfect_matches.shape[0] == 0:
return None
return df_perfect_matches | def perfect_matches(self) | Return pandas DataFrame with perfect BLAST matches (100% identity and coverage)
Returns:
pandas.DataFrame or None: DataFrame of perfect BLAST matches or None if no perfect matches exist | 3.074895 | 2.652994 | 1.159028 |
if self.is_missing:
return None
df_perfect_matches = self.df[(self.df['coverage'] == 1.0) & (self.df['pident'] == 100.0)]
if df_perfect_matches.shape[0]:
self.is_perfect_match = True
return BlastReader.df_first_row_to_dict(df_perfect_matches)
# Return the result with the highest bitscore.
# This is the first result in dataframe since the df is ordered by
# bitscore in descending order.
result_dict = BlastReader.df_first_row_to_dict(self.df)
result_trunc = BlastReader.is_blast_result_trunc(qstart=result_dict['qstart'],
qend=result_dict['qend'],
sstart=result_dict['sstart'],
send=result_dict['send'],
qlen=result_dict['qlen'],
slen=result_dict['slen'])
self.is_trunc = result_trunc
return result_dict | def top_result(self) | Return top `blastn` result
Try to find a 100% identity and coverage result (perfect match).
If one does not exist, then retrieve the result with the highest bitscore.
Returns:
Ordered dict of BLASTN results or None if no BLASTN results generated | 3.409648 | 3.030763 | 1.125013 |
genome_name = genome_name_from_fasta_path(fasta_path)
outpath = os.path.join(outdir, genome_name)
args = ['mash', 'sketch', '-o', outpath, fasta_path]
logging.info('Running Mash sketch with command: %s', ' '.join(args))
p = Popen(args)
p.wait()
sketch_path = outpath + '.msh'
assert os.path.exists(sketch_path), 'Mash sketch for genome {} was not created at {}'.format(
genome_name,
sketch_path)
return sketch_path | def sketch_fasta(fasta_path, outdir) | Create a Mash sketch from an input fasta file
Args:
fasta_path (str): input fasta file path. Genome name in fasta filename
outdir (str): output directory path to write Mash sketch file to
Returns:
str: output Mash sketch file path | 2.351736 | 2.385705 | 0.985761 |
merge_sketch_path = os.path.join(outdir, 'sistr.msh')
args = ['mash', 'paste', merge_sketch_path]
for x in sketch_paths:
args.append(x)
args.append(MASH_SKETCH_FILE)
logging.info('Running Mash paste with command: %s', ' '.join(args))
p = Popen(args)
p.wait()
assert os.path.exists(merge_sketch_path), 'Merged sketch was not created at {}'.format(merge_sketch_path)
return merge_sketch_path | def merge_sketches(outdir, sketch_paths) | Merge new Mash sketches with current Mash sketches
Args:
outdir (str): output directory to write merged Mash sketch file
sketch_paths (list of str): Mash sketch file paths for input fasta files
Returns:
str: output path for Mash sketch file with new and old sketches | 3.182477 | 3.079458 | 1.033454 |
self._dstore.delete(key)
self.cache.delete(key) | def delete(self, key) | Implementation of :meth:`~simplekv.KeyValueStore.delete`.
If an exception occurs in either the cache or the backing store, it is
passed on. | 9.017484 | 10.008787 | 0.900957 |
try:
return self.cache.get(key)
except KeyError:
# cache miss or error, retrieve from backend
data = self._dstore.get(key)
# store in cache and return
self.cache.put(key, data)
return data
except IOError:
# cache error, ignore completely and return from backend
return self._dstore.get(key) | def get(self, key) | Implementation of :meth:`~simplekv.KeyValueStore.get`.
If a cache miss occurs, the value is retrieved, stored in the cache and
returned.
If the cache raises an :exc:`~exceptions.IOError`, the cache is
ignored, and the backing store is consulted directly.
It is possible for a caching error to occur while attempting to store
the value in the cache. Such an error will not be handled either. | 4.523324 | 4.071337 | 1.111017 |
try:
return self.cache.get_file(key, file)
except KeyError:
# cache miss, load into cache
fp = self._dstore.open(key)
self.cache.put_file(key, fp)
# return from cache
return self.cache.get_file(key, file) | def get_file(self, key, file) | Implementation of :meth:`~simplekv.KeyValueStore.get_file`.
If a cache miss occurs, the value is retrieved, stored in the cache and
returned.
If the cache raises an :exc:`~exceptions.IOError`, the retrieval cannot
proceed: If ``file`` was an open file, data maybe been written to it
already. The :exc:`~exceptions.IOError` bubbles up.
It is possible for a caching error to occur while attempting to store
the value in the cache. Such an error will not be handled either. | 3.760046 | 4.273208 | 0.879912 |
try:
return self.cache.open(key)
except KeyError:
# cache miss, load into cache
fp = self._dstore.open(key)
self.cache.put_file(key, fp)
return self.cache.open(key)
except IOError:
# cache error, ignore completely and return from backend
return self._dstore.open(key) | def open(self, key) | Implementation of :meth:`~simplekv.KeyValueStore.open`.
If a cache miss occurs, the value is retrieved, stored in the cache,
and another open is issued on the cache.
If the cache raises an :exc:`~exceptions.IOError`, the cache is
ignored, and the backing store is consulted directly.
It is possible for a caching error to occur while attempting to store
the value in the cache. Such an error will not be handled either. | 5.072225 | 5.248951 | 0.966331 |
try:
k = self._dstore.copy(source, dest)
finally:
self.cache.delete(dest)
return k | def copy(self, source, dest) | Implementation of :meth:`~simplekv.CopyMixin.copy`.
Copies the data in the backing store and removes the destination key from the cache,
in case it was already populated.
Does not work when the backing store does not implement copy. | 7.235035 | 6.015285 | 1.202775 |
try:
return self._dstore.put(key, data)
finally:
self.cache.delete(key) | def put(self, key, data) | Implementation of :meth:`~simplekv.KeyValueStore.put`.
Will store the value in the backing store. After a successful or
unsuccessful store, the cache will be invalidated by deleting the key
from it. | 5.478619 | 6.711468 | 0.816307 |
try:
return self._dstore.put_file(key, file)
finally:
self.cache.delete(key) | def put_file(self, key, file) | Implementation of :meth:`~simplekv.KeyValueStore.put_file`.
Will store the value in the backing store. After a successful or
unsuccessful store, the cache will be invalidated by deleting the key
from it. | 4.922434 | 6.029267 | 0.816423 |
self._check_valid_key(key)
if isinstance(file, str):
return self._get_filename(key, file)
else:
return self._get_file(key, file) | def get_file(self, key, file) | Write contents of key to file
Like :meth:`.KeyValueStore.put_file`, this method allows backends to
implement a specialized function if data needs to be written to disk or
streamed.
If *file* is a string, contents of *key* are written to a newly
created file with the filename *file*. Otherwise, the data will be
written using the *write* method of *file*.
:param key: The key to be read
:param file: Output filename or an object with a *write* method.
:raises exceptions.ValueError: If the key is not valid.
:raises exceptions.IOError: If there was a problem reading or writing
data.
:raises exceptions.KeyError: If the key was not found. | 3.000023 | 3.492803 | 0.858916 |
# FIXME: shouldn't we call self._check_valid_key here?
if isinstance(file, str):
return self._put_filename(key, file)
else:
return self._put_file(key, file) | def put_file(self, key, file) | Store into key from file on disk
Stores data from a source into key. *file* can either be a string,
which will be interpretet as a filename, or an object with a *read()*
method.
If the passed object has a *fileno()* method, it may be used to speed
up the operation.
The file specified by *file*, if it is a filename, may be removed in
the process, to avoid copying if possible. If you need to make a copy,
pass the opened file instead.
:param key: The key under which the data is to be stored
:param file: A filename or an object with a read method. If a filename,
may be removed
:returns: The key under which data was stored
:raises exceptions.ValueError: If the key is not valid.
:raises exceptions.IOError: If there was a problem moving the file in. | 4.658128 | 5.147916 | 0.904857 |
if not isinstance(key, key_type):
raise ValueError('%r is not a valid key type' % key)
if not VALID_KEY_RE.match(key):
raise ValueError('%r contains illegal characters' % key) | def _check_valid_key(self, key) | Checks if a key is valid and raises a ValueError if its not.
When in need of checking a key for validity, always use this
method if possible.
:param key: The key to be checked | 3.379622 | 3.72654 | 0.906906 |
buf = BytesIO()
self._get_file(key, buf)
return buf.getvalue() | def _get(self, key) | Implementation for :meth:`~simplekv.KeyValueStore.get`. The default
implementation will create a :class:`io.BytesIO`-buffer and then call
:meth:`~simplekv.KeyValueStore._get_file`.
:param key: Key of value to be retrieved | 7.554614 | 6.883566 | 1.097485 |
bufsize = 1024 * 1024
# note: we do not use a context manager here or close the source.
# the source goes out of scope shortly after, taking care of the issue
# this allows us to support file-like objects without close as well,
# such as BytesIO.
source = self.open(key)
try:
while True:
buf = source.read(bufsize)
file.write(buf)
if len(buf) < bufsize:
break
finally:
source.close() | def _get_file(self, key, file) | Write key to file-like object file. Either this method or
:meth:`~simplekv.KeyValueStore._get_filename` will be called by
:meth:`~simplekv.KeyValueStore.get_file`. Note that this method does
not accept strings.
:param key: Key to be retrieved
:param file: File-like object to write to | 5.623796 | 6.357171 | 0.884638 |
with open(filename, 'wb') as dest:
return self._get_file(key, dest) | def _get_filename(self, key, filename) | Write key to file. Either this method or
:meth:`~simplekv.KeyValueStore._get_file` will be called by
:meth:`~simplekv.KeyValueStore.get_file`. This method only accepts
filenames and will open the file with a mode of ``wb``, then call
:meth:`~simplekv.KeyValueStore._get_file`.
:param key: Key to be retrieved
:param filename: Filename to write to | 5.38139 | 5.087996 | 1.057664 |
self._check_valid_key(key)
if not isinstance(data, bytes):
raise IOError("Provided data is not of type bytes")
return self._put(key, data, self._valid_ttl(ttl_secs)) | def put(self, key, data, ttl_secs=None) | Like :meth:`~simplekv.KeyValueStore.put`, but with an additional
parameter:
:param ttl_secs: Number of seconds until the key expires. See above
for valid values.
:raises exceptions.ValueError: If ``ttl_secs`` is invalid.
:raises exceptions.IOError: If storing failed or the file could not
be read | 3.969429 | 5.225909 | 0.759567 |
if ttl_secs is None:
ttl_secs = self.default_ttl_secs
self._check_valid_key(key)
if isinstance(file, str):
return self._put_filename(key, file, self._valid_ttl(ttl_secs))
else:
return self._put_file(key, file, self._valid_ttl(ttl_secs)) | def put_file(self, key, file, ttl_secs=None) | Like :meth:`~simplekv.KeyValueStore.put_file`, but with an
additional parameter:
:param ttl_secs: Number of seconds until the key expires. See above
for valid values.
:raises exceptions.ValueError: If ``ttl_secs`` is invalid. | 2.564226 | 2.802206 | 0.915074 |
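To make the extra parameter concrete, here is a hedged sketch using simplekv's Redis backend, which supports time-to-live; it needs a reachable Redis server, so treat it as illustrative only:

```python
import redis
from io import BytesIO
from simplekv.memory.redisstore import RedisStore

store = RedisStore(redis.StrictRedis())

# Expire after 60 seconds; ttl_secs=None falls back to
# store.default_ttl_secs, as in the method above.
store.put(u'session-token', b'abc123', ttl_secs=60)
store.put_file(u'report', BytesIO(b'...contents...'), ttl_secs=300)
```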
self._check_valid_key(source)
self._check_valid_key(dest)
return self._copy(source, dest) | def copy(self, source, dest) | Copies a key. The destination is overwritten if it does exist.
:param source: The source key to copy
:param dest: The destination for the copy
:returns: The destination key
:raises: exceptions.ValueError: If the source or target key are not valid
:raises: exceptions.KeyError: If the source key was not found | 3.491227 | 3.892233 | 0.896973 |
try:
if self.transport in ('http', 'https'):
connection = pyeapi.client.connect(
transport=self.transport,
host=self.hostname,
username=self.username,
password=self.password,
port=self.port,
timeout=self.timeout
)
elif self.transport == 'socket':
connection = pyeapi.client.connect(transport=self.transport)
else:
raise ConnectionException("Unknown transport: {}".format(self.transport))
if self.device is None:
self.device = pyeapi.client.Node(connection, enablepwd=self.enablepwd)
# connect() does not raise an exception even if the device is unusable,
# so probe it by running a very simple command
self.device.run_commands(['show clock'], encoding='text')
except ConnectionError as ce:
# this is raised either if the device is not available
# or if the HTTP(S) agent is not enabled:
# show management api http-commands
raise ConnectionException(ce.message) | def open(self) | Implementation of NAPALM method open. | 5.165677 | 4.910399 | 1.051987 |
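In practice this driver is reached through NAPALM's factory function; a sketch with placeholder host and credentials (the `transport` optional argument is the attribute consumed above):

```python
from napalm import get_network_driver

driver = get_network_driver('eos')
device = driver(hostname='198.51.100.1', username='admin',
                password='secret', optional_args={'transport': 'https'})
device.open()   # connects, then probes the API with 'show clock'
```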
if self.config_session is None:
return ''
else:
commands = ['show session-config named %s diffs' % self.config_session]
result = self.device.run_commands(commands, encoding='text')[0]['output']
result = '\n'.join(result.splitlines()[2:])
return result.strip() | def compare_config(self) | Implementation of NAPALM method compare_config. | 6.264561 | 5.408361 | 1.15831 |
commands = []
commands.append('copy startup-config flash:rollback-0')
commands.append('configure session {}'.format(self.config_session))
commands.append('commit')
commands.append('write memory')
self.device.run_commands(commands)
self.config_session = None | def commit_config(self) | Implementation of NAPALM method commit_config. | 6.103019 | 4.744704 | 1.28628 |
if self.config_session is not None:
commands = []
commands.append('configure session {}'.format(self.config_session))
commands.append('abort')
self.device.run_commands(commands)
self.config_session = None | def discard_config(self) | Implementation of NAPALM method discard_config. | 4.093068 | 3.387625 | 1.208241 |
commands = []
commands.append('configure replace flash:rollback-0')
commands.append('write memory')
self.device.run_commands(commands) | def rollback(self) | Implementation of NAPALM method rollback. | 9.962878 | 7.473568 | 1.333082 |
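Together with `compare_config`, these methods implement NAPALM's candidate-configuration workflow on top of EOS configure sessions. A sketch of the usual sequence (`device` is an opened instance of this driver; the candidate file and the decision check are placeholders):

```python
# Stage a change in a named configure session.
device.load_merge_candidate(filename='candidate.conf')

# Inspect the session diff before deciding.
print(device.compare_config())

if change_is_good:              # placeholder for a real check
    device.commit_config()      # snapshots rollback-0, commits, writes memory
else:
    device.discard_config()     # aborts the session

# If a committed change misbehaves later:
device.rollback()               # 'configure replace flash:rollback-0'
```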
commands = []
commands.append('show version')
commands.append('show hostname')
commands.append('show interfaces')
result = self.device.run_commands(commands)
version = result[0]
hostname = result[1]
interfaces_dict = result[2]['interfaces']
uptime = time.time() - version['bootupTimestamp']
interfaces = [i for i in interfaces_dict.keys() if '.' not in i]
interfaces = string_parsers.sorted_nicely(interfaces)
return {
'hostname': hostname['hostname'],
'fqdn': hostname['fqdn'],
'vendor': u'Arista',
'model': version['modelName'],
'serial_number': version['serialNumber'],
'os_version': version['internalVersion'],
'uptime': int(uptime),
'interface_list': interfaces,
} | def get_facts(self) | Implementation of NAPALM method get_facts. | 3.399842 | 3.316766 | 1.025048 |
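The returned dictionary follows NAPALM's facts schema; for a sense of the shape, with illustrative values only:

```python
device.get_facts()
# {'hostname': 'leaf1',
#  'fqdn': 'leaf1.example.net',
#  'vendor': 'Arista',
#  'model': 'vEOS',
#  'serial_number': 'ABC12345678',
#  'os_version': '4.20.1F',
#  'uptime': 86400,
#  'interface_list': ['Ethernet1', 'Ethernet2', 'Management1']}
```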
# Default values
snmp_dict = {
'chassis_id': '',
'location': '',
'contact': '',
'community': {}
}
commands = [
'show snmp chassis',
'show snmp location',
'show snmp contact'
]
snmp_config = self.device.run_commands(commands, encoding='json')
for line in snmp_config:
for k, v in line.items():
if k == 'chassisId':
snmp_dict['chassis_id'] = v
else:
# Some EOS versions add extra quotes
snmp_dict[k] = v.strip('"')
commands = ['show running-config | section snmp-server community']
raw_snmp_config = self.device.run_commands(commands, encoding='text')[0].get('output', '')
for line in raw_snmp_config.splitlines():
match = self._RE_SNMP_COMM.search(line)
if match:
matches = match.groupdict('')
snmp_dict['community'][match.group('community')] = {
'acl': py23_compat.text_type(matches['v4_acl']),
'mode': py23_compat.text_type(matches['access'])
}
return snmp_dict | def get_snmp_information(self) | get_snmp_information() for EOS. Re-written to not use TextFSM | 3.08269 | 3.009965 | 1.024162 |
get_startup = retrieve == "all" or retrieve == "startup"
get_running = retrieve == "all" or retrieve == "running"
get_candidate = (retrieve == "all" or retrieve == "candidate") and self.config_session
if retrieve == "all":
commands = ['show startup-config',
'show running-config']
if self.config_session:
commands.append('show session-config named {}'.format(self.config_session))
output = self.device.run_commands(commands, encoding="text")
return {
'startup': py23_compat.text_type(output[0]['output']) if get_startup else u"",
'running': py23_compat.text_type(output[1]['output']) if get_running else u"",
'candidate': py23_compat.text_type(output[2]['output']) if get_candidate else u"",
}
elif get_startup or get_running:
commands = ['show {}-config'.format(retrieve)]
output = self.device.run_commands(commands, encoding="text")
return {
'startup': py23_compat.text_type(output[0]['output']) if get_startup else u"",
'running': py23_compat.text_type(output[0]['output']) if get_running else u"",
'candidate': "",
}
elif get_candidate:
commands = ['show session-config named {}'.format(self.config_session)]
output = self.device.run_commands(commands, encoding="text")
return {
'startup': "",
'running': "",
'candidate': py23_compat.text_type(output[0]['output']),
}
elif retrieve == "candidate":
# If we get here it means that we want the candidate but there is none.
return {
'startup': "",
'running': "",
'candidate': "",
}
else:
raise Exception("Wrong retrieve filter: {}".format(retrieve)) | def get_config(self, retrieve="all") | get_config implementation for EOS. | 1.958822 | 1.930104 | 1.014879 |
output = self._show_vrf()
vrfs = {}
all_vrf_interfaces = {}
for vrf in output:
if (vrf.get('route_distinguisher', '') == "<not set>" or
vrf.get('route_distinguisher', '') == 'None'):
vrf['route_distinguisher'] = u''
else:
vrf['route_distinguisher'] = py23_compat.text_type(vrf['route_distinguisher'])
interfaces = {}
for interface_raw in vrf.get('interfaces', []):
interface = interface_raw.split(',')
for line in interface:
if line.strip() != '':
interfaces[py23_compat.text_type(line.strip())] = {}
all_vrf_interfaces[py23_compat.text_type(line.strip())] = {}
vrfs[py23_compat.text_type(vrf['name'])] = {
u'name': py23_compat.text_type(vrf['name']),
u'type': u'L3VRF',
u'state': {
u'route_distinguisher': vrf['route_distinguisher'],
},
u'interfaces': {
u'interface': interfaces,
},
}
all_interfaces = self.get_interfaces_ip().keys()
vrfs[u'default'] = {
u'name': u'default',
u'type': u'DEFAULT_INSTANCE',
u'state': {
u'route_distinguisher': u'',
},
u'interfaces': {
u'interface': {
k: {} for k in all_interfaces if k not in all_vrf_interfaces.keys()
},
},
}
if name:
if name in vrfs:
return {py23_compat.text_type(name): vrfs[name]}
return {}
else:
return vrfs | def get_network_instances(self, name='') | get_network_instances implementation for EOS. | 2.285931 | 2.232895 | 1.023752 |
from boto.exception import BotoClientError, BotoServerError, \
StorageResponseError
try:
yield
except StorageResponseError as e:
if e.code == 'NoSuchKey':
raise KeyError(key)
raise IOError(str(e))
except (BotoClientError, BotoServerError) as e:
if e.__class__.__name__ not in exc_pass:
raise IOError(str(e)) | def map_boto_exceptions(key=None, exc_pass=()) | Map boto-specific exceptions to the simplekv-API. | 2.839473 | 2.73574 | 1.037918 |
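A hedged sketch of how a boto-backed store method would wrap its calls in this context manager, so callers only ever see simplekv's `KeyError`/`IOError`; the `bucket` and `prefix` attributes are assumptions about the surrounding class:

```python
def _get(self, key):
    with map_boto_exceptions(key=key):
        # boto's get_key returns None for a missing key
        k = self.bucket.get_key(self.prefix + key)
        if k is None:
            raise KeyError(key)
        return k.get_contents_as_string()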
if not isinstance(key, key_type) and key is not None:
raise ValueError('%r is not a valid key type' % key)
if not VALID_KEY_RE_EXTENDED.match(key) or key == u'/':
raise ValueError('%r contains illegal characters' % key) | def _check_valid_key(self, key) | Checks if a key is valid and raises a ValueError if its not.
When in need of checking a key for validity, always use this
method if possible.
:param key: The key to be checked | 4.876864 | 5.497001 | 0.887186 |
md5 = hashlib.md5()
chunk_size = 128 * md5.block_size
for chunk in iter(lambda: file_.read(chunk_size), b''):
md5.update(chunk)
file_.seek(0)
byte_digest = md5.digest()
return base64.b64encode(byte_digest).decode() | def _file_md5(file_) | Compute the md5 digest of a file in base64 encoding. | 1.867354 | 1.762675 | 1.059387 |
md5 = hashlib.md5(buffer_)
byte_digest = md5.digest()
return base64.b64encode(byte_digest).decode() | def _byte_buffer_md5(buffer_) | Computes the md5 digest of a byte buffer in base64 encoding. | 2.571183 | 2.44601 | 1.051174 |
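Both helpers return the base64-encoded (not hex) MD5 digest, the form object-store APIs expect in a `Content-MD5` header. A quick consistency check:

```python
import base64
import hashlib
from io import BytesIO

data = b'hello world'
expected = base64.b64encode(hashlib.md5(data).digest()).decode()

assert _byte_buffer_md5(data) == expected
assert _file_md5(BytesIO(data)) == expected  # also seeks back to offset 0
```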
from azure.common import AzureMissingResourceHttpError, AzureHttpError,\
AzureException
try:
yield
except AzureMissingResourceHttpError as ex:
if ex.__class__.__name__ not in exc_pass:
s = str(ex)
if s.startswith(u"The specified container does not exist."):
raise IOError(s)
raise KeyError(key)
except AzureHttpError as ex:
if ex.__class__.__name__ not in exc_pass:
raise IOError(str(ex))
except AzureException as ex:
if ex.__class__.__name__ not in exc_pass:
raise IOError(str(ex)) | def map_azure_exceptions(key=None, exc_pass=()) | Map Azure-specific exceptions to the simplekv-API. | 2.381289 | 2.31908 | 1.026825 |
if self.closed:
raise ValueError("I/O operation on closed file")
with map_azure_exceptions(key=self.key):
if size < 0:
size = self.size - self.pos
end = min(self.pos + size - 1, self.size - 1)
if self.pos > end:
return b''
b = self.block_blob_service.get_blob_to_bytes(
container_name=self.container_name,
blob_name=self.key,
start_range=self.pos,
end_range=end, # end_range is inclusive
max_connections=self.max_connections,
)
self.pos += len(b.content)
return b.content | def read(self, size=-1) | Returns 'size' amount of bytes or less if there is no more data.
If no size is given all data is returned. size can be >= 0. | 2.701162 | 2.696676 | 1.001664 |
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == 0:
if offset < 0:
raise IOError('seek would move position outside the file')
self.pos = offset
elif whence == 1:
if self.pos + offset < 0:
raise IOError('seek would move position outside the file')
self.pos += offset
elif whence == 2:
if self.size + offset < 0:
raise IOError('seek would move position outside the file')
self.pos = self.size + offset
return self.pos | def seek(self, offset, whence=0) | Move to a new offset either relative or absolute. whence=0 is
absolute, whence=1 is relative, whence=2 is relative to the end.
Any relative or absolute seek operation which would result in a
negative position is undefined and that case can be ignored
in the implementation.
Any seek operation which moves the position past the end of the stream
should succeed. tell() should report that position and read()
should return an empty bytes object. | 1.75579 | 1.794564 | 0.978393 |
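A sketch of the resulting file-like contract, assuming `store.open(key)` hands back such a reader (as the Azure backend here does); the key is a placeholder and the blob is assumed to hold at least ten bytes:

```python
f = store.open(u'some-blob')

size = f.seek(0, 2)      # whence=2: relative to the end; returns the position
f.seek(-10, 2)           # ten bytes before the end
tail = f.read()          # the final ten bytes

f.seek(size + 100)       # seeking past the end must succeed...
assert f.read() == b''   # ...and reads there return an empty bytes object
```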
# dispatch on the number of remaining path components:
if len(components) == 1:
if isinstance(obj, Blob):
mode = 0o100644
elif isinstance(obj, Tree):
mode = 0o040000
elif obj is None:
mode = None
else:
raise TypeError('Can only mount Blobs or Trees')
name = components[0]
if mode is not None:
tree[name] = mode, obj.id
return [tree]
if name in tree:
del tree[name]
return [tree]
elif len(components) > 1:
a, bc = components[0], components[1:]
if a in tree:
a_tree = repo[tree[a][1]]
if not isinstance(a_tree, Tree):
a_tree = Tree()
else:
a_tree = Tree()
res = _on_tree(repo, a_tree, bc, obj)
a_tree_new = res[-1]
if a_tree_new.items():
tree[a] = 0o040000, a_tree_new.id
return res + [tree]
# the new subtree is empty: prune its entry from this tree
if a in tree:
del tree[a]
return [tree]
else:
raise ValueError('Components can\'t be empty.') | def _on_tree(repo, tree, components, obj) | Mounts an object on a tree, using the given path components.
:param tree: Tree object to mount on.
:param components: A list of strings of subpaths (i.e. ['foo', 'bar'] is
equivalent to '/foo/bar')
:param obj: Object to mount. If None, removes the object found at path
and prunes the tree downwards.
:return: A list of new entities that need to be added to the object store,
where the last one is the new tree. | 2.884693 | 2.685792 | 1.074057 |
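A hedged sketch of mounting a blob at `foo/bar` inside an empty root tree, using dulwich objects; `repo` is assumed to be an open dulwich repository:

```python
from dulwich.objects import Blob, Tree

blob = Blob.from_string(b'hello')
root = Tree()

# Returns [new 'foo' subtree, updated root]; all new objects (and the
# blob itself) still have to be added to the object store.
new_objects = _on_tree(repo, root, ['foo', 'bar'], blob)
for obj in [blob] + new_objects:
    repo.object_store.add_object(obj)
```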
allen_namespaces = {
'JAX': 'http://jaxmice.jax.org/strain/',
'MMRRC': 'http://www.mmrrc.org/catalog/getSDS.jsp?mmrrc_id=',
'AIBS': 'http://api.brain-map.org/api/v2/data/TransgenicLine/',
}
for prefix, iri in allen_namespaces.items():
self.g.add_namespace(prefix, iri)
for cell_line in self.neuron_data[:]:
for tl in cell_line['donor']['transgenic_lines']:
_id = tl['stock_number'] if tl['stock_number'] else tl['id']
prefix = tl['transgenic_line_source_name']
line_type = tl['transgenic_line_type_name']
if prefix not in ['JAX', 'MMRRC', 'AIBS']:
continue
_class = prefix + ':' + str(_id)
self.g.add_class(_class)
self.g.add_trip(_class, 'rdfs:label', tl['name'])
self.g.add_trip(_class, 'definition:', tl['description'])
self.g.add_trip(_class, 'rdfs:subClassOf', 'ilxtr:transgenicLine')
self.g.add_trip(_class, 'ilxtr:hasTransgenicType', 'ilxtr:' + line_type + 'Line')
self.g.write() | def build_transgenic_lines(self) | init class | "transgenic_line_source_name":"stock_number" a Class
add superClass | rdfs:subClassOf ilxtr:transgenicLine
add *order* | ilxtr:useObjectProperty ilxtr:<order>
add name | rdfs:label "name"
add def | definition: "description"
add transtype | rdfs:hasTransgenicType "transgenic_line_type_name" | 3.757436 | 3.270839 | 1.148768 |
with open('myfile', 'rt') as f:
rows = [r for r in csv.reader(f)]
dothing = lambda _: [i for i, v in enumerate(_)]
rows = [dothing(_) for _ in rows]
raise NotImplementedError('You need to implement this yourself!')
return rows | def datagetter(cls) | example datagetter function, make any local modifications here | 8.04245 | 6.821722 | 1.178947 |
for thing in data:
graph.add_trip(*thing)
raise NotImplementedError('You need to implement this yourself!') | def dataproc(cls, graph, data) | example dataproc function, make any local modifications here | 23.050945 | 18.174095 | 1.268341 |