sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def from_extension(extension):
"""
Look up the BioPython file type corresponding with input extension.
Look up is case insensitive.
"""
if not extension.startswith('.'):
raise ValueError("Extensions must begin with a period.")
try:
return EXTENSION_TO_TYPE[extension.lower()]
except KeyError:
raise UnknownExtensionError(
"seqmagick does not know how to handle " +
"files with extensions like this: " + extension) | Look up the BioPython file type corresponding with input extension.
Look up is case insensitive. | entailment |
def from_filename(file_name):
"""
Look up the BioPython file type corresponding to an input file name.
"""
base, extension = os.path.splitext(file_name)
if extension in COMPRESS_EXT:
# Compressed file
extension = os.path.splitext(base)[1]
return from_extension(extension) | Look up the BioPython file type corresponding to an input file name. | entailment |
def from_handle(fh, stream_default='fasta'):
"""
Look up the BioPython file type corresponding to a file-like object.
For stdin, stdout, and stderr, ``stream_default`` is used.
"""
if fh in (sys.stdin, sys.stdout, sys.stderr):
return stream_default
return from_filename(fh.name) | Look up the BioPython file type corresponding to a file-like object.
For stdin, stdout, and stderr, ``stream_default`` is used. | entailment |
def parse_arguments(argv):
"""
Extract command-line arguments for different actions.
"""
parser = argparse.ArgumentParser(description='seqmagick - Manipulate ' + \
' sequence files.', prog='seqmagick')
parser.add_argument('-V', '--version', action='version',
version='seqmagick v' + version,
help="Print the version number and exit")
parser.add_argument('-v', '--verbose', dest='verbosity',
action='count', default=1,
help="Be more verbose. Specify -vv or -vvv for even more")
parser.add_argument('-q', '--quiet', action='store_const', const=0,
dest='verbosity', help="Suppress output")
# Subparsers
subparsers = parser.add_subparsers(dest='subparser_name')
parser_help = subparsers.add_parser('help',
help='Detailed help for actions using help <action>')
parser_help.add_argument('action')
# Add actions
actions = {}
for name, mod in subcommands.itermodules():
subparser = subparsers.add_parser(name, help=mod.__doc__,
description=mod.__doc__)
mod.build_parser(subparser)
actions[name] = mod.action
arguments = parser.parse_args(argv)
arguments.argv = argv
action = arguments.subparser_name
if action == 'help':
return parse_arguments([str(arguments.action), '-h'])
return actions[action], arguments | Extract command-line arguments for different actions. | entailment |
def ungap_index_map(sequence, gap_chars='-'):
"""
Returns a dict mapping from an index in the ungapped sequence to an index
in the gapped sequence.
>>> ungap_index_map('AC-TG-')
{0: 0, 1: 1, 2: 3, 3: 4}
"""
counter = itertools.count(0).__next__
ungap_indexes = [
counter() if c not in gap_chars else None for c in iter(sequence)
]
return dict(
(ungapped, gapped)
for ungapped, gapped in zip(ungap_indexes, range(len(sequence)))
if ungapped is not None) | Returns a dict mapping from an index in the ungapped sequence to an index
in the gapped sequence.
>>> ungap_index_map('AC-TG-')
{0: 0, 1: 1, 2: 3, 3: 4} | entailment |
def gap_index_map(sequence, gap_chars='-'):
"""
Opposite of ungap_index_map: returns mapping from gapped index to ungapped
index.
>>> gap_index_map('AC-TG-')
{0: 0, 1: 1, 3: 2, 4: 3}
"""
return dict(
(v, k) for k, v in list(ungap_index_map(sequence, gap_chars).items())) | Opposite of ungap_index_map: returns mapping from gapped index to ungapped
index.
>>> gap_index_map('AC-TG-')
{0: 0, 1: 1, 3: 2, 4: 3} | entailment |
def _iupac_ambiguous_equal(ambig_base, unambig_base):
"""
Tests two bases for equality, accounting for IUPAC ambiguous DNA
ambiguous base may be IUPAC ambiguous, unambiguous must be one of ACGT
"""
iupac_translation = {
'A': 'A',
'C': 'C',
'G': 'G',
'T': 'T',
'U': 'U',
'R': 'AG',
'Y': 'CT',
'S': 'GC',
'W': 'AT',
'K': 'GT',
'M': 'AC',
'B': 'CGT',
'D': 'AGT',
'H': 'ACT',
'V': 'ACG',
'N': 'ACGT',
'-': '-'
}
for i in (ambig_base, unambig_base):
if not len(i) == 1:
raise ValueError("only one base may be passed.")
return unambig_base.upper() in iupac_translation[ambig_base.upper()] | Tests two bases for equality, accounting for IUPAC ambiguous DNA
ambiguous base may be IUPAC ambiguous, unambiguous must be one of ACGT | entailment |
def hamming_distance(s1, s2, equality_function=operator.eq):
"""
Returns the hamming distance between two strings.
"""
if not len(s1) == len(s2):
raise ValueError("String lengths are not equal")
# Number of non-matching characters:
return sum(not equality_function(c1, c2) for c1, c2 in zip(s1, s2)) | Returns the hamming distance between two strings. | entailment |
def locate_primers(sequences, forward_primer, reverse_primer,
reverse_complement, max_hamming_distance):
"""
Find forward and reverse primers in a set of sequences, return two tuples:
(forward_start, forward_end), (reverse_start, reverse_end)
"""
forward_loc = None
reverse_loc = None
seq_length = None
# Reverse complement the reverse primer, if appropriate
if reverse_complement:
reverse_primer = reverse_primer.reverse_complement()
forward_aligner = PrimerAligner(forward_primer)
reverse_aligner = PrimerAligner(reverse_primer)
for i, sequence in enumerate(sequences):
if seq_length is None:
seq_length = len(sequence)
elif len(sequence) != seq_length:
raise ValueError(("Sequence Length Heterogeneity: {0} != {1}. "
"Is this an alignment?").format(
len(sequence), seq_length))
index_map = ungap_index_map(sequence.seq)
if forward_loc is None:
ham_dist, start, end = forward_aligner.align(sequence.seq.ungap())
if ham_dist <= max_hamming_distance:
forward_loc = index_map[start], index_map[end]
logging.info("Forward in sequence %d: indexes %d to %d", i + 1,
*forward_loc)
if reverse_loc is None:
ham_dist, start, end = reverse_aligner.align(sequence.seq.ungap())
if ham_dist <= max_hamming_distance:
reverse_loc = index_map[start], index_map[end]
logging.info("Reverse in sequence %d: indexes %d to %d", i + 1,
*reverse_loc)
if forward_loc and reverse_loc:
# Both found
# Check order
if forward_loc[0] > reverse_loc[0]:
raise PrimerOrderError(forward_loc[0], reverse_loc[0])
return forward_loc, reverse_loc
else:
logging.debug(
"Sequence %d: %d/2 primers found", i + 1,
sum(j is not None for j in (forward_loc, reverse_loc)))
# Did not find either the forward or reverse primer:
if not forward_loc:
raise PrimerNotFound(forward_primer)
else:
raise PrimerNotFound(reverse_primer) | Find forward and reverse primers in a set of sequences, return two tuples:
(forward_start, forward_end), (reverse_start, reverse_end) | entailment |
def trim(sequences, start, end):
"""
Slice the input sequences from start to end
"""
logging.info("Trimming from %d to %d", start, end)
return (sequence[start:end] for sequence in sequences) | Slice the input sequences from start to end | entailment |
def action(arguments):
"""
Trim the alignment as specified
"""
# Determine file format for input and output
source_format = (arguments.source_format or
fileformat.from_handle(arguments.source_file))
output_format = (arguments.output_format or
fileformat.from_handle(arguments.output_file))
# Load the alignment
with arguments.source_file:
sequences = SeqIO.parse(
arguments.source_file,
source_format,
alphabet=Alphabet.Gapped(Alphabet.single_letter_alphabet))
# Locate primers
(forward_start, forward_end), (reverse_start, reverse_end) = locate_primers(
sequences, arguments.forward_primer,
arguments.reverse_primer, arguments.reverse_complement,
arguments.max_hamming_distance)
# Generate slice indexes
if arguments.include_primers:
start = forward_start
end = reverse_end + 1
else:
start = forward_end + 1
end = reverse_start
# Rewind the input file
arguments.source_file.seek(0)
sequences = SeqIO.parse(
arguments.source_file,
source_format,
alphabet=Alphabet.Gapped(Alphabet.single_letter_alphabet))
# Apply the transformation
prune_action = _ACTIONS[arguments.prune_action]
transformed_sequences = prune_action(sequences, start, end)
with arguments.output_file:
SeqIO.write(transformed_sequences, arguments.output_file,
output_format) | Trim the alignment as specified | entailment |
def align(self, sequence):
"""
Aligns the primer to the given query sequence, returning a tuple of:
hamming_distance, start, end
Where hamming distance is the distance between the primer and aligned
sequence, and start and end give the start and end index of the primer
relative to the input sequence.
"""
seq_aln, primer_aln, score, start, end = pairwise2.align.globalms(
str(sequence).upper(), str(self.primer).upper(),
self.match, self.difference, self.gap_open,
self.gap_extend, one_alignment_only=True,
penalize_end_gaps=self.penalize_end_gaps)[0]
# Get an ungapped mapping on the sequence
index_map = gap_index_map(seq_aln)
ungap_map = ungap_index_map(primer_aln)
# Trim to primer
start = ungap_map[0]
end = ungap_map[len(self.primer) - 1]
trimmed = seq_aln[start:end + 1]
ham_dist = hamming_distance(primer_aln[start:end + 1], trimmed,
_iupac_ambiguous_equal)
# assert primer_aln[start:end].replace('-', '') == str(self.primer)
# TODO: handle start or end being gap better. For now, just give up
# and return maxint for the hamming distance
if trimmed.endswith('-'):
tail = len(trimmed) - len(trimmed.rstrip('-'))
end = index_map[end - tail] + 1
ham_dist = sys.maxsize
else:
end = index_map[end]
if trimmed.startswith('-'):
start = 0
ham_dist = sys.maxsize
else:
start = index_map[start]
return ham_dist, start, end | Aligns the primer to the given query sequence, returning a tuple of:
hamming_distance, start, end
Where hamming distance is the distance between the primer and aligned
sequence, and start and end give the start and end index of the primer
relative to the input sequence. | entailment |
def hash_starts_numeric(records):
"""
Very useful function that only accepts records with a numeric start to
their sha-1 hash.
"""
for record in records:
seq_hash = hashlib.sha1(str(record.seq)).hexdigest()
if seq_hash[0].isdigit():
yield record | Very useful function that only accepts records with a numeric start to
their sha-1 hash. | entailment |
def atomic_write(path, mode='wt', permissions=None, file_factory=None, **kwargs):
"""
Open a file for atomic writing.
Generates a temp file, renames to value of ``path``.
Arguments:
``permissions``: Permissions to set (default: umask)
``file_factory``: If given, the handle yielded will be the result of
calling file_factory(path)
Additional arguments are passed to tempfile.NamedTemporaryFile
"""
if permissions is None:
permissions = apply_umask()
# Handle stdout:
if path == '-':
yield sys.stdout
else:
base_dir = os.path.dirname(path)
kwargs['suffix'] = os.path.basename(path)
tf = tempfile.NamedTemporaryFile(
dir=base_dir, mode=mode, delete=False, **kwargs)
# If a file_factory is given, close, and re-open a handle using the
# file_factory
if file_factory is not None:
tf.close()
tf = file_factory(tf.name)
try:
with tf:
yield tf
# Move
os.rename(tf.name, path)
os.chmod(path, permissions)
except:
os.remove(tf.name)
raise | Open a file for atomic writing.
Generates a temp file, renames to value of ``path``.
Arguments:
``permissions``: Permissions to set (default: umask)
``file_factory``: If given, the handle yielded will be the result of
calling file_factory(path)
Additional arguments are passed to tempfile.NamedTemporaryFile | entailment |
def cut_range(string):
"""
A custom argparse 'type' to deal with sequences ranges such as 5:500.
Returns a 0-based slice corresponding to the selection defined by the slice
"""
value_range = string.split(':')
if len(value_range) == 1:
start = int(value_range[0])
stop = start
elif len(value_range) == 2:
start, stop = tuple(int(i) if i else None for i in value_range)
else:
msg = "{0} is not a valid, 1-indexed range.".format(string)
raise argparse.ArgumentTypeError(msg)
if start == 0 or (stop or sys.maxsize) < (start or 0):
msg = "{0} is not a valid, 1-indexed range.".format(string)
raise argparse.ArgumentTypeError(msg)
# Convert from 1-indexed to 0-indexed
if start is not None and start > 0:
start -= 1
return slice(start, stop) | A custom argparse 'type' to deal with sequences ranges such as 5:500.
Returns a 0-based slice corresponding to the selection defined by the slice | entailment |
def typed_range(type_func, minimum, maximum):
"""
Require variables to be of the specified type, between minimum and maximum
"""
@functools.wraps(type_func)
def inner(string):
result = type_func(string)
if not result >= minimum and result <= maximum:
raise argparse.ArgumentTypeError(
"Please provide a value between {0} and {1}".format(
minimum, maximum))
return result
return inner | Require variables to be of the specified type, between minimum and maximum | entailment |
def partial_append_action(fn, argument_keys=None):
"""
Creates a new class extending argparse.Action, which appends a
partially-applied function to dest.
The optional argument_keys argument should either be None (no additional
arguments to fn) or an iterable of function keys to partially apply.
"""
if isinstance(argument_keys, str):
argument_keys = [argument_keys]
argument_keys = argument_keys or []
class PartialAppendAction(argparse.Action):
def __init__(self,
option_strings,
dest,
const=None,
default=None,
required=False,
help=None,
type=None,
metavar=None,
nargs=None,
**kwargs):
super(PartialAppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=len(argument_keys),
const=const,
default=default,
required=required,
metavar=metavar,
type=type,
help=help, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
items = copy.copy(getattr(namespace, self.dest, None)) or []
# If no value was set default to empty list
if values is None:
values = []
elif not isinstance(values, list):
values = [values]
if len(argument_keys) != len(values):
raise ValueError("Unexpected number of values")
# Generate keyword arguments for the input function
kwargs = dict(list(zip(argument_keys, values)))
f = functools.partial(fn, **kwargs)
items.append(f)
setattr(namespace, self.dest, items)
return PartialAppendAction | Creates a new class extending argparse.Action, which appends a
partially-applied function to dest.
The optional argument_keys argument should either be None (no additional
arguments to fn) or an iterable of function keys to partially apply. | entailment |
def positive_value(target_type):
"""
Wraps target_type in a function that requires the parsed argument
be >= 0
"""
def inner(string):
value = target_type(string)
if not value >= 0:
raise argparse.ArgumentTypeError("Invalid positive number: " +
string)
return value
return inner | Wraps target_type in a function that requires the parsed argument
be >= 0 | entailment |
def add_options(parser):
"""
Add optional arguments to the parser
"""
partial_action = common.partial_append_action
file_mods = parser.add_argument_group("Sequence File Modification")
file_mods.add_argument('--line-wrap', dest='line_wrap', metavar='N',
type=int, help='Adjust line wrap for sequence strings. '
'When N is 0, all line breaks are removed. Only fasta files '
'are supported for the output format.')
file_mods.add_argument('--sort', dest='sort',
choices=['length-asc', 'length-desc', 'name-asc', 'name-desc'],
help='Perform sorting by length or name, ascending or descending. '
'ASCII sorting is performed for names')
parser.epilog = """Filters using regular expressions are case-sensitive
by default. Append "(?i)" to a pattern to make it case-insensitive."""
seq_mods = parser.add_argument_group("Sequence Modificaton")
seq_mods.add_argument('--apply-function', type=module_function,
metavar='/path/to/module.py:function_name[:parameter]',
help="""Specify a custom function to apply to the input sequences,
specified as /path/to/file.py:function_name. Function should accept
an iterable of Bio.SeqRecord objects, and yield SeqRecords. If the
parameter is specified, it will be passed as a string as the second
argument to the function. Specify more than one to chain.""",
default=[], action='append')
seq_mods.add_argument('--cut', dest='transforms',
metavar="start:end[,start2:end2]",
type=common.sequence_slices,
action=partial_action(transform.multi_cut_sequences, 'slices'),
help="""Keep only the residues within the 1-indexed start and end
positions specified, : separated. Includes last item. Start or end
can be left unspecified to indicate start/end of sequence. A
negative start may be provided to indicate an offset from the end
of the sequence. Note that to prevent negative numbers being
interpreted as flags, this should be written with an equals
sign between `--cut` and the argument, e.g.: `--cut=-10:`""")
seq_mods.add_argument('--relative-to', dest='cut_relative', metavar='ID',
help="""Apply --cut relative to the indexes of non-gap residues in
sequence identified by ID""")
seq_mods.add_argument('--drop', dest='transforms',
metavar='start:end[,start2:end2]',
type=common.sequence_slices,
action=partial_action(transform.drop_columns, 'slices'),
help="""Remove the residues at the specified indices. Same format as `--cut`.""")
seq_mods.add_argument('--dash-gap',
action=partial_action(transform.dashes_cleanup), dest='transforms',
help="""Replace any of the characters "?.:~" with a "-" for all
sequences""")
seq_mods.add_argument('--lower',
action=partial_action(transform.lower_sequences),
dest='transforms', help='Translate the sequences to lower case')
seq_mods.add_argument('--mask', metavar="start1:end1[,start2:end2]",
action=partial_action(transform.multi_mask_sequences, 'slices'),
type=common.sequence_slices, dest='transforms', help="""Replace
residues in 1-indexed slice with gap-characters. If --relative-to
is also specified, coordinates are relative to the sequence ID
provided.""")
seq_mods.add_argument('--reverse',
action=partial_action(transform.reverse_sequences),
dest='transforms', help='Reverse the order of sites in sequences')
seq_mods.add_argument('--reverse-complement', dest='transforms',
action=partial_action(transform.reverse_complement_sequences),
help='Convert sequences into reverse complements')
seq_mods.add_argument('--squeeze', action=partial_action(transform.squeeze),
dest='transforms',
help='''Remove any gaps that are present in the same
position across all sequences in an alignment (equivalent to
--squeeze-threshold=1.0)''')
seq_mods.add_argument('--squeeze-threshold', dest='transforms',
action=partial_action(transform.squeeze, 'gap_threshold'),
type=common.typed_range(float, 0.0, 1.0),
metavar='PROP', help="""Trim columns from an alignment which
have gaps in least the specified proportion of sequences.""")
seq_mods.add_argument('--transcribe', dest='transforms',
action=partial_action(transform.transcribe, 'transcribe'),
choices=('dna2rna', 'rna2dna'), help="""Transcription and back
transcription for generic DNA and RNA. Source sequences must be the
correct alphabet or this action will likely produce incorrect
results.""")
seq_mods.add_argument('--translate', dest='transforms',
action=partial_action(transform.translate, 'translate'),
choices=['dna2protein', 'rna2protein', 'dna2proteinstop',
'rna2proteinstop'], help="""Translate from generic DNA/RNA to
proteins. Options with "stop" suffix will NOT translate through
stop codons . Source sequences must be the correct alphabet or
this action will likely produce incorrect results.""")
seq_mods.add_argument('--ungap',
action=partial_action(transform.ungap_sequences),
dest='transforms', help='Remove gaps in the sequence alignment')
seq_mods.add_argument('--upper',
action=partial_action(transform.upper_sequences),
dest='transforms', help='Translate the sequences to upper case')
seq_select = parser.add_argument_group("Record Selection")
seq_select.add_argument('--deduplicate-sequences',
action='store_const', const=None, default=False,
dest='deduplicate_sequences', help='Remove any duplicate sequences '
'by sequence content, keep the first instance seen')
seq_select.add_argument('--deduplicated-sequences-file', action='store',
metavar='FILE', dest='deduplicate_sequences', default=False,
type=common.FileType('wt'),
help='Write all of the deduplicated sequences to a file')
seq_select.add_argument('--deduplicate-taxa',
action=partial_action(transform.deduplicate_taxa),
dest='transforms', help="""Remove any duplicate sequences by ID,
keep the first instance seen""")
seq_select.add_argument('--exclude-from-file', metavar='FILE',
type=common.FileType('rt'), help="""Filter sequences, removing
those sequence IDs in the specified file""", dest='transforms',
action=partial_action(transform.exclude_from_file, 'handle'))
seq_select.add_argument('--include-from-file', metavar='FILE',
type=common.FileType('rt'), help="""Filter sequences, keeping only
those sequence IDs in the specified file""", dest='transforms',
action=partial_action(transform.include_from_file, 'handle'))
seq_select.add_argument('--head', metavar='N', dest='transforms',
action=partial_action(transform.head, 'head'), help="""Trim
down to top N sequences. With the leading `-', print all but the last N sequences.""")
seq_select.add_argument('--max-length', dest='transforms', metavar='N',
action=partial_action(transform.max_length_discard, 'max_length'),
type=int, help="""Discard any sequences beyond the specified
maximum length. This operation occurs *before* all length-changing
options such as cut and squeeze.""")
seq_select.add_argument('--min-length', dest='transforms', metavar='N',
action=partial_action(transform.min_length_discard, 'min_length'),
type=int, help="""Discard any sequences less than the specified
minimum length. This operation occurs *before* cut and squeeze.""")
seq_select.add_argument('--min-ungapped-length', metavar='N',
action=partial_action(transform.min_ungap_length_discard,
'min_length'), type=int, help="""Discard any sequences less
than the specified minimum length, excluding gaps. This
operation occurs *before* cut and squeeze.""",
dest='transforms')
seq_select.add_argument('--pattern-include', metavar='REGEX',
action=partial_action(transform.name_include, 'filter_regex'),
dest='transforms', help="""Filter the sequences by regular
expression in ID or description""")
seq_select.add_argument('--pattern-exclude', metavar='REGEX',
action=partial_action(transform.name_exclude, 'filter_regex'),
dest='transforms', help="""Filter the sequences by regular
expression in ID or description""")
seq_select.add_argument('--prune-empty',
action=partial_action(transform.prune_empty), dest='transforms',
help="Prune sequences containing only gaps ('-')")
seq_select.add_argument('--sample', metavar='N', dest='transforms', type=int,
action=partial_action(transform.sample, 'k'),
help = """ Select a random sampling of sequences """)
seq_select.add_argument('--sample-seed', metavar='N', type=int,
help = """Set random seed for sampling of sequences""")
seq_select.add_argument('--seq-pattern-include', metavar='REGEX',
action=partial_action(transform.seq_include, 'filter_regex'),
dest='transforms', help="""Filter the sequences by regular
expression in sequence""")
seq_select.add_argument('--seq-pattern-exclude', metavar='REGEX',
action=partial_action(transform.seq_exclude, 'filter_regex'),
dest='transforms', help="""Filter the sequences by regular
expression in sequence""")
seq_select.add_argument('--tail', metavar='N', dest='transforms',
action=partial_action(transform.tail, 'tail'),
help="""Trim down to bottom N sequences. Use +N to output sequences starting with the Nth.""")
id_mods = parser.add_argument_group("Sequence ID Modification")
id_mods.add_argument('--first-name',
action=partial_action(transform.first_name_capture),
dest='transforms', help='''Take only the first whitespace-delimited
word as the name of the sequence''')
id_mods.add_argument('--name-suffix', metavar='SUFFIX',
action=partial_action(transform.name_append_suffix, 'suffix'),
dest='transforms', help='Append a suffix to all IDs.')
id_mods.add_argument('--name-prefix', metavar='PREFIX',
action=partial_action(transform.name_insert_prefix, 'prefix'),
dest='transforms', help="""Insert a prefix for all
IDs.""")
id_mods.add_argument('--pattern-replace', nargs=2,
metavar=('search_pattern', 'replace_pattern'),
action=partial_action(transform.name_replace, ('search_regex',
'replace_pattern')),
dest='transforms', help="""Replace regex pattern "search_pattern"
with "replace_pattern" in sequence ID and description""")
id_mods.add_argument('--strip-range', dest='transforms',
action=partial_action(transform.strip_range), help="""Strip ranges
from sequences IDs, matching </x-y>""")
format_group = parser.add_argument_group('Format Options')
format_group.add_argument('--input-format', metavar='FORMAT',
help="Input file format (default: determine from extension)")
format_group.add_argument('--output-format', metavar='FORMAT',
help="Output file format (default: determine from extension)")
parser.add_argument('--alphabet', choices=ALPHABETS,
help="""Input alphabet. Required for writing NEXUS.""")
return parser | Add optional arguments to the parser | entailment |
def build_parser(parser):
"""
Add shared arguments to the convert or mogrify parser.
"""
add_options(parser)
parser.add_argument('source_file', type=common.FileType('rt'),
help="Input sequence file")
parser.add_argument('dest_file', help="Output file")
return parser | Add shared arguments to the convert or mogrify parser. | entailment |
def module_function(string):
"""
Load a function from a python module using a file name, function name
specification of format:
/path/to/x.py:function_name[:parameter]
"""
parts = string.split(':', 2)
if len(parts) < 2:
raise ValueError(
"Illegal specification. Should be module:function[:parameter]")
module_path, function_name = parts[:2]
# Import the module
module_vars = {}
exec(compile(open(module_path).read(), module_path, 'exec'), module_vars)
try:
function = module_vars[function_name]
except KeyError:
raise argparse.ArgumentTypeError("{0} has no attribute '{1}'".format(
module_path, function_name))
if len(parts) == 3:
old_function = function
function = lambda r: old_function(r, parts[2])
return function | Load a function from a python module using a file name, function name
specification of format:
/path/to/x.py:function_name[:parameter] | entailment |
def parse_cgmlst_alleles(cgmlst_fasta):
"""Parse cgMLST alleles from fasta file
cgMLST FASTA file must have a header format of ">{marker name}|{allele name}"
Args:
cgmlst_fasta (str): cgMLST fasta file path
Returns:
dict of list: Marker name to list of allele sequences
"""
out = defaultdict(list)
for header, seq in parse_fasta(cgmlst_fasta):
if not '|' in header:
raise Exception('Unexpected format for cgMLST fasta file header. No "|" (pipe) delimiter present! Header="{}"'.format(header))
marker_name, allele_name = header.split('|')
out[marker_name].append(seq)
return out | Parse cgMLST alleles from fasta file
cgMLST FASTA file must have a header format of ">{marker name}|{allele name}"
Args:
cgmlst_fasta (str): cgMLST fasta file path
Returns:
dict of list: Marker name to list of allele sequences | entailment |
def parse_fasta(filepath):
'''
Parse a fasta file returning a generator yielding tuples of fasta headers to sequences.
Note:
This function should give equivalent results to SeqIO from BioPython
.. code-block:: python
from Bio import SeqIO
# biopython to dict of header-seq
hseqs_bio = {r.description:str(r.seq) for r in SeqIO.parse(fasta_path, 'fasta')}
# this func to dict of header-seq
hseqs = {header:seq for header, seq in parse_fasta(fasta_path)}
# both methods should return the same dict
assert hseqs == hseqs_bio
Args:
filepath (str): Fasta file path
Returns:
generator: yields tuples of (<fasta header>, <fasta sequence>)
'''
with open(filepath, 'r') as f:
seqs = []
header = ''
for line in f:
line = line.strip()
if line == '':
continue
if line[0] == '>':
if header == '':
header = line.replace('>','')
else:
yield header, ''.join(seqs)
seqs = []
header = line.replace('>','')
else:
seqs.append(line)
yield header, ''.join(seqs) | Parse a fasta file returning a generator yielding tuples of fasta headers to sequences.
Note:
This function should give equivalent results to SeqIO from BioPython
.. code-block:: python
from Bio import SeqIO
# biopython to dict of header-seq
hseqs_bio = {r.description:str(r.seq) for r in SeqIO.parse(fasta_path, 'fasta')}
# this func to dict of header-seq
hseqs = {header:seq for header, seq in parse_fasta(fasta_path)}
# both methods should return the same dict
assert hseqs == hseqs_bio
Args:
filepath (str): Fasta file path
Returns:
generator: yields tuples of (<fasta header>, <fasta sequence>) | entailment |
def fasta_format_check(fasta_path, logger):
"""
Check that a file is valid FASTA format.
- First non-blank line needs to begin with a '>' header character.
- Sequence can only contain valid IUPAC nucleotide characters
Args:
fasta_str (str): FASTA file contents string
Raises:
Exception: If invalid FASTA format
"""
header_count = 0
line_count = 1
nt_count = 0
with open(fasta_path) as f:
for l in f:
l = l.strip()
if l == '':
continue
if l[0] == '>':
header_count += 1
continue
if header_count == 0 and l[0] != '>':
error_msg = 'First non-blank line (L:{line_count}) does not contain FASTA header. Line beginning with ">" expected.' \
.format(line_count=line_count)
logger.error(error_msg)
raise Exception(error_msg)
non_nucleotide_chars_in_line = set(l) - VALID_NUCLEOTIDES
if len(non_nucleotide_chars_in_line) > 0:
error_msg = 'Line {line} contains the following non-nucleotide characters: {non_nt_chars}' \
.format(line=line_count,
non_nt_chars=', '.join([x for x in non_nucleotide_chars_in_line]))
logger.error(error_msg)
raise Exception(error_msg)
nt_count += len(l)
line_count += 1
if nt_count == 0:
error_msg = 'File "{}" does not contain any nucleotide sequence.'.format(fasta_path)
logger.error(error_msg)
raise Exception(error_msg)
logger.info('Valid FASTA format "{}" ({} bp)'.format(fasta_path, nt_count)) | Check that a file is valid FASTA format.
- First non-blank line needs to begin with a '>' header character.
- Sequence can only contain valid IUPAC nucleotide characters
Args:
fasta_str (str): FASTA file contents string
Raises:
Exception: If invalid FASTA format | entailment |
def seq_int_arr(seqs):
"""Convert list of ACGT strings to matix of 1-4 ints
Args:
seqs (list of str): nucleotide sequences with only 'ACGT' characters
Returns:
numpy.array of int: matrix of integers from 1 to 4 inclusive representing A, C, G, and T
str: nucleotide sequence string
"""
return np.array([[NT_TO_INT[c] for c in x.upper()] for x in seqs]) | Convert list of ACGT strings to matix of 1-4 ints
Args:
seqs (list of str): nucleotide sequences with only 'ACGT' characters
Returns:
numpy.array of int: matrix of integers from 1 to 4 inclusive representing A, C, G, and T
str: nucleotide sequence string | entailment |
def group_alleles_by_start_end_Xbp(arr, bp=28):
"""Group alleles by matching ends
Args:
arr (numpy.array): 2D int matrix of alleles
bp (int): length of ends to group by
Returns:
dict of lists: key of start + end strings to list of indices of alleles with matching ends
"""
starts = arr[:,0:bp]
ends = arr[:,-bp:]
starts_ends_idxs = defaultdict(list)
l, seq_len = arr.shape
for i in range(l):
start_i = starts[i]
end_i = ends[i]
start_i_str = ''.join([str(x) for x in start_i])
end_i_str = ''.join([str(x) for x in end_i])
starts_ends_idxs[start_i_str + end_i_str].append(i)
return starts_ends_idxs | Group alleles by matching ends
Args:
arr (numpy.array): 2D int matrix of alleles
bp (int): length of ends to group by
Returns:
dict of lists: key of start + end strings to list of indices of alleles with matching ends | entailment |
def allele_clusters(dists, t=0.025):
"""Flat clusters from distance matrix
Args:
dists (numpy.array): pdist distance matrix
t (float): fcluster (tree cutting) distance threshold
Returns:
dict of lists: cluster number to list of indices of distances in cluster
"""
clusters = fcluster(linkage(dists), 0.025, criterion='distance')
cluster_idx = defaultdict(list)
for idx, cl in enumerate(clusters):
cluster_idx[cl].append(idx)
return cluster_idx | Flat clusters from distance matrix
Args:
dists (numpy.array): pdist distance matrix
t (float): fcluster (tree cutting) distance threshold
Returns:
dict of lists: cluster number to list of indices of distances in cluster | entailment |
def min_row_dist_sum_idx(dists):
"""Find the index of the row with the minimum row distance sum
This should return the index of the row index with the least distance overall
to all other rows.
Args:
dists (np.array): must be square distance matrix
Returns:
int: index of row with min dist row sum
"""
row_sums = np.apply_along_axis(arr=dists, axis=0, func1d=np.sum)
return row_sums.argmin() | Find the index of the row with the minimum row distance sum
This should return the index of the row index with the least distance overall
to all other rows.
Args:
dists (np.array): must be square distance matrix
Returns:
int: index of row with min dist row sum | entailment |
def find_centroid_alleles(alleles, bp=28, t=0.025):
"""Reduce list of alleles to set of centroid alleles based on size grouping, ends matching and hierarchical clustering
Workflow for finding centroid alleles:
- grouping by size (e.g. 100bp, 101bp, 103bp, etc)
- then grouped by `bp` nucleotides at ends matching
- size and ends grouped alleles hierarchically clustered (Hamming distance, complete linkage)
- tree cutting at threshold `t`
- select allele with minimum distance to other alleles in cluster as centroid
Args:
alleles (iterable): collection of allele nucleotide sequences
bp (int): number of bp matching at allele ends for size grouping (default=28 due to default blastn megablast word size)
t (float): cluster generation (tree cutting) distance threshold for size grouped alleles
Returns:
set of str: centroid alleles
"""
centroid_alleles = set()
len_allele = group_alleles_by_size(alleles)
for length, seqs in len_allele.items():
# if only one alelle of a particular size, add as centroid, move onto next size group
if len(seqs) == 1:
centroid_alleles.add(seqs[0])
continue
# convert allele nucleotide sequences to integer matrix
seq_arr = seq_int_arr(seqs)
# group alleles by matching ends
starts_ends_idxs = group_alleles_by_start_end_Xbp(seq_arr, bp=bp)
for k, idxs in starts_ends_idxs.items():
# if only one allele for a particular matching ends group, then add as centroid and move onto next ends group
if len(idxs) == 1:
centroid_alleles.add(seqs[idxs[0]])
continue
# fetch subset of int allele sequences for a matching ends group
seq_arr_subset = seq_arr[idxs]
# Hamming distances between alleles
dists = pdist(seq_arr_subset, 'hamming')
# create flat clusters (tree cut) at t threshold
cl = allele_clusters(dists, t=t)
# for each allele cluster
dm_sq = squareform(dists)
for cl_key, cl_idxs in cl.items():
# if only 1 or 2 alleles in cluster then return first
if len(cl_idxs) == 1 or len(cl_idxs) == 2:
# get first cluster index and get nt seq for that index
centroid_alleles.add(seq_int_arr_to_nt(seq_arr_subset[cl_idxs[0]]))
continue
# else find allele with min distances to all other alleles in cluster
dm_sub = dm_subset(dm_sq, cl_idxs)
min_idx = min_row_dist_sum_idx(dm_sub)
# add nucleotide seq for cluster centroid allele to centroids set
centroid_alleles.add(seq_int_arr_to_nt(seq_arr_subset[min_idx]))
#end for cl_key, cl_idxs in cl.iteritems():
#end for k, idxs in starts_ends_idxs.iteritems():
#end for length, seqs in alleles.iteritems():
return centroid_alleles | Reduce list of alleles to set of centroid alleles based on size grouping, ends matching and hierarchical clustering
Workflow for finding centroid alleles:
- grouping by size (e.g. 100bp, 101bp, 103bp, etc)
- then grouped by `bp` nucleotides at ends matching
- size and ends grouped alleles hierarchically clustered (Hamming distance, complete linkage)
- tree cutting at threshold `t`
- select allele with minimum distance to other alleles in cluster as centroid
Args:
alleles (iterable): collection of allele nucleotide sequences
bp (int): number of bp matching at allele ends for size grouping (default=28 due to default blastn megablast word size)
t (float): cluster generation (tree cutting) distance threshold for size grouped alleles
Returns:
set of str: centroid alleles | entailment |
def mash_dist_trusted(fasta_path):
"""
Compute Mash distances of sketch file of genome fasta to RefSeq sketch DB.
Args:
mash_bin (str): Mash binary path
Returns:
(str): Mash STDOUT string
"""
args = [MASH_BIN,
'dist',
MASH_SKETCH_FILE,
fasta_path]
p = Popen(args, stderr=PIPE, stdout=PIPE)
(stdout, stderr) = p.communicate()
retcode = p.returncode
if retcode != 0:
raise Exception('Could not run Mash dist {}'.format(stderr))
return stdout | Compute Mash distances of sketch file of genome fasta to RefSeq sketch DB.
Args:
mash_bin (str): Mash binary path
Returns:
(str): Mash STDOUT string | entailment |
def nr_profiles(arr, genomes):
"""
Get a condensed cgMLST pairwise distance matrix for specified Genomes_
where condensed means redundant cgMLST profiles are only represented once in the distance matrix.
Args:
user_name (list): List of Genome_ names to retrieve condensed distance matrix for
Returns:
(numpy.array, list): tuple of condensed cgMLST distance matrix and list of grouped Genomes_
"""
gs_collapse = []
genome_idx_dict = {}
indices = []
patt_dict = {}
for i, g in enumerate(genomes):
p = arr[i, :].tostring()
if p in patt_dict:
parent = patt_dict[p]
idx = genome_idx_dict[parent]
gs_collapse[idx].append(g)
else:
indices.append(i)
patt_dict[p] = g
genome_idx_dict[g] = len(gs_collapse)
gs_collapse.append([g])
return arr[indices, :], gs_collapse | Get a condensed cgMLST pairwise distance matrix for specified Genomes_
where condensed means redundant cgMLST profiles are only represented once in the distance matrix.
Args:
user_name (list): List of Genome_ names to retrieve condensed distance matrix for
Returns:
(numpy.array, list): tuple of condensed cgMLST distance matrix and list of grouped Genomes_ | entailment |
def overall_serovar_call(serovar_prediction, antigen_predictor):
"""
Predict serovar from cgMLST cluster membership analysis and antigen BLAST results.
SerovarPrediction object is assigned H1, H2 and Serogroup from the antigen BLAST results.
Antigen BLAST results will predict a particular serovar or list of serovars, however,
the cgMLST membership may be able to help narrow down the list of potential serovars.
Notes:
If the cgMLST predicted serovar is within the list of antigen BLAST predicted serovars,
then the serovar is assigned the cgMLST predicted serovar.
If all antigens are found, but an antigen serovar is not found then the serovar is assigned
a pseudo-antigenic formula (Serogroup:H1:H2), otherwise the serovar is assigned the cgMLST prediction.
If the antigen predicted serovar does not match the cgMLST predicted serovar,
- the serovar is the cgMLST serovar if the cgMLST cluster level is <= 0.1 (10% or less)
- otherwise, the serovar is antigen predicted serovar(s)
Args:
serovar_prediction (src.serovar_prediction.SerovarPrediction): Serovar prediction results (antigen+cgMLST[+Mash])
antigen_predictor (src.serovar_prediction.SerovarPredictor): Antigen search results
Returns:
src.serovar_prediction.SerovarPrediction: Serovar prediction results with overall prediction from antigen + cgMLST
"""
assert isinstance(serovar_prediction, SerovarPrediction)
assert isinstance(antigen_predictor, SerovarPredictor)
h1 = antigen_predictor.h1
h2 = antigen_predictor.h2
sg = antigen_predictor.serogroup
spp = serovar_prediction.cgmlst_subspecies
if spp is None:
if 'mash_match' in serovar_prediction.__dict__:
spp = serovar_prediction.__dict__['mash_subspecies']
serovar_prediction.serovar_antigen = antigen_predictor.serovar
cgmlst_serovar = serovar_prediction.serovar_cgmlst
cgmlst_distance = float(serovar_prediction.cgmlst_distance)
null_result = '-:-:-'
try:
spp_roman = spp_name_to_roman[spp]
except:
spp_roman = None
is_antigen_null = lambda x: (x is None or x == '' or x == '-')
if antigen_predictor.serovar is None:
if is_antigen_null(sg) and is_antigen_null(h1) and is_antigen_null(h2):
if spp_roman is not None:
serovar_prediction.serovar = '{} {}:{}:{}'.format(spp_roman, sg, h1, h2)
else:
serovar_prediction.serovar = '{}:{}:{}'.format(spp_roman, sg, h1, h2)
elif cgmlst_serovar is not None and cgmlst_distance <= CGMLST_DISTANCE_THRESHOLD:
serovar_prediction.serovar = cgmlst_serovar
else:
serovar_prediction.serovar = null_result
if 'mash_match' in serovar_prediction.__dict__:
spd = serovar_prediction.__dict__
mash_dist = float(spd['mash_distance'])
if mash_dist <= MASH_DISTANCE_THRESHOLD:
serovar_prediction.serovar = spd['mash_serovar']
else:
serovars_from_antigen = antigen_predictor.serovar.split('|')
if not isinstance(serovars_from_antigen, list):
serovars_from_antigen = [serovars_from_antigen]
if cgmlst_serovar is not None:
if cgmlst_serovar in serovars_from_antigen:
serovar_prediction.serovar = cgmlst_serovar
else:
if float(cgmlst_distance) <= CGMLST_DISTANCE_THRESHOLD:
serovar_prediction.serovar = cgmlst_serovar
elif 'mash_match' in serovar_prediction.__dict__:
spd = serovar_prediction.__dict__
mash_serovar = spd['mash_serovar']
mash_dist = float(spd['mash_distance'])
if mash_serovar in serovars_from_antigen:
serovar_prediction.serovar = mash_serovar
else:
if mash_dist <= MASH_DISTANCE_THRESHOLD:
serovar_prediction.serovar = mash_serovar
if serovar_prediction.serovar is None:
serovar_prediction.serovar = serovar_prediction.serovar_antigen
if serovar_prediction.h1 is None:
serovar_prediction.h1 = '-'
if serovar_prediction.h2 is None:
serovar_prediction.h2 = '-'
if serovar_prediction.serogroup is None:
serovar_prediction.serogroup = '-'
if serovar_prediction.serovar_antigen is None:
if spp_roman is not None:
serovar_prediction.serovar_antigen = '{} -:-:-'.format(spp_roman)
else:
serovar_prediction.serovar_antigen = '-:-:-'
if serovar_prediction.serovar is None:
serovar_prediction.serovar = serovar_prediction.serovar_antigen
return serovar_prediction | Predict serovar from cgMLST cluster membership analysis and antigen BLAST results.
SerovarPrediction object is assigned H1, H2 and Serogroup from the antigen BLAST results.
Antigen BLAST results will predict a particular serovar or list of serovars, however,
the cgMLST membership may be able to help narrow down the list of potential serovars.
Notes:
If the cgMLST predicted serovar is within the list of antigen BLAST predicted serovars,
then the serovar is assigned the cgMLST predicted serovar.
If all antigens are found, but an antigen serovar is not found then the serovar is assigned
a pseudo-antigenic formula (Serogroup:H1:H2), otherwise the serovar is assigned the cgMLST prediction.
If the antigen predicted serovar does not match the cgMLST predicted serovar,
- the serovar is the cgMLST serovar if the cgMLST cluster level is <= 0.1 (10% or less)
- otherwise, the serovar is antigen predicted serovar(s)
Args:
serovar_prediction (src.serovar_prediction.SerovarPrediction): Serovar prediction results (antigen+cgMLST[+Mash])
antigen_predictor (src.serovar_prediction.SerovarPredictor): Antigen search results
Returns:
src.serovar_prediction.SerovarPrediction: Serovar prediction results with overall prediction from antigen + cgMLST | entailment |
def process_cgmlst_results(df):
"""Append informative fields to cgMLST330 BLAST results DataFrame
The `qseqid` column must contain cgMLST330 query IDs with `{marker name}|{allele number}` format.
The `qseqid` parsed allele numbers and marker names are appended as new fields.
`is_perfect` column contains boolean values for whether an allele result is 100% identity and coverage.
`has_perfect_match` denotes if a cgMLST330 marker has a perfect allele match.
The top result with the largest bitscore for a marker with no perfect match is used to retrieve the allele present
at that marker locus.
Args:
df (pandas.DataFrame): DataFrame of cgMLST330 BLAST results
Returns:
pandas.DataFrame: cgMLST330 BLAST results DataFrame with extra fields (`marker`, `allele`, `is_perfect`, `has_perfect_match`)
"""
assert isinstance(df, pd.DataFrame)
markers = []
alleles = []
for x in df['qseqid']:
marker, allele = x.split('|')
markers.append(marker)
alleles.append(int(allele))
df.loc[:, 'marker'] = markers
df.loc[:, 'allele'] = alleles
df.loc[:, 'is_match'] = (df['coverage'] >= 1.0) & (df['pident'] >= 90.0) & ~(df['is_trunc'])
df.loc[:, 'allele_name'] = df.apply(lambda x: allele_name(x.sseq.replace('-', '')), axis=1)
df.loc[:, 'is_perfect'] = (df['coverage'] == 1.0) & (df['pident'] == 100.0)
df_perf = df[df['is_perfect']]
perf_markers = df_perf['marker'].unique()
df.loc[:, 'has_perfect_match'] = df['marker'].isin(perf_markers)
start_idxs, end_idxs, needs_revcomps, trunc, is_extended = extend_subj_match_vec(df)
df.loc[:, 'start_idx'] = start_idxs
df.loc[:, 'end_idx'] = end_idxs
df.loc[:, 'needs_revcomp'] = needs_revcomps
df.loc[:, 'trunc'] = trunc
df.loc[:, 'is_extended'] = is_extended
df.loc[:, 'sseq_msa_gaps'] = np.zeros(df.shape[0], dtype=np.int64)
df.loc[:, 'sseq_msa_p_gaps'] = np.zeros(df.shape[0], dtype=np.float64)
df.loc[:, 'too_many_gaps'] = trunc
return df | Append informative fields to cgMLST330 BLAST results DataFrame
The `qseqid` column must contain cgMLST330 query IDs with `{marker name}|{allele number}` format.
The `qseqid` parsed allele numbers and marker names are appended as new fields.
`is_perfect` column contains boolean values for whether an allele result is 100% identity and coverage.
`has_perfect_match` denotes if a cgMLST330 marker has a perfect allele match.
The top result with the largest bitscore for a marker with no perfect match is used to retrieve the allele present
at that marker locus.
Args:
df (pandas.DataFrame): DataFrame of cgMLST330 BLAST results
Returns:
pandas.DataFrame: cgMLST330 BLAST results DataFrame with extra fields (`marker`, `allele`, `is_perfect`, `has_perfect_match`) | entailment |
def alleles_to_retrieve(df):
"""Alleles to retrieve from genome fasta
Get a dict of the genome fasta contig title to a list of blastn results of the allele sequences that must be
retrieved from the genome contig.
Args:
df (pandas.DataFrame): blastn results dataframe
Returns:
{str:[pandas.Series]}: dict of contig title (header name) to list of top blastn result records for each marker
for which the allele sequence must be retrieved from the original sequence.
"""
contig_blastn_records = defaultdict(list)
markers = df.marker.unique()
for m in markers:
dfsub = df[df.marker == m]
for i, r in dfsub.iterrows():
if r.coverage < 1.0:
contig_blastn_records[r.stitle].append(r)
break
return contig_blastn_records | Alleles to retrieve from genome fasta
Get a dict of the genome fasta contig title to a list of blastn results of the allele sequences that must be
retrieved from the genome contig.
Args:
df (pandas.DataFrame): blastn results dataframe
Returns:
{str:[pandas.Series]}: dict of contig title (header name) to list of top blastn result records for each marker
for which the allele sequence must be retrieved from the original sequence. | entailment |
def matches_to_marker_results(df):
"""Perfect BLAST matches to marker results dict
Parse perfect BLAST matches to marker results dict.
Args:
df (pandas.DataFrame): DataFrame of perfect BLAST matches
Returns:
dict: cgMLST330 marker names to matching allele numbers
"""
assert isinstance(df, pd.DataFrame)
from collections import defaultdict
d = defaultdict(list)
for idx, row in df.iterrows():
marker = row['marker']
d[marker].append(row)
marker_results = {}
for k,v in d.items():
if len(v) > 1:
logging.debug('Multiple potential cgMLST allele matches (n=%s) found for marker %s. Selecting match on longest contig.', len(v), k)
df_marker = pd.DataFrame(v)
df_marker.sort_values('slen', ascending=False, inplace=True)
for i,r in df_marker.iterrows():
allele = r['allele_name']
slen = r['slen']
logging.debug('Selecting allele %s from contig with length %s', allele, slen)
seq = r['sseq']
if '-' in seq:
logging.warning('Gaps found in allele. Removing gaps. %s', r)
seq = seq.replace('-', '').upper()
allele = allele_name(seq)
marker_results[k] = allele_result_dict(allele, seq, r.to_dict())
break
elif len(v) == 1:
row = v[0]
seq = row['sseq']
if '-' in seq:
logging.warning('Gaps found in allele. Removing gaps. %s', row)
seq = seq.replace('-', '').upper()
allele = allele_name(seq)
marker_results[k] = allele_result_dict(allele, seq, row.to_dict())
else:
err_msg = 'Empty list of matches for marker {}'.format(k)
logging.error(err_msg)
raise Exception(err_msg)
return marker_results | Perfect BLAST matches to marker results dict
Parse perfect BLAST matches to marker results dict.
Args:
df (pandas.DataFrame): DataFrame of perfect BLAST matches
Returns:
dict: cgMLST330 marker names to matching allele numbers | entailment |
def cgmlst_subspecies_call(df_relatives):
"""Call Salmonella subspecies based on cgMLST results
This method attempts to find the majority subspecies type within curated
public genomes above a cgMLST allelic profile distance threshold.
Note:
``CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD`` is the cgMLST distance
threshold used to determine the subspecies by cgMLST. It is set at a
distance of 0.9 which translates to a cgMLST allelic similarity of 10%.
A threshold of 0.9 is generous and reasonable given the congruence
between subspecies designations and 10% cgMLST clusters by Adjusted
Rand (~0.850) and Adjusted Wallace metrics (~0.850 both ways).
Args:
df_relatives (pandas.DataFrame): Table of genomes related by cgMLST to input genome
Returns:
None: if no curated public genomes found to have a cgMLST profile similarity of 10% or greater
(string, float, dict): most common subspecies, closest related public genome distance, subspecies frequencies
"""
closest_distance = df_relatives['distance'].min()
if closest_distance > CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD:
logging.warning('Min cgMLST distance (%s) above subspeciation distance threshold (%s)',
closest_distance,
CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD)
return None
else:
df_relatives = df_relatives.loc[df_relatives.distance <= CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD, :]
df_relatives = df_relatives.sort_values('distance', ascending=True)
logging.debug('df_relatives by cgmlst %s', df_relatives.head())
genome_spp = genomes_to_subspecies()
subspecies_below_threshold = [genome_spp[member_genome] if member_genome in genome_spp else None for member_genome in df_relatives.index]
subspecies_below_threshold = filter(None, subspecies_below_threshold)
subspecies_counter = Counter(subspecies_below_threshold)
logging.debug('Subspecies counter: %s', subspecies_counter)
return (subspecies_counter.most_common(1)[0][0], closest_distance, dict(subspecies_counter)) | Call Salmonella subspecies based on cgMLST results
This method attempts to find the majority subspecies type within curated
public genomes above a cgMLST allelic profile distance threshold.
Note:
``CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD`` is the cgMLST distance
threshold used to determine the subspecies by cgMLST. It is set at a
distance of 0.9 which translates to a cgMLST allelic similarity of 10%.
A threshold of 0.9 is generous and reasonable given the congruence
between subspecies designations and 10% cgMLST clusters by Adjusted
Rand (~0.850) and Adjusted Wallace metrics (~0.850 both ways).
Args:
df_relatives (pandas.DataFrame): Table of genomes related by cgMLST to input genome
Returns:
None: if no curated public genomes found to have a cgMLST profile similarity of 10% or greater
(string, float, dict): most common subspecies, closest related public genome distance, subspecies frequencies | entailment |
def run_cgmlst(blast_runner, full=False):
"""Perform in silico cgMLST on an input genome
Args:
blast_runner (sistr.src.blast_wrapper.BlastRunner): blastn runner object with genome fasta initialized
Returns:
dict: cgMLST ref genome match, distance to closest ref genome, subspecies and serovar predictions
dict: marker allele match results (seq, allele name, blastn results)
"""
from sistr.src.serovar_prediction.constants import genomes_to_serovar
df_cgmlst_profiles = ref_cgmlst_profiles()
logging.debug('{} distinct cgMLST330 profiles'.format(df_cgmlst_profiles.shape[0]))
logging.info('Running BLAST on serovar predictive cgMLST330 alleles')
cgmlst_fasta_path = CGMLST_CENTROID_FASTA_PATH if not full else CGMLST_FULL_FASTA_PATH
blast_outfile = blast_runner.blast_against_query(cgmlst_fasta_path)
logging.info('Reading BLAST output file "{}"'.format(blast_outfile))
blast_reader = BlastReader(blast_outfile)
if blast_reader.df is None:
logging.error('No cgMLST330 alleles found!')
return ({'distance': 1.0,
'genome_match': None,
'serovar': None,
'matching_alleles': 0,
'subspecies': None,
'cgmlst330_ST': None,},
{}, )
logging.info('Found {} cgMLST330 allele BLAST results'.format(blast_reader.df.shape[0]))
df_cgmlst_blastn = process_cgmlst_results(blast_reader.df)
marker_match_results = matches_to_marker_results(df_cgmlst_blastn[df_cgmlst_blastn.is_match])
contig_blastn_records = alleles_to_retrieve(df_cgmlst_blastn)
retrieved_marker_alleles = get_allele_sequences(blast_runner.fasta_path,
contig_blastn_records,
full=full)
logging.info('Type retrieved_marker_alleles %s', type(retrieved_marker_alleles))
all_marker_results = marker_match_results.copy()
for marker, res in retrieved_marker_alleles.items():
all_marker_results[marker] = res
for marker in df_cgmlst_profiles.columns:
if marker not in all_marker_results:
all_marker_results[marker] = {'blast_result': None,
'name': None,
'seq': None,}
cgmlst_results = {}
for marker, res in all_marker_results.items():
try:
cgmlst_results[marker] = int(res['name'])
except:
logging.error('Missing cgmlst_results for %s', marker)
logging.debug(res)
logging.info('Calculating number of matching alleles to serovar predictive cgMLST330 profiles')
df_relatives = find_closest_related_genome(cgmlst_results, df_cgmlst_profiles)
genome_serovar_dict = genomes_to_serovar()
df_relatives['serovar'] = [genome_serovar_dict[genome] for genome in df_relatives.index]
logging.debug('Top 5 serovar predictive cgMLST profiles:\n{}'.format(df_relatives.head()))
spp = None
subspeciation_tuple = cgmlst_subspecies_call(df_relatives)
if subspeciation_tuple is not None:
spp, distance, spp_counter = subspeciation_tuple
logging.info('Top subspecies by cgMLST is "{}" (min dist={}, Counter={})'.format(spp, distance, spp_counter))
else:
logging.warning('Subspeciation by cgMLST was not possible!')
cgmlst_serovar = None
cgmlst_matching_genome = None
cgmlst_matching_alleles = 0
cgmlst_distance = 1.0
for idx, row in df_relatives.iterrows():
cgmlst_distance = row['distance']
cgmlst_matching_alleles = row['matching']
cgmlst_serovar = row['serovar'] if cgmlst_distance <= 1.0 else None
cgmlst_matching_genome = idx if cgmlst_distance <= 1.0 else None
logging.info('Top serovar by cgMLST profile matching: "{}" with {} matching alleles, distance={:.1%}'.format(
cgmlst_serovar,
cgmlst_matching_alleles,
cgmlst_distance
))
break
cgmlst_st = None
cgmlst_markers_sorted = sorted(all_marker_results.keys())
cgmlst_allele_names = []
marker = None
for marker in cgmlst_markers_sorted:
try:
aname = all_marker_results[marker]['name']
if aname:
cgmlst_allele_names.append(str(aname))
else:
break
except:
break
if len(cgmlst_allele_names) == len(cgmlst_markers_sorted):
cgmlst_st = allele_name('-'.join(cgmlst_allele_names))
logging.info('cgMLST330 Sequence Type=%s', cgmlst_st)
else:
logging.warning('Could not compute cgMLST330 Sequence Type due to missing data (marker %s)', marker)
return ({'distance': cgmlst_distance,
'genome_match': cgmlst_matching_genome,
'serovar': cgmlst_serovar,
'matching_alleles': cgmlst_matching_alleles,
'subspecies': spp,
'cgmlst330_ST': cgmlst_st,},
all_marker_results, ) | Perform in silico cgMLST on an input genome
Args:
blast_runner (sistr.src.blast_wrapper.BlastRunner): blastn runner object with genome fasta initialized
Returns:
dict: cgMLST ref genome match, distance to closest ref genome, subspecies and serovar predictions
dict: marker allele match results (seq, allele name, blastn results) | entailment |
def genome_name_from_fasta_path(fasta_path):
"""Extract genome name from fasta filename
Get the filename without directory and remove the file extension.
Example:
With fasta file path ``/path/to/genome_1.fasta``::
fasta_path = '/path/to/genome_1.fasta'
genome_name = genome_name_from_fasta_path(fasta_path)
print(genome_name)
# => "genome_1"
Args:
fasta_path (str): fasta file path
Returns:
str: genome name
"""
filename = os.path.basename(fasta_path)
return re.sub(r'(\.fa$)|(\.fas$)|(\.fasta$)|(\.fna$)|(\.\w{1,}$)', '', filename) | Extract genome name from fasta filename
Get the filename without directory and remove the file extension.
Example:
With fasta file path ``/path/to/genome_1.fasta``::
fasta_path = '/path/to/genome_1.fasta'
genome_name = genome_name_from_fasta_path(fasta_path)
print(genome_name)
# => "genome_1"
Args:
fasta_path (str): fasta file path
Returns:
str: genome name | entailment |
def extend_subj_match_vec(df):
"""
Get the extended clipped (clamped) start and end subject sequence indices
Also get whether each match needs to be reverse complemented and whether each extended match would be truncated by
the end of the subject sequence.
Args:
df (pandas.DataFrame): blastn results dataframe
Returns:
int pandas.Series: extended and clipped start indices
int pandas.Series: extended and clipped end indices
bool pandas.Series: does extracted seq need to be reverse complemented?
bool pandas.Series: would the extended seq be truncated by the ends of the subject sequence?
bool pandas.Series: was the subject seq extended?
"""
needs_revcomp = df.sstart > df.send
add_to_end = df.qlen - df.qend
add_to_start = df.qstart - 1
ssum2 = (df.send + df.sstart) / 2.0
sabs2 = np.abs(df.send - df.sstart) / 2.0
end_idx = ssum2 + sabs2 - 1
start_idx = ssum2 - sabs2 - 1
start_idx[needs_revcomp] -= add_to_end
start_idx[~needs_revcomp] -= add_to_start
end_idx[needs_revcomp] += add_to_start
end_idx[~needs_revcomp] += add_to_end
clipped_start_idx = np.clip(start_idx, 0, (df.slen - 1))
clipped_end_idx = np.clip(end_idx, 0, (df.slen - 1))
trunc = (clipped_start_idx != start_idx) | (clipped_end_idx != end_idx)
is_extended = (add_to_start > 0) | (add_to_end > 0)
return clipped_start_idx, clipped_end_idx, needs_revcomp, trunc, is_extended | Get the extended clipped (clamped) start and end subject sequence indices
Also get whether each match needs to be reverse complemented and whether each extended match would be truncated by
the end of the subject sequence.
Args:
df (pandas.DataFrame): blastn results dataframe
Returns:
int pandas.Series: extended and clipped start indices
int pandas.Series: extended and clipped end indices
bool pandas.Series: does extracted seq need to be reverse complemented?
bool pandas.Series: would the extended seq be truncated by the ends of the subject sequence?
bool pandas.Series: was the subject seq extended? | entailment |
def listattrs(x):
"""Get all instance and class attributes for an object
Get all instance and class attributes for an object except those that start
with "__" (double underscore).
__dict__ of an object only reports the instance attributes while dir()
reports all of the attributes of an object including private ones.
Callable attrs are filtered out.
Args:
x (object): Some object
Returns:
list str: List of non-callable non-private attributes of object x
"""
return [attr for attr in dir(x) if not attr.startswith("__") and not callable(getattr(x, attr))] | Get all instance and class attributes for an object
Get all instance and class attributes for an object except those that start
with "__" (double underscore).
__dict__ of an object only reports the instance attributes while dir()
reports all of the attributes of an object including private ones.
Callable attrs are filtered out.
Args:
x (object): Some object
Returns:
list str: List of non-callable non-private attributes of object x | entailment |
def to_dict(x, depth, exclude_keys=set(), depth_threshold=8):
"""Transform a nested object/dict/list into a regular dict
json.dump(s) and pickle don't like to un/serialize regular Python objects so
this function should handle arbitrarily nested objects to be serialized to
regular string, float, int, bool, None values.
This is a recursive function so by default it will exit at a certain depth (depth_threshold=8).
Args:
x (object): Some object to dict-ify unless x is a scalar/literal then return x as is
depth (int): Starting depth must be 0 (cannot supply default value due to weird Pythonisms)
exclude_keys (set): Keys to avoid adding to the output dict
depth_threshold (int): object/dict nesting depth to stop at
Returns:
dict: dict with only scalar/literal leaf values
"""
if x is None or isinstance(x, (str, int, float, bool)):
return x
if isinstance(x, np.int_):
return int(x)
if isinstance(x, np.int64):
return int(x)
if isinstance(x, np.float_):
return float(x)
if isinstance(x, np.float64):
return float(x)
if isinstance(x, np.bool_):
return bool(x)
if depth + 1 > depth_threshold: return {}
if isinstance(x, list):
out = []
for v in x:
tmp = to_dict(v, depth + 1, exclude_keys, depth_threshold)
if tmp == {}: continue
out.append(tmp)
return out
out = {}
if isinstance(x, dict):
for k, v in x.items():
if k in exclude_keys: continue
if not isinstance(k, (str,)):
k = str(k)
tmp = to_dict(v, depth + 1, exclude_keys, depth_threshold)
if tmp == {}: continue
out[k] = tmp
return out
for attr in listattrs(x):
if attr in exclude_keys: continue
v = getattr(x, attr)
tmp = to_dict(v, depth + 1, exclude_keys, depth_threshold)
if tmp == {}: continue
out[attr] = tmp
return out | Transform a nested object/dict/list into a regular dict
json.dump(s) and pickle don't like to un/serialize regular Python objects so
this function should handle arbitrarily nested objects to be serialized to
regular string, float, int, bool, None values.
This is a recursive function so by default it will exit at a certain depth (depth_threshold=8).
Args:
x (object): Some object to dict-ify unless x is a scalar/literal then return x as is
depth (int): Starting depth must be 0 (cannot supply default value due to weird Pythonisms)
exclude_keys (set): Keys to avoid adding to the output dict
depth_threshold (int): object/dict nesting depth to stop at
Returns:
dict: dict with only scalar/literal leaf values | entailment |
def _recur_flatten(key, x, out, sep='.'):
"""Helper function to flatten_dict
Recursively flatten all nested values within a dict
Args:
key (str): parent key
x (object): object to flatten or add to out dict
out (dict): 1D output dict
sep (str): flattened key separator string
Returns:
dict: flattened 1D dict
"""
if x is None or isinstance(x, (str, int, float, bool)):
out[key] = x
return out
if isinstance(x, list):
for i, v in enumerate(x):
new_key = '{}{}{}'.format(key, sep, i)
out = _recur_flatten(new_key, v, out, sep)
if isinstance(x, dict):
for k, v in x.items():
new_key = '{}{}{}'.format(key, sep, k)
out = _recur_flatten(new_key, v, out, sep)
return out | Helper function to flatten_dict
Recursively flatten all nested values within a dict
Args:
key (str): parent key
x (object): object to flatten or add to out dict
out (dict): 1D output dict
sep (str): flattened key separator string
Returns:
dict: flattened 1D dict | entailment |
def flatten_dict(x):
"""Flatten a dict
Flatten an arbitrarily nested dict as output by to_dict
.. note::
Keys in the flattened dict may get very long.
Args:
x (dict): Arbitrarily nested dict (maybe resembling a tree) with literal/scalar leaf values
Returns:
dict: flattened 1D dict
"""
out = {}
for k, v in x.items():
out = _recur_flatten(k, v, out)
return out | Flatten a dict
Flatten an arbitrarily nested dict as output by to_dict
.. note::
Keys in the flattened dict may get very long.
Args:
x (dict): Arbitrarily nested dict (maybe resembling a tree) with literal/scalar leaf values
Returns:
dict: flattened 1D dict | entailment |
def df_first_row_to_dict(df):
"""First DataFrame row to list of dict
Args:
df (pandas.DataFrame): A DataFrame with at least one row
Returns:
A list of dict that looks like:
[{'C1': 'x'}, {'C2': 'y'}, {'C3': 'z'}]
from a DataFrame that looks like:
C1 C2 C3
1 x y z
Else if `df` is `None`, returns `None`
"""
if df is not None:
return [dict(r) for i, r in df.head(1).iterrows()][0] | First DataFrame row to list of dict
Args:
df (pandas.DataFrame): A DataFrame with at least one row
Returns:
A list of dict that looks like:
[{'C1': 'x'}, {'C2': 'y'}, {'C3': 'z'}]
from a DataFrame that looks like:
C1 C2 C3
1 x y z
Else if `df` is `None`, returns `None` | entailment |
def is_blast_result_trunc(qstart, qend, sstart, send, qlen, slen):
"""Check if a query sequence is truncated by the end of a subject sequence
Args:
qstart (int): Query sequence start index
qend (int): Query sequence end index
sstart (int): Subject sequence start index
send (int): Subject sequence end index
qlen (int): Query sequence length
slen (int): Subject sequence length
Returns:
bool: Result truncated by subject sequence end?
"""
q_match_len = abs(qstart - qend) + 1
s_max = max(sstart, send)
s_min = min(sstart, send)
return (q_match_len < qlen) and (s_max >= slen or s_min <= 1) | Check if a query sequence is truncated by the end of a subject sequence
Args:
qstart (int): Query sequence start index
qend (int): Query sequence end index
sstart (int): Subject sequence start index
send (int): Subject sequence end index
qlen (int): Query sequence length
slen (int): Subject sequence length
Returns:
bool: Result truncated by subject sequence end? | entailment |
def trunc(qstart, qend, sstart, send, qlen, slen):
"""Check if a query sequence is truncated by the end of a subject sequence
Args:
qstart (int pandas.Series): Query sequence start index
qend (int pandas.Series): Query sequence end index
sstart (int pandas.Series): Subject sequence start index
send (int pandas.Series): Subject sequence end index
qlen (int pandas.Series): Query sequence length
slen (int pandas.Series): Subject sequence length
Returns:
Boolean pandas.Series: Result truncated by subject sequence end?
"""
ssum2 = (send + sstart) / 2.0
sabs2 = np.abs(send - sstart) / 2.0
smax = ssum2 + sabs2
smin = ssum2 - sabs2
q_match_len = np.abs(qstart - qend) + 1
return (q_match_len < qlen) & ((smax >= slen) | (smin <= 1)) | Check if a query sequence is truncated by the end of a subject sequence
Args:
qstart (int pandas.Series): Query sequence start index
qend (int pandas.Series): Query sequence end index
sstart (int pandas.Series): Subject sequence start index
send (int pandas.Series): Subject sequence end index
qlen (int pandas.Series): Query sequence length
slen (int pandas.Series): Subject sequence length
Returns:
Boolean pandas.Series: Result truncated by subject sequence end? | entailment |
def perfect_matches(self):
"""
Return pandas DataFrame with perfect BLAST matches (100% identity and coverage)
Returns:
pandas.DataFrame or None: DataFrame of perfect BLAST matches or None if no perfect matches exist
"""
if self.is_missing:
return None
df_perfect_matches = self.df[(self.df['coverage'] == 1.0) & (self.df['pident'] == 100.0)]
if df_perfect_matches.shape[0] == 0:
return None
return df_perfect_matches | Return pandas DataFrame with perfect BLAST matches (100% identity and coverage)
Returns:
pandas.DataFrame or None: DataFrame of perfect BLAST matches or None if no perfect matches exist | entailment |
def top_result(self):
"""Return top `blastn` result
Try to find a 100% identity and coverage result (perfect match).
If one does not exist, then retrieve the result with the highest bitscore.
Returns:
Ordered dict of BLASTN results or None if no BLASTN results generated
"""
if self.is_missing:
return None
df_perfect_matches = self.df[(self.df['coverage'] == 1.0) & (self.df['pident'] == 100.0)]
if df_perfect_matches.shape[0]:
self.is_perfect_match = True
return BlastReader.df_first_row_to_dict(df_perfect_matches)
# Return the result with the highest bitscore.
# This is the first result in dataframe since the df is ordered by
# bitscore in descending order.
result_dict = BlastReader.df_first_row_to_dict(self.df)
result_trunc = BlastReader.is_blast_result_trunc(qstart=result_dict['qstart'],
qend=result_dict['qend'],
sstart=result_dict['sstart'],
send=result_dict['send'],
qlen=result_dict['qlen'],
slen=result_dict['slen'])
self.is_trunc = result_trunc
return result_dict | Return top `blastn` result
Try to find a 100% identity and coverage result (perfect match).
If one does not exist, then retrieve the result with the highest bitscore.
Returns:
Ordered dict of BLASTN results or None if no BLASTN results generated | entailment |
def sketch_fasta(fasta_path, outdir):
"""Create a Mash sketch from an input fasta file
Args:
fasta_path (str): input fasta file path. Genome name in fasta filename
outdir (str): output directory path to write Mash sketch file to
Returns:
str: output Mash sketch file path
"""
genome_name = genome_name_from_fasta_path(fasta_path)
outpath = os.path.join(outdir, genome_name)
args = ['mash', 'sketch', '-o', outpath, fasta_path]
logging.info('Running Mash sketch with command: %s', ' '.join(args))
p = Popen(args)
p.wait()
sketch_path = outpath + '.msh'
assert os.path.exists(sketch_path), 'Mash sketch for genome {} was not created at {}'.format(
genome_name,
sketch_path)
return sketch_path | Create a Mash sketch from an input fasta file
Args:
fasta_path (str): input fasta file path. Genome name in fasta filename
outdir (str): output directory path to write Mash sketch file to
Returns:
str: output Mash sketch file path | entailment |
def merge_sketches(outdir, sketch_paths):
"""Merge new Mash sketches with current Mash sketches
Args:
outdir (str): output directory to write merged Mash sketch file
sketch_paths (list of str): Mash sketch file paths for input fasta files
Returns:
str: output path for Mash sketch file with new and old sketches
"""
merge_sketch_path = os.path.join(outdir, 'sistr.msh')
args = ['mash', 'paste', merge_sketch_path]
for x in sketch_paths:
args.append(x)
args.append(MASH_SKETCH_FILE)
logging.info('Running Mash paste with command: %s', ' '.join(args))
p = Popen(args)
p.wait()
assert os.path.exists(merge_sketch_path), 'Merged sketch was not created at {}'.format(merge_sketch_path)
return merge_sketch_path | Merge new Mash sketches with current Mash sketches
Args:
outdir (str): output directory to write merged Mash sketch file
sketch_paths (list of str): Mash sketch file paths for input fasta files
Returns:
str: output path for Mash sketch file with new and old sketches | entailment |
def delete(self, key):
"""Implementation of :meth:`~simplekv.KeyValueStore.delete`.
If an exception occurs in either the cache or backing store, all are
passing on.
"""
self._dstore.delete(key)
self.cache.delete(key) | Implementation of :meth:`~simplekv.KeyValueStore.delete`.
If an exception occurs in either the cache or backing store, all are
passing on. | entailment |
def get(self, key):
"""Implementation of :meth:`~simplekv.KeyValueStore.get`.
If a cache miss occurs, the value is retrieved, stored in the cache and
returned.
If the cache raises an :exc:`~exceptions.IOError`, the cache is
ignored, and the backing store is consulted directly.
It is possible for a caching error to occur while attempting to store
the value in the cache. It will not be handled as well.
"""
try:
return self.cache.get(key)
except KeyError:
# cache miss or error, retrieve from backend
data = self._dstore.get(key)
# store in cache and return
self.cache.put(key, data)
return data
except IOError:
# cache error, ignore completely and return from backend
return self._dstore.get(key) | Implementation of :meth:`~simplekv.KeyValueStore.get`.
If a cache miss occurs, the value is retrieved, stored in the cache and
returned.
If the cache raises an :exc:`~exceptions.IOError`, the cache is
ignored, and the backing store is consulted directly.
It is possible for a caching error to occur while attempting to store
the value in the cache. It will not be handled as well. | entailment |
def get_file(self, key, file):
"""Implementation of :meth:`~simplekv.KeyValueStore.get_file`.
If a cache miss occurs, the value is retrieved, stored in the cache and
returned.
If the cache raises an :exc:`~exceptions.IOError`, the retrieval cannot
proceed: If ``file`` was an open file, data maybe been written to it
already. The :exc:`~exceptions.IOError` bubbles up.
It is possible for a caching error to occur while attempting to store
the value in the cache. It will not be handled as well.
"""
try:
return self.cache.get_file(key, file)
except KeyError:
# cache miss, load into cache
fp = self._dstore.open(key)
self.cache.put_file(key, fp)
# return from cache
return self.cache.get_file(key, file) | Implementation of :meth:`~simplekv.KeyValueStore.get_file`.
If a cache miss occurs, the value is retrieved, stored in the cache and
returned.
If the cache raises an :exc:`~exceptions.IOError`, the retrieval cannot
proceed: If ``file`` was an open file, data maybe been written to it
already. The :exc:`~exceptions.IOError` bubbles up.
It is possible for a caching error to occur while attempting to store
the value in the cache. It will not be handled as well. | entailment |
def open(self, key):
"""Implementation of :meth:`~simplekv.KeyValueStore.open`.
If a cache miss occurs, the value is retrieved, stored in the cache,
then then another open is issued on the cache.
If the cache raises an :exc:`~exceptions.IOError`, the cache is
ignored, and the backing store is consulted directly.
It is possible for a caching error to occur while attempting to store
the value in the cache. It will not be handled as well.
"""
try:
return self.cache.open(key)
except KeyError:
# cache miss, load into cache
fp = self._dstore.open(key)
self.cache.put_file(key, fp)
return self.cache.open(key)
except IOError:
# cache error, ignore completely and return from backend
return self._dstore.open(key) | Implementation of :meth:`~simplekv.KeyValueStore.open`.
If a cache miss occurs, the value is retrieved, stored in the cache,
then then another open is issued on the cache.
If the cache raises an :exc:`~exceptions.IOError`, the cache is
ignored, and the backing store is consulted directly.
It is possible for a caching error to occur while attempting to store
the value in the cache. It will not be handled as well. | entailment |
def copy(self, source, dest):
"""Implementation of :meth:`~simplekv.CopyMixin.copy`.
Copies the data in the backing store and removes the destination key from the cache,
in case it was already populated.
Does not work when the backing store does not implement copy.
"""
try:
k = self._dstore.copy(source, dest)
finally:
self.cache.delete(dest)
return k | Implementation of :meth:`~simplekv.CopyMixin.copy`.
Copies the data in the backing store and removes the destination key from the cache,
in case it was already populated.
Does not work when the backing store does not implement copy. | entailment |
def put(self, key, data):
"""Implementation of :meth:`~simplekv.KeyValueStore.put`.
Will store the value in the backing store. After a successful or
unsuccessful store, the cache will be invalidated by deleting the key
from it.
"""
try:
return self._dstore.put(key, data)
finally:
self.cache.delete(key) | Implementation of :meth:`~simplekv.KeyValueStore.put`.
Will store the value in the backing store. After a successful or
unsuccessful store, the cache will be invalidated by deleting the key
from it. | entailment |
def put_file(self, key, file):
"""Implementation of :meth:`~simplekv.KeyValueStore.put_file`.
Will store the value in the backing store. After a successful or
unsuccessful store, the cache will be invalidated by deleting the key
from it.
"""
try:
return self._dstore.put_file(key, file)
finally:
self.cache.delete(key) | Implementation of :meth:`~simplekv.KeyValueStore.put_file`.
Will store the value in the backing store. After a successful or
unsuccessful store, the cache will be invalidated by deleting the key
from it. | entailment |
def get_file(self, key, file):
"""Write contents of key to file
Like :meth:`.KeyValueStore.put_file`, this method allows backends to
implement a specialized function if data needs to be written to disk or
streamed.
If *file* is a string, contents of *key* are written to a newly
created file with the filename *file*. Otherwise, the data will be
written using the *write* method of *file*.
:param key: The key to be read
:param file: Output filename or an object with a *write* method.
:raises exceptions.ValueError: If the key is not valid.
:raises exceptions.IOError: If there was a problem reading or writing
data.
:raises exceptions.KeyError: If the key was not found.
"""
self._check_valid_key(key)
if isinstance(file, str):
return self._get_filename(key, file)
else:
return self._get_file(key, file) | Write contents of key to file
Like :meth:`.KeyValueStore.put_file`, this method allows backends to
implement a specialized function if data needs to be written to disk or
streamed.
If *file* is a string, contents of *key* are written to a newly
created file with the filename *file*. Otherwise, the data will be
written using the *write* method of *file*.
:param key: The key to be read
:param file: Output filename or an object with a *write* method.
:raises exceptions.ValueError: If the key is not valid.
:raises exceptions.IOError: If there was a problem reading or writing
data.
:raises exceptions.KeyError: If the key was not found. | entailment |
def put_file(self, key, file):
"""Store into key from file on disk
Stores data from a source into key. *file* can either be a string,
which will be interpretet as a filename, or an object with a *read()*
method.
If the passed object has a *fileno()* method, it may be used to speed
up the operation.
The file specified by *file*, if it is a filename, may be removed in
the process, to avoid copying if possible. If you need to make a copy,
pass the opened file instead.
:param key: The key under which the data is to be stored
:param file: A filename or an object with a read method. If a filename,
may be removed
:returns: The key under which data was stored
:raises exceptions.ValueError: If the key is not valid.
:raises exceptions.IOError: If there was a problem moving the file in.
"""
# FIXME: shouldn't we call self._check_valid_key here?
if isinstance(file, str):
return self._put_filename(key, file)
else:
return self._put_file(key, file) | Store into key from file on disk
Stores data from a source into key. *file* can either be a string,
which will be interpretet as a filename, or an object with a *read()*
method.
If the passed object has a *fileno()* method, it may be used to speed
up the operation.
The file specified by *file*, if it is a filename, may be removed in
the process, to avoid copying if possible. If you need to make a copy,
pass the opened file instead.
:param key: The key under which the data is to be stored
:param file: A filename or an object with a read method. If a filename,
may be removed
:returns: The key under which data was stored
:raises exceptions.ValueError: If the key is not valid.
:raises exceptions.IOError: If there was a problem moving the file in. | entailment |
def _check_valid_key(self, key):
"""Checks if a key is valid and raises a ValueError if its not.
When in need of checking a key for validity, always use this
method if possible.
:param key: The key to be checked
"""
if not isinstance(key, key_type):
raise ValueError('%r is not a valid key type' % key)
if not VALID_KEY_RE.match(key):
raise ValueError('%r contains illegal characters' % key) | Checks if a key is valid and raises a ValueError if its not.
When in need of checking a key for validity, always use this
method if possible.
:param key: The key to be checked | entailment |
def _get(self, key):
"""Implementation for :meth:`~simplekv.KeyValueStore.get`. The default
implementation will create a :class:`io.BytesIO`-buffer and then call
:meth:`~simplekv.KeyValueStore._get_file`.
:param key: Key of value to be retrieved
"""
buf = BytesIO()
self._get_file(key, buf)
return buf.getvalue() | Implementation for :meth:`~simplekv.KeyValueStore.get`. The default
implementation will create a :class:`io.BytesIO`-buffer and then call
:meth:`~simplekv.KeyValueStore._get_file`.
:param key: Key of value to be retrieved | entailment |
def _get_file(self, key, file):
"""Write key to file-like object file. Either this method or
:meth:`~simplekv.KeyValueStore._get_filename` will be called by
:meth:`~simplekv.KeyValueStore.get_file`. Note that this method does
not accept strings.
:param key: Key to be retrieved
:param file: File-like object to write to
"""
bufsize = 1024 * 1024
# note: we do not use a context manager here or close the source.
# the source goes out of scope shortly after, taking care of the issue
# this allows us to support file-like objects without close as well,
# such as BytesIO.
source = self.open(key)
try:
while True:
buf = source.read(bufsize)
file.write(buf)
if len(buf) < bufsize:
break
finally:
source.close() | Write key to file-like object file. Either this method or
:meth:`~simplekv.KeyValueStore._get_filename` will be called by
:meth:`~simplekv.KeyValueStore.get_file`. Note that this method does
not accept strings.
:param key: Key to be retrieved
:param file: File-like object to write to | entailment |
def _get_filename(self, key, filename):
"""Write key to file. Either this method or
:meth:`~simplekv.KeyValueStore._get_file` will be called by
:meth:`~simplekv.KeyValueStore.get_file`. This method only accepts
filenames and will open the file with a mode of ``wb``, then call
:meth:`~simplekv.KeyValueStore._get_file`.
:param key: Key to be retrieved
:param filename: Filename to write to
"""
with open(filename, 'wb') as dest:
return self._get_file(key, dest) | Write key to file. Either this method or
:meth:`~simplekv.KeyValueStore._get_file` will be called by
:meth:`~simplekv.KeyValueStore.get_file`. This method only accepts
filenames and will open the file with a mode of ``wb``, then call
:meth:`~simplekv.KeyValueStore._get_file`.
:param key: Key to be retrieved
:param filename: Filename to write to | entailment |
def put(self, key, data, ttl_secs=None):
"""Like :meth:`~simplekv.KeyValueStore.put`, but with an additional
parameter:
:param ttl_secs: Number of seconds until the key expires. See above
for valid values.
:raises exceptions.ValueError: If ``ttl_secs`` is invalid.
:raises exceptions.IOError: If storing failed or the file could not
be read
"""
self._check_valid_key(key)
if not isinstance(data, bytes):
raise IOError("Provided data is not of type bytes")
return self._put(key, data, self._valid_ttl(ttl_secs)) | Like :meth:`~simplekv.KeyValueStore.put`, but with an additional
parameter:
:param ttl_secs: Number of seconds until the key expires. See above
for valid values.
:raises exceptions.ValueError: If ``ttl_secs`` is invalid.
:raises exceptions.IOError: If storing failed or the file could not
be read | entailment |
def put_file(self, key, file, ttl_secs=None):
"""Like :meth:`~simplekv.KeyValueStore.put_file`, but with an
additional parameter:
:param ttl_secs: Number of seconds until the key expires. See above
for valid values.
:raises exceptions.ValueError: If ``ttl_secs`` is invalid.
"""
if ttl_secs is None:
ttl_secs = self.default_ttl_secs
self._check_valid_key(key)
if isinstance(file, str):
return self._put_filename(key, file, self._valid_ttl(ttl_secs))
else:
return self._put_file(key, file, self._valid_ttl(ttl_secs)) | Like :meth:`~simplekv.KeyValueStore.put_file`, but with an
additional parameter:
:param ttl_secs: Number of seconds until the key expires. See above
for valid values.
:raises exceptions.ValueError: If ``ttl_secs`` is invalid. | entailment |
def copy(self, source, dest):
"""Copies a key. The destination is overwritten if does exist.
:param source: The source key to copy
:param dest: The destination for the copy
:returns: The destination key
:raises: exceptions.ValueError: If the source or target key are not valid
:raises: exceptions.KeyError: If the source key was not found"""
self._check_valid_key(source)
self._check_valid_key(dest)
return self._copy(source, dest) | Copies a key. The destination is overwritten if does exist.
:param source: The source key to copy
:param dest: The destination for the copy
:returns: The destination key
:raises: exceptions.ValueError: If the source or target key are not valid
:raises: exceptions.KeyError: If the source key was not found | entailment |
def open(self):
"""Implementation of NAPALM method open."""
try:
if self.transport in ('http', 'https'):
connection = pyeapi.client.connect(
transport=self.transport,
host=self.hostname,
username=self.username,
password=self.password,
port=self.port,
timeout=self.timeout
)
elif self.transport == 'socket':
connection = pyeapi.client.connect(transport=self.transport)
else:
raise ConnectionException("Unknown transport: {}".format(self.transport))
if self.device is None:
self.device = pyeapi.client.Node(connection, enablepwd=self.enablepwd)
# does not raise an Exception if unusable
# let's try to run a very simple command
self.device.run_commands(['show clock'], encoding='text')
except ConnectionError as ce:
# and this is raised either if device not avaiable
# either if HTTP(S) agent is not enabled
# show management api http-commands
raise ConnectionException(ce.message) | Implementation of NAPALM method open. | entailment |
def compare_config(self):
"""Implementation of NAPALM method compare_config."""
if self.config_session is None:
return ''
else:
commands = ['show session-config named %s diffs' % self.config_session]
result = self.device.run_commands(commands, encoding='text')[0]['output']
result = '\n'.join(result.splitlines()[2:])
return result.strip() | Implementation of NAPALM method compare_config. | entailment |
def commit_config(self):
"""Implementation of NAPALM method commit_config."""
commands = []
commands.append('copy startup-config flash:rollback-0')
commands.append('configure session {}'.format(self.config_session))
commands.append('commit')
commands.append('write memory')
self.device.run_commands(commands)
self.config_session = None | Implementation of NAPALM method commit_config. | entailment |
def discard_config(self):
"""Implementation of NAPALM method discard_config."""
if self.config_session is not None:
commands = []
commands.append('configure session {}'.format(self.config_session))
commands.append('abort')
self.device.run_commands(commands)
self.config_session = None | Implementation of NAPALM method discard_config. | entailment |
def rollback(self):
"""Implementation of NAPALM method rollback."""
commands = []
commands.append('configure replace flash:rollback-0')
commands.append('write memory')
self.device.run_commands(commands) | Implementation of NAPALM method rollback. | entailment |
def get_facts(self):
"""Implementation of NAPALM method get_facts."""
commands = []
commands.append('show version')
commands.append('show hostname')
commands.append('show interfaces')
result = self.device.run_commands(commands)
version = result[0]
hostname = result[1]
interfaces_dict = result[2]['interfaces']
uptime = time.time() - version['bootupTimestamp']
interfaces = [i for i in interfaces_dict.keys() if '.' not in i]
interfaces = string_parsers.sorted_nicely(interfaces)
return {
'hostname': hostname['hostname'],
'fqdn': hostname['fqdn'],
'vendor': u'Arista',
'model': version['modelName'],
'serial_number': version['serialNumber'],
'os_version': version['internalVersion'],
'uptime': int(uptime),
'interface_list': interfaces,
} | Implementation of NAPALM method get_facts. | entailment |
def get_snmp_information(self):
"""get_snmp_information() for EOS. Re-written to not use TextFSM"""
# Default values
snmp_dict = {
'chassis_id': '',
'location': '',
'contact': '',
'community': {}
}
commands = [
'show snmp chassis',
'show snmp location',
'show snmp contact'
]
snmp_config = self.device.run_commands(commands, encoding='json')
for line in snmp_config:
for k, v in line.items():
if k == 'chassisId':
snmp_dict['chassis_id'] = v
else:
# Some EOS versions add extra quotes
snmp_dict[k] = v.strip('"')
commands = ['show running-config | section snmp-server community']
raw_snmp_config = self.device.run_commands(commands, encoding='text')[0].get('output', '')
for line in raw_snmp_config.splitlines():
match = self._RE_SNMP_COMM.search(line)
if match:
matches = match.groupdict('')
snmp_dict['community'][match.group('community')] = {
'acl': py23_compat.text_type(matches['v4_acl']),
'mode': py23_compat.text_type(matches['access'])
}
return snmp_dict | get_snmp_information() for EOS. Re-written to not use TextFSM | entailment |
def get_bgp_neighbors_detail(self, neighbor_address=''):
"""Implementation of get_bgp_neighbors_detail"""
def _parse_per_peer_bgp_detail(peer_output):
"""This function parses the raw data per peer and returns a
json structure per peer.
"""
int_fields = ['local_as', 'remote_as',
'local_port', 'remote_port', 'local_port',
'input_messages', 'output_messages', 'input_updates',
'output_updates', 'messages_queued_out', 'holdtime',
'configured_holdtime', 'keepalive',
'configured_keepalive', 'advertised_prefix_count',
'received_prefix_count']
peer_details = []
# Using preset template to extract peer info
peer_info = (
napalm_base.helpers.textfsm_extractor(
self, 'bgp_detail', peer_output))
for item in peer_info:
# Determining a few other fields in the final peer_info
item['up'] = (
True if item['up'] == "up" else False)
item['local_address_configured'] = (
True if item['local_address'] else False)
item['multihop'] = (
False if item['multihop'] == 0 or
item['multihop'] == '' else True)
# TODO: The below fields need to be retrieved
# Currently defaulting their values to False or 0
item['multipath'] = False
item['remove_private_as'] = False
item['suppress_4byte_as'] = False
item['local_as_prepend'] = False
item['flap_count'] = 0
item['active_prefix_count'] = 0
item['suppressed_prefix_count'] = 0
# Converting certain fields into int
for key in int_fields:
item[key] = napalm_base.helpers.convert(int, item[key], 0)
# Conforming with the datatypes defined by the base class
item['export_policy'] = (
napalm_base.helpers.convert(
py23_compat.text_type, item['export_policy']))
item['last_event'] = (
napalm_base.helpers.convert(
py23_compat.text_type, item['last_event']))
item['remote_address'] = napalm_base.helpers.ip(item['remote_address'])
item['previous_connection_state'] = (
napalm_base.helpers.convert(
py23_compat.text_type, item['previous_connection_state']))
item['import_policy'] = (
napalm_base.helpers.convert(
py23_compat.text_type, item['import_policy']))
item['connection_state'] = (
napalm_base.helpers.convert(
py23_compat.text_type, item['connection_state']))
item['routing_table'] = (
napalm_base.helpers.convert(
py23_compat.text_type, item['routing_table']))
item['router_id'] = napalm_base.helpers.ip(item['router_id'])
item['local_address'] = napalm_base.helpers.convert(
napalm_base.helpers.ip, item['local_address'])
peer_details.append(item)
return peer_details
def _append(bgp_dict, peer_info):
remote_as = peer_info['remote_as']
vrf_name = peer_info['routing_table']
if vrf_name not in bgp_dict.keys():
bgp_dict[vrf_name] = {}
if remote_as not in bgp_dict[vrf_name].keys():
bgp_dict[vrf_name][remote_as] = []
bgp_dict[vrf_name][remote_as].append(peer_info)
commands = []
summary_commands = []
if not neighbor_address:
commands.append('show ip bgp neighbors vrf all')
commands.append('show ipv6 bgp neighbors vrf all')
summary_commands.append('show ip bgp summary vrf all')
summary_commands.append('show ipv6 bgp summary vrf all')
else:
try:
peer_ver = IPAddress(neighbor_address).version
except Exception as e:
raise e
if peer_ver == 4:
commands.append('show ip bgp neighbors %s vrf all' %
neighbor_address)
summary_commands.append('show ip bgp summary vrf all')
elif peer_ver == 6:
commands.append('show ipv6 bgp neighbors %s vrf all' %
neighbor_address)
summary_commands.append('show ipv6 bgp summary vrf all')
raw_output = (
self.device.run_commands(commands, encoding='text'))
bgp_summary = (
self.device.run_commands(summary_commands, encoding='json'))
bgp_detail_info = {}
v4_peer_info = []
v6_peer_info = []
if neighbor_address:
peer_info = _parse_per_peer_bgp_detail(raw_output[0]['output'])
if peer_ver == 4:
v4_peer_info.append(peer_info[0])
else:
v6_peer_info.append(peer_info[0])
else:
# Using preset template to extract peer info
v4_peer_info = _parse_per_peer_bgp_detail(raw_output[0]['output'])
v6_peer_info = _parse_per_peer_bgp_detail(raw_output[1]['output'])
for peer_info in v4_peer_info:
vrf_name = peer_info['routing_table']
peer_remote_addr = peer_info['remote_address']
peer_info['accepted_prefix_count'] = (
bgp_summary[0]['vrfs'][vrf_name]['peers'][peer_remote_addr]['prefixAccepted']
if peer_remote_addr in bgp_summary[0]['vrfs'][vrf_name]['peers'].keys()
else 0
)
_append(bgp_detail_info, peer_info)
for peer_info in v6_peer_info:
vrf_name = peer_info['routing_table']
peer_remote_addr = peer_info['remote_address']
peer_info['accepted_prefix_count'] = (
bgp_summary[1]['vrfs'][vrf_name]['peers'][peer_remote_addr]['prefixAccepted']
if peer_remote_addr in bgp_summary[1]['vrfs'][vrf_name]['peers'].keys()
else 0
)
_append(bgp_detail_info, peer_info)
return bgp_detail_info | Implementation of get_bgp_neighbors_detail | entailment |
def get_config(self, retrieve="all"):
"""get_config implementation for EOS."""
get_startup = retrieve == "all" or retrieve == "startup"
get_running = retrieve == "all" or retrieve == "running"
get_candidate = (retrieve == "all" or retrieve == "candidate") and self.config_session
if retrieve == "all":
commands = ['show startup-config',
'show running-config']
if self.config_session:
commands.append('show session-config named {}'.format(self.config_session))
output = self.device.run_commands(commands, encoding="text")
return {
'startup': py23_compat.text_type(output[0]['output']) if get_startup else u"",
'running': py23_compat.text_type(output[1]['output']) if get_running else u"",
'candidate': py23_compat.text_type(output[2]['output']) if get_candidate else u"",
}
elif get_startup or get_running:
commands = ['show {}-config'.format(retrieve)]
output = self.device.run_commands(commands, encoding="text")
return {
'startup': py23_compat.text_type(output[0]['output']) if get_startup else u"",
'running': py23_compat.text_type(output[0]['output']) if get_running else u"",
'candidate': "",
}
elif get_candidate:
commands = ['show session-config named {}'.format(self.config_session)]
output = self.device.run_commands(commands, encoding="text")
return {
'startup': "",
'running': "",
'candidate': py23_compat.text_type(output[0]['output']),
}
elif retrieve == "candidate":
# If we get here it means that we want the candidate but there is none.
return {
'startup': "",
'running': "",
'candidate': "",
}
else:
raise Exception("Wrong retrieve filter: {}".format(retrieve)) | get_config implementation for EOS. | entailment |
def get_network_instances(self, name=''):
"""get_network_instances implementation for EOS."""
output = self._show_vrf()
vrfs = {}
all_vrf_interfaces = {}
for vrf in output:
if (vrf.get('route_distinguisher', '') == "<not set>" or
vrf.get('route_distinguisher', '') == 'None'):
vrf['route_distinguisher'] = u''
else:
vrf['route_distinguisher'] = py23_compat.text_type(vrf['route_distinguisher'])
interfaces = {}
for interface_raw in vrf.get('interfaces', []):
interface = interface_raw.split(',')
for line in interface:
if line.strip() != '':
interfaces[py23_compat.text_type(line.strip())] = {}
all_vrf_interfaces[py23_compat.text_type(line.strip())] = {}
vrfs[py23_compat.text_type(vrf['name'])] = {
u'name': py23_compat.text_type(vrf['name']),
u'type': u'L3VRF',
u'state': {
u'route_distinguisher': vrf['route_distinguisher'],
},
u'interfaces': {
u'interface': interfaces,
},
}
all_interfaces = self.get_interfaces_ip().keys()
vrfs[u'default'] = {
u'name': u'default',
u'type': u'DEFAULT_INSTANCE',
u'state': {
u'route_distinguisher': u'',
},
u'interfaces': {
u'interface': {
k: {} for k in all_interfaces if k not in all_vrf_interfaces.keys()
},
},
}
if name:
if name in vrfs:
return {py23_compat.text_type(name): vrfs[name]}
return {}
else:
return vrfs | get_network_instances implementation for EOS. | entailment |
def map_boto_exceptions(key=None, exc_pass=()):
"""Map boto-specific exceptions to the simplekv-API."""
from boto.exception import BotoClientError, BotoServerError, \
StorageResponseError
try:
yield
except StorageResponseError as e:
if e.code == 'NoSuchKey':
raise KeyError(key)
raise IOError(str(e))
except (BotoClientError, BotoServerError) as e:
if e.__class__.__name__ not in exc_pass:
raise IOError(str(e)) | Map boto-specific exceptions to the simplekv-API. | entailment |
def _check_valid_key(self, key):
"""Checks if a key is valid and raises a ValueError if its not.
When in need of checking a key for validity, always use this
method if possible.
:param key: The key to be checked
"""
if not isinstance(key, key_type) and key is not None:
raise ValueError('%r is not a valid key type' % key)
if not VALID_KEY_RE_EXTENDED.match(key) or key == u'/':
raise ValueError('%r contains illegal characters' % key) | Checks if a key is valid and raises a ValueError if its not.
When in need of checking a key for validity, always use this
method if possible.
:param key: The key to be checked | entailment |
def _file_md5(file_):
"""
Compute the md5 digest of a file in base64 encoding.
"""
md5 = hashlib.md5()
chunk_size = 128 * md5.block_size
for chunk in iter(lambda: file_.read(chunk_size), b''):
md5.update(chunk)
file_.seek(0)
byte_digest = md5.digest()
return base64.b64encode(byte_digest).decode() | Compute the md5 digest of a file in base64 encoding. | entailment |
def _byte_buffer_md5(buffer_):
"""
Computes the md5 digest of a byte buffer in base64 encoding.
"""
md5 = hashlib.md5(buffer_)
byte_digest = md5.digest()
return base64.b64encode(byte_digest).decode() | Computes the md5 digest of a byte buffer in base64 encoding. | entailment |
def map_azure_exceptions(key=None, exc_pass=()):
"""Map Azure-specific exceptions to the simplekv-API."""
from azure.common import AzureMissingResourceHttpError, AzureHttpError,\
AzureException
try:
yield
except AzureMissingResourceHttpError as ex:
if ex.__class__.__name__ not in exc_pass:
s = str(ex)
if s.startswith(u"The specified container does not exist."):
raise IOError(s)
raise KeyError(key)
except AzureHttpError as ex:
if ex.__class__.__name__ not in exc_pass:
raise IOError(str(ex))
except AzureException as ex:
if ex.__class__.__name__ not in exc_pass:
raise IOError(str(ex)) | Map Azure-specific exceptions to the simplekv-API. | entailment |
def read(self, size=-1):
"""Returns 'size' amount of bytes or less if there is no more data.
If no size is given all data is returned. size can be >= 0."""
if self.closed:
raise ValueError("I/O operation on closed file")
with map_azure_exceptions(key=self.key):
if size < 0:
size = self.size - self.pos
end = min(self.pos + size - 1, self.size - 1)
if self.pos > end:
return b''
b = self.block_blob_service.get_blob_to_bytes(
container_name=self.container_name,
blob_name=self.key,
start_range=self.pos,
end_range=end, # end_range is inclusive
max_connections=self.max_connections,
)
self.pos += len(b.content)
return b.content | Returns 'size' amount of bytes or less if there is no more data.
If no size is given all data is returned. size can be >= 0. | entailment |
def seek(self, offset, whence=0):
"""Move to a new offset either relative or absolute. whence=0 is
absolute, whence=1 is relative, whence=2 is relative to the end.
Any relative or absolute seek operation which would result in a
negative position is undefined and that case can be ignored
in the implementation.
Any seek operation which moves the position after the stream
should succeed. tell() should report that position and read()
should return an empty bytes object."""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == 0:
if offset < 0:
raise IOError('seek would move position outside the file')
self.pos = offset
elif whence == 1:
if self.pos + offset < 0:
raise IOError('seek would move position outside the file')
self.pos += offset
elif whence == 2:
if self.size + offset < 0:
raise IOError('seek would move position outside the file')
self.pos = self.size + offset
return self.pos | Move to a new offset either relative or absolute. whence=0 is
absolute, whence=1 is relative, whence=2 is relative to the end.
Any relative or absolute seek operation which would result in a
negative position is undefined and that case can be ignored
in the implementation.
Any seek operation which moves the position after the stream
should succeed. tell() should report that position and read()
should return an empty bytes object. | entailment |
def _on_tree(repo, tree, components, obj):
"""Mounts an object on a tree, using the given path components.
:param tree: Tree object to mount on.
:param components: A list of strings of subpaths (i.e. ['foo', 'bar'] is
equivalent to '/foo/bar')
:param obj: Object to mount. If None, removes the object found at path
and prunes the tree downwards.
:return: A list of new entities that need to be added to the object store,
where the last one is the new tree.
"""
# pattern-matching:
if len(components) == 1:
if isinstance(obj, Blob):
mode = 0o100644
elif isinstance(obj, Tree):
mode = 0o040000
elif obj is None:
mode = None
else:
raise TypeError('Can only mount Blobs or Trees')
name = components[0]
if mode is not None:
tree[name] = mode, obj.id
return [tree]
if name in tree:
del tree[name]
return [tree]
elif len(components) > 1:
a, bc = components[0], components[1:]
if a in tree:
a_tree = repo[tree[a][1]]
if not isinstance(a_tree, Tree):
a_tree = Tree()
else:
a_tree = Tree()
res = _on_tree(repo, a_tree, bc, obj)
a_tree_new = res[-1]
if a_tree_new.items():
tree[a] = 0o040000, a_tree_new.id
return res + [tree]
# tree is empty
if a in tree:
del tree[a]
return [tree]
else:
raise ValueError('Components can\'t be empty.') | Mounts an object on a tree, using the given path components.
:param tree: Tree object to mount on.
:param components: A list of strings of subpaths (i.e. ['foo', 'bar'] is
equivalent to '/foo/bar')
:param obj: Object to mount. If None, removes the object found at path
and prunes the tree downwards.
:return: A list of new entities that need to be added to the object store,
where the last one is the new tree. | entailment |
def build_transgenic_lines(self):
"""
init class | "transgenic_line_source_name":"stock_number" a Class
add superClass | rdfs:subClassOf ilxtr:transgenicLine
add *order* | ilxtr:useObjectProperty ilxtr:<order>
add name | rdfs:label "name"
add def | definition: "description"
add transtype | rdfs:hasTransgenicType "transgenic_line_type_name"
"""
allen_namespaces = {
'JAX': 'http://jaxmice.jax.org/strain/',
'MMRRC': 'http://www.mmrrc.org/catalog/getSDS.jsp?mmrrc_id=',
'AIBS': 'http://api.brain-map.org/api/v2/data/TransgenicLine/',
}
for prefix, iri in allen_namespaces.items():
self.g.add_namespace(prefix, iri)
for cell_line in self.neuron_data[:]:
for tl in cell_line['donor']['transgenic_lines']:
_id = tl['stock_number'] if tl['stock_number'] else tl['id']
prefix = tl['transgenic_line_source_name']
line_type = tl['transgenic_line_type_name']
if prefix not in ['JAX', 'MMRRC', 'AIBS']:
continue
_class = prefix + ':' + str(_id)
self.g.add_class(_class)
self.g.add_trip(_class, 'rdfs:label', tl['name'])
self.g.add_trip(_class, 'definition:', tl['description'])
self.g.add_trip(_class, 'rdfs:subClassOf', 'ilxtr:transgenicLine')
self.g.add_trip(_class, 'ilxtr:hasTransgenicType', 'ilxtr:' + line_type + 'Line')
self.g.write() | init class | "transgenic_line_source_name":"stock_number" a Class
add superClass | rdfs:subClassOf ilxtr:transgenicLine
add *order* | ilxtr:useObjectProperty ilxtr:<order>
add name | rdfs:label "name"
add def | definition: "description"
add transtype | rdfs:hasTransgenicType "transgenic_line_type_name" | entailment |
def datagetter(cls):
""" example datagetter function, make any local modifications here """
with open('myfile', 'rt') as f:
rows = [r for r in csv.reader(f)]
dothing = lambda _: [i for i, v in enumerate(_)]
rows = [dothing(_) for _ in rows]
raise NotImplementedError('You need to implement this yourlself!')
return rows | example datagetter function, make any local modifications here | entailment |
def dataproc(cls, graph, data):
""" example datagetter function, make any local modifications here """
for thing in data:
graph.add_trip(*thing)
raise NotImplementedError('You need to implement this yourlself!') | example datagetter function, make any local modifications here | entailment |
def _connect(self):
"""Connect to ec2 resource."""
resource = None
try:
resource = boto3.resource(
'ec2',
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
region_name=self.region
)
# boto3 resource is lazy so attempt method to test connection
resource.meta.client.describe_account_attributes()
except Exception:
raise EC2CloudException(
'Could not connect to region: %s' % self.region
)
return resource | Connect to ec2 resource. | entailment |
def _get_instance(self):
"""Retrieve instance matching instance_id."""
resource = self._connect()
try:
instance = resource.Instance(self.running_instance_id)
except Exception:
raise EC2CloudException(
'Instance with ID: {instance_id} not found.'.format(
instance_id=self.running_instance_id
)
)
return instance | Retrieve instance matching instance_id. | entailment |
def _get_instance_state(self):
"""
Attempt to retrieve the state of the instance.
Raises:
EC2CloudException: If the instance cannot be found.
"""
instance = self._get_instance()
state = None
try:
state = instance.state['Name']
except Exception:
raise EC2CloudException(
'Instance with id: {instance_id}, '
'cannot be found.'.format(
instance_id=self.running_instance_id
)
)
return state | Attempt to retrieve the state of the instance.
Raises:
EC2CloudException: If the instance cannot be found. | entailment |
def _get_user_data(self):
"""
Return formatted bash script string.
The public ssh key is added by cloud init to the instance based on
the ssh user and private key file.
"""
key = ipa_utils.generate_public_ssh_key(
self.ssh_private_key_file
).decode()
script = BASH_SSH_SCRIPT.format(user=self.ssh_user, key=key)
return script | Return formatted bash script string.
The public ssh key is added by cloud init to the instance based on
the ssh user and private key file. | entailment |
def _launch_instance(self):
"""Launch an instance of the given image."""
resource = self._connect()
instance_name = ipa_utils.generate_instance_name('ec2-ipa-test')
kwargs = {
'InstanceType': self.instance_type or EC2_DEFAULT_TYPE,
'ImageId': self.image_id,
'MaxCount': 1,
'MinCount': 1,
'TagSpecifications': [
{
'ResourceType': 'instance',
'Tags': [
{
'Key': 'Name',
'Value': instance_name
}
]
}
]
}
if self.zone:
kwargs['Placement'] = {'AvailabilityZone': self.zone}
if self.ssh_key_name:
kwargs['KeyName'] = self.ssh_key_name
else:
kwargs['UserData'] = self._get_user_data()
if self.subnet_id:
kwargs['SubnetId'] = self.subnet_id
if self.security_group_id:
kwargs['SecurityGroupIds'] = [self.security_group_id]
try:
instances = resource.create_instances(**kwargs)
except Exception as error:
raise EC2CloudException(
'Unable to create instance: {0}.'.format(error)
)
self.running_instance_id = instances[0].instance_id
self.logger.debug('ID of instance: %s' % self.running_instance_id)
self._wait_on_instance('running', self.timeout) | Launch an instance of the given image. | entailment |
def _set_instance_ip(self):
"""
Retrieve instance ip and cache it.
Current preference is for public ipv4, ipv6 and private.
"""
instance = self._get_instance()
# ipv6
try:
ipv6 = instance.network_interfaces[0].ipv6_addresses[0]
except (IndexError, TypeError):
ipv6 = None
self.instance_ip = instance.public_ip_address or \
ipv6 or instance.private_ip_address
if not self.instance_ip:
raise EC2CloudException(
'IP address for instance cannot be found.'
) | Retrieve instance ip and cache it.
Current preference is for public ipv4, ipv6 and private. | entailment |
def _start_instance(self):
"""Start the instance."""
instance = self._get_instance()
instance.start()
self._wait_on_instance('running', self.timeout) | Start the instance. | entailment |
def _stop_instance(self):
"""Stop the instance."""
instance = self._get_instance()
instance.stop()
self._wait_on_instance('stopped', self.timeout) | Stop the instance. | entailment |
def _process_worker(call_queue, result_queue):
""" This worker is wrapped to block KeyboardInterrupt """
signal.signal(signal.SIGINT, signal.SIG_IGN) #block ctrl-c
return _process_worker_base(call_queue, result_queue) | This worker is wrapped to block KeyboardInterrupt | entailment |
def _set_init_system(self, client):
"""Determine the init system of distribution."""
if not self.init_system:
try:
out = ipa_utils.execute_ssh_command(
client,
'ps -p 1 -o comm='
)
except Exception as e:
raise IpaDistroException(
'An error occurred while retrieving'
' the distro init system: %s' % e
)
if out:
self.init_system = out.strip() | Determine the init system of distribution. | entailment |
def get_vm_info(self, client):
"""Return vm info."""
out = ''
self._set_init_system(client)
if self.init_system == 'systemd':
try:
out += 'systemd-analyze:\n\n'
out += ipa_utils.execute_ssh_command(
client,
'systemd-analyze'
)
out += 'systemd-analyze blame:\n\n'
out += ipa_utils.execute_ssh_command(
client,
'systemd-analyze blame'
)
out += 'journalctl -b:\n\n'
out += ipa_utils.execute_ssh_command(
client,
'sudo journalctl -b'
)
except Exception as error:
out = 'Failed to collect VM info: {0}.'.format(error)
return out | Return vm info. | entailment |
def install_package(self, client, package):
"""Install package on instance."""
install_cmd = "{sudo} '{install} {package}'".format(
sudo=self.get_sudo_exec_wrapper(),
install=self.get_install_cmd(),
package=package
)
try:
out = ipa_utils.execute_ssh_command(
client,
install_cmd
)
except Exception as error:
raise IpaDistroException(
'An error occurred installing package {package} '
'on instance: {error}'.format(
package=package,
error=error
)
)
else:
return out | Install package on instance. | entailment |
def reboot(self, client):
"""Execute reboot command on instance."""
self._set_init_system(client)
reboot_cmd = "{sudo} '{stop_ssh};{reboot}'".format(
sudo=self.get_sudo_exec_wrapper(),
stop_ssh=self.get_stop_ssh_service_cmd(),
reboot=self.get_reboot_cmd()
)
try:
ipa_utils.execute_ssh_command(
client,
reboot_cmd
)
except Exception as error:
raise IpaDistroException(
'An error occurred rebooting instance: %s' % error
)
ipa_utils.clear_cache() | Execute reboot command on instance. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.