sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def fastq(self):
    '''
    str: the sequence as a FASTQ-formatted string.

    Returns ``None`` instead of a FASTQ string whenever ``Sequence.qual``
    is ``None``, since quality scores are required for FASTQ output. The
    formatted string is cached on ``self._fastq`` after the first build.
    '''
    if self.qual is None:
        # no quality scores -> no valid FASTQ representation
        self._fastq = None
        return self._fastq
    if self._fastq is None:
        self._fastq = '@{}\n{}\n+\n{}'.format(self.id, self.sequence, self.qual)
    return self._fastq
If ``Sequence.qual`` is ``None``, then ``None`` will be returned instead of a
FASTQ string | entailment |
def reverse_complement(self):
    '''
    str: the reverse complement of ``Sequence.sequence``.

    Computed lazily via ``_get_reverse_complement()`` and cached on
    ``self._reverse_complement`` for subsequent accesses.
    '''
    cached = self._reverse_complement
    if cached is None:
        cached = self._get_reverse_complement()
        self._reverse_complement = cached
    return cached
def region(self, start=0, end=None):
    '''
    Returns a region of ``Sequence.sequence`` as a FASTA-formatted string.

    Called without arguments, the entire sequence is returned.

    Args:

        start (int): Start position of the region. Default is 0.

        end (int): End position of the region. Negative values behave
            exactly as they do when slicing strings. Default is ``None``,
            meaning the end of the sequence.

    Returns:

        str: the requested region of ``Sequence.sequence``, in FASTA format
    '''
    stop = len(self.sequence) if end is None else end
    return '>{}\n{}'.format(self.id, self.sequence[start:stop])
If called without kwargs, the entire sequence will be returned.
Args:
start (int): Start position of the region to be returned. Default
is 0.
end (int): End position of the region to be returned. Negative values
will function as they do when slicing strings.
Returns:
str: A region of ``Sequence.sequence``, in FASTA format | entailment |
def underscore_to_camelcase(value, first_upper=True):
    """Convert an underscore_separated string to CamelCase / camelCase.

    Empty segments (produced by leading, trailing, or doubled underscores)
    are preserved as literal underscores in the output.

    :param value: string with underscores (non-strings are coerced with ``str``)
    :param first_upper: if ``True``, the result starts with an upper-case letter
    :type value: str
    :return: string in CamelCase or camelCase according to ``first_upper``
    :rtype: str

    :Example:

    >>> underscore_to_camelcase('camel_case')
    'CamelCase'
    >>> underscore_to_camelcase('camel_case', False)
    'camelCase'
    """
    pieces = []
    for segment in str(value).split('_'):
        # empty segment -> keep the underscore; otherwise title-case it
        pieces.append(segment.title() if segment else '_')
    result = ''.join(pieces)
    if not first_upper:
        result = result[0].lower() + result[1:]
    return result
:param value: string with underscores
:param first_upper: the result will have its first character in upper case
:type value: str
:return: string in CamelCase or camelCase according to the first_upper
:rtype: str
:Example:
>>> underscore_to_camelcase('camel_case')
'CamelCase'
>>> underscore_to_camelcase('camel_case', False)
'camelCase' | entailment |
def et_node_to_string(et_node, default=''):
    """Return the stripped text of an ElementTree node, or a fallback.

    :param et_node: Element or None
    :param default: string returned when the node is ``None`` or has no text,
        default ``''``
    :type et_node: xml.etree.ElementTree.Element, None
    :type default: str
    :return: stripped text from the node, or ``default``
    :rtype: str
    """
    if et_node is None or not et_node.text:
        return default
    return str(et_node.text).strip()
:param et_node: Element or None
:param default: string returned if None is given, default ``''``
:type et_node: xml.etree.ElementTree.Element, None
:type default: str
:return: text from node or default
:rtype: str | entailment |
def generate_random_string(size=6, chars=string.ascii_uppercase + string.digits):
    """Generate a random string.

    Not cryptographically secure (uses ``random``); do not use for tokens
    or anything security-sensitive.

    :param size: length of the returned string, default 6
    :param chars: pool of usable characters, default
        ``string.ascii_uppercase + string.digits``
    :type size: int
    :type chars: str
    :return: the random string
    :rtype: str
    """
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
:param size: Length of the returned string. Default is 6.
:param chars: List of the usable characters. Default is string.ascii_uppercase + string.digits.
:type size: int
:type chars: str
:return: The random string.
:rtype: str | entailment |
def addslashes(s, escaped_chars=None):
    """Backslash-escape the given characters. Default escapes ``\\`` and ``'``.

    Note: the default list escapes the backslash *first*, so already-present
    backslashes are not double-processed by the later replacements.

    :param s: string
    :param escaped_chars: list of characters to prefix with a backslash
    :return: string with the selected characters escaped
    :rtype: str

    :Example:

    >>> addslashes("'")
    "\\\\'"
    """
    chars = ["\\", "'"] if escaped_chars is None else escaped_chars
    for ch in chars:
        s = s.replace(ch, '\\' + ch)
    return s
:param s: string
:param escaped_chars: list of characters to prefix with a slash ``\``
:return: string with slashed characters
:rtype: str
:Example:
>>> addslashes("'")
"\\'" | entailment |
def join_list(values, delimiter=', ', transform=None):
    """
    Join a list of values into a single delimited string.

    If ``values`` is a string/bytes (or ``None``), it is returned unchanged;
    otherwise each element is passed through ``transform`` and the results
    are concatenated with ``delimiter``.

    :param values: list of strings, a single string, or ``None``
    :param delimiter: separator placed between elements
    :param transform: callable applied to each element before joining
        (default: identity)
    :return: the concatenation, or ``values`` unchanged
    """
    # type: (Union[List[str], str], str)->str
    transform = _identity if transform is None else transform
    if values is None or isinstance(values, (str, bytes)):
        return values
    return delimiter.join(transform(item) for item in values)
the given values variable is a list. Otherwise it is just returned.
:param values: List of strings or string .
:param delimiter: The delimiter used to join the values.
:return: The concatenation or identity. | entailment |
def mafft(sequences=None, alignment_file=None, fasta=None, fmt='fasta', threads=-1, as_file=False,
          reorder=True, print_stdout=False, print_stderr=False, mafft_bin=None):
    '''
    Performs multiple sequence alignment with MAFFT.

    Args:

        sequences (list): Sequences to be aligned. ``sequences`` can be one of four things:

            1. a FASTA-formatted string
            2. a list of BioPython ``SeqRecord`` objects
            3. a list of AbTools ``Sequence`` objects
            4. a list of lists/tuples, of the format ``[sequence_id, sequence]``

        alignment_file (str): Path for the output alignment file. If not supplied,
            a name will be generated using ``tempfile.NamedTemporaryFile()``.

        fasta (str): Path to a FASTA-formatted file of sequences. Used as an
            alternative to ``sequences`` when supplying a FASTA file.

        fmt (str): Format of the alignment. Options are 'fasta', 'phylip', and 'clustal'.
            Default is 'fasta'.

        threads (int): Number of threads for MAFFT to use. Default is ``-1``, which
            results in MAFFT using ``multiprocessing.cpu_count()`` threads.

        as_file (bool): If ``True``, returns a path to the alignment file. If ``False``,
            returns a BioPython ``MultipleSeqAlignment`` object (obtained by calling
            ``Bio.AlignIO.read()`` on the alignment file).

        reorder (bool): If ``True``, MAFFT reorders aligned sequences (``--reorder``).
            Default is ``True``.

        print_stdout (bool): If ``True``, prints the MAFFT command line and its standard
            output. Default is ``False``.

        print_stderr (bool): If ``True``, prints MAFFT's standard error. Default is ``False``.

        mafft_bin (str): Path to a MAFFT executable. Default is ``None``, which results
            in using ``mafft`` from the system PATH.

    Returns:

        A BioPython ``MultipleSeqAlignment`` object, unless ``as_file`` is ``True``,
        in which case the path to the alignment file is returned. Returns ``None`` if
        the alignment file is empty (i.e. the MAFFT run produced no output).

    Raises:

        ValueError: if neither ``sequences`` nor ``fasta`` is provided.
    '''
    # write input sequences to a temp FASTA file, or use the caller-supplied one
    created_input = False
    if sequences:
        fasta_string = _get_fasta_string(sequences)
        fasta_file = tempfile.NamedTemporaryFile(delete=False)
        fasta_file.close()
        ffile = fasta_file.name
        with open(ffile, 'w') as f:
            f.write(fasta_string)
        created_input = True
    elif fasta:
        ffile = fasta
    else:
        # FIX: previously fell through and crashed later with an unbound ``ffile``
        # NameError; fail fast with a clear message instead
        raise ValueError('You must supply either ::sequences:: or ::fasta::')
    if alignment_file is None:
        alignment_file = tempfile.NamedTemporaryFile(delete=False).name
    # assemble MAFFT output-format flags
    aln_format = ''
    if fmt.lower() == 'clustal':
        aln_format = '--clustalout '
    if fmt.lower() == 'phylip':
        aln_format = '--phylipout '
    if reorder:
        aln_format += '--reorder '
    if mafft_bin is None:
        mafft_bin = 'mafft'
    mafft_cline = '{} --thread {} {}{} > {}'.format(mafft_bin, threads, aln_format, ffile, alignment_file)
    # NOTE(review): shell=True with interpolated paths is vulnerable to shell
    # injection if any path contains shell metacharacters; acceptable only for
    # trusted, locally-generated inputs
    mafft = sp.Popen(str(mafft_cline),
                     stdout=sp.PIPE,
                     stderr=sp.PIPE,
                     universal_newlines=True,
                     shell=True)
    stdout, stderr = mafft.communicate()
    if print_stdout:
        print(mafft_cline)
        print(stdout)
    if print_stderr:
        print(stderr)
    # FIX: only delete the input file if we created it; the original also
    # unlinked a caller-supplied ``fasta`` file
    if created_input:
        os.unlink(ffile)
    # an empty output file means the MAFFT run failed
    if os.stat(alignment_file).st_size == 0:
        return None
    if as_file:
        return alignment_file
    # FIX: close the alignment-file handle (the original leaked it)
    with open(alignment_file) as aln_handle:
        aln = AlignIO.read(aln_handle, fmt)
    os.unlink(alignment_file)
    return aln
Args:
sequences (list): Sequences to be aligned. ``sequences`` can be one of four things:
1. a FASTA-formatted string
2. a list of BioPython ``SeqRecord`` objects
3. a list of AbTools ``Sequence`` objects
4. a list of lists/tuples, of the format ``[sequence_id, sequence]``
alignment_file (str): Path for the output alignment file. If not supplied,
a name will be generated using ``tempfile.NamedTemporaryFile()``.
fasta (str): Path to a FASTA-formatted file of sequences. Used as an
alternative to ``sequences`` when suppling a FASTA file.
fmt (str): Format of the alignment. Options are 'fasta', 'phylip', and 'clustal'. Default
is 'fasta'.
threads (int): Number of threads for MAFFT to use. Default is ``-1``, which
results in MAFFT using ``multiprocessing.cpu_count()`` threads.
as_file (bool): If ``True``, returns a path to the alignment file. If ``False``,
returns a BioPython ``MultipleSeqAlignment`` object (obtained by calling
``Bio.AlignIO.read()`` on the alignment file).
print_stdout (bool): If ``True``, prints MAFFT's standard output. Default is ``False``.
print_stderr (bool): If ``True``, prints MAFFT's standard error. Default is ``False``.
mafft_bin (str): Path to MAFFT executable. ``abutils`` includes built-in MAFFT binaries
for MacOS and Linux, however, if a different MAFFT binary can be provided. Default is
``None``, which results in using the appropriate built-in MAFFT binary.
Returns:
Returns a BioPython ``MultipleSeqAlignment`` object, unless ``as_file`` is ``True``,
in which case the path to the alignment file is returned. | entailment |
def muscle(sequences=None, alignment_file=None, fasta=None,
           fmt='fasta', as_file=False, maxiters=None, diags=False,
           gap_open=None, gap_extend=None, muscle_bin=None):
    '''
    Performs multiple sequence alignment with MUSCLE.

    Args:

        sequences (list): Sequences to be aligned. ``sequences`` can be one of four things:

            1. a FASTA-formatted string
            2. a list of BioPython ``SeqRecord`` objects
            3. a list of AbTools ``Sequence`` objects
            4. a list of lists/tuples, of the format ``[sequence_id, sequence]``

        alignment_file (str): Path for the output alignment file. Only used when
            ``as_file`` is ``True``; if not supplied, a name will be generated with
            ``tempfile.NamedTemporaryFile()``.

        fasta (str): Path to a FASTA-formatted file of sequences. Used as an
            alternative to ``sequences`` when supplying a FASTA file.

        fmt (str): Format of the alignment. Options are 'fasta' and 'clustal'.
            Default is 'fasta'.

        as_file (bool): If ``True``, returns a path to the alignment file. If ``False``,
            returns a BioPython ``MultipleSeqAlignment`` object (obtained by calling
            ``Bio.AlignIO.read()`` on the alignment file).

        maxiters (int): Passed directly to MUSCLE using the ``-maxiters`` flag.

        diags (bool): Passed directly to MUSCLE using the ``-diags`` flag.

        gap_open (float): Passed directly to MUSCLE using the ``-gapopen`` flag. Ignored
            if ``gap_extend`` is not also provided.

        gap_extend (float): Passed directly to MUSCLE using the ``-gapextend`` flag. Ignored
            if ``gap_open`` is not also provided.

        muscle_bin (str): Path to a MUSCLE executable. Default is ``None``, which results
            in using the built-in MUSCLE binary for the current platform.

    Returns:

        A BioPython ``MultipleSeqAlignment`` object, unless ``as_file`` is ``True``,
        in which case the path to the alignment file is returned.

    Raises:

        ValueError: if neither ``sequences`` nor ``fasta`` is provided.
    '''
    if sequences:
        fasta_string = _get_fasta_string(sequences)
    elif fasta:
        # FIX: use a context manager so the input handle is closed promptly
        # (the original ``open(fasta, 'r').read()`` leaked the file handle)
        with open(fasta, 'r') as f:
            fasta_string = f.read()
    else:
        # FIX: previously fell through and crashed later with an unbound
        # ``fasta_string`` NameError; fail fast with a clear message instead
        raise ValueError('You must supply either ::sequences:: or ::fasta::')
    if muscle_bin is None:
        muscle_bin = os.path.join(BINARY_DIR, 'muscle_{}'.format(platform.system().lower()))
    aln_format = ''
    if fmt == 'clustal':
        aln_format = ' -clwstrict'
    # assemble the MUSCLE command line
    muscle_cline = '{}{} '.format(muscle_bin, aln_format)
    if maxiters is not None:
        muscle_cline += ' -maxiters {}'.format(maxiters)
    if diags:
        muscle_cline += ' -diags'
    if all([gap_open is not None, gap_extend is not None]):
        muscle_cline += ' -gapopen {} -gapextend {}'.format(gap_open, gap_extend)
    muscle = sp.Popen(str(muscle_cline),
                      stdin=sp.PIPE,
                      stdout=sp.PIPE,
                      stderr=sp.PIPE,
                      universal_newlines=True,
                      shell=True)
    if sys.version_info[0] > 2:
        alignment = muscle.communicate(input=fasta_string)[0]
    else:
        # Python 2 compatibility: decode the subprocess output to unicode
        alignment = unicode(muscle.communicate(input=fasta_string)[0], 'utf-8')
    aln = AlignIO.read(StringIO(alignment), fmt)
    if as_file:
        if not alignment_file:
            alignment_file = tempfile.NamedTemporaryFile().name
        AlignIO.write(aln, alignment_file, fmt)
        return alignment_file
    return aln
Args:
sequences (list): Sequences to be aligned. ``sequences`` can be one of four things:
1. a FASTA-formatted string
2. a list of BioPython ``SeqRecord`` objects
3. a list of AbTools ``Sequence`` objects
4. a list of lists/tuples, of the format ``[sequence_id, sequence]``
alignment_file (str): Path for the output alignment file. If not supplied,
a name will be generated using ``tempfile.NamedTemporaryFile()``.
fasta (str): Path to a FASTA-formatted file of sequences. Used as an
alternative to ``sequences`` when suppling a FASTA file.
fmt (str): Format of the alignment. Options are 'fasta' and 'clustal'. Default
is 'fasta'.
threads (int): Number of threads (CPU cores) for MUSCLE to use. Default is ``-1``, which
results in MUSCLE using all available cores.
as_file (bool): If ``True``, returns a path to the alignment file. If ``False``,
returns a BioPython ``MultipleSeqAlignment`` object (obtained by calling
``Bio.AlignIO.read()`` on the alignment file).
maxiters (int): Passed directly to MUSCLE using the ``-maxiters`` flag.
diags (int): Passed directly to MUSCLE using the ``-diags`` flag.
gap_open (float): Passed directly to MUSCLE using the ``-gapopen`` flag. Ignored
if ``gap_extend`` is not also provided.
gap_extend (float): Passed directly to MUSCLE using the ``-gapextend`` flag. Ignored
if ``gap_open`` is not also provided.
muscle_bin (str): Path to MUSCLE executable. ``abutils`` includes built-in MUSCLE binaries
for MacOS and Linux, however, if a different MUSCLE binary can be provided. Default is
``None``, which results in using the appropriate built-in MUSCLE binary.
Returns:
Returns a BioPython ``MultipleSeqAlignment`` object, unless ``as_file`` is ``True``,
in which case the path to the alignment file is returned. | entailment |
def local_alignment(query, target=None, targets=None, match=3, mismatch=-2,
                    gap_open=-5, gap_extend=-2, matrix=None, aa=False,
                    gap_open_penalty=None, gap_extend_penalty=None):
    '''
    Striped Smith-Waterman local pairwise alignment.

    Args:

        query: Query sequence. ``query`` can be one of four things:

            1. a nucleotide or amino acid sequence, as a string
            2. a Biopython ``SeqRecord`` object
            3. an AbTools ``Sequence`` object
            4. a list/tuple of the format ``[seq_id, sequence]``

        target: A single target sequence. ``target`` can be anything that
            ``query`` accepts.

        targets (list): A list of target sequences, processed iteratively.
            Each element can be anything accepted by ``query``.

        match (int): Match score. Should be a positive integer. Default is 3.

        mismatch (int): Mismatch score. Should be a negative integer. Default is -2.

        gap_open (int): Penalty for opening gaps. Should be a negative integer.
            Default is -5.

        gap_extend (int): Penalty for extending gaps. Should be a negative
            integer. Default is -2.

        matrix (str, dict): Alignment scoring matrix. Two options for passing the matrix:

            - The name of a built-in matrix. Current options are ``blosum62`` and ``pam250``.
            - A nested dictionary, giving an alignment score for each residue pair,
              such that the score for A and G is retrieved by ``matrix['A']['G']``.

        aa (bool): Must be set to ``True`` if aligning amino acid sequences. Default
            is ``False``.

        gap_open_penalty (int): Deprecated positive-valued equivalent of ``gap_open``,
            kept for backward compatibility with the earlier AbTools API.

        gap_extend_penalty (int): Deprecated positive-valued equivalent of
            ``gap_extend``, kept for backward compatibility.

    Returns:

        A single ``SSWAlignment`` object when exactly one alignment is produced;
        otherwise a list of ``SSWAlignment`` objects.
    '''
    if aa and not matrix:
        raise RuntimeError('ERROR: You must supply a scoring matrix for amino acid alignments')
    if not target and not targets:
        raise RuntimeError('ERROR: You must supply a target sequence (or sequences).')
    if target:
        targets = [target, ]
    # legacy AbTools API passed penalties as positive values; negate them to
    # match the sign convention of gap_open/gap_extend
    if gap_open_penalty is not None:
        gap_open = -gap_open_penalty
    if gap_extend_penalty is not None:
        gap_extend = -gap_extend_penalty
    results = []
    for tgt in targets:
        try:
            # SSWAlignment expects positive gap penalties, so flip the sign back
            results.append(SSWAlignment(query=query,
                                        target=tgt,
                                        match=match,
                                        mismatch=mismatch,
                                        matrix=matrix,
                                        gap_open=-gap_open,
                                        gap_extend=-gap_extend,
                                        aa=aa))
        except IndexError:
            # skip targets that fail to align
            continue
    return results[0] if len(results) == 1 else results
Args:
query: Query sequence. ``query`` can be one of four things:
1. a nucleotide or amino acid sequence, as a string
2. a Biopython ``SeqRecord`` object
3. an AbTools ``Sequence`` object
4. a list/tuple of the format ``[seq_id, sequence]``
target: A single target sequence. ``target`` can be anything that
``query`` accepts.
targets (list): A list of target sequences, to be proccssed iteratively.
Each element in the ``targets`` list can be anything accepted by
``query``.
match (int): Match score. Should be a positive integer. Default is 3.
mismatch (int): Mismatch score. Should be a negative integer. Default is -2.
gap_open (int): Penalty for opening gaps. Should be a negative integer.
Default is -5.
gap_extend (int): Penalty for extending gaps. Should be a negative
integer. Default is -2.
matrix (str, dict): Alignment scoring matrix. Two options for passing the
alignment matrix:
- The name of a built-in matrix. Current options are ``blosum62`` and ``pam250``.
- A nested dictionary, giving an alignment score for each residue pair. Should be formatted
such that retrieving the alignment score for A and G is accomplished by::
matrix['A']['G']
aa (bool): Must be set to ``True`` if aligning amino acid sequences. Default
is ``False``.
Returns:
If a single target sequence is provided (via ``target``), a single ``SSWAlignment``
object will be returned. If multiple target sequences are supplied (via ``targets``),
a list of ``SSWAlignment`` objects will be returned. | entailment |
def global_alignment(query, target=None, targets=None, match=3, mismatch=-2, gap_open=-5, gap_extend=-2,
                     score_match=None, score_mismatch=None, score_gap_open=None,
                     score_gap_extend=None, matrix=None, aa=False):
    '''
    Needleman-Wunsch global pairwise alignment.

    With ``global_alignment``, you can score an alignment using different
    parameters than were used to compute the alignment. This allows you to
    compute pure identity scores (match=1, mismatch=0) on pairs of sequences
    for which those alignment parameters would be unsuitable. For example::

        seq1 = 'ATGCAGC'
        seq2 = 'ATCAAGC'

    using identity scoring params (match=1, all penalties are 0) for both alignment
    and scoring produces the following alignment::

        ATGCA-GC
        || || ||
        AT-CAAGC

    with an alignment score of 6 and an alignment length of 8 (identity = 75%). But
    to calculate the identity of a gapless alignment, using::

        global_alignment(seq1, seq2,
                         gap_open=20,
                         score_match=1,
                         score_mismatch=0,
                         score_gap_open=10,
                         score_gap_extend=1)

    produces the following alignment::

        ATGCAGC
        || |||
        ATCAAGC

    which has a score of 5 and an alignment length of 7 (identity = 71%).

    Args:

        query: Query sequence. ``query`` can be one of four things:

            1. a nucleotide or amino acid sequence, as a string
            2. a Biopython ``SeqRecord`` object
            3. an AbTools ``Sequence`` object
            4. a list/tuple of the format ``[seq_id, sequence]``

        target: A single target sequence. ``target`` can be anything that
            ``query`` accepts.

        targets (list): A list of target sequences, processed iteratively.
            Each element can be anything accepted by ``query``.

        match (int): Match score for alignment. Should be a positive integer. Default is 3.

        mismatch (int): Mismatch score for alignment. Should be a negative integer. Default is -2.

        gap_open (int): Penalty for opening gaps in alignment. Should be a negative integer.
            Default is -5.

        gap_extend (int): Penalty for extending gaps in alignment. Should be a negative
            integer. Default is -2.

        score_match (int): Match score for scoring the alignment. Should be a positive integer.
            Default is to use the score from ``match`` or ``matrix``, whichever is provided.

        score_mismatch (int): Mismatch score for scoring the alignment. Should be a negative
            integer. Default is to use the score from ``mismatch`` or ``matrix``, whichever
            is provided.

        score_gap_open (int): Gap open penalty for scoring the alignment. Should be a negative
            integer. Default is to use ``gap_open``.

        score_gap_extend (int): Gap extend penalty for scoring the alignment. Should be a
            negative integer. Default is to use ``gap_extend``.

        matrix (str, dict): Alignment scoring matrix. Two options for passing the matrix:

            - The name of a built-in matrix. Current options are ``blosum62`` and ``pam250``.
            - A nested dictionary, giving an alignment score for each residue pair,
              such that the score for A and G is retrieved by ``matrix['A']['G']``.

        aa (bool): Must be set to ``True`` if aligning amino acid sequences. Default
            is ``False``.

    Returns:

        If a single target sequence is provided (via ``target``), a single ``NWAlignment``
        object will be returned. If multiple target sequences are supplied (via ``targets``),
        a list of ``NWAlignment`` objects will be returned.

    Raises:

        RuntimeError: if no target is supplied, or ``targets`` is not a list/tuple.
    '''
    if not target and not targets:
        err = 'ERROR: You must supply a target sequence (or sequences).'
        raise RuntimeError(err)
    if target:
        targets = [target, ]
    # FIX: isinstance instead of the type() anti-pattern; also adds the space
    # that was missing between the two concatenated message sentences
    if not isinstance(targets, (list, tuple)):
        err = 'ERROR: ::targets:: requires an iterable (list or tuple). '
        err += 'For a single sequence, use ::target::'
        raise RuntimeError(err)
    alignments = [NWAlignment(query=query,
                              target=t,
                              match=match,
                              mismatch=mismatch,
                              gap_open=gap_open,
                              gap_extend=gap_extend,
                              score_match=score_match,
                              score_mismatch=score_mismatch,
                              score_gap_open=score_gap_open,
                              score_gap_extend=score_gap_extend,
                              matrix=matrix,
                              aa=aa)
                  for t in targets]
    # a single ``target`` yields a single alignment object, not a list
    if target is not None:
        return alignments[0]
    return alignments
With ``global_alignment``, you can score an alignment using different
paramaters than were used to compute the alignment. This allows you to
compute pure identity scores (match=1, mismatch=0) on pairs of sequences
for which those alignment parameters would be unsuitable. For example::
seq1 = 'ATGCAGC'
seq2 = 'ATCAAGC'
using identity scoring params (match=1, all penalties are 0) for both alignment
and scoring produces the following alignment::
ATGCA-GC
|| || ||
AT-CAAGC
with an alignment score of 6 and an alignment length of 8 (identity = 75%). But
what if we want to calculate the identity of a gapless alignment? Using::
global_alignment(seq1, seq2,
gap_open=20,
score_match=1,
score_mismatch=0,
score_gap_open=10,
score_gap_extend=1)
we get the following alignment::
ATGCAGC
|| |||
ATCAAGC
which has an score of 5 and an alignment length of 7 (identity = 71%). Obviously,
this is an overly simple example (it would be much easier to force gapless alignment
by just iterating over each sequence and counting the matches), but there are several
real-life cases in which different alignment and scoring paramaters are desirable.
Args:
query: Query sequence. ``query`` can be one of four things:
1. a nucleotide or amino acid sequence, as a string
2. a Biopython ``SeqRecord`` object
3. an AbTools ``Sequence`` object
4. a list/tuple of the format ``[seq_id, sequence]``
target: A single target sequence. ``target`` can be anything that
``query`` accepts.
targets (list): A list of target sequences, to be proccssed iteratively.
Each element in the ``targets`` list can be anything accepted by
``query``.
match (int): Match score for alignment. Should be a positive integer. Default is 3.
mismatch (int): Mismatch score for alignment. Should be a negative integer. Default is -2.
gap_open (int): Penalty for opening gaps in alignment. Should be a negative integer.
Default is -5.
gap_extend (int): Penalty for extending gaps in alignment. Should be a negative
integer. Default is -2.
score_match (int): Match score for scoring the alignment. Should be a positive integer.
Default is to use the score from ``match`` or ``matrix``, whichever is provided.
score_mismatch (int): Mismatch score for scoring the alignment. Should be a negative
integer. Default is to use the score from ``mismatch`` or ``matrix``, whichever
is provided.
score_gap_open (int): Gap open penalty for scoring the alignment. Should be a negative
integer. Default is to use ``gap_open``.
score_gap_extend (int): Gap extend penalty for scoring the alignment. Should be a negative
integer. Default is to use ``gap_extend``.
matrix (str, dict): Alignment scoring matrix. Two options for passing the alignment matrix:
- The name of a built-in matrix. Current options are ``blosum62`` and ``pam250``.
- A nested dictionary, giving an alignment score for each residue pair. Should be
formatted such that retrieving the alignment score for A and G is accomplished by::
matrix['A']['G']
aa (bool): Must be set to ``True`` if aligning amino acid sequences. Default
is ``False``.
Returns:
If a single target sequence is provided (via ``target``), a single ``NWAlignment``
object will be returned. If multiple target sequences are supplied (via ``targets``),
a list of ``NWAlignment`` objects will be returned. | entailment |
def dot_alignment(sequences, seq_field=None, name_field=None, root=None, root_name=None,
                  cluster_threshold=0.75, as_fasta=False, just_alignment=False):
    '''
    Creates a dot alignment (dots indicate identity, mismatches are represented by the
    mismatched residue) for a list of sequences.

    Args:

        sequences (list(Sequence)): A list of Sequence objects to be aligned.

        seq_field (str): Name of the sequence field key. Default is ``vdj_nt``.

        name_field (str): Name of the name field key. Default is ``seq_id``.

        root (str, Sequence): The sequence used to 'root' the alignment. This sequence
            will be at the top of the alignment and is the sequence against which dots
            (identity) will be evaluated. Can be provided either as a string corresponding
            to the name of one of the sequences in ``sequences`` or as a Sequence object.
            If not provided, ``sequences`` will be clustered at ``cluster_threshold`` and
            the centroid of the largest cluster will be used.

        root_name (str): Name of the root sequence. If not provided, the existing name of
            the root sequence (``name_field``) will be used. If ``root`` is not provided,
            the default ``root_name`` is ``'centroid'``.

        cluster_threshold (float): Threshold with which to cluster sequences if ``root``
            is not provided. Default is ``0.75``.

        as_fasta (bool): If ``True``, returns the dot alignment as a FASTA-formatted
            string, rather than a string formatted for human readability.

        just_alignment (bool): If ``True``, returns just the dot-aligned sequences as a list.

    Returns:

        If ``just_alignment`` is ``True``, a list of dot-aligned sequences (without
        sequence names) will be returned.
        If ``as_fasta`` is ``True``, a string containing the dot-aligned sequences in
        FASTA format will be returned.
        Otherwise, a formatted string containing the aligned sequences (with sequence
        names) will be returned.
    '''
    import abstar
    from .cluster import cluster
    # work on copies so the caller's objects are not mutated
    sequences = deepcopy(sequences)
    root = copy(root)
    # if a custom seq_field is specified, copy it to the .alignment_sequence attribute
    if seq_field is not None:
        if not all([seq_field in list(s.annotations.keys()) for s in sequences]):
            print('\nERROR: {} is not present in all of the supplied sequences.\n'.format(seq_field))
            sys.exit(1)
        for s in sequences:
            s.alignment_sequence = s[seq_field]
    else:
        for s in sequences:
            s.alignment_sequence = s.sequence
    # if a custom name_field is specified, copy it to the .alignment_id attribute
    if name_field is not None:
        if not all([name_field in list(s.annotations.keys()) for s in sequences]):
            print('\nERROR: {} is not present in all of the supplied sequences.\n'.format(name_field))
            sys.exit(1)
        for s in sequences:
            s.alignment_id = s[name_field]
    else:
        for s in sequences:
            s.alignment_id = s.id
    # parse the root sequence
    if all([root is None, root_name is None]):
        # no root given: cluster the sequences and use the centroid of the largest cluster
        clusters = cluster(sequences, threshold=cluster_threshold, quiet=True)
        clusters = sorted(clusters, key=lambda x: x.size, reverse=True)
        centroid = clusters[0].centroid
        root = abstar.run(('centroid', centroid.sequence))
        root.alignment_id = 'centroid'
        # NOTE(review): when seq_field is None this indexes root[None]; presumably
        # Sequence.__getitem__ tolerates None -- confirm against the Sequence class
        root.alignment_sequence = root[seq_field]
    elif type(root) in STR_TYPES:
        # FIX: the original indexed [0] before checking for an empty result, so a
        # missing root name raised IndexError instead of printing the intended error
        matches = [s for s in sequences if s.alignment_id == root]
        if not matches:
            print('\nERROR: The name of the root sequence ({}) was not found in the list of input sequences.'.format(root))
            print('\n')
            sys.exit(1)
        root = matches[0]
        sequences = [s for s in sequences if s.alignment_id != root.alignment_id]
    elif type(root) == Sequence:
        if seq_field is not None:
            # FIX: was ``root.anotations`` (typo), which raised AttributeError
            if seq_field not in list(root.annotations.keys()):
                print('\nERROR: {} is not present in the supplied root sequence.\n'.format(seq_field))
                sys.exit(1)
            root.alignment_sequence = root[seq_field]
        if name_field is not None:
            # FIX: was ``root.anotations`` (typo), which raised AttributeError
            if name_field not in list(root.annotations.keys()):
                print('\nERROR: {} is not present in the supplied root sequence.\n'.format(name_field))
                sys.exit(1)
            root.alignment_id = root[name_field]
        sequences = [s for s in sequences if s.alignment_id != root.alignment_id]
    else:
        # FIX: single-line message (the original's backslash continuation embedded
        # the source indentation into the printed string)
        print('\nERROR: If root is provided, it must be the name of a sequence found in the supplied list of sequences or it must be a Sequence object.')
        print('\n')
        sys.exit(1)
    if root_name is not None:
        root.alignment_id = root_name
    else:
        root_name = root.alignment_id
    # compute the alignment, with the root sequence first
    seqs = [(root.alignment_id, root.alignment_sequence)]
    seqs += [(s.alignment_id, s.alignment_sequence) for s in sequences]
    aln = muscle(seqs)
    g_aln = [a for a in aln if a.id == root_name][0]
    dots = [(root_name, str(g_aln.seq)), ]
    for seq in [a for a in aln if a.id != root_name]:
        s_aln = ''
        for g, q in zip(str(g_aln.seq), str(seq.seq)):
            if g == q == '-':
                s_aln += '-'    # gap in both root and query
            elif g == q:
                s_aln += '.'    # identity -> dot
            else:
                s_aln += q      # mismatch -> show the residue
        dots.append((seq.id, s_aln))
    if just_alignment:
        return [d[1] for d in dots]
    # pad names so the aligned sequences line up in the human-readable output
    name_len = max([len(d[0]) for d in dots]) + 2
    dot_aln = []
    for d in dots:
        if as_fasta:
            dot_aln.append('>{}\n{}'.format(d[0], d[1]))
        else:
            spaces = name_len - len(d[0])
            dot_aln.append(d[0] + ' ' * spaces + d[1])
    return '\n'.join(dot_aln)
residue) for a list of sequences.
Args:
sequence (list(Sequence)): A list of Sequence objects to be aligned.
seq_field (str): Name of the sequence field key. Default is ``vdj_nt``.
name_field (str): Name of the name field key. Default is ``seq_id``.
root (str, Sequence): The sequence used to 'root' the alignment. This sequence will be at the
top of the alignment and is the sequence against which dots (identity) will be evaluated.
Can be provided either as a string corresponding to the name of one of the sequences in
``sequences`` or as a Sequence object. If not provided, ``sequences`` will be clustered
at ``cluster_threshold`` and the centroid of the largest cluster will be used.
root_name (str): Name of the root sequence. If not provided, the existing name of the root
sequence (``name_field``) will be used. If ``root`` is not provided, the default ``root_name``
is ``'centroid'``.
cluster_threshold (float): Threshold with which to cluster sequences if ``root`` is not provided.
Default is ``0.75``.
as_fasta (bool): If ``True``, returns the dot alignment as a FASTA-formatted string, rather than
a string formatted for human readability.
just_alignment (bool): If ``True``, returns just the dot-aligned sequences as a list.
Returns:
If ``just_alignment`` is ``True``, a list of dot-aligned sequences (without sequence names) will be returned.
If ``as_fasta`` is ``True``, a string containing the dot-aligned sequences in FASTA format will be returned.
Otherwise, a formatted string containing the aligned sequences (with sequence names) will be returned. | entailment |
def fetch_class(full_class_name):
    """Resolve a fully-qualified dotted name and return the class it refers to.

    :param string full_class_name: dotted path of the class, e.g. ``package.module.Class``.
    :return: the class object named by ``full_class_name``.
    """
    module_path, _, attr_name = full_class_name.rpartition('.')
    return getattr(importlib.import_module(module_path), attr_name)
:param string full_class_name: Name of the class to be fetched. | entailment |
def has_chosen(state, correct, msgs):
    """Verify exercises of the type MultipleChoiceExercise.

    Args:
        state: State instance describing student and solution code. Can be omitted if used with Ex().
        correct: index of the correct option, where 1 is the first option.
        msgs: list of feedback messages corresponding to each option.

    :Example:
        The following SCT is for a multiple choice exercise with 2 options, the first
        of which is correct.::

            Ex().has_chosen(1, ['Correct!', 'Incorrect. Try again!'])
    """
    namespace = {}
    # Run the submission; it is expected to bind ``selected_option``.
    exec(state.student_code, globals(), namespace)
    chosen = namespace["selected_option"]
    if chosen == correct:
        # msgs is 1-indexed from the caller's point of view
        state.reporter.success_msg = msgs[correct - 1]
    else:
        state.report(Feedback(msgs[chosen - 1]))
    return state
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
correct: index of correct option, where 1 is the first option.
msgs : list of feedback messages corresponding to each option.
:Example:
The following SCT is for a multiple choice exercise with 2 options, the first
of which is correct.::
Ex().has_chosen(1, ['Correct!', 'Incorrect. Try again!']) | entailment |
def multi(state, *tests):
    """Run multiple subtests and return the original state (for chaining).

    This function can be thought of as an AND statement: every subtest it
    runs must pass.

    Args:
        state: State instance describing student and solution code, can be omitted if used with Ex()
        tests: one or more sub-SCTs to run.

    :Example:
        The SCT below checks two has_code cases.. ::

            Ex().multi(has_code('SELECT'), has_code('WHERE'))

        The SCT below uses ``multi`` to 'branch out' to check that
        the SELECT statement has both a WHERE and LIMIT clause.. ::

            Ex().check_node('SelectStmt', 0).multi(
                check_edge('where_clause'),
                check_edge('limit_clause')
            )
    """
    for subtest in iter_tests(tests):
        # Each subtest is a callable expecting a single state argument;
        # bind the state so the reporter can execute it.
        state.do_test(partial(subtest, state))
    # hand back the unchanged state so calls can be chained
    return state
This function could be thought as an AND statement, since all tests it runs must pass
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
tests: one or more sub-SCTs to run.
:Example:
The SCT below checks two has_code cases.. ::
Ex().multi(has_code('SELECT'), has_code('WHERE'))
The SCT below uses ``multi`` to 'branch out' to check that
the SELECT statement has both a WHERE and LIMIT clause.. ::
Ex().check_node('SelectStmt', 0).multi(
check_edge('where_clause'),
check_edge('limit_clause')
) | entailment |
def check_not(state, *tests, msg):
    """Run multiple subtests that should all fail; on success returns the original state (for chaining).

    - This function is currently only tested in working with ``has_code()`` in the subtests.
    - It can be thought of as a ``NOT(x OR y OR ...)`` statement: every subtest must fail.
    - It is a direct counterpart of ``multi``.

    Args:
        state: State instance describing student and solution code, can be omitted if used with Ex()
        *tests: one or more sub-SCTs to run
        msg: feedback message shown when any of the subtests in ``*tests`` passes.

    :Example:
        The SCT below runs two has_code cases.. ::

            Ex().check_not(
                has_code('INNER'),
                has_code('OUTER'),
                incorrect_msg="Don't use `INNER` or `OUTER`!"
            )

        If students use ``INNER (JOIN)`` or ``OUTER (JOIN)`` in their code, this test will fail.
    """
    for subtest in iter_tests(tests):
        try:
            subtest(state)
        except TestFail:
            # subtest failed, exactly as required -- move to the next one
            continue
        # a subtest passed, which means the overall check fails
        return state.report(Feedback(msg))
    # every subtest failed; return the original state so calls can be chained
    return state
- This function is currently only tested in working with ``has_code()`` in the subtests.
- This function can be thought as a ``NOT(x OR y OR ...)`` statement, since all tests it runs must fail
- This function can be considered a direct counterpart of multi.
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
*tests: one or more sub-SCTs to run
msg: feedback message that is shown in case not all tests specified in ``*tests`` fail.
:Example:
The SCT below runs two has_code cases.. ::
Ex().check_not(
has_code('INNER'),
has_code('OUTER'),
incorrect_msg="Don't use `INNER` or `OUTER`!"
)
If students use ``INNER (JOIN)`` or ``OUTER (JOIN)`` in their code, this test will fail. | entailment |
def check_or(state, *tests):
    """Test whether at least one SCT passes.

    If every test fails, the feedback of the first test is presented to the student.

    Args:
        state: State instance describing student and solution code, can be omitted if used with Ex()
        tests: one or more sub-SCTs to run

    :Example:
        The SCT below tests that the student typed either 'SELECT' or 'WHERE' (or both).. ::

            Ex().check_or(
                has_code('SELECT'),
                has_code('WHERE')
            )

        The SCT below checks that a SELECT statement has at least a WHERE or LIMIT clause.. ::

            Ex().check_node('SelectStmt', 0).check_or(
                check_edge('where_clause'),
                check_edge('limit_clause')
            )
    """
    passed = False
    first_failure = None
    # NOTE: every subtest is run (no early exit) so any side effects of
    # later subtests still happen, matching the original behaviour.
    for subtest in iter_tests(tests):
        try:
            multi(state, subtest)
            passed = True
        except TestFail as exc:
            if not first_failure:
                first_failure = exc.feedback
    if passed:
        return state  # todo: add test
    state.report(first_failure)
If all of the tests fail, the feedback of the first test will be presented to the student.
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
tests: one or more sub-SCTs to run
:Example:
The SCT below tests that the student typed either 'SELECT' or 'WHERE' (or both).. ::
Ex().check_or(
has_code('SELECT'),
has_code('WHERE')
)
The SCT below checks that a SELECT statement has at least a WHERE c or LIMIT clause.. ::
Ex().check_node('SelectStmt', 0).check_or(
check_edge('where_clause'),
check_edge('limit_clause')
) | entailment |
def check_correct(state, check, diagnose):
    """Allows feedback from a diagnostic SCT, only if a check SCT fails.

    Args:
        state: State instance describing student and solution code. Can be omitted if used with Ex().
        check: an SCT chain that must succeed.
        diagnose: an SCT chain to run if the check fails.

    :Example:
        The SCT below tests whether the student's query result is correct,
        before running diagnostic SCTs.. ::

            Ex().check_correct(
                check_result(),
                check_node('SelectStmt')
            )
    """
    pending_feedback = None
    try:
        multi(state, check)
    except TestFail as exc:
        pending_feedback = exc.feedback
    # todo: let if from except wrap try-except
    # only once teach uses force_diagnose
    try:
        multi(state, diagnose)
    except TestFail as exc:
        # diagnostic feedback replaces the check feedback, but only when the
        # check actually failed (or diagnosis is forced)
        if pending_feedback is not None or state.force_diagnose:
            pending_feedback = exc.feedback
    if pending_feedback is not None:
        state.report(pending_feedback)
    return state
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
check: An sct chain that must succeed.
diagnose: An sct chain to run if the check fails.
:Example:
The SCT below tests whether students query result is correct, before running diagnostic SCTs.. ::
Ex().check_correct(
check_result(),
check_node('SelectStmt')
) | entailment |
def fail(state, msg="fail"):
    """Always fails the SCT, with an optional msg.

    This function takes a single argument, ``msg``, that is the feedback given to the student.
    Note that this would be a terrible idea for grading submissions, but may be useful while writing SCTs.
    For example, failing a test will highlight the code as if the previous test/check had failed.
    """
    state.report(Feedback(state.build_message(msg), state))
    return state
This function takes a single argument, ``msg``, that is the feedback given to the student.
Note that this would be a terrible idea for grading submissions, but may be useful while writing SCTs.
For example, failing a test will highlight the code as if the previous test/check had failed. | entailment |
def required_attributes(element, *attributes):
    """Check element for required attributes. Raise ``NotValidXmlException`` on error.

    :param element: ElementTree element
    :param attributes: list of attribute names to check
    :raises NotValidXmlException: if some attribute is missing
    """
    # ``all`` with a generator is clearer than the original reduce/lambda and
    # short-circuits on the first missing attribute.
    if not all(attr in element.attrib for attr in attributes):
        raise NotValidXmlException(msg_err_missing_attributes(element.tag, *attributes))
:param element: ElementTree element
:param attributes: list of attributes names to check
:raises NotValidXmlException: if some argument is missing | entailment |
def required_elements(element, *children):
    """Check element (``xml.etree.ElementTree.Element``) for required children, defined as XPath.
    Raise ``NotValidXmlException`` on error.

    :param element: ElementTree element
    :param children: list of XPaths to check
    :raises NotValidXmlException: if some child is missing
    """
    # any() short-circuits, so the search stops at the first missing child.
    if any(element.find(xpath) is None for xpath in children):
        raise NotValidXmlException(msg_err_missing_children(element.tag, *children))
``NotValidXmlException`` on error.
:param element: ElementTree element
:param children: list of XPaths to check
:raises NotValidXmlException: if some child is missing | entailment |
def required_items(element, children, attributes):
    """Check an xml element to include given attributes and children.

    :param element: ElementTree element
    :param children: list of XPaths to check
    :param attributes: list of attribute names to check
    :raises NotValidXmlException: if some attribute is missing
    :raises NotValidXmlException: if some child is missing
    """
    # Validate child structure first, then attributes, preserving the
    # original error precedence.
    required_elements(element, *children)
    required_attributes(element, *attributes)
:param element: ElementTree element
:param children: list of XPaths to check
:param attributes: list of attributes names to check
:raises NotValidXmlException: if some argument is missing
:raises NotValidXmlException: if some child is missing | entailment |
def attrib_to_dict(element, *args, **kwargs):
    """For an ElementTree ``element`` extract specified attributes. If an attribute does not exist,
    its value will be ``None``.

        attrib_to_dict(element, 'attr_a', 'attr_b') -> {'attr_a': 'value', 'attr_b': 'value'}

    Mapping between xml attributes and dictionary keys is done with kwargs.

        attrib_to_dict(element, my_new_name='xml_attribute_name', ..)
    """
    # Truthiness checks are the idiomatic replacement for ``len(x) > 0``.
    if args:
        return {key: element.get(key) for key in args}
    if kwargs:
        return {new_key: element.get(old_key) for new_key, old_key in viewitems(kwargs)}
    # No selection given: return the element's raw attribute mapping.
    return element.attrib
``None``.
attrib_to_dict(element, 'attr_a', 'attr_b') -> {'attr_a': 'value', 'attr_a': 'value'}
Mapping between xml attributes and dictionary keys is done with kwargs.
attrib_to_dict(element, my_new_name = 'xml_atribute_name', ..) | entailment |
def get_xml_root(xml_path):
    """Download and parse the xml located at ``xml_path``, returning its root element.

    :param xml_path: URL to a xml file
    :type xml_path: str
    :return: xml root
    """
    response = requests.get(xml_path)
    return ET.fromstring(response.content)
:param xml_path: URL to a xml file
:type xml_path: str
:return: xml root | entailment |
def element_to_int(element, attribute=None):
    """Convert ``element`` to int. If ``attribute`` is not given, convert ``element.text``.

    :param element: ElementTree element
    :param attribute: attribute name
    :type attribute: str
    :returns: integer
    :rtype: int
    """
    raw = element.text if attribute is None else element.get(attribute)
    return int(raw)
:param element: ElementTree element
:param attribute: attribute name
:type attribute: str
:returns: integer
:rtype: int | entailment |
def create_el(name, text=None, attrib=None):
    """Create an element with the given attributes, setting ``element.text``
    to ``text`` when a text value is supplied.

    :param name: element name
    :type name: str
    :param text: text node value
    :type text: str
    :param attrib: attributes
    :type attrib: dict
    :returns: xml element
    :rtype: Element
    """
    # Avoid a mutable default argument by substituting a fresh dict here.
    element = ET.Element(name, attrib if attrib is not None else {})
    if text is not None:
        element.text = text
    return element
text value (if text is not None)
:param name: element name
:type name: str
:param text: text node value
:type text: str
:param attrib: attributes
:type attrib: dict
:returns: xml element
:rtype: Element | entailment |
def modules(cls):
    """Collect all the public class attributes.

    All class attributes should be DI modules; this method collects them
    and returns them as a list.

    :return: list of DI modules
    :rtype: list[Union[Module, Callable]]
    """
    def keep(member):
        # Exclude only the ``modules`` routine itself.
        return not (inspect.isroutine(member) and member.__name__ == 'modules')

    return [value for attr_name, value in inspect.getmembers(cls, keep)
            if not attr_name.startswith('_')]
All class attributes should be a DI modules, this method collects them
and returns as a list.
:return: list of DI modules
:rtype: list[Union[Module, Callable]] | entailment |
def get_pairs(db, collection, experiment=None, subject=None, group=None, name='seq_id',
              delim=None, delim_occurance=1, pairs_only=False, h_selection_func=None, l_selection_func=None):
    '''
    Gets sequences and assigns them to the appropriate mAb pair, based on the sequence name.

    Inputs:
    ::db:: is a pymongo database connection object
    ::collection:: is the collection name, as a string
    If ::subject:: is provided, only sequences with a 'subject' field matching ::subject:: will
        be included. ::subject:: can be either a single subject (as a string) or an iterable
        (list or tuple) of subject strings.
    If ::group:: is provided, only sequences with a 'group' field matching ::group:: will
        be included. ::group:: can be either a single group (as a string) or an iterable
        (list or tuple) of group strings.
    ::name:: is the dict key of the field to be used to group the sequences into pairs.
        Default is 'seq_id'
    ::delim:: is an optional delimiter used to truncate the contents of the ::name:: field.
        Default is None, which results in no name truncation.
    ::delim_occurance:: is the occurrence of the delimiter at which to trim. Trimming is performed
        as delim.join(name.split(delim)[:delim_occurance]), so setting delim_occurance to -1 will
        truncate after the last occurrence of delim. Default is 1.
    ::pairs_only:: setting to True results in only truly paired sequences (pair.is_pair == True)
        being returned. Default is False.

    Returns a list of Pair objects, one for each mAb pair.
    '''
    query = {}
    # Build the Mongo match document field by field: iterables become $in
    # queries, single strings become exact matches.
    for field, value in (('subject', subject), ('group', group), ('experiment', experiment)):
        if value is None:
            continue
        if type(value) in (list, tuple):
            query[field] = {'$in': value}
        elif type(value) in STR_TYPES:
            query[field] = value
    seqs = list(db[collection].find(query))
    return assign_pairs(seqs, name=name, delim=delim,
                        delim_occurance=delim_occurance, pairs_only=pairs_only,
                        h_selection_func=h_selection_func, l_selection_func=l_selection_func)
Inputs:
::db:: is a pymongo database connection object
::collection:: is the collection name, as a string
If ::subject:: is provided, only sequences with a 'subject' field matching ::subject:: will
be included. ::subject:: can be either a single subject (as a string) or an iterable
(list or tuple) of subject strings.
If ::group:: is provided, only sequences with a 'group' field matching ::group:: will
be included. ::group:: can be either a single group (as a string) or an iterable
(list or tuple) of group strings.
::name:: is the dict key of the field to be used to group the sequences into pairs.
Default is 'seq_id'
::delim:: is an optional delimiter used to truncate the contents of the ::name:: field.
Default is None, which results in no name truncation.
::delim_occurance:: is the occurance of the delimiter at which to trim. Trimming is performed
as delim.join(name.split(delim)[:delim_occurance]), so setting delim_occurance to -1 will
truncate after the last occurrence of delim. Default is 1.
::pairs_only:: setting to True results in only truly paired sequences (pair.is_pair == True)
will be returned. Default is False.
Returns a list of Pair objects, one for each mAb pair. | entailment |
def assign_pairs(seqs, name='seq_id', delim=None, delim_occurance=1, pairs_only=False,
                 h_selection_func=None, l_selection_func=None):
    '''
    Assigns sequences to the appropriate mAb pair, based on the sequence name.

    Inputs:
    ::seqs:: is a list of dicts, of the format returned by querying a MongoDB containing
        Abstar output.
    ::name:: is the dict key of the field to be used to group the sequences into pairs.
        Default is 'seq_id'
    ::delim:: is an optional delimiter used to truncate the contents of the ::name:: field.
        Default is None, which results in no name truncation.
    ::delim_occurance:: is the occurrence of the delimiter at which to trim. Trimming is performed
        as delim.join(name.split(delim)[:delim_occurance]), so setting delim_occurance to -1 will
        truncate after the last occurrence of delim. Default is 1.
    ::pairs_only:: setting to True results in only truly paired sequences (pair.is_pair == True)
        being returned. Default is False.

    Returns a list of Pair objects, one for each mAb pair.
    '''
    grouped = {}
    for seq in seqs:
        # Optionally truncate the name at the Nth occurrence of the delimiter.
        if delim is None:
            pname = seq[name]
        else:
            pname = delim.join(seq[name].split(delim)[:delim_occurance])
        grouped.setdefault(pname, []).append(seq)
    pairs = [Pair(members, name=pname,
                  h_selection_func=h_selection_func,
                  l_selection_func=l_selection_func)
             for pname, members in grouped.items()]
    if pairs_only:
        pairs = [p for p in pairs if p.is_pair]
    return pairs
Inputs:
::seqs:: is a list of dicts, of the format returned by querying a MongoDB containing
Abstar output.
::name:: is the dict key of the field to be used to group the sequences into pairs.
Default is 'seq_id'
::delim:: is an optional delimiter used to truncate the contents of the ::name:: field.
Default is None, which results in no name truncation.
::delim_occurance:: is the occurance of the delimiter at which to trim. Trimming is performed
as delim.join(name.split(delim)[:delim_occurance]), so setting delim_occurance to -1 will
trucate after the last occurance of delim. Default is 1.
::pairs_only:: setting to True results in only truly paired sequences (pair.is_pair == True)
will be returned. Default is False.
Returns a list of Pair objects, one for each mAb pair. | entailment |
def deduplicate(pairs, aa=False, ignore_primer_regions=False):
    '''
    Removes duplicate sequences from a list of Pair objects.

    If a Pair has heavy and light chains, both chains must identically match heavy and light chains
    from another Pair to be considered a duplicate. If a Pair has only a single chain,
    identical matches to that chain will cause the single chain Pair to be considered a duplicate,
    even if the comparison Pair has both chains.

    Note that identical sequences are identified by simple string comparison, so sequences of
    different length that are identical over the entirety of the shorter sequence are not
    considered duplicates.

    By default, comparison is made on the nucleotide sequence. To use the amino acid sequence instead,
    set aa=True.
    '''
    nr_pairs = []
    # Process fully-paired entries first so that single-chain Pairs are
    # compared against (and absorbed by) already-kept paired entries,
    # not the other way around.
    just_pairs = [p for p in pairs if p.is_pair]
    single_chains = [p for p in pairs if not p.is_pair]
    _pairs = just_pairs + single_chains
    for p in _pairs:
        duplicates = []
        for nr in nr_pairs:
            identical = True
            vdj = 'vdj_aa' if aa else 'vdj_nt'
            # 4 residues / 12 nucleotides trimmed from each end when ignoring
            # primer regions -- presumably the primer-encoded stretch; confirm.
            offset = 4 if aa else 12
            if p.heavy is not None:
                if nr.heavy is None:
                    identical = False
                else:
                    heavy = p.heavy[vdj][offset:-offset] if ignore_primer_regions else p.heavy[vdj]
                    nr_heavy = nr.heavy[vdj][offset:-offset] if ignore_primer_regions else nr.heavy[vdj]
                    if heavy != nr_heavy:
                        identical = False
            if p.light is not None:
                if nr.light is None:
                    identical = False
                else:
                    light = p.light[vdj][offset:-offset] if ignore_primer_regions else p.light[vdj]
                    nr_light = nr.light[vdj][offset:-offset] if ignore_primer_regions else nr.light[vdj]
                    if light != nr_light:
                        identical = False
            duplicates.append(identical)
        # Keep p only if it matched none of the already-retained pairs.
        if any(duplicates):
            continue
        else:
            nr_pairs.append(p)
    return nr_pairs
If a Pair has heavy and light chains, both chains must identically match heavy and light chains
from another Pair to be considered a duplicate. If a Pair has only a single chain,
identical matches to that chain will cause the single chain Pair to be considered a duplicate,
even if the comparison Pair has both chains.
Note that identical sequences are identified by simple string comparison, so sequences of
different length that are identical over the entirety of the shorter sequence are not
considered duplicates.
By default, comparison is made on the nucleotide sequence. To use the amino acid sequence instead,
set aa=True. | entailment |
def _refine_v(seq, species):
    '''
    Completes the 5' end of a truncated sequence with germline nucleotides.

    Input is a MongoDB dict (seq) and the species.
    '''
    germ = germlines.get_germline(seq['v_gene']['full'], species)
    aln = global_alignment(seq['vdj_nt'], germ)
    # Collect germline residues aligned opposite leading gaps in the query.
    missing = []
    for q, t in zip(aln.aligned_query, aln.aligned_target):
        if q != '-':
            break
        missing.append(t)
    seq['vdj_nt'] = ''.join(missing) + seq['vdj_nt']
Input is a MongoDB dict (seq) and the species. | entailment |
def _refine_j(seq, species):
    '''
    Completes the 3' end of a truncated sequence with germline nucleotides.

    Input is a MongoDB dict (seq) and the species.
    '''
    germ = germlines.get_germline(seq['j_gene']['full'], species)
    aln = global_alignment(seq['vdj_nt'], germ)
    # Walk the alignment from the 3' end, collecting germline residues
    # aligned opposite trailing gaps in the query.
    missing = []
    for q, t in zip(reversed(aln.aligned_query), reversed(aln.aligned_target)):
        if q != '-':
            break
        missing.append(t)
    seq['vdj_nt'] = seq['vdj_nt'] + ''.join(reversed(missing))
Input is a MongoDB dict (seq) and the species. | entailment |
def _retranslate(seq):
    '''
    Retranslates a nucleotide sequence following refinement.

    Input is a Pair sequence (basically a dict of MongoDB output).
    '''
    overhang = len(seq['vdj_nt']) % 3
    if overhang:
        # drop the trailing partial codon so translation stays in frame
        seq['vdj_nt'] = seq['vdj_nt'][:-overhang]
    seq['vdj_aa'] = Seq(seq['vdj_nt'], generic_dna).translate()
Input is a Pair sequence (basically a dict of MongoDB output). | entailment |
def fasta(self, key='vdj_nt', append_chain=True):
    '''
    Returns the sequence pair as a fasta string. If the Pair object contains
    both heavy and light chain sequences, both will be returned as a single string.

    By default, the fasta string contains the 'vdj_nt' sequence for each chain. To change,
    use the <key> option to select an alternate sequence.

    By default, the chain (heavy or light) will be appended to the sequence name:

    >MySequence_heavy

    To just use the pair name (which will result in duplicate sequence names for Pair objects
    with both heavy and light chains), set <append_chain> to False.
    '''
    records = []
    # heavy chain first, then light, matching conventional pair ordering
    for chain_name, chain_seq in (('heavy', self.heavy), ('light', self.light)):
        if chain_seq is None:
            continue
        suffix = '_{}'.format(chain_name) if append_chain else ''
        records.append('>{}{}\n{}'.format(chain_seq['seq_id'], suffix, chain_seq[key]))
    return '\n'.join(records)
both heavy and light chain sequences, both will be returned as a single string.
By default, the fasta string contains the 'vdj_nt' sequence for each chain. To change,
use the <key> option to select an alternate sequence.
By default, the chain (heavy or light) will be appended to the sequence name:
>MySequence_heavy
To just use the pair name (which will result in duplicate sequence names for Pair objects
with both heavy and light chains), set <append_chain> to False. | entailment |
def cmap_from_color(color, dark=False):
    '''
    Generates a matplotlib colormap from a single color.

    Colormap will be built, by default, from white to ``color``.

    Args:
        color: Can be one of several things:
            1. Hex code
            2. HTML color name
            3. RGB tuple
        dark (bool): If ``True``, colormap will be built from ``color`` to
            black. Default is ``False``, which builds a colormap from
            white to ``color``.

    Returns:
        colormap: A matplotlib colormap
    '''
    palette = sns.dark_palette if dark else sns.light_palette
    return palette(color, as_cmap=True)
Colormap will be built, by default, from white to ``color``.
Args:
color: Can be one of several things:
1. Hex code
2. HTML color name
3. RGB tuple
dark (bool): If ``True``, colormap will be built from ``color`` to
black. Default is ``False``, which builds a colormap from
white to ``color``.
Returns:
colormap: A matplotlib colormap | entailment |
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=256):
    """
    Truncates a colormap, such that the new colormap consists of
    ``cmap[minval:maxval]``.

    If maxval is smaller than minval, the truncated colormap will be reversed.

    Args:
        cmap (colormap): Colormap to be truncated
        minval (float): Lower bound. Should be a float between 0 and 1.
        maxval (float): Upper bound. Should be a float between 0 and 1
        n (int): Number of colormap steps. Default is ``256``.

    Returns:
        colormap: A matplotlib colormap

    http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib
    """
    base = get_cmap(cmap)
    trunc_name = "%s-trunc-%.2g-%.2g" % (base.name, minval, maxval)
    # Sample the requested sub-range of the base colormap.
    samples = base(np.linspace(minval, maxval, n))
    return colors.LinearSegmentedColormap.from_list(trunc_name, samples)
``cmap[minval:maxval]``.
If maxval is larger than minval, the truncated colormap will be reversed.
Args:
cmap (colormap): Colormap to be truncated
minval (float): Lower bound. Should be a float betwee 0 and 1.
maxval (float): Upper bound. Should be a float between 0 and 1
n (int): Number of colormap steps. Default is ``256``.
Returns:
colormap: A matplotlib colormap
http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib | entailment |
def stack_colormap(lower, upper, n=256):
    """
    Stacks two colormaps (``lower`` and ``upper``) such that
    low half -> ``lower`` colors, high half -> ``upper`` colors

    Args:
        lower (colormap): colormap for the lower half of the stacked colormap.
        upper (colormap): colormap for the upper half of the stacked colormap.
        n (int): Number of colormap steps. Default is ``256``.
    """
    bottom = get_cmap(lower)
    top = get_cmap(upper)
    lin = np.linspace(0, 1, n)
    # Concatenate the sampled RGBA arrays: bottom half first, then top.
    stacked = np.vstack((bottom(lin), top(lin)))
    return array_cmap(stacked, "%s-%s" % (bottom.name, top.name), n=n)
low half -> ``lower`` colors, high half -> ``upper`` colors
Args:
lower (colormap): colormap for the lower half of the stacked colormap.
upper (colormap): colormap for the upper half of the stacked colormap.
n (int): Number of colormap steps. Default is ``256``. | entailment |
def deprecated(func):
    """Decorator marking *func* as deprecated.

    Each call emits a :class:`DeprecationWarning` that points at the
    decorated function's definition site.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        warnings.warn_explicit(
            "Call to deprecated function {}.".format(func.__name__),
            category=DeprecationWarning,
            filename=func.__code__.co_filename,
            lineno=func.__code__.co_firstlineno + 1,
        )
        return func(*args, **kwargs)
    return wrapper
as deprecated. It will result in a warning being emitted
when the function is used. | entailment |
def nested_model(model, nested_fields):
    """
    Return :class:`zsl.db.model.app_model import AppModel` with the nested
    models attached. ``nested_fields`` can be a simple list of model
    fields, or it can be a tree definition in a dict with leafs as keys with
    ``None`` value.
    """
    # type: (ModelBase, Any)->Optional[AppModel]
    if model is None:
        return None
    app_model = model.get_app_model()
    is_dict = isinstance(nested_fields, dict)
    for field in nested_fields:
        field = get_nested_field_name(field)
        # Hoist the dict lookup: the original evaluated
        # ``nested_fields.get(field)`` twice per iteration.
        nested_nested = (nested_fields.get(field) or []) if is_dict else []
        value = getattr(model, field, None)
        # a field may hold a list of related models
        nm_fn = nested_models if isinstance(value, list) else nested_model
        setattr(app_model, field, nm_fn(value, nested_nested))
    return app_model
models attached. ``nested_fields`` can be a simple list as model
fields, or it can be a tree definition in dict with leafs as keys with
``None`` value | entailment |
def check_node(
    state,
    name,
    index=0,
    missing_msg="Check the {ast_path}. Could not find the {index}{node_name}.",
    priority=None,
):
    """Select a node from abstract syntax tree (AST), using its name and index position.

    Args:
        state: State instance describing student and solution code. Can be omitted if used with Ex().
        name : the name of the abstract syntax tree node to find.
        index: the position of that node (see below for details).
        missing_msg: feedback message if node is not in student AST.
        priority: the priority level of the node being searched for. This determines whether to
                  descend into other AST nodes during the search. Higher priority nodes descend
                  into lower priority. Currently, the only important part of priority is that
                  setting a very high priority (e.g. 99) will search every node.

    :Example:
        If both the student and solution code are.. ::

            SELECT a FROM b; SELECT x FROM y;

        then we can focus on the first select with::

            # approach 1: with manually created State instance
            state = State(*args, **kwargs)
            new_state = check_node(state, 'SelectStmt', 0)

            # approach 2: with Ex and chaining
            new_state = Ex().check_node('SelectStmt', 0)
    """
    # Bind the dispatcher once so both ASTs are searched with identical settings.
    df = partial(state.ast_dispatcher, name, priority=priority)
    sol_stmt_list = df(state.solution_ast)
    try:
        sol_stmt = sol_stmt_list[index]
    except IndexError:
        # A missing node in the *solution* is an SCT-author error: raise loudly
        # instead of producing student feedback.
        raise IndexError("Can't get %s statement at index %s" % (name, index))
    stu_stmt_list = df(state.student_ast)
    try:
        stu_stmt = stu_stmt_list[index]
    except IndexError:
        # use speaker on ast dialect module to get message, or fall back to generic
        ast_path = state.get_ast_path() or "highlighted code"
        _msg = state.ast_dispatcher.describe(
            sol_stmt, missing_msg, index=index, ast_path=ast_path
        )
        if _msg is None:
            _msg = MSG_CHECK_FALLBACK
        # NOTE(review): state.report appears to raise (ending the SCT);
        # otherwise ``stu_stmt`` below would be unbound -- confirm.
        state.report(Feedback(_msg))
    # Record this check in the history so later feedback can describe the path.
    action = {
        "type": "check_node",
        "kwargs": {"name": name, "index": index},
        "node": stu_stmt,
    }
    return state.to_child(
        student_ast=stu_stmt, solution_ast=sol_stmt, history=state.history + (action,)
    )
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
name : the name of the abstract syntax tree node to find.
index: the position of that node (see below for details).
missing_msg: feedback message if node is not in student AST.
priority: the priority level of the node being searched for. This determines whether to
descend into other AST nodes during the search. Higher priority nodes descend
into lower priority. Currently, the only important part of priority is that
setting a very high priority (e.g. 99) will search every node.
:Example:
If both the student and solution code are.. ::
SELECT a FROM b; SELECT x FROM y;
then we can focus on the first select with::
# approach 1: with manually created State instance
state = State(*args, **kwargs)
new_state = check_node(state, 'SelectStmt', 0)
# approach 2: with Ex and chaining
new_state = Ex().check_node('SelectStmt', 0) | entailment |
def check_edge(
    state,
    name,
    index=0,
    missing_msg="Check the {ast_path}. Could not find the {index}{field_name}.",
):
    """Select an attribute from an abstract syntax tree (AST) node, using the attribute name.

    Args:
        state: State instance describing student and solution code. Can be omitted if used with Ex().
        name: the name of the attribute to select from current AST node.
        index: entry to get from a list field. If too few entries, will fail with missing_msg.
        missing_msg: feedback message if attribute is not in student AST.

    :Example:
        If both the student and solution code are.. ::

            SELECT a FROM b; SELECT x FROM y;

        then we can get the from_clause using ::

            # approach 1: with manually created State instance -----
            state = State(*args, **kwargs)
            select = check_node(state, 'SelectStmt', 0)
            clause = check_edge(select, 'from_clause')

            # approach 2: with Ex and chaining ---------------------
            select = Ex().check_node('SelectStmt', 0)        # get first select statement
            clause = select.check_edge('from_clause', None)  # get from_clause (a list)
            clause2 = select.check_edge('from_clause', 0)    # get first entry in from_clause
    """
    # The solution AST must have the attribute; if it doesn't, the SCT itself
    # is broken, so raise (don't report student feedback).
    try:
        sol_attr = getattr(state.solution_ast, name)
        if sol_attr and isinstance(sol_attr, list) and index is not None:
            sol_attr = sol_attr[index]
    except IndexError:
        raise IndexError("Can't get %s attribute" % name)

    # use speaker on ast dialect module to get message, or fall back to generic
    ast_path = state.get_ast_path() or "highlighted code"
    _msg = state.ast_dispatcher.describe(
        state.student_ast, missing_msg, field=name, index=index, ast_path=ast_path
    )
    if _msg is None:
        _msg = MSG_CHECK_FALLBACK

    # Any failure to fetch the attribute off the student AST means the student
    # is missing it, so report feedback. BUGFIX: this was a bare ``except:``,
    # which also swallowed SystemExit/KeyboardInterrupt; narrowed to Exception.
    try:
        stu_attr = getattr(state.student_ast, name)
        if stu_attr and isinstance(stu_attr, list) and index is not None:
            stu_attr = stu_attr[index]
    except Exception:
        state.report(Feedback(_msg))

    # fail if attribute exists, but is None only for the student
    if stu_attr is None and sol_attr is not None:
        state.report(Feedback(_msg))

    action = {"type": "check_edge", "kwargs": {"name": name, "index": index}}
    return state.to_child(
        student_ast=stu_attr, solution_ast=sol_attr, history=state.history + (action,)
    )
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
name: the name of the attribute to select from current AST node.
index: entry to get from a list field. If too few entires, will fail with missing_msg.
missing_msg: feedback message if attribute is not in student AST.
:Example:
If both the student and solution code are.. ::
SELECT a FROM b; SELECT x FROM y;
then we can get the from_clause using ::
# approach 1: with manually created State instance -----
state = State(*args, **kwargs)
select = check_node(state, 'SelectStmt', 0)
clause = check_edge(select, 'from_clause')
# approach 2: with Ex and chaining ---------------------
select = Ex().check_node('SelectStmt', 0) # get first select statement
clause = select.check_edge('from_clause', None) # get from_clause (a list)
clause2 = select.check_edge('from_clause', 0) # get first entry in from_clause | entailment |
def has_code(
    state,
    text,
    incorrect_msg="Check the {ast_path}. The checker expected to find {text}.",
    fixed=False,
):
    """Test whether the student code contains ``text``.

    Args:
        state: State instance describing student and solution code. Can be omitted if used with Ex().
        text: text that student code must contain. Can be a regex pattern or a simple string.
        incorrect_msg: feedback message if text is not in student code.
        fixed: whether to match text exactly, rather than using regular expressions.

    Note:
        Functions like ``check_node`` focus on certain parts of code.
        Using these functions followed by ``has_code`` will only look
        in the code being focused on.

    :Example:
        If the student code is ``SELECT a FROM b WHERE id < 100``, then::

            Ex().has_code(text="id < 10")    # passes (substring via regex)
            Ex().has_code(text="id < 10$")   # fails ($ anchors end of line)

        With ``fixed=True`` the text is matched literally::

            Ex().has_code(text="SELECT * FROM b")              # passes (* is regex)
            Ex().has_code(text="SELECT * FROM b", fixed=True)  # fails

        To check only the WHERE clause::

            where = Ex().check_node('SelectStmt', 0).check_edge('where_clause')
            where.has_code(text="id < 10")
    """
    ParseError = state.ast_dispatcher.ParseError

    def extract_code(node, fallback):
        # A ParseError means there is no usable AST: use the raw code instead.
        if isinstance(node, ParseError):
            return fallback
        try:
            return node.get_text(fallback)
        except:
            # nodes without positional info can't produce a snippet
            return fallback

    student_text = extract_code(state.student_ast, state.student_code)

    feedback_msg = incorrect_msg.format(
        ast_path=state.get_ast_path() or "highlighted code", text=text
    )

    # literal containment when fixed, regex search otherwise
    found = (text in student_text) if fixed else re.search(text, student_text)
    if not found:
        state.report(Feedback(feedback_msg))

    return state
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
text : text that student code must contain. Can be a regex pattern or a simple string.
incorrect_msg: feedback message if text is not in student code.
fixed: whether to match text exactly, rather than using regular expressions.
Note:
Functions like ``check_node`` focus on certain parts of code.
Using these functions followed by ``has_code`` will only look
in the code being focused on.
:Example:
If the student code is.. ::
SELECT a FROM b WHERE id < 100
Then the first test below would (unfortunately) pass, but the second would fail..::
# contained in student code
Ex().has_code(text="id < 10")
# the $ means that you are matching the end of a line
Ex().has_code(text="id < 10$")
By setting ``fixed = True``, you can search for fixed strings::
# without fixed = True, '*' matches any character
Ex().has_code(text="SELECT * FROM b") # passes
Ex().has_code(text="SELECT \\\\* FROM b") # fails
Ex().has_code(text="SELECT * FROM b", fixed=True) # fails
You can check only the code corresponding to the WHERE clause, using ::
where = Ex().check_node('SelectStmt', 0).check_edge('where_clause')
where.has_code(text = "id < 10) | entailment |
def has_equal_ast(
    state,
    incorrect_msg="Check the {ast_path}. {extra}",
    sql=None,
    start=["expression", "subquery", "sql_script"][0],
    exact=None,
):
    """Test whether the student and solution code have identical AST representations

    Args:
        state: State instance describing student and solution code. Can be omitted if used with Ex().
        incorrect_msg: feedback message if student and solution ASTs don't match
        sql: optional code to use instead of the solution ast that is zoomed in on.
        start: if ``sql`` arg is used, the parser rule to parse the sql code.
               One of 'expression' (the default), 'subquery', or 'sql_script'.
        exact: whether to require an exact match (True), or only that the
               student AST contains the solution AST. If not specified, this
               defaults to ``True`` if ``sql`` is not specified, and to ``False``
               if ``sql`` is specified. You can always specify it manually.

    :Example:
        Example 1 - Suppose the solution code is ``SELECT * FROM cities`` and
        you want to verify whether the `FROM` part is correct::

            Ex().check_node('SelectStmt').from_clause().has_equal_ast()

        Example 2 - Suppose the solution code is
        ``SELECT * FROM b WHERE id > 1 AND name = 'filip'``. Then the following
        SCT makes sure ``id > 1`` was used somewhere in the WHERE clause::

            Ex().check_node('SelectStmt') \\
                .check_edge('where_clause') \\
                .has_equal_ast(sql = 'id > 1')
    """
    ast = state.ast_dispatcher.ast_mod
    # when ``sql`` is given, parse it with the dialect's parser; otherwise
    # compare against the (possibly zoomed-in) solution AST
    sol_ast = state.solution_ast if sql is None else ast.parse(sql, start)

    # if sql is set, exact defaults to False.
    # if sql not set, exact defaults to True.
    if exact is None:
        exact = sql is None

    # ASTs are compared via their textual repr
    stu_rep = repr(state.student_ast)
    sol_rep = repr(sol_ast)

    def get_str(ast, code, sql):
        # best-effort human-readable snippet of the expected code for the
        # feedback message; None when nothing readable can be recovered
        if sql:
            return sql
        if isinstance(ast, str):
            return ast
        try:
            return ast.get_text(code)
        except:
            return None

    sol_str = get_str(state.solution_ast, state.solution_code, sql)
    _msg = incorrect_msg.format(
        ast_path=state.get_ast_path() or "highlighted code",
        extra="The checker expected to find `{}` in there.".format(sol_str)
        if sol_str
        else "Something is missing.",
    )

    # exact: reprs must be identical; non-exact: solution repr must occur as a
    # substring of the student repr
    if (exact and (sol_rep != stu_rep)) or (not exact and (sol_rep not in stu_rep)):
        state.report(Feedback(_msg))
    return state
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
incorrect_msg: feedback message if student and solution ASTs don't match
sql : optional code to use instead of the solution ast that is zoomed in on.
start: if ``sql`` arg is used, the parser rule to parse the sql code.
One of 'expression' (the default), 'subquery', or 'sql_script'.
exact: whether to require an exact match (True), or only that the
student AST contains the solution AST. If not specified, this
defaults to ``True`` if ``sql`` is not specified, and to ``False``
if ``sql`` is specified. You can always specify it manually.
:Example:
Example 1 - Suppose the solution code is ::
SELECT * FROM cities
and you want to verify whether the `FROM` part is correct: ::
Ex().check_node('SelectStmt').from_clause().has_equal_ast()
Example 2 - Suppose the solution code is ::
SELECT * FROM b WHERE id > 1 AND name = 'filip'
Then the following SCT makes sure ``id > 1`` was used somewhere in the WHERE clause.::
Ex().check_node('SelectStmt') \\/
.check_edge('where_clause') \\/
.has_equal_ast(sql = 'id > 1') | entailment |
def cache_model(key_params, timeout='default'):
    """Caching decorator for app models in ``task.perform``.

    ``key_params`` names the task-data fields used to build the cache key;
    ``timeout`` selects the cache timeout profile.
    """
    def wrap(fn):
        decorator = CacheModelDecorator()
        return decorator.decorate(key_params, timeout, fn)

    return wrap
def cache_page(key_params, timeout='default'):
    """Cache a page (slice) of a list of AppModels.

    ``key_params`` names the task-data fields used to build the cache key;
    ``timeout`` selects the cache timeout profile.
    """
    def wrap(fn):
        return CachePageDecorator().decorate(key_params, timeout, fn)

    return wrap
def create_key_for_data(prefix, data, key_params):
    """Build a cache key from the task ``data`` using the decorator-defined
    ``key_params``.

    Each key param becomes a ``name:value`` fragment; missing params render
    with an empty value, and list values are flattened with ``" -"``.
    """
    payload = data.get_data()

    def fragment(key):
        value = payload.get(key, '')
        # lists are joined so they yield a single stable key fragment
        if type(value) is list:
            value = " -".join(value)
        return "{0}:{1}".format(key, value)

    return "{0}-{1}".format(prefix, "-".join(fragment(k) for k in key_params))
def omnihash(obj):
    """Hash ``obj``, recursing into containers that are not hashable themselves.

    dicts/sets hash order-independently (via frozenset); lists/tuples hash
    order-dependently (via tuple). Anything else uses the builtin ``hash``.
    """
    if isinstance(obj, dict):
        return hash(frozenset((key, omnihash(value)) for key, value in obj.items()))
    if isinstance(obj, (tuple, list)):
        return hash(tuple(omnihash(item) for item in obj))
    if isinstance(obj, set):
        return hash(frozenset(omnihash(item) for item in obj))
    return hash(obj)
def items_differ(jsonitems, dbitems, subfield_dict):
    """Check whether or not ``jsonitems`` (scraped dicts) and ``dbitems``
    (model instances) differ, recursing into the subfields named in
    ``subfield_dict``. Returns True on any difference.
    """
    # short circuit common cases
    if len(jsonitems) == len(dbitems) == 0:
        # both are empty
        return False
    elif len(jsonitems) != len(dbitems):
        # if lengths differ, they're definitely different
        return True

    original_jsonitems = jsonitems
    # work on a copy: matched items are popped below so each can match at most once
    jsonitems = copy.deepcopy(jsonitems)
    # NOTE(review): assumes all json items share the first item's keys — confirm
    keys = jsonitems[0].keys()

    # go over dbitems looking for matches
    for dbitem in dbitems:
        order = getattr(dbitem, 'order', None)
        match = None
        for i, jsonitem in enumerate(jsonitems):
            # check if all keys (excluding subfields) match
            for k in keys:
                if k not in subfield_dict and getattr(dbitem, k) != jsonitem.get(k, None):
                    break
            else:
                # all fields match so far, possibly equal, just check subfields now
                for k in subfield_dict:
                    jsonsubitems = jsonitem[k]
                    dbsubitems = list(getattr(dbitem, k).all())
                    if items_differ(jsonsubitems, dbsubitems, subfield_dict[k][2]):
                        break
                else:
                    # if the dbitem sets 'order', then the order matters:
                    # the json item's original position must equal it
                    if order is not None and int(order) != original_jsonitems.index(jsonitem):
                        break
                    # these items are equal, so let's mark it for removal
                    match = i
                    break
        if match is not None:
            # item exists in both, remove from jsonitems
            jsonitems.pop(match)
        else:
            # exists in db but not json
            return True

    # if we get here, jsonitems has to be empty because we asserted that the length was
    # the same and we found a match for each thing in dbitems, here's a safety check just in case
    if jsonitems:  # pragma: no cover
        return True
    return False
def resolve_json_id(self, json_id, allow_no_match=False):
    """
    Given an id found in scraped JSON, return a DB id for the object.

    params:
        json_id:        id from json
        allow_no_match: just return None if id can't be resolved

    returns:
        database id

    raises:
        UnresolvedIdError if id couldn't be resolved
        (NOTE(review): originally documented as ValueError — presumably
        UnresolvedIdError derives from it; confirm)
    """
    if not json_id:
        return None

    if json_id.startswith('~'):
        # pseudo-id: a '~'-prefixed JSON spec resolved by querying the database
        # keep caches of all the pseudo-ids to avoid doing 1000s of lookups during import
        if json_id not in self.pseudo_id_cache:
            spec = get_pseudo_id(json_id)
            spec = self.limit_spec(spec)

            # limit_spec may return either a Q object or a plain filter dict
            if isinstance(spec, Q):
                objects = self.model_class.objects.filter(spec)
            else:
                objects = self.model_class.objects.filter(**spec)
            ids = {each.id for each in objects}
            if len(ids) == 1:
                self.pseudo_id_cache[json_id] = ids.pop()
                errmsg = None
            elif not ids:
                errmsg = 'cannot resolve pseudo id to {}: {}'.format(
                    self.model_class.__name__, json_id)
            else:
                errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format(
                    self.model_class.__name__, json_id, ids)

            # either raise or log error
            if errmsg:
                if not allow_no_match:
                    raise UnresolvedIdError(errmsg)
                else:
                    self.error(errmsg)
                # cache the failure too, so we don't retry the query
                self.pseudo_id_cache[json_id] = None

        # return the cached object
        return self.pseudo_id_cache[json_id]

    # get the id that the duplicate points to, or use self
    json_id = self.duplicates.get(json_id, json_id)

    try:
        return self.json_to_db_id[json_id]
    except KeyError:
        raise UnresolvedIdError('cannot resolve id: {}'.format(json_id))
params:
json_id: id from json
allow_no_match: just return None if id can't be resolved
returns:
database id
raises:
ValueError if id couldn't be resolved | entailment |
def import_directory(self, datadir):
    """Import every JSON file for this importer's type from ``datadir``."""
    pattern = os.path.join(datadir, self._type + '_*.json')

    def stream():
        # lazily load each matching file, keyed by filename pattern
        for path in glob.glob(pattern):
            with open(path) as fh:
                yield json.load(fh)

    return self.import_data(stream())
def _prepare_imports(self, dicts):
    """Deduplicate the incoming stream of scraped dicts.

    Yields ``(json_id, data)`` pairs for unique items; items with identical
    content (same omnihash) are recorded in ``self.duplicates`` instead of
    being yielded again. Also a good override point when the import order
    needs adjusting (see OrganizationImporter).
    """
    # content hash -> json_id of the first item seen with that content
    first_seen = {}
    for data in dicts:
        json_id = data.pop('_id')
        content_hash = omnihash(data)
        if content_hash in first_seen:
            # map this id onto the identical item we already yielded
            self.duplicates[json_id] = first_seen[content_hash]
        else:
            first_seen[content_hash] = json_id
            yield json_id, data
also serves as a good place to override if anything special has to be done to the
order of the import stream (see OrganizationImporter) | entailment |
def import_data(self, data_items):
    """Import an iterable of scraped dicts together; returns a summary
    record keyed by this importer's type."""
    # keep counts of all actions
    record = {
        'insert': 0, 'update': 0, 'noop': 0,
        'start': utcnow(),
        'records': {
            'insert': [],
            'update': [],
            'noop': [],
        }
    }

    for json_id, data in self._prepare_imports(data_items):
        # what is one of 'insert' / 'update' / 'noop'
        obj_id, what = self.import_item(data)
        self.json_to_db_id[json_id] = obj_id
        record['records'][what].append(obj_id)
        record[what] += 1

    # all objects are loaded, a perfect time to do inter-object resolution and other tasks
    self.postimport()

    record['end'] = utcnow()

    return {self._type: record}
def import_item(self, data):
    """Import a single scraped dict. Used by ``import_data``.

    Returns ``(db_id, action)`` where action is 'insert', 'update' or 'noop'.
    """
    what = 'noop'

    # remove the JSON _id (may still be there if called directly)
    data.pop('_id', None)

    # add fields/etc.
    data = self.apply_transformers(data)
    data = self.prepare_for_db(data)

    try:
        obj = self.get_object(data)
    except self.model_class.DoesNotExist:
        obj = None

    # remove pupa_id which does not belong in the OCD data models
    pupa_id = data.pop('pupa_id', None)

    # pull related fields off
    related = {}
    for field in self.related_models:
        related[field] = data.pop(field)

    # obj existed, check if we need to do an update
    if obj:
        if obj.id in self.json_to_db_id.values():
            # two distinct json items resolved to the same db object
            raise DuplicateItemError(data, obj, related.get('sources', []))
        # check base object for changes; locked fields are user-edited
        # and must not be overwritten
        for key, value in data.items():
            if getattr(obj, key) != value and key not in obj.locked_fields:
                setattr(obj, key, value)
                what = 'update'

        updated = self._update_related(obj, related, self.related_models)
        if updated:
            what = 'update'

        if what == 'update':
            obj.save()

    # need to create the data
    else:
        what = 'insert'
        try:
            obj = self.model_class.objects.create(**data)
        except Exception as e:
            raise DataImportError('{} while importing {} as {}'.format(e, data,
                                                                       self.model_class))
        self._create_related(obj, related, self.related_models)

    if pupa_id:
        # remember the scraper-provided id so later runs can find this object
        Identifier.objects.get_or_create(identifier=pupa_id,
                                         jurisdiction_id=self.jurisdiction_id,
                                         defaults={'content_object': obj})

    return obj.id, what
def _update_related(self, obj, related, subfield_dict):
    """
    update DB objects related to a base object

    obj:           a base object to create related
    related:       dict mapping field names to lists of related objects
    subfield_dict: where to get the next layer of subfields

    Returns True if anything was changed.
    """
    # keep track of whether or not anything was updated
    updated = False

    # for each related field - check if there are differences
    for field, items in related.items():
        # skip subitem check if it's locked anyway
        if field in obj.locked_fields:
            continue

        # get items from database
        dbitems = list(getattr(obj, field).all())
        dbitems_count = len(dbitems)

        # default to doing nothing
        do_delete = do_update = False

        if items and dbitems_count:  # we have items, so does db, check for conflict
            do_delete = do_update = items_differ(items, dbitems, subfield_dict[field][2])
        elif items and not dbitems_count:  # we have items, db doesn't, just update
            do_update = True
        elif not items and dbitems_count:  # db has items, we don't, just delete
            do_delete = True
        # otherwise: no items or dbitems, so nothing is done

        # don't delete if field is in merge_related
        if field in self.merge_related:
            new_items = []

            # build a list of keyfields to existing database objects
            keylist = self.merge_related[field]
            keyed_dbitems = {tuple(getattr(item, k) for k in keylist):
                             item for item in dbitems}

            # go through 'new' items
            # if item with the same keyfields exists:
            #    update the database item w/ the new item's properties
            # else:
            #    add it to new_items
            for item in items:
                key = tuple(item.get(k) for k in keylist)
                dbitem = keyed_dbitems.get(key)
                if not dbitem:
                    new_items.append(item)
                else:
                    # update dbitem
                    for fname, val in item.items():
                        setattr(dbitem, fname, val)
                    dbitem.save()

            # import anything that made it to new_items in the usual fashion
            self._create_related(obj, {field: new_items}, subfield_dict)
            # NOTE(review): the merge path never sets ``updated`` — presumably
            # intentional since merged items save themselves; confirm
        else:
            # default logic is to just wipe and recreate subobjects
            if do_delete:
                updated = True
                getattr(obj, field).all().delete()

            if do_update:
                updated = True
                self._create_related(obj, {field: items}, subfield_dict)

    return updated
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields | entailment |
def _create_related(self, obj, related, subfield_dict):
    """
    create DB objects related to a base object

    obj:           a base object to create related
    related:       dict mapping field names to lists of related objects
    subfield_dict: where to get the next layer of subfields
    """
    for field, items in related.items():
        subobjects = []
        all_subrelated = []
        # each subfield entry is (model class, FK-back field name, nested subfield dict)
        Subtype, reverse_id_field, subsubdict = subfield_dict[field]
        for order, item in enumerate(items):
            # pull off 'subrelated' (things that are related to this obj)
            subrelated = {}
            for subfield in subsubdict:
                subrelated[subfield] = item.pop(subfield)

            if field in self.preserve_order:
                item['order'] = order

            # point the child back at its parent
            item[reverse_id_field] = obj.id

            try:
                subobjects.append(Subtype(**item))
                all_subrelated.append(subrelated)
            except Exception as e:
                raise DataImportError('{} while importing {} as {}'.format(e, item, Subtype))

        # add all subobjects at once (really great for actions & votes)
        try:
            Subtype.objects.bulk_create(subobjects)
        except Exception as e:
            raise DataImportError('{} while importing {} as {}'.format(e, subobjects, Subtype))

        # after import the subobjects, import their subsubobjects
        for subobj, subrel in zip(subobjects, all_subrelated):
            self._create_related(subobj, subrel, subsubdict)
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields | entailment |
def add_edge(self, fro, to):
    """Add a dependency edge ``fro -> to``, registering both nodes.

    For topological sorting this means ``fro`` must be sorted *before*
    ``to``::

        [ FROM ] ------> [ TO ]
        Committee on Finance -> Subcommittee of the Finance Committee on Budget
                             -> Subcommittee of the Finance Committee on Roads
    """
    for node in (fro, to):
        self.add_node(node)
    self.edges[fro].add(to)
the depedency runs from the parent to the child - which is to say that
the parent is required to be sorted *before* the child.
[ FROM ] ------> [ TO ]
Committee on Finance -> Subcommittee of the Finance Committee on Budget
-> Subcommittee of the Finance Committee on Roads | entailment |
def leaf_nodes(self):
    """Return the set of nodes with no edges pointing at them, i.e. the
    nodes without dependencies."""
    # collect every node that appears as an edge target
    targeted = set()
    for targets in self.edges.values():
        targeted.update(targets)
    # whatever is left has nothing pointing at it
    return self.nodes - targeted
helpful to find all nodes without dependencies. | entailment |
def prune_node(self, node, remove_backrefs=False):
    """
    remove node `node` from the network (including any edges that may
    have been pointing at `node`).

    Raises ValueError if other nodes still point at ``node`` and
    ``remove_backrefs`` is False.
    """
    if not remove_backrefs:
        # refuse to remove a node that other nodes still depend on
        for fro, connections in self.edges.items():
            if node in self.edges[fro]:
                raise ValueError("""Attempting to remove a node with
                                 backrefs. You may consider setting
                                 `remove_backrefs` to true.""")

    # OK. Otherwise, let's do our removal.
    self.nodes.remove(node)
    if node in self.edges:
        # Remove add edges from this node if we're pruning it.
        self.edges.pop(node)

    for fro, connections in self.edges.items():
        # Remove any links to this node (if they exist)
        if node in self.edges[fro]:
            # If we should remove backrefs:
            self.edges[fro].remove(node)
have been pointing at `node`). | entailment |
def sort(self):
    """Yield nodes in topological order, so dependencies come before the
    nodes that depend on them.

    Raises CyclicGraphError when no dependency-free node can be found
    while nodes remain (i.e. the graph contains a cycle).
    """
    while self.nodes:
        leaves = list(self.leaf_nodes())
        if not leaves:
            # every remaining node has an incoming edge -> cycle
            raise CyclicGraphError("Sorting has found a cyclic graph.")
        for node in leaves:
            self.prune_node(node)
            yield node
dependencies before leaf nodes. | entailment |
def dot(self):
    """Render the network as a string that dot(1) can process."""
    pieces = ["digraph graphname {"]
    for fro, targets in self.edges.items():
        for to in targets:
            pieces.append("%s -> %s;" % (fro, to))
    pieces.append("}")
    return "".join(pieces)
def cycles(self):
    """
    Fairly expensive cycle detection algorithm. This method
    will return the shortest unique cycles that were detected.

    Debug usage may look something like:

        print("The following cycles were found:")
        for cycle in network.cycles():
            print(" ", " -> ".join(cycle))
    """
    def walk_node(node, seen):
        """
        Walk each top-level node we know about, and recurse
        along the graph, yielding node tuples that end where a
        previously-seen node repeats (i.e. a cycle).
        """
        if node in seen:
            yield (node,)
            return
        seen.add(node)
        for edge in self.edges[node]:
            # copy ``seen`` per branch so sibling paths don't interfere
            for cycle in walk_node(edge, set(seen)):
                yield (node,) + cycle

    # First, let's get a iterable of all known cycles.
    cycles = chain.from_iterable(
        (walk_node(node, set()) for node in self.nodes))

    shortest = set()

    # Now, let's go through and sift through the cycles, finding
    # the shortest unique cycle known, ignoring cycles which contain
    # already known cycles.
    for cycle in sorted(cycles, key=len):
        for el in shortest:
            if set(el).issubset(set(cycle)):
                break
        else:
            shortest.add(cycle)

    # And return that unique list.
    return shortest
will return the shortest unique cycles that were detected.
Debug usage may look something like:
print("The following cycles were found:")
for cycle in network.cycles():
print(" ", " -> ".join(cycle)) | entailment |
def pseudo_organization(organization, classification, default=None):
    """Helper for setting an appropriate ID for organizations.

    Exactly one of ``organization`` / ``classification`` may be given;
    ``default`` supplies a fallback classification when neither is set.
    """
    if organization and classification:
        raise ScrapeValueError('cannot specify both classification and organization')
    if classification:
        return _make_pseudo_id(classification=classification)
    if organization:
        # accept a scraped Organization, an id string, or a pseudo-id spec dict
        if isinstance(organization, Organization):
            return organization._id
        if isinstance(organization, str):
            return organization
        return _make_pseudo_id(**organization)
    if default is not None:
        return _make_pseudo_id(classification=default)
    return None
def add_membership(self, name_or_org, role='member', **kwargs):
    """Add a membership in an organization and return the Membership
    object in case there are more details to add.

    ``name_or_org`` may be an Organization instance or an organization
    name (resolved later via a pseudo id).
    """
    if isinstance(name_or_org, Organization):
        org_id = name_or_org._id
    else:
        org_id = _make_pseudo_id(name=name_or_org)
    membership = Membership(person_id=self._id,
                            person_name=self.name,
                            organization_id=org_id,
                            role=role, **kwargs)
    self._related.append(membership)
    return membership
object in case there are more details to add | entailment |
def save_object(self, obj):
    """
    Save object to disk as JSON.

    Generally shouldn't be called directly. Also recursively saves the
    object's ``_related`` objects, and validates after writing (raising
    only when strict validation is enabled).
    """
    obj.pre_save(self.jurisdiction.jurisdiction_id)

    # '/' is replaced so ids can't escape the data directory
    filename = '{0}_{1}.json'.format(obj._type, obj._id).replace('/', '-')

    self.info('save %s %s as %s', obj._type, obj, filename)
    self.debug(json.dumps(OrderedDict(sorted(obj.as_dict().items())),
                          cls=utils.JSONEncoderPlus, indent=4, separators=(',', ': ')))

    self.output_names[obj._type].add(filename)

    with open(os.path.join(self.datadir, filename), 'w') as f:
        json.dump(obj.as_dict(), f, cls=utils.JSONEncoderPlus)

    # validate after writing, allows for inspection on failure
    try:
        obj.validate()
    except ValueError as ve:
        if self.strict_validation:
            raise ve
        else:
            self.warning(ve)

    # after saving and validating, save subordinate objects
    for obj in obj._related:
        self.save_object(obj)
Generally shouldn't be called directly. | entailment |
def validate(self, schema=None):
    """
    Validate that we have a valid object.

    On error, this will raise a `ScrapeValueError`

    This also expects that the schemas assume that omitting required
    in the schema asserts the field is optional, not required. This is
    due to upstream schemas being in JSON Schema v3, and not validictory's
    modified syntax.
    ^ TODO: FIXME
    """
    if schema is None:
        schema = self._schema

    # teach the validator about the custom "datetime" type used by the schemas
    type_checker = Draft3Validator.TYPE_CHECKER.redefine(
        "datetime", lambda c, d: isinstance(d, (datetime.date, datetime.datetime))
    )

    ValidatorCls = jsonschema.validators.extend(Draft3Validator, type_checker=type_checker)
    validator = ValidatorCls(schema, format_checker=FormatChecker())

    # collect every validation error so the message reports all of them at once
    errors = [str(error) for error in validator.iter_errors(self.as_dict())]
    if errors:
        raise ScrapeValueError('validation of {} {} failed: {}'.format(
            self.__class__.__name__, self._id, '\n\t'+'\n\t'.join(errors)
        ))
On error, this will raise a `ScrapeValueError`
This also expects that the schemas assume that omitting required
in the schema asserts the field is optional, not required. This is
due to upstream schemas being in JSON Schema v3, and not validictory's
modified syntax.
^ TODO: FIXME | entailment |
def add_source(self, url, *, note=''):
    """Add a source URL from which data was collected, with an optional note."""
    self.sources.append({'url': url, 'note': note})
def evolve_genomes(rng, pop, params, recorder=None):
    """
    Evolve a population without tree sequence recordings. In other words,
    complete genomes must be simulated and tracked.

    :param rng: random number generator
    :type rng: :class:`fwdpy11.GSLrng`
    :param pop: A population
    :type pop: :class:`fwdpy11.DiploidPopulation`
    :param params: simulation parameters
    :type params: :class:`fwdpy11.ModelParams`
    :param recorder: (None) A temporal sampler/data recorder.
    :type recorder: callable

    .. note::
        If recorder is None,
        then :class:`fwdpy11.RecordNothing` will be used.
    """
    import warnings

    # Test parameters while suppressing warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Will throw exception if anything is wrong:
        params.validate()

    from ._fwdpy11 import MutationRegions
    from ._fwdpy11 import evolve_without_tree_sequences
    from ._fwdpy11 import dispatch_create_GeneticMap

    # fraction of mutations that are neutral, used to weight the regions
    pneutral = params.mutrate_n/(params.mutrate_n+params.mutrate_s)
    mm = MutationRegions.create(pneutral, params.nregions, params.sregions)
    rm = dispatch_create_GeneticMap(params.recrate, params.recregions)

    if recorder is None:
        from ._fwdpy11 import RecordNothing
        recorder = RecordNothing()

    evolve_without_tree_sequences(rng, pop, params.demography,
                                  params.mutrate_n, params.mutrate_s,
                                  params.recrate, mm, rm, params.gvalue,
                                  recorder, params.pself, params.prune_selected)
complete genomes must be simulated and tracked.
:param rng: random number generator
:type rng: :class:`fwdpy11.GSLrng`
:param pop: A population
:type pop: :class:`fwdpy11.DiploidPopulation`
:param params: simulation parameters
:type params: :class:`fwdpy11.ModelParams`
:param recorder: (None) A temporal sampler/data recorder.
:type recorder: callable
.. note::
If recorder is None,
then :class:`fwdpy11.RecordNothing` will be used. | entailment |
def _initializeIndividualTable(pop, tc):
    """
    Fill the tskit IndividualTable on ``tc`` from ``pop``.

    Returns node ID -> individual (row index) map.
    """
    # First, alive individuals: diploid i owns nodes 2i and 2i+1
    individal_nodes = {}
    for i in range(pop.N):
        individal_nodes[2*i] = i
        individal_nodes[2*i+1] = i
    metadata_strings = _generate_individual_metadata(pop.diploid_metadata, tc)

    # Now, preserved nodes: each ancient sample becomes a new individual row
    num_ind_nodes = pop.N
    for i in pop.ancient_sample_metadata:
        # NOTE(review): this tests a metadata record against node-id keys —
        # presumably always passes; confirm the intended check
        assert i not in individal_nodes, "indivudal record error"
        individal_nodes[i.nodes[0]] = num_ind_nodes
        individal_nodes[i.nodes[1]] = num_ind_nodes
        num_ind_nodes += 1

    metadata_strings.extend(_generate_individual_metadata(
        pop.ancient_sample_metadata, tc))

    # pack the per-individual metadata into the flat/offset form tskit expects
    md, mdo = tskit.pack_bytes(metadata_strings)
    flags = [0 for i in range(pop.N+len(pop.ancient_sample_metadata))]
    tc.individuals.set_columns(flags=flags, metadata=md, metadata_offset=mdo)
    return individal_nodes
def dump_tables_to_tskit(pop):
"""
Converts fwdpy11.TableCollection to an
tskit.TreeSequence
"""
node_view = np.array(pop.tables.nodes, copy=True)
node_view['time'] -= node_view['time'].max()
node_view['time'][np.where(node_view['time'] != 0.0)[0]] *= -1.0
edge_view = np.array(pop.tables.edges, copy=False)
mut_view = np.array(pop.tables.mutations, copy=False)
tc = tskit.TableCollection(pop.tables.genome_length)
# We must initialize population and individual
# tables before we can do anything else.
# Attempting to set population to anything
# other than -1 in an tskit.NodeTable will
# raise an exception if the PopulationTable
# isn't set up.
_initializePopulationTable(node_view, tc)
node_to_individual = _initializeIndividualTable(pop, tc)
individual = [-1 for i in range(len(node_view))]
for k, v in node_to_individual.items():
individual[k] = v
flags = [1]*2*pop.N + [0]*(len(node_view) - 2*pop.N)
# Bug fixed in 0.3.1: add preserved nodes to samples list
for i in pop.tables.preserved_nodes:
flags[i] = 1
tc.nodes.set_columns(flags=flags, time=node_view['time'],
population=node_view['population'],
individual=individual)
tc.edges.set_columns(left=edge_view['left'],
right=edge_view['right'],
parent=edge_view['parent'],
child=edge_view['child'])
mpos = np.array([pop.mutations[i].pos for i in mut_view['key']])
ancestral_state = np.zeros(len(mut_view), dtype=np.int8)+ord('0')
ancestral_state_offset = np.arange(len(mut_view)+1, dtype=np.uint32)
tc.sites.set_columns(position=mpos,
ancestral_state=ancestral_state,
ancestral_state_offset=ancestral_state_offset)
derived_state = np.zeros(len(mut_view), dtype=np.int8)+ord('1')
md, mdo = _generate_mutation_metadata(pop)
tc.mutations.set_columns(site=np.arange(len(mpos), dtype=np.int32),
node=mut_view['node'],
derived_state=derived_state,
derived_state_offset=ancestral_state_offset,
metadata=md,
metadata_offset=mdo)
return tc.tree_sequence() | Converts fwdpy11.TableCollection to an
tskit.TreeSequence | entailment |
def mslike(pop, **kwargs):
"""
Function to establish default parameters
for a single-locus simulation for standard pop-gen
modeling scenarios.
:params pop: An instance of :class:`fwdpy11.DiploidPopulation`
:params kwargs: Keyword arguments.
"""
import fwdpy11
if isinstance(pop, fwdpy11.DiploidPopulation) is False:
raise ValueError("incorrect pop type: " + str(type(pop)))
defaults = {'simlen': 10*pop.N,
'beg': 0.0,
'end': 1.0,
'theta': 100.0,
'pneutral': 1.0,
'rho': 100.0,
'dfe': None
}
for key, value in kwargs.items():
if key in defaults:
defaults[key] = value
import numpy as np
params = {'demography': np.array([pop.N]*defaults['simlen'],
dtype=np.uint32),
'nregions': [fwdpy11.Region(defaults['beg'],
defaults['end'], 1.0)],
'recregions': [fwdpy11.Region(defaults['beg'],
defaults['end'], 1.0)],
'rates': ((defaults['pneutral']*defaults['theta'])/(4.0*pop.N),
((1.0-defaults['pneutral'])*defaults['theta']) /
(4.0*pop.N),
defaults['rho']/(4.0*float(pop.N))),
'gvalue': fwdpy11.Multiplicative(2.0)
}
if defaults['dfe'] is None:
params['sregions'] = []
else:
params['sregions'] = [defaults['dfe']]
return params | Function to establish default parameters
for a single-locus simulation for standard pop-gen
modeling scenarios.
:params pop: An instance of :class:`fwdpy11.DiploidPopulation`
:params kwargs: Keyword arguments. | entailment |
def limit_spec(self, spec):
"""
Whenever we do a Pseudo ID lookup from the database, we need to limit
based on the memberships -> organization -> jurisdiction, so we scope
the resolution.
"""
if list(spec.keys()) == ['name']:
# if we're just resolving on name, include other names
return ((Q(name=spec['name']) | Q(other_names__name=spec['name'])) &
Q(memberships__organization__jurisdiction_id=self.jurisdiction_id))
spec['memberships__organization__jurisdiction_id'] = self.jurisdiction_id
return spec | Whenever we do a Pseudo ID lookup from the database, we need to limit
based on the memberships -> organization -> jurisdiction, so we scope
the resolution. | entailment |
def validate(self):
"""
Error check model params.
:raises TypeError: Throws TypeError if validation fails.
"""
if self.nregions is None:
raise TypeError("neutral regions cannot be None")
if self.sregions is None:
raise TypeError("selected regions cannot be None")
if self.recregions is None:
raise TypeError("recombination regions cannot be None")
if self.demography is None:
raise TypeError("demography cannot be None")
if self.prune_selected is None:
raise TypeError("prune_selected cannot be None")
if self.gvalue is None:
raise TypeError("gvalue cannot be None")
if self.rates is None:
raise TypeError("rates cannot be None") | Error check model params.
:raises TypeError: Throws TypeError if validation fails. | entailment |
def _prepare_imports(self, dicts):
""" an override for prepare imports that sorts the imports by parent_id dependencies """
# all pseudo parent ids we've seen
pseudo_ids = set()
# pseudo matches
pseudo_matches = {}
# get prepared imports from parent
prepared = dict(super(OrganizationImporter, self)._prepare_imports(dicts))
# collect parent pseudo_ids
for _, data in prepared.items():
parent_id = data.get('parent_id', None) or ''
if parent_id.startswith('~'):
pseudo_ids.add(parent_id)
# turn pseudo_ids into a tuple of dictionaries
pseudo_ids = [(ppid, get_pseudo_id(ppid)) for ppid in pseudo_ids]
# loop over all data again, finding the pseudo ids true json id
for json_id, data in prepared.items():
# check if this matches one of our ppids
for ppid, spec in pseudo_ids:
match = True
for k, v in spec.items():
if data[k] != v:
match = False
break
if match:
if ppid in pseudo_matches:
raise UnresolvedIdError('multiple matches for pseudo id: ' + ppid)
pseudo_matches[ppid] = json_id
# toposort the nodes so parents are imported first
network = Network()
in_network = set()
import_order = []
for json_id, data in prepared.items():
parent_id = data.get('parent_id', None)
# resolve pseudo_ids to their json id before building the network
if parent_id in pseudo_matches:
parent_id = pseudo_matches[parent_id]
network.add_node(json_id)
if parent_id:
# Right. There's an import dep. We need to add the edge from
# the parent to the current node, so that we import the parent
# before the current node.
network.add_edge(parent_id, json_id)
# resolve the sorted import order
for jid in network.sort():
import_order.append((jid, prepared[jid]))
in_network.add(jid)
# ensure all data made it into network (paranoid check, should never fail)
if in_network != set(prepared.keys()): # pragma: no cover
raise PupaInternalError("import is missing nodes in network set")
return import_order | an override for prepare imports that sorts the imports by parent_id dependencies | entailment |
def evolvets(rng, pop, params, simplification_interval, recorder=None,
suppress_table_indexing=False, record_gvalue_matrix=False,
stopping_criterion=None,
track_mutation_counts=False,
remove_extinct_variants=True):
"""
Evolve a population with tree sequence recording
:param rng: random number generator
:type rng: :class:`fwdpy11.GSLrng`
:param pop: A population
:type pop: :class:`fwdpy11.DiploidPopulation`
:param params: simulation parameters
:type params: :class:`fwdpy11.ModelParams`
:param simplification_interval: Number of generations between simplifications.
:type simplification_interval: int
:param recorder: (None) A temporal sampler/data recorder.
:type recorder: callable
:param suppress_table_indexing: (False) Prevents edge table indexing until end of simulation
:type suppress_table_indexing: boolean
:param record_gvalue_matrix: (False) Whether to record genetic values into :attr:`fwdpy11.Population.genetic_values`.
:type record_gvalue_matrix: boolean
The recording of genetic values into :attr:`fwdpy11.Population.genetic_values` is supprssed by default. First, it
is redundant with :attr:`fwdpy11.DiploidMetadata.g` for the common case of mutational effects on a single trait.
Second, we save some memory by not tracking these matrices. However, it is useful to track these data for some
cases when simulating multivariate mutational effects (pleiotropy).
.. note::
If recorder is None,
then :class:`fwdpy11.NoAncientSamples` will be used.
"""
import warnings
# Currently, we do not support simulating neutral mutations
# during tree sequence simulations, so we make sure that there
# are no neutral regions/rates:
if len(params.nregions) != 0:
raise ValueError(
"Simulation of neutral mutations on tree sequences not supported (yet).")
# Test parameters while suppressing warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Will throw exception if anything is wrong:
params.validate()
if recorder is None:
from ._fwdpy11 import NoAncientSamples
recorder = NoAncientSamples()
if stopping_criterion is None:
from ._fwdpy11 import _no_stopping
stopping_criterion = _no_stopping
from ._fwdpy11 import MutationRegions
from ._fwdpy11 import dispatch_create_GeneticMap
from ._fwdpy11 import evolve_with_tree_sequences
# TODO: update to allow neutral mutations
pneutral = 0
mm = MutationRegions.create(pneutral, params.nregions, params.sregions)
rm = dispatch_create_GeneticMap(params.recrate, params.recregions)
from ._fwdpy11 import SampleRecorder
sr = SampleRecorder()
evolve_with_tree_sequences(rng, pop, sr, simplification_interval,
params.demography, params.mutrate_s,
mm, rm, params.gvalue,
recorder, stopping_criterion,
params.pself, params.prune_selected is False,
suppress_table_indexing, record_gvalue_matrix,
track_mutation_counts,
remove_extinct_variants) | Evolve a population with tree sequence recording
:param rng: random number generator
:type rng: :class:`fwdpy11.GSLrng`
:param pop: A population
:type pop: :class:`fwdpy11.DiploidPopulation`
:param params: simulation parameters
:type params: :class:`fwdpy11.ModelParams`
:param simplification_interval: Number of generations between simplifications.
:type simplification_interval: int
:param recorder: (None) A temporal sampler/data recorder.
:type recorder: callable
:param suppress_table_indexing: (False) Prevents edge table indexing until end of simulation
:type suppress_table_indexing: boolean
:param record_gvalue_matrix: (False) Whether to record genetic values into :attr:`fwdpy11.Population.genetic_values`.
:type record_gvalue_matrix: boolean
The recording of genetic values into :attr:`fwdpy11.Population.genetic_values` is supprssed by default. First, it
is redundant with :attr:`fwdpy11.DiploidMetadata.g` for the common case of mutational effects on a single trait.
Second, we save some memory by not tracking these matrices. However, it is useful to track these data for some
cases when simulating multivariate mutational effects (pleiotropy).
.. note::
If recorder is None,
then :class:`fwdpy11.NoAncientSamples` will be used. | entailment |
def exponential_size_change(Nstart, Nstop, time):
"""
Generate a list of population sizes
according to exponential size_change model
:param Nstart: population size at onset of size change
:param Nstop: Population size to reach at end of size change
:param time: Time (in generations) to get from Nstart to Nstop
:return: A list of integers representing population size over time.
.. versionadded:: 0.1.1
"""
if time < 1:
raise RuntimeError("time must be >= 1")
if Nstart < 1 or Nstop < 1:
raise RuntimeError("Nstart and Nstop must both be >= 1")
G = math.exp((math.log(Nstop) - math.log(Nstart))/time)
rv = []
for i in range(time):
rv.append(round(Nstart*pow(G, i+1)))
return rv | Generate a list of population sizes
according to exponential size_change model
:param Nstart: population size at onset of size change
:param Nstop: Population size to reach at end of size change
:param time: Time (in generations) to get from Nstart to Nstop
:return: A list of integers representing population size over time.
.. versionadded:: 0.1.1 | entailment |
def split_sql(sql):
"""generate hunks of SQL that are between the bookends
return: tuple of beginning bookend, closing bookend, and contents
note: beginning & end of string are returned as None"""
bookends = ("\n", ";", "--", "/*", "*/")
last_bookend_found = None
start = 0
while start <= len(sql):
results = get_next_occurence(sql, start, bookends)
if results is None:
yield (last_bookend_found, None, sql[start:])
start = len(sql) + 1
else:
(end, bookend) = results
yield (last_bookend_found, bookend, sql[start:end])
start = end + len(bookend)
last_bookend_found = bookend | generate hunks of SQL that are between the bookends
return: tuple of beginning bookend, closing bookend, and contents
note: beginning & end of string are returned as None | entailment |
def check_file(filename=None, show_filename=False, add_semicolon=False):
"""
Check whether an input file is valid PostgreSQL. If no filename is
passed, STDIN is checked.
Returns a status code: 0 if the input is valid, 1 if invalid.
"""
# either work with sys.stdin or open the file
if filename is not None:
with open(filename, "r") as filelike:
sql_string = filelike.read()
else:
with sys.stdin as filelike:
sql_string = sys.stdin.read()
success, msg = check_string(sql_string, add_semicolon=add_semicolon)
# report results
result = 0
if not success:
# possibly show the filename with the error message
prefix = ""
if show_filename and filename is not None:
prefix = filename + ": "
print(prefix + msg)
result = 1
return result | Check whether an input file is valid PostgreSQL. If no filename is
passed, STDIN is checked.
Returns a status code: 0 if the input is valid, 1 if invalid. | entailment |
def check_string(sql_string, add_semicolon=False):
"""
Check whether a string is valid PostgreSQL. Returns a boolean
indicating validity and a message from ecpg, which will be an
empty string if the input was valid, or a description of the
problem otherwise.
"""
prepped_sql = sqlprep.prepare_sql(sql_string, add_semicolon=add_semicolon)
success, msg = ecpg.check_syntax(prepped_sql)
return success, msg | Check whether a string is valid PostgreSQL. Returns a boolean
indicating validity and a message from ecpg, which will be an
empty string if the input was valid, or a description of the
problem otherwise. | entailment |
def check_syntax(string):
""" Check syntax of a string of PostgreSQL-dialect SQL """
args = ["ecpg", "-o", "-", "-"]
with open(os.devnull, "w") as devnull:
try:
proc = subprocess.Popen(args, shell=False,
stdout=devnull,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
_, err = proc.communicate(string)
except OSError:
msg = "Unable to execute 'ecpg', you likely need to install it.'"
raise OSError(msg)
if proc.returncode == 0:
return (True, "")
else:
return (False, parse_error(err)) | Check syntax of a string of PostgreSQL-dialect SQL | entailment |
def get_inventory(self, context):
"""
Will locate vm in vcenter and fill its uuid
:type context: cloudshell.shell.core.context.ResourceCommandContext
"""
vcenter_vm_name = context.resource.attributes['vCenter VM']
vcenter_vm_name = vcenter_vm_name.replace('\\', '/')
vcenter_name = context.resource.attributes['vCenter Name']
self.logger.info('start autoloading vm_path: {0} on vcenter: {1}'.format(vcenter_vm_name, vcenter_name))
with CloudShellSessionContext(context) as cloudshell_session:
session = cloudshell_session
vcenter_api_res = session.GetResourceDetails(vcenter_name)
vcenter_resource = self.model_parser.convert_to_vcenter_model(vcenter_api_res)
si = None
try:
self.logger.info('connecting to vcenter ({0})'.format(vcenter_api_res.Address))
si = self._get_connection_to_vcenter(self.pv_service, session, vcenter_resource, vcenter_api_res.Address)
self.logger.info('loading vm uuid')
vm_loader = VMLoader(self.pv_service)
uuid = vm_loader.load_vm_uuid_by_name(si, vcenter_resource, vcenter_vm_name)
self.logger.info('vm uuid: {0}'.format(uuid))
self.logger.info('loading the ip of the vm')
ip = self._try_get_ip(self.pv_service, si, uuid, vcenter_resource)
if ip:
session.UpdateResourceAddress(context.resource.name, ip)
except Exception:
self.logger.exception("Get inventory command failed")
raise
finally:
if si:
self.pv_service.disconnect(si)
return self._get_auto_load_response(uuid, vcenter_name, context.resource) | Will locate vm in vcenter and fill its uuid
:type context: cloudshell.shell.core.context.ResourceCommandContext | entailment |
def get_snapshots(self, si, logger, vm_uuid):
"""
Restores a virtual machine to a snapshot
:param vim.ServiceInstance si: py_vmomi service instance
:param logger: Logger
:param vm_uuid: uuid of the virtual machine
"""
vm = self.pyvmomi_service.find_by_uuid(si, vm_uuid)
logger.info("Get snapshots")
snapshots = SnapshotRetriever.get_vm_snapshots(vm)
return snapshots.keys() | Restores a virtual machine to a snapshot
:param vim.ServiceInstance si: py_vmomi service instance
:param logger: Logger
:param vm_uuid: uuid of the virtual machine | entailment |
def connect(self, address, user, password, port=443):
"""
Connect to vCenter via SSL and return SI object
:param address: vCenter address (host / ip address)
:param user: user name for authentication
:param password:password for authentication
:param port: port for the SSL connection. Default = 443
"""
'# Disabling urllib3 ssl warnings'
requests.packages.urllib3.disable_warnings()
'# Disabling SSL certificate verification'
context = None
import ssl
if hasattr(ssl, 'SSLContext'):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_NONE
try:
if context:
try:
'#si = SmartConnect(host=address, user=user, pwd=password, port=port, sslContext=context)'
si = self.pyvmomi_connect(host=address, user=user, pwd=password, port=port, sslContext=context)
except ssl.SSLEOFError:
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.verify_mode = ssl.CERT_NONE
si = self.pyvmomi_connect(host=address, user=user, pwd=password, port=port, sslContext=context)
else:
'#si = SmartConnect(host=address, user=user, pwd=password, port=port)'
si = self.pyvmomi_connect(host=address, user=user, pwd=password, port=port)
return si
except vim.fault.InvalidLogin as e:
raise VCenterAuthError(e.msg, e)
except IOError as e:
# logger.info("I/O error({0}): {1}".format(e.errno, e.strerror))
raise ValueError('Cannot connect to vCenter, please check that the address is valid') | Connect to vCenter via SSL and return SI object
:param address: vCenter address (host / ip address)
:param user: user name for authentication
:param password:password for authentication
:param port: port for the SSL connection. Default = 443 | entailment |
def find_datacenter_by_name(self, si, path, name):
"""
Finds datacenter in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the datacenter name to return
"""
return self.find_obj_by_path(si, path, name, self.Datacenter) | Finds datacenter in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the datacenter name to return | entailment |
def find_by_uuid(self, si, uuid, is_vm=True, path=None, data_center=None):
"""
Finds vm/host by his uuid in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param uuid: the object uuid
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param is_vm: if true, search for virtual machines, otherwise search for hosts
:param data_center:
"""
if uuid is None:
return None
if path is not None:
data_center = self.find_item_in_path_by_type(si, path, vim.Datacenter)
search_index = si.content.searchIndex
return search_index.FindByUuid(data_center, uuid, is_vm) | Finds vm/host by his uuid in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param uuid: the object uuid
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param is_vm: if true, search for virtual machines, otherwise search for hosts
:param data_center: | entailment |
def find_item_in_path_by_type(self, si, path, obj_type):
"""
This function finds the first item of that type in path
:param ServiceInstance si: pyvmomi ServiceInstance
:param str path: the path to search in
:param type obj_type: the vim type of the object
:return: pyvmomi type instance object or None
"""
if obj_type is None:
return None
search_index = si.content.searchIndex
sub_folder = si.content.rootFolder
if path is None or not path:
return sub_folder
paths = path.split("/")
for currPath in paths:
if currPath is None or not currPath:
continue
manage = search_index.FindChild(sub_folder, currPath)
if isinstance(manage, obj_type):
return manage
return None | This function finds the first item of that type in path
:param ServiceInstance si: pyvmomi ServiceInstance
:param str path: the path to search in
:param type obj_type: the vim type of the object
:return: pyvmomi type instance object or None | entailment |
def find_host_by_name(self, si, path, name):
"""
Finds datastore in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the datastore name to return
"""
return self.find_obj_by_path(si, path, name, self.Host) | Finds datastore in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the datastore name to return | entailment |
def find_datastore_by_name(self, si, path, name):
"""
Finds datastore in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the datastore name to return
"""
return self.find_obj_by_path(si, path, name, self.Datastore) | Finds datastore in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the datastore name to return | entailment |
def find_portgroup(self, si, dv_switch_path, name):
"""
Returns the portgroup on the dvSwitch
:param name: str
:param dv_switch_path: str
:param si: service instance
"""
dv_switch = self.get_folder(si, dv_switch_path)
if dv_switch and dv_switch.portgroup:
for port in dv_switch.portgroup:
if port.name == name:
return port
return None | Returns the portgroup on the dvSwitch
:param name: str
:param dv_switch_path: str
:param si: service instance | entailment |
def find_network_by_name(self, si, path, name):
"""
Finds network in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the datastore name to return
"""
return self.find_obj_by_path(si, path, name, self.Network) | Finds network in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the datastore name to return | entailment |
def find_vm_by_name(self, si, path, name):
"""
Finds vm in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the vm name to return
"""
return self.find_obj_by_path(si, path, name, self.VM) | Finds vm in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the vm name to return | entailment |
def find_obj_by_path(self, si, path, name, type_name):
"""
Finds object in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the object name to return
:param type_name: the name of the type, can be (vm, network, host, datastore)
"""
folder = self.get_folder(si, path)
if folder is None:
raise ValueError('vmomi managed object not found at: {0}'.format(path))
look_in = None
if hasattr(folder, type_name):
look_in = getattr(folder, type_name)
if hasattr(folder, self.ChildEntity):
look_in = folder
if look_in is None:
raise ValueError('vmomi managed object not found at: {0}'.format(path))
search_index = si.content.searchIndex
'#searches for the specific vm in the folder'
return search_index.FindChild(look_in, name) | Finds object in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
:param name: the object name to return
:param type_name: the name of the type, can be (vm, network, host, datastore) | entailment |
def find_dvs_by_path(self,si ,path):
"""
Finds vm in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
"""
dvs = self.get_folder(si, path)
if not dvs:
raise ValueError('Could not find Default DvSwitch in path {0}'.format(path))
elif not isinstance(dvs, vim.dvs.VmwareDistributedVirtualSwitch):
raise ValueError('The object in path {0} is {1} and not a DvSwitch'.format(path, type(dvs)))
return dvs | Finds vm in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...') | entailment |
def get_folder(self, si, path, root=None):
"""
Finds folder in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...')
"""
search_index = si.content.searchIndex
sub_folder = root if root else si.content.rootFolder
if not path:
return sub_folder
paths = [p for p in path.split("/") if p]
child = None
try:
new_root = search_index.FindChild(sub_folder, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
except:
child = None
if child is None and hasattr(sub_folder, self.ChildEntity):
new_root = search_index.FindChild(sub_folder, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
if child is None and hasattr(sub_folder, self.VM):
new_root = search_index.FindChild(sub_folder.vmFolder, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
if child is None and hasattr(sub_folder, self.Datastore):
new_root = search_index.FindChild(sub_folder.datastoreFolder, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
if child is None and hasattr(sub_folder, self.Network):
new_root = search_index.FindChild(sub_folder.networkFolder, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
if child is None and hasattr(sub_folder, self.Host):
new_root = search_index.FindChild(sub_folder.hostFolder, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
if child is None and hasattr(sub_folder, self.Datacenter):
new_root = search_index.FindChild(sub_folder.datacenterFolder, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
if child is None and hasattr(sub_folder, 'resourcePool'):
new_root = search_index.FindChild(sub_folder.resourcePool, paths[0])
if new_root:
child = self.get_folder(si, '/'.join(paths[1:]), new_root)
return child | Finds folder in the vCenter or returns "None"
:param si: pyvmomi 'ServiceInstance'
:param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...') | entailment |
def get_network_by_full_name(self, si, default_network_full_name):
"""
Find network by a Full Name
:param default_network_full_name: <str> Full Network Name - likes 'Root/Folder/Network'
:return:
"""
path, name = get_path_and_name(default_network_full_name)
return self.find_network_by_name(si, path, name) if name else None | Find network by a Full Name
:param default_network_full_name: <str> Full Network Name - likes 'Root/Folder/Network'
:return: | entailment |
def get_obj(self, content, vimtype, name):
"""
Return an object by name for a specific type, if name is None the
first found object is returned
:param content: pyvmomi content object
:param vimtype: the type of object too search
:param name: the object name to return
"""
obj = None
container = self._get_all_objects_by_type(content, vimtype)
# If no name was given will return the first object from list of a objects matching the given vimtype type
for c in container.view:
if name:
if c.name == name:
obj = c
break
else:
obj = c
break
return obj | Return an object by name for a specific type, if name is None the
first found object is returned
:param content: pyvmomi content object
:param vimtype: the type of object too search
:param name: the object name to return | entailment |
def clone_vm(self, clone_params, logger, cancellation_context):
"""
Clone a VM from a template/VM and return the vm oject or throws argument is not valid
:param cancellation_context:
:param clone_params: CloneVmParameters =
:param logger:
"""
result = self.CloneVmResult()
if not isinstance(clone_params.si, self.vim.ServiceInstance):
result.error = 'si must be init as ServiceInstance'
return result
if clone_params.template_name is None:
result.error = 'template_name param cannot be None'
return result
if clone_params.vm_name is None:
result.error = 'vm_name param cannot be None'
return result
if clone_params.vm_folder is None:
result.error = 'vm_folder param cannot be None'
return result
datacenter = self.get_datacenter(clone_params)
dest_folder = self._get_destination_folder(clone_params)
vm_location = VMLocation.create_from_full_path(clone_params.template_name)
template = self._get_template(clone_params, vm_location)
snapshot = self._get_snapshot(clone_params, template)
resource_pool, host = self._get_resource_pool(datacenter.name, clone_params)
if not resource_pool and not host:
raise ValueError('The specifed host, cluster or resource pool could not be found')
'# set relo_spec'
placement = self.vim.vm.RelocateSpec()
if resource_pool:
placement.pool = resource_pool
if host:
placement.host = host
clone_spec = self.vim.vm.CloneSpec()
if snapshot:
clone_spec.snapshot = snapshot
clone_spec.template = False
placement.diskMoveType = 'createNewChildDiskBacking'
placement.datastore = self._get_datastore(clone_params)
# after deployment the vm must be powered off and will be powered on if needed by orchestration driver
clone_spec.location = placement
# clone_params.power_on
# due to hotfix 1 for release 1.0,
clone_spec.powerOn = False
logger.info("cloning VM...")
try:
task = template.Clone(folder=dest_folder, name=clone_params.vm_name, spec=clone_spec)
vm = self.task_waiter.wait_for_task(task=task, logger=logger, action_name='Clone VM',
cancellation_context=cancellation_context)
except TaskFaultException:
raise
except vim.fault.NoPermission as error:
logger.error("vcenter returned - no permission: {0}".format(error))
raise Exception('Permissions is not set correctly, please check the log for more info.')
except Exception as e:
logger.error("error deploying: {0}".format(e))
raise Exception('Error has occurred while deploying, please look at the log for more info.')
result.vm = vm
return result | Clone a VM from a template/VM and return the vm oject or throws argument is not valid
:param cancellation_context:
:param clone_params: CloneVmParameters =
:param logger: | entailment |
def destroy_vm(self, vm, logger):
"""
destroy the given vm
:param vm: virutal machine pyvmomi object
:param logger:
"""
self.power_off_before_destroy(logger, vm)
logger.info(("Destroying VM {0}".format(vm.name)))
task = vm.Destroy_Task()
return self.task_waiter.wait_for_task(task=task, logger=logger, action_name="Destroy VM") | destroy the given vm
:param vm: virutal machine pyvmomi object
:param logger: | entailment |
def destroy_vm_by_name(self, si, vm_name, vm_path, logger):
"""
destroy the given vm
:param si: pyvmomi 'ServiceInstance'
:param vm_name: str name of the vm to destroyed
:param vm_path: str path to the vm that will be destroyed
:param logger:
"""
if vm_name is not None:
vm = self.find_vm_by_name(si, vm_path, vm_name)
if vm:
return self.destroy_vm(vm, logger)
raise ValueError('vm not found') | destroy the given vm
:param si: pyvmomi 'ServiceInstance'
:param vm_name: str name of the vm to destroyed
:param vm_path: str path to the vm that will be destroyed
:param logger: | entailment |
def destroy_vm_by_uuid(self, si, vm_uuid, vm_path, logger):
"""
destroy the given vm
:param si: pyvmomi 'ServiceInstance'
:param vm_uuid: str uuid of the vm to destroyed
:param vm_path: str path to the vm that will be destroyed
:param logger:
"""
if vm_uuid is not None:
vm = self.find_by_uuid(si, vm_uuid, vm_path)
if vm:
return self.destroy_vm(vm, logger)
# return 'vm not found'
# for apply the same Interface as for 'destroy_vm_by_name'
raise ValueError('vm not found') | destroy the given vm
:param si: pyvmomi 'ServiceInstance'
:param vm_uuid: str uuid of the vm to destroyed
:param vm_path: str path to the vm that will be destroyed
:param logger: | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.