Search is not available for this dataset
text stringlengths 75 104k |
|---|
def functional(self):
    """All required enzymes for reaction are functional.

    Returns
    -------
    bool
        True if the gene-protein-reaction (GPR) rule is fulfilled for
        this reaction, or if reaction is not associated to a model,
        otherwise False.
    """
    # Without a model there is no gene state to evaluate against.
    if not self._model:
        return True
    tree, _ = parse_gpr(self.gene_reaction_rule)
    # Genes flagged non-functional count as knocked out for the GPR.
    knocked_out = {gene.id for gene in self.genes if not gene.functional}
    return eval_gpr(tree, knocked_out)
def _update_awareness(self):
"""Make sure all metabolites and genes that are associated with
this reaction are aware of it.
"""
for x in self._metabolites:
x._reaction.add(self)
for x in self._genes:
x._reaction.add(self) |
def remove_from_model(self, remove_orphans=False):
    """Removes the reaction from a model.

    This removes all associations between a reaction the associated
    model, metabolites and genes.  The change is reverted upon exit
    when using the model as a context.

    Parameters
    ----------
    remove_orphans : bool
        Remove orphaned genes and metabolites from the model as well
    """
    # All bookkeeping (constraints, gene/metabolite links, context
    # reversal) is delegated to the owning model.
    self._model.remove_reactions([self], remove_orphans=remove_orphans)
def delete(self, remove_orphans=False):
    """Removes the reaction from a model.

    This removes all associations between a reaction the associated
    model, metabolites and genes.  The change is reverted upon exit
    when using the model as a context.

    .. deprecated::
        Use `reaction.remove_from_model` instead.

    Parameters
    ----------
    remove_orphans : bool
        Remove orphaned genes and metabolites from the model as well
    """
    # Emit the deprecation warning, then forward to the replacement API.
    warn("delete is deprecated. Use reaction.remove_from_model instead",
         DeprecationWarning)
    self.remove_from_model(remove_orphans=remove_orphans)
def copy(self):
    """Copy a reaction

    The referenced metabolites and genes are also copied.
    """
    # Detach everything from the model first so deepcopy does not drag
    # the entire model (and every other reaction) into the copy.
    model = self._model
    linked = list(self._metabolites) + list(self._genes)
    self._model = None
    for obj in linked:
        obj._model = None
    # now we can copy
    new_reaction = deepcopy(self)
    # restore the references on the originals
    self._model = model
    for obj in linked:
        obj._model = model
    return new_reaction
def get_coefficient(self, metabolite_id):
    """
    Return the stoichiometric coefficient of a metabolite.

    Parameters
    ----------
    metabolite_id : str or cobra.Metabolite
    """
    # A Metabolite instance is itself the dictionary key.
    if isinstance(metabolite_id, Metabolite):
        return self._metabolites[metabolite_id]
    # Otherwise resolve the string identifier to the metabolite object
    # first; a missing id raises KeyError, as before.
    by_id = {met.id: met for met in self._metabolites}
    return self._metabolites[by_id[metabolite_id]]
def add_metabolites(self, metabolites_to_add, combine=True,
                    reversibly=True):
    """Add metabolites and stoichiometric coefficients to the reaction.

    If the final coefficient for a metabolite is 0 then it is removed
    from the reaction.

    The change is reverted upon exit when using the model as a context.

    Parameters
    ----------
    metabolites_to_add : dict
        Dictionary with metabolite objects or metabolite identifiers as
        keys and coefficients as values. If keys are strings (name of a
        metabolite) the reaction must already be part of a model and a
        metabolite with the given name must exist in the model.
    combine : bool
        Describes behavior a metabolite already exists in the reaction.
        True causes the coefficients to be added.
        False causes the coefficient to be replaced.
    reversibly : bool
        Whether to add the change to the context to make the change
        reversibly or not (primarily intended for internal use).
    """
    # Snapshot of the current coefficients, needed later to build the
    # context-manager undo action for the combine=False case.
    old_coefficients = self.metabolites
    # Metabolites that are new to the model (not just to this reaction).
    new_metabolites = []
    _id_to_metabolites = dict([(x.id, x) for x in self._metabolites])
    for metabolite, coefficient in iteritems(metabolites_to_add):
        # Make sure metabolites being added belong to the same model, or
        # else copy them.
        if isinstance(metabolite, Metabolite):
            if ((metabolite.model is not None) and
                    (metabolite.model is not self._model)):
                metabolite = metabolite.copy()
        # str() works for both Metabolite objects (their id) and plain
        # string identifiers.
        met_id = str(metabolite)
        # If a metabolite already exists in the reaction then
        # just add them.
        if met_id in _id_to_metabolites:
            reaction_metabolite = _id_to_metabolites[met_id]
            if combine:
                self._metabolites[reaction_metabolite] += coefficient
            else:
                self._metabolites[reaction_metabolite] = coefficient
        else:
            # If the reaction is in a model, ensure we aren't using
            # a duplicate metabolite.
            if self._model:
                try:
                    metabolite = \
                        self._model.metabolites.get_by_id(met_id)
                except KeyError as e:
                    if isinstance(metabolite, Metabolite):
                        new_metabolites.append(metabolite)
                    else:
                        # do we want to handle creation here?
                        raise e
            elif isinstance(metabolite, string_types):
                # if we want to handle creation, this should be changed
                raise ValueError("Reaction '%s' does not belong to a "
                                 "model. Either add the reaction to a "
                                 "model or use Metabolite objects instead "
                                 "of strings as keys."
                                 % self.id)
            self._metabolites[metabolite] = coefficient
            # make the metabolite aware that it is involved in this
            # reaction
            metabolite._reaction.add(self)
    # from cameo ...
    # Sync the solver: register brand-new metabolites and rewrite the
    # linear coefficients of this reaction's constraint columns.
    model = self.model
    if model is not None:
        model.add_metabolites(new_metabolites)
        for metabolite, coefficient in self._metabolites.items():
            model.constraints[
                metabolite.id].set_linear_coefficients(
                {self.forward_variable: coefficient,
                 self.reverse_variable: -coefficient
                 })
    # Drop metabolites whose final coefficient cancelled to zero.
    for metabolite, the_coefficient in list(self._metabolites.items()):
        if the_coefficient == 0:
            # make the metabolite aware that it no longer participates
            # in this reaction
            metabolite._reaction.remove(self)
            self._metabolites.pop(metabolite)
    # Register the inverse operation so the change is undone when the
    # enclosing model context exits.
    context = get_context(self)
    if context and reversibly:
        if combine:
            # Just subtract the metabolites that were added
            context(partial(
                self.subtract_metabolites, metabolites_to_add,
                combine=True, reversibly=False))
        else:
            # Reset them with add_metabolites
            mets_to_reset = {
                key: old_coefficients[model.metabolites.get_by_any(key)[0]]
                for key in iterkeys(metabolites_to_add)}
            context(partial(
                self.add_metabolites, mets_to_reset,
                combine=False, reversibly=False))
def subtract_metabolites(self, metabolites, combine=True, reversibly=True):
    """Subtract metabolites from a reaction.

    That means add the metabolites with -1*coefficient. If the final
    coefficient for a metabolite is 0 then the metabolite is removed from
    the reaction.

    Notes
    -----
    * A final coefficient < 0 implies a reactant.
    * The change is reverted upon exit when using the model as a context.

    Parameters
    ----------
    metabolites : dict
        Dictionary where the keys are of class Metabolite and the values
        are the coefficients. These metabolites will be added to the
        reaction.
    combine : bool
        Describes behavior a metabolite already exists in the reaction.
        True causes the coefficients to be added.
        False causes the coefficient to be replaced.
    reversibly : bool
        Whether to add the change to the context to make the change
        reversibly or not (primarily intended for internal use).
    """
    # Negate every coefficient and let add_metabolites do the work.
    negated = {met: -coeff for met, coeff in iteritems(metabolites)}
    self.add_metabolites(negated, combine=combine, reversibly=reversibly)
def build_reaction_string(self, use_metabolite_names=False):
    """Generate a human readable reaction string"""

    def _fmt(coeff):
        # Unit coefficients are omitted entirely; others keep their
        # string form with a trailing '.' stripped.
        return "" if coeff == 1 else str(coeff).rstrip(".") + " "

    attribute = "name" if use_metabolite_names else "id"
    reactants = []
    products = []
    # Deterministic ordering: metabolites sorted by id.
    for met in sorted(self._metabolites, key=attrgetter("id")):
        coeff = self._metabolites[met]
        label = str(getattr(met, attribute))
        if coeff >= 0:
            products.append(_fmt(coeff) + label)
        else:
            reactants.append(_fmt(abs(coeff)) + label)
    # Choose the arrow from reversibility / bound signs.
    if self.reversibility:
        arrow = ' <=> '
    elif self.lower_bound < 0 and self.upper_bound <= 0:
        arrow = ' <-- '
    else:
        arrow = ' --> '
    return ' + '.join(reactants) + arrow + ' + '.join(products)
def check_mass_balance(self):
    """Compute mass and charge balance for the reaction

    returns a dict of {element: amount} for unbalanced elements.
    "charge" is treated as an element in this dict
    This should be empty for balanced reactions.
    """
    balance = defaultdict(int)
    for met, coeff in iteritems(self._metabolites):
        # Charge participates in the balance like any other element.
        if met.charge is not None:
            balance["charge"] += coeff * met.charge
        if met.elements is None:
            raise ValueError("No elements found in metabolite %s"
                             % met.id)
        for element, amount in iteritems(met.elements):
            balance[element] += coeff * amount
    # Only report elements that do not cancel out.
    return {element: amount
            for element, amount in iteritems(balance) if amount != 0}
def compartments(self):
    """lists compartments the metabolites are in"""
    # Computed lazily and cached on the instance after the first call.
    if self._compartments is None:
        found = set()
        for met in self._metabolites:
            if met.compartment is not None:
                found.add(met.compartment)
        self._compartments = found
    return self._compartments
def _associate_gene(self, cobra_gene):
"""Associates a cobra.Gene object with a cobra.Reaction.
Parameters
----------
cobra_gene : cobra.core.Gene.Gene
"""
self._genes.add(cobra_gene)
cobra_gene._reaction.add(self)
cobra_gene._model = self._model |
def _dissociate_gene(self, cobra_gene):
"""Dissociates a cobra.Gene object with a cobra.Reaction.
Parameters
----------
cobra_gene : cobra.core.Gene.Gene
"""
self._genes.discard(cobra_gene)
cobra_gene._reaction.discard(self) |
def build_reaction_from_string(self, reaction_str, verbose=True,
                               fwd_arrow=None, rev_arrow=None,
                               reversible_arrow=None, term_split="+"):
    """Builds reaction from reaction equation reaction_str using parser

    Takes a string and using the specifications supplied in the optional
    arguments infers a set of metabolites, metabolite compartments and
    stoichiometries for the reaction.  It also infers the reversibility
    of the reaction from the reaction arrow.

    Changes to the associated model are reverted upon exit when using
    the model as a context.

    Parameters
    ----------
    reaction_str : string
        a string containing a reaction formula (equation)
    verbose: bool
        setting verbosity of function
    fwd_arrow : re.compile
        for forward irreversible reaction arrows
    rev_arrow : re.compile
        for backward irreversible reaction arrows
    reversible_arrow : re.compile
        for reversible reaction arrows
    term_split : string
        dividing individual metabolite entries
    """
    # set the arrows: custom arrow strings are escaped and compiled,
    # otherwise the module-level default finders are used.
    forward_arrow_finder = _forward_arrow_finder if fwd_arrow is None \
        else re.compile(re.escape(fwd_arrow))
    reverse_arrow_finder = _reverse_arrow_finder if rev_arrow is None \
        else re.compile(re.escape(rev_arrow))
    reversible_arrow_finder = _reversible_arrow_finder \
        if reversible_arrow is None \
        else re.compile(re.escape(reversible_arrow))
    if self._model is None:
        warn("no model found")
        model = None
    else:
        model = self._model
    # A single trailing compartment suffix applies to every metabolite
    # in the equation; it is stripped here and re-appended per term.
    found_compartments = compartment_finder.findall(reaction_str)
    if len(found_compartments) == 1:
        compartment = found_compartments[0]
        reaction_str = compartment_finder.sub("", reaction_str)
    else:
        compartment = ""
    # reversible case
    # NOTE(review): bounds are hard-coded to +/-1000 rather than taken
    # from a configuration default.
    arrow_match = reversible_arrow_finder.search(reaction_str)
    if arrow_match is not None:
        self.lower_bound = -1000
        self.upper_bound = 1000
    else:  # irreversible
        # try forward
        arrow_match = forward_arrow_finder.search(reaction_str)
        if arrow_match is not None:
            self.upper_bound = 1000
            self.lower_bound = 0
        else:
            # must be reverse
            arrow_match = reverse_arrow_finder.search(reaction_str)
            if arrow_match is None:
                raise ValueError("no suitable arrow found in '%s'" %
                                 reaction_str)
            else:
                self.upper_bound = 0
                self.lower_bound = -1000
    # Split the equation at the arrow into reactant and product sides.
    reactant_str = reaction_str[:arrow_match.start()].strip()
    product_str = reaction_str[arrow_match.end():].strip()
    # Clear the current stoichiometry before applying the parsed one.
    self.subtract_metabolites(self.metabolites, combine=True)
    for substr, factor in ((reactant_str, -1), (product_str, 1)):
        if len(substr) == 0:
            continue
        for term in substr.split(term_split):
            term = term.strip()
            # "nothing" is an accepted placeholder for an empty side.
            if term.lower() == "nothing":
                continue
            if " " in term:
                # "<coefficient> <id>" pair; parentheses around the
                # coefficient (legacy style) are stripped.
                num_str, met_id = term.split()
                num = float(num_str.lstrip("(").rstrip(")")) * factor
            else:
                met_id = term
                num = factor
            met_id += compartment
            try:
                met = model.metabolites.get_by_id(met_id)
            except KeyError:
                # Unknown ids create new Metabolite objects on the fly.
                if verbose:
                    print("unknown metabolite '%s' created" % met_id)
                met = Metabolite(met_id)
            self.add_metabolites({met: num})
def _clip(sid, prefix):
"""Clips a prefix from the beginning of a string if it exists."""
return sid[len(prefix):] if sid.startswith(prefix) else sid |
def _f_gene(sid, prefix="G_"):
    """Clips gene prefix from id."""
    # Restore '.' characters that were written out as SBML_DOT, then
    # drop the gene prefix.
    return _clip(sid.replace(SBML_DOT, "."), prefix)
def read_sbml_model(filename, number=float, f_replace=F_REPLACE,
                    set_missing_bounds=False, **kwargs):
    """Reads SBML model from given filename.

    If the given filename ends with the suffix ``.gz`` (for example,
    ``myfile.xml.gz``), the file is assumed to be compressed in gzip
    format and will be automatically decompressed upon reading.  Similarly,
    if the given filename ends with ``.zip`` or ``.bz2``, the file is
    assumed to be compressed in zip or bzip2 format (respectively).  Files
    whose names lack these suffixes will be read uncompressed.  Note that
    if the file is in zip format but the archive contains more than one
    file, only the first file in the archive will be read and the rest
    ignored.

    To read a gzip/zip file, libSBML needs to be configured and linked
    with the zlib library at compile time.  It also needs to be linked
    with the bzip2 library to read files in bzip2 format.  (Both of these
    are the default configurations for libSBML.)

    This function supports SBML with FBC-v1 and FBC-v2.  FBC-v1 models
    are converted to FBC-v2 models before reading.

    The parser tries to fall back to information in notes dictionaries
    if information is not available in the FBC packages, e.g.,
    CHARGE, FORMULA on species, or GENE_ASSOCIATION, SUBSYSTEM on reactions.

    Parameters
    ----------
    filename : path to SBML file, or SBML string, or SBML file handle
        SBML which is read into cobra model
    number: data type of stoichiometry: {float, int}
        In which data type should the stoichiometry be parsed.
    f_replace : dict of replacement functions for id replacement
        Dictionary of replacement functions for gene, specie, and reaction.
        By default the following id changes are performed on import:
        clip G_ from genes, clip M_ from species, clip R_ from reactions
        If no replacements should be performed, set f_replace={}, None
    set_missing_bounds : boolean flag to set missing bounds
        Missing bounds are set to default bounds in configuration.

    Returns
    -------
    cobra.core.Model

    Notes
    -----
    Provided file handles cannot be opened in binary mode, i.e., use
        with open(path, "r") as f:
            read_sbml_model(f)
    File handles to compressed files are not supported yet.
    """
    try:
        doc = _get_doc_from_filename(filename)
        return _sbml_to_model(doc,
                              number=number,
                              f_replace=f_replace,
                              set_missing_bounds=set_missing_bounds,
                              **kwargs)
    except IOError as e:
        # File-system level problems are reported verbatim.
        raise e
    except Exception:
        # BUGFIX: traceback.print_exc() writes to stderr and returns
        # None, so the previous code logged the literal 'None'.
        # format_exc() returns the traceback as a string for the logger.
        LOGGER.error(traceback.format_exc())
        raise CobraSBMLError(
            "Something went wrong reading the SBML model. Most likely the SBML"
            " model is not valid. Please check that your model is valid using "
            "the `cobra.io.sbml.validate_sbml_model` function or via the "
            "online validator at http://sbml.org/validator .\n"
            "\t`(model, errors) = validate_sbml_model(filename)`"
            "\nIf the model is valid and cannot be read please open an issue "
            "at https://github.com/opencobra/cobrapy/issues .")
def _get_doc_from_filename(filename):
    """Get SBMLDocument from given filename.

    Accepts a path to an (optionally compressed) SBML file, a raw SBML
    string, or a readable file handle.

    Parameters
    ----------
    filename : path to SBML, or SBML string, or filehandle

    Returns
    -------
    libsbml.SBMLDocument
    """
    if isinstance(filename, string_types):
        # The len < 260 guard presumably works around the Windows
        # MAX_PATH limit -- TODO confirm; both branches call the same
        # libSBML reader.
        if ("win" in platform) and (len(filename) < 260) \
                and os.path.exists(filename):
            # path (win)
            doc = libsbml.readSBMLFromFile(filename)  # noqa: E501 type: libsbml.SBMLDocument
        elif ("win" not in platform) and os.path.exists(filename):
            # path other
            doc = libsbml.readSBMLFromFile(filename)  # noqa: E501 type: libsbml.SBMLDocument
        else:
            # string representation
            if "<sbml" not in filename:
                # NOTE(review): the trailing ", filename" is passed as a
                # second IOError argument, not %-formatted into the text.
                raise IOError("The file with 'filename' does not exist, "
                              "or is not an SBML string. Provide the path to "
                              "an existing SBML file or a valid SBML string "
                              "representation: \n%s", filename)
            doc = libsbml.readSBMLFromString(filename)  # noqa: E501 type: libsbml.SBMLDocument
    elif hasattr(filename, "read"):
        # file handle
        doc = libsbml.readSBMLFromString(filename.read())  # noqa: E501 type: libsbml.SBMLDocument
    else:
        raise CobraSBMLError("Input type '%s' for 'filename' is not supported."
                             " Provide a path, SBML str, "
                             "or file handle.", type(filename))
    return doc
def _sbml_to_model(doc, number=float, f_replace=F_REPLACE,
                   set_missing_bounds=False, **kwargs):
    """Creates cobra model from SBMLDocument.

    Parses, in order: model meta information and history, compartments,
    species (metabolites), boundary/exchange reactions, genes, reactions
    with bounds and GPR rules, the objective, and groups.  Falls back to
    legacy notes/kinetic-law encodings where FBC data is absent.

    Parameters
    ----------
    doc: libsbml.SBMLDocument
    number: data type of stoichiometry: {float, int}
        In which data type should the stoichiometry be parsed.
    f_replace : dict of replacement functions for id replacement
    set_missing_bounds : flag to set missing bounds

    Returns
    -------
    cobra.core.Model
    """
    if f_replace is None:
        f_replace = {}

    # SBML model
    model = doc.getModel()  # type: libsbml.Model
    if model is None:
        raise CobraSBMLError("No SBML model detected in file.")
    model_fbc = model.getPlugin("fbc")  # type: libsbml.FbcModelPlugin

    if not model_fbc:
        LOGGER.warning("Model does not contain SBML fbc package information.")
    else:
        if not model_fbc.isSetStrict():
            LOGGER.warning('Loading SBML model without fbc:strict="true"')

        # fbc-v1 (legacy): convert in place to fbc-v2 before parsing
        doc_fbc = doc.getPlugin("fbc")  # type: libsbml.FbcSBMLDocumentPlugin
        fbc_version = doc_fbc.getPackageVersion()

        if fbc_version == 1:
            LOGGER.warning("Loading SBML with fbc-v1 (models should be encoded"
                           " using fbc-v2)")
            conversion_properties = libsbml.ConversionProperties()
            conversion_properties.addOption("convert fbc v1 to fbc v2", True,
                                            "Convert FBC-v1 model to FBC-v2")
            result = doc.convert(conversion_properties)
            if result != libsbml.LIBSBML_OPERATION_SUCCESS:
                raise Exception("Conversion of SBML fbc v1 to fbc v2 failed")

    # Model
    cobra_model = Model(model.getId())
    cobra_model.name = model.getName()

    # meta information kept on the cobra model for round-tripping
    meta = {
        "model.id": model.getId(),
        "level": model.getLevel(),
        "version": model.getVersion(),
        "packages": []
    }
    # History
    creators = []
    created = None
    if model.isSetModelHistory():
        history = model.getModelHistory()  # type: libsbml.ModelHistory

        if history.isSetCreatedDate():
            created = history.getCreatedDate()

        for c in history.getListCreators():  # type: libsbml.ModelCreator
            creators.append({
                "familyName": c.getFamilyName() if c.isSetFamilyName() else None,  # noqa: E501
                "givenName": c.getGivenName() if c.isSetGivenName() else None,
                "organisation": c.getOrganisation() if c.isSetOrganisation() else None,  # noqa: E501
                "email": c.getEmail() if c.isSetEmail() else None,
            })

    meta["creators"] = creators
    meta["created"] = created
    meta["notes"] = _parse_notes_dict(doc)
    meta["annotation"] = _parse_annotations(doc)

    info = "<{}> SBML L{}V{}".format(model.getId(),
                                     model.getLevel(), model.getVersion())
    packages = {}
    for k in range(doc.getNumPlugins()):
        plugin = doc.getPlugin(k)  # type:libsbml.SBasePlugin
        key, value = plugin.getPackageName(), plugin.getPackageVersion()
        packages[key] = value
        info += ", {}-v{}".format(key, value)
        if key not in ["fbc", "groups"]:
            LOGGER.warning("SBML package '%s' not supported by cobrapy,"
                           "information is not parsed", key)
    meta["info"] = info
    meta["packages"] = packages
    cobra_model._sbml = meta

    # notes and annotations
    cobra_model.notes = _parse_notes_dict(model)
    cobra_model.annotation = _parse_annotations(model)

    # Compartments
    # FIXME: update with new compartments
    cobra_model.compartments = {c.getId(): c.getName()
                                for c in model.getListOfCompartments()}

    # Species
    metabolites = []
    boundary_metabolites = []
    if model.getNumSpecies() == 0:
        LOGGER.warning("No metabolites in model")

    for specie in model.getListOfSpecies():  # type: libsbml.Species
        sid = _check_required(specie, specie.getId(), "id")
        if f_replace and F_SPECIE in f_replace:
            sid = f_replace[F_SPECIE](sid)

        met = Metabolite(sid)
        met.name = specie.getName()
        met.notes = _parse_notes_dict(specie)
        met.annotation = _parse_annotations(specie)
        met.compartment = specie.getCompartment()

        specie_fbc = specie.getPlugin("fbc")  # type: libsbml.FbcSpeciesPlugin
        if specie_fbc:
            met.charge = specie_fbc.getCharge()
            met.formula = specie_fbc.getChemicalFormula()
        else:
            # legacy fallbacks: species attribute, then notes entries
            if specie.isSetCharge():
                LOGGER.warning("Use of the species charge attribute is "
                               "discouraged, use fbc:charge "
                               "instead: %s", specie)
                met.charge = specie.getCharge()
            else:
                if 'CHARGE' in met.notes:
                    LOGGER.warning("Use of CHARGE in the notes element is "
                                   "discouraged, use fbc:charge "
                                   "instead: %s", specie)
                    try:
                        met.charge = int(met.notes['CHARGE'])
                    except ValueError:
                        # handle nan, na, NA, ...
                        pass

            if 'FORMULA' in met.notes:
                LOGGER.warning("Use of FORMULA in the notes element is "
                               "discouraged, use fbc:chemicalFormula "
                               "instead: %s", specie)
                met.formula = met.notes['FORMULA']

        # Detect boundary metabolites
        if specie.getBoundaryCondition() is True:
            boundary_metabolites.append(met)

        metabolites.append(met)

    cobra_model.add_metabolites(metabolites)

    # Add exchange reactions for boundary metabolites
    ex_reactions = []
    for met in boundary_metabolites:
        ex_rid = "EX_{}".format(met.id)
        ex_reaction = Reaction(ex_rid)
        ex_reaction.name = ex_rid
        ex_reaction.annotation = {
            'sbo': SBO_EXCHANGE_REACTION
        }
        ex_reaction.lower_bound = -float("Inf")
        ex_reaction.upper_bound = float("Inf")
        LOGGER.warning("Adding exchange reaction %s for boundary metabolite: "
                       "%s" % (ex_reaction.id, met.id))
        # species is reactant
        ex_reaction.add_metabolites({met: -1})
        ex_reactions.append(ex_reaction)
    cobra_model.add_reactions(ex_reactions)

    # Genes
    if model_fbc:
        for gp in model_fbc.getListOfGeneProducts():  # noqa: E501 type: libsbml.GeneProduct
            gid = gp.getId()
            if f_replace and F_GENE in f_replace:
                gid = f_replace[F_GENE](gid)
            cobra_gene = Gene(gid)
            cobra_gene.name = gp.getName()
            if cobra_gene.name is None:
                cobra_gene.name = gid
            cobra_gene.annotation = _parse_annotations(gp)
            cobra_gene.notes = _parse_notes_dict(gp)

            cobra_model.genes.append(cobra_gene)
    else:
        # No FBC gene products: pre-create genes from notes so GPR
        # strings parsed later resolve to existing Gene objects.
        for cobra_reaction in model.getListOfReactions():  # noqa: E501 type: libsbml.Reaction
            # fallback to notes information
            notes = _parse_notes_dict(cobra_reaction)

            if "GENE ASSOCIATION" in notes:
                gpr = notes['GENE ASSOCIATION']
            elif "GENE_ASSOCIATION" in notes:
                gpr = notes['GENE_ASSOCIATION']
            else:
                gpr = ''

            if len(gpr) > 0:
                # crude tokenization: boolean operators and parentheses
                # are collapsed to ';' separators
                gpr = gpr.replace("(", ";")
                gpr = gpr.replace(")", ";")
                gpr = gpr.replace("or", ";")
                gpr = gpr.replace("and", ";")
                gids = [t.strip() for t in gpr.split(';')]

                # create missing genes
                for gid in gids:
                    if f_replace and F_GENE in f_replace:
                        gid = f_replace[F_GENE](gid)

                    if gid not in cobra_model.genes:
                        cobra_gene = Gene(gid)
                        cobra_gene.name = gid
                        cobra_model.genes.append(cobra_gene)

    # GPR rules
    def process_association(ass):
        """ Recursively convert gpr association to a gpr string.
        Defined as inline functions to not pass the replacement dict around.
        """
        if ass.isFbcOr():
            return " ".join(
                ["(", ' or '.join(process_association(c)
                                  for c in ass.getListOfAssociations()), ")"]
            )
        elif ass.isFbcAnd():
            return " ".join(
                ["(", ' and '.join(process_association(c)
                                   for c in ass.getListOfAssociations()), ")"])
        elif ass.isGeneProductRef():
            gid = ass.getGeneProduct()
            if f_replace and F_GENE in f_replace:
                return f_replace[F_GENE](gid)
            else:
                return gid

    # Reactions
    missing_bounds = False
    reactions = []
    if model.getNumReactions() == 0:
        LOGGER.warning("No reactions in model")

    for reaction in model.getListOfReactions():  # type: libsbml.Reaction
        rid = _check_required(reaction, reaction.getId(), "id")
        if f_replace and F_REACTION in f_replace:
            rid = f_replace[F_REACTION](rid)
        cobra_reaction = Reaction(rid)
        cobra_reaction.name = reaction.getName()
        cobra_reaction.annotation = _parse_annotations(reaction)
        cobra_reaction.notes = _parse_notes_dict(reaction)

        # set bounds; p_lb/p_ub stay None if no bound was found so the
        # missing-bounds handling below can fill in defaults
        p_ub, p_lb = None, None
        r_fbc = reaction.getPlugin("fbc")  # type: libsbml.FbcReactionPlugin
        if r_fbc:
            # bounds in fbc
            lb_id = r_fbc.getLowerFluxBound()
            if lb_id:
                p_lb = model.getParameter(lb_id)  # type: libsbml.Parameter
                if p_lb and p_lb.getConstant() and \
                        (p_lb.getValue() is not None):
                    cobra_reaction.lower_bound = p_lb.getValue()
                else:
                    raise CobraSBMLError("No constant bound '%s' for "
                                         "reaction: %s" % (p_lb, reaction))

            ub_id = r_fbc.getUpperFluxBound()
            if ub_id:
                p_ub = model.getParameter(ub_id)  # type: libsbml.Parameter
                if p_ub and p_ub.getConstant() and \
                        (p_ub.getValue() is not None):
                    cobra_reaction.upper_bound = p_ub.getValue()
                else:
                    raise CobraSBMLError("No constant bound '%s' for "
                                         "reaction: %s" % (p_ub, reaction))

        elif reaction.isSetKineticLaw():
            # some legacy models encode bounds in kinetic laws
            klaw = reaction.getKineticLaw()  # type: libsbml.KineticLaw
            p_lb = klaw.getParameter("LOWER_BOUND")  # noqa: E501 type: libsbml.LocalParameter
            if p_lb:
                cobra_reaction.lower_bound = p_lb.getValue()
            p_ub = klaw.getParameter("UPPER_BOUND")  # noqa: E501 type: libsbml.LocalParameter
            if p_ub:
                cobra_reaction.upper_bound = p_ub.getValue()

            if p_ub is not None or p_lb is not None:
                LOGGER.warning("Encoding LOWER_BOUND and UPPER_BOUND in "
                               "KineticLaw is discouraged, "
                               "use fbc:fluxBounds instead: %s", reaction)

        if p_lb is None:
            missing_bounds = True
            if set_missing_bounds:
                lower_bound = config.lower_bound
            else:
                lower_bound = -float("Inf")
            cobra_reaction.lower_bound = lower_bound
            LOGGER.warning("Missing lower flux bound set to '%s' for "
                           " reaction: '%s'", lower_bound, reaction)

        if p_ub is None:
            missing_bounds = True
            if set_missing_bounds:
                upper_bound = config.upper_bound
            else:
                upper_bound = float("Inf")
            cobra_reaction.upper_bound = upper_bound
            LOGGER.warning("Missing upper flux bound set to '%s' for "
                           " reaction: '%s'", upper_bound, reaction)

        # add reaction
        reactions.append(cobra_reaction)

        # parse equation: reactants contribute negative, products
        # positive stoichiometry; shared species accumulate
        stoichiometry = defaultdict(lambda: 0)
        for sref in reaction.getListOfReactants():  # noqa: E501 type: libsbml.SpeciesReference
            sid = sref.getSpecies()
            if f_replace and F_SPECIE in f_replace:
                sid = f_replace[F_SPECIE](sid)
            stoichiometry[sid] -= number(
                _check_required(sref, sref.getStoichiometry(),
                                "stoichiometry"))

        for sref in reaction.getListOfProducts():  # noqa: E501 type: libsbml.SpeciesReference
            sid = sref.getSpecies()
            if f_replace and F_SPECIE in f_replace:
                sid = f_replace[F_SPECIE](sid)
            stoichiometry[sid] += number(
                _check_required(sref, sref.getStoichiometry(),
                                "stoichiometry"))

        # convert to metabolite objects
        object_stoichiometry = {}
        for met_id in stoichiometry:
            metabolite = cobra_model.metabolites.get_by_id(met_id)
            object_stoichiometry[metabolite] = stoichiometry[met_id]
        cobra_reaction.add_metabolites(object_stoichiometry)

        # GPR
        if r_fbc:
            gpr = ''
            gpa = r_fbc.getGeneProductAssociation()  # noqa: E501 type: libsbml.GeneProductAssociation
            if gpa is not None:
                association = gpa.getAssociation()  # noqa: E501 type: libsbml.FbcAssociation
                gpr = process_association(association)
        else:
            # fallback to notes information
            notes = cobra_reaction.notes
            if "GENE ASSOCIATION" in notes:
                gpr = notes['GENE ASSOCIATION']
            elif "GENE_ASSOCIATION" in notes:
                gpr = notes['GENE_ASSOCIATION']
            else:
                gpr = ''

            if len(gpr) > 0:
                LOGGER.warning("Use of GENE ASSOCIATION or GENE_ASSOCIATION "
                               "in the notes element is discouraged, use "
                               "fbc:gpr instead: %s", reaction)
                if f_replace and F_GENE in f_replace:
                    gpr = " ".join(
                        f_replace[F_GENE](t) for t in gpr.split(' ')
                    )

        # remove outside parenthesis, if any
        if gpr.startswith("(") and gpr.endswith(")"):
            gpr = gpr[1:-1].strip()

        cobra_reaction.gene_reaction_rule = gpr

    cobra_model.add_reactions(reactions)

    # Objective
    obj_direction = "max"
    coefficients = {}
    if model_fbc:
        obj_list = model_fbc.getListOfObjectives()  # noqa: E501 type: libsbml.ListOfObjectives
        if obj_list is None:
            LOGGER.warning("listOfObjectives element not found")
        elif obj_list.size() == 0:
            LOGGER.warning("No objective in listOfObjectives")
        elif not obj_list.getActiveObjective():
            LOGGER.warning("No active objective in listOfObjectives")
        else:
            obj_id = obj_list.getActiveObjective()
            obj = model_fbc.getObjective(obj_id)  # type: libsbml.Objective
            obj_direction = LONG_SHORT_DIRECTION[obj.getType()]

            for flux_obj in obj.getListOfFluxObjectives():  # noqa: E501 type: libsbml.FluxObjective
                rid = flux_obj.getReaction()
                if f_replace and F_REACTION in f_replace:
                    rid = f_replace[F_REACTION](rid)
                try:
                    objective_reaction = cobra_model.reactions.get_by_id(rid)
                except KeyError:
                    raise CobraSBMLError("Objective reaction '%s' "
                                         "not found" % rid)
                try:
                    coefficients[objective_reaction] = number(
                        flux_obj.getCoefficient()
                    )
                except ValueError as e:
                    LOGGER.warning(str(e))
    else:
        # some legacy models encode objective coefficients in kinetic laws
        for reaction in model.getListOfReactions():  # noqa: E501 type: libsbml.Reaction
            if reaction.isSetKineticLaw():
                klaw = reaction.getKineticLaw()  # type: libsbml.KineticLaw
                p_oc = klaw.getParameter(
                    "OBJECTIVE_COEFFICIENT")  # noqa: E501 type: libsbml.LocalParameter
                if p_oc:
                    rid = reaction.getId()
                    if f_replace and F_REACTION in f_replace:
                        rid = f_replace[F_REACTION](rid)
                    try:
                        objective_reaction = cobra_model.reactions.get_by_id(
                            rid)
                    except KeyError:
                        raise CobraSBMLError("Objective reaction '%s' "
                                             "not found", rid)
                    try:
                        coefficients[objective_reaction] = number(
                            p_oc.getValue())
                    except ValueError as e:
                        LOGGER.warning(str(e))

                    LOGGER.warning("Encoding OBJECTIVE_COEFFICIENT in "
                                   "KineticLaw is discouraged, "
                                   "use fbc:fluxObjective "
                                   "instead: %s", cobra_reaction)

    if len(coefficients) == 0:
        LOGGER.error("No objective coefficients in model. Unclear what should "
                     "be optimized")
    set_objective(cobra_model, coefficients)
    cobra_model.solver.objective.direction = obj_direction

    # parse groups
    model_groups = model.getPlugin("groups")  # type: libsbml.GroupsModelPlugin
    groups = []
    if model_groups:
        # calculate hashmaps to lookup objects in O(1)
        sid_map = {}
        metaid_map = {}
        for obj_list in [model.getListOfCompartments(),
                         model.getListOfSpecies(),
                         model.getListOfReactions(),
                         model_groups.getListOfGroups()]:

            for sbase in obj_list:  # type: libsbml.SBase
                if sbase.isSetId():
                    sid_map[sbase.getId()] = sbase
                if sbase.isSetMetaId():
                    metaid_map[sbase.getMetaId()] = sbase

        # create groups
        for group in model_groups.getListOfGroups():  # type: libsbml.Group
            cobra_group = Group(group.getId())
            cobra_group.name = group.getName()
            if group.isSetKind():
                cobra_group.kind = group.getKindAsString()
            cobra_group.annotation = _parse_annotations(group)
            cobra_group.notes = _parse_notes_dict(group)

            cobra_members = []
            for member in group.getListOfMembers():  # type: libsbml.Member
                # NOTE(review): a member with neither idRef nor metaIdRef
                # would leave 'obj' unbound here -- assumed not to occur
                # in valid SBML; verify against the groups spec.
                if member.isSetIdRef():
                    obj = sid_map[member.getIdRef()]
                    # obj = doc.getElementBySId(member.getIdRef())
                elif member.isSetMetaIdRef():
                    obj = metaid_map[member.getMetaIdRef()]
                    # obj = doc.getElementByMetaId(member.getMetaIdRef())

                typecode = obj.getTypeCode()
                obj_id = obj.getId()

                # id replacements
                cobra_member = None
                if typecode == libsbml.SBML_SPECIES:
                    if f_replace and F_SPECIE in f_replace:
                        obj_id = f_replace[F_SPECIE](obj_id)
                    cobra_member = cobra_model.metabolites.get_by_id(obj_id)
                elif typecode == libsbml.SBML_REACTION:
                    if f_replace and F_REACTION in f_replace:
                        obj_id = f_replace[F_REACTION](obj_id)
                    cobra_member = cobra_model.reactions.get_by_id(obj_id)
                elif typecode == libsbml.SBML_FBC_GENEPRODUCT:
                    if f_replace and F_GENE in f_replace:
                        obj_id = f_replace[F_GENE](obj_id)
                    cobra_member = cobra_model.genes.get_by_id(obj_id)
                else:
                    LOGGER.warning("Member %s could not be added to group %s."
                                   "unsupported type code: "
                                   "%s" % (member, group, typecode))

                if cobra_member:
                    cobra_members.append(cobra_member)

            cobra_group.add_members(cobra_members)
            groups.append(cobra_group)
    else:
        # parse deprecated subsystems on reactions
        groups_dict = {}
        for cobra_reaction in cobra_model.reactions:
            if "SUBSYSTEM" in cobra_reaction.notes:
                g_name = cobra_reaction.notes["SUBSYSTEM"]
                if g_name in groups_dict:
                    groups_dict[g_name].append(cobra_reaction)
                else:
                    groups_dict[g_name] = [cobra_reaction]

        for gid, cobra_members in groups_dict.items():
            cobra_group = Group(gid, name=gid, kind="collection")
            cobra_group.add_members(cobra_members)
            groups.append(cobra_group)

    cobra_model.add_groups(groups)

    # general hint for missing flux bounds
    if missing_bounds and not set_missing_bounds:
        LOGGER.warning("Missing flux bounds on reactions. As best practise "
                       "and to avoid confusion flux bounds should be set "
                       "explicitly on all reactions. "
                       "To set the missing flux bounds to default bounds "
                       "specified in cobra.Configuration use the flag "
                       "`read_sbml_model(..., set_missing_bounds=True)`.")

    return cobra_model
def write_sbml_model(cobra_model, filename, f_replace=F_REPLACE, **kwargs):
    """Writes cobra model to filename.

    The created model is SBML level 3 version 1 (L1V3) with
    fbc package v2 (fbc-v2).

    If the given filename ends with the suffix ".gz" (for example,
    "myfile.xml.gz"), libSBML assumes the caller wants the file to be
    written compressed in gzip format. Similarly, if the given filename
    ends with ".zip" or ".bz2", libSBML assumes the caller wants the
    file to be compressed in zip or bzip2 format (respectively). Files
    whose names lack these suffixes will be written uncompressed. Special
    considerations for the zip format: If the given filename ends with
    ".zip", the file placed in the zip archive will have the suffix
    ".xml" or ".sbml". For example, the file in the zip archive will
    be named "test.xml" if the given filename is "test.xml.zip" or
    "test.zip". Similarly, the filename in the archive will be
    "test.sbml" if the given filename is "test.sbml.zip".

    Parameters
    ----------
    cobra_model : cobra.core.Model
        Model instance which is written to SBML
    filename : string or file-like or path-like
        Path to which the model is written, or an open file handle with a
        ``write`` method.
    f_replace: dict of replacement functions for id replacement
    **kwargs :
        Further keyword arguments passed on to ``_model_to_sbml``
        (e.g. ``units``).
    """
    doc = _model_to_sbml(cobra_model, f_replace=f_replace, **kwargs)

    if isinstance(filename, string_types):
        # write to path
        libsbml.writeSBMLToFile(doc, filename)
    elif hasattr(filename, "write"):
        # write to file handle
        sbml_str = libsbml.writeSBMLToString(doc)
        filename.write(sbml_str)
    else:
        # e.g. pathlib.Path: coerce to a string path instead of silently
        # writing nothing (previous behavior was a no-op for such inputs)
        libsbml.writeSBMLToFile(doc, str(filename))
def _model_to_sbml(cobra_model, f_replace=None, units=True):
    """Convert Cobra model to SBMLDocument.

    Parameters
    ----------
    cobra_model : cobra.core.Model
        Cobra model instance
    f_replace : dict of replacement functions
        Replacement to apply on identifiers.
    units : boolean
        Should the FLUX_UNITS be written in the SBMLDocument.

    Returns
    -------
    libsbml.SBMLDocument
    """
    if f_replace is None:
        f_replace = {}

    # SBML L3V1 document with fbc-v2 package
    sbml_ns = libsbml.SBMLNamespaces(3, 1)  # SBML L3V1
    sbml_ns.addPackageNamespace("fbc", 2)  # fbc-v2
    doc = libsbml.SBMLDocument(sbml_ns)  # noqa: E501 type: libsbml.SBMLDocument
    doc.setPackageRequired("fbc", False)
    doc.setSBOTerm(SBO_FBA_FRAMEWORK)

    model = doc.createModel()  # type: libsbml.Model
    model_fbc = model.getPlugin("fbc")  # type: libsbml.FbcModelPlugin
    model_fbc.setStrict(True)

    if cobra_model.id is not None:
        model.setId(cobra_model.id)
        model.setMetaId("meta_" + cobra_model.id)
    else:
        model.setMetaId("meta_model")
    if cobra_model.name is not None:
        model.setName(cobra_model.name)
    _sbase_annotations(model, cobra_model.annotation)

    # Meta information (ModelHistory) preserved from an earlier SBML read
    if hasattr(cobra_model, "_sbml"):
        meta = cobra_model._sbml
        if "annotation" in meta:
            _sbase_annotations(doc, meta["annotation"])
        if "notes" in meta:
            _sbase_notes_dict(doc, meta["notes"])

        history = libsbml.ModelHistory()  # type: libsbml.ModelHistory
        if "created" in meta and meta["created"]:
            history.setCreatedDate(meta["created"])
        else:
            # no stored creation date: stamp with the current local time
            time = datetime.datetime.now()
            timestr = time.strftime('%Y-%m-%dT%H:%M:%S')
            date = libsbml.Date(timestr)
            _check(history.setCreatedDate(date), 'set creation date')
            _check(history.setModifiedDate(date), 'set modified date')

        if "creators" in meta:
            for cobra_creator in meta["creators"]:
                creator = libsbml.ModelCreator()  # noqa: E501 type: libsbml.ModelCreator
                if cobra_creator.get("familyName", None):
                    creator.setFamilyName(cobra_creator["familyName"])
                if cobra_creator.get("givenName", None):
                    creator.setGivenName(cobra_creator["givenName"])
                if cobra_creator.get("organisation", None):
                    creator.setOrganisation(cobra_creator["organisation"])
                if cobra_creator.get("email", None):
                    creator.setEmail(cobra_creator["email"])
                _check(history.addCreator(creator),
                       "adding creator to ModelHistory.")
        _check(model.setModelHistory(history), 'set model history')

    # Units
    # FIX: bind flux_udef unconditionally; previously referencing it in the
    # _create_bound calls below raised NameError when units=False.
    flux_udef = None
    if units:
        flux_udef = model.createUnitDefinition()  # noqa: E501 type: libsbml.UnitDefinition
        flux_udef.setId(UNITS_FLUX[0])
        for u in UNITS_FLUX[1]:
            unit = flux_udef.createUnit()  # type: libsbml.Unit
            unit.setKind(u.kind)
            unit.setExponent(u.exponent)
            unit.setScale(u.scale)
            unit.setMultiplier(u.multiplier)

    # Shared default flux-bound parameters; minimum and maximum from model
    if len(cobra_model.reactions) > 0:
        min_value = min(cobra_model.reactions.list_attr("lower_bound"))
        max_value = max(cobra_model.reactions.list_attr("upper_bound"))
    else:
        min_value = config.lower_bound
        max_value = config.upper_bound

    _create_parameter(model, pid=LOWER_BOUND_ID,
                      value=min_value, sbo=SBO_DEFAULT_FLUX_BOUND)
    _create_parameter(model, pid=UPPER_BOUND_ID,
                      value=max_value, sbo=SBO_DEFAULT_FLUX_BOUND)
    _create_parameter(model, pid=ZERO_BOUND_ID,
                      value=0, sbo=SBO_DEFAULT_FLUX_BOUND)
    _create_parameter(model, pid=BOUND_MINUS_INF,
                      value=-float("Inf"), sbo=SBO_FLUX_BOUND)
    _create_parameter(model, pid=BOUND_PLUS_INF,
                      value=float("Inf"), sbo=SBO_FLUX_BOUND)

    # Compartments
    # FIXME: use first class compartment model (and write notes & annotations)
    #     (https://github.com/opencobra/cobrapy/issues/811)
    for cid, name in iteritems(cobra_model.compartments):
        compartment = model.createCompartment()  # type: libsbml.Compartment
        compartment.setId(cid)
        compartment.setName(name)
        compartment.setConstant(True)
        # FIXME: write annotations and notes
        # _sbase_notes(c, com.notes)
        # _sbase_annotations(c, com.annotation)

    # Species
    for metabolite in cobra_model.metabolites:
        specie = model.createSpecies()  # type: libsbml.Species
        mid = metabolite.id
        if f_replace and F_SPECIE_REV in f_replace:
            mid = f_replace[F_SPECIE_REV](mid)
        specie.setId(mid)
        specie.setConstant(False)
        specie.setBoundaryCondition(False)
        specie.setHasOnlySubstanceUnits(False)
        specie.setName(metabolite.name)
        specie.setCompartment(metabolite.compartment)
        s_fbc = specie.getPlugin("fbc")  # type: libsbml.FbcSpeciesPlugin
        if metabolite.charge is not None:
            s_fbc.setCharge(metabolite.charge)
        if metabolite.formula is not None:
            s_fbc.setChemicalFormula(metabolite.formula)

        _sbase_annotations(specie, metabolite.annotation)
        _sbase_notes_dict(specie, metabolite.notes)

    # Genes
    for cobra_gene in cobra_model.genes:
        gp = model_fbc.createGeneProduct()  # type: libsbml.GeneProduct
        gid = cobra_gene.id
        if f_replace and F_GENE_REV in f_replace:
            gid = f_replace[F_GENE_REV](gid)
        gp.setId(gid)
        gname = cobra_gene.name
        # fall back to the id when no (or an empty) name is available
        if gname is None or len(gname) == 0:
            gname = gid
        gp.setName(gname)
        gp.setLabel(gid)

        _sbase_annotations(gp, cobra_gene.annotation)
        _sbase_notes_dict(gp, cobra_gene.notes)

    # Objective
    objective = model_fbc.createObjective()  # type: libsbml.Objective
    objective.setId("obj")
    objective.setType(SHORT_LONG_DIRECTION[cobra_model.objective.direction])
    model_fbc.setActiveObjectiveId("obj")

    # Reactions
    reaction_coefficients = linear_reaction_coefficients(cobra_model)
    for cobra_reaction in cobra_model.reactions:
        rid = cobra_reaction.id
        if f_replace and F_REACTION_REV in f_replace:
            rid = f_replace[F_REACTION_REV](rid)
        reaction = model.createReaction()  # type: libsbml.Reaction
        reaction.setId(rid)
        reaction.setName(cobra_reaction.name)
        reaction.setFast(False)
        reaction.setReversible((cobra_reaction.lower_bound < 0))
        _sbase_annotations(reaction, cobra_reaction.annotation)
        _sbase_notes_dict(reaction, cobra_reaction.notes)

        # stoichiometry: negative coefficients are reactants
        for metabolite, stoichiometry in iteritems(cobra_reaction._metabolites):  # noqa: E501
            sid = metabolite.id
            if f_replace and F_SPECIE_REV in f_replace:
                sid = f_replace[F_SPECIE_REV](sid)
            if stoichiometry < 0:
                sref = reaction.createReactant()  # noqa: E501 type: libsbml.SpeciesReference
                sref.setSpecies(sid)
                sref.setStoichiometry(-stoichiometry)
                sref.setConstant(True)
            else:
                sref = reaction.createProduct()  # noqa: E501 type: libsbml.SpeciesReference
                sref.setSpecies(sid)
                sref.setStoichiometry(stoichiometry)
                sref.setConstant(True)

        # bounds
        r_fbc = reaction.getPlugin("fbc")  # type: libsbml.FbcReactionPlugin
        r_fbc.setLowerFluxBound(_create_bound(model, cobra_reaction,
                                              "lower_bound",
                                              f_replace=f_replace, units=units,
                                              flux_udef=flux_udef))
        r_fbc.setUpperFluxBound(_create_bound(model, cobra_reaction,
                                              "upper_bound",
                                              f_replace=f_replace, units=units,
                                              flux_udef=flux_udef))

        # GPR
        gpr = cobra_reaction.gene_reaction_rule
        if gpr is not None and len(gpr) > 0:
            # replace gene ids in the rule string token-by-token
            if f_replace and F_GENE_REV in f_replace:
                gpr = gpr.replace('(', '( ')
                gpr = gpr.replace(')', ' )')
                tokens = gpr.split(' ')
                for k in range(len(tokens)):
                    if tokens[k] not in [' ', 'and', 'or', '(', ')']:
                        tokens[k] = f_replace[F_GENE_REV](tokens[k])
                gpr_new = " ".join(tokens)

            gpa = r_fbc.createGeneProductAssociation()  # noqa: E501 type: libsbml.GeneProductAssociation
            gpa.setAssociation(gpr_new)

        # objective coefficients
        if reaction_coefficients.get(cobra_reaction, 0) != 0:
            flux_obj = objective.createFluxObjective()  # noqa: E501 type: libsbml.FluxObjective
            flux_obj.setReaction(rid)
            flux_obj.setCoefficient(cobra_reaction.objective_coefficient)

    # write groups
    if len(cobra_model.groups) > 0:
        doc.enablePackage(
            "http://www.sbml.org/sbml/level3/version1/groups/version1",
            "groups", True)
        doc.setPackageRequired("groups", False)
        model_group = model.getPlugin("groups")  # noqa: E501 type: libsbml.GroupsModelPlugin
        for cobra_group in cobra_model.groups:
            group = model_group.createGroup()  # type: libsbml.Group
            group.setId(cobra_group.id)
            group.setName(cobra_group.name)
            group.setKind(cobra_group.kind)

            _sbase_notes_dict(group, cobra_group.notes)
            _sbase_annotations(group, cobra_group.annotation)

            for cobra_member in cobra_group.members:
                member = group.createMember()  # type: libsbml.Member
                mid = cobra_member.id
                m_type = str(type(cobra_member))

                # id replacements depending on member type
                if "Reaction" in m_type:
                    if f_replace and F_REACTION_REV in f_replace:
                        mid = f_replace[F_REACTION_REV](mid)
                if "Metabolite" in m_type:
                    if f_replace and F_SPECIE_REV in f_replace:
                        mid = f_replace[F_SPECIE_REV](mid)
                if "Gene" in m_type:
                    if f_replace and F_GENE_REV in f_replace:
                        mid = f_replace[F_GENE_REV](mid)

                member.setIdRef(mid)
                if cobra_member.name and len(cobra_member.name) > 0:
                    member.setName(cobra_member.name)

    return doc
def _create_bound(model, reaction, bound_type, f_replace, units=None,
                  flux_udef=None):
    """Creates bound in model for given reaction.

    Adds the parameters for the bounds to the SBML model.

    Parameters
    ----------
    model : libsbml.Model
        SBML model instance
    reaction : cobra.core.Reaction
        Cobra reaction instance from which the bounds are read.
    bound_type : {LOWER_BOUND, UPPER_BOUND}
        Type of bound
    f_replace : dict of id replacement functions
    units : flux units

    Returns
    -------
    Id of bound parameter.
    """
    value = getattr(reaction, bound_type)

    # Reuse one of the shared default parameters if the value matches.
    # The order of the checks mirrors their precedence (default bounds
    # are preferred over the +/- infinity parameters).
    shared_bounds = (
        (config.lower_bound, LOWER_BOUND_ID),
        (0, ZERO_BOUND_ID),
        (config.upper_bound, UPPER_BOUND_ID),
        (-float("Inf"), BOUND_MINUS_INF),
        (float("Inf"), BOUND_PLUS_INF),
    )
    for default_value, parameter_id in shared_bounds:
        if value == default_value:
            return parameter_id

    # No shared parameter fits: create a reaction-specific one.
    rid = reaction.id
    if f_replace and F_REACTION_REV in f_replace:
        rid = f_replace[F_REACTION_REV](rid)
    pid = rid + "_" + bound_type
    _create_parameter(model, pid=pid, value=value, sbo=SBO_FLUX_BOUND,
                      units=units, flux_udef=flux_udef)
    return pid
def _create_parameter(model, pid, value, sbo=None, constant=True, units=None,
                      flux_udef=None):
    """Create parameter in SBML model.

    Parameters
    ----------
    model : libsbml.Model
        SBML model to which the parameter is added.
    pid : string
        Parameter id.
    value : float
        Parameter value.
    sbo : string, optional
        SBO term set on the parameter if given.
    constant : bool
        Whether the parameter is constant.
    units : truthy flag
        If truthy, the units of ``flux_udef`` are set on the parameter.
    flux_udef : libsbml.UnitDefinition, optional
        Flux unit definition whose id is used as the parameter units.
    """
    parameter = model.createParameter()  # type: libsbml.Parameter
    parameter.setId(pid)
    parameter.setValue(value)
    parameter.setConstant(constant)
    if sbo:
        parameter.setSBOTerm(sbo)
    # FIX: guard against a missing unit definition; `units` can be truthy
    # while no flux unit definition exists, which previously raised
    # AttributeError on `flux_udef.getId()`.
    if units and flux_udef is not None:
        parameter.setUnits(flux_udef.getId())
def _check_required(sbase, value, attribute):
"""Get required attribute from SBase.
Parameters
----------
sbase : libsbml.SBase
value : existing value
attribute: name of attribute
Returns
-------
attribute value (or value if already set)
"""
if (value is None) or (value == ""):
msg = "Required attribute '%s' cannot be found or parsed in '%s'" % \
(attribute, sbase)
if hasattr(sbase, "getId") and sbase.getId():
msg += " with id '%s'" % sbase.getId()
elif hasattr(sbase, "getName") and sbase.getName():
msg += " with name '%s'" % sbase.getName()
elif hasattr(sbase, "getMetaId") and sbase.getMetaId():
msg += " with metaId '%s'" % sbase.getName()
raise CobraSBMLError(msg)
return value |
def _check(value, message):
"""
Checks the libsbml return value and logs error messages.
If 'value' is None, logs an error message constructed using
'message' and then exits with status code 1. If 'value' is an integer,
it assumes it is a libSBML return status code. If the code value is
LIBSBML_OPERATION_SUCCESS, returns without further action; if it is not,
prints an error message constructed using 'message' along with text from
libSBML explaining the meaning of the code, and exits with status code 1.
"""
if value is None:
LOGGER.error('Error: LibSBML returned a null value trying '
'to <' + message + '>.')
elif type(value) is int:
if value == libsbml.LIBSBML_OPERATION_SUCCESS:
return
else:
LOGGER.error('Error encountered trying to <' + message + '>.')
LOGGER.error('LibSBML error code {}: {}'.format(str(value),
libsbml.OperationReturnValue_toString(value).strip()))
else:
return |
def _parse_notes_dict(sbase):
""" Creates dictionary of COBRA notes.
Parameters
----------
sbase : libsbml.SBase
Returns
-------
dict of notes
"""
notes = sbase.getNotesString()
if notes and len(notes) > 0:
pattern = r"<p>\s*(\w+\s*\w*)\s*:\s*([\w|\s]+)<"
matches = re.findall(pattern, notes)
d = {k.strip(): v.strip() for (k, v) in matches}
return {k: v for k, v in d.items() if len(v) > 0}
else:
return {} |
def _sbase_notes_dict(sbase, notes):
    """Set SBase notes based on dictionary.

    Each key-value pair is serialized as a ``<p>key: value</p>`` element
    inside an XHTML body; nothing is written for an empty notes dict.

    Parameters
    ----------
    sbase : libsbml.SBase
        SBML object to set notes on
    notes : notes object
        notes information from cobra object
    """
    if not notes:
        return
    body = ['<html xmlns = "http://www.w3.org/1999/xhtml" >']
    body.extend("<p>{}: {}</p>".format(key, val)
                for (key, val) in notes.items())
    body.append("</html>")
    _check(
        sbase.setNotes("\n".join(body)),
        "Setting notes on sbase: {}".format(sbase)
    )
def _parse_annotations(sbase):
    """Parses cobra annotations from a given SBase object.

    Annotations are dictionaries with the providers as keys. A provider
    with a single identifier maps to a plain string, one with several
    identifiers maps to a list of strings.

    Parameters
    ----------
    sbase : libsbml.SBase
        SBase from which the SBML annotations are read

    Returns
    -------
    dict (annotation dictionary)

    FIXME: annotation format must be updated (this is a big collection of
          fixes) - see: https://github.com/opencobra/cobrapy/issues/684)
    """
    annotation = {}

    # SBO term
    if sbase.isSetSBOTerm():
        # FIXME: correct handling of annotations
        annotation["sbo"] = sbase.getSBOTermID()

    # RDF annotation
    cvterms = sbase.getCVTerms()
    if cvterms is None:
        return annotation

    for cvterm in cvterms:  # type: libsbml.CVTerm
        for index in range(cvterm.getNumResources()):
            # FIXME: read and store the qualifier
            uri = cvterm.getResourceURI(index)
            match = URL_IDENTIFIERS_PATTERN.match(uri)
            if match is None:
                LOGGER.warning("%s does not conform to "
                               "http(s)://identifiers.org/collection/id", uri)
                continue

            provider, identifier = match.group(1), match.group(2)
            existing = annotation.get(provider)
            if existing is None:
                # first identifier for this provider: store as plain string
                # FIXME: always in list
                annotation[provider] = identifier
            else:
                # promote an earlier plain string to a list, then append
                # FIXME: use a list
                if isinstance(existing, string_types):
                    existing = [existing]
                    annotation[provider] = existing
                if identifier not in existing:
                    existing.append(identifier)

    return annotation
def _sbase_annotations(sbase, annotation):
    """Set SBase annotations based on cobra annotations.

    Each provider entry is normalized to a list of (qualifier, entity)
    tuples and written as a CVTerm resource of the form
    ``URL_IDENTIFIERS_PREFIX/provider/entity``. The special providers
    'sbo'/'SBO' set the SBO term on `sbase` instead.

    Parameters
    ----------
    sbase : libsbml.SBase
        SBML object to annotate
    annotation : cobra annotation structure
        cobra object with annotation information

    FIXME: annotation format must be updated
        (https://github.com/opencobra/cobrapy/issues/684)
    """
    if not annotation or len(annotation) == 0:
        return

    # standardize annotations: work on a copy so the cobra object's own
    # annotation dict is left untouched
    annotation_data = deepcopy(annotation)

    for key, value in annotation_data.items():
        # handling of non-string annotations (e.g. integers)
        if isinstance(value, (float, int)):
            value = str(value)
        # a bare string becomes a single ("is", value) tuple
        if isinstance(value, string_types):
            annotation_data[key] = [("is", value)]

    # second pass: bare strings inside existing lists also become
    # ("is", item) tuples (lists are mutated in place)
    for key, value in annotation_data.items():
        for idx, item in enumerate(value):
            if isinstance(item, string_types):
                value[idx] = ("is", item)

    # set metaId (required so that CVTerms can reference this object)
    meta_id = "meta_{}".format(sbase.getId())
    sbase.setMetaId(meta_id)

    # rdf_items = []
    for provider, data in iteritems(annotation_data):

        # set SBOTerm
        if provider in ["SBO", "sbo"]:
            if provider == "SBO":
                LOGGER.warning("'SBO' provider is deprecated, "
                               "use 'sbo' provider instead")
            sbo_term = data[0][1]
            _check(sbase.setSBOTerm(sbo_term),
                   "Setting SBOTerm: {}".format(sbo_term))

            # FIXME: sbo should also be written as CVTerm
            continue

        for item in data:
            qualifier_str, entity = item[0], item[1]
            qualifier = QUALIFIER_TYPES.get(qualifier_str, None)
            if qualifier is None:
                # unknown qualifier: fall back to "is" but log the problem
                qualifier = libsbml.BQB_IS
                LOGGER.error("Qualifier type is not supported on "
                             "annotation: '{}'".format(qualifier_str))

            # "bqm_" prefix marks model qualifiers, everything else is
            # treated as a biological qualifier
            qualifier_type = libsbml.BIOLOGICAL_QUALIFIER
            if qualifier_str.startswith("bqm_"):
                qualifier_type = libsbml.MODEL_QUALIFIER

            cv = libsbml.CVTerm()  # type: libsbml.CVTerm
            cv.setQualifierType(qualifier_type)
            if qualifier_type == libsbml.BIOLOGICAL_QUALIFIER:
                cv.setBiologicalQualifierType(qualifier)
            elif qualifier_type == libsbml.MODEL_QUALIFIER:
                cv.setModelQualifierType(qualifier)
            else:
                raise CobraSBMLError('Unsupported qualifier: '
                                     '%s' % qualifier)
            resource = "%s/%s/%s" % (URL_IDENTIFIERS_PREFIX, provider, entity)
            cv.addResource(resource)
            _check(sbase.addCVTerm(cv),
                   "Setting cvterm: {}, resource: {}".format(cv, resource))
def validate_sbml_model(filename,
                        check_model=True,
                        internal_consistency=True,
                        check_units_consistency=False,
                        check_modeling_practice=False, **kwargs):
    """Validate SBML model and returns the model along with a list of errors.

    Parameters
    ----------
    filename : str
        The filename (or SBML string) of the SBML model to be validated.
    internal_consistency: boolean {True, False}
        Check internal consistency.
    check_units_consistency: boolean {True, False}
        Check consistency of units.
    check_modeling_practice: boolean {True, False}
        Check modeling practise.
    check_model: boolean {True, False}
        Whether to also check some basic model properties such as reaction
        boundaries and compartment formulas.

    Returns
    -------
    (model, errors)
    model : :class:`~cobra.core.Model.Model` object
        The cobra model if the file could be read successfully or None
        otherwise.
    errors : dict
        Warnings and errors grouped by their respective types.

    Raises
    ------
    CobraSBMLError
    """
    # Errors and warnings are grouped based on their type. SBML_* types are
    # from the libsbml validator. COBRA_* types are from the cobrapy SBML
    # parser.
    keys = (
        "SBML_FATAL",
        "SBML_ERROR",
        "SBML_SCHEMA_ERROR",
        "SBML_WARNING",
        "COBRA_FATAL",
        "COBRA_ERROR",
        "COBRA_WARNING",
        "COBRA_CHECK",
    )
    errors = {key: [] for key in keys}

    # [1] libsbml validation
    doc = _get_doc_from_filename(filename)  # type: libsbml.SBMLDocument

    # set checking of units & modeling practise
    doc.setConsistencyChecks(libsbml.LIBSBML_CAT_UNITS_CONSISTENCY,
                             check_units_consistency)
    doc.setConsistencyChecks(libsbml.LIBSBML_CAT_MODELING_PRACTICE,
                             check_modeling_practice)

    # check internal consistency
    if internal_consistency:
        doc.checkInternalConsistency()
    doc.checkConsistency()

    for k in range(doc.getNumErrors()):
        e = doc.getError(k)  # type: libsbml.SBMLError
        msg = _error_string(e, k=k)
        sev = e.getSeverity()
        if sev == libsbml.LIBSBML_SEV_FATAL:
            errors["SBML_FATAL"].append(msg)
        elif sev == libsbml.LIBSBML_SEV_ERROR:
            errors["SBML_ERROR"].append(msg)
        elif sev == libsbml.LIBSBML_SEV_SCHEMA_ERROR:
            errors["SBML_SCHEMA_ERROR"].append(msg)
        elif sev == libsbml.LIBSBML_SEV_WARNING:
            errors["SBML_WARNING"].append(msg)

    # [2] cobrapy validation (check that SBML can be read into model)
    # all warnings generated while loading will be logged as errors
    log_stream = StringIO()
    stream_handler = logging.StreamHandler(log_stream)
    formatter = logging.Formatter('%(levelname)s:%(message)s')
    stream_handler.setFormatter(formatter)
    stream_handler.setLevel(logging.INFO)
    LOGGER.addHandler(stream_handler)
    LOGGER.propagate = False

    # FIX: restore the logger state on every exit path; previously the
    # early returns below leaked the stream handler and left
    # LOGGER.propagate = False.
    try:
        try:
            # read model and allow additional parser arguments
            model = _sbml_to_model(doc, **kwargs)
        except CobraSBMLError as e:
            errors["COBRA_ERROR"].append(str(e))
            return None, errors
        except Exception as e:
            errors["COBRA_FATAL"].append(str(e))
            return None, errors

        cobra_errors = log_stream.getvalue().split("\n")
        for cobra_error in cobra_errors:
            tokens = cobra_error.split(":")
            error_type = tokens[0]
            error_msg = ":".join(tokens[1:])

            if error_type == "WARNING":
                errors["COBRA_WARNING"].append(error_msg)
            elif error_type == "ERROR":
                errors["COBRA_ERROR"].append(error_msg)
    finally:
        # remove stream handler
        LOGGER.removeHandler(stream_handler)
        LOGGER.propagate = True

    # [3] additional model tests
    if check_model:
        errors["COBRA_CHECK"].extend(
            check_metabolite_compartment_formula(model)
        )

    for key in ["SBML_FATAL", "SBML_ERROR", "SBML_SCHEMA_ERROR"]:
        if len(errors[key]) > 0:
            LOGGER.error("SBML errors in validation, check error log "
                         "for details.")
            break
    for key in ["SBML_WARNING"]:
        if len(errors[key]) > 0:
            LOGGER.error("SBML warnings in validation, check error log "
                         "for details.")
            break
    for key in ["COBRA_FATAL", "COBRA_ERROR"]:
        if len(errors[key]) > 0:
            LOGGER.error("COBRA errors in validation, check error log "
                         "for details.")
            break
    for key in ["COBRA_WARNING", "COBRA_CHECK"]:
        if len(errors[key]) > 0:
            LOGGER.error("COBRA warnings in validation, check error log "
                         "for details.")
            break

    return model, errors
def _error_string(error, k=None):
"""String representation of SBMLError.
Parameters
----------
error : libsbml.SBMLError
k : index of error
Returns
-------
string representation of error
"""
package = error.getPackage()
if package == '':
package = 'core'
template = 'E{} ({}): {} ({}, L{}); {}; {}'
error_str = template.format(k, error.getSeverityAsString(),
error.getCategoryAsString(), package,
error.getLine(), error.getShortMessage(),
error.getMessage())
return error_str |
def production_envelope(model, reactions, objective=None, carbon_sources=None,
                        points=20, threshold=None):
    """Calculate the objective value conditioned on all combinations of
    fluxes for a set of chosen reactions

    The production envelope can be used to analyze a model's ability to
    produce a given compound conditional on the fluxes for another set of
    reactions, such as the uptake rates. The model is alternately optimized
    with respect to minimizing and maximizing the objective and the
    obtained fluxes are recorded. Ranges to compute production is set to the
    effective bounds, i.e., the minimum / maximum fluxes that can be
    obtained given current reaction bounds.

    Parameters
    ----------
    model : cobra.Model
        The model to compute the production envelope for.
    reactions : list or string
        A list of reactions, reaction identifiers or a single reaction.
    objective : string, dict, model.solver.interface.Objective, optional
        The objective (reaction) to use for the production envelope. Use the
        model's current objective if left missing.
    carbon_sources : list or string, optional
        One or more reactions or reaction identifiers that are the source of
        carbon for computing carbon (mol carbon in output over mol carbon in
        input) and mass yield (gram product over gram output). Only objectives
        with a carbon containing input and output metabolite is supported.
        Will identify active carbon sources in the medium if none are
        specified.
    points : int, optional
        The number of points to calculate production for.
    threshold : float, optional
        A cut-off under which flux values will be considered to be zero
        (default model.tolerance).

    Returns
    -------
    pandas.DataFrame
        A data frame with one row per evaluated point and

        - reaction id : one column per input reaction indicating the flux at
          each given point,
        - carbon_source: identifiers of carbon exchange reactions

        A column for the maximum and minimum each for the following types:

        - flux: the objective flux
        - carbon_yield: if carbon source is defined and the product is a
          single metabolite (mol carbon product per mol carbon feeding source)
        - mass_yield: if carbon source is defined and the product is a
          single metabolite (gram product per 1 g of feeding source)

    Examples
    --------
    >>> import cobra.test
    >>> from cobra.flux_analysis import production_envelope
    >>> model = cobra.test.create_test_model("textbook")
    >>> production_envelope(model, ["EX_glc__D_e", "EX_o2_e"])
    """
    reactions = model.reactions.get_by_any(reactions)
    objective = model.solver.objective if objective is None else objective
    data = dict()

    if carbon_sources is None:
        c_input = find_carbon_sources(model)
    else:
        c_input = model.reactions.get_by_any(carbon_sources)

    if c_input is None:
        data['carbon_source'] = None
    elif hasattr(c_input, 'id'):
        data['carbon_source'] = c_input.id
    else:
        data['carbon_source'] = ', '.join(rxn.id for rxn in c_input)

    threshold = normalize_cutoff(model, threshold)

    # one row per point in the Cartesian product of the reaction ranges
    size = points ** len(reactions)

    for direction in ('minimum', 'maximum'):
        data['flux_{}'.format(direction)] = full(size, nan, dtype=float)
        data['carbon_yield_{}'.format(direction)] = full(
            size, nan, dtype=float)
        data['mass_yield_{}'.format(direction)] = full(
            size, nan, dtype=float)

    grid = pd.DataFrame(data)

    with model:
        model.objective = objective
        objective_reactions = list(sutil.linear_reaction_coefficients(model))

        if len(objective_reactions) != 1:
            raise ValueError('cannot calculate yields for objectives with '
                             'multiple reactions')
        c_output = objective_reactions[0]

        # effective bounds from flux variability; snap tiny values to zero
        min_max = fva(model, reactions, fraction_of_optimum=0)
        min_max[min_max.abs() < threshold] = 0.0

        # FIX: do not shadow the `points` parameter with the grid itself
        grid_points = list(product(*[
            linspace(min_max.at[rxn.id, "minimum"],
                     min_max.at[rxn.id, "maximum"],
                     points, endpoint=True) for rxn in reactions]))
        tmp = pd.DataFrame(grid_points,
                           columns=[rxn.id for rxn in reactions])
        grid = pd.concat([grid, tmp], axis=1, copy=False)
        add_envelope(model, reactions, grid, c_input, c_output, threshold)

    return grid
def total_yield(input_fluxes, input_elements, output_flux, output_elements):
    """
    Compute total output per input unit.

    Units are typically mol carbon atoms or gram of source and product.

    Parameters
    ----------
    input_fluxes : list
        A list of input reaction fluxes in the same order as the
        ``input_components``.
    input_elements : list
        A list of reaction components which are in turn list of numbers.
    output_flux : float
        The output flux value.
    output_elements : list
        A list of stoichiometrically weighted output reaction components.

    Returns
    -------
    float
        The ratio between output (mol carbon atoms or grams of product) and
        input (mol carbon atoms or grams of source compounds). ``nan`` when
        the total input flux is zero.
    """
    consumed = sum(
        total_components_flux(flux, elements, consumption=True)
        for flux, elements in zip(input_fluxes, input_elements))
    produced = total_components_flux(
        output_flux, output_elements, consumption=False)
    try:
        return produced / consumed
    except ZeroDivisionError:
        return nan
def reaction_elements(reaction):
    """
    Split metabolites into the atoms times their stoichiometric coefficients.

    Parameters
    ----------
    reaction : Reaction
        The metabolic reaction whose components are desired.

    Returns
    -------
    list
        Each of the reaction's metabolites' desired carbon elements (if any)
        times that metabolite's stoichiometric coefficient.
    """
    weighted_carbons = (coeff * met.elements.get('C', 0)
                        for met, coeff in iteritems(reaction.metabolites))
    return [count for count in weighted_carbons if count != 0]
def reaction_weight(reaction):
    """Return the metabolite weight times its stoichiometric coefficient.

    Raises
    ------
    ValueError
        If the reaction does not involve exactly one metabolite.
    """
    if len(reaction.metabolites) != 1:
        raise ValueError('Reaction weight is only defined for single '
                         'metabolite products or educts.')
    metabolite, coefficient = next(iteritems(reaction.metabolites))
    return [coefficient * metabolite.formula_weight]
def total_components_flux(flux, components, consumption=True):
    """
    Compute the total components consumption or production flux.

    Parameters
    ----------
    flux : float
        The reaction flux for the components.
    components : list
        List of stoichiometrically weighted components.
    consumption : bool, optional
        Whether to sum up consumption or production fluxes.

    Returns
    -------
    float
        Sum of the positive directed component fluxes (0 if none).
    """
    sign = 1 if consumption else -1
    directed = (element * flux * sign for element in components)
    return sum(value for value in directed if value > 0)
def find_carbon_sources(model):
    """
    Find all active carbon source reactions.

    Parameters
    ----------
    model : Model
        A genome-scale metabolic model.

    Returns
    -------
    list
        The medium reactions with carbon input flux; empty if the model
        cannot be optimized.
    """
    try:
        model.slim_optimize(error_value=None)
    except OptimizationError:
        return []

    medium_reactions = model.reactions.get_by_any(list(model.medium))
    return [
        rxn for rxn in medium_reactions
        if total_components_flux(rxn.flux, reaction_elements(rxn),
                                 consumption=True) > 0
    ]
def assess(model, reaction, flux_coefficient_cutoff=0.001, solver=None):
    """Assesses production capacity.

    Assesses the capacity of the model to produce the precursors for the
    reaction and absorb the production of the reaction while the reaction is
    operating at, or above, the specified cutoff.

    Parameters
    ----------
    model : cobra.Model
        The cobra model to assess production capacity for
    reaction : reaction identifier or cobra.Reaction
        The reaction to assess
    flux_coefficient_cutoff : float
        The minimum flux that reaction must carry to be considered active.
    solver : basestring
        Solver name. If None, the default solver will be used.

    Returns
    -------
    bool or dict
        True if the model can produce the precursors and absorb the products
        for the reaction operating at, or above, flux_coefficient_cutoff.
        Otherwise, a dictionary of {'precursor': Status, 'product': Status}.
        Where Status is the results from assess_precursors and
        assess_products, respectively.
    """
    reaction = model.reactions.get_by_any(reaction)[0]
    with model as m:
        m.objective = reaction
        if _optimize_or_value(m, solver=solver) >= flux_coefficient_cutoff:
            return True
        # reaction cannot carry the required flux: report which side fails
        return {
            'precursors': assess_component(
                model, reaction, 'reactants', flux_coefficient_cutoff),
            'products': assess_component(
                model, reaction, 'products', flux_coefficient_cutoff),
        }
def assess_component(model, reaction, side, flux_coefficient_cutoff=0.001,
                     solver=None):
    """Assesses the ability of the model to provide sufficient precursors,
    or absorb products, for a reaction operating at, or beyond,
    the specified cutoff.

    Parameters
    ----------
    model : cobra.Model
        The cobra model to assess production capacity for
    reaction : reaction identifier or cobra.Reaction
        The reaction to assess
    side : basestring
        Side of the reaction, 'products' or 'reactants'
    flux_coefficient_cutoff : float
        The minimum flux that reaction must carry to be considered active.
    solver : basestring
        Solver name. If None, the default solver will be used.

    Returns
    -------
    bool or dict
        True if the precursors can be simultaneously produced at the
        specified cutoff. False, if the model has the capacity to produce
        each individual precursor at the specified threshold but not all
        precursors at the required level simultaneously. Otherwise a
        dictionary of the required and the produced fluxes for each reactant
        that is not produced in sufficient quantities.
    """
    reaction = model.reactions.get_by_any(reaction)[0]
    # key under which the achieved flux is reported in the result dict
    result_key = dict(reactants='produced', products='capacity')[side]
    get_components = attrgetter(side)
    # all modifications below are reverted when the model context exits
    with model as m:
        m.objective = reaction
        if _optimize_or_value(m, solver=solver) >= flux_coefficient_cutoff:
            return True
        simulation_results = {}
        # build the demand reactions and add all at once
        demand_reactions = {}
        for component in get_components(reaction):
            coeff = reaction.metabolites[component]
            demand = m.add_boundary(component, type='demand')
            # scale the demand by the component's stoichiometric coefficient
            demand.metabolites[component] = coeff
            demand_reactions[demand] = (component, coeff)
        # First assess whether all precursors can be produced simultaneously
        # by maximizing the sum of all demand reactions
        joint_demand = Reaction("joint_demand")
        for demand_reaction in demand_reactions:
            joint_demand += demand_reaction
        m.add_reactions([joint_demand])
        m.objective = joint_demand
        if _optimize_or_value(m, solver=solver) >= flux_coefficient_cutoff:
            return True
        # Otherwise assess the ability of the model to produce each precursor
        # individually. Now assess the ability of the model to produce each
        # reactant for a reaction
        for demand_reaction, (component, coeff) in iteritems(demand_reactions):
            # Calculate the maximum flux of this single demand reaction,
            # i.e. the maximum amount of the metabolite that can be produced
            with m:
                m.objective = demand_reaction
                flux = _optimize_or_value(m, solver=solver)
            # record only components that fall short of the cutoff
            if flux_coefficient_cutoff > flux:
                # Scale the results to a single unit
                simulation_results.update({
                    component: {
                        'required': flux_coefficient_cutoff / abs(coeff),
                        result_key: flux / abs(coeff)
                    }})
        # no individual shortfall found: overall infeasibility -> False
        if len(simulation_results) == 0:
            simulation_results = False
        return simulation_results
def assess_precursors(model, reaction, flux_coefficient_cutoff=0.001,
                      solver=None):
    """Assess the model's ability to supply a reaction's precursors.

    Deprecated alias: delegates to ``assess_component`` with the
    ``'reactants'`` side selected.

    Parameters
    ----------
    model : cobra.Model
        The cobra model to assess production capacity for
    reaction : reaction identifier or cobra.Reaction
        The reaction to assess
    flux_coefficient_cutoff : float
        The minimum flux that reaction must carry to be considered active.
    solver : basestring
        Solver name. If None, the default solver will be used.

    Returns
    -------
    bool or dict
        True if the precursors can be simultaneously produced at the
        specified cutoff. False, if the model has the capacity to produce
        each individual precursor at the specified threshold but not all
        precursors at the required level simultaneously. Otherwise a
        dictionary of the required and the produced fluxes for each reactant
        that is not produced in sufficient quantities.
    """
    warn('use assess_component instead', DeprecationWarning)
    return assess_component(
        model, reaction, 'reactants', flux_coefficient_cutoff, solver)
def assess_products(model, reaction, flux_coefficient_cutoff=0.001,
                    solver=None):
    """Assess the model's capacity to absorb a reaction's products.

    Useful for identifying which components might be blocking a reaction
    from achieving a specific flux rate.

    Deprecated alias: delegates to ``assess_component`` with the
    ``'products'`` side selected.

    Parameters
    ----------
    model : cobra.Model
        The cobra model to assess production capacity for
    reaction : reaction identifier or cobra.Reaction
        The reaction to assess
    flux_coefficient_cutoff : float
        The minimum flux that reaction must carry to be considered active.
    solver : basestring
        Solver name. If None, the default solver will be used.

    Returns
    -------
    bool or dict
        True if the model has the capacity to absorb all the reaction
        products being simultaneously given the specified cutoff. False,
        if the model has the capacity to absorb each individual product but
        not all products at the required level simultaneously. Otherwise a
        dictionary of the required and the capacity fluxes for each product
        that is not absorbed in sufficient quantities.
    """
    warn('use assess_component instead', DeprecationWarning)
    return assess_component(
        model, reaction, 'products', flux_coefficient_cutoff, solver)
def add_loopless(model, zero_cutoff=None):
    """Modify a model so all feasible flux distributions are loopless.
    In most cases you probably want to use the much faster `loopless_solution`.
    May be used in cases where you want to add complex constraints and
    objecives (for instance quadratic objectives) to the model afterwards
    or use an approximation of Gibbs free energy directions in you model.
    Adds variables and constraints to a model which will disallow flux
    distributions with loops. The used formulation is described in [1]_.
    This function *will* modify your model.
    Parameters
    ----------
    model : cobra.Model
        The model to which to add the constraints.
    zero_cutoff : positive float, optional
        Cutoff used for null space. Coefficients with an absolute value smaller
        than `zero_cutoff` are considered to be zero (default model.tolerance).
    Returns
    -------
    Nothing
    References
    ----------
    .. [1] Elimination of thermodynamically infeasible loops in steady-state
       metabolic models. Schellenberger J, Lewis NE, Palsson BO. Biophys J.
       2011 Feb 2;100(3):544-53. doi: 10.1016/j.bpj.2010.12.3707. Erratum
       in: Biophys J. 2011 Mar 2;100(5):1381.
    """
    zero_cutoff = normalize_cutoff(model, zero_cutoff)
    # Only internal (non-boundary) reactions can participate in cycles.
    internal = [i for i, r in enumerate(model.reactions) if not r.boundary]
    s_int = create_stoichiometric_matrix(model)[:, numpy.array(internal)]
    # Each row of n_int is a nullspace vector of the internal stoichiometry;
    # it yields one constraint sum_j n_ij * G_j = 0 on the "energy" variables.
    n_int = nullspace(s_int).T
    # Big-M value: largest absolute flux bound in the model.
    max_bound = max(max(abs(b) for b in r.bounds) for r in model.reactions)
    prob = model.problem
    # Add indicator variables and new constraints
    to_add = []
    for i in internal:
        rxn = model.reactions[i]
        # indicator variable a_i
        indicator = prob.Variable("indicator_" + rxn.id, type="binary")
        # -M*(1 - a_i) <= v_i <= M*a_i
        on_off_constraint = prob.Constraint(
            rxn.flux_expression - max_bound * indicator,
            lb=-max_bound, ub=0, name="on_off_" + rxn.id)
        # -(max_bound + 1) * a_i + 1 <= G_i <= -(max_bound + 1) * a_i + 1000
        delta_g = prob.Variable("delta_g_" + rxn.id)
        delta_g_range = prob.Constraint(
            delta_g + (max_bound + 1) * indicator,
            lb=1, ub=max_bound, name="delta_g_range_" + rxn.id)
        to_add.extend([indicator, on_off_constraint, delta_g, delta_g_range])
    model.add_cons_vars(to_add)
    # Add nullspace constraints for G_i
    for i, row in enumerate(n_int):
        name = "nullspace_constraint_" + str(i)
        nullspace_constraint = prob.Constraint(Zero, lb=0, ub=0, name=name)
        model.add_cons_vars([nullspace_constraint])
        # NOTE: the comprehension's `i` shadows the outer loop variable
        # (comprehensions have their own scope in Python 3); here it indexes
        # positions within `internal`, matching the columns of `row`.
        coefs = {model.variables[
            "delta_g_" + model.reactions[ridx].id]: row[i]
            for i, ridx in enumerate(internal) if
            abs(row[i]) > zero_cutoff}
        model.constraints[name].set_linear_coefficients(coefs)
def _add_cycle_free(model, fluxes):
    """Install the CycleFreeFlux objective and direction-fixing bounds.

    Minimizes total absolute flux through the internal reactions while
    forcing every reaction to keep the sign it has in the reference
    `fluxes` and pinning boundary reactions to their reference value.
    """
    model.objective = model.solver.interface.Objective(
        Zero, direction="min", sloppy=True)
    to_minimize = []
    for reaction in model.reactions:
        reference = fluxes[reaction.id]
        if reaction.boundary:
            # Exchange fluxes are fixed to the reference solution.
            reaction.bounds = (reference, reference)
        elif reference >= 0:
            # Forward-running reaction: forbid the reverse direction.
            reaction.bounds = (max(0, reaction.lower_bound),
                               max(reference, reaction.upper_bound))
            to_minimize.append(reaction.forward_variable)
        else:
            # Reverse-running reaction: forbid the forward direction.
            reaction.bounds = (min(reference, reaction.lower_bound),
                               min(0, reaction.upper_bound))
            to_minimize.append(reaction.reverse_variable)
    model.objective.set_linear_coefficients(dict.fromkeys(to_minimize, 1.0))
def loopless_solution(model, fluxes=None):
    """Convert an existing solution to a loopless one.
    Removes as many loops as possible (see Notes).
    Uses the method from CycleFreeFlux [1]_ and is much faster than
    `add_loopless` and should therefore be the preferred option to get loopless
    flux distributions.
    Parameters
    ----------
    model : cobra.Model
        The model to which to add the constraints.
    fluxes : dict
        A dictionary {rxn_id: flux} that assigns a flux to each reaction. If
        not None will use the provided flux values to obtain a close loopless
        solution.
    Returns
    -------
    cobra.Solution
        A solution object containing the fluxes with the least amount of
        loops possible or None if the optimization failed (usually happening
        if the flux distribution in `fluxes` is infeasible).
    Notes
    -----
    The returned flux solution has the following properties:
    - it contains the minimal number of loops possible and no loops at all if
      all flux bounds include zero
    - it has an objective value close to the original one and the same
      objective value id the objective expression can not form a cycle
      (which is usually true since it consumes metabolites)
    - it has the same exact exchange fluxes as the previous solution
    - all fluxes have the same sign (flow in the same direction) as the
      previous solution
    References
    ----------
    .. [1] CycleFreeFlux: efficient removal of thermodynamically infeasible
       loops from flux distributions. Desouki AA, Jarre F, Gelius-Dietrich
       G, Lercher MJ. Bioinformatics. 2015 Jul 1;31(13):2159-65. doi:
       10.1093/bioinformatics/btv096.
    """
    # Need to reoptimize otherwise spurious solution artifacts can cause
    # all kinds of havoc
    # TODO: check solution status
    if fluxes is None:
        sol = model.optimize(objective_sense=None)
        fluxes = sol.fluxes
    with model:
        prob = model.problem
        # Capture the original objective expression as a constraint so its
        # primal value can be read back after re-optimizing.
        # Needs one fixed bound for cplex...
        loopless_obj_constraint = prob.Constraint(
            model.objective.expression,
            lb=-1e32, name="loopless_obj_constraint")
        model.add_cons_vars([loopless_obj_constraint])
        # Replaces the objective with the CycleFreeFlux minimization and
        # restricts all fluxes to the signs of the reference solution.
        _add_cycle_free(model, fluxes)
        solution = model.optimize(objective_sense=None)
        # Report the value of the *original* objective, not the
        # CycleFreeFlux minimization objective.
        solution.objective_value = loopless_obj_constraint.primal
    return solution
def loopless_fva_iter(model, reaction, solution=False, zero_cutoff=None):
    """Plugin to get a loopless FVA solution from single FVA iteration.
    Assumes the following about `model` and `reaction`:
    1. the model objective is set to be `reaction`
    2. the model has been optimized and contains the minimum/maximum flux for
       `reaction`
    3. the model contains an auxiliary variable called "fva_old_objective"
       denoting the previous objective
    Parameters
    ----------
    model : cobra.Model
        The model to be used.
    reaction : cobra.Reaction
        The reaction currently minimized/maximized.
    solution : boolean, optional
        Whether to return the entire solution or only the minimum/maximum for
        `reaction`.
    zero_cutoff : positive float, optional
        Cutoff used for loop removal. Fluxes with an absolute value smaller
        than `zero_cutoff` are considered to be zero (default model.tolerance).
    Returns
    -------
    single float or dict
        Returns the minimized/maximized flux through `reaction` if
        all_fluxes == False (default). Otherwise returns a loopless flux
        solution containing the minimum/maximum flux for `reaction`.
    """
    zero_cutoff = normalize_cutoff(model, zero_cutoff)
    # Snapshot the FVA optimum and the full solution before modifying bounds.
    current = model.objective.value
    sol = get_solution(model)
    objective_dir = model.objective.direction
    # boundary reactions can not be part of cycles
    if reaction.boundary:
        if solution:
            return sol
        else:
            return current
    with model:
        _add_cycle_free(model, sol.fluxes)
        model.slim_optimize()
        # If the previous optimum is maintained in the loopless solution it was
        # loopless and we are done
        if abs(reaction.flux - current) < zero_cutoff:
            if solution:
                return sol
            return current
        # If previous optimum was not in the loopless solution create a new
        # almost loopless solution containing only loops including the current
        # reaction. Than remove all of those loops.
        ll_sol = get_solution(model).fluxes
        # Pin the reaction to its FVA optimum and resolve to expose only
        # the loops that involve this reaction.
        reaction.bounds = (current, current)
        model.slim_optimize()
        almost_ll_sol = get_solution(model).fluxes
    with model:
        # find the reactions with loops using the current reaction and remove
        # the loops
        for rxn in model.reactions:
            rid = rxn.id
            # Active in the almost-loopless solution but inactive in the
            # loopless one: part of a loop through `reaction` -> force to 0.
            if ((abs(ll_sol[rid]) < zero_cutoff) and
                    (abs(almost_ll_sol[rid]) > zero_cutoff)):
                rxn.bounds = max(0, rxn.lower_bound), min(0, rxn.upper_bound)
        if solution:
            best = model.optimize()
        else:
            model.slim_optimize()
            best = reaction.flux
    # Restore the optimization direction that was active on entry.
    model.objective.direction = objective_dir
    return best
def create_stoichiometric_matrix(model, array_type='dense', dtype=None):
    """Build the stoichiometric matrix of `model`.

    Rows correspond to metabolites and columns to reactions, so entry
    (i, j) holds the amount of metabolite `i` produced (negative when
    consumed) by reaction `j`.

    Parameters
    ----------
    model : cobra.Model
        The cobra model to construct the matrix for.
    array_type : string
        The type of array to construct. if 'dense', return a standard
        numpy.array, 'dok', or 'lil' will construct a sparse array using
        scipy of the corresponding type and 'DataFrame' will give a
        pandas `DataFrame` with metabolite indices and reaction columns
    dtype : data-type
        The desired data-type for the array. If not given, defaults to float.

    Returns
    -------
    matrix of class `dtype`
        The stoichiometric matrix for the given model.
    """
    # Sparse formats need scipy; 'dense' and 'DataFrame' are numpy-backed.
    if array_type not in ('DataFrame', 'dense') and not dok_matrix:
        raise ValueError('Sparse matrices require scipy')
    if dtype is None:
        dtype = np.float64
    constructors = {
        'dense': np.zeros,
        'dok': dok_matrix,
        'lil': lil_matrix,
        'DataFrame': np.zeros,
    }
    shape = (len(model.metabolites), len(model.reactions))
    array = constructors[array_type](shape, dtype=dtype)
    met_index = model.metabolites.index
    rxn_index = model.reactions.index
    for rxn in model.reactions:
        for met, coefficient in iteritems(rxn.metabolites):
            array[met_index(met), rxn_index(rxn)] = coefficient
    if array_type != 'DataFrame':
        return array
    return pd.DataFrame(array,
                        index=[met.id for met in model.metabolites],
                        columns=[rxn.id for rxn in model.reactions])
def nullspace(A, atol=1e-13, rtol=0):
    """Compute an approximate orthonormal basis for the nullspace of `A`.

    Uses the singular value decomposition of `A`: right singular vectors
    whose singular values fall below the tolerance span the nullspace.

    Parameters
    ----------
    A : numpy.ndarray
        A should be at most 2-D. A 1-D array with length k will be treated
        as a 2-D with shape (1, k)
    atol : float
        The absolute tolerance for a zero singular value. Singular values
        smaller than `atol` are considered to be zero.
    rtol : float
        The relative tolerance. Singular values less than rtol*smax are
        considered to be zero, where smax is the largest singular value.
        If both `atol` and `rtol` are positive, the combined tolerance is the
        maximum of the two; that is::
        tol = max(atol, rtol * smax)
        Singular values smaller than `tol` are considered to be zero.

    Returns
    -------
    numpy.ndarray
        If `A` is an array with shape (m, k), then the result has shape
        (k, n), where n is the estimated dimension of the nullspace of
        `A`. Its columns form a basis for the nullspace; each element of
        numpy.dot(A, ns) is approximately zero.

    Notes
    -----
    Taken from the numpy cookbook.
    """
    matrix = np.atleast_2d(A)
    _, singular_values, vh = np.linalg.svd(matrix)
    tolerance = max(atol, rtol * singular_values[0])
    rank = int((singular_values >= tolerance).sum())
    # Rows of vh below the numerical rank span the nullspace; transpose so
    # basis vectors become columns.
    return vh[rank:].conj().T
def constraint_matrices(model, array_type='dense', include_vars=False,
                        zero_tol=1e-6):
    """Create a matrix representation of the problem.
    This is used for alternative solution approaches that do not use optlang.
    The function will construct the equality matrix, inequality matrix and
    bounds for the complete problem.
    Notes
    -----
    To accomodate non-zero equalities the problem will add the variable
    "const_one" which is a variable that equals one.
    Arguments
    ---------
    model : cobra.Model
        The model from which to obtain the LP problem.
    array_type : string
        The type of array to construct. if 'dense', return a standard
        numpy.array, 'dok', or 'lil' will construct a sparse array using
        scipy of the corresponding type and 'DataFrame' will give a
        pandas `DataFrame` with metabolite indices and reaction columns.
    zero_tol : float
        The zero tolerance used to judge whether two bounds are the same.
    Returns
    -------
    collections.namedtuple
        A named tuple consisting of 6 matrices and 2 vectors:
        - "equalities" is a matrix S such that S*vars = b. It includes a row
          for each constraint and one column for each variable.
        - "b" the right side of the equality equation such that S*vars = b.
        - "inequalities" is a matrix M such that lb <= M*vars <= ub.
          It contains a row for each inequality and as many columns as
          variables.
        - "bounds" is a compound matrix [lb ub] containing the lower and
          upper bounds for the inequality constraints in M.
        - "variable_fixed" is a boolean vector indicating whether the variable
          at that index is fixed (lower bound == upper_bound) and
          is thus bounded by an equality constraint.
        - "variable_bounds" is a compound matrix [lb ub] containing the
          lower and upper bounds for all variables.
    """
    # Only sparse types need scipy; 'dense' and 'DataFrame' use numpy/pandas.
    if array_type not in ('DataFrame', 'dense') and not dok_matrix:
        raise ValueError('Sparse matrices require scipy')
    array_builder = {
        'dense': np.array, 'dok': dok_matrix, 'lil': lil_matrix,
        'DataFrame': pd.DataFrame,
    }[array_type]
    Problem = namedtuple("Problem",
                         ["equalities", "b", "inequalities", "bounds",
                          "variable_fixed", "variable_bounds"])
    equality_rows = []
    inequality_rows = []
    inequality_bounds = []
    b = []
    for const in model.constraints:
        # Missing bounds are treated as unbounded in that direction.
        lb = -np.inf if const.lb is None else const.lb
        ub = np.inf if const.ub is None else const.ub
        # A constraint whose bounds (nearly) coincide is an equality.
        equality = (ub - lb) < zero_tol
        coefs = const.get_linear_coefficients(model.variables)
        # Keep column order identical to model.variables for every row.
        coefs = [coefs[v] for v in model.variables]
        if equality:
            b.append(lb if abs(lb) > zero_tol else 0.0)
            equality_rows.append(coefs)
        else:
            inequality_rows.append(coefs)
            inequality_bounds.append([lb, ub])
    var_bounds = np.array([[v.lb, v.ub] for v in model.variables])
    # Variables whose bounds coincide are effectively fixed by an equality.
    fixed = var_bounds[:, 1] - var_bounds[:, 0] < zero_tol
    results = Problem(
        equalities=array_builder(equality_rows),
        b=np.array(b),
        inequalities=array_builder(inequality_rows),
        bounds=array_builder(inequality_bounds),
        variable_fixed=np.array(fixed),
        variable_bounds=array_builder(var_bounds))
    return results
def room(model, solution=None, linear=False, delta=0.03, epsilon=1E-03):
    """
    Compute a single solution based on regulatory on/off minimization (ROOM).

    Compute a new flux distribution that minimizes the number of active
    reactions needed to accommodate a previous reference solution.
    Regulatory on/off minimization (ROOM) is generally used to assess the
    impact of knock-outs. Thus the typical usage is to provide a wildtype flux
    distribution as reference and a model in knock-out state.

    Parameters
    ----------
    model : cobra.Model
        The model state to compute a ROOM-based solution for.
    solution : cobra.Solution, optional
        A (wildtype) reference solution.
    linear : bool, optional
        Whether to use the linear ROOM formulation or not (default False).
    delta: float, optional
        The relative tolerance range (additive) (default 0.03).
    epsilon: float, optional
        The absolute tolerance range (multiplicative) (default 0.001).

    Returns
    -------
    cobra.Solution
        A flux distribution with minimal active reaction changes compared to
        the reference.

    See Also
    --------
    add_room : add ROOM constraints and objective
    """
    # The context manager reverts the ROOM variables/constraints on exit.
    with model:
        add_room(model=model, solution=solution, linear=linear, delta=delta,
                 epsilon=epsilon)
        room_solution = model.optimize()
    return room_solution
def add_room(model, solution=None, linear=False, delta=0.03, epsilon=1E-03):
    r"""
    Add constraints and objective for ROOM.
    This function adds variables and constraints for applying regulatory
    on/off minimization (ROOM) to the model.
    Parameters
    ----------
    model : cobra.Model
        The model to add ROOM constraints and objective to.
    solution : cobra.Solution, optional
        A previous solution to use as a reference. If no solution is given,
        one will be computed using pFBA.
    linear : bool, optional
        Whether to use the linear ROOM formulation or not (default False).
    delta: float, optional
        The relative tolerance range which is additive in nature
        (default 0.03).
    epsilon: float, optional
        The absolute range of tolerance which is multiplicative
        (default 0.001).
    Raises
    ------
    ValueError
        If the model already carries ROOM constraints.
    Notes
    -----
    The formulation used here is the same as stated in the original paper [1]_.
    The mathematical expression is given below:
    minimize \sum_{i=1}^m y^i
    s.t. Sv = 0
         v_min <= v <= v_max
         v_j = 0
         j ∈ A
         for 1 <= i <= m
         v_i - y_i(v_{max,i} - w_i^u) <= w_i^u (1)
         v_i - y_i(v_{min,i} - w_i^l) <= w_i^l (2)
         y_i ∈ {0,1} (3)
         w_i^u = w_i + \delta|w_i| + \epsilon
         w_i^l = w_i - \delta|w_i| - \epsilon
    So, for the linear version of the ROOM , constraint (3) is relaxed to
    0 <= y_i <= 1.
    See Also
    --------
    pfba : parsimonious FBA
    References
    ----------
    .. [1] Tomer Shlomi, Omer Berkman and Eytan Ruppin, "Regulatory on/off
     minimization of metabolic flux changes after genetic perturbations",
     PNAS 2005 102 (21) 7695-7700; doi:10.1073/pnas.0406346102
    """
    # Guard against applying ROOM twice to the same model.
    if 'room_old_objective' in model.solver.variables:
        raise ValueError('model is already adjusted for ROOM')
    # optimizes if no reference solution is provided
    if solution is None:
        solution = pfba(model)
    prob = model.problem
    # Fix the original objective at (or below) its reference value while the
    # new objective minimizes the number of changed reactions.
    variable = prob.Variable("room_old_objective", ub=solution.objective_value)
    constraint = prob.Constraint(
        model.solver.objective.expression - variable,
        ub=0.0,
        lb=0.0,
        name="room_old_objective_constraint"
    )
    model.objective = prob.Objective(Zero, direction="min", sloppy=True)
    vars_and_cons = [variable, constraint]
    obj_vars = []
    for rxn in model.reactions:
        flux = solution.fluxes[rxn.id]
        if linear:
            y = prob.Variable("y_" + rxn.id, lb=0, ub=1)
            # In the linear relaxation the tolerance window collapses to the
            # reference flux itself (w_u == w_l == flux) for all reactions.
            delta = epsilon = 0.0
        else:
            y = prob.Variable("y_" + rxn.id, type="binary")
        # upper constraint
        w_u = flux + (delta * abs(flux)) + epsilon
        upper_const = prob.Constraint(
            rxn.flux_expression - y * (rxn.upper_bound - w_u),
            ub=w_u, name="room_constraint_upper_" + rxn.id)
        # lower constraint
        w_l = flux - (delta * abs(flux)) - epsilon
        lower_const = prob.Constraint(
            rxn.flux_expression - y * (rxn.lower_bound - w_l),
            lb=w_l, name="room_constraint_lower_" + rxn.id)
        vars_and_cons.extend([y, upper_const, lower_const])
        obj_vars.append(y)
    model.add_cons_vars(vars_and_cons)
    # Objective: minimize the sum of on/off indicators y_i.
    model.objective.set_linear_coefficients({v: 1.0 for v in obj_vars})
def sample(model, n, method="optgp", thinning=100, processes=1, seed=None):
    """Sample valid flux distributions from a cobra model.

    Two sampling algorithms are available:

    1. 'optgp' (default) which uses the OptGPSampler that supports parallel
    sampling [1]_. Requires large numbers of samples to be performant
    (n < 1000). For smaller samples 'achr' might be better suited.
    2. 'achr' which uses artificial centering hit-and-run. This is a single
    process method with good convergence [2]_.

    Parameters
    ----------
    model : cobra.Model
        The model from which to sample flux distributions.
    n : int
        The number of samples to obtain. When using 'optgp' this must be a
        multiple of `processes`, otherwise a larger number of samples will be
        returned.
    method : str, optional
        The sampling algorithm to use.
    thinning : int, optional
        The thinning factor of the generated sampling chain. A thinning of 10
        means samples are returned every 10 steps. Defaults to 100 which in
        benchmarks gives approximately uncorrelated samples. If set to one
        will return all iterates.
    processes : int, optional
        Only used for 'optgp'. The number of processes used to generate
        samples.
    seed : int > 0, optional
        The random number seed to be used. Initialized to current time stamp
        if None.

    Returns
    -------
    pandas.DataFrame
        The generated flux samples. Each row corresponds to a sample of the
        fluxes and the columns are the reactions.

    Notes
    -----
    The samplers have a correction method to ensure equality feasibility for
    long-running chains, however this will only work for homogeneous models,
    meaning models with no non-zero fixed variables or constraints (
    right-hand side of the equalities are zero).

    References
    ----------
    .. [1] Megchelenbrink W, Huynen M, Marchiori E (2014)
       optGpSampler: An Improved Tool for Uniformly Sampling the Solution-Space
       of Genome-Scale Metabolic Networks.
       PLoS ONE 9(2): e86587.
    .. [2] Direction Choice for Accelerated Convergence in Hit-and-Run Sampling
       David E. Kaufman Robert L. Smith
       Operations Research 199846:1 , 84-95
    """
    # Validate the method name up front, then build the matching sampler.
    if method not in ("optgp", "achr"):
        raise ValueError("method must be 'optgp' or 'achr'!")
    if method == "achr":
        sampler = ACHRSampler(model, thinning=thinning, seed=seed)
    else:
        sampler = OptGPSampler(model, processes, thinning=thinning, seed=seed)
    reaction_ids = [reaction.id for reaction in model.reactions]
    return pandas.DataFrame(data=sampler.sample(n), columns=reaction_ids)
def fastcc(model, flux_threshold=1.0, zero_cutoff=None):
    r"""
    Check consistency of a metabolic network using FASTCC [1]_.
    FASTCC (Fast Consistency Check) is an algorithm for rapid and
    efficient consistency check in metabolic networks. FASTCC is
    a pure LP implementation and is low on computation resource
    demand. FASTCC also circumvents the problem associated with
    reversible reactions for the purpose. Given a global model,
    it will generate a consistent global model i.e., remove
    blocked reactions. For more details on FASTCC, please
    check [1]_.
    Parameters
    ----------
    model: cobra.Model
        The constraint-based model to operate on.
    flux_threshold: float, optional (default 1.0)
        The flux threshold to consider.
    zero_cutoff: float, optional
        The cutoff to consider for zero flux (default model.tolerance).
    Returns
    -------
    cobra.Model
        The consistent constraint-based model.
    Notes
    -----
    The LP used for FASTCC is like so:
    maximize: \sum_{i \in J} z_i
    s.t.    : z_i \in [0, \varepsilon] \forall i \in J, z_i \in \mathbb{R}_+
              v_i \ge z_i \forall i \in J
              Sv = 0 v \in B
    References
    ----------
    .. [1] Vlassis N, Pacheco MP, Sauter T (2014)
       Fast Reconstruction of Compact Context-Specific Metabolic Network
       Models.
       PLoS Comput Biol 10(1): e1003424. doi:10.1371/journal.pcbi.1003424
    """
    zero_cutoff = normalize_cutoff(model, zero_cutoff)
    # All LP modifications happen inside the context manager so the input
    # model is reverted afterwards; only the copy below keeps the changes.
    with model:
        obj_vars = []
        vars_and_cons = []
        prob = model.problem
        for rxn in model.reactions:
            # z_i in [0, flux_threshold] ...
            var = prob.Variable("auxiliary_{}".format(rxn.id),
                                lb=0.0, ub=flux_threshold)
            # ... with |v_i| (forward + reverse parts) >= z_i.
            const = prob.Constraint(rxn.forward_variable +
                                    rxn.reverse_variable -
                                    var, name="constraint_{}".format(rxn.id),
                                    lb=0.0)
            vars_and_cons.extend([var, const])
            obj_vars.append(var)
        model.add_cons_vars(vars_and_cons)
        # Maximize sum of z_i: reactions that cannot reach the threshold
        # (blocked reactions) end up with (near) zero flux.
        model.objective = prob.Objective(Zero, sloppy=True, direction="max")
        model.objective.set_linear_coefficients({v: 1.0 for v in obj_vars})
        sol = model.optimize()
        rxns_to_remove = sol.fluxes[sol.fluxes.abs() < zero_cutoff].index
        consistent_model = model.copy()
        consistent_model.remove_reactions(rxns_to_remove, remove_orphans=True)
    return consistent_model
def sample(self, n, fluxes=True):
    """Generate a set of samples.
    This is the basic sampling function for all hit-and-run samplers.
    Parameters
    ----------
    n : int
        The number of samples that are generated at once.
    fluxes : boolean
        Whether to return fluxes or the internal solver variables. If set
        to False will return a variable for each forward and backward flux
        as well as all additional variables you might have defined in the
        model.
    Returns
    -------
    pandas.DataFrame
        Returns a data frame with `n` rows, each containing a flux sample.
    Notes
    -----
    Performance of this function linearly depends on the number
    of reactions in your model and the thinning factor.
    """
    samples = np.zeros((n, self.warmup.shape[1]))
    # Run thinning * n iterations but keep only every thinning-th state to
    # reduce autocorrelation between successive samples.
    for i in range(1, self.thinning * n + 1):
        self.__single_iteration()
        if i % self.thinning == 0:
            samples[i//self.thinning - 1, ] = self.prev
    if fluxes:
        # Net flux = forward variable - reverse variable per reaction.
        names = [r.id for r in self.model.reactions]
        return pandas.DataFrame(
            samples[:, self.fwd_idx] - samples[:, self.rev_idx],
            columns=names)
    else:
        # Return the raw solver variables instead of net fluxes.
        names = [v.name for v in self.model.variables]
        return pandas.DataFrame(samples, columns=names)
def optimizely(parser, token):
    """
    Optimizely template tag.

    Renders the Javascript needed to set up A/B testing. Requires the
    Optimizely account number in the ``OPTIMIZELY_ACCOUNT_NUMBER`` setting.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return OptimizelyNode()
def google_analytics(parser, token):
    """
    Google Analytics tracking template tag.

    Renders the Javascript needed to track page visits. Requires the
    website property ID (as a string) in the
    ``GOOGLE_ANALYTICS_PROPERTY_ID`` setting.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return GoogleAnalyticsNode()
def google_analytics_js(parser, token):
    """
    Google Analytics tracking template tag.

    Renders the Javascript needed to track page visits. Requires the
    website property ID (as a string) in the
    ``GOOGLE_ANALYTICS_JS_PROPERTY_ID`` setting.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return GoogleAnalyticsJsNode()
def rating_mailru(parser, token):
    """
    Rating@Mail.ru counter template tag.

    Renders the Javascript needed to track page visits. Requires the
    website counter ID (as a string) in the ``RATING_MAILRU_COUNTER_ID``
    setting.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return RatingMailruNode()
def clicky(parser, token):
    """
    Clicky tracking template tag.

    Renders the Javascript needed to track page visits. Requires the
    Clicky Site ID (as a string) in the ``CLICKY_SITE_ID`` setting.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return ClickyNode()
def chartbeat_top(parser, token):
    """
    Top Chartbeat template tag.

    Renders the Javascript that belongs at the top of the page for
    Chartbeat.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return ChartbeatTopNode()
def chartbeat_bottom(parser, token):
    """
    Bottom Chartbeat template tag.

    Renders the Javascript that belongs at the bottom of the page for
    Chartbeat. Requires the Chartbeat User ID (as a string) in the
    ``CHARTBEAT_USER_ID`` setting.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return ChartbeatBottomNode()
def woopra(parser, token):
    """
    Woopra tracking template tag.

    Renders the Javascript needed to track page visits. Requires the
    Woopra domain in the ``WOOPRA_DOMAIN`` setting.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return WoopraNode()
def spring_metrics(parser, token):
    """
    Spring Metrics tracking template tag.

    Renders the Javascript needed to track page visits. Requires the
    Spring Metrics Tracking ID in the ``SPRING_METRICS_TRACKING_ID``
    setting.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return SpringMetricsNode()
def kiss_insights(parser, token):
    """
    KISSinsights set-up template tag.

    Renders the Javascript needed to set up surveys. Requires the
    account number and site code in the ``KISS_INSIGHTS_ACCOUNT_NUMBER``
    and ``KISS_INSIGHTS_SITE_CODE`` settings.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return KissInsightsNode()
def matomo(parser, token):
    """
    Matomo tracking template tag.

    Renders the Javascript needed to track page visits. Requires the
    Matomo domain (plus optional URI path) and the tracked site ID in the
    ``MATOMO_DOMAIN_PATH`` and ``MATOMO_SITE_ID`` settings.

    Custom variables can be passed in the ``matomo_vars`` context
    variable. It is an iterable of custom variables as tuples like:
    ``(index, name, value[, scope])`` where scope may be ``'page'``
    (default) or ``'visit'``. Index should be an integer and the
    other parameters should be strings.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return MatomoNode()
def snapengage(parser, token):
    """
    SnapEngage set-up template tag.

    Renders the Javascript needed to set up SnapEngage chat. Requires the
    widget ID in the ``SNAPENGAGE_WIDGET_ID`` setting.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return SnapEngageNode()
def performable(parser, token):
    """
    Performable template tag.

    Renders the Javascript needed to set up Performable tracking.
    Requires the Performable API key in the ``PERFORMABLE_API_KEY``
    setting.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return PerformableNode()
def _timestamp(when):
"""
Python 2 compatibility for `datetime.timestamp()`.
"""
return (time.mktime(when.timetuple()) if sys.version_info < (3,) else
when.timestamp()) |
def _hashable_bytes(data):
"""
Coerce strings to hashable bytes.
"""
if isinstance(data, bytes):
return data
elif isinstance(data, str):
return data.encode('ascii') # Fail on anything non-ASCII.
else:
raise TypeError(data) |
def intercom_user_hash(data):
    """
    Return a SHA-256 HMAC `user_hash` as expected by Intercom, if configured.

    Returns None when the `INTERCOM_HMAC_SECRET_KEY` setting is absent or
    empty.
    """
    secret = getattr(settings, 'INTERCOM_HMAC_SECRET_KEY', None)
    if not secret:
        return None
    digest = hmac.new(
        key=_hashable_bytes(secret),
        msg=_hashable_bytes(data),
        digestmod=hashlib.sha256,
    )
    return digest.hexdigest()
def intercom(parser, token):
    """
    Intercom.io template tag.

    Renders the Javascript needed for intercom.io. Requires the APP ID
    account number in the ``INTERCOM_APP_ID`` setting.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return IntercomNode()
def uservoice(parser, token):
    """
    UserVoice tracking template tag.

    Renders the Javascript needed to track page visits. Requires the
    UserVoice Widget Key in the ``USERVOICE_WIDGET_KEY`` setting or the
    ``uservoice_widget_key`` template context variable.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return UserVoiceNode()
def kiss_metrics(parser, token):
    """
    KISSmetrics tracking template tag.

    Renders the Javascript needed to track page visits. Requires the
    KISSmetrics API key in the ``KISS_METRICS_API_KEY`` setting.
    """
    contents = token.split_contents()
    if len(contents) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return KissMetricsNode()
def piwik(parser, token):
    """
    Piwik tracking template tag.

    Renders Javascript code to track page visits. Your Piwik domain
    (plus optional URI path) and tracked site ID must be supplied in the
    ``PIWIK_DOMAIN_PATH`` and ``PIWIK_SITE_ID`` settings.

    Custom variables can be passed in the ``piwik_vars`` context
    variable: an iterable of tuples ``(index, name, value[, scope])``
    where scope may be ``'page'`` (default) or ``'visit'``; index is an
    integer, the other parameters strings. The tag itself takes no
    arguments.
    """
    pieces = token.split_contents()
    if len(pieces) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % pieces[0])
    return PiwikNode()
def get_required_setting(setting, value_re, invalid_msg):
    """
    Return a constant from ``django.conf.settings``.

    The `setting` argument is the constant name, the `value_re` argument
    is a regular expression used to validate the setting value and the
    `invalid_msg` argument is used as exception message if the value is
    not valid.

    :raises AnalyticalException: if the setting is missing, empty or
        fails validation.
    """
    try:
        raw_value = getattr(settings, setting)
    except AttributeError:
        raise AnalyticalException("%s setting: not found" % setting)
    if not raw_value:
        raise AnalyticalException("%s setting is not set" % setting)
    text_value = str(raw_value)
    if value_re.search(text_value) is None:
        raise AnalyticalException("%s setting: %s: '%s'"
                                  % (setting, invalid_msg, text_value))
    return text_value
def get_user_from_context(context):
    """
    Get the user instance from the template context, if possible.

    If the context contains neither a usable `user` nor `request`
    attribute, `None` is returned.
    """
    try:
        return context['user']
    except KeyError:
        pass
    try:
        # Fall back to the authenticated user attached to the request.
        return context['request'].user
    except (KeyError, AttributeError):
        return None
def get_identity(context, prefix=None, identity_func=None, user=None):
    """
    Get the identity of a logged in user from a template context.

    The `prefix` argument is used to provide different identities to
    different analytics services. The `identity_func` argument is a
    function that returns the identity of the user; by default the
    identity is the username.
    """
    # Explicit identities in the context win over auto-identification.
    lookup_keys = []
    if prefix is not None:
        lookup_keys.append('%s_identity' % prefix)
    lookup_keys.append('analytical_identity')
    for key in lookup_keys:
        try:
            return context[key]
        except KeyError:
            pass
    if getattr(settings, 'ANALYTICAL_AUTO_IDENTIFY', True):
        try:
            if user is None:
                user = get_user_from_context(context)
            if get_user_is_authenticated(user):
                if identity_func is None:
                    return user.get_username()
                return identity_func(user)
        except (KeyError, AttributeError):
            pass
    return None
def get_domain(context, prefix):
    """
    Return the domain used for the tracking code.

    Lookup order: the ``<prefix>_domain`` context variable, the
    ``analytical_domain`` context variable, the ``<PREFIX>_DOMAIN``
    setting, the ``ANALYTICAL_DOMAIN`` setting, and finally the current
    site from the contrib sites framework (if installed). Returns None
    when no domain can be determined.
    """
    # Context lookups come first so settings are only touched as a fallback.
    for context_key in ('%s_domain' % prefix, 'analytical_domain'):
        domain = context.get(context_key)
        if domain is not None:
            return domain
    for setting_name in ('%s_DOMAIN' % prefix.upper(), 'ANALYTICAL_DOMAIN'):
        domain = getattr(settings, setting_name, None)
        if domain is not None:
            return domain
    if 'django.contrib.sites' in settings.INSTALLED_APPS:
        from django.contrib.sites.models import Site
        try:
            return Site.objects.get_current().domain
        except (ImproperlyConfigured, Site.DoesNotExist):
            pass
    return None
def is_internal_ip(context, prefix=None):
    """
    Return whether the visitor is coming from an internal IP address,
    based on information from the template context.

    The prefix is used to allow different analytics services to have
    different notions of internal addresses.
    """
    try:
        meta = context['request'].META
        # Prefer the proxy-forwarded address, fall back to the socket peer.
        remote_ip = meta.get('HTTP_X_FORWARDED_FOR', '') or meta.get('REMOTE_ADDR', '')
        if not remote_ip:
            return False
        internal_ips = None
        if prefix is not None:
            internal_ips = getattr(settings, '%s_INTERNAL_IPS' % prefix, None)
        if internal_ips is None:
            internal_ips = getattr(settings, 'ANALYTICAL_INTERNAL_IPS', None)
        if internal_ips is None:
            internal_ips = getattr(settings, 'INTERNAL_IPS', None)
        return remote_ip in (internal_ips or [])
    except (KeyError, AttributeError):
        return False
def mixpanel(parser, token):
    """
    Mixpanel tracking template tag.

    Renders Javascript code to track page visits. Your Mixpanel token
    must be supplied in the ``MIXPANEL_API_TOKEN`` setting. The tag
    itself takes no arguments.
    """
    pieces = token.split_contents()
    if len(pieces) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % pieces[0])
    return MixpanelNode()
def gosquared(parser, token):
    """
    GoSquared tracking template tag.

    Renders Javascript code to track page visits. Your GoSquared site
    token must be supplied in the ``GOSQUARED_SITE_TOKEN`` setting. The
    tag itself takes no arguments.
    """
    pieces = token.split_contents()
    if len(pieces) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % pieces[0])
    return GoSquaredNode()
def olark(parser, token):
    """
    Olark set-up template tag.

    Renders Javascript code to set up Olark chat. Your site ID must be
    supplied in the ``OLARK_SITE_ID`` setting. The tag itself takes no
    arguments.
    """
    pieces = token.split_contents()
    if len(pieces) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % pieces[0])
    return OlarkNode()
def clickmap(parser, token):
    """
    Clickmap tracker template tag.

    Renders Javascript code to track page visits. Your clickmap tracker
    ID (as a string) must be supplied in the ``CLICKMAP_TRACKER_ID``
    setting. The tag itself takes no arguments.
    """
    pieces = token.split_contents()
    if len(pieces) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % pieces[0])
    return ClickmapNode()
def gauges(parser, token):
    """
    Gaug.es template tag.

    Renders the Gaug.es Javascript code. Your Site ID account number
    must be supplied in the ``GAUGES_SITE_ID`` setting. The tag itself
    takes no arguments.
    """
    pieces = token.split_contents()
    if len(pieces) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % pieces[0])
    return GaugesNode()
def crazy_egg(parser, token):
    """
    Crazy Egg tracking template tag.

    Renders Javascript code to track page clicks. Your Crazy Egg account
    number (as a string) must be supplied in the
    ``CRAZY_EGG_ACCOUNT_NUMBER`` setting. The tag itself takes no
    arguments.
    """
    pieces = token.split_contents()
    if len(pieces) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % pieces[0])
    return CrazyEggNode()
def yandex_metrica(parser, token):
    """
    Yandex.Metrica counter template tag.

    Renders Javascript code to track page visits. Your website counter
    ID (as a string) must be supplied in the
    ``YANDEX_METRICA_COUNTER_ID`` setting. The tag itself takes no
    arguments.
    """
    pieces = token.split_contents()
    if len(pieces) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % pieces[0])
    return YandexMetricaNode()
def hubspot(parser, token):
    """
    HubSpot tracking template tag.

    Renders Javascript code to track page visits. Your portal ID (as a
    string) must be supplied in the ``HUBSPOT_PORTAL_ID`` setting. The
    tag itself takes no arguments.
    """
    pieces = token.split_contents()
    if len(pieces) != 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % pieces[0])
    return HubSpotNode()
def status_printer():
    """Manage the printing and in-place updating of a line of characters

    .. note::
        If the string is longer than a line, then in-place updating may not
        work (it will print a new line at each refresh).
    """
    # Length of the previously printed line, kept in a one-element list
    # (a mutable cell instead of ``nonlocal``) so it also works on Python 2.
    last_len = [0]

    def p(s):
        s = '{} {}'.format(next(spinner), s)
        # Pad with spaces so leftovers of a longer previous line are erased.
        padding = ' ' * max(last_len[0] - len(s), 0)
        sys.stdout.write('\r' + s + padding)
        sys.stdout.flush()
        last_len[0] = len(s)
    return p
def get_or_guess_paths_to_mutate(paths_to_mutate):
    """Return the given mutation paths, or guess a likely source directory.

    :type paths_to_mutate: str or None
    :rtype: str

    :raises FileNotFoundError: if no plausible source directory exists.
    """
    if paths_to_mutate is not None:
        return paths_to_mutate

    # Guess path with code: try conventional layouts, then variations of
    # the current directory name with dashes/spaces normalised away.
    this_dir = os.getcwd().split(os.sep)[-1]
    candidates = (
        'lib',
        'src',
        this_dir,
        this_dir.replace('-', '_'),
        this_dir.replace(' ', '_'),
        this_dir.replace('-', ''),
        this_dir.replace(' ', ''),
    )
    for candidate in candidates:
        if isdir(candidate):
            return candidate
    raise FileNotFoundError(
        'Could not figure out where the code to mutate is. '
        'Please specify it on the command line using --paths-to-mutate, '
        'or by adding "paths_to_mutate=code_dir" in setup.cfg to the [mutmut] section.')
def do_apply(mutation_pk, dict_synonyms, backup):
    """Apply a specified mutant to the source code

    :param mutation_pk: mutmut cache primary key of the mutant to apply
    :type mutation_pk: str

    :param dict_synonyms: list of synonym keywords for a python dictionary
    :type dict_synonyms: list[str]

    :param backup: if :obj:`True` create a backup of the source file
        before applying the mutation
    :type backup: bool

    :raises RuntimeError: if the mutant could not be applied.
    """
    filename, mutation_id = filename_and_mutation_id_from_pk(int(mutation_pk))
    update_line_numbers(filename)

    mutation_context = Context(
        mutation_id=mutation_id,
        filename=filename,
        dict_synonyms=dict_synonyms,
    )
    mutate_file(
        backup=backup,
        context=mutation_context,
    )
    if not mutation_context.number_of_performed_mutations:
        raise RuntimeError('No mutations performed.')
def climain(command, argument, argument2, paths_to_mutate, backup, runner, tests_dir,
            test_time_multiplier, test_time_base,
            swallow_output, use_coverage, dict_synonyms, cache_only, version,
            suspicious_policy, untested_policy, pre_mutation, post_mutation,
            use_patch_file):
    """
commands:\n
    run [mutation id]\n
        Runs mutmut. You probably want to start with just trying this. If you supply a mutation ID mutmut will check just this mutant.\n
    results\n
        Print the results.\n
    apply [mutation id]\n
        Apply a mutation on disk.\n
    show [mutation id]\n
        Show a mutation diff.\n
    junitxml\n
        Show a mutation diff with junitxml format.
    """
    # click sets the default=0.0 to None; normalise both float options
    # back to 0.0 before delegating to main().
    test_time_base = 0.0 if test_time_base is None else test_time_base
    test_time_multiplier = 0.0 if test_time_multiplier is None else test_time_multiplier

    sys.exit(main(command, argument, argument2, paths_to_mutate, backup, runner,
                  tests_dir, test_time_multiplier, test_time_base,
                  swallow_output, use_coverage, dict_synonyms, cache_only,
                  version, suspicious_policy, untested_policy, pre_mutation,
                  post_mutation, use_patch_file))
def main(command, argument, argument2, paths_to_mutate, backup, runner, tests_dir,
         test_time_multiplier, test_time_base,
         swallow_output, use_coverage, dict_synonyms, cache_only, version,
         suspicious_policy, untested_policy, pre_mutation, post_mutation,
         use_patch_file):
    """return exit code, after performing an mutation test run.

    Dispatches on ``command``: the read-only commands (``results``,
    ``junitxml``, ``show``, ``apply``) return early; ``run`` performs a
    baseline test-suite run, collects mutants from ``paths_to_mutate``
    (optionally restricted to covered/patched lines), and checks them.

    :return: the exit code from executing the mutation tests
    :rtype: int
    """
    if version:
        print("mutmut version %s" % __version__)
        return 0

    if use_coverage and use_patch_file:
        raise click.BadArgumentUsage("You can't combine --use-coverage and --use-patch")

    valid_commands = ['run', 'results', 'apply', 'show', 'junitxml']
    if command not in valid_commands:
        raise click.BadArgumentUsage('%s is not a valid command, must be one of %s' % (command, ', '.join(valid_commands)))

    if command == 'results' and argument:
        raise click.BadArgumentUsage('The %s command takes no arguments' % command)

    dict_synonyms = [x.strip() for x in dict_synonyms.split(',')]

    # NOTE(review): 'diff' can never reach this branch because it is not in
    # valid_commands (rejected above) — presumably a leftover alias for
    # 'show'; confirm before removing.
    if command in ('show', 'diff'):
        if not argument:
            print_result_cache()
            return 0

        if argument == 'all':
            print_result_cache(show_diffs=True, dict_synonyms=dict_synonyms, print_only_filename=argument2)
            return 0

        print(get_unified_diff(argument, dict_synonyms))
        return 0

    if use_coverage and not exists('.coverage'):
        raise FileNotFoundError('No .coverage file found. You must generate a coverage file to use this feature.')

    if command == 'results':
        print_result_cache()
        return 0

    if command == 'junitxml':
        print_result_cache_junitxml(dict_synonyms, suspicious_policy, untested_policy)
        return 0

    if command == 'apply':
        do_apply(argument, dict_synonyms, backup)
        return 0

    paths_to_mutate = get_or_guess_paths_to_mutate(paths_to_mutate)

    if not isinstance(paths_to_mutate, (list, tuple)):
        paths_to_mutate = [x.strip() for x in paths_to_mutate.split(',')]

    if not paths_to_mutate:
        raise click.BadOptionUsage('--paths-to-mutate', 'You must specify a list of paths to mutate. Either as a command line argument, or by setting paths_to_mutate under the section [mutmut] in setup.cfg')

    # Expand the colon-separated tests_dir globs, both as given and nested
    # under each mutation path, so test files are never mutated.
    tests_dirs = []
    for p in tests_dir.split(':'):
        tests_dirs.extend(glob(p, recursive=True))

    for p in paths_to_mutate:
        for pt in tests_dir.split(':'):
            tests_dirs.extend(glob(p + '/**/' + pt, recursive=True))
    del tests_dir

    os.environ['PYTHONDONTWRITEBYTECODE'] = '1'  # stop python from creating .pyc files

    using_testmon = '--testmon' in runner

    print("""
- Mutation testing starting -
These are the steps:
1. A full test suite run will be made to make sure we
   can run the tests successfully and we know how long
   it takes (to detect infinite loops for example)
2. Mutants will be generated and checked
Results are stored in .mutmut-cache.
Print found mutants with `mutmut results`.
Legend for output:
🎉 Killed mutants. The goal is for everything to end up in this bucket.
⏰ Timeout. Test suite took 10 times as long as the baseline so were killed.
🤔 Suspicious. Tests took a long time, but not long enough to be fatal.
🙁 Survived. This means your tests needs to be expanded.
""")

    # Step 1: time the unmutated suite; this baseline bounds mutant runs.
    baseline_time_elapsed = time_test_suite(
        swallow_output=not swallow_output,
        test_command=runner,
        using_testmon=using_testmon
    )

    if using_testmon:
        copy('.testmondata', '.testmondata-initial')

    # if we're running in a mode with externally whitelisted lines
    if use_coverage or use_patch_file:
        covered_lines_by_filename = {}
        if use_coverage:
            coverage_data = read_coverage_data()
        else:
            assert use_patch_file
            covered_lines_by_filename = read_patch_data(use_patch_file)
            coverage_data = None

        def _exclude(context):
            # Exclude any mutant whose line is not covered/whitelisted;
            # coverage data is queried lazily per file and cached.
            try:
                covered_lines = covered_lines_by_filename[context.filename]
            except KeyError:
                if coverage_data is not None:
                    covered_lines = coverage_data.lines(os.path.abspath(context.filename))
                    covered_lines_by_filename[context.filename] = covered_lines
                else:
                    covered_lines = None

            if covered_lines is None:
                return True
            current_line = context.current_line_index + 1
            if current_line not in covered_lines:
                return True
            return False
    else:
        def _exclude(context):
            # No whitelist in effect: never exclude.
            del context
            return False

    if command != 'run':
        raise click.BadArgumentUsage("Invalid command %s" % command)

    mutations_by_file = {}

    if argument is None:
        # Mutate everything under the given paths (minus test dirs).
        for path in paths_to_mutate:
            for filename in python_source_files(path, tests_dirs):
                update_line_numbers(filename)
                add_mutations_by_file(mutations_by_file, filename, _exclude, dict_synonyms)
    else:
        # A single mutant was requested by its cache primary key.
        filename, mutation_id = filename_and_mutation_id_from_pk(int(argument))
        mutations_by_file[filename] = [mutation_id]

    total = sum(len(mutations) for mutations in mutations_by_file.values())

    print()
    print('2. Checking mutants')
    config = Config(
        swallow_output=not swallow_output,
        test_command=runner,
        exclude_callback=_exclude,
        baseline_time_elapsed=baseline_time_elapsed,
        backup=backup,
        dict_synonyms=dict_synonyms,
        total=total,
        using_testmon=using_testmon,
        cache_only=cache_only,
        tests_dirs=tests_dirs,
        hash_of_tests=hash_of_tests(tests_dirs),
        test_time_multiplier=test_time_multiplier,
        test_time_base=test_time_base,
        pre_mutation=pre_mutation,
        post_mutation=post_mutation,
    )

    try:
        run_mutation_tests(config=config, mutations_by_file=mutations_by_file)
    except Exception as e:
        traceback.print_exc()
        return compute_exit_code(config, e)
    else:
        return compute_exit_code(config)
    finally:
        print()
def popen_streaming_output(cmd, callback, timeout=None):
    """Open a subprocess and stream its output without hard-blocking.

    :param cmd: the command to execute within the subprocess
    :type cmd: str

    :param callback: function that intakes the subprocess' stdout line by line.
        It is called for each line received from the subprocess' stdout stream.
    :type callback: Callable[[Context], bool]

    :param timeout: the timeout time of the subprocess
    :type timeout: float

    :raises TimeoutError: if the subprocess' execution time exceeds
        the timeout time

    :return: the return code of the executed subprocess
    :rtype: int
    """
    if os.name == 'nt':  # pragma: no cover
        process = subprocess.Popen(
            shlex.split(cmd),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        stdout = process.stdout
    else:
        # Use a pseudo-terminal so the child line-buffers its output
        # (most programs switch to block buffering when piped).
        master, slave = os.openpty()
        process = subprocess.Popen(
            shlex.split(cmd, posix=True),
            stdout=slave,
            stderr=slave
        )
        stdout = os.fdopen(master)
        os.close(slave)

    def kill(process_):
        """Kill the specified process on Timer completion"""
        try:
            process_.kill()
        except OSError:
            pass

    # Watchdog that kills the child if it runs longer than `timeout`.
    timer = Timer(timeout, kill, [process])
    # FIX: Thread.setDaemon() is deprecated (since Python 3.10); assigning
    # the `daemon` attribute is the supported, equivalent spelling.
    timer.daemon = True
    timer.start()

    while process.returncode is None:
        try:
            if os.name == 'nt':  # pragma: no cover
                line = stdout.readline()
                # windows gives readline() raw stdout as a b''
                # need to decode it
                line = line.decode("utf-8")
                if line:  # ignore empty strings and None
                    callback(line.rstrip())
            else:
                while True:
                    line = stdout.readline()
                    if not line:
                        break
                    callback(line.rstrip())
        except (IOError, OSError):
            # This seems to happen on some platforms, including TravisCI.
            # It seems like it's ok to just let this pass here, you just
            # won't get as nice feedback.
            pass
        if not timer.is_alive():
            # The watchdog fired: the child was killed because it timed out.
            raise TimeoutError("subprocess running command '{}' timed out after {} seconds".format(cmd, timeout))
        process.poll()

    # we have returned from the subprocess cancel the timer if it is running
    timer.cancel()
    return process.returncode
def run_mutation(config, filename, mutation_id):
    """Apply one mutant, run the test suite against it, and classify it.

    :type config: Config
    :type filename: str
    :type mutation_id: MutationID

    :return: (computed or cached) status of the tested mutant
    :rtype: str
    """
    context = Context(
        mutation_id=mutation_id,
        filename=filename,
        exclude=config.exclude_callback,
        dict_synonyms=config.dict_synonyms,
        config=config,
    )

    # Reuse a previous verdict for this mutant when the tests are unchanged
    # (keyed on the hash of the test files); still update the tallies.
    cached_status = cached_mutation_status(filename, mutation_id, config.hash_of_tests)
    if cached_status == BAD_SURVIVED:
        config.surviving_mutants += 1
    elif cached_status == BAD_TIMEOUT:
        config.surviving_mutants_timeout += 1
    elif cached_status == OK_KILLED:
        config.killed_mutants += 1
    elif cached_status == OK_SUSPICIOUS:
        config.suspicious_mutants += 1
    else:
        assert cached_status == UNTESTED, cached_status
    config.print_progress()
    if cached_status != UNTESTED:
        return cached_status

    # Optional user hook executed before the mutant is applied.
    if config.pre_mutation:
        result = subprocess.check_output(config.pre_mutation, shell=True).decode().strip()
        if result:
            print(result)

    try:
        number_of_mutations_performed = mutate_file(
            backup=True,
            context=context
        )
        assert number_of_mutations_performed
        start = time()
        try:
            survived = tests_pass(config)
        except TimeoutError:
            context.config.surviving_mutants_timeout += 1
            return BAD_TIMEOUT
        time_elapsed = time() - start
        # NOTE(review): 'test_time_multipler' looks like a typo of
        # 'test_time_multiplier' — main() constructs Config with the
        # correctly-spelled keyword. Confirm the attribute name on the
        # Config class (defined elsewhere) before renaming here.
        if time_elapsed > config.test_time_base + (config.baseline_time_elapsed * config.test_time_multipler):
            config.suspicious_mutants += 1
            return OK_SUSPICIOUS
        if survived:
            context.config.surviving_mutants += 1
            return BAD_SURVIVED
        else:
            context.config.killed_mutants += 1
            return OK_KILLED
    finally:
        # Always restore the pristine source from the .bak made by mutate_file.
        move(filename + '.bak', filename)

        # Optional user hook executed after the source file is restored.
        if config.post_mutation:
            result = subprocess.check_output(config.post_mutation, shell=True).decode().strip()
            if result:
                print(result)
def read_coverage_data():
    """Load line-coverage data from the ``.coverage`` file in the cwd.

    :rtype: CoverageData or None
    """
    print('Using coverage data from .coverage file')
    # noinspection PyPackageRequirements,PyUnresolvedReferences
    from coverage import Coverage
    coverage_obj = Coverage('.coverage')
    coverage_obj.load()
    return coverage_obj.get_data()
def add_mutations_by_file(mutations_by_file, filename, exclude, dict_synonyms):
    """Collect the mutants available in ``filename`` into ``mutations_by_file``.

    :type mutations_by_file: dict[str, list[MutationID]]
    :type filename: str
    :type exclude: Callable[[Context], bool]
    :type dict_synonyms: list[str]
    """
    with open(filename) as source_file:
        code = source_file.read()

    context = Context(
        source=code,
        filename=filename,
        exclude=exclude,
        dict_synonyms=dict_synonyms,
    )
    try:
        mutations_by_file[filename] = list_mutations(context)
        register_mutants(mutations_by_file)
    except Exception as e:
        # Include the offending line to make parse/mutation failures debuggable.
        raise RuntimeError('Failed while creating mutations for %s, for line "%s"' % (context.filename, context.current_source_line), e)
def python_source_files(path, tests_dirs):
    """Attempt to guess where the python source files to mutate are and yield
    their paths

    :param path: path to a python source file or package directory
    :type path: str

    :param tests_dirs: list of directory paths containing test files
        (we do not want to mutate these!)
    :type tests_dirs: list[str]

    :return: generator listing the paths to the python source files to mutate
    :rtype: Generator[str, None, None]
    """
    if not isdir(path):
        # A plain file path is yielded as-is.
        yield path
        return
    for root, dirs, files in os.walk(path):
        # Prune test directories in place so os.walk never descends into them.
        dirs[:] = [d for d in dirs if os.path.join(root, d) not in tests_dirs]
        for fname in files:
            if fname.endswith('.py'):
                yield os.path.join(root, fname)
def compute_exit_code(config, exception=None):
    """Compute an exit code for mutmut mutation testing

    The following exit codes are available for mutmut:
     * 0 if all mutants were killed (OK_KILLED)
     * 1 if a fatal error occurred
     * 2 if one or more mutants survived (BAD_SURVIVED)
     * 4 if one or more mutants timed out (BAD_TIMEOUT)
     * 8 if one or more mutants caused tests to take twice as long (OK_SUSPICIOUS)

    Exit codes 1 to 8 will be bit-ORed so that it is possible to know what
    different mutant statuses occurred during mutation testing.

    :param exception:
    :type exception: Exception

    :param config:
    :type config: Config

    :return: integer noting the exit code of the mutation tests.
    :rtype: int
    """
    code = 1 if exception is not None else 0
    if config.surviving_mutants > 0:
        code |= 2
    if config.surviving_mutants_timeout > 0:
        code |= 4
    if config.suspicious_mutants > 0:
        code |= 8
    return code
def argument_mutation(children, context, **_):
    """Mutate the first argument name of a call on a dict synonym.

    :type context: Context
    """
    stack = context.stack
    power_node = None
    # The call node sits either three or four frames up the parse stack.
    for depth in (-3, -4):
        if len(stack) >= -depth and stack[depth].type in ('power', 'atom_expr'):
            power_node = stack[depth]
            break
    if power_node is None:
        return

    head = power_node.children[0]
    if head.type != 'name' or head.value not in context.dict_synonyms:
        return

    first_child = children[0]
    if first_child.type == 'name':
        # Copy before mutating so the caller's list is left untouched.
        mutated = children[:]
        mutated[0] = Name(first_child.value + 'XX', start_pos=first_child.start_pos, prefix=first_child.prefix)
        return mutated
def mutate(context):
    """Mutate the source held by ``context`` and return the new code.

    :type context: Context
    :return: tuple: mutated source code, number of mutations performed
    :rtype: tuple[str, int]
    """
    try:
        tree = parse(context.source, error_recovery=False)
    except Exception:
        print('Failed to parse %s. Internal error from parso follows.' % context.filename)
        print('----------------------------------')
        raise
    mutate_list_of_nodes(tree, context=context)
    new_source = tree.get_code().replace(' not not ', ' ')
    if context.remove_newline_at_end:
        assert new_source[-1] == '\n'
        new_source = new_source[:-1]
    if context.number_of_performed_mutations:
        # If we said we mutated the code, check that it has actually changed
        assert context.source != new_source
    context.mutated_source = new_source
    return new_source, context.number_of_performed_mutations
def mutate_node(node, context):
    """Mutate a single parso node in place, recursing into its children.

    :type context: Context
    """
    context.stack.append(node)
    try:
        # Never mutate parameter defs or import statements.
        if node.type in ('tfpdef', 'import_from', 'import_name'):
            return

        if node.start_pos[0] - 1 != context.current_line_index:
            context.current_line_index = node.start_pos[0] - 1
            context.index = 0  # indexes are unique per line, so start over here!

        if hasattr(node, 'children'):
            mutate_list_of_nodes(node, context=context)

            # this is just an optimization to stop early
            if context.number_of_performed_mutations and context.mutation_id != ALL:
                return

        mutation = mutations_by_type.get(node.type)

        if mutation is None:
            return

        # Each entry maps a node attribute to a mutation strategy; sorted
        # for a deterministic mutation order.
        for key, value in sorted(mutation.items()):
            old = getattr(node, key)
            if context.exclude_line():
                continue

            new = evaluate(
                value,
                context=context,
                node=node,
                value=getattr(node, 'value', None),
                children=getattr(node, 'children', None),
            )
            assert not callable(new)

            if new is not None and new != old:
                if context.should_mutate():
                    context.number_of_performed_mutations += 1
                    context.performed_mutation_ids.append(context.mutation_id_of_current_index)
                    setattr(node, key, new)
                # Count the candidate even when not applied, so mutation
                # ids stay stable across runs.
                context.index += 1

                # this is just an optimization to stop early
                if context.number_of_performed_mutations and context.mutation_id != ALL:
                    return
    finally:
        context.stack.pop()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.