repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
jaketanderson/openff-evaluator | openff/evaluator/storage/data.py | """
A collection of classes representing data stored by a storage backend.
"""
import abc
from typing import Optional
from openff.evaluator.attributes import AttributeClass
from openff.evaluator.datasets import PropertyPhase
from openff.evaluator.forcefield import ForceFieldSource
from openff.evaluator.storage.attributes import FilePath, StorageAttribute
from openff.evaluator.substances import Substance
from openff.evaluator.thermodynamics import ThermodynamicState
from openff.evaluator.utils.observables import Observable, ObservableFrame
class BaseStoredData(AttributeClass, abc.ABC):
    """The abstract base for any piece of cached data which a storage
    backend may persist.

    Stored data is expected to be split across (up to) two parts:

    1) a lightweight, JSON serializable representation of this class (or
       one of its subclasses) holding information such as the state and
       composition of the system, with any heavier data (coordinates,
       trajectories, ...) referenced only by file name, and

    2) an (optional) ancillary, directory-like structure (either a plain
       directory or a NetCDF-like compressed archive) which contains those
       referenced files that do not lend themselves to JSON serialization.

    The ancillary structure may be omitted entirely when the data can be
    suitably stored within the data object itself.
    """

    @classmethod
    @abc.abstractmethod
    def has_ancillary_data(cls):
        """Whether objects of this class must be accompanied by an
        ancillary, directory-like structure.

        Returns
        -------
        bool
            True when an accompanying data directory-like structure is
            required.
        """
        raise NotImplementedError()

    def to_storage_query(self):
        """Build the storage query which would match this data object.

        Returns
        -------
        BaseDataQuery
            The storage query which would match this data object.
        """
        raise NotImplementedError()
class HashableStoredData(BaseStoredData, abc.ABC):
    """A family of stored data objects which can be rapidly compared and
    indexed via their hash values.
    """

    def __eq__(self, other):
        # Objects are considered equal only when they are of exactly the
        # same type *and* share a hash value.
        if type(self) != type(other):
            return False
        return hash(self) == hash(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    @abc.abstractmethod
    def __hash__(self):
        # Subclasses must define how the data is hashed.
        raise NotImplementedError
class ForceFieldData(HashableStoredData):
    """A data container for force field objects which
    will be saved to disk.

    Equality and inequality are inherited from `HashableStoredData`, which
    compares objects by type and hash value.
    """

    force_field_source = StorageAttribute(
        docstring="The force field source object.",
        type_hint=ForceFieldSource,
    )

    @classmethod
    def has_ancillary_data(cls):
        # A force field is fully described by its JSON representation, so
        # no ancillary data directory is needed.
        return False

    def to_storage_query(self):
        """
        Returns
        -------
        ForceFieldQuery
            The storage query which would match this
            data object.
        """
        # Imported locally to avoid a circular import with the query module.
        from .query import ForceFieldQuery

        return ForceFieldQuery.from_data_object(self)

    def __hash__(self):
        # Hash the JSON serialized force field so that two objects wrapping
        # identical force fields compare equal.
        force_field_string = self.force_field_source.json()
        return hash(force_field_string.encode())
class ReplaceableData(BaseStoredData, abc.ABC):
    """A category of stored data which a `StorageBackend` may replace
    with another, more informative, piece of data of the same type.

    A typical example is attempting to store a piece of
    `StoredSimulationData` when the backend already holds data measured
    from the same calculation and for the same system, but which stores
    fewer configurations.
    """

    @classmethod
    @abc.abstractmethod
    def most_information(cls, stored_data_1, stored_data_2):
        """Select whichever of two data objects carries the greater
        information content.

        Parameters
        ----------
        stored_data_1: ReplaceableData
            The first piece of data to compare.
        stored_data_2: ReplaceableData
            The second piece of data to compare.

        Returns
        -------
        ReplaceableData, optional
            The more informative of the two objects, or `None` when the
            two pieces of data are incompatible with one another.
        """
        assert isinstance(stored_data_1, ReplaceableData)
        assert type(stored_data_1) == type(stored_data_2)

        # The two objects are only comparable when one would be matched by
        # the storage query built from the other.
        if stored_data_1.to_storage_query().apply(stored_data_2) is None:
            return None

        return stored_data_1
class BaseSimulationData(ReplaceableData, abc.ABC):
    """A base class for classes which will store the outputs of a molecular simulation"""

    # The chemical composition of the simulated system.
    substance = StorageAttribute(
        docstring="A description of the composition of the stored system.",
        type_hint=Substance,
    )
    # The thermodynamic state (temperature / pressure) the data was collected at.
    thermodynamic_state = StorageAttribute(
        docstring="The state at which the data was collected.",
        type_hint=ThermodynamicState,
    )
    property_phase = StorageAttribute(
        docstring="The phase of the system (e.g. liquid, gas).",
        type_hint=PropertyPhase,
    )
    # Provenance: which server calculation produced this data, and which
    # force field parameters it was generated with.
    source_calculation_id = StorageAttribute(
        docstring="The server id of the calculation which yielded this data.",
        type_hint=str,
    )
    force_field_id = StorageAttribute(
        docstring="The id of the force field parameters used to generate the data.",
        type_hint=str,
    )

    @classmethod
    def has_ancillary_data(cls):
        # Simulation outputs always reference on-disk files (coordinates,
        # trajectories, ...), so an ancillary directory is required.
        return True
class StoredSimulationData(BaseSimulationData):
    """A representation of data which has been cached from a single previous simulation.

    Notes
    -----
    The ancillary directory which stores larger information such as trajectories should
    be of the form:

    .. code-block::

        |--- data_object.json
        |--- data_directory
             |--- coordinate_file_name.pdb
             |--- trajectory_file_name.dcd
    """

    # File names below are relative to the ancillary data directory.
    coordinate_file_name = StorageAttribute(
        docstring="The name of a coordinate file which encodes the "
        "topology information of the system.",
        type_hint=FilePath,
    )
    trajectory_file_name = StorageAttribute(
        docstring="The name of a .dcd trajectory file containing "
        "configurations generated by the simulation.",
        type_hint=FilePath,
    )
    observables = StorageAttribute(
        docstring="A frame of observables collected over the duration of the "
        "simulation.",
        type_hint=ObservableFrame,
    )
    # Used to decide which of two compatible data objects to retain: a lower
    # statistical inefficiency means more effectively independent samples.
    statistical_inefficiency = StorageAttribute(
        docstring="The statistical inefficiency of the collected data.",
        type_hint=float,
    )
    number_of_molecules = StorageAttribute(
        docstring="The total number of molecules in the system.",
        type_hint=int,
    )

    @classmethod
    def most_information(cls, stored_data_1, stored_data_2):
        """Returns the data object with the lowest
        `statistical_inefficiency`.

        Parameters
        ----------
        stored_data_1: StoredSimulationData
            The first piece of data to compare.
        stored_data_2: StoredSimulationData
            The second piece of data to compare.

        Returns
        -------
        StoredSimulationData, optional
            The data object with the lowest statistical inefficiency, or
            `None` if the two pieces of data are incompatible. Ties are
            resolved in favour of `stored_data_2`.
        """
        # Defer to the base class to check that the two objects are
        # compatible with one another.
        if (
            super(StoredSimulationData, cls).most_information(
                stored_data_1, stored_data_2
            )
            is None
        ):
            return None
        if (
            stored_data_1.statistical_inefficiency
            < stored_data_2.statistical_inefficiency
        ):
            return stored_data_1
        return stored_data_2

    def to_storage_query(self):
        """
        Returns
        -------
        SimulationDataQuery
            The storage query which would match this
            data object.
        """
        # Imported locally to avoid a circular import with the query module.
        from .query import SimulationDataQuery

        return SimulationDataQuery.from_data_object(self)
class StoredFreeEnergyData(BaseSimulationData):
    """A representation of data which has been cached from a free energy calculation
    which computed the free energy difference between a start and end state.

    Notes
    -----
    The ancillary directory which stores larger information such as trajectories should
    be of the form:

    .. code-block::

        |--- data_object.json
        |--- data_directory
             |--- topology_file_name.pdb
             |--- start_state_trajectory.dcd
             |--- end_state_trajectory.dcd
    """

    free_energy_difference = StorageAttribute(
        docstring="The free energy difference between the end state "
        "and the start state.",
        type_hint=Observable,
    )
    # File names below are relative to the ancillary data directory.
    topology_file_name = StorageAttribute(
        docstring="The name of a coordinate file which encodes the topology of the "
        "system.",
        type_hint=FilePath,
    )
    start_state_trajectory = StorageAttribute(
        docstring="The name of a .dcd trajectory file containing configurations "
        "generated by the simulation of the start state of the system.",
        type_hint=FilePath,
    )
    end_state_trajectory = StorageAttribute(
        docstring="The name of a .dcd trajectory file containing configurations "
        "generated by the simulation of the end state of the system.",
        type_hint=FilePath,
    )

    @classmethod
    def most_information(
        cls,
        stored_data_1: "StoredFreeEnergyData",
        stored_data_2: "StoredFreeEnergyData",
    ) -> Optional["StoredFreeEnergyData"]:
        """A comparison function which will always retain both pieces of free energy
        data. At this time no situation can be envisaged where the same free energy
        data from exactly the same calculation will be stored twice.

        Parameters
        ----------
        stored_data_1
            The first piece of data to compare.
        stored_data_2
            The second piece of data to compare.
        """
        # Returning `None` signals that neither object should replace the other.
        return None

    def to_storage_query(self):
        """
        Returns
        -------
        FreeEnergyDataQuery
            The storage query which would match this data object.
        """
        # Imported locally to avoid a circular import with the query module.
        from .query import FreeEnergyDataQuery

        return FreeEnergyDataQuery.from_data_object(self)
|
jaketanderson/openff-evaluator | openff/evaluator/datasets/curation/workflow.py | <gh_stars>10-100
import logging
from typing import List, Union, overload
import numpy
import pandas
from pydantic import BaseModel, Field
from openff.evaluator.datasets import PhysicalPropertyDataSet
from openff.evaluator.datasets.curation.components import CurationComponent
from openff.evaluator.datasets.curation.components.conversion import (
ConversionComponentSchema,
)
from openff.evaluator.datasets.curation.components.filtering import (
FilterComponentSchema,
)
from openff.evaluator.datasets.curation.components.freesolv import (
FreeSolvComponentSchema,
)
from openff.evaluator.datasets.curation.components.selection import (
SelectionComponentSchema,
)
from openff.evaluator.datasets.curation.components.thermoml import (
ThermoMLComponentSchema,
)
logger = logging.getLogger(__name__)
class CurationWorkflowSchema(BaseModel):
    """A schema which encodes how a set of curation components should be applied
    sequentially to a data set."""

    # The ordered list of component schemas; `CurationWorkflow.apply` maps
    # each schema back to its component class by name.
    component_schemas: List[
        Union[
            ConversionComponentSchema,
            FilterComponentSchema,
            FreeSolvComponentSchema,
            SelectionComponentSchema,
            ThermoMLComponentSchema,
        ]
    ] = Field(
        default_factory=list,
        description="The schemas of the components to apply as part of this workflow. "
        "The components will be applied in the order they appear in this list.",
    )
class CurationWorkflow:
    """A convenience class for applying a set of curation components
    sequentially to a data set."""

    @classmethod
    @overload
    def apply(
        cls,
        data_set: PhysicalPropertyDataSet,
        schema: CurationWorkflowSchema,
        n_processes: int = 1,
    ) -> PhysicalPropertyDataSet:
        ...

    @classmethod
    @overload
    def apply(
        cls,
        data_set: pandas.DataFrame,
        schema: CurationWorkflowSchema,
        n_processes: int = 1,
    ) -> pandas.DataFrame:
        ...

    @classmethod
    def apply(cls, data_set, schema, n_processes=1):
        """Apply each component of this curation workflow to an initial data set
        in sequence.

        Parameters
        ----------
        data_set
            The data set to apply the workflow to. This may either be a data
            set object or its pandas representation.
        schema
            The schema which defines the components to apply.
        n_processes
            The number of processes that each component is allowed to
            parallelize across.

        Returns
        -------
        The data set which has had the curation workflow applied to it. The
        return type mirrors the type of ``data_set``.
        """
        registered_components = CurationComponent.components

        # Work on a pandas copy so the caller's data is never mutated.
        working_frame = data_set
        if isinstance(working_frame, PhysicalPropertyDataSet):
            working_frame = working_frame.to_pandas()
        working_frame = working_frame.copy().fillna(value=numpy.nan)

        for entry in schema.component_schemas:
            # Each schema class is named `<Component>Schema`; strip the suffix
            # to recover the registered component class.
            name = entry.__class__.__name__.replace("Schema", "")
            component = registered_components[name]

            logger.info(f"Applying {name}")
            working_frame = component.apply(working_frame, entry, n_processes)
            logger.info(f"{name} applied")

            # Normalize missing values after every component.
            working_frame = working_frame.fillna(value=numpy.nan)

        # Round-trip back to a data set object when one was provided.
        if isinstance(data_set, PhysicalPropertyDataSet):
            working_frame = PhysicalPropertyDataSet.from_pandas(working_frame)

        return working_frame
|
jaketanderson/openff-evaluator | openff/evaluator/protocols/storage.py | <reponame>jaketanderson/openff-evaluator
"""
A collection of protocols for loading cached data off of the disk.
"""
from os import path
from typing import Union
from openff.evaluator.attributes import UNDEFINED
from openff.evaluator.storage.data import StoredSimulationData
from openff.evaluator.substances import Substance
from openff.evaluator.thermodynamics import ThermodynamicState
from openff.evaluator.utils.observables import ObservableFrame
from openff.evaluator.workflow import Protocol, workflow_protocol
from openff.evaluator.workflow.attributes import InputAttribute, OutputAttribute
@workflow_protocol()
class UnpackStoredSimulationData(Protocol):
    """Loads a `StoredSimulationData` object from disk,
    and makes its attributes easily accessible to other protocols.
    """

    # Expected to be a 3-item list / tuple of
    # (data object path, ancillary data directory, force field path).
    simulation_data_path = InputAttribute(
        docstring="A list / tuple which contains both the path to the simulation data "
        "object, it's ancillary data directory, and the force field which "
        "was used to generate the stored data.",
        type_hint=Union[list, tuple],
        default_value=UNDEFINED,
    )
    substance = OutputAttribute(
        docstring="The substance which was stored.", type_hint=Substance
    )
    total_number_of_molecules = OutputAttribute(
        docstring="The total number of molecules in the stored system.", type_hint=int
    )
    thermodynamic_state = OutputAttribute(
        docstring="The thermodynamic state which was stored.",
        type_hint=ThermodynamicState,
    )
    observables = OutputAttribute(
        docstring="The stored observables frame.", type_hint=ObservableFrame
    )
    coordinate_file_path = OutputAttribute(
        docstring="A path to the stored simulation output coordinates.", type_hint=str
    )
    trajectory_file_path = OutputAttribute(
        docstring="A path to the stored simulation trajectory.", type_hint=str
    )
    force_field_path = OutputAttribute(
        docstring="A path to the force field parameters used to generate the stored "
        "data.",
        type_hint=str,
    )

    def _execute(self, directory, available_resources):
        # Validate the shape of the input before unpacking it.
        if len(self.simulation_data_path) != 3:
            raise ValueError(
                "The simulation data path should be a tuple of a path to the data "
                "object, directory, and a path to the force field used to generate it."
            )
        data_object_path = self.simulation_data_path[0]
        data_directory = self.simulation_data_path[1]
        force_field_path = self.simulation_data_path[2]
        if not path.isdir(data_directory):
            raise ValueError(
                f"The path to the data directory is invalid: {data_directory}"
            )
        if not path.isfile(force_field_path):
            raise ValueError(
                f"The path to the force field is invalid: {force_field_path}"
            )
        # NOTE(review): `data_object_path` itself is not checked with
        # `path.isfile` before loading — `from_json` is relied upon to raise
        # if the file is missing or malformed.
        data_object = StoredSimulationData.from_json(data_object_path)
        if not isinstance(data_object, StoredSimulationData):
            raise ValueError(
                f"The data path must point to a `StoredSimulationData` "
                f"object, and not a {data_object.__class__.__name__}",
            )
        # Expose the stored attributes as protocol outputs.
        self.substance = data_object.substance
        self.total_number_of_molecules = data_object.number_of_molecules
        self.thermodynamic_state = data_object.thermodynamic_state
        self.observables = data_object.observables
        # File names stored on the data object are relative to the ancillary
        # data directory, so join them into full paths here.
        self.coordinate_file_path = path.join(
            data_directory, data_object.coordinate_file_name
        )
        self.trajectory_file_path = path.join(
            data_directory, data_object.trajectory_file_name
        )
        self.force_field_path = force_field_path
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_utils/test_utils.py | <reponame>jaketanderson/openff-evaluator
"""
Units tests for openff.evaluator.utils.exceptions
"""
import os
import tempfile
from openff.evaluator.utils.utils import get_nested_attribute, is_file_and_not_empty
class DummyNestedClass:
    """A simple two-slot container used to build nested object graphs in
    the tests below."""

    def __init__(self):
        # Both slots start unset; tests assign arbitrary nested values.
        self.object_a = None
        self.object_b = None
class DummyDescriptor1:
    """A no-op data descriptor which records only the name of the class
    attribute it was constructed from."""

    def __init__(self, class_attribute):
        self.attribute = class_attribute.__name__

    def __get__(self, instance, owner=None):
        # Intentionally returns nothing.
        return None

    def __set__(self, instance, value):
        # Intentionally discards the value.
        return None
class DummyDescriptor2:
    """A second no-op data descriptor, structurally identical to
    ``DummyDescriptor1``, for tests which need two distinct descriptor
    types."""

    def __init__(self, class_attribute):
        self.attribute = class_attribute.__name__

    def __get__(self, instance, owner=None):
        # Intentionally returns nothing.
        return None

    def __set__(self, instance, value):
        # Intentionally discards the value.
        return None
def test_get_nested_attribute():
    """`get_nested_attribute` should traverse attributes, dictionary keys
    and list indices through an arbitrarily nested object graph."""

    # A leaf object which will sit inside a list, inside a dict entry.
    leaf = DummyNestedClass()
    leaf.object_a = "a"
    leaf.object_b = "b"

    child_a = DummyNestedClass()
    child_a.object_a = 1
    child_a.object_b = [0]

    child_b = DummyNestedClass()
    child_b.object_a = 2
    child_b.object_b = [leaf]

    root = DummyNestedClass()
    root.object_a = "a"
    root.object_b = {"a": child_a, "b": child_b}

    # Plain attribute access.
    assert get_nested_attribute(root, "object_a") == "a"
    # Dictionary keys and list indices mixed with attribute traversal.
    assert get_nested_attribute(root, "object_b[a].object_a") == 1
    assert get_nested_attribute(root, "object_b[a].object_b[0]") == 0
    assert get_nested_attribute(root, "object_b[b].object_a") == 2
    assert get_nested_attribute(root, "object_b[b].object_b[0].object_a") == "a"
    assert get_nested_attribute(root, "object_b[b].object_b[0].object_b") == "b"
def test_is_file_and_not_empty(tmpdir):
    """``is_file_and_not_empty`` is false for an empty file and true once
    the file has content.

    The files are created inside the ``tmpdir`` pytest fixture. The previous
    implementation opened ``tempfile.TemporaryDirectory()`` contexts but never
    used their paths, so ``testfile`` / ``testfile2`` leaked into the current
    working directory.
    """
    # An existing but empty file should not be reported as non-empty.
    empty_path = os.path.join(str(tmpdir), "testfile")
    with open(empty_path, "w"):
        pass

    assert os.path.isfile(empty_path)
    assert os.path.getsize(empty_path) == 0
    assert not is_file_and_not_empty(empty_path)

    # A file with content should be reported as non-empty.
    full_path = os.path.join(str(tmpdir), "testfile2")
    with open(full_path, "w") as file:
        file.write("ubiquitous mendacious polyglottal")

    assert os.path.isfile(full_path)
    assert os.path.getsize(full_path) != 0
    assert is_file_and_not_empty(full_path)
|
jaketanderson/openff-evaluator | openff/evaluator/datasets/taproom/taproom.py | <reponame>jaketanderson/openff-evaluator
"""
An API for importing a data set from the `taproom
<https://github.com/slochower/host-guest-benchmarks>`_ package.
"""
import logging
import os
from typing import Any, Dict, List, Optional
import pkg_resources
import yaml
from openff.units import unit
from openff.evaluator.datasets import PhysicalPropertyDataSet, PropertyPhase, Source
from openff.evaluator.properties import HostGuestBindingAffinity
from openff.evaluator.substances import Component, ExactAmount, MoleFraction, Substance
from openff.evaluator.thermodynamics import ThermodynamicState
from openff.evaluator.utils.exceptions import MissingOptionalDependency
logger = logging.getLogger(__name__)
class TaproomSource(Source):
    """Metadata describing the provenance of a host-guest binding affinity
    measurement which was pulled from the ``taproom`` package.
    """

    # The fields which are (de)serialized by `__getstate__` / `__setstate__`.
    _fields = ("doi", "comment", "technique", "host_identifier", "guest_identifier")

    def __init__(
        self, doi="", comment="", technique="", host_identifier="", guest_identifier=""
    ):
        """Constructs a new MeasurementSource object.

        Parameters
        ----------
        doi : str
            The DOI for the source
        comment : str
            A description of where the value came from in the source.
        technique : str
            The technique used to measure this value.
        host_identifier : str
            The unique three letter host identifier
        guest_identifier : str
            The unique three letter guest identifier
        """
        self.doi = doi
        self.comment = comment
        self.technique = technique
        self.host_identifier = host_identifier
        self.guest_identifier = guest_identifier

    def __getstate__(self):
        # Serialize as a plain dictionary of the five metadata fields.
        return {name: getattr(self, name) for name in self._fields}

    def __setstate__(self, state):
        for name in self._fields:
            setattr(self, name, state[name])

    def __str__(self):
        return (
            f"host={self.host_identifier} "
            f"guest={self.guest_identifier} "
            f"doi={self.doi}"
        )

    def __repr__(self):
        return f"<TaproomSource {self.__str__()}>"
class TaproomDataSet(PhysicalPropertyDataSet):
    """A dataset of host-guest binding affinity measurements which sources its data
    from the `taproom <https://github.com/slochower/host-guest-benchmarks>`_ package.

    The loaded ``HostGuestBindingAffinity`` properties will also be optionally (enabled
    by default) initialized with the metadata required by the APR estimation workflow.
    """

    def __init__(
        self,
        host_codes: List[str] = None,
        guest_codes: List[str] = None,
        default_ionic_strength: Optional[unit.Quantity] = 150 * unit.millimolar,
        negative_buffer_ion: str = "[Cl-]",
        positive_buffer_ion: str = "[Na+]",
        attach_apr_meta_data: bool = True,
    ):
        """
        Parameters
        ----------
        host_codes
            The three letter codes of the host molecules to load from ``taproom``
            If no list is provided, all hosts will be loaded.
        guest_codes
            The three letter codes of the guest molecules to load from ``taproom``.
            If no list is provided, all guests will be loaded.
        default_ionic_strength
            The default ionic strength to use for measurements. The value
            specified in ``taproom`` will be ignored and this value used
            instead. If no value is provided, no buffer will be included.
        negative_buffer_ion
            The SMILES pattern of the negative buffer ion to use. The value
            specified in ``taproom`` will be ignored and this value used
            instead.
        positive_buffer_ion
            The SMILES pattern of the positive buffer ion to use. The value
            specified in ``taproom`` will be ignored and this value used
            instead.
        attach_apr_meta_data
            Whether to add the metadata required for an APR based calculation
            using the ``paprika`` based workflow.
        """
        super().__init__()
        # A licensed OpenEye toolkit is required to convert the taproom mol2
        # structures into SMILES patterns (see `_mol2_to_smiles`).
        try:
            from openeye import oechem
        except ImportError:
            raise MissingOptionalDependency("openeye.oechem", False)
        unlicensed_library = "openeye.oechem" if not oechem.OEChemIsLicensed() else None
        if unlicensed_library is not None:
            raise MissingOptionalDependency(unlicensed_library, True)
        # TODO: Don't overwrite the taproom ionic strength and buffer ions.
        self._initialize(
            host_codes,
            guest_codes,
            default_ionic_strength,
            negative_buffer_ion,
            positive_buffer_ion,
            attach_apr_meta_data,
        )

    @staticmethod
    def _mol2_to_smiles(file_path: str) -> str:
        """Converts a mol2 file into a smiles string.

        Parameters
        ----------
        file_path: str
            The file path to the mol2 file.

        Returns
        -------
        str
            The smiles descriptor of the loaded molecule
        """
        from openff.toolkit.topology import Molecule

        receptor_molecule = Molecule.from_file(file_path, "MOL2")
        return receptor_molecule.to_smiles()

    @staticmethod
    def _build_substance(
        guest_smiles: Optional[str],
        host_smiles: str,
        ionic_strength: Optional[unit.Quantity],
        negative_buffer_ion: str = "[Cl-]",
        positive_buffer_ion: str = "[Na+]",
    ):
        """Builds a substance containing a ligand and receptor solvated in an aqueous
        solution with a given ionic strength

        Parameters
        ----------
        guest_smiles
            The SMILES descriptor of the guest. May be `None`, in which case a
            host-only substance is built.
        host_smiles
            The SMILES descriptor of the host.
        ionic_strength
            The ionic strength of the aqueous solvent. May be `None`, in which
            case no buffer ions are added.

        Returns
        -------
        The built substance.
        """
        from openff.toolkit.topology import Molecule

        # Support both the standalone `openmm` package and the legacy
        # `simtk.openmm` namespace.
        try:
            from openmm import unit as openmm_unit
        except ImportError:
            from simtk.openmm import unit as openmm_unit
        substance = Substance()
        if guest_smiles is not None:
            guest = Component(smiles=guest_smiles, role=Component.Role.Ligand)
            substance.add_component(component=guest, amount=ExactAmount(1))
        host = Component(smiles=host_smiles, role=Component.Role.Receptor)
        substance.add_component(component=host, amount=ExactAmount(1))
        water = Component(smiles="O", role=Component.Role.Solvent)
        sodium = Component(smiles=positive_buffer_ion, role=Component.Role.Solvent)
        chlorine = Component(smiles=negative_buffer_ion, role=Component.Role.Solvent)
        water_mole_fraction = 1.0
        if ionic_strength is not None:
            salt_mole_fraction = Substance.calculate_aqueous_ionic_mole_fraction(
                ionic_strength
            )
            if isinstance(salt_mole_fraction, unit.Quantity):
                # noinspection PyUnresolvedReferences
                salt_mole_fraction = salt_mole_fraction.magnitude
            # Each of the two buffer ions contributes `salt_mole_fraction`,
            # hence the factor of two.
            water_mole_fraction = 1.0 - salt_mole_fraction * 2
            substance.add_component(
                component=sodium,
                amount=MoleFraction(salt_mole_fraction),
            )
            substance.add_component(
                component=chlorine,
                amount=MoleFraction(salt_mole_fraction),
            )
        substance.add_component(
            component=water, amount=MoleFraction(water_mole_fraction)
        )
        # Add counter ions to neutralize any net charge on the host + guest.
        host_molecule_charge = Molecule.from_smiles(host_smiles).total_charge
        guest_molecule_charge = (
            0.0 * openmm_unit.elementary_charge
            if guest_smiles is None
            else Molecule.from_smiles(guest_smiles).total_charge
        )
        net_charge = (host_molecule_charge + guest_molecule_charge).value_in_unit(
            openmm_unit.elementary_charge
        )
        n_counter_ions = abs(int(net_charge))
        # The 0.9999 tolerance guards against floating point rounding of the
        # summed formal charges.
        if net_charge <= -0.9999:
            substance.add_component(sodium, ExactAmount(n_counter_ions))
        elif net_charge >= 0.9999:
            substance.add_component(chlorine, ExactAmount(n_counter_ions))
        return substance

    @staticmethod
    def _unnest_restraint_specs(
        restraint_specs: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """A helper method to un-nest restraint lists parsed from a taproom
        yaml file, dropping any entries whose ``restraint`` value is missing.

        Parameters
        ----------
        restraint_specs
            The restraint specs to un-nest.
        """
        return [
            value["restraint"]
            for value in restraint_specs
            if value["restraint"] is not None
        ]

    @classmethod
    def _build_metadata(
        cls,
        host_yaml_paths: Dict[str, str],
        guest_yaml_path: str,
        host_substance: Substance,
    ) -> Dict[str, Any]:
        """Constructs the metadata dictionary for a given host-guest
        system.

        Parameters
        ----------
        host_yaml_paths
            The file path to the host YAML file for each guest orientation.
        guest_yaml_path
            The file path to the guest YAML file.
        host_substance
            A substance containing only the host molecule.

        Returns
        -------
        The constructed metadata dictionary.

        Raises
        ------
        NotImplementedError
            If the orientations reference more than one host structure, or
            use differing attach / pull / release lambda paths.
        """
        from paprika.restraints.read_yaml import read_yaml

        guest_spec = read_yaml(guest_yaml_path)
        # Flatten the list of single-entry alias dictionaries into one map.
        guest_aliases = {
            guest_alias: atom_mask
            for guest_alias_entry in guest_spec["aliases"]
            for guest_alias, atom_mask in guest_alias_entry.items()
        }
        metadata = {
            "host_substance": host_substance,
            "guest_restraints": cls._unnest_restraint_specs(
                guest_spec["restraints"]["guest"]
            ),
            "guest_orientation_mask": " ".join(
                [guest_aliases["G1"].strip(), guest_aliases["G2"].strip()]
            ),
            "guest_orientations": [],
            "n_guest_microstates": guest_spec["symmetry_correction"]["microstates"],
            "wall_restraints": cls._unnest_restraint_specs(
                guest_spec["restraints"]["wall_restraints"]
            ),
            "symmetry_restraints": guest_spec["symmetry_correction"]["restraints"],
        }
        for restraint in metadata["symmetry_restraints"]:
            del restraint["restraint"]
        # The pull distance is measured relative to the dummy atom offset
        # taken from the first guest restraint.
        dummy_atom_offset = metadata["guest_restraints"][0]["attach"]["target"]
        pull_distance = (
            metadata["guest_restraints"][0]["pull"]["target"] - dummy_atom_offset
        )
        metadata["dummy_atom_offset"] = dummy_atom_offset * unit.angstrom
        metadata["pull_distance"] = pull_distance * unit.angstrom
        # Collect the per-orientation settings, checking below that they all
        # agree across orientations.
        unique_attach_lambdas = set()
        unique_n_pull_windows = set()
        unique_release_lambdas = set()
        unique_host_structures = set()
        for orientation, host_yaml_path in host_yaml_paths.items():
            host_spec = read_yaml(host_yaml_path)
            root_host_path = os.path.dirname(host_yaml_path)
            host_path = os.path.join(
                root_host_path,
                host_spec["structure"].replace(".mol2", ".pdb"),
            )
            unique_host_structures.add(host_path)
            root_complex_path = os.path.dirname(guest_yaml_path)
            complex_path = os.path.join(
                root_complex_path,
                guest_spec["complex"].replace(".pdb", f"-{orientation}.pdb"),
            )
            metadata["guest_orientations"].append(
                {
                    "coordinate_path": complex_path,
                    "static_restraints": cls._unnest_restraint_specs(
                        host_spec["restraints"]["static"]
                    ),
                    "conformational_restraints": cls._unnest_restraint_specs(
                        host_spec["restraints"]["conformational"]
                    ),
                }
            )
            unique_attach_lambdas.add(
                tuple(host_spec["calculation"]["lambda"]["attach"])
            )
            unique_n_pull_windows.add(host_spec["calculation"]["windows"]["pull"])
            unique_release_lambdas.add(
                tuple(host_spec["calculation"]["lambda"]["release"])
            )
        if len(unique_host_structures) != 1:
            raise NotImplementedError("There must only be a single host structure.")
        if (
            len(unique_attach_lambdas) != 1
            or len(unique_n_pull_windows) != 1
            or len(unique_release_lambdas) != 1
        ):
            raise NotImplementedError(
                "Currently all host orientations must use the same lambda paths."
            )
        attach_lambdas = [*next(iter(unique_attach_lambdas))]
        n_pull_windows = next(iter(unique_n_pull_windows))
        release_lambdas = [*next(iter(unique_release_lambdas))]
        metadata.update(
            {
                "host_coordinate_path": next(iter(unique_host_structures)),
                "attach_windows_indices": [*range(len(attach_lambdas))],
                "attach_lambdas": attach_lambdas,
                "pull_windows_indices": [*range(n_pull_windows)],
                "n_pull_windows": n_pull_windows,
                # NOTE(review): the release indices are derived from
                # `len(attach_lambdas)` rather than `len(release_lambdas)` —
                # possibly intentional if the two paths always have equal
                # length, but worth confirming.
                "release_windows_indices": [*range(len(attach_lambdas))],
                "release_lambdas": release_lambdas,
            }
        )
        return metadata

    def _initialize(
        self,
        host_codes: List[str],
        guest_codes: List[str],
        ionic_strength: Optional[unit.Quantity],
        negative_buffer_ion: str,
        positive_buffer_ion: str,
        attach_apr_meta_data: bool,
    ):
        """Initializes the data set from the data made available by taproom.

        Parameters
        ----------
        host_codes
            The three letter codes of the host molecules to load from ``taproom``
            If no list is provided, all hosts will be loaded.
        guest_codes
            The three letter codes of the guest molecules to load from ``taproom``.
            If no list is provided, all guests will be loaded.
        ionic_strength
            The default ionic strength to use for measurements. The value
            specified in ``taproom`` will be ignored and this value used
            instead.
        negative_buffer_ion
            The SMILES pattern of the negative buffer ion to use. The value
            specified in ``taproom`` will be ignored and this value used
            instead.
        positive_buffer_ion
            The SMILES pattern of the positive buffer ion to use. The value
            specified in ``taproom`` will be ignored and this value used
            instead.
        attach_apr_meta_data
            Whether to add the metadata required for an APR based calculation
            using the ``paprika`` based workflow.
        """
        # Discover the benchmark data exposed through the `taproom.benchmarks`
        # entry point group.
        installed_benchmarks = {}
        for entry_point in pkg_resources.iter_entry_points(group="taproom.benchmarks"):
            installed_benchmarks[entry_point.name] = entry_point.load()
        if len(installed_benchmarks) == 0:
            raise ValueError(
                "No installed benchmarks could be found. Make sure the "
                "`host-guest-benchmarks` package is installed."
            )
        measurements = installed_benchmarks["host_guest_measurements"]
        systems = installed_benchmarks["host_guest_systems"]
        all_properties = []
        for host_name in measurements:
            if host_codes and host_name not in host_codes:
                continue
            for guest_name in measurements[host_name]:
                if guest_codes and guest_name not in guest_codes:
                    continue
                # Make sure this measurement has a corresponding system
                if host_name not in systems or guest_name not in systems[host_name]:
                    continue
                measurement_path = measurements[host_name][guest_name]["yaml"]
                with open(measurement_path, "r") as file:
                    measurement_yaml = yaml.safe_load(file)
                # The taproom YAML stores quantities as strings with units,
                # which `unit.Quantity` parses directly.
                temperature = unit.Quantity(measurement_yaml["state"]["temperature"])
                pressure = unit.Quantity(measurement_yaml["state"]["pressure"])
                value = unit.Quantity(measurement_yaml["measurement"]["delta_G"])
                uncertainty = unit.Quantity(
                    measurement_yaml["measurement"]["delta_G_uncertainty"]
                )
                source = TaproomSource(
                    doi=measurement_yaml["provenance"]["doi"],
                    comment=measurement_yaml["provenance"]["comment"],
                    technique=measurement_yaml["measurement"]["technique"],
                    host_identifier=host_name,
                    guest_identifier=guest_name,
                )
                # The host SMILES is extracted from the structure referenced
                # by the first orientation's host YAML file.
                orientations = [
                    orientation for orientation in systems[host_name]["yaml"]
                ]
                host_yaml_path = systems[host_name]["yaml"][orientations[0]]
                with open(host_yaml_path, "r") as file:
                    host_yaml = yaml.safe_load(file)
                host_mol2_path = str(
                    host_yaml_path.parent.joinpath(host_yaml["structure"])
                )
                host_smiles = self._mol2_to_smiles(host_mol2_path)
                guest_yaml_path = systems[host_name][guest_name]["yaml"]
                with open(guest_yaml_path, "r") as file:
                    guest_yaml = yaml.safe_load(file)
                guest_mol2_path = str(
                    host_yaml_path.parent.joinpath(guest_name).joinpath(
                        guest_yaml["structure"]
                    )
                )
                guest_smiles = self._mol2_to_smiles(guest_mol2_path)
                # Build both the full (host + guest) substance and a host-only
                # substance for the APR metadata.
                substance = self._build_substance(
                    guest_smiles,
                    host_smiles,
                    ionic_strength,
                    negative_buffer_ion,
                    positive_buffer_ion,
                )
                host_only_substance = self._build_substance(
                    None,
                    host_smiles,
                    ionic_strength,
                    negative_buffer_ion,
                    positive_buffer_ion,
                )
                measured_property = HostGuestBindingAffinity(
                    thermodynamic_state=ThermodynamicState(temperature, pressure),
                    phase=PropertyPhase.Liquid,
                    substance=substance,
                    value=value,
                    uncertainty=uncertainty,
                    source=source,
                )
                if attach_apr_meta_data:
                    measured_property.metadata = self._build_metadata(
                        systems[host_name]["yaml"],
                        systems[host_name][guest_name]["yaml"],
                        host_only_substance,
                    )
                all_properties.append(measured_property)
        self.add_properties(*all_properties)
|
jaketanderson/openff-evaluator | openff/evaluator/thermodynamics.py | <filename>openff/evaluator/thermodynamics.py
"""
Defines an API for defining thermodynamic states.
"""
from enum import Enum
from openff.units import unit
from openff.evaluator.attributes import UNDEFINED, Attribute, AttributeClass
class Ensemble(Enum):
    """An enum describing the supported thermodynamic ensembles."""

    # Constant particle number, volume and temperature.
    NVT = "NVT"
    # Constant particle number, pressure and temperature.
    NPT = "NPT"
class ThermodynamicState(AttributeClass):
    """Data specifying a physical thermodynamic state obeying
    Boltzmann statistics.

    Notes
    -----
    Equality of two thermodynamic states is determined by comparing
    the temperature in kelvin to within 3 decimal places, and comparing
    the pressure (if defined) in pascals to within 3 decimal places.

    Examples
    --------
    Specify an NPT state at 298 K and 1 atm pressure.

    >>> state = ThermodynamicState(temperature=298.0*unit.kelvin, pressure=1.0*unit.atmospheres)

    Note that the pressure is only relevant for periodic systems.
    """

    temperature = Attribute(
        docstring="The external temperature.", type_hint=unit.Quantity
    )
    pressure = Attribute(
        docstring="The external pressure.", type_hint=unit.Quantity, optional=True
    )

    @property
    def inverse_beta(self):
        """Returns the temperature multiplied by the molar gas constant"""
        return (self.temperature * unit.molar_gas_constant).to(
            unit.kilojoule / unit.mole
        )

    @property
    def beta(self):
        """Returns one divided by the temperature multiplied by the molar gas constant"""
        return 1.0 / self.inverse_beta

    def __init__(self, temperature=None, pressure=None):
        """Constructs a new ThermodynamicState object.

        Parameters
        ----------
        temperature : openff.evaluator.unit.Quantity
            The external temperature
        pressure : openff.evaluator.unit.Quantity
            The external pressure
        """
        if temperature is not None:
            self.temperature = temperature
        if pressure is not None:
            self.pressure = pressure

    def validate(self, attribute_type=None):
        super(ThermodynamicState, self).validate(attribute_type)

        # The `to` calls raise if the stored quantities carry units which are
        # not compatible with pressure / temperature respectively.
        if self.pressure != UNDEFINED:
            self.pressure.to(unit.pascals)
            assert self.pressure > 0.0 * unit.pascals

        self.temperature.to(unit.kelvin)
        assert self.temperature > 0.0 * unit.kelvin

    def _comparison_key(self):
        """Return the key used by both ``__eq__`` and ``__hash__`` — the
        temperature in kelvin and the pressure in pascals (``None`` when
        undefined), each rendered to 3 decimal places."""
        temperature = self.temperature.to(unit.kelvin).magnitude
        pressure = (
            None
            if self.pressure == UNDEFINED
            else self.pressure.to(unit.pascal).magnitude
        )
        return (
            f"{temperature:.3f}",
            None if pressure is None else f"{pressure:.3f}",
        )

    def __repr__(self):
        return f"<ThermodynamicState {str(self)}>"

    def __str__(self):
        return_value = f"T={self.temperature:~}"

        if self.pressure != UNDEFINED:
            return_value += f" P={self.pressure:~}"

        return return_value

    def __hash__(self):
        return hash(self._comparison_key())

    def __eq__(self, other):
        if not isinstance(other, ThermodynamicState):
            return False

        # Compare the rounded values directly rather than their hashes so that
        # a hash collision can never make two distinct states compare equal.
        # The hash remains consistent with equality (same key is hashed).
        return self._comparison_key() == other._comparison_key()

    def __ne__(self, other):
        return not (self == other)
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_datasets/test_thermoml.py | <reponame>jaketanderson/openff-evaluator<gh_stars>10-100
"""
Units tests for openff.evaluator.datasets
"""
import pytest
from openff.units import unit
from openff.evaluator.attributes import UNDEFINED
from openff.evaluator.datasets import PhysicalProperty, PropertyPhase
from openff.evaluator.datasets.thermoml import thermoml_property
from openff.evaluator.datasets.thermoml.thermoml import (
ThermoMLDataSet,
_unit_from_thermoml_string,
)
from openff.evaluator.plugins import register_default_plugins
from openff.evaluator.properties import EnthalpyOfMixing
from openff.evaluator.utils import get_data_filename
# Make sure the framework's built-in property plug-ins are registered before
# the ThermoML parsing tests below run.
register_default_plugins()
@thermoml_property("Osmotic coefficient", supported_phases=PropertyPhase.Liquid)
class OsmoticCoefficient(PhysicalProperty):
    """A minimal osmotic coefficient property used to exercise the
    ThermoML plug-in system in these tests."""

    # The parameter was already named `cls` but the `@classmethod` decorator
    # was missing, making this an instance method by accident.
    @classmethod
    def default_unit(cls):
        """Return the default unit of this property type (dimensionless)."""
        return unit.dimensionless
@thermoml_property(
    "Vapor or sublimation pressure, kPa",
    supported_phases=PropertyPhase.Liquid | PropertyPhase.Gas,
)
class VaporPressure(PhysicalProperty):
    """A minimal vapor pressure property used to exercise the ThermoML
    plug-in system in these tests."""

    # The parameter was already named `cls` but the `@classmethod` decorator
    # was missing, making this an instance method by accident.
    @classmethod
    def default_unit(cls):
        """Return the default unit of this property type (kPa)."""
        return unit.kilopascal
@thermoml_property("Activity coefficient", supported_phases=PropertyPhase.Liquid)
class ActivityCoefficient(PhysicalProperty):
    """A minimal activity coefficient property used to exercise the
    ThermoML plug-in system in these tests."""

    # The parameter was already named `cls` but the `@classmethod` decorator
    # was missing, making this an instance method by accident.
    @classmethod
    def default_unit(cls):
        """Return the default unit of this property type (dimensionless)."""
        return unit.dimensionless
# The ThermoML unit strings which `_unit_from_thermoml_string` is expected to
# be able to parse (exercised by `test_thermoml_unit_from_string` below).
supported_units = [
    "K",
    "kPa",
    "kg/m3",
    "mol/kg",
    "mol/dm3",
    "kJ/mol",
    "m3/kg",
    "mol/m3",
    "m3/mol",
    "J/K/mol",
    "J/K/kg",
    "J/K/m3",
    "1/kPa",
    "m/s",
    "MHz",
]
@pytest.mark.parametrize("unit_string", supported_units)
def test_thermoml_unit_from_string(unit_string):
    """A test to ensure all unit conversions are valid."""

    parsed_unit = _unit_from_thermoml_string(f"Property, {unit_string}")

    assert parsed_unit is not None
    assert isinstance(parsed_unit, unit.Unit)
def test_thermoml_from_url():
    """A test to ensure that ThermoML archive files can be loaded from a url."""

    valid_url = "https://trc.nist.gov/ThermoML/10.1021/acs.jced.6b00916.xml"

    data_set = ThermoMLDataSet.from_url(valid_url)
    assert data_set is not None
    assert len(data_set) > 0

    # A malformed url should yield no data set rather than raising.
    data_set = ThermoMLDataSet.from_url(valid_url + "d")
    assert data_set is None
def test_thermoml_from_doi():
    """A test to ensure that ThermoML archive files can be loaded from a doi."""

    data_set = ThermoMLDataSet.from_doi("10.1016/j.jct.2016.10.001")

    assert data_set is not None
    assert len(data_set) > 0

    # An invalid doi should yield no data set rather than raising.
    assert ThermoMLDataSet.from_doi("10.1016/j.jct.2016.12.009x") is None
def test_thermoml_from_files():
    """A test to ensure that ThermoML archive files can be loaded from local sources."""

    file_paths = [
        get_data_filename("properties/single_density.xml"),
        get_data_filename("properties/single_dielectric.xml"),
        get_data_filename("properties/single_enthalpy_mixing.xml"),
    ]

    data_set = ThermoMLDataSet.from_file(*file_paths)

    assert data_set is not None
    assert len(data_set) == 3

    # Make sure the DOI was found from the enthalpy file
    for physical_property in data_set:
        source = physical_property.source

        if isinstance(physical_property, EnthalpyOfMixing):
            assert source.doi != UNDEFINED
            assert source.doi == "10.1016/j.jct.2008.12.004"
        else:
            assert source.doi == ""
            assert source.reference != UNDEFINED

    # A bogus file name should fail gracefully rather than raising.
    data_set = ThermoMLDataSet.from_file("dummy_filename")
    assert data_set is None
def test_thermoml_mass_constraints():
    """A collection of tests to ensure that the Mass fraction constraint is
    implemented correctly alongside solvent constraints."""

    # Mass fraction on its own, then combined with a mass fraction and a
    # mole fraction solvent constraint respectively.
    for file_name in ["mass.xml", "mass_mass.xml", "mass_mole.xml"]:
        data_set = ThermoMLDataSet.from_file(
            get_data_filename("test/properties/" + file_name)
        )

        assert data_set is not None
        assert len(data_set) > 0
def test_thermoml_molality_constraints():
    """A collection of tests to ensure that the Molality constraint is
    implemented correctly alongside solvent constraints."""

    # Molality on its own, then combined with a mass fraction, mole fraction
    # and molality solvent constraint respectively.
    file_names = [
        "molality.xml",
        "molality_mass.xml",
        "molality_mole.xml",
        "molality_molality.xml",
    ]

    for file_name in file_names:
        data_set = ThermoMLDataSet.from_file(
            get_data_filename("test/properties/" + file_name)
        )

        assert data_set is not None
        assert len(data_set) > 0
def test_thermoml_mole_constraints():
    """A collection of tests to ensure that the Mole fraction constraint is
    implemented correctly alongside solvent constraints."""

    # Mole fraction on its own, then combined with a mass fraction, mole
    # fraction and molality solvent constraint respectively.
    file_names = [
        "mole.xml",
        "mole_mass.xml",
        "mole_mole.xml",
        "mole_molality.xml",
    ]

    for file_name in file_names:
        data_set = ThermoMLDataSet.from_file(
            get_data_filename("test/properties/" + file_name)
        )

        assert data_set is not None
        assert len(data_set) > 0
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_datasets/test_taproom.py | import pytest
from openff.evaluator.datasets.taproom import TaproomDataSet
from openff.evaluator.datasets.taproom.taproom import TaproomSource
from openff.evaluator.utils.exceptions import MissingOptionalDependency
try:
    # OpenEye is an optional, licensed dependency - flag its absence with a
    # sentinel so the tests below can be skipped rather than erroring.
    import openeye.oechem
except ImportError:
    openeye = None
@pytest.mark.skipif(
    openeye is None or not openeye.oechem.OEChemIsLicensed(),
    reason="OpenEye is required for this test.",
)
def test_taproom():
    """Loading a single host / guest pair should yield exactly one property
    with a correctly populated taproom source."""

    data_set = TaproomDataSet(host_codes=["acd"], guest_codes=["bam"])
    assert len(data_set) == 1

    source = data_set.properties[0].source

    assert isinstance(source, TaproomSource)
    assert source.host_identifier == "acd"
    assert source.guest_identifier == "bam"
@pytest.mark.skipif(
    openeye is None or not openeye.oechem.OEChemIsLicensed(),
    reason="OpenEye is required for this test.",
)
def test_taproom_missing_oe_license(monkeypatch):
    """An unlicensed OpenEye install should raise a clear dependency error."""

    from openeye import oechem

    # Pretend the OpenEye license check fails.
    monkeypatch.setattr(oechem, "OEChemIsLicensed", lambda: False)

    with pytest.raises(MissingOptionalDependency) as error_info:
        TaproomDataSet()

    raised_error = error_info.value

    assert raised_error.library_name == "openeye.oechem"
    assert raised_error.license_issue
@pytest.mark.skipif(
    openeye is not None,
    reason="OpenEye must not be present for this test.",
)
def test_taproom_missing_oe():
    """A missing OpenEye install should raise a clear dependency error."""

    with pytest.raises(MissingOptionalDependency) as error_info:
        TaproomDataSet()

    raised_error = error_info.value

    assert raised_error.library_name == "openeye.oechem"
    assert not raised_error.license_issue
|
jaketanderson/openff-evaluator | openff/evaluator/workflow/exceptions.py | <filename>openff/evaluator/workflow/exceptions.py
from openff.evaluator.utils.exceptions import EvaluatorException
class WorkflowException(EvaluatorException):
    """An exception which was raised while executing a workflow
    protocol.
    """

    def __init__(self, message=None, protocol_id=None):
        """Constructs a new WorkflowException object.

        Parameters
        ----------
        message: str or list of str
            Information about the raised exception.
        protocol_id: str
            The id of the protocol which raised the exception.
        """
        super(WorkflowException, self).__init__(message)
        self.protocol_id = protocol_id

    def __getstate__(self):
        state = super(WorkflowException, self).__getstate__()

        # Only include the id in the serialized state when it is actually set.
        if self.protocol_id is not None:
            state["protocol_id"] = self.protocol_id

        return state

    def __setstate__(self, state):
        super(WorkflowException, self).__setstate__(state)

        # Use `get` so `protocol_id` is always defined after deserialization.
        # Previously the attribute was only assigned when present in `state`,
        # which could leave it missing entirely and break `__str__`.
        self.protocol_id = state.get("protocol_id")

    def __str__(self):
        base_str = super(WorkflowException, self).__str__()
        return f"{self.protocol_id} failed to execute.\n\n{base_str}"
|
jaketanderson/openff-evaluator | openff/evaluator/workflow/schemas.py | <filename>openff/evaluator/workflow/schemas.py<gh_stars>10-100
"""
A collection of schemas which represent elements of a workflow.
"""
import re
from typing import Dict, Iterable, List
from openff.evaluator.attributes import UNDEFINED, Attribute, AttributeClass
from openff.evaluator.attributes.typing import is_type_subclass_of_type
from openff.evaluator.storage.attributes import StorageAttribute
from openff.evaluator.storage.data import BaseStoredData
from openff.evaluator.utils.observables import Observable
from openff.evaluator.utils.serialization import TypedBaseModel
from openff.evaluator.workflow.attributes import InputAttribute
from openff.evaluator.workflow.plugins import registered_workflow_protocols
from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue
class ProtocolSchema(AttributeClass):
    """A json serializable representation of a workflow protocol."""

    id = Attribute(
        docstring="The unique id associated with the protocol.",
        type_hint=str,
    )
    type = Attribute(
        docstring="The type of protocol associated with this schema.",
        type_hint=str,
        read_only=True,
    )
    inputs = Attribute(
        docstring="The inputs to the protocol.", type_hint=dict, read_only=True
    )

    def __init__(self, unique_id=None, protocol_type=None, inputs=None):
        # Only assign those values which were actually provided. The private
        # setter is used as `type` and `inputs` are read-only attributes.
        provided_values = (
            ("id", unique_id),
            ("type", protocol_type),
            ("inputs", inputs),
        )

        for attribute_name, attribute_value in provided_values:
            if attribute_value is None:
                continue

            self._set_value(attribute_name, attribute_value)

    def to_protocol(self):
        """Creates a new protocol object from this schema.

        Returns
        -------
        Protocol
            The protocol created from this schema.
        """
        from openff.evaluator.workflow import Protocol

        return Protocol.from_schema(self)
class ProtocolGroupSchema(ProtocolSchema):
    """A json serializable representation of a workflow protocol
    group.
    """

    protocol_schemas = Attribute(
        docstring="The schemas of the protocols within this group.",
        type_hint=dict,
        read_only=True,
    )

    def __init__(
        self, unique_id=None, protocol_type=None, inputs=None, protocol_schemas=None
    ):
        super(ProtocolGroupSchema, self).__init__(unique_id, protocol_type, inputs)

        if protocol_schemas is None:
            return

        self._set_value("protocol_schemas", protocol_schemas)

    def validate(self, attribute_type=None):
        super(ProtocolGroupSchema, self).validate(attribute_type)

        # Each entry must map a protocol id onto that protocol's schema.
        for child_id, child_schema in self.protocol_schemas.items():
            assert isinstance(child_id, str)
            assert isinstance(child_schema, ProtocolSchema)
class ProtocolReplicator(TypedBaseModel):
    """A protocol replicator contains the information necessary to replicate
    parts of a property estimation workflow.

    Any protocol whose id includes `$(replicator.id)` (where `replicator.id` is the
    id of a replicator) will be cloned for each value present in `template_values`.

    Protocols that are being replicated will also have any ReplicatorValue inputs replaced
    with the actual value taken from `template_values`.

    When the protocol is replicated, the `$(replicator.id)` placeholder in the protocol
    id will be replaced an integer which corresponds to the index of a value in the
    `template_values` array.

    Any protocols which take input from a replicated protocol will be updated to
    instead take a list of value, populated by the outputs of the replicated
    protocols.

    Notes
    -----
    * The `template_values` property must be a list of either constant values,
      or `ProtocolPath` objects which take their value from the `global` scope.
    * If children of replicated protocols are also flagged as to be replicated,
      they will only have their ids changed to match the index of the parent
      protocol, as opposed to being fully replicated.
    """

    @property
    def placeholder_id(self):
        """The string which protocols to be replicated should include in
        their ids."""
        return f"$({self.id})"

    def __init__(self, replicator_id=""):
        """Constructs a new ProtocolReplicator object.

        Parameters
        ----------
        replicator_id: str
            The id of this replicator.
        """
        self.id = replicator_id
        # The values to replicate over - populated after construction.
        self.template_values = None

    def __getstate__(self):
        return {"id": self.id, "template_values": self.template_values}

    def __setstate__(self, state):
        self.id = state["id"]
        self.template_values = state["template_values"]

    def apply(
        self, protocols, template_values=None, template_index=-1, template_value=None
    ):
        """Applies this replicator to the provided set of protocols and any of
        their children.

        This protocol should be followed by a call to `update_references`
        to ensure that all protocols which take their input from a replicated
        protocol get correctly updated.

        Parameters
        ----------
        protocols: dict of str and Protocol
            The protocols to apply the replicator to.
        template_values: list of Any
            A list of the values which will be inserted
            into the newly replicated protocols.

            This parameter is mutually exclusive with
            `template_index` and `template_value`
        template_index: int, optional
            A specific index which should be used for any
            protocols flagged as to be replicated by this
            replicator. This option is mainly used when
            replicating children of an already replicated
            protocol.

            This parameter is mutually exclusive with
            `template_values` and must be set along with
            a `template_value`.
        template_value: Any, optional
            A specific value which should be used for any
            protocols flagged as to be replicated by this
            replicator. This option is mainly used when
            replicating children of an already replicated
            protocol.

            This parameter is mutually exclusive with
            `template_values` and must be set along with
            a `template_index`.

        Returns
        -------
        dict of str and Protocol
            The replicated protocols.
        dict of ProtocolPath and list of tuple of ProtocolPath and int
            A dictionary of references to all of the protocols which have
            been replicated, with keys of original protocol ids. Each value
            is comprised of a list of the replicated protocol ids, and their
            index into the `template_values` array.
        """
        # Exactly one of `template_values` or the (`template_index`,
        # `template_value`) pair must have been provided.
        if (
            template_values is not None
            and (template_index >= 0 or template_value is not None)
        ) or (
            template_values is None and (template_index < 0 or template_value is None)
        ):
            raise ValueError(
                "Either the template values array must be set, or a specific "
                "template index and value must be passed."
            )

        replicated_protocols = {}
        replicated_protocol_map = {}

        for protocol_id, protocol in protocols.items():
            should_replicate = self.placeholder_id in protocol_id

            # If this protocol should not be directly replicated then try and
            # replicate any child protocols...
            if not should_replicate:
                replicated_protocols[protocol_id] = protocol

                if template_index is not None and template_index >= 0:
                    # Make sure to include children of replicated protocols in the
                    # map to ensure correct behaviour when updating children of replicated
                    # protocols which have the replicator id in their name, and take input
                    # from another child protocol which doesn't have the replicator id in
                    # its name.
                    if ProtocolPath("", protocol_id) not in replicated_protocol_map:
                        replicated_protocol_map[ProtocolPath("", protocol_id)] = []

                    replicated_protocol_map[ProtocolPath("", protocol_id)].append(
                        (ProtocolPath("", protocol_id), template_index)
                    )

                self._apply_to_protocol_children(
                    protocol,
                    replicated_protocol_map,
                    template_values,
                    template_index,
                    template_value,
                )

                continue

            # ..otherwise, we need to replicate this protocol.
            replicated_protocols.update(
                self._apply_to_protocol(
                    protocol,
                    replicated_protocol_map,
                    template_values,
                    template_index,
                    template_value,
                )
            )

        return replicated_protocols, replicated_protocol_map

    def _apply_to_protocol(
        self,
        protocol,
        replicated_protocol_map,
        template_values=None,
        template_index=-1,
        template_value=None,
    ):
        # Replicate `protocol` once per template value (or once for the single
        # (`template_index`, `template_value`) pair), returning the new
        # protocols keyed by their replicated ids.
        replicated_protocol_map[ProtocolPath("", protocol.id)] = []
        replicated_protocols = {}

        template_values_dict = {template_index: template_value}

        if template_values is not None:
            template_values_dict = {
                index: template_value
                for index, template_value in enumerate(template_values)
            }

        for index, template_value in template_values_dict.items():
            protocol_schema = protocol.schema
            protocol_schema.id = protocol_schema.id.replace(
                self.placeholder_id, str(index)
            )

            # Rebuild the protocol from its schema under the new, indexed id.
            replicated_protocol = registered_workflow_protocols[protocol_schema.type](
                protocol_schema.id
            )
            replicated_protocol.schema = protocol_schema

            replicated_protocol_map[ProtocolPath("", protocol.id)].append(
                (ProtocolPath("", replicated_protocol.id), index)
            )

            # Pass the template values to any inputs which require them.
            for required_input in replicated_protocol.required_inputs:
                input_value = replicated_protocol.get_value(required_input)

                if not isinstance(input_value, ReplicatorValue):
                    continue

                elif input_value.replicator_id != self.id:
                    # The value belongs to a different replicator - just bake this
                    # replicator's index into its id and leave it to be filled later.
                    input_value.replicator_id = input_value.replicator_id.replace(
                        self.placeholder_id, str(index)
                    )
                    continue

                replicated_protocol.set_value(required_input, template_value)

            self._apply_to_protocol_children(
                replicated_protocol,
                replicated_protocol_map,
                None,
                index,
                template_value,
            )

            replicated_protocols[replicated_protocol.id] = replicated_protocol

        return replicated_protocols

    def _apply_to_protocol_children(
        self,
        protocol,
        replicated_protocol_map,
        template_values=None,
        template_index=-1,
        template_value=None,
    ):
        # Delegate replication of any child protocols to the protocol itself.
        replicated_child_ids = protocol.apply_replicator(
            self, template_values, template_index, template_value
        )

        # Append the id of this protocols to any replicated child protocols.
        for child_id, replicated_ids in replicated_child_ids.items():
            child_id.prepend_protocol_id(protocol.id)

            for replicated_id, _ in replicated_ids:
                replicated_id.prepend_protocol_id(protocol.id)

        replicated_protocol_map.update(replicated_child_ids)

    def update_references(self, protocols, replication_map, template_values):
        """Redirects the input references of protocols to the replicated
        versions.

        Parameters
        ----------
        protocols: dict of str and Protocol
            The protocols which have had this replicator applied
            to them.
        replication_map: dict of ProtocolPath and list of tuple of ProtocolPath and int
            A dictionary of references to all of the protocols which have
            been replicated, with keys of original protocol ids. Each value
            is comprised of a list of the replicated protocol ids, and their
            index into the `template_values` array.
        template_values: List of Any
            A list of the values which will be inserted
            into the newly replicated protocols.
        """
        # Invert the map so each replicated id points back at the original id
        # and the template index it was replicated with.
        inverse_replication_map = {}

        for original_id, replicated_ids in replication_map.items():
            for replicated_id, index in replicated_ids:
                inverse_replication_map[replicated_id] = (original_id, index)

        for protocol_id, protocol in protocols.items():
            # Look at each of the protocols inputs and see if its value is either a ProtocolPath,
            # or a list of ProtocolPath's.
            for required_input in protocol.required_inputs:
                all_value_references = protocol.get_value_references(required_input)
                replicated_value_references = {}

                for source_path, value_reference in all_value_references.items():
                    if self.placeholder_id not in value_reference.full_path:
                        continue

                    replicated_value_references[source_path] = value_reference

                # If this protocol does not take input from one of the replicated protocols,
                # then we are done.
                if len(replicated_value_references) == 0:
                    continue

                for source_path, value_reference in replicated_value_references.items():
                    full_source_path = source_path.copy()
                    full_source_path.prepend_protocol_id(protocol_id)

                    # If the protocol was not itself replicated by this replicator, its value
                    # is set to a list containing references to all newly replicated protocols.
                    # Otherwise, the value will be set to a reference to just the protocol which
                    # was replicated using the same index.
                    value_source = [
                        ProtocolPath.from_string(
                            value_reference.full_path.replace(
                                self.placeholder_id, str(index)
                            )
                        )
                        for index in range(len(template_values))
                    ]

                    for replicated_id, map_tuple in inverse_replication_map.items():
                        original_id, replicated_index = map_tuple

                        if (
                            full_source_path.protocol_path
                            != replicated_id.protocol_path
                        ):
                            continue

                        value_source = ProtocolPath.from_string(
                            value_reference.full_path.replace(
                                self.placeholder_id, str(replicated_index)
                            )
                        )

                        break

                    # Replace the input value with a list of ProtocolPath's that point to
                    # the newly generated protocols.
                    protocol.set_value(source_path, value_source)
class WorkflowSchema(AttributeClass):
    """The schematic for a property estimation workflow."""

    # NOTE(review): `default_value=[]` is a shared mutable default - presumably
    # the `Attribute` descriptor copies defaults per instance; verify.
    protocol_schemas = Attribute(
        docstring="The schemas for the protocols which will make up the workflow.",
        type_hint=list,
        default_value=[],
    )
    protocol_replicators = Attribute(
        docstring="A set of replicators which will replicate parts of the workflow.",
        type_hint=list,
        optional=True,
    )
    final_value_source = Attribute(
        docstring="A reference to which protocol output corresponds to the estimated "
        "value of the property.",
        type_hint=ProtocolPath,
        optional=True,
    )
    outputs_to_store = Attribute(
        docstring="A collection of data classes to populate ready to be stored by a "
        "`StorageBackend`.",
        type_hint=dict,
        optional=True,
    )
    def replace_protocol_types(self, protocol_replacements, protocol_group_schema=None):
        """Replaces protocols with given types with other protocols
        of specified replacements. This is useful when replacing
        the default protocols with custom ones, or swapping out base
        protocols with actual implementations

        Warnings
        --------
        This method is NOT fully implemented and is likely to fail in
        all but a few specific cases. This method should be used with
        extreme caution.

        Parameters
        ----------
        protocol_replacements: dict of str and str, optional
            A dictionary with keys of the types of protocols which should be replaced
            with those protocols named by the values.
        protocol_group_schema: ProtocolGroupSchema
            The protocol group to apply the replacements to. This
            is mainly used when applying this method recursively.
        """
        if protocol_replacements is None:
            return

        if protocol_group_schema is None:
            protocol_schemas = {x.id: x for x in self.protocol_schemas}
        else:
            protocol_schemas = protocol_group_schema.protocol_schemas

        for protocol_schema_key in protocol_schemas:
            protocol_schema = protocol_schemas[protocol_schema_key]

            if protocol_schema.type not in protocol_replacements:
                continue

            # Rebuild the protocol under the replacement type, copying across
            # any inputs which the two protocol types share.
            protocol = protocol_schema.to_protocol()

            new_protocol = registered_workflow_protocols[
                protocol_replacements[protocol_schema.type]
            ](protocol_schema.id)

            for input_path in new_protocol.required_inputs:
                if input_path not in protocol.required_inputs:
                    continue

                value = protocol.get_value(input_path)
                new_protocol.set_value(input_path, value)

            protocol_schemas[protocol_schema_key] = new_protocol.schema

            # NOTE(review): these two calls always mutate the *top level*
            # `self.protocol_schemas` list, even when this method has recursed
            # into a nested `protocol_group_schema` - in that case `remove`
            # looks likely to raise ValueError. Presumably this is part of why
            # the method is flagged as not fully implemented; confirm before
            # relying on recursive replacement.
            self.protocol_schemas.remove(protocol_schema)
            self.protocol_schemas.append(new_protocol.schema)

            # Recurse into any replaced protocol groups.
            if isinstance(protocol_schemas[protocol_schema_key], ProtocolGroupSchema):
                self.replace_protocol_types(
                    protocol_replacements, protocol_schemas[protocol_schema_key]
                )
def _find_protocols_to_be_replicated(self, replicator, protocols=None):
"""Finds all protocols which have been flagged to be replicated
by a specified replicator.
Parameters
----------
replicator: ProtocolReplicator
The replicator of interest.
protocols: dict of str and ProtocolSchema or list of ProtocolSchema, optional
The protocols to search through. If None, then
all protocols in this schema will be searched.
Returns
-------
list of str
The ids of the protocols to be replicated by the specified replicator
"""
if protocols is None:
protocols = {x.id: x for x in self.protocol_schemas}
if isinstance(protocols, list):
protocols = {protocol.id: protocol for protocol in protocols}
protocols_to_replicate = []
for protocol_id, protocol in protocols.items():
if protocol_id.find(replicator.placeholder_id) >= 0:
protocols_to_replicate.append(protocol_id)
# Search through any children
if not isinstance(protocol, ProtocolGroupSchema):
continue
protocols_to_replicate.extend(
self._find_protocols_to_be_replicated(
replicator, protocol.protocol_schemas
)
)
return protocols_to_replicate
def _get_unreplicated_path(self, protocol_path):
"""Checks to see if the protocol pointed to by this path will only
exist after a replicator has been applied, and if so, returns a
path to the unreplicated protocol.
Parameters
----------
protocol_path: ProtocolPath
The path to convert to an unreplicated path.
Returns
-------
ProtocolPath
The path which should point to only unreplicated protocols
"""
if self.protocol_replicators == UNDEFINED:
return protocol_path.copy()
full_unreplicated_path = str(protocol_path.full_path)
for replicator in self.protocol_replicators:
if replicator.placeholder_id in full_unreplicated_path:
continue
protocols_to_replicate = self._find_protocols_to_be_replicated(replicator)
for protocol_id in protocols_to_replicate:
match_pattern = re.escape(
protocol_id.replace(replicator.placeholder_id, r"\d+")
)
match_pattern = match_pattern.replace(re.escape(r"\d+"), r"\d+")
full_unreplicated_path = re.sub(
match_pattern, protocol_id, full_unreplicated_path
)
return ProtocolPath.from_string(full_unreplicated_path)
@staticmethod
def _get_unnested_protocol_path(protocol_path):
"""Returns a protocol path whose nested property name
has been truncated to only include the top level name,
e.g:
`some_protocol_id.value.error` would be truncated to `some_protocol_id.value`
and
`some_protocol_id.value[1]` would be truncated to `some_protocol_id.value`
Parameters
----------
protocol_path: ProtocolPath
The path to truncate.
Returns
-------
ProtocolPath
The truncated path.
"""
property_name = protocol_path.property_name
# Remove any nested property names from the path
if protocol_path.property_name.find(".") >= 0:
property_name = property_name.split(".")[0]
# Remove any array indices from the path
if protocol_path.property_name.find("[") >= 0:
property_name = property_name.split("[")[0]
return ProtocolPath(property_name, *protocol_path.protocol_ids)
    def _validate_replicators(self, schemas_by_id):
        """Validates any attached replicators - each must have a non-empty id
        and template values which are either a list of constants / existing
        protocol outputs, or a reference into the global scope. The final
        value must not come from a replicated protocol."""
        if self.protocol_replicators == UNDEFINED:
            return

        assert all(isinstance(x, ProtocolReplicator) for x in self.protocol_replicators)

        for replicator in self.protocol_replicators:
            assert replicator.id is not None and len(replicator.id) > 0

            if not isinstance(replicator.template_values, list) and not isinstance(
                replicator.template_values, ProtocolPath
            ):
                raise ValueError(
                    "The template values of a replicator must either be "
                    "a list of values, or a reference to a list of values."
                )

            if isinstance(replicator.template_values, list):
                # Each templated value must either be a constant, or reference
                # the output of a protocol which actually exists.
                for template_value in replicator.template_values:
                    if not isinstance(template_value, ProtocolPath):
                        continue

                    if template_value.start_protocol not in schemas_by_id:
                        raise ValueError(
                            f"The value source {template_value} does not exist."
                        )

            elif isinstance(replicator.template_values, ProtocolPath):
                if not replicator.template_values.is_global:
                    raise ValueError(
                        "Template values must either be a constant, or come from the "
                        "global scope."
                    )

            if (
                self.final_value_source != UNDEFINED
                and self.final_value_source.protocol_path.find(
                    replicator.placeholder_id
                )
                >= 0
            ):
                # NOTE(review): the two message fragments below concatenate
                # without a separating space ("from" + "a protocol").
                raise ValueError(
                    "The final value source cannot come from"
                    "a protocol which is being replicated."
                )
    def _validate_final_value(self, schemas_by_id):
        """Validates that the output referenced by ``final_value_source``
        exists on a protocol in this schema and is declared as an
        ``Observable``."""
        if self.final_value_source == UNDEFINED:
            return

        assert isinstance(self.final_value_source, ProtocolPath)

        if self.final_value_source.start_protocol not in schemas_by_id:
            raise ValueError(
                f"The value source {self.final_value_source} does not exist."
            )

        protocol_schema = schemas_by_id[self.final_value_source.start_protocol]

        # Re-create the protocol so the referenced value and its declared
        # attribute type can actually be resolved; `get_value` raises if the
        # referenced output is missing.
        protocol_object = protocol_schema.to_protocol()
        protocol_object.get_value(self.final_value_source)

        attribute_type = protocol_object.get_class_attribute(
            self.final_value_source
        ).type_hint
        assert is_type_subclass_of_type(attribute_type, Observable)
    def _validate_outputs_to_store(self, schemas_by_id):
        """Validates that the references to the outputs to store
        are valid.
        """
        if self.outputs_to_store == UNDEFINED:
            return

        assert all(
            isinstance(x, BaseStoredData) for x in self.outputs_to_store.values()
        )

        for output_label in self.outputs_to_store:
            output_to_store = self.outputs_to_store[output_label]
            output_to_store.validate()

            for attribute_name in output_to_store.get_attributes(StorageAttribute):
                attribute_value = getattr(output_to_store, attribute_name)

                # A value drawn from a replicator must name a replicator which
                # is actually attached to this schema.
                if isinstance(attribute_value, ReplicatorValue):
                    matching_replicas = [
                        x
                        for x in self.protocol_replicators
                        if attribute_value.replicator_id == x.id
                    ]

                    if len(matching_replicas) == 0:
                        raise ValueError(
                            f"An output to store is trying to take its value from a "
                            f"replicator {attribute_value.replicator_id} which does "
                            f"not exist."
                        )

                # Only protocol-output references need checking beyond here;
                # global references are validated elsewhere.
                if (
                    not isinstance(attribute_value, ProtocolPath)
                    or attribute_value.is_global
                ):
                    continue

                if attribute_value.start_protocol not in schemas_by_id:
                    raise ValueError(f"The {attribute_value} source does not exist.")

                protocol_schema = schemas_by_id[attribute_value.start_protocol]

                # Currently we do not support validating nested or indexed attributes.
                attribute_value = ProtocolPath(
                    attribute_value.property_name.split(".")[0].split("[")[0],
                    *attribute_value.protocol_ids,
                )

                # `get_value` raises if the referenced output is missing.
                protocol_object = protocol_schema.to_protocol()
                protocol_object.get_value(attribute_value)
    def _validate_interfaces(self, schemas_by_id):
        """Validates the flow of the data between protocols, ensuring
        that inputs and outputs correctly match up.

        Raises
        ------
        ValueError
            If a required input is unset, references a non-existent protocol,
            or has a type incompatible with the referenced output.
        """

        for protocol_schema in schemas_by_id.values():
            protocol_object = protocol_schema.to_protocol()

            # First pass - make sure every non-optional input has been set.
            for input_path in protocol_object.required_inputs:
                input_value = protocol_object.get_value(input_path)
                input_attribute = protocol_object.get_class_attribute(input_path)

                if not isinstance(input_attribute, InputAttribute):
                    continue

                is_optional = input_attribute.optional

                if input_value == UNDEFINED and is_optional is False:
                    raise ValueError(
                        f"The {input_path} required input of protocol "
                        f"{protocol_schema.id} was not set."
                    )

            # Second pass - check that every value referenced from another
            # protocol exists and is type compatible with the input.
            for input_path in protocol_object.required_inputs:
                value_references = protocol_object.get_value_references(input_path)

                for source_path, value_reference in value_references.items():
                    if value_reference.is_global:
                        # We handle global input validation separately
                        continue

                    value_reference = self._get_unreplicated_path(value_reference)

                    # Make sure the other protocol whose output we are interested
                    # in actually exists.
                    if (
                        value_reference.start_protocol not in schemas_by_id
                        and value_reference.start_protocol != protocol_object.id
                    ):
                        raise ValueError(
                            f"The {protocol_object.id} protocol tries to take input "
                            f"from a non-existent protocol: {value_reference.full_path}"
                        )

                    if value_reference.start_protocol != protocol_object.id:
                        other_protocol_schema = schemas_by_id[
                            value_reference.start_protocol
                        ]
                        other_protocol_object = other_protocol_schema.to_protocol()
                    else:
                        # A protocol may reference one of its own outputs.
                        other_protocol_object = protocol_object

                    unnested_value_reference = self._get_unnested_protocol_path(
                        value_reference
                    )
                    unnested_source_path = self._get_unnested_protocol_path(source_path)

                    # Make sure the other protocol has the output referenced
                    # by this input.
                    other_protocol_object.get_value(unnested_value_reference)

                    # Do a very rudimentary type check between the input and
                    # output types. This is not currently possible for nested
                    # or indexed properties, or outputs of replicated protocols.
                    if (
                        value_reference.full_path != unnested_value_reference.full_path
                        or source_path.full_path != unnested_source_path.full_path
                    ):
                        continue

                    is_replicated_reference = False

                    protocol_replicators = self.protocol_replicators

                    if protocol_replicators == UNDEFINED:
                        protocol_replicators = []

                    for replicator in protocol_replicators:
                        # Skip the type check when exactly one side of the
                        # reference involves a replicator placeholder - the
                        # reference only resolves once the replicator is applied.
                        if (
                            replicator.placeholder_id in protocol_schema.id
                            and replicator.placeholder_id
                            in value_reference.protocol_path
                        ) or (
                            replicator.placeholder_id not in protocol_schema.id
                            and replicator.placeholder_id
                            not in value_reference.protocol_path
                        ):
                            continue

                        is_replicated_reference = True
                        break

                    if is_replicated_reference:
                        continue

                    expected_input_type = protocol_object.get_class_attribute(
                        unnested_source_path
                    ).type_hint
                    expected_output_type = other_protocol_object.get_class_attribute(
                        unnested_value_reference
                    ).type_hint

                    if expected_input_type is None or expected_output_type is None:
                        continue

                    if not is_type_subclass_of_type(
                        expected_output_type, expected_input_type
                    ):
                        raise ValueError(
                            f"The output type ({expected_output_type}) of "
                            f"{value_reference} does not match the requested "
                            f"input type ({expected_input_type}) of {source_path}."
                        )
@classmethod
def _find_child_ids(cls, schemas_by_id: Dict[str, ProtocolSchema]) -> List[str]:
"""A function which will recursive find the ids of all protocols in a
workflow.
Parameters
----------
schemas_by_id
The protocols to find the child ids of.
"""
protocol_ids = []
for protocol_id, protocol_schema in schemas_by_id.items():
protocol_ids.append(protocol_id)
if not isinstance(protocol_schema, ProtocolGroupSchema):
continue
protocol_ids.extend(cls._find_child_ids(protocol_schema.protocol_schemas))
return protocol_ids
@classmethod
def _find_duplicates(cls, iterable: Iterable[str]) -> List[str]:
"""Returns the duplicate items in a list.
Notes
-----
* Based on the answer by moooeeeep (accessed 09/11/2020 14:56) on stack overflow
here: https://stackoverflow.com/a/9836685
"""
seen = set()
seen_add = seen.add
seen_twice = set(x for x in iterable if x in seen or seen_add(x))
return list(seen_twice)
def _validate_unique_children(self, schemas_by_id: Dict[str, ProtocolSchema]):
"""Validates that every protocol in a workflow has a unique id."""
all_protocol_ids = self._find_child_ids(schemas_by_id)
duplicate_ids = self._find_duplicates(all_protocol_ids)
if len(duplicate_ids) > 0:
raise ValueError(
f"Several protocols in the schema have the same id: "
f"{duplicate_ids}. This is currently unsupported due to issues "
f"with merging two graphs which contain duplicate ids."
)
def _validate_replicated_child_ids(self, schemas_by_id: Dict[str, ProtocolSchema]):
"""Validates that the children of replicated protocols also unique ids to
avoid issues when merging workflows."""
if self.protocol_replicators == UNDEFINED:
return
replicator_ids = [x.placeholder_id for x in self.protocol_replicators]
for protocol_id, protocol_schema in schemas_by_id.items():
if not isinstance(protocol_schema, ProtocolGroupSchema):
continue
for replicator_id in replicator_ids:
if replicator_id not in protocol_id:
continue
if any(
replicator_id not in child_id
for child_id in protocol_schema.protocol_schemas
):
raise ValueError(
f"The children of replicated protocol {protocol_id} must also "
f"contain the replicators placeholder id in their id to ensure "
f"all replicated protocols have a unique id. This is to avoid "
f"issues when mering multiple workflows."
)
self._validate_replicated_child_ids(protocol_schema.protocol_schemas)
    def validate(self, attribute_type=None):
        """Validate the full workflow schema: basic protocol types, id
        uniqueness, data sources / sinks, and the interfaces between protocols.

        Raises
        ------
        ValueError
            If any part of the schema is invalid.
        """
        super(WorkflowSchema, self).validate(attribute_type)

        # Do some simple type checking.
        assert len(self.protocol_schemas) > 0
        assert all(isinstance(x, ProtocolSchema) for x in self.protocol_schemas)

        schemas_by_id = {x.id: x for x in self.protocol_schemas}

        # Validate unique ids. This is critical to ensure correct merging.
        self._validate_unique_children(schemas_by_id)
        self._validate_replicated_child_ids(schemas_by_id)

        # Validate the different pieces of data to populate / draw from.
        self._validate_final_value(schemas_by_id)
        self._validate_replicators(schemas_by_id)
        self._validate_outputs_to_store(schemas_by_id)

        # Validate the interfaces between protocols
        self._validate_interfaces(schemas_by_id)
|
jaketanderson/openff-evaluator | openff/evaluator/utils/utils.py | <reponame>jaketanderson/openff-evaluator<gh_stars>10-100
"""
A collection of general utilities.
"""
import contextlib
import logging
import os
import sys
from tempfile import TemporaryDirectory
from typing import Optional
from openff.evaluator.utils.string import extract_variable_index_and_name
def get_data_filename(relative_path):
    """Get the full path to one of the reference files in data.

    In the source distribution, these files are in ``evaluator/data/``,
    but on installation, they're moved to somewhere in the user's python
    site-packages directory.

    Parameters
    ----------
    relative_path : str
        The relative path of the file to load.
    """
    from pkg_resources import resource_filename

    file_path = resource_filename(
        "openff.evaluator", os.path.join("data", relative_path)
    )

    # Fail loudly if the data file is missing from the installed package.
    if not os.path.exists(file_path):
        raise ValueError(
            "Sorry! %s does not exist. If you just added it, you'll have to re-install"
            % file_path
        )

    return file_path
def timestamp_formatter() -> logging.Formatter:
    """Returns a logging formatter which outputs in the style of
    ``YEAR-MONTH-DAY HOUR:MINUTE:SECOND.MILLISECOND LEVEL MESSAGE``.
    """
    # Millisecond precision is appended manually via %(msecs)03d since
    # the date format itself only resolves down to seconds.
    message_format = "%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s"
    date_format = "%Y-%m-%d %H:%M:%S"

    return logging.Formatter(fmt=message_format, datefmt=date_format)
def setup_timestamp_logging(file_path=None):
    """Set up timestamp based logging which outputs in the style of
    ``YEAR-MONTH-DAY HOUR:MINUTE:SECOND.MILLISECOND LEVEL MESSAGE``.

    Parameters
    ----------
    file_path: str, optional
        The file to write the log to. If none, the logger will
        print to the terminal.
    """
    # Log either to stdout or to the requested file.
    if file_path is None:
        handler = logging.StreamHandler(stream=sys.stdout)
    else:
        handler = logging.FileHandler(file_path)

    handler.setFormatter(timestamp_formatter())

    # Attach the handler to the root logger at INFO level.
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    root_logger.addHandler(handler)
def safe_unlink(file_path):
    """Attempts to remove the file at the given path,
    catching any file not found exceptions.

    Parameters
    ----------
    file_path: str
        The path to the file to remove.
    """
    # ``OSError`` covers file-not-found and the other removal failure modes
    # which the original implementation silently ignored.
    with contextlib.suppress(OSError):
        os.unlink(file_path)
def get_nested_attribute(containing_object, name):
    """A recursive version of getattr, which has full support
    for attribute names which contain list / dict indices

    Parameters
    ----------
    containing_object: Any
        The object which contains the nested attribute.
    name: str
        The name (/ path) of the nested attribute with successive
        attribute names separated by periods, for example:

            name = 'attribute_a.attribute_b[index].attribute_c'

    Returns
    -------
    Any
        The value of the attribute.

    Raises
    ------
    ValueError
        If any attribute along the path does not exist, or if a list index
        is not an integer.
    """
    attribute_name_split = name.split(".")

    current_attribute = containing_object

    for index, full_attribute_name in enumerate(attribute_name_split):
        array_index = None
        attribute_name = full_attribute_name

        # Split e.g. ``attribute_b[index]`` into a name and an index.
        if attribute_name.find("[") >= 0 or attribute_name.find("]") >= 0:
            attribute_name, array_index = extract_variable_index_and_name(
                attribute_name
            )

        # A None value anywhere along the path short-circuits to None rather
        # than raising.
        if current_attribute is None:
            return None

        if not isinstance(current_attribute, dict):
            if not hasattr(current_attribute, attribute_name):
                raise ValueError(
                    "This object does not have a {} "
                    "attribute.".format(".".join(attribute_name_split[: index + 1]))
                )
            else:
                current_attribute = getattr(current_attribute, attribute_name)
        else:
            # Dictionaries are traversed by key rather than by attribute.
            if attribute_name not in current_attribute:
                raise ValueError(
                    "This object does not have a {} "
                    "attribute.".format(".".join(attribute_name_split[: index + 1]))
                )
            else:
                current_attribute = current_attribute[attribute_name]

        if array_index is not None:
            # Indices into lists must be integers; other containers (e.g.
            # dicts) are indexed with the raw index string.
            if isinstance(current_attribute, list):
                try:
                    array_index = int(array_index)
                except ValueError:
                    raise ValueError(
                        "List indices must be integer: "
                        "{}".format(".".join(attribute_name_split[: index + 1]))
                    )

            array_value = current_attribute[array_index]
            current_attribute = array_value

    return current_attribute
def set_nested_attribute(containing_object, name, value):
    """A recursive version of setattr, which has full support
    for attribute names which contain list / dict indices

    Parameters
    ----------
    containing_object: Any
        The object which contains the nested attribute.
    name: str
        The name (/ path) of the nested attribute with successive
        attribute names separated by periods, for example:

            name = 'attribute_a.attribute_b[index].attribute_c'
    value: Any
        The value to set on the attribute.

    Raises
    ------
    ValueError
        If any attribute along the path does not exist, or if a list index
        is not an integer.
    """
    current_attribute = containing_object
    attribute_name = name

    # If the name is a dotted path, resolve everything up to the final
    # component with ``get_nested_attribute`` and keep only the last part.
    # Fix: this previously tested ``find(".") > 1``, which silently broke
    # paths whose first attribute name was a single character (e.g. "a.b").
    if attribute_name.find(".") >= 1:
        last_separator_index = attribute_name.rfind(".")

        current_attribute = get_nested_attribute(
            current_attribute, attribute_name[:last_separator_index]
        )
        attribute_name = attribute_name[last_separator_index + 1 :]

    if attribute_name.find("[") >= 0:
        # The final component is indexed, e.g. ``attribute_b[index]``.
        attribute_name, array_index = extract_variable_index_and_name(attribute_name)

        if not hasattr(current_attribute, attribute_name):
            raise ValueError(
                "This object does not have a {} attribute.".format(attribute_name)
            )

        current_attribute = getattr(current_attribute, attribute_name)

        # List indices must be integers; other containers (e.g. dicts) are
        # indexed with the raw index string.
        if isinstance(current_attribute, list):
            try:
                array_index = int(array_index)
            except ValueError:
                raise ValueError(
                    "List indices must be integer: {}".format(attribute_name)
                )

        current_attribute[array_index] = value
    else:
        if not hasattr(current_attribute, attribute_name):
            raise ValueError(
                "This object does not have a {} attribute.".format(attribute_name)
            )

        setattr(current_attribute, attribute_name, value)
@contextlib.contextmanager
def temporarily_change_directory(directory_path: Optional[str] = None):
    """Temporarily move the current working directory to the path
    specified. If no path is given, a temporary directory will be
    created, moved into, and then destroyed when the context manager
    is closed.

    Parameters
    ----------
    directory_path
        The directory to change into. If None, a temporary directory will
        be created and changed into.
    """
    # An empty string means "stay where we are".
    if directory_path is not None and len(directory_path) == 0:
        yield
        return

    original_directory = os.getcwd()

    try:
        if directory_path is not None:
            os.chdir(directory_path)
            yield
        else:
            # Work inside a scratch directory which is removed on exit.
            with TemporaryDirectory() as scratch_directory:
                os.chdir(scratch_directory)
                yield
    finally:
        # Always restore the original working directory.
        os.chdir(original_directory)
def has_openeye():
    """Checks whether the `openeye` toolkits are available for use

    Returns
    -------
    bool
        True if the `openeye` toolkit can be imported and has a valid
        license.
    """
    try:
        from openeye import oechem
    except ImportError:
        return False

    # Being importable is not enough - a valid license is also required.
    return bool(oechem.OEChemIsLicensed())
def is_file_and_not_empty(file_path):
    """Checks that a file both exists at the specified ``path`` and is not empty.

    Parameters
    ----------
    file_path: str
        The file path to check.

    Returns
    -------
    bool
        That a file both exists at the specified ``path`` and is not empty.
    """
    if not os.path.isfile(file_path):
        return False

    return os.path.getsize(file_path) != 0
|
jaketanderson/openff-evaluator | openff/evaluator/protocols/reweighting.py | <reponame>jaketanderson/openff-evaluator
"""
A collection of protocols for reweighting cached simulation data.
"""
import abc
import copy
import functools
import typing
from os import path
import numpy as np
import pymbar
from openff.units import unit
from openff.evaluator.attributes import UNDEFINED
from openff.evaluator.forcefield import ParameterGradient
from openff.evaluator.forcefield.system import ParameterizedSystem
from openff.evaluator.protocols.analysis import compute_dielectric_constant
from openff.evaluator.thermodynamics import ThermodynamicState
from openff.evaluator.utils.observables import (
Observable,
ObservableArray,
ObservableFrame,
bootstrap,
)
from openff.evaluator.workflow import Protocol, workflow_protocol
from openff.evaluator.workflow.attributes import InputAttribute, OutputAttribute
@workflow_protocol()
class ConcatenateTrajectories(Protocol):
    """A protocol which concatenates multiple trajectories into
    a single one.
    """

    # Inputs: parallel lists of PDB topology files and DCD trajectory files.
    input_coordinate_paths = InputAttribute(
        docstring="A list of paths to the starting PDB coordinates for each of the "
        "trajectories.",
        type_hint=list,
        default_value=UNDEFINED,
    )
    input_trajectory_paths = InputAttribute(
        docstring="A list of paths to the trajectories to concatenate.",
        type_hint=list,
        default_value=UNDEFINED,
    )

    output_coordinate_path = OutputAttribute(
        docstring="The path the PDB coordinate file which contains the topology "
        "of the concatenated trajectory.",
        type_hint=str,
    )
    output_trajectory_path = OutputAttribute(
        docstring="The path to the concatenated trajectory.", type_hint=str
    )

    def _execute(self, directory, available_resources):
        """Load each input trajectory, join them, and write the result to
        ``output_trajectory.dcd`` inside ``directory``.
        """
        import mdtraj

        if len(self.input_coordinate_paths) != len(self.input_trajectory_paths):
            raise ValueError(
                "There should be the same number of coordinate and trajectory paths."
            )

        if len(self.input_trajectory_paths) == 0:
            raise ValueError("No trajectories were given to concatenate.")

        trajectories = []

        output_coordinate_path = None

        for coordinate_path, trajectory_path in zip(
            self.input_coordinate_paths, self.input_trajectory_paths
        ):
            # The first topology file becomes the topology of the output.
            output_coordinate_path = output_coordinate_path or coordinate_path
            trajectories.append(mdtraj.load_dcd(trajectory_path, coordinate_path))

        self.output_coordinate_path = output_coordinate_path

        # Skip the join entirely when there is only a single trajectory.
        # NOTE(review): the positional booleans appear to be mdtraj.join's
        # ``check_topology`` / ``discard_overlapping_frames`` - confirm
        # against the pinned mdtraj version.
        output_trajectory = (
            trajectories[0]
            if len(trajectories) == 1
            else mdtraj.join(trajectories, False, False)
        )

        self.output_trajectory_path = path.join(directory, "output_trajectory.dcd")
        output_trajectory.save_dcd(self.output_trajectory_path)
@workflow_protocol()
class ConcatenateObservables(Protocol):
    """A protocol which concatenates multiple ``ObservableFrame`` objects into
    a single ``ObservableFrame`` object.
    """

    input_observables = InputAttribute(
        docstring="A list of observable arrays to concatenate.",
        type_hint=list,
        default_value=UNDEFINED,
    )
    output_observables = OutputAttribute(
        docstring="The concatenated observable array.",
        type_hint=typing.Union[ObservableArray, ObservableFrame],
    )

    def _execute(self, directory, available_resources):
        """Join the input observable objects into ``output_observables``."""

        if len(self.input_observables) == 0:
            raise ValueError("No arrays were given to concatenate.")

        # All inputs must share one type (``ObservableArray`` or
        # ``ObservableFrame``) so that ``join`` is well defined.
        if not all(
            isinstance(observables, type(self.input_observables[0]))
            for observables in self.input_observables
        ):
            raise ValueError("The observables to concatenate must be the same type.")

        object_type = type(self.input_observables[0])

        if len(self.input_observables) > 1:
            self.output_observables = object_type.join(*self.input_observables)
        else:
            # A single input is deep copied rather than joined.
            self.output_observables = copy.deepcopy(self.input_observables[0])
class BaseEvaluateEnergies(Protocol, abc.ABC):
    """A base class for protocols which will re-evaluate the energy of a series
    of configurations for a given set of force field parameters.

    Subclasses are expected to implement the actual evaluation (e.g. using a
    specific simulation engine) and populate ``output_observables``.
    """

    thermodynamic_state = InputAttribute(
        docstring="The state to calculate the reduced potentials at.",
        type_hint=ThermodynamicState,
        default_value=UNDEFINED,
    )

    parameterized_system = InputAttribute(
        docstring="The parameterized system object which encodes the systems potential "
        "energy function.",
        type_hint=ParameterizedSystem,
        default_value=UNDEFINED,
    )
    enable_pbc = InputAttribute(
        docstring="If true, periodic boundary conditions will be enabled.",
        type_hint=bool,
        default_value=True,
    )

    trajectory_file_path = InputAttribute(
        docstring="The path to the trajectory file which contains the "
        "configurations to calculate the energies of.",
        type_hint=str,
        default_value=UNDEFINED,
    )

    # Optional list of force field parameters with respect to which the
    # evaluated energies should also be differentiated.
    gradient_parameters = InputAttribute(
        docstring="An optional list of parameters to differentiate the evaluated "
        "energies with respect to.",
        type_hint=list,
        default_value=lambda: list(),
    )

    output_observables = OutputAttribute(
        docstring="An observable array which stores the reduced potentials potential "
        "energies evaluated at the specified state and using the specified system "
        "object for each configuration in the trajectory.",
        type_hint=ObservableFrame,
    )
class BaseMBARProtocol(Protocol, abc.ABC):
    """Re-weights a set of observables using MBAR to calculate
    the average value of the observables at a different state
    than they were originally measured.
    """

    reference_reduced_potentials: typing.List[ObservableArray] = InputAttribute(
        docstring="The reduced potentials of each configuration evaluated at each of "
        "the reference states.",
        type_hint=list,
        default_value=UNDEFINED,
    )
    target_reduced_potentials = InputAttribute(
        docstring="The reduced potentials of each configuration evaluated at the "
        "target state.",
        type_hint=ObservableArray,
        default_value=UNDEFINED,
    )

    frame_counts = InputAttribute(
        docstring="The number of configurations per reference state. The sum of these"
        "should equal the length of the ``reference_reduced_potentials`` and "
        "``target_reduced_potentials`` input arrays as well any input observable "
        "arrays.",
        type_hint=list,
        default_value=UNDEFINED,
    )

    bootstrap_uncertainties = InputAttribute(
        docstring="If true, bootstrapping will be used to estimated the total "
        "uncertainty in the reweighted value.",
        type_hint=bool,
        default_value=False,
    )
    bootstrap_iterations = InputAttribute(
        docstring="The number of bootstrap iterations to perform if bootstraped "
        "uncertainties have been requested",
        type_hint=int,
        default_value=250,
    )

    required_effective_samples = InputAttribute(
        docstring="The minimum number of effective samples required to be able to "
        "reweight the observable. If the effective samples is less than this minimum "
        "an exception will be raised.",
        type_hint=int,
        default_value=50,
    )

    value = OutputAttribute(
        docstring="The re-weighted average value of the observable at the target "
        "state.",
        type_hint=Observable,
    )
    effective_samples = OutputAttribute(
        docstring="The number of effective samples which were re-weighted.",
        type_hint=float,
    )

    @abc.abstractmethod
    def _observables(self) -> typing.Dict[str, ObservableArray]:
        """The observables which will be re-weighted to yield the final average
        observable of interest.
        """
        raise NotImplementedError()

    @staticmethod
    def _compute_weights(
        mbar: pymbar.MBAR, target_reduced_potentials: ObservableArray
    ) -> ObservableArray:
        """Return the values that each sample at the target state should be weighted
        by.

        Parameters
        ----------
        mbar
            A pre-computed MBAR object encoded information from the reference states.
        target_reduced_potentials
            The reduced potentials at the target state.

        Returns
        -------
            The values to weight each sample by.
        """
        from scipy.special import logsumexp

        u_kn = target_reduced_potentials.value.to(unit.dimensionless).magnitude.T

        # Log of the MBAR mixture-distribution denominator for each sample,
        # computed in log-space for numerical stability.
        log_denominator_n = logsumexp(mbar.f_k - mbar.u_kn.T, b=mbar.N_k, axis=1)

        # The (negative log) normalization constant of the target state.
        f_hat = -logsumexp(-u_kn - log_denominator_n, axis=1)

        # Calculate the weights
        weights = np.exp(f_hat - u_kn - log_denominator_n) * unit.dimensionless

        # Compute the gradients of the weights.
        weight_gradients = []

        for gradient in target_reduced_potentials.gradients:
            gradient_value = gradient.value.magnitude.flatten()

            # Compute the numerator of the gradient. We need to specifically ask for the
            # sign of the exp sum as the numerator may be negative.
            d_f_hat_numerator, d_f_hat_numerator_sign = logsumexp(
                -u_kn - log_denominator_n, b=gradient_value, axis=1, return_sign=True
            )
            d_f_hat_d_theta = d_f_hat_numerator_sign * np.exp(d_f_hat_numerator + f_hat)

            d_weights_d_theta = (
                (d_f_hat_d_theta - gradient_value) * weights * gradient.value.units
            )

            weight_gradients.append(
                ParameterGradient(key=gradient.key, value=d_weights_d_theta.T)
            )

        return ObservableArray(value=weights.T, gradients=weight_gradients)

    def _compute_effective_samples(
        self, reference_reduced_potentials: ObservableArray
    ) -> float:
        """Compute the effective number of samples which contribute to the final
        re-weighted estimate.

        Parameters
        ----------
        reference_reduced_potentials
            An 2D array containing the reduced potentials of each configuration
            evaluated at each reference state.

        Returns
        -------
            The effective number of samples.
        """
        # Construct an MBAR object so that the number of effective samples can
        # be computed.
        mbar = pymbar.MBAR(
            reference_reduced_potentials.value.to(unit.dimensionless).magnitude.T,
            self.frame_counts,
            verbose=False,
            relative_tolerance=1e-12,
        )

        weights = (
            self._compute_weights(mbar, self.target_reduced_potentials)
            .value.to(unit.dimensionless)
            .magnitude
        )

        # Effective sample size estimated as 1 / sum(w_i^2).
        effective_samples = 1.0 / np.sum(weights**2)
        return float(effective_samples)

    def _execute(self, directory, available_resources):
        # Retrieve the observables to reweight.
        observables = self._observables()

        if len(observables) == 0:
            raise ValueError("There were no observables to reweight.")

        if len(self.frame_counts) != len(self.reference_reduced_potentials):
            raise ValueError("A frame count must be provided for each reference state.")

        # Every input array must contain exactly one entry per configuration.
        expected_frames = sum(self.frame_counts)

        if any(
            len(input_array) != expected_frames
            for input_array in [
                self.target_reduced_potentials,
                *self.reference_reduced_potentials,
                *observables.values(),
            ]
        ):
            raise ValueError(
                f"The length of the input arrays do not match the expected length "
                f"specified by the frame counts ({expected_frames})."
            )

        # Concatenate the reduced reference potentials into a single array.
        # We ignore the gradients of the reference state potential as these
        # should be all zero.
        reference_reduced_potentials = ObservableArray(
            value=np.hstack(
                [
                    reduced_potentials.value
                    for reduced_potentials in self.reference_reduced_potentials
                ]
            )
        )

        # Ensure that there is enough effective samples to re-weight.
        self.effective_samples = self._compute_effective_samples(
            reference_reduced_potentials
        )

        if self.effective_samples < self.required_effective_samples:
            raise ValueError(
                f"There was not enough effective samples to reweight - "
                f"{self.effective_samples} < {self.required_effective_samples}"
            )

        if self.bootstrap_uncertainties:
            # Resample configurations (per reference state) to estimate the
            # uncertainty in the re-weighted value.
            self.value = bootstrap(
                self._bootstrap_function,
                self.bootstrap_iterations,
                1.0,
                self.frame_counts,
                reference_reduced_potentials=reference_reduced_potentials,
                target_reduced_potentials=self.target_reduced_potentials,
                **observables,
            )
        else:
            self.value = self._bootstrap_function(
                reference_reduced_potentials=reference_reduced_potentials,
                target_reduced_potentials=self.target_reduced_potentials,
                **observables,
            )

    def _bootstrap_function(self, **observables: ObservableArray) -> Observable:
        """Re-weights a set of reference observables to the target state.

        Parameters
        -------
        observables
            The observables to reweight, in addition to the reference and target
            reduced potentials.
        """
        reference_reduced_potentials = observables.pop("reference_reduced_potentials")
        target_reduced_potentials = observables.pop("target_reduced_potentials")

        # Construct the mbar object using the specified reference reduced potentials.
        # These may be the input values or values which have been sampled during
        # bootstrapping, hence why it is not precomputed once.
        mbar = pymbar.MBAR(
            reference_reduced_potentials.value.to(unit.dimensionless).magnitude.T,
            self.frame_counts,
            verbose=False,
            relative_tolerance=1e-12,
        )

        # Compute the MBAR weights.
        weights = self._compute_weights(mbar, target_reduced_potentials)

        return self._reweight_observables(
            weights, mbar, target_reduced_potentials, **observables
        )

    def _reweight_observables(
        self,
        weights: ObservableArray,
        mbar: pymbar.MBAR,
        target_reduced_potentials: ObservableArray,
        **observables: ObservableArray,
    ) -> typing.Union[ObservableArray, Observable]:
        """A function which computes the average value of an observable using
        weights computed from MBAR and from a set of component observables.

        Parameters
        ----------
        weights
            The MBAR weights
        observables
            The component observables which may be combined to yield the final
            average observable of interest.
        mbar
            A pre-computed MBAR object encoded information from the reference states.
            This will be used to compute the std error when not bootstrapping.
        target_reduced_potentials
            The reduced potentials at the target state. This will be used to compute
            the std error when not bootstrapping.

        Returns
        -------
            The re-weighted average observable.
        """
        observable = observables.pop("observable")
        assert len(observables) == 0

        # Scalar observables yield an ``Observable``; multi-dimensional ones
        # yield an ``ObservableArray``.
        return_type = ObservableArray if observable.value.shape[1] > 1 else Observable

        weighted_observable = weights * observable

        # The re-weighted average is the sum of the weighted samples.
        average_value = weighted_observable.value.sum(axis=0)
        average_gradients = [
            ParameterGradient(key=gradient.key, value=gradient.value.sum(axis=0))
            for gradient in weighted_observable.gradients
        ]

        if return_type == Observable:
            average_value = average_value.item()
            average_gradients = [
                ParameterGradient(key=gradient.key, value=gradient.value.item())
                for gradient in average_gradients
            ]
        else:
            average_value = average_value.reshape(1, -1)
            average_gradients = [
                ParameterGradient(key=gradient.key, value=gradient.value.reshape(1, -1))
                for gradient in average_gradients
            ]

        if self.bootstrap_uncertainties is False:
            # Unfortunately we need to re-compute the average observable for now
            # as pymbar does not expose an easier way to compute the average
            # uncertainty.
            observable_dimensions = observable.value.shape[1]
            assert observable_dimensions == 1

            results = mbar.computeExpectations(
                observable.value.T.magnitude,
                target_reduced_potentials.value.T.magnitude,
                state_dependent=True,
            )

            uncertainty = results[1][-1] * observable.value.units
            average_value = average_value.plus_minus(uncertainty)

        return return_type(value=average_value, gradients=average_gradients)
@workflow_protocol()
class ReweightObservable(BaseMBARProtocol):
    """Reweight an array of observables to a new state using MBAR."""

    observable = InputAttribute(
        docstring="The observables to reweight. The array should contain the values of "
        "the observable evaluated for of each configuration at the target state.",
        type_hint=ObservableArray,
        default_value=UNDEFINED,
    )

    def _observables(self) -> typing.Dict[str, ObservableArray]:
        # The single "observable" entry is consumed by the base class's
        # default ``_reweight_observables`` implementation.
        return {"observable": self.observable}
@workflow_protocol()
class ReweightDielectricConstant(BaseMBARProtocol):
    """Computes the average value of the dielectric constant by re-weighting
    a set of dipole moments and volumes using MBAR.
    """

    dipole_moments = InputAttribute(
        docstring="The dipole moments evaluated at reference state's configurations"
        "using the force field of the target state.",
        type_hint=typing.Union[ObservableArray, list],
        default_value=UNDEFINED,
    )
    # NOTE(review): the docstring below looks copy-pasted from
    # ``dipole_moments`` - it should presumably describe the volumes instead.
    volumes = InputAttribute(
        docstring="The dipole moments evaluated at reference state's configurations"
        "using the force field of the target state.",
        type_hint=typing.Union[ObservableArray, list],
        default_value=UNDEFINED,
    )

    thermodynamic_state = InputAttribute(
        docstring="The thermodynamic state to re-weight to.",
        type_hint=ThermodynamicState,
        default_value=UNDEFINED,
    )

    def __init__(self, protocol_id):
        super().__init__(protocol_id)
        # Uncertainties for this protocol are always bootstrapped - see the
        # guard in ``_execute``.
        self.bootstrap_uncertainties = True

    def _observables(self) -> typing.Dict[str, ObservableArray]:
        """The two component observables which combine into the dielectric."""
        return {"volumes": self.volumes, "dipole_moments": self.dipole_moments}

    def _reweight_observables(
        self,
        weights: ObservableArray,
        mbar: pymbar.MBAR,
        target_reduced_potentials: ObservableArray,
        **observables: ObservableArray,
    ) -> Observable:
        """Combine the re-weighted dipole moments and volumes into the average
        dielectric constant at the target state.
        """
        volumes = observables.pop("volumes")
        dipole_moments = observables.pop("dipole_moments")

        # The base class implementation (bound with the MBAR weights via
        # ``functools.partial``) performs the averaging of each component.
        dielectric_constant = compute_dielectric_constant(
            dipole_moments,
            volumes,
            self.thermodynamic_state.temperature,
            functools.partial(
                super(ReweightDielectricConstant, self)._reweight_observables,
                weights=weights,
                mbar=mbar,
                target_reduced_potentials=target_reduced_potentials,
            ),
        )

        return dielectric_constant

    def _execute(self, directory, available_resources):
        # The dielectric is a fluctuation property, so only bootstrapped
        # uncertainties are supported here.
        if not self.bootstrap_uncertainties:
            raise ValueError(
                "The uncertainty in the average dielectric constant should only be "
                "computed using bootstrapping."
            )

        super(ReweightDielectricConstant, self)._execute(directory, available_resources)
|
jaketanderson/openff-evaluator | openff/evaluator/properties/solvation.py | <reponame>jaketanderson/openff-evaluator
"""
A collection of physical property definitions relating to
solvation free energies.
"""
from openff.units import unit
from openff.evaluator.attributes import UNDEFINED
from openff.evaluator.datasets import PhysicalProperty
from openff.evaluator.layers import register_calculation_schema
from openff.evaluator.layers.simulation import SimulationLayer, SimulationSchema
from openff.evaluator.protocols import (
coordinates,
forcefield,
groups,
miscellaneous,
openmm,
yank,
)
from openff.evaluator.substances import Component, Substance
from openff.evaluator.thermodynamics import Ensemble
from openff.evaluator.workflow import WorkflowSchema
from openff.evaluator.workflow.utils import ProtocolPath
class SolvationFreeEnergy(PhysicalProperty):
"""A class representation of a solvation free energy property."""
    @classmethod
    def default_unit(cls):
        """Return the unit (kJ / mol) that solvation free energies are
        reported in by default."""
        return unit.kilojoule / unit.mole
@staticmethod
def default_simulation_schema(
absolute_tolerance=UNDEFINED, relative_tolerance=UNDEFINED, n_molecules=2000
):
"""Returns the default calculation schema to use when estimating
this class of property from direct simulations.
Parameters
----------
absolute_tolerance: openff.evaluator.unit.Quantity, optional
The absolute tolerance to estimate the property to within.
relative_tolerance: float
The tolerance (as a fraction of the properties reported
uncertainty) to estimate the property to within.
n_molecules: int
The number of molecules to use in the simulation.
Returns
-------
SimulationSchema
The schema to follow when estimating this property.
"""
assert absolute_tolerance == UNDEFINED or relative_tolerance == UNDEFINED
calculation_schema = SimulationSchema()
calculation_schema.absolute_tolerance = absolute_tolerance
calculation_schema.relative_tolerance = relative_tolerance
use_target_uncertainty = (
absolute_tolerance != UNDEFINED or relative_tolerance != UNDEFINED
)
# Setup the fully solvated systems.
build_full_coordinates = coordinates.BuildCoordinatesPackmol(
"build_solvated_coordinates"
)
build_full_coordinates.substance = ProtocolPath("substance", "global")
build_full_coordinates.max_molecules = n_molecules
assign_full_parameters = forcefield.BaseBuildSystem(
"assign_solvated_parameters"
)
assign_full_parameters.force_field_path = ProtocolPath(
"force_field_path", "global"
)
assign_full_parameters.substance = ProtocolPath("substance", "global")
assign_full_parameters.coordinate_file_path = ProtocolPath(
"coordinate_file_path", build_full_coordinates.id
)
# Perform a quick minimisation of the full system to give
# YANK a better starting point for its minimisation.
energy_minimisation = openmm.OpenMMEnergyMinimisation("energy_minimisation")
energy_minimisation.parameterized_system = ProtocolPath(
"parameterized_system", assign_full_parameters.id
)
energy_minimisation.input_coordinate_file = ProtocolPath(
"coordinate_file_path", build_full_coordinates.id
)
equilibration_simulation = openmm.OpenMMSimulation("equilibration_simulation")
equilibration_simulation.ensemble = Ensemble.NPT
equilibration_simulation.steps_per_iteration = 100000
equilibration_simulation.output_frequency = 10000
equilibration_simulation.timestep = 2.0 * unit.femtosecond
equilibration_simulation.thermodynamic_state = ProtocolPath(
"thermodynamic_state", "global"
)
equilibration_simulation.parameterized_system = ProtocolPath(
"parameterized_system", assign_full_parameters.id
)
equilibration_simulation.input_coordinate_file = ProtocolPath(
"output_coordinate_file", energy_minimisation.id
)
# Create a substance which only contains the solute (e.g. for the
# vacuum phase simulations).
filter_solvent = miscellaneous.FilterSubstanceByRole("filter_solvent")
filter_solvent.input_substance = ProtocolPath("substance", "global")
filter_solvent.component_roles = [Component.Role.Solvent]
filter_solute = miscellaneous.FilterSubstanceByRole("filter_solute")
filter_solute.input_substance = ProtocolPath("substance", "global")
filter_solute.component_roles = [Component.Role.Solute]
# Setup the solute in vacuum system.
build_vacuum_coordinates = coordinates.BuildCoordinatesPackmol(
"build_vacuum_coordinates"
)
build_vacuum_coordinates.substance = ProtocolPath(
"filtered_substance", filter_solute.id
)
build_vacuum_coordinates.max_molecules = 1
assign_vacuum_parameters = forcefield.BaseBuildSystem("assign_parameters")
assign_vacuum_parameters.force_field_path = ProtocolPath(
"force_field_path", "global"
)
assign_vacuum_parameters.substance = ProtocolPath(
"filtered_substance", filter_solute.id
)
assign_vacuum_parameters.coordinate_file_path = ProtocolPath(
"coordinate_file_path", build_vacuum_coordinates.id
)
# Set up the protocol to run yank.
run_yank = yank.SolvationYankProtocol("run_solvation_yank")
run_yank.solute = ProtocolPath("filtered_substance", filter_solute.id)
run_yank.solvent_1 = ProtocolPath("filtered_substance", filter_solvent.id)
run_yank.solvent_2 = Substance()
run_yank.thermodynamic_state = ProtocolPath("thermodynamic_state", "global")
run_yank.steps_per_iteration = 500
run_yank.checkpoint_interval = 1
run_yank.solution_1_coordinates = ProtocolPath(
"output_coordinate_file", equilibration_simulation.id
)
run_yank.solution_1_system = ProtocolPath(
"parameterized_system", assign_full_parameters.id
)
run_yank.solution_2_coordinates = ProtocolPath(
"coordinate_file_path", build_vacuum_coordinates.id
)
run_yank.solution_2_system = ProtocolPath(
"parameterized_system", assign_vacuum_parameters.id
)
run_yank.gradient_parameters = ProtocolPath("parameter_gradient_keys", "global")
# Set up the group which will run yank until the free energy has been determined
# to within a given uncertainty
conditional_group = groups.ConditionalGroup("conditional_group")
conditional_group.max_iterations = 20
if use_target_uncertainty:
condition = groups.ConditionalGroup.Condition()
condition.type = groups.ConditionalGroup.Condition.Type.LessThan
condition.right_hand_value = ProtocolPath("target_uncertainty", "global")
condition.left_hand_value = ProtocolPath(
"free_energy_difference.error", conditional_group.id, run_yank.id
)
conditional_group.add_condition(condition)
# Define the total number of iterations that yank should run for.
total_iterations = miscellaneous.MultiplyValue("total_iterations")
total_iterations.value = 2000
total_iterations.multiplier = ProtocolPath(
"current_iteration", conditional_group.id
)
# Make sure the simulations gets extended after each iteration.
run_yank.number_of_iterations = ProtocolPath("result", total_iterations.id)
conditional_group.add_protocols(total_iterations, run_yank)
# Define the full workflow schema.
schema = WorkflowSchema()
schema.protocol_schemas = [
build_full_coordinates.schema,
assign_full_parameters.schema,
energy_minimisation.schema,
equilibration_simulation.schema,
filter_solvent.schema,
filter_solute.schema,
build_vacuum_coordinates.schema,
assign_vacuum_parameters.schema,
conditional_group.schema,
]
schema.final_value_source = ProtocolPath(
"free_energy_difference", conditional_group.id, run_yank.id
)
calculation_schema.workflow_schema = schema
return calculation_schema
# Register the properties via the plugin system.
# Registration makes the default schema discoverable so that the simulation
# layer can automatically estimate `SolvationFreeEnergy` properties.
register_calculation_schema(
    SolvationFreeEnergy, SimulationLayer, SolvationFreeEnergy.default_simulation_schema
)
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_datasets/test_curation/test_conversion.py | import numpy
import pandas
from openff.evaluator.datasets.curation.components.conversion import (
ConvertExcessDensityData,
ConvertExcessDensityDataSchema,
)
def test_convert_density_v_excess():
    """Tests the `ConvertExcessDensityData` component."""
    # All rows share the same thermodynamic state; key order in each row dict
    # is kept identical to the expanded originals.
    state = {
        "Phase": "Liquid",
        "Temperature (K)": 298.15,
        "Pressure (kPa)": 101.325,
    }
    binary_mixture = {
        "Component 1": "CCCCCCCC",
        "Mole Fraction 1": 0.8,
        "Component 2": "CCCCCCC(C)O",
        "Mole Fraction 2": 0.2,
    }
    data_rows = [
        {
            "N Components": 1,
            **state,
            "Component 1": "CCCCCCCC",
            "Mole Fraction 1": 1.0,
            "Density Value (g / ml)": 0.69867,
            "Source": "x",
        },
        {
            "N Components": 1,
            **state,
            "Component 1": "CCCCCCC(C)O",
            "Mole Fraction 1": 1.0,
            "Density Value (g / ml)": 0.81705,
            "Source": "y",
        },
        {
            "N Components": 2,
            **state,
            **binary_mixture,
            "Density Value (g / ml)": 0.72157,
            "Source": "z",
        },
        {
            "N Components": 2,
            **state,
            **binary_mixture,
            "ExcessMolarVolume Value (cm ** 3 / mol)": 0.06715,
            "Source": "w",
        },
    ]
    data_frame = pandas.DataFrame(data_rows)
    converted = ConvertExcessDensityData.apply(
        data_frame, ConvertExcessDensityDataSchema(), 1
    )
    # Only the binary rows are of interest.
    converted = converted[converted["N Components"] == 2]
    assert len(converted) == 4
    excess_volumes = [
        x
        for x in converted["ExcessMolarVolume Value (cm ** 3 / mol)"].round(5).unique()
        if not pandas.isnull(x)
    ]
    assert len(excess_volumes) == 1
    assert numpy.isclose(excess_volumes, 0.06715)
    densities = [
        x
        for x in converted["Density Value (g / ml)"].round(5).unique()
        if not pandas.isnull(x)
    ]
    assert len(densities) == 1
    assert numpy.isclose(densities, 0.72157)
|
jaketanderson/openff-evaluator | openff/evaluator/layers/__init__.py | from .layers import CalculationLayer, CalculationLayerResult, CalculationLayerSchema
from .plugins import (
calculation_layer,
register_calculation_layer,
register_calculation_schema,
registered_calculation_layers,
registered_calculation_schemas,
)
# Names exported by ``from openff.evaluator.layers import *``.
# ``__all__`` must be a list of *strings*: listing the objects themselves
# makes ``from ... import *`` raise ``TypeError: Item in __all__ must be str``.
__all__ = [
    "calculation_layer",
    "CalculationLayer",
    "CalculationLayerResult",
    "CalculationLayerSchema",
    "register_calculation_layer",
    "register_calculation_schema",
    "registered_calculation_layers",
    "registered_calculation_schemas",
]
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_utils/test_observables.py | """
Units tests for openff.evaluator.utils.observables
"""
import json
from typing import List, Tuple, Type, Union
import numpy
import pytest
from openff.units import unit
from openff.evaluator.forcefield import ParameterGradient, ParameterGradientKey
from openff.evaluator.tests.utils import does_not_raise
from openff.evaluator.utils import get_data_filename
from openff.evaluator.utils.observables import (
Observable,
ObservableArray,
ObservableFrame,
ObservableType,
bootstrap,
)
from openff.evaluator.utils.serialization import TypedJSONDecoder, TypedJSONEncoder
ValueType = Union[float, int, unit.Quantity, unit.Measurement, numpy.ndarray]
def _compare_observables(
    observable_a: Union[Observable, ObservableArray],
    observable_b: Union[Observable, ObservableArray],
):
    """Assert that two observables are of the same type, hold (almost) equal
    values, and carry (almost) equal gradients for the same parameter keys."""
    assert isinstance(observable_a, type(observable_b))
    assert isinstance(observable_a.value.magnitude, type(observable_b.value.magnitude))
    assert numpy.allclose(observable_a.value, observable_b.value)
    if isinstance(observable_a.value.magnitude, numpy.ndarray):
        assert observable_a.value.shape == observable_b.value.shape
    if isinstance(observable_a, Observable):
        assert numpy.isclose(observable_a.error, observable_b.error)
    gradients_a = {gradient.key: gradient for gradient in observable_a.gradients}
    gradients_b = {gradient.key: gradient for gradient in observable_b.gradients}
    assert set(gradients_a) == set(gradients_b)
    for gradient_key, gradient_a in gradients_a.items():
        gradient_b = gradients_b[gradient_key]
        assert isinstance(gradient_a.value.magnitude, type(gradient_b.value.magnitude))
        assert numpy.allclose(gradient_a.value, gradient_b.value)
        if isinstance(gradient_a.value.magnitude, numpy.ndarray):
            assert gradient_a.value.shape == gradient_b.value.shape
def _mock_observable(
    value: ValueType,
    gradient_values: List[Tuple[str, str, str, ValueType]],
    object_type: Union[Type[Observable], Type[ObservableArray]],
):
    """Build an ``Observable`` / ``ObservableArray`` with kelvin-valued
    gradients from ``(tag, smirks, attribute, value)`` tuples."""
    gradients = []
    # Renamed loop variable avoids shadowing the ``value`` parameter.
    for tag, smirks, attribute, gradient_value in gradient_values:
        gradients.append(
            ParameterGradient(
                key=ParameterGradientKey(tag, smirks, attribute),
                value=gradient_value * unit.kelvin,
            )
        )
    return object_type(value=value, gradients=gradients)
@pytest.mark.parametrize(
    "value, gradient_values, expected_value, expected_gradient_values",
    [
        (
            numpy.ones(1) * unit.kelvin,
            [numpy.ones(1) * unit.kelvin],
            numpy.ones((1, 1)) * unit.kelvin,
            [numpy.ones((1, 1)) * unit.kelvin],
        ),
        (
            1.0 * unit.kelvin,
            [numpy.ones(1) * unit.kelvin],
            numpy.ones((1, 1)) * unit.kelvin,
            [numpy.ones((1, 1)) * unit.kelvin],
        ),
        (
            numpy.ones(1) * unit.kelvin,
            [1.0 * unit.kelvin],
            numpy.ones((1, 1)) * unit.kelvin,
            [numpy.ones((1, 1)) * unit.kelvin],
        ),
        (
            numpy.ones(3) * unit.kelvin,
            [numpy.ones((3, 1)) * unit.kelvin],
            numpy.ones((3, 1)) * unit.kelvin,
            [numpy.ones((3, 1)) * unit.kelvin],
        ),
        (
            numpy.ones((3, 1)) * unit.kelvin,
            [numpy.ones(3) * unit.kelvin],
            numpy.ones((3, 1)) * unit.kelvin,
            [numpy.ones((3, 1)) * unit.kelvin],
        ),
        (
            numpy.ones((2, 3)) * unit.kelvin,
            [numpy.ones((2, 3)) * unit.kelvin],
            numpy.ones((2, 3)) * unit.kelvin,
            [numpy.ones((2, 3)) * unit.kelvin],
        ),
    ],
)
def test_observable_array_valid_initializer(
    value: unit.Quantity,
    gradient_values: List[unit.Quantity],
    expected_value: unit.Quantity,
    expected_gradient_values: List[unit.Quantity],
):
    """Scalar and 1-D inputs should be reshaped to the expected 2-D shape,
    for both the value and each of its gradients."""
    observable = ObservableArray(
        value,
        [
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                value=gradient_value,
            )
            for gradient_value in gradient_values
        ],
    )
    # noinspection PyUnresolvedReferences
    assert observable.value.shape == expected_value.shape
    assert numpy.allclose(observable.value, expected_value)
    assert all(
        observable.gradients[i].value.shape == expected_gradient_values[i].shape
        for i in range(len(expected_gradient_values))
    )
    assert all(
        numpy.allclose(observable.gradients[i].value, expected_gradient_values[i])
        for i in range(len(expected_gradient_values))
    )
@pytest.mark.parametrize(
    "value, gradients, expected_raises, expected_message",
    [
        (
            numpy.ones(1),
            [],
            pytest.raises(TypeError),
            "The value must be a unit-wrapped integer, float or numpy array.",
        ),
        (
            "str" * unit.kelvin,
            [],
            pytest.raises(TypeError),
            "The value must be a unit-wrapped integer, float or numpy array.",
        ),
        (
            numpy.ones((2, 2, 2)) * unit.kelvin,
            [],
            pytest.raises(ValueError),
            "The wrapped array must not contain more than two dimensions.",
        ),
        (
            None,
            [
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                    value=numpy.ones((2, 2)) * unit.kelvin,
                ),
            ],
            pytest.raises(ValueError),
            "A valid value must be provided.",
        ),
        (
            1.0 * unit.kelvin,
            [
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                    value=numpy.ones(1),
                ),
            ],
            pytest.raises(TypeError),
            "The gradient values must be unit-wrapped integers, floats or numpy arrays.",
        ),
        (
            1.0 * unit.kelvin,
            [
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                    value="str" * unit.kelvin,
                ),
            ],
            pytest.raises(TypeError),
            "The gradient values must be unit-wrapped integers, floats or numpy arrays.",
        ),
        (
            1.0 * unit.kelvin,
            [
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                    value=numpy.ones((2, 2, 2)) * unit.kelvin,
                ),
            ],
            pytest.raises(ValueError),
            "Gradient values must not contain more than two dimensions.",
        ),
        (
            numpy.ones((2, 1)) * unit.kelvin,
            [
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                    value=numpy.ones((1, 2)) * unit.kelvin,
                ),
            ],
            pytest.raises(ValueError),
            "Gradient values should be 1-dimensional to match the dimensionality of the "
            "value.",
        ),
        (
            numpy.ones((1, 2)) * unit.kelvin,
            [
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                    value=numpy.ones((2, 1)) * unit.kelvin,
                ),
            ],
            pytest.raises(ValueError),
            "Gradient values should be 2-dimensional to match the dimensionality of the "
            "value.",
        ),
        (
            numpy.ones((3, 2)) * unit.kelvin,
            [
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                    value=numpy.ones((2, 2)) * unit.kelvin,
                ),
            ],
            pytest.raises(ValueError),
            "Gradient values should have a length of 3 to match the length of the "
            "value.",
        ),
    ],
)
def test_observable_array_invalid_initializer(
    value, gradients, expected_raises, expected_message
):
    """Invalid values or gradients should raise with a descriptive message."""
    with expected_raises as error_info:
        ObservableArray(value, gradients)
    assert expected_message in str(error_info.value)
@pytest.mark.parametrize("value", [0.1, numpy.ones(1)])
def test_observable_array_round_trip(value):
    """An ``ObservableArray`` should survive a JSON round trip unchanged."""
    gradient = ParameterGradient(
        key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
        value=value * 2.0 * unit.kelvin,
    )
    observable = ObservableArray(value=value * unit.kelvin, gradients=[gradient])
    serialized = json.dumps(observable, cls=TypedJSONEncoder)
    round_tripped: ObservableArray = json.loads(serialized, cls=TypedJSONDecoder)
    assert isinstance(round_tripped, ObservableArray)
    assert numpy.isclose(observable.value, round_tripped.value)
    assert len(observable.gradients) == len(round_tripped.gradients)
    assert observable.gradients[0] == round_tripped.gradients[0]
def test_observable_array_subset():
    """Taking a subset of an ``ObservableArray`` should subset both the values
    and the gradients."""
    gradient = ParameterGradient(
        key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
        value=numpy.arange(4) * unit.kelvin,
    )
    observable = ObservableArray(
        value=numpy.arange(4) * unit.kelvin, gradients=[gradient]
    )
    subset = observable.subset([1, 3])
    assert len(subset) == 2
    expected = numpy.array([[1.0], [3.0]]) * unit.kelvin
    assert numpy.allclose(subset.value, expected)
    assert numpy.allclose(subset.gradients[0].value, expected)
def test_observable_array_join():
    """Joining two ``ObservableArray`` objects should concatenate both the
    values and the gradients."""
    gradient_unit = unit.mole / unit.kilojoule
    def _make_piece(index):
        # Each piece holds two consecutive values: [0, 1] then [2, 3].
        values = numpy.arange(2) + index * 2
        return ObservableArray(
            value=values * unit.kelvin,
            gradients=[
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                    value=values * unit.kelvin * gradient_unit,
                )
            ],
        )
    joined = ObservableArray.join(_make_piece(0), _make_piece(1))
    assert len(joined) == 4
    expected = numpy.arange(4).reshape(-1, 1)
    assert numpy.allclose(joined.value, expected * unit.kelvin)
    assert numpy.allclose(
        joined.gradients[0].value, expected * unit.kelvin * gradient_unit
    )
def test_observable_array_join_single():
    """Joining a single ``ObservableArray`` should leave its length unchanged."""
    gradient_unit = unit.mole / unit.kilojoule
    observable = ObservableArray(
        value=numpy.arange(2) * unit.kelvin,
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                value=numpy.arange(2) * unit.kelvin * gradient_unit,
            )
        ],
    )
    assert len(ObservableArray.join(observable)) == 2
def test_observable_array_len():
    """``len`` of an observable array should match the number of wrapped values."""
    observable = ObservableArray(value=numpy.arange(5) * unit.kelvin)
    assert len(observable) == 5
@pytest.mark.parametrize(
    "observables, expected_raises, expected_message",
    [
        (
            [],
            pytest.raises(ValueError),
            "At least one observable must be provided.",
        ),
        (
            [
                ObservableArray(value=numpy.ones(1) * unit.kelvin),
                ObservableArray(value=numpy.ones(1) * unit.pascal),
            ],
            pytest.raises(ValueError),
            "The observables must all have compatible units.",
        ),
        (
            [
                ObservableArray(
                    value=numpy.ones(2) * unit.kelvin,
                    gradients=[
                        ParameterGradient(
                            key=ParameterGradientKey("vdW", "[#1:1]", "sigma"),
                            value=numpy.ones(2) * unit.kelvin / unit.angstrom,
                        )
                    ],
                ),
                ObservableArray(
                    value=numpy.ones(2) * unit.kelvin,
                    gradients=[
                        ParameterGradient(
                            key=ParameterGradientKey("vdW", "[#6:1]", "sigma"),
                            value=numpy.ones(2) * unit.kelvin / unit.angstrom,
                        )
                    ],
                ),
            ],
            pytest.raises(ValueError),
            "The observables must contain gradient information for the same "
            "parameters.",
        ),
        (
            [
                ObservableArray(
                    value=numpy.ones(2) * unit.kelvin,
                    gradients=[
                        ParameterGradient(
                            key=ParameterGradientKey("vdW", "[#6:1]", "sigma"),
                            value=numpy.ones(2) * unit.kelvin / unit.angstrom,
                        )
                    ],
                ),
                ObservableArray(
                    value=numpy.ones(2) * unit.kelvin,
                    gradients=[
                        ParameterGradient(
                            key=ParameterGradientKey("vdW", "[#6:1]", "sigma"),
                            value=numpy.ones(2) * unit.kelvin / unit.meter,
                        )
                    ],
                ),
            ],
            pytest.raises(ValueError),
            "The gradients of each of the observables must have the same units.",
        ),
    ],
)
def test_observables_join_fail(observables, expected_raises, expected_message):
    """``ObservableArray.join`` should reject empty or incompatible inputs."""
    with expected_raises as error_info:
        ObservableArray.join(*observables)
    assert (
        expected_message is None
        and error_info is None
        or expected_message in str(error_info.value)
    )
@pytest.mark.parametrize(
    "value, gradients, expected_raises, expected_message",
    [
        (
            0.1 * unit.kelvin,
            [
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                    value=0.1 * unit.kelvin,
                )
            ],
            does_not_raise(),
            None,
        ),
        (
            (0.1 * unit.kelvin).plus_minus(0.2 * unit.kelvin),
            [
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                    value=0.1 * unit.kelvin,
                )
            ],
            does_not_raise(),
            None,
        ),
        (
            0.1,
            [],
            pytest.raises(TypeError),
            "The value must be either an `openff.evaluator.unit.Measurement` or "
            "an `openff.evaluator.unit.Quantity`.",
        ),
        (
            numpy.ones(3) * unit.kelvin,
            [],
            pytest.raises(TypeError),
            "The value must be a unit-wrapped integer or float.",
        ),
        (
            None,
            [
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"), value=0.1
                )
            ],
            pytest.raises(ValueError),
            "A valid value must be provided.",
        ),
        (
            (0.1 * unit.kelvin).plus_minus(0.2 * unit.kelvin),
            [
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"), value=0.1
                )
            ],
            pytest.raises(TypeError),
            "The gradient values must be unit-wrapped integers or floats.",
        ),
        (
            (0.1 * unit.kelvin).plus_minus(0.2 * unit.kelvin),
            [
                ParameterGradient(
                    key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                    value="str" * unit.kelvin,
                )
            ],
            pytest.raises(TypeError),
            "The gradient values must be unit-wrapped integers or floats.",
        ),
    ],
)
def test_observable_initializer(value, gradients, expected_raises, expected_message):
    """``Observable`` construction should accept unit-wrapped scalars and
    measurements, rejecting anything else with a descriptive message."""
    with expected_raises as error_info:
        Observable(value, gradients)
    if expected_message is not None:
        assert expected_message in str(error_info.value)
def test_observable_round_trip():
    """An ``Observable`` should survive a JSON round trip unchanged."""
    observable = Observable(
        value=(0.1 * unit.kelvin).plus_minus(0.2 * unit.kelvin),
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                value=0.2 * unit.kelvin,
            )
        ],
    )
    serialized = json.dumps(observable, cls=TypedJSONEncoder)
    round_tripped: Observable = json.loads(serialized, cls=TypedJSONDecoder)
    assert isinstance(round_tripped, Observable)
    assert numpy.isclose(observable.value, round_tripped.value)
    assert numpy.isclose(observable.error, round_tripped.error)
    assert len(observable.gradients) == len(round_tripped.gradients)
    assert observable.gradients[0] == round_tripped.gradients[0]
@pytest.mark.parametrize(
    "value_a, value_b, expected_value",
    [
        observable_tuple
        for object_type in [Observable, ObservableArray]
        for observable_tuple in [
            (
                _mock_observable(
                    2.0 * unit.kelvin,
                    [
                        ("vdW", "[#6:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#1:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                _mock_observable(
                    4.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                _mock_observable(
                    6.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 6.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 6.0 * unit.kelvin),
                    ],
                    object_type,
                ),
            ),
            (
                2.0 * unit.kelvin,
                _mock_observable(
                    4.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                _mock_observable(
                    6.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
            ),
        ]
    ],
)
def test_add_observables(value_a, value_b, expected_value):
    """Addition should add values and sum gradients per parameter key, and be
    commutative; adding a plain quantity leaves gradients unchanged."""
    _compare_observables(value_a + value_b, expected_value)
    _compare_observables(value_b + value_a, expected_value)
@pytest.mark.parametrize(
    "value_a, value_b, expected_value",
    [
        observable_tuple
        for object_type in [Observable, ObservableArray]
        for observable_tuple in [
            (
                _mock_observable(
                    2.0 * unit.kelvin,
                    [
                        ("vdW", "[#6:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#1:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                _mock_observable(
                    4.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                _mock_observable(
                    2.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", -2.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 2.0 * unit.kelvin),
                    ],
                    object_type,
                ),
            ),
            (
                _mock_observable(
                    2.0 * unit.kelvin,
                    [
                        ("vdW", "[#6:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#1:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                2.0 * unit.kelvin,
                _mock_observable(
                    0.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", -4.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", -2.0 * unit.kelvin),
                    ],
                    object_type,
                ),
            ),
            (
                2.0 * unit.kelvin,
                _mock_observable(
                    2.0 * unit.kelvin,
                    [
                        ("vdW", "[#6:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#1:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                _mock_observable(
                    0.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 4.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 2.0 * unit.kelvin),
                    ],
                    object_type,
                ),
            ),
        ]
    ],
)
def test_subtract_observables(value_a, value_b, expected_value):
    """Subtraction (``value_b - value_a``) should subtract values and
    difference the gradients per parameter key."""
    _compare_observables(value_b - value_a, expected_value)
@pytest.mark.parametrize(
    "value_a, value_b, expected_value",
    [
        observable_tuple
        for object_type in [Observable, ObservableArray]
        for observable_tuple in [
            (
                _mock_observable(
                    2.0 * unit.kelvin,
                    [
                        ("vdW", "[#6:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#1:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                _mock_observable(
                    4.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                _mock_observable(
                    8.0 * unit.kelvin**2,
                    [
                        ("vdW", "[#1:1]", "epsilon", 20.0 * unit.kelvin**2),
                        ("vdW", "[#6:1]", "epsilon", 16.0 * unit.kelvin**2),
                    ],
                    object_type,
                ),
            ),
            (
                2.0 * unit.kelvin,
                _mock_observable(
                    4.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                _mock_observable(
                    8.0 * unit.kelvin**2,
                    [
                        ("vdW", "[#1:1]", "epsilon", 4.0 * unit.kelvin**2),
                        ("vdW", "[#6:1]", "epsilon", 8.0 * unit.kelvin**2),
                    ],
                    object_type,
                ),
            ),
            (
                2.0,
                _mock_observable(
                    4.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                _mock_observable(
                    8.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 4.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 8.0 * unit.kelvin),
                    ],
                    object_type,
                ),
            ),
        ]
    ],
)
def test_multiply_observables(value_a, value_b, expected_value):
    """Multiplication should multiply the values and apply the product rule to
    the gradients, and be commutative."""
    _compare_observables(value_a * value_b, expected_value)
    _compare_observables(value_b * value_a, expected_value)
@pytest.mark.parametrize(
    "value_a, value_b, expected_value",
    [
        observable_tuple
        for object_type in [Observable, ObservableArray]
        for observable_tuple in [
            (
                _mock_observable(
                    4.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                _mock_observable(
                    2.0 * unit.kelvin,
                    [
                        ("vdW", "[#6:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#1:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                _mock_observable(
                    2.0 * unit.dimensionless,
                    [
                        ("vdW", "[#1:1]", "epsilon", -3.0 * unit.dimensionless),
                        ("vdW", "[#6:1]", "epsilon", 0.0 * unit.dimensionless),
                    ],
                    object_type,
                ),
            ),
            (
                _mock_observable(
                    4.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                2.0 * unit.kelvin,
                _mock_observable(
                    2.0 * unit.dimensionless,
                    [
                        ("vdW", "[#1:1]", "epsilon", 1.0 * unit.dimensionless),
                        ("vdW", "[#6:1]", "epsilon", 2.0 * unit.dimensionless),
                    ],
                    object_type,
                ),
            ),
            (
                2.0 * unit.kelvin,
                _mock_observable(
                    4.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                _mock_observable(
                    1.0 / 2.0 * unit.dimensionless,
                    [
                        ("vdW", "[#1:1]", "epsilon", -1.0 / 4.0 * unit.dimensionless),
                        ("vdW", "[#6:1]", "epsilon", -1.0 / 2.0 * unit.dimensionless),
                    ],
                    object_type,
                ),
            ),
            (
                _mock_observable(
                    4.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                2.0,
                _mock_observable(
                    2.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 1.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 2.0 * unit.kelvin),
                    ],
                    object_type,
                ),
            ),
            (
                2.0,
                _mock_observable(
                    4.0 * unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", 2.0 * unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", 4.0 * unit.kelvin),
                    ],
                    object_type,
                ),
                _mock_observable(
                    1.0 / 2.0 / unit.kelvin,
                    [
                        ("vdW", "[#1:1]", "epsilon", -1.0 / 4.0 / unit.kelvin),
                        ("vdW", "[#6:1]", "epsilon", -1.0 / 2.0 / unit.kelvin),
                    ],
                    object_type,
                ),
            ),
        ]
    ],
)
def test_divide_observables(value_a, value_b, expected_value):
    """Division should divide the values and apply the quotient rule to the
    gradients."""
    _compare_observables(value_a / value_b, expected_value)
@pytest.mark.parametrize(
    "observables",
    [
        {"Temperature": ObservableArray(value=numpy.ones(2) * unit.kelvin)},
        {
            ObservableType.Temperature: ObservableArray(
                value=numpy.ones(2) * unit.kelvin
            )
        },
        ObservableFrame(
            {
                ObservableType.Temperature: ObservableArray(
                    value=numpy.ones(2) * unit.kelvin
                )
            }
        ),
    ],
)
def test_frame_constructor(observables):
    """The constructor should accept string keys, enum keys, and an existing
    frame, exposing the same observables in each case."""
    observable_frame = ObservableFrame(observables)
    assert all(observable_type in observable_frame for observable_type in observables)
    assert all(
        observable_frame[observable_type] == observables[observable_type]
        for observable_type in observables
    )
def test_frame_round_trip():
    """An ``ObservableFrame`` should survive a JSON round trip unchanged."""
    observable_frame = ObservableFrame(
        {"Temperature": ObservableArray(value=numpy.ones(2) * unit.kelvin)}
    )
    serialized = json.dumps(observable_frame, cls=TypedJSONEncoder)
    round_tripped: ObservableFrame = json.loads(serialized, cls=TypedJSONDecoder)
    assert isinstance(round_tripped, ObservableFrame)
    assert set(observable_frame) == set(round_tripped)
    assert len(observable_frame) == len(round_tripped)
@pytest.mark.parametrize(
    "key, expected",
    [(key, key) for key in ObservableType]
    + [(key.value, key) for key in ObservableType],
)
def test_frame_validate_key(key, expected):
    """Both enum members and their string values should normalise to the
    corresponding enum member."""
    assert ObservableFrame._validate_key(key) == expected
@pytest.mark.parametrize("key", [ObservableType.Temperature, "Temperature"])
def test_frame_magic_functions(key):
    """``__setitem__`` / ``__contains__`` / ``__delitem__`` / ``__len__`` /
    ``__iter__`` should all accept both enum and string keys."""
    frame = ObservableFrame()
    assert len(frame) == 0
    frame[key] = ObservableArray(value=numpy.ones(1) * unit.kelvin)
    assert key in frame
    assert len(frame) == 1
    assert set(frame) == {ObservableType.Temperature}
    del frame[key]
    assert key not in frame
    assert len(frame) == 0
@pytest.mark.parametrize(
    "observable_frame, key, value, expected_raises, expected_message",
    [
        (
            ObservableFrame(
                {"Temperature": ObservableArray(value=numpy.ones(2) * unit.kelvin)}
            ),
            "Volume",
            numpy.ones(1) * unit.nanometer**3,
            pytest.raises(ValueError),
            "The length of the data (1) must match the length of the data already in "
            "the frame (2).",
        ),
        (
            ObservableFrame(),
            "Temperature",
            numpy.ones(1) * unit.pascals,
            pytest.raises(ValueError),
            "Temperature data must have units compatible with K.",
        ),
    ],
)
def test_frame_set_invalid_item(
    observable_frame, key, value, expected_raises, expected_message
):
    """Setting items with mismatched lengths or incompatible units should raise."""
    with expected_raises as error_info:
        observable_frame[key] = ObservableArray(value=value)
    assert (
        expected_message is None
        and error_info is None
        or expected_message in str(error_info.value)
    )
@pytest.mark.parametrize("pressure", [None, 1 * unit.atmosphere])
def test_frame_from_openmm(pressure):
    """A frame built from an OpenMM statistics CSV should contain the expected
    observables and values; the enthalpy column requires a pressure."""
    observable_frame = ObservableFrame.from_openmm(
        get_data_filename("test/statistics/openmm_statistics.csv"), pressure
    )
    expected_types = {*ObservableType} - {ObservableType.ReducedPotential}
    if pressure is None:
        expected_types -= {ObservableType.Enthalpy}
    assert {*observable_frame} == expected_types
    assert len(observable_frame) == 10
    expected_values = {
        ObservableType.PotentialEnergy: 7934.831868494968 * unit.kilojoule / unit.mole,
        ObservableType.KineticEnergy: 5939.683117957521 * unit.kilojoule / unit.mole,
        ObservableType.TotalEnergy: 13874.51498645249 * unit.kilojoule / unit.mole,
        ObservableType.Temperature: 286.38157154881503 * unit.kelvin,
        ObservableType.Volume: 26.342326662784938 * unit.nanometer**3,
        ObservableType.Density: 0.6139877476363793 * unit.gram / unit.milliliter,
    }
    for observable_type, expected_value in expected_values.items():
        assert numpy.isclose(observable_frame[observable_type].value[0], expected_value)
    if pressure is not None:
        # H = U + pV; pV is made molar via Avogadro's constant.
        expected_enthalpy = (
            13874.51498645249 * unit.kilojoule / unit.mole
            + pressure
            * 26.342326662784938
            * unit.nanometer**3
            * unit.avogadro_constant
        )
        assert numpy.isclose(observable_frame["Enthalpy"].value[0], expected_enthalpy)
def test_frame_subset():
    """Subsetting a frame should subset the values and gradients of every
    contained observable."""
    temperature = ObservableArray(
        value=numpy.arange(4) * unit.kelvin,
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                value=numpy.arange(4) * unit.kelvin,
            )
        ],
    )
    subset = ObservableFrame({"Temperature": temperature}).subset([1, 3])
    assert len(subset) == 2
    expected = numpy.array([[1.0], [3.0]]) * unit.kelvin
    assert numpy.allclose(subset["Temperature"].value, expected)
    assert numpy.allclose(subset["Temperature"].gradients[0].value, expected)
def test_frame_join():
    """Joining two frames should concatenate both the values and the
    gradients of each observable row-wise."""
    gradient_unit = unit.mole / unit.kilojoule
    gradient_key = ParameterGradientKey("vdW", "[#6:1]", "epsilon")

    frames = []

    for offset in range(2):
        frames.append(
            ObservableFrame(
                {
                    "Temperature": ObservableArray(
                        value=(numpy.arange(2) + offset * 2) * unit.kelvin,
                        gradients=[
                            ParameterGradient(
                                key=gradient_key,
                                value=(numpy.arange(2) + offset * 2)
                                * unit.kelvin
                                * gradient_unit,
                            )
                        ],
                    )
                }
            )
        )

    joined = ObservableFrame.join(*frames)
    expected = numpy.arange(4).reshape(-1, 1)

    assert len(joined) == 4
    assert numpy.allclose(joined["Temperature"].value, expected * unit.kelvin)
    assert numpy.allclose(
        joined["Temperature"].gradients[0].value,
        expected * unit.kelvin * gradient_unit,
    )
@pytest.mark.parametrize(
    "observable_frames, expected_raises, expected_message",
    [
        (
            [
                ObservableFrame(
                    {"Temperature": ObservableArray(value=numpy.ones(2) * unit.kelvin)}
                )
            ],
            pytest.raises(ValueError),
            "At least two observable frames must be provided.",
        ),
        (
            [
                ObservableFrame(
                    {"Temperature": ObservableArray(value=numpy.ones(2) * unit.kelvin)}
                ),
                ObservableFrame(
                    {
                        "Volume": ObservableArray(
                            value=numpy.ones(2) * unit.nanometer**3
                        )
                    }
                ),
            ],
            pytest.raises(ValueError),
            "The observable frames must contain the same types of observable.",
        ),
    ],
)
def test_frame_join_fail(observable_frames, expected_raises, expected_message):
    """``ObservableFrame.join`` should reject invalid inputs with a clear
    ``ValueError``."""
    with expected_raises as error_info:
        ObservableFrame.join(*observable_frames)

    # The previous one-liner relied on ``and``/``or`` precedence
    # (``(A and B) or C``); when ``expected_message`` is ``None`` and an
    # error still occurred it raised ``TypeError`` instead of failing the
    # assertion. Branch explicitly instead.
    if expected_message is None:
        assert error_info is None
    else:
        assert expected_message in str(error_info.value)
@pytest.mark.parametrize(
    "data_values, expected_error, sub_counts",
    [
        (
            numpy.random.normal(0.0, 1.0, (1000,)) * unit.kelvin,
            1.0 / numpy.sqrt(1000) * unit.kelvin,
            None,
        ),
        (
            numpy.random.normal(0.0, 1.0, (1000, 1)) * unit.kelvin,
            1.0 / numpy.sqrt(1000) * unit.kelvin,
            None,
        ),
        (
            numpy.array([1, 2, 2, 3, 3, 3]) * unit.kelvin,
            None,
            [1, 2, 3],
        ),
    ],
)
def test_bootstrap(data_values, expected_error, sub_counts):
    """Bootstrapping the mean should reproduce the sample mean (for both the
    value and its gradients) and, for i.i.d. normal data, yield an error close
    to the analytical standard error of the mean (sigma / sqrt(N))."""
    def bootstrap_function(values: ObservableArray) -> Observable:
        # Reduce an array of samples to a single mean observable; the zero
        # error here is a placeholder as ``bootstrap`` computes the error
        # from the spread of the resampled means.
        return Observable(
            value=values.value.mean().plus_minus(0.0 * values.value.units),
            gradients=[
                ParameterGradient(gradient.key, numpy.mean(gradient.value))
                for gradient in values.gradients
            ],
        )
    data = ObservableArray(
        value=data_values,
        gradients=[
            ParameterGradient(
                key=ParameterGradientKey("vdW", "[#6:1]", "epsilon"),
                value=data_values,
            )
        ],
    )
    average = bootstrap(bootstrap_function, 1000, 1.0, sub_counts, values=data)
    assert numpy.isclose(average.value, data.value.mean())
    assert numpy.isclose(average.gradients[0].value, data.value.mean())
    if expected_error is not None:
        # The bootstrapped error is stochastic, hence the loose tolerance.
        assert numpy.isclose(average.error, expected_error, rtol=0.1)
|
jaketanderson/openff-evaluator | openff/evaluator/datasets/datasets.py | """
An API for defining, storing, and loading sets of physical
property data.
"""
import abc
import re
import uuid
from enum import IntFlag, unique
import numpy
import pandas
from openff.units import unit
from openff.evaluator.attributes import UNDEFINED, Attribute, AttributeClass
from openff.evaluator.datasets import CalculationSource, MeasurementSource, Source
from openff.evaluator.substances import Component, ExactAmount, MoleFraction, Substance
from openff.evaluator.thermodynamics import ThermodynamicState
from openff.evaluator.utils.serialization import TypedBaseModel
@unique
class PropertyPhase(IntFlag):
    """An enum describing the phase that a property was
    collected in.

    Examples
    --------
    Properties measured in multiple phases (e.g. enthalpies of
    vaporization) can be defined by concatenating `PropertyPhase`
    enums:

    >>> gas_liquid_phase = PropertyPhase.Gas | PropertyPhase.Liquid
    """

    Undefined = 0x00
    Solid = 0x01
    Liquid = 0x02
    Gas = 0x04

    @classmethod
    def from_string(cls, enum_string):
        """Parses a phase enum from its string representation.

        Parameters
        ----------
        enum_string: str
            The str representation of a `PropertyPhase`

        Returns
        -------
        PropertyPhase
            The created enum

        Raises
        ------
        KeyError
            If any component of the string is not the name of a phase.

        Examples
        --------
        To round-trip convert a phase enum:

        >>> phase = PropertyPhase.Liquid | PropertyPhase.Gas
        >>> phase_str = str(phase)
        >>> parsed_phase = PropertyPhase.from_string(phase_str)
        """
        if len(enum_string) == 0:
            return PropertyPhase.Undefined

        # OR together each " + " separated component (e.g. "Solid + Liquid").
        # ``split`` on a non-empty string always yields at least one item, so
        # no extra empty-list guard is needed here.
        enum_value = PropertyPhase.Undefined

        for component in enum_string.split(" + "):
            enum_value |= cls[component]

        return enum_value

    def __str__(self):
        # Mirrors ``from_string``: "Undefined" (zero) stringifies to "".
        return " + ".join([phase.name for phase in PropertyPhase if self & phase])

    def __repr__(self):
        return f"<PropertyPhase {str(self)}>"
class PhysicalProperty(AttributeClass, abc.ABC):
    """Represents the value of any physical property and its uncertainty
    if provided.

    It additionally stores the thermodynamic state at which the property
    was collected, the phase it was collected in, information about
    the composition of the observed system, and metadata about how the
    property was collected.
    """
    @classmethod
    @abc.abstractmethod
    def default_unit(cls):
        """openff.evaluator.unit.Unit: The default unit (e.g. g / mol) associated with this
        class of property."""
        raise NotImplementedError()
    # A fresh dash-free UUID is generated per property so that ids are unique
    # by default.
    id = Attribute(
        docstring="A unique identifier string assigned to this property",
        type_hint=str,
        default_value=lambda: str(uuid.uuid4()).replace("-", ""),
    )
    # NOTE(review): "measured estimated" in this docstring reads like a typo
    # for "measured / estimated" — left unchanged as it is a runtime string.
    substance = Attribute(
        docstring="The substance that this property was measured estimated for.",
        type_hint=Substance,
    )
    phase = Attribute(
        docstring="The phase / phases that this property was measured in.",
        type_hint=PropertyPhase,
    )
    thermodynamic_state = Attribute(
        docstring="The thermodynamic state that this property"
        "was measured / estimated at.",
        type_hint=ThermodynamicState,
    )
    value = Attribute(
        docstring="The measured / estimated value of this property.",
        type_hint=unit.Quantity,
    )
    uncertainty = Attribute(
        docstring="The uncertainty in measured / estimated value of this property.",
        type_hint=unit.Quantity,
        optional=True,
    )
    source = Attribute(
        docstring="The original source of this physical property.",
        type_hint=Source,
        optional=True,
    )
    metadata = Attribute(
        docstring="Additional metadata associated with this property. All property "
        "metadata will be made accessible to estimation workflows.",
        type_hint=dict,
        optional=True,
    )
    gradients = Attribute(
        docstring="The gradients of this property with respect to "
        "different force field parameters.",
        type_hint=list,
        optional=True,
    )
    def __init__(
        self,
        thermodynamic_state=None,
        phase=PropertyPhase.Undefined,
        substance=None,
        value=None,
        uncertainty=None,
        source=None,
    ):
        """Constructs a new PhysicalProperty object.

        Parameters
        ----------
        thermodynamic_state : ThermodynamicState
            The thermodynamic state that the property was measured in.
        phase : PropertyPhase
            The phase that the property was measured in.
        substance : Substance
            The composition of the substance that was measured.
        value: openff.evaluator.unit.Quantity
            The value of the measured physical property.
        uncertainty: openff.evaluator.unit.Quantity
            The uncertainty in the measured value.
        source: Source
            The source of this property.
        """
        # Attributes are only assigned when explicitly provided so that the
        # Attribute defaults / UNDEFINED sentinels are preserved otherwise.
        if thermodynamic_state is not None:
            self.thermodynamic_state = thermodynamic_state
        if phase is not None:
            self.phase = phase
        if substance is not None:
            self.substance = substance
        if value is not None:
            self.value = value
        if uncertainty is not None:
            self.uncertainty = uncertainty
        self.gradients = []
        if source is not None:
            self.source = source
    def __setstate__(self, state):
        # Properties serialized before the ``id`` attribute existed are
        # assigned a fresh one on deserialization.
        if "id" not in state:
            state["id"] = str(uuid.uuid4()).replace("-", "")
        super(PhysicalProperty, self).__setstate__(state)
    def validate(self, attribute_type=None):
        super(PhysicalProperty, self).validate(attribute_type)
        # The value / uncertainty need only be dimensionally compatible with
        # the default unit — they are not required to be expressed in it.
        assert self.value.units.dimensionality == self.default_unit().dimensionality
        if self.uncertainty != UNDEFINED:
            assert (
                self.uncertainty.units.dimensionality
                == self.default_unit().dimensionality
            )
class PhysicalPropertyDataSet(TypedBaseModel):
    """
    An object for storing and curating data sets of both physical property
    measurements and estimates. This class defines a number of convenience
    functions for filtering out unwanted properties, and for generating
    general statistics (such as the number of properties per substance)
    about the set.
    """
    def __init__(self):
        """
        Constructs a new PhysicalPropertyDataSet object.
        """
        # The internal, ordered store of properties; exposed read-only via
        # the ``properties`` tuple.
        self._properties = []
    @property
    def properties(self):
        """tuple of PhysicalProperty: A list of all of the properties
        within this set.
        """
        return tuple(self._properties)
    @property
    def property_types(self):
        """set of str: The types of property within this data set."""
        return set([x.__class__.__name__ for x in self._properties])
    @property
    def substances(self):
        """set of Substance: The substances for which the properties in this data set
        were collected for."""
        return set([x.substance for x in self._properties])
    @property
    def sources(self):
        """set of Source: The sources from which the properties in this data set were
        gathered."""
        return set([x.source for x in self._properties])
    def merge(self, data_set, validate=True):
        """Merge another data set into the current one.

        Parameters
        ----------
        data_set : PhysicalPropertyDataSet
            The secondary data set to merge into this one.
        validate: bool
            Whether to validate the other data set before merging.

        Raises
        ------
        KeyError
            If the other set contains a property whose id already exists here.
        """
        if data_set is None:
            return
        self.add_properties(*data_set, validate=validate)
    def add_properties(self, *physical_properties, validate=True):
        """Adds a physical property to the data set.

        Parameters
        ----------
        physical_properties: PhysicalProperty
            The physical property to add.
        validate: bool
            Whether to validate the properties before adding them
            to the set.

        Raises
        ------
        KeyError
            If a property with the same unique id is already in the set.
        """
        all_ids = set(x.id for x in self)
        # TODO: Do we need to check for adding the same property twice?
        for physical_property in physical_properties:
            if validate:
                physical_property.validate()
            # Reject duplicate ids, including duplicates within the batch
            # being added (``all_ids`` grows as we go).
            if physical_property.id in all_ids:
                raise KeyError(
                    f"A property with the unique id {physical_property.id} already "
                    f"exists."
                )
            all_ids.add(physical_property.id)
        self._properties.extend(physical_properties)
    def properties_by_substance(self, substance):
        """A generator which may be used to loop over all of the properties
        which were measured for a particular substance.

        Parameters
        ----------
        substance: Substance
            The substance of interest.

        Returns
        -------
        generator of PhysicalProperty
        """
        for physical_property in self._properties:
            if physical_property.substance != substance:
                continue
            yield physical_property
    def properties_by_type(self, property_type):
        """A generator which may be used to loop over all of properties
        of a particular type, e.g. all "Density" properties.

        Parameters
        ----------
        property_type: str or type of PhysicalProperty
            The type of property of interest. This may either be the string
            class name of the property or the class type.

        Returns
        -------
        generator of PhysicalProperty
        """
        # Normalise a class object to its name so both forms compare equally.
        if not isinstance(property_type, str):
            property_type = property_type.__name__
        for physical_property in self._properties:
            if physical_property.__class__.__name__ != property_type:
                continue
            yield physical_property
    def validate(self):
        """Checks to ensure that all properties within
        the set are valid physical property object.
        """
        for physical_property in self._properties:
            physical_property.validate()
    def to_pandas(self):
        """Converts a `PhysicalPropertyDataSet` to a `pandas.DataFrame` object
        with columns of

        - 'Id'
        - 'Temperature (K)'
        - 'Pressure (kPa)'
        - 'Phase'
        - 'N Components'
        - 'Component 1'
        - 'Role 1'
        - 'Mole Fraction 1'
        - 'Exact Amount 1'
        - ...
        - 'Component N'
        - 'Role N'
        - 'Mole Fraction N'
        - 'Exact Amount N'
        - '<Property 1> Value (<default unit>)'
        - '<Property 1> Uncertainty / (<default unit>)'
        - ...
        - '<Property N> Value / (<default unit>)'
        - '<Property N> Uncertainty / (<default unit>)'
        - `'Source'`

        where 'Component X' is a column containing the smiles representation of
        component X.

        Returns
        -------
        pandas.DataFrame
            The created data frame.
        """
        if len(self) == 0:
            return pandas.DataFrame()
        # Keep track of the maximum number of components in any substance
        # as this determines the number of component columns.
        maximum_number_of_components = 0
        data_rows = []
        # Extract the data from the data set.
        default_units = {}
        for physical_property in self:
            # Extract the measured state.
            temperature = physical_property.thermodynamic_state.temperature.to(
                unit.kelvin
            ).magnitude
            pressure = None
            if physical_property.thermodynamic_state.pressure != UNDEFINED:
                pressure = physical_property.thermodynamic_state.pressure.to(
                    unit.kilopascal
                ).magnitude
            phase = str(physical_property.phase)
            # Extract the component data.
            components = []
            amounts = []
            roles = []
            for index, component in enumerate(physical_property.substance):
                # A component may appear with both a mole fraction and an
                # exact amount; record whichever are present.
                component_amounts = {MoleFraction: None, ExactAmount: None}
                for x in physical_property.substance.get_amounts(component):
                    assert isinstance(x, (MoleFraction, ExactAmount))
                    component_amounts[type(x)] = x.value
                components.append(component.smiles)
                amounts.append(component_amounts)
                roles.append(component.role.name)
            # Extract the value data as a string.
            default_unit = physical_property.default_unit()
            default_units[physical_property.__class__.__name__] = default_unit
            value = (
                None
                if physical_property.value == UNDEFINED
                else physical_property.value.to(default_unit).magnitude
            )
            uncertainty = (
                None
                if physical_property.uncertainty == UNDEFINED
                else physical_property.uncertainty.to(default_unit).magnitude
            )
            # Extract the data source.
            source = None
            if isinstance(physical_property.source, MeasurementSource):
                # Prefer the DOI; fall back to the free-form reference.
                source = physical_property.source.doi
                if source is None or len(source) == 0:
                    source = physical_property.source.reference
            elif isinstance(physical_property.source, CalculationSource):
                source = physical_property.source.fidelity
            # Create the data row.
            data_row = {
                "Id": physical_property.id,
                "Temperature (K)": temperature,
                "Pressure (kPa)": pressure,
                "Phase": phase,
                "N Components": len(physical_property.substance),
            }
            for index in range(len(components)):
                data_row[f"Component {index + 1}"] = components[index]
                data_row[f"Role {index + 1}"] = roles[index]
                data_row[f"Mole Fraction {index + 1}"] = amounts[index][MoleFraction]
                data_row[f"Exact Amount {index + 1}"] = amounts[index][ExactAmount]
            data_row[
                f"{type(physical_property).__name__} Value ({default_unit:~})"
            ] = value
            data_row[
                f"{type(physical_property).__name__} Uncertainty ({default_unit:~})"
            ] = uncertainty
            data_row["Source"] = source
            data_rows.append(data_row)
            maximum_number_of_components = max(
                maximum_number_of_components, len(physical_property.substance)
            )
        # Set up the column headers.
        # NOTE(review): this branch appears unreachable (every property above
        # appends a row) and is inconsistent with the empty-set branch at the
        # top which returns an empty ``DataFrame`` rather than ``None`` —
        # confirm before relying on a ``None`` return.
        if len(data_rows) == 0:
            return None
        data_columns = [
            "Id",
            "Temperature (K)",
            "Pressure (kPa)",
            "Phase",
            "N Components",
        ]
        for index in range(maximum_number_of_components):
            data_columns.append(f"Component {index + 1}")
            data_columns.append(f"Role {index + 1}")
            data_columns.append(f"Mole Fraction {index + 1}")
            data_columns.append(f"Exact Amount {index + 1}")
        for property_type in self.property_types:
            default_unit = default_units[property_type]
            data_columns.append(f"{property_type} Value ({default_unit:~})")
            data_columns.append(f"{property_type} Uncertainty ({default_unit:~})")
        data_columns.append("Source")
        data_frame = pandas.DataFrame(data_rows, columns=data_columns)
        return data_frame
    @classmethod
    def from_pandas(cls, data_frame: pandas.DataFrame) -> "PhysicalPropertyDataSet":
        """Constructs a data set object from a pandas ``DataFrame`` object.

        Notes
        -----
        * All physical properties are assumed to be sourced from experimental
          measurements.
        * Currently this method only supports data frames containing properties
          which are built-in to the framework (e.g. Density).
        * This method assumes the data frame has a structure identical to that
          produced by the ``PhysicalPropertyDataSet.to_pandas`` function.

        Parameters
        ----------
        data_frame
            The data frame to construct the data set from.

        Returns
        -------
        The constructed data set.
        """
        from openff.evaluator import properties
        # Find all "<Type> Value (<unit>)" column headers.
        property_header_matches = {
            re.match(r"^([a-zA-Z]+) Value \(([a-zA-Z0-9+-/\s*^]*)\)$", header)
            for header in data_frame
            if header.find(" Value ") >= 0
        }
        property_headers = {}
        # Validate that the headers have the correct format, specify a
        # built-in property type, and specify correctly the properties
        # units.
        for match in property_header_matches:
            assert match
            property_type_string, property_unit_string = match.groups()
            assert hasattr(properties, property_type_string)
            property_type = getattr(properties, property_type_string)
            property_unit = unit.Unit(property_unit_string)
            assert property_unit is not None
            assert (
                property_unit.dimensionality
                == property_type.default_unit().dimensionality
            )
            property_headers[match.group(0)] = (property_type, property_unit)
        # Convert the data rows to property objects.
        physical_properties = []
        for _, data_row in data_frame.iterrows():
            data_row = data_row.dropna()
            # Extract the state at which the measurement was made.
            thermodynamic_state = ThermodynamicState(
                temperature=data_row["Temperature (K)"] * unit.kelvin,
                pressure=data_row["Pressure (kPa)"] * unit.kilopascal,
            )
            property_phase = PropertyPhase.from_string(data_row["Phase"])
            # Extract the substance the measurement was made for.
            substance = Substance()
            for i in range(data_row["N Components"]):
                component = Component(
                    smiles=data_row[f"Component {i + 1}"],
                    role=Component.Role[data_row.get(f"Role {i + 1}", "Solvent")],
                )
                mole_fraction = data_row.get(f"Mole Fraction {i + 1}", 0.0)
                exact_amount = data_row.get(f"Exact Amount {i + 1}", 0)
                if not numpy.isclose(mole_fraction, 0.0):
                    substance.add_component(component, MoleFraction(mole_fraction))
                if not numpy.isclose(exact_amount, 0.0):
                    substance.add_component(component, ExactAmount(exact_amount))
            for (
                property_header,
                (property_type, property_unit),
            ) in property_headers.items():
                # Check to see whether the row contains a value for this
                # type of property.
                if property_header not in data_row:
                    continue
                uncertainty_header = property_header.replace("Value", "Uncertainty")
                source_string = data_row["Source"]
                # Heuristically decide whether the source string is a set of
                # DOIs (every " + " separated part must match a DOI pattern)
                # or a free-form literature reference.
                is_doi = all(
                    any(
                        re.match(pattern, split_string, re.I)
                        for pattern in [
                            r"^10.\d{4,9}/[-._;()/:A-Z0-9]+$",
                            r"^10.1002/[^\s]+$",
                            r"^10.\d{4}/\d+-\d+X?(\d+)\d+<[\d\w]+:[\d\w]*>\d+.\d+.\w+;\d$",
                            r"^10.1021/\w\w\d+$",
                            r"^10.1207/[\w\d]+\&\d+_\d+$",
                        ]
                    )
                    for split_string in source_string.split(" + ")
                )
                physical_property = property_type(
                    thermodynamic_state=thermodynamic_state,
                    phase=property_phase,
                    value=data_row[property_header] * property_unit,
                    uncertainty=None
                    if uncertainty_header not in data_row
                    else data_row[uncertainty_header] * property_unit,
                    substance=substance,
                    source=MeasurementSource(
                        doi="" if not is_doi else source_string,
                        reference=source_string if not is_doi else "",
                    ),
                )
                # Preserve the original id when one is present in the frame.
                identifier = data_row.get("Id", None)
                if identifier:
                    physical_property.id = identifier
                physical_properties.append(physical_property)
        data_set = PhysicalPropertyDataSet()
        data_set.add_properties(*physical_properties)
        return data_set
    def __len__(self):
        return len(self._properties)
    def __iter__(self):
        return iter(self._properties)
    def __getstate__(self):
        return {"properties": self._properties}
    def __setstate__(self, state):
        self._properties = state["properties"]
        assert all(isinstance(x, PhysicalProperty) for x in self)
        # Ensure each property has a unique id.
        all_ids = set(x.id for x in self)
        assert len(all_ids) == len(self)
    def __str__(self):
        return (
            f"n_properties={len(self)} n_substances={len(self.substances)} "
            f"n_sources={len(self.sources)}"
        )
    def __repr__(self):
        return f"<PhysicalPropertyDataSet {str(self)}>"
|
jaketanderson/openff-evaluator | openff/evaluator/datasets/taproom/__init__.py | <gh_stars>10-100
from .taproom import TaproomDataSet, TaproomSource

# ``__all__`` must contain *names* (strings), not the objects themselves —
# listing the classes directly breaks ``from ... import *``, which looks each
# entry up by name and raises ``TypeError`` for non-string entries.
__all__ = ["TaproomDataSet", "TaproomSource"]
|
jaketanderson/openff-evaluator | openff/evaluator/properties/dielectric.py | <filename>openff/evaluator/properties/dielectric.py
"""
A collection of dielectric physical property definitions.
"""
from openff.units import unit
from openff.evaluator.attributes import UNDEFINED
from openff.evaluator.datasets import PhysicalProperty, PropertyPhase
from openff.evaluator.datasets.thermoml import thermoml_property
from openff.evaluator.layers import register_calculation_schema
from openff.evaluator.layers.reweighting import ReweightingLayer, ReweightingSchema
from openff.evaluator.layers.simulation import SimulationLayer, SimulationSchema
from openff.evaluator.protocols.analysis import (
AverageDielectricConstant,
ComputeDipoleMoments,
DecorrelateObservables,
)
from openff.evaluator.protocols.reweighting import (
ConcatenateObservables,
ReweightDielectricConstant,
)
from openff.evaluator.protocols.utils import (
generate_base_reweighting_protocols,
generate_simulation_protocols,
)
from openff.evaluator.utils.observables import ObservableType
from openff.evaluator.workflow import WorkflowSchema
from openff.evaluator.workflow.utils import ProtocolPath
@thermoml_property(
    "Relative permittivity at zero frequency",
    supported_phases=PropertyPhase.Liquid,
)
class DielectricConstant(PhysicalProperty):
    """A class representation of a dielectric property"""
    @classmethod
    def default_unit(cls):
        # Relative permittivity is a dimensionless ratio.
        return unit.dimensionless
    @staticmethod
    def default_simulation_schema(
        absolute_tolerance=UNDEFINED, relative_tolerance=UNDEFINED, n_molecules=1000
    ):
        """Returns the default calculation schema to use when estimating
        this class of property from direct simulations.

        Parameters
        ----------
        absolute_tolerance: openff.evaluator.unit.Quantity, optional
            The absolute tolerance to estimate the property to within.
        relative_tolerance: float
            The tolerance (as a fraction of the properties reported
            uncertainty) to estimate the property to within.
        n_molecules: int
            The number of molecules to use in the simulation.

        Returns
        -------
        SimulationSchema
            The schema to follow when estimating this property.
        """
        # At most one of the two tolerances may be specified.
        assert absolute_tolerance == UNDEFINED or relative_tolerance == UNDEFINED
        calculation_schema = SimulationSchema()
        calculation_schema.absolute_tolerance = absolute_tolerance
        calculation_schema.relative_tolerance = relative_tolerance
        use_target_uncertainty = (
            absolute_tolerance != UNDEFINED or relative_tolerance != UNDEFINED
        )
        # Define the protocols which will run the simulation itself.
        protocols, value_source, output_to_store = generate_simulation_protocols(
            AverageDielectricConstant("average_dielectric"),
            use_target_uncertainty,
            n_molecules=n_molecules,
        )
        # Add a protocol to compute the dipole moments and pass these to
        # the analysis protocol.
        compute_dipoles = ComputeDipoleMoments("compute_dipoles")
        compute_dipoles.parameterized_system = ProtocolPath(
            "parameterized_system", protocols.assign_parameters.id
        )
        compute_dipoles.trajectory_path = ProtocolPath(
            "trajectory_file_path", protocols.production_simulation.id
        )
        compute_dipoles.gradient_parameters = ProtocolPath(
            "parameter_gradient_keys", "global"
        )
        protocols.converge_uncertainty.add_protocols(compute_dipoles)
        # The dielectric analysis needs both the box volumes and the dipole
        # moments from the production run.
        protocols.analysis_protocol.volumes = ProtocolPath(
            f"observables[{ObservableType.Volume.value}]",
            protocols.production_simulation.id,
        )
        protocols.analysis_protocol.dipole_moments = ProtocolPath(
            "dipole_moments",
            compute_dipoles.id,
        )
        # Build the workflow schema.
        schema = WorkflowSchema()
        schema.protocol_schemas = [
            protocols.build_coordinates.schema,
            protocols.assign_parameters.schema,
            protocols.energy_minimisation.schema,
            protocols.equilibration_simulation.schema,
            protocols.converge_uncertainty.schema,
            protocols.decorrelate_trajectory.schema,
            protocols.decorrelate_observables.schema,
        ]
        schema.outputs_to_store = {"full_system": output_to_store}
        schema.final_value_source = value_source
        calculation_schema.workflow_schema = schema
        return calculation_schema
    @staticmethod
    def default_reweighting_schema(
        absolute_tolerance=UNDEFINED,
        relative_tolerance=UNDEFINED,
        n_effective_samples=50,
    ):
        """Returns the default calculation schema to use when estimating
        this property by reweighting existing data.

        Parameters
        ----------
        absolute_tolerance: openff.evaluator.unit.Quantity, optional
            The absolute tolerance to estimate the property to within.
        relative_tolerance: float
            The tolerance (as a fraction of the properties reported
            uncertainty) to estimate the property to within.
        n_effective_samples: int
            The minimum number of effective samples to require when
            reweighting the cached simulation data.

        Returns
        -------
        ReweightingSchema
            The schema to follow when estimating this property.
        """
        # At most one of the two tolerances may be specified.
        assert absolute_tolerance == UNDEFINED or relative_tolerance == UNDEFINED
        calculation_schema = ReweightingSchema()
        calculation_schema.absolute_tolerance = absolute_tolerance
        calculation_schema.relative_tolerance = relative_tolerance
        protocols, data_replicator = generate_base_reweighting_protocols(
            statistical_inefficiency=AverageDielectricConstant(
                "average_dielectric_$(data_replicator)"
            ),
            reweight_observable=ReweightDielectricConstant("reweight_dielectric"),
        )
        protocols.zero_gradients.input_observables = ProtocolPath(
            "output_observables[Volume]",
            protocols.join_observables.id,
        )
        protocols.statistical_inefficiency.thermodynamic_state = ProtocolPath(
            "thermodynamic_state", "global"
        )
        protocols.reweight_observable.required_effective_samples = n_effective_samples
        # We don't need to perform bootstrapping as this protocol is only used to
        # calculate the statistical inefficiency and equilibration time. The
        # re-weighting protocol will instead compute the bootstrapped uncertainties.
        protocols.statistical_inefficiency.bootstrap_iterations = 1
        # Set up a protocol to re-evaluate the dipole moments at the target state
        # and concatenate the into a single array.
        compute_dipoles = ComputeDipoleMoments("compute_dipoles_$(data_replicator)")
        compute_dipoles.parameterized_system = ProtocolPath(
            "parameterized_system", protocols.build_target_system.id
        )
        compute_dipoles.trajectory_path = ProtocolPath(
            "trajectory_file_path", protocols.unpack_stored_data.id
        )
        compute_dipoles.gradient_parameters = ProtocolPath(
            "parameter_gradient_keys", "global"
        )
        join_dipoles = ConcatenateObservables("join_dipoles")
        join_dipoles.input_observables = ProtocolPath(
            "dipole_moments",
            compute_dipoles.id,
        )
        # Point the dielectric protocols to the volumes and dipole moments.
        protocols.statistical_inefficiency.volumes = ProtocolPath(
            "observables[Volume]", protocols.unpack_stored_data.id
        )
        protocols.statistical_inefficiency.dipole_moments = ProtocolPath(
            "dipole_moments", compute_dipoles.id
        )
        # Make sure to decorrelate the dipole moments.
        decorrelate_dipoles = DecorrelateObservables("decorrelate_dipoles")
        decorrelate_dipoles.time_series_statistics = ProtocolPath(
            "time_series_statistics", protocols.statistical_inefficiency.id
        )
        decorrelate_dipoles.input_observables = ProtocolPath(
            "output_observables", join_dipoles.id
        )
        protocols.reweight_observable.dipole_moments = ProtocolPath(
            "output_observables", decorrelate_dipoles.id
        )
        protocols.reweight_observable.volumes = ProtocolPath(
            "output_observables", protocols.decorrelate_observable.id
        )
        protocols.reweight_observable.thermodynamic_state = ProtocolPath(
            "thermodynamic_state", "global"
        )
        schema = WorkflowSchema()
        schema.protocol_schemas = [
            *(x.schema for x in protocols),
            compute_dipoles.schema,
            join_dipoles.schema,
            decorrelate_dipoles.schema,
        ]
        schema.protocol_replicators = [data_replicator]
        schema.final_value_source = ProtocolPath(
            "value", protocols.reweight_observable.id
        )
        calculation_schema.workflow_schema = schema
        return calculation_schema
# Register the properties via the plugin system. The same property class is
# registered with both layers so it may be estimated either by direct
# simulation or by reweighting cached simulation data.
register_calculation_schema(
    DielectricConstant, SimulationLayer, DielectricConstant.default_simulation_schema
)
register_calculation_schema(
    DielectricConstant, ReweightingLayer, DielectricConstant.default_reweighting_schema
)
|
jaketanderson/openff-evaluator | openff/evaluator/__init__.py | <filename>openff/evaluator/__init__.py
"""
openff-evaluator
A physical property evaluation toolkit from the Open Forcefield Consortium.
"""
from openff.units import unit
from ._version import get_versions
from .plugins import register_default_plugins, register_external_plugins
# Load the default plugins
register_default_plugins()
# Load in any found external plugins.
register_external_plugins()
# Handle versioneer
versions = get_versions()
__version__ = versions["version"]
__git_revision__ = versions["full-revisionid"]
del get_versions, versions
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_utils/test_openmm.py | import os.path
from random import randint, random
import mdtraj
import numpy
import numpy as np
import pytest
from openff.toolkit.topology import Molecule, Topology
from openff.toolkit.typing.engines.smirnoff import ForceField, vdWHandler
from openff.toolkit.typing.engines.smirnoff.parameters import (
ChargeIncrementModelHandler,
ElectrostaticsHandler,
LibraryChargeHandler,
VirtualSiteHandler,
)
from openff.units import unit
try:
import openmm
from openff.units.openmm import from_openmm, to_openmm
from openmm import unit as openmm_unit
from openmm.app import PDBFile
except ImportError:
from simtk import openmm
from simtk.openmm import unit as openmm_unit
from simtk.openmm.app import PDBFile
from openff.units.simtk import from_simtk as from_openmm, to_simtk as to_openmm
from openff.evaluator.backends import ComputeResources
from openff.evaluator.forcefield import ParameterGradientKey
from openff.evaluator.protocols.openmm import _compute_gradients
from openff.evaluator.thermodynamics import ThermodynamicState
from openff.evaluator.utils import get_data_filename
from openff.evaluator.utils.observables import ObservableArray, ObservableFrame
from openff.evaluator.utils.openmm import (
extract_atom_indices,
extract_positions,
system_subset,
update_context_with_pdb,
update_context_with_positions,
)
def test_daltons():
    """Converting a mass from OpenMM daltons to a pint quantity should
    preserve the underlying value in g / mol."""
    mass = random() * openmm_unit.dalton
    grams_per_mole = openmm_unit.gram / openmm_unit.mole

    expected = mass.value_in_unit(grams_per_mole)
    actual = from_openmm(mass).to(unit.gram / unit.mole).magnitude

    assert np.allclose(expected, actual)
# NOTE: the parameter is deliberately named ``source_unit`` rather than
# ``openmm_unit`` — the latter shadowed the ``openmm.unit`` module imported
# at the top of this file inside the test body.
@pytest.mark.parametrize(
    "source_unit",
    [
        openmm_unit.dalton,
        openmm_unit.kilojoules_per_mole,
        openmm_unit.angstrom,
        openmm_unit.kelvin,
        openmm_unit.atmosphere,
        openmm_unit.gram,
        openmm_unit.liter,
        openmm_unit.gram / openmm_unit.liter,
    ],
)
@pytest.mark.parametrize(
    "value",
    [random(), randint(1, 10), [random(), random()], np.array([random(), random()])],
)
def test_openmm_to_pint(source_unit, value):
    """Converting OpenMM -> pint should preserve the raw magnitude for a
    representative range of units and value types (scalar, int, list, array)."""
    openmm_quantity = value * source_unit
    openmm_raw_value = openmm_quantity.value_in_unit(source_unit)
    pint_quantity = from_openmm(openmm_quantity)
    pint_raw_value = pint_quantity.magnitude
    assert np.allclose(openmm_raw_value, pint_raw_value)
@pytest.mark.parametrize(
    "pint_unit",
    [
        unit.dalton,
        unit.kilojoules / unit.mole,
        unit.angstrom,
        unit.kelvin,
        unit.atmosphere,
        unit.gram,
        unit.liter,
        unit.gram / unit.liter,
    ],
)
@pytest.mark.parametrize(
    "value",
    [random(), randint(1, 10), [random(), random()], np.array([random(), random()])],
)
def test_pint_to_openmm(pint_unit, value):
    """Converting pint -> OpenMM should preserve the raw magnitude for a
    representative range of units and value types (scalar, int, list, array)."""
    pint_quantity = value * pint_unit
    pint_raw_value = pint_quantity.magnitude
    openmm_quantity = to_openmm(pint_quantity)
    # Compare in whatever unit the conversion produced; the magnitude is
    # assumed to be preserved unit-for-unit — TODO confirm no rescaling occurs.
    openmm_raw_value = openmm_quantity.value_in_unit(openmm_quantity.unit)
    assert np.allclose(openmm_raw_value, pint_raw_value)
def test_constants():
    """The physical constants exposed through pint (Avogadro, Boltzmann,
    molar gas constant, speed of light) should numerically match the values
    OpenMM ships."""
    assert np.isclose(
        openmm_unit.AVOGADRO_CONSTANT_NA.value_in_unit((1.0 / openmm_unit.mole).unit),
        (1.0 * unit.avogadro_constant).to((1.0 / unit.mole).units).magnitude,
    )
    assert np.isclose(
        openmm_unit.BOLTZMANN_CONSTANT_kB.value_in_unit(
            openmm_unit.joule / openmm_unit.kelvin
        ),
        (1.0 * unit.boltzmann_constant).to(unit.joule / unit.kelvin).magnitude,
    )
    assert np.isclose(
        openmm_unit.MOLAR_GAS_CONSTANT_R.value_in_unit(
            openmm_unit.joule / openmm_unit.kelvin / openmm_unit.mole
        ),
        (1.0 * unit.molar_gas_constant)
        .to(unit.joule / unit.kelvin / unit.mole)
        .magnitude,
    )
    assert np.isclose(
        openmm_unit.SPEED_OF_LIGHT_C.value_in_unit(
            openmm_unit.meter / openmm_unit.seconds
        ),
        (1.0 * unit.speed_of_light).to(unit.meter / unit.seconds).magnitude,
    )
def hydrogen_chloride_force_field(
    library_charge: bool,
    charge_increment: bool,
    vsite: bool,
) -> ForceField:
    """Returns a SMIRNOFF force field which is able to parameterize hydrogen chloride.

    Parameters
    ----------
    library_charge
        If True, add a ``LibraryCharges`` handler assigning +1 e to H and -1 e to Cl.
    charge_increment
        If True, add a ``ChargeIncrementModel`` handler transferring 1 e across
        the H-Cl bond.
    vsite
        If True, add a ``BondCharge`` virtual site along the H-Cl bond (with
        zero charge increments).
    """
    # Create the FF
    force_field = ForceField()
    # Add a Vdw handler with distinct H / Cl parameters so tests can tell the
    # two atoms apart by sigma / epsilon.
    vdw_handler = vdWHandler(version=0.3)
    vdw_handler.method = "cutoff"
    vdw_handler.cutoff = 6.0 * openmm_unit.angstrom
    vdw_handler.scale14 = 1.0
    vdw_handler.add_parameter(
        {
            "smirks": "[#1:1]",
            "epsilon": 0.0 * openmm_unit.kilojoules_per_mole,
            "sigma": 1.0 * openmm_unit.angstrom,
        }
    )
    vdw_handler.add_parameter(
        {
            "smirks": "[#17:1]",
            "epsilon": 2.0 * openmm_unit.kilojoules_per_mole,
            "sigma": 2.0 * openmm_unit.angstrom,
        }
    )
    force_field.register_parameter_handler(vdw_handler)
    # Add an electrostatic, a library charge and a charge increment handler.
    electrostatics_handler = ElectrostaticsHandler(version=0.3)
    electrostatics_handler.cutoff = 6.0 * openmm_unit.angstrom
    electrostatics_handler.method = "PME"
    force_field.register_parameter_handler(electrostatics_handler)
    if library_charge:
        library_charge_handler = LibraryChargeHandler(version=0.3)
        library_charge_handler.add_parameter(
            parameter_kwargs={
                "smirks": "[#1:1]",
                "charge1": 1.0 * openmm_unit.elementary_charge,
            }
        )
        library_charge_handler.add_parameter(
            parameter_kwargs={
                "smirks": "[#17:1]",
                "charge1": -1.0 * openmm_unit.elementary_charge,
            }
        )
        force_field.register_parameter_handler(library_charge_handler)
    if charge_increment:
        charge_increment_handler = ChargeIncrementModelHandler(version=0.3)
        charge_increment_handler.add_parameter(
            parameter_kwargs={
                "smirks": "[#1:1]-[#17:2]",
                "charge_increment1": -1.0 * openmm_unit.elementary_charge,
                "charge_increment2": 1.0 * openmm_unit.elementary_charge,
            }
        )
        force_field.register_parameter_handler(charge_increment_handler)
    if vsite:
        # The negative distance places the virtual site on the far side of the
        # first tagged atom, per the SMIRNOFF BondCharge convention.
        vsite_handler = VirtualSiteHandler(version=0.3)
        vsite_handler.add_parameter(
            {
                "smirks": "[#1:1]-[#17:2]",
                "type": "BondCharge",
                "distance": -0.2 * openmm_unit.nanometers,
                "match": "all_permutations",
                "charge_increment1": 0.0 * openmm_unit.elementary_charge,
                "charge_increment2": 0.0 * openmm_unit.elementary_charge,
            }
        )
        force_field.register_parameter_handler(vsite_handler)
    return force_field
def test_system_subset_vdw():
    """``system_subset`` must perturb only the queried vdW epsilon ([#1:1])."""
    # Create a dummy topology
    topology = Molecule.from_smiles("Cl").to_topology()
    # Create the system subset, scaling the H epsilon by 0.5.
    system, parameter_value = system_subset(
        parameter_key=ParameterGradientKey("vdW", "[#1:1]", "epsilon"),
        force_field=hydrogen_chloride_force_field(True, True, False),
        topology=topology,
        scale_amount=0.5,
    )
    # Only the nonbonded force should be present in the subset.
    assert system.getNumForces() == 1
    assert system.getNumParticles() == 2
    charge_0, sigma_0, epsilon_0 = system.getForce(0).getParticleParameters(0)
    charge_1, sigma_1, epsilon_1 = system.getForce(0).getParticleParameters(1)
    # Charges are expected to be zero in the vdW-only subset.
    assert np.isclose(charge_0.value_in_unit(openmm_unit.elementary_charge), 0.0)
    assert np.isclose(charge_1.value_in_unit(openmm_unit.elementary_charge), 0.0)
    # Particle 0 is Cl (sigma 2 A), particle 1 is H (sigma 1 A) -- both unchanged.
    assert np.isclose(sigma_0.value_in_unit(openmm_unit.angstrom), 2.0)
    assert np.isclose(sigma_1.value_in_unit(openmm_unit.angstrom), 1.0)
    # Cl epsilon untouched; H epsilon perturbed from 0.0 to 0.5 kJ/mol.
    assert np.isclose(epsilon_0.value_in_unit(openmm_unit.kilojoules_per_mole), 2.0)
    assert np.isclose(epsilon_1.value_in_unit(openmm_unit.kilojoules_per_mole), 0.5)
def test_system_subset_vdw_cutoff():
    """Test that handler attributes are correctly handled."""
    # Create a dummy topology; box vectors are needed for a periodic cutoff.
    topology: Topology = Molecule.from_smiles("Cl").to_topology()
    topology.box_vectors = numpy.eye(3) * openmm_unit.nanometers
    # Create the system subset. The key has no SMIRKS because `cutoff` is a
    # handler-level attribute, not a per-parameter one.
    system, parameter_value = system_subset(
        parameter_key=ParameterGradientKey("vdW", None, "cutoff"),
        force_field=hydrogen_chloride_force_field(True, True, False),
        topology=topology,
        scale_amount=0.5,
    )
    assert system.getNumForces() == 1
    assert system.getNumParticles() == 2
    # The 6 A cutoff scaled by +50% -> 9 A.
    cutoff = system.getForce(0).getCutoffDistance()
    assert np.isclose(cutoff.value_in_unit(openmm_unit.angstrom), 9.0)
def test_system_subset_library_charge():
    """``system_subset`` must perturb only the queried library charge ([#17:1])."""
    force_field = hydrogen_chloride_force_field(True, False, False)
    # Raise the H charge to +1.5 e so that, after the Cl charge is perturbed
    # from -1.0 to -1.5 e, the molecule remains net neutral.
    force_field.get_parameter_handler("LibraryCharges").parameters["[#1:1]"].charge1 = (
        1.5 * openmm_unit.elementary_charge
    )
    # Create a dummy topology
    topology = Molecule.from_smiles("Cl").to_topology()
    # Create the system subset.
    system, parameter_value = system_subset(
        parameter_key=ParameterGradientKey("LibraryCharges", "[#17:1]", "charge1"),
        force_field=force_field,
        topology=topology,
        scale_amount=0.5,
    )
    assert system.getNumForces() == 1
    assert system.getNumParticles() == 2
    charge_0, sigma_0, epsilon_0 = system.getForce(0).getParticleParameters(0)
    charge_1, sigma_1, epsilon_1 = system.getForce(0).getParticleParameters(1)
    # Particle 0 (Cl) was perturbed -1.0 -> -1.5 e; particle 1 (H) keeps +1.5 e.
    assert np.isclose(charge_0.value_in_unit(openmm_unit.elementary_charge), -1.5)
    assert np.isclose(charge_1.value_in_unit(openmm_unit.elementary_charge), 1.5)
    # vdW parameters are expected to be placeholders (sigma 10 A, epsilon 0)
    # in the charge-only subset -- presumably defaults; confirm against
    # `system_subset`'s implementation.
    assert np.isclose(sigma_0.value_in_unit(openmm_unit.angstrom), 10.0)
    assert np.isclose(sigma_1.value_in_unit(openmm_unit.angstrom), 10.0)
    assert np.isclose(epsilon_0.value_in_unit(openmm_unit.kilojoules_per_mole), 0.0)
    assert np.isclose(epsilon_1.value_in_unit(openmm_unit.kilojoules_per_mole), 0.0)
def test_system_subset_charge_increment():
    """Perturbing a charge increment parameter (currently skipped upstream)."""
    # NOTE(review): `pytest.skip` raises, so every statement below is
    # currently unreachable until the upstream limitation is lifted.
    pytest.skip(
        "This test will fail until the SMIRNOFF charge increment handler allows "
        "N - 1 charges to be specified."
    )
    # Create a dummy topology
    topology = Molecule.from_smiles("Cl").to_topology()
    # Create the system subset.
    system, parameter_value = system_subset(
        parameter_key=ParameterGradientKey(
            "ChargeIncrementModel", "[#1:1]-[#17:2]", "charge_increment1"
        ),
        force_field=hydrogen_chloride_force_field(False, True, False),
        topology=topology,
        scale_amount=0.5,
    )
    assert system.getNumForces() == 1
    assert system.getNumParticles() == 2
    charge_0, sigma_0, epsilon_0 = system.getForce(0).getParticleParameters(0)
    charge_1, sigma_1, epsilon_1 = system.getForce(0).getParticleParameters(1)
    # The perturbed increment should shift particle 0's charge away from -1 e
    # while leaving particle 1 at +1 e.
    assert not np.isclose(charge_0.value_in_unit(openmm_unit.elementary_charge), -1.0)
    assert np.isclose(charge_1.value_in_unit(openmm_unit.elementary_charge), 1.0)
    assert np.isclose(sigma_0.value_in_unit(openmm_unit.angstrom), 10.0)
    assert np.isclose(sigma_1.value_in_unit(openmm_unit.angstrom), 10.0)
    assert np.isclose(epsilon_0.value_in_unit(openmm_unit.kilojoules_per_mole), 0.0)
    assert np.isclose(epsilon_1.value_in_unit(openmm_unit.kilojoules_per_mole), 0.0)
@pytest.mark.parametrize(
    "smirks, all_zeros", [("[#6X4:1]", True), ("[#8:1]", False), (None, False)]
)
def test_compute_gradients(tmpdir, smirks, all_zeros):
    """Gradients w.r.t. parameters that do not apply to the system (a carbon
    SMIRKS for pure water) must be zero; gradients w.r.t. applicable
    parameters (oxygen epsilon, or the handler-level cutoff) must not be.
    """
    # Load a short trajectory.
    coordinate_path = get_data_filename("test/trajectories/water.pdb")
    trajectory_path = get_data_filename("test/trajectories/water.dcd")
    trajectory = mdtraj.load_dcd(trajectory_path, coordinate_path)
    # A zero-valued potential energy frame; `_compute_gradients` attaches the
    # computed gradients to this observable in place.
    observables = ObservableFrame(
        {
            "PotentialEnergy": ObservableArray(
                np.zeros(len(trajectory)) * unit.kilojoule / unit.mole
            )
        }
    )
    _compute_gradients(
        [
            ParameterGradientKey(
                "vdW", smirks, "epsilon" if smirks is not None else "cutoff"
            )
        ],
        observables,
        ForceField("openff-1.2.0.offxml"),
        ThermodynamicState(298.15 * unit.kelvin, 1.0 * unit.atmosphere),
        Topology.from_mdtraj(trajectory.topology, [Molecule.from_smiles("O")]),
        trajectory,
        ComputeResources(),
        True,
    )
    # One gradient value per trajectory frame.
    assert len(observables["PotentialEnergy"].gradients[0].value) == len(trajectory)
    if all_zeros:
        assert np.allclose(
            observables["PotentialEnergy"].gradients[0].value,
            0.0 * observables["PotentialEnergy"].gradients[0].value.units,
        )
    else:
        assert not np.allclose(
            observables["PotentialEnergy"].gradients[0].value,
            0.0 * observables["PotentialEnergy"].gradients[0].value.units,
        )
@pytest.mark.parametrize(
    "box_vectors", [None, (numpy.eye(3) * 3.0) * openmm_unit.nanometers]
)
def test_update_context_with_positions(box_vectors):
    """`update_context_with_positions` must place virtual sites and apply the
    given box vectors (falling back to the system's own box when None).
    """
    force_field = hydrogen_chloride_force_field(True, False, True)
    topology: Topology = Molecule.from_mapped_smiles("[Cl:1][H:2]").to_topology()
    system = force_field.create_openmm_system(topology)
    context = openmm.Context(
        system, openmm.VerletIntegrator(0.1 * openmm_unit.femtoseconds)
    )
    # Positions for the two real atoms only; the v-site is computed by OpenMM.
    positions = numpy.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]) * openmm_unit.angstrom
    update_context_with_positions(context, positions, box_vectors)
    context_positions = context.getState(getPositions=True).getPositions(asNumpy=True)
    context_box_vectors = context.getState(getPositions=True).getPeriodicBoxVectors()
    # The v-site appears at (-1, 0, 0) A, on the far side of Cl along the bond.
    assert numpy.allclose(
        context_positions.value_in_unit(openmm_unit.angstrom),
        numpy.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [-1.0, 0.0, 0.0]]),
    )
    # With explicit box vectors the box is 3 nm; otherwise the system default
    # (2 nm) is retained.
    assert numpy.isclose(
        context_box_vectors[0].x, (2.0 if box_vectors is None else 3.0)
    )
    assert numpy.isclose(
        context_box_vectors[1].y, (2.0 if box_vectors is None else 3.0)
    )
    assert numpy.isclose(
        context_box_vectors[2].z, (2.0 if box_vectors is None else 3.0)
    )
def test_update_context_with_pdb(tmpdir):
    """`update_context_with_pdb` must load positions from a PDB file, compute
    the virtual site position, and keep the system's default box vectors.
    """
    force_field = hydrogen_chloride_force_field(True, False, True)
    topology: Topology = Molecule.from_mapped_smiles("[Cl:1][H:2]").to_topology()
    system = force_field.create_openmm_system(topology)
    context = openmm.Context(
        system, openmm.VerletIntegrator(0.1 * openmm_unit.femtoseconds)
    )
    # Round-trip the atom positions through a PDB file.
    positions = numpy.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]) * openmm_unit.angstrom
    pdb_path = os.path.join(tmpdir, "tmp.pdb")
    topology.to_file(pdb_path, positions)
    pdb_file = PDBFile(pdb_path)
    update_context_with_pdb(context, pdb_file)
    context_positions = context.getState(getPositions=True).getPositions(asNumpy=True)
    context_box_vectors = context.getState(getPositions=True).getPeriodicBoxVectors()
    # The v-site (particle 2) appears at (-1, 0, 0) A.
    assert numpy.allclose(
        context_positions.value_in_unit(openmm_unit.angstrom),
        numpy.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [-1.0, 0.0, 0.0]]),
    )
    # `extract_positions` with explicit indices should return just the v-site.
    assert numpy.allclose(
        extract_positions(context.getState(getPositions=True), [2]).value_in_unit(
            openmm_unit.angstrom
        ),
        numpy.array([[-1.0, 0.0, 0.0]]),
    )
    # No box vectors supplied -> the system default 2 nm cube is kept.
    assert numpy.isclose(context_box_vectors[0].x, 2.0)
    assert numpy.isclose(context_box_vectors[1].y, 2.0)
    assert numpy.isclose(context_box_vectors[2].z, 2.0)
def test_extract_atom_indices():
    """`extract_atom_indices` must return only real atoms, excluding v-sites."""
    force_field = hydrogen_chloride_force_field(True, False, True)
    omm_system = force_field.create_openmm_system(
        Molecule.from_smiles("Cl").to_topology()
    )

    # Two real atoms (Cl, H) plus one virtual site particle.
    assert omm_system.getNumParticles() == 3
    assert extract_atom_indices(omm_system) == [0, 1]
|
jaketanderson/openff-evaluator | openff/evaluator/utils/__init__.py | <filename>openff/evaluator/utils/__init__.py
from .utils import (
    get_data_filename,
    has_openeye,
    is_file_and_not_empty,
    setup_timestamp_logging,
)

# ``__all__`` must contain the *names* of the public attributes as strings.
# The previous version listed the function objects themselves, which breaks
# ``from openff.evaluator.utils import *`` (Python requires string items in
# ``__all__``) and confuses API-introspection tooling.
__all__ = [
    "get_data_filename",
    "has_openeye",
    "setup_timestamp_logging",
    "is_file_and_not_empty",
]
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_protocols/test_paprika.py | import os
import numpy
import pytest
from openff.units import unit
from openff.evaluator.attributes import UNDEFINED
from openff.evaluator.forcefield.system import ParameterizedSystem
from openff.evaluator.protocols.paprika.analysis import (
AnalyzeAPRPhase,
ComputeReferenceWork,
ComputeSymmetryCorrection,
)
from openff.evaluator.protocols.paprika.coordinates import (
AddDummyAtoms,
PreparePullCoordinates,
PrepareReleaseCoordinates,
_atom_indices_by_role,
_components_by_role,
)
from openff.evaluator.protocols.paprika.restraints import (
ApplyRestraints,
GenerateAttachRestraints,
GeneratePullRestraints,
GenerateReleaseRestraints,
)
from openff.evaluator.substances import Component, ExactAmount, Substance
from openff.evaluator.thermodynamics import ThermodynamicState
from openff.evaluator.utils import get_data_filename
@pytest.fixture(scope="module")
def dummy_complex() -> Substance:
    """A minimal host-guest substance: one methane ligand and one methanol receptor."""
    substance = Substance()

    for smiles, role in (
        ("C", Component.Role.Ligand),
        ("CO", Component.Role.Receptor),
    ):
        substance.add_component(Component(smiles=smiles, role=role), ExactAmount(1))

    return substance
@pytest.fixture()
def complex_file_path(tmp_path):
    """Build a PDB of the methanol-methane complex augmented with three paprika
    dummy atoms, and return its path.
    """
    import parmed.geometry
    from paprika.evaluator import Setup
    complex_path = get_data_filename(
        os.path.join("test", "molecules", "methanol_methane.pdb")
    )
    # noinspection PyTypeChecker
    structure: parmed.Structure = parmed.load_file(complex_path, structure=True)
    # Unit masses give the geometric centroid rather than a mass-weighted one.
    # noinspection PyTypeChecker
    center_of_mass = parmed.geometry.center_of_mass(
        structure.coordinates, masses=numpy.ones(len(structure.coordinates))
    )
    # Anchor dummy atoms at fixed offsets (in angstroms) from the centroid.
    Setup.add_dummy_atoms_to_structure(
        structure,
        [
            numpy.array([0.0, 0.0, 10.0]),
            numpy.array([0.0, 0.0, 20.0]),
            numpy.array([0.0, 5.0, 25.0]),
        ],
        center_of_mass,
    )
    complex_path = os.path.join(tmp_path, "complex.pdb")
    structure.save(complex_path)
    return complex_path
@pytest.fixture(scope="module")
def restraints_schema():
    """A paprika restraint schema exercising every supported restraint type.

    Atom masks (e.g. ``@12``) are AMBER-style selections into the complex
    produced by the ``complex_file_path`` fixture.
    """
    return {
        "static": [{"atoms": "@12 @1", "force_constant": 5.0}],
        "conformational": [
            {"atoms": "@1 @2 @3 @4", "force_constant": 6.0, "target": 104.3}
        ],
        "symmetry": [{"atoms": "@12 @7 @3 @4", "force_constant": 50.0, "target": 11.0}],
        "wall": [{"atoms": "@12 @7 @3 @4", "force_constant": 50.0, "target": 11.0}],
        "guest": [
            {
                "atoms": "@12 @7",
                "attach": {"force_constant": 5.0, "target": 6.0},
                "pull": {"force_constant": 5.0, "target": 24.0},
            }
        ],
    }
@pytest.fixture()
def attach_restraints_path(tmp_path, complex_file_path, restraints_schema):
    """Generate attach-phase restraints (single lambda window) and return the
    path of the serialized restraints file.
    """
    protocol = GenerateAttachRestraints("")
    protocol.complex_coordinate_path = complex_file_path
    protocol.attach_lambdas = [1.0]
    protocol.restraint_schemas = restraints_schema
    protocol.execute(str(tmp_path))
    return protocol.restraints_path
@pytest.fixture()
def pull_restraints_path(tmp_path, complex_file_path, restraints_schema):
    """Generate pull-phase restraints (two pull windows) and return the path
    of the serialized restraints file.
    """
    protocol = GeneratePullRestraints("")
    protocol.complex_coordinate_path = complex_file_path
    protocol.attach_lambdas = [0.0]
    protocol.n_pull_windows = 2
    protocol.restraint_schemas = restraints_schema
    protocol.execute(str(tmp_path))
    return protocol.restraints_path
@pytest.fixture()
def release_restraints_path(tmp_path, complex_file_path, restraints_schema):
    """Generate release-phase restraints (single lambda window) and return the
    path of the serialized restraints file.
    """
    protocol = GenerateReleaseRestraints("")
    protocol.host_coordinate_path = complex_file_path
    protocol.release_lambdas = [1.0]
    protocol.restraint_schemas = restraints_schema
    protocol.execute(str(tmp_path))
    return protocol.restraints_path
def test_components_by_role(dummy_complex):
    """The dummy complex must split into one receptor (CO) and one ligand (C)."""
    components_by_role = _components_by_role(dummy_complex)

    assert len(components_by_role) == 2

    for role, expected_smiles in (
        (Component.Role.Ligand, "C"),
        (Component.Role.Receptor, "CO"),
    ):
        assert role in components_by_role
        assert len(components_by_role[role]) == 1
        assert components_by_role[role][0].smiles == expected_smiles
def test_atom_indices_by_role(dummy_complex):
    """Atom indices must map methanol (atoms 0-5) to the receptor role and
    methane (atoms 6-10) to the ligand role.
    """
    atom_indices_by_role = _atom_indices_by_role(
        dummy_complex,
        get_data_filename(os.path.join("test", "molecules", "methanol_methane.pdb")),
    )
    assert len(atom_indices_by_role) == 2
    assert Component.Role.Ligand in atom_indices_by_role
    assert Component.Role.Receptor in atom_indices_by_role
    # Methanol (6 atoms) comes first in the PDB, followed by methane (5 atoms).
    assert len(atom_indices_by_role[Component.Role.Receptor]) == 6
    assert atom_indices_by_role[Component.Role.Receptor] == [0, 1, 2, 3, 4, 5]
    assert len(atom_indices_by_role[Component.Role.Ligand]) == 5
    assert atom_indices_by_role[Component.Role.Ligand] == [6, 7, 8, 9, 10]
def test_prepare_release_coordinates(tmp_path, dummy_complex):
    """Release coordinates must retain only the 6 receptor (methanol) atoms."""
    import mdtraj
    protocol = PrepareReleaseCoordinates("")
    protocol.substance = dummy_complex
    protocol.complex_file_path = get_data_filename(
        os.path.join("test", "molecules", "methanol_methane.pdb")
    )
    protocol.execute(str(tmp_path))
    assert os.path.isfile(protocol.output_coordinate_path)
    # The guest (methane) should have been stripped, leaving the host only.
    host_trajectory = mdtraj.load_pdb(protocol.output_coordinate_path)
    assert host_trajectory.topology.n_atoms == 6
@pytest.mark.parametrize("window_index,expected_z", [(0, 0.0), (1, 2.4)])
def test_prepare_pull_coordinates(tmp_path, dummy_complex, window_index, expected_z):
    """Pull coordinates must translate the guest along z by the window's
    fraction of the 24 A pull distance (window 1 of 2 -> 2.4 nm).
    """
    import mdtraj
    protocol = PreparePullCoordinates("")
    protocol.substance = dummy_complex
    protocol.complex_file_path = get_data_filename(
        os.path.join("test", "molecules", "methanol_methane.pdb")
    )
    protocol.guest_orientation_mask = "@7 @8"
    protocol.pull_distance = 24.0 * unit.angstrom
    protocol.pull_window_index = window_index
    protocol.n_pull_windows = 2
    protocol.execute(str(tmp_path))
    assert os.path.isfile(protocol.output_coordinate_path)
    host_trajectory = mdtraj.load_pdb(protocol.output_coordinate_path)
    assert host_trajectory.topology.n_atoms == 11
    # The first guest atom (index 6) lies on the z-axis at the expected pull
    # displacement (mdtraj coordinates are in nm).
    assert numpy.allclose(
        host_trajectory.xyz[0][6, :], numpy.array([0.0, 0.0, expected_z])
    )
    # The second orientation atom stays aligned with the z-axis (x = y = 0).
    assert numpy.allclose(host_trajectory.xyz[0][7, :2], numpy.zeros(2))
def test_add_dummy_atoms(tmp_path, dummy_complex):
    """``AddDummyAtoms`` must append three dummy particles to both the
    coordinate file and the serialized OpenMM system, shifting the structure
    by the requested 6 A offset.
    """
    import mdtraj
    # OpenMM >= 7.6 ships as `openmm`; older versions live under `simtk`.
    try:
        import openmm
        from openmm import unit as openmm_unit
    except ImportError:
        from simtk import openmm
        from simtk.openmm import unit as openmm_unit
    # Create an empty system to add the dummy atoms to.
    system_path = os.path.join(tmp_path, "input.xml")
    system = openmm.System()
    system.addForce(openmm.NonbondedForce())
    with open(system_path, "w") as file:
        file.write(openmm.XmlSerializer.serialize(system))
    protocol = AddDummyAtoms("release_add_dummy_atoms")
    protocol.substance = dummy_complex
    protocol.input_coordinate_path = get_data_filename(
        os.path.join("test", "molecules", "methanol_methane.pdb")
    )
    protocol.input_system = ParameterizedSystem(
        substance=dummy_complex,
        force_field=None,
        topology_path=get_data_filename(
            os.path.join("test", "molecules", "methanol_methane.pdb")
        ),
        system_path=system_path,
    )
    protocol.offset = 6.0 * unit.angstrom
    protocol.execute(str(tmp_path))
    # Validate that dummy atoms have been added to the configuration file
    # and the structure has been correctly shifted (coordinates below in nm).
    trajectory = mdtraj.load_pdb(protocol.output_coordinate_path)
    assert trajectory.topology.n_atoms == 14
    assert numpy.allclose(trajectory.xyz[0][11:12, :2], 2.5)
    assert numpy.isclose(trajectory.xyz[0][11, 2], 0.62)
    assert numpy.isclose(trajectory.xyz[0][12, 2], 0.32)
    assert numpy.isclose(trajectory.xyz[0][13, 0], 2.5)
    assert numpy.isclose(trajectory.xyz[0][13, 1], 2.72)
    assert numpy.isclose(trajectory.xyz[0][13, 2], 0.1)
    # Validate the atom / residue names (DUM atoms in residues DM1..DM3).
    all_atoms = [*trajectory.topology.atoms]
    dummy_atoms = all_atoms[11:14]
    assert all(atom.name == "DUM" for atom in dummy_atoms)
    assert all(dummy_atoms[i].residue.name == f"DM{i + 1}" for i in range(3))
    # Validate that the dummy atoms got added to the system with lead-like
    # masses (207 Da) and entries in the nonbonded force.
    with open(protocol.output_system.system_path) as file:
        system: openmm.System = openmm.XmlSerializer.deserialize(file.read())
    assert system.getNumParticles() == 3
    assert all(
        numpy.isclose(
            system.getParticleMass(i).value_in_unit(openmm_unit.dalton), 207.0
        )
        for i in range(3)
    )
    assert system.getNumForces() == 1
    assert system.getForce(0).getNumParticles() == 3
def validate_generated_restraints(restraints_path, expected_restraint_types, phase):
    """Assert that a serialized restraints file contains exactly the expected
    restraint types, and that every restraint defines the given APR phase.

    Parameters
    ----------
    restraints_path: str
        Path to the restraints JSON produced by a ``Generate*Restraints`` protocol.
    expected_restraint_types: set of str
        The restraint type names (e.g. ``{"static", "guest"}``) expected to be
        non-empty.
    phase: str
        The APR phase to validate: ``"attach"``, ``"pull"`` or ``"release"``.
    """
    restraints_dictionary = ApplyRestraints.load_restraints(restraints_path)
    # Drop restraint types for which no restraints were generated.
    restraints_dictionary = {
        restraint_type: restraints
        for restraint_type, restraints in restraints_dictionary.items()
        if len(restraints) > 0
    }
    assert {*restraints_dictionary} == expected_restraint_types
    restraints = [
        restraint
        for restraints in restraints_dictionary.values()
        for restraint in restraints
    ]
    # Every restraint must define the phase being validated. The previous
    # version hard-coded `"release"` in the membership check (a copy-paste
    # slip), which made it meaningless for the attach / pull phases.
    assert all(
        phase in restraint.phase and restraint.phase[phase] is not None
        for restraint in restraints
    )
def validate_system_file(system_path, expected_force_groups):
    """Assert that the serialized OpenMM system at ``system_path`` contains the
    restraint forces identified by ``expected_force_groups``.
    """
    # OpenMM >= 7.6 ships as `openmm`; older versions live under `simtk`.
    try:
        import openmm
    except ImportError:
        from simtk import openmm
    assert os.path.isfile(system_path)
    with open(system_path) as file:
        system: openmm.System = openmm.XmlSerializer.deserialize(file.read())
    # NOTE(review): the `+ 3` presumably accounts for three extra forces that
    # ApplyRestraints adds beyond the per-group restraint forces -- confirm
    # against the protocol implementation.
    assert system.getNumForces() == len(expected_force_groups) + 3
    # Group 15 is also accepted alongside the expected restraint groups.
    for force in system.getForces():
        assert force.getForceGroup() in {*expected_force_groups, 15}
def test_generate_attach_restraints(tmp_path, complex_file_path, restraints_schema):
    """Attach restraints must include all five restraint types from the schema."""
    protocol = GenerateAttachRestraints("")
    protocol.complex_coordinate_path = complex_file_path
    protocol.attach_lambdas = [0.0, 0.5, 1.0]
    protocol.restraint_schemas = restraints_schema
    protocol.execute(str(tmp_path))
    assert os.path.isfile(protocol.restraints_path)
    validate_generated_restraints(
        protocol.restraints_path,
        {"static", "conformational", "guest", "wall", "symmetry"},
        "attach",
    )
def test_generate_pull_restraints(tmp_path, complex_file_path, restraints_schema):
    """Pull restraints omit the attach-only wall / symmetry restraint types."""
    protocol = GeneratePullRestraints("")
    protocol.complex_coordinate_path = complex_file_path
    protocol.attach_lambdas = [0.0, 1.0]
    protocol.n_pull_windows = 2
    protocol.restraint_schemas = restraints_schema
    protocol.execute(str(tmp_path))
    assert os.path.isfile(protocol.restraints_path)
    validate_generated_restraints(
        protocol.restraints_path, {"static", "conformational", "guest"}, "pull"
    )
def test_generate_release_restraints(tmp_path, complex_file_path, restraints_schema):
    """Release restraints apply to the host only: static + conformational."""
    protocol = GenerateReleaseRestraints("")
    protocol.host_coordinate_path = complex_file_path
    protocol.release_lambdas = [1.0, 0.0]
    protocol.restraint_schemas = restraints_schema
    protocol.execute(str(tmp_path))
    assert os.path.isfile(protocol.restraints_path)
    validate_generated_restraints(
        protocol.restraints_path, {"static", "conformational"}, "release"
    )
def test_apply_attach_restraints(
    tmp_path, dummy_complex, complex_file_path, attach_restraints_path
):
    """Applying attach restraints must add forces in groups 10-14 (all five
    restraint types) to an empty system.
    """
    # OpenMM >= 7.6 ships as `openmm`; older versions live under `simtk`.
    try:
        import openmm
    except ImportError:
        from simtk import openmm
    # Serialize an empty system for the restraints to be added to.
    with open(os.path.join(tmp_path, "system.xml"), "w") as file:
        file.write(openmm.XmlSerializer.serialize(openmm.System()))
    protocol = ApplyRestraints("")
    protocol.restraints_path = attach_restraints_path
    protocol.input_coordinate_path = complex_file_path
    protocol.input_system = ParameterizedSystem(
        substance=dummy_complex,
        force_field=None,
        topology_path=complex_file_path,
        system_path=os.path.join(tmp_path, "system.xml"),
    )
    protocol.phase = "attach"
    protocol.window_index = 0
    protocol.execute(str(tmp_path))
    validate_system_file(protocol.output_system.system_path, {10, 11, 12, 13, 14})
def test_apply_pull_restraints(
    tmp_path, dummy_complex, complex_file_path, pull_restraints_path
):
    """Applying pull restraints must add forces in groups 10-12 (static,
    conformational, guest) to an empty system.
    """
    # OpenMM >= 7.6 ships as `openmm`; older versions live under `simtk`.
    try:
        import openmm
    except ImportError:
        from simtk import openmm
    # Serialize an empty system for the restraints to be added to.
    with open(os.path.join(tmp_path, "system.xml"), "w") as file:
        file.write(openmm.XmlSerializer.serialize(openmm.System()))
    protocol = ApplyRestraints("")
    protocol.restraints_path = pull_restraints_path
    protocol.input_coordinate_path = complex_file_path
    protocol.input_system = ParameterizedSystem(
        substance=dummy_complex,
        force_field=None,
        topology_path=complex_file_path,
        system_path=os.path.join(tmp_path, "system.xml"),
    )
    protocol.phase = "pull"
    protocol.window_index = 0
    protocol.execute(str(tmp_path))
    validate_system_file(protocol.output_system.system_path, {10, 11, 12})
def test_apply_release_restraints(
    tmp_path, dummy_complex, complex_file_path, release_restraints_path
):
    """Applying release restraints must add forces in groups 10-11 (static,
    conformational) to an empty system.
    """
    # OpenMM >= 7.6 ships as `openmm`; older versions live under `simtk`.
    try:
        import openmm
    except ImportError:
        from simtk import openmm
    # Serialize an empty system for the restraints to be added to.
    with open(os.path.join(tmp_path, "system.xml"), "w") as file:
        file.write(openmm.XmlSerializer.serialize(openmm.System()))
    protocol = ApplyRestraints("")
    protocol.restraints_path = release_restraints_path
    protocol.input_coordinate_path = complex_file_path
    protocol.input_system = ParameterizedSystem(
        substance=dummy_complex,
        force_field=None,
        topology_path=complex_file_path,
        system_path=os.path.join(tmp_path, "system.xml"),
    )
    protocol.phase = "release"
    protocol.window_index = 0
    protocol.execute(str(tmp_path))
    validate_system_file(protocol.output_system.system_path, {10, 11})
def test_compute_reference_work(tmp_path, complex_file_path):
    """The analytic reference pull work for a known restraint setup must match
    the regression value (7.141515 in the result's units) with zero error.
    """
    # Generate a dummy set of pull restraints: one distance restraint pulled
    # from 6 A to 24 A, plus two stiff angle restraints holding orientation.
    restraints_protocol = GeneratePullRestraints("")
    restraints_protocol.complex_coordinate_path = complex_file_path
    restraints_protocol.attach_lambdas = [0.0, 1.0]
    restraints_protocol.n_pull_windows = 2
    restraints_protocol.restraint_schemas = {
        "guest": [
            {
                "atoms": ":DM1 :7@C4",
                "attach": {"force_constant": 5, "target": 6},
                "pull": {"force_constant": 5, "target": 24},
            },
            {
                "atoms": ":DM2 :DM1 :7@C4",
                "attach": {"force_constant": 100, "target": 180},
                "pull": {"force_constant": 100, "target": 180},
            },
            {
                "atoms": ":DM1 :7@C4 :7@N1",
                "attach": {"force_constant": 100, "target": 180},
                "pull": {"force_constant": 100, "target": 180},
            },
        ]
    }
    restraints_protocol.execute(str(tmp_path))
    protocol = ComputeReferenceWork("")
    protocol.thermodynamic_state = ThermodynamicState(temperature=298.15 * unit.kelvin)
    protocol.restraints_path = restraints_protocol.restraints_path
    protocol.execute(str(tmp_path))
    assert protocol.result != UNDEFINED
    # The reference work is computed analytically, so it carries no error.
    assert numpy.isclose(protocol.result.error.magnitude, 0.0)
    assert numpy.isclose(protocol.result.value.magnitude, 7.141515)
@pytest.mark.parametrize("temperature", [298.15, 308.15])
@pytest.mark.parametrize("n_microstates", [1, 2])
def test_compute_symmetry_correction(temperature, n_microstates):
    """The symmetry correction must equal -kT * ln(n_microstates)."""
    protocol = ComputeSymmetryCorrection("")
    protocol.thermodynamic_state = ThermodynamicState(
        temperature=temperature * unit.kelvin
    )
    protocol.n_microstates = n_microstates
    protocol.execute()
    assert protocol.result != UNDEFINED
    # The correction is analytic, so it carries no statistical error.
    assert numpy.isclose(protocol.result.error.magnitude, 0.0)
    expected_value = -protocol.thermodynamic_state.inverse_beta * numpy.log(
        n_microstates
    )
    assert numpy.isclose(protocol.result.value, expected_value)
def test_analyse_apr(tmp_path, monkeypatch, complex_file_path):
    """``AnalyzeAPRPhase`` must negate the free energy returned by paprika's
    analysis and propagate its error unchanged.
    """
    import mdtraj
    from paprika import analyze
    # Generate a dummy set of attach restraints
    restraints_protocol = GenerateAttachRestraints("")
    restraints_protocol.complex_coordinate_path = complex_file_path
    restraints_protocol.attach_lambdas = [0.0, 1.0]
    restraints_protocol.restraint_schemas = {
        "guest": [
            {"atoms": ":DM1 @7", "attach": {"force_constant": 5, "target": 6}},
            {
                "atoms": ":DM2 :DM1 @7",
                "attach": {"force_constant": 100, "target": 180},
            },
            {"atoms": ":DM1 @7 @8", "attach": {"force_constant": 100, "target": 180}},
        ]
    }
    restraints_protocol.execute(str(tmp_path))
    # Create a set of trajectories to load (one per lambda window).
    trajectory_paths = [os.path.join(tmp_path, f"{i}.dcd") for i in range(2)]
    trajectory: mdtraj.Trajectory = mdtraj.load_pdb(complex_file_path)
    for trajectory_path in trajectory_paths:
        trajectory.save_dcd(trajectory_path)
    # Mock the paprika call so we don't need to generate sensible fake data.
    def mock_analyze_return(**_):
        return {"attach": {"ti-block": {"fe": 1.0, "sem": 2.0}}}
    # Patch paprika's `compute_phase_free_energy` with the mock above so the
    # protocol receives the canned free energy instead of a real analysis.
    monkeypatch.setattr(analyze, "compute_phase_free_energy", mock_analyze_return)
    protocol = AnalyzeAPRPhase("analyze_release_phase")
    protocol.topology_path = complex_file_path
    protocol.trajectory_paths = trajectory_paths
    protocol.phase = "attach"
    protocol.restraints_path = restraints_protocol.restraints_path
    protocol.execute(str(tmp_path))
    # fe = 1.0 is reported as -1.0 (sign flipped); sem passes through.
    assert numpy.isclose(protocol.result.value.magnitude, -1.0)
    assert numpy.isclose(protocol.result.error.magnitude, 2.0)
|
jaketanderson/openff-evaluator | openff/evaluator/workflow/attributes.py | <filename>openff/evaluator/workflow/attributes.py
"""
A collection of descriptors used to mark-up class fields which
hold importance to the workflow engine, such as the inputs or
outputs of workflow protocols.
"""
from enum import Enum
from openff.evaluator.attributes import UNDEFINED, Attribute
class BaseMergeBehaviour(Enum):
    """A base class for enums which describe how attributes should
    be handled when attempting to merge similar protocols.
    """
    pass
class MergeBehaviour(BaseMergeBehaviour):
    """An enum which describes how attributes should be handled when
    attempting to merge similar protocols.
    This enum may take values of
    * ExactlyEqual: This attribute must be exactly equal between two protocols for
      them to be able to merge.
    * Custom: This attribute will be ignored by the built-in merging code such that
      user specified behavior can be implemented.
    """
    ExactlyEqual = "ExactlyEqual"
    Custom = "Custom"
class InequalityMergeBehaviour(BaseMergeBehaviour):
    """An enum which describes how attributes which can be compared
    with inequalities should be merged.
    This enum may take values of
    * SmallestValue: When two protocols are merged, the smallest value of this
      attribute from either protocol is retained.
    * LargestValue: When two protocols are merged, the largest value of this
      attribute from either protocol is retained.
    """
    SmallestValue = "SmallestValue"
    LargestValue = "LargestValue"
class InputAttribute(Attribute):
    """A descriptor used to mark an attribute of an object as
    an input to that object.
    An attribute can either be set with a value directly, or it
    can also be set to a `ProtocolPath` to be set be the workflow
    manager.
    Examples
    ----------
    To mark an attribute as an input:
    >>> from openff.evaluator.attributes import AttributeClass
    >>> from openff.evaluator.workflow.attributes import InputAttribute
    >>>
    >>> class MyObject(AttributeClass):
    >>>
    >>>     my_input = InputAttribute(
    >>>         docstring='An input will be used.',
    >>>         type_hint=float,
    >>>         default_value=0.1
    >>>     )
    """
    def __init__(
        self,
        docstring,
        type_hint,
        default_value,
        optional=False,
        merge_behavior=MergeBehaviour.ExactlyEqual,
    ):
        """Initializes a new InputAttribute object.
        Parameters
        ----------
        merge_behavior: BaseMergeBehaviour
            An enum describing how this input should be handled when considering
            whether to, and actually merging two different objects.
        """
        docstring = f"**Input** - {docstring}"
        if not isinstance(merge_behavior, BaseMergeBehaviour):
            raise ValueError(
                "The merge behaviour must inherit from `BaseMergeBehaviour`"
            )
        # Append a description of the merge behaviour to the docstring so it
        # shows up in generated API documentation. The previous version
        # mistakenly tested `SmallestValue` twice, so SmallestValue inputs got
        # the "largest value" text and LargestValue inputs got an empty one.
        if merge_behavior == InequalityMergeBehaviour.SmallestValue:
            merge_docstring = (
                "When two protocols are merged, the smallest value of "
                "this attribute from either protocol is retained."
            )
            docstring = f"{docstring} {merge_docstring}"
        elif merge_behavior == InequalityMergeBehaviour.LargestValue:
            merge_docstring = (
                "When two protocols are merged, the largest value of "
                "this attribute from either protocol is retained."
            )
            docstring = f"{docstring} {merge_docstring}"
        super().__init__(docstring, type_hint, default_value, optional)
        self.merge_behavior = merge_behavior
class OutputAttribute(Attribute):
    """A descriptor used to mark an attribute of an object as
    an output of that object. This attribute is expected
    to be populated by the object itself, rather than be
    set externally.
    Examples
    ----------
    To mark an attribute as an output:
    >>> from openff.evaluator.attributes import AttributeClass
    >>> from openff.evaluator.workflow.attributes import OutputAttribute
    >>>
    >>> class MyObject(AttributeClass):
    >>>
    >>>     my_output = OutputAttribute(
    >>>         docstring='An output that will be filled.',
    >>>         type_hint=float
    >>>     )
    """
    def __init__(self, docstring, type_hint):
        """Initializes a new OutputAttribute object."""
        # Outputs are never optional and always start undefined; the object
        # populates them during execution.
        docstring = f"**Output** - {docstring}"
        super().__init__(docstring, type_hint, UNDEFINED, optional=False)
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_datasets/test_curation/test_thermoml.py | <reponame>jaketanderson/openff-evaluator
import os
import tarfile
from tempfile import NamedTemporaryFile
import pandas
from openff.evaluator.datasets.curation.components.thermoml import (
ImportThermoMLData,
ImportThermoMLDataSchema,
)
from openff.evaluator.utils import get_data_filename
def test_import_thermoml_data(requests_mock):
    """Tests that ThermoML archive files can be imported from a
    remote source."""
    # Create a tarball to be downloaded, mimicking the layout of the real
    # archive (files nested under a DOI-prefix directory).
    source_path = get_data_filename(os.path.join("test", "properties", "mass.xml"))
    with NamedTemporaryFile(suffix=".tgz") as tar_file:
        with tarfile.open(tar_file.name, "w:gz") as tar:
            tar.add(
                source_path,
                arcname=os.path.join("10.1021", os.path.basename(source_path)),
            )
        schema = ImportThermoMLDataSchema()
        # Serve the tarball from the schema's archive URL instead of the network.
        with open(tar_file.name, "rb") as file:
            requests_mock.get(schema.root_archive_url, content=file.read())
        data_frame = ImportThermoMLData.apply(pandas.DataFrame(), schema)
        # The single property in mass.xml should yield a one-row data frame.
        assert data_frame is not None and len(data_frame) == 1
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_workflow/test_utils.py | <gh_stars>10-100
from openff.evaluator.workflow.utils import ProtocolPath
def test_protocol_path_id_replacement():
    """Checks that replacing a protocol id within a protocol path only
    substitutes ids which match in full."""

    protocol_path = ProtocolPath("", "protocol_id_1", "protocol_id_11")
    assert protocol_path.full_path == "protocol_id_1/protocol_id_11."

    # Partial matches (a prefix or a truncated id) must leave the path
    # untouched.
    for partial_id in ("protocol_id_", "rotocol_id_1"):
        protocol_path.replace_protocol(partial_id, "new_id_1")
        assert protocol_path.full_path == "protocol_id_1/protocol_id_11."

    # Only an exact, full match should be replaced.
    protocol_path.replace_protocol("protocol_id_1", "new_id_1")
    assert protocol_path.full_path == "new_id_1/protocol_id_11."
|
jaketanderson/openff-evaluator | openff/evaluator/substances/__init__.py | from .amounts import Amount, ExactAmount, MoleFraction # isort:skip
from .components import Component # isort:skip
from .substances import Substance # isort:skip
__all__ = [Amount, ExactAmount, MoleFraction, Component, Substance]
|
jaketanderson/openff-evaluator | openff/evaluator/datasets/curation/components/freesolv.py | import io
import logging
import re
from typing import List, Union
import pandas
import requests
from typing_extensions import Literal
from openff.evaluator.datasets import (
MeasurementSource,
PhysicalPropertyDataSet,
PropertyPhase,
)
from openff.evaluator.datasets.curation.components import (
CurationComponent,
CurationComponentSchema,
)
from openff.evaluator.properties import SolvationFreeEnergy
from openff.evaluator.substances import Component, ExactAmount, MoleFraction, Substance
from openff.evaluator.thermodynamics import ThermodynamicState
logger = logging.getLogger(__name__)
class ImportFreeSolvSchema(CurationComponentSchema):
    # Discriminator used when (de)serializing curation workflow schemas.
    type: Literal["ImportFreeSolv"] = "ImportFreeSolv"
class ImportFreeSolv(CurationComponent):
    """A component which will import the latest version of the FreeSolv
    data set from the GitHub repository where it is stored.
    """

    @classmethod
    def _download_free_solv(cls) -> pandas.DataFrame:
        """Downloads the FreeSolv data set from GitHub.

        Returns
        -------
            The Free Solv data stored in a pandas data frame.
        """
        # Download the database from GitHub.
        download_request = requests.get(
            "https://raw.githubusercontent.com/MobleyLab/FreeSolv/master/database.txt"
        )
        download_request.raise_for_status()

        text_contents = download_request.text

        # Unify the delimiter ('; ' -> ';') so the file parses as simple
        # semicolon-separated values.
        text_contents = text_contents.replace("; ", ";")

        # Convert the set to a pandas object, skipping the two header lines.
        text_buffer = io.StringIO(text_contents)
        free_solv_data_frame = pandas.read_csv(text_buffer, delimiter=";", skiprows=2)

        return free_solv_data_frame

    @classmethod
    def _validate_doi(cls, doi: str):
        """Attempts to validate a string which may contain a (or multiple)
        digital object identifier. If a valid DOI is not found, the FreeSolv
        DOI itself is returned."""

        # The DOI of the FreeSolv set itself, used when no valid DOI is found.
        fall_back_doi = "10.5281/zenodo.596537"

        # From https://www.crossref.org/blog/dois-and-matching-regular-expressions/
        doi_patterns = [
            r"^10.\d{4,9}/[-._;()/:A-Z0-9]+$",
            r"^10.1002/[^\s]+$",
            r"^10.\d{4}/\d+-\d+X?(\d+)\d+<[\d\w]+:[\d\w]*>\d+.\d+.\w+;\d$",
            r"^10.1021/\w\w\d+$",
            r"^10.1207/[\w\d]+\&\d+_\d+$",
        ]

        # Split the string to try and catch concatenated DOIs.
        doi_split = doi.split(" and ")
        matched_dois: List[str] = []

        for split_doi in doi_split:
            matched_doi = None

            # Stop at the first pattern which matches (case-insensitively).
            for doi_pattern in doi_patterns:
                regex_match = re.match(doi_pattern, split_doi, re.I)

                if not regex_match:
                    continue

                matched_doi = regex_match.group()
                break

            if not isinstance(matched_doi, str):
                continue

            matched_dois.append(matched_doi)

        # Join any matched DOIs together, falling back to the data set DOI
        # when nothing matched.
        final_doi = (
            fall_back_doi if len(matched_dois) == 0 else " + ".join(matched_dois)
        )

        return final_doi

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: ImportFreeSolvSchema,
        n_processes,
    ) -> pandas.DataFrame:
        from openff.units import unit

        from openff.evaluator import properties, substances

        # Convert the data frame into data rows.
        free_solv_data_frame = cls._download_free_solv()

        data_entries = []

        for _, row in free_solv_data_frame.iterrows():
            # Extract and standardize the SMILES pattern of the solute.
            solute_smiles = row["SMILES"].lstrip().rstrip()
            solute_smiles = substances.Component(solute_smiles).smiles

            # Build the substance: the solute at an exact amount of one
            # molecule dissolved in water.
            substance = Substance()
            substance.add_component(Component(smiles="O"), MoleFraction(1.0))
            substance.add_component(
                Component(smiles=solute_smiles, role=Component.Role.Solute),
                ExactAmount(1),
            )

            # Extract the value and uncertainty.
            value = (
                float(row["experimental value (kcal/mol)"])
                * unit.kilocalorie
                / unit.mole
            )
            std_error = (
                float(row["experimental uncertainty (kcal/mol)"])
                * unit.kilocalorie
                / unit.mole
            )

            # Attempt to extract a DOI from the free-text reference column.
            original_source = row[
                "experimental reference (original or paper this value was taken from)"
            ]
            doi = cls._validate_doi(original_source)

            # All FreeSolv measurements are reported at ambient conditions.
            data_entry = SolvationFreeEnergy(
                thermodynamic_state=ThermodynamicState(
                    temperature=298.15 * unit.kelvin,
                    pressure=101.325 * unit.kilopascal,
                ),
                phase=PropertyPhase.Liquid,
                substance=substance,
                value=value.to(properties.SolvationFreeEnergy.default_unit()),
                uncertainty=std_error.to(properties.SolvationFreeEnergy.default_unit()),
                source=MeasurementSource(doi=doi),
            )
            data_entries.append(data_entry)

        # Round-trip through a data set object to obtain a frame in the
        # standard evaluator layout, then append it to the input frame.
        data_set = PhysicalPropertyDataSet()
        data_set.add_properties(*data_entries)

        free_solv_data_frame = data_set.to_pandas()

        data_frame = pandas.concat(
            [data_frame, free_solv_data_frame], ignore_index=True, sort=False
        )

        return data_frame
# The union of all FreeSolv curation component schemas (currently one member).
FreeSolvComponentSchema = Union[ImportFreeSolvSchema]
|
jaketanderson/openff-evaluator | openff/evaluator/layers/simulation.py | """A calculation layer which employs molecular simulation
to estimate sets of physical properties.
"""
from openff.evaluator.layers import calculation_layer
from openff.evaluator.layers.workflow import (
WorkflowCalculationLayer,
WorkflowCalculationSchema,
)
class SimulationSchema(WorkflowCalculationSchema):
    """A schema which encodes the options and the workflow schema
    that the `SimulationLayer` should use when estimating a given class
    of physical properties using the built-in workflow framework.

    All options are currently inherited from `WorkflowCalculationSchema`.
    """

    pass
@calculation_layer()
class SimulationLayer(WorkflowCalculationLayer):
    """A calculation layer which employs molecular simulation
    to estimate sets of physical properties.
    """

    @classmethod
    def required_schema_type(cls):
        # Estimation requests routed to this layer must supply a
        # `SimulationSchema` describing the workflow to run.
        return SimulationSchema
|
jaketanderson/openff-evaluator | openff/evaluator/properties/__init__.py | from .binding import HostGuestBindingAffinity
from .density import Density, ExcessMolarVolume
from .dielectric import DielectricConstant
from .enthalpy import EnthalpyOfMixing, EnthalpyOfVaporization
from .solvation import SolvationFreeEnergy
__all__ = [
HostGuestBindingAffinity,
Density,
ExcessMolarVolume,
DielectricConstant,
EnthalpyOfMixing,
EnthalpyOfVaporization,
SolvationFreeEnergy,
]
|
jaketanderson/openff-evaluator | openff/evaluator/tests/conftest.py | <reponame>jaketanderson/openff-evaluator
"""A temporary fix for ensuring processes run correctly
within pytest.
"""
def pytest_configure(config):
    """Flag that the package is currently being exercised by pytest so
    that process handling can be adjusted accordingly."""
    from openff import evaluator

    setattr(evaluator, "_called_from_test", True)
def pytest_unconfigure(config):
    """Remove the test flag once the pytest session has finished."""
    from openff import evaluator

    delattr(evaluator, "_called_from_test")
|
jaketanderson/openff-evaluator | openff/evaluator/datasets/curation/components/selection.py | import functools
import itertools
from enum import Enum
from typing import TYPE_CHECKING, List, Set, Tuple, Union
import numpy
import pandas
from pydantic import BaseModel, Field, conlist, validator
from typing_extensions import Literal
from openff.evaluator.datasets.curation.components import (
CurationComponent,
CurationComponentSchema,
)
from openff.evaluator.datasets.curation.components.filtering import (
FilterByEnvironments,
FilterByEnvironmentsSchema,
FilterBySubstances,
FilterBySubstancesSchema,
)
from openff.evaluator.datasets.utilities import (
data_frame_to_substances,
reorder_data_frame,
)
from openff.evaluator.utils.checkmol import ChemicalEnvironment
from openff.evaluator.utils.exceptions import MissingOptionalDependency
PropertyType = Tuple[str, int]
if TYPE_CHECKING:
PositiveInt = int
try:
from openeye.oegraphsim import OEFingerPrint
except ImportError:
OEFingerPrint = None
else:
from pydantic import PositiveInt
class State(BaseModel):
    """A single thermodynamic state point (temperature, pressure and
    composition) of interest."""

    temperature: float = Field(..., description="The temperature (K) of interest.")
    pressure: float = Field(..., description="The pressure (kPa) of interest.")

    mole_fractions: Tuple[float, ...] = Field(
        ..., description="The composition of interest."
    )
class TargetState(BaseModel):
    """A set of property types and the state points at which data for
    those properties should ideally be selected."""

    property_types: List[PropertyType] = Field(
        ..., description="The properties to select at the specified states."
    )
    states: List[State] = Field(
        ..., description="The states at which data points should be selected."
    )

    # NOTE(review): pydantic v1 documentation applies ``@validator`` as the
    # outer-most decorator - confirm that this validator still fires with
    # ``@classmethod`` stacked on top of it.
    @classmethod
    @validator("property_types")
    def property_types_validator(cls, value):
        # Every property type within a target state must be measured for the
        # same number of components.
        assert len(value) > 0

        n_components = value[0][1]
        assert all(x[1] == n_components for x in value)

        return value
class FingerPrintType(Enum):
    """The OpenEye molecular finger print types supported by the substance
    selection components."""

    Tree = "Tree"
    MACCS166 = "MACCS166"
class SelectSubstancesSchema(CurationComponentSchema):
    # Discriminator used when (de)serializing curation workflow schemas.
    type: Literal["SelectSubstances"] = "SelectSubstances"

    target_environments: conlist(ChemicalEnvironment, min_items=1) = Field(
        ...,
        description="The chemical environments which selected substances should "
        "contain.",
    )
    n_per_environment: PositiveInt = Field(
        ...,
        description="The number of substances to ideally select for each chemical "
        "environment of interest. In the case of pure substances, this will be the "
        "number of substances to choose per environment specified in "
        "`target_environments`. For binary mixtures, this will be the number of "
        "substances to select for each pair of environments constructed from the "
        "`target_environments` list and so on.",
    )
    substances_to_exclude: List[Tuple[str, ...]] = Field(
        default_factory=list,
        description="The substances to 1) filter from the available data before "
        "selecting substances and 2) to penalize similarity to. This field is "
        "mainly expected to be used when creating a test set which is distinct from "
        "a training set.",
    )
    finger_print_type: FingerPrintType = Field(
        FingerPrintType.Tree,
        description="The type of finger print to use in the distance metrics.",
    )
    per_property: bool = Field(
        ...,
        description="Whether the selection algorithm should be run once per "
        "property (e.g. select substances for pure densities, and then select "
        "substances for pure enthalpies of vaporization), or whether to run it "
        "once for the whole data set without consideration for if the selected "
        "substances have data points available for each property type in the set. "
        "This option should usually be set to false when the data set to select "
        "from strictly contains data points for each type of property for the same"
        "set of systems, and true otherwise.",
    )
class SelectSubstances(CurationComponent):
    """A component for selecting a specified number data points which were
    measured for systems containing a specified set of chemical functionalities.
    """

    @classmethod
    def _check_oe_available(cls):
        """Check if the `oechem` and `oegraphsim` modules are available for import.

        Raises
        -------
        MissingOptionalDependency
        """
        try:
            from openeye import oechem, oegraphsim
        except ImportError as e:
            raise MissingOptionalDependency(e.path, False)

        # Report the first of the two libraries which is importable but
        # unlicensed (if any).
        unlicensed_library = (
            "openeye.oechem"
            if not oechem.OEChemIsLicensed()
            else "openeye.oegraphsim"
            if not oegraphsim.OEGraphSimIsLicensed()
            else None
        )

        if unlicensed_library is not None:
            raise MissingOptionalDependency(unlicensed_library, True)

    @classmethod
    @functools.lru_cache(3000)
    def _compute_molecule_finger_print(
        cls, smiles: str, finger_print_type: FingerPrintType
    ) -> "OEFingerPrint":
        """Computes the finger print for a given molecule
        using the OpenEye toolkit.

        Parameters
        ----------
        smiles
            The smiles pattern to generate a finger print for.
        finger_print_type
            The type of finger print to generate.

        Returns
        -------
            The generated finger print.
        """
        from openeye.oegraphsim import (
            OEFingerPrint,
            OEFPType_MACCS166,
            OEFPType_Tree,
            OEMakeFP,
        )
        from openff.toolkit.topology import Molecule

        oe_molecule = Molecule.from_smiles(smiles).to_openeye()

        # Map the schema enum onto the corresponding OpenEye constant.
        if finger_print_type == FingerPrintType.Tree:
            oe_finger_print_type = OEFPType_Tree
        elif finger_print_type == FingerPrintType.MACCS166:
            oe_finger_print_type = OEFPType_MACCS166
        else:
            raise NotImplementedError()

        finger_print = OEFingerPrint()
        OEMakeFP(finger_print, oe_molecule, oe_finger_print_type)

        return finger_print

    @classmethod
    def _compute_mixture_finger_print(
        cls, mixture: Tuple[str, ...], finger_print_type: FingerPrintType
    ) -> Tuple["OEFingerPrint", ...]:
        """Computes the finger print of a mixture of molecules as defined
        by their smiles patterns.

        Parameters
        ----------
        mixture
            The smiles patterns of the molecules in the mixture.
        finger_print_type
            The type of finger print to generate.

        Returns
        -------
        tuple of OEFingerPrint
            The finger print of each molecule in the mixture.
        """
        # The per-molecule finger prints are cached by
        # `_compute_molecule_finger_print`.
        mixture_finger_print = tuple(
            cls._compute_molecule_finger_print(x, finger_print_type) for x in mixture
        )

        return mixture_finger_print

    @classmethod
    @functools.lru_cache(3000)
    def _compute_distance(
        cls,
        mixture_a: Tuple[str, ...],
        mixture_b: Tuple[str, ...],
        finger_print_type: FingerPrintType,
    ):
        """Computes the 'distance' between two mixtures based on
        their finger prints.

        The distance is defined as the minimum of

        - the OETanimoto distance between component a of mixture a and
          component a of mixture b + the OETanimoto distance between
          component b of mixture a and component b of mixture b

        and

        - the OETanimoto distance between component b of mixture a and
          component a of mixture b + the OETanimoto distance between
          component a of mixture a and component b of mixture b

        Parameters
        ----------
        mixture_a
            The smiles patterns of the molecules in mixture a.
        mixture_b
            The smiles patterns of the molecules in mixture b.
        finger_print_type
            The type of finger print to base the distance metric
            on.

        Returns
        -------
        float
            The distance between the mixtures
        """
        from openeye.oegraphsim import OETanimoto

        # Identical mixtures (irrespective of component ordering) are zero
        # distance apart.
        if sorted(mixture_a) == sorted(mixture_b):
            return 0.0

        finger_print_a = cls._compute_mixture_finger_print(mixture_a, finger_print_type)
        finger_print_b = cls._compute_mixture_finger_print(mixture_b, finger_print_type)

        if len(mixture_a) == 1 and len(mixture_b) == 1:
            distance = 1.0 - OETanimoto(finger_print_a[0], finger_print_b[0])
        elif len(mixture_a) == 2 and len(mixture_b) == 2:
            # Consider both possible pairings of the binary components and
            # take the closer of the two.
            distance = min(
                (1.0 - OETanimoto(finger_print_a[0], finger_print_b[0]))
                + (1.0 - OETanimoto(finger_print_a[1], finger_print_b[1])),
                (1.0 - OETanimoto(finger_print_a[1], finger_print_b[0]))
                + (1.0 - OETanimoto(finger_print_a[0], finger_print_b[1])),
            )
        else:
            raise NotImplementedError()

        return distance

    @classmethod
    def _compute_distance_with_set(
        cls,
        mixture,
        mixtures: List[Tuple[str, ...]],
        finger_print_type: FingerPrintType,
    ) -> float:
        """Computes the distances between a given mixture and a set of other mixtures.

        This is computed as the sum of `_compute_distance(mixture, mixtures[i])`
        for all i in `mixtures`.

        Parameters
        ----------
        mixture
            The mixture to compute the distances from.
        mixtures
            The set of mixtures to compare with `mixture`.
        finger_print_type: OEFPTypeBase
            The type of finger print to base the distance metric
            on.

        Returns
        -------
            The calculated distance.
        """
        distance = sum(
            cls._compute_distance(mixture, x, finger_print_type) for x in mixtures
        )
        return distance

    @classmethod
    def _select_substances(
        cls,
        data_frame: pandas.DataFrame,
        n_substances: int,
        previously_chosen: List[Tuple[str, ...]],
        finger_print_type: FingerPrintType,
    ) -> List[Tuple[str, ...]]:
        """Greedily selects up to ``n_substances`` substances from the data
        frame, at each step choosing the candidate least similar to those
        already chosen (including ``previously_chosen``)."""

        # Store the substances which can be selected, and those which
        # have already been selected.
        open_list = [*data_frame_to_substances(data_frame)]
        closed_list = []

        # Determine the maximum number of substances which can be selected.
        max_n_possible = min(len(open_list), n_substances)

        while len(open_list) > 0 and len(closed_list) < max_n_possible:

            def distance_metric(mixture):
                return cls._compute_distance_with_set(
                    mixture, [*previously_chosen, *closed_list], finger_print_type
                )

            # Move the least similar (largest distance) candidate from the
            # open list onto the closed list.
            least_similar = sorted(open_list, key=distance_metric, reverse=True)[0]

            open_list.remove(least_similar)
            closed_list.append(least_similar)

        return closed_list

    @classmethod
    def _apply_to_data_frame(cls, data_frame, schema, n_processes):
        """Applies the selection algorithm to a specified data frame.

        Parameters
        ----------
        data_frame
            The data frame to apply the algorithm to.
        schema
            This component schema.
        n_processes
            The number of processes available to the component.
        """
        selected_substances = []

        min_n_components = data_frame["N Components"].min()
        max_n_components = data_frame["N Components"].max()

        # Perform the selection one for each size of substance (e.g. once
        # for pure, once for binary etc.)
        for n_components in range(min_n_components, max_n_components + 1):

            component_data = data_frame[data_frame["N Components"] == n_components]

            if len(component_data) == 0:
                continue

            # Define all permutations of the target environments.
            if n_components == 1:
                chemical_environments = [(x,) for x in schema.target_environments]
            elif n_components == 2:
                # Self-pairs plus all unordered pairs of distinct environments.
                chemical_environments = [
                    *[(x, x) for x in schema.target_environments],
                    *itertools.combinations(schema.target_environments, r=2),
                ]
            else:
                raise NotImplementedError()

            # Keep a track of the selected substances
            selected_n_substances: Set[Tuple[str, ...]] = set()

            for chemical_environment in chemical_environments:

                # Filter out any environments not currently being considered.
                environment_filter = FilterByEnvironmentsSchema(
                    per_component_environments={
                        n_components: [[x] for x in chemical_environment]
                    }
                )
                environment_data = FilterByEnvironments.apply(
                    component_data, environment_filter, n_processes
                )

                if len(environment_data) == 0:
                    continue

                # Define the substances which the newly selected substance
                # should be unique to.
                substances_to_penalize = {
                    *selected_n_substances,
                    *[
                        x
                        for x in schema.substances_to_exclude
                        if len(x) == n_components
                    ],
                }

                environment_selected_substances = cls._select_substances(
                    environment_data,
                    schema.n_per_environment,
                    [*substances_to_penalize],
                    schema.finger_print_type,
                )

                selected_n_substances.update(environment_selected_substances)

                # Remove the newly selected substances from the pool to select from.
                component_data = FilterBySubstances.apply(
                    component_data,
                    FilterBySubstancesSchema(
                        substances_to_exclude=environment_selected_substances
                    ),
                    n_processes=n_processes,
                )

            selected_substances.extend(selected_n_substances)

        # Filter the data frame to retain only the selected substances.
        data_frame = FilterBySubstances.apply(
            data_frame,
            FilterBySubstancesSchema(substances_to_include=selected_substances),
            n_processes=n_processes,
        )

        return data_frame

    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: SelectSubstancesSchema, n_processes
    ) -> pandas.DataFrame:
        # Make sure OpenEye is available for computing the finger prints.
        cls._check_oe_available()

        # Filter out any substances which should be excluded
        data_frame = FilterBySubstances.apply(
            data_frame,
            FilterBySubstancesSchema(
                substances_to_exclude=schema.substances_to_exclude
            ),
            n_processes=n_processes,
        )

        max_n_components = data_frame["N Components"].max()

        # Only pure and binary systems are currently supported.
        if max_n_components > 2:
            raise NotImplementedError()

        if schema.per_property:
            # Partition the data frame into ones which only contain a
            # single property type.
            property_headers = [
                header for header in data_frame if header.find(" Value ") >= 0
            ]
            data_frames_to_filter = [
                data_frame[data_frame[header].notna()] for header in property_headers
            ]
        else:
            data_frames_to_filter = [data_frame]

        # Run the selection over each partition and re-combine the results.
        filtered_data_frames = [
            cls._apply_to_data_frame(filtered_data_frame, schema, n_processes)
            for filtered_data_frame in data_frames_to_filter
        ]

        if len(filtered_data_frames) == 1:
            filtered_data_frame = filtered_data_frames[0]
        else:
            filtered_data_frame = pandas.concat(
                filtered_data_frames, ignore_index=True, sort=False
            )

        return filtered_data_frame
class SelectDataPointsSchema(CurationComponentSchema):
    # Discriminator used when (de)serializing curation workflow schemas.
    type: Literal["SelectDataPoints"] = "SelectDataPoints"

    target_states: List[TargetState] = Field(
        ...,
        description="A list of the target states for which we would ideally include "
        "data points for (e.g. density data points measured at ambient conditions, or "
        "for density AND enthalpy of mixing measurements made for systems with a "
        "roughly 50:50 composition).",
    )
class SelectDataPoints(CurationComponent):
    """A component for selecting a set of data points which are
    measured as close as possible to a particular set of states.

    The points will be chosen so as to try and maximise the number of
    properties measured at the same condition (e.g. ideally we would
    have a data point for each property at T=298.15 and p=1atm) as this
    will maximise the chances that we can extract all properties from a
    single simulation.
    """

    @classmethod
    def _property_header(cls, data_frame, property_type):
        # Returns the column header holding the values of the specified
        # property type, or None if no such column exists.
        for column in data_frame:
            if column.find(f"{property_type} Value") < 0:
                continue

            return column

        return None

    @classmethod
    def _distances_to_state(cls, data_frame: pandas.DataFrame, state_point: State):
        # Squared 'distance' of every row from the target state. Pressures
        # are divided by 10 to down-weight their contribution relative to
        # the temperature and composition terms.
        distance_sqr = (
            data_frame["Temperature (K)"] - state_point.temperature
        ) ** 2 + (
            data_frame["Pressure (kPa)"] / 10.0 - state_point.pressure / 10.0
        ) ** 2

        for component_index in range(len(state_point.mole_fractions)):
            distance_sqr += (
                data_frame[f"Mole Fraction {component_index + 1}"]
                - state_point.mole_fractions[component_index]
            ) ** 2

        return distance_sqr

    @classmethod
    def _distances_to_cluster(
        cls, data_frame: pandas.DataFrame, target_state: TargetState
    ):
        # Builds a frame with one column per target state containing the
        # squared distance of every row to that state.
        distances_sqr = pandas.DataFrame()

        for index, state_point in enumerate(target_state.states):
            distances_sqr[index] = cls._distances_to_state(data_frame, state_point)

        return distances_sqr

    @classmethod
    def _select_substance_data_points(cls, original_data_frame, target_state):
        """Selects, for a single substance, the data points closest to the
        states of ``target_state``, preferring states where the most property
        types were measured together."""

        # Only consider rows with the number of components targeted here.
        n_components = target_state.property_types[0][1]

        data_frame = original_data_frame[
            original_data_frame["N Components"] == n_components
        ].copy()

        # Tag each row with the property type its value column corresponds to.
        data_frame["Property Type"] = ""

        property_types = [x[0] for x in target_state.property_types]

        for property_type in property_types:
            property_header = cls._property_header(data_frame, property_type)

            if not property_header:
                continue

            data_frame.loc[
                data_frame[property_header].notna(), "Property Type"
            ] = property_type

        # Round the state columns so that near-identical states group together.
        data_frame["Temperature (K)"] = data_frame["Temperature (K)"].round(2)
        data_frame["Pressure (kPa)"] = data_frame["Pressure (kPa)"].round(1)

        for index in range(n_components):
            data_frame[f"Mole Fraction {index + 1}"] = data_frame[
                f"Mole Fraction {index + 1}"
            ].round(3)

        # Compute the distance to each cluster
        distances = cls._distances_to_cluster(data_frame, target_state)
        # Assign each row to its nearest target state.
        data_frame["Cluster"] = distances.idxmin(axis=1)

        cluster_headers = [
            "Temperature (K)",
            "Pressure (kPa)",
            *[f"Mole Fraction {index + 1}" for index in range(n_components)],
        ]

        # Compute how may data points are present for each state in the different
        # clusters.
        grouped_data = data_frame.groupby(
            by=[*cluster_headers, "Cluster"],
            as_index=False,
        ).agg({"Property Type": pandas.Series.nunique})

        # Boolean mask (per row of `data_frame`) of the points chosen so far.
        selected_data = [False] * len(data_frame)

        for cluster_index in range(len(target_state.states)):

            # Calculate the distance between each clustered state and
            # the center of the cluster (i.e the clustered state).
            cluster_data = grouped_data[grouped_data["Cluster"] == cluster_index]
            # NOTE(review): `cluster_data` is a slice of `grouped_data`, so
            # this assignment may trigger a pandas SettingWithCopyWarning -
            # consider taking a `.copy()` first.
            cluster_data["Distance"] = cls._distances_to_state(
                cluster_data, target_state.states[cluster_index]
            )

            if len(cluster_data) == 0:
                continue

            # The property types still needing a data point for this cluster.
            open_list = [x[0] for x in target_state.property_types]

            while len(open_list) > 0 and len(cluster_data) > 0:

                # Find the clustered state which is closest to the center of
                # the cluster. Points measured at this state will becomes the
                # candidates to be selected.
                sorted_cluster_data = cluster_data.sort_values(
                    by=["Property Type", "Distance"], ascending=[False, True]
                )

                closest_index = sorted_cluster_data.index[0]

                # Find the data points which were measured at the clustered state.
                select_data = data_frame["Property Type"].isin(open_list)

                for cluster_header in cluster_headers:
                    select_data = select_data & numpy.isclose(
                        data_frame[cluster_header],
                        sorted_cluster_data.loc[closest_index, cluster_header],
                    )

                selected_property_types = data_frame[select_data][
                    "Property Type"
                ].unique()

                # Make sure to select a single data point for each type of property.
                for selected_property_type in selected_property_types:

                    selected_property_data = original_data_frame[
                        select_data
                        & (data_frame["Property Type"] == selected_property_type)
                    ]

                    if len(selected_property_data) <= 1:
                        continue

                    # Multiple data points were measured for this property type
                    # at the clustered state. We sort these multiple data points
                    # by their distance to the target state and select the closest.
                    # This is not guaranteed to be optimal but should be an ok
                    # approximation in most cases.
                    selected_data_distances = cls._distances_to_state(
                        selected_property_data, target_state.states[cluster_index]
                    )
                    sorted_data_distances = selected_data_distances.sort_values(
                        ascending=True
                    )

                    select_data[sorted_data_distances.index] = False
                    select_data[sorted_data_distances.index[0]] = True

                selected_data = selected_data | select_data

                # Mark the property types covered by this state as satisfied.
                for property_type in data_frame[select_data]["Property Type"].unique():
                    open_list.remove(property_type)

                cluster_data = cluster_data.drop(closest_index)

        if len(selected_data) == 0:
            return pandas.DataFrame()

        return original_data_frame[selected_data]

    @classmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema: SelectDataPointsSchema, n_processes
    ) -> pandas.DataFrame:
        max_n_substances = data_frame["N Components"].max()
        component_headers = [f"Component {i + 1}" for i in range(max_n_substances)]

        # Re-order the data frame so that the components are alphabetically sorted.
        # This will make it easier to find unique substances.
        ordered_data_frame = reorder_data_frame(data_frame)

        # Find all of the unique substances in the data frame.
        unique_substances = ordered_data_frame[component_headers].drop_duplicates()

        selected_data = []

        # Start to choose the state points for each unique substance.
        for _, unique_substance in unique_substances.iterrows():

            substance_data_frame = ordered_data_frame

            # Restrict the frame to rows whose components exactly match this
            # substance (NaN component columns match rows with fewer components).
            for index, component in enumerate(unique_substance[component_headers]):

                if pandas.isnull(component):
                    substance_data_frame = substance_data_frame[
                        substance_data_frame[component_headers[index]].isna()
                    ]
                else:
                    substance_data_frame = substance_data_frame[
                        substance_data_frame[component_headers[index]] == component
                    ]

            for target_state in schema.target_states:

                substance_selected_data = cls._select_substance_data_points(
                    substance_data_frame, target_state
                )

                if len(substance_selected_data) == 0:
                    continue

                selected_data.append(substance_selected_data)

        # NOTE(review): `pandas.concat` raises a ValueError when
        # `selected_data` is empty - confirm callers never pass a frame with
        # no selectable points.
        selected_data = pandas.concat(selected_data, ignore_index=True, sort=False)

        return selected_data
SelectionComponentSchema = Union[SelectSubstancesSchema, SelectDataPointsSchema]
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_utils/test_execeptions.py | <gh_stars>10-100
"""
Units tests for openff.evaluator.utils.exceptions
"""
from openff.evaluator.utils import exceptions
def test_estimator_exceptions():
    """Test estimator, json based exceptions."""

    original_exception = exceptions.EvaluatorException(message="dummy_message")
    state = original_exception.__getstate__()

    # Only the message should be captured in the serialized state.
    assert len(state) == 1
    assert "message" in state

    restored_exception = exceptions.EvaluatorException()
    restored_exception.__setstate__(state)

    assert original_exception.message == restored_exception.message
|
jaketanderson/openff-evaluator | openff/evaluator/storage/attributes.py | <reponame>jaketanderson/openff-evaluator
"""
A collection of descriptors used to add extra metadata
to storage class attributes.
"""
from openff.evaluator.attributes import UNDEFINED, Attribute
class FilePath(str):
    """Represents a string file path.

    Used as a type hint so storage attributes can distinguish file
    references from plain strings.
    """

    pass
class StorageAttribute(Attribute):
    """A descriptor used to mark attributes of a class as those
    which store information about a cached piece of data.
    """

    def __init__(
        self,
        docstring,
        type_hint,
        optional=False,
    ):
        super().__init__(docstring, type_hint, UNDEFINED, optional)

    def _set_value(self, instance, value):
        # For convenience, promote plain strings to `FilePath` objects when
        # the attribute is typed as a file path. This is necessary as the
        # json library currently doesn't support custom serialization of
        # IntFlag or IntEnum.
        expects_file_path = isinstance(self.type_hint, type) and issubclass(
            self.type_hint, FilePath
        )

        if expects_file_path and isinstance(value, str):
            value = FilePath(value)

        super(StorageAttribute, self)._set_value(instance, value)
class QueryAttribute(Attribute):
    """A descriptor used to add additional metadata to
    attributes of a storage query.
    """

    def __init__(self, docstring, type_hint, optional=False, custom_match=False):
        """Initializes self.

        Parameters
        ----------
        custom_match: bool
            Whether a custom behaviour will be implemented when
            matching this attribute against the matching data object
            attribute.
        """
        self.custom_match = custom_match
        super().__init__(docstring, type_hint, UNDEFINED, optional)
|
jaketanderson/openff-evaluator | openff/evaluator/utils/openmm.py | <reponame>jaketanderson/openff-evaluator<filename>openff/evaluator/utils/openmm.py
"""
A set of utilities for helping to perform simulations using openmm.
"""
import copy
import logging
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy
from openff.evaluator.attributes.attributes import UndefinedAttribute
try:
import openmm
from openmm import app
from openmm import unit as _openmm_unit
except ImportError:
from simtk import openmm
from simtk.openmm import app
from simtk.openmm import unit as _openmm_unit
from openff.evaluator.forcefield import ParameterGradientKey
if TYPE_CHECKING:
from openff.toolkit.topology import Topology
from openff.toolkit.typing.engines.smirnoff import ForceField
logger = logging.getLogger(__name__)
def setup_platform_with_resources(compute_resources, high_precision=False):
    """Creates an OpenMM `Platform` object which requests a set
    amount of compute resources (e.g with a certain number of cpus).

    Parameters
    ----------
    compute_resources: ComputeResources
        The compute resources which describe which platform is most
        appropriate.
    high_precision: bool
        If true, a platform with the highest possible precision (double
        for CUDA and OpenCL, Reference for CPU only) will be returned.

    Returns
    -------
    Platform
        The created platform
    """
    try:
        from openmm import Platform
    except ImportError:
        from simtk.openmm import Platform

    # Setup the requested platform:
    if compute_resources.number_of_gpus > 0:

        # TODO: Make sure use mixing precision - CUDA, OpenCL.
        # TODO: Deterministic forces = True

        from openff.evaluator.backends import ComputeResources

        toolkit_enum = ComputeResources.GPUToolkit(
            compute_resources.preferred_gpu_toolkit
        )

        # A platform which runs on GPUs has been requested. The platform name
        # must be a plain string - previously the `GPUToolkit.OpenCL` enum
        # member itself was passed through here, which is not a valid argument
        # to `Platform.getPlatformByName` and broke the string concatenation
        # used to build the device index property name below.
        platform_name = (
            "CUDA" if toolkit_enum == ComputeResources.GPUToolkit.CUDA else "OpenCL"
        )

        # noinspection PyCallByClass,PyTypeChecker
        platform = Platform.getPlatformByName(platform_name)

        if compute_resources.gpu_device_indices is not None:

            # The CUDA platform spells its properties 'Cuda...'.
            property_platform_name = platform_name

            if toolkit_enum == ComputeResources.GPUToolkit.CUDA:
                property_platform_name = platform_name.lower().capitalize()

            platform.setPropertyDefaultValue(
                property_platform_name + "DeviceIndex",
                compute_resources.gpu_device_indices,
            )

        if high_precision:
            platform.setPropertyDefaultValue("Precision", "double")

        logger.info(
            "Setting up an openmm platform on GPU {}".format(
                compute_resources.gpu_device_indices or 0
            )
        )

    else:

        if not high_precision:
            # noinspection PyCallByClass,PyTypeChecker
            platform = Platform.getPlatformByName("CPU")
            platform.setPropertyDefaultValue(
                "Threads", str(compute_resources.number_of_threads)
            )
        else:
            # The Reference platform runs entirely in double precision.
            # noinspection PyCallByClass,PyTypeChecker
            platform = Platform.getPlatformByName("Reference")

        logger.info(
            "Setting up a simulation with {} threads".format(
                compute_resources.number_of_threads
            )
        )

    return platform
def disable_pbc(system):
    """Turn off periodic boundary conditions on all non-bonded forces.

    Every ``NonbondedForce`` / ``CustomNonbondedForce`` found on the system
    has its non-bonded method switched to ``NoCutoff`` (integer value 0).

    Parameters
    ----------
    system: openmm.System
        The system whose non-bonded forces should be made non-periodic.
    """
    nonbonded_force_types = (openmm.NonbondedForce, openmm.CustomNonbondedForce)
    for candidate_force in (
        system.getForce(index) for index in range(system.getNumForces())
    ):
        if isinstance(candidate_force, nonbonded_force_types):
            # NoCutoff = 0, NonbondedMethod.CutoffNonPeriodic = 1
            candidate_force.setNonbondedMethod(0)
def system_subset(
    parameter_key: ParameterGradientKey,
    force_field: "ForceField",
    topology: "Topology",
    scale_amount: Optional[float] = None,
) -> Tuple["openmm.System", "_openmm_unit.Quantity"]:
    """Produces an OpenMM system containing the minimum number of forces while
    still containing a specified force field parameter, and those other parameters
    which may interact with it (e.g. in the case of vdW parameters).
    The value of the parameter of interest may optionally be perturbed by an amount
    specified by ``scale_amount``.
    Parameters
    ----------
    parameter_key
        The parameter of interest.
    force_field
        The force field to create the system from (and optionally perturb).
    topology
        The topology of the system to apply the force field to.
    scale_amount: float, optional
        The optional amount to perturb the ``parameter`` by such that
        ``parameter = (1.0 + scale_amount) * parameter``.
    Returns
    -------
    The created system as well as the value of the specified ``parameter``.
    """
    # As this method deals mainly with the toolkit, we stick to
    # openmm units here.
    from openff.toolkit.typing.engines.smirnoff import ForceField
    # Create the force field subset.
    force_field_subset = ForceField()
    handlers_to_register = {parameter_key.tag}
    if parameter_key.tag in {"ChargeIncrementModel", "LibraryCharges"}:
        # Make sure to retain all of the electrostatic handlers when dealing with
        # charges as the applied charges will depend on which charges have been applied
        # by previous handlers.
        handlers_to_register.update(
            {"Electrostatics", "ChargeIncrementModel", "LibraryCharges"}
        )
    registered_handlers = force_field.registered_parameter_handlers
    for handler_to_register in handlers_to_register:
        if handler_to_register not in registered_handlers:
            continue
        # Deep copy each handler so that perturbing the subset below cannot
        # mutate the caller's force field object.
        force_field_subset.register_parameter_handler(
            copy.deepcopy(force_field.get_parameter_handler(handler_to_register))
        )
    handler = force_field_subset.get_parameter_handler(parameter_key.tag)
    if handler._OPENMMTYPE == openmm.CustomNonbondedForce:
        vdw_handler = force_field_subset.get_parameter_handler("vdW")
        # we need a generic blank parameter to work around this toolkit issue
        # <https://github.com/openforcefield/openff-toolkit/issues/1102>
        vdw_handler.add_parameter(
            parameter_kwargs={
                "smirks": "[*:1]",
                "epsilon": 0.0 * _openmm_unit.kilocalories_per_mole,
                "sigma": 1.0 * _openmm_unit.angstrom,
            }
        )
    # When no SMIRKS pattern is given the attribute lives on the handler
    # itself rather than on one of its parameters.
    parameter = (
        handler
        if parameter_key.smirks is None
        else handler.parameters[parameter_key.smirks]
    )
    parameter_value = getattr(parameter, parameter_key.attribute)
    is_quantity = isinstance(parameter_value, _openmm_unit.Quantity)
    if not is_quantity:
        parameter_value = parameter_value * _openmm_unit.dimensionless
    # Optionally perturb the parameter of interest.
    if scale_amount is not None:
        if numpy.isclose(parameter_value.value_in_unit(parameter_value.unit), 0.0):
            # Careful thought needs to be given to this. Consider cases such as
            # epsilon or sigma where negative values are not allowed.
            parameter_value = (
                scale_amount if scale_amount > 0.0 else 0.0
            ) * parameter_value.unit
        else:
            parameter_value *= 1.0 + scale_amount
    if not isinstance(parameter_value, _openmm_unit.Quantity):
        # Handle the case where OMM down-converts a dimensionless quantity to a float.
        parameter_value = parameter_value * _openmm_unit.dimensionless
    # Write the (possibly perturbed) value back, undoing the dimensionless
    # wrapping for attributes that were plain floats to begin with.
    setattr(
        parameter,
        parameter_key.attribute,
        parameter_value
        if is_quantity
        else parameter_value.value_in_unit(_openmm_unit.dimensionless),
    )
    # Create the parameterized sub-system.
    system = force_field_subset.create_openmm_system(topology)
    return system, parameter_value
def update_context_with_positions(
    context: openmm.Context,
    positions: _openmm_unit.Quantity,
    box_vectors: Optional[_openmm_unit.Quantity],
):
    """Apply a set of positions (and optionally box vectors) to an OpenMM
    context, recomputing the positions of any v-sites afterwards.

    Parameters
    ----------
    context
        The OpenMM context whose state should be updated.
    positions
        A unit wrapped numpy array with shape=(n_atoms, 3) (or
        shape=(n_atoms + n_v_sites, 3)) containing the positions to apply.
    box_vectors
        An optional unit wrapped numpy array with shape=(3, 3) containing the
        box vectors to apply.
    """
    system = context.getSystem()
    total_particles = system.getNumParticles()
    n_vsites = sum(
        1 for index in range(total_particles) if system.isVirtualSite(index)
    )
    n_atoms = total_particles - n_vsites
    if len(positions) not in (n_atoms, n_atoms + n_vsites):
        raise ValueError(
            "The length of the positions array does not match either the "
            "the number of atoms or the number of atoms + v-sites."
        )
    # When only atomic positions were supplied, pad the array with zero
    # placeholders at the v-site indices - these get recomputed below.
    if n_vsites > 0 and len(positions) != (n_atoms + n_vsites):
        padded_positions = numpy.zeros((total_particles, 3))
        next_atom = 0
        for particle_index in range(total_particles):
            if system.isVirtualSite(particle_index):
                continue
            padded_positions[particle_index] = positions[next_atom].value_in_unit(
                _openmm_unit.nanometers
            )
            next_atom += 1
        positions = padded_positions * _openmm_unit.nanometers
    if box_vectors is not None:
        context.setPeriodicBoxVectors(*box_vectors)
    context.setPositions(positions)
    if n_vsites > 0:
        context.computeVirtualSites()
def update_context_with_pdb(
    context: openmm.Context,
    pdb_file: app.PDBFile,
):
    """Apply the positions and box vectors stored in a PDB file to an OpenMM
    context, recomputing any v-site positions via
    ``update_context_with_positions``.

    Parameters
    ----------
    context
        The OpenMM context to update.
    pdb_file
        The PDB file object supplying the positions and box vectors.
    """
    pdb_positions = pdb_file.getPositions(asNumpy=True)
    pdb_box_vectors = pdb_file.topology.getPeriodicBoxVectors()
    # Fall back to the default box defined on the system when the PDB file
    # does not define one itself.
    if pdb_box_vectors is None:
        pdb_box_vectors = context.getSystem().getDefaultPeriodicBoxVectors()
    update_context_with_positions(context, pdb_positions, pdb_box_vectors)
def extract_atom_indices(system: openmm.System) -> List[int]:
    """Collect the indices of the real atoms in a system, skipping any
    particles that are virtual sites."""
    atom_indices = []
    for particle_index in range(system.getNumParticles()):
        if not system.isVirtualSite(particle_index):
            atom_indices.append(particle_index)
    return atom_indices
def extract_positions(
    state: openmm.State,
    particle_indices: Optional[List[int]] = None,
) -> _openmm_unit.Quantity:
    """Pull the particle positions out of an OpenMM state, optionally keeping
    only the rows for the requested particles (e.g. to drop v-site positions,
    which are uniquely defined by the atomic positions).
    """
    all_positions = state.getPositions(asNumpy=True)
    if particle_indices is None:
        return all_positions
    return all_positions[particle_indices]
def openmm_quantity_to_pint(openmm_quantity):
    """Converts a `openmm.unit.Quantity` to a `openff.evaluator.unit.Quantity`.

    Parameters
    ----------
    openmm_quantity: openmm.unit.Quantity
        The quantity to convert. ``None`` and undefined placeholder values
        are passed through as ``None``.

    Returns
    -------
    openff.evaluator.unit.Quantity
        The converted quantity, or ``None``.
    """
    from openff.units.openmm import from_openmm
    is_unset = openmm_quantity is None or isinstance(
        openmm_quantity, UndefinedAttribute
    )
    return None if is_unset else from_openmm(openmm_quantity)
def pint_quantity_to_openmm(pint_quantity):
    """Converts a `openff.evaluator.unit.Quantity` to a `openmm.unit.Quantity`.

    Notes
    -----
    Not all pint units are available in OpenMM.

    Parameters
    ----------
    pint_quantity: openff.evaluator.unit.Quantity
        The quantity to convert. ``None`` and undefined placeholder values
        are passed through as ``None``.

    Returns
    -------
    openmm.unit.Quantity
        The converted quantity, or ``None``.
    """
    from openff.units.openmm import to_openmm
    is_unset = pint_quantity is None or isinstance(pint_quantity, UndefinedAttribute)
    return None if is_unset else to_openmm(pint_quantity)
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_utils/test_graph.py | """
Units tests for openff.evaluator.utils.graph
"""
import pytest
from openff.evaluator.utils import graph
def test_transitive_reduction():
    """Test transitive graph reduction utility."""
    chain_graph = {"A": ["B", "C"], "B": ["C"], "C": []}
    graph.apply_transitive_reduction(chain_graph)
    # The edge A -> C is implied by A -> B -> C and should be removed.
    assert chain_graph == {"A": ["B"], "B": ["C"], "C": []}
    diamond_graph = {"A": ["B", "C"], "B": ["D"], "C": ["D"], "D": []}
    graph.apply_transitive_reduction(diamond_graph)
    # A diamond contains no redundant edges so should be left untouched.
    assert diamond_graph == {"A": ["B", "C"], "B": ["D"], "C": ["D"], "D": []}
def test_find_roots():
    """Test find graph roots utility."""
    dependants_graph = {"A": ["C"], "B": ["D"], "C": ["E"], "D": ["E"], "E": []}
    root_nodes = graph.find_root_nodes(dependants_graph)
    # Only A and B have no incoming edges.
    assert len(root_nodes) == 2
    assert sorted(root_nodes) == ["A", "B"]
def test_topological_sort():
    """Test topological sort graph utility."""
    linear_graph = {"A": ["B"], "B": ["C"], "C": ["D"], "D": ["E"], "E": []}
    # A simple chain has exactly one valid ordering.
    assert graph.topological_sort(linear_graph) == ["A", "B", "C", "D", "E"]
    diamond_graph = {
        "A": ["B", "C"],
        "B": ["D"],
        "C": ["D"],
        "D": [],
    }
    sorted_order = graph.topological_sort(diamond_graph)
    assert len(sorted_order) == len(diamond_graph)
    # B and C are independent, so either relative ordering is valid.
    assert sorted_order in (["A", "B", "C", "D"], ["A", "C", "B", "D"])
def test_is_acyclic():
    """Test graph utility cycle detection."""
    # An empty graph is trivially acyclic.
    assert graph.is_acyclic({})
    # A -> B -> C -> A forms a three membered cycle.
    assert not graph.is_acyclic({"A": ["B"], "B": ["C"], "C": ["A"]})
    assert graph.is_acyclic({"A": ["B"], "B": ["C"], "C": []})
    # B <-> C forms a two membered cycle.
    assert not graph.is_acyclic({"A": ["B"], "B": ["C"], "C": ["B"]})
def test_dependants_to_dependencies():
    """Test inverting a dependants graph."""
    dependants_graph = {
        "A": ["B"],
        "B": ["C"],
        "C": [],
    }
    dependencies = graph.dependants_to_dependencies(dependants_graph)
    # Each edge should now point from a node to what it depends on.
    assert dependencies["A"] == []
    assert dependencies["B"] == ["A"]
    assert dependencies["C"] == ["B"]
def test_uuid_utils():
    """Test appending a uuid to a protocol"""
    uuid_to_append = "99ca09d3-3ddb-475e-b82c-22b0c12c0e25"
    # Appending to a bare id should prefix it with the uuid.
    appended_id = graph.append_uuid("protocol_id", uuid_to_append)
    assert appended_id == "99ca09d3-3ddb-475e-b82c-22b0c12c0e25|protocol_id"
    assert graph.retrieve_uuid(appended_id) == uuid_to_append
    # Appending to an id which already carries a uuid should replace it.
    already_tagged_id = "d2209b46-cd33-4122-a88d-764862c71a6e|protocol_id"
    replaced_id = graph.append_uuid(already_tagged_id, uuid_to_append)
    assert replaced_id == "99ca09d3-3ddb-475e-b82c-22b0c12c0e25|protocol_id"
    assert graph.retrieve_uuid(replaced_id) == uuid_to_append
    # Ids containing more than one separator are malformed.
    with pytest.raises(ValueError):
        graph.append_uuid("a|b|c", uuid_to_append)
|
jaketanderson/openff-evaluator | openff/evaluator/server/__init__.py | from .server import Batch, EvaluatorServer
__all__ = [Batch, EvaluatorServer]
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_attributes/test_attributes.py | <filename>openff/evaluator/tests/test_attributes/test_attributes.py
"""
Units tests for openff.evaluator.attributes
"""
import json
import pytest
from openff.evaluator.attributes import UNDEFINED, Attribute, AttributeClass
from openff.evaluator.utils.serialization import TypedJSONDecoder, TypedJSONEncoder
from openff.evaluator.workflow.attributes import InputAttribute, OutputAttribute
class AttributeObject(AttributeClass):
    """A minimal attribute class used by the tests below to exercise
    input / output attribute behaviour."""
    # A required string input with no meaningful default.
    required_input = InputAttribute("", str, UNDEFINED, optional=False)
    # An optional integer input which may be left UNDEFINED.
    optional_input = InputAttribute("", int, UNDEFINED, optional=True)
    # An integer output, populated by the constructor below.
    some_output = OutputAttribute("", int)
    def __init__(self):
        self.some_output = 5
class NestedAttributeObject(AttributeClass):
    """An attribute class which nests ``AttributeObject`` values directly,
    inside a list and inside a dictionary, so that recursive validation
    can be tested."""
    # A nested attribute object.
    some_value = Attribute("", AttributeObject)
    # Optional containers holding further nested attribute objects.
    some_list = Attribute("", list, UNDEFINED, optional=True)
    some_dict = Attribute("", dict, UNDEFINED, optional=True)
def test_undefined_singleton():
    """A test of the UNDEFINED singleton pattern"""
    from openff.evaluator.attributes.attributes import UndefinedAttribute
    # Any two placeholder instances should compare equal.
    assert UndefinedAttribute() == UndefinedAttribute()
def test_undefined_serialization():
    """A test of serializing the UNDEFINED placeholder"""
    serialized = json.dumps(UNDEFINED, cls=TypedJSONEncoder)
    round_tripped = json.loads(serialized, cls=TypedJSONDecoder)
    # The placeholder should survive a JSON round trip.
    assert UNDEFINED == round_tripped
def test_get_attributes():
    """Attribute discovery should respect the optional type filter."""
    assert AttributeObject.get_attributes(InputAttribute) == [
        "required_input",
        "optional_input",
    ]
    assert AttributeObject.get_attributes(OutputAttribute) == ["some_output"]
    # With no filter every attribute should be returned in definition order.
    assert AttributeObject.get_attributes() == [
        "required_input",
        "optional_input",
        "some_output",
    ]
def test_type_check():
    """Assigning a value of the wrong type should raise."""
    attribute_object = AttributeObject()
    with pytest.raises(ValueError):
        # ``required_input`` is declared as a ``str``.
        attribute_object.required_input = 5
def test_state_methods():
    """``__setstate__`` should replace attributes with the captured state."""
    source_object = AttributeObject()
    source_object.required_input = "Set"
    # Only ``required_input`` and ``some_output`` are defined at this point.
    state = source_object.__getstate__()
    assert len(state) == 2
    target_object = AttributeObject()
    target_object.required_input = ""
    target_object.optional_input = 10
    target_object.__setstate__(state)
    for attribute_name in ("required_input", "optional_input", "some_output"):
        assert getattr(target_object, attribute_name) == getattr(
            source_object, attribute_name
        )
def test_nested_validation():
    """Validation should recurse into attributes, lists and dictionaries."""
    parent_object = NestedAttributeObject()
    parent_object.some_value = AttributeObject()
    # The directly nested child is missing its required input.
    with pytest.raises(ValueError):
        parent_object.validate()
    parent_object.some_value.required_input = ""
    parent_object.validate()
    parent_object.some_list = [AttributeObject()]
    # The child stored in the list is missing its required input.
    with pytest.raises(ValueError):
        parent_object.validate()
    parent_object.some_list[0].required_input = ""
    parent_object.validate()
    parent_object.some_dict = {"x": AttributeObject()}
    # The child stored in the dictionary is missing its required input.
    with pytest.raises(ValueError):
        parent_object.validate()
    parent_object.some_dict["x"].required_input = ""
    parent_object.validate()
|
jaketanderson/openff-evaluator | openff/evaluator/backends/dask.py | """
A collection of openff-evaluator compute backends which use dask as the distribution engine.
"""
import abc
import importlib
import logging
import multiprocessing
import os
import platform
import shutil
import traceback
import dask
from dask import distributed
from distributed import get_worker
from openff import evaluator
from openff.evaluator.backends import (
CalculationBackend,
ComputeResources,
QueueWorkerResources,
)
from openff.evaluator.utils.utils import timestamp_formatter
logger = logging.getLogger(__name__)
class _Multiprocessor:
    """A temporary utility class which runs a given
    function in a separate process.
    """
    @staticmethod
    def _register_plugins(kwargs):
        """Re-imports and re-registers any custom workflow protocols whose
        fully qualified class names were passed via the
        ``registered_workflow_protocols`` keyword argument.

        A spun up process doesn't automatically import all of the modules
        which were imported in the main launch script, and as such custom
        plugins will no longer be registered.

        Parameters
        ----------
        kwargs: dict
            The keyword arguments passed to the wrapped function. The
            ``registered_workflow_protocols`` entry (if present) is removed.
        """
        from openff.evaluator.workflow.plugins import registered_workflow_protocols
        if "registered_workflow_protocols" not in kwargs:
            return
        for protocol_class in kwargs.pop("registered_workflow_protocols"):
            module_name, class_name = protocol_class.rsplit(".", 1)
            imported_module = importlib.import_module(module_name)
            registered_workflow_protocols[class_name] = getattr(
                imported_module, class_name
            )
    @staticmethod
    def _setup_logging(logger_path):
        """Directs the root logger of this process to a file, writing a short
        header describing the host / queue job the first time the file is used.

        Parameters
        ----------
        logger_path: str
            The path to the log file to write to.
        """
        worker_logger = logging.getLogger()
        if not len(worker_logger.handlers):
            logger_handler = logging.FileHandler(logger_path)
            logger_handler.setFormatter(timestamp_formatter())
            worker_logger.setLevel(logging.INFO)
            worker_logger.addHandler(logger_handler)
        if not os.path.exists(logger_path) or os.stat(logger_path).st_size == 0:
            worker_logger.info("=========================================")
            worker_logger.info(f"HOSTNAME: {platform.node()}")
            # Report the queue system job id using whichever scheduler
            # environment variable is defined. Previously the SLURM branch
            # checked ``SLURM_JOB_ID`` but then read back ``SLURM_JOBID``,
            # so it always logged ``None`` - the same variable is now used
            # for both the check and the value.
            for variable_name, label in (
                ("PBS_JOBID", "PBSJOBID"),
                ("LSB_JOBID", "LSBJOBID"),
                ("SLURM_JOB_ID", "SLURMJOBID"),
            ):
                job_id = os.environ.get(variable_name)
                if job_id is not None:
                    worker_logger.info(f"{label}: {job_id}")
                    break
            worker_logger.info(f"PLATFORM: {platform.platform()}")
            worker_logger.info("-----------------------------------------")
            worker_logger.info(
                "PYTHON VERSION: "
                f"{platform.python_version()} - "
                f"{platform.python_implementation()}"
            )
            worker_logger.info("=========================================")
    @staticmethod
    def _wrapper(func, queue, args, kwargs):
        """A wrapper around the function to run in a separate
        process which sets up logging and handles any extra
        module loading.

        Parameters
        ----------
        func: function
            The function to run in this process.
        queue: Queue
            The queue used to pass the results back
            to the parent process.
        args: tuple
            The args to pass to the function
        kwargs: dict
            The kwargs to pass to the function
        """
        try:
            _Multiprocessor._register_plugins(kwargs)
            if "logger_path" in kwargs:
                _Multiprocessor._setup_logging(kwargs.pop("logger_path"))
            return_value = func(*args, **kwargs)
            queue.put(return_value)
        except Exception as e:
            # Ship the exception and its traceback back explicitly so the
            # parent process can log and re-raise it.
            queue.put((e, e.__traceback__))
    @staticmethod
    def run(function, *args, **kwargs):
        """Runs a function in its own process.

        Parameters
        ----------
        function: function
            The function to run.
        args: Any
            The arguments to pass to the function.
        kwargs: Any
            The key word arguments to pass to the function.

        Returns
        -------
        Any
            The result of the function

        Raises
        ------
        Exception
            Any exception raised by ``function`` inside the child process is
            re-raised in the parent.
        """
        # An unpleasant way to ensure that codecov works correctly
        # when testing on GHA.
        if hasattr(evaluator, "_called_from_test"):
            return function(*args, **kwargs)
        # A managed queue is used so that results can cross the process
        # boundary reliably.
        manager = multiprocessing.Manager()
        queue = manager.Queue()
        target_args = [function, queue, args, kwargs]
        process = multiprocessing.Process(
            target=_Multiprocessor._wrapper, args=target_args
        )
        process.start()
        return_value = queue.get()
        process.join()
        # An (exception, traceback) pair signals a failure in the child.
        if (
            isinstance(return_value, tuple)
            and len(return_value) > 0
            and isinstance(return_value[0], Exception)
        ):
            formatted_exception = traceback.format_exception(
                None, return_value[0], return_value[1]
            )
            logger.info(f"{formatted_exception} {return_value[0]} {return_value[1]}")
            raise return_value[0]
        return return_value
class BaseDaskBackend(CalculationBackend, abc.ABC):
    """A base `dask` backend class, which implements functionality
    which is common to all other `dask` based backends.
    """
    def __init__(self, number_of_workers=1, resources_per_worker=None):
        """Constructs a new BaseDaskBackend object.

        Parameters
        ----------
        number_of_workers: int
            The number of workers to start the backend with.
        resources_per_worker: ComputeResources, optional
            The resources available to each worker. A fresh
            ``ComputeResources`` object is created when none is provided.
        """
        # Avoid a mutable default argument - a default of ``ComputeResources()``
        # in the signature would create a single instance shared by every
        # backend constructed without an explicit value.
        if resources_per_worker is None:
            resources_per_worker = ComputeResources()
        super().__init__(number_of_workers, resources_per_worker)
        # Created lazily by ``start``.
        self._cluster = None
        self._client = None
    def start(self):
        super(BaseDaskBackend, self).start()
        # Attach a dask client to the cluster created by the subclass.
        self._client = distributed.Client(self._cluster)
    def stop(self):
        # Shut down the client before the cluster it is connected to.
        if self._client is not None:
            self._client.close()
        if self._cluster is not None:
            self._cluster.close()
        # Remove any scratch space created by the dask workers.
        if os.path.isdir("dask-worker-space"):
            shutil.rmtree("dask-worker-space")
    @staticmethod
    def _wrapped_function(function, *args, **kwargs):
        """A function which is wrapped around any function submitted via
        `submit_task`, which adds extra meta data to the args and kwargs
        (such as the compute resources available to the function) and may
        perform extra validation before the function is passed to `dask`.

        Parameters
        ----------
        function: function
            The function which will be executed by dask.
        args: Any
            The list of args to pass to the function.
        kwargs: Any
            The list of kwargs to pass to the function.

        Returns
        -------
        Any
            Returns the output of the function without modification, unless
            an uncaught exception is raised in which case a EvaluatorException
            is returned.
        """
        raise NotImplementedError()
class BaseDaskJobQueueBackend(BaseDaskBackend):
    """An openff-evaluator backend which uses a `dask_jobqueue.JobQueueCluster`
    object to run calculations within an existing HPC queuing system.
    See Also
    --------
    dask_jobqueue.JobQueueCluster
    """
    def __init__(
        self,
        minimum_number_of_workers=1,
        maximum_number_of_workers=1,
        resources_per_worker=QueueWorkerResources(),
        queue_name="default",
        setup_script_commands=None,
        extra_script_options=None,
        adaptive_interval="10000ms",
        disable_nanny_process=False,
        cluster_type=None,
        adaptive_class=None,
    ):
        """Constructs a new BaseDaskJobQueueBackend object
        Parameters
        ----------
        minimum_number_of_workers: int
            The minimum number of workers to request from the queue system.
        maximum_number_of_workers: int
            The maximum number of workers to request from the queue system.
        resources_per_worker: QueueWorkerResources
            The resources to request per worker.
        queue_name: str
            The name of the queue which the workers will be requested
            from.
        setup_script_commands: list of str
            A list of bash script commands to call within the queue submission
            script before the call to launch the dask worker.
            This may include activating a python environment, or loading
            an environment module
        extra_script_options: list of str
            A list of extra job specific options to include in the queue
            submission script. These will get added to the script header in the form
            #BSUB <extra_script_options[x]>
        adaptive_interval: str
            The interval between attempting to either scale up or down
            the cluster, of the form 'XXXms'.
        disable_nanny_process: bool
            If true, dask workers will be started in `--no-nanny` mode. This
            is required if using multiprocessing code within submitted tasks.
            This has not been fully tested yet and may lead to stability issues
            with the workers.
        cluster_type: str
            The dask-jobqueue cluster flavour (e.g. 'lsf', 'pbs') used to look
            up cluster specific settings in the dask config. Subclasses must
            supply this.
        adaptive_class: class of type `distributed.deploy.AdaptiveCore`, optional
            An optional class to pass to dask to use for its adaptive
            scaling handling. This is mainly exposed to allow easily working around
            certain dask bugs / quirks.
        """
        super().__init__(minimum_number_of_workers, resources_per_worker)
        assert isinstance(resources_per_worker, QueueWorkerResources)
        assert minimum_number_of_workers <= maximum_number_of_workers
        assert cluster_type is not None
        if resources_per_worker.number_of_gpus > 0:
            if (
                resources_per_worker.preferred_gpu_toolkit
                == ComputeResources.GPUToolkit.OpenCL
            ):
                raise ValueError("The OpenCL gpu backend is not currently supported.")
            if resources_per_worker.number_of_gpus > 1:
                raise ValueError("Only one GPU per worker is currently supported.")
        # For now we need to set this to some high number to ensure
        # jobs restarting because of workers being killed (due to
        # wall-clock time limits mainly) do not get terminated. This
        # should mostly be safe as we mostly wrap genuinely thrown
        # exceptions up as EvaluatorExceptions and return these
        # gracefully (such that the task won't be marked as failed by
        # dask).
        dask.config.set({"distributed.scheduler.allowed-failures": 500})
        self._minimum_number_of_workers = minimum_number_of_workers
        self._maximum_number_of_workers = maximum_number_of_workers
        self._queue_name = queue_name
        self._setup_script_commands = setup_script_commands
        self._extra_script_options = extra_script_options
        self._adaptive_interval = adaptive_interval
        self._adaptive_class = adaptive_class
        self._disable_nanny_process = disable_nanny_process
        self._cluster_type = cluster_type
    def _get_env_extra(self):
        """Returns a list of extra commands to run before
        the dask worker is started.
        Returns
        -------
        list of str
            The extra commands to run.
        """
        # Start from any commands already defined in the dask config for this
        # cluster type, then append the user supplied setup commands.
        env_extra = dask.config.get(
            f"jobqueue.{self._cluster_type}.env-extra", default=[]
        )
        if self._setup_script_commands is not None:
            env_extra.extend(self._setup_script_commands)
        return env_extra
    def _get_job_extra(self):
        """Returns a list of extra options to add to the
        worker job script header lines.
        Returns
        -------
        list of str
            The extra header options to add.
        """
        # As above, combine the dask config defaults with any user options.
        job_extra = dask.config.get(
            f"jobqueue.{self._cluster_type}.job-extra", default=[]
        )
        if self._extra_script_options is not None:
            job_extra.extend(self._extra_script_options)
        return job_extra
    def _get_cluster_class(self):
        """Returns the type of `dask_jobqueue.JobQueueCluster` to
        create.
        Returns
        -------
        class
            The class of cluster to create.
        """
        raise NotImplementedError()
    def _get_extra_cluster_kwargs(self):
        """Returns a dictionary of extra kwargs to pass to the cluster.
        Returns
        -------
        dict of str and Any
            The kwargs dictionary to pass.
        """
        return {}
    def job_script(self):
        """Returns the job script that dask will use to submit workers.
        The backend must be started before calling this function.
        Returns
        -------
        str
        """
        if self._cluster is None:
            raise ValueError(
                "The cluster is not initialized. This is usually"
                "caused by calling `job_script` before `start`."
            )
        return self._cluster.job_script()
    def start(self):
        """Creates the job queue cluster, enables adaptive scaling between
        the minimum and maximum worker counts, then connects the dask client
        via the base class implementation."""
        requested_memory = self._resources_per_worker.per_thread_memory_limit
        # Format the memory limit into the compact 'XGB' style string
        # expected by dask-jobqueue.
        memory_string = f"{requested_memory.to(evaluator.unit.byte):~}".replace(" ", "")
        job_extra = self._get_job_extra()
        env_extra = self._get_env_extra()
        extra = None if not self._disable_nanny_process else ["--no-nanny"]
        cluster_class = self._get_cluster_class()
        self._cluster = cluster_class(
            queue=self._queue_name,
            cores=self._resources_per_worker.number_of_threads,
            memory=memory_string,
            walltime=self._resources_per_worker.wallclock_time_limit,
            job_extra=job_extra,
            env_extra=env_extra,
            extra=extra,
            local_directory="dask-worker-space",
            **self._get_extra_cluster_kwargs(),
        )
        # The very small target duration is an attempt to force dask to scale
        # based on the number of processing tasks per worker.
        extra_kwargs = {}
        if self._adaptive_class is not None:
            extra_kwargs["Adaptive"] = self._adaptive_class
        self._cluster.adapt(
            minimum=self._minimum_number_of_workers,
            maximum=self._maximum_number_of_workers,
            interval=self._adaptive_interval,
            target_duration="0.00000000001s",
            **extra_kwargs,
        )
        super(BaseDaskJobQueueBackend, self).start()
    @staticmethod
    def _wrapped_function(function, *args, **kwargs):
        """Executes ``function`` on a dask worker, first configuring
        per-worker logging and GPU device visibility, then delegating the
        actual call to ``_Multiprocessor.run``."""
        available_resources = kwargs["available_resources"]
        per_worker_logging = kwargs.pop("per_worker_logging")
        gpu_assignments = kwargs.pop("gpu_assignments")
        # Set up the logging per worker if the flag is set to True.
        if per_worker_logging:
            # Each worker should have its own log file.
            os.makedirs("worker-logs", exist_ok=True)
            kwargs["logger_path"] = os.path.join(
                "worker-logs", f"{get_worker().id}.log"
            )
        if available_resources.number_of_gpus > 0:
            # Map this worker onto its assigned GPU, defaulting to device 0
            # when no explicit assignment exists for the worker id.
            worker_id = distributed.get_worker().id
            available_resources._gpu_device_indices = (
                "0" if worker_id not in gpu_assignments else gpu_assignments[worker_id]
            )
            logger.info(
                f"Launching a job with access to GPUs "
                f"{available_resources._gpu_device_indices}"
            )
        return_value = _Multiprocessor.run(function, *args, **kwargs)
        return return_value
    def submit_task(self, function, *args, **kwargs):
        """Submits ``function`` to the dask client, attaching the per-worker
        compute resources and the fully qualified names of all currently
        registered workflow protocols so they can be re-registered inside the
        worker process."""
        from openff.evaluator.workflow.plugins import registered_workflow_protocols
        key = kwargs.pop("key", None)
        protocols_to_import = [
            protocol_class.__module__ + "." + protocol_class.__qualname__
            for protocol_class in registered_workflow_protocols.values()
        ]
        return self._client.submit(
            BaseDaskJobQueueBackend._wrapped_function,
            function,
            *args,
            **kwargs,
            available_resources=self._resources_per_worker,
            registered_workflow_protocols=protocols_to_import,
            gpu_assignments={},
            per_worker_logging=True,
            key=key,
        )
class DaskLSFBackend(BaseDaskJobQueueBackend):
    """An openff-evaluator backend which uses a `dask_jobqueue.LSFCluster`
    object to run calculations within an existing LSF queue.
    See Also
    --------
    dask_jobqueue.LSFCluster
    DaskPBSBackend
    """
    def __init__(
        self,
        minimum_number_of_workers=1,
        maximum_number_of_workers=1,
        resources_per_worker=QueueWorkerResources(),
        queue_name="default",
        setup_script_commands=None,
        extra_script_options=None,
        adaptive_interval="10000ms",
        disable_nanny_process=False,
        adaptive_class=None,
    ):
        """Constructs a new DaskLSFBackend object
        Examples
        --------
        To create an LSF queueing compute backend which will attempt to spin up
        workers which have access to a single GPU.
        >>> # Create a resource object which will request a worker with
        >>> # one gpu which will stay alive for five hours.
        >>> from openff.evaluator.backends import QueueWorkerResources
        >>>
        >>> resources = QueueWorkerResources(number_of_threads=1,
        >>>                                  number_of_gpus=1,
        >>>                                  preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
        >>>                                  wallclock_time_limit='05:00')
        >>>
        >>> # Define the set of commands which will set up the correct environment
        >>> # for each of the workers.
        >>> setup_script_commands = [
        >>>     'module load cuda/9.2',
        >>> ]
        >>>
        >>> # Define extra options to only run on certain node groups
        >>> extra_script_options = [
        >>>     '-m "ls-gpu lt-gpu"'
        >>> ]
        >>>
        >>>
        >>> # Create the backend which will adaptively try to spin up between one and
        >>> # ten workers with the requested resources depending on the calculation load.
        >>> from openff.evaluator.backends.dask import DaskLSFBackend
        >>>
        >>> lsf_backend = DaskLSFBackend(minimum_number_of_workers=1,
        >>>                              maximum_number_of_workers=10,
        >>>                              resources_per_worker=resources,
        >>>                              queue_name='gpuqueue',
        >>>                              setup_script_commands=setup_script_commands,
        >>>                              extra_script_options=extra_script_options)
        """
        super().__init__(
            minimum_number_of_workers=minimum_number_of_workers,
            maximum_number_of_workers=maximum_number_of_workers,
            resources_per_worker=resources_per_worker,
            queue_name=queue_name,
            setup_script_commands=setup_script_commands,
            extra_script_options=extra_script_options,
            adaptive_interval=adaptive_interval,
            disable_nanny_process=disable_nanny_process,
            cluster_type="lsf",
            adaptive_class=adaptive_class,
        )
    def _get_job_extra(self):
        # Extend the base header options with an LSF `-gpu` request when the
        # worker has been allocated GPUs.
        job_extra = super()._get_job_extra()
        gpu_count = self._resources_per_worker.number_of_gpus
        if gpu_count > 0:
            job_extra.append(
                f"-gpu num={gpu_count}:j_exclusive=yes:mode=shared:mps=no:"
            )
        return job_extra
    def _get_extra_cluster_kwargs(self):
        # LSF expects the per-worker memory limit (in bytes) via `mem`.
        memory_limit = self._resources_per_worker.per_thread_memory_limit
        extra_kwargs = super()._get_extra_cluster_kwargs()
        extra_kwargs["mem"] = memory_limit.to(evaluator.unit.byte).magnitude
        return extra_kwargs
    def _get_cluster_class(self):
        from dask_jobqueue import LSFCluster
        return LSFCluster
class DaskPBSBackend(BaseDaskJobQueueBackend):
    """An openff-evaluator backend which uses a `dask_jobqueue.PBSCluster`
    object to run calculations within an existing PBS queue.
    See Also
    --------
    dask_jobqueue.PBSCluster
    DaskLSFBackend
    """
    def __init__(
        self,
        minimum_number_of_workers=1,
        maximum_number_of_workers=1,
        resources_per_worker=QueueWorkerResources(),
        queue_name="default",
        setup_script_commands=None,
        extra_script_options=None,
        adaptive_interval="10000ms",
        disable_nanny_process=False,
        resource_line=None,
        adaptive_class=None,
    ):
        """Constructs a new DaskPBSBackend object
        Parameters
        ----------
        resource_line: str
            The string to pass to the `#PBS -l` line.
        Examples
        --------
        To create a PBS queueing compute backend which will attempt to spin up
        workers which have access to a single GPU.
        >>> # Create a resource object which will request a worker with
        >>> # one gpu which will stay alive for five hours.
        >>> from openff.evaluator.backends import QueueWorkerResources
        >>>
        >>> resources = QueueWorkerResources(number_of_threads=1,
        >>>                                  number_of_gpus=1,
        >>>                                  preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
        >>>                                  wallclock_time_limit='05:00')
        >>>
        >>> # Define the set of commands which will set up the correct environment
        >>> # for each of the workers.
        >>> setup_script_commands = [
        >>>     'module load cuda/9.2',
        >>> ]
        >>>
        >>> # Create the backend which will adaptively try to spin up between one and
        >>> # ten workers with the requested resources depending on the calculation load.
        >>> from openff.evaluator.backends.dask import DaskPBSBackend
        >>>
        >>> pbs_backend = DaskPBSBackend(minimum_number_of_workers=1,
        >>>                              maximum_number_of_workers=10,
        >>>                              resources_per_worker=resources,
        >>>                              queue_name='gpuqueue',
        >>>                              setup_script_commands=setup_script_commands)
        """
        super().__init__(
            minimum_number_of_workers=minimum_number_of_workers,
            maximum_number_of_workers=maximum_number_of_workers,
            resources_per_worker=resources_per_worker,
            queue_name=queue_name,
            setup_script_commands=setup_script_commands,
            extra_script_options=extra_script_options,
            adaptive_interval=adaptive_interval,
            disable_nanny_process=disable_nanny_process,
            cluster_type="pbs",
            adaptive_class=adaptive_class,
        )
        self._resource_line = resource_line
    def _get_extra_cluster_kwargs(self):
        # Forward the raw `#PBS -l` resource specification to dask-jobqueue.
        extra_kwargs = super()._get_extra_cluster_kwargs()
        extra_kwargs["resource_spec"] = self._resource_line
        return extra_kwargs
    def _get_cluster_class(self):
        from dask_jobqueue import PBSCluster
        return PBSCluster
class DaskSLURMBackend(BaseDaskJobQueueBackend):
    """An openff-evaluator backend which uses a `dask_jobqueue.SLURMCluster`
    object to run calculations within an existing SLURM queue.
    See Also
    --------
    dask_jobqueue.SLURMCluster
    DaskSLURMBackend
    """
    def __init__(
        self,
        minimum_number_of_workers=1,
        maximum_number_of_workers=1,
        resources_per_worker=QueueWorkerResources(),
        queue_name="default",
        setup_script_commands=None,
        extra_script_options=None,
        adaptive_interval="10000ms",
        disable_nanny_process=False,
        adaptive_class=None,
    ):
        """Constructs a new DaskSLURMBackend object
        Examples
        --------
        To create a SLURM queueing compute backend which will attempt to spin up
        workers which have access to a single GPU.
        >>> # Create a resource object which will request a worker with
        >>> # one gpu which will stay alive for five hours.
        >>> from openff.evaluator.backends import QueueWorkerResources
        >>>
        >>> resources = QueueWorkerResources(number_of_threads=1,
        >>>                                  number_of_gpus=1,
        >>>                                  preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
        >>>                                  wallclock_time_limit='05:00')
        >>>
        >>> # Define the set of commands which will set up the correct environment
        >>> # for each of the workers.
        >>> setup_script_commands = [
        >>>     'module load cuda/9.2',
        >>> ]
        >>>
        >>> # Create the backend which will adaptively try to spin up between one and
        >>> # ten workers with the requested resources depending on the calculation load.
        >>> from openff.evaluator.backends.dask import DaskSLURMBackend
        >>>
        >>> slurm_backend = DaskSLURMBackend(minimum_number_of_workers=1,
        >>>                                  maximum_number_of_workers=10,
        >>>                                  resources_per_worker=resources,
        >>>                                  queue_name='gpuqueue',
        >>>                                  setup_script_commands=setup_script_commands)
        """
        super().__init__(
            minimum_number_of_workers,
            maximum_number_of_workers,
            resources_per_worker,
            queue_name,
            setup_script_commands,
            extra_script_options,
            adaptive_interval,
            disable_nanny_process,
            cluster_type="slurm",
            adaptive_class=adaptive_class,
        )
    def _get_cluster_class(self):
        """Return the dask-jobqueue cluster class used to talk to SLURM."""
        from dask_jobqueue import SLURMCluster
        return SLURMCluster
class DaskLocalCluster(BaseDaskBackend):
    """An openff-evaluator backend which uses a `dask` `LocalCluster`
    object to run calculations on a single machine.
    See Also
    --------
    dask.LocalCluster
    """
    def __init__(self, number_of_workers=1, resources_per_worker=ComputeResources()):
        """Constructs a new DaskLocalCluster
        Parameters
        ----------
        number_of_workers: int
            The number of dask workers to spawn on the local machine.
        resources_per_worker: ComputeResources
            The threads / GPUs to make available to each worker.
        Raises
        ------
        ValueError
            If more threads are requested than the machine has, if an
            unsupported GPU configuration is requested, or if the number of
            visible CUDA devices does not match the number of workers.
        """
        super().__init__(number_of_workers, resources_per_worker)
        # Maps a dask worker id -> the CUDA device index that worker may use.
        self._gpu_device_indices_by_worker = {}
        maximum_threads = multiprocessing.cpu_count()
        requested_threads = number_of_workers * resources_per_worker.number_of_threads
        if requested_threads > maximum_threads:
            # Fix: the original message was missing spaces around 'is' and
            # before 'machine', producing garbled output.
            raise ValueError(
                "The total number of requested threads ({}) is greater than is "
                "available on the machine ({})".format(
                    requested_threads, maximum_threads
                )
            )
        if resources_per_worker.number_of_gpus > 0:
            if (
                resources_per_worker.preferred_gpu_toolkit
                == ComputeResources.GPUToolkit.OpenCL
            ):
                raise ValueError("The OpenCL gpu backend is not currently supported.")
            if resources_per_worker.number_of_gpus > 1:
                raise ValueError("Only one GPU per worker is currently supported.")
            # Each worker will be pinned to one of the visible CUDA devices.
            visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES")
            if visible_devices is None:
                raise ValueError("The CUDA_VISIBLE_DEVICES variable is empty.")
            gpu_device_indices = visible_devices.split(",")
            if len(gpu_device_indices) != number_of_workers:
                # Fix: the placeholders were previously never formatted, so the
                # raised message contained literal '{}' and no values.
                raise ValueError(
                    "The number of available GPUs {} must match "
                    "the number of requested workers {}.".format(
                        len(gpu_device_indices), number_of_workers
                    )
                )
    def start(self):
        """Spin up the local dask cluster and record per-worker GPU indices."""
        self._cluster = distributed.LocalCluster(
            self._number_of_workers, 1, processes=False
        )
        if self._resources_per_worker.number_of_gpus > 0:
            # `LocalCluster.workers` may be a dict or a list depending on the
            # installed version of distributed.
            if isinstance(self._cluster.workers, dict):
                for index, worker in self._cluster.workers.items():
                    self._gpu_device_indices_by_worker[worker.id] = str(index)
            else:
                for index, worker in enumerate(self._cluster.workers):
                    self._gpu_device_indices_by_worker[worker.id] = str(index)
        super(DaskLocalCluster, self).start()
    @staticmethod
    def _wrapped_function(function, *args, **kwargs):
        """Assigns the executing worker's GPU to the available resources and
        then runs `function` in a child process via `_Multiprocessor`."""
        available_resources = kwargs["available_resources"]
        gpu_assignments = kwargs.pop("gpu_assignments")
        if available_resources.number_of_gpus > 0:
            worker_id = distributed.get_worker().id
            available_resources._gpu_device_indices = gpu_assignments[worker_id]
            logger.info(
                "Launching a job with access to GPUs {}".format(
                    gpu_assignments[worker_id]
                )
            )
        return_value = _Multiprocessor.run(function, *args, **kwargs)
        return return_value
        # return function(*args, **kwargs)
    def submit_task(self, function, *args, **kwargs):
        """Submit `function` to the cluster, forwarding the worker resources
        and the GPU assignment map to the wrapped call."""
        key = kwargs.pop("key", None)
        return self._client.submit(
            DaskLocalCluster._wrapped_function,
            function,
            *args,
            **kwargs,
            key=key,
            available_resources=self._resources_per_worker,
            gpu_assignments=self._gpu_device_indices_by_worker,
        )
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_substances.py | <reponame>jaketanderson/openff-evaluator<filename>openff/evaluator/tests/test_substances.py
"""
Units tests for openff.evaluator.substances
"""
import numpy as np
import pytest
from openff.evaluator.substances import Component, ExactAmount, MoleFraction, Substance
@pytest.mark.parametrize(
    "smiles,expected",
    [
        ("C1=CC=CC=C1", "c1ccccc1"),
        ("c1ccccc1", "c1ccccc1"),
        ("[C@H](F)(Cl)Br", "F[C@@H](Cl)Br"),
        ("C(F)(Cl)Br", "FC(Cl)Br"),
    ],
)
def test_component_standardization(smiles, expected):
    """A component's SMILES pattern should be canonicalized on construction."""
    assert Component(smiles=smiles).smiles == expected
def test_add_mole_fractions():
    """Adding the same component twice should merge its mole fractions."""
    substance = Substance()
    for _ in range(2):
        substance.add_component(Component("C"), MoleFraction(0.5))
    assert substance.number_of_components == 1
    amounts = substance.get_amounts(substance.components[0])
    assert len(amounts) == 1
    (amount,) = amounts
    assert isinstance(amount, MoleFraction)
    assert np.isclose(amount.value, 1.0)
def test_multiple_amounts():
    """A component may carry both a mole fraction and an exact amount."""
    substance = Substance()
    sodium = Component("[Na+]")
    chloride = Component("[Cl-]")
    for component, fraction in ((sodium, 0.75), (chloride, 0.25)):
        substance.add_component(component, MoleFraction(fraction))
        substance.add_component(component, ExactAmount(1))
    assert substance.number_of_components == 2
    assert len(substance.get_amounts(sodium)) == 2
    assert len(substance.get_amounts(chloride)) == 2
    molecule_counts = substance.get_molecules_per_component(6)
    assert len(molecule_counts) == 2
    assert molecule_counts[sodium.identifier] == 4
    assert molecule_counts[chloride.identifier] == 2
def test_truncate_n_molecules():
    """Rounding overshoot should only be tolerated when truncation is on."""
    salt_fraction = 0.00267
    substance = Substance()
    substance.add_component(
        component=Component(smiles="[Na+]"),
        amount=MoleFraction(salt_fraction),
    )
    substance.add_component(
        component=Component(smiles="[Cl-]"),
        amount=MoleFraction(salt_fraction),
    )
    substance.add_component(
        component=Component(smiles="O"),
        amount=MoleFraction(1.0 - 2.0 * salt_fraction),
    )
    # Without truncation the overshoot must be reported as an error.
    with pytest.raises(ValueError):
        substance.get_molecules_per_component(1000, truncate_n_molecules=False)
    # With truncation the counts are clamped to the requested total.
    molecule_counts = substance.get_molecules_per_component(
        1000, truncate_n_molecules=True
    )
    assert molecule_counts == {"[Na+]{solv}": 3, "[Cl-]{solv}": 3, "O{solv}": 994}
def test_substance_len():
    """Duplicate components should not inflate the substance's length."""
    assert len(Substance.from_components("C", "CC", "CCC", "CCC")) == 3
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_utils/test_checkmol.py | import pytest
from openff.evaluator.utils.checkmol import (
ChemicalEnvironment,
analyse_functional_groups,
)
@pytest.mark.parametrize(
    "smiles, expected_environment",
    [
        ("O", ChemicalEnvironment.Aqueous),
        ("N", ChemicalEnvironment.Amine),
        ("C", ChemicalEnvironment.Alkane),
        ("CO", ChemicalEnvironment.Alcohol),
        ("C=O", ChemicalEnvironment.Aldehyde),
    ],
)
def test_analyse_functional_groups(smiles, expected_environment):
    """Each simple SMILES should be classified into its expected environment."""
    assert expected_environment in analyse_functional_groups(smiles)
def test_analyse_functional_groups_error():
    """Tests that the function returns None when an unknown
    smiles pattern is passed."""
    assert analyse_functional_groups("[Ar]") is None
|
jaketanderson/openff-evaluator | openff/evaluator/protocols/yank.py | <reponame>jaketanderson/openff-evaluator
"""
A collection of protocols for performing free energy calculations
using the YANK package.
"""
import abc
import logging
import os
import shutil
import threading
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import numpy as np
import yaml
from openff.units import unit
from openff.units.openmm import from_openmm, to_openmm
from openff.evaluator.attributes import UNDEFINED
from openff.evaluator.backends import ComputeResources
from openff.evaluator.forcefield import (
ParameterGradient,
ParameterGradientKey,
SmirnoffForceFieldSource,
)
from openff.evaluator.forcefield.system import ParameterizedSystem
from openff.evaluator.protocols.openmm import _compute_gradients
from openff.evaluator.substances import Component, Substance
from openff.evaluator.thermodynamics import ThermodynamicState
from openff.evaluator.utils import timeseries
from openff.evaluator.utils.observables import (
Observable,
ObservableArray,
ObservableFrame,
ObservableType,
)
from openff.evaluator.utils.openmm import disable_pbc, setup_platform_with_resources
from openff.evaluator.utils.timeseries import (
TimeSeriesStatistics,
get_uncorrelated_indices,
)
from openff.evaluator.utils.utils import temporarily_change_directory
from openff.evaluator.workflow import Protocol, workflow_protocol
from openff.evaluator.workflow.attributes import (
InequalityMergeBehaviour,
InputAttribute,
OutputAttribute,
)
if TYPE_CHECKING:
import mdtraj
from openff.toolkit.topology import Topology
from openff.toolkit.typing.engines.smirnoff.forcefield import ForceField
logger = logging.getLogger(__name__)
class BaseYankProtocol(Protocol, abc.ABC):
    """An abstract base class for protocols which will perform a set of
    alchemical free energy simulations using the YANK framework.
    """
    thermodynamic_state = InputAttribute(
        docstring="The state at which to run the calculations.",
        type_hint=ThermodynamicState,
        default_value=UNDEFINED,
    )
    number_of_equilibration_iterations = InputAttribute(
        docstring="The number of iterations used for equilibration before production "
        "run. Only post-equilibration iterations are written to file.",
        type_hint=int,
        merge_behavior=InequalityMergeBehaviour.LargestValue,
        default_value=1,
    )
    number_of_iterations = InputAttribute(
        docstring="The number of YANK iterations to perform.",
        type_hint=int,
        merge_behavior=InequalityMergeBehaviour.LargestValue,
        default_value=5000,
    )
    steps_per_iteration = InputAttribute(
        docstring="The number of steps per YANK iteration to perform.",
        type_hint=int,
        merge_behavior=InequalityMergeBehaviour.LargestValue,
        default_value=500,
    )
    checkpoint_interval = InputAttribute(
        docstring="The number of iterations between saving YANK checkpoint files.",
        type_hint=int,
        merge_behavior=InequalityMergeBehaviour.SmallestValue,
        default_value=1,
    )
    timestep = InputAttribute(
        docstring="The length of the timestep to take.",
        type_hint=unit.Quantity,
        merge_behavior=InequalityMergeBehaviour.SmallestValue,
        default_value=2 * unit.femtosecond,
    )
    verbose = InputAttribute(
        docstring="Controls whether or not to run YANK at high verbosity.",
        type_hint=bool,
        default_value=False,
    )
    setup_only = InputAttribute(
        docstring="If true, YANK will only create and validate the setup files, "
        "but not actually run any simulations. This argument is mainly "
        "only to be used for testing purposes.",
        type_hint=bool,
        default_value=False,
    )
    gradient_parameters = InputAttribute(
        docstring="An optional list of parameters to differentiate the estimated "
        "free energy with respect to.",
        type_hint=list,
        default_value=lambda: list(),
    )
    free_energy_difference = OutputAttribute(
        docstring="The estimated free energy difference between the two phases of"
        "interest.",
        type_hint=Observable,
    )
    def __init__(self, protocol_id):
        super(BaseYankProtocol, self).__init__(protocol_id)
        # Caches the analysed YANK output so that subclasses can pull e.g.
        # equilibration statistics after `_execute` has run.
        self._analysed_output = None
    @staticmethod
    def _get_residue_names_from_role(substances, coordinate_path, role):
        """Returns a list of all of the residue names of
        components which have been assigned a given role.
        Parameters
        ----------
        substances: list of Substance
            The substances which contains the components.
        coordinate_path: str
            The path to the coordinates which describe the systems
            topology.
        role: Component.Role, optional
            The role of the component to identify.
        Returns
        -------
        set of str
            The identified residue names, or the string "all" if no role
            was provided.
        """
        from openff.toolkit.topology import Molecule, Topology
        try:
            from openmm import app
        except ImportError:
            from simtk.openmm import app
        if role is None:
            return "all"
        unique_molecules = [
            Molecule.from_smiles(component.smiles)
            for substance in substances
            for component in substance.components
        ]
        openmm_topology = app.PDBFile(coordinate_path).topology
        topology = Topology.from_openmm(openmm_topology, unique_molecules)
        # Determine the smiles of all molecules in the system. We need to use
        # the toolkit to re-generate the smiles as later we will compare these
        # against more toolkit generated smiles.
        components = [
            component
            for substance in substances
            for component in substance.components
            if component.role == role
        ]
        component_smiles = [
            Molecule.from_smiles(component.smiles).to_smiles()
            for component in components
        ]
        residue_names = set()
        all_openmm_atoms = list(openmm_topology.atoms())
        # Find the resiude names of the molecules which have the correct
        # role.
        for topology_molecule in topology.topology_molecules:
            molecule_smiles = topology_molecule.reference_molecule.to_smiles()
            if molecule_smiles not in component_smiles:
                continue
            molecule_residue_names = set(
                [
                    all_openmm_atoms[topology_atom.topology_atom_index].residue.name
                    for topology_atom in topology_molecule.atoms
                ]
            )
            assert len(molecule_residue_names) == 1
            residue_names.update(molecule_residue_names)
        return residue_names
    @staticmethod
    def _get_dsl_from_role(substances, coordinate_path, role):
        """Returns an MDTraj DSL string which identifies those
        atoms which belong to components flagged with a specific
        role.
        Parameters
        ----------
        substances: list of Substance
            The substances which contains the components.
        coordinate_path: str
            The path to the coordinates which describe the systems
            topology.
        role: Component.Role, optional
            The role of the component to identify.
        Returns
        -------
        str
            The DSL string.
        """
        residue_names = BaseYankProtocol._get_residue_names_from_role(
            substances, coordinate_path, role
        )
        dsl_string = " or ".join(
            [f"resname {residue_name}" for residue_name in residue_names]
        )
        return dsl_string
    def _get_options_dictionary(self, available_resources):
        """Returns a dictionary of options which will be serialized
        to a yaml file and passed to YANK.
        Parameters
        ----------
        available_resources: ComputeResources
            The resources available to execute on.
        Returns
        -------
        dict of str and Any
            A yaml compatible dictionary of YANK options.
        """
        from openff.toolkit.utils import quantity_to_string
        platform_name = "CPU"
        if available_resources.number_of_gpus > 0:
            # A platform which runs on GPUs has been requested.
            from openff.evaluator.backends import ComputeResources
            toolkit_enum = ComputeResources.GPUToolkit(
                available_resources.preferred_gpu_toolkit
            )
            # A platform which runs on GPUs has been requested.
            platform_name = (
                "CUDA"
                if toolkit_enum == ComputeResources.GPUToolkit.CUDA
                # Fix: previously the enum member itself (rather than its
                # string name) was assigned here, leaking a non-string value
                # into the YAML serializable options dictionary.
                else "OpenCL"
            )
        return {
            "verbose": self.verbose,
            "output_dir": ".",
            "temperature": quantity_to_string(
                to_openmm(self.thermodynamic_state.temperature)
            ),
            "pressure": quantity_to_string(
                to_openmm(self.thermodynamic_state.pressure)
            ),
            "minimize": False,
            "number_of_equilibration_iterations": (
                self.number_of_equilibration_iterations
            ),
            "default_number_of_iterations": self.number_of_iterations,
            "default_nsteps_per_iteration": self.steps_per_iteration,
            "start_from_trailblaze_samples": False,
            "checkpoint_interval": self.checkpoint_interval,
            "default_timestep": quantity_to_string(to_openmm(self.timestep)),
            "annihilate_electrostatics": True,
            "annihilate_sterics": False,
            "platform": platform_name,
        }
    @abc.abstractmethod
    def _get_system_dictionary(self):
        """Returns a dictionary of the system which will be serialized
        to a yaml file and passed to YANK. Only a single system may be
        specified.
        Returns
        -------
        dict of str and Any
            A yaml compatible dictionary of YANK systems.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def _get_protocol_dictionary(self):
        """Returns a dictionary of the protocol which will be serialized
        to a yaml file and passed to YANK. Only a single protocol may be
        specified.
        Returns
        -------
        dict of str and Any
            A yaml compatible dictionary of a YANK protocol.
        """
        raise NotImplementedError()
    def _get_experiments_dictionary(self):
        """Returns a dictionary of the experiments which will be serialized
        to a yaml file and passed to YANK. Only a single experiment may be
        specified.
        Returns
        -------
        dict of str and Any
            A yaml compatible dictionary of a YANK experiment.
        """
        # The full dictionaries are built only so that their single keys can
        # be referenced by name in the experiment definition.
        system_dictionary = self._get_system_dictionary()
        system_key = next(iter(system_dictionary))
        protocol_dictionary = self._get_protocol_dictionary()
        protocol_key = next(iter(protocol_dictionary))
        return {"system": system_key, "protocol": protocol_key}
    def _get_full_input_dictionary(self, available_resources):
        """Returns a dictionary of the full YANK inputs which will be serialized
        to a yaml file and passed to YANK
        Parameters
        ----------
        available_resources: ComputeResources
            The resources available to execute on.
        Returns
        -------
        dict of str and Any
            A yaml compatible dictionary of a YANK input file.
        """
        return {
            "options": self._get_options_dictionary(available_resources),
            "systems": self._get_system_dictionary(),
            "protocols": self._get_protocol_dictionary(),
            "experiments": self._get_experiments_dictionary(),
        }
    def _time_series_statistics(self, phase: str) -> TimeSeriesStatistics:
        """Returns the time series statistics (such as the equilibration time) for
        a particular phase."""
        equilibration = self._analysed_output["equilibration"][phase]
        n_expected = self.number_of_iterations
        uncorrelated_indices = get_uncorrelated_indices(
            n_expected - equilibration["discarded_from_start"],
            equilibration["subsample_rate"],
        )
        time_series_statistics = TimeSeriesStatistics(
            n_total_points=n_expected,
            n_uncorrelated_points=len(uncorrelated_indices),
            statistical_inefficiency=equilibration["subsample_rate"],
            equilibration_index=equilibration["discarded_from_start"],
        )
        return time_series_statistics
    @staticmethod
    def _extract_trajectory(
        checkpoint_path: str,
        output_trajectory_path: Optional[str],
        statistics: TimeSeriesStatistics,
        state_index: int = 0,
    ) -> "mdtraj.Trajectory":
        """Extracts the stored trajectory of the 'initial' state from a
        yank `.nc` checkpoint file and stores it to disk as a `.dcd` file.
        Parameters
        ----------
        checkpoint_path
            The path to the yank `.nc` file
        output_trajectory_path
            The path to optionally store the extracted trajectory at.
        statistics
            Statistics about the time series to use to decorrelate and remove
            un-equilibrated samples.
        """
        from yank.analyze import extract_trajectory
        trajectory = extract_trajectory(
            checkpoint_path, state_index=state_index, image_molecules=True
        )
        # Drop un-equilibrated frames then decorrelate the remainder.
        trajectory = trajectory[statistics.equilibration_index :]
        uncorrelated_indices = timeseries.get_uncorrelated_indices(
            statistics.n_total_points - statistics.equilibration_index,
            statistics.statistical_inefficiency,
        )
        trajectory = trajectory[np.array(uncorrelated_indices)]
        if output_trajectory_path is not None:
            trajectory.save_dcd(output_trajectory_path)
        return trajectory
    @staticmethod
    def _run_yank(directory, available_resources, setup_only) -> Dict[str, Any]:
        """Runs YANK within the specified directory which contains a `yank.yaml`
        input file.
        Parameters
        ----------
        directory: str
            The directory within which to run yank.
        available_resources: ComputeResources
            The compute resources available to yank.
        setup_only: bool
            If true, YANK will only create and validate the setup files,
            but not actually run any simulations. This argument is mainly
            only to be used for testing purposes.
        Returns
        -------
        The analysed output of the yank calculation.
        """
        from yank.analyze import ExperimentAnalyzer
        from yank.experiment import ExperimentBuilder
        with temporarily_change_directory(directory):
            # Set the default properties on the desired platform
            # before calling into yank.
            setup_platform_with_resources(available_resources)
            exp_builder = ExperimentBuilder("yank.yaml")
            if setup_only is True:
                try:
                    from openmm import unit as openmm_unit
                except ImportError:
                    from simtk.openmm import unit as openmm_unit
                # Return a stub result so callers can proceed without having
                # actually run any simulations.
                return {
                    "free_energy": {
                        "free_energy_diff_unit": 0.0 * openmm_unit.kilojoules_per_mole,
                        "free_energy_diff_error_unit": 0.0
                        * openmm_unit.kilojoules_per_mole,
                    }
                }
            exp_builder.run_experiments()
            analyzer = ExperimentAnalyzer("experiments")
            analysed_output = analyzer.auto_analyze()
        return analysed_output
    @staticmethod
    def _run_yank_as_process(queue, directory, available_resources, setup_only):
        """A wrapper around the `_run_yank` method which takes
        a `multiprocessing.Queue` as input, thereby allowing it
        to be launched from a separate process and still return
        it's output back to the main process.
        Parameters
        ----------
        queue: multiprocessing.Queue
            The queue object which will communicate with the
            launched process.
        directory: str
            The directory within which to run yank.
        available_resources: ComputeResources
            The compute resources available to yank.
        setup_only: bool
            If true, YANK will only create and validate the setup files,
            but not actually run any simulations. This argument is mainly
            only to be used for testing purposes.
        """
        analysed_output = None
        exception = None
        try:
            analysed_output = BaseYankProtocol._run_yank(
                directory, available_resources, setup_only
            )
        except Exception as e:
            # The exception is shipped back through the queue rather than
            # raised, so the parent process can re-raise it.
            exception = e
        queue.put((analysed_output, exception))
    def _compute_state_energy_gradients(
        self,
        trajectory: "mdtraj.Trajectory",
        topology: "Topology",
        force_field: "ForceField",
        enable_pbc: bool,
        compute_resources: ComputeResources,
    ) -> List[ParameterGradient]:
        """Computes the value of <dU / d theta> for a specified trajectory and for
        each force field parameter (theta) of interest.
        Parameters
        ----------
        trajectory
            The trajectory of interest.
        topology
            The topology of the system.
        force_field
            The force field containing the parameters of interest.
        enable_pbc
            Whether periodic boundary conditions should be enabled when evaluating
            the potential energies of each frame and their gradients.
        compute_resources
            The resources available when computing the gradients.
        Returns
        -------
        The average gradient of the potential energy with respect to each force
        field parameter of interest.
        """
        # Mock an observable frame to store the gradients in
        observables = ObservableFrame(
            {
                ObservableType.PotentialEnergy: ObservableArray(
                    value=np.zeros((len(trajectory), 1)) * unit.kilojoule / unit.mole
                )
            }
        )
        # Compute the gradient in the first solvent.
        _compute_gradients(
            self.gradient_parameters,
            observables,
            force_field,
            self.thermodynamic_state,
            topology,
            trajectory,
            compute_resources,
            enable_pbc,
        )
        return [
            ParameterGradient(key=gradient.key, value=gradient.value.mean().item())
            for gradient in observables[ObservableType.PotentialEnergy].gradients
        ]
    def _execute(self, directory, available_resources):
        yaml_filename = os.path.join(directory, "yank.yaml")
        # Create the yank yaml input file from a dictionary of options.
        with open(yaml_filename, "w") as file:
            yaml.dump(
                self._get_full_input_dictionary(available_resources),
                file,
                sort_keys=False,
            )
        setup_only = self.setup_only
        # Yank is not safe to be called from anything other than the main thread.
        # If the current thread is not detected as the main one, then yank should
        # be spun up in a new process which should itself be safe to run yank in.
        if threading.current_thread() is threading.main_thread():
            logger.info("Launching YANK in the main thread.")
            analysed_output = self._run_yank(directory, available_resources, setup_only)
        else:
            from multiprocessing import Process, Queue
            logger.info("Launching YANK in a new process.")
            # Create a queue to pass the results back to the main process.
            queue = Queue()
            # Create the process within which yank will run.
            # noinspection PyTypeChecker
            process = Process(
                target=BaseYankProtocol._run_yank_as_process,
                args=[queue, directory, available_resources, setup_only],
            )
            # Start the process and gather back the output.
            process.start()
            analysed_output, exception = queue.get()
            process.join()
            if exception is not None:
                raise exception
        free_energy_difference = analysed_output["free_energy"]["free_energy_diff_unit"]
        free_energy_difference_std = analysed_output["free_energy"][
            "free_energy_diff_error_unit"
        ]
        self._analysed_output = analysed_output
        self.free_energy_difference = Observable(
            value=from_openmm(free_energy_difference).plus_minus(
                from_openmm(free_energy_difference_std)
            )
        )
    def validate(self, attribute_type=None):
        super(BaseYankProtocol, self).validate(attribute_type)
        if self.checkpoint_interval != 1:
            raise ValueError(
                "The checkpoint interval must currently be set to one due to a bug in "
                "how YANK extracts trajectories from checkpoint files."
            )
@workflow_protocol()
class LigandReceptorYankProtocol(BaseYankProtocol):
"""A protocol for performing ligand-receptor alchemical free energy
calculations using the YANK framework.
"""
class RestraintType(Enum):
"""The types of ligand restraints available within yank."""
Harmonic = "Harmonic"
FlatBottom = "FlatBottom"
ligand_residue_name = InputAttribute(
docstring="The residue name of the ligand.",
type_hint=str,
default_value=UNDEFINED,
)
receptor_residue_name = InputAttribute(
docstring="The residue name of the receptor.",
type_hint=str,
default_value=UNDEFINED,
)
solvated_ligand_coordinates = InputAttribute(
docstring="The file path to the solvated ligand coordinates.",
type_hint=str,
default_value=UNDEFINED,
)
solvated_ligand_system = InputAttribute(
docstring="The parameterized solvated ligand system object.",
type_hint=ParameterizedSystem,
default_value=UNDEFINED,
)
solvated_complex_coordinates = InputAttribute(
docstring="The file path to the solvated complex coordinates.",
type_hint=str,
default_value=UNDEFINED,
)
solvated_complex_system = InputAttribute(
docstring="The parameterized solvated complex system object.",
type_hint=ParameterizedSystem,
default_value=UNDEFINED,
)
force_field_path = InputAttribute(
docstring="The path to the force field which defines the charge method "
"to use for the calculation.",
type_hint=str,
default_value=UNDEFINED,
)
apply_restraints = InputAttribute(
docstring="Determines whether the ligand should be explicitly restrained to the "
"receptor in order to stop the ligand from temporarily unbinding.",
type_hint=bool,
default_value=True,
)
restraint_type = InputAttribute(
docstring="The type of ligand restraint applied, provided that "
"`apply_restraints` is `True`",
type_hint=RestraintType,
default_value=RestraintType.Harmonic,
)
ligand_electrostatic_lambdas = InputAttribute(
docstring="The list of electrostatic alchemical states that YANK should sample "
"at when calculating the free energy of the solvated ligand. If no option is "
"set, YANK will use `trailblaze` algorithm to determine this option "
"automatically.",
type_hint=list,
optional=True,
default_value=UNDEFINED,
)
ligand_steric_lambdas = InputAttribute(
docstring="The list of steric alchemical states that YANK should sample "
"at when calculating the free energy of the solvated ligand. If no option is "
"set, YANK will use `trailblaze` algorithm to determine this option "
"automatically.",
type_hint=list,
optional=True,
default_value=UNDEFINED,
)
complex_electrostatic_lambdas = InputAttribute(
docstring="The list of electrostatic alchemical states that YANK should sample "
"at when calculating the free energy of the ligand in complex with the "
"receptor. If no option is set, YANK will use `trailblaze` algorithm to "
"determine this option automatically.",
type_hint=list,
optional=True,
default_value=UNDEFINED,
)
complex_steric_lambdas = InputAttribute(
docstring="The list of steric alchemical states that YANK should sample "
"at when calculating the free energy of the ligand in complex with the "
"receptor. If no option is set, YANK will use `trailblaze` algorithm to "
"determine this option automatically.",
type_hint=list,
optional=True,
default_value=UNDEFINED,
)
solvated_ligand_trajectory_path = OutputAttribute(
docstring="The file path to the generated ligand trajectory.", type_hint=str
)
solvated_complex_trajectory_path = OutputAttribute(
docstring="The file path to the generated ligand trajectory.", type_hint=str
)
def __init__(self, protocol_id):
"""Constructs a new LigandReceptorYankProtocol object."""
super().__init__(protocol_id)
self._local_ligand_coordinates = "ligand.pdb"
self._local_ligand_system = "ligand.xml"
self._local_complex_coordinates = "complex.pdb"
self._local_complex_system = "complex.xml"
def _get_solvent_dictionary(self):
"""Returns a dictionary of the solvent which will be serialized
to a yaml file and passed to YANK. In most cases, this should
just be passing force field settings over, such as PME settings.
Returns
-------
dict of str and Any
A yaml compatible dictionary of YANK solvents.
"""
with open(self.force_field_path, "r") as file:
force_field_source = SmirnoffForceFieldSource.parse_json(file.read())
force_field = force_field_source.to_force_field()
charge_method = force_field.get_parameter_handler("Electrostatics").method
if charge_method.lower() != "pme":
raise ValueError("Currently only PME electrostatics are supported.")
return {"default": {"nonbonded_method": charge_method}}
def _get_system_dictionary(self):
solvent_dictionary = self._get_solvent_dictionary()
solvent_key = next(iter(solvent_dictionary))
host_guest_dictionary = {
"phase1_path": [
self._local_complex_system,
self._local_complex_coordinates,
],
"phase2_path": [self._local_ligand_system, self._local_ligand_coordinates],
"ligand_dsl": f"resname {self.ligand_residue_name}",
"solvent": solvent_key,
}
return {"host-guest": host_guest_dictionary}
def _get_protocol_dictionary(self):
ligand_protocol_dictionary = {
"lambda_electrostatics": self.ligand_electrostatic_lambdas,
"lambda_sterics": self.ligand_steric_lambdas,
}
if (
self.ligand_electrostatic_lambdas == UNDEFINED
and self.ligand_steric_lambdas == UNDEFINED
):
ligand_protocol_dictionary = "auto"
elif (
self.ligand_electrostatic_lambdas != UNDEFINED
and self.ligand_steric_lambdas == UNDEFINED
) or (
self.ligand_electrostatic_lambdas == UNDEFINED
and self.ligand_steric_lambdas != UNDEFINED
):
raise ValueError(
"Either both of `ligand_electrostatic_lambdas` and "
"`ligand_steric_lambdas` must be set, or neither "
"must be set."
)
complex_protocol_dictionary = {
"lambda_electrostatics": self.complex_electrostatic_lambdas,
"lambda_sterics": self.complex_steric_lambdas,
}
if (
self.complex_electrostatic_lambdas == UNDEFINED
and self.complex_steric_lambdas == UNDEFINED
):
complex_protocol_dictionary = "auto"
elif (
self.complex_electrostatic_lambdas != UNDEFINED
and self.complex_steric_lambdas == UNDEFINED
) or (
self.complex_electrostatic_lambdas == UNDEFINED
and self.complex_steric_lambdas != UNDEFINED
):
raise ValueError(
"Either both of `complex_electrostatic_lambdas` and "
"`complex_steric_lambdas` must be set, or neither "
"must be set."
)
absolute_binding_dictionary = {
"complex": {"alchemical_path": complex_protocol_dictionary},
"solvent": {"alchemical_path": ligand_protocol_dictionary},
}
return {"absolute_binding_dictionary": absolute_binding_dictionary}
def _get_experiments_dictionary(self):
    """Extends the base ``experiments`` section, optionally adding a
    restraint between the heavy atoms of the ligand and the receptor.

    Returns
    -------
    dict of str and Any
        The dictionary to serialize under the ``experiments`` YANK option.
    """
    experiments_dictionary = super(
        LigandReceptorYankProtocol, self
    )._get_experiments_dictionary()

    if self.apply_restraints:
        # Restrain heavy atoms only - `mass > 1.5` excludes hydrogens.
        heavy_atom_dsl = "(resname {}) and (mass > 1.5)"

        experiments_dictionary["restraint"] = {
            "restrained_ligand_atoms": heavy_atom_dsl.format(
                self.ligand_residue_name
            ),
            "restrained_receptor_atoms": heavy_atom_dsl.format(
                self.receptor_residue_name
            ),
            "type": self.restraint_type.value,
        }

    return experiments_dictionary
def _get_full_input_dictionary(self, available_resources):
    """Builds the full YANK input dictionary, adding the solvent
    definitions on top of the base protocol's options."""
    return {
        **super(LigandReceptorYankProtocol, self)._get_full_input_dictionary(
            available_resources
        ),
        "solvents": self._get_solvent_dictionary(),
    }
def _execute(self, directory, available_resources):
    """Runs the YANK host-guest calculation and extracts the decorrelated
    ligand / complex trajectories from the resulting checkpoints."""
    # Because of quirks in where Yank looks files while doing temporary
    # directory changes, we need to copy the coordinate files locally so
    # they are correctly found.
    files_to_localize = [
        (self.solvated_ligand_coordinates, self._local_ligand_coordinates),
        (self.solvated_ligand_system.system_path, self._local_ligand_system),
        (self.solvated_complex_coordinates, self._local_complex_coordinates),
        (self.solvated_complex_system.system_path, self._local_complex_system),
    ]

    for source_path, local_name in files_to_localize:
        shutil.copyfile(source_path, os.path.join(directory, local_name))

    super(LigandReceptorYankProtocol, self)._execute(directory, available_resources)

    if self.setup_only:
        return

    self.solvated_ligand_trajectory_path = os.path.join(directory, "ligand.dcd")
    self.solvated_complex_trajectory_path = os.path.join(directory, "complex.dcd")

    # Extract a decorrelated trajectory from each phase's checkpoint.
    phases = [
        ("solvent", "solvent.nc", self.solvated_ligand_trajectory_path),
        ("complex", "complex.nc", self.solvated_complex_trajectory_path),
    ]

    for phase_name, checkpoint_name, output_path in phases:
        self._extract_trajectory(
            os.path.join(directory, "experiments", checkpoint_name),
            output_path,
            self._time_series_statistics(phase_name),
        )
@workflow_protocol()
class SolvationYankProtocol(BaseYankProtocol):
    """A protocol for estimating the change in free energy upon transferring a solute
    into a solvent (referred to as solvent 1) from a second solvent (referred to as
    solvent 2) by performing an alchemical free energy calculation using the YANK
    framework.

    This protocol can be used for box solvation free energies (setting the `solvent_1`
    input to the solvent of interest and setting `solvent_2` as an empty `Substance`) or
    transfer free energies (setting both the `solvent_1` and `solvent_2` inputs to
    different solvents).
    """

    solute = InputAttribute(
        docstring="The substance describing the composition of "
        "the solute. This should include the solute "
        "molecule as well as any counter ions.",
        type_hint=Substance,
        default_value=UNDEFINED,
    )

    solvent_1 = InputAttribute(
        docstring="The substance describing the composition of the first solvent.",
        type_hint=Substance,
        default_value=UNDEFINED,
    )
    solvent_2 = InputAttribute(
        docstring="The substance describing the composition of the second solvent.",
        type_hint=Substance,
        default_value=UNDEFINED,
    )

    solution_1_coordinates = InputAttribute(
        docstring="The file path to the coordinates of the solute embedded in the "
        "first solvent.",
        type_hint=str,
        default_value=UNDEFINED,
    )
    solution_1_system = InputAttribute(
        docstring="The parameterized system object of the solute embedded in the "
        "first solvent.",
        type_hint=ParameterizedSystem,
        default_value=UNDEFINED,
    )

    solution_2_coordinates = InputAttribute(
        docstring="The file path to the coordinates of the solute embedded in the "
        "second solvent.",
        type_hint=str,
        default_value=UNDEFINED,
    )
    solution_2_system = InputAttribute(
        docstring="The parameterized system object of the solute embedded in the "
        "second solvent.",
        type_hint=ParameterizedSystem,
        default_value=UNDEFINED,
    )

    electrostatic_lambdas_1 = InputAttribute(
        docstring="The list of electrostatic alchemical states that YANK should sample "
        "at. These values will be passed to the YANK `lambda_electrostatics` option. "
        "If no option is set, YANK will use `trailblaze` algorithm to determine "
        "this option automatically.",
        type_hint=list,
        optional=True,
        default_value=UNDEFINED,
    )
    steric_lambdas_1 = InputAttribute(
        docstring="The list of steric alchemical states that YANK should sample at. "
        "These values will be passed to the YANK `lambda_sterics` option. "
        "If no option is set, YANK will use `trailblaze` algorithm to determine "
        "this option automatically.",
        type_hint=list,
        optional=True,
        default_value=UNDEFINED,
    )
    electrostatic_lambdas_2 = InputAttribute(
        docstring="The list of electrostatic alchemical states that YANK should sample "
        "at. These values will be passed to the YANK `lambda_electrostatics` option. "
        "If no option is set, YANK will use `trailblaze` algorithm to determine "
        "this option automatically.",
        type_hint=list,
        optional=True,
        default_value=UNDEFINED,
    )
    steric_lambdas_2 = InputAttribute(
        docstring="The list of steric alchemical states that YANK should sample at. "
        "These values will be passed to the YANK `lambda_sterics` option. "
        "If no option is set, YANK will use `trailblaze` algorithm to determine "
        "this option automatically.",
        type_hint=list,
        optional=True,
        default_value=UNDEFINED,
    )

    solution_1_free_energy = OutputAttribute(
        docstring="The free energy change of transforming an ideal solute molecule "
        "into a fully interacting molecule in the first solvent.",
        type_hint=Observable,
    )
    solvent_1_coordinate_path = OutputAttribute(
        docstring="The file path to the coordinates of only the first solvent.",
        type_hint=str,
    )
    solvent_1_trajectory_path = OutputAttribute(
        docstring="The file path to the trajectory containing only the first solvent.",
        type_hint=str,
    )
    solution_1_trajectory_path = OutputAttribute(
        docstring="The file path to the trajectory containing the solute in the first "
        "solvent.",
        type_hint=str,
    )

    solution_2_free_energy = OutputAttribute(
        docstring="The free energy change of transforming an ideal solute molecule "
        "into a fully interacting molecule in the second solvent.",
        type_hint=Observable,
    )
    solvent_2_coordinate_path = OutputAttribute(
        docstring="The file path to the coordinates of only the second solvent.",
        type_hint=str,
    )
    solvent_2_trajectory_path = OutputAttribute(
        docstring="The file path to the trajectory containing only the second solvent.",
        type_hint=str,
    )
    solution_2_trajectory_path = OutputAttribute(
        docstring="The file path to the trajectory containing the solute in the second "
        "solvent.",
        type_hint=str,
    )

    free_energy_difference = OutputAttribute(
        docstring="The estimated free energy difference between the solute in the"
        "first solvent and the second solvent (i.e. ΔG = ΔG_1 - ΔG_2).",
        type_hint=Observable,
    )

    def __init__(self, protocol_id):
        super().__init__(protocol_id)

        # The local file names which YANK will look for in its working
        # directory - the real input files get copied to these in `_execute`.
        self._local_solution_1_coordinates = "solvent_1.pdb"
        self._local_solution_1_system = "solvent_1.xml"

        self._local_solution_2_coordinates = "solvent_2.pdb"
        self._local_solution_2_system = "solvent_2.xml"

    def _get_system_dictionary(self):
        """Builds the YANK ``systems`` section, pointing YANK at the locally
        copied system / coordinate files and describing (via a DSL selection)
        which atoms belong to the solvents."""
        solvent_1_dsl = self._get_dsl_from_role(
            [self.solute, self.solvent_1],
            self.solution_1_coordinates,
            Component.Role.Solvent,
        )
        solvent_2_dsl = self._get_dsl_from_role(
            [self.solute, self.solvent_2],
            self.solution_2_coordinates,
            Component.Role.Solvent,
        )

        # An empty DSL corresponds to a vacuum phase and is omitted.
        full_solvent_dsl_components = [
            dsl for dsl in (solvent_1_dsl, solvent_2_dsl) if len(dsl) > 0
        ]

        solvation_system_dictionary = {
            "phase1_path": [
                self._local_solution_1_system,
                self._local_solution_1_coordinates,
            ],
            "phase2_path": [
                self._local_solution_2_system,
                self._local_solution_2_coordinates,
            ],
            "solvent_dsl": " or ".join(full_solvent_dsl_components),
        }

        return {"solvation-system": solvation_system_dictionary}

    def _get_protocol_dictionary(self):
        """Builds the YANK ``protocols`` section, using an explicit alchemical
        path for a phase when both of its lambda lists were provided, or
        YANK's automatic ("trailblaze") selection when neither was.

        Raises
        ------
        ValueError
            If only one of the electrostatic / steric lambda lists of a
            phase was set.
        """

        def alchemical_path(electrostatics, sterics, index):
            # Helper: validate one (electrostatics, sterics) pair and build
            # its alchemical path entry. Previously duplicated per phase.
            if electrostatics == UNDEFINED and sterics == UNDEFINED:
                return "auto"

            if (electrostatics == UNDEFINED) != (sterics == UNDEFINED):
                raise ValueError(
                    f"Either both of `electrostatic_lambdas_{index}` and "
                    f"`steric_lambdas_{index}` must be set, or neither "
                    "must be set."
                )

            return {
                "lambda_electrostatics": electrostatics,
                "lambda_sterics": sterics,
            }

        protocol_dictionary = {
            "solvent1": {
                "alchemical_path": alchemical_path(
                    self.electrostatic_lambdas_1, self.steric_lambdas_1, 1
                )
            },
            "solvent2": {
                "alchemical_path": alchemical_path(
                    self.electrostatic_lambdas_2, self.steric_lambdas_2, 2
                )
            },
        }

        return {"solvation-protocol": protocol_dictionary}

    @classmethod
    def _extract_solvent_trajectory(
        cls,
        checkpoint_path: str,
        output_trajectory_path: Optional[str],
        statistics: TimeSeriesStatistics,
        state_index: int = 0,
    ) -> "mdtraj.Trajectory":
        """Extracts the stored trajectory from a yank `.nc` checkpoint file,
        removes the solute, and optionally stores it to disk as a `.dcd` file.

        Parameters
        ----------
        checkpoint_path
            The path to the yank `.nc` file
        output_trajectory_path
            The path to optionally store the extracted trajectory at.
        statistics
            Statistics about the time series to use to decorrelate and remove
            un-equilibrated samples.
        state_index
            The state index to extract.
        """
        import openmmtools

        trajectory = cls._extract_trajectory(
            checkpoint_path, None, statistics, state_index
        )

        # The reporter records which particles were flagged for analysis
        # (the solute); everything else is solvent.
        reporter = None

        try:
            reporter = openmmtools.multistate.MultiStateReporter(
                checkpoint_path, open_mode="r"
            )
            solute_indices = reporter.analysis_particle_indices
        finally:
            if reporter is not None:
                reporter.close()

        solvent_indices = {*range(trajectory.n_atoms)} - set(solute_indices)
        solvent_trajectory = trajectory.atom_slice([*solvent_indices])

        if output_trajectory_path is not None:
            solvent_trajectory.save_dcd(output_trajectory_path)

        return solvent_trajectory

    def _analyze_phase(
        self,
        checkpoint_path: str,
        parameterized_system: ParameterizedSystem,
        phase_name: str,
        available_resources: ComputeResources,
    ) -> Tuple[
        Observable,
        "mdtraj.Trajectory",
        "mdtraj.Trajectory",
        Dict[ParameterGradientKey, ParameterGradient],
        Dict[ParameterGradientKey, ParameterGradient],
    ]:
        """Analyzes a particular phase, extracting the relevant free energies
        and computing the required free energies.

        Returns
        -------
            The phase free energy, the decorrelated solution trajectory, the
            decorrelated solvent-only trajectory, and the free energy
            gradients of the solution and of the solvent respectively (both
            empty when no ``gradient_parameters`` were requested).
        """
        from openff.toolkit.topology import Molecule, Topology

        free_energies = self._analysed_output["free_energy"]

        # Extract the free energy change (negated relative to the raw YANK
        # output, matching the sign convention of the protocol's outputs).
        free_energy = -Observable(
            from_openmm(
                (
                    free_energies[phase_name]["free_energy_diff"]
                    * free_energies[phase_name]["kT"]
                )
            ).plus_minus(
                from_openmm(
                    free_energies[phase_name]["free_energy_diff_error"]
                    * free_energies[phase_name]["kT"]
                )
            )
        )

        # Extract the statistical inefficiency of the data.
        time_series_statistics = self._time_series_statistics(phase_name)

        # Extract the solution and solvent trajectories.
        solution_system = parameterized_system

        solution_trajectory = self._extract_trajectory(
            checkpoint_path, None, time_series_statistics
        )
        # NOTE(review): the last state index is extracted here - presumably
        # the fully decoupled (solvent-only) state; confirm against YANK docs.
        solvent_trajectory = self._extract_solvent_trajectory(
            checkpoint_path,
            None,
            time_series_statistics,
            self._analysed_output["general"][phase_name]["nstates"] - 1,
        )
        solvent_topology_omm = solvent_trajectory.topology.to_openmm()
        solvent_topology = Topology.from_openmm(
            solvent_topology_omm,
            [
                Molecule.from_smiles(component.smiles)
                for component in solution_system.substance
            ],
        )

        # Optionally compute any gradients.
        if len(self.gradient_parameters) == 0:
            return free_energy, solution_trajectory, solvent_trajectory, {}, {}

        force_field_source = solution_system.force_field

        if not isinstance(force_field_source, SmirnoffForceFieldSource):
            raise ValueError(
                "Derivatives can only be computed for systems parameterized with "
                "SMIRNOFF force fields."
            )

        force_field = force_field_source.to_force_field()

        solution_gradients = {
            gradient.key: gradient
            for gradient in self._compute_state_energy_gradients(
                solution_trajectory,
                solution_system.topology,
                force_field,
                solvent_topology.n_topology_atoms != 0,
                available_resources,
            )
        }
        solvent_gradients = {
            gradient.key: gradient
            for gradient in self._compute_state_energy_gradients(
                solvent_trajectory,
                solvent_topology,
                force_field,
                solvent_topology.n_topology_atoms != 0,
                available_resources,
            )
        }

        return (
            free_energy,
            solution_trajectory,
            solvent_trajectory,
            solution_gradients,
            solvent_gradients,
        )

    def _execute(self, directory, available_resources):
        """Runs the YANK solvation calculation and populates this protocol's
        free energy and trajectory outputs."""
        try:
            from openmm import XmlSerializer
        except ImportError:
            from simtk.openmm import XmlSerializer

        solute_components = [
            component
            for component in self.solute.components
            if component.role == Component.Role.Solute
        ]

        solvent_1_components = [
            component
            for component in self.solvent_1.components
            if component.role == Component.Role.Solvent
        ]
        solvent_2_components = [
            component
            for component in self.solvent_2.components
            if component.role == Component.Role.Solvent
        ]

        if len(solute_components) != 1:
            raise ValueError(
                "There must only be a single component marked as a solute."
            )
        if len(solvent_1_components) == 0 and len(solvent_2_components) == 0:
            raise ValueError("At least one of the solvents must not be vacuum.")

        # Because of quirks in where Yank looks files while doing temporary
        # directory changes, we need to copy the coordinate files locally so
        # they are correctly found.
        shutil.copyfile(
            self.solution_1_coordinates,
            os.path.join(directory, self._local_solution_1_coordinates),
        )
        shutil.copyfile(
            self.solution_1_system.system_path,
            os.path.join(directory, self._local_solution_1_system),
        )

        shutil.copyfile(
            self.solution_2_coordinates,
            os.path.join(directory, self._local_solution_2_coordinates),
        )
        shutil.copyfile(
            self.solution_2_system.system_path,
            os.path.join(directory, self._local_solution_2_system),
        )

        # Disable the pbc of the any solvents which should be treated
        # as vacuum.
        vacuum_system_path = None

        if len(solvent_1_components) == 0:
            vacuum_system_path = self._local_solution_1_system
        elif len(solvent_2_components) == 0:
            vacuum_system_path = self._local_solution_2_system

        if vacuum_system_path is not None:
            logger.info(
                f"Disabling the periodic boundary conditions in {vacuum_system_path} "
                f"by setting the cutoff type to NoCutoff"
            )

            with open(os.path.join(directory, vacuum_system_path), "r") as file:
                vacuum_system = XmlSerializer.deserialize(file.read())

            disable_pbc(vacuum_system)

            with open(os.path.join(directory, vacuum_system_path), "w") as file:
                file.write(XmlSerializer.serialize(vacuum_system))

        # Set up the yank input file.
        super(SolvationYankProtocol, self)._execute(directory, available_resources)

        if self.setup_only:
            return

        # FIX: the free energies are stored on the declared output attributes
        # (`solution_*_free_energy`) - previously undeclared `solvent_*` names
        # were assigned instead.
        (
            self.solution_1_free_energy,
            solution_1_trajectory,
            solvent_1_trajectory,
            solution_1_gradients,
            solvent_1_gradients,
        ) = self._analyze_phase(
            os.path.join(directory, "experiments", "solvent1.nc"),
            self.solution_1_system,
            "solvent1",
            available_resources,
        )

        self.solution_1_trajectory_path = os.path.join(directory, "solution_1.dcd")
        solution_1_trajectory.save_dcd(self.solution_1_trajectory_path)

        self.solvent_1_coordinate_path = os.path.join(directory, "solvent_1.pdb")
        self.solvent_1_trajectory_path = os.path.join(directory, "solvent_1.dcd")

        solvent_1_trajectory[0].save_pdb(self.solvent_1_coordinate_path)

        if solvent_1_trajectory.n_atoms > 0:
            solvent_1_trajectory.save_dcd(self.solvent_1_trajectory_path)
        else:
            # A vacuum phase yields an empty trajectory - still create the
            # output file so downstream consumers find a path.
            with open(self.solvent_1_trajectory_path, "wb") as file:
                file.write(b"")

        (
            self.solution_2_free_energy,
            solution_2_trajectory,
            solvent_2_trajectory,
            solution_2_gradients,
            solvent_2_gradients,
        ) = self._analyze_phase(
            os.path.join(directory, "experiments", "solvent2.nc"),
            self.solution_2_system,
            "solvent2",
            available_resources,
        )

        self.solution_2_trajectory_path = os.path.join(directory, "solution_2.dcd")
        solution_2_trajectory.save_dcd(self.solution_2_trajectory_path)

        self.solvent_2_coordinate_path = os.path.join(directory, "solvent_2.pdb")
        self.solvent_2_trajectory_path = os.path.join(directory, "solvent_2.dcd")

        solvent_2_trajectory[0].save_pdb(self.solvent_2_coordinate_path)

        if solvent_2_trajectory.n_atoms > 0:
            solvent_2_trajectory.save_dcd(self.solvent_2_trajectory_path)
        else:
            with open(self.solvent_2_trajectory_path, "wb") as file:
                file.write(b"")

        # FIX: compute ΔG = ΔG_1 - ΔG_2 (per the `free_energy_difference`
        # docstring and the sanity check below) with the uncertainties
        # combined in quadrature - previously the not-yet-set
        # `self.free_energy_difference` was read here, which could never work.
        difference_value = (
            self.solution_1_free_energy.value - self.solution_2_free_energy.value
        )
        difference_error = (
            self.solution_1_free_energy.error**2
            + self.solution_2_free_energy.error**2
        ) ** 0.5

        self.free_energy_difference = Observable(
            difference_value.plus_minus(difference_error),
            gradients=[
                solution_1_gradients[key]
                - solvent_1_gradients[key]
                + solvent_2_gradients[key]
                - solution_2_gradients[key]
                for key in solvent_1_gradients
            ],
        )

        assert np.isclose(
            self.free_energy_difference.value,
            self.solution_1_free_energy.value - self.solution_2_free_energy.value,
        )
|
jaketanderson/openff-evaluator | openff/evaluator/workflow/plugins.py | """
An API for registering new workflow protocols.
Attributes
----------
registered_workflow_protocols: dict of str and type of Protocol
The workflow protocols which have been registered as being
available to use in property estimations.
"""
# Maps a protocol class name to the registered protocol class itself.
registered_workflow_protocols = {}


def register_workflow_protocol(protocol_class):
    """Registers a class as being a protocol which may be included
    in workflows.
    """
    from openff.evaluator.workflow.protocols import Protocol

    assert issubclass(protocol_class, Protocol)

    class_name = protocol_class.__name__

    if class_name in registered_workflow_protocols:
        raise ValueError(f"The {protocol_class} protocol is already registered.")

    registered_workflow_protocols[class_name] = protocol_class
def workflow_protocol():
    """A decorator which registers a class as being a protocol
    which may be included in workflows.
    """

    def decorator(protocol_class):
        # Registration is the side effect; the class itself is unchanged.
        register_workflow_protocol(protocol_class)
        return protocol_class

    return decorator
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_datasets/test_curation/test_freesolv.py | <reponame>jaketanderson/openff-evaluator
import pandas
from openff.evaluator.datasets.curation.components.freesolv import (
ImportFreeSolv,
ImportFreeSolvSchema,
)
def test_import_free_solv_data():
    """Tests that the FreeSolv data set can be imported from a
    remote source."""
    # Download the reference frame directly, then import through the
    # curation component and make sure nothing was dropped.
    reference_frame = ImportFreeSolv._download_free_solv()

    imported_frame = ImportFreeSolv.apply(pandas.DataFrame(), ImportFreeSolvSchema())

    assert imported_frame is not None and len(imported_frame) == len(reference_frame)
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_backends/test_dask.py | <filename>openff/evaluator/tests/test_backends/test_dask.py
import pytest
from openff.evaluator.backends import QueueWorkerResources
from openff.evaluator.backends.dask import (
DaskLSFBackend,
DaskPBSBackend,
DaskSLURMBackend,
_Multiprocessor,
)
from openff.evaluator.workflow.plugins import registered_workflow_protocols
def dummy_function(*args, **kwargs):
    """Echo back the sole positional argument (a trivial test payload)."""
    assert len(args) == 1
    (payload,) = args
    return payload
def test_dask_job_script_creation():
    """Test creating and starting a new dask LSF backend."""
    lsf_backend = DaskLSFBackend()

    lsf_backend.start()
    # A started backend must be able to render its queue submission script.
    assert lsf_backend.job_script() is not None
    lsf_backend.stop()
@pytest.mark.parametrize(
    "cluster_class", [DaskLSFBackend, DaskPBSBackend, DaskSLURMBackend]
)
def test_dask_jobqueue_backend_creation(cluster_class):
    """Test creating and starting a new dask jobqueue backend."""
    # A bare backend should start and stop cleanly.
    simple_backend = cluster_class()
    simple_backend.start()
    simple_backend.stop()

    # A GPU backend should inject its setup commands into the job script.
    gpu_resources = QueueWorkerResources(
        1, 1, preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA
    )
    setup_commands = ["module load cuda/9.2"]

    gpu_backend = cluster_class(
        resources_per_worker=gpu_resources,
        queue_name="gpuqueue",
        setup_script_commands=setup_commands,
    )

    gpu_backend.start()
    assert "module load cuda/9.2" in gpu_backend.job_script()
    gpu_backend.stop()
@pytest.mark.skip(reason="This code currently hangs only on travis.")
def test_multiprocessor():
    """A value should round-trip unchanged through the multiprocessor."""
    payload = 12345
    assert payload == _Multiprocessor.run(dummy_function, payload)
@pytest.mark.skip(reason="This code currently hangs only on travis.")
def test_lsf_wrapped_function():
    """`_wrapped_function` should forward its payload untouched."""
    available_resources = QueueWorkerResources()

    protocols_to_import = [
        f"{protocol_class.__module__}.{protocol_class.__qualname__}"
        for protocol_class in registered_workflow_protocols.values()
    ]

    expected_output = 12345

    result = DaskLSFBackend._wrapped_function(
        dummy_function,
        expected_output,
        available_resources=available_resources,
        registered_workflow_protocols=protocols_to_import,
        per_worker_logging=True,
        gpu_assignments=None,
    )

    assert expected_output == result
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_client.py | """
Units tests for the openff.evaluator.client module.
"""
import tempfile
import pytest
from openff.evaluator.backends.dask import DaskLocalCluster
from openff.evaluator.client import EvaluatorClient, Request, RequestResult
from openff.evaluator.datasets import PhysicalPropertyDataSet
from openff.evaluator.forcefield import (
LigParGenForceFieldSource,
SmirnoffForceFieldSource,
TLeapForceFieldSource,
)
from openff.evaluator.properties import (
Density,
DielectricConstant,
EnthalpyOfMixing,
EnthalpyOfVaporization,
ExcessMolarVolume,
)
from openff.evaluator.server import EvaluatorServer
from openff.evaluator.tests.utils import create_dummy_property
from openff.evaluator.utils.utils import temporarily_change_directory
# The physical property types exercised by the client tests below; each test
# populates a data set with one dummy property of every type listed here.
property_types = [
    Density,
    DielectricConstant,
    EnthalpyOfMixing,
    EnthalpyOfVaporization,
    ExcessMolarVolume,
]
def test_default_options():
    """Test creating the default estimation options."""
    data_set = PhysicalPropertyDataSet()
    force_field_source = SmirnoffForceFieldSource.from_path(
        "smirnoff99Frosst-1.1.0.offxml"
    )

    # One dummy property of each supported type.
    for property_type in property_types:
        data_set.add_properties(create_dummy_property(property_type))

    options = EvaluatorClient.default_request_options(data_set, force_field_source)
    options.validate()

    assert len(options.calculation_layers) == 2
    assert len(options.calculation_schemas) == len(property_types)
    # Every property type must have a schema for every calculation layer.
    assert all(
        len(schemas_by_layer) == len(options.calculation_layers)
        for schemas_by_layer in options.calculation_schemas.values()
    )
@pytest.mark.parametrize(
    "force_field_source, expected_protocol_type",
    [
        (
            SmirnoffForceFieldSource.from_path("smirnoff99Frosst-1.1.0.offxml"),
            "BuildSmirnoffSystem",
        ),
        (TLeapForceFieldSource(), "BuildTLeapSystem"),
        (LigParGenForceFieldSource(), "BuildLigParGenSystem"),
    ],
)
def test_protocol_replacement(force_field_source, expected_protocol_type):
    """The generic system-building protocol should be replaced by the
    implementation matching the supplied force field source."""
    data_set = PhysicalPropertyDataSet()

    for property_type in property_types:
        data_set.add_properties(create_dummy_property(property_type))

    options = EvaluatorClient.default_request_options(data_set, force_field_source)
    options_json = options.json(format=True)

    # Idiomatic membership tests instead of `str.find` comparisons.
    assert 'BaseBuildSystem"' not in options_json
    assert expected_protocol_type in options_json
def test_submission():
    """Round-trip an empty data set through a local server instance."""
    with tempfile.TemporaryDirectory() as directory:
        with temporarily_change_directory(directory):
            with DaskLocalCluster() as calculation_backend:
                # Spin up a server instance.
                server = EvaluatorServer(
                    calculation_backend=calculation_backend,
                    working_directory=directory,
                )

                with server:
                    # Connect a client.
                    client = EvaluatorClient()

                    # Submit an empty data set.
                    force_field_source = SmirnoffForceFieldSource.from_path(
                        "smirnoff99Frosst-1.1.0.offxml"
                    )

                    request, error = client.request_estimate(
                        PhysicalPropertyDataSet(), force_field_source
                    )

                    assert error is None
                    assert isinstance(request, Request)

                    result, error = request.results(polling_interval=0.01)

                    assert error is None
                    assert isinstance(result, RequestResult)
|
jaketanderson/openff-evaluator | openff/evaluator/utils/serialization.py | <gh_stars>10-100
"""
A collection of classes which aid in serializing data types.
"""
import importlib
import inspect
import json
from abc import ABC, abstractmethod
from datetime import datetime
from enum import Enum
import dateutil.parser
import numpy as np
from openff.units import unit
def _type_string_to_object(type_string):
if type_string.startswith("evaluator."):
# Make files produced with the beta evaluator release compatible with
# the full evaluator release.
type_string = type_string.replace("evaluator.", "openff.evaluator.")
if type_string == "openff.evaluator.unit.Unit":
return unit.Unit
if type_string == "openff.evaluator.unit.Quantity":
return unit.Quantity
if type_string == "openff.evaluator.unit.Measurement":
return unit.Measurement
last_period_index = type_string.rfind(".")
if last_period_index < 0 or last_period_index == len(type_string) - 1:
raise ValueError(
"The type string is invalid - it should be of the form "
"module_path.class_name: {}".format(type_string)
)
type_string_split = type_string.split(".")
class_object = None
module_path = None
while len(type_string_split) > 0:
class_name = type_string_split.pop(0)
try:
if module_path is None:
module_path = class_name
else:
module_path = module_path + "." + class_name
# First try and treat the current string as a module
module = importlib.import_module(module_path)
class_object = module
except ImportError:
# If we get an import error, try then to treat the string
# as the name of a nested class.
class_object = getattr(class_object, class_name)
return class_object
def _type_to_type_string(object_type):
    """Converts a type to a serializable string.

    Parameters
    ----------
    object_type: type
        The type to convert.

    Returns
    -------
    str
        The converted type.
    """
    # The pint based types serialize under fixed legacy aliases. The
    # Measurement check must precede the Quantity one.
    if issubclass(object_type, unit.Unit):
        return "openff.evaluator.unit.Unit"
    if issubclass(object_type, unit.Measurement):
        return "openff.evaluator.unit.Measurement"
    if issubclass(object_type, unit.Quantity):
        return "openff.evaluator.unit.Quantity"

    type_string = f"{object_type.__module__}.{object_type.__qualname__}"

    # Normalize strings produced by the pre-release `evaluator` package.
    if type_string.startswith("evaluator."):
        type_string = type_string.replace("evaluator.", "openff.evaluator.")

    return type_string
def serialize_quantity(quantity):
    """Serializes a openff.evaluator.unit.Quantity into a dictionary of the form
    `{'value': quantity.magnitude, 'unit': str(quantity.units)}`

    Parameters
    ----------
    quantity : openff.evaluator.unit.Quantity
        The quantity to serialize

    Returns
    -------
    dict of str and str
        A dictionary representation of a openff.evaluator.unit.Quantity
        with keys of {"value", "unit"}
    """
    return {"value": quantity.magnitude, "unit": str(quantity.units)}
def deserialize_quantity(serialized):
    """Deserialize a openff.evaluator.unit.Quantity from a dictionary.

    Parameters
    ----------
    serialized : dict of str and str
        A dictionary representation of a openff.evaluator.unit.Quantity
        which must have keys {"value", "unit"}

    Returns
    -------
    openff.evaluator.unit.Quantity
        The deserialized quantity.
    """
    # The "@type" tag is encoder metadata, not part of the payload.
    serialized.pop("@type", None)

    unit_string = serialized["unit"]
    value_unit = unit.dimensionless if unit_string is None else unit(unit_string)

    return serialized["value"] * value_unit
def serialize_measurement(measurement):
    """Serializes a `openff.evaluator.unit.Measurement` into a dictionary of the form
    `{'value', 'error'}`.

    Parameters
    ----------
    measurement : openff.evaluator.unit.Measurement
        The measurement to serialize

    Returns
    -------
    dict of str and str
        A dictionary representation of a openff.evaluator.unit.Measurement
        with keys of {"value", "error"}
    """
    return {"value": measurement.value, "error": measurement.error}
def deserialize_measurement(serialized):
    """Deserialize a `openff.evaluator.unit.Measurement` from a dictionary of the form
    `{'value', 'error'}`.

    Parameters
    ----------
    serialized : dict of str and str
        A dictionary representation of a `openff.evaluator.unit.Measurement`
        which must have keys {"value", "error"}

    Returns
    -------
    openff.evaluator.unit.Measurement
        The deserialized measurement.
    """
    # The "@type" tag is encoder metadata, not part of the payload.
    serialized.pop("@type", None)

    return serialized["value"].plus_minus(serialized["error"])
def serialize_enum(enum):
    """Serialize an ``Enum`` member into a ``{"value": ...}`` dictionary."""
    if isinstance(enum, Enum):
        return {"value": enum.value}

    raise ValueError("{} is not an Enum".format(type(enum)))
def deserialize_enum(enum_dictionary):
    """Deserializes an ``Enum`` member from its ``{"@type": ..., "value": ...}``
    dictionary representation.

    Raises
    ------
    ValueError
        If a required key is missing, or the "@type" does not name an Enum.
    """
    if "@type" not in enum_dictionary:
        # FIX: the message previously read "includewhich" (missing space).
        raise ValueError(
            "The serialized enum dictionary must include which type the enum is."
        )

    if "value" not in enum_dictionary:
        # FIX: the message previously read "includethe" (missing space).
        raise ValueError(
            "The serialized enum dictionary must include the enum value."
        )

    enum_type_string = enum_dictionary["@type"]
    enum_value = enum_dictionary["value"]

    # Resolve the tagged type and make sure it really is an Enum.
    enum_class = _type_string_to_object(enum_type_string)

    if not issubclass(enum_class, Enum):
        raise ValueError("<{}> is not an Enum".format(enum_class))

    return enum_class(enum_value)
def serialize_set(set_object):
    """Serializes a ``set`` into a dictionary containing its members as a list.

    Raises
    ------
    ValueError
        If `set_object` is not a ``set``.
    """
    if not isinstance(set_object, set):
        # FIX: the message previously formatted `type(set)` - i.e. the type
        # of the builtin - rather than the type of the offending object.
        raise ValueError("{} is not a set".format(type(set_object)))

    return {"value": list(set_object)}
def deserialize_set(set_dictionary):
    """Deserializes a ``set`` from its ``{"value": [...]}`` representation.

    Raises
    ------
    ValueError
        If the "value" key is missing or its value is not a list.
    """
    if "value" not in set_dictionary:
        # FIX: the message previously read "includethe" (missing space).
        raise ValueError(
            "The serialized set dictionary must include the value of the set."
        )

    set_value = set_dictionary["value"]

    if not isinstance(set_value, list):
        raise ValueError("The value of the serialized set must be a list.")

    return set(set_value)
def serialize_frozen_set(set_object):
    """Serializes a ``frozenset`` into a dictionary containing its members
    as a list.

    Raises
    ------
    ValueError
        If `set_object` is not a ``frozenset``.
    """
    if not isinstance(set_object, frozenset):
        # FIX: the message previously formatted `type(frozenset)` - i.e. the
        # type of the builtin - rather than the type of the offending object.
        raise ValueError("{} is not a frozenset".format(type(set_object)))

    return {"value": list(set_object)}
def deserialize_frozen_set(set_dictionary):
    """Deserializes a ``frozenset`` from its ``{"value": [...]}``
    representation.

    Raises
    ------
    ValueError
        If the "value" key is missing or its value is not a list.
    """
    if "value" not in set_dictionary:
        # FIX: the message previously read "includethe" (missing space).
        raise ValueError(
            "The serialized frozenset dictionary must include the value of the set."
        )

    set_value = set_dictionary["value"]

    if not isinstance(set_value, list):
        raise ValueError("The value of the serialized set must be a list.")

    return frozenset(set_value)
class TypedJSONEncoder(json.JSONEncoder):
    """A JSON encoder which tags non-native objects with an ``@type`` entry so
    that ``TypedJSONDecoder`` can reconstruct them, using either a registered
    custom encoder or the object's ``__getstate__`` method."""

    # Types the stock JSON serializer already understands.
    _natively_supported_types = [dict, list, tuple, str, int, float, bool]

    # Maps a type (or a qualified-name string) to its encoding function.
    _custom_supported_types = {
        Enum: serialize_enum,
        unit.Measurement: serialize_measurement,
        unit.Quantity: serialize_quantity,
        set: serialize_set,
        frozenset: serialize_frozen_set,
        np.float16: lambda x: {"value": float(x)},
        np.float32: lambda x: {"value": float(x)},
        np.float64: lambda x: {"value": float(x)},
        np.int32: lambda x: {"value": int(x)},
        np.int64: lambda x: {"value": int(x)},
        np.ndarray: lambda x: {"value": x.tolist()},
        datetime: lambda x: {"value": x.isoformat()},
    }

    def default(self, value_to_serialize):
        if value_to_serialize is None:
            return None

        type_to_serialize = type(value_to_serialize)

        if type_to_serialize in TypedJSONEncoder._natively_supported_types:
            # If the value is a native type, then let the default serializer
            # handle it.
            return super(TypedJSONEncoder, self).default(value_to_serialize)

        # Otherwise, we need to add a @type attribute to it.
        type_tag = _type_to_type_string(type_to_serialize)

        # Normalize the pint based types to their legacy aliases.
        if type_tag == "openff.evaluator.unit.Unit":
            type_to_serialize = unit.Unit
        if type_tag == "openff.evaluator.unit.Quantity":
            type_to_serialize = unit.Quantity
        if type_tag == "openff.evaluator.unit.Measurement":
            type_to_serialize = unit.Measurement

        custom_encoder = None

        # Find the first registered encoder keyed on either the qualified
        # name of, or a base class of, the type being serialized.
        for encoder_type in TypedJSONEncoder._custom_supported_types:
            if isinstance(encoder_type, str):
                qualified_name = type_to_serialize.__qualname__

                if encoder_type != qualified_name:
                    continue

            elif not issubclass(type_to_serialize, encoder_type):
                continue

            custom_encoder = TypedJSONEncoder._custom_supported_types[encoder_type]
            break

        if custom_encoder is not None:
            try:
                serializable_dictionary = custom_encoder(value_to_serialize)
            except Exception as e:
                raise ValueError(
                    "{} ({}) could not be serialized "
                    "using a specialized custom encoder: {}".format(
                        value_to_serialize, type_to_serialize, e
                    )
                )

        elif hasattr(value_to_serialize, "__getstate__"):
            try:
                serializable_dictionary = value_to_serialize.__getstate__()
            except Exception as e:
                raise ValueError(
                    "{} ({}) could not be serialized "
                    "using its __getstate__ method: {}".format(
                        value_to_serialize, type_to_serialize, e
                    )
                )

        else:
            # FIX: spaces added between the concatenated fragments - the
            # message previously rendered as "...eitheradd..." / "...listof...".
            raise ValueError(
                "Objects of type {} are not serializable, please either "
                "add a __getstate__ method, or add the object to the list "
                "of custom supported types.".format(type_to_serialize)
            )

        serializable_dictionary["@type"] = type_tag
        return serializable_dictionary
class TypedJSONDecoder(json.JSONDecoder):
    """A JSON decoder which rebuilds full Python objects from dictionaries
    that were tagged with an ``@type`` attribute by ``TypedJSONEncoder``.
    """

    def __init__(self, *args, **kwargs):
        json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)

    # Types which require specialized decoding logic, keyed either by the
    # type itself or by its qualified class name.
    _custom_supported_types = {
        Enum: deserialize_enum,
        unit.Measurement: deserialize_measurement,
        unit.Quantity: deserialize_quantity,
        set: deserialize_set,
        frozenset: deserialize_frozen_set,
        np.float16: lambda x: np.float16(x["value"]),
        np.float32: lambda x: np.float32(x["value"]),
        np.float64: lambda x: np.float64(x["value"]),
        np.int32: lambda x: np.int32(x["value"]),
        np.int64: lambda x: np.int64(x["value"]),
        np.ndarray: lambda x: np.array(x["value"]),
        datetime: lambda x: dateutil.parser.parse(x["value"]),
    }

    @staticmethod
    def object_hook(object_dictionary):
        """Convert a dictionary loaded from JSON back into the object it
        represents, using its ``@type`` tag to locate the original class.

        Parameters
        ----------
        object_dictionary: dict
            The dictionary to convert.

        Returns
        -------
        Any
            The deserialized object, or the dictionary unchanged when it
            carries no ``@type`` tag.

        Raises
        ------
        ValueError
            If the object could not be deserialized.
        """

        if "@type" not in object_dictionary:
            return object_dictionary

        type_string = object_dictionary["@type"]
        class_type = _type_string_to_object(type_string)

        custom_decoder = None

        # Custom decoders may be registered either by qualified class name
        # (a str key) or by type; the first matching entry wins.
        for decoder_type in TypedJSONDecoder._custom_supported_types:
            if isinstance(decoder_type, str):
                if decoder_type != class_type.__qualname__:
                    continue

            elif not issubclass(class_type, decoder_type):
                continue

            custom_decoder = TypedJSONDecoder._custom_supported_types[decoder_type]
            break

        if custom_decoder is not None:
            try:
                deserialized_object = custom_decoder(object_dictionary)
            except Exception as e:
                # Fix: report the class itself rather than its metaclass —
                # `type(class_type)` is almost always just `<class 'type'>`.
                raise ValueError(
                    "{} ({}) could not be deserialized "
                    "using a specialized custom decoder: {}".format(
                        object_dictionary, class_type, e
                    )
                )
        elif hasattr(class_type, "__setstate__"):
            class_init_signature = inspect.signature(class_type)

            # The object is re-created with a no-argument call to the class,
            # so every constructor parameter must be optional or variadic.
            for parameter in class_init_signature.parameters.values():
                if (
                    parameter.default != inspect.Parameter.empty
                    or parameter.kind == inspect.Parameter.VAR_KEYWORD
                    or parameter.kind == inspect.Parameter.VAR_POSITIONAL
                ):
                    continue

                raise ValueError(
                    f"Cannot deserialize objects ({class_type}) which have non-"
                    f"optional arguments {parameter.name} in the constructor."
                )

            deserialized_object = class_type()
            deserialized_object.__setstate__(object_dictionary)
        else:
            # Fix: the original message concatenated its fragments without
            # separating spaces ("eitheradd", "listof") and printed the
            # metaclass instead of the class.
            raise ValueError(
                "Objects of type {} are not deserializable, please either "
                "add a __setstate__ method, or add the object to the list "
                "of custom supported types.".format(class_type)
            )

        return deserialized_object
class TypedBaseModel(ABC):
    """An abstract base class for objects which can be serialized to, and
    deserialized from, JSON.

    The produced JSON embeds extra ``@type`` tags for any non-primitive
    values (i.e. anything that is not a plain str, int, ...) so that the
    full class structure is faithfully rebuilt on deserialization.

    Subclasses are required to implement valid `__getstate__` and
    `__setstate__` methods, as these define the structure of the
    serialized output.
    """

    def json(self, file_path=None, format=False):
        """Creates a JSON representation of this class.

        Parameters
        ----------
        file_path: str, optional
            The (optional) file path to save the JSON file to.
        format: bool
            Whether to format the JSON or not.

        Returns
        -------
        str
            The JSON representation of this class.
        """
        dump_kwargs = {"cls": TypedJSONEncoder}

        if format:
            # Pretty-print with stable key ordering.
            dump_kwargs.update(sort_keys=True, indent=2, separators=(",", ": "))

        json_string = json.dumps(self, **dump_kwargs)

        if file_path is not None:
            with open(file_path, "w") as output_file:
                output_file.write(json_string)

        return json_string

    @classmethod
    def from_json(cls, file_path):
        """Create this object from a JSON file.

        Parameters
        ----------
        file_path: str
            The path to load the JSON from.

        Returns
        -------
        cls
            The parsed class.
        """
        with open(file_path, "r") as input_file:
            string_contents = input_file.read()

        return cls.parse_json(string_contents)

    @classmethod
    def parse_json(cls, string_contents):
        """Parses a typed json string into the corresponding class
        structure.

        Parameters
        ----------
        string_contents: str or bytes
            The typed json string.

        Returns
        -------
        Any
            The parsed class.
        """
        return json.loads(string_contents, cls=TypedJSONDecoder)

    @abstractmethod
    def __getstate__(self):
        """Returns a dictionary representation of this object.

        Returns
        -------
        dict of str, Any
            The dictionary representation of this object.
        """

    @abstractmethod
    def __setstate__(self, state):
        """Sets the fields of this object from its dictionary representation.

        Parameters
        ----------
        state: dict of str, Any
            The dictionary representation of the object.
        """
|
jaketanderson/openff-evaluator | openff/evaluator/attributes/__init__.py | from .attributes import UNDEFINED, Attribute, AttributeClass, PlaceholderValue
__all__ = [
UNDEFINED,
Attribute,
AttributeClass,
PlaceholderValue,
]
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_storage/test_local.py | <filename>openff/evaluator/tests/test_storage/test_local.py
"""
Units tests for openff.evaluator.storage.localfile
"""
import os
from openff.evaluator.storage import LocalFileStorage
def test_root_directory(tmpdir):
    """Check that constructing a ``LocalFileStorage`` creates its root
    directory on disk and exposes it via the ``root_directory`` property."""

    storage_root = os.path.join(tmpdir, "stored-data")
    storage_backend = LocalFileStorage(root_directory=storage_root)

    assert os.path.isdir(storage_root)
    assert storage_backend.root_directory == storage_root
|
jaketanderson/openff-evaluator | openff/evaluator/utils/packmol.py | <gh_stars>10-100
"""
An API for interacting with `packmol <http://m3g.iqm.unicamp.br/packmol/home.shtml>`_.
Notes
-----
Based on the `SolvationToolkit <https://github.com/MobleyLab/SolvationToolkit>`_.
"""
import logging
import os
import random
import shutil
import string
import subprocess
import tempfile
import warnings
from collections import defaultdict
from distutils.spawn import find_executable
from functools import reduce
import numpy as np
from openff.units import unit
from openff.units.openmm import from_openmm
from openff.evaluator.substances import Component
from openff.evaluator.utils.utils import temporarily_change_directory
logger = logging.getLogger(__name__)
class PackmolRuntimeException(Exception):
    """Raised when the packmol executable fails to execute or to converge
    for some reason."""
def _find_packmol():
"""Attempts to find the path to the `packmol` binary.
Returns
-------
str, optional
The path to the packmol binary if it could be found, otherwise
`None`.
"""
return (
find_executable("packmol") or shutil.which("packmol") or None
if "PACKMOL" not in os.environ
else os.environ["PACKMOL"]
)
def _validate_inputs(
molecules,
number_of_copies,
structure_to_solvate,
box_aspect_ratio,
box_size,
mass_density,
):
"""Validate the inputs which were passed to the main pack method.
Parameters
----------
molecules : list of openff.toolkit.topology.Molecule
The molecules in the system.
number_of_copies : list of int
A list of the number of copies of each molecule type, of length
equal to the length of `molecules`.
structure_to_solvate: str, optional
A file path to the PDB coordinates of the structure to be solvated.
box_size : openff.evaluator.unit.Quantity, optional
The size of the box to generate in units compatible with angstroms.
If `None`, `mass_density` must be provided.
mass_density : openff.evaluator.unit.Quantity, optional
Target mass density for final system with units compatible with g / mL.
If `None`, `box_size` must be provided.
box_aspect_ratio: list of float, optional
The aspect ratio of the simulation box, used in conjunction with
the `mass_density` parameter.
"""
if box_size is None and mass_density is None:
raise ValueError("Either a `box_size` or `mass_density` must be specified.")
if box_size is not None and len(box_size) != 3:
raise ValueError(
"`box_size` must be a openff.evaluator.unit.Quantity wrapped list of length 3"
)
if box_aspect_ratio is not None:
# noinspection PyTypeChecker
assert len(box_aspect_ratio) == 3
assert all(x > 0.0 for x in box_aspect_ratio)
# noinspection PyTypeChecker
if len(molecules) != len(number_of_copies):
raise ValueError(
"The length of `molecules` and `number_of_copies` must be identical."
)
if structure_to_solvate is not None:
assert os.path.isfile(structure_to_solvate)
def _approximate_box_size_by_density(
    molecules,
    n_copies,
    mass_density,
    box_aspect_ratio,
    box_scaleup_factor=1.1,
):
    """Estimate a simulation box size from the number and molecular weight
    of the molecules present, and a target mass density for the final
    solvated mixture.

    Parameters
    ----------
    molecules : list of openff.toolkit.topology.Molecule
        The molecules in the system.
    n_copies : list of int
        The number of copies of each molecule.
    mass_density : openff.evaluator.unit.Quantity
        The target mass density for final system, in units compatible
        with g / mL.
    box_aspect_ratio: List of float
        The aspect ratio of the simulation box, applied to the estimated
        edge length.
    box_scaleup_factor : float
        The factor by which the estimated box size should be increased.

    Returns
    -------
    openff.evaluator.unit.Quantity
        The three box edge lengths in units compatible with angstroms.
    """

    total_volume = 0.0 * unit.angstrom**3

    for molecule, copies in zip(molecules, n_copies):
        # Sum the atomic masses to get the molecular mass of one copy.
        per_molecule_mass = reduce(
            (lambda x, y: x + y), [atom.mass for atom in molecule.atoms]
        )
        per_molecule_mass = from_openmm(per_molecule_mass) / unit.avogadro_constant

        # Volume occupied by all copies of this species at the target density.
        total_volume += copies * (per_molecule_mass / mass_density)

    # Edge length of a cube holding the total volume, padded by the scale-up
    # factor to give packmol room to work.
    edge_length = total_volume ** (1.0 / 3.0) * box_scaleup_factor
    edge_length_angstrom = edge_length.to(unit.angstrom).magnitude

    # Stretch the cube to the requested aspect ratio while preserving volume.
    normalizer = (
        box_aspect_ratio[0] * box_aspect_ratio[1] * box_aspect_ratio[2]
    ) ** (1.0 / 3.0)

    box_edges = [
        edge_length_angstrom * ratio for ratio in box_aspect_ratio
    ] * unit.angstrom
    box_edges /= normalizer

    return box_edges
def _generate_residue_name(residue, smiles):
    """Generates a residue name for a particular residue which
    corresponds to a particular smiles pattern.

    Where possible (i.e for amino acids, water and monatomic ions) a
    standard residue name will be assigned, otherwise a random
    three-letter name will be used. The residue (and in some branches its
    atoms) is renamed in place; nothing is returned.

    Parameters
    ----------
    residue: mdtraj.core.topology.Residue
        The residue to assign the name to.
    smiles: str
        The SMILES pattern to generate a residue name for.
    """
    from mdtraj.core import residue_names
    from openff.toolkit.topology import Molecule

    # Define the set of residue names which should be discarded
    # if randomly generated as they have a reserved meaning.
    # noinspection PyProtectedMember
    forbidden_residue_names = [
        *residue_names._AMINO_ACID_CODES,
        *residue_names._SOLVENT_TYPES,
        *residue_names._WATER_RESIDUES,
        "ADE",
        "CYT",
        "CYX",
        "DAD",
        "DGU",
        "FOR",
        "GUA",
        "HID",
        "HIE",
        "HIH",
        "HSD",
        "HSH",
        "HSP",
        "NMA",
        "THY",
        "URA",
    ]

    # Canonical (standardized) SMILES -> three letter amino acid code.
    amino_residue_mappings = {
        "C[C@H](N)C(=O)O": "ALA",
        "N=C(N)NCCC[C@H](N)C(=O)O": "ARG",
        "NC(=O)C[C@H](N)C(=O)O": "ASN",
        "N[C@@H](CC(=O)O)C(=O)O": "ASP",
        "N[C@@H](CS)C(=O)O": "CYS",
        "N[C@@H](CCC(=O)O)C(=O)O": "GLU",
        "NC(=O)CC[C@H](N)C(=O)O": "GLN",
        "NCC(=O)O": "GLY",
        "N[C@@H](Cc1c[nH]cn1)C(=O)O": "HIS",
        "CC[C@H](C)[C@H](N)C(=O)O": "ILE",
        "CC(C)C[C@H](N)C(=O)O": "LEU",
        "NCCCC[C@H](N)C(=O)O": "LYS",
        "CSCC[C@H](N)C(=O)O": "MET",
        "N[C@@H](Cc1ccccc1)C(=O)O": "PHE",
        "O=C(O)[C@@H]1CCCN1": "PRO",
        "N[C@@H](CO)C(=O)O": "SER",
        "C[C@@H](O)[C@H](N)C(=O)O": "THR",
        "N[C@@H](Cc1c[nH]c2ccccc12)C(=O)O": "TRP",
        "N[C@@H](Cc1ccc(O)cc1)C(=O)O": "TYR",
        "CC(C)[C@H](N)C(=O)O": "VAL",
    }

    # Standardize the input SMILES (via Component) so lookups against the
    # mapping above are canonical.
    standardized_smiles = Component(smiles=smiles).smiles

    # Check for amino acids.
    if standardized_smiles in amino_residue_mappings:
        residue.name = amino_residue_mappings[standardized_smiles]
        return

    # Check for water
    if standardized_smiles == "O":
        residue.name = "HOH"

        # Re-assign the water atom names. These need to be set to get
        # correct CONECT statements.
        h_counter = 1

        for atom in residue.atoms:
            if atom.element.symbol == "O":
                atom.name = "O1"
            else:
                atom.name = f"H{h_counter}"
                h_counter += 1

        return

    # Check for ions
    openff_molecule = Molecule.from_smiles(smiles, allow_undefined_stereo=True)

    if openff_molecule.n_atoms == 1:
        residue.name = _ion_residue_name(openff_molecule)
        residue.atom(0).name = residue.name

        return

    # Randomly generate a name
    random_residue_name = "".join(
        [random.choice(string.ascii_uppercase) for _ in range(3)]
    )

    while random_residue_name in forbidden_residue_names:
        # Re-choose the residue name until we find a safe one.
        random_residue_name = "".join(
            [random.choice(string.ascii_uppercase) for _ in range(3)]
        )

    residue.name = random_residue_name

    # Assign unique atom names, numbering atoms per element (C1, C2, H1...).
    element_counter = defaultdict(int)

    for atom in residue.atoms:
        atom.name = f"{atom.element.symbol}{element_counter[atom.element.symbol] + 1}"
        element_counter[atom.element.symbol] += 1
def _ion_residue_name(molecule):
    """Generates a residue name for a monatomic ion.

    The name is the element symbol followed by the formal charge (e.g.
    ``Na+``, ``Cl-``, ``Mg2+``), truncated to at most three characters.

    Parameters
    ----------
    molecule: openff.toolkit.topology.Molecule
        The monoatomic ion to generate a residue name for.

    Returns
    -------
    str
        The residue name of the ion.
    """
    # Support both the new (`openmm`) and legacy (`simtk.openmm`) layouts.
    try:
        from openmm import unit as openmm_unit
    except ImportError:
        from simtk.openmm import unit as openmm_unit

    element_symbol = molecule.atoms[0].element.symbol

    formal_charge = molecule.atoms[0].formal_charge

    # The toolkit may report the charge as a unit-wrapped quantity.
    if isinstance(formal_charge, openmm_unit.Quantity):
        formal_charge = formal_charge.value_in_unit(openmm_unit.elementary_charge)

    formal_charge = int(formal_charge)

    if formal_charge == 0:
        charge_suffix = ""
    else:
        sign = "-" if formal_charge < 0 else "+"
        magnitude = abs(formal_charge)
        # Only include the magnitude for multiply-charged ions.
        charge_suffix = sign if magnitude <= 1 else f"{magnitude}{sign}"

    # PDB residue names are limited to three characters.
    return f"{element_symbol}{charge_suffix}"[:3]
def _create_trajectory(molecule):
    """Create an `mdtraj` trajectory from a molecule object, with residue
    names re-assigned via ``_generate_residue_name``.

    Parameters
    ----------
    molecule: openff.toolkit.topology.Molecule
        The molecule to convert.

    Returns
    -------
    mdtraj.Trajectory
        The created trajectory.
    """
    import mdtraj

    # Make sure there is at least one conformer to write out.
    if molecule.n_conformers <= 0:
        molecule.generate_conformers(n_conformers=1)

    # Round-trip through a PDB file: the toolkit will not always save the
    # atoms in the same order that they are present in the molecule object,
    # so reloading guarantees a consistent ordering.
    with tempfile.NamedTemporaryFile(suffix=".pdb") as pdb_file:
        molecule.to_file(pdb_file.name, "PDB")
        mdtraj_trajectory = mdtraj.load_pdb(pdb_file.name)

    # Replace any incorrectly auto-assigned residue names (sometimes
    # molecules are given an amino acid residue name even when they are not
    # amino acids, e.g. C(CO)N is not Gly).
    for residue in mdtraj_trajectory.topology.residues:
        _generate_residue_name(residue, molecule.to_smiles())

    return mdtraj_trajectory
def _build_input_file(
    molecule_file_names,
    molecule_counts,
    structure_to_solvate,
    center_solute,
    box_size,
    tolerance,
    output_file_name,
):
    """Construct the packmol input file in the current working directory.

    Parameters
    ----------
    molecule_file_names: list of str
        The paths to the molecule pdb files.
    molecule_counts: list of int
        The number of each molecule to add.
    structure_to_solvate: str, optional
        The path to the structure to solvate.
    center_solute: bool
        If `True`, the structure to solvate will be fixed at the center of
        the simulation box.
    box_size: openff.evaluator.unit.Quantity
        The lengths of each box vector.
    tolerance: openff.evaluator.unit.Quantity
        The packmol convergence tolerance.
    output_file_name: str
        The path packmol should save the packed pdb to.

    Returns
    -------
    str
        The path to the created input file.
    """
    box_lengths = box_size.to(unit.angstrom).magnitude
    tolerance_angstrom = tolerance.to(unit.angstrom).magnitude

    # Global header options.
    file_lines = [
        f"tolerance {tolerance_angstrom:f}",
        "filetype pdb",
        f"output {output_file_name}",
        "",
    ]

    # Optionally add the fixed solute section.
    if structure_to_solvate is not None:
        solute_origin = (
            [box_lengths[i] / 2.0 for i in range(3)]
            if center_solute
            else [0.0] * 3
        )

        file_lines.extend(
            [
                f"structure {structure_to_solvate}",
                " number 1",
                " fixed "
                f"{solute_origin[0]} "
                f"{solute_origin[1]} "
                f"{solute_origin[2]} "
                "0. 0. 0.",
                "centerofmass" if center_solute else "",
                "end structure",
                "",
            ]
        )

    # One section per distinct molecule type.
    for molecule_file_name, molecule_count in zip(
        molecule_file_names, molecule_counts
    ):
        file_lines.extend(
            [
                f"structure {molecule_file_name}",
                f" number {molecule_count}",
                f" inside box 0. 0. 0. {box_lengths[0]} {box_lengths[1]} {box_lengths[2]}",
                "end structure",
                "",
            ]
        )

    # Write packmol input
    packmol_file_name = "packmol_input.txt"

    with open(packmol_file_name, "w") as file_handle:
        file_handle.write("\n".join(file_lines))

    return packmol_file_name
def _correct_packmol_output(
    file_path, molecule_topologies, number_of_copies, structure_to_solvate
):
    """Corrects the PDB file output by packmol, namely by
    adding full connectivity information.

    Parameters
    ----------
    file_path: str
        The file path to the packmol output file.
    molecule_topologies: list of mdtraj.Topology
        A list of topologies for the molecules which packmol has
        added.
    number_of_copies: list of int
        The total number of each molecule which packmol should have
        created.
    structure_to_solvate: str, optional
        The file path to a preexisting structure which packmol
        has solvated.

    Returns
    -------
    mdtraj.Trajectory
        A trajectory containing the packed system with full connectivity.
    """
    import mdtraj

    with warnings.catch_warnings():
        if structure_to_solvate is not None:
            # Catch the known warning which is fixed in the next section.
            warnings.filterwarnings(
                "ignore", message="WARNING: two consecutive residues with same number"
            )

        trajectory = mdtraj.load(file_path)

    all_topologies = []
    all_n_copies = []

    if structure_to_solvate is not None:
        solvated_trajectory = mdtraj.load(structure_to_solvate)

        all_topologies.append(solvated_trajectory.topology)
        all_n_copies.append(1)

        # We have to split the topology to ensure the structure to solvate
        # ends up in its own chain. The solute atoms come first in packmol's
        # output, so the remaining atoms are the solvent.
        n_solvent_atoms = trajectory.n_atoms - solvated_trajectory.n_atoms
        solvent_indices = np.arange(n_solvent_atoms) + solvated_trajectory.n_atoms

        solvent_topology = trajectory.topology.subset(solvent_indices)

        full_topology = solvated_trajectory.topology.join(solvent_topology)
        trajectory.topology = full_topology

    all_topologies.extend(molecule_topologies)
    all_n_copies.extend(number_of_copies)

    # Build the expected bond list, shifting each template's (atom, atom)
    # bond indices by the running atom offset for every copy packmol placed.
    all_bonds = []
    offset = 0

    for (molecule_topology, count) in zip(all_topologies, all_n_copies):
        _, molecule_bonds = molecule_topology.to_dataframe()

        for i in range(count):
            for bond in molecule_bonds:
                all_bonds.append(
                    [int(bond[0].item()) + offset, int(bond[1].item()) + offset]
                )

            offset += molecule_topology.n_atoms

    if len(all_bonds) > 0:
        all_bonds = np.unique(all_bonds, axis=0).tolist()

    # We have to check whether there are any existing bonds, because mdtraj
    # will sometimes automatically detect some based on residue names (e.g HOH),
    # and this behaviour cannot be disabled.
    existing_bonds = []

    for bond in trajectory.topology.bonds:
        existing_bonds.append(bond)

    for bond in all_bonds:
        atom_a = trajectory.topology.atom(bond[0])
        atom_b = trajectory.topology.atom(bond[1])

        # Skip bonds that mdtraj has already added (in either direction).
        bond_exists = False

        for existing_bond in existing_bonds:
            if (existing_bond.atom1 == atom_a and existing_bond.atom2 == atom_b) or (
                existing_bond.atom2 == atom_a and existing_bond.atom1 == atom_b
            ):
                bond_exists = True
                break

        if bond_exists:
            continue

        trajectory.topology.add_bond(atom_a, atom_b)

    return trajectory
def pack_box(
    molecules,
    number_of_copies,
    structure_to_solvate=None,
    center_solute=True,
    tolerance=2.0 * unit.angstrom,
    box_size=None,
    mass_density=None,
    box_aspect_ratio=None,
    verbose=False,
    working_directory=None,
    retain_working_files=False,
):
    """Run packmol to generate a box containing a mixture of molecules.

    Parameters
    ----------
    molecules : list of openff.toolkit.topology.Molecule
        The molecules in the system.
    number_of_copies : list of int
        A list of the number of copies of each molecule type, of length
        equal to the length of `molecules`.
    structure_to_solvate: str, optional
        A file path to the PDB coordinates of the structure to be solvated.
    center_solute: bool
        If `True`, the structure to solvate will be centered in the
        simulation box. This option is only applied when `structure_to_solvate`
        is set.
    tolerance : openff.evaluator.unit.Quantity
        The minimum spacing between molecules during packing in units
        compatible with angstroms.
    box_size : openff.evaluator.unit.Quantity, optional
        The size of the box to generate in units compatible with angstroms.
        If `None`, `mass_density` must be provided.
    mass_density : openff.evaluator.unit.Quantity, optional
        Target mass density for final system with units compatible with g / mL.
        If `None`, `box_size` must be provided.
    box_aspect_ratio: list of float, optional
        The aspect ratio of the simulation box, used in conjunction with
        the `mass_density` parameter. If none, an isotropic ratio (i.e.
        [1.0, 1.0, 1.0]) is used.
    verbose : bool
        If True, verbose output is written.
    working_directory: str, optional
        The directory in which to generate the temporary working files. If `None`,
        a temporary one will be created.
    retain_working_files: bool
        If True all of the working files, such as individual molecule coordinate
        files, will be retained.

    Returns
    -------
    mdtraj.Trajectory
        The packed box encoded in an mdtraj trajectory.
    list of str
        The residue names which were assigned to each of the
        molecules in the `molecules` list.

    Raises
    ------
    PackmolRuntimeException
        When packmol fails to execute / converge.
    """

    if mass_density is not None and box_aspect_ratio is None:
        box_aspect_ratio = [1.0, 1.0, 1.0]

    # Make sure packmol can be found.
    packmol_path = _find_packmol()

    if packmol_path is None:
        raise IOError("Packmol not found, cannot run pack_box()")

    # Validate the inputs.
    _validate_inputs(
        molecules,
        number_of_copies,
        structure_to_solvate,
        box_aspect_ratio,
        box_size,
        mass_density,
    )

    # Estimate the box_size from mass density if one is not provided.
    if box_size is None:
        box_size = _approximate_box_size_by_density(
            molecules, number_of_copies, mass_density, box_aspect_ratio
        )

    # Set up the directory to create the working files in.
    temporary_directory = False

    if working_directory is None:
        working_directory = tempfile.mkdtemp()
        temporary_directory = True

    if len(working_directory) > 0:
        os.makedirs(working_directory, exist_ok=True)

    # Copy the structure to solvate if one is provided.
    if structure_to_solvate is not None:
        import mdtraj

        trajectory = mdtraj.load_pdb(structure_to_solvate)

        # Fix mdtraj #1611 (duplicate serial numbers on save).
        for atom in trajectory.topology.atoms:
            atom.serial = None

        structure_to_solvate = "solvate.pdb"
        trajectory.save_pdb(os.path.join(working_directory, structure_to_solvate))

    assigned_residue_names = []

    with temporarily_change_directory(working_directory):

        # Create PDB files for all of the molecules.
        pdb_file_names = []
        mdtraj_topologies = []

        for index, molecule in enumerate(molecules):

            mdtraj_trajectory = _create_trajectory(molecule)

            pdb_file_name = f"{index}.pdb"
            pdb_file_names.append(pdb_file_name)

            mdtraj_trajectory.save_pdb(pdb_file_name)
            mdtraj_topologies.append(mdtraj_trajectory.topology)

            residue_name = mdtraj_trajectory.topology.residue(0).name
            assigned_residue_names.append(residue_name)

        # Generate the input file.
        output_file_name = "packmol_output.pdb"

        input_file_path = _build_input_file(
            pdb_file_names,
            number_of_copies,
            structure_to_solvate,
            center_solute,
            box_size,
            tolerance,
            output_file_name,
        )

        # Run packmol, feeding it the input file on stdin.
        with open(input_file_path) as file_handle:

            result = subprocess.check_output(
                packmol_path, stdin=file_handle, stderr=subprocess.STDOUT
            ).decode("utf-8")

            if verbose:
                logger.info(result)

        # Packmol reports convergence by printing "Success!" to stdout.
        packmol_succeeded = result.find("Success!") > 0

        if not retain_working_files:

            os.unlink(input_file_path)

            for file_path in pdb_file_names:
                os.unlink(file_path)

        if not packmol_succeeded:

            if verbose:
                logger.info("Packmol failed to converge")

            if os.path.isfile(output_file_name):
                os.unlink(output_file_name)

            if temporary_directory and not retain_working_files:
                shutil.rmtree(working_directory)

            raise PackmolRuntimeException(result)

        # Add a 2 angstrom buffer to help alleviate PBC issues.
        box_size = [
            (x + 2.0 * unit.angstrom).to(unit.nanometer).magnitude for x in box_size
        ]

        # Append missing connect statements to the end of the
        # output file.
        trajectory = _correct_packmol_output(
            output_file_name, mdtraj_topologies, number_of_copies, structure_to_solvate
        )
        trajectory.unitcell_lengths = box_size
        trajectory.unitcell_angles = [90.0] * 3

        if not retain_working_files:
            os.unlink(output_file_name)

    if temporary_directory and not retain_working_files:
        shutil.rmtree(working_directory)

    return trajectory, assigned_residue_names
|
jaketanderson/openff-evaluator | openff/evaluator/server/server.py | <reponame>jaketanderson/openff-evaluator<filename>openff/evaluator/server/server.py
"""
The core functionality of the 'server' side of the
openff-evaluator framework.
"""
import copy
import json
import logging
import os
import select
import shutil
import socket
import threading
import traceback
import uuid
from glob import glob
import networkx
from openff.evaluator.attributes import Attribute, AttributeClass
from openff.evaluator.client import (
BatchMode,
EvaluatorClient,
RequestOptions,
RequestResult,
)
from openff.evaluator.datasets import PhysicalProperty
from openff.evaluator.forcefield import ParameterGradientKey
from openff.evaluator.layers import registered_calculation_layers
from openff.evaluator.storage import LocalFileStorage
from openff.evaluator.utils.exceptions import EvaluatorException
from openff.evaluator.utils.serialization import TypedJSONEncoder
from openff.evaluator.utils.tcp import (
EvaluatorMessageTypes,
pack_int,
recvall,
unpack_int,
)
logger = logging.getLogger(__name__)
class Batch(AttributeClass):
    """Represents a batch of physical properties which are being estimated by
    the server for a given set of force field parameters.

    The expectation is that this object will be passed between calculation layers,
    whereby each layer will attempt to estimate each of the `queued_properties`.
    Those properties which can be estimated will be moved to the `estimated_properties`
    set, while those that couldn't will remain in the `queued_properties` set ready
    for the next layer.
    """

    # A randomly generated UUID (with the dashes stripped).
    id = Attribute(
        docstring="The unique id of this batch.",
        type_hint=str,
        default_value=lambda: str(uuid.uuid4()).replace("-", ""),
    )
    force_field_id = Attribute(
        docstring="The id of the force field being used to estimate"
        "this batch of properties.",
        type_hint=str,
    )
    options = Attribute(
        docstring="The options being used to estimate this batch.",
        type_hint=RequestOptions,
    )
    parameter_gradient_keys = Attribute(
        docstring="The parameters that this batch of physical properties "
        "should be differentiated with respect to.",
        type_hint=list,
    )
    enable_data_caching = Attribute(
        docstring="Whether the server should attempt to cache any data, mainly the "
        "output of simulations, produced by this batch for future re-processing (e.g "
        "for reweighting).",
        type_hint=bool,
        default_value=True,
    )
    # Properties move from `queued_properties` into either
    # `estimated_properties` or `unsuccessful_properties` as calculation
    # layers process the batch.
    queued_properties = Attribute(
        docstring="The set of properties which have yet to be estimated.",
        type_hint=list,
        default_value=[],
    )
    estimated_properties = Attribute(
        docstring="The set of properties which have been successfully estimated.",
        type_hint=list,
        default_value=[],
    )
    unsuccessful_properties = Attribute(
        docstring="The set of properties which have been could not be estimated.",
        type_hint=list,
        default_value=[],
    )
    exceptions = Attribute(
        docstring="The set of properties which have yet to be, or "
        "are currently being estimated.",
        type_hint=list,
        default_value=[],
    )

    def validate(self, attribute_type=None):
        """Validate the state of this batch, checking that every collection
        attribute contains only objects of the expected type.

        Parameters
        ----------
        attribute_type: type, optional
            Passed through to ``AttributeClass.validate``.
        """
        super(Batch, self).validate(attribute_type)

        assert all(isinstance(x, PhysicalProperty) for x in self.queued_properties)
        assert all(isinstance(x, PhysicalProperty) for x in self.estimated_properties)
        assert all(
            isinstance(x, PhysicalProperty) for x in self.unsuccessful_properties
        )
        assert all(isinstance(x, EvaluatorException) for x in self.exceptions)
        assert all(
            isinstance(x, ParameterGradientKey) for x in self.parameter_gradient_keys
        )
class EvaluatorServer:
"""The object responsible for coordinating all properties estimations to
be ran using the openff-evaluator framework.
This server is responsible for receiving estimation requests from the client,
determining which calculation layer to use to launch the request, and
distributing that estimation across the available compute resources.
Notes
-----
Every client request is split into logical chunk batches. This enables batches
of related properties (e.g. all properties for CO) to be estimated in one go
(or one task graph in the case of workflow based layers) and returned when ready,
rather than waiting for the full data set to complete.
Examples
--------
Setting up a general server instance using a dask based calculation backend,
and a local file storage backend:
>>> # Create the backend which will be responsible for distributing the calculations
>>> from openff.evaluator.backends.dask import DaskLocalCluster
>>> calculation_backend = DaskLocalCluster()
>>> calculation_backend.start()
>>>
>>> # Create the server to which all estimation requests will be submitted
>>> from openff.evaluator.server import EvaluatorServer
>>> property_server = EvaluatorServer(calculation_backend)
>>>
>>> # Instruct the server to listen for incoming requests
>>> # This command will run until killed.
>>> property_server.start()
"""
def __init__(
self,
calculation_backend,
storage_backend=None,
port=8000,
working_directory="working-data",
enable_data_caching=True,
delete_working_files=True,
):
"""Constructs a new EvaluatorServer object.
Parameters
----------
calculation_backend: CalculationBackend
The backend to use for executing calculations.
storage_backend: StorageBackend, optional
The backend to use for storing information from any calculations.
If `None`, a default `LocalFileStorage` backend will be used.
port: int
The port on which to listen for incoming client requests.
working_directory: str
The local directory in which to store all local, temporary calculation data.
enable_data_caching: bool
Whether the server should attempt to cache any data, mainly the output of
simulations, produced by estimation requests for future re-processing (e.g
for reweighting).
delete_working_files: bool
Whether to delete the working files produced while estimated a batch of
properties using a specific calculation layer.
"""
# Initialize the main 'server' attributes.
self._port = port
self._server_thread = None
self._socket = None
self._started = False
self._stopped = True
# Initialize the internal components.
assert calculation_backend is not None and calculation_backend.started
self._calculation_backend = calculation_backend
if storage_backend is None:
storage_backend = LocalFileStorage()
self._storage_backend = storage_backend
self._enable_data_caching = enable_data_caching
self._working_directory = working_directory
os.makedirs(self._working_directory, exist_ok=True)
self._delete_working_files = delete_working_files
self._queued_batches = {}
self._finished_batches = {}
self._batch_ids_per_client_id = {}
def _query_request_status(self, client_request_id):
    """Queries the current state of an estimation request
    and stores it in a `RequestResult`.

    Parameters
    ----------
    client_request_id: str
        The id of the request to query.

    Returns
    -------
    RequestResult
        The state of the request, or `None` if an error occurred.
    EvaluatorException, optional
        The exception raised while retrieving the status,
        if any.
    """
    request_results = RequestResult()

    # A client request may have been split across several batches; the
    # aggregate result is the union of each batch's property sets.
    for batch_id in self._batch_ids_per_client_id[client_request_id]:

        # Find the batch.
        if batch_id in self._queued_batches:
            batch = self._queued_batches[batch_id]

        elif batch_id in self._finished_batches:
            batch = self._finished_batches[batch_id]

            # Sanity check: a finished batch must have no queued properties.
            if len(batch.queued_properties) > 0:
                return (
                    None,
                    EvaluatorException(
                        message=f"An internal error occurred - the {batch_id} "
                        f"batch was prematurely marked us finished."
                    ),
                )

        else:
            return (
                None,
                EvaluatorException(
                    message=f"An internal error occurred - the {batch_id} "
                    f"request was not found on the server."
                ),
            )

        request_results.queued_properties.add_properties(*batch.queued_properties)
        request_results.unsuccessful_properties.add_properties(
            *batch.unsuccessful_properties
        )
        request_results.estimated_properties.add_properties(
            *batch.estimated_properties
        )
        request_results.exceptions.extend(batch.exceptions)

    return request_results, None
def _batch_by_same_component(self, submission, force_field_id):
"""Batches a set of requested properties based on which substance they were
measured for. Properties which were measured for substances containing the
exact same components (but not necessarily in the same amounts) will be placed
into the same batch.
Parameters
----------
submission: EvaluatorClient._Submission
The full request submission.
force_field_id: str
The unique id of the force field to use.
Returns
-------
list of Batch
The property batches.
"""
reserved_batch_ids = {
*self._queued_batches.keys(),
*self._finished_batches.keys(),
}
batches = []
for substance in submission.dataset.substances:
batch = Batch()
batch.force_field_id = force_field_id
batch.enable_data_caching = self._enable_data_caching
# Make sure we don't somehow generate the same uuid
# twice (although this is very unlikely to ever happen).
while batch.id in reserved_batch_ids:
batch.id = str(uuid.uuid4()).replace("-", "")
batch.queued_properties = [
x for x in submission.dataset.properties_by_substance(substance)
]
batch.options = RequestOptions.parse_json(submission.options.json())
batch.parameter_gradient_keys = copy.deepcopy(
submission.parameter_gradient_keys
)
reserved_batch_ids.add(batch.id)
batches.append(batch)
return batches
    def _batch_by_shared_component(self, submission, force_field_id):
        """Batches a set of requested properties based on which substance they were
        measured for. Properties which were measured for substances sharing at least
        one common component (defined only by its smiles pattern and not necessarily
        in the same amount) will be placed into the same batch.

        Parameters
        ----------
        submission: EvaluatorClient._Submission
            The full request submission.
        force_field_id: str
            The unique id of the force field to use.

        Returns
        -------
        list of Batch
            The property batches.
        """
        # Track ids already in use so newly generated ids never collide.
        reserved_batch_ids = {
            *self._queued_batches.keys(),
            *self._finished_batches.keys(),
        }

        all_smiles = set(x.smiles for y in submission.dataset.substances for x in y)

        # Build a graph containing all of the different component
        # smiles patterns as nodes.
        substance_graph = networkx.Graph()
        substance_graph.add_nodes_from(all_smiles)

        # Add edges to the graph based on which substances contain
        # the different component nodes.
        for substance in submission.dataset.substances:
            if len(substance) < 2:
                continue

            smiles = [x.smiles for x in substance]

            # Chaining consecutive components pairwise is sufficient to join
            # every component of this substance into one connected subgraph.
            for smiles_a, smiles_b in zip(smiles, smiles[1:]):
                substance_graph.add_edge(smiles_a, smiles_b)

        # Find clustered islands of those smiles which exist in
        # overlapping substances.
        islands = [
            substance_graph.subgraph(c)
            for c in networkx.connected_components(substance_graph)
        ]

        # Create one batch per island
        batches = []

        for _ in range(len(islands)):
            batch = Batch()
            batch.force_field_id = force_field_id
            batch.enable_data_caching = self._enable_data_caching

            # Make sure we don't somehow generate the same uuid
            # twice (although this is very unlikely to ever happen).
            while batch.id in reserved_batch_ids:
                batch.id = str(uuid.uuid4()).replace("-", "")

            batch.options = RequestOptions.parse_json(submission.options.json())
            batch.parameter_gradient_keys = copy.deepcopy(
                submission.parameter_gradient_keys
            )

            reserved_batch_ids.add(batch.id)
            batches.append(batch)

        # Assign each property to the batch of the island containing any of
        # its components (all of a substance's components share one island
        # by construction above).
        for physical_property in submission.dataset:
            smiles = [x.smiles for x in physical_property.substance]

            # ``island_id`` keeps the index of the first matching island; the
            # default of 0 is only a fallback - every component smiles was
            # added as a graph node, so some island should always match.
            island_id = 0

            for island_id, island in enumerate(islands):
                if not any(x in island for x in smiles):
                    continue

                break

            batches[island_id].queued_properties.append(physical_property)

        return batches
def _prepare_batches(self, submission, request_id):
"""Turns an estimation request into chunked batches to
calculate separately.
This enables batches of related properties (e.g. all properties
for CO) to be estimated in one go (or one task graph in the case
of workflow based layers) and returned when ready, rather than waiting
for the full data set to complete.
Parameters
----------
submission: EvaluatorClient._Submission
The full request submission.
request_id: str
The id that was assigned to the request.
Returns
-------
list of Batch
A list of the batches to launch.
"""
force_field_source = submission.force_field_source
force_field_id = self._storage_backend.store_force_field(force_field_source)
batch_mode = submission.options.batch_mode
if batch_mode == BatchMode.SameComponents:
batches = self._batch_by_same_component(submission, force_field_id)
elif batch_mode == BatchMode.SharedComponents:
batches = self._batch_by_shared_component(submission, force_field_id)
else:
raise NotImplementedError()
for batch in batches:
self._queued_batches[batch.id] = batch
self._batch_ids_per_client_id[request_id].append(batch.id)
return batches
    def _launch_batch(self, batch):
        """Launch a batch of properties to estimate.

        This method will recursively cascade through all allowed calculation
        layers or until all properties have been calculated.

        Parameters
        ----------
        batch : Batch
            The batch to launch.
        """
        # Optionally clean-up any files produced while estimating the batch with the
        # previous layer.
        if self._delete_working_files:
            for directory in glob(os.path.join(self._working_directory, "*", batch.id)):
                if not os.path.isdir(directory):
                    continue

                shutil.rmtree(directory, ignore_errors=True)

        # Terminate the cascade once every layer has been tried, or once
        # there is nothing left to estimate.
        if (
            len(batch.options.calculation_layers) == 0
            or len(batch.queued_properties) == 0
        ):
            # Move any remaining properties to the unsuccessful list.
            batch.unsuccessful_properties = [*batch.queued_properties]
            batch.queued_properties = []

            self._queued_batches.pop(batch.id)
            self._finished_batches[batch.id] = batch

            logger.info(f"Finished server request {batch.id}")
            return

        # Pop the next layer to try; the remaining layers stay on the batch
        # for the subsequent recursive calls.
        current_layer_type = batch.options.calculation_layers.pop(0)

        if current_layer_type not in registered_calculation_layers:
            # Add an exception if we reach an unsupported calculation layer.
            error_object = EvaluatorException(
                message=f"The {current_layer_type} layer is not "
                f"supported by / available on the server."
            )

            batch.exceptions.append(error_object)

            # Skip to the next layer rather than aborting the whole batch.
            self._launch_batch(batch)
            return

        logger.info(f"Launching batch {batch.id} using the {current_layer_type} layer")

        layer_directory = os.path.join(
            self._working_directory, current_layer_type, batch.id
        )
        os.makedirs(layer_directory, exist_ok=True)

        current_layer = registered_calculation_layers[current_layer_type]

        # Passing ``self._launch_batch`` as the callback lets the layer
        # cascade any still-queued properties on to the next layer when it
        # finishes.
        current_layer.schedule_calculation(
            self._calculation_backend,
            self._storage_backend,
            layer_directory,
            batch,
            self._launch_batch,
        )
def _handle_job_submission(self, connection, address, message_length):
"""An asynchronous routine for handling the receiving and processing
of job submissions from a client.
Parameters
----------
connection:
An IO stream used to pass messages between the
server and client.
address: str
The address from which the request came.
message_length: int
The length of the message being received.
"""
logger.info("Received estimation request from {}".format(address))
# Read the incoming request from the server. The first four bytes
# of the response should be the length of the message being sent.
# Decode the client submission json.
encoded_json = recvall(connection, message_length)
json_model = encoded_json.decode()
request_id = None
error = None
try:
# noinspection PyProtectedMember
submission = EvaluatorClient._Submission.parse_json(json_model)
submission.validate()
except Exception as e:
formatted_exception = traceback.format_exception(None, e, e.__traceback__)
error = EvaluatorException(
message=f"An exception occured when parsing "
f"the submission: {formatted_exception}"
)
submission = None
if error is None:
while request_id is None or request_id in self._batch_ids_per_client_id:
request_id = str(uuid.uuid4()).replace("-", "")
self._batch_ids_per_client_id[request_id] = []
# Pass the id of the submitted requests back to the client
# as well as any error which may have occurred.
return_packet = json.dumps((request_id, error), cls=TypedJSONEncoder)
encoded_return_packet = return_packet.encode()
length = pack_int(len(encoded_return_packet))
connection.sendall(length + encoded_return_packet)
if error is not None:
# Exit early if there is an error.
return
# Batch the request into more managable chunks.
batches = self._prepare_batches(submission, request_id)
# Launch the batches
for batch in batches:
self._launch_batch(batch)
def _handle_job_query(self, connection, message_length):
"""An asynchronous routine for handling the receiving and
processing of request status queries from a client
Parameters
----------
connection:
An IO stream used to pass messages between the
server and client.
message_length: int
The length of the message being received.
"""
encoded_request_id = recvall(connection, message_length)
client_request_id = encoded_request_id.decode()
response = None
if client_request_id not in self._batch_ids_per_client_id:
error = EvaluatorException(
message=f"The request id ({client_request_id}) was not found "
f"on the server.",
)
else:
response, error = self._query_request_status(client_request_id)
response_json = json.dumps((response, error), cls=TypedJSONEncoder)
encoded_response = response_json.encode()
length = pack_int(len(encoded_response))
connection.sendall(length + encoded_response)
    def _handle_stream(self, connection, address):
        """A routine to handle incoming requests from
        a TCP client.

        The wire format is: four bytes encoding the message type, four
        bytes encoding the payload length, then the payload itself.
        """
        # Receive an introductory message with the message type.
        packed_message_type = recvall(connection, 4)
        message_type_int = unpack_int(packed_message_type)[0]

        # The next four bytes encode the length of the payload which follows.
        packed_message_length = recvall(connection, 4)
        message_length = unpack_int(packed_message_length)[0]

        try:
            message_type = EvaluatorMessageTypes(message_type_int)
        except ValueError as e:
            trace = traceback.format_exception(None, e, e.__traceback__)
            logger.info(f"Bad message type received: {trace}")

            # Discard the unrecognised message.
            if message_length > 0:
                recvall(connection, message_length)

            return

        # Dispatch to the handler matching the message type.
        if message_type is EvaluatorMessageTypes.Submission:
            self._handle_job_submission(connection, address, message_length)
        elif message_type is EvaluatorMessageTypes.Query:
            self._handle_job_query(connection, message_length)
    def _handle_connections(self):
        """Handles incoming client TCP connections."""
        # Poll the listening socket, together with any accepted client
        # connections, until the server is asked to stop.
        to_read = [self._socket]

        try:
            while not self._stopped:
                # The 0.1 s timeout keeps the loop responsive to ``_stopped``
                # being toggled by ``stop``.
                ready, _, _ = select.select(to_read, [], [], 0.1)

                for data in ready:
                    if data == self._socket:
                        # A new client is connecting - accept the connection
                        # and start polling it for messages.
                        connection, address = self._socket.accept()
                        to_read.append(connection)
                    else:
                        # An existing client has sent a message - handle it,
                        # then close and stop tracking the connection.
                        connection = data

                        self._handle_stream(connection, connection.getpeername())

                        connection.close()
                        to_read.remove(data)
        except Exception:
            logger.exception("Fatal error in the main server loop")
    def start(self, asynchronous=False):
        """Instructs the server to begin listening for incoming
        requests from any `EvaluatorClients`.

        Parameters
        ----------
        asynchronous: bool
            If `True` the server will run on a separate thread in the background,
            returning control back to the main thread. Otherwise, this function
            will block the main thread until this server is killed.

        Raises
        ------
        RuntimeError
            If the server has already been started.
        """
        if self._started:
            raise RuntimeError("The server has already been started.")

        logger.info("Server listening at port {}".format(self._port))

        self._started = True
        self._stopped = False

        # Create the TCP socket
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow re-binding to the port immediately after a restart.
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._socket.bind(("localhost", self._port))
        self._socket.listen(128)

        try:
            if asynchronous:
                # A daemon thread, so it will not block interpreter exit.
                self._server_thread = threading.Thread(
                    target=self._handle_connections, daemon=True
                )
                self._server_thread.start()
            else:
                self._handle_connections()
        except KeyboardInterrupt:
            # Allow Ctrl-C to shut the (blocking) server down cleanly.
            self.stop()
def stop(self):
"""Stops the property calculation server and it's
provided backend.
"""
if not self._started:
raise ValueError("The server has not yet been started.")
self._stopped = True
self._started = False
if self._server_thread is not None:
self._server_thread.join()
self._server_thread = None
if self._socket is not None:
self._socket.close()
self._socket = None
    def __enter__(self):
        """Start the server in the background upon entering a context."""
        self.start(asynchronous=True)
        return self
    def __exit__(self, *args):
        """Stop the server when the context exits."""
        self.stop()
    def __del__(self):
        # Best-effort clean-up: stop the server if it is still running when
        # this object is garbage collected.
        if self._started and not self._stopped:
            self.stop()
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_storage/test_storage.py | <gh_stars>10-100
"""
Units tests for openff.evaluator.storage
"""
import os
import tempfile
import pytest
from openff.evaluator.forcefield import SmirnoffForceFieldSource
from openff.evaluator.storage import LocalFileStorage
from openff.evaluator.storage.data import StoredSimulationData
from openff.evaluator.storage.query import SimulationDataQuery, SubstanceQuery
from openff.evaluator.substances import Substance
from openff.evaluator.tests.test_storage.data import HashableData, SimpleData
from openff.evaluator.tests.utils import create_dummy_simulation_data
@pytest.mark.parametrize("data_class", [SimpleData, HashableData])
def test_simple_store_and_retrieve(data_class):
    """Tests storing and retrieving a simple object."""
    with tempfile.TemporaryDirectory() as temporary_directory:
        local_storage = LocalFileStorage(temporary_directory)
        storage_object = data_class()

        # Storing an incomplete object should fail validation.
        with pytest.raises(ValueError):
            local_storage.store_object(storage_object)

        # Completing the object should allow it to be stored.
        storage_object.some_attribute = 10
        storage_key = local_storage.store_object(storage_object)

        assert local_storage.has_object(storage_object)

        # The retrieved object should exactly match the stored one.
        retrieved_object, _ = local_storage.retrieve_object(storage_key)

        assert retrieved_object is not None
        assert retrieved_object.json() == storage_object.json()

        # Re-storing duplicate data should return the original key.
        assert local_storage.store_object(storage_object) == storage_key
def test_force_field_storage():
    """A simple test to show that force fields can be stored and
    retrieved using the local storage backend."""
    force_field_source = SmirnoffForceFieldSource.from_path(
        "smirnoff99Frosst-1.1.0.offxml"
    )
    with tempfile.TemporaryDirectory() as temporary_directory:
        local_storage = LocalFileStorage(temporary_directory)
        force_field_id = local_storage.store_force_field(force_field_source)
        retrieved_force_field = local_storage.retrieve_force_field(force_field_id)
        assert force_field_source.json() == retrieved_force_field.json()
        # A fresh backend pointed at the same directory should recognise the
        # previously stored force field and re-use its id.
        local_storage_new = LocalFileStorage(temporary_directory)
        assert local_storage_new.has_force_field(force_field_source)
        new_force_field_id = local_storage_new.store_force_field(force_field_source)
        assert new_force_field_id == force_field_id
def test_base_simulation_data_storage():
    """Tests that simulation data with an ancillary data directory can be
    stored in, and retrieved from, a local storage backend."""
    substance = Substance.from_components("C")
    with tempfile.TemporaryDirectory() as base_directory:
        data_directory = os.path.join(base_directory, "data_directory")
        data_object = create_dummy_simulation_data(data_directory, substance)
        backend_directory = os.path.join(base_directory, "storage_dir")
        storage = LocalFileStorage(backend_directory)
        storage_key = storage.store_object(data_object, data_directory)
        # Regenerate the data directory.
        os.makedirs(data_directory, exist_ok=True)
        # Re-storing the same data should be recognised as a duplicate and
        # return the original storage key.
        assert storage.has_object(data_object)
        assert storage_key == storage.store_object(data_object, data_directory)
        retrieved_object, retrieved_directory = storage.retrieve_object(
            storage_key, StoredSimulationData
        )
        # The ancillary directory returned should live inside the backend's
        # own directory.
        assert backend_directory in retrieved_directory
        assert data_object.json() == retrieved_object.json()
def test_base_simulation_data_query():
    """Tests that stored simulation data can be queried both by the full
    substance and by that substance's individual components."""
    substance_a = Substance.from_components("C")
    substance_b = Substance.from_components("CO")
    substance_full = Substance.from_components("C", "CO")
    substances = [substance_a, substance_b, substance_full]
    with tempfile.TemporaryDirectory() as base_directory:
        backend_directory = os.path.join(base_directory, "storage_dir")
        storage = LocalFileStorage(backend_directory)
        # Store one dummy data object per substance.
        for substance in substances:
            data_directory = os.path.join(base_directory, f"{substance.identifier}")
            data_object = create_dummy_simulation_data(data_directory, substance)
            storage.store_object(data_object, data_directory)
        # Querying on the full substance should match exactly one stored
        # data object per substance.
        for substance in substances:
            substance_query = SimulationDataQuery()
            substance_query.substance = substance
            results = storage.query(substance_query)
            assert results is not None and len(results) == 1
            assert len(next(iter(results.values()))[0]) == 3
        # Querying by components should match the data stored for each of
        # the two individual components of the full substance.
        component_query = SimulationDataQuery()
        component_query.substance = substance_full
        component_query.substance_query = SubstanceQuery()
        component_query.substance_query.components_only = True
        results = storage.query(component_query)
        assert results is not None and len(results) == 2
@pytest.mark.parametrize("reverse_order", [True, False])
def test_duplicate_simulation_data_storage(reverse_order):
    """Tests that duplicate simulation data entries are merged, retaining
    only the entry with the lowest statistical inefficiency regardless of
    the order in which the duplicates are stored."""
    substance = Substance.from_components("CO")
    with tempfile.TemporaryDirectory() as base_directory_path:
        storage_directory = os.path.join(base_directory_path, "storage")
        local_storage = LocalFileStorage(storage_directory)
        # Construct some data to store with increasing
        # statistical inefficiencies.
        data_to_store = []
        for index in range(3):
            data_directory = os.path.join(base_directory_path, f"data_{index}")
            coordinate_name = f"data_{index}.pdb"
            data_object = create_dummy_simulation_data(
                directory_path=data_directory,
                substance=substance,
                force_field_id="ff_id_1",
                coordinate_file_name=coordinate_name,
                statistical_inefficiency=float(index),
                calculation_id="id",
            )
            data_to_store.append((data_object, data_directory))
        # Keep a track of the storage keys.
        all_storage_keys = set()
        iterator = enumerate(data_to_store)
        if reverse_order:
            iterator = reversed(list(iterator))
        # Store the data
        for index, data in iterator:
            data_object, data_directory = data
            storage_key = local_storage.store_object(data_object, data_directory)
            all_storage_keys.add(storage_key)
            retrieved_object, stored_directory = local_storage.retrieve_object(
                storage_key
            )
            # Handle the case where we haven't reversed the order of
            # the data to store. Here only the first object in the list
            # should be stored and never replaced as it has the lowest
            # statistical inefficiency.
            if not reverse_order:
                expected_index = 0
            # Handle the case where we have reversed the order of
            # the data to store. Here each new piece of data should
            # replace the last, as it will have a lower
            # statistical inefficiency.
            else:
                expected_index = index
            assert retrieved_object.json() == data_to_store[expected_index][0].json()
            # Make sure the directory has been correctly overwritten / retained
            # depending on the data order.
            coordinate_path = os.path.join(
                stored_directory, f"data_{expected_index}.pdb"
            )
            assert os.path.isfile(coordinate_path)
        # All of the duplicate entries should have been assigned one and the
        # same storage key.
        assert len(all_storage_keys) == 1
|
jaketanderson/openff-evaluator | openff/evaluator/datasets/thermoml/__init__.py | from .thermoml import ThermoMLDataSet
from .plugins import register_thermoml_property, thermoml_property # isort:skip
__all__ = [ThermoMLDataSet, register_thermoml_property, thermoml_property]
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_datasets/test_curation/test_filtering.py | from typing import List, Tuple
import numpy
import pandas
import pytest
from openff.units import unit
from pydantic import ValidationError
from openff.evaluator.datasets import (
MeasurementSource,
PhysicalPropertyDataSet,
PropertyPhase,
)
from openff.evaluator.datasets.curation.components.filtering import (
FilterByCharged,
FilterByChargedSchema,
FilterByElements,
FilterByElementsSchema,
FilterByEnvironments,
FilterByEnvironmentsSchema,
FilterByIonicLiquid,
FilterByIonicLiquidSchema,
FilterByMoleFraction,
FilterByMoleFractionSchema,
FilterByNComponents,
FilterByNComponentsSchema,
FilterByPressure,
FilterByPressureSchema,
FilterByPropertyTypes,
FilterByPropertyTypesSchema,
FilterByRacemic,
FilterByRacemicSchema,
FilterBySmiles,
FilterBySmilesSchema,
FilterBySmirks,
FilterBySmirksSchema,
FilterByStereochemistry,
FilterByStereochemistrySchema,
FilterBySubstances,
FilterBySubstancesSchema,
FilterByTemperature,
FilterByTemperatureSchema,
FilterDuplicates,
FilterDuplicatesSchema,
)
from openff.evaluator.datasets.utilities import data_frame_to_substances
from openff.evaluator.properties import Density, EnthalpyOfMixing
from openff.evaluator.substances import Component, MoleFraction, Substance
from openff.evaluator.thermodynamics import ThermodynamicState
from openff.evaluator.utils.checkmol import ChemicalEnvironment
def _build_entry(*smiles: str) -> Density:
    """Builds a density data entry measured at ambient conditions
    and for a system containing the specified smiles patterns in
    equal amounts.

    Parameters
    ----------
    smiles
        The smiles patterns to build the substance's components from.

    Returns
    -------
    Density
        The built density entry.
    """
    assert len(smiles) > 0
    return Density(
        thermodynamic_state=ThermodynamicState(
            temperature=298.15 * unit.kelvin,
            pressure=101.325 * unit.kilopascal,
        ),
        phase=PropertyPhase.Liquid,
        value=1.0 * Density.default_unit(),
        uncertainty=1.0 * Density.default_unit(),
        source=MeasurementSource(doi=" "),
        substance=Substance.from_components(*smiles),
    )
def _build_data_frame(
property_types: List[str],
substance_entries: List[Tuple[Tuple[str, ...], Tuple[bool, ...]]],
) -> pandas.DataFrame:
data_rows = []
for substance, include_properties in substance_entries:
for property_type, include_property in zip(property_types, include_properties):
if not include_property:
continue
data_row = {
"N Components": len(substance),
f"{property_type} Value (unit)": 1.0,
}
for index, component in enumerate(substance):
data_row[f"Component {index + 1}"] = component
data_rows.append(data_row)
data_frame = pandas.DataFrame(data_rows)
return data_frame
@pytest.fixture(scope="module")
def data_frame() -> pandas.DataFrame:
    """Builds a data frame of density and enthalpy of mixing entries for
    both pure and binary substances, measured over a small grid of two
    temperatures, two pressures and several mole fractions.

    The fixture is module scoped so the frame is only built once.
    """
    temperatures = [298.15, 318.15]
    pressures = [101.325, 101.0]
    properties = [Density, EnthalpyOfMixing]
    mole_fractions = [(1.0,), (1.0,), (0.25, 0.75), (0.75, 0.25)]
    smiles = {1: [("C(F)(Cl)(Br)",), ("C",)], 2: [("CO", "C"), ("C", "CO")]}
    # Enumerate the full cartesian product of the state grid.
    loop_variables = [
        (
            temperature,
            pressure,
            property_type,
            mole_fraction,
        )
        for temperature in temperatures
        for pressure in pressures
        for property_type in properties
        for mole_fraction in mole_fractions
    ]
    data_entries = []
    for temperature, pressure, property_type, mole_fraction in loop_variables:
        n_components = len(mole_fraction)
        # Build one entry for each substance with a matching number of
        # components.
        for smiles_tuple in smiles[n_components]:
            substance = Substance()
            for smiles_pattern, x in zip(smiles_tuple, mole_fraction):
                substance.add_component(Component(smiles_pattern), MoleFraction(x))
            data_entries.append(
                property_type(
                    thermodynamic_state=ThermodynamicState(
                        temperature=temperature * unit.kelvin,
                        pressure=pressure * unit.kilopascal,
                    ),
                    phase=PropertyPhase.Liquid,
                    value=1.0 * property_type.default_unit(),
                    uncertainty=1.0 * property_type.default_unit(),
                    source=MeasurementSource(doi=" "),
                    substance=substance,
                )
            )
    data_set = PhysicalPropertyDataSet()
    data_set.add_properties(*data_entries)
    return data_set.to_pandas()
def test_filter_duplicates(data_frame):
    """Tests that duplicate entries are collapsed, leaving the expected
    number of rows for each property type and component count."""
    filtered_frame = FilterDuplicates.apply(data_frame, FilterDuplicatesSchema(), 1)

    pure_data: pandas.DataFrame = filtered_frame[filtered_frame["N Components"] == 1]

    assert len(pure_data) == 16
    assert len(pure_data[pure_data["EnthalpyOfMixing Value (kJ / mol)"].notna()]) == 8
    assert len(pure_data[pure_data["Density Value (g / ml)"].notna()]) == 8

    binary_data: pandas.DataFrame = filtered_frame[filtered_frame["N Components"] == 2]

    # Use the already computed subset rather than re-filtering the frame.
    assert len(binary_data) == 16
    assert (
        len(binary_data[binary_data["EnthalpyOfMixing Value (kJ / mol)"].notna()]) == 8
    )
    assert len(binary_data[binary_data["Density Value (g / ml)"].notna()]) == 8
def test_validate_filter_by_temperature():
    """Tests the validation of the temperature filter schema."""
    # A well ordered temperature range should validate cleanly.
    FilterByTemperatureSchema(minimum_temperature=1.0, maximum_temperature=2.0)

    # A minimum temperature above the maximum should be rejected.
    with pytest.raises(ValidationError):
        FilterByTemperatureSchema(minimum_temperature=2.0, maximum_temperature=1.0)
def test_filter_by_temperature(data_frame):
    """Tests that data points can be filtered based on the temperature at
    which they were measured."""
    # Apply a filter which should have no effect.
    filtered_frame = FilterByTemperature.apply(
        data_frame,
        FilterByTemperatureSchema(minimum_temperature=290.0, maximum_temperature=320.0),
    )
    assert len(filtered_frame) == len(data_frame)
    # Filter out the minimum values
    filtered_frame = FilterByTemperature.apply(
        data_frame,
        FilterByTemperatureSchema(minimum_temperature=300.0, maximum_temperature=None),
    )
    assert len(filtered_frame) == len(data_frame) / 2
    temperatures = filtered_frame["Temperature (K)"].unique()
    # Only the higher of the fixture's two temperatures should remain.
    assert len(temperatures) == 1
    assert numpy.isclose(temperatures[0], 318.15)
    # Filter out the maximum values
    filtered_frame = FilterByTemperature.apply(
        data_frame,
        FilterByTemperatureSchema(
            minimum_temperature=None,
            maximum_temperature=300.0,
        ),
    )
    assert len(filtered_frame) == len(data_frame) / 2
    temperatures = filtered_frame["Temperature (K)"].unique()
    # Only the lower of the two temperatures should remain.
    assert len(temperatures) == 1
    assert numpy.isclose(temperatures[0], 298.15)
def test_validate_filter_by_pressure():
    """Tests the validation of the pressure filter schema."""
    # A well ordered pressure range should validate cleanly.
    FilterByPressureSchema(minimum_pressure=1.0, maximum_pressure=2.0)

    # A minimum pressure above the maximum should be rejected.
    with pytest.raises(ValidationError):
        FilterByPressureSchema(minimum_pressure=2.0, maximum_pressure=1.0)
def test_filter_by_pressure(data_frame):
    """Tests that data points can be filtered based on the pressure at
    which they were measured."""
    # Apply a filter which should have no effect.
    filtered_frame = FilterByPressure.apply(
        data_frame,
        FilterByPressureSchema(minimum_pressure=100.0, maximum_pressure=140.0),
    )
    assert len(filtered_frame) == len(data_frame)
    # Filter out the minimum values
    filtered_frame = FilterByPressure.apply(
        data_frame,
        FilterByPressureSchema(minimum_pressure=101.2, maximum_pressure=None),
    )
    assert len(filtered_frame) == len(data_frame) / 2
    pressures = filtered_frame["Pressure (kPa)"].unique()
    # Only the higher of the fixture's two pressures should remain.
    assert len(pressures) == 1
    assert numpy.isclose(pressures[0], 101.325)
    # Filter out the maximum values
    filtered_frame = FilterByPressure.apply(
        data_frame,
        FilterByPressureSchema(
            minimum_pressure=None,
            maximum_pressure=101.2,
        ),
    )
    assert len(filtered_frame) == len(data_frame) / 2
    pressures = filtered_frame["Pressure (kPa)"].unique()
    # Only the lower of the two pressures should remain.
    assert len(pressures) == 1
    assert numpy.isclose(pressures[0], 101.0)
def test_validate_filter_by_mole_fraction():
    """Tests the validation of the mole fraction filter schema."""
    # A well formed set of ranges should validate cleanly.
    FilterByMoleFractionSchema(
        mole_fraction_ranges={2: [[(0.2, 0.8)]], 3: [[(0.1, 0.2)], [(0.4, 0.5)]]}
    )

    bad_range_sets = [
        # The number of range lists must match the component count.
        {2: [[(0.2, 0.8)], [(0.2, 0.8)]]},
        {3: [[(0.2, 0.8)]]},
        # Each range must be ordered and lie within [0, 1].
        {2: [[(0.8, 0.2)]]},
        {2: [[(-0.8, 0.2)]]},
        {2: [[(0.8, 1.2)]]},
    ]

    for mole_fraction_ranges in bad_range_sets:
        with pytest.raises(ValidationError):
            FilterByMoleFractionSchema(mole_fraction_ranges=mole_fraction_ranges)
def test_filter_by_mole_fraction():
    """Tests that data points can be filtered based on the mole fractions
    of the components in the measured substance.

    The module level ``data_frame`` fixture previously requested here was
    never used - it was immediately shadowed by the locally constructed
    frame - so it is no longer requested.
    """
    data_rows = [
        {"N Components": 1, "Component 1": "CCCCC", "Mole Fraction 1": 1.0},
        {
            "N Components": 2,
            "Component 1": "CCCCC",
            "Mole Fraction 1": 0.2,
            "Component 2": "CCCCCO",
            "Mole Fraction 2": 0.8,
        },
        {
            "N Components": 2,
            "Component 1": "CCCCC",
            "Mole Fraction 1": 0.8,
            "Component 2": "CCCCCO",
            "Mole Fraction 2": 0.2,
        },
        {
            "N Components": 2,
            "Component 1": "CCCCC",
            "Mole Fraction 1": 0.5,
            "Component 2": "CCCCCO",
            "Mole Fraction 2": 0.5,
        },
    ]
    data_frame = pandas.DataFrame(data_rows)

    # Apply a filter which should have no effect.
    filtered_frame = FilterByMoleFraction.apply(
        data_frame, FilterByMoleFractionSchema(mole_fraction_ranges={})
    )
    assert len(filtered_frame) == len(data_frame)

    # Retain only the minimum value; pure data is untouched by the filter.
    filtered_frame = FilterByMoleFraction.apply(
        data_frame, FilterByMoleFractionSchema(mole_fraction_ranges={2: [[(0.1, 0.3)]]})
    )
    assert len(filtered_frame) == 2
    assert len(filtered_frame[filtered_frame["N Components"] == 1]) == 1
    assert len(filtered_frame[filtered_frame["N Components"] == 2]) == 1
    filtered_frame = filtered_frame[filtered_frame["N Components"] == 2]
    assert numpy.isclose(filtered_frame["Mole Fraction 1"], 0.2)

    # Drop the pure data point to make the test cleaner from this point on.
    data_frame = data_frame[data_frame["N Components"] == 2]

    # Retain only the maximum value
    filtered_frame = FilterByMoleFraction.apply(
        data_frame, FilterByMoleFractionSchema(mole_fraction_ranges={2: [[(0.7, 0.9)]]})
    )
    assert len(filtered_frame) == 1
    assert numpy.isclose(filtered_frame["Mole Fraction 1"], 0.8)

    # Retain both the minimum and maximum values
    filtered_frame = FilterByMoleFraction.apply(
        data_frame,
        FilterByMoleFractionSchema(
            mole_fraction_ranges={2: [[(0.1, 0.3), (0.7, 0.9)]]}
        ),
    )
    assert len(filtered_frame) == 2
    assert all(filtered_frame["Mole Fraction 1"].round(1).isin([0.2, 0.8]))
def test_filter_by_racemic():
    """Tests that substances containing racemic mixtures (both enantiomers
    of the same molecule present) are filtered out."""
    data_rows = [
        {"N Components": 1, "Component 1": "N[C@H](C)C(=O)O"},
        {"N Components": 1, "Component 1": "N[C@@H](C)C(=O)O"},
        {"N Components": 2, "Component 1": "C", "Component 2": "N[C@H](C)C(=O)O"},
        {
            "N Components": 2,
            "Component 1": "N[C@@H](C)C(=O)O",
            "Component 2": "N[C@H](C)C(=O)O",
        },
        {
            "N Components": 3,
            "Component 1": "C",
            "Component 2": "N[C@@H](C)C(=O)O",
            "Component 3": "N[C@H](C)C(=O)O",
        },
        {
            "N Components": 3,
            "Component 1": "N[C@@H](C)C(=O)O",
            "Component 2": "C",
            "Component 3": "N[C@H](C)C(=O)O",
        },
        {
            "N Components": 3,
            "Component 1": "N[C@@H](C)C(=O)O",
            "Component 2": "N[C@H](C)C(=O)O",
            "Component 3": "C",
        },
    ]
    data_frame = pandas.DataFrame(data_rows)
    # Apply the filter
    filtered_frame = FilterByRacemic.apply(data_frame, FilterByRacemicSchema())
    # Single enantiomers should be retained; only the binary row which does
    # not pair both enantiomers should survive; every ternary row contains
    # the racemate and should be removed.
    assert len(filtered_frame[filtered_frame["N Components"] == 1]) == 2
    assert len(filtered_frame[filtered_frame["N Components"] == 2]) == 1
    binary_data = filtered_frame[filtered_frame["N Components"] == 2]
    assert binary_data["Component 1"].unique()[0] == "C"
    assert len(filtered_frame[filtered_frame["N Components"] == 3]) == 0
def test_validate_filter_by_elements():
    """Tests the validation of the element filter schema."""
    # Either an allow list or a deny list on its own should be valid.
    FilterByElementsSchema(allowed_elements=["C"])
    FilterByElementsSchema(forbidden_elements=["C"])

    # Providing both mutually exclusive options should be rejected.
    with pytest.raises(ValidationError):
        FilterByElementsSchema(allowed_elements=["C"], forbidden_elements=["C"])
def test_filter_by_elements(data_frame):
    """Tests that data points can be filtered based on which chemical
    elements appear in the measured substance."""
    # Apply a filter which should have no effect.
    filtered_frame = FilterByElements.apply(
        data_frame,
        FilterByElementsSchema(allowed_elements=["C", "O", "H", "F", "Cl", "Br"]),
    )
    assert len(filtered_frame) == len(data_frame)
    # An empty deny list should likewise have no effect.
    filtered_frame = FilterByElements.apply(
        data_frame,
        FilterByElementsSchema(forbidden_elements=[]),
    )
    assert len(filtered_frame) == len(data_frame)
    # Filter out all oxygen containing molecules. This should leave
    # only pure measurements.
    filtered_frame = FilterByElements.apply(
        data_frame,
        FilterByElementsSchema(forbidden_elements=["O"]),
    )
    assert len(filtered_frame[filtered_frame["N Components"] == 1]) == 32
    # Filter out any non-hydrocarbons.
    filtered_frame = FilterByElements.apply(
        data_frame,
        FilterByElementsSchema(allowed_elements=["C", "H"]),
    )
    # Only the pure methane entries should remain.
    assert len(filtered_frame) == 16
    assert len(filtered_frame["Component 1"].unique()) == 1
    assert filtered_frame["Component 1"].unique()[0] == "C"
    assert filtered_frame["N Components"].max() == 1
def test_validate_filter_by_property():
    """Tests the validation of the property type filter schema."""
    # Valid schemas, with and without component count restrictions.
    FilterByPropertyTypesSchema(property_types=["Density"])
    FilterByPropertyTypesSchema(
        property_types=["Density"], n_components={"Density": [1]}
    )

    # A property type present in ``n_components`` but missing from
    # ``property_types`` should be rejected.
    with pytest.raises(ValidationError):
        FilterByPropertyTypesSchema(property_types=[], n_components={"Density": [1]})
def test_filter_by_property(data_frame):
    """Check property-type filtering, including the per-property
    ``n_components`` restriction."""
    # Apply a filter which should have no effect.
    filtered_frame = FilterByPropertyTypes.apply(
        data_frame,
        FilterByPropertyTypesSchema(property_types=["Density", "EnthalpyOfMixing"]),
    )
    assert len(filtered_frame) == len(data_frame)
    # Filter out all density measurements.
    filtered_frame = FilterByPropertyTypes.apply(
        data_frame, FilterByPropertyTypesSchema(property_types=["EnthalpyOfMixing"])
    )
    assert len(filtered_frame) == len(data_frame) / 2
    filtered_frame = FilterByPropertyTypes.apply(
        data_frame,
        FilterByPropertyTypesSchema(
            property_types=["EnthalpyOfMixing"],
            n_components={"EnthalpyOfMixing": [1, 2]},
        ),
    )
    assert len(filtered_frame) == len(data_frame) / 2
    # Filter out anything but pure density measurements.
    filtered_frame = FilterByPropertyTypes.apply(
        data_frame,
        FilterByPropertyTypesSchema(
            property_types=["Density"], n_components={"Density": [1]}
        ),
    )
    assert len(filtered_frame) == 16
    assert filtered_frame["N Components"].max() == 1
    assert len(filtered_frame[filtered_frame["Density Value (g / ml)"].notna()]) == 16
    assert "EnthalpyOfMixing Value (kJ / mol)" not in filtered_frame
    # Retain only pure densities and binary enthalpies of mixing.
    filtered_frame = FilterByPropertyTypes.apply(
        data_frame,
        FilterByPropertyTypesSchema(
            property_types=["Density", "EnthalpyOfMixing"],
            n_components={"Density": [1], "EnthalpyOfMixing": [2]},
        ),
    )
    assert len(filtered_frame) == 32
    assert len(filtered_frame[filtered_frame["N Components"] == 1]) == 16
    assert len(filtered_frame[filtered_frame["N Components"] == 2]) == 16
    assert len(filtered_frame[filtered_frame["Density Value (g / ml)"].notna()]) == 16
    assert (
        len(filtered_frame[filtered_frame["EnthalpyOfMixing Value (kJ / mol)"].notna()])
        == 16
    )
    assert (
        filtered_frame[filtered_frame["Density Value (g / ml)"].notna()][
            "N Components"
        ].max()
        == 1
    )
    assert (
        filtered_frame[filtered_frame["EnthalpyOfMixing Value (kJ / mol)"].notna()][
            "N Components"
        ].min()
        == 2
    )
def test_filter_by_property_strict():
    """Tests that the FilterByPropertyTypes filter works
    correctly when strict mode is set but n_components is not.
    """
    property_types = ["Density", "DielectricConstant"]
    # Each entry is (smiles_tuple, per-property presence flags) — presumably
    # consumed by `_build_data_frame`; verify against that helper.
    substance_entries = [
        (("CC",), (True, True)),
        (("CCC",), (True, False)),
        (("CCCCC",), (True, True)),
        (("CC", "CCC"), (True, True)),
        (("CCC", "CCC"), (True, False)),
        (("CCC", "CCCC"), (False, True)),
    ]
    data_frame = _build_data_frame(property_types, substance_entries)
    filtered_frame = FilterByPropertyTypes.apply(
        data_frame,
        FilterByPropertyTypesSchema(property_types=property_types, strict=True),
    )
    # Only substances with *all* requested property types survive.
    assert len(filtered_frame) == 6
    assert data_frame_to_substances(filtered_frame) == {
        ("CC",),
        ("CCCCC",),
        ("CC", "CCC"),
    }
def test_filter_by_property_strict_n_components():
    """Tests that the FilterByPropertyTypes filter works
    correctly when strict mode and n_components is set.
    """
    property_types = ["Density", "EnthalpyOfVaporization", "EnthalpyOfMixing"]
    # Each entry is (smiles_tuple, per-property presence flags) — presumably
    # consumed by `_build_data_frame`; verify against that helper.
    substance_entries = [
        (("CC",), (True, True, False)),
        (("CCC",), (True, True, False)),
        (("CCCCC",), (True, False, False)),
        (("CCCCCC",), (True, True, False)),
        (("CC", "CCC"), (True, False, True)),
        (("CC", "CCCCC"), (True, False, True)),
        (("CCC", "CCC"), (True, False, False)),
        (("CCC", "CCCC"), (False, False, True)),
    ]
    data_frame = _build_data_frame(property_types, substance_entries)
    filtered_frame = FilterByPropertyTypes.apply(
        data_frame,
        FilterByPropertyTypesSchema(
            property_types=property_types,
            n_components={
                "Density": [1, 2],
                "EnthalpyOfVaporization": [1],
                "EnthalpyOfMixing": [2],
            },
            strict=True,
        ),
    )
    assert len(filtered_frame) == 6
    assert data_frame_to_substances(filtered_frame) == {
        ("CC",),
        ("CCC",),
        ("CC", "CCC"),
    }
def test_filter_stereochemistry(data_frame):
    """Molecules with undefined stereochemistry should be removed."""
    # Ensure molecules with undefined stereochemistry are filtered.
    filtered_frame = FilterByStereochemistry.apply(
        data_frame,
        FilterByStereochemistrySchema(),
    )
    assert len(filtered_frame) == len(data_frame) - 16
def test_filter_charged():
    """Charged components (and mixtures containing them) should be removed."""
    thermodynamic_state = ThermodynamicState(
        temperature=298.15 * unit.kelvin,
        pressure=101.325 * unit.kilopascal,
    )
    # Ensure charged molecules are filtered.
    data_set = PhysicalPropertyDataSet()
    data_set.add_properties(
        Density(
            thermodynamic_state=thermodynamic_state,
            phase=PropertyPhase.Liquid,
            value=1.0 * Density.default_unit(),
            uncertainty=1.0 * Density.default_unit(),
            source=MeasurementSource(doi=" "),
            substance=Substance.from_components("[Cl-]"),
        ),
        Density(
            thermodynamic_state=thermodynamic_state,
            phase=PropertyPhase.Liquid,
            value=1.0 * Density.default_unit(),
            uncertainty=1.0 * Density.default_unit(),
            source=MeasurementSource(doi=" "),
            substance=Substance.from_components("[Cl-]", "C"),
        ),
        Density(
            thermodynamic_state=thermodynamic_state,
            phase=PropertyPhase.Liquid,
            value=1.0 * Density.default_unit(),
            uncertainty=1.0 * Density.default_unit(),
            source=MeasurementSource(doi=" "),
            substance=Substance.from_components("C"),
        ),
    )
    data_frame = data_set.to_pandas()
    filtered_frame = FilterByCharged.apply(
        data_frame,
        FilterByChargedSchema(),
    )
    # Only the pure, neutral entry should survive.
    assert len(filtered_frame) == 1
    assert filtered_frame["N Components"].max() == 1
def test_filter_ionic_liquid():
    """Ionic-liquid substances should be removed by ``FilterByIonicLiquid``."""
    thermodynamic_state = ThermodynamicState(
        temperature=298.15 * unit.kelvin,
        pressure=101.325 * unit.kilopascal,
    )
    # Ensure ionic liquids are filtered.
    data_set = PhysicalPropertyDataSet()
    data_set.add_properties(
        Density(
            thermodynamic_state=thermodynamic_state,
            phase=PropertyPhase.Liquid,
            value=1.0 * Density.default_unit(),
            uncertainty=1.0 * Density.default_unit(),
            source=MeasurementSource(doi=" "),
            substance=Substance.from_components("[Na+].[Cl-]"),
        ),
        Density(
            thermodynamic_state=thermodynamic_state,
            phase=PropertyPhase.Liquid,
            value=1.0 * Density.default_unit(),
            uncertainty=1.0 * Density.default_unit(),
            source=MeasurementSource(doi=" "),
            substance=Substance.from_components("C"),
        ),
    )
    data_frame = data_set.to_pandas()
    filtered_frame = FilterByIonicLiquid.apply(
        data_frame,
        FilterByIonicLiquidSchema(),
    )
    # Only the non-ionic entry should survive.
    assert len(filtered_frame) == 1
def test_validate_filter_by_smiles():
    """``FilterBySmilesSchema`` accepts an include or an exclude list,
    but not both at once."""
    # Ensure a valid schema passes
    FilterBySmilesSchema(smiles_to_include=["C"])
    FilterBySmilesSchema(smiles_to_exclude=["C"])
    # Test that an exception is raised when mutually exclusive options
    # are provided.
    with pytest.raises(ValidationError):
        FilterBySmilesSchema(smiles_to_include=["C"], smiles_to_exclude=["C"])
def test_filter_by_smiles(data_frame):
    """Check SMILES-based filtering, including partial inclusion of
    mixtures."""
    # Strictly only retain hydrocarbons. This should only leave pure
    # properties.
    filtered_frame = FilterBySmiles.apply(
        data_frame,
        FilterBySmilesSchema(smiles_to_include=["C"]),
    )
    assert len(filtered_frame) == 16
    assert filtered_frame["N Components"].max() == 1
    assert {*filtered_frame["Component 1"].unique()} == {"C"}
    # Make sure that partial inclusion works well when there are only
    # pure components.
    pure_data = data_frame[data_frame["N Components"] == 1]
    filtered_frame = FilterBySmiles.apply(
        pure_data,
        FilterBySmilesSchema(smiles_to_include=["C"], allow_partial_inclusion=True),
    )
    assert len(filtered_frame) == 16
    assert {*filtered_frame["Component 1"].unique()} == {"C"}
    # Now retain only hydrocarbons or mixtures containing hydrocarbons.
    filtered_frame = FilterBySmiles.apply(
        data_frame,
        FilterBySmilesSchema(smiles_to_include=["C"], allow_partial_inclusion=True),
    )
    pure_data = filtered_frame[filtered_frame["N Components"] == 1]
    binary_data = filtered_frame[filtered_frame["N Components"] == 2]
    assert len(pure_data) == 16
    assert {*pure_data["Component 1"].unique()} == {"C"}
    assert len(binary_data) == len(data_frame[data_frame["N Components"] == 2])
    # Exclude any hydrocarbons
    filtered_frame = FilterBySmiles.apply(
        data_frame,
        FilterBySmilesSchema(smiles_to_exclude=["C"]),
    )
    pure_data = filtered_frame[filtered_frame["N Components"] == 1]
    binary_data = filtered_frame[filtered_frame["N Components"] == 2]
    assert len(pure_data) == 16
    # The canonical SMILES form may differ between toolkit versions.
    unique_components = {*pure_data["Component 1"].unique()}
    assert unique_components == {"C(F)(Cl)(Br)"} or unique_components == {"FC(Cl)Br"}
    assert len(binary_data) == 0
def test_validate_filter_by_smirks():
    """``FilterBySmirksSchema`` accepts an include or an exclude list,
    but not both at once."""
    # Ensure a valid schema passes
    FilterBySmirksSchema(smirks_to_include=["[#6]"])
    FilterBySmirksSchema(smirks_to_exclude=["[#6]"])
    # Test that an exception is raised when mutually exclusive options
    # are provided.
    with pytest.raises(ValidationError):
        FilterBySmirksSchema(smirks_to_include=["[#6]"], smirks_to_exclude=["[#6]"])
def test_find_smirks_matches():
    """A simple test that the `FilterBySmirks` smirks matching utility
    functions as expected."""
    # Test that nothing is returned when no smirks are provided.
    assert FilterBySmirks._find_smirks_matches("CCC") == []
    # Test that an alkane is correctly matched
    assert FilterBySmirks._find_smirks_matches("CCC", "[#6:1]") == ["[#6:1]"]
    # Test that no matches are found for water
    assert FilterBySmirks._find_smirks_matches("O", "[#6:1]") == []
def test_filter_by_smirks(data_frame):
    """Check SMIRKS-based filtering, including partial inclusion of
    mixtures."""
    # Apply a filter which should do nothing.
    filtered_frame = FilterBySmirks.apply(
        data_frame,
        FilterBySmirksSchema(smirks_to_include=["[#6]"]),
    )
    assert len(filtered_frame) == len(data_frame) == 64
    # Retain only oxygen or halogen containing compounds.
    filtered_frame = FilterBySmirks.apply(
        data_frame,
        FilterBySmirksSchema(
            smirks_to_include=["[#8]", "[#9,#17,#35]"], allow_partial_inclusion=True
        ),
    )
    pure_data = filtered_frame[filtered_frame["N Components"] == 1]
    binary_data = filtered_frame[filtered_frame["N Components"] == 2]
    assert len(pure_data) == 16
    # The canonical SMILES form may differ between toolkit versions.
    unique_components = {*pure_data["Component 1"].unique()}
    assert unique_components == {"C(F)(Cl)(Br)"} or unique_components == {"FC(Cl)Br"}
    assert len(binary_data) == len(data_frame[data_frame["N Components"] == 2])
    # Exclude all oxygen containing compounds
    filtered_frame = FilterBySmirks.apply(
        data_frame,
        FilterBySmirksSchema(smirks_to_exclude=["[#8]"]),
    )
    pure_data = filtered_frame[filtered_frame["N Components"] == 1]
    binary_data = filtered_frame[filtered_frame["N Components"] == 2]
    assert len(pure_data) == 32
    assert len(binary_data) == 0
def test_filter_by_n_components(data_frame):
    """Check filtering on the number of components in each substance."""
    # Apply a filter which should do nothing
    filtered_frame = FilterByNComponents.apply(
        data_frame, FilterByNComponentsSchema(n_components=[1, 2])
    )
    assert len(filtered_frame) == len(data_frame)
    # Retain only pure measurements
    filtered_frame = FilterByNComponents.apply(
        data_frame, FilterByNComponentsSchema(n_components=[1])
    )
    assert len(filtered_frame) == len(data_frame) / 2
    assert filtered_frame["N Components"].max() == 1
    # Retain only binary measurements
    filtered_frame = FilterByNComponents.apply(
        data_frame, FilterByNComponentsSchema(n_components=[2])
    )
    assert len(filtered_frame) == len(data_frame) / 2
    assert filtered_frame["N Components"].min() == 2
def test_validate_filter_by_substances():
    """``FilterBySubstancesSchema`` accepts an include or an exclude list,
    but not both at once."""
    # Ensure a valid schema passes
    FilterBySubstancesSchema(substances_to_include=[("C",)])
    FilterBySubstancesSchema(substances_to_exclude=[("C",)])
    # Test that an exception is raised when mutually exclusive options
    # are provided.
    with pytest.raises(ValidationError):
        FilterBySubstancesSchema(
            substances_to_include=[("C",)], substances_to_exclude=[("C",)]
        )
def test_filter_by_substances(data_frame):
    """Check whole-substance filtering, including component-order
    invariance."""
    # Retain only the pure hydrocarbons.
    filtered_frame = FilterBySubstances.apply(
        data_frame, FilterBySubstancesSchema(substances_to_include=[("C",)])
    )
    assert len(filtered_frame) == 16
    assert filtered_frame["N Components"].max() == 1
    assert {*filtered_frame["Component 1"].unique()} == {"C"}
    # Retain only the mixtures, making sure the filter is invariant to component
    # order.
    filtered_frame = FilterBySubstances.apply(
        data_frame, FilterBySubstancesSchema(substances_to_include=[("C", "CO")])
    )
    assert len(filtered_frame) == 32
    assert filtered_frame["N Components"].min() == 2
    filtered_frame = FilterBySubstances.apply(
        data_frame, FilterBySubstancesSchema(substances_to_include=[("CO", "C")])
    )
    assert len(filtered_frame) == 32
    assert filtered_frame["N Components"].min() == 2
    # Exclude the mixtures, making sure the filter is invariant to component
    # order.
    filtered_frame = FilterBySubstances.apply(
        data_frame, FilterBySubstancesSchema(substances_to_exclude=[("C", "CO")])
    )
    assert len(filtered_frame) == 32
    assert filtered_frame["N Components"].max() == 1
    filtered_frame = FilterBySubstances.apply(
        data_frame, FilterBySubstancesSchema(substances_to_exclude=[("CO", "C")])
    )
    assert len(filtered_frame) == 32
    assert filtered_frame["N Components"].max() == 1
def test_validate_environment():
    """Validation rules of ``FilterByEnvironmentsSchema``: mutually
    exclusive options and per-component environment list lengths."""
    # Ensure a valid schema passes
    FilterByEnvironmentsSchema(
        per_component_environments={1: [[ChemicalEnvironment.Alcohol]]},
    )
    FilterByEnvironmentsSchema(
        environments=[ChemicalEnvironment.Alcohol],
        at_least_one_environment=True,
        strictly_specified_environments=False,
    )
    FilterByEnvironmentsSchema(
        environments=[ChemicalEnvironment.Alcohol],
        at_least_one_environment=False,
        strictly_specified_environments=True,
    )
    # Test that an exception is raised when mutually exclusive options
    # are provided.
    with pytest.raises(ValidationError):
        FilterByEnvironmentsSchema(
            per_component_environments={1: [[ChemicalEnvironment.Alcohol]]},
            environments=[ChemicalEnvironment.Alcohol],
        )
    with pytest.raises(ValidationError):
        FilterByEnvironmentsSchema(
            environments=[ChemicalEnvironment.Alcohol],
            at_least_one_environment=True,
            strictly_specified_environments=True,
        )
    # Test that the validation logic which checks the correct number of
    # environment lists have been provided to per_component_environments
    with pytest.raises(ValidationError):
        FilterByEnvironmentsSchema(
            per_component_environments={1: []},
        )
    with pytest.raises(ValidationError):
        FilterByEnvironmentsSchema(
            per_component_environments={2: [[ChemicalEnvironment.Alcohol]]},
        )
def test_filter_by_environment_list():
    """Test that the ``FilterByEnvironments`` filter works well with the
    ``environments`` schema option"""
    data_set = PhysicalPropertyDataSet()
    data_set.add_properties(
        _build_entry("O"),
        _build_entry("C"),
        _build_entry("C", "O"),
        _build_entry("O", "CC(=O)CC=O"),
        _build_entry("CC(=O)CC=O", "O"),
    )
    data_frame = data_set.to_pandas()
    # Retain only aqueous functionality
    filtered_frame = FilterByEnvironments.apply(
        data_frame,
        FilterByEnvironmentsSchema(
            environments=[ChemicalEnvironment.Aqueous], at_least_one_environment=True
        ),
    )
    assert len(filtered_frame) == 1
    assert filtered_frame["N Components"].max() == 1
    assert {*filtered_frame["Component 1"].unique()} == {"O"}
    # Retain both aqueous and aldehyde functionality but not strictly
    filtered_frame = FilterByEnvironments.apply(
        data_frame,
        FilterByEnvironmentsSchema(
            environments=[ChemicalEnvironment.Aqueous, ChemicalEnvironment.Aldehyde],
            at_least_one_environment=True,
        ),
    )
    assert len(filtered_frame) == 3
    assert filtered_frame["N Components"].min() == 1
    assert filtered_frame["N Components"].max() == 2
    pure_data = filtered_frame[filtered_frame["N Components"] == 1]
    binary_data = filtered_frame[filtered_frame["N Components"] == 2]
    assert len(pure_data) == 1
    assert {*pure_data["Component 1"].unique()} == {"O"}
    assert len(binary_data) == 2
    assert {
        *binary_data["Component 1"].unique(),
        *binary_data["Component 2"].unique(),
    } == {"CC(=O)CC=O", "O"}
    # Ensure enforcing the strict behaviour correctly filters out the
    # combined aldehyde and ketone functionality when only aldehyde and
    # aqueous is permitted.
    filtered_frame = FilterByEnvironments.apply(
        data_frame,
        FilterByEnvironmentsSchema(
            environments=[ChemicalEnvironment.Aqueous, ChemicalEnvironment.Aldehyde],
            at_least_one_environment=False,
            strictly_specified_environments=True,
        ),
    )
    assert len(filtered_frame) == 1
    assert filtered_frame["N Components"].max() == 1
    assert {*filtered_frame["Component 1"].unique()} == {"O"}
def test_filter_by_environment_per_component():
    """Test that the ``FilterByEnvironments`` filter works well with the
    ``per_component_environments`` schema option"""
    data_set = PhysicalPropertyDataSet()
    data_set.add_properties(
        _build_entry("O"),
        _build_entry("C"),
        _build_entry("C", "O"),
        _build_entry("O", "CC(=O)CC=O"),
        _build_entry("CC(=O)CC=O", "O"),
    )
    data_frame = data_set.to_pandas()
    # Retain only aqueous functionality
    filtered_frame = FilterByEnvironments.apply(
        data_frame,
        FilterByEnvironmentsSchema(
            per_component_environments={
                1: [[ChemicalEnvironment.Aqueous]],
                2: [[ChemicalEnvironment.Aqueous], [ChemicalEnvironment.Aqueous]],
            },
            at_least_one_environment=True,
        ),
    )
    assert len(filtered_frame) == 1
    assert filtered_frame["N Components"].max() == 1
    assert {*filtered_frame["Component 1"].unique()} == {"O"}
    # Retain any pure component data, and only aqueous aldehyde mixture data.
    filtered_frame = FilterByEnvironments.apply(
        data_frame,
        FilterByEnvironmentsSchema(
            per_component_environments={
                2: [[ChemicalEnvironment.Aldehyde], [ChemicalEnvironment.Aqueous]]
            },
            at_least_one_environment=True,
        ),
    )
    assert len(filtered_frame) == 4
    assert filtered_frame["N Components"].min() == 1
    assert filtered_frame["N Components"].max() == 2
    pure_data = filtered_frame[filtered_frame["N Components"] == 1]
    binary_data = filtered_frame[filtered_frame["N Components"] == 2]
    assert len(pure_data) == 2
    assert {*pure_data["Component 1"].unique()} == {"O", "C"}
    assert len(binary_data) == 2
    assert {
        *binary_data["Component 1"].unique(),
        *binary_data["Component 2"].unique(),
    } == {"CC(=O)CC=O", "O"}
    # Repeat the last test but this time make the filtering strict.
    filtered_frame = FilterByEnvironments.apply(
        data_frame,
        FilterByEnvironmentsSchema(
            per_component_environments={
                2: [[ChemicalEnvironment.Aldehyde], [ChemicalEnvironment.Aqueous]]
            },
            at_least_one_environment=False,
            strictly_specified_environments=True,
        ),
    )
    assert len(filtered_frame) == 2
    assert filtered_frame["N Components"].max() == 1
    assert {*filtered_frame["Component 1"].unique()} == {"O", "C"}
    # Widening the allowed environments for the first component should now
    # also retain the aldehyde/ketone containing mixture.
    filtered_frame = FilterByEnvironments.apply(
        data_frame,
        FilterByEnvironmentsSchema(
            per_component_environments={
                2: [
                    [
                        ChemicalEnvironment.Aldehyde,
                        ChemicalEnvironment.Ketone,
                        ChemicalEnvironment.Carbonyl,
                    ],
                    [ChemicalEnvironment.Aqueous],
                ]
            },
            at_least_one_environment=False,
            strictly_specified_environments=True,
        ),
    )
    assert len(filtered_frame) == 4
    assert filtered_frame["N Components"].min() == 1
    assert filtered_frame["N Components"].max() == 2
    pure_data = filtered_frame[filtered_frame["N Components"] == 1]
    binary_data = filtered_frame[filtered_frame["N Components"] == 2]
    assert len(pure_data) == 2
    assert {*pure_data["Component 1"].unique()} == {"O", "C"}
    assert len(binary_data) == 2
|
jaketanderson/openff-evaluator | openff/evaluator/storage/query.py | """
A collection of classes used to query a storage backend for
data which matches a set of criteria.
"""
import abc
from openff.evaluator.attributes import UNDEFINED, Attribute, AttributeClass
from openff.evaluator.datasets import PropertyPhase
from openff.evaluator.forcefield import ForceFieldSource
from openff.evaluator.storage.attributes import QueryAttribute
from openff.evaluator.storage.data import (
ForceFieldData,
StoredFreeEnergyData,
StoredSimulationData,
)
from openff.evaluator.substances import ExactAmount, Substance
from openff.evaluator.thermodynamics import ThermodynamicState
class BaseDataQuery(AttributeClass, abc.ABC):
    """A base class for queries which can be made to
    a `StorageBackend`.
    """

    @classmethod
    @abc.abstractmethod
    def data_class(cls):
        """The type of data class that this
        query can be applied to.

        Returns
        -------
        type of BaseStoredData
        """
        raise NotImplementedError()

    def apply(self, data_object):
        """Apply this query to a data object.

        Parameters
        ----------
        data_object: BaseStoredData
            The data object to apply the query to.

        Returns
        -------
        tuple of Any, optional
            The values of the matched parameters of the data
            object fully matched this query, otherwise `None`.
        """
        # A query only ever matches objects of its own data class.
        if not isinstance(data_object, self.data_class()):
            return None
        matches = []
        for attribute_name in self.get_attributes(QueryAttribute):
            attribute = getattr(self.__class__, attribute_name)
            # Attributes flagged with `custom_match` are handled by
            # subclasses rather than this generic equality check.
            if not hasattr(data_object, attribute_name) or attribute.custom_match:
                continue
            query_value = getattr(self, attribute_name)
            # Unset query attributes behave as wildcards.
            if query_value == UNDEFINED:
                continue
            data_value = getattr(data_object, attribute_name)
            matches.append(None if data_value != query_value else data_value)
        # A single mismatched attribute fails the whole query.
        if any(x is None for x in matches):
            return None
        return tuple(matches)

    @classmethod
    def from_data_object(cls, data_object):
        """Returns the query which would match this data
        object.

        Parameters
        ----------
        data_object: BaseStoredData
            The data object to construct the query for.

        Returns
        -------
        cls
            The query which would match this data object.
        """
        query = cls()
        # Copy across every attribute shared between the query class and
        # the data object.
        for attribute_name in cls.get_attributes():
            if not hasattr(data_object, attribute_name):
                continue
            attribute_value = getattr(data_object, attribute_name)
            setattr(query, attribute_name, attribute_value)
        return query
class SubstanceQuery(AttributeClass, abc.ABC):
    """A query which focuses on finding data which was
    collected for substances with specific traits, e.g
    which contains both a solute and solvent, or only a
    solvent etc.
    """

    components_only = Attribute(
        docstring="Only match pure data which was collected for "
        "one of the components in the query substance.",
        type_hint=bool,
        default_value=False,
    )

    # component_roles = QueryAttribute(
    #     docstring="Returns data for only the subset of a substance "
    #     "which has the requested roles.",
    #     type_hint=list,
    #     optional=True,
    # )

    def validate(self, attribute_type=None):
        # NOTE(review): the commented-out guard below belongs to the
        # currently-disabled `component_roles` attribute above; it is kept
        # in case that attribute is reinstated.
        super(SubstanceQuery, self).validate(attribute_type)
        # if (
        #     self.components_only
        #     and self.component_roles != UNDEFINED
        #     and len(self.components_only) > 0
        # ):
        #
        #     raise ValueError(
        #         "The `component_roles` attribute cannot be used when "
        #         "the `components_only` attribute is `True`."
        #     )
class ForceFieldQuery(BaseDataQuery):
    """A class used to query a `StorageBackend` for
    `ForceFieldData` which meet the specified criteria.
    """

    @classmethod
    def data_class(cls):
        # This query only matches stored force field data objects.
        return ForceFieldData

    force_field_source = QueryAttribute(
        docstring="The force field source to query for.",
        type_hint=ForceFieldSource,
        optional=True,
    )
class BaseSimulationDataQuery(BaseDataQuery, abc.ABC):
    """The base class for queries which will retrieve ``BaseSimulationData`` derived
    data.
    """

    substance = QueryAttribute(
        docstring="The substance which the data should have been collected "
        "for. Data for a subset of this substance can be queried for by "
        "using the `substance_query` attribute",
        type_hint=Substance,
        optional=True,
        custom_match=True,
    )
    substance_query = QueryAttribute(
        docstring="The subset of the `substance` to query for. This option "
        "can only be used when the `substance` attribute is set.",
        type_hint=SubstanceQuery,
        optional=True,
        custom_match=True,
    )
    thermodynamic_state = QueryAttribute(
        docstring="The state at which the data should have been collected.",
        type_hint=ThermodynamicState,
        optional=True,
    )
    property_phase = QueryAttribute(
        docstring="The phase of the substance (e.g. liquid, gas).",
        type_hint=PropertyPhase,
        optional=True,
    )
    source_calculation_id = QueryAttribute(
        docstring="The server id which should have generated this data.",
        type_hint=str,
        optional=True,
    )
    force_field_id = QueryAttribute(
        docstring="The id of the force field parameters which used to "
        "generate the data.",
        type_hint=str,
        optional=True,
    )

    def _match_substance(self, data_object):
        """Attempt to match the substance (or a subset of it).

        Parameters
        ----------
        data_object: StoredSimulationData
            The data object to match against.

        Returns
        -------
        Substance, optional
            The matched substance if a match is made, otherwise
            `None`.
        """
        if self.substance == UNDEFINED:
            return None
        data_substance: Substance = data_object.substance
        # Without a sub-substance query, require an exact substance match.
        if self.substance_query == UNDEFINED:
            return None if self.substance != data_substance else self.substance
        # Handle the sub-substance match.
        if self.substance_query.components_only:
            if data_substance.number_of_components != 1:
                # We are only interested in pure data.
                return None
            for component in self.substance.components:
                if component.smiles != data_substance.components[0].smiles:
                    continue
                # Make sure the amount type matches up i.e either both
                # are defined in mole fraction, or both as an exact amount.
                data_amount = next(
                    iter(data_substance.get_amounts(component.identifier))
                )
                query_amount = next(
                    iter(self.substance.get_amounts(component.identifier))
                )
                # Compare the concrete amount classes with an identity check
                # (idiomatic form of the previous `type(a) != type(b)`).
                if type(data_amount) is not type(query_amount):
                    continue
                if isinstance(data_amount, ExactAmount) and data_amount != query_amount:
                    # Make sure there is the same amount if we are
                    # dealing with exact amounts.
                    continue
                # A match was found.
                return data_substance
        return None

    def apply(self, data_object, attributes_to_ignore=None):
        """Apply this query, combining the custom substance matching with the
        generic attribute matching performed by the base class."""
        matches = []
        # Apply a custom match behaviour for the substance
        # attribute.
        if self.substance != UNDEFINED:
            matches.append(self._match_substance(data_object))
        base_matches = super(BaseSimulationDataQuery, self).apply(data_object)
        base_matches = [None] if base_matches is None else base_matches
        matches = [*matches, *base_matches]
        # Any failed sub-match (or no matches at all) fails the query.
        if len(matches) == 0 or any(x is None for x in matches):
            return None
        return tuple(matches)

    def validate(self, attribute_type=None):
        super(BaseSimulationDataQuery, self).validate(attribute_type)
        # `substance_query` refines `substance` and is meaningless alone.
        if self.substance_query != UNDEFINED and self.substance == UNDEFINED:
            raise ValueError(
                "The `substance_query` can only be used when the "
                "`substance` attribute is set."
            )
class SimulationDataQuery(BaseSimulationDataQuery):
    """A class used to query a ``StorageBackend`` for ``StoredSimulationData`` objects
    which meet the specified set of criteria.
    """

    @classmethod
    def data_class(cls):
        # This query only matches stored simulation data objects.
        return StoredSimulationData

    number_of_molecules = QueryAttribute(
        docstring="The total number of molecules in the system.",
        type_hint=int,
        optional=True,
    )
class FreeEnergyDataQuery(BaseSimulationDataQuery):
    """A class used to query a ``StorageBackend`` for ``FreeEnergyData`` objects which
    meet the specified set of criteria.
    """

    @classmethod
    def data_class(cls):
        # This query only matches stored free energy data objects.
        return StoredFreeEnergyData
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_storage/data.py | from openff.evaluator.storage.attributes import QueryAttribute, StorageAttribute
from openff.evaluator.storage.data import BaseStoredData, HashableStoredData
from openff.evaluator.storage.query import BaseDataQuery
class SimpleData(BaseStoredData):
    """A minimal concrete ``BaseStoredData`` implementation used by the
    storage tests."""

    # A single integer attribute to store and query against.
    some_attribute = StorageAttribute(docstring="", type_hint=int)

    @classmethod
    def has_ancillary_data(cls):
        # This test data is fully self-contained - no data directory needed.
        return False

    def to_storage_query(self):
        return SimpleDataQuery.from_data_object(self)
class SimpleDataQuery(BaseDataQuery):
    """The query companion to ``SimpleData``."""

    @classmethod
    def data_class(cls):
        return SimpleData

    # Mirrors ``SimpleData.some_attribute`` so it can be matched against.
    some_attribute = QueryAttribute(docstring="", type_hint=int)
class HashableData(HashableStoredData):
    """A minimal concrete ``HashableStoredData`` implementation used by the
    storage tests."""

    # A single integer attribute which also drives the hash.
    some_attribute = StorageAttribute(docstring="", type_hint=int)

    @classmethod
    def has_ancillary_data(cls):
        # This test data is fully self-contained - no data directory needed.
        return False

    def to_storage_query(self):
        raise NotImplementedError()

    def __hash__(self):
        return hash(self.some_attribute)
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_properties/test_properties.py | """
Units tests for the calculation schemas registered by
`evaluator.properties`.
"""
from collections import OrderedDict
import pytest
import openff.evaluator.properties
from openff.evaluator.layers import registered_calculation_schemas
from openff.evaluator.layers.workflow import WorkflowCalculationSchema
from openff.evaluator.tests.test_workflow.utils import create_dummy_metadata
from openff.evaluator.tests.utils import create_dummy_property
from openff.evaluator.utils import graph
from openff.evaluator.workflow import Workflow, WorkflowGraph, WorkflowSchema
def calculation_schema_generator():
    """Yield every ``(calculation_layer, property_type)`` pair for which a
    calculation schema has been registered."""
    for layer, schemas_by_type in registered_calculation_schemas.items():
        yield from ((layer, property_type) for property_type in schemas_by_type)
def workflow_merge_functions():
    """Return the different callables which can combine two workflows into a
    single ``WorkflowGraph``."""

    def merge_with_two_calls(first_workflow, second_workflow):
        # Add each workflow to the graph via a separate call.
        merged_graph = WorkflowGraph()
        merged_graph.add_workflows(first_workflow)
        merged_graph.add_workflows(second_workflow)
        return merged_graph

    def merge_with_one_call(first_workflow, second_workflow):
        # Add both workflows to the graph in a single call.
        merged_graph = WorkflowGraph()
        merged_graph.add_workflows(first_workflow, second_workflow)
        return merged_graph

    return [merge_with_two_calls, merge_with_one_call]
@pytest.mark.parametrize(
    "calculation_layer, property_type", calculation_schema_generator()
)
def test_validate_schemas(calculation_layer, property_type):
    """Tests that all registered calculation schemas are valid."""
    schema = registered_calculation_schemas[calculation_layer][property_type]
    # Registered schemas may be factory callables rather than instances.
    if callable(schema):
        schema = schema()
    schema.validate()
@pytest.mark.parametrize(
    "calculation_layer, property_type", calculation_schema_generator()
)
def test_schema_serialization(calculation_layer, property_type):
    """Tests serialisation and deserialization of a calculation schema."""
    schema = registered_calculation_schemas[calculation_layer][property_type]
    # Registered schemas may be factory callables rather than instances.
    if callable(schema):
        schema = schema()
    json_schema = schema.json()
    schema_from_json = WorkflowSchema.parse_json(json_schema)
    # A JSON round trip should be lossless.
    property_recreated_json = schema_from_json.json()
    assert json_schema == property_recreated_json
@pytest.mark.parametrize(
    "calculation_layer, property_type", calculation_schema_generator()
)
@pytest.mark.parametrize("workflow_merge_function", workflow_merge_functions())
def test_workflow_schema_merging(
    calculation_layer, property_type, workflow_merge_function
):
    """Tests that two of the exact the same calculations get merged into one
    by the `WorkflowGraph`."""
    if property_type == "HostGuestBindingAffinity":
        pytest.skip(
            "This test does not currently support host-guest binding affinities "
            "which usually require specialised property metadata."
        )
    schema = registered_calculation_schemas[calculation_layer][property_type]
    # Registered schemas may be factory callables rather than instances.
    if callable(schema):
        schema = schema()
    if not isinstance(schema, WorkflowCalculationSchema):
        pytest.skip("Not a `WorkflowCalculationSchema`.")
    # Build two identical workflows from the same schema and metadata.
    property_class = getattr(openff.evaluator.properties, property_type)
    dummy_property = create_dummy_property(property_class)
    global_metadata = create_dummy_metadata(dummy_property, calculation_layer)
    workflow_a = Workflow(global_metadata, "workflow_a")
    workflow_a.schema = schema.workflow_schema
    workflow_b = Workflow(global_metadata, "workflow_b")
    workflow_b.schema = schema.workflow_schema
    workflow_graph = workflow_merge_function(workflow_a, workflow_b)
    # Build the per-workflow dependants graphs so the merge order of each
    # can be compared protocol by protocol.
    workflow_graph_a = workflow_a.to_graph()
    workflow_graph_b = workflow_b.to_graph()
    dependants_graph_a = workflow_graph_a._protocol_graph._build_dependants_graph(
        workflow_graph_a.protocols, False, apply_reduction=True
    )
    dependants_graph_b = workflow_graph_b._protocol_graph._build_dependants_graph(
        workflow_graph_b.protocols, False, apply_reduction=True
    )
    ordered_dict_a = OrderedDict(sorted(dependants_graph_a.items()))
    ordered_dict_a = {key: sorted(value) for key, value in ordered_dict_a.items()}
    ordered_dict_b = OrderedDict(sorted(dependants_graph_b.items()))
    ordered_dict_b = {key: sorted(value) for key, value in ordered_dict_b.items()}
    merge_order_a = graph.topological_sort(ordered_dict_a)
    merge_order_b = graph.topological_sort(ordered_dict_b)
    # The merged graph should contain each protocol exactly once.
    assert len(workflow_graph.protocols) == len(workflow_a.protocols)
    for protocol_id in workflow_a.protocols:
        assert protocol_id in workflow_graph.protocols
    for protocol_id_A, protocol_id_B in zip(merge_order_a, merge_order_b):
        assert protocol_id_A == protocol_id_B
        assert (
            workflow_a.protocols[protocol_id_A].schema.json()
            == workflow_b.protocols[protocol_id_B].schema.json()
        )
|
from .client import (
    BatchMode,
    ConnectionOptions,
    EvaluatorClient,
    Request,
    RequestOptions,
    RequestResult,
)

# ``__all__`` must contain the public *names* as strings; listing the
# objects themselves makes ``from openff.evaluator.client import *`` raise
# a TypeError ("Item in __all__ must be str").
__all__ = [
    "BatchMode",
    "ConnectionOptions",
    "EvaluatorClient",
    "Request",
    "RequestOptions",
    "RequestResult",
]
|
jaketanderson/openff-evaluator | openff/evaluator/datasets/thermoml/plugins.py | <reponame>jaketanderson/openff-evaluator
"""A collection of utility functions for registering mappings between
ThermoML archive properties, and properties supported by the estimation
framework.
"""
import functools
from openff.evaluator.datasets.thermoml import ThermoMLDataSet
class _ThermoMLPlugin:
    """A lightweight record describing how a single ThermoML archive
    property (identified by its ePropName) is converted into a framework
    property, and in which phases that conversion is supported."""

    def __init__(self, string_identifier, conversion_function, supported_phases):
        """
        Parameters
        ----------
        string_identifier: str
            The ThermoML string identifier (ePropName) for this property.
        conversion_function: function
            A function which maps a `ThermoMLProperty` into a
            `PhysicalProperty`.
        supported_phases: PropertyPhase
            An enum which encodes all of the phases for which this
            property supports being estimated in.
        """
        self.supported_phases = supported_phases
        self.conversion_function = conversion_function
        self.string_identifier = string_identifier
def _default_mapping(property_class, property_to_map):
    """Map a ThermoML property onto a fresh instance of *property_class* by
    copying across the value, phase, thermodynamic state, substance and,
    when present, the uncertainty.

    Parameters
    ----------
    property_class: type of PhysicalProperty
        The class to map this property into.
    property_to_map: ThermoMLProperty
        The ThermoML property to map.
    """
    mapped_property = property_class()
    mapped_property.value = property_to_map.value

    # The uncertainty is optional and so is only copied across when it
    # was actually reported in the archive.
    if property_to_map.uncertainty is not None:
        mapped_property.uncertainty = property_to_map.uncertainty

    for attribute_name in ("phase", "thermodynamic_state", "substance"):
        setattr(
            mapped_property, attribute_name, getattr(property_to_map, attribute_name)
        )

    return mapped_property
def register_thermoml_property(
    thermoml_string, supported_phases, property_class=None, conversion_function=None
):
    """Register a mapping from a ThermoML archive property onto an internal
    `PhysicalProperty` object of the correct type.

    Exactly one of `property_class` (a class which maps directly onto the
    given `thermoml_string`) or `conversion_function` (a function mapping a
    `ThermoMLProperty` into a `PhysicalProperty`, allowing fuller control)
    must be provided.

    Parameters
    ----------
    thermoml_string: str
        The ThermoML string identifier (ePropName) for this property.
    supported_phases: PropertyPhase
        An enum which encodes all of the phases for which this property
        supports being estimated in.
    property_class: type of PhysicalProperty, optional
        The class associated with this physical property. This argument
        is mutually exclusive with the `conversion_function` argument.
    conversion_function: function
        A function which maps a `ThermoMLProperty` into a `PhysicalProperty`.
        This argument is mutually exclusive with the `property_class` argument.

    Raises
    ------
    ValueError
        If neither or both of the mutually exclusive arguments are given.
    """
    # Both-`None` and both-set are equally invalid: exactly one is required.
    if (property_class is None) == (conversion_function is None):
        raise ValueError(
            "Only one of the `property_class` and `conversion_function` must be set."
        )

    mapping_function = conversion_function

    if mapping_function is None:
        mapping_function = functools.partial(_default_mapping, property_class)

    ThermoMLDataSet.registered_properties[thermoml_string] = _ThermoMLPlugin(
        thermoml_string, mapping_function, supported_phases
    )
def thermoml_property(thermoml_string, supported_phases):
    """A class decorator equivalent of `register_thermoml_property`: the
    decorated class is registered as the target of the given ThermoML
    string identifier and then returned unchanged.

    Parameters
    ----------
    thermoml_string: str
        The ThermoML string identifier (ePropName) for this property.
    supported_phases: PropertyPhase
        An enum which encodes all of the phases for which this
        property supports being estimated in.
    """

    def _decorate(target_class):
        register_thermoml_property(
            thermoml_string, supported_phases, property_class=target_class
        )
        return target_class

    return _decorate
|
jaketanderson/openff-evaluator | openff/evaluator/workflow/utils.py | """A set of helper classes for manipulating and passing inputs between
buildings blocks in a property estimation workflow.
"""
from openff.evaluator.attributes import PlaceholderValue
from openff.evaluator.utils import graph
class ReplicatorValue(PlaceholderValue):
    """A placeholder which the protocol replicator identified by
    ``replicator_id`` will later replace with a concrete value.
    """

    def __init__(self, replicator_id=""):
        """Constructs a new ReplicatorValue object.

        Parameters
        ----------
        replicator_id: str
            The id of the replicator which will set this value.
        """
        self.replicator_id = replicator_id

    def __getstate__(self):
        # Pickle / serialize only the replicator id.
        state = {"replicator_id": self.replicator_id}
        return state

    def __setstate__(self, state):
        self.replicator_id = state["replicator_id"]
class ProtocolPath(PlaceholderValue):
    """Represents a pointer to the output of another protocol.

    A path is composed of an ordered sequence of protocol ids (joined by
    ``path_separator``) followed by ``property_separator`` and the name of
    the referenced property, e.g. ``"protocol_a/protocol_b.value"``.
    """

    # The character which separates protocol ids.
    path_separator = "/"
    # The character which separates the property name from the path.
    property_separator = "."

    @property
    def property_name(self):
        """str: The property name pointed to by the path."""
        return self._property_name

    @property
    def protocol_ids(self):
        """tuple of str: The ids of the protocols referenced by this
        object."""
        return self._protocol_ids

    @property
    def start_protocol(self):
        """str: The leading protocol id of the path."""
        return None if len(self._protocol_ids) == 0 else self._protocol_ids[0]

    @property
    def last_protocol(self):
        """str: The end protocol id of the path."""
        return None if len(self._protocol_ids) == 0 else self._protocol_ids[-1]

    @property
    def protocol_path(self):
        """str: The full path referenced by this object excluding the
        property name."""
        return self._protocol_path

    @property
    def full_path(self):
        """str: The full path referenced by this object."""
        return self._full_path

    @property
    def is_global(self):
        """bool: Whether the path points at the global ("global") scope."""
        return self.start_protocol == "global"

    def __init__(self, property_name="", *protocol_ids):
        """Constructs a new ProtocolPath object.

        Parameters
        ----------
        property_name: str
            The property name referenced by the path.
        protocol_ids: str
            An args list of protocol ids in the order in which they will
            appear in the path.
        """
        if property_name is None:
            property_name = ""

        self._property_name = property_name
        self._protocol_ids = tuple(protocol_ids)

        self._protocol_path = None
        self._full_path = None

        self._update_string_paths()

    def _update_string_paths(self):
        """Combines the property name and protocol ids into string
        representations and caches them on the object, so that ``full_path``
        and friends are cheap attribute reads."""
        self._protocol_path = ""

        if len(self._protocol_ids) > 0:
            self._protocol_path = ProtocolPath.path_separator.join(self._protocol_ids)

        property_name = "" if self._property_name is None else self._property_name

        self._full_path = (
            f"{self._protocol_path}{ProtocolPath.property_separator}{property_name}"
        )

    @classmethod
    def from_string(cls, existing_path_string: str):
        """Parse a string representation of a path into a new object.

        Raises
        ------
        ValueError
            If the string contains no property separator, or any protocol
            id is empty.
        """
        property_name, protocol_ids = ProtocolPath._to_components(existing_path_string)

        if any(x is None or len(x) == 0 for x in protocol_ids):
            raise ValueError("An invalid protocol id (either None or empty) was found.")

        return ProtocolPath(property_name, *protocol_ids)

    @staticmethod
    def _to_components(path_string):
        """Splits a protocol path string into the property
        name, and the individual protocol ids.

        Parameters
        ----------
        path_string: str
            The protocol path to split.

        Returns
        -------
        str, tuple of str
            A tuple of the property name, and a tuple of the protocol ids
            in the path.
        """
        path_string = path_string.strip()

        property_name_index = path_string.find(ProtocolPath.property_separator)

        if property_name_index < 0:
            raise ValueError(
                f"A protocol path must contain a {ProtocolPath.property_separator} "
                f"followed by the property name this path represents"
            )

        property_name = path_string[property_name_index + 1 :]
        protocol_id_path = path_string[:property_name_index]

        # Always return a tuple so `_protocol_ids` has a consistent immutable
        # type whether it was built here (e.g. via `__setstate__`) or by
        # `__init__` - previously this returned a *list* for non-empty paths.
        if len(protocol_id_path) == 0:
            return property_name, tuple()

        return property_name, tuple(
            protocol_id_path.split(ProtocolPath.path_separator)
        )

    def prepend_protocol_id(self, id_to_prepend):
        """Prepend a new protocol id onto the front of the path.

        Parameters
        ----------
        id_to_prepend: str
            The protocol id to prepend to the path
        """
        # Don't double-up an id which is already at the front of the path.
        if len(self._protocol_ids) > 0 and self._protocol_ids[0] == id_to_prepend:
            return

        self._protocol_ids = (id_to_prepend, *self._protocol_ids)
        self._update_string_paths()

    def pop_next_in_path(self):
        """Pops and then returns the leading protocol id from the path.

        Returns
        -------
        str:
            The previously leading protocol id, or None if the path holds
            no protocol ids.
        """
        if len(self._protocol_ids) == 0:
            return None

        next_in_path = self._protocol_ids[0]
        self._protocol_ids = self._protocol_ids[1:]

        self._update_string_paths()
        return next_in_path

    def append_uuid(self, uuid):
        """Appends a uuid to each of the protocol id's in the path

        Parameters
        ----------
        uuid: str
            The uuid to append.
        """
        if self.is_global:
            # Don't append uuids to global paths.
            return

        self._protocol_ids = tuple(
            graph.append_uuid(x, uuid) for x in self._protocol_ids
        )
        self._update_string_paths()

    def replace_protocol(self, old_id, new_id):
        """Redirect the input to point at a new protocol.

        The main use of this method is when merging multiple protocols
        into one.

        Parameters
        ----------
        old_id : str
            The id of the protocol to replace.
        new_id : str
            The id of the new protocol to use.
        """
        self._protocol_ids = tuple(
            new_id if x == old_id else x for x in self._protocol_ids
        )
        self._update_string_paths()

    def copy(self):
        """Returns a copy of this path."""
        return ProtocolPath(self._property_name, *self._protocol_ids)

    def __str__(self):
        return self._full_path

    def __repr__(self):
        return f"<ProtocolPath full_path={self._full_path}>"

    def __hash__(self):
        """Returns the hash key of this ProtocolPath."""
        return hash(self._full_path)

    def __eq__(self, other):
        # `is` is the correct comparison for exact type identity (E721).
        return type(self) is type(other) and self._full_path == other.full_path

    def __ne__(self, other):
        return not (self == other)

    def __getstate__(self):
        # The full path string is sufficient to rebuild the whole object.
        return {"full_path": self._full_path}

    def __setstate__(self, state):
        self._property_name, self._protocol_ids = ProtocolPath._to_components(
            state["full_path"]
        )
        self._update_string_paths()
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_protocols/test_defaults.py | import pytest
from openff.evaluator.plugins import register_default_plugins
from openff.evaluator.workflow import registered_workflow_protocols
# Load the default protocols so that `registered_workflow_protocols` is fully
# populated before it is used to parametrize the test below.
register_default_plugins()
@pytest.mark.parametrize("available_protocol", registered_workflow_protocols)
def test_default_protocol_schemas(available_protocol):
    """A simple test to ensure that each available protocol
    can both create, and be created from a schema."""

    protocol_class = registered_workflow_protocols[available_protocol]

    if protocol_class.__abstractmethods__:
        # Abstract base classes cannot be instantiated, so skip them.
        return

    original_protocol = protocol_class("dummy_id")
    original_schema = original_protocol.schema

    # Round-trip a second instance through the schema and check that the
    # serialized forms agree.
    round_tripped_protocol = protocol_class("dummy_id")
    round_tripped_protocol.schema = original_schema

    assert original_protocol.schema.json() == round_tripped_protocol.schema.json()
|
jaketanderson/openff-evaluator | openff/evaluator/utils/tcp.py | """
A collection of utilities which aid in sending and receiving messages sent over tcp.
"""
import struct
from enum import IntEnum
# A pre-compiled struct for a little-endian ("<"), 4-byte signed integer
# ("i"), used when (de)serializing integers sent over TCP.
int_struct = struct.Struct("<i")
# Convenience aliases for the struct's bound (un)packing methods.
unpack_int = int_struct.unpack
pack_int = int_struct.pack
class EvaluatorMessageTypes(IntEnum):
    """The different types of message which may be sent over TCP within
    the framework (``IntEnum`` so each type has a stable integer value)."""

    Undefined = 0
    Submission = 1
    Query = 2
def recvall(sock, n):
    """Receive exactly *n* bytes from *sock*, looping over as many
    ``recv`` calls as needed.

    Returns
    -------
    bytearray, optional
        The *n* received bytes, or None if the connection hit EOF before
        *n* bytes arrived.
    """
    buffer = bytearray()
    remaining = n

    while remaining > 0:
        chunk = sock.recv(remaining)

        # An empty chunk signals that the peer closed the connection.
        if not chunk:
            return None

        buffer.extend(chunk)
        remaining = n - len(buffer)

    return buffer
|
jaketanderson/openff-evaluator | openff/evaluator/workflow/__init__.py | from .exceptions import WorkflowException
from .schemas import ProtocolGroupSchema, ProtocolSchema, WorkflowSchema # isort:skip
from .plugins import ( # isort:skip
register_workflow_protocol,
registered_workflow_protocols,
workflow_protocol,
)
from .protocols import Protocol, ProtocolGraph, ProtocolGroup # isort:skip
from .workflow import Workflow, WorkflowGraph, WorkflowResult # isort:skip
# The public API of this module. The entries MUST be strings: listing the
# objects themselves causes `from openff.evaluator.workflow import *` to fail
# with `TypeError: Item in __all__ must be str`.
__all__ = [
    "Protocol",
    "ProtocolGraph",
    "ProtocolGroup",
    "ProtocolSchema",
    "ProtocolGroupSchema",
    "register_workflow_protocol",
    "registered_workflow_protocols",
    "workflow_protocol",
    "Workflow",
    "WorkflowException",
    "WorkflowGraph",
    "WorkflowResult",
    "WorkflowSchema",
]
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_protocols/test_coordinates.py | <reponame>jaketanderson/openff-evaluator<filename>openff/evaluator/tests/test_protocols/test_coordinates.py
"""
Units tests for openff-evaluator.protocols.coordinates
"""
import tempfile
import pytest
try:
from openmm.app import PDBFile
except ImportError:
from simtk.openmm.app import PDBFile
from openff.evaluator.backends import ComputeResources
from openff.evaluator.protocols.coordinates import (
BuildCoordinatesPackmol,
BuildDockedCoordinates,
SolvateExistingStructure,
)
from openff.evaluator.substances import Component, ExactAmount, MoleFraction, Substance
from openff.evaluator.utils import get_data_filename, has_openeye
def _build_input_output_substances():
    """Builds pairs of input and expected substances for the
    `test_build_coordinate_composition` test.

    Returns
    -------
    list of tuple of Substance and Substance
        A list of input and expected substances.
    """

    # The easy cases - the expected composition matches the input exactly.
    substances = [
        (Substance.from_components("O"), Substance.from_components("O")),
        (Substance.from_components("O", "C"), Substance.from_components("O", "C")),
        (
            Substance.from_components("O", "C", "CO"),
            Substance.from_components("O", "C", "CO"),
        ),
    ]

    def _binary_substance(first_fraction, second_fraction):
        # Build a two component ("O" + "C") substance with the given
        # mole fractions.
        substance = Substance()
        substance.add_component(Component("O"), MoleFraction(first_fraction))
        substance.add_component(Component("C"), MoleFraction(second_fraction))
        return substance

    # Cases where rounding of the mole fractions will need to occur.
    for input_fractions, expected_fractions in [
        ((0.41, 0.59), (0.4, 0.6)),
        ((0.59, 0.41), (0.6, 0.4)),
    ]:
        substances.append(
            (
                _binary_substance(*input_fractions),
                _binary_substance(*expected_fractions),
            )
        )

    return substances
@pytest.mark.parametrize("input_substance, expected", _build_input_output_substances())
def test_build_coordinates_packmol(input_substance, expected):
    """Tests that the build coordinate protocols correctly report
    the composition of the built system."""

    protocol = BuildCoordinatesPackmol("build_coordinates")
    protocol.max_molecules = 10
    protocol.substance = input_substance

    with tempfile.TemporaryDirectory() as directory:
        protocol.execute(directory)

    assert protocol.output_substance == expected

    # Every input component should have been assigned a residue name, and
    # water should always receive the standard "HOH" name.
    for component in input_substance:
        assert component.identifier in protocol.assigned_residue_names

        if component.smiles != "O":
            continue

        water_residue_name = protocol.assigned_residue_names[component.identifier]
        assert water_residue_name[:3] == "HOH"
@pytest.mark.parametrize("count_exact_amount", [False, True])
def test_build_coordinates_packmol_exact(count_exact_amount):
    """Tests that the build coordinate protocol behaves correctly for substances
    with exact amounts."""

    import mdtraj

    substance = Substance()
    substance.add_component(Component("O"), MoleFraction(1.0))
    substance.add_component(Component("C"), ExactAmount(1))

    # When the exact amount counts towards the maximum, one extra molecule
    # must be requested so that the final system size is the same.
    maximum_molecules = 11 if count_exact_amount else 10

    build_coordinates = BuildCoordinatesPackmol("build_coordinates")
    build_coordinates.max_molecules = maximum_molecules
    build_coordinates.count_exact_amount = count_exact_amount
    build_coordinates.substance = substance

    with tempfile.TemporaryDirectory() as directory:
        build_coordinates.execute(directory)
        built_system = mdtraj.load_pdb(build_coordinates.coordinate_file_path)

    assert built_system.n_residues == 11
def test_solvate_existing_structure_protocol():
    """Tests solvating a single methanol molecule in water."""

    import mdtraj

    methanol_component = Component("CO")

    methanol_substance = Substance()
    methanol_substance.add_component(methanol_component, ExactAmount(1))

    water_substance = Substance()
    water_substance.add_component(Component("O"), MoleFraction(1.0))

    with tempfile.TemporaryDirectory() as temporary_directory:
        # Build the coordinates of the single methanol molecule to solvate.
        build_methanol = BuildCoordinatesPackmol("build_methanol")
        build_methanol.max_molecules = 1
        build_methanol.substance = methanol_substance
        build_methanol.execute(temporary_directory, ComputeResources())

        expected_residue_name = build_methanol.assigned_residue_names[
            methanol_component.identifier
        ]

        # Solvate the methanol with water.
        solvate = SolvateExistingStructure("solvate_methanol")
        solvate.max_molecules = 9
        solvate.substance = water_substance
        solvate.solute_coordinate_file = build_methanol.coordinate_file_path
        solvate.execute(temporary_directory, ComputeResources())

        solvated_system = mdtraj.load_pdb(solvate.coordinate_file_path)

    # 1 methanol + 9 water molecules, with the solute listed first.
    assert solvated_system.n_residues == 10
    assert solvated_system.top.residue(0).name == expected_residue_name
def test_build_docked_coordinates_protocol():
    """Tests docking a methanol molecule into alpha-Cyclodextrin."""

    if not has_openeye():
        pytest.skip("The `BuildDockedCoordinates` protocol requires OpenEye.")

    ligand_substance = Substance()
    ligand_substance.add_component(
        Component("CO", role=Component.Role.Ligand),
        ExactAmount(1),
    )

    # TODO: This test could likely be made substantially faster
    #       by storing the binary prepared receptor. Would this
    #       be in breach of any oe license terms?
    with tempfile.TemporaryDirectory() as temporary_directory:
        protocol = BuildDockedCoordinates("build_methanol")
        protocol.ligand_substance = ligand_substance
        protocol.number_of_ligand_conformers = 5
        protocol.receptor_coordinate_file = get_data_filename(
            "test/molecules/acd.mol2"
        )
        protocol.execute(temporary_directory, ComputeResources())

        docked_pdb = PDBFile(protocol.docked_complex_coordinate_path)

    # The docked complex should contain exactly the receptor and the ligand.
    assert docked_pdb.topology.getNumResidues() == 2
# Allow this module to be run directly for a quick manual check of the
# (OpenEye dependent) docking protocol, outside of the pytest runner.
if __name__ == "__main__":
    test_build_docked_coordinates_protocol()
|
jaketanderson/openff-evaluator | openff/evaluator/utils/checkmol.py | import functools
from enum import Enum
from openff.evaluator.utils.exceptions import MissingOptionalDependency
class ChemicalEnvironment(Enum):
    """The chemical environments (functional group moieties) which may be
    assigned to a molecule.

    The members (apart from ``Aqueous``) mirror, in order, the functional
    groups detected by the external ``checkmol`` program - see
    https://homepage.univie.ac.at/norbert.haider/cheminf/fgtable.pdf for the
    corresponding table. ``Aqueous`` has no checkmol code and is assigned
    directly to water by ``analyse_functional_groups``.
    """

    Alkane = "Alkane"
    Cation = "Cation"
    Anion = "Anion"
    Carbonyl = "Carbonyl"
    Aldehyde = "Aldehyde"
    Ketone = "Ketone"
    Thiocarbonyl = "Thiocarbonyl"
    Thioaldehyde = "Thioaldehyde"
    Thioketone = "Thioketone"
    Imine = "Imine"
    Hydrazone = "Hydrazone"
    Semicarbazone = "Semicarbazone"
    Thiosemicarbazone = "Thiosemicarbazone"
    Oxime = "Oxime"
    OximeEther = "Oxime Ether"
    Ketene = "Ketene"
    KeteneAcetalDeriv = "Ketene Acetal Derivative"
    CarbonylHydrate = "Carbonyl Hydrate"
    Hemiacetal = "Hemiacetal"
    Acetal = "Acetal"
    Hemiaminal = "Hemiaminal"
    Aminal = "Aminal"
    Thiohemiaminal = "Thiohemiaminal"
    Thioacetal = "Thioacetal"
    Enamine = "Enamine"
    Enol = "Enol"
    Enolether = "Enolether"
    Hydroxy = "Hydroxy"
    Alcohol = "Alcohol"
    PrimaryAlcohol = "Primary Alcohol"
    SecondaryAlcohol = "Secondary Alcohol"
    TertiaryAlcohol = "Tertiary Alcohol"
    Diol_1_2 = "1,2 Diol"
    Aminoalcohol_1_2 = "1,2 Aminoalcohol"
    Phenol = "Phenol"
    Diphenol_1_2 = "1,2 Diphenol"
    Enediol = "Enediol"
    Ether = "Ether"
    Dialkylether = "Dialkylether"
    Alkylarylether = "Alkylarylether"
    Diarylether = "Diarylether"
    Thioether = "Thioether"
    Disulfide = "Disulfide"
    Peroxide = "Peroxide"
    Hydroperoxide = "Hydroperoxide"
    Hydrazine = "Hydrazine"
    Hydroxylamine = "Hydroxylamine"
    Amine = "Amine"
    PrimaryAmine = "Primary Amine"
    PrimaryAliphAmine = "Primary Aliphatic Amine"
    PrimaryAromAmine = "Primary Aromatic Amine"
    SecondaryAmine = "Secondary Amine"
    SecondaryAliphAmine = "Secondary Aliphatic Amine"
    SecondaryMixedAmine = "Secondary Mixed Amine"
    SecondaryAromAmine = "Secondary Aromatic Amine"
    TertiaryAmine = "Tertiary Amine"
    TertiaryAliphAmine = "Tertiary Aliphatic Amine"
    TertiaryMixedAmine = "Tertiary Mixed Amine"
    TertiaryAromAmine = "Tertiary Aromatic Amine"
    QuartAmmonium = "Quart Ammonium"
    NOxide = "NOxide"
    HalogenDeriv = "Halogen Derivative"
    AlkylHalide = "Alkyl Halide"
    AlkylFluoride = "Alkyl Fluoride"
    AlkylChloride = "Alkyl Chloride"
    AlkylBromide = "Alkyl Bromide"
    AlkylIodide = "Alkyl Iodide"
    ArylHalide = "Aryl Halide"
    ArylFluoride = "Aryl Fluoride"
    ArylChloride = "Aryl Chloride"
    ArylBromide = "Aryl Bromide"
    ArylIodide = "Aryl Iodide"
    Organometallic = "Organometallic"
    Organolithium = "Organolithium"
    Organomagnesium = "Organomagnesium"
    CarboxylicAcidDeriv = "Carboxylic Acid Derivative"
    CarboxylicAcid = "Carboxylic Acid"
    CarboxylicAcidSalt = "Carboxylic Acid Salt"
    CarboxylicAcidEster = "Carboxylic Acid Ester"
    Lactone = "Lactone"
    CarboxylicAcidAmide = "Carboxylic Acid Amide"
    CarboxylicAcidPrimaryAmide = "Carboxylic Acid Primary Amide"
    CarboxylicAcidSecondaryAmide = "Carboxylic Acid Secondary Amide"
    CarboxylicAcidTertiaryAmide = "Carboxylic Acid Tertiary Amide"
    Lactam = "Lactam"
    CarboxylicAcidHydrazide = "Carboxylic Acid Hydrazide"
    CarboxylicAcidAzide = "Carboxylic Acid Azide"
    HydroxamicAcid = "Hydroxamic Acid"
    CarboxylicAcidAmidine = "Carboxylic Acid Amidine"
    CarboxylicAcidAmidrazone = "Carboxylic Acid Amidrazone"
    Nitrile = "Nitrile"
    AcylHalide = "Acyl Halide"
    AcylFluoride = "Acyl Fluoride"
    AcylChloride = "Acyl Chloride"
    AcylBromide = "Acyl Bromide"
    AcylIodide = "Acyl Iodide"
    AcylCyanide = "Acyl Cyanide"
    ImidoEster = "Imido Ester"
    ImidoylHalide = "Imidoyl Halide"
    ThiocarboxylicAcidDeriv = "Thiocarboxylic Acid Derivative"
    ThiocarboxylicAcid = "Thiocarboxylic Acid"
    ThiocarboxylicAcidEster = "Thiocarboxylic Acid Ester"
    Thiolactone = "Thiolactone"
    ThiocarboxylicAcidAmide = "Thiocarboxylic Acid Amide"
    Thiolactam = "Thiolactam"
    ImidoThioester = "Imido Thioester"
    Oxohetarene = "Oxohetarene"
    Thioxohetarene = "Thioxohetarene"
    Iminohetarene = "Iminohetarene"
    OrthocarboxylicAcidDeriv = "Orthocarboxylic Acid Derivative"
    CarboxylicAcidOrthoester = "Carboxylic Acid Orthoester"
    CarboxylicAcidAmideAcetal = "Carboxylic Acid Amide Acetal"
    CarboxylicAcidAnhydride = "Carboxylic Acid Anhydride"
    CarboxylicAcidImide = "Carboxylic Acid Imide"
    CarboxylicAcidUnsubstImide = "Carboxylic Acid Unsubst Imide"
    CarboxylicAcidSubstImide = "Carboxylic Acid Subst Imide"
    Co2Deriv = "CO2 Derivative"
    CarbonicAcidDeriv = "Carbonic Acid Derivative"
    CarbonicAcidMonoester = "Carbonic Acid Monoester"
    CarbonicAcidDiester = "Carbonic Acid Diester"
    CarbonicAcidEsterHalide = "Carbonic Acid Ester Halide"
    ThiocarbonicAcidDeriv = "Thiocarbonic Acid Derivative"
    ThiocarbonicAcidMonoester = "Thiocarbonic Acid Monoester"
    ThiocarbonicAcidDiester = "Thiocarbonic Acid Diester"
    ThiocarbonicAcidEsterHalide = "Thiocarbonic Acid Ester Halide"
    CarbamicAcidDeriv = "Carbamic Acid Derivative"
    CarbamicAcid = "Carbamic Acid"
    CarbamicAcidEster = "Carbamic Acid Ester"
    CarbamicAcidHalide = "Carbamic Acid Halide"
    ThiocarbamicAcidDeriv = "Thiocarbamic Acid Derivative"
    ThiocarbamicAcid = "Thiocarbamic Acid"
    ThiocarbamicAcidEster = "Thiocarbamic Acid Ester"
    ThiocarbamicAcidHalide = "Thiocarbamic Acid Halide"
    Urea = "Urea"
    Isourea = "Isourea"
    Thiourea = "Thiourea"
    Isothiourea = "Isothiourea"
    Guanidine = "Guanidine"
    Semicarbazide = "Semicarbazide"
    Thiosemicarbazide = "Thiosemicarbazide"
    Azide = "Azide"
    AzoCompound = "Azo Compound"
    DiazoniumSalt = "Diazonium Salt"
    Isonitrile = "Isonitrile"
    Cyanate = "Cyanate"
    Isocyanate = "Isocyanate"
    Thiocyanate = "Thiocyanate"
    Isothiocyanate = "Isothiocyanate"
    Carbodiimide = "Carbodiimide"
    NitrosoCompound = "Nitroso Compound"
    NitroCompound = "Nitro Compound"
    Nitrite = "Nitrite"
    Nitrate = "Nitrate"
    SulfuricAcidDeriv = "Sulfuric Acid Derivative"
    SulfuricAcid = "Sulfuric Acid"
    SulfuricAcidMonoester = "Sulfuric Acid Monoester"
    SulfuricAcidDiester = "Sulfuric Acid Diester"
    SulfuricAcidAmideEster = "Sulfuric Acid Amide Ester"
    SulfuricAcidAmide = "Sulfuric Acid Amide"
    SulfuricAcidDiamide = "Sulfuric Acid Diamide"
    SulfurylHalide = "Sulfuryl Halide"
    SulfonicAcidDeriv = "Sulfonic Acid Derivative"
    SulfonicAcid = "Sulfonic Acid"
    SulfonicAcidEster = "Sulfonic Acid Ester"
    Sulfonamide = "Sulfonamide"
    SulfonylHalide = "Sulfonyl Halide"
    Sulfone = "Sulfone"
    Sulfoxide = "Sulfoxide"
    SulfinicAcidDeriv = "Sulfinic Acid Derivative"
    SulfinicAcid = "Sulfinic Acid"
    SulfinicAcidEster = "Sulfinic Acid Ester"
    SulfinicAcidHalide = "Sulfinic Acid Halide"
    SulfinicAcidAmide = "Sulfinic Acid Amide"
    SulfenicAcidDeriv = "Sulfenic Acid Derivative"
    SulfenicAcid = "Sulfenic Acid"
    SulfenicAcidEster = "Sulfenic Acid Ester"
    SulfenicAcidHalide = "Sulfenic Acid Halide"
    SulfenicAcidAmide = "Sulfenic Acid Amide"
    Thiol = "Thiol"
    Alkylthiol = "Alkylthiol"
    Arylthiol = "Arylthiol"
    PhosphoricAcidDeriv = "Phosphoric Acid Derivative"
    PhosphoricAcid = "Phosphoric Acid"
    PhosphoricAcidEster = "Phosphoric Acid Ester"
    PhosphoricAcidHalide = "Phosphoric Acid Halide"
    PhosphoricAcidAmide = "Phosphoric Acid Amide"
    ThiophosphoricAcidDeriv = "Thiophosphoric Acid Derivative"
    ThiophosphoricAcid = "Thiophosphoric Acid"
    ThiophosphoricAcidEster = "Thiophosphoric Acid Ester"
    ThiophosphoricAcidHalide = "Thiophosphoric Acid Halide"
    ThiophosphoricAcidAmide = "Thiophosphoric Acid Amide"
    PhosphonicAcidDeriv = "Phosphonic Acid Derivative"
    PhosphonicAcid = "Phosphonic Acid"
    PhosphonicAcidEster = "Phosphonic Acid Ester"
    Phosphine = "Phosphine"
    Phosphinoxide = "Phosphinoxide"
    BoronicAcidDeriv = "Boronic Acid Derivative"
    BoronicAcid = "Boronic Acid"
    BoronicAcidEster = "Boronic Acid Ester"
    Alkene = "Alkene"
    Alkyne = "Alkyne"
    Aromatic = "Aromatic"
    Heterocycle = "Heterocycle"
    AlphaAminoacid = "Alpha Aminoacid"
    AlphaHydroxyacid = "Alpha Hydroxyacid"
    Aqueous = "Aqueous"
# Mapping from a three-digit checkmol functional group code onto the
# corresponding `ChemicalEnvironment`. The checkmol codes "000"-"204" are
# assigned in exactly the order that the environments are declared on the
# `ChemicalEnvironment` enum; `Aqueous` is a framework-specific extra member
# with no checkmol code and so is excluded. Building the table once at import
# time avoids re-creating a ~200 entry dictionary on every call (the previous
# implementation rebuilt it per invocation).
_CHECKMOL_CODE_MAP = {
    f"{index:03d}": environment
    for index, environment in enumerate(
        member
        for member in ChemicalEnvironment
        if member is not ChemicalEnvironment.Aqueous
    )
}
# Guard the ordering invariant: codes run from "000" to "204" inclusive.
assert len(_CHECKMOL_CODE_MAP) == 205


def checkmol_code_to_environment(checkmol_code) -> ChemicalEnvironment:
    """Convert a zero-padded, three digit ``checkmol`` functional group code
    (e.g. ``"028"``) into the matching ``ChemicalEnvironment``.

    Parameters
    ----------
    checkmol_code: str
        The three digit group code reported by ``checkmol``.

    Returns
    -------
    ChemicalEnvironment
        The chemical environment associated with the code.

    Raises
    ------
    KeyError
        If the code is not a recognised checkmol group code.
    """
    return _CHECKMOL_CODE_MAP[checkmol_code]
@functools.lru_cache(1000)
def analyse_functional_groups(smiles):
    """Employs checkmol to determine which chemical moieties
    are encoded by a given smiles pattern.

    Notes
    -----
    See https://homepage.univie.ac.at/norbert.haider/cheminf/fgtable.pdf
    for information about the group numbers (i.e moiety types).

    Parameters
    ----------
    smiles: str
        The smiles pattern to examine.

    Returns
    -------
    dict of ChemicalEnvironment and int, optional
        A dictionary where each key corresponds to the `checkmol` defined group
        number, and each value is the number of instances of that moiety. If
        `checkmol` did not execute correctly, returns None.
    """
    import shutil
    import subprocess
    import tempfile

    from openff.toolkit.topology import Molecule

    # Special-case water and ammonia, which are handled directly rather
    # than being passed through checkmol.
    if smiles == "O" or smiles == "[H]O[H]":
        return {ChemicalEnvironment.Aqueous: 1}
    if smiles == "N":
        return {ChemicalEnvironment.Amine: 1}

    # Make sure the checkmol utility has been installed separately.
    if shutil.which("checkmol") is None:
        raise MissingOptionalDependency(
            "checkmol",
            False,
            "Checkmol can be obtained for free from "
            "http://merian.pch.univie.ac.at/~nhaider/cheminf/cmmm.html.",
        )

    openff_molecule: Molecule = Molecule.from_smiles(
        smiles, allow_undefined_stereo=True
    )

    # Save the smiles pattern out as an SDF file, ready to use as input to checkmol.
    with tempfile.NamedTemporaryFile(suffix=".sdf") as file:
        openff_molecule.to_file(file.name, "SDF")

        # Execute checkmol. A non-zero exit code is signalled to the caller
        # by returning None rather than raising.
        try:
            result = subprocess.check_output(
                ["checkmol", "-p", file.name],
                stderr=subprocess.STDOUT,
            ).decode()
        except subprocess.CalledProcessError:
            result = None

    if result is None:
        return None
    elif len(result) == 0:
        # An empty output indicates a molecule with no recognised functional
        # groups, i.e. an alkane.
        return {ChemicalEnvironment.Alkane: 1}

    groups = {}

    # Each output line is expected to take the form `<code>:<count>:<description>`,
    # where the leading character of the code (e.g. `#`) is stripped before the
    # code is mapped onto a chemical environment.
    for group in result.splitlines():
        group_code, group_count, _ = group.split(":")
        group_environment = checkmol_code_to_environment(group_code[1:])
        groups[group_environment] = int(group_count)

    return groups
|
jaketanderson/openff-evaluator | openff/evaluator/storage/storage.py | """
Defines the base API for the openff-evaluator storage backend.
"""
import abc
import uuid
from collections import defaultdict
from threading import RLock
from typing import Dict
from openff.evaluator.attributes import Attribute
from openff.evaluator.storage.data import (
BaseStoredData,
ForceFieldData,
HashableStoredData,
ReplaceableData,
)
class StorageBackend(abc.ABC):
    """An abstract base representation of how the openff-evaluator will
    interact with and store simulation data.

    Notes
    -----
    When implementing this class, only private methods should be overridden
    as the public methods only mainly implement thread locks, while their
    private version perform their actual function.
    """

    class _ObjectKeyData(BaseStoredData):
        """An object which keeps track of the items in
        the storage system.
        """

        # Maps the class name of each stored data type onto the list of
        # unique keys of the stored objects of that type.
        object_keys = Attribute(
            docstring="The unique keys of the objects stored in a `StorageBackend`.",
            type_hint=dict,
            default_value=dict(),
        )

        @classmethod
        def has_ancillary_data(cls):
            # The key registry is pure metadata and needs no data directory.
            return False

        def to_storage_query(self):
            # This should never be called so doesn't need an
            # implementation.
            raise NotImplementedError()

    def __init__(self):
        """Constructs a new StorageBackend object."""
        self._stored_object_keys = None
        self._stored_object_keys_id = "object_keys"
        # Store a map between the unique id of a stored object,
        # and its hash value for easy comparison.
        self._object_hashes: Dict[int, str] = dict()
        # Create a thread lock to prevent concurrent
        # thread access.
        self._lock = RLock()
        self._load_stored_object_keys()

    def _load_stored_object_keys(self):
        """Load the unique key to each object stored in the
        storage system, rebuilding the in-memory key registry and hash
        lookup table.
        """
        keys_object, _ = self._retrieve_object(self._stored_object_keys_id)
        # Fall back to an empty registry if none has been persisted yet.
        if keys_object is None:
            keys_object = StorageBackend._ObjectKeyData()
        assert isinstance(keys_object, StorageBackend._ObjectKeyData)
        stored_object_keys = keys_object.object_keys
        self._stored_object_keys = defaultdict(list)
        all_object_keys = set()
        for data_type in stored_object_keys:
            for unique_key in stored_object_keys[data_type]:
                if not self._object_exists(unique_key):
                    # The stored entry key does not exist in the system,
                    # so skip the entry. This may happen when the local
                    # files do not exist on disk any more for example.
                    continue
                if unique_key in all_object_keys:
                    raise KeyError(
                        "Two objects with the same unique key have been found."
                    )
                stored_object, _ = self.retrieve_object(unique_key)
                # Make sure the data matches the expected type and is valid.
                assert stored_object.__class__.__name__ == data_type
                stored_object.validate()
                # Rebuild the hash lookup table for hashable data.
                if isinstance(stored_object, HashableStoredData):
                    self._object_hashes[hash(stored_object)] = unique_key
                self._stored_object_keys[data_type].append(unique_key)
                all_object_keys.add(unique_key)
        # Store a fresh copy of the key dictionary so that only entries
        # that exist in the system are actually referenced.
        self._save_stored_object_keys()

    def _save_stored_object_keys(self):
        """Save the unique key of each of the objects stored in the storage system."""
        keys_object = StorageBackend._ObjectKeyData()
        keys_object.object_keys = self._stored_object_keys
        self._store_object(keys_object, self._stored_object_keys_id)

    @abc.abstractmethod
    def _object_exists(self, storage_key):
        """Check whether an object with the specified key exists in the
        storage system.

        Parameters
        ----------
        storage_key: str
            A unique key that describes where the stored object can be found
            within the storage system.

        Returns
        -------
        bool
            True if the object is within the storage system.
        """
        raise NotImplementedError()

    def _is_key_unique(self, storage_key):
        """Checks whether a given key is already in the storage system.

        Parameters
        ----------
        storage_key: str
            The key to check for.

        Returns
        -------
        bool
            `True` if the key does not already exist in the system (i.e.
            the key is unique), `False` otherwise.
        """
        # Make sure the key is unique.
        return not any(
            storage_key in self._stored_object_keys[data_type]
            for data_type in self._stored_object_keys
        )

    @abc.abstractmethod
    def _store_object(
        self, object_to_store, storage_key=None, ancillary_data_path=None
    ):
        """The internal implementation of the `store_object` method.

        It is safe to assume here that all object and key validation
        has already been performed, and that this method is called under
        a thread lock.

        Notes
        -----
        This method should overwrite any existing data with the same key.

        Parameters
        ----------
        object_to_store: BaseStoredData
            The object to store.
        storage_key: str, optional
            A unique key to associate with the stored object. If `None`,
            one will be randomly generated
        ancillary_data_path: str, optional
            The data path to the ancillary directory-like
            data to store alongside the object if the data
            type requires one.
        """
        raise NotImplementedError()

    def store_object(self, object_to_store, ancillary_data_path=None):
        """Store an object in the storage system, returning the key
        of the stored object. This may be different to `storage_key`
        depending on whether the same or a similar object was already
        present in the system.

        Parameters
        ----------
        object_to_store: BaseStoredData
            The object to store.
        ancillary_data_path: str, optional
            The data path to the ancillary directory-like
            data to store alongside the object if the data
            type requires one.

        Returns
        -------
        str
            The unique key assigned to the stored object.
        """
        # Make sure the object is valid.
        if object_to_store is None:
            raise ValueError("The object to store cannot be None.")
        object_to_store.validate()
        # Make sure the object is a supported type.
        if not isinstance(object_to_store, BaseStoredData):
            raise ValueError(
                "Only objects inheriting from `BaseStoredData` can "
                "be stored in the storage system."
            )
        # Make sure we have ancillary data if required.
        object_class = object_to_store.__class__
        if object_class.has_ancillary_data() and ancillary_data_path is None:
            raise ValueError("This object requires ancillary data.")
        # Check whether the exact same object already exists within
        # the storage system based on its hash.
        storage_key = self.has_object(object_to_store)
        if storage_key is not None:
            if not isinstance(object_to_store, ReplaceableData):
                # Handle the case where the existing data
                # should be returned, rather than storing
                # the passed object.
                return storage_key
            existing_object, _ = self.retrieve_object(storage_key, ReplaceableData)
            # Keep whichever of the existing / incoming copies carries the
            # most information, re-using the existing storage key.
            # noinspection PyTypeChecker
            object_to_store = object_to_store.most_information(
                existing_object, object_to_store
            )
            if object_to_store is None:
                raise ValueError(
                    "Something went wrong when trying to "
                    "determine whether the object trying to "
                    "be stored is redundant."
                )
            elif object_to_store == existing_object:
                # Don't try to re-store the existing object.
                return storage_key
        else:
            # Generate a unique id for this object.
            while storage_key is None or not self._is_key_unique(storage_key):
                storage_key = str(uuid.uuid4()).replace("-", "")
        # Hash this object if appropriate
        if isinstance(object_to_store, HashableStoredData):
            self._object_hashes[hash(object_to_store)] = storage_key
        # Save the object into the storage system with the given key.
        with self._lock:
            self._store_object(object_to_store, storage_key, ancillary_data_path)
            # Register the key in the storage system. The key registry itself
            # is excluded to avoid infinite recursion via _save_stored_object_keys.
            if (
                not isinstance(object_to_store, StorageBackend._ObjectKeyData)
                and storage_key not in self._stored_object_keys[object_class.__name__]
            ):
                self._stored_object_keys[object_class.__name__].append(storage_key)
                self._save_stored_object_keys()
        return storage_key

    def store_force_field(self, force_field):
        """A convenience method for storing `ForceFieldSource` objects.

        Parameters
        ----------
        force_field: ForceFieldSource
            The force field to store.

        Returns
        -------
        str
            The unique id of the stored force field.
        """
        force_field_data = ForceFieldData()
        force_field_data.force_field_source = force_field
        return self.store_object(force_field_data)

    @abc.abstractmethod
    def _retrieve_object(self, storage_key, expected_type=None):
        """The internal implementation of the `retrieve_object` method.

        It is safe to assume that this method is called under a thread lock.

        Parameters
        ----------
        storage_key: str
            A unique key that describes where the stored object can be found
            within the storage system.
        expected_type: type of BaseStoredData, optional
            The expected data type. An exception is raised if
            the retrieved data doesn't match the type.

        Returns
        -------
        BaseStoredData, optional
            The stored object if the object key is found, otherwise None.
        str, optional
            The path to the ancillary data if present.
        """
        raise NotImplementedError()

    def retrieve_object(self, storage_key, expected_type=None):
        """Retrieves a stored object for the estimators storage system.

        Parameters
        ----------
        storage_key: str
            A unique key that describes where the stored object can be found
            within the storage system.
        expected_type: type of BaseStoredData, optional
            The expected data type. An exception is raised if
            the retrieved data doesn't match the type.

        Returns
        -------
        BaseStoredData, optional
            The stored object if the object key is found, otherwise None.
        str, optional
            The path to the ancillary data if present.
        """
        with self._lock:
            return self._retrieve_object(storage_key, expected_type)

    def retrieve_force_field(self, storage_key):
        """A convenience method for retrieving `ForceFieldSource` objects.

        Parameters
        ----------
        storage_key: str
            The key of the force field to retrieve.

        Returns
        -------
        ForceFieldSource
            The retrieved force field source.

        Raises
        ------
        KeyError
            If no force field with the given key exists in the system.
        """
        force_field_data, _ = self.retrieve_object(storage_key, ForceFieldData)
        if force_field_data is None:
            raise KeyError(
                f"The force field with id {storage_key} does not exist "
                f"in the storage system."
            )
        return force_field_data.force_field_source

    def _has_object(self, storage_object):
        """The internal implementation of the `has_object` method.

        It is safe to assume that this method is called under a
        thread lock.

        Parameters
        ----------
        storage_object: BaseStoredData
            The object to check for.

        Returns
        -------
        str, optional
            The unique key of the object if it is in the system, `None` otherwise.
        """
        # Hashable data can be looked up directly via its precomputed hash.
        if isinstance(storage_object, HashableStoredData):
            hash_key = hash(storage_object)
            return self._object_hashes.get(hash_key, None)
        # Otherwise fall back to querying the backend for a matching object.
        data_query = storage_object.to_storage_query()
        query_results = self.query(data_query)
        if len(query_results) == 0:
            return None
        # NOTE(review): `query_results` is keyed by the matched values returned
        # by the query, so `query_results[0]` assumes `0` is a valid key here.
        # This looks like it should be `next(iter(query_results.values()))` -
        # verify against the data query implementation.
        if len(query_results) > 1 or len(query_results[0]) > 1:
            raise ValueError(
                "The backend contains multiple copies of the "
                "same piece of data. This should not be possible."
            )
        storage_key, _, _ = next(iter(query_results.values()))[0]
        return storage_key

    def has_object(self, storage_object):
        """Checks whether a given hashable object exists in the
        storage system.

        Parameters
        ----------
        storage_object: BaseStoredData
            The object to check for.

        Returns
        -------
        str, optional
            The unique key of the object if it is in the system, `None` otherwise.
        """
        with self._lock:
            return self._has_object(storage_object)

    def has_force_field(self, force_field):
        """A convenience method for checking whether the specified
        `ForceFieldSource` object is stored in the backend.

        Parameters
        ----------
        force_field: ForceFieldSource
            The force field to look for.

        Returns
        -------
        str, optional
            The unique key of the object if it is in the system, `None` otherwise.
        """
        force_field_data = ForceFieldData()
        force_field_data.force_field_source = force_field
        return self.has_object(force_field_data)

    def _query(self, data_query):
        """The internal implementation of the `query` method.

        It is safe to assume that this method is called under a
        thread lock.

        Parameters
        ----------
        data_query: BaseDataQuery
            The query to perform.

        Returns
        -------
        dict of tuple and list of tuple of str, BaseStoredData and str
            The data that matches the query partitioned by the
            matched values. The list values take the form
            (storage_key, data_object, data_directory_path).
        """
        data_class = data_query.data_class()
        results = defaultdict(list)
        if len(self._stored_object_keys.get(data_class.__name__, [])) == 0:
            # Exit early if there are no objects of the correct type.
            return results
        for unique_key in self._stored_object_keys[data_class.__name__]:
            if not self._object_exists(unique_key):
                # Make sure the object is still in the system.
                continue
            stored_object, stored_directory = self.retrieve_object(
                unique_key, data_class
            )
            matches = data_query.apply(stored_object)
            if matches is None:
                continue
            results[matches].append((unique_key, stored_object, stored_directory))
        return results

    def query(self, data_query):
        """Query the storage backend for data matching the
        query criteria.

        Parameters
        ----------
        data_query: BaseDataQuery
            The query to perform.

        Returns
        -------
        dict of tuple and list of tuple of str, BaseStoredData and str
            The data that matches the query partitioned by the
            matched values. The list values take the form
            (storage_key, data_object, data_directory_path).
        """
        with self._lock:
            return self._query(data_query)
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_protocols/test_gradient_protocols.py | <gh_stars>10-100
import os
import tempfile
import numpy as np
from openff.units import unit
from openff.evaluator.forcefield import ParameterGradientKey
from openff.evaluator.protocols.gradients import ZeroGradients
from openff.evaluator.tests.utils import build_tip3p_smirnoff_force_field
from openff.evaluator.utils.observables import ObservableArray
def test_zero_gradient():
    """The ``ZeroGradients`` protocol should attach a zero valued gradient
    to its output observables for every requested parameter key."""

    with tempfile.TemporaryDirectory() as working_directory:

        # Serialize a TIP3P force field for the protocol to load.
        ff_path = os.path.join(working_directory, "ff.json")

        with open(ff_path, "w") as ff_file:
            ff_file.write(build_tip3p_smirnoff_force_field().json())

        expected_keys = [
            ParameterGradientKey("vdW", "[#1]-[#8X2H2+0:1]-[#1]", "epsilon"),
            ParameterGradientKey("vdW", None, "scale14"),
        ]

        protocol = ZeroGradients("")
        protocol.input_observables = ObservableArray(value=0.0 * unit.kelvin)
        protocol.gradient_parameters = expected_keys
        protocol.force_field_path = ff_path

        protocol.execute()

        output_gradients = protocol.output_observables.gradients

        # One zero-valued gradient should be present per requested key.
        assert len(output_gradients) == 2
        assert {gradient.key for gradient in output_gradients} == set(expected_keys)

        assert all(np.allclose(gradient.value, 0.0) for gradient in output_gradients)
|
jaketanderson/openff-evaluator | openff/evaluator/storage/__init__.py | <reponame>jaketanderson/openff-evaluator
from .storage import StorageBackend # isort:skip
from .localfile import LocalFileStorage # isort:skip
# `__all__` must contain strings - listing the class objects themselves causes
# `from openff.evaluator.storage import *` to raise a `TypeError` at runtime.
__all__ = [
    "LocalFileStorage",
    "StorageBackend",
]
|
jaketanderson/openff-evaluator | openff/evaluator/protocols/groups.py | <reponame>jaketanderson/openff-evaluator<filename>openff/evaluator/protocols/groups.py
"""
A collection of specialized workflow protocols, which serve to group together
multiple individual protocol building blocks, and apply special behaviours when
executing them.
Such behaviours may include for example running the grouped together
protocols until certain conditions have been met.
"""
import json
import logging
import typing
from enum import Enum, unique
from os import path
from openff.units import unit
from openff.evaluator.attributes import UNDEFINED, Attribute, AttributeClass
from openff.evaluator.workflow import ProtocolGroup, workflow_protocol
from openff.evaluator.workflow.attributes import (
InequalityMergeBehaviour,
InputAttribute,
MergeBehaviour,
OutputAttribute,
)
from openff.evaluator.workflow.utils import ProtocolPath
logger = logging.getLogger(__name__)
@workflow_protocol()
class ConditionalGroup(ProtocolGroup):
    """A collection of protocols which are to execute until
    a given condition is met.
    """

    class Condition(AttributeClass):
        """Defines a specific condition which must be met of the form
        `left_hand_value` [TYPE] `right_hand_value`, where `[TYPE]` may
        be less than or greater than.
        """

        @unique
        class Type(Enum):
            """The available condition types."""

            LessThan = "lessthan"
            GreaterThan = "greaterthan"

        left_hand_value = Attribute(
            docstring="The left-hand value to compare.",
            type_hint=typing.Union[int, float, unit.Quantity],
        )
        right_hand_value = Attribute(
            docstring="The right-hand value to compare.",
            type_hint=typing.Union[int, float, unit.Quantity],
        )
        type = Attribute(
            docstring="The type of comparison to perform.",
            type_hint=Type,
            default_value=Type.LessThan,
        )

        def __eq__(self, other):
            return (
                type(self) == type(other)
                and self.left_hand_value == other.left_hand_value
                and self.right_hand_value == other.right_hand_value
                and self.type == other.type
            )

        def __ne__(self, other):
            return not self.__eq__(other)

        def __str__(self):
            return f"{self.left_hand_value} {self.type} {self.right_hand_value}"

        def __repr__(self):
            return f"<Condition {str(self)}>"

    conditions = InputAttribute(
        docstring="The conditions which must be satisfied before "
        "the group will cleanly exit.",
        type_hint=list,
        default_value=[],
        merge_behavior=MergeBehaviour.Custom,
    )
    current_iteration = OutputAttribute(
        docstring="The current number of iterations this group has performed while "
        "attempting to satisfy the specified conditions. This value starts "
        "from one.",
        type_hint=int,
    )
    max_iterations = InputAttribute(
        docstring="The maximum number of iterations to run for to try and satisfy the "
        "groups conditions.",
        type_hint=int,
        default_value=100,
        merge_behavior=InequalityMergeBehaviour.LargestValue,
    )

    def __init__(self, protocol_id):
        super(ConditionalGroup, self).__init__(protocol_id)
        # We disable checkpointing, as protocols may change their inputs
        # at each iteration and hence their checkpointed outputs may
        # be invalidated.
        self._enable_checkpointing = False

    def _evaluate_condition(self, condition):
        """Evaluates whether a condition has been successfully met.

        Parameters
        ----------
        condition: ConditionalGroup.Condition
            The condition to evaluate.

        Returns
        -------
        bool
            True if the condition has been met.
        """
        left_hand_value = condition.left_hand_value
        right_hand_value = condition.right_hand_value
        # Resolve any protocol paths into their current values.
        if isinstance(condition.left_hand_value, ProtocolPath):
            left_hand_value = self.get_value(condition.left_hand_value)
        if isinstance(condition.right_hand_value, ProtocolPath):
            right_hand_value = self.get_value(condition.right_hand_value)
        # A condition cannot be met while either of its values is undefined.
        if left_hand_value == UNDEFINED or right_hand_value == UNDEFINED:
            return False
        # Compare quantities in consistent units.
        if isinstance(right_hand_value, unit.Quantity) and isinstance(
            left_hand_value, unit.Quantity
        ):
            right_hand_value = right_hand_value.to(left_hand_value.units)
        logger.info(
            f"Evaluating condition for protocol {self.id}: "
            f"{left_hand_value} {condition.type} {right_hand_value}"
        )
        if condition.type == self.Condition.Type.LessThan:
            return left_hand_value < right_hand_value
        elif condition.type == self.Condition.Type.GreaterThan:
            return left_hand_value > right_hand_value
        raise NotImplementedError()

    @staticmethod
    def _write_checkpoint(directory, current_iteration):
        """Creates a checkpoint file for this group so that it can continue
        executing where it left off if it was killed for some reason (e.g the
        worker it was running on was killed).

        Parameters
        ----------
        directory: str
            The path to the working directory of this protocol
        current_iteration: int
            The number of iterations this group has performed so far.
        """
        checkpoint_path = path.join(directory, "checkpoint.json")
        with open(checkpoint_path, "w") as file:
            json.dump({"current_iteration": current_iteration}, file)

    @staticmethod
    def _read_checkpoint(directory):
        """Reads the checkpoint file for this group (if one exists) so that it
        can continue executing where it left off if it was killed for some
        reason (e.g the worker it was running on was killed).

        Parameters
        ----------
        directory: str
            The path to the working directory of this protocol

        Returns
        -------
        int
            The number of iterations this group has performed so far.
        """
        current_iteration = 0
        checkpoint_path = path.join(directory, "checkpoint.json")
        # No checkpoint means the group has not yet performed any iterations.
        if not path.isfile(checkpoint_path):
            return current_iteration
        with open(checkpoint_path, "r") as file:
            checkpoint_dictionary = json.load(file)
        current_iteration = checkpoint_dictionary["current_iteration"]
        return current_iteration

    def _execute(self, directory, available_resources):
        """Executes the protocols within this group repeatedly until all of
        the groups conditions have been met.

        Parameters
        ----------
        directory : str
            The root directory in which to run the protocols
        available_resources: ComputeResources
            The resources available to execute on.

        Raises
        ------
        RuntimeError
            If the conditions have still not been met after `max_iterations`
            iterations.
        """
        should_continue = True
        self.current_iteration = self._read_checkpoint(directory)
        # Keep a track of the original protocol schemas
        original_schemas = [x.schema for x in self._protocols]
        while should_continue:
            # Create a checkpoint file so we can pick off where
            # we left off if this execution fails due to time
            # constraints for e.g.
            self._write_checkpoint(directory, self.current_iteration)
            self.current_iteration += 1
            # Reset the protocols from their schemas - this will ensure
            # that at each iteration protocols which take their inputs from
            # other protocols in the group get their inputs updated correctly.
            for protocol, schema in zip(self._protocols, original_schemas):
                protocol.schema = schema
            super(ConditionalGroup, self)._execute(directory, available_resources)
            conditions_met = True
            for condition in self._conditions:
                # Check to see if we have reached our goal.
                if not self._evaluate_condition(condition):
                    conditions_met = False
            if conditions_met:
                logger.info(
                    f"{self.id} loop finished after {self.current_iteration} iterations"
                )
                return
            if self.current_iteration >= self.max_iterations:
                raise RuntimeError(f"{self.id} failed to converge.")
            logger.info(
                f"{self.id} criteria not yet met after {self.current_iteration} "
                f"iterations"
            )

    def merge(self, other):
        """Merges another ProtocolGroup with this one. The id
        of this protocol will remain unchanged.

        It is assumed that can_merge has already returned that
        these protocol groups are compatible to be merged together.

        Parameters
        ----------
        other: ConditionalGroup
            The protocol to merge into this one.

        Returns
        -------
        dict of str and str
            A map between the original ids of the merged protocols and
            their new ids.
        """
        merged_ids = super(ConditionalGroup, self).merge(other)
        # Re-map the other group's conditions so that any protocol paths they
        # hold reference this group (and any re-named merged protocols).
        for condition in other.conditions:
            if isinstance(condition.left_hand_value, ProtocolPath):
                condition.left_hand_value.replace_protocol(other.id, self.id)
            if isinstance(condition.right_hand_value, ProtocolPath):
                condition.right_hand_value.replace_protocol(other.id, self.id)
            for merged_id in merged_ids:
                if isinstance(condition.left_hand_value, ProtocolPath):
                    condition.left_hand_value.replace_protocol(
                        merged_id, merged_ids[merged_id]
                    )
                if isinstance(condition.right_hand_value, ProtocolPath):
                    condition.right_hand_value.replace_protocol(
                        merged_id, merged_ids[merged_id]
                    )
            self.add_condition(condition)
        return merged_ids

    def add_condition(self, condition_to_add):
        """Adds a condition to this groups list of conditions if it
        not already in the condition list.

        Parameters
        ----------
        condition_to_add: :obj:`ConditionalGroup.Condition`
            The condition to add.
        """
        for condition in self.conditions:
            if condition == condition_to_add:
                return
        self.conditions.append(condition_to_add)

    def get_value_references(self, input_path):
        """Returns a map between any protocol paths referenced by this groups
        conditions and the path at which each reference is stored.

        Parameters
        ----------
        input_path: ProtocolPath
            The input value to return the referenced values of.

        Returns
        -------
        dict of ProtocolPath and ProtocolPath
            A map between the path of each reference and the protocol
            path it points to.
        """
        if input_path.property_name != "conditions":
            return super(ConditionalGroup, self).get_value_references(input_path)
        value_references = {}
        for index, condition in enumerate(self.conditions):
            if isinstance(condition.left_hand_value, ProtocolPath):
                source_path = ProtocolPath(
                    "conditions[{}].left_hand_value".format(index)
                )
                value_references[source_path] = condition.left_hand_value
            if isinstance(condition.right_hand_value, ProtocolPath):
                source_path = ProtocolPath(
                    "conditions[{}].right_hand_value".format(index)
                )
                value_references[source_path] = condition.right_hand_value
        return value_references
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_datasets/test_utilities.py | <reponame>jaketanderson/openff-evaluator
import pandas
from openff.evaluator.datasets.utilities import (
data_frame_to_substances,
reorder_data_frame,
)
def test_reorder_data_frame():
    """Tests that the ``reorder_data_frame`` function behaves as expected
    for 1 and 2 component entries."""

    pure_row = {
        "N Components": 1,
        "Component 1": "C",
        "Role": "Solvent",
        "Mole Fraction 1": 1.0,
        "Exact Amount": 1,
    }
    binary_row = {
        "N Components": 2,
        "Component 1": "CC",
        "Role 1": "Solvent",
        "Mole Fraction 1": 0.25,
        "Exact Amount 1": 1,
        "Component 2": "CO",
        "Role 2": "Solute",
        "Mole Fraction 2": 0.75,
        "Exact Amount 2": 2,
    }
    # The same binary mixture but with the two components swapped.
    swapped_binary_row = {
        "N Components": 2,
        "Component 1": "CO",
        "Role 1": "Solute",
        "Mole Fraction 1": 0.75,
        "Exact Amount 1": 2,
        "Component 2": "CC",
        "Role 2": "Solvent",
        "Mole Fraction 2": 0.25,
        "Exact Amount 2": 1,
    }

    reordered_data_frame = reorder_data_frame(
        pandas.DataFrame([pure_row, binary_row, swapped_binary_row])
    )

    assert len(reordered_data_frame) == 3
    assert reordered_data_frame.loc[0, "N Components"] == 1

    # Both orderings of the binary mixture should collapse onto the same
    # canonical component order.
    expected_values = {
        "N Components": 2,
        "Component 1": "CC",
        "Role 1": "Solvent",
        "Mole Fraction 1": 0.25,
        "Exact Amount 1": 1,
        "Component 2": "CO",
        "Role 2": "Solute",
        "Mole Fraction 2": 0.75,
        "Exact Amount 2": 2,
    }
    for row_index in [1, 2]:
        for column_name, expected_value in expected_values.items():
            assert reordered_data_frame.loc[row_index, column_name] == expected_value
def test_data_frame_to_substances():
    """Tests that the ``data_frame_to_substances`` function behaves as expected
    for 1 and 2 component entries, especially when identical substances but
    with different ordering are present."""

    data_frame = pandas.DataFrame(
        [
            {"N Components": 1, "Component 1": "C"},
            {"N Components": 2, "Component 1": "CC", "Component 2": "CO"},
            {"N Components": 2, "Component 1": "CO", "Component 2": "CC"},
        ]
    )

    # Both orderings of the CC / CO mixture should map onto one substance.
    assert data_frame_to_substances(data_frame) == {("C",), ("CC", "CO")}
|
jaketanderson/openff-evaluator | openff/evaluator/datasets/curation/components/components.py | <reponame>jaketanderson/openff-evaluator<filename>openff/evaluator/datasets/curation/components/components.py<gh_stars>10-100
import abc
import logging
from typing import overload
import pandas
from pydantic import BaseModel
from openff.evaluator.datasets import PhysicalPropertyDataSet
logger = logging.getLogger(__name__)
class _MetaCurationComponent(type):
    """A metaclass which registers, keyed by class name, every class created
    with it, and enforces that those names are unique."""

    # The global registry of created curation component classes.
    components = {}

    def __init__(cls, name, bases, attrs):

        super().__init__(name, bases, attrs)

        registry = _MetaCurationComponent.components

        # Class names double as registry keys and so must be unique.
        if name in registry:
            raise ValueError(
                "Cannot have more than one curation component with the same name"
            )

        registry[name] = cls
class CurationComponentSchema(BaseModel, abc.ABC):
    """A base class for schemas which specify how particular curation
    components should be applied to a data set.

    Each schema is passed to the ``apply`` method of its corresponding
    ``CurationComponent``.
    """
class CurationComponent(metaclass=_MetaCurationComponent):
    """A base component for curation components which apply a particular operation
    (such as filtering or data conversion) to a data set."""

    @classmethod
    @abc.abstractmethod
    def _apply(
        cls, data_frame: pandas.DataFrame, schema, n_processes
    ) -> pandas.DataFrame:
        """The internal implementation of ``apply`` which operates directly on
        a pandas data frame."""
        raise NotImplementedError()

    @classmethod
    @overload
    def apply(
        cls,
        data_set: PhysicalPropertyDataSet,
        schema: CurationComponentSchema,
        n_processes: int = 1,
    ) -> PhysicalPropertyDataSet:
        ...

    @classmethod
    @overload
    def apply(
        cls,
        data_set: pandas.DataFrame,
        schema: CurationComponentSchema,
        n_processes: int = 1,
    ) -> pandas.DataFrame:
        ...

    @classmethod
    def apply(cls, data_set, schema, n_processes=1):
        """Apply this curation component to a data set.

        Parameters
        ----------
        data_set
            The data frame to apply the component to.
        schema
            The schema which defines how this component should be applied.
        n_processes
            The number of processes that this component is allowed to
            parallelize across.

        Returns
        -------
        The data set which has had the component applied to it.
        """

        # Normalise the input into a pandas data frame, remembering whether
        # a ``PhysicalPropertyDataSet`` was originally provided.
        received_data_set = isinstance(data_set, PhysicalPropertyDataSet)
        data_frame = data_set.to_pandas() if received_data_set else data_set

        modified_data_frame = cls._apply(data_frame, schema, n_processes)

        n_data_points = len(data_frame)
        n_filtered = len(modified_data_frame)

        # Report any change in the number of rows caused by this component.
        if n_filtered != n_data_points:
            direction = "removed" if n_filtered < n_data_points else "added"
            logger.info(
                f"{abs(n_filtered - n_data_points)} data points were {direction} after "
                f"applying the {cls.__name__} component."
            )

        # Return the same type of object as was originally passed in.
        if received_data_set:
            modified_data_frame = PhysicalPropertyDataSet.from_pandas(
                modified_data_frame
            )

        return modified_data_frame
|
jaketanderson/openff-evaluator | openff/evaluator/client/client.py | """
Evaluator client side API.
"""
import copy
import json
import logging
import socket
import traceback
from collections import defaultdict
from enum import Enum
from time import sleep
from openff.evaluator.attributes import UNDEFINED, Attribute, AttributeClass
from openff.evaluator.datasets import PhysicalPropertyDataSet
from openff.evaluator.forcefield import (
ForceFieldSource,
LigParGenForceFieldSource,
ParameterGradientKey,
SmirnoffForceFieldSource,
TLeapForceFieldSource,
)
from openff.evaluator.layers import (
registered_calculation_layers,
registered_calculation_schemas,
)
from openff.evaluator.layers.workflow import WorkflowCalculationSchema
from openff.evaluator.utils.exceptions import EvaluatorException
from openff.evaluator.utils.serialization import TypedJSONDecoder
from openff.evaluator.utils.tcp import (
EvaluatorMessageTypes,
pack_int,
recvall,
unpack_int,
)
logger = logging.getLogger(__name__)
class ConnectionOptions(AttributeClass):
    """The options to use when connecting to an `EvaluatorServer`"""

    server_address = Attribute(
        docstring="The address of the server to connect to.",
        type_hint=str,
        default_value="localhost",
    )
    server_port = Attribute(
        docstring="The port of the server to connect to.",
        type_hint=int,
        default_value=8000,
    )

    def __init__(self, server_address=None, server_port=None):
        """
        Parameters
        ----------
        server_address: str
            The address of the server to connect to.
        server_port: int
            The port of the server to connect to.
        """
        # Only override the attribute defaults when a value was explicitly given.
        for attribute_name, value in (
            ("server_address", server_address),
            ("server_port", server_port),
        ):
            if value is not None:
                setattr(self, attribute_name, value)
class BatchMode(Enum):
    """The different modes in which a server can batch together properties
    to estimate.

    This enum may take values of

    * SameComponents: Properties are only batched together when they were
      measured for substances containing exactly the same components. E.g.
      the density of a 80:20 and a 20:80 mix of ethanol and water would be
      batched together, but the density of pure ethanol and the density of
      pure water would be placed into separate batches.
    * SharedComponents: Properties are batched together whenever their
      substances share at least one common component. E.g. the densities of
      80:20 and 20:80 mixtures of ethanol and water, and the pure densities
      of ethanol and water would all end up in one batch.

    Properties will only be marked as estimated by the server when all
    properties in a single batch are completed.
    """

    SameComponents = "SameComponents"
    SharedComponents = "SharedComponents"
class Request(AttributeClass):
    """An estimation request which has been sent to a `EvaluatorServer`
    instance.

    This object can be used to query and retrieve the results of the
    request when finished, or be stored to retrieve the request at some
    point in the future."""

    id = Attribute(
        docstring="The unique id assigned to this request by the server.", type_hint=str
    )
    connection_options = Attribute(
        docstring="The options used to connect to the server handling the request.",
        type_hint=ConnectionOptions,
    )

    def __init__(self, client=None):
        """
        Parameters
        ----------
        client: EvaluatorClient, optional
            The client which submitted this request.
        """
        if client is not None:
            # Snapshot the client's connection settings so the request can
            # be reconnected to the same server at a later point in time.
            self.connection_options = ConnectionOptions()
            self.connection_options.server_address = client.server_address
            self.connection_options.server_port = client.server_port
        self._client = client

    def results(self, synchronous=False, polling_interval=5):
        """Attempt to retrieve the results of the request from the
        server.

        If the method is run synchronously it will block the main
        thread either all of the requested properties have been
        estimated, or an exception is returned.

        Parameters
        ----------
        synchronous: bool
            If `True`, this method will block the main thread until
            the server either returns a result or an error.
        polling_interval: float
            If running synchronously, this is the time interval (seconds)
            between checking if the calculation has finished. This will
            be ignored if running asynchronously.

        Returns
        -------
        RequestResult, optional
            Returns the current results of the request. This may
            be `None` if any unexpected exceptions occurred while
            retrieving the estimate.
        EvaluatorException, optional
            The exception raised will trying to retrieve the result
            if any.
        """
        # Re-create the cached client if there is none, or if it is connected
        # to a different server than the one this request was submitted to.
        # BUG FIX: the previous implementation compared the client's address
        # and port against themselves (always equal), so a stale client
        # pointing at the wrong server was never replaced.
        if (
            self._client is None
            or self._client.server_address != self.connection_options.server_address
            or self._client.server_port != self.connection_options.server_port
        ):
            self.validate()
            self._client = EvaluatorClient(self.connection_options)
        return self._client.retrieve_results(self.id, synchronous, polling_interval)

    def __str__(self):
        return f"Request id={self.id}"

    def __repr__(self):
        return f"<{str(self)}>"
class RequestOptions(AttributeClass):
    """The options to use when requesting a set of physical
    properties be estimated by the server.
    """

    calculation_layers = Attribute(
        docstring="The calculation layers which may be used to "
        "estimate the set of physical properties. The order in which "
        "the layers appears in this list determines the order in which "
        "the layers will attempt to estimate the data set.",
        type_hint=list,
        default_value=["ReweightingLayer", "SimulationLayer"],
    )
    calculation_schemas = Attribute(
        docstring="The schemas that each calculation layer should "
        "use when estimating the set of physical properties. The "
        "dictionary should be of the form [property_type][layer_type].",
        type_hint=dict,
        optional=True,
    )
    batch_mode = Attribute(
        docstring="The way in which the server should batch together "
        "properties to estimate. Properties will only be marked as finished "
        "when all properties in a single batch are completed.",
        type_hint=BatchMode,
        default_value=BatchMode.SharedComponents,
        optional=True,
    )

    def add_schema(self, layer_type, property_type, schema):
        """A convenience function for adding a calculation schema
        to the schema dictionary.

        Parameters
        ----------
        layer_type: str or type of CalculationLayer
            The layer to associate the schema with.
        property_type: str or type of PhysicalProperty
            The class of property to associate the schema
            with.
        schema: CalculationSchema
            The schema to add.
        """
        # Validate the schema.
        schema.validate()

        # Make sure the schema is compatible with the layer. Use `isinstance`
        # (rather than an exact type comparison) so subclasses of the required
        # schema type are accepted, consistently with the check in `validate`.
        assert layer_type in registered_calculation_layers
        calculation_layer = registered_calculation_layers[layer_type]
        assert isinstance(schema, calculation_layer.required_schema_type())

        if isinstance(property_type, type):
            property_type = property_type.__name__

        if self.calculation_schemas == UNDEFINED:
            self.calculation_schemas = {}

        # Store the schema, creating the per-property entry if needed. (The
        # previous implementation also created an empty placeholder for the
        # layer entry which was immediately overwritten - dead code removed.)
        if property_type not in self.calculation_schemas:
            self.calculation_schemas[property_type] = {}
        self.calculation_schemas[property_type][layer_type] = schema

    def validate(self, attribute_type=None):
        """Validate these options, ensuring each requested calculation layer
        is registered and that any provided schemas are of the type required
        by their associated layer."""
        super(RequestOptions, self).validate(attribute_type)

        assert all(isinstance(x, str) for x in self.calculation_layers)
        assert all(x in registered_calculation_layers for x in self.calculation_layers)

        if self.calculation_schemas != UNDEFINED:
            for property_type in self.calculation_schemas:
                assert isinstance(self.calculation_schemas[property_type], dict)
                for layer_type in self.calculation_schemas[property_type]:
                    # Schemas may only be provided for layers that will be used.
                    assert layer_type in self.calculation_layers
                    calculation_layer = registered_calculation_layers[layer_type]
                    schema = self.calculation_schemas[property_type][layer_type]
                    required_type = calculation_layer.required_schema_type()
                    assert isinstance(schema, required_type)
class RequestResult(AttributeClass):
    """The current results of an estimation request - these
    results may be partial if the server hasn't yet completed
    the request.
    """

    queued_properties = Attribute(
        docstring="The set of properties which have yet to be, or "
        "are currently being estimated.",
        type_hint=PhysicalPropertyDataSet,
        default_value=PhysicalPropertyDataSet(),
    )
    estimated_properties = Attribute(
        docstring="The set of properties which have been successfully estimated.",
        type_hint=PhysicalPropertyDataSet,
        default_value=PhysicalPropertyDataSet(),
    )
    unsuccessful_properties = Attribute(
        docstring="The set of properties which could not be successfully estimated.",
        type_hint=PhysicalPropertyDataSet,
        default_value=PhysicalPropertyDataSet(),
    )
    exceptions = Attribute(
        docstring="The set of properties which have yet to be, or "
        "are currently being estimated.",
        type_hint=list,
        default_value=[],
    )

    def validate(self, attribute_type=None):
        """Validate the result, ensuring every entry in `exceptions` is an
        `EvaluatorException`."""
        super(RequestResult, self).validate(attribute_type)
        for exception in self.exceptions:
            assert isinstance(exception, EvaluatorException)
class EvaluatorClient:
    """The object responsible for connecting to, and submitting
    physical property estimation requests to an `EvaluatorServer`.

    Examples
    --------
    These examples assume that an `EvaluatorServer` has been set up
    and is running (either synchronously or asynchronously). This
    server can be connect to be creating an `EvaluatorClient`:

    >>> from openff.evaluator.client import EvaluatorClient
    >>> property_estimator = EvaluatorClient()

    If the `EvaluatorServer` is not running on the local machine, you will
    need to specify its address and the port that it is listening on:

    >>> from openff.evaluator.client import ConnectionOptions
    >>>
    >>> connection_options = ConnectionOptions(server_address='server_address',
    >>>                                        server_port=8000)
    >>> property_estimator = EvaluatorClient(connection_options)

    To asynchronously submit a request to the running server using the default
    estimation options:

    >>> # Load in the data set of properties which will be used for comparisons
    >>> from openff.evaluator.datasets.thermoml import ThermoMLDataSet
    >>> data_set = ThermoMLDataSet.from_doi('10.1016/j.jct.2016.10.001')
    >>>
    >>> # Filter the dataset to only include densities measured between 130-260 K
    >>> from openff.units import unit
    >>> from openff.evaluator.properties import Density
    >>>
    >>> data_set.filter_by_property_types(Density)
    >>> data_set.filter_by_temperature(
    >>>     min_temperature=130*unit.kelvin,
    >>>     max_temperature=260*unit.kelvin
    >>> )
    >>>
    >>> # Load in the force field parameters
    >>> from openff.evaluator.forcefield import SmirnoffForceFieldSource
    >>> force_field_source = SmirnoffForceFieldSource.from_path('smirnoff99Frosst-1.1.0.offxml')
    >>>
    >>> # Submit the estimation request to a running server.
    >>> request = property_estimator.request_estimate(data_set, force_field_source)

    The status of the request can be asynchronously queried by calling

    >>> results = request.results()

    or the main thread can be blocked until the results are
    available by calling

    >>> results = request.results(synchronous=True)

    How the property set will be estimated can easily be controlled by passing a
    `RequestOptions` object to the estimate commands.

    The calculations layers which will be used to estimate the properties can be
    controlled for example like so:

    >>> from openff.evaluator.layers.reweighting import ReweightingLayer
    >>> from openff.evaluator.layers.simulation import SimulationLayer
    >>>
    >>> options = RequestOptions(calculation_layers=[
    >>>     "ReweightingLayer",
    >>>     "SimulationLayer"
    >>> ])
    >>>
    >>> request = property_estimator.request_estimate(data_set, force_field_source, options)

    Options for how properties should be estimated can be set on a per property, and per layer
    basis by providing a calculation schema to the options object.

    >>> from openff.evaluator.properties import DielectricConstant
    >>>
    >>> # Generate a schema to use when estimating densities directly
    >>> # from simulations.
    >>> density_simulation_schema = Density.default_simulation_schema()
    >>> # Generate a schema to use when estimating dielectric constants
    >>> # from cached simulation data.
    >>> dielectric_reweighting_schema = DielectricConstant.default_reweighting_schema()
    >>>
    >>> options.workflow_options = {
    >>>     'Density': {'SimulationLayer': density_simulation_schema},
    >>>     'Dielectric': {'SimulationLayer': dielectric_reweighting_schema}
    >>> }
    >>>
    >>> property_estimator.request_estimate(
    >>>     data_set,
    >>>     force_field_source,
    >>>     options,
    >>> )

    The gradients of the observables of interest with respect to a number of chosen
    parameters can be requested by passing a `parameter_gradient_keys` parameter.
    In the below example, gradients will be calculated with respect to both the
    bond length parameter for the [#6:1]-[#8:2] chemical environment, and the bond
    angle parameter for the [*:1]-[#8:2]-[*:3] chemical environment:

    >>> from openff.evaluator.forcefield import ParameterGradientKey
    >>>
    >>> parameter_gradient_keys = [
    >>>     ParameterGradientKey('Bonds', '[#6:1]-[#8:2]', 'length')
    >>>     ParameterGradientKey('Angles', '[*:1]-[#8:2]-[*:3]', 'angle')
    >>> ]
    >>>
    >>> property_estimator.request_estimate(
    >>>     data_set,
    >>>     force_field_source,
    >>>     options,
    >>>     parameter_gradient_keys
    >>> )
    """

    class _Submission(AttributeClass):
        """The data packet encoding an estimation request which will be sent to
        the server.
        """

        dataset = Attribute(
            docstring="The set of properties to estimate.",
            type_hint=PhysicalPropertyDataSet,
        )
        options = Attribute(
            docstring="The options to use when estimating the dataset.",
            type_hint=RequestOptions,
        )
        force_field_source = Attribute(
            docstring="The force field parameters to estimate the dataset using.",
            type_hint=ForceFieldSource,
        )
        parameter_gradient_keys = Attribute(
            docstring="A list of the parameters that the physical properties "
            "should be differentiated with respect to.",
            type_hint=list,
        )

        def validate(self, attribute_type=None):
            """Validate the submission, ensuring every gradient key is a
            `ParameterGradientKey`."""
            super(EvaluatorClient._Submission, self).validate(attribute_type)
            assert all(
                isinstance(x, ParameterGradientKey)
                for x in self.parameter_gradient_keys
            )

    @property
    def server_address(self):
        """str: The address of the server that this client is connected to."""
        return self._connection_options.server_address

    @property
    def server_port(self):
        """int: The port of the server that this client is connected to."""
        return self._connection_options.server_port

    def __init__(self, connection_options=None):
        """
        Parameters
        ----------
        connection_options: ConnectionOptions, optional
            The options used when connecting to the calculation
            server. If `None`, default options are used.
        """
        if connection_options is None:
            connection_options = ConnectionOptions()
        if connection_options.server_address is None:
            # NOTE(review): the two string fragments below concatenate without
            # a separating space ("run" + "these") - confirm before changing,
            # as the message is user facing.
            raise ValueError(
                "The address of the server which will run"
                "these calculations must be given."
            )
        self._connection_options = connection_options

    @staticmethod
    def default_request_options(data_set, force_field_source):
        """Returns the default `RequestOptions` options used
        to estimate a set of properties if `None` are provided.

        Parameters
        ----------
        data_set: PhysicalPropertyDataSet
            The data set which would be estimated.
        force_field_source: ForceFieldSource
            The force field parameters which will be used by the
            request.

        Returns
        -------
        RequestOptions
            The default options.
        """
        options = RequestOptions()
        EvaluatorClient._populate_request_options(options, data_set, force_field_source)
        return options

    @staticmethod
    def _default_protocol_replacements(force_field_source):
        """Returns the default set of protocols in a workflow to replace
        with different types. This is mainly to handle replacing the base
        force field assignment protocol with one specific to the force field
        source.

        Parameters
        ----------
        force_field_source: ForceFieldSource
            The force field parameters which will be used by the
            request.

        Returns
        -------
        dict of str and str
            A map between the type of protocol to replace, and the type of
            protocol to use in its place.
        """
        replacements = {}
        # Swap the generic system-building protocol for the one matching the
        # force field source; an unrecognised source leaves the map empty.
        if isinstance(force_field_source, SmirnoffForceFieldSource):
            replacements["BaseBuildSystem"] = "BuildSmirnoffSystem"
        elif isinstance(force_field_source, LigParGenForceFieldSource):
            replacements["BaseBuildSystem"] = "BuildLigParGenSystem"
        elif isinstance(force_field_source, TLeapForceFieldSource):
            replacements["BaseBuildSystem"] = "BuildTLeapSystem"
        return replacements

    @staticmethod
    def _populate_request_options(options, data_set, force_field_source):
        """Populates any missing attributes of a `RequestOptions`
        object with default values registered via the plug-in
        system.

        Parameters
        ----------
        options: RequestOptions
            The object to populate with defaults.
        data_set: PhysicalPropertyDataSet
            The data set to be estimated using the options.
        force_field_source: ForceFieldSource
            The force field parameters which will be used by the
            request.
        """
        # Retrieve the types of properties in the data set.
        property_types = data_set.property_types

        if options.calculation_schemas == UNDEFINED:
            options.calculation_schemas = defaultdict(dict)

        # Track which property types still need at least one schema.
        properties_without_schemas = set(property_types)
        for property_type in options.calculation_schemas:
            if property_type not in properties_without_schemas:
                continue
            properties_without_schemas.remove(property_type)

        # Assign default calculation schemas in the cases where the user
        # hasn't provided one.
        for calculation_layer in options.calculation_layers:
            for property_type in property_types:
                # Check if the user has already provided a schema.
                existing_schema = options.calculation_schemas.get(
                    property_type, {}
                ).get(calculation_layer, None)
                if existing_schema is not None:
                    continue
                # Check if this layer has any registered schemas.
                if calculation_layer not in registered_calculation_schemas:
                    continue
                default_layer_schemas = registered_calculation_schemas[
                    calculation_layer
                ]
                # Check if this property type has any registered schemas for
                # the given calculation layer.
                if property_type not in default_layer_schemas:
                    continue
                # noinspection PyTypeChecker
                default_schema = default_layer_schemas[property_type]
                # Registered defaults may be factory callables rather than
                # schema instances.
                if callable(default_schema):
                    default_schema = default_schema()
                # Mark this property as having at least one registered
                # calculation schema.
                if property_type in properties_without_schemas:
                    properties_without_schemas.remove(property_type)
                if property_type not in options.calculation_schemas:
                    options.calculation_schemas[property_type] = {}
                options.calculation_schemas[property_type][
                    calculation_layer
                ] = default_schema

        # Make sure all property types have at least one registered
        # calculation schema.
        if len(properties_without_schemas) >= 1:
            type_string = ", ".join(properties_without_schemas)
            raise ValueError(
                f"No calculation schema could be found for "
                f"the {type_string} properties."
            )

        # Perform any protocol type replacements
        replacement_types = EvaluatorClient._default_protocol_replacements(
            force_field_source
        )
        for calculation_layer in options.calculation_layers:
            for property_type in property_types:
                # Skip property / layer combinations which have no schema.
                if (
                    property_type not in options.calculation_schemas
                    or calculation_layer
                    not in options.calculation_schemas[property_type]
                ):
                    continue
                schema = options.calculation_schemas[property_type][calculation_layer]
                # Only workflow based schemas carry protocols to replace.
                if not isinstance(schema, WorkflowCalculationSchema):
                    continue
                workflow_schema = schema.workflow_schema
                workflow_schema.replace_protocol_types(replacement_types)

    def request_estimate(
        self,
        property_set,
        force_field_source,
        options=None,
        parameter_gradient_keys=None,
    ):
        """Submits a request for the `EvaluatorServer` to attempt to estimate
        the data set of physical properties using the specified force field
        parameters according to the provided options.

        Parameters
        ----------
        property_set : PhysicalPropertyDataSet
            The set of properties to estimate.
        force_field_source : ForceFieldSource or openff.toolkit.typing.engines.smirnoff.ForceField
            The force field parameters to estimate the properties using.
        options : RequestOptions, optional
            A set of estimator options. If `None` default options
            will be used (see `default_request_options`).
        parameter_gradient_keys: list of ParameterGradientKey, optional
            A list of the parameters that the physical properties should
            be differentiated with respect to.

        Returns
        -------
        Request
            An object which will provide access to the
            results of this request.
        EvaluatorException, optional
            Any exceptions raised while attempting the submit the request.
        """
        from openff.toolkit.typing.engines import smirnoff

        if property_set is None or force_field_source is None:
            raise ValueError(
                "Both a data set and force field source must be "
                "present to compute physical properties."
            )
        if parameter_gradient_keys is None:
            parameter_gradient_keys = []

        # Handle the conversion of a SMIRNOFF force field object
        # for backwards compatibility.
        if isinstance(force_field_source, smirnoff.ForceField):
            force_field_source = SmirnoffForceFieldSource.from_object(
                force_field_source
            )

        # Fill in any missing options with default values. A deep copy is
        # taken so the caller's options object is never mutated.
        if options is None:
            options = self.default_request_options(property_set, force_field_source)
        else:
            options = copy.deepcopy(options)
            self._populate_request_options(options, property_set, force_field_source)

        # Make sure the options are valid.
        options.validate()

        # Build the submission object.
        submission = EvaluatorClient._Submission()
        submission.dataset = property_set
        submission.force_field_source = force_field_source
        submission.options = options
        submission.parameter_gradient_keys = parameter_gradient_keys

        # Ensure the submission is valid.
        submission.validate()

        # Send the submission to the server.
        request_id, error = self._send_calculations_to_server(submission)

        # Build the object which represents this request.
        request_object = None
        if error is None:
            request_object = Request(self)
            request_object.id = request_id
        return request_object, error

    def retrieve_results(self, request_id, synchronous=False, polling_interval=5):
        """Retrieves the current results of a request from the server.

        Parameters
        ----------
        request_id: str
            The server assigned id of the request.
        synchronous: bool
            If true, this method will block the main thread until the server
            either returns a result or an error.
        polling_interval: float
            If running synchronously, this is the time interval (seconds)
            between checking if the request has completed.

        Returns
        -------
        RequestResult, optional
            Returns the current results of the request. This may
            be `None` if any unexpected exceptions occurred while
            retrieving the estimate.
        EvaluatorException, optional
            The exception raised will trying to retrieve the result,
            if any.
        """
        # If running asynchronously, just return whatever the server
        # sends back.
        if synchronous is False:
            return self._send_query_to_server(request_id)

        assert polling_interval >= 0

        response = None
        error = None
        should_run = True
        # Poll the server until no properties remain queued, i.e. the
        # request has either completed or errored.
        while should_run:
            if polling_interval > 0:
                sleep(polling_interval)
            response, error = self._send_query_to_server(request_id)
            if (
                isinstance(response, RequestResult)
                and len(response.queued_properties) > 0
            ):
                continue
            logger.info(f"The server has completed request {request_id}.")
            should_run = False
        return response, error

    def _send_calculations_to_server(self, submission):
        """Attempts to connect to the calculation server, and
        submit the requested calculations.

        Parameters
        ----------
        submission: _Submission
            The jobs to submit.

        Returns
        -------
        str, optional:
            The id which the server has assigned the submitted calculations.
            This can be used to query the server for when the calculation
            has completed.
            Returns None if the calculation could not be submitted.
        EvaluatorException, optional
            Any exceptions raised while attempting the submit the request.
        """
        # Attempt to establish a connection to the server.
        connection_settings = (
            self._connection_options.server_address,
            self._connection_options.server_port,
        )
        connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        connection.connect(connection_settings)

        request_id = None
        try:
            # Encode the submission json into an encoded
            # packet ready to submit to the server. The wire format is a
            # 4-byte message type, a 4-byte payload length, then the payload.
            message_type = pack_int(EvaluatorMessageTypes.Submission)
            encoded_json = submission.json().encode()
            length = pack_int(len(encoded_json))
            connection.sendall(message_type + length + encoded_json)

            # Wait for confirmation that the server has received
            # the jobs.
            header = recvall(connection, 4)
            length = unpack_int(header)[0]

            # Decode the response from the server. If everything
            # went well, this should be the id of the submitted
            # calculations.
            encoded_json = recvall(connection, length)
            # `error` is always bound on the success path here, or by the
            # except clause below on failure.
            request_id, error = json.loads(encoded_json.decode(), cls=TypedJSONDecoder)
        except Exception as e:
            trace = traceback.format_exception(None, e, e.__traceback__)
            error = EvaluatorException(message=trace)
        finally:
            if connection is not None:
                connection.close()

        # Return the ids of the submitted jobs.
        return request_id, error

    def _send_query_to_server(self, request_id):
        """Attempts to connect to the calculation server, and
        submit the requested calculations.

        Parameters
        ----------
        request_id: str
            The id of the job to query.

        Returns
        -------
        str, optional:
            The status of the submitted job.
            Returns None if the calculation has not yet completed.
        """
        server_response = None

        # Attempt to establish a connection to the server.
        connection_settings = (
            self._connection_options.server_address,
            self._connection_options.server_port,
        )
        connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        connection.connect(connection_settings)

        try:
            # Encode the request id into the message.
            message_type = pack_int(EvaluatorMessageTypes.Query)
            encoded_request_id = request_id.encode()
            length = pack_int(len(encoded_request_id))
            connection.sendall(message_type + length + encoded_request_id)

            # Wait for the server response.
            header = recvall(connection, 4)
            length = unpack_int(header)[0]

            # Decode the response from the server. If everything
            # went well, this should be the finished calculation.
            # A zero length indicates the server had nothing to return.
            if length > 0:
                encoded_json = recvall(connection, length)
                server_response = encoded_json.decode()
        finally:
            if connection is not None:
                connection.close()

        response = None
        error = None
        if server_response is not None:
            response, error = json.loads(server_response, cls=TypedJSONDecoder)
        return response, error
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_protocols/test_storage_protocols.py | """
Units tests for openff.evaluator.protocols.storage
"""
import json
import os
import tempfile
from openff.evaluator.protocols.storage import UnpackStoredSimulationData
from openff.evaluator.tests.utils import (
build_tip3p_smirnoff_force_field,
create_dummy_simulation_data,
create_dummy_substance,
)
from openff.evaluator.utils.serialization import TypedJSONEncoder
def test_unpack_stored_simulation_data():
    """Tests that a stored simulation data object can be loaded from disk
    and unpacked by the `UnpackStoredSimulationData` protocol."""
    with tempfile.TemporaryDirectory() as directory:
        # Write a dummy force field to disk.
        force_field_path = os.path.join(directory, "ff.json")
        with open(force_field_path, "w") as force_field_file:
            force_field_file.write(build_tip3p_smirnoff_force_field().json())

        substance = create_dummy_substance(1)

        # Create a dummy data object along with its ancillary data directory.
        stored_directory_path = os.path.join(directory, "data")
        stored_object_path = os.path.join(directory, "data.json")
        coordinate_file_name = "data_1.pdb"

        stored_object = create_dummy_simulation_data(
            directory_path=stored_directory_path,
            substance=substance,
            force_field_id="ff_id_1",
            coordinate_file_name=coordinate_file_name,
            statistical_inefficiency=1.0,
        )
        with open(stored_object_path, "w") as data_file:
            json.dump(stored_object, data_file, cls=TypedJSONEncoder)

        # Unpack the stored data and make sure no exceptions are raised.
        unpack_protocol = UnpackStoredSimulationData("unpack_data")
        unpack_protocol.simulation_data_path = (
            stored_object_path,
            stored_directory_path,
            force_field_path,
        )
        unpack_protocol.execute(directory, None)
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_protocols/test_forcefield.py | """
Units tests for openff.evaluator.protocols.forcefield
"""
import re
import tempfile
from os import path
from cmiles.utils import load_molecule, mol_to_smiles
from openff.evaluator.forcefield import LigParGenForceFieldSource, TLeapForceFieldSource
from openff.evaluator.protocols.coordinates import BuildCoordinatesPackmol
from openff.evaluator.protocols.forcefield import (
BuildLigParGenSystem,
BuildSmirnoffSystem,
BuildTLeapSystem,
)
from openff.evaluator.substances import Substance
from openff.evaluator.tests.utils import build_tip3p_smirnoff_force_field
def test_build_smirnoff_system():
    """Tests that SMIRNOFF parameters can be assigned to a packed box of
    molecules without error."""
    with tempfile.TemporaryDirectory() as directory:
        # Write the force field to apply to disk.
        force_field_path = path.join(directory, "ff.json")
        with open(force_field_path, "w") as force_field_file:
            force_field_file.write(build_tip3p_smirnoff_force_field().json())

        substance = Substance.from_components("C", "O", "CO", "C(=O)N")

        # Pack a small box of the target substance.
        coordinate_protocol = BuildCoordinatesPackmol("build_coordinates")
        coordinate_protocol.max_molecules = 8
        coordinate_protocol.substance = substance
        coordinate_protocol.execute(directory)

        # Apply the force field parameters to the packed box.
        parameter_protocol = BuildSmirnoffSystem("assign_parameters")
        parameter_protocol.force_field_path = force_field_path
        parameter_protocol.coordinate_file_path = (
            coordinate_protocol.coordinate_file_path
        )
        parameter_protocol.substance = substance
        parameter_protocol.execute(directory)

        assert path.isfile(parameter_protocol.parameterized_system.system_path)
def test_build_tleap_system():
    """Tests that GAFF parameters can be assigned to a packed box of
    molecules via `tleap` without error."""
    with tempfile.TemporaryDirectory() as directory:
        # Write the force field to apply to disk.
        force_field_path = path.join(directory, "ff.json")
        with open(force_field_path, "w") as force_field_file:
            force_field_file.write(TLeapForceFieldSource().json())

        substance = Substance.from_components("CCCCCCCC", "O", "C(=O)N")

        # Pack a small box of the target substance.
        coordinate_protocol = BuildCoordinatesPackmol("build_coordinates")
        coordinate_protocol.max_molecules = 9
        coordinate_protocol.substance = substance
        coordinate_protocol.execute(directory)

        # Apply the force field parameters to the packed box.
        parameter_protocol = BuildTLeapSystem("assign_parameters")
        parameter_protocol.force_field_path = force_field_path
        parameter_protocol.coordinate_file_path = (
            coordinate_protocol.coordinate_file_path
        )
        parameter_protocol.substance = substance
        parameter_protocol.execute(directory)

        assert path.isfile(parameter_protocol.parameterized_system.system_path)
def test_build_ligpargen_system(requests_mock):
    """Tests assigning parameters with `BuildLigParGenSystem` against mocked
    LigParGen server endpoints (no network access required)."""
    # Point the force field source at the mocked endpoints registered below.
    force_field_source = LigParGenForceFieldSource(
        request_url="http://testligpargen.com/request",
        download_url="http://testligpargen.com/download",
    )
    substance = Substance.from_components("C", "O")

    def request_callback(request, context):
        # Mimic the LigParGen submission endpoint: extract the SMILES pattern
        # from the multipart form body, check it is the expected molecule and
        # return the path of the "generated" force field file.
        context.status_code = 200
        smiles = re.search(r'"smiData"\r\n\r\n(.*?)\r\n', request.text).group(1)
        cmiles_molecule = load_molecule(smiles, toolkit="rdkit")
        smiles = mol_to_smiles(
            cmiles_molecule, isomeric=False, explicit_hydrogen=False, mapped=False
        )
        assert smiles == "C"
        return 'value="/tmp/0000.xml"'

    def download_callback(_, context):
        # Mimic the download endpoint by returning a minimal OPLS force field
        # for methane in the OpenMM XML format.
        context.status_code = 200
        return """
<ForceField>
<AtomTypes>
<Type name="opls_802" class="H802" element="H" mass="1.008000" />
<Type name="opls_804" class="H804" element="H" mass="1.008000" />
<Type name="opls_803" class="H803" element="H" mass="1.008000" />
<Type name="opls_800" class="C800" element="C" mass="12.011000" />
<Type name="opls_801" class="H801" element="H" mass="1.008000" />
</AtomTypes>
<Residues>
<Residue name="UNK">
<Atom name="C00" type="opls_800" />
<Atom name="H01" type="opls_801" />
<Atom name="H02" type="opls_802" />
<Atom name="H03" type="opls_803" />
<Atom name="H04" type="opls_804" />
<Bond from="0" to="1"/>
<Bond from="0" to="2"/>
<Bond from="0" to="3"/>
<Bond from="0" to="4"/>
</Residue>
</Residues>
<HarmonicBondForce>
<Bond class1="H801" class2="C800" length="0.109000" k="284512.000000"/>
<Bond class1="H802" class2="C800" length="0.109000" k="284512.000000"/>
<Bond class1="H803" class2="C800" length="0.109000" k="284512.000000"/>
<Bond class1="H804" class2="C800" length="0.109000" k="284512.000000"/>
</HarmonicBondForce>
<HarmonicAngleForce>
<Angle class1="H801" class2="C800" class3="H802" angle="1.881465" k="276.144000"/>
<Angle class1="H801" class2="C800" class3="H803" angle="1.881465" k="276.144000"/>
<Angle class1="H801" class2="C800" class3="H804" angle="1.881465" k="276.144000"/>
<Angle class1="H802" class2="C800" class3="H803" angle="1.881465" k="276.144000"/>
<Angle class1="H803" class2="C800" class3="H804" angle="1.881465" k="276.144000"/>
<Angle class1="H802" class2="C800" class3="H804" angle="1.881465" k="276.144000"/>
</HarmonicAngleForce>
<PeriodicTorsionForce>
<Improper class1="C800" class2="H801" class3="H802" class4="H803" k1="0.000000" k2="0.000000" k3="0.000000"
k4="0.000000" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00"
phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Improper class1="C800" class2="H801" class3="H802" class4="H804" k1="0.000000" k2="0.000000" k3="0.000000"
k4="0.000000" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00"
phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
</PeriodicTorsionForce>
<NonbondedForce coulomb14scale="0.5" lj14scale="0.5">
<Atom type="opls_803" charge="0.074800" sigma="0.250000" epsilon="0.125520" />
<Atom type="opls_802" charge="0.074800" sigma="0.250000" epsilon="0.125520" />
<Atom type="opls_800" charge="-0.299400" sigma="0.350000" epsilon="0.276144" />
<Atom type="opls_804" charge="0.074800" sigma="0.250000" epsilon="0.125520" />
<Atom type="opls_801" charge="0.074800" sigma="0.250000" epsilon="0.125520" />
</NonbondedForce>
</ForceField>
"""

    # Register the mocked endpoints with the requests mocker.
    requests_mock.post(force_field_source.request_url, text=request_callback)
    requests_mock.post(force_field_source.download_url, text=download_callback)

    with tempfile.TemporaryDirectory() as directory:
        # Write the force field source to disk.
        force_field_path = path.join(directory, "ff.json")
        with open(force_field_path, "w") as file:
            file.write(force_field_source.json())

        # Pack a small box of the target substance.
        build_coordinates = BuildCoordinatesPackmol("build_coordinates")
        build_coordinates.max_molecules = 8
        build_coordinates.substance = substance
        build_coordinates.execute(directory)

        # Apply the (mock downloaded) force field parameters.
        assign_parameters = BuildLigParGenSystem("assign_parameters")
        assign_parameters.force_field_path = force_field_path
        assign_parameters.coordinate_file_path = build_coordinates.coordinate_file_path
        assign_parameters.substance = substance
        assign_parameters.execute(directory)
        assert path.isfile(assign_parameters.parameterized_system.system_path)
|
jaketanderson/openff-evaluator | openff/evaluator/workflow/protocols.py | """
A collection of specialized workflow building blocks, which when chained together,
form a larger property estimation workflow.
"""
import abc
import copy
import json
import logging
import os
import time
from collections import defaultdict
from openff.evaluator.attributes import Attribute, AttributeClass, PlaceholderValue
from openff.evaluator.backends import ComputeResources
from openff.evaluator.utils import graph
from openff.evaluator.utils.exceptions import EvaluatorException
from openff.evaluator.utils.serialization import TypedJSONDecoder, TypedJSONEncoder
from openff.evaluator.utils.string import extract_variable_index_and_name
from openff.evaluator.utils.utils import get_nested_attribute, set_nested_attribute
from openff.evaluator.workflow import (
ProtocolGroupSchema,
ProtocolSchema,
WorkflowException,
registered_workflow_protocols,
workflow_protocol,
)
from openff.evaluator.workflow.attributes import (
InequalityMergeBehaviour,
InputAttribute,
MergeBehaviour,
OutputAttribute,
)
from openff.evaluator.workflow.utils import ProtocolPath
logger = logging.getLogger(__name__)
class ProtocolMeta(abc.ABCMeta):
    """Metaclass for :class:`Protocol` subclasses which pre-computes, at class
    creation time, the ``ProtocolPath`` objects corresponding to each declared
    input and output attribute so instances can expose them cheaply.
    """

    def __init__(cls, name, bases, dct):
        super().__init__(name, bases, dct)

        # Wrap every declared input / output attribute name in a `ProtocolPath`
        # exactly once, rather than rebuilding the paths on each property access.
        cls._input_attributes = list(map(ProtocolPath, cls.get_attributes(InputAttribute)))
        cls._output_attributes = list(map(ProtocolPath, cls.get_attributes(OutputAttribute)))
class Protocol(AttributeClass, abc.ABC, metaclass=ProtocolMeta):
    """The base class for a protocol which would form one
    step of a larger property calculation workflow.

    A protocol may for example:

    * create the coordinates of a mixed simulation box
    * set up a bound ligand-protein system
    * build the simulation topology
    * perform an energy minimisation

    An individual protocol may require a set of inputs, which may either be
    set as constants

    >>> from openff.evaluator.protocols.openmm import OpenMMSimulation
    >>>
    >>> npt_equilibration = OpenMMSimulation('npt_equilibration')
    >>> npt_equilibration.ensemble = OpenMMSimulation.Ensemble.NPT

    or from the output of another protocol, pointed to by a ProtocolPath

    >>> npt_production = OpenMMSimulation('npt_production')
    >>> # Use the coordinate file output by the npt_equilibration protocol
    >>> # as the input to the npt_production protocol
    >>> npt_production.input_coordinate_file = ProtocolPath('output_coordinate_file',
    >>>                                                     npt_equilibration.id)

    In this way protocols may be chained together, thus defining a larger property
    calculation workflow from simple, reusable building blocks.
    """

    # The unique id of this protocol within a workflow.
    id = Attribute(docstring="The unique id of this protocol.", type_hint=str)

    allow_merging = InputAttribute(
        docstring="Defines whether this protocols is allowed "
        "to merge with other protocols.",
        type_hint=bool,
        default_value=True,
    )

    @property
    def schema(self):
        """ProtocolSchema: A serializable schema for this object."""
        return self._get_schema()

    @schema.setter
    def schema(self, schema_value):
        self._set_schema(schema_value)

    @property
    def required_inputs(self):
        """list of ProtocolPath: The inputs which must be set on this protocol."""
        # Return copies so callers cannot mutate the class-level cached paths.
        return [x.copy() for x in self._input_attributes]

    @property
    def outputs(self):
        """dict of ProtocolPath and Any: A dictionary of the outputs of this property."""
        outputs = {}
        for output_attribute in self._output_attributes:
            outputs[output_attribute] = getattr(self, output_attribute.property_name)
        return outputs

    @property
    def dependencies(self):
        """list of ProtocolPath: A list of pointers to the protocols which this
        protocol takes input from.
        """
        return_dependencies = []
        for input_path in self.required_inputs:
            value_references = self.get_value_references(input_path)
            if len(value_references) == 0:
                continue
            for value_reference in value_references.values():
                # De-duplicate, and skip global inputs / self-references.
                if value_reference in return_dependencies:
                    continue
                if (
                    value_reference.start_protocol is None
                    or value_reference.start_protocol == self.id
                ):
                    continue
                return_dependencies.append(value_reference)
        return return_dependencies

    def __init__(self, protocol_id):
        """Initialize the protocol with its unique id."""
        self.id = protocol_id

    def _get_schema(self, schema_type=ProtocolSchema, *args):
        """Returns the schema representation of this protocol.

        Parameters
        ----------
        schema_type: type of ProtocolSchema
            The type of schema to create.
        *args: Any
            Extra positional arguments forwarded to the ``schema_type``
            constructor (used by subclasses with richer schemas).

        Returns
        -------
        schema_type
            The schema representation.
        """
        inputs = {}
        for input_path in self.required_inputs:
            # Only include inputs defined directly on this protocol, not on
            # nested (grouped) child protocols.
            if (
                len(input_path.protocol_path) > 0
                and input_path.protocol_path != self.id
            ):
                continue
            # Always make sure to only pass a copy of the input.
            # Changing the schema should NOT change the protocol.
            inputs[input_path.full_path] = copy.deepcopy(self.get_value(input_path))
        schema = schema_type(self.id, self.__class__.__name__, inputs, *args)
        return schema

    def _set_schema(self, schema):
        """Sets this protocols properties from a `ProtocolSchema`

        Parameters
        ----------
        schema: ProtocolSchema
            The schema to set.

        Raises
        ------
        ValueError
            If the schema's type does not match this protocol's class.
        """
        # Make sure this protocol matches the schema type.
        if self.__class__.__name__ != schema.type:
            raise ValueError(
                f"The schema type {schema.type} does not match this protocol."
            )
        self.id = schema.id
        for input_full_path in schema.inputs:
            # Deep-copy so the protocol never aliases values held by the schema.
            value = copy.deepcopy(schema.inputs[input_full_path])
            input_path = ProtocolPath.from_string(input_full_path)
            self.set_value(input_path, value)

    @classmethod
    def from_schema(cls, schema):
        """Initializes a protocol from it's schema definition.

        Parameters
        ----------
        schema: ProtocolSchema
            The schema to initialize the protocol using.

        Returns
        -------
        cls
            The initialized protocol.
        """
        # Look up the concrete class registered under the schema's type name.
        protocol = registered_workflow_protocols[schema.type](schema.id)
        protocol.schema = schema
        return protocol

    def set_uuid(self, value):
        """Prepend a unique identifier to this protocols id. If the id
        already has a prepended uuid, it will be overwritten by this value.

        Parameters
        ----------
        value : str
            The uuid to prepend.
        """
        if len(value) == 0:
            return
        # No-op when the id already carries this uuid.
        id_with_uuid = graph.append_uuid(self.id, value)
        if self.id == id_with_uuid:
            return
        self.id = graph.append_uuid(self.id, value)
        # Propagate the uuid into every input path and every protocol path
        # referenced by an input value.
        for input_path in self.required_inputs:
            input_path.append_uuid(value)
            value_references = self.get_value_references(input_path)
            for key, value_reference in value_references.items():
                value_reference.append_uuid(value)
                self.set_value(key, value_reference)

    def replace_protocol(self, old_id, new_id):
        """Finds each input which came from a given protocol
        and redirects it to instead take input from a new one.

        Notes
        -----
        This method is mainly intended to be used only when merging
        multiple protocols into one.

        Parameters
        ----------
        old_id : str
            The id of the old input protocol.
        new_id : str
            The id of the new input protocol.
        """
        for input_path in self.required_inputs:
            input_path.replace_protocol(old_id, new_id)
            # NOTE(review): because of the leading `is not None or`, any input
            # path with a non-None start protocol is skipped here, so value
            # references are only rewritten for paths with no start protocol.
            # This mirrors (inverted) logic in `_find_inputs_to_merge` — confirm
            # whether `is not None` was intended to be `is None` upstream.
            if input_path.start_protocol is not None or (
                input_path.start_protocol != input_path.last_protocol
                and input_path.start_protocol != self.id
            ):
                continue
            value_references = self.get_value_references(input_path)
            for key, value_reference in value_references.items():
                value_reference.replace_protocol(old_id, new_id)
                self.set_value(key, value_reference)
        if self.id == old_id:
            self.id = new_id

    def _find_inputs_to_merge(self):
        """Returns a list of those inputs which should
        be considered when attempting to merge two different
        protocols of the same type.

        Returns
        -------
        set of ProtocolPath
            References to those inputs which should be
            considered.
        """
        inputs_to_consider = set()
        for input_path in self.required_inputs:
            # Do not consider paths that point to child (e.g grouped) protocols.
            # These should be handled by the container classes themselves.
            if (
                input_path.start_protocol is not None
                and input_path.start_protocol != self.id
            ):
                continue
            if not (
                input_path.start_protocol is None
                or (
                    input_path.start_protocol == input_path.last_protocol
                    and input_path.start_protocol == self.id
                )
            ):
                continue
            # If no merge behaviour flag is present (for example in the case of
            # ConditionalGroup conditions), simply assume this is handled explicitly
            # elsewhere.
            if not hasattr(
                getattr(type(self), input_path.property_name), "merge_behavior"
            ):
                continue
            inputs_to_consider.add(input_path)
        return inputs_to_consider

    def can_merge(self, other, path_replacements=None):
        """Determines whether this protocol can be merged with another.

        Parameters
        ----------
        other : :obj:`Protocol`
            The protocol to compare against.
        path_replacements: list of tuple of str, optional
            Replacements to make in any value reference protocol paths
            before comparing for equality.

        Returns
        ----------
        bool
            True if the two protocols are safe to merge.
        """
        # `isinstance(self, type(other))` also allows merging into subclasses.
        if not self.allow_merging or not isinstance(self, type(other)):
            return False
        if path_replacements is None:
            path_replacements = []
        inputs_to_consider = self._find_inputs_to_merge()
        for input_path in inputs_to_consider:
            # Do a quick sanity check that the other protocol
            # does in fact also require this input.
            if input_path not in other.required_inputs:
                return False
            merge_behavior = getattr(
                type(self), input_path.property_name
            ).merge_behavior
            self_value = self.get_value(input_path)
            other_value = other.get_value(input_path)
            if (
                isinstance(self_value, PlaceholderValue)
                and not isinstance(other_value, PlaceholderValue)
            ) or (
                isinstance(other_value, PlaceholderValue)
                and not isinstance(self_value, PlaceholderValue)
            ):
                # We cannot safely merge inputs when only one of the values
                # is currently known.
                return False
            if isinstance(self_value, ProtocolPath) and isinstance(
                other_value, ProtocolPath
            ):
                # Apply any pending id replacements before comparing paths.
                other_value_post_merge = ProtocolPath.from_string(other_value.full_path)
                for original_id, new_id in path_replacements:
                    other_value_post_merge.replace_protocol(original_id, new_id)
                # We cannot safely choose which value to take when the
                # values are not know ahead of time unless the two values
                # come from the exact same source.
                if self_value.full_path != other_value_post_merge.full_path:
                    return False
            elif isinstance(self_value, PlaceholderValue) and isinstance(
                other_value, PlaceholderValue
            ):
                # Both values are unknown placeholders, so equality can never
                # be established — refuse to merge.
                return False
            elif (
                merge_behavior == MergeBehaviour.ExactlyEqual
                and self_value != other_value
            ):
                return False
        return True

    def merge(self, other):
        """Merges another Protocol with this one. The id
        of this protocol will remain unchanged.

        Parameters
        ----------
        other: Protocol
            The protocol to merge into this one.

        Returns
        -------
        Dict[str, str]
            A map between any original protocol ids and their new merged values.

        Raises
        ------
        ValueError
            If `can_merge` reports the two protocols cannot be merged.
        """
        if not self.can_merge(other):
            raise ValueError("These protocols cannot be safely merged.")
        inputs_to_consider = self._find_inputs_to_merge()
        for input_path in inputs_to_consider:
            merge_behavior = getattr(
                type(self), input_path.property_name
            ).merge_behavior
            # ExactlyEqual inputs are already identical; Custom behaviour is
            # handled by subclasses.
            if (
                merge_behavior == MergeBehaviour.ExactlyEqual
                or merge_behavior == MergeBehaviour.Custom
            ):
                continue
            # Paths cannot be compared with min / max, so leave them untouched.
            if isinstance(self.get_value(input_path), ProtocolPath) or isinstance(
                other.get_value(input_path), ProtocolPath
            ):
                continue
            if merge_behavior == InequalityMergeBehaviour.SmallestValue:
                value = min(self.get_value(input_path), other.get_value(input_path))
            elif merge_behavior == InequalityMergeBehaviour.LargestValue:
                value = max(self.get_value(input_path), other.get_value(input_path))
            else:
                raise NotImplementedError()
            self.set_value(input_path, value)
        return {}

    def get_value_references(self, input_path):
        """Returns a dictionary of references to the protocols which one of this
        protocols inputs (specified by `input_path`) takes its value from.

        Notes
        -----
        Currently this method only functions correctly for an input value which
        is either currently a :obj:`ProtocolPath`, or a `list` / `dict` which contains
        at least one :obj:`ProtocolPath`.

        Parameters
        ----------
        input_path: ProtocolPath
            The input value to check.

        Returns
        -------
        dict of ProtocolPath and ProtocolPath
            A dictionary of the protocol paths that the input targeted by `input_path` depends upon.
        """
        input_value = self.get_value(input_path)
        if isinstance(input_value, ProtocolPath):
            return {input_path: input_value}
        if (
            not isinstance(input_value, list)
            and not isinstance(input_value, tuple)
            and not isinstance(input_value, dict)
        ):
            return {}
        return_paths = {}
        if isinstance(input_value, list) or isinstance(input_value, tuple):
            # Index each referenced element as `property_name[index]`.
            for index, list_value in enumerate(input_value):
                if not isinstance(list_value, ProtocolPath):
                    continue
                path_index = ProtocolPath(
                    input_path.property_name + f"[{index}]", *input_path.protocol_ids
                )
                return_paths[path_index] = list_value
        else:
            # Index each referenced entry as `property_name[key]`.
            for dict_key in input_value:
                if not isinstance(input_value[dict_key], ProtocolPath):
                    continue
                path_index = ProtocolPath(
                    input_path.property_name + f"[{dict_key}]", *input_path.protocol_ids
                )
                return_paths[path_index] = input_value[dict_key]
        return return_paths

    def get_class_attribute(self, reference_path):
        """Returns one of this protocols, or any of its children's,
        attributes directly (rather than its value).

        Parameters
        ----------
        reference_path: ProtocolPath
            The path pointing to the attribute to return.

        Returns
        ----------
        object:
            The class attribute.

        Raises
        ------
        ValueError
            If the path targets a different protocol, or uses a nested /
            indexed property name which cannot map to a class attribute.
        """
        if (
            reference_path.start_protocol is not None
            and reference_path.start_protocol != self.id
        ):
            raise ValueError(
                "The reference path {} does not point to this protocol".format(
                    reference_path
                )
            )
        if (
            reference_path.property_name.count(ProtocolPath.property_separator) >= 1
            or reference_path.property_name.find("[") > 0
        ):
            raise ValueError(
                "The expected attribute cannot be found for "
                "nested property names: {}".format(reference_path.property_name)
            )
        return getattr(type(self), reference_path.property_name)

    def get_value(self, reference_path):
        """Returns the value of one of this protocols inputs / outputs.

        Parameters
        ----------
        reference_path: ProtocolPath
            The path pointing to the value to return.

        Returns
        ----------
        Any:
            The value of the input / output

        Raises
        ------
        ValueError
            If the path targets a different protocol or names no property.
        """
        if (
            reference_path.start_protocol is not None
            and reference_path.start_protocol != self.id
        ):
            raise ValueError("The reference path does not target this protocol.")
        if reference_path.property_name is None or reference_path.property_name == "":
            raise ValueError("The reference path does specify a property to return.")
        return get_nested_attribute(self, reference_path.property_name)

    def set_value(self, reference_path, value):
        """Sets the value of one of this protocols inputs.

        Parameters
        ----------
        reference_path: ProtocolPath
            The path pointing to the value to return.
        value: Any
            The value to set.

        Raises
        ------
        ValueError
            If the path targets a different protocol or names no property.
        """
        if (
            reference_path.start_protocol is not None
            and reference_path.start_protocol != self.id
        ):
            raise ValueError("The reference path does not target this protocol.")
        if reference_path.property_name is None or reference_path.property_name == "":
            raise ValueError("The reference path does specify a property to set.")
        set_nested_attribute(self, reference_path.property_name, value)

    def apply_replicator(
        self,
        replicator,
        template_values,
        template_index=-1,
        template_value=None,
        update_input_references=False,
    ):
        """Applies a `ProtocolReplicator` to this protocol. This method
        should clone any protocols whose id contains the id of the
        replicator (in the format `$(replicator.id)`).

        Parameters
        ----------
        replicator: ProtocolReplicator
            The replicator to apply.
        template_values: list of Any
            A list of the values which will be inserted
            into the newly replicated protocols.

            This parameter is mutually exclusive with
            `template_index` and `template_value`
        template_index: int, optional
            A specific value which should be used for any
            protocols flagged as to be replicated by the
            replicator. This option is mainly used when
            replicating children of an already replicated
            protocol.

            This parameter is mutually exclusive with
            `template_values` and must be set along with
            a `template_value`.
        template_value: Any, optional
            A specific index which should be used for any
            protocols flagged as to be replicated by the
            replicator. This option is mainly used when
            replicating children of an already replicated
            protocol.

            This parameter is mutually exclusive with
            `template_values` and must be set along with
            a `template_index`.
        update_input_references: bool
            If true, any protocols which take their input from a protocol
            which was flagged for replication will be updated to take input
            from the actually replicated protocol. This should only be set
            to true if this protocol is not nested within a workflow or a
            protocol group.

            This option cannot be used when a specific `template_index` or
            `template_value` is providied.

        Returns
        -------
        dict of ProtocolPath and list of tuple of ProtocolPath and int
            A dictionary of references to all of the protocols which have
            been replicated, with keys of original protocol ids. Each value
            is comprised of a list of the replicated protocol ids, and their
            index into the `template_values` array.
        """
        # The base implementation performs no replication; container protocols
        # (e.g. groups) override this.
        return {}

    @abc.abstractmethod
    def _execute(self, directory, available_resources):
        """The implementation of the public facing `execute`
        method.

        This method will be called by `execute` after all inputs
        have been validated.

        Parameters
        ----------
        directory: str
            The directory to store output data in.
        available_resources: ComputeResources
            The resources available to execute on.
        """

    def execute(self, directory="", available_resources=None):
        """Execute the protocol.

        Parameters
        ----------
        directory: str
            The directory to store output data in.
        available_resources: ComputeResources
            The resources available to execute on. If `None`, the protocol
            will be executed on a single CPU.
        """
        if len(directory) > 0 and not os.path.isdir(directory):
            os.makedirs(directory, exist_ok=True)
        if available_resources is None:
            available_resources = ComputeResources(number_of_threads=1)
        # Validate that all required inputs have been set before executing.
        self.validate(InputAttribute)
        self._execute(directory, available_resources)
class ProtocolGraph:
    """A graph of connected protocols which may be
    executed together.
    """

    @property
    def protocols(self):
        """dict of str and Protocol: The protocols in this graph."""
        return self._protocols_by_id

    @property
    def root_protocols(self):
        """list of str: The ids of the protocols in the group which do not
        take input from the other grouped protocols."""
        return self._root_protocols

    def __init__(self):
        """Create an empty protocol graph."""
        self._protocols_by_id = {}
        self._root_protocols = []

    def _build_dependants_graph(
        self, protocols, allow_external_dependencies, apply_reduction=False
    ):
        """Builds a dictionary of key value pairs where each key
        is the id of a protocol in the graph and each value is a
        list ids of protocols which depend on this protocol.

        Parameters
        ----------
        protocols: dict of str and Protocol
            The protocols in the graph.
        allow_external_dependencies: bool
            If `False`, an exception will be raised if a protocol
            has a dependency outside of this graph.
        apply_reduction: bool
            Whether or not to apply transitive reduction to the
            graph.

        Returns
        -------
        dict of str and list of str
            The dependants graph.

        Raises
        ------
        ValueError
            If an external dependency is found while disallowed, or if the
            graph contains cyclic dependencies.
        """
        # Dependencies may point either at the protocols being added or at
        # protocols already present in this graph.
        internal_protocol_ids = {*protocols, *self._protocols_by_id}
        dependants_graph = defaultdict(set)
        for protocol_id in protocols:
            dependants_graph[protocol_id] = set()
        for protocol in protocols.values():
            for dependency in protocol.dependencies:
                # Check for external dependencies.
                if dependency.start_protocol not in internal_protocol_ids:
                    if allow_external_dependencies:
                        continue
                    else:
                        raise ValueError(
                            f"The {dependency.start_protocol} dependency "
                            f"is outside of this graph."
                        )
                # Skip global or self dependencies.
                if dependency.is_global or dependency.start_protocol == protocol.id:
                    continue
                # Add the dependency
                dependants_graph[dependency.start_protocol].add(protocol.id)
        dependants_graph = {key: list(value) for key, value in dependants_graph.items()}
        if not graph.is_acyclic(dependants_graph):
            raise ValueError("The protocols in this graph have cyclical dependencies.")
        if apply_reduction:
            # Remove any redundant connections from the graph.
            graph.apply_transitive_reduction(dependants_graph)
        return dependants_graph

    def _add_protocol(
        self,
        protocol_id,
        protocols_to_add,
        dependant_ids,
        parent_protocol_ids,
        full_dependants_graph,
    ):
        """Adds a protocol into the graph.

        Parameters
        ----------
        protocol_id : str
            The id of the protocol to insert.
        protocols_to_add: dict of str and Protocol
            A dictionary of all of the protocols currently being
            added to the graph.
        dependant_ids: list of str
            The ids of the protocols which depend on the output of the
            protocol to be inserted.
        parent_protocol_ids : `list` of str
            The ids of the parents of the node to be inserted. If None,
            the protocol will be added as a new root node.
        full_dependants_graph: dict of str and list of str
            The current dependants graph of the entire workflow
            graph. This will be used to find the child protocols
            of existing protocols in the graph.

        Returns
        -------
        str
            The id of the protocol which was inserted. This may not be
            the same as `protocol_id` if the protocol to insert was merged
            with an existing one.
        dict of str and str
            A mapping between all current protocol ids, and the new ids of
            protocols after the protocol has been inserted due to protocol
            merging.
        """
        # Build a list of protocols which have the same ancestors
        # as the protocols to insert. This will be used to check
        # if we are trying to add a redundant protocol to the graph.
        existing_protocols = (
            self._root_protocols if len(parent_protocol_ids) == 0 else []
        )
        for parent_protocol_id in parent_protocol_ids:
            existing_protocols.extend(
                x
                for x in full_dependants_graph[parent_protocol_id]
                if x not in existing_protocols
            )
        # Don't merge protocols from the same workflow / batch
        existing_protocols = [
            x
            for x in existing_protocols
            if x not in protocols_to_add
            or graph.retrieve_uuid(x) != graph.retrieve_uuid(protocol_id)
        ]
        protocol_to_insert = protocols_to_add[protocol_id]
        existing_protocol = None
        # Start by checking to see if the starting protocol of the workflow graph is
        # already present in the full graph.
        for existing_id in existing_protocols:
            protocol = self._protocols_by_id[existing_id]
            if not protocol.can_merge(protocol_to_insert):
                continue
            existing_protocol = protocol
            break
        # Store a mapping between original and merged protocols.
        merged_ids = {}
        if existing_protocol is not None:
            # Make a note that the existing protocol should be used in place
            # of this workflows version.
            protocols_to_add[protocol_id] = existing_protocol
            merged_ids = existing_protocol.merge(protocol_to_insert)
            merged_ids[protocol_to_insert.id] = existing_protocol.id
            # Re-point the inputs of any dependants at the merged protocol.
            for old_id, new_id in merged_ids.items():
                for dependant_id in dependant_ids:
                    protocols_to_add[dependant_id].replace_protocol(old_id, new_id)
            # Connect any newly-added parents to the existing protocol.
            for parent_id in parent_protocol_ids:
                if parent_id not in protocols_to_add:
                    continue
                if existing_protocol.id not in full_dependants_graph[parent_id]:
                    full_dependants_graph[parent_id].append(existing_protocol.id)
        else:
            # Add the protocol as a new protocol in the graph.
            self._protocols_by_id[protocol_id] = protocol_to_insert
            existing_protocol = self._protocols_by_id[protocol_id]
            full_dependants_graph[protocol_id] = []
            if len(parent_protocol_ids) == 0:
                self._root_protocols.append(protocol_id)
            for parent_id in parent_protocol_ids:
                if protocol_id not in full_dependants_graph[parent_id]:
                    full_dependants_graph[parent_id].append(protocol_id)
        return existing_protocol.id, merged_ids

    def add_protocols(self, *protocols, allow_external_dependencies=False):
        """Adds a set of protocols to the graph.

        Parameters
        ----------
        protocols : tuple of Protocol
            The protocols to add.
        allow_external_dependencies: bool
            If `False`, an exception will be raised if a protocol
            has a dependency outside of this graph.

        Returns
        -------
        dict of str and str
            A mapping between the original protocols and protocols which
            were merged over the course of adding the new protocols.

        Raises
        ------
        ValueError
            If any of the protocols to add share an id with a protocol
            already present in the graph.
        """
        # noinspection PyUnresolvedReferences
        conflicting_ids = [x.id for x in protocols if x.id in self._protocols_by_id]
        # Make sure we aren't trying to add protocols with conflicting ids.
        if len(conflicting_ids) > 0:
            raise ValueError(
                f"The graph already contains protocols with ids {conflicting_ids}"
            )
        # Add the protocols to the graph
        # noinspection PyUnresolvedReferences
        protocols_by_id = {x.id: x for x in protocols}
        # Build a the dependants graph of the protocols to add,
        # and determine the order that they would be executed in.
        # This is the order we should try to insert them in.
        dependants_graph = self._build_dependants_graph(
            protocols_by_id, allow_external_dependencies, apply_reduction=False
        )
        # Determine if any of the protocols to add depend on protocols which
        # are already in the graph.
        existing_parents = {
            x: set(y) for x, y in dependants_graph.items() if x in self._protocols_by_id
        }
        child_to_existing_parents = graph.dependants_to_dependencies(existing_parents)
        child_to_existing_parents = {
            x: y for x, y in child_to_existing_parents.items() if x in protocols_by_id
        }
        # Compute the reduced new graph to add.
        dependants_graph = {
            x: y for x, y in dependants_graph.items() if x in protocols_by_id
        }
        reduced_dependants_graph = {x: [*y] for x, y in dependants_graph.items()}
        graph.apply_transitive_reduction(reduced_dependants_graph)
        # Determine the order in which the new protocols would execute.
        # This will be the order we attempt to insert them into the graph.
        protocol_execution_order = graph.topological_sort(dependants_graph)
        # Construct the full dependants graph which will be used to find the
        # children of existing parent protocols
        full_dependants_graph = self._build_dependants_graph(
            self._protocols_by_id, allow_external_dependencies, apply_reduction=True
        )
        # Store a mapping between original and merged protocols.
        merged_ids = {}
        parent_protocol_ids = defaultdict(set)
        parent_protocol_ids.update(child_to_existing_parents)
        for protocol_id in protocol_execution_order:
            parent_ids = parent_protocol_ids.get(protocol_id) or []
            inserted_id, new_ids = self._add_protocol(
                protocol_id,
                protocols_by_id,
                dependants_graph[protocol_id],
                parent_ids,
                full_dependants_graph,
            )
            # Keep track of any merged protocols
            merged_ids.update(new_ids)
            # Update the parent graph
            for dependant in reduced_dependants_graph[protocol_id]:
                parent_protocol_ids[dependant].add(inserted_id)
        return merged_ids

    def execute(
        self,
        root_directory="",
        calculation_backend=None,
        compute_resources=None,
        enable_checkpointing=True,
        safe_exceptions=True,
    ):
        """Execute the protocol graph in the specified directory,
        and either using a `CalculationBackend`, or using a specified
        set of compute resources.

        Parameters
        ----------
        root_directory: str
            The directory to execute the graph in.
        calculation_backend: CalculationBackend, optional.
            The backend to execute the graph on. This parameter
            is mutually exclusive with `compute_resources`.
        compute_resources: CalculationBackend, optional.
            The compute resources to run using. This parameter
            is mutually exclusive with `calculation_backend`.
        enable_checkpointing: bool
            If enabled, protocols will not be executed more than once if the
            output from their previous execution is found.
        safe_exceptions: bool
            If true, exceptions will be serialized into the results file rather
            than directly raised, otherwise, the exception will be raised as
            normal.

        Returns
        -------
        dict of str and str or Future:
            The paths to the JSON serialized outputs of the executed protocols.
            If executed using a calculation backend, these will be `Future` objects
            which will return the output paths on calling `future.result()`.
        """
        if len(root_directory) > 0:
            os.makedirs(root_directory, exist_ok=True)
        # Exactly one of `calculation_backend` / `compute_resources` must be set.
        assert (calculation_backend is None and compute_resources is not None) or (
            calculation_backend is not None and compute_resources is None
        )
        # Determine the order in which to submit the protocols, such
        # that all dependencies are satisfied.
        dependants_graph = self._build_dependants_graph(
            self._protocols_by_id, False, False
        )
        execution_order = graph.topological_sort(dependants_graph)
        # Build a dependency graph from the dependants graph so that
        # futures can easily be passed in the correct place.
        dependencies = graph.dependants_to_dependencies(dependants_graph)
        protocol_outputs = {}
        for protocol_id in execution_order:
            protocol = self._protocols_by_id[protocol_id]
            parent_outputs = []
            for dependency in dependencies[protocol_id]:
                parent_outputs.append(protocol_outputs[dependency])
            # Sanitise the protocol id into a usable directory name.
            directory_name = protocol_id.replace("|", "_")
            directory_name = directory_name.replace(":", "_")
            directory_name = directory_name.replace(";", "_")
            directory = os.path.join(root_directory, directory_name)
            if calculation_backend is not None:
                # Submit asynchronously; the schema JSON (not the protocol
                # object) is sent so the task is serializable.
                protocol_outputs[protocol_id] = calculation_backend.submit_task(
                    ProtocolGraph._execute_protocol,
                    directory,
                    protocol.schema.json(),
                    enable_checkpointing,
                    *parent_outputs,
                    safe_exceptions=safe_exceptions,
                )
            else:
                protocol_outputs[protocol_id] = ProtocolGraph._execute_protocol(
                    directory,
                    protocol,
                    enable_checkpointing,
                    *parent_outputs,
                    available_resources=compute_resources,
                    safe_exceptions=safe_exceptions,
                )
        return protocol_outputs

    @staticmethod
    def _execute_protocol(
        directory,
        protocol,
        enable_checkpointing,
        *previous_output_paths,
        available_resources,
        safe_exceptions,
        **_,
    ):
        """Executes a single protocol, either passed directly or as a
        JSON serialized schema.

        Parameters
        ----------
        directory: str
            The directory to execute the protocol in.
        protocol: Protocol or str
            Either the protocol to execute, or the JSON schema which defines
            the protocol to execute.
        enable_checkpointing: bool
            If enabled, the protocol will not be executed again if the
            output of its previous execution is found.
        previous_output_paths: tuple of tuple of str and str
            Pairs of (parent protocol id, path to that parent's serialized
            outputs) for the protocols which the protocol to execute
            depends on.
        available_resources: ComputeResources
            The resources available to execute on.
        safe_exceptions: bool
            If true, exceptions will be serialized into the results file rather
            than directly raised, otherwise, the exception will be raised as
            normal.

        Returns
        -------
        str
            The id of the executed protocol.
        str
            The path to the JSON serialized output of the executed
            protocol.
        """
        if isinstance(protocol, str):
            protocol_schema = ProtocolSchema.parse_json(protocol)
            protocol = protocol_schema.to_protocol()
        # The path where the output of this protocol will be stored.
        os.makedirs(directory, exist_ok=True)
        output_path = os.path.join(directory, f"{protocol.id}_output.json")
        # We need to make sure ALL exceptions are handled within this method,
        # to avoid accidentally killing the compute backend / server.
        try:
            # Check if the output of this protocol already exists and
            # whether we allow returning the found output.
            if os.path.isfile(output_path) and enable_checkpointing:
                with open(output_path) as file:
                    outputs = json.load(file, cls=TypedJSONDecoder)
                # A previously serialized failure is ignored so the protocol
                # is re-attempted.
                if not isinstance(outputs, WorkflowException):
                    for protocol_path, output in outputs.items():
                        protocol_path = ProtocolPath.from_string(protocol_path)
                        protocol.set_value(protocol_path, output)
                    return protocol.id, output_path
            # Store the results of the relevant previous protocols in a
            # convenient dictionary.
            previous_outputs = {}
            for parent_id, previous_output_path in previous_output_paths:
                with open(previous_output_path) as file:
                    parent_output = json.load(file, cls=TypedJSONDecoder)
                # If one of the results is a failure exit early and propagate
                # the exception up the graph.
                if isinstance(parent_output, EvaluatorException):
                    return protocol.id, previous_output_path
                for protocol_path, output_value in parent_output.items():
                    protocol_path = ProtocolPath.from_string(protocol_path)
                    # Qualify each output path with the id of the parent which
                    # produced it so look-ups below are unambiguous.
                    if (
                        protocol_path.start_protocol is None
                        or protocol_path.start_protocol != parent_id
                    ):
                        protocol_path.prepend_protocol_id(parent_id)
                    previous_outputs[protocol_path] = output_value
            # Pass the outputs of previously executed protocols as input to the
            # protocol to execute.
            for input_path in protocol.required_inputs:
                value_references = protocol.get_value_references(input_path)
                for source_path, target_path in value_references.items():
                    if (
                        target_path.start_protocol == input_path.start_protocol
                        or target_path.start_protocol == protocol.id
                        or target_path.start_protocol is None
                    ):
                        # The protocol takes input from itself / a nested protocol.
                        # This is handled by the protocol directly so we can skip here.
                        continue
                    property_name = target_path.property_name
                    property_index = None
                    nested_property_name = None
                    # Split off any nested attribute access (`a.b.c`) ...
                    if property_name.find(".") > 0:
                        nested_property_name = ".".join(property_name.split(".")[1:])
                        property_name = property_name.split(".")[0]
                    # ... and any list / dict indexing (`a[0]` / `a[key]`).
                    if property_name.find("[") >= 0 or property_name.find("]") >= 0:
                        property_name, property_index = extract_variable_index_and_name(
                            property_name
                        )
                    target_protocol_ids = target_path.protocol_ids
                    target_value = previous_outputs[
                        ProtocolPath(property_name, *target_protocol_ids)
                    ]
                    if property_index is not None:
                        target_value = target_value[property_index]
                    if nested_property_name is not None:
                        target_value = get_nested_attribute(
                            target_value, nested_property_name
                        )
                    protocol.set_value(source_path, target_value)
            logger.info(f"Executing {protocol.id}")
            start_time = time.perf_counter()
            protocol.execute(directory, available_resources)
            output = {key.full_path: value for key, value in protocol.outputs.items()}
            output = json.dumps(output, cls=TypedJSONEncoder)
            end_time = time.perf_counter()
            execution_time = (end_time - start_time) * 1000
            logger.info(f"{protocol.id} finished executing after {execution_time} ms")
        except Exception as e:
            logger.info(f"Protocol failed to execute: {protocol.id}")
            if not safe_exceptions:
                raise
            exception = WorkflowException.from_exception(e)
            exception.protocol_id = protocol.id
            output = exception.json()
        # Persist either the successful outputs or the serialized failure so
        # dependants (and future checkpointed runs) can read the result.
        with open(output_path, "w") as file:
            file.write(output)
        return protocol.id, output_path
@workflow_protocol()
class ProtocolGroup(Protocol):
    """A group of workflow protocols to be executed in one batch.

    This may be used for example to cluster together multiple protocols
    that will execute in a linear chain so that multiple scheduler
    execution calls are reduced into a single one.

    Additionally, a group may provide enhanced behaviour, for example
    running all protocols within the group self consistently until
    a given condition is met (e.g run a simulation until a given observable
    has converged).
    """

    @property
    def required_inputs(self):
        """list of ProtocolPath: The inputs which must be set on this protocol."""
        required_inputs = super(ProtocolGroup, self).required_inputs
        # Pull each of an individual protocols inputs up so that they
        # become a required input of the group.
        for protocol in self._protocols:
            for input_path in protocol.required_inputs:
                # Copy before re-rooting so the child's own paths are untouched.
                input_path = input_path.copy()
                if input_path.start_protocol != protocol.id:
                    input_path.prepend_protocol_id(protocol.id)
                input_path.prepend_protocol_id(self.id)
                required_inputs.append(input_path)
        return required_inputs

    @property
    def dependencies(self):
        """list of ProtocolPath: A list of pointers to the protocols which this
        protocol takes input from.
        """
        dependencies = super(ProtocolGroup, self).dependencies
        # Remove child dependencies — dependencies between the grouped
        # protocols are internal to the group and resolved by the inner graph.
        dependencies = [
            x for x in dependencies if x.start_protocol not in self.protocols
        ]
        return dependencies

    @property
    def outputs(self):
        """dict of ProtocolPath and Any: A dictionary of the outputs of this property."""
        outputs = super(ProtocolGroup, self).outputs
        # Expose each child protocol's outputs, re-rooted so the paths are
        # addressed relative to this group.
        for protocol in self._protocols:
            for output_path in protocol.outputs:
                output_value = protocol.get_value(output_path)
                output_path = output_path.copy()
                if output_path.start_protocol != protocol.id:
                    output_path.prepend_protocol_id(protocol.id)
                output_path.prepend_protocol_id(self.id)
                outputs[output_path] = output_value
        return outputs

    @property
    def protocols(self):
        """dict of str and Protocol: A dictionary of the protocols in
        this groups, where the dictionary key is the protocol id, and
        the value is the protocol itself.

        Notes
        -----
        This property should *not* be altered. Use `add_protocols` to
        add new protocols to the group.
        """
        return {protocol.id: protocol for protocol in self._protocols}

    def __init__(self, protocol_id):
        """Constructs a new ProtocolGroup."""
        super().__init__(protocol_id)
        # The child protocols, in insertion order.
        self._protocols = []
        # The graph used to resolve dependencies between, and to execute,
        # the child protocols.
        self._inner_graph = ProtocolGraph()
        # Whether the inner graph should write checkpoint files so that a
        # partially executed group can later be resumed.
        self._enable_checkpointing = True

    def _get_schema(self, schema_type=ProtocolGroupSchema, *args):
        """Returns the schema representation of this group, bundling in the
        schema of every child protocol.
        """
        protocol_schemas = {x.id: x.schema for x in self._protocols}
        schema = super(ProtocolGroup, self)._get_schema(
            schema_type, protocol_schemas, *args
        )
        return schema

    def _set_schema(self, schema_value):
        """
        Parameters
        ----------
        schema_value: ProtocolGroupSchema
            The schema from which this group should take its properties.
        """
        super(ProtocolGroup, self)._set_schema(schema_value)
        # Rebuild the child protocols (and hence the inner graph) from
        # scratch using the schemas stored on the group schema.
        self._protocols = []
        self._inner_graph = ProtocolGraph()
        protocols_to_add = []
        for protocol_schema in schema_value.protocol_schemas.values():
            protocol = Protocol.from_schema(protocol_schema)
            protocols_to_add.append(protocol)
        self.add_protocols(*protocols_to_add)

    def add_protocols(self, *protocols):
        """Add protocols to this group.

        Parameters
        ----------
        protocols: Protocol
            The protocols to add.

        Raises
        ------
        ValueError
            If a protocol with the same id is already in the group.
        """
        for protocol in protocols:
            if protocol.id in self.protocols:
                raise ValueError(
                    f"The {self.id} group already contains a protocol "
                    f"with id {protocol.id}."
                )
            self._protocols.append(protocol)
        # External dependencies are allowed as children may take input from
        # protocols outside of this group.
        self._inner_graph.add_protocols(*protocols, allow_external_dependencies=True)

    def set_uuid(self, value):
        """Store the uuid of the calculation this protocol belongs to

        Parameters
        ----------
        value : str
            The uuid of the parent calculation.
        """
        for protocol in self._protocols:
            protocol.set_uuid(value)
        super(ProtocolGroup, self).set_uuid(value)
        # Rebuild the inner graph as the child protocol ids have changed.
        self._inner_graph = ProtocolGraph()
        self._inner_graph.add_protocols(
            *self._protocols, allow_external_dependencies=True
        )

    def replace_protocol(self, old_id, new_id):
        """Finds each input which came from a given protocol
        and redirects it to instead take input from a different one.

        Parameters
        ----------
        old_id : str
            The id of the old input protocol.
        new_id : str
            The id of the new input protocol.
        """
        for protocol in self._protocols:
            protocol.replace_protocol(old_id, new_id)
        super(ProtocolGroup, self).replace_protocol(old_id, new_id)
        # Rebuild the inner graph
        # NOTE(review): child ids have already been replaced above, so
        # `old_id` may no longer appear in `self.protocols` here — confirm
        # this rebuild triggers when intended.
        if old_id in self.protocols:
            self._inner_graph = ProtocolGraph()
            self._inner_graph.add_protocols(
                *self._protocols, allow_external_dependencies=True
            )

    def _execute(self, directory, available_resources):
        # Update the inputs of protocols which require values
        # from the protocol group itself (i.e. the current
        # iteration from a conditional group).
        for required_input in self.required_inputs:
            if required_input.start_protocol != self.id:
                continue
            value_references = self.get_value_references(required_input)
            if len(value_references) == 0:
                continue
            for input_path, value_reference in value_references.items():
                # NOTE(review): comparing `protocol_path` (rather than
                # `start_protocol`) against the group id looks suspicious —
                # confirm this filter is intended.
                if (
                    value_reference.protocol_path != self.id
                    and value_reference.start_protocol is not None
                ):
                    continue
                value = self.get_value(value_reference)
                self.set_value(input_path, value)
        # Delegate the actual execution to the inner graph; exceptions are
        # raised directly (`safe_exceptions=False`) so the group's caller
        # can handle them.
        self._inner_graph.execute(
            directory,
            compute_resources=available_resources,
            enable_checkpointing=self._enable_checkpointing,
            safe_exceptions=False,
        )

    def can_merge(self, other, path_replacements=None):
        """Determines whether this group can safely be merged with `other`.

        Two groups may merge when the base protocol checks pass and at least
        one root protocol from each inner graph can itself be merged.
        """
        if path_replacements is None:
            path_replacements = []
        # Treat paths rooted at the other group as if rooted at this one.
        path_replacements.append((other.id, self.id))
        if not isinstance(other, ProtocolGroup):
            return False
        if not super(ProtocolGroup, self).can_merge(other, path_replacements):
            return False
        # Ensure that at least one root protocol from each group can be merged.
        for self_id in self._inner_graph.root_protocols:
            for other_id in other._inner_graph.root_protocols:
                if self.protocols[self_id].can_merge(
                    other.protocols[other_id], path_replacements
                ):
                    return True
        return False

    def merge(self, other):
        """Merges the protocols of `other` into this group, returning a map
        from the ids of any merged protocols to the ids they merged into.
        """
        assert isinstance(other, ProtocolGroup)
        merged_ids = super(ProtocolGroup, self).merge(other)
        # Update the protocol ids of the other grouped protocols.
        for protocol in other.protocols.values():
            protocol.replace_protocol(other.id, self.id)
        # Merge the two groups using the inner protocol graph.
        new_merged_ids = self._inner_graph.add_protocols(
            *other.protocols.values(), allow_external_dependencies=True
        )
        # Replace the original protocol list with the new one.
        self._protocols = list(self._inner_graph.protocols.values())
        merged_ids.update(new_merged_ids)
        return merged_ids

    def get_value_references(self, input_path):
        """Returns the `ProtocolPath` references of an input, re-rooting any
        references into child protocols so they are relative to this group.
        """
        values_references = super(ProtocolGroup, self).get_value_references(input_path)
        for key, value_reference in values_references.items():
            if value_reference.start_protocol not in self.protocols:
                continue
            value_reference = value_reference.copy()
            value_reference.prepend_protocol_id(self.id)
            values_references[key] = value_reference
        return values_references

    def _get_next_in_path(self, reference_path):
        """Returns the id of the next protocol in a protocol path,
        making sure that the targeted protocol is within this group.

        Parameters
        ----------
        reference_path: ProtocolPath
            The path being traversed.

        Returns
        -------
        str
            The id of the next protocol in the path.
        ProtocolPath
            The remainder of the path to be traversed.

        Raises
        ------
        ValueError
            If the path does not target this group or one of its children.
        """
        # Make a copy of the path so we can alter it safely.
        reference_path_clone = copy.deepcopy(reference_path)
        if reference_path.start_protocol == self.id:
            reference_path_clone.pop_next_in_path()
        target_protocol_id = reference_path_clone.pop_next_in_path()
        if target_protocol_id not in self.protocols:
            raise ValueError(
                "The reference path does not target this protocol "
                "or any of its children."
            )
        return target_protocol_id, reference_path_clone

    def get_class_attribute(self, reference_path):
        """Returns the class attribute targeted by `reference_path`,
        recursing into the child protocols when the path points inside
        the group.
        """
        if (
            len(reference_path.protocol_path) == 0
            or reference_path.protocol_path == self.id
        ):
            return super(ProtocolGroup, self).get_class_attribute(reference_path)
        target_protocol_id, truncated_path = self._get_next_in_path(reference_path)
        return self.protocols[target_protocol_id].get_class_attribute(truncated_path)

    def get_value(self, reference_path):
        """Returns the value targeted by `reference_path`, recursing into the
        child protocols when the path points inside the group.
        """
        if (
            len(reference_path.protocol_path) == 0
            or reference_path.protocol_path == self.id
        ):
            return super(ProtocolGroup, self).get_value(reference_path)
        target_protocol_id, truncated_path = self._get_next_in_path(reference_path)
        return self.protocols[target_protocol_id].get_value(truncated_path)

    def set_value(self, reference_path, value):
        """Sets the value targeted by `reference_path`, recursing into the
        child protocols when the path points inside the group.
        """
        if (
            len(reference_path.protocol_path) == 0
            or reference_path.protocol_path == self.id
        ):
            return super(ProtocolGroup, self).set_value(reference_path, value)
        # A value which is itself a path rooted at this group must be
        # re-rooted so the child protocol can resolve it.
        if isinstance(value, ProtocolPath) and value.start_protocol == self.id:
            value.pop_next_in_path()
        target_protocol_id, truncated_path = self._get_next_in_path(reference_path)
        return self.protocols[target_protocol_id].set_value(truncated_path, value)

    def apply_replicator(
        self,
        replicator,
        template_values,
        template_index=-1,
        template_value=None,
        update_input_references=False,
    ):
        """Applies a protocol replicator to the child protocols and
        re-initializes the group with the replicated set.

        Returns
        -------
        dict
            The replication map produced by `replicator.apply`.

        Raises
        ------
        ValueError
            If a specific template index / value is combined with
            `update_input_references=True`.
        """
        protocols, replication_map = replicator.apply(
            self.protocols, template_values, template_index, template_value
        )
        if (
            template_index >= 0 or template_value is not None
        ) and update_input_references is True:
            raise ValueError(
                "Specific template indices and values cannot be passed "
                "when `update_input_references` is True"
            )
        if update_input_references:
            replicator.update_references(protocols, replication_map, template_values)
        # Re-initialize the group using the replicated protocols.
        self._protocols = []
        self._inner_graph = ProtocolGraph()
        self.add_protocols(*protocols.values())
        return replication_map
|
jaketanderson/openff-evaluator | openff/evaluator/plugins.py | """
A collection of convenience utilities for loading the built-in
'plugins', such as workflow protocols, calculation layers and
physical properties.
"""
import importlib
import logging
import pkgutil
import pkg_resources
logger = logging.getLogger(__name__)
def register_default_plugins():
    """Registers the built-in workflow protocols, calculation layers and
    physical properties with the plugin system.
    """
    # Registration happens as an import side effect, so importing each
    # module is sufficient.
    for builtin_module in (
        # The default properties.
        "openff.evaluator.properties",
        # The default layers.
        "openff.evaluator.layers.simulation",
        "openff.evaluator.layers.reweighting",
    ):
        importlib.import_module(builtin_module)

    # Import every sub-module of the protocols package so that each of the
    # default workflow protocols registers itself.
    protocols_module = importlib.import_module("openff.evaluator.protocols")

    for submodule in pkgutil.iter_modules(protocols_module.__path__):
        importlib.import_module(f"openff.evaluator.protocols.{submodule.name}")
def register_external_plugins():
    """Registers any supported plugins found in external packages with the
    plugin system.
    """
    plugin_entry_points = pkg_resources.iter_entry_points("openff_evaluator.plugins")

    for entry_point in plugin_entry_points:
        try:
            entry_point.load()
        except ImportError:
            # A broken plugin should not prevent the remaining ones from
            # being loaded; log the failure and carry on.
            logger.exception(f"Could not load the {entry_point} plugin")
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_workflow/test_protocols.py | <gh_stars>10-100
"""
Unit tests for openff.evaluator.workflow
"""
import json
import os
import tempfile
import pytest
from openff.units import unit
from openff.evaluator.attributes import UNDEFINED
from openff.evaluator.backends import ComputeResources
from openff.evaluator.backends.dask import DaskLocalCluster
from openff.evaluator.protocols.miscellaneous import AddValues, DummyProtocol
from openff.evaluator.utils.serialization import TypedJSONDecoder
from openff.evaluator.workflow import (
Protocol,
ProtocolGraph,
ProtocolGroup,
workflow_protocol,
)
from openff.evaluator.workflow.utils import ProtocolPath
@workflow_protocol()
class ExceptionProtocol(Protocol):
    """A protocol which always raises, used to test error propagation."""

    def _execute(self, directory, available_resources):
        raise RuntimeError()
def test_nested_protocol_paths():
    """Test getting / setting values through nested ``ProtocolPath`` references,
    including attribute access, list indexing and dictionary keys.
    """
    value_protocol_a = DummyProtocol("protocol_a")
    value_protocol_a.input_value = (1 * unit.kelvin).plus_minus(0.1 * unit.kelvin)

    # A nested path (`input_value.value`) should resolve to the attribute.
    assert (
        value_protocol_a.get_value(ProtocolPath("input_value.value"))
        == value_protocol_a.input_value.value
    )

    value_protocol_b = DummyProtocol("protocol_b")
    value_protocol_b.input_value = (2 * unit.kelvin).plus_minus(0.05 * unit.kelvin)
    value_protocol_c = DummyProtocol("protocol_c")
    value_protocol_c.input_value = (4 * unit.kelvin).plus_minus(0.01 * unit.kelvin)

    add_values_protocol = AddValues("add_values")
    add_values_protocol.values = [
        ProtocolPath("output_value", value_protocol_a.id),
        ProtocolPath("output_value", value_protocol_b.id),
        ProtocolPath("output_value", value_protocol_b.id),
        5,
    ]

    # Both a misspelled attribute name and an invalid index should raise.
    with pytest.raises(ValueError):
        add_values_protocol.get_value(ProtocolPath("valus[string]"))
    with pytest.raises(ValueError):
        add_values_protocol.get_value(ProtocolPath("values[string]"))

    input_values = add_values_protocol.get_value_references(ProtocolPath("values"))
    # Only the three `ProtocolPath` entries count as references; the
    # literal 5 does not.
    assert isinstance(input_values, dict) and len(input_values) == 3

    for index, value_reference in enumerate(input_values):
        input_value = add_values_protocol.get_value(value_reference)
        assert input_value.full_path == add_values_protocol.values[index].full_path
        add_values_protocol.set_value(value_reference, index)

    assert set(add_values_protocol.values) == {0, 1, 2, 5}

    dummy_dict_protocol = DummyProtocol("dict_protocol")
    dummy_dict_protocol.input_value = {
        "value_a": ProtocolPath("output_value", value_protocol_a.id),
        "value_b": ProtocolPath("output_value", value_protocol_b.id),
    }

    input_values = dummy_dict_protocol.get_value_references(ProtocolPath("input_value"))
    assert isinstance(input_values, dict) and len(input_values) == 2

    for index, value_reference in enumerate(input_values):
        input_value = dummy_dict_protocol.get_value(value_reference)
        dummy_dict_keys = list(dummy_dict_protocol.input_value.keys())
        assert (
            input_value.full_path
            == dummy_dict_protocol.input_value[dummy_dict_keys[index]].full_path
        )
        dummy_dict_protocol.set_value(value_reference, index)

    add_values_protocol_2 = AddValues("add_values")
    add_values_protocol_2.values = [
        [ProtocolPath("output_value", value_protocol_a.id)],
        [
            ProtocolPath("output_value", value_protocol_b.id),
            ProtocolPath("output_value", value_protocol_b.id),
        ],
    ]

    with pytest.raises(ValueError):
        add_values_protocol_2.get_value(ProtocolPath("valus[string]"))
    # Fix: this check previously (and redundantly) targeted
    # `add_values_protocol` rather than the newly built `add_values_protocol_2`.
    with pytest.raises(ValueError):
        add_values_protocol_2.get_value(ProtocolPath("values[string]"))
def build_merge(prefix):
    """Construct a diamond-style graph of dummy protocols.

    Layout::

        a - b \\
        |      - e - f
        c - d /
    """
    node_a = DummyProtocol(prefix + "protocol_a")
    node_a.input_value = 1

    node_b = DummyProtocol(prefix + "protocol_b")
    node_b.input_value = ProtocolPath("output_value", node_a.id)

    node_c = DummyProtocol(prefix + "protocol_c")
    node_c.input_value = 2

    node_d = DummyProtocol(prefix + "protocol_d")
    node_d.input_value = ProtocolPath("output_value", node_c.id)

    # `e` merges the two independent chains before feeding `f`.
    node_e = DummyProtocol(prefix + "protocol_e")
    node_e.input_value = [
        ProtocolPath("output_value", node_b.id),
        ProtocolPath("output_value", node_d.id),
    ]

    node_f = DummyProtocol(prefix + "protocol_f")
    node_f.input_value = ProtocolPath("output_value", node_e.id)

    return [node_a, node_b, node_c, node_d, node_e, node_f]
def build_fork(prefix):
    """Construct a forked graph of dummy protocols.

    Layout::

            / i - j
        g - h
            \\ k - l
    """
    node_g = DummyProtocol(prefix + "protocol_g")
    node_g.input_value = 3

    def _chained(name, upstream):
        # Create a protocol whose input is the output of `upstream`.
        node = DummyProtocol(prefix + name)
        node.input_value = ProtocolPath("output_value", upstream.id)
        return node

    node_h = _chained("protocol_h", node_g)
    node_i = _chained("protocol_i", node_h)
    node_j = _chained("protocol_j", node_i)
    node_k = _chained("protocol_k", node_h)
    node_l = _chained("protocol_l", node_k)

    return [node_g, node_h, node_i, node_j, node_k, node_l]
def build_easy_graph():
    """Return two trivial, single-protocol graphs."""
    graphs = []

    for protocol_name in ("protocol_a", "protocol_b"):
        protocol = DummyProtocol(protocol_name)
        protocol.input_value = 1
        graphs.append([protocol])

    return tuple(graphs)
def build_medium_graph():
    """Return two independent copies of the combined merge + fork layouts.

    Layout (per copy)::

        a - b \\
        |      - e - f
        c - d /

            / i - j
        g - h
            \\ k - l
    """

    def _graph(prefix):
        return [*build_merge(prefix), *build_fork(prefix)]

    return _graph("_a"), _graph("_b")
def build_hard_graph():
    """Return two copies of a merge graph chained into a fork graph.

    Layout (per copy)::

        a - b \\             / i - j
        |      - e - f - g - h
        c - d /             \\ k - l
    """

    def _assemble(prefix):
        merge_protocols = build_merge(prefix)
        fork_protocols = build_fork(prefix)
        # Chain the fork onto the tail (protocol_f) of the merge section.
        fork_protocols[0].input_value = ProtocolPath(
            "output_value", prefix + "protocol_f"
        )
        return [*merge_protocols, *fork_protocols]

    return _assemble("a_"), _assemble("b_")
@pytest.mark.parametrize(
    "protocols_a, protocols_b",
    [build_easy_graph(), build_medium_graph(), build_hard_graph()],
)
def test_protocol_graph_simple(protocols_a, protocols_b):
    """Adding an identical set of protocols twice should merge them into the
    existing graph, while adding both sets in a single call should not.
    """
    # Make sure that the graph can merge simple protocols
    # when they are added one after the other.
    protocol_graph = ProtocolGraph()
    protocol_graph.add_protocols(*protocols_a)

    dependants_graph = protocol_graph._build_dependants_graph(
        protocol_graph.protocols, False, apply_reduction=True
    )

    assert len(protocol_graph.protocols) == len(protocols_a)
    assert len(dependants_graph) == len(protocols_a)

    n_root_protocols = len(protocol_graph.root_protocols)

    protocol_graph.add_protocols(*protocols_b)

    dependants_graph = protocol_graph._build_dependants_graph(
        protocol_graph.protocols, False, apply_reduction=False
    )

    # The second, identical set should have merged into the first, leaving
    # the graph unchanged in size.
    assert len(protocol_graph.protocols) == len(protocols_a)
    assert len(dependants_graph) == len(protocols_a)
    assert len(protocol_graph.root_protocols) == n_root_protocols

    # Currently the graph shouldn't merge with an
    # addition
    protocol_graph = ProtocolGraph()
    protocol_graph.add_protocols(*protocols_a, *protocols_b)

    dependants_graph = protocol_graph._build_dependants_graph(
        protocol_graph.protocols, False, apply_reduction=False
    )

    assert len(protocol_graph.protocols) == len(protocols_a) + len(protocols_b)
    assert len(dependants_graph) == len(protocols_a) + len(protocols_b)
    assert len(protocol_graph.root_protocols) == 2 * n_root_protocols
@pytest.mark.parametrize(
    "calculation_backend, compute_resources",
    [(DaskLocalCluster(), None), (None, ComputeResources())],
)
def test_protocol_graph_execution(calculation_backend, compute_resources):
    """A graph should execute correctly both through a calculation backend
    (which returns futures) and directly with local compute resources.
    """
    if calculation_backend is not None:
        calculation_backend.start()

    protocol_a = DummyProtocol("protocol_a")
    protocol_a.input_value = 1
    protocol_b = DummyProtocol("protocol_b")
    protocol_b.input_value = ProtocolPath("output_value", protocol_a.id)

    protocol_graph = ProtocolGraph()
    protocol_graph.add_protocols(protocol_a, protocol_b)

    with tempfile.TemporaryDirectory() as directory:
        results = protocol_graph.execute(
            directory, calculation_backend, compute_resources
        )

        final_result = results[protocol_b.id]

        # When executed through a backend the result is a future which must
        # be resolved before use.
        if calculation_backend is not None:
            final_result = final_result.result()

        # The second element of the result is the path to the serialized
        # outputs of the final protocol.
        with open(final_result[1]) as file:
            results_b = json.load(file, cls=TypedJSONDecoder)

    assert results_b[".output_value"] == protocol_a.input_value

    # Only the direct (in-process) execution mutates the local protocol objects.
    if compute_resources is not None:
        assert protocol_b.output_value == protocol_a.input_value

    if calculation_backend is not None:
        calculation_backend.stop()
def test_protocol_group_merging():
    """Adding two structurally identical graphs (each containing a protocol
    group) should merge them into one, leaving the group schema unchanged.
    """

    def build_protocols(prefix):
        #     .-------------------.
        #     |        / i - j -|- b
        # a - |  g - h -        |
        #     |        \ k - l -|- c
        #     .-------------------.
        protocol_a = DummyProtocol(prefix + "protocol_a")
        protocol_a.input_value = 1
        fork_protocols = build_fork(prefix)
        fork_protocols[0].input_value = ProtocolPath("output_value", protocol_a.id)
        protocol_group = ProtocolGroup(prefix + "protocol_group")
        protocol_group.add_protocols(*fork_protocols)
        # NOTE(review): the child ids referenced here are unprefixed while
        # `build_fork` prefixes its protocol ids — confirm these paths
        # resolve as intended.
        protocol_b = DummyProtocol(prefix + "protocol_b")
        protocol_b.input_value = ProtocolPath(
            "output_value", protocol_group.id, "protocol_j"
        )
        protocol_c = DummyProtocol(prefix + "protocol_c")
        protocol_c.input_value = ProtocolPath(
            "output_value", protocol_group.id, "protocol_l"
        )
        return [protocol_a, protocol_group, protocol_b, protocol_c]

    protocols_a = build_protocols("a_")
    protocols_b = build_protocols("b_")

    protocol_graph = ProtocolGraph()
    protocol_graph.add_protocols(*protocols_a)
    protocol_graph.add_protocols(*protocols_b)

    # The second set should have merged entirely into the first.
    assert len(protocol_graph.protocols) == len(protocols_a)
    assert "a_protocol_group" in protocol_graph.protocols

    original_protocol_group = protocols_a[1]
    merged_protocol_group = protocol_graph.protocols["a_protocol_group"]

    # Merging must not alter the group's schema.
    assert original_protocol_group.schema.json() == merged_protocol_group.schema.json()
def test_protocol_group_execution():
    """A group should execute its children and expose their outputs."""
    source_protocol = DummyProtocol("protocol_a")
    source_protocol.input_value = 1

    chained_protocol = DummyProtocol("protocol_b")
    chained_protocol.input_value = ProtocolPath("output_value", source_protocol.id)

    group = ProtocolGroup("protocol_group")
    group.add_protocols(source_protocol, chained_protocol)

    with tempfile.TemporaryDirectory() as scratch_directory:
        group.execute(scratch_directory, ComputeResources())

    # The chained protocol's output should be reachable through the group.
    result_path = ProtocolPath("output_value", group.id, chained_protocol.id)
    assert group.get_value(result_path) == source_protocol.input_value
def test_protocol_group_exceptions():
    """An exception raised by a child protocol should propagate out of the group."""
    failing_protocol = ExceptionProtocol("exception_protocol")

    group = ProtocolGroup("protocol_group")
    group.add_protocols(failing_protocol)

    with tempfile.TemporaryDirectory() as scratch_directory:
        with pytest.raises(RuntimeError):
            group.execute(scratch_directory, ComputeResources())
def test_protocol_group_resume():
    """A test that protocol groups can recover after being killed
    (e.g. by a worker being killed due to hitting a wallclock limit)
    """
    compute_resources = ComputeResources()

    # Run everything inside a scratch directory so the test does not leave a
    # `graph_a` folder behind in the current working directory.
    with tempfile.TemporaryDirectory() as directory:
        graph_directory = os.path.join(directory, "graph_a")

        # Fake a protocol group which executes the first
        # two protocols and then 'gets killed'.
        protocol_a = DummyProtocol("protocol_a")
        protocol_a.input_value = 1
        protocol_b = DummyProtocol("protocol_b")
        protocol_b.input_value = ProtocolPath("output_value", protocol_a.id)

        protocol_group_a = ProtocolGroup("group_a")
        protocol_group_a.add_protocols(protocol_a, protocol_b)

        protocol_graph = ProtocolGraph()
        protocol_graph.add_protocols(protocol_group_a)
        protocol_graph.execute(graph_directory, compute_resources=compute_resources)

        # Remove the output file so it appears that the protocol group had
        # not completed.
        os.unlink(
            os.path.join(
                graph_directory,
                protocol_group_a.id,
                f"{protocol_group_a.id}_output.json",
            )
        )

        # Build the 'full' group with the last two protocols which
        # 'had not been executed' after the group was 'killed'
        protocol_a = DummyProtocol("protocol_a")
        protocol_a.input_value = 1
        protocol_b = DummyProtocol("protocol_b")
        protocol_b.input_value = ProtocolPath("output_value", protocol_a.id)
        protocol_c = DummyProtocol("protocol_c")
        protocol_c.input_value = ProtocolPath("output_value", protocol_b.id)
        protocol_d = DummyProtocol("protocol_d")
        protocol_d.input_value = ProtocolPath("output_value", protocol_c.id)

        protocol_group_a = ProtocolGroup("group_a")
        protocol_group_a.add_protocols(
            protocol_a, protocol_b, protocol_c, protocol_d
        )

        protocol_graph = ProtocolGraph()
        protocol_graph.add_protocols(protocol_group_a)
        # Re-executing in the same directory should resume from the partial
        # results rather than starting over.
        protocol_graph.execute(graph_directory, compute_resources=compute_resources)

        assert all(x != UNDEFINED for x in protocol_group_a.outputs.values())
|
jaketanderson/openff-evaluator | openff/evaluator/protocols/gradients.py | <reponame>jaketanderson/openff-evaluator
"""
A collection of protocols for calculating the gradients of observables with respect to
force field parameters.
"""
import abc
from typing import Union
import numpy
from openff.units import unit
from openff.units.openmm import from_openmm
from openff.evaluator.attributes import UNDEFINED
from openff.evaluator.forcefield import (
ForceFieldSource,
ParameterGradient,
SmirnoffForceFieldSource,
)
from openff.evaluator.utils.observables import Observable, ObservableArray
from openff.evaluator.workflow import Protocol, workflow_protocol
from openff.evaluator.workflow.attributes import InputAttribute, OutputAttribute
@workflow_protocol()
class ZeroGradients(Protocol, abc.ABC):
    """Zeros the gradients of an observable with respect to a specified set of
    force field parameters.
    """

    input_observables = InputAttribute(
        docstring="The observable to set the gradients of.",
        type_hint=Union[Observable, ObservableArray],
        default_value=UNDEFINED,
    )
    force_field_path = InputAttribute(
        # Fix: the docstring previously read "This is many used".
        docstring="The path to the force field which contains the parameters to "
        "differentiate the observable with respect to. This is mainly used to "
        "get the correct units for the parameters.",
        type_hint=str,
        default_value=UNDEFINED,
    )
    gradient_parameters = InputAttribute(
        docstring="The parameters to zero the gradient with respect to.",
        type_hint=list,
        default_value=lambda: list(),
    )

    output_observables = OutputAttribute(
        docstring="The observable with zeroed gradients.",
        type_hint=Union[Observable, ObservableArray],
    )

    def _execute(self, directory, available_resources):
        # OpenMM moved from the `simtk` namespace to a top level `openmm`
        # package; support both layouts.
        try:
            from openmm import unit as openmm_unit
        except ImportError:
            from simtk.openmm import unit as openmm_unit

        force_field_source = ForceFieldSource.from_json(self.force_field_path)

        if not isinstance(force_field_source, SmirnoffForceFieldSource):
            raise ValueError("Only SMIRNOFF force fields are supported.")

        force_field = force_field_source.to_force_field()

        def _get_parameter_unit(gradient_key):
            # Look up the unit of the force field parameter referenced by the
            # gradient key so the zeroed gradient carries compatible units.
            parameter = force_field.get_parameter_handler(gradient_key.tag)
            if gradient_key.smirks is not None:
                parameter = parameter.parameters[gradient_key.smirks]
            value = getattr(parameter, gradient_key.attribute)
            if isinstance(value, openmm_unit.Quantity):
                return from_openmm(value).units
            return unit.dimensionless

        parameter_units = {
            gradient_key: _get_parameter_unit(gradient_key)
            for gradient_key in self.gradient_parameters
        }

        # Drop any gradients already attached to the input before rebuilding
        # them as zeros.
        self.input_observables.clear_gradients()

        if isinstance(self.input_observables, Observable):
            self.output_observables = Observable(
                value=self.input_observables.value,
                gradients=[
                    ParameterGradient(
                        key=gradient_key,
                        value=(
                            0.0
                            * self.input_observables.value.units
                            / parameter_units[gradient_key]
                        ),
                    )
                    for gradient_key in self.gradient_parameters
                ],
            )
        elif isinstance(self.input_observables, ObservableArray):
            # For arrays the zero gradient must match the value's shape.
            self.output_observables = ObservableArray(
                value=self.input_observables.value,
                gradients=[
                    ParameterGradient(
                        key=gradient_key,
                        value=(
                            numpy.zeros(self.input_observables.value.shape)
                            * self.input_observables.value.units
                            / parameter_units[gradient_key]
                        ),
                    )
                    for gradient_key in self.gradient_parameters
                ],
            )
        else:
            raise NotImplementedError()
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_workflow/test_workflow.py | <gh_stars>10-100
"""
Unit tests for openff.evaluator.workflow
"""
import math
import os
import tempfile
import numpy
import pytest
from openff.toolkit.typing.engines.smirnoff import ForceField, VirtualSiteHandler
from openff.units import unit
from openff.evaluator.attributes import UNDEFINED
from openff.evaluator.backends import ComputeResources
from openff.evaluator.backends.dask import DaskLocalCluster
from openff.evaluator.forcefield import ParameterGradientKey, SmirnoffForceFieldSource
from openff.evaluator.properties import Density
from openff.evaluator.protocols.groups import ConditionalGroup
from openff.evaluator.protocols.miscellaneous import DummyProtocol
from openff.evaluator.substances import Substance
from openff.evaluator.tests.utils import create_dummy_property
from openff.evaluator.thermodynamics import ThermodynamicState
from openff.evaluator.workflow import (
ProtocolGroup,
Workflow,
WorkflowResult,
WorkflowSchema,
)
from openff.evaluator.workflow.schemas import ProtocolReplicator
from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue
@pytest.mark.parametrize(
    "calculation_backend, compute_resources, exception",
    [
        (None, None, False),
        (None, ComputeResources(number_of_threads=1), False),
        (DaskLocalCluster(), None, False),
        (DaskLocalCluster(), ComputeResources(number_of_threads=1), True),
    ],
)
def test_simple_workflow_graph(calculation_backend, compute_resources, exception):
    """A two protocol workflow should execute end to end, except when both a
    calculation backend and explicit compute resources are provided, which is
    expected to raise.
    """
    expected_value = (1 * unit.kelvin).plus_minus(0.1 * unit.kelvin)

    protocol_a = DummyProtocol("protocol_a")
    protocol_a.input_value = expected_value
    protocol_b = DummyProtocol("protocol_b")
    protocol_b.input_value = ProtocolPath("output_value", protocol_a.id)

    schema = WorkflowSchema()
    schema.protocol_schemas = [protocol_a.schema, protocol_b.schema]
    schema.final_value_source = ProtocolPath("output_value", protocol_b.id)
    schema.validate()

    workflow = Workflow({})
    workflow.schema = schema

    workflow_graph = workflow.to_graph()

    with tempfile.TemporaryDirectory() as directory:
        if calculation_backend is not None:
            # NOTE(review): the parametrized backend is discarded here and a
            # fresh cluster is created, shadowing `calculation_backend`.
            with DaskLocalCluster() as calculation_backend:
                if exception:
                    with pytest.raises(AssertionError):
                        workflow_graph.execute(
                            directory, calculation_backend, compute_resources
                        )
                    return
                else:
                    results_futures = workflow_graph.execute(
                        directory, calculation_backend, compute_resources
                    )
                assert len(results_futures) == 1
                result = results_futures[0].result()
        else:
            result = workflow_graph.execute(
                directory, calculation_backend, compute_resources
            )[0]
            # NOTE(review): this branch appears unreachable — the only
            # `exception=True` parametrization also supplies a backend and is
            # handled above.
            if exception:
                with pytest.raises(AssertionError):
                    workflow_graph.execute(
                        directory, calculation_backend, compute_resources
                    )
                return

    assert isinstance(result, WorkflowResult)
    assert result.value.value == expected_value.value
def test_workflow_with_groups():
    """A workflow whose final value is produced inside a conditional group
    should execute and return that value.
    """
    expected_value = (1 * unit.kelvin).plus_minus(0.1 * unit.kelvin)

    first_protocol = DummyProtocol("protocol_a")
    first_protocol.input_value = expected_value
    second_protocol = DummyProtocol("protocol_b")
    second_protocol.input_value = ProtocolPath("output_value", first_protocol.id)

    group = ConditionalGroup("conditional_group")
    group.add_protocols(first_protocol, second_protocol)

    # Condition: the chained output value must be less than 2 K.
    condition = ConditionalGroup.Condition()
    condition.right_hand_value = 2 * unit.kelvin
    condition.type = ConditionalGroup.Condition.Type.LessThan
    condition.left_hand_value = ProtocolPath(
        "output_value.value", group.id, second_protocol.id
    )
    group.add_condition(condition)

    schema = WorkflowSchema()
    schema.protocol_schemas = [group.schema]
    schema.final_value_source = ProtocolPath(
        "output_value", group.id, second_protocol.id
    )
    schema.validate()

    workflow = Workflow({})
    workflow.schema = schema

    workflow_graph = workflow.to_graph()

    with tempfile.TemporaryDirectory() as scratch_directory:
        with DaskLocalCluster() as calculation_backend:
            futures = workflow_graph.execute(scratch_directory, calculation_backend)
            assert len(futures) == 1
            result = futures[0].result()

    assert isinstance(result, WorkflowResult)
    assert result.value.value == expected_value.value
def test_nested_input():
    """Inputs should be able to reference nested attributes of dictionary
    entries (e.g. ``output_value[a].temperature``).
    """
    source_protocol = DummyProtocol("dict_protocol")
    source_protocol.input_value = {"a": ThermodynamicState(1.0 * unit.kelvin)}

    consumer_protocol = DummyProtocol("quantity_protocol")
    consumer_protocol.input_value = ProtocolPath(
        "output_value[a].temperature", source_protocol.id
    )

    schema = WorkflowSchema()
    schema.protocol_schemas = [source_protocol.schema, consumer_protocol.schema]
    schema.validate()

    workflow = Workflow({})
    workflow.schema = schema

    workflow_graph = workflow.to_graph()

    with tempfile.TemporaryDirectory() as scratch_directory:
        with DaskLocalCluster() as calculation_backend:
            futures = workflow_graph.execute(scratch_directory, calculation_backend)
            assert len(futures) == 1
            result = futures[0].result()

    assert isinstance(result, WorkflowResult)
def test_index_replicated_protocol():
    """Replicated protocols should be addressable by their replica index."""
    replicator = ProtocolReplicator("replicator")
    replicator.template_values = ["a", "b", "c", "d"]

    template_protocol = DummyProtocol(f"protocol_{replicator.placeholder_id}")
    template_protocol.input_value = ReplicatorValue(replicator.id)

    schema = WorkflowSchema()
    schema.protocol_replicators = [replicator]
    schema.protocol_schemas = [template_protocol.schema]

    # Add one protocol per replica which indexes into the replicated outputs.
    for replica_index, _ in enumerate(replicator.template_values):
        indexing_protocol = DummyProtocol(f"indexing_protocol_{replica_index}")
        indexing_protocol.input_value = ProtocolPath(
            "output_value", f"protocol_{replica_index}"
        )
        schema.protocol_schemas.append(indexing_protocol.schema)

    schema.validate()

    workflow = Workflow({})
    workflow.schema = schema
def test_from_schema():
    """A workflow rebuilt from a schema should round-trip to an identical schema."""
    dummy_protocol = DummyProtocol("protocol_a")
    dummy_protocol.input_value = 1 * unit.kelvin

    original_schema = WorkflowSchema()
    original_schema.protocol_schemas = [dummy_protocol.schema]

    built_workflow = Workflow.from_schema(original_schema, {}, unique_id="")
    assert built_workflow is not None

    round_tripped = built_workflow.schema
    round_tripped.outputs_to_store = UNDEFINED

    assert round_tripped.json(format=True) == original_schema.json(format=True)
def test_unique_ids():
    """Validation must reject a schema in which two groups share a child protocol id."""
    shared_protocol = DummyProtocol("protocol-a")
    shared_protocol.input_value = 1

    first_group = ProtocolGroup("group-a")
    first_group.add_protocols(shared_protocol)

    second_group = ProtocolGroup("group-b")
    second_group.add_protocols(shared_protocol)

    workflow_schema = WorkflowSchema()
    workflow_schema.protocol_schemas = [first_group.schema, second_group.schema]

    with pytest.raises(ValueError) as error_info:
        workflow_schema.validate()

    message = str(error_info.value)
    assert "Several protocols in the schema have the same id" in message
    assert "protocol-a" in message
def test_replicated_ids():
    """A replicated group whose children lack the replicator placeholder must fail validation."""
    protocol_replicator = ProtocolReplicator("replicator-a")

    child_protocol = DummyProtocol("protocol-a")
    child_protocol.input_value = 1

    # The group id carries the placeholder, but the child id does not.
    replicated_group = ProtocolGroup(f"group-a-{protocol_replicator.placeholder_id}")
    replicated_group.add_protocols(child_protocol)

    workflow_schema = WorkflowSchema()
    workflow_schema.protocol_schemas = [replicated_group.schema]
    workflow_schema.protocol_replicators = [protocol_replicator]

    with pytest.raises(ValueError) as error_info:
        workflow_schema.validate()

    assert (
        f"The children of replicated protocol {replicated_group.id} must also contain the "
        "replicators placeholder" in str(error_info.value)
    )
def test_find_relevant_gradient_keys(tmpdir):
    """Only gradient keys whose parameters apply to the substance should be retained."""
    # OpenMM's `unit` module moved out of the `simtk` namespace in newer releases.
    try:
        from openmm import unit as openmm_unit
    except ImportError:
        from simtk.openmm import unit as openmm_unit

    # Build a minimal force field: vdW parameters for H, Cl and C plus a
    # virtual site on the H-Cl bond. The carbon parameter is deliberately
    # irrelevant to the HCl substance used below.
    force_field = ForceField()

    vdw_handler = force_field.get_parameter_handler("vdW")
    vdw_handler.add_parameter(
        {
            "smirks": "[#1:1]",
            "epsilon": 0.0 * openmm_unit.kilocalorie_per_mole,
            "sigma": 1.0 * openmm_unit.angstrom,
        }
    )
    vdw_handler.add_parameter(
        {
            "smirks": "[#17:1]",
            "epsilon": 0.0 * openmm_unit.kilocalorie_per_mole,
            "sigma": 1.0 * openmm_unit.angstrom,
        }
    )
    vdw_handler.add_parameter(
        {
            "smirks": "[#6:1]",
            "epsilon": 0.0 * openmm_unit.kilocalorie_per_mole,
            "sigma": 1.0 * openmm_unit.angstrom,
        }
    )

    vsite_handler = VirtualSiteHandler(version=0.3)
    vsite_handler.add_parameter(
        {
            "smirks": "[#1:1][#17:2]",
            "type": "BondCharge",
            "distance": 0.1 * openmm_unit.nanometers,
            "match": "all_permutations",
            "charge_increment1": 0.0 * openmm_unit.elementary_charge,
            "charge_increment2": 0.0 * openmm_unit.elementary_charge,
        }
    )
    force_field.register_parameter_handler(vsite_handler)

    # Serialize the force field so the workflow can load it from disk.
    force_field_path = os.path.join(tmpdir, "ff.json")
    SmirnoffForceFieldSource.from_object(force_field).json(force_field_path)

    expected_gradient_keys = {
        ParameterGradientKey(tag="vdW", smirks=None, attribute="scale14"),
        ParameterGradientKey(tag="vdW", smirks="[#1:1]", attribute="epsilon"),
        ParameterGradientKey(
            tag="VirtualSites", smirks="[#1:1][#17:2]", attribute="distance"
        ),
    }

    # The carbon sigma key should be filtered out - HCl contains no carbon.
    gradient_keys = Workflow._find_relevant_gradient_keys(
        Substance.from_components("[H]Cl"),
        force_field_path,
        [
            *expected_gradient_keys,
            ParameterGradientKey(tag="vdW", smirks="[#6:1]", attribute="sigma"),
        ],
    )

    assert len(gradient_keys) == len(expected_gradient_keys)
    assert {*gradient_keys} == expected_gradient_keys
def test_generate_default_metadata_defaults():
    """Default workflow metadata has no gradient keys and infinite uncertainties."""
    density_property = create_dummy_property(Density)
    force_field_name = "smirnoff99Frosst-1.1.0.offxml"

    metadata = Workflow.generate_default_metadata(density_property, force_field_name)

    assert metadata["parameter_gradient_keys"] == []

    infinite_uncertainty = math.inf * unit.gram / unit.milliliter
    assert numpy.isclose(metadata["target_uncertainty"], infinite_uncertainty)
    assert numpy.isclose(metadata["per_component_uncertainty"], infinite_uncertainty)
|
jaketanderson/openff-evaluator | openff/evaluator/tests/test_workflow/utils.py | <reponame>jaketanderson/openff-evaluator<filename>openff/evaluator/tests/test_workflow/utils.py
from typing import Union
from openff.units import unit
from openff.evaluator.attributes import UNDEFINED
from openff.evaluator.layers import registered_calculation_schemas
from openff.evaluator.workflow import Protocol, Workflow, workflow_protocol
from openff.evaluator.workflow.attributes import InputAttribute, OutputAttribute
def create_dummy_metadata(dummy_property, calculation_layer):
    """Build fake global workflow metadata for *dummy_property*.

    Parameters
    ----------
    dummy_property
        The physical property the metadata should correspond to.
    calculation_layer: str
        The calculation layer name. For ``"ReweightingLayer"`` each of the
        layer schema's storage queries is populated with fake
        ``(data_path, force_field_path)`` tuples.

    Returns
    -------
    dict
        The generated metadata dictionary.
    """
    global_metadata = Workflow.generate_default_metadata(
        dummy_property, "smirnoff99Frosst-1.1.0.offxml", []
    )

    if calculation_layer == "ReweightingLayer":
        schema = registered_calculation_schemas[calculation_layer][
            dummy_property.__class__.__name__
        ]
        # Some layers register a schema factory rather than an instance.
        if callable(schema):
            schema = schema()

        for key, query in schema.storage_queries.items():
            fake_data = [
                (f"data_path_{index}_{key}", f"ff_path_{index}_{key}")
                for index in range(3)
            ]

            if (
                query.substance_query != UNDEFINED
                and query.substance_query.components_only
            ):
                # Per-component queries expect one list of fake tuples per
                # substance component. Fix: the previous code iterated
                # ``enumerate(...)`` without unpacking, so ``component_index``
                # was an ``(index, component)`` tuple and its repr leaked into
                # the fake path names.
                fake_data = [
                    [
                        (
                            f"data_path_{index}_{key}_{component_index}",
                            f"ff_path_{index}_{key}",
                        )
                        for index in range(3)
                    ]
                    for component_index, _ in enumerate(
                        dummy_property.substance.components
                    )
                ]

            global_metadata[key] = fake_data

    return global_metadata
@workflow_protocol()
class DummyReplicableProtocol(Protocol):
    """A no-op protocol with two replicable inputs, used to exercise replicators in tests."""

    # Inputs accept several scalar types so replicator template values of
    # different kinds can be injected.
    replicated_value_a = InputAttribute(
        docstring="", type_hint=Union[str, int, float], default_value=UNDEFINED
    )
    replicated_value_b = InputAttribute(
        docstring="", type_hint=Union[str, int, float], default_value=UNDEFINED
    )
    final_value = OutputAttribute(docstring="", type_hint=unit.Measurement)

    def _execute(self, directory, available_resources):
        # Deliberately does nothing - only the schema/replicator wiring is under test.
        pass
|
ann-marie-ward/dash-docs | dash_docs/chapters/getting_started/index.py | <reponame>ann-marie-ward/dash-docs
# -*- coding: utf-8 -*-
import dash_core_components as dcc
import dash_html_components as html
from dash_docs import styles, tools
from dash_docs.tools import load_examples
from dash_docs import reusable_components as rc
import os
examples = load_examples(__file__)
layout = html.Div([
rc.Markdown('''
# Dash Layout
<blockquote>
This is the 2nd chapter of the <dccLink children="Dash Tutorial" href="/"/>.
The <dccLink href="/installation" children="previous chapter"/> covered installation
and the <dccLink href="/basic-callbacks" children="next chapter"/> covers Dash callbacks.
</blockquote>
'''),
rc.Markdown('''
This tutorial will walk you through a fundamental aspect of Dash apps, the
app `layout`, through {} self-contained apps.
'''.format(len(examples))),
rc.Markdown('''
***
Dash apps are composed of two parts. The first part is the "`layout`" of
the app and it describes what the application looks like.
The second part describes the interactivity of the application and will be
covered in the <dccLink href="/basic-callbacks" children="next chapter"/>.
Dash provides Python classes for all of the visual components of
the application. We maintain a set of components in the
`dash_core_components` and the `dash_html_components` library
but you can also [build your own](https://github.com/plotly/dash-component-boilerplate)
with JavaScript and React.js.
Note: Throughout this documentation, Python code examples are meant to be saved as files and executed using `python app.py`. These examples are not intended to run in Jupyter notebooks as-is, although most can be modified slightly to function in that environment.
'''),
rc.Syntax(examples['getting_started_layout_1.py'][0], summary='''
To get started, create a file named `app.py` with the following code:
'''),
rc.Markdown('''
Run the app with
```
$ python app.py
...Running on http://127.0.0.1:8050/ (Press CTRL+C to quit)
```
and visit [http://127.0.0.1:8050/](http://127.0.0.1:8050/)
in your web browser. You should see an app that looks like this.
'''),
rc.Example(examples['getting_started_layout_1.py'][1]),
rc.Markdown(
'''
Note:
1. The `layout` is composed of a tree of "components" like `html.Div`
and `dcc.Graph`.
2. The `dash_html_components` library has a component for every HTML
tag. The `html.H1(children='Hello Dash')` component generates
a `<h1>Hello Dash</h1>` HTML element in your application.
3. Not all components are pure HTML. The `dash_core_components` describe
higher-level components that are interactive and are generated with
JavaScript, HTML, and CSS through the React.js library.
4. Each component is described entirely through keyword attributes.
Dash is _declarative_: you will primarily describe your application
through these attributes.
5. The `children` property is special. By convention, it's always the
first attribute which means that you can omit it:
`html.H1(children='Hello Dash')` is the same as `html.H1('Hello Dash')`.
Also, it can contain a string, a number, a single component, or a
list of components.
6. The fonts in your application will look a little bit different than
what is displayed here. This application is using a
custom CSS stylesheet to modify the default styles of the elements.
You can learn more in the <dccLink href="/external-resources" children="css tutorial"/>,
but for now you can initialize your app with
```
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
```
to get the same look and feel of these examples.
### Making your first change
**New in dash 0.30.0 and dash-renderer 0.15.0**
Dash includes "hot-reloading"; this feature is activated by default when
you run your app with `app.run_server(debug=True)`. This means that Dash
will automatically refresh your browser when you make a change in your code.
Give it a try: change the title "Hello Dash" in your application or change the `x` or the `y` data. Your app should auto-refresh with your change.
> Don't like hot-reloading? You can turn this off with `app.run_server(dev_tools_hot_reload=False)`.
> Learn more in <dccLink href="/devtools" children="Dash Dev Tools documentation"/> Questions? See the [community forum hot reloading discussion](https://community.plotly.com/t/announcing-hot-reload/14177).
#### More about HTML
The `dash_html_components` library contains a component class for every
HTML tag as well as keyword arguments for all of the HTML arguments.
'''),
rc.Syntax(examples['getting_started_layout_2.py'][0], summary='''
Let's customize the text in our app by modifying the inline styles of the
components. Create a file named `app.py` with the following code:
'''),
html.Div(examples['getting_started_layout_2.py'][1], className="example-container", style={
'padding-right': '35px',
'padding-bottom': '30px'
}),
rc.Markdown('''
In this example, we modified the inline styles of the `html.Div`
and `html.H1` components with the `style` property.
`html.H1('Hello Dash', style={'textAlign': 'center', 'color': '#7FDBFF'})`
is rendered in the Dash application as
`<h1 style="text-align: center; color: #7FDBFF">Hello Dash</h1>`.
There are a few important differences between the `dash_html_components`
and the HTML attributes:
1. The `style` property in HTML is a semicolon-separated string. In Dash,
you can just supply a dictionary.
2. The keys in the `style` dictionary are [camelCased](https://en.wikipedia.org/wiki/Camel_case).
So, instead of `text-align`, it's `textAlign`.
3. The HTML `class` attribute is `className` in Dash.
4. The children of the HTML tag is specified through the `children` keyword
argument. By convention, this is always the _first_ argument and
so it is often omitted.
Besides that, all of the available HTML attributes and tags are available
to you within your Python context.
***
#### Reusable Components
By writing our markup in Python, we can create complex reusable
components like tables without switching contexts or languages.
'''.replace(' ', '')),
rc.Syntax(
examples['getting_started_table.py'][0],
summary="""
Here's a quick example that
generates a `Table` from a Pandas dataframe. Create a file named `app.py` with the following code:
"""
),
rc.Example(examples['getting_started_table.py'][1]),
rc.Markdown('''
#### More about Visualization
The `dash_core_components` library includes a component called `Graph`.
`Graph` renders interactive data visualizations using the open source
[plotly.js](https://github.com/plotly/plotly.js) JavaScript graphing
library. Plotly.js supports over 35 chart types and renders charts in
both vector-quality SVG and high-performance WebGL.
The `figure` argument in the `dash_core_components.Graph` component is
the same `figure` argument that is used by `plotly.py`, Plotly's
open source Python graphing library.
Check out the [plotly.py documentation and gallery](https://plotly.com/python)
to learn more.
'''),
rc.Syntax(examples['getting_started_viz.py'][0], summary='''
Here's an example that creates a scatter plot from a Pandas dataframe. Create a file named `app.py` with the following code:
'''),
rc.Example(examples['getting_started_viz.py'][1]),
rc.Markdown('''
*These graphs are interactive and responsive.
**Hover** over points to see their values,
**click** on legend items to toggle traces,
**click and drag** to zoom,
**hold down shift, and click and drag** to pan.*
#### Markdown
While Dash exposes HTML through the `dash_html_components` library,
it can be tedious to write your copy in HTML.
For writing blocks of text, you can use the `Markdown` component in the
`dash_core_components` library. Create a file named `app.py` with the following code:
'''),
rc.Syntax(examples['getting_started_markdown.py'][0]),
rc.Example(examples['getting_started_markdown.py'][1]),
rc.Markdown('''
#### Core Components
The `dash_core_components` includes a set of higher-level components like
dropdowns, graphs, markdown blocks, and more.
Like all Dash components, they are described entirely declaratively.
Every option that is configurable is available as a keyword argument
of the component.
'''),
html.P(['''
We'll see many of these components throughout the tutorial.
You can view all of the available components in the
''', dcc.Link(
'Dash Core Components Gallery',
href=tools.relpath('/dash-core-components')
)]),
rc.Syntax(
examples['getting_started_core_components.py'][0],
summary="Here are a few of the available components. Create a file named `app.py` with the following code:"),
html.Div(examples['getting_started_core_components.py'][1], className="example-container"),
rc.Markdown('''
#### Calling `help`
Dash components are declarative: every configurable aspect of these
components is set during instantiation as a keyword argument.
Call `help` in your Python console on any of the components to
learn more about a component and its available arguments.
'''),
html.Div(
rc.Markdown('''```python
>>> help(dcc.Dropdown)
class Dropdown(dash.development.base_component.Component)
| A Dropdown component.
| Dropdown is an interactive dropdown element for selecting one or more
| items.
| The values and labels of the dropdown items are specified in the `options`
| property and the selected item(s) are specified with the `value` property.
|
| Use a dropdown when you have many options (more than 5) or when you are
| constrained for space. Otherwise, you can use RadioItems or a Checklist,
| which have the benefit of showing the users all of the items at once.
|
| Keyword arguments:
| - id (string; optional)
| - className (string; optional)
| - disabled (boolean; optional): If true, the option is disabled
| - multi (boolean; optional): If true, the user can select multiple values
| - options (list; optional)
| - placeholder (string; optional): The grey, default text shown when no option is selected
| - value (string | list; optional): The value of the input. If `multi` is false (the default)
| then value is just a string that corresponds to the values
| provided in the `options` property. If `multi` is true, then
| multiple values can be selected at once, and `value` is an
| array of items with values corresponding to those in the
| `options` prop.```'''), style=styles.code_container),
rc.Markdown('''
### Summary
The `layout` of a Dash app describes what the app looks like.
The `layout` is a hierarchical tree of components.
The `dash_html_components` library provides classes for all of the HTML
tags and the keyword arguments describe the HTML attributes like `style`,
`className`, and `id`.
The `dash_core_components` library generates higher-level
components like controls and graphs.
For reference, see:
'''),
html.Ul([
html.Li(
dcc.Link(
[html.Code('dash_core_components'), ' gallery'],
href=tools.relpath('/dash-core-components')
)
),
html.Li(
dcc.Link(
[html.Code('dash_html_components'), ' gallery'],
href=tools.relpath('/dash-html-components')
)
)
]),
html.P('''
The next part of the Dash tutorial covers how to make these apps
interactive.
'''),
dcc.Link(
'Dash Tutorial Part 3: Basic Callbacks',
href=tools.relpath("/basic-callbacks")
)
])
|
ann-marie-ward/dash-docs | dash_docs/chapters/support/index.py | <gh_stars>1-10
# -*- coding: utf-8 -*-
import dash_core_components as dcc
from dash_docs import reusable_components as rc
# Static "Support and Contact" page: a single Markdown component, no callbacks.
layout = rc.Markdown('''
# Dash Support and Contact
Dash is an open-source product that is
developed and maintained by [Plotly](https://plotly.com).
### Dash Demos and Enterprise Trials
If you or your team would like to demo, trial, or license a Dash Enterprise,
[get in touch with us directly](https://plotly.com/get-demo).
### Sponsored Feature Requests and Customizations
If you or your company would like to sponsor a specific feature or enterprise
customization, get in touch with our
[advanced development team](https://plotly.com/products/consulting-and-oem).
### Community Support
Our community forum at [community.plotly.com](https://community.plotly.com) has
a topic dedicated on [Dash](https://community.plotly.com/c/dash).
This forum is great for showing off projects, feature requests,
and general questions.
If you have found a bug, you can open an issue on GitHub at
[plotly/dash](https://github.com/plotly/dash) or
[plotly/dash-docs](https://github.com/plotly/dash-docs/).
### Direct Contact
If you would like to reach out to me directly,
you can email me at <<EMAIL>>.
Plotly is also on Twitter at [@plotlygraphs](https://twitter.com/plotlygraphs).
We are based in Montréal, Canada and our headquarters are in the Mile End.
If you're in the neighborhood, come say hi!
''')
|
ppm-shreya/payabbhi-python | payabbhi/test/test_invoice_item.py | <filename>payabbhi/test/test_invoice_item.py
import sys
import json
import responses
import payabbhi
import unittest2
from .helpers import mock_file, assert_list_of_invoice_items, assert_invoice_item
class TestInvoiceItem(unittest2.TestCase):
    """HTTP-mocked tests for the InvoiceItem resource client."""

    def setUp(self):
        # Dummy credentials - every request below is intercepted by the
        # `responses` library, so no real API calls are made.
        self.client = payabbhi.Client(access_id='access_id', secret_key='secret_key')
        payabbhi.api_base = 'https://payabbhi.com'
        self.invoice_item_id = 'dummy_invoice_item_id'
        self.invoice_item_url = payabbhi.api_base + '/api/v1/invoiceitems'

    @responses.activate
    def test_invoice_item_all(self):
        # Listing with no filters returns the full mocked collection.
        result = mock_file('dummy_invoice_item_collection')
        responses.add(responses.GET, self.invoice_item_url, status=200,
                      body=result, match_querystring=True)
        response = self.client.invoiceitem.all()
        resp = json.loads(result)
        assert_list_of_invoice_items(self, response, resp)

    @responses.activate
    def test_invoice_item_with_options(self):
        # `count` and `skip` must be forwarded as query-string parameters.
        result = mock_file('dummy_invoice_item_collection_filters')
        count = 3
        skip = 2
        url = '{0}?count={1}&skip={2}'.format(self.invoice_item_url, count, skip)
        responses.add(responses.GET, url, status=200,
                      body=result, match_querystring=True)
        response = self.client.invoiceitem.all(data={'count': count, 'skip': skip})
        resp = json.loads(result)
        assert_list_of_invoice_items(self, response, resp)

    @responses.activate
    def test_invoice_item_retrieve(self):
        # Retrieval hits GET <base>/<id>.
        result = mock_file('dummy_invoice_item')
        url = '{0}/{1}'.format(self.invoice_item_url, self.invoice_item_id)
        responses.add(responses.GET, url, status=200,
                      body=result, match_querystring=True)
        response = self.client.invoiceitem.retrieve(self.invoice_item_id)
        resp = json.loads(result)
        assert_invoice_item(self, response, resp)

    @responses.activate
    def test_invoice_item_create(self):
        # Creation POSTs to the collection URL.
        result = mock_file('dummy_invoice_item')
        url = self.invoice_item_url
        responses.add(responses.POST, url, status=200,
                      body=result, match_querystring=True)
        response = self.client.invoiceitem.create(data={'customer_id': 'cust_2WmsQoSRZMWWkcZg', 'name': 'Line Item', 'currency': 'INR', 'amount': 200})
        resp = json.loads(result)
        assert_invoice_item(self, response, resp)

    @responses.activate
    def test_invoice_item_delete(self):
        # Deletion hits DELETE <base>/<id> and echoes the deleted item back.
        result = mock_file('dummy_invoice_item_delete')
        url = '{0}/{1}'.format(self.invoice_item_url, self.invoice_item_id)
        responses.add(responses.DELETE, url, status=200,
                      body=result, match_querystring=True)
        response = self.client.invoiceitem.delete(self.invoice_item_id)
        resp = json.loads(result)
        assert_invoice_item(self, response, resp)
|
ppm-shreya/payabbhi-python | payabbhi/resources/__init__.py | <gh_stars>0
from .payment import Payment
from .refund import Refund
from .order import Order
from .product import Product
from .plan import Plan
from .customer import Customer
from .subscription import Subscription
from .invoice import Invoice
from .invoice_item import InvoiceItem
from .list import List
from .api_resource import APIResource
# Public API of the resources package. ``APIResource`` (the shared base
# class) is imported above but deliberately absent from ``__all__`` -
# NOTE(review): confirm it is meant to stay internal-only.
__all__ = [
    'Payment',
    'Refund',
    'Order',
    'Product',
    'Plan',
    'Customer',
    'Subscription',
    'Invoice',
    'InvoiceItem',
    'List',
]
|
ppm-shreya/payabbhi-python | payabbhi/resources/invoice_item.py | <filename>payabbhi/resources/invoice_item.py
from ..error import InvalidRequestError
from .api_resource import APIResource
class InvoiceItem(APIResource):
    """Client-side wrapper for the Payabbhi ``invoiceitems`` REST resource."""

    def __init__(self, client=None):
        super(InvoiceItem, self).__init__(client)

    def all(self, data=None, **kwargs):
        """Get all Invoice Items.

        Args:
            data: Dictionary having keys using which invoice item list will be filtered
                count: Count of invoice items to be retrieved
                skip: Number of invoice items to be skipped
                to: Invoice Item list till this timestamp will be retrieved
                from: Invoice Item list from this timestamp will be retrieved

        Returns:
            List of Invoice Item objects
        """
        if data is None:
            data = {}
        return self._all(data, **kwargs)

    def create(self, data, **kwargs):
        """Create an Invoice Item from the given data.

        Args:
            data: Dictionary having keys using which invoice item has to be created
                customer_id: The unique identifier of the Customer who will pay this invoice
                name: Name of the invoice item
                amount: Amount of the invoice item
                currency: Currency of the invoice item amount
                invoice_id: The unique identifier of the invoice to which this invoice item is to be added
                description: Description of the invoice item
                quantity: Quantity of the invoice item
                notes: key value pair as notes

        Returns:
            Invoice Item object containing data for the created invoice item
        """
        return self._post(self.class_url(), data, **kwargs)

    def retrieve(self, invoice_item_id, **kwargs):
        """Retrieve the Invoice Item with the given id.

        Args:
            invoice_item_id: Id for which Invoice Item object is to be retrieved

        Returns:
            Invoice Item object for the given invoice item id
        """
        return self._retrieve(invoice_item_id, **kwargs)

    def delete(self, invoice_item_id, **kwargs):
        """Delete the Invoice Item with the given id.

        Args:
            invoice_item_id: Id for which Invoice Item object is to be deleted

        Returns:
            Invoice Item object corresponding to the invoice item id after successful deletion
        """
        return self._delete(invoice_item_id, **kwargs)
|
clay-lfj/test008 | test/login.py | print("你好")
# Toy test-script body. Fixes: the bare expression ``123`` was a no-op, and
# the trailing line was not valid Python at all.
num = 10
num02 = 20

# The file originally ended with the bare line "zxy到此一游..." ("zxy was
# here" graffiti), which raised a SyntaxError; preserved as a comment so the
# script parses.
|
john850512/osdi2020 | lab1/pytest.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import pexpect
import sys
# Boot the kernel image under QEMU (raspi3 machine), with the first UART
# disabled and the second UART attached to stdio so we can drive the shell.
image = "kernel8.img"
cmd = "qemu-system-aarch64 "
cmd += "-M raspi3 -kernel %s " % image
cmd += "-serial null -serial stdio"

child = pexpect.spawnu(cmd)
# Echo everything the child prints while the scripted commands run.
child.logfile_read = sys.stdout

# Replay each command from the pytest_cmd file, waiting for the "# " shell
# prompt before sending the next one.
with open("pytest_cmd", "r") as f:
    line = f.readline().strip('\n')
    while line:
        print(line)
        child.expect("# ")
        child.sendline(line)
        line = f.readline().strip('\n')
    child.expect("# ")

# interact mode
# Stop echoing (interact() shows output itself) and hand the console to the user.
child.logfile_read = None
child.interact()
|
John4064/Electron-App | backend/app.py | from flask import Flask, request, jsonify
import twelvedata as td
import pandas as pd
app = Flask(__name__)
tda = td.TDClient(apikey="<KEY>")
@app.route('/stock', methods=['GET'])
def test():
    """Return the 30-day average trading volume for AAPL as JSON."""
    stock = tda.time_series(symbol='AAPL', interval="1day", outputsize=30).as_pandas()
    # Fixes: the original shadowed the ``sum`` builtin and indexed the pandas
    # column positionally with ``[x]`` (which breaks on a datetime index);
    # Series.sum() does the reduction in one call.
    total_volume = stock['volume'].sum()
    # Fix: message previously misspelled the ticker as "APPL".
    msg = "The average volume for AAPL is {}".format(total_volume / len(stock))
    response = {
        'message': msg,
        'index': 1
    }
    return jsonify(response), 200
if __name__ == '__main__':
app.run(host='0.0.0.0',port=5000)
#check() |
jburky15/basic-python-scraper | scraper.py | from bs4 import BeautifulSoup
import requests
import re
# Ask the user which component to search for, then scrape every result page
# of the Newegg listing for that component.
pc_component = input("Enter the product you want to find: ")

url = f"https://www.newegg.com/p/pl?d={pc_component}&N=4131"
page = requests.get(url).text
doc = BeautifulSoup(page, "html.parser")

# The pagination widget renders "current/total"; pull the total page count
# out of its markup.
page_number = doc.find(class_="list-tool-pagination-text").strong
pages = int(str(page_number).split("/")[-2].split(">")[-1][:-1])

items_found = {}

# Collect the link and current price for every matching product on each page.
for page in range(1, pages + 1):
    url = f"https://www.newegg.com/p/pl?d={pc_component}&N=4131&page={page}"
    page = requests.get(url).text
    doc = BeautifulSoup(page, "html.parser")
    inner_content = doc.find(
        class_="item-cells-wrap border-cells items-grid-view four-cells expulsion-one-cell")
    items = inner_content.find_all(text=re.compile(pc_component))
    for item in items:
        parent = item.parent
        if parent.name != "a":
            continue
        link = parent['href']
        parent_price = item.find_parent(class_="item-container")
        try:
            price = parent_price.find(
                class_="price-current").find("strong").string
            items_found[item] = {"price": int(
                price.replace(",", "")), "link": link}
        except (AttributeError, TypeError, ValueError):
            # Listing has no parseable price - skip it. (The original used a
            # bare ``except:``, which also swallowed KeyboardInterrupt etc.)
            pass

# Sort all found products by price (cheapest first) and print them.
sort_items = sorted(items_found.items(), key=lambda x: x[1]['price'])
for item in sort_items:
    print(item[0])
    print(f"${item[1]['price']}")
    print(item[1]['link'])
    print("")
|
ToddlD/test | Test.py | <reponame>ToddlD/test<gh_stars>0
print("hell yeah")  # fix: was `prinr(...)`, which raises NameError at runtime
|
guettli/simple21term | simple21/tests/templatetags/test_menu.py | import os
import html2text
os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'
import django
django.setup()
from simple21.testutils import AbstractPageTest
from django.template import Context, Template
class MenuTest(AbstractPageTest):
    """Tests for the {% menu %} inclusion tag."""

    def test_templatetag_menu(self):
        # The fixture root has exactly one child page ("myPage").
        self.assertEqual(1, self.root.get_children().count())
        # Render the tag and compare the plain-text form of the HTML output.
        self.assertEqual(['S21', '*', 'Hello,', 'USER', '*', 'myPage'],
                         html2text.html2text(
                             Template('''{% load menu %} {% menu %}''').render(context=Context())).split())
|
guettli/simple21term | simple21/middleware.py | from simple21.models import GlobalConfig
class SetAnonymousUserMiddleware:
    """Replace Django's anonymous user with a persisted placeholder row.

    Guarantees that ``request.user`` always refers to a real row in the users
    table, so downstream code never needs nullable foreign keys or special
    anonymous-user branches.
    Related: https://github.com/guettli/programming-guidelines/blob/master/README.md#avoid-nullable-foreign-keys
    """

    def __init__(self, get_response):
        # Standard Django middleware contract: remember the next callable.
        self.get_response = get_response

    def __call__(self, request):
        user_is_known = request.user.is_authenticated
        if not user_is_known:
            # Swap in the configured stand-in row for anonymous visitors.
            request.user = GlobalConfig.get().anonymous_user
        return self.get_response(request)
|
guettli/simple21term | simple21/admin.py | <gh_stars>0
from django.contrib import admin
from django.contrib.admin import ModelAdmin
from .models import Page, SearchLog
class PageAdmin(ModelAdmin):
    """Admin for Page with the Quill rich-text editor wired into the change form."""

    class Media:
        # Quill editor assets (CDN) plus local glue scripts, loaded on the
        # admin change page.
        css = {
            "all": ("//cdn.quilljs.com/1.3.6/quill.snow.css",)
        }
        js = (
            "//cdn.quilljs.com/1.3.6/quill.min.js",
            "/static/simple21/quill-textarea.js",
            "/static/simple21/load_quill.js",
        )

    list_display = ['id', '__str__']


admin.site.register(Page, PageAdmin)
class SearchLogAdmin(ModelAdmin):
    """Read-only admin listing of recorded searches."""
    list_display = ['query', 'user', 'datetime', 'result_count']
    # Same columns, read-only: SearchLog rows are audit data, not editable.
    readonly_fields = list_display


admin.site.register(SearchLog, SearchLogAdmin)
guettli/simple21term | simple21/tests/test_models.py | <reponame>guettli/simple21term<gh_stars>0
import os
from django.db import IntegrityError
os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'
import django
django.setup()
from simple21.models import Page
from simple21.testutils import AbstractPageTest
class PageTests(AbstractPageTest):
    """Unit tests for the Page tree model: ancestry, URLs, string forms, root uniqueness."""

    def test_get_ancestors(self):
        # Only the root (named '') lies above the fixture page.
        self.assertEqual([''], [page.name for page in self.page.get_ancestors()])

    def test_get_ancestors__include_self(self):
        self.assertEqual(['', 'myPage'], [page.name for page in self.page.get_ancestors(include_self=True)])

    def test_page__get_absolute_url(self):
        self.assertEqual('/page/{}/'.format(self.page.id), self.page.get_absolute_url())

    def test_str_of_page(self):
        self.assertEqual('myPage', str(self.page))

    def test_root__str(self):
        self.assertEqual('<root>', str(self.root))

    def test_get_children__of_leaf(self):
        self.assertEqual('<QuerySet []>', repr(self.page.get_children()))

    def test_get_children__of_root(self):
        self.assertEqual('<QuerySet [<Page: myPage>]>', repr(self.root.get_children()))

    def test_page_tree_must_have_only_one_root(self):
        # Enforced at the database level by the partial unique index added in
        # migration 0002.
        self.assertRaises(IntegrityError, Page.objects.create, name='root2')
|
guettli/simple21term | simple21/migrations/0002_auto_20201211_2140.py | # Generated by Django 3.1.4 on 2020-12-11 21:40
from django.db import migrations
class Migration(migrations.Migration):
    """Enforce a single page-tree root via a partial unique index."""

    dependencies = [
        ('simple21', '0001_initial'),
    ]

    operations = [
        # Partial unique index on the constant 1: at most one row may satisfy
        # "parent_id IS NULL", i.e. the tree can only ever have one root.
        # NOTE(review): no reverse SQL is given, so this migration is
        # irreversible - confirm that is intended.
        migrations.RunSQL('''CREATE UNIQUE INDEX page_tree_must_have_only_one_root ON simple21_page ((1)) WHERE parent_id IS NULL;''')
    ]
|
guettli/simple21term | simple21/testutils.py | from django.utils.functional import cached_property
from django.test import TestCase
from simple21.models import Page
class AbstractPageTest(TestCase):
    """Base test case that provisions the page-tree root and one sample page."""

    @classmethod
    def setUpTestData(cls):
        # Created once per test class; Django rolls back per-test changes.
        cls.root = cls.get_root()
        cls.page = cls.get_page()

    @classmethod
    def get_root(cls):
        # update_or_create keeps the fixture idempotent across repeated calls.
        return Page.objects.update_or_create(parent=None, defaults=dict(text='Root Page', name=''))[0]

    @classmethod
    def get_page(cls):
        return Page.objects.update_or_create(name='myPage', defaults=dict(
            parent=cls.root,
            text='My first fun sub-page'))[0]
|
guettli/simple21term | simple21/views.py | <filename>simple21/views.py
from django.db.models import Q
from django.http import HttpResponse
from django.template import loader
from simple21.models import Page, SearchLog
def search(request):
    """Render the search results page; every search is also logged."""
    template = loader.get_template('simple21/index.html')
    query = get_query_from_request(request)
    queryset = get_queryset(query)
    # Persist the search for later analysis (see SearchLog).
    create_search_log(request, query, queryset)
    return HttpResponse(template.render(dict(queryset=queryset), request))
def create_search_log(request, query, queryset):
    """Persist one SearchLog row recording the query, the user and the matching page ids."""
    SearchLog.objects.create(query=query, user=request.user, result_count=queryset.count(),
                             page_ids=list(queryset.values_list('id', flat=True)))
def get_query_from_request(request):
    """Return the search term from the request's ``q`` GET parameter ('' when absent)."""
    # ``.get`` already returns the default for a missing key, which covers the
    # previously special-cased empty query dict as well.
    return request.GET.get('q', '')
def get_queryset(query):
    """Return all pages whose name or text contains *query* (case-insensitive)."""
    return Page.objects.filter(Q(name__icontains=query) | Q(text__icontains=query)).distinct()
def page(request, id):
    """Render a single page identified by its primary key *id*.

    The parameter is named ``id`` (shadowing the builtin) because the URLconf
    passes it by keyword, so it cannot be renamed safely here.
    """
    template = loader.get_template('simple21/page.html')
    # Renamed local so it no longer shadows this view function itself.
    requested_page = Page.objects.get(id=id)
    return HttpResponse(template.render(dict(page=requested_page), request))
def test_session_of_anonymous_user(request):
    """Debug view: append the current GET data to the session to exercise anonymous sessions."""
    my_list = request.session.get('get', [])
    my_list.append(dict(data=request.GET, user=request.user.username, id=request.user.id))
    # Re-assign the key so Django detects the modification and saves the session.
    request.session['get'] = my_list
    return HttpResponse('ok')
guettli/simple21term | setup.py | #!/usr/bin/env python
# setuptools (not distutils) is required for ``install_requires`` to take
# effect - distutils ignores that keyword, and distutils itself was removed
# from the standard library in Python 3.12.
from setuptools import setup

setup(name='simple21',
      version='1.0',
      description='Simple21Tree: A tree of #Hashtags to increase obviousness',
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/guettli/simple21tree/',
      packages=['simple21'],
      install_requires=[
          'Django>=3.1.4',
          'python-dotenv',
          'html2text',
      ],
      scripts=['manage.py'],
      )
|
guettli/simple21term | simple21/apps.py | <gh_stars>0
from django.apps import AppConfig
from django.db.models.signals import post_migrate
from django.urls import reverse
class Simple21Config(AppConfig):
    """Django application configuration for the simple21 app."""
    name = 'simple21'
|
guettli/simple21term | simple21/templatetags/menu.py | from django import template
from simple21.models import Page
register = template.Library()
@register.inclusion_tag('simple21/menu.html')
def menu(current_page_id=None, takes_context=True):
    """Render the menu for the given page id (the tree root when no id is given).

    NOTE(review): ``takes_context`` looks like it was meant to be a keyword
    argument of ``register.inclusion_tag(..., takes_context=True)`` rather
    than a parameter of the tag itself; as written here it is unused.
    Confirm intent before removing.
    """
    if not current_page_id:
        page = Page.get_root()
    else:
        page = Page.objects.get(id=current_page_id)
    return {'children': page.get_children()}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.