_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q4800
|
PDBParser.get_linkage
|
train
|
def get_linkage(self, line):
    """Parse a LINK entry PDB line and build a covalent linkage record.

    The fixed column slices follow the PDB LINK record layout: altloc,
    residue name, chain id and residue number for each of the two partners.
    """
    first_partner = (line[16].strip(), line[17:20].strip(), line[21].strip(), int(line[22:26]))
    second_partner = (line[46].strip(), line[47:50].strip(), line[51].strip(), int(line[52:56]))
    conf1, id1, chain1, pos1 = first_partner
    conf2, id2, chain2, pos2 = second_partner
    return self.covlinkage(id1=id1, chain1=chain1, pos1=pos1, conf1=conf1,
                           id2=id2, chain2=chain2, pos2=pos2, conf2=conf2)
|
python
|
{
"resource": ""
}
|
q4801
|
LigandFinder.getpeptides
|
train
|
def getpeptides(self, chain):
    """Extract the peptide ligand formed by all non-water residues of *chain*.

    Used when peptide ligand chains are given on the command line.
    Returns None when the complex has no residue in the requested chain.
    """
    chain_residues = [res for res in pybel.ob.OBResidueIter(self.proteincomplex.OBMol)
                      if res.GetChain() == chain]
    if not chain_residues:
        return None
    # Residue property 9 marks water; drop those before extraction
    non_water_residues = [res for res in chain_residues if not res.GetResidueProperty(9)]
    return self.extract_ligand(non_water_residues)
|
python
|
{
"resource": ""
}
|
q4802
|
LigandFinder.getligs
|
train
|
def getligs(self):
    """Get all ligands from a PDB file and prepare them for analysis.
    Returns all non-empty ligands.

    Two modes: by default small-molecule ligands are extracted; when
    peptide chains (config.PEPTIDES) or an intra-chain partner
    (config.INTRA) are configured, whole chains are treated as the ligand.
    Side effects: sets self.water, self.lignames_all, self.lignames_kept
    and may extend self.covalent.
    """
    if config.PEPTIDES == [] and config.INTRA is None:
        # Extract small molecule ligands (default)
        ligands = []
        # Filter for ligands using lists
        ligand_residues, self.lignames_all, self.water = self.filter_for_ligands()
        # Index residues by (name, chain, number) for covalent-linkage lookups
        all_res_dict = {(a.GetName(), a.GetChain(), a.GetNum()): a for a in ligand_residues}
        self.lignames_kept = list(set([a.GetName() for a in ligand_residues]))
        if not config.BREAKCOMPOSITE:
            # Update register of covalent links with those between DNA/RNA subunits
            self.covalent += nucleotide_linkage(all_res_dict)
            # Find fragment linked by covalent bonds
            res_kmers = self.identify_kmers(all_res_dict)
        else:
            # Composite ligands are broken up: each residue is its own kmer
            res_kmers = [[a, ] for a in ligand_residues]
        write_message("{} ligand kmer(s) detected for closer inspection.\n".format(len(res_kmers)), mtype='debug')
        for kmer in res_kmers:  # iterate over all ligands and extract molecules + information
            if len(kmer) > config.MAX_COMPOSITE_LENGTH:
                # Oversized composites are skipped entirely
                write_message("Ligand kmer(s) filtered out with a length of {} fragments ({} allowed).\n".format(
                    len(kmer), config.MAX_COMPOSITE_LENGTH), mtype='debug')
            else:
                ligands.append(self.extract_ligand(kmer))
    else:
        # Extract peptides from given chains
        # Residue property 9 marks water molecules
        self.water = [o for o in pybel.ob.OBResidueIter(self.proteincomplex.OBMol) if o.GetResidueProperty(9)]
        if config.PEPTIDES != []:
            peptide_ligands = [self.getpeptides(chain) for chain in config.PEPTIDES]
        elif config.INTRA is not None:
            peptide_ligands = [self.getpeptides(config.INTRA), ]
        # getpeptides() returns None for chains without residues; drop those
        ligands = [p for p in peptide_ligands if p is not None]
        # Covalent linkage / ligand-name bookkeeping does not apply in peptide mode
        self.covalent, self.lignames_kept, self.lignames_all = [], [], set()
    # Discard ligands whose extracted molecule ended up with no atoms
    return [lig for lig in ligands if len(lig.mol.atoms) != 0]
|
python
|
{
"resource": ""
}
|
q4803
|
LigandFinder.is_het_residue
|
train
|
def is_het_residue(self, obres):
    """Given an OBResidue, determine if the residue is a possible ligand.

    Residues not classified as amino (residue property 0) are always
    candidates. Amino-acid residues still qualify when at least one of
    their atoms is a HETATM entry (e.g. modified residues).
    """
    if not obres.GetResidueProperty(0):
        # Not amino: can be amino_nucleo, coenzyme, ion, nucleo, protein,
        # purine, pyrimidine or solvent -- all ligand candidates.
        return True
    # Amino acids can still be ligands; accept them only if they contain at
    # least one HETATM atom. any() short-circuits at the first HETATM instead
    # of materialising a full boolean list as the previous code did.
    return any(obres.IsHetAtom(atm) for atm in pybel.ob.OBResidueAtomIter(obres))
|
python
|
{
"resource": ""
}
|
q4804
|
LigandFinder.filter_for_ligands
|
train
|
def filter_for_ligands(self):
    """Given an OpenBabel Molecule, get all ligands, their names, and water.

    Returns a tuple (selected_ligands, all_lignames, water). Possible
    crystallisation artifacts (names on the BioLiP list occurring 15 times
    or more) are removed from selected_ligands.
    """
    # Non-water (property 9) HETATM-like residues are the initial candidates
    candidates1 = [o for o in pybel.ob.OBResidueIter(
        self.proteincomplex.OBMol) if not o.GetResidueProperty(9) and self.is_het_residue(o)]
    if config.DNARECEPTOR:  # If DNA is the receptor, don't consider DNA as a ligand
        candidates1 = [res for res in candidates1 if res.GetName() not in config.DNA+config.RNA]
    all_lignames = set([a.GetName() for a in candidates1])
    water = [o for o in pybel.ob.OBResidueIter(self.proteincomplex.OBMol) if o.GetResidueProperty(9)]
    # Filter out non-ligands
    if not config.KEEPMOD:  # Keep modified residues as ligands
        candidates2 = [a for a in candidates1 if is_lig(a.GetName()) and a.GetName() not in self.modresidues]
    else:
        candidates2 = [a for a in candidates1 if is_lig(a.GetName())]
    write_message("%i ligand(s) after first filtering step.\n" % len(candidates2), mtype='debug')
    ############################################
    # Filtering by counting and artifacts list #
    ############################################
    # Count every name once up front instead of rebuilding the full name
    # list for each unique ligand (previously O(n**2)).
    name_counts = {}
    for cand in candidates2:
        name_counts[cand.GetName()] = name_counts.get(cand.GetName(), 0) + 1
    # Discard names appearing 15 times or more that are possible artifacts
    artifacts = [name for name, num in name_counts.items()
                 if name in config.biolip_list and num >= 15]
    selected_ligands = [a for a in candidates2 if a.GetName() not in artifacts]
    return selected_ligands, all_lignames, water
|
python
|
{
"resource": ""
}
|
q4805
|
Mapper.id_to_atom
|
train
|
def id_to_atom(self, idx):
    """Return the pybel atom for a given original ligand ID.

    The ID is first mapped back onto the protein numbering, then the
    corresponding atom is looked up in the original structure.
    """
    protein_idx = self.mapid(idx, 'reversed')
    original_atom = self.original_structure.GetAtom(protein_idx)
    return pybel.Atom(original_atom)
|
python
|
{
"resource": ""
}
|
q4806
|
Mol.find_hba
|
train
|
def find_hba(self, all_atoms):
    """Collect all possible hydrogen bond acceptors among *all_atoms*."""
    data = namedtuple('hbondacceptor', 'a a_orig_atom a_orig_idx type')
    acceptors = []
    halogens = (9, 17, 35, 53)
    for atom in all_atoms:
        if not atom.OBAtom.IsHbondAcceptor():
            continue
        # Halogens are treated separately; skip alternative conformations
        if atom.atomicnum in halogens or atom.idx in self.altconf:
            continue
        orig_idx = self.Mapper.mapid(atom.idx, mtype=self.mtype, bsid=self.bsid)
        acceptors.append(data(a=atom,
                              a_orig_atom=self.Mapper.id_to_atom(orig_idx),
                              a_orig_idx=orig_idx,
                              type='regular'))
    return acceptors
|
python
|
{
"resource": ""
}
|
q4807
|
Mol.find_rings
|
train
|
def find_rings(self, mol, all_atoms):
    """Find rings and return only aromatic.
    Rings have to be sufficiently planar OR be detected by OpenBabel as aromatic.

    Returns a list of aromatic_ring namedtuples holding the ring atoms,
    their original indices, a normal vector, the ring center and a size label.
    """
    data = namedtuple('aromatic_ring', 'atoms orig_atoms atoms_orig_idx normal obj center type')
    rings = []
    aromatic_amino = ['TYR', 'TRP', 'HIS', 'PHE']
    # Smallest set of smallest rings as candidates
    ring_candidates = mol.OBMol.GetSSSR()
    write_message("Number of aromatic ring candidates: %i\n" % len(ring_candidates), mtype="debug")
    # Check here first for ligand rings not being detected as aromatic by Babel and check for planarity
    for ring in ring_candidates:
        r_atoms = [a for a in all_atoms if ring.IsMember(a.OBAtom)]
        # Only 5- and 6-membered rings are considered
        if 4 < len(r_atoms) <= 6:
            res = list(set([whichrestype(a) for a in r_atoms]))
            # NOTE(review): res is built from a set, so res[0] is an arbitrary
            # element when a ring spans several residue types; the aromatic_amino
            # check is then order-dependent -- confirm this is intended.
            if ring.IsAromatic() or res[0] in aromatic_amino or ring_is_planar(ring, r_atoms):
                # Causes segfault with OpenBabel 2.3.2, so deactivated
                # typ = ring.GetType() if not ring.GetType() == '' else 'unknown'
                # Alternative typing
                typ = '%s-membered' % len(r_atoms)
                ring_atms = [r_atoms[a].coords for a in [0, 2, 4]]  # Probe atoms for normals, assuming planarity
                ringv1 = vector(ring_atms[0], ring_atms[1])
                ringv2 = vector(ring_atms[2], ring_atms[0])
                # Map ring atoms back to their indices in the original structure
                atoms_orig_idx = [self.Mapper.mapid(r_atom.idx, mtype=self.mtype,
                                                    bsid=self.bsid) for r_atom in r_atoms]
                orig_atoms = [self.Mapper.id_to_atom(idx) for idx in atoms_orig_idx]
                rings.append(data(atoms=r_atoms,
                                  orig_atoms=orig_atoms,
                                  atoms_orig_idx=atoms_orig_idx,
                                  normal=normalize_vector(np.cross(ringv1, ringv2)),
                                  obj=ring,
                                  center=centroid([ra.coords for ra in r_atoms]),
                                  type=typ))
    return rings
|
python
|
{
"resource": ""
}
|
q4808
|
PLInteraction.find_unpaired_ligand
|
train
|
def find_unpaired_ligand(self):
    """Identify unpaired functional groups in the ligand.

    Returns three lists of ligand atoms: hydrogen bond acceptors, hydrogen
    bond donors and halogen bond donors that do not take part in any detected
    interaction (hydrogen bonds, water bridges, salt bridges, metal
    complexes, halogen bonds).
    """
    unpaired_hba, unpaired_hbd, unpaired_hal = [], [], []
    # Ligand atoms already engaged in hydrogen bonds (either direction).
    # A set gives O(1) membership tests (the old code used list
    # comprehensions for their side effects and O(n) lookups).
    involved_atoms = {hbond.a.idx for hbond in self.hbonds_pdon}
    involved_atoms.update(hbond.d.idx for hbond in self.hbonds_ldon)
    # ...plus charged ligand groups in salt bridges
    for sb in self.saltbridge_lneg:
        involved_atoms.update(atom.idx for atom in sb.negative.atoms)
    for sb in self.saltbridge_pneg:
        involved_atoms.update(atom.idx for atom in sb.positive.atoms)
    # ...plus water bridges (ligand side is the acceptor when the protein donates)
    for wb in self.water_bridges:
        involved_atoms.add(wb.a.idx if wb.protisdon else wb.d.idx)
    # ...plus metal complexes with the target atom on the ligand
    for mcomplex in self.metal_complexes:
        if mcomplex.location == 'ligand':
            involved_atoms.add(mcomplex.target.atom.idx)
    for atom in (hba.a for hba in self.ligand.get_hba()):
        if atom.idx not in involved_atoms:
            unpaired_hba.append(atom)
    for atom in (hbd.d for hbd in self.ligand.get_hbd()):
        if atom.idx not in involved_atoms:
            unpaired_hbd.append(atom)
    # Halogen bond donors must additionally not form halogen bonds
    for halbond in self.halogen_bonds:
        involved_atoms.add(halbond.don.x.idx)
    for atom in (haldon.x for haldon in self.ligand.halogenbond_don):
        if atom.idx not in involved_atoms:
            unpaired_hal.append(atom)
    return unpaired_hba, unpaired_hbd, unpaired_hal
|
python
|
{
"resource": ""
}
|
q4809
|
PLInteraction.refine_hbonds_ldon
|
train
|
def refine_hbonds_ldon(self, all_hbonds, salt_lneg, salt_pneg):
    """Refine the set of ligand-donor hydrogen bonds.

    Bonds whose donor/acceptor pair already forms a salt bridge are
    discarded, and each donor atom keeps only the bond with the largest
    donor angle.
    """
    def forms_salt_bridge(hbond):
        # Positive ligand group vs. negative protein group
        for salt in salt_pneg:
            if hbond.d.idx in [at.idx for at in salt.positive.atoms] and \
                    hbond.a.idx in [at.idx for at in salt.negative.atoms]:
                return True
        # Negative ligand group vs. positive protein group
        for salt in salt_lneg:
            if hbond.d.idx in [at.idx for at in salt.negative.atoms] and \
                    hbond.a.idx in [at.idx for at in salt.positive.atoms]:
                return True
        return False

    # Allow only one hydrogen bond per donor: keep the largest donor angle
    best_per_donor = {}
    for hbond in all_hbonds:
        if forms_salt_bridge(hbond):
            continue
        current = best_per_donor.get(hbond.d.idx)
        if current is None or current.angle < hbond.angle:
            best_per_donor[hbond.d.idx] = hbond
    return list(best_per_donor.values())
|
python
|
{
"resource": ""
}
|
q4810
|
PLInteraction.refine_pi_cation_laro
|
train
|
def refine_pi_cation_laro(self, all_picat, stacks):
    """Filter pi-cation interactions involving histidine rings.

    A histidine ring stacked against a ligand ring also carries a positive
    charge, so both stacking and a pi-cation interaction get reported for
    the same constellation. In that case keep only the stacking and drop
    the pi-cation interaction.
    """
    refined = []
    for picat in all_picat:
        his_stacked = any(
            whichrestype(stack.proteinring.atoms[0]) == 'HIS'
            and picat.ring.obj == stack.ligandring.obj
            for stack in stacks)
        if not his_stacked:
            refined.append(picat)
    return refined
|
python
|
{
"resource": ""
}
|
q4811
|
PLInteraction.refine_water_bridges
|
train
|
def refine_water_bridges(self, wbridges, hbonds_ldon, hbonds_pdon):
    """Refine the set of water-bridged hydrogen bonds.

    A donor atom already forming a direct hydrogen bond may not form a water
    bridge. For each (water, acceptor) pair only the bridge whose omega angle
    is closest to the ideal 110 deg is kept, and each water molecule may
    donate at most two bridges (again the two closest to 110 deg).

    BUGFIX: the previous implementation inverted both "closest to omega"
    comparisons (keeping the farthest bridge) and mixed raw angles with
    angle deviations in the per-water bookkeeping.
    """
    donor_atoms_hbonds = [hb.d.idx for hb in hbonds_ldon + hbonds_pdon]
    omega = 110.0
    # One bridge per (water, acceptor) pair: keep the smallest |omega - w_angle|
    wb_dict = {}
    for wbridge in [wb for wb in wbridges if wb.d.idx not in donor_atoms_hbonds]:
        key = (wbridge.water.idx, wbridge.a.idx)
        if key not in wb_dict or \
                abs(omega - wbridge.w_angle) < abs(omega - wb_dict[key].w_angle):
            wb_dict[key] = wbridge
    # Group the surviving bridges by water molecule, tagged with their deviation
    wb_dict2 = {}
    for (water, _acceptor), wb in wb_dict.items():
        wb_dict2.setdefault(water, []).append((abs(omega - wb.w_angle), wb))
    # Each water keeps at most its two best (smallest-deviation) bridges
    filtered_wb = []
    for entries in wb_dict2.values():
        entries.sort(key=lambda entry: entry[0])  # stable: ties keep input order
        filtered_wb.extend(wb for _deviation, wb in entries[:2])
    return filtered_wb
|
python
|
{
"resource": ""
}
|
q4812
|
BindingSite.find_charged
|
train
|
def find_charged(self, mol):
    """Find charged residues in the binding site.

    Positive charges come from ARG/HIS/LYS side-chain nitrogens, negative
    charges from ASP/GLU side-chain oxygens. Residues in chains defined as
    peptides are excluded; in intra-chain mode only the configured chain is
    considered. The two previously duplicated branches are factored into
    one helper.
    """
    data = namedtuple('pcharge', 'atoms atoms_orig_idx type center restype resnr reschain')

    def contributing_atoms(res, type_prefix):
        # Side-chain (atom property 8) atoms whose type starts with
        # type_prefix, excluding alternative conformations.
        atoms, orig_indices = [], []
        for a in pybel.ob.OBResidueAtomIter(res):
            if a.GetType().startswith(type_prefix) and res.GetAtomProperty(a, 8):
                # mapid is looked up once per atom (was computed twice before)
                mapped = self.Mapper.mapid(a.GetIdx(), mtype='protein')
                if mapped not in self.altconf:
                    atoms.append(pybel.Atom(a))
                    orig_indices.append(mapped)
        return atoms, orig_indices

    a_set = []
    # Iterate through all residues, exclude those in chains defined as peptides
    for res in [r for r in pybel.ob.OBResidueIter(mol.OBMol) if not r.GetChain() in config.PEPTIDES]:
        if config.INTRA is not None and res.GetChain() != config.INTRA:
            continue
        if res.GetName() in ('ARG', 'HIS', 'LYS'):  # charged sidechain nitrogens
            charge_type, prefix = 'positive', 'N'
        elif res.GetName() in ('GLU', 'ASP'):  # charged sidechain oxygens
            charge_type, prefix = 'negative', 'O'
        else:
            continue
        atoms, orig_indices = contributing_atoms(res, prefix)
        if atoms:
            a_set.append(data(atoms=atoms,
                              atoms_orig_idx=orig_indices,
                              type=charge_type,
                              center=centroid([ac.coords for ac in atoms]),
                              restype=res.GetName(),
                              resnr=res.GetNum(),
                              reschain=res.GetChain()))
    return a_set
|
python
|
{
"resource": ""
}
|
q4813
|
Ligand.is_functional_group
|
train
|
def is_functional_group(self, atom, group):
    """Given a pybel atom, look up if it belongs to a functional group.

    BUGFIX: neighbour atoms are collected as integer atomic numbers
    (GetAtomicNum), but the old code tested ``'1' not in n_atoms`` against
    the *string* '1', which was always true. Hydrogen is atomic number 1,
    so the checks below now use the integer.
    """
    n_atoms = [a_neighbor.GetAtomicNum() for a_neighbor in pybel.ob.OBAtomAtomIter(atom.OBAtom)]
    if group in ['quartamine', 'tertamine'] and atom.atomicnum == 7:  # Nitrogen
        # It's a nitrogen, so could be a protonated amine or quaternary ammonium
        if 1 not in n_atoms and len(n_atoms) == 4:
            return group == 'quartamine'  # quat. ammonium (N with 4 residues != H)
        elif atom.OBAtom.GetHyb() == 3 and len(n_atoms) >= 3:
            return group == 'tertamine'  # sp3-hybridized, so could pick up a hydrogen
        else:
            return False
    if group in ['sulfonium', 'sulfonicacid', 'sulfate'] and atom.atomicnum == 16:  # Sulfur
        if 1 not in n_atoms and len(n_atoms) == 3:  # sulfonium (S with 3 residues != H)
            return group == 'sulfonium'
        elif n_atoms.count(8) == 3:  # sulfonate or sulfonic acid
            return group == 'sulfonicacid'
        elif n_atoms.count(8) == 4:  # sulfate
            return group == 'sulfate'
    if group == 'phosphate' and atom.atomicnum == 15:  # Phosphorus
        if set(n_atoms) == {8}:  # all neighbours are oxygen
            return True
    if group in ['carboxylate', 'guanidine'] and atom.atomicnum == 6:  # Carbon
        if n_atoms.count(8) == 2 and n_atoms.count(6) == 1:  # carboxylate group
            return group == 'carboxylate'
        elif n_atoms.count(7) == 3 and len(n_atoms) == 3:  # guanidine group
            nitro_partners = []
            for nitro in pybel.ob.OBAtomAtomIter(atom.OBAtom):
                nitro_partners.append(len([b_neighbor for b_neighbor in pybel.ob.OBAtomAtomIter(nitro)]))
            if min(nitro_partners) == 1:  # One nitrogen is only connected to the carbon, can pick up a H
                return group == 'guanidine'
    if group == 'halocarbon' and atom.atomicnum in [9, 17, 35, 53]:  # Halogen atoms
        n_atoms = [na for na in pybel.ob.OBAtomAtomIter(atom.OBAtom) if na.GetAtomicNum() == 6]
        if len(n_atoms) == 1:  # exactly one carbon neighbour: halocarbon
            return True
    else:
        return False
|
python
|
{
"resource": ""
}
|
q4814
|
PDBComplex.extract_bs
|
train
|
def extract_bs(self, cutoff, ligcentroid, resis):
    """Return the ids of all residues in *resis* that belong to the binding site."""
    bs_ids = []
    for obres in resis:
        if self.res_belongs_to_bs(obres, cutoff, ligcentroid):
            bs_ids.append(obres.GetIdx())
    return bs_ids
|
python
|
{
"resource": ""
}
|
q4815
|
url_info
|
train
|
def url_info(request):
    """Template context processor.

    Exposes the media/static URLs, version and server info from settings as
    well as the current site and its name to template code.
    """
    context = {}
    context['MEDIA_URL'] = core_settings.MEDIA_URL
    context['STATIC_URL'] = core_settings.STATIC_URL
    context['VERSION'] = core_settings.VERSION
    context['SERVER_INFO'] = core_settings.SERVER_INFO
    context['SITE_NAME'] = current_site_name
    context['CURRENT_SITE'] = current_site
    return context
|
python
|
{
"resource": ""
}
|
q4816
|
RelatedManager.collect_related
|
train
|
def collect_related(self, finder_funcs, obj, count, *args, **kwargs):
    """Collect objects related to *obj* by calling each finder in turn.

    Every finder receives the object, the requested count and what has been
    collected so far. Collection stops as soon as *count* objects are
    gathered or the finder list is exhausted; at most *count* objects are
    returned.
    """
    collected = []
    for finder in finder_funcs:
        found = finder(obj, count, collected, *args, **kwargs)
        if not found:
            continue
        collected.extend(found)
        if len(collected) >= count:
            break
    return collected[:count]
|
python
|
{
"resource": ""
}
|
q4817
|
RelatedManager.get_related_for_object
|
train
|
def get_related_for_object(self, obj, count, finder=None, mods=None, only_from_same_site=True):
    """
    Returns at most ``count`` publishable objects related to ``obj`` using
    named related finder ``finder``.
    If only specific types of publishable are preferred, use the ``mods``
    attribute to list the required classes.
    Finally, use ``only_from_same_site`` if you don't want cross-site
    content.
    ``finder`` attribute uses ``RELATED_FINDERS`` settings to find out
    what finder function to use. If none is specified, ``default``
    is used to perform the query.
    """
    # BUGFIX: the default used to be the shared mutable ``mods=[]``;
    # normalize None to a fresh list instead.
    mods = [] if mods is None else mods
    return self.collect_related(self._get_finders(finder), obj, count, mods, only_from_same_site)
|
python
|
{
"resource": ""
}
|
q4818
|
ListingManager.get_listing
|
train
|
def get_listing(self, category=None, children=ListingHandler.NONE, count=10, offset=0, content_types=[], date_range=(), exclude=None, **kwargs):
    """
    Get top listing objects for a given category, optionally including its
    child categories.
    Params:
        category - Category object to list objects for. None if any category will do
        children - scope of child-category traversal (a ListingHandler constant)
        count - number of objects to output, defaults to 10
        offset - starting with object number... 1-based
        content_types - list of ContentTypes to list; if empty, objects from all models are included
        date_range - range for listing's publish_from field
        exclude - a publishable object to omit from the results
        **kwargs - the rest of the parameters are passed to the queryset unchanged
    """
    assert offset >= 0, "Offset must be a positive integer"
    assert count >= 0, "Count must be a positive integer"
    if not count:
        return []
    limit = offset + count
    qset = self.get_listing_queryset(category, children, content_types, date_range, exclude, **kwargs)
    # direct listings, we don't need to check for duplicates
    if children == ListingHandler.NONE:
        return qset[offset:limit]
    # With child categories one publishable can appear in several listings;
    # walk the queryset page by page and deduplicate by publishable_id.
    seen = set()
    out = []
    while len(out) < count:
        skip = 0
        # Over-fetch by 2 rows as padding for duplicates within a page
        for l in qset[offset:limit + 2]:
            if l.publishable_id not in seen:
                seen.add(l.publishable_id)
                out.append(l)
                if len(out) == count:
                    break
            else:
                skip += 1
        # Few duplicates skipped, or the queryset is exhausted -- no need for another round
        if skip <= 2 or (len(out) + skip) < (count + 2):
            break
        # get another page to fill in the gaps
        offset += count
        limit += count
    return out
|
python
|
{
"resource": ""
}
|
q4819
|
do_author_listing
|
train
|
def do_author_listing(parser, token):
    """
    Get N listing objects recently published by a given author, optionally
    omitting one publishable object from the results.
    **Usage**::
        {% author_listing <author> <limit> as <result> [omit <obj>] %}
    **Parameters**::
        ================================== ================================================
        Option                             Description
        ================================== ================================================
        ``author``                         Author to load objects for.
        ``limit``                          Maximum number of objects to store.
        ``result``                         Store the resulting list in context under given
                                           name.
        ``obj``                            Publishable object to omit.
        ================================== ================================================
    **Examples**::
        {% author_listing object.authors.all.0 10 as article_listing %}
    """
    contents = token.split_contents()
    argc = len(contents)
    if argc == 5:
        _tag, obj_var, count, _fill, var_name = contents
        return AuthorListingNode(obj_var, count, var_name)
    if argc == 7:
        _tag, obj_var, count, _fill, var_name, _fill2, omit_var = contents
        return AuthorListingNode(obj_var, count, var_name, omit_var)
    raise template.TemplateSyntaxError('%r tag requires 4 or 6 arguments.' % contents[0])
|
python
|
{
"resource": ""
}
|
q4820
|
_Tb
|
train
|
def _Tb(P, S):
    """Calculate the boiling temperature of seawater.
    Parameters
    ----------
    P : float
        Pressure, [MPa]
    S : float
        Salinity, [kg/kg]
    Returns
    -------
    Tb : float
        Boiling temperature, [K]
    References
    ----------
    IAPWS, Advisory Note No. 5: Industrial Calculation of the Thermodynamic
    Properties of Seawater, http://www.iapws.org/relguide/Advise5.html, Eq 7
    """
    def equilibrium(T):
        # Gibbs energies of liquid water (region 1) and vapor (region 2)
        liquid = _Region1(T, P)
        g_liquid = liquid["h"] - T*liquid["s"]
        vapor = _Region2(T, P)
        g_vapor = vapor["h"] - T*vapor["s"]
        saline = SeaWater._saline(T, P, S)
        # Zero when the chemical potential of water in seawater equals vapor
        return -saline["g"] + S*saline["gs"] - g_liquid + g_vapor
    return fsolve(equilibrium, 300)[0]
|
python
|
{
"resource": ""
}
|
q4821
|
_Tf
|
train
|
def _Tf(P, S):
    """Calculate the freezing temperature of seawater.
    Parameters
    ----------
    P : float
        Pressure, [MPa]
    S : float
        Salinity, [kg/kg]
    Returns
    -------
    Tf : float
        Freezing temperature, [K]
    References
    ----------
    IAPWS, Advisory Note No. 5: Industrial Calculation of the Thermodynamic
    Properties of Seawater, http://www.iapws.org/relguide/Advise5.html, Eq 12
    """
    def equilibrium(T):
        # fsolve may pass a 1-element array; work with a scalar
        T = float(T)
        liquid = _Region1(T, P)
        g_liquid = liquid["h"] - T*liquid["s"]
        g_ice = _Ice(T, P)["g"]
        saline = SeaWater._saline(T, P, S)
        # Zero when the chemical potential of water in seawater equals ice
        return -saline["g"] + S*saline["gs"] - g_liquid + g_ice
    return fsolve(equilibrium, 300)[0]
|
python
|
{
"resource": ""
}
|
q4822
|
_Triple
|
train
|
def _Triple(S):
    """Calculate the triple point pressure and temperature for seawater.
    Parameters
    ----------
    S : float
        Salinity, [kg/kg]
    Returns
    -------
    prop : dict
        Dictionary with the triple point properties:
            * Tt: Triple point temperature, [K]
            * Pt: Triple point pressure, [MPa]
    References
    ----------
    IAPWS, Advisory Note No. 5: Industrial Calculation of the Thermodynamic
    Properties of Seawater, http://www.iapws.org/relguide/Advise5.html, Eq 7
    """
    def equilibria(parr):
        T, P = parr
        liquid = _Region1(T, P)
        g_liquid = liquid["h"] - T*liquid["s"]
        vapor = _Region2(T, P)
        g_vapor = vapor["h"] - T*vapor["s"]
        g_ice = _Ice(T, P)["g"]
        saline = SeaWater._saline(T, P, S)
        # Chemical potential of water in seawater, relative to the liquid
        mu_w = -saline["g"] + S*saline["gs"] - g_liquid
        # Both residuals vanish where seawater, ice and vapor coexist
        return mu_w + g_ice, mu_w + g_vapor
    Tt, Pt = fsolve(equilibria, [273, 6e-4])
    return {"Tt": Tt, "Pt": Pt}
|
python
|
{
"resource": ""
}
|
q4823
|
_OsmoticPressure
|
train
|
def _OsmoticPressure(T, P, S):
    """Calculate the osmotic pressure of seawater.
    Parameters
    ----------
    T : float
        Temperature, [K]
    P : float
        Pressure, [MPa]
    S : float
        Salinity, [kg/kg]
    Returns
    -------
    Posm : float
        Osmotic pressure, [MPa]
    References
    ----------
    IAPWS, Advisory Note No. 5: Industrial Calculation of the Thermodynamic
    Properties of Seawater, http://www.iapws.org/relguide/Advise5.html, Eq 15
    """
    # Gibbs energy of pure water at the reference pressure
    pure = _Region1(T, P)
    g_pure = pure["h"] - T*pure["s"]
    def balance(Posm):
        # Pure water at elevated pressure P+Posm vs. seawater at P+Posm
        pressurized = _Region1(T, P+Posm)
        g_pressurized = pressurized["h"] - T*pressurized["s"]
        saline = SeaWater._saline(T, P+Posm, S)
        return -saline["g"] + S*saline["gs"] - g_pure + g_pressurized
    return fsolve(balance, 0)[0]
|
python
|
{
"resource": ""
}
|
q4824
|
_ThCond_SeaWater
|
train
|
def _ThCond_SeaWater(T, P, S):
"""Equation for the thermal conductivity of seawater
Parameters
----------
T : float
Temperature, [K]
P : float
Pressure, [MPa]
S : float
Salinity, [kg/kg]
Returns
-------
k : float
Thermal conductivity excess relative to that of the pure water, [W/mK]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 273.15 ≤ T ≤ 523.15
* 0 ≤ P ≤ 140
* 0 ≤ S ≤ 0.17
Examples
--------
>>> _ThCond_Seawater(293.15, 0.1, 0.035)
-0.00418604
References
----------
IAPWS, Guideline on the Thermal Conductivity of Seawater,
http://www.iapws.org/relguide/Seawater-ThCond.html
"""
# Check input parameters
if T < 273.15 or T > 523.15 or P < 0 or P > 140 or S < 0 or S > 0.17:
raise NotImplementedError("Incoming out of bound")
# Eq 4
a1 = -7.180891e-5+1.831971e-7*P
a2 = 1.048077e-3-4.494722e-6*P
# Eq 5
b1 = 1.463375e-1+9.208586e-4*P
b2 = -3.086908e-3+1.798489e-5*P
a = a1*exp(a2*(T-273.15)) # Eq 2
b = b1*exp(b2*(T-273.15)) # Eq 3
# Eq 1
DL = a*(1000*S)**(1+b)
return DL
|
python
|
{
"resource": ""
}
|
q4825
|
_solNa2SO4
|
train
|
def _solNa2SO4(T, mH2SO4, mNaCl):
"""Equation for the solubility of sodium sulfate in aqueous mixtures of
sodium chloride and sulfuric acid
Parameters
----------
T : float
Temperature, [K]
mH2SO4 : float
Molality of sufuric acid, [mol/kg(water)]
mNaCl : float
Molality of sodium chloride, [mol/kg(water)]
Returns
-------
S : float
Molal solutility of sodium sulfate, [mol/kg(water)]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 523.15 ≤ T ≤ 623.15
* 0 ≤ mH2SO4 ≤ 0.75
* 0 ≤ mNaCl ≤ 2.25
Examples
--------
>>> _solNa2SO4(523.15, 0.25, 0.75)
2.68
References
----------
IAPWS, Solubility of Sodium Sulfate in Aqueous Mixtures of Sodium Chloride
and Sulfuric Acid from Water to Concentrated Solutions,
http://www.iapws.org/relguide/na2so4.pdf
"""
# Check input parameters
if T < 523.15 or T > 623.15 or mH2SO4 < 0 or mH2SO4 > 0.75 or \
mNaCl < 0 or mNaCl > 2.25:
raise NotImplementedError("Incoming out of bound")
A00 = -0.8085987*T+81.4613752+0.10537803*T*log(T)
A10 = 3.4636364*T-281.63322-0.46779874*T*log(T)
A20 = -6.0029634*T+480.60108+0.81382854*T*log(T)
A30 = 4.4540258*T-359.36872-0.60306734*T*log(T)
A01 = 0.4909061*T-46.556271-0.064612393*T*log(T)
A02 = -0.002781314*T+1.722695+0.0000013319698*T*log(T)
A03 = -0.014074108*T+0.99020227+0.0019397832*T*log(T)
A11 = -0.87146573*T+71.808756+0.11749585*T*log(T)
S = A00 + A10*mH2SO4 + A20*mH2SO4**2 + A30*mH2SO4**3 + A01*mNaCl + \
A02*mNaCl**2 + A03*mNaCl**3 + A11*mH2SO4*mNaCl
return S
|
python
|
{
"resource": ""
}
|
q4826
|
_critNaCl
|
train
|
def _critNaCl(x):
    """Equation for the critical locus of aqueous solutions of sodium chloride.
    Parameters
    ----------
    x : float
        Mole fraction of NaCl, [-]
    Returns
    -------
    prop : dict
        A dictionary with the properties:
            * Tc: critical temperature, [K]
            * Pc: critical pressure, [MPa]
            * rhoc: critical density, [kg/m³]
    Notes
    ------
    Raise :class:`NotImplementedError` if input isn't in limit:
        * 0 ≤ x ≤ 0.12
    Examples
    --------
    >>> _critNaCl(0.1)["Tc"]
    975.571016
    References
    ----------
    IAPWS, Revised Guideline on the Critical Locus of Aqueous Solutions of
    Sodium Chloride, http://www.iapws.org/relguide/critnacl.html
    """
    # Check input parameters
    if x < 0 or x > 0.12:
        raise NotImplementedError("Incoming out of bound")
    # Low- and high-composition branches of the critical temperature
    T1 = Tc*(1 + 2.3e1*x - 3.3e2*x**1.5 - 1.8e3*x**2)
    T2 = Tc*(1 + 1.757e1*x - 3.026e2*x**1.5 + 2.838e3*x**2 - 1.349e4*x**2.5 +
             3.278e4*x**3 - 3.674e4*x**3.5 + 1.437e4*x**4)
    # Smooth switching weights blending the two branches
    w1 = (abs(10000*x-10-1) - abs(10000*x-10+1))/4 + 0.5
    w2 = (abs(10000*x-10+1) - abs(10000*x-10-1))/4 + 0.5
    # Eq 1
    tc = w1*T1 + w2*T2
    # Eq 7
    rc = rhoc*(1 + 1.7607e2*x - 2.9693e3*x**1.5 + 2.4886e4*x**2 -
               1.1377e5*x**2.5 + 2.8847e5*x**3 - 3.8195e5*x**3.5 +
               2.0633e5*x**4)
    # Eq 8: critical pressure from the temperature offset
    DT = tc - Tc
    pc = Pc*(1 + 9.1443e-3*DT + 5.1636e-5*DT**2 - 2.5360e-7*DT**3 + 3.6494e-10*DT**4)
    return {"Tc": tc, "rhoc": rc, "Pc": pc}
|
python
|
{
"resource": ""
}
|
q4827
|
SeaWater._water
|
train
|
def _water(cls, T, P):
    """Get properties of pure water in the saline Gibbs-function format
    (Table 4, pag 8), evaluated with the IAPWS-95 formulation."""
    water = IAPWS95(P=P, T=T)
    return {
        "g": water.h - T*water.s,
        "gt": -water.s,
        "gp": 1./water.rho,
        "gtt": -water.cp/T,
        "gtp": water.betas*water.cp/T,
        "gpp": -1e6/(water.rho*water.w)**2 - water.betas**2*1e3*water.cp/T,
        # Salinity derivatives vanish for pure water
        "gs": 0,
        "gsp": 0,
        "thcond": water.k,
    }
|
python
|
{
"resource": ""
}
|
q4828
|
MEoS._saturation
|
train
|
def _saturation(self, T):
    """Saturation calculation for two phase search.

    Solves the phase-equilibrium conditions (equal pressure and equal
    fugacity residuals) for the saturated liquid and vapor densities at
    temperature T, and returns (rhoL, rhoG, Ps).
    """
    # Use the formulation-specific reducing parameters when present
    rhoc = self._constants.get("rhoref", self.rhoc)
    Tc = self._constants.get("Tref", self.Tc)
    if T > Tc:
        # No two-phase region above the critical temperature; clamp
        T = Tc
    tau = Tc/T
    # Ancillary equations provide the solver's initial guesses
    rhoLo = self._Liquid_Density(T)
    rhoGo = self._Vapor_Density(T)
    def f(parr):
        # Residuals of the equal-pressure (J) and equal-fugacity (K)
        # conditions in reduced variables
        rhol, rhog = parr
        deltaL = rhol/rhoc
        deltaG = rhog/rhoc
        phirL = _phir(tau, deltaL, self._constants)
        phirG = _phir(tau, deltaG, self._constants)
        phirdL = _phird(tau, deltaL, self._constants)
        phirdG = _phird(tau, deltaG, self._constants)
        Jl = deltaL*(1+deltaL*phirdL)
        Jv = deltaG*(1+deltaG*phirdG)
        Kl = deltaL*phirdL+phirL+log(deltaL)
        Kv = deltaG*phirdG+phirG+log(deltaG)
        return Kv-Kl, Jv-Jl
    rhoL, rhoG = fsolve(f, [rhoLo, rhoGo])
    if rhoL == rhoG:
        # Solver collapsed to a single density: at/above the critical point
        Ps = self.Pc
    else:
        # NOTE(review): these reduced densities use self.rhoc, not the
        # "rhoref"-aware rhoc used inside f() above -- confirm the
        # asymmetry is intentional.
        deltaL = rhoL/self.rhoc
        deltaG = rhoG/self.rhoc
        firL = _phir(tau, deltaL, self._constants)
        firG = _phir(tau, deltaG, self._constants)
        Ps = self.R*T*rhoL*rhoG/(rhoL-rhoG)*(firL-firG+log(deltaL/deltaG))
    return rhoL, rhoG, Ps
|
python
|
{
"resource": ""
}
|
q4829
|
MEoS._Helmholtz
|
train
|
def _Helmholtz(self, rho, T):
    """Calculated properties from helmholtz free energy and derivatives
    Parameters
    ----------
    rho : float
        Density, [kg/m³]
    T : float
        Temperature, [K]
    Returns
    -------
    prop : dict
        Dictionary with calculated properties:
            * fir: [-]
            * fird: ∂fir/∂δ|τ
            * firdd: ∂²fir/∂δ²|τ
            * delta: Reduced density rho/rhoc, [-]
            * rho: Density actually used after clamping, [kg/m³]
            * P: Pressure, [kPa]
            * h: Enthalpy, [kJ/kg]
            * s: Entropy, [kJ/kgK]
            * cv: Isochoric specific heat, [kJ/kgK]
            * alfap: Relative pressure coefficient, [1/K]
            * betap: Isothermal stress coefficient, [kg/m³]
    References
    ----------
    IAPWS, Revised Release on the IAPWS Formulation 1995 for the
    Thermodynamic Properties of Ordinary Water Substance for General and
    Scientific Use, September 2016, Table 3
    http://www.iapws.org/relguide/IAPWS-95.html
    """
    # fsolve may hand in 1-element arrays; unwrap to plain scalars
    if isinstance(rho, ndarray):
        rho = rho[0]
    if isinstance(T, ndarray):
        T = T[0]
    # Clamp unphysical solver excursions so the evaluation stays defined
    if rho < 0:
        rho = 1e-20
    if T < 50:
        T = 50
    # Reducing parameters: prefer formulation-specific values when given
    rhoc = self._constants.get("rhoref", self.rhoc)
    Tc = self._constants.get("Tref", self.Tc)
    delta = rho/rhoc
    tau = Tc/T
    ideal = self._phi0(tau, delta)
    fio = ideal["fio"]
    fiot = ideal["fiot"]
    fiott = ideal["fiott"]
    res = self._phir(tau, delta)
    fir = res["fir"]
    firt = res["firt"]
    firtt = res["firtt"]
    fird = res["fird"]
    firdd = res["firdd"]
    firdt = res["firdt"]
    propiedades = {}
    propiedades["fir"] = fir
    propiedades["fird"] = fird
    propiedades["firdd"] = firdd
    propiedades["delta"] = delta
    propiedades["rho"] = rho
    # Thermodynamic properties from ideal + residual parts (IAPWS-95 Table 3)
    propiedades["P"] = (1+delta*fird)*self.R*T*rho
    propiedades["h"] = self.R*T*(1+tau*(fiot+firt)+delta*fird)
    propiedades["s"] = self.R*(tau*(fiot+firt)-fio-fir)
    propiedades["cv"] = -self.R*tau**2*(fiott+firtt)
    propiedades["alfap"] = (1-delta*tau*firdt/(1+delta*fird))/T
    propiedades["betap"] = rho*(
        1+(delta*fird+delta**2*firdd)/(1+delta*fird))
    return propiedades
|
python
|
{
"resource": ""
}
|
q4830
|
MEoS._phi0
|
train
|
def _phi0(self, tau, delta):
    """Ideal gas Helmholtz free energy and derivatives

    Parameters
    ----------
    tau : float
        Inverse reduced temperature Tc/T, [-]
    delta : float
        Reduced density rho/rhoc, [-]

    Returns
    -------
    prop : dict
        Dictionary with ideal adimensional helmholtz energy and derivatives:
            * fio, [-]
            * fiot: ∂fio/∂τ|δ
            * fiod: ∂fio/∂δ|τ
            * fiott: ∂²fio/∂τ²|δ
            * fiodt: ∂²fio/∂τ∂δ
            * fiodd: ∂²fio/∂δ²|τ

    References
    ----------
    IAPWS, Revised Release on the IAPWS Formulation 1995 for the
    Thermodynamic Properties of Ordinary Water Substance for General and
    Scientific Use, September 2016, Table 4
    http://www.iapws.org/relguide/IAPWS-95.html
    """
    Fi0 = self.Fi0
    # Logarithmic terms: only these contribute to the density derivatives
    fio = Fi0["ao_log"][0]*log(delta)+Fi0["ao_log"][1]*log(tau)
    fiot = +Fi0["ao_log"][1]/tau
    fiott = -Fi0["ao_log"][1]/tau**2
    fiod = 1/delta
    fiodd = -1/delta**2
    fiodt = 0
    # Power (polynomial) terms in tau; guards avoid 0**negative at t=0,1
    for n, t in zip(Fi0["ao_pow"], Fi0["pow"]):
        fio += n*tau**t
        if t != 0:
            fiot += t*n*tau**(t-1)
        if t not in [0, 1]:
            fiott += n*t*(t-1)*tau**(t-2)
    # Einstein (Planck-Einstein) terms for internal vibration modes
    for n, t in zip(Fi0["ao_exp"], Fi0["titao"]):
        fio += n*log(1-exp(-tau*t))
        fiot += n*t*((1-exp(-t*tau))**-1-1)
        fiott -= n*t**2*exp(-t*tau)*(1-exp(-t*tau))**-2
    # Extension to especial terms of air
    if "ao_exp2" in Fi0:
        for n, g, C in zip(Fi0["ao_exp2"], Fi0["titao2"], Fi0["sum2"]):
            fio += n*log(C+exp(g*tau))
            fiot += n*g/(C*exp(-g*tau)+1)
            fiott += C*n*g**2*exp(-g*tau)/(C*exp(-g*tau)+1)**2
    prop = {}
    prop["fio"] = fio
    prop["fiot"] = fiot
    prop["fiott"] = fiott
    prop["fiod"] = fiod
    prop["fiodd"] = fiodd
    prop["fiodt"] = fiodt
    return prop
|
python
|
{
"resource": ""
}
|
q4831
|
MEoS._derivDimensional
|
train
|
def _derivDimensional(self, rho, T):
    """Calcule the dimensional form or Helmholtz free energy derivatives

    Parameters
    ----------
    rho : float
        Density, [kg/m³]
    T : float
        Temperature, [K]

    Returns
    -------
    prop : dict
        Dictionary with residual helmholtz energy and derivatives:
            * fir, [kJ/kg]
            * firt: ∂fir/∂T|ρ, [kJ/kgK]
            * fird: ∂fir/∂ρ|T, [kJ/m³kg²]
            * firtt: ∂²fir/∂T²|ρ, [kJ/kgK²]
            * firdt: ∂²fir/∂T∂ρ, [kJ/m³kg²K]
            * firdd: ∂²fir/∂ρ²|T, [kJ/m⁶kg]

    References
    ----------
    IAPWS, Guideline on an Equation of State for Humid Air in Contact with
    Seawater and Ice, Consistent with the IAPWS Formulation 2008 for the
    Thermodynamic Properties of Seawater, Table 7,
    http://www.iapws.org/relguide/SeaAir.html
    """
    # Zero-density limit: all dimensional derivatives vanish
    if not rho:
        prop = {}
        prop["fir"] = 0
        prop["firt"] = 0
        prop["fird"] = 0
        prop["firtt"] = 0
        prop["firdt"] = 0
        prop["firdd"] = 0
        return prop
    # Specific gas constant, [kJ/kgK]
    R = self._constants.get("R")/self._constants.get("M", self.M)
    rhoc = self._constants.get("rhoref", self.rhoc)
    Tc = self._constants.get("Tref", self.Tc)
    delta = rho/rhoc
    tau = Tc/T
    ideal = self._phi0(tau, delta)
    fio = ideal["fio"]
    fiot = ideal["fiot"]
    fiott = ideal["fiott"]
    fiod = ideal["fiod"]
    fiodd = ideal["fiodd"]
    res = self._phir(tau, delta)
    fir = res["fir"]
    firt = res["firt"]
    firtt = res["firtt"]
    fird = res["fird"]
    firdd = res["firdd"]
    firdt = res["firdt"]
    prop = {}
    # Chain rule: f = R·T·(φ0+φr), with δ = ρ/ρc, τ = Tc/T (Table 7)
    prop["fir"] = R*T*(fio+fir)
    prop["firt"] = R*(fio+fir-(fiot+firt)*tau)
    prop["fird"] = R*T/rhoc*(fiod+fird)
    prop["firtt"] = R*tau**2/T*(fiott+firtt)
    prop["firdt"] = R/rhoc*(fiod+fird-firdt*tau)
    prop["firdd"] = R*T/rhoc**2*(fiodd+firdd)
    return prop
|
python
|
{
"resource": ""
}
|
q4832
|
MEoS._surface
|
train
|
def _surface(self, T):
"""Generic equation for the surface tension
Parameters
----------
T : float
Temperature, [K]
Returns
-------
σ : float
Surface tension, [N/m]
Notes
-----
Need a _surf dict in the derived class with the parameters keys:
sigma: coefficient
exp: exponent
"""
tau = 1-T/self.Tc
sigma = 0
for n, t in zip(self._surf["sigma"], self._surf["exp"]):
sigma += n*tau**t
return sigma
|
python
|
{
"resource": ""
}
|
q4833
|
MEoS._Vapor_Pressure
|
train
|
def _Vapor_Pressure(cls, T):
"""Auxiliary equation for the vapour pressure
Parameters
----------
T : float
Temperature, [K]
Returns
-------
Pv : float
Vapour pressure, [Pa]
References
----------
IAPWS, Revised Supplementary Release on Saturation Properties of
Ordinary Water Substance September 1992,
http://www.iapws.org/relguide/Supp-sat.html, Eq.1
"""
Tita = 1-T/cls.Tc
suma = 0
for n, x in zip(cls._Pv["ao"], cls._Pv["exp"]):
suma += n*Tita**x
Pr = exp(cls.Tc/T*suma)
Pv = Pr*cls.Pc
return Pv
|
python
|
{
"resource": ""
}
|
q4834
|
MEoS._Liquid_Density
|
train
|
def _Liquid_Density(cls, T):
"""Auxiliary equation for the density of saturated liquid
Parameters
----------
T : float
Temperature, [K]
Returns
-------
rho : float
Saturated liquid density, [kg/m³]
References
----------
IAPWS, Revised Supplementary Release on Saturation Properties of
Ordinary Water Substance September 1992,
http://www.iapws.org/relguide/Supp-sat.html, Eq.2
"""
eq = cls._rhoL["eq"]
Tita = 1-T/cls.Tc
if eq == 2:
Tita = Tita**(1./3)
suma = 0
for n, x in zip(cls._rhoL["ao"], cls._rhoL["exp"]):
suma += n*Tita**x
Pr = suma+1
rho = Pr*cls.rhoc
return rho
|
python
|
{
"resource": ""
}
|
q4835
|
MEoS._Vapor_Density
|
train
|
def _Vapor_Density(cls, T):
"""Auxiliary equation for the density of saturated vapor
Parameters
----------
T : float
Temperature, [K]
Returns
-------
rho : float
Saturated vapor density, [kg/m³]
References
----------
IAPWS, Revised Supplementary Release on Saturation Properties of
Ordinary Water Substance September 1992,
http://www.iapws.org/relguide/Supp-sat.html, Eq.3
"""
eq = cls._rhoG["eq"]
Tita = 1-T/cls.Tc
if eq == 4:
Tita = Tita**(1./3)
suma = 0
for n, x in zip(cls._rhoG["ao"], cls._rhoG["exp"]):
suma += n*Tita**x
Pr = exp(suma)
rho = Pr*cls.rhoc
return rho
|
python
|
{
"resource": ""
}
|
q4836
|
_fugacity
|
train
|
def _fugacity(T, P, x):
    """Fugacity equation for humid air

    Parameters
    ----------
    T : float
        Temperature, [K]
    P : float
        Pressure, [MPa]
    x : float
        Mole fraction of water-vapor, [-]

    Returns
    -------
    fv : float
        Fugacity of the water-vapor component, [MPa]

    Notes
    ------
    Raise :class:`NotImplementedError` if input isn't in range of validity:
        * 193 ≤ T ≤ 473
        * 0 ≤ P ≤ 5
        * 0 ≤ x ≤ 1
    Really the xmax is the xsaturation but isn't implemented

    Examples
    --------
    >>> _fugacity(300, 1, 0.1)
    0.0884061686

    References
    ----------
    IAPWS, Guideline on a Virial Equation for the Fugacity of H2O in Humid Air,
    http://www.iapws.org/relguide/VirialFugacity.html
    """
    # Check input parameters
    if T < 193 or T > 473 or P < 0 or P > 5 or x < 0 or x > 1:
        raise(NotImplementedError("Input not in range of validity"))
    R = 8.314462  # Universal gas constant, J/molK
    # Virial coefficients (pure and cross, second and third order)
    vir = _virial(T)
    # Mixture second-virial combination for the water component, Eq 3
    beta = x*(2-x)*vir["Bww"]+(1-x)**2*(2*vir["Baw"]-vir["Baa"])
    # Mixture third-virial combination for the water component, Eq 4
    gamma = x**2*(3-2*x)*vir["Cwww"] + \
        (1-x)**2*(6*x*vir["Caww"]+3*(1-2*x)*vir["Caaw"]-2*(1-x)*vir["Caaa"]) +\
        (x**2*vir["Bww"]+2*x*(1-x)*vir["Baw"]+(1-x)**2*vir["Baa"]) * \
        (x*(3*x-4)*vir["Bww"]+2*(1-x)*(3*x-2)*vir["Baw"]+3*(1-x)**2*vir["Baa"])
    # Fugacity from the truncated virial expansion, Eq 2 (P in Pa inside exp)
    fv = x*P*exp(beta*P*1e6/R/T+0.5*gamma*(P*1e6/R/T)**2)
    return fv
|
python
|
{
"resource": ""
}
|
q4837
|
MEoSBlend._bubbleP
|
train
|
def _bubbleP(cls, T):
"""Using ancillary equation return the pressure of bubble point"""
c = cls._blend["bubble"]
Tj = cls._blend["Tj"]
Pj = cls._blend["Pj"]
Tita = 1-T/Tj
suma = 0
for i, n in zip(c["i"], c["n"]):
suma += n*Tita**(i/2.)
P = Pj*exp(Tj/T*suma)
return P
|
python
|
{
"resource": ""
}
|
q4838
|
HumidAir._eq
|
train
|
def _eq(self, T, P):
    """Procedure for calculate the composition in saturation state

    Parameters
    ----------
    T : float
        Temperature [K]
    P : float
        Pressure [MPa]

    Returns
    -------
    Asat : float
        Saturation mass fraction of dry air in humid air [kg/kg]
    """
    # Gibbs energy of the condensed water phase: ice below the triple
    # point temperature, liquid water (IAPWS-95) above it
    if T <= 273.16:
        ice = _Ice(T, P)
        gw = ice["g"]
    else:
        water = IAPWS95(T=T, P=P)
        gw = water.g

    def f(parr):
        # Unknowns: humid-air density and dry-air mass fraction
        rho, a = parr
        if a > 1:
            a = 1
        fa = self._fav(T, rho, a)
        # Chemical potential of water in the humid-air phase
        muw = fa["fir"]+rho*fa["fird"]-a*fa["fira"]
        # Conditions: equal chemical potential of water in both phases,
        # and the humid-air EoS reproducing the imposed pressure
        return gw-muw, rho**2*fa["fird"]/1000-P

    rinput = fsolve(f, [1, 0.95], full_output=True)
    # Second element of the solution vector is the air fraction
    Asat = rinput[0][1]
    return Asat
|
python
|
{
"resource": ""
}
|
q4839
|
HumidAir._prop
|
train
|
def _prop(self, T, rho, fav):
"""Thermodynamic properties of humid air
Parameters
----------
T : float
Temperature, [K]
rho : float
Density, [kg/m³]
fav : dict
dictionary with helmholtz energy and derivatives
Returns
-------
prop : dict
Dictionary with thermodynamic properties of humid air:
* P: Pressure, [MPa]
* s: Specific entropy, [kJ/kgK]
* cp: Specific isobaric heat capacity, [kJ/kgK]
* h: Specific enthalpy, [kJ/kg]
* g: Specific gibbs energy, [kJ/kg]
* alfav: Thermal expansion coefficient, [1/K]
* betas: Isentropic T-P coefficient, [K/MPa]
* xkappa: Isothermal compressibility, [1/MPa]
* ks: Isentropic compressibility, [1/MPa]
* w: Speed of sound, [m/s]
References
----------
IAPWS, Guideline on an Equation of State for Humid Air in Contact with
Seawater and Ice, Consistent with the IAPWS Formulation 2008 for the
Thermodynamic Properties of Seawater, Table 5,
http://www.iapws.org/relguide/SeaAir.html
"""
prop = {}
prop["P"] = rho**2*fav["fird"]/1000 # Eq T1
prop["s"] = -fav["firt"] # Eq T2
prop["cp"] = -T*fav["firtt"]+T*rho*fav["firdt"]**2/( # Eq T3
2*fav["fird"]+rho*fav["firdd"])
prop["h"] = fav["fir"]-T*fav["firt"]+rho*fav["fird"] # Eq T4
prop["g"] = fav["fir"]+rho*fav["fird"] # Eq T5
prop["alfav"] = fav["firdt"]/(2*fav["fird"]+rho*fav["firdd"]) # Eq T6
prop["betas"] = 1000*fav["firdt"]/rho/( # Eq T7
rho*fav["firdt"]**2-fav["firtt"]*(2*fav["fird"]+rho*fav["firdd"]))
prop["xkappa"] = 1e3/(rho**2*(2*fav["fird"]+rho*fav["firdd"])) # Eq T8
prop["ks"] = 1000*fav["firtt"]/rho**2/( # Eq T9
fav["firtt"]*(2*fav["fird"]+rho*fav["firdd"])-rho*fav["firdt"]**2)
prop["w"] = (rho**2*1000*(fav["firtt"]*fav["firdd"]-fav["firdt"]**2) /
fav["firtt"]+2*rho*fav["fird"]*1000)**0.5 # Eq T10
return prop
|
python
|
{
"resource": ""
}
|
q4840
|
HumidAir._coligative
|
train
|
def _coligative(self, rho, A, fav):
    """Miscelaneous properties of humid air.

    Parameters
    ----------
    rho : float
        Density, [kg/m³]
    A : float
        Mass fraction of dry air in humid air, [kg/kg]
    fav : dict
        dictionary with helmholtz energy and derivatives

    Returns
    -------
    prop : dict
        Dictionary with calculated properties:
            * mu: Relative chemical potential, [kJ/kg]
            * muw: Chemical potential of water, [kJ/kg]
            * M: Molar mass of humid air, [g/mol]
            * HR: Humidity ratio, [-]
            * xa: Mole fraction of dry air, [-]
            * xw: Mole fraction of water, [-]

    References
    ----------
    IAPWS, Guideline on an Equation of State for Humid Air in Contact with
    Seawater and Ice, Consistent with the IAPWS Formulation 2008 for the
    Thermodynamic Properties of Seawater, Table 12,
    http://www.iapws.org/relguide/SeaAir.html
    """
    # Mass fraction -> mole fraction conversion for the dry-air component
    xa = A*Mw/Ma/(1 - A*(1 - Mw/Ma))
    return {
        "mu": fav["fira"],
        "muw": fav["fir"] + rho*fav["fird"] - A*fav["fira"],
        "M": 1/((1-A)/Mw + A/Ma),
        "HR": 1/A - 1,
        "xa": xa,
        "xw": 1 - xa,
    }
|
python
|
{
"resource": ""
}
|
q4841
|
HumidAir._fav
|
train
|
def _fav(self, T, rho, A):
    r"""Specific Helmholtz energy of humid air and derivatives

    Parameters
    ----------
    T : float
        Temperature, [K]
    rho : float
        Density, [kg/m³]
    A : float
        Mass fraction of dry air in humid air, [kg/kg]

    Returns
    -------
    prop : dict
        Dictionary with helmholtz energy and derivatives:
            * fir, [kJ/kg]
            * fira: :math:`\left.\frac{\partial f_{av}}{\partial A}\right|_{T,\rho}`, [kJ/kg]
            * firt: :math:`\left.\frac{\partial f_{av}}{\partial T}\right|_{A,\rho}`, [kJ/kgK]
            * fird: :math:`\left.\frac{\partial f_{av}}{\partial \rho}\right|_{A,T}`, [kJ/m³kg²]
            * firaa: :math:`\left.\frac{\partial^2 f_{av}}{\partial A^2}\right|_{T, \rho}`, [kJ/kg]
            * firat: :math:`\left.\frac{\partial^2 f_{av}}{\partial A \partial T}\right|_{\rho}`, [kJ/kgK]
            * firad: :math:`\left.\frac{\partial^2 f_{av}}{\partial A \partial \rho}\right|_T`, [kJ/m³kg²]
            * firtt: :math:`\left.\frac{\partial^2 f_{av}}{\partial T^2}\right|_{A, \rho}`, [kJ/kgK²]
            * firdt: :math:`\left.\frac{\partial^2 f_{av}}{\partial \rho \partial T}\right|_A`, [kJ/m³kg²K]
            * firdd: :math:`\left.\frac{\partial^2 f_{av}}{\partial \rho^2}\right|_{A, T}`, [kJ/m⁶kg³]

    References
    ----------
    IAPWS, Guideline on an Equation of State for Humid Air in Contact with
    Seawater and Ice, Consistent with the IAPWS Formulation 2008 for the
    Thermodynamic Properties of Seawater, Table 6,
    http://www.iapws.org/relguide/SeaAir.html
    """
    # Water-vapor contribution evaluated at its partial density
    water = IAPWS95()
    rhov = (1-A)*rho
    fv = water._derivDimensional(rhov, T)
    # Dry-air contribution evaluated at its partial density
    air = Air()
    rhoa = A*rho
    fa = air._derivDimensional(rhoa, T)
    # Air-water cross (mixing) contribution
    fmix = self._fmix(T, rho, A)
    prop = {}
    # Eq T11
    prop["fir"] = (1-A)*fv["fir"] + A*fa["fir"] + fmix["fir"]
    # Eq T12
    prop["fira"] = -fv["fir"]-rhov*fv["fird"]+fa["fir"] + \
        rhoa*fa["fird"]+fmix["fira"]
    # Eq T13
    prop["firt"] = (1-A)*fv["firt"]+A*fa["firt"]+fmix["firt"]
    # Eq T14
    prop["fird"] = (1-A)**2*fv["fird"]+A**2*fa["fird"]+fmix["fird"]
    # Eq T15
    prop["firaa"] = rho*(2*fv["fird"]+rhov*fv["firdd"] +
                         2*fa["fird"]+rhoa*fa["firdd"])+fmix["firaa"]
    # Eq T16
    prop["firat"] = -fv["firt"]-rhov*fv["firdt"]+fa["firt"] + \
        rhoa*fa["firdt"]+fmix["firat"]
    # Eq T17
    prop["firad"] = -(1-A)*(2*fv["fird"]+rhov*fv["firdd"]) + \
        A*(2*fa["fird"]+rhoa*fa["firdd"])+fmix["firad"]
    # Eq T18
    prop["firtt"] = (1-A)*fv["firtt"]+A*fa["firtt"]+fmix["firtt"]
    # Eq T19
    prop["firdt"] = (1-A)**2*fv["firdt"]+A**2*fa["firdt"]+fmix["firdt"]
    # Eq T20
    prop["firdd"] = (1-A)**3*fv["firdd"]+A**3*fa["firdd"]+fmix["firdd"]
    return prop
|
python
|
{
"resource": ""
}
|
q4842
|
_Sublimation_Pressure
|
train
|
def _Sublimation_Pressure(T):
    """Sublimation Pressure correlation.

    Parameters
    ----------
    T : float
        Temperature, [K]

    Returns
    -------
    P : float
        Pressure at sublimation line, [MPa]

    Notes
    ------
    Raise :class:`NotImplementedError` if input isn't in limit:
        * 50 ≤ T ≤ 273.16

    Examples
    --------
    >>> _Sublimation_Pressure(230)
    8.947352740189152e-06

    References
    ----------
    IAPWS, Revised Release on the Pressure along the Melting and Sublimation
    Curves of Ordinary Water Substance, http://iapws.org/relguide/MeltSub.html.
    """
    if not 50 <= T <= 273.16:
        raise NotImplementedError("Incoming out of bound")
    theta = T/Tt
    a = [-0.212144006e2, 0.273203819e2, -0.61059813e1]
    expo = [0.333333333e-2, 1.20666667, 1.70333333]
    acc = sum(ai*theta**ei for ai, ei in zip(a, expo))
    # ln(P/Pt) = (1/theta)·Σ aᵢ·θ^eᵢ referenced to the triple point
    return Pt*exp(acc/theta)
|
python
|
{
"resource": ""
}
|
q4843
|
_Melting_Pressure
|
train
|
def _Melting_Pressure(T, ice="Ih"):
    """Melting Pressure correlation.

    Parameters
    ----------
    T : float
        Temperature, [K]
    ice: string
        Type of ice: Ih, III, V, VI, VII.
        Below 273.15 is a mandatory input, the ice Ih is the default value.
        Above 273.15, the ice type is unnecesary.

    Returns
    -------
    P : float
        Pressure at melting line, [MPa]

    Notes
    ------
    Raise :class:`NotImplementedError` if input isn't in limit:
        * 251.165 ≤ T ≤ 715

    References
    ----------
    IAPWS, Revised Release on the Pressure along the Melting and Sublimation
    Curves of Ordinary Water Substance, http://iapws.org/relguide/MeltSub.html.
    """
    if ice == "Ih" and 251.165 <= T <= 273.16:
        # Ice Ih, referenced to the triple point (Tt, Pt)
        theta = T/Tt
        coef = [0.119539337e7, 0.808183159e5, 0.33382686e4]
        expo = [3., 0.2575e2, 0.10375e3]
        return Pt*(1 + sum(a*(1 - theta**e) for a, e in zip(coef, expo)))
    if ice == "III" and 251.165 < T <= 256.164:
        # Ice III
        return 208.566*(1 - 0.299948*(1 - (T/251.165)**60.))
    if (ice == "V" and 256.164 < T <= 273.15) or 273.15 < T <= 273.31:
        # Ice V
        return 350.100*(1 - 1.18721*(1 - (T/256.164)**8.))
    if 273.31 < T <= 355:
        # Ice VI
        return 632.400*(1 - 1.07476*(1 - (T/273.31)**4.6))
    if 355. < T <= 715:
        # Ice VII (exponential form)
        theta = T/355
        return 2216.000*exp(1.73683*(1 - 1./theta)
                            - 0.544606e-1*(1 - theta**5)
                            + 0.806106e-7*(1 - theta**22))
    raise NotImplementedError("Incoming out of bound")
|
python
|
{
"resource": ""
}
|
q4844
|
_Tension
|
train
|
def _Tension(T):
    """Equation for the surface tension of ordinary water.

    Parameters
    ----------
    T : float
        Temperature, [K]

    Returns
    -------
    sigma : float
        Surface tension, [N/m]

    Notes
    ------
    Raise :class:`NotImplementedError` if input isn't in limit:
        * 248.15 ≤ T ≤ 647
        * Estrapolate to -25ºC in supercooled liquid metastable state

    Examples
    --------
    >>> _Tension(300)
    0.0716859625
    >>> _Tension(450)
    0.0428914992

    References
    ----------
    IAPWS, Revised Release on Surface Tension of Ordinary Water Substance
    June 2014, http://www.iapws.org/relguide/Surf-H2O.html
    """
    if not 248.15 <= T <= Tc:
        raise NotImplementedError("Incoming out of bound")
    tau = 1 - T/Tc
    # sigma = B·tau^mu·(1 + b·tau) with B=235.8 mN/m, mu=1.256, b=-0.625
    return 235.8e-3*tau**1.256*(1 - 0.625*tau)
|
python
|
{
"resource": ""
}
|
q4845
|
_Dielectric
|
train
|
def _Dielectric(rho, T):
    """Equation for the Dielectric constant

    Parameters
    ----------
    rho : float
        Density, [kg/m³]
    T : float
        Temperature, [K]

    Returns
    -------
    epsilon : float
        Dielectric constant, [-]

    Notes
    ------
    Raise :class:`NotImplementedError` if input isn't in limit:
        * 238 ≤ T ≤ 1200

    Examples
    --------
    >>> _Dielectric(999.242866, 298.15)
    78.5907250
    >>> _Dielectric(26.0569558, 873.15)
    1.12620970

    References
    ----------
    IAPWS, Release on the Static Dielectric Constant of Ordinary Water
    Substance for Temperatures from 238 K to 873 K and Pressures up to 1000
    MPa, http://www.iapws.org/relguide/Dielec.html
    """
    # Check input parameters
    if T < 238 or T > 1200:
        raise NotImplementedError("Incoming out of bound")
    k = 1.380658e-23  # Boltzmann constant, J/K
    Na = 6.0221367e23  # Avogadro number, 1/mol
    alfa = 1.636e-40  # Mean molecular polarizability, C²/Jm²
    epsilon0 = 8.854187817e-12  # Permittivity of free space, C²/Jm
    mu = 6.138e-30  # Molecular dipole moment, Cm
    d = rho/rhoc
    Tr = Tc/T
    # Coefficients of the Harris-Alder g-factor correlation; the last
    # entry (index 11) belongs to the separate critical-like term
    I = [1, 1, 1, 2, 3, 3, 4, 5, 6, 7, 10, None]
    J = [0.25, 1, 2.5, 1.5, 1.5, 2.5, 2, 2, 5, 0.5, 10, None]
    n = [0.978224486826, -0.957771379375, 0.237511794148, 0.714692244396,
         -0.298217036956, -0.108863472196, .949327488264e-1, -.980469816509e-2,
         .165167634970e-4, .937359795772e-4, -.12317921872e-9,
         .196096504426e-2]
    # Kirkwood correlation factor g
    g = 1+n[11]*d/(Tc/228/Tr-1)**1.2
    for i in range(11):
        g += n[i]*d**I[i]*Tr**J[i]
    # Harris-Alder equation solved for the dielectric constant
    A = Na*mu**2*rho*g/M*1000/epsilon0/k/T
    B = Na*alfa*rho/3/M*1000/epsilon0
    e = (1+A+5*B+(9+2*A+18*B+A**2+10*A*B+9*B**2)**0.5)/4/(1-B)
    return e
|
python
|
{
"resource": ""
}
|
q4846
|
_Refractive
|
train
|
def _Refractive(rho, T, l=0.5893):
"""Equation for the refractive index
Parameters
----------
rho : float
Density, [kg/m³]
T : float
Temperature, [K]
l : float, optional
Light Wavelength, [μm]
Returns
-------
n : float
Refractive index, [-]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 0 ≤ ρ ≤ 1060
* 261.15 ≤ T ≤ 773.15
* 0.2 ≤ λ ≤ 1.1
Examples
--------
>>> _Refractive(997.047435, 298.15, 0.2265)
1.39277824
>>> _Refractive(30.4758534, 773.15, 0.5893)
1.00949307
References
----------
IAPWS, Release on the Refractive Index of Ordinary Water Substance as a
Function of Wavelength, Temperature and Pressure,
http://www.iapws.org/relguide/rindex.pdf
"""
# Check input parameters
if rho < 0 or rho > 1060 or T < 261.15 or T > 773.15 or l < 0.2 or l > 1.1:
raise NotImplementedError("Incoming out of bound")
Lir = 5.432937
Luv = 0.229202
d = rho/1000.
Tr = T/273.15
L = l/0.589
a = [0.244257733, 0.974634476e-2, -0.373234996e-2, 0.268678472e-3,
0.158920570e-2, 0.245934259e-2, 0.900704920, -0.166626219e-1]
A = d*(a[0]+a[1]*d+a[2]*Tr+a[3]*L**2*Tr+a[4]/L**2+a[5]/(L**2-Luv**2)+a[6]/(
L**2-Lir**2)+a[7]*d**2)
return ((2*A+1)/(1-A))**0.5
|
python
|
{
"resource": ""
}
|
q4847
|
_Kw
|
train
|
def _Kw(rho, T):
"""Equation for the ionization constant of ordinary water
Parameters
----------
rho : float
Density, [kg/m³]
T : float
Temperature, [K]
Returns
-------
pKw : float
Ionization constant in -log10(kw), [-]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 0 ≤ ρ ≤ 1250
* 273.15 ≤ T ≤ 1073.15
Examples
--------
>>> _Kw(1000, 300)
13.906565
References
----------
IAPWS, Release on the Ionization Constant of H2O,
http://www.iapws.org/relguide/Ionization.pdf
"""
# Check input parameters
if rho < 0 or rho > 1250 or T < 273.15 or T > 1073.15:
raise NotImplementedError("Incoming out of bound")
# The internal method of calculation use rho in g/cm³
d = rho/1000.
# Water molecular weight different
Mw = 18.015268
gamma = [6.1415e-1, 4.825133e4, -6.770793e4, 1.01021e7]
pKg = 0
for i, g in enumerate(gamma):
pKg += g/T**i
Q = d*exp(-0.864671+8659.19/T-22786.2/T**2*d**(2./3))
pKw = -12*(log10(1+Q)-Q/(Q+1)*d*(0.642044-56.8534/T-0.375754*d)) + \
pKg+2*log10(Mw/1000)
return pKw
|
python
|
{
"resource": ""
}
|
q4848
|
_D2O_Viscosity
|
train
|
def _D2O_Viscosity(rho, T):
"""Equation for the Viscosity of heavy water
Parameters
----------
rho : float
Density, [kg/m³]
T : float
Temperature, [K]
Returns
-------
μ : float
Viscosity, [Pa·s]
Examples
--------
>>> _D2O_Viscosity(998, 298.15)
0.0008897351001498108
>>> _D2O_Viscosity(600, 873.15)
7.743019522728247e-05
References
----------
IAPWS, Revised Release on Viscosity and Thermal Conductivity of Heavy
Water Substance, http://www.iapws.org/relguide/TransD2O-2007.pdf
"""
Tr = T/643.847
rhor = rho/358.0
no = [1.0, 0.940695, 0.578377, -0.202044]
fi0 = Tr**0.5/sum([n/Tr**i for i, n in enumerate(no)])
Li = [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 0, 1, 2, 5, 0, 1, 2, 3, 0, 1, 3,
5, 0, 1, 5, 3]
Lj = [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
4, 5, 5, 5, 6]
Lij = [0.4864192, -0.2448372, -0.8702035, 0.8716056, -1.051126,
0.3458395, 0.3509007, 1.315436, 1.297752, 1.353448, -0.2847572,
-1.037026, -1.287846, -0.02148229, 0.07013759, 0.4660127,
0.2292075, -0.4857462, 0.01641220, -0.02884911, 0.1607171,
-.009603846, -.01163815, -.008239587, 0.004559914, -0.003886659]
arr = [lij*(1./Tr-1)**i*(rhor-1)**j for i, j, lij in zip(Li, Lj, Lij)]
fi1 = exp(rhor*sum(arr))
return 55.2651e-6*fi0*fi1
|
python
|
{
"resource": ""
}
|
q4849
|
_D2O_ThCond
|
train
|
def _D2O_ThCond(rho, T):
"""Equation for the thermal conductivity of heavy water
Parameters
----------
rho : float
Density, [kg/m³]
T : float
Temperature, [K]
Returns
-------
k : float
Thermal conductivity, [W/mK]
Examples
--------
>>> _D2O_ThCond(998, 298.15)
0.6077128675880629
>>> _D2O_ThCond(0, 873.15)
0.07910346589648833
References
----------
IAPWS, Revised Release on Viscosity and Thermal Conductivity of Heavy
Water Substance, http://www.iapws.org/relguide/TransD2O-2007.pdf
"""
rhor = rho/358
Tr = T/643.847
tau = Tr/(abs(Tr-1.1)+1.1)
no = [1.0, 37.3223, 22.5485, 13.0465, 0.0, -2.60735]
Lo = sum([Li*Tr**i for i, Li in enumerate(no)])
nr = [483.656, -191.039, 73.0358, -7.57467]
Lr = -167.31*(1-exp(-2.506*rhor))+sum(
[Li*rhor**(i+1) for i, Li in enumerate(nr)])
f1 = exp(0.144847*Tr-5.64493*Tr**2)
f2 = exp(-2.8*(rhor-1)**2)-0.080738543*exp(-17.943*(rhor-0.125698)**2)
f3 = 1+exp(60*(tau-1)+20)
f4 = 1+exp(100*(tau-1)+15)
Lc = 35429.6*f1*f2*(1+f2**2*(5e9*f1**4/f3+3.5*f2/f4))
Ll = -741.112*f1**1.2*(1-exp(-(rhor/2.5)**10))
return 0.742128e-3*(Lo+Lr+Lc+Ll)
|
python
|
{
"resource": ""
}
|
q4850
|
_D2O_Sublimation_Pressure
|
train
|
def _D2O_Sublimation_Pressure(T):
"""Sublimation Pressure correlation for heavy water
Parameters
----------
T : float
Temperature, [K]
Returns
-------
P : float
Pressure at sublimation line, [MPa]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 210 ≤ T ≤ 276.969
Examples
--------
>>> _Sublimation_Pressure(245)
3.27390934e-5
References
----------
IAPWS, Revised Release on the IAPWS Formulation 2017 for the Thermodynamic
Properties of Heavy Water, http://www.iapws.org/relguide/Heavy.html.
"""
if 210 <= T <= 276.969:
Tita = T/276.969
suma = 0
ai = [-0.1314226e2, 0.3212969e2]
ti = [-1.73, -1.42]
for a, t in zip(ai, ti):
suma += a*(1-Tita**t)
return exp(suma)*0.00066159
else:
raise NotImplementedError("Incoming out of bound")
|
python
|
{
"resource": ""
}
|
q4851
|
_D2O_Melting_Pressure
|
train
|
def _D2O_Melting_Pressure(T, ice="Ih"):
"""Melting Pressure correlation for heavy water
Parameters
----------
T : float
Temperature, [K]
ice: string
Type of ice: Ih, III, V, VI, VII.
Below 276.969 is a mandatory input, the ice Ih is the default value.
Above 276.969, the ice type is unnecesary.
Returns
-------
P : float
Pressure at melting line, [MPa]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 254.415 ≤ T ≤ 315
Examples
--------
>>> _D2O__Melting_Pressure(260)
8.947352740189152e-06
>>> _D2O__Melting_Pressure(254, "III")
268.6846466336108
References
----------
IAPWS, Revised Release on the Pressure along the Melting and Sublimation
Curves of Ordinary Water Substance, http://iapws.org/relguide/MeltSub.html.
"""
if ice == "Ih" and 254.415 <= T <= 276.969:
# Ice Ih, Eq 9
Tita = T/276.969
ai = [-0.30153e5, 0.692503e6]
ti = [5.5, 8.2]
suma = 1
for a, t in zip(ai, ti):
suma += a*(1-Tita**t)
P = suma*0.00066159
elif ice == "III" and 254.415 < T <= 258.661:
# Ice III, Eq 10
Tita = T/254.415
P = 222.41*(1-0.802871*(1-Tita**33))
elif ice == "V" and 258.661 < T <= 275.748:
# Ice V, Eq 11
Tita = T/258.661
P = 352.19*(1-1.280388*(1-Tita**7.6))
elif (ice == "VI" and 275.748 < T <= 276.969) or 276.969 < T <= 315:
# Ice VI
Tita = T/275.748
P = 634.53*(1-1.276026*(1-Tita**4))
else:
raise NotImplementedError("Incoming out of bound")
return P
|
python
|
{
"resource": ""
}
|
q4852
|
getphase
|
train
|
def getphase(Tc, Pc, T, P, x, region):
    """Return fluid phase string name

    Parameters
    ----------
    Tc : float
        Critical temperature, [K]
    Pc : float
        Critical pressure, [MPa]
    T : float
        Temperature, [K]
    P : float
        Pressure, [MPa]
    x : float
        Quality, [-]
    region : int
        Region number, used only for IAPWS97 region definition

    Returns
    -------
    phase : str
        Phase name
    """
    # Avoid round problem
    P = round(P, 8)
    T = round(T, 8)
    if P > Pc and T > Tc:
        phase = "Supercritical fluid"
    elif T > Tc:
        phase = "Gas"
    elif P > Pc:
        phase = "Compressible liquid"
    elif P == Pc and T == Tc:
        phase = "Critical point"
    elif region == 4 and x == 1:
        phase = "Saturated vapor"
    elif region == 4 and x == 0:
        phase = "Saturated liquid"
    elif region == 4:
        phase = "Two phases"
    elif x == 1:
        phase = "Vapour"
    elif x == 0:
        phase = "Liquid"
    else:
        # 0 < x < 1 without an explicit region-4 flag (e.g. helmholtz
        # formulations that don't set ``region``): previously no branch
        # matched and the function raised UnboundLocalError on return
        phase = "Two phases"
    return phase
|
python
|
{
"resource": ""
}
|
q4853
|
Region2_cp0
|
train
|
def Region2_cp0(Tr, Pr):
    """Ideal properties for Region 2.

    Parameters
    ----------
    Tr : float
        Reduced temperature, [-]
    Pr : float
        Reduced pressure, [-]

    Returns
    -------
    prop : tuple
        Ideal Gibbs energy and partial derivatives:
            * go: Ideal Specific Gibbs energy [kJ/kg]
            * gop: ∂g/∂P|T
            * gopp: ∂²g/∂P²|T
            * got: ∂g/∂T|P
            * gott: ∂²g/∂T²|P
            * gopt: ∂²g/∂T∂P

    References
    ----------
    IAPWS, Revised Release on the IAPWS Industrial Formulation 1997 for the
    Thermodynamic Properties of Water and Steam August 2007,
    http://www.iapws.org/relguide/IF97-Rev.html, Eq 16
    """
    Jo = [0, 1, -5, -4, -3, -2, -1, 2, 3]
    no = [-0.96927686500217E+01, 0.10086655968018E+02, -0.56087911283020E-02,
          0.71452738081455E-01, -0.40710498223928E+00, 0.14240819171444E+01,
          -0.43839511319450E+01, -0.28408632460772E+00, 0.21268463753307E-01]
    # gamma0 = ln(Pr) + sum n_i Tr^J_i and its tau derivatives
    go = log(Pr) + sum(c*Tr**j for j, c in zip(Jo, no))
    got = sum(c*j*Tr**(j - 1) for j, c in zip(Jo, no))
    gott = sum(c*j*(j - 1)*Tr**(j - 2) for j, c in zip(Jo, no))
    # Pressure derivatives of the ideal part are analytic; cross term is 0
    return go, 1/Pr, -1/Pr**2, got, gott, 0
|
python
|
{
"resource": ""
}
|
q4854
|
_Region4
|
train
|
def _Region4(P, x):
    """Basic equation for region 4 (two-phase, saturation line).

    Parameters
    ----------
    P : float
        Pressure, [MPa]
    x : float
        Vapor quality, [-]

    Returns
    -------
    prop : dict
        Dict with calculated properties. The available properties are:
            * T: Saturated temperature, [K]
            * P: Saturated pressure, [MPa]
            * x: Vapor quality, [-]
            * v: Specific volume, [m³/kg]
            * h: Specific enthalpy, [kJ/kg]
            * s: Specific entropy, [kJ/kgK]
    """
    T = _TSat_P(P)
    if T > 623.15:
        # Above the region 1/3 boundary both saturated phases lie in
        # region 3, entered through the backward saturated-volume equation
        liquid = _Region3(1./_Backward3_sat_v_P(P, T, 0), T)
        vapor = _Region3(1./_Backward3_sat_v_P(P, T, 1), T)
    else:
        liquid = _Region1(T, P)
        vapor = _Region2(T, P)
    prop = {"T": T, "P": P, "x": x, "region": 4}
    # Lever rule between the saturated phase values
    for key in ("v", "h", "s"):
        prop[key] = liquid[key] + x*(vapor[key] - liquid[key])
    # Derivative properties are undefined inside the two-phase dome
    for key in ("cp", "cv", "w", "alfav", "kt"):
        prop[key] = None
    return prop
|
python
|
{
"resource": ""
}
|
q4855
|
_Bound_TP
|
train
|
def _Bound_TP(T, P):
    """Region definition for input T and P.

    Parameters
    ----------
    T : float
        Temperature, [K]
    P : float
        Pressure, [MPa]

    Returns
    -------
    region : int or None
        IAPWS-97 region code, None if out of the formulation range

    References
    ----------
    Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of
    Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer,
    2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.3
    """
    # High-temperature extension band
    if 1073.15 < T <= 2273.15 and Pmin <= P <= 50:
        return 5
    if Pmin <= P <= Ps_623:
        # Below the region-3 pressure the saturation line splits 1 and 2
        Tsat = _TSat_P(P)
        if 273.15 <= T <= Tsat:
            return 1
        if Tsat < T <= 1073.15:
            return 2
    elif Ps_623 < P <= 100:
        # Above it, the B23 line separates region 3 from region 2
        T_b23 = _t_P(P)
        if 273.15 <= T <= 623.15:
            return 1
        if 623.15 < T < T_b23:
            return 3
        if T_b23 <= T <= 1073.15:
            return 2
    return None
|
python
|
{
"resource": ""
}
|
q4856
|
_Bound_Ph
|
train
|
def _Bound_Ph(P, h):
    """Region definition for input P y h

    Parameters
    ----------
    P : float
        Pressure, [MPa]
    h : float
        Specific enthalpy, [kJ/kg]

    Returns
    -------
    region : float
        IAPWS-97 region code

    References
    ----------
    Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of
    Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer,
    2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.5
    """
    region = None
    if Pmin <= P <= Ps_623:
        # Below the region-3 pressure: saturation line splits 1 / 4 / 2
        h14 = _Region1(_TSat_P(P), P)["h"]  # saturated liquid enthalpy
        h24 = _Region2(_TSat_P(P), P)["h"]  # saturated vapor enthalpy
        h25 = _Region2(1073.15, P)["h"]     # region 2/5 boundary
        hmin = _Region1(273.15, P)["h"]
        hmax = _Region5(2273.15, P)["h"]
        if hmin <= h <= h14:
            region = 1
        elif h14 < h < h24:
            region = 4
        elif h24 <= h <= h25:
            region = 2
        elif h25 < h <= hmax:
            region = 5
    elif Ps_623 < P < Pc:
        # Subcritical but above Ps_623: region 3 appears between 1 and 2
        hmin = _Region1(273.15, P)["h"]
        h13 = _Region1(623.15, P)["h"]      # region 1/3 boundary
        h32 = _Region2(_t_P(P), P)["h"]     # B23 boundary
        h25 = _Region2(1073.15, P)["h"]
        hmax = _Region5(2273.15, P)["h"]
        if hmin <= h <= h13:
            region = 1
        elif h13 < h < h32:
            # Inside the band, the saturation pressure from h decides
            # between two-phase (4) and single-phase region 3
            try:
                p34 = _PSat_h(h)
            except NotImplementedError:
                p34 = Pc
            if P < p34:
                region = 4
            else:
                region = 3
        elif h32 <= h <= h25:
            region = 2
        elif h25 < h <= hmax:
            region = 5
    elif Pc <= P <= 100:
        # Supercritical pressures: no two-phase region possible
        hmin = _Region1(273.15, P)["h"]
        h13 = _Region1(623.15, P)["h"]
        h32 = _Region2(_t_P(P), P)["h"]
        h25 = _Region2(1073.15, P)["h"]
        hmax = _Region5(2273.15, P)["h"]
        if hmin <= h <= h13:
            region = 1
        elif h13 < h < h32:
            region = 3
        elif h32 <= h <= h25:
            region = 2
        elif P <= 50 and h25 <= h <= hmax:
            # Region 5 only extends to 50 MPa
            region = 5
    return region
|
python
|
{
"resource": ""
}
|
q4857
|
_Bound_Ps
|
train
|
def _Bound_Ps(P, s):
    """Region definition for input P and s

    Parameters
    ----------
    P : float
        Pressure, [MPa]
    s : float
        Specific entropy, [kJ/kgK]

    Returns
    -------
    region : float
        IAPWS-97 region code

    References
    ----------
    Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of
    Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer,
    2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.9
    """
    region = None
    if Pmin <= P <= Ps_623:
        # Below the region-3 pressure: saturation line splits 1 / 4 / 2
        smin = _Region1(273.15, P)["s"]
        s14 = _Region1(_TSat_P(P), P)["s"]  # saturated liquid entropy
        s24 = _Region2(_TSat_P(P), P)["s"]  # saturated vapor entropy
        s25 = _Region2(1073.15, P)["s"]     # region 2/5 boundary
        smax = _Region5(2273.15, P)["s"]
        if smin <= s <= s14:
            region = 1
        elif s14 < s < s24:
            region = 4
        elif s24 <= s <= s25:
            region = 2
        elif s25 < s <= smax:
            region = 5
    elif Ps_623 < P < Pc:
        # Subcritical but above Ps_623: region 3 appears between 1 and 2
        smin = _Region1(273.15, P)["s"]
        s13 = _Region1(623.15, P)["s"]      # region 1/3 boundary
        s32 = _Region2(_t_P(P), P)["s"]     # B23 boundary
        s25 = _Region2(1073.15, P)["s"]
        smax = _Region5(2273.15, P)["s"]
        if smin <= s <= s13:
            region = 1
        elif s13 < s < s32:
            # Saturation pressure from s decides between two-phase (4)
            # and single-phase region 3
            try:
                p34 = _PSat_s(s)
            except NotImplementedError:
                p34 = Pc
            if P < p34:
                region = 4
            else:
                region = 3
        elif s32 <= s <= s25:
            region = 2
        elif s25 < s <= smax:
            region = 5
    elif Pc <= P <= 100:
        # Supercritical pressures: no two-phase region possible
        smin = _Region1(273.15, P)["s"]
        s13 = _Region1(623.15, P)["s"]
        s32 = _Region2(_t_P(P), P)["s"]
        s25 = _Region2(1073.15, P)["s"]
        smax = _Region5(2273.15, P)["s"]
        if smin <= s <= s13:
            region = 1
        elif s13 < s < s32:
            region = 3
        elif s32 <= s <= s25:
            region = 2
        elif P <= 50 and s25 <= s <= smax:
            # Region 5 only extends to 50 MPa
            region = 5
    return region
|
python
|
{
"resource": ""
}
|
q4858
|
IAPWS97.calculable
|
train
|
def calculable(self):
    """Return the identifier of the active input-pair mode, or "".

    Inspects ``self.kwargs`` and records the first fully specified
    definition pair (e.g. "TP", "Ph") in ``self._thermo``.  Pairs are
    tested in a fixed priority order.
    """
    kw = self.kwargs
    self._thermo = ""
    mode = ""
    if kw["T"] and kw["P"]:
        mode = "TP"
    elif kw["P"] and kw["h"] is not None:
        mode = "Ph"
    elif kw["P"] and kw["s"] is not None:
        mode = "Ps"
    # TODO: Add other pairs definitions options
    # elif kw["P"] and kw["v"]:
    #     mode = "Pv"
    # elif kw["T"] and kw["s"] is not None:
    #     mode = "Ts"
    elif kw["h"] is not None and kw["s"] is not None:
        mode = "hs"
    elif kw["T"] and kw["x"] is not None:
        mode = "Tx"
    elif kw["P"] and kw["x"] is not None:
        mode = "Px"
    self._thermo = mode
    return mode
|
python
|
{
"resource": ""
}
|
q4859
|
Ttr
|
train
|
def Ttr(x):
    """Equation for the triple point of ammonia-water mixture

    Parameters
    ----------
    x : float
        Mole fraction of ammonia in mixture, [mol/mol]

    Returns
    -------
    Ttr : float
        Triple point temperature, [K]

    Notes
    ------
    Raise :class:`NotImplementedError` if input isn't in limit:

        * 0 <= x <= 1

    References
    ----------
    IAPWS, Guideline on the IAPWS Formulation 2001 for the Thermodynamic
    Properties of Ammonia-Water Mixtures,
    http://www.iapws.org/relguide/nh3h2o.pdf, Eq 9
    """
    # Reject out-of-range (and NaN) composition up front.
    if not 0 <= x <= 1:
        raise NotImplementedError("Incoming out of bound")
    # Piecewise correlation: each composition interval has its own fit.
    if x <= 0.33367:
        return 273.16*(1 - 0.3439823*x - 1.3274271*x**2 - 274.973*x**3)
    if x <= 0.58396:
        return 193.549*(1 - 4.987368*(x - 0.5)**2)
    if x <= 0.81473:
        return 194.38*(1 - 4.886151*(x - 2/3)**2 + 10.37298*(x - 2/3)**3)
    return 195.495*(1 - 0.323998*(1 - x) - 15.87560*(1 - x)**4)
|
python
|
{
"resource": ""
}
|
q4860
|
H2ONH3._prop
|
train
|
def _prop(self, rho, T, x):
    """Thermodynamic properties of ammonia-water mixtures

    Parameters
    ----------
    T : float
        Temperature [K]
    rho : float
        Density [kg/m³]
    x : float
        Mole fraction of ammonia in mixture [mol/mol]

    Returns
    -------
    prop : dict
        Dictionary with thermodynamic properties of ammonia-water mixtures:

            * M: Mixture molecular mass, [g/mol]
            * P: Pressure, [MPa]
            * u: Specific internal energy, [kJ/kg]
            * s: Specific entropy, [kJ/kgK]
            * h: Specific enthalpy, [kJ/kg]
            * a: Specific Helmholtz energy, [kJ/kg]
            * g: Specific gibbs energy, [kJ/kg]
            * cv: Specific isochoric heat capacity, [kJ/kgK]
            * cp: Specific isobaric heat capacity, [kJ/kgK]
            * w: Speed of sound, [m/s]
            * fugH2O: Fugacity of water, [-]
            * fugNH3: Fugacity of ammonia, [-]

    References
    ----------
    IAPWS, Guideline on the IAPWS Formulation 2001 for the Thermodynamic
    Properties of Ammonia-Water Mixtures,
    http://www.iapws.org/relguide/nh3h2o.pdf, Table 4
    """
    # FIXME: results are close but differ by ~1%; the residual discrepancy
    # (which shows up in P and involves only fird) has not been found yet.
    # Mixture molar mass from mole-fraction weighting of the pure fluids.
    M = (1-x)*IAPWS95.M + x*NH3.M
    # Specific gas constant of the mixture, [kJ/kgK].
    R = 8.314471/M
    # Ideal-gas part of the dimensionless Helmholtz energy + derivatives.
    phio = self._phi0(rho, T, x)
    fio = phio["fio"]
    tau0 = phio["tau"]
    fiot = phio["fiot"]
    fiott = phio["fiott"]
    # Residual part, including the composition-mixing term F.
    phir = self._phir(rho, T, x)
    fir = phir["fir"]
    tau = phir["tau"]
    delta = phir["delta"]
    firt = phir["firt"]
    firtt = phir["firtt"]
    fird = phir["fird"]
    firdd = phir["firdd"]
    firdt = phir["firdt"]
    F = phir["F"]
    prop = {}
    # Compressibility factor Z = 1 + delta*d(fir)/d(delta); used both for
    # pressure and the fugacity expressions below.
    Z = 1 + delta*fird
    prop["M"] = M
    prop["P"] = Z*R*T*rho/1000
    prop["u"] = R*T*(tau0*fiot + tau*firt)
    prop["s"] = R*(tau0*fiot + tau*firt - fio - fir)
    prop["h"] = R*T*(1+delta*fird+tau0*fiot+tau*firt)
    prop["g"] = prop["h"]-T*prop["s"]
    prop["a"] = prop["u"]-T*prop["s"]
    # Dimensionless isochoric heat capacity cv/R.
    cvR = -tau0**2*fiott - tau**2*firtt
    prop["cv"] = R*cvR
    prop["cp"] = R*(cvR+(1+delta*fird-delta*tau*firdt)**2 /
                    (1+2*delta*fird+delta**2*firdd))
    # Factor 1000 converts R*T from kJ/kg to J/kg so w comes out in m/s.
    prop["w"] = (R*T*1000*(1+2*delta*fird+delta**2*firdd +
                           (1+delta*fird-delta*tau*firdt)**2 / cvR))**0.5
    prop["fugH2O"] = Z*exp(fir+delta*fird-x*F)
    prop["fugNH3"] = Z*exp(fir+delta*fird+(1-x)*F)
    return prop
|
python
|
{
"resource": ""
}
|
q4861
|
_preprocess_and_rename_grid_attrs
|
train
|
def _preprocess_and_rename_grid_attrs(func, grid_attrs=None, **kwargs):
"""Call a custom preprocessing method first then rename grid attrs.
This wrapper is needed to generate a single function to pass to the
``preprocesss`` of xr.open_mfdataset. It makes sure that the
user-specified preprocess function is called on the loaded Dataset before
aospy's is applied. An example for why this might be needed is output from
the WRF model; one needs to add a CF-compliant units attribute to the time
coordinate of all input files, because it is not present by default.
Parameters
----------
func : function
An arbitrary function to call before calling
``grid_attrs_to_aospy_names`` in ``_load_data_from_disk``. Must take
an xr.Dataset as an argument as well as ``**kwargs``.
grid_attrs : dict (optional)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
Returns
-------
function
A function that calls the provided function ``func`` on the Dataset
before calling ``grid_attrs_to_aospy_names``; this is meant to be
passed as a ``preprocess`` argument to ``xr.open_mfdataset``.
"""
def func_wrapper(ds):
return grid_attrs_to_aospy_names(func(ds, **kwargs), grid_attrs)
return func_wrapper
|
python
|
{
"resource": ""
}
|
q4862
|
grid_attrs_to_aospy_names
|
train
|
def grid_attrs_to_aospy_names(data, grid_attrs=None):
    """Rename grid attributes to aospy's internal naming convention.

    Every coord, dim, or variable of ``data`` whose name matches one of the
    known external spellings in ``aospy.internal_names.GRID_ATTRS`` (possibly
    overridden via ``grid_attrs``) is renamed to the corresponding internal
    name.  Renamed dims lacking a coordinate are then given one via
    ``set_grid_attrs_as_coords``, which eases later slicing/subsetting.
    This function does not compare to Model coordinates or add missing
    coordinates from Model objects.

    Parameters
    ----------
    data : xr.Dataset
    grid_attrs : dict (default None)
        Overriding dictionary mapping aospy internal names to the grid
        attribute names used by a particular model.

    Returns
    -------
    xr.Dataset
        Data with coordinates renamed per aospy conventions.

    Raises
    ------
    ValueError
        If a key of ``grid_attrs`` is not a recognized internal name.
    """
    overrides = grid_attrs or {}
    known = GRID_ATTRS.copy()
    for internal, external in overrides.items():
        if internal not in known:
            raise ValueError(
                'Unrecognized internal name, {!r}, specified for a custom '
                'grid attribute name. See the full list of valid internal '
                'names below:\n\n{}'.format(internal, list(GRID_ATTRS.keys())))
        # An override replaces the whole candidate tuple for that name.
        known[internal] = (external, )
    present = set(data.variables) | set(data.dims)
    for internal, externals in known.items():
        matches = set(externals) & present
        if matches:
            data = data.rename({matches.pop(): internal})
    return set_grid_attrs_as_coords(data)
|
python
|
{
"resource": ""
}
|
q4863
|
set_grid_attrs_as_coords
|
train
|
def set_grid_attrs_as_coords(ds):
    """Promote available grid attributes to coordinates of the Dataset.

    Grid attributes are assumed to already carry their internal aospy
    names.  Setting them as coordinates lets every selected DataArray with
    overlapping index dimensions carry them along.

    Parameters
    ----------
    ds : Dataset
        Input data

    Returns
    -------
    Dataset
        Dataset with grid attributes set as coordinates
    """
    present = (set(ds.coords) | set(ds.data_vars)) & set(GRID_ATTRS.keys())
    return ds.set_coords(present)
|
python
|
{
"resource": ""
}
|
q4864
|
_maybe_cast_to_float64
|
train
|
def _maybe_cast_to_float64(da):
"""Cast DataArrays to np.float64 if they are of type np.float32.
Parameters
----------
da : xr.DataArray
Input DataArray
Returns
-------
DataArray
"""
if da.dtype == np.float32:
logging.warning('Datapoints were stored using the np.float32 datatype.'
'For accurate reduction operations using bottleneck, '
'datapoints are being cast to the np.float64 datatype.'
' For more information see: https://github.com/pydata/'
'xarray/issues/1346')
return da.astype(np.float64)
else:
return da
|
python
|
{
"resource": ""
}
|
q4865
|
_sel_var
|
train
|
def _sel_var(ds, var, upcast_float32=True):
"""Select the specified variable by trying all possible alternative names.
Parameters
----------
ds : Dataset
Dataset possibly containing var
var : aospy.Var
Variable to find data for
upcast_float32 : bool (default True)
Whether to cast a float32 DataArray up to float64
Returns
-------
DataArray
Raises
------
KeyError
If the variable is not in the Dataset
"""
for name in var.names:
try:
da = ds[name].rename(var.name)
if upcast_float32:
return _maybe_cast_to_float64(da)
else:
return da
except KeyError:
pass
msg = '{0} not found among names: {1} in\n{2}'.format(var, var.names, ds)
raise LookupError(msg)
|
python
|
{
"resource": ""
}
|
q4866
|
_prep_time_data
|
train
|
def _prep_time_data(ds):
    """Prepare time coordinate information in Dataset for use in aospy.

    1. If the Dataset contains a time bounds coordinate, add attributes
       representing the true beginning and end dates of the time interval
       used to construct the Dataset
    2. If the Dataset contains a time bounds coordinate, overwrite the time
       coordinate values with the averages of the time bounds at each
       timestep
    3. Decode the times into np.datetime64 objects for time indexing

    Parameters
    ----------
    ds : Dataset
        Pre-processed Dataset with time coordinate renamed to
        internal_names.TIME_STR

    Returns
    -------
    Dataset
        The processed Dataset
    """
    ds = times.ensure_time_as_index(ds)
    if TIME_BOUNDS_STR in ds:
        ds = times.ensure_time_avg_has_cf_metadata(ds)
        # Replace timestamps with interval midpoints, which avoids
        # off-by-one grouping errors when stamps sit on bin edges.
        ds[TIME_STR] = times.average_time_bounds(ds)
    else:
        logging.warning("dt array not found. Assuming equally spaced "
                        "values in time, even though this may not be "
                        "the case")
        ds = times.add_uniform_time_weights(ds)
    # Suppress enable_cftimeindex is a no-op warning; we'll keep setting it for
    # now to maintain backwards compatibility for older xarray versions.
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore')
        with xr.set_options(enable_cftimeindex=True):
            ds = xr.decode_cf(ds, decode_times=True, decode_coords=False,
                              mask_and_scale=True)
    return ds
|
python
|
{
"resource": ""
}
|
q4867
|
_load_data_from_disk
|
train
|
def _load_data_from_disk(file_set, preprocess_func=lambda ds: ds,
                         data_vars='minimal', coords='minimal',
                         grid_attrs=None, **kwargs):
    """Load a Dataset from a list or glob-string of files.

    Datasets from files are concatenated along time, and all grid
    attributes are renamed to their aospy internal names.

    Parameters
    ----------
    file_set : list or str
        List of paths to files or glob-string
    preprocess_func : function (optional)
        Custom function to call before applying any aospy logic
        to the loaded dataset
    data_vars : str (default 'minimal')
        Mode for concatenating data variables in call to ``xr.open_mfdataset``
    coords : str (default 'minimal')
        Mode for concatenating coordinate variables in call to
        ``xr.open_mfdataset``.
    grid_attrs : dict
        Overriding dictionary of grid attributes mapping aospy internal
        names to names of grid attributes used in a particular model.

    Returns
    -------
    Dataset
    """
    apply_preload_user_commands(file_set)
    # Chain the user hook and aospy's renaming into one preprocess callable.
    preprocess = _preprocess_and_rename_grid_attrs(preprocess_func,
                                                   grid_attrs, **kwargs)
    return xr.open_mfdataset(file_set, preprocess=preprocess,
                             concat_dim=TIME_STR, decode_times=False,
                             decode_coords=False, mask_and_scale=True,
                             data_vars=data_vars, coords=coords)
|
python
|
{
"resource": ""
}
|
q4868
|
_setattr_default
|
train
|
def _setattr_default(obj, attr, value, default):
"""Set an attribute of an object to a value or default value."""
if value is None:
setattr(obj, attr, default)
else:
setattr(obj, attr, value)
|
python
|
{
"resource": ""
}
|
q4869
|
DataLoader.load_variable
|
train
|
def load_variable(self, var=None, start_date=None, end_date=None,
                  time_offset=None, grid_attrs=None, **DataAttrs):
    """Load a DataArray for requested variable and time range.

    Automatically renames all grid attributes to match aospy conventions.

    Parameters
    ----------
    var : Var
        aospy Var object
    start_date : datetime.datetime
        start date for interval
    end_date : datetime.datetime
        end date for interval
    time_offset : dict
        Option to add a time offset to the time coordinate to correct for
        incorrect metadata.
    grid_attrs : dict (optional)
        Overriding dictionary of grid attributes mapping aospy internal
        names to names of grid attributes used in a particular model.
    **DataAttrs
        Attributes needed to identify a unique set of files to load from

    Returns
    -------
    da : DataArray
        DataArray for the specified variable, date range, and interval.
    """
    file_set = self._generate_file_set(var=var, start_date=start_date,
                                       end_date=end_date, **DataAttrs)
    dataset = _load_data_from_disk(
        file_set, self.preprocess_func, data_vars=self.data_vars,
        coords=self.coords, start_date=start_date, end_date=end_date,
        time_offset=time_offset, grid_attrs=grid_attrs, **DataAttrs
    )
    if var.def_time:
        # Time-defined variables need decoded/normalized time metadata, and
        # the requested dates coerced to the index's date type.
        dataset = _prep_time_data(dataset)
        start_date = times.maybe_convert_to_index_date_type(
            dataset.indexes[TIME_STR], start_date)
        end_date = times.maybe_convert_to_index_date_type(
            dataset.indexes[TIME_STR], end_date)
    dataset = set_grid_attrs_as_coords(dataset)
    arr = _sel_var(dataset, var, self.upcast_float32)
    if not var.def_time:
        return arr.load()
    arr = self._maybe_apply_time_shift(arr, time_offset, **DataAttrs)
    return times.sel_time(arr, start_date, end_date).load()
|
python
|
{
"resource": ""
}
|
q4870
|
DataLoader._load_or_get_from_model
|
train
|
def _load_or_get_from_model(self, var, start_date=None, end_date=None,
time_offset=None, model=None, **DataAttrs):
"""Load a DataArray for the requested variable and time range
Supports both access of grid attributes either through the DataLoader
or through an optionally-provided Model object. Defaults to using
the version found in the DataLoader first.
"""
grid_attrs = None if model is None else model.grid_attrs
try:
return self.load_variable(
var, start_date=start_date, end_date=end_date,
time_offset=time_offset, grid_attrs=grid_attrs, **DataAttrs)
except (KeyError, IOError) as e:
if var.name not in GRID_ATTRS or model is None:
raise e
else:
try:
return getattr(model, var.name)
except AttributeError:
raise AttributeError(
'Grid attribute {} could not be located either '
'through this DataLoader or in the provided Model '
'object: {}.'.format(var, model))
|
python
|
{
"resource": ""
}
|
q4871
|
DataLoader.recursively_compute_variable
|
train
|
def recursively_compute_variable(self, var, start_date=None, end_date=None,
                                 time_offset=None, model=None,
                                 **DataAttrs):
    """Compute a variable recursively, loading data where needed.

    An obvious requirement here is that the variable must eventually be
    able to be expressed in terms of model-native quantities; otherwise the
    recursion will never stop.

    Parameters
    ----------
    var : Var
        aospy Var object
    start_date : datetime.datetime
        start date for interval
    end_date : datetime.datetime
        end date for interval
    time_offset : dict
        Option to add a time offset to the time coordinate to correct for
        incorrect metadata.
    model : Model
        aospy Model object (optional)
    **DataAttrs
        Attributes needed to identify a unique set of files to load from

    Returns
    -------
    da : DataArray
        DataArray for the specified variable, date range, and interval.
    """
    # Base case: model-native quantity, loaded directly from disk/model.
    if var.variables is None:
        return self._load_or_get_from_model(
            var, start_date, end_date, time_offset, model, **DataAttrs)
    # Recursive case: resolve each input, then apply the variable's func.
    inputs = [self.recursively_compute_variable(
        sub_var, start_date, end_date, time_offset, model, **DataAttrs)
        for sub_var in var.variables]
    return var.func(*inputs).rename(var.name)
|
python
|
{
"resource": ""
}
|
q4872
|
DataLoader._maybe_apply_time_shift
|
train
|
def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs):
"""Apply specified time shift to DataArray"""
if time_offset is not None:
time = times.apply_time_offset(da[TIME_STR], **time_offset)
da[TIME_STR] = time
return da
|
python
|
{
"resource": ""
}
|
q4873
|
DictDataLoader._generate_file_set
|
train
|
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
"""Returns the file_set for the given interval in."""
try:
return self.file_map[intvl_in]
except KeyError:
raise KeyError('File set does not exist for the specified'
' intvl_in {0}'.format(intvl_in))
|
python
|
{
"resource": ""
}
|
q4874
|
GFDLDataLoader._maybe_apply_time_shift
|
train
|
def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs):
    """Correct off-by-one error in GFDL instantaneous model data.

    Instantaneous data that is outputted by GFDL models is generally off by
    one timestep.  For example, a netCDF file that is supposed to
    correspond to 6 hourly data for the month of January, will have its
    last time value be in February.  Such data is shifted back by one write
    interval so all timestamps fall within the intended period.
    """
    if time_offset is not None:
        # An explicit user-specified offset always wins over the inferred
        # GFDL correction.
        da[TIME_STR] = times.apply_time_offset(da[TIME_STR], **time_offset)
    elif DataAttrs['dtype_in_time'] == 'inst':
        intvl_in = DataAttrs['intvl_in']
        if intvl_in.endswith('hr'):
            # Parse the full numeric prefix (e.g. '12hr' -> 12).  The
            # previous code used only the first character, silently
            # mis-handling multi-digit intervals such as '12hr'.
            offset = -1 * int(intvl_in[:-len('hr')])
        else:
            offset = 0
        da[TIME_STR] = times.apply_time_offset(da[TIME_STR], hours=offset)
    return da
|
python
|
{
"resource": ""
}
|
q4875
|
Var.to_plot_units
|
train
|
def to_plot_units(self, data, dtype_vert=False):
    """Convert the given data to plotting units.

    ``dtype_vert`` selects the conversion factor: falsy or 'vert_av' use
    the plain plotting units, 'vert_int' the vertically integrated ones.
    Dict inputs are converted value-by-value.
    """
    if not dtype_vert or dtype_vert == 'vert_av':
        conv = self.units.plot_units_conv
    elif dtype_vert == 'vert_int':
        conv = self.units.vert_int_plot_units_conv
    else:
        raise ValueError("dtype_vert value `{0}` not recognized. Only "
                         "bool(dtype_vert) = False, 'vert_av', and "
                         "'vert_int' supported.".format(dtype_vert))
    if isinstance(data, dict):
        return {key: value*conv for key, value in data.items()}
    return data*conv
|
python
|
{
"resource": ""
}
|
q4876
|
Var.mask_unphysical
|
train
|
def mask_unphysical(self, data):
    """Mask data array where values are outside physically valid range."""
    if self.valid_range:
        lower = np.min(self.valid_range)
        upper = np.max(self.valid_range)
        return np.ma.masked_outside(data, lower, upper)
    # No valid range defined: nothing to mask.
    return data
|
python
|
{
"resource": ""
}
|
q4877
|
to_radians
|
train
|
def to_radians(arr, is_delta=False):
    """Return *arr* in radians, converting from degrees when necessary.

    Conversion is triggered either by a ``units`` attribute starting with
    'degrees', or heuristically by magnitudes too large to be radians.
    """
    _unset = object()
    units = getattr(arr, 'units', _unset)
    # Trust embedded metadata first, when present.
    if units is not _unset and units.lower().startswith('degrees'):
        logging.debug("Conversion applied: degrees -> radians to array: "
                      "{}".format(arr))
        return np.deg2rad(arr)
    # Heuristic fallback: values above the cutoff cannot plausibly be
    # radians (deltas get a tighter cutoff than absolute coordinates).
    cutoff = 0.1*np.pi if is_delta else 4*np.pi
    if np.max(np.abs(arr)) > cutoff:
        logging.debug("Conversion applied: degrees -> radians to array: "
                      "{}".format(arr))
        return np.deg2rad(arr)
    return arr
|
python
|
{
"resource": ""
}
|
q4878
|
to_pascal
|
train
|
def to_pascal(arr, is_dp=False):
    """Return *arr* in Pa, converting from hPa when magnitudes suggest it."""
    # Values at or above the cutoff are assumed to already be in Pa.
    cutoff = 400 if is_dp else 1200
    if np.max(np.abs(arr)) >= cutoff:
        return arr
    logging.debug("Conversion applied: hPa -> Pa to array: {}".format(arr))
    return arr*100.
|
python
|
{
"resource": ""
}
|
q4879
|
replace_coord
|
train
|
def replace_coord(arr, old_dim, new_dim, new_coord):
    """Swap a coordinate for a new one; new and old must have same shape."""
    renamed = arr.rename({old_dim: new_dim})
    renamed[new_dim] = new_coord
    return renamed
|
python
|
{
"resource": ""
}
|
q4880
|
to_pfull_from_phalf
|
train
|
def to_pfull_from_phalf(arr, pfull_coord):
    """Average adjacent half-level values onto the full pressure levels."""
    trailing = replace_coord(
        arr.isel(**{internal_names.PHALF_STR: slice(1, None)}),
        internal_names.PHALF_STR, internal_names.PFULL_STR, pfull_coord)
    leading = replace_coord(
        arr.isel(**{internal_names.PHALF_STR: slice(None, -1)}),
        internal_names.PHALF_STR, internal_names.PFULL_STR, pfull_coord)
    return 0.5*(leading + trailing)
|
python
|
{
"resource": ""
}
|
q4881
|
to_phalf_from_pfull
|
train
|
def to_phalf_from_pfull(arr, val_toa=0, val_sfc=0):
    """Construct half-level values from data on full pressure levels.

    Interior half levels are the mean of the two adjacent full levels; the
    outermost edges are set to the supplied TOA and surface values.  Works
    for the pressure array itself or any other field on full levels.
    """
    edges = np.zeros((arr.shape[0] + 1, arr.shape[1], arr.shape[2]))
    edges[0] = val_toa
    edges[-1] = val_sfc
    edges[1:-1] = 0.5*(arr[1:] + arr[:-1])
    return edges
|
python
|
{
"resource": ""
}
|
q4882
|
pfull_from_ps
|
train
|
def pfull_from_ps(bk, pk, ps, pfull_coord):
    """Compute pressure at full levels from surface pressure."""
    half_level_p = phalf_from_ps(bk, pk, ps)
    return to_pfull_from_phalf(half_level_p, pfull_coord)
|
python
|
{
"resource": ""
}
|
q4883
|
d_deta_from_phalf
|
train
|
def d_deta_from_phalf(arr, pfull_coord):
    """Difference successive half-level values to get per-level thickness."""
    diffs = arr.diff(dim=internal_names.PHALF_STR, n=1)
    return replace_coord(diffs, internal_names.PHALF_STR,
                         internal_names.PFULL_STR, pfull_coord)
|
python
|
{
"resource": ""
}
|
q4884
|
dp_from_ps
|
train
|
def dp_from_ps(bk, pk, ps, pfull_coord):
    """Compute pressure level thickness from surface pressure."""
    half_level_p = phalf_from_ps(bk, pk, ps)
    return d_deta_from_phalf(half_level_p, pfull_coord)
|
python
|
{
"resource": ""
}
|
q4885
|
integrate
|
train
|
def integrate(arr, ddim, dim=False, is_pressure=False):
    """Integrate ``arr`` against element widths ``ddim`` along a dimension.

    When ``is_pressure`` is set, the vertical coordinate name is inferred
    from ``ddim`` instead of taking the explicit ``dim`` argument.
    """
    axis = vert_coord_name(ddim) if is_pressure else dim
    return (arr*ddim).sum(dim=axis)
|
python
|
{
"resource": ""
}
|
q4886
|
get_dim_name
|
train
|
def get_dim_name(arr, names):
    """Return the first entry of *names* that is an attribute of *arr*.

    Raises AttributeError when none of the candidate names match.
    """
    # TODO: raise warning/exception when multiple names arr attrs.
    matches = (candidate for candidate in names if hasattr(arr, candidate))
    found = next(matches, None)
    if found is not None:
        return found
    raise AttributeError("No attributes of the object `{0}` match the "
                         "specified names of `{1}`".format(arr, names))
|
python
|
{
"resource": ""
}
|
q4887
|
int_dp_g
|
train
|
def int_dp_g(arr, dp):
    """Mass-weighted vertical integral: sum(arr * dp) / g."""
    weights = to_pascal(dp, is_dp=True)
    return integrate(arr, weights, vert_coord_name(dp)) / GRAV_EARTH
|
python
|
{
"resource": ""
}
|
q4888
|
dp_from_p
|
train
|
def dp_from_p(p, ps, p_top=0., p_bot=1.1e5):
    """Get level thickness of pressure data, incorporating surface pressure.

    Level edges are defined as halfway between the levels, as well as the
    user-specified uppermost and lowermost values.  The dp of levels whose
    bottom pressure is less than the surface pressure is not changed by ps,
    since they don't intersect the surface.  If ps is in between a level's
    top and bottom pressures, then its dp becomes the pressure difference
    between its top and ps.  If ps is less than a level's top and bottom
    pressures, then that level is underground and its values are masked.

    Note that postprocessing routines (e.g. at GFDL) typically mask out data
    wherever the surface pressure is less than the level's given value, not
    the level's upper edge.  That masks out more levels than the edge-based
    approach used here.
    """
    p_str = get_dim_name(p, (internal_names.PLEVEL_STR, 'plev'))
    p_vals = to_pascal(p.values.copy())
    # Layer edges are halfway between the given pressure levels.
    p_edges_interior = 0.5*(p_vals[:-1] + p_vals[1:])
    p_edges = np.concatenate(([p_bot], p_edges_interior, [p_top]))
    p_edge_above = p_edges[1:]
    p_edge_below = p_edges[:-1]
    dp = p_edge_below - p_edge_above
    # Guard against mis-ordered input: every thickness must be strictly
    # positive.  The previous check, ``all(np.sign(dp))``, only rejected
    # zero-thickness layers -- np.sign returns -1 (truthy) for negative
    # values, so inverted orderings slipped through silently.
    if not np.all(dp > 0):
        raise ValueError("dp array not all > 0 : {}".format(dp))
    # Pressure difference between ps and the upper edge of each pressure level.
    p_edge_above_xr = xr.DataArray(p_edge_above, dims=p.dims, coords=p.coords)
    dp_to_sfc = ps - p_edge_above_xr
    # Find the level adjacent to the masked, under-ground levels.
    change = xr.DataArray(np.zeros(dp_to_sfc.shape), dims=dp_to_sfc.dims,
                          coords=dp_to_sfc.coords)
    change[{p_str: slice(1, None)}] = np.diff(
        np.sign(ps - to_pascal(p.copy()))
    )
    dp_combined = xr.DataArray(np.where(change, dp_to_sfc, dp),
                               dims=dp_to_sfc.dims, coords=dp_to_sfc.coords)
    # Mask levels that are under ground.
    above_ground = ps > to_pascal(p.copy())
    above_ground[p_str] = p[p_str]
    dp_with_ps = dp_combined.where(above_ground)
    # Revert to original dim order.
    possible_dim_orders = [
        (internal_names.TIME_STR, p_str, internal_names.LAT_STR,
         internal_names.LON_STR),
        (internal_names.TIME_STR, p_str, internal_names.LAT_STR),
        (internal_names.TIME_STR, p_str, internal_names.LON_STR),
        (internal_names.TIME_STR, p_str),
        (p_str, internal_names.LAT_STR, internal_names.LON_STR),
        (p_str, internal_names.LAT_STR),
        (p_str, internal_names.LON_STR),
        (p_str,),
    ]
    for dim_order in possible_dim_orders:
        try:
            return dp_with_ps.transpose(*dim_order)
        except ValueError:
            logging.debug("Failed transpose to dims: {}".format(dim_order))
    logging.debug("No transpose was successful.")
    return dp_with_ps
|
python
|
{
"resource": ""
}
|
q4889
|
level_thickness
|
train
|
def level_thickness(p, p_top=0., p_bot=1.01325e5):
    """Calculate the thickness, in Pa, of each pressure level.

    The given pressures are treated as level centers, with edges halfway
    between adjacent centers; the lowest layer extends down to ``p_bot``
    and the uppermost layer up to ``p_top``.  Unlike `dp_from_p`, this does
    not incorporate the surface pressure.
    """
    centers = to_pascal(p.values.copy())
    thickness = np.empty_like(centers)
    # Lowest layer: from p_bot up to the first interior edge.
    thickness[0] = p_bot - 0.5*(centers[0] + centers[1])
    # Interior layers: distance between the two surrounding edges.
    thickness[1:-1] = 0.5*(centers[0:-2] - centers[2:])
    # Uppermost layer: from the last interior edge up to p_top.
    thickness[-1] = 0.5*(centers[-2] + centers[-1]) - p_top
    dp = p.copy()
    dp.values = thickness
    return dp
|
python
|
{
"resource": ""
}
|
q4890
|
does_coord_increase_w_index
|
train
|
def does_coord_increase_w_index(arr):
    """Determine if the array values increase with the index.

    Useful, e.g., for pressure, which sometimes is indexed surface to TOA
    and sometimes the opposite.

    Parameters
    ----------
    arr : array-like
        1-D coordinate values.

    Returns
    -------
    bool
        True if values strictly increase with index, False if they
        strictly decrease.

    Raises
    ------
    ValueError
        If the array is not strictly monotonic.
    """
    diff = np.diff(arr)
    # Strict monotonicity requires all differences to share one sign.  The
    # previous check, ``np.all(np.abs(np.sign(diff)))``, only rejected
    # repeated values, letting non-monotonic arrays like [1, 2, 1] pass.
    if not (np.all(diff > 0) or np.all(diff < 0)):
        raise ValueError("Array is not monotonic: {}".format(arr))
    # Monotonic, so the first difference determines direction.  Comparing
    # to zero (rather than ``bool(diff[0])``, which was True even for
    # negative diffs) gives the correct answer for decreasing arrays.
    return bool(diff[0] > 0)
|
python
|
{
"resource": ""
}
|
q4891
|
apply_time_offset
|
train
|
def apply_time_offset(time, years=0, months=0, days=0, hours=0):
    """Offset every value of the given time array by the specified amounts.

    Useful for GFDL model output of instantaneous values.  For example,
    3-hourly data postprocessed to year-long netCDF files carries times
    offset by 3 hours, running from 1 Jan 03:00 through 1 Jan 00:00 of the
    next year, which breaks e.g. month grouping in xarray.  Subtracting
    those three hours restores the intended 1 Jan 00:00 - 31 Dec 21:00
    span.

    Parameters
    ----------
    time : xarray.DataArray representing a timeseries
    years, months, days, hours : int, optional
        Amounts by which to offset the times; positive values shift the
        times later.

    Returns
    -------
    pandas.DatetimeIndex

    Examples
    --------
    >>> times = xr.DataArray([datetime.datetime(1899, 12, 31, 21),
    ...                       datetime.datetime(1899, 1, 31, 21)])
    >>> apply_time_offset(times)  # doctest: +NORMALIZE_WHITESPACE
    DatetimeIndex(['1900-01-01', '1899-02-01'], dtype='datetime64[ns]',
                  freq=None)
    """
    shift = pd.DateOffset(years=years, months=months, days=days, hours=hours)
    return pd.to_datetime(time.values) + shift
|
python
|
{
"resource": ""
}
|
q4892
|
average_time_bounds
|
train
|
def average_time_bounds(ds):
    """Return the midpoint of each timestep's time bounds.

    Useful for replacing a Dataset's native time array when the latter
    matches either the start or end bound, which can cause off-by-one
    grouping errors for e.g. month-long timesteps.  The Dataset's times
    must not yet have undergone CF decoding into datetime objects.

    Parameters
    ----------
    ds : xarray.Dataset
        Must contain a time-bounds array named
        internal_names.TIME_BOUNDS_STR with two dimensions: one indexed by
        the Dataset's time array and one of length 2.

    Returns
    -------
    xarray.DataArray
        The mean of the start and end times of each timestep in the
        original Dataset.

    Raises
    ------
    ValueError
        If the time bounds array doesn't match the shape specified above.
    """
    midpoints = ds[TIME_BOUNDS_STR].mean(dim=BOUNDS_STR, keep_attrs=True)
    midpoints = midpoints.drop(TIME_STR).rename(TIME_STR)
    midpoints[TIME_STR] = midpoints
    return midpoints
|
python
|
{
"resource": ""
}
|
q4893
|
monthly_mean_at_each_ind
|
train
|
def monthly_mean_at_each_ind(monthly_means, sub_monthly_timeseries):
    """Copy monthly mean over each time index in that month.

    Parameters
    ----------
    monthly_means : xarray.DataArray
        array of monthly means
    sub_monthly_timeseries : xarray.DataArray
        array of a timeseries at sub-monthly time resolution

    Returns
    -------
    xarray.DataArray with each monthly mean value from `monthly_means`
    repeated at each time within that month from `sub_monthly_timeseries`

    See Also
    --------
    monthly_mean_ts : Create timeseries of monthly mean values
    """
    time = monthly_means[TIME_STR]
    start = time.indexes[TIME_STR][0].replace(day=1, hour=0)
    end = time.indexes[TIME_STR][-1]
    # ``pd.DatetimeIndex(start=..., end=..., freq=...)`` was deprecated in
    # pandas 0.24 and removed in 1.0; ``pd.date_range`` is the supported
    # way to build a fixed-frequency (month-start) index.
    new_indices = pd.date_range(start=start, end=end, freq='MS')
    arr_new = monthly_means.reindex(time=new_indices, method='backfill')
    return arr_new.reindex_like(sub_monthly_timeseries, method='pad')
|
python
|
{
"resource": ""
}
|
q4894
|
yearly_average
|
train
|
def yearly_average(arr, dt):
    """Average a sub-yearly time-series over each year.

    The result has one value per year in which the original array had
    valid data.  The average is weighted by the timestep durations ``dt``,
    and masked points of ``arr`` are excluded from both numerator and
    denominator so they do not bias the result.

    Parameters
    ----------
    arr : xarray.DataArray
        The array to be averaged
    dt : xarray.DataArray
        Array of the duration of each timestep

    Returns
    -------
    xarray.DataArray
        Same shape and mask as ``arr``, except the time dimension is
        truncated to one value per year spanned.
    """
    assert_matching_time_coord(arr, dt)
    by_year = TIME_STR + '.year'
    # Zero out (mask) weights wherever the data itself is missing.
    weights = dt.where(np.isfinite(arr))
    weighted_sum = (arr*weights).groupby(by_year).sum(TIME_STR)
    total_weight = weights.groupby(by_year).sum(TIME_STR)
    return weighted_sum / total_weight
|
python
|
{
"resource": ""
}
|
q4895
|
ensure_datetime
|
train
|
def ensure_datetime(obj):
    """Return *obj* unchanged if it is datetime-like, else raise TypeError.

    Accepted types: str, datetime.datetime, cftime.datetime, np.datetime64.

    Parameters
    ----------
    obj : Object to be tested.

    Returns
    -------
    The original object if it is a datetime-like object

    Raises
    ------
    TypeError if `obj` is not datetime-like
    """
    valid_types = (str, datetime.datetime, cftime.datetime,
                   np.datetime64)
    if not isinstance(obj, valid_types):
        raise TypeError("datetime-like object required. "
                        "Type given: {}".format(type(obj)))
    return obj
|
python
|
{
"resource": ""
}
|
q4896
|
month_indices
|
train
|
def month_indices(months):
    """Convert string labels for months to integer indices.

    Parameters
    ----------
    months : str, int
        If int, number of the desired month, where January=1, February=2,
        etc.  If str, must match either 'ann' or some subset of
        'jfmamjjasond' (case-insensitive).  If 'ann', use all months.
        Otherwise, use the specified months.

    Returns
    -------
    np.ndarray of integers corresponding to desired month indices (a
    length-one list for int input)

    Raises
    ------
    TypeError : If `months` is not an int or str

    See also
    --------
    _month_conditional
    """
    if not isinstance(months, (int, str)):
        raise TypeError("`months` must be of type int or str: "
                        "type(months) == {}".format(type(months)))
    if isinstance(months, int):
        return [months]
    months_lower = months.lower()
    if months_lower == 'ann':
        return np.arange(1, 13)
    # Doubled so that wrap-around spans like 'djf' can be found.
    first_letter = 'jfmamjjasond' * 2
    # Python indexing starts at 0; month indices start at 1 for January.
    # Use the lowercased string for the uniqueness check as well as the
    # search below: previously count() used the raw input while find()
    # lowercased it, so mixed-case abbreviations like 'JJA' were wrongly
    # rejected as unmatched.
    count = first_letter.count(months_lower)
    if (count == 0) or (count > 2):
        message = ("The user must provide a unique pattern of consecutive "
                   "first letters of months within '{}'. The provided "
                   "string '{}' does not comply."
                   " For individual months use integers."
                   "".format(first_letter, months))
        raise ValueError(message)
    st_ind = first_letter.find(months_lower)
    return np.arange(st_ind, st_ind + len(months)) % 12 + 1
|
python
|
{
"resource": ""
}
|
q4897
|
_month_conditional
|
train
|
def _month_conditional(time, months):
    """Build a boolean mask selecting times in the given months.

    Parameters
    ----------
    time : xarray.DataArray
        Array of times for which to subsample for specific months.
    months : int, str, or xarray.DataArray of times
        If int or str, passed to `month_indices`

    Returns
    -------
    Array of bools specifying which months to keep

    See Also
    --------
    month_indices
    """
    targets = (month_indices(months) if isinstance(months, (int, str))
               else months)
    # Hoist the month-of-year lookup out of the loop; it is the same
    # expression for every target month.
    month_of_year = time['{}.month'.format(TIME_STR)]
    mask = False
    for target in targets:
        mask = mask | (month_of_year == target)
    return mask
|
python
|
{
"resource": ""
}
|
q4898
|
extract_months
|
train
|
def extract_months(time, months):
    """Extract times within specified months of the year.

    Parameters
    ----------
    time : xarray.DataArray
        Array of times that can be represented by numpy.datetime64 objects
        (i.e. the year is between 1678 and 2262).
    months : Desired months of the year to include

    Returns
    -------
    xarray.DataArray of the desired times
    """
    # Boolean-mask selection along the time dimension.
    return time.sel(time=_month_conditional(time, months))
|
python
|
{
"resource": ""
}
|
q4899
|
ensure_time_avg_has_cf_metadata
|
train
|
def ensure_time_avg_has_cf_metadata(ds):
    """Add time interval length and bounds coordinates for time avg data.
    If the Dataset or DataArray contains time average data, enforce
    that there are coordinates that track the lower and upper bounds of
    the time intervals, and that there is a coordinate that tracks the
    amount of time per time average interval.
    CF conventions require that a quantity stored as time averages
    over time intervals must have time and time_bounds coordinates [1]_.
    aospy further requires AVERAGE_DT for time average data, for accurate
    time-weighted averages, which can be inferred from the CF-required
    time_bounds coordinate if needed. This step should be done
    prior to decoding CF metadata with xarray to ensure proper
    computed timedeltas for different calendar types.
    .. [1] http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_data_representative_of_cells
    Parameters
    ----------
    ds : Dataset or DataArray
        Input data
    Returns
    -------
    Dataset or DataArray
        Time average metadata attributes added if needed.
    """  # noqa: E501
    if TIME_WEIGHTS_STR not in ds:
        # Interval length per timestep: upper bound minus lower bound.
        time_weights = ds[TIME_BOUNDS_STR].diff(BOUNDS_STR)
        # `diff` leaves a length-1 bounds dimension; squeeze it away.
        time_weights = time_weights.rename(TIME_WEIGHTS_STR).squeeze()
        if BOUNDS_STR in time_weights.coords:
            # Drop the now-scalar bounds coordinate left over from diff.
            time_weights = time_weights.drop(BOUNDS_STR)
        ds[TIME_WEIGHTS_STR] = time_weights
    # Raw start date: lower bound of the first interval.
    raw_start_date = ds[TIME_BOUNDS_STR].isel(**{TIME_STR: 0, BOUNDS_STR: 0})
    ds[RAW_START_DATE_STR] = raw_start_date.reset_coords(drop=True)
    # Raw end date: upper bound of the last interval.
    raw_end_date = ds[TIME_BOUNDS_STR].isel(**{TIME_STR: -1, BOUNDS_STR: 1})
    ds[RAW_END_DATE_STR] = raw_end_date.reset_coords(drop=True)
    # Propagate the time coordinate's encoding (units and, if present,
    # calendar) onto the derived coordinates so that xarray's CF decoding
    # interprets them consistently with the time axis.
    for coord in [TIME_BOUNDS_STR, RAW_START_DATE_STR, RAW_END_DATE_STR]:
        ds[coord].attrs['units'] = ds[TIME_STR].attrs['units']
        if 'calendar' in ds[TIME_STR].attrs:
            ds[coord].attrs['calendar'] = ds[TIME_STR].attrs['calendar']
    # The weights are plain durations, e.g. 'days since 1970-01-01' -> 'days'.
    unit_interval = ds[TIME_STR].attrs['units'].split('since')[0].strip()
    ds[TIME_WEIGHTS_STR].attrs['units'] = unit_interval
    return ds
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.