| sentence1 | sentence2 | label |
|---|---|---|
def trim(self, lower=None, upper=None):
"""Trim upper values in accordance with
:math:`EQI2 \\leq EQI1 \\leq EQB`.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> eqb.value = 3.0
>>> eqi2.value = 1.0
>>> eqi1(0.0)
>>> eqi1
eqi1(1.0)
>>> eqi1(1.0)
>>> eqi1
eqi1(1.0)
>>> eqi1(2.0)
>>> eqi1
eqi1(2.0)
>>> eqi1(3.0)
>>> eqi1
eqi1(3.0)
>>> eqi1(4.0)
>>> eqi1
eqi1(3.0)
"""
if lower is None:
lower = getattr(self.subpars.eqi2, 'value', None)
if upper is None:
upper = getattr(self.subpars.eqb, 'value', None)
super().trim(lower, upper)
|
Trim values in accordance with
:math:`EQI2 \\leq EQI1 \\leq EQB`.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> eqb.value = 3.0
>>> eqi2.value = 1.0
>>> eqi1(0.0)
>>> eqi1
eqi1(1.0)
>>> eqi1(1.0)
>>> eqi1
eqi1(1.0)
>>> eqi1(2.0)
>>> eqi1
eqi1(2.0)
>>> eqi1(3.0)
>>> eqi1
eqi1(3.0)
>>> eqi1(4.0)
>>> eqi1
eqi1(3.0)
|
entailment
|
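For readers unfamiliar with HydPy's trimming convention, the following self-contained sketch (not HydPy code; all names are illustrative) shows the general clamp pattern that the method above delegates to via `super().trim(lower, upper)`: values below `lower` are raised to `lower`, values above `upper` are lowered to `upper`.
import warnings

def clamp(value, lower=None, upper=None):
    """Force `value` into the interval [lower, upper] and warn when trimming."""
    if (lower is not None) and (value < lower):
        warnings.warn(f'trimmed {value} up to {lower}')
        return lower
    if (upper is not None) and (value > upper):
        warnings.warn(f'trimmed {value} down to {upper}')
        return upper
    return value

print(clamp(0.0, lower=1.0, upper=3.0))  # 1.0, as in the eqi1(0.0) example above
print(clamp(2.0, lower=1.0, upper=3.0))  # 2.0, already within bounds
print(clamp(4.0, lower=1.0, upper=3.0))  # 3.0, as in the eqi1(4.0) example above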
def time_choices():
"""Return digital time choices every half hour from 00:00 to 23:30."""
hours = list(range(0, 24))
times = []
for h in hours:
hour = str(h).zfill(2)
times.append(hour+':00')
times.append(hour+':30')
return list(zip(times, times))
|
Return digital time choices every half hour from 00:00 to 23:30.
|
entailment
|
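As a quick check of the pattern described above, here is a hypothetical standalone snippet that rebuilds the same half-hour sequence locally (it does not import the function, so it runs on its own):
times = [f'{h:02d}:{m}' for h in range(24) for m in ('00', '30')]
choices = list(zip(times, times))
print(choices[:3])   # [('00:00', '00:00'), ('00:30', '00:30'), ('01:00', '01:00')]
print(len(choices))  # 48 half-hour entries from 00:00 to 23:30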
def _add_lines(specification, module):
"""Return autodoc commands for a basemodels docstring.
Note that `collection classes` (e.g. `Model`, `ControlParameters`,
`InputSequences` are placed on top of the respective section and the
`contained classes` (e.g. model methods, `ControlParameter` instances,
`InputSequence` instances at the bottom. This differs from the order
of their definition in the respective modules, but results in a better
documentation structure.
"""
caption = _all_spec2capt.get(specification, 'dummy')
if caption.split()[-1] in ('parameters', 'sequences', 'Masks'):
exists_collectionclass = True
name_collectionclass = caption.title().replace(' ', '')
else:
exists_collectionclass = False
lines = []
if specification == 'model':
lines += [f'',
f'.. autoclass:: {module.__name__}.Model',
f' :members:',
f' :show-inheritance:',
f' :exclude-members: {", ".join(EXCLUDE_MEMBERS)}']
elif exists_collectionclass:
lines += [f'',
f'.. autoclass:: {module.__name__}.{name_collectionclass}',
f' :members:',
f' :show-inheritance:',
f' :exclude-members: {", ".join(EXCLUDE_MEMBERS)}']
lines += ['',
'.. automodule:: ' + module.__name__,
' :members:',
' :show-inheritance:']
if specification == 'model':
lines += [' :exclude-members: Model']
elif exists_collectionclass:
lines += [' :exclude-members: ' + name_collectionclass]
return lines
|
Return autodoc commands for a basemodel's docstring.
Note that `collection classes` (e.g. `Model`, `ControlParameters`,
`InputSequences`) are placed on top of the respective section and the
`contained classes` (e.g. model methods, `ControlParameter` instances,
`InputSequence` instances) at the bottom. This differs from the order
of their definition in the respective modules, but results in a better
documentation structure.
|
entailment
|
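To make the generated structure concrete, the following illustrative snippet prints the kind of reStructuredText that `_add_lines` assembles for the `model` specification; the module name and the excluded members are assumptions for the sake of the example, not values taken from HydPy:
module_name = 'hydpy.models.lland.lland_model'  # hypothetical module
exclude_members = 'CLASSES, RUN_METHODS'        # hypothetical exclusions
lines = [
    '',
    f'.. autoclass:: {module_name}.Model',
    '    :members:',
    '    :show-inheritance:',
    f'    :exclude-members: {exclude_members}',
    '',
    f'.. automodule:: {module_name}',
    '    :members:',
    '    :show-inheritance:',
    '    :exclude-members: Model',
]
print('\n'.join(lines))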
def autodoc_basemodel(module):
"""Add an exhaustive docstring to the given module of a basemodel.
Works onlye when all modules of the basemodel are named in the
standard way, e.g. `lland_model`, `lland_control`, `lland_inputs`.
"""
autodoc_tuple2doc(module)
namespace = module.__dict__
doc = namespace.get('__doc__')
if doc is None:
doc = ''
basemodulename = namespace['__name__'].split('.')[-1]
modules = {key: value for key, value in namespace.items()
if (isinstance(value, types.ModuleType) and
key.startswith(basemodulename+'_'))}
substituter = Substituter(hydpy.substituter)
lines = []
specification = 'model'
modulename = basemodulename+'_'+specification
if modulename in modules:
module = modules[modulename]
lines += _add_title('Model features', '-')
lines += _add_lines(specification, module)
substituter.add_module(module)
for (title, spec2capt) in (('Parameter features', _PAR_SPEC2CAPT),
('Sequence features', _SEQ_SPEC2CAPT),
('Auxiliary features', _AUX_SPEC2CAPT)):
found_module = False
new_lines = _add_title(title, '-')
for (specification, caption) in spec2capt.items():
modulename = basemodulename+'_'+specification
module = modules.get(modulename)
if module:
found_module = True
new_lines += _add_title(caption, '.')
new_lines += _add_lines(specification, module)
substituter.add_module(module)
if found_module:
lines += new_lines
doc += '\n'.join(lines)
namespace['__doc__'] = doc
basemodule = importlib.import_module(namespace['__name__'])
substituter.add_module(basemodule)
substituter.update_masters()
namespace['substituter'] = substituter
|
Add an exhaustive docstring to the given module of a basemodel.
Works only when all modules of the basemodel are named in the
standard way, e.g. `lland_model`, `lland_control`, `lland_inputs`.
|
entailment
|
def autodoc_applicationmodel(module):
"""Improves the docstrings of application models when called
at the bottom of the respective module.
|autodoc_applicationmodel| requires, similar to
|autodoc_basemodel|, that both the application model and its
base model are defined in the conventional way.
"""
autodoc_tuple2doc(module)
name_applicationmodel = module.__name__
name_basemodel = name_applicationmodel.split('_')[0]
module_basemodel = importlib.import_module(name_basemodel)
substituter = Substituter(module_basemodel.substituter)
substituter.add_module(module)
substituter.update_masters()
module.substituter = substituter
|
Improves the docstrings of application models when called
at the bottom of the respective module.
|autodoc_applicationmodel| requires, similar to
|autodoc_basemodel|, that both the application model and its
base model are defined in the conventional way.
|
entailment
|
def prepare_mainsubstituter():
"""Prepare and return a |Substituter| object for the main `__init__`
file of *HydPy*."""
substituter = Substituter()
for module in (builtins, numpy, datetime, unittest, doctest, inspect, io,
os, sys, time, collections, itertools, subprocess, scipy,
typing):
substituter.add_module(module)
for subpackage in (auxs, core, cythons, exe):
for dummy, name, dummy in pkgutil.walk_packages(subpackage.__path__):
full_name = subpackage.__name__ + '.' + name
substituter.add_module(importlib.import_module(full_name))
substituter.add_modules(models)
for cymodule in (annutils, smoothutils, pointerutils):
substituter.add_module(cymodule, cython=True)
substituter._short2long['|pub|'] = ':mod:`~hydpy.pub`'
substituter._short2long['|config|'] = ':mod:`~hydpy.config`'
return substituter
|
Prepare and return a |Substituter| object for the main `__init__`
file of *HydPy*.
|
entailment
|
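The pkgutil-based discovery loop above can be hard to picture without running it. This minimal sketch applies the same pattern to the standard-library package `email`, which serves here only as a stand-in for HydPy's subpackages:
import importlib
import pkgutil
import email  # stand-in for a HydPy subpackage such as `core`

for _finder, name, _ispkg in pkgutil.walk_packages(email.__path__):
    full_name = email.__name__ + '.' + name
    module = importlib.import_module(full_name)
    print(full_name)  # prints the dotted name of one submodule of `email`
    break  # one submodule is enough for the illustration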
def _number_of_line(member_tuple):
"""Try to return the number of the first line of the definition of a
member of a module."""
member = member_tuple[1]
try:
return member.__code__.co_firstlineno
except AttributeError:
pass
try:
return inspect.findsource(member)[1]
except BaseException:
pass
for value in vars(member).values():
try:
return value.__code__.co_firstlineno
except AttributeError:
pass
return 0
|
Try to return the number of the first line of the definition of a
member of a module.
|
entailment
|
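The two lookups used above behave as follows. This standalone sketch assumes it is executed from a file (not an interactive prompt) so that `inspect` can locate the source:
import inspect

def sample():
    return None

# `co_firstlineno` is the one-based line number of the `def` statement ...
print(sample.__code__.co_firstlineno)
# ... while `inspect.findsource` returns the source lines plus a zero-based
# index of that same line, hence the printed value is smaller by one.
print(inspect.findsource(sample)[1])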
def autodoc_module(module):
"""Add a short summary of all implemented members to a modules docstring.
"""
doc = getattr(module, '__doc__')
if doc is None:
doc = ''
members = []
for name, member in inspect.getmembers(module):
if ((not name.startswith('_')) and
(inspect.getmodule(member) is module)):
members.append((name, member))
members = sorted(members, key=_number_of_line)
if members:
lines = ['\n\nModule :mod:`~%s` implements the following members:\n'
% module.__name__]
for (name, member) in members:
if inspect.isfunction(member):
type_ = 'func'
elif inspect.isclass(member):
type_ = 'class'
else:
type_ = 'obj'
lines.append(' * :%s:`~%s` %s'
% (type_, name, objecttools.description(member)))
doc = doc + '\n\n' + '\n'.join(lines) + '\n\n' + 80*'_'
module.__doc__ = doc
|
Add a short summary of all implemented members to a module's docstring.
|
entailment
|
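A minimal sketch of the member-collection step above, applied to the standard-library module `json` instead of a HydPy module; it keeps only public members that `inspect.getmodule` attributes to `json` itself and sorts them by definition order where a code object is available:
import inspect
import json

def first_line(member):
    try:
        return member.__code__.co_firstlineno
    except AttributeError:
        return 0

members = [
    (name, member)
    for name, member in inspect.getmembers(json)
    if not name.startswith('_') and inspect.getmodule(member) is json
]
members.sort(key=lambda pair: first_line(pair[1]))
print([name for name, _ in members])  # e.g. functions such as dump, dumps, load, loads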
def autodoc_tuple2doc(module):
"""Include tuples as `CLASSES` of `ControlParameters` and `RUN_METHODS`
of `Models` into the respective docstring."""
modulename = module.__name__
for membername, member in inspect.getmembers(module):
for tuplename, descr in _name2descr.items():
tuple_ = getattr(member, tuplename, None)
if tuple_:
logstring = f'{modulename}.{membername}.{tuplename}'
if logstring not in _loggedtuples:
_loggedtuples.add(logstring)
lst = [f'\n\n\n {descr}:']
if tuplename == 'CLASSES':
type_ = 'func'
else:
type_ = 'class'
for cls in tuple_:
lst.append(
f' * '
f':{type_}:`{cls.__module__}.{cls.__name__}`'
f' {objecttools.description(cls)}')
doc = getattr(member, '__doc__')
if doc is None:
doc = ''
member.__doc__ = doc + '\n'.join(l for l in lst)
|
Include tuples as `CLASSES` of `ControlParameters` and `RUN_METHODS`
of `Models` into the respective docstring.
|
entailment
|
def consider_member(name_member, member, module, class_=None):
"""Return |True| if the given member should be added to the
substitutions. If not return |False|.
Some examples based on the site-package |numpy|:
>>> from hydpy.core.autodoctools import Substituter
>>> import numpy
A constant like |nan| should be added:
>>> Substituter.consider_member(
... 'nan', numpy.nan, numpy)
True
Members with a prefixed underscore should not be added:
>>> Substituter.consider_member(
... '_NoValue', numpy._NoValue, numpy)
False
Members that are actually imported modules should not be added:
>>> Substituter.consider_member(
... 'warnings', numpy.warnings, numpy)
False
Members that are actually defined in other modules should
not be added:
>>> numpy.Substituter = Substituter
>>> Substituter.consider_member(
... 'Substituter', numpy.Substituter, numpy)
False
>>> del numpy.Substituter
Members that are defined in submodules of a given package
(either from the standard library or from site-packages)
should be added...
>>> Substituter.consider_member(
... 'clip', numpy.clip, numpy)
True
...but not members defined in *HydPy* submodules:
>>> import hydpy
>>> Substituter.consider_member(
... 'Node', hydpy.Node, hydpy)
False
Descriptor instances (with method `__get__`) being members
of classes should be added:
>>> from hydpy.auxs import anntools
>>> Substituter.consider_member(
... 'shape_neurons', anntools.ANN.shape_neurons,
... anntools, anntools.ANN)
True
"""
if name_member.startswith('_'):
return False
if inspect.ismodule(member):
return False
real_module = getattr(member, '__module__', None)
if not real_module:
return True
if real_module != module.__name__:
if class_ and hasattr(member, '__get__'):
return True
if 'hydpy' in real_module:
return False
if module.__name__ not in real_module:
return False
return True
|
Return |True| if the given member should be added to the
substitutions. If not return |False|.
Some examples based on the site-package |numpy|:
>>> from hydpy.core.autodoctools import Substituter
>>> import numpy
A constant like |nan| should be added:
>>> Substituter.consider_member(
... 'nan', numpy.nan, numpy)
True
Members with a prefixed underscore should not be added:
>>> Substituter.consider_member(
... '_NoValue', numpy._NoValue, numpy)
False
Members that are actually imported modules should not be added:
>>> Substituter.consider_member(
... 'warnings', numpy.warnings, numpy)
False
Members that are actually defined in other modules should
not be added:
>>> numpy.Substituter = Substituter
>>> Substituter.consider_member(
... 'Substituter', numpy.Substituter, numpy)
False
>>> del numpy.Substituter
Members that are defined in submodules of a given package
(either from the standard library or from site-packages)
should be added...
>>> Substituter.consider_member(
... 'clip', numpy.clip, numpy)
True
...but not members defined in *HydPy* submodules:
>>> import hydpy
>>> Substituter.consider_member(
... 'Node', hydpy.Node, hydpy)
False
Descriptor instances (with method `__get__`) being members
of classes should be added:
>>> from hydpy.auxs import anntools
>>> Substituter.consider_member(
... 'shape_neurons', anntools.ANN.shape_neurons,
... anntools, anntools.ANN)
True
|
entailment
|
def get_role(member, cython=False):
"""Return the reStructuredText role `func`, `class`, or `const`
best describing the given member.
Some examples based on the site-package |numpy|. |numpy.clip|
is a function:
>>> from hydpy.core.autodoctools import Substituter
>>> import numpy
>>> Substituter.get_role(numpy.clip)
'func'
|numpy.ndarray| is a class:
>>> Substituter.get_role(numpy.ndarray)
'class'
|numpy.ndarray.clip| is a method, for which also the `function`
role is returned:
>>> Substituter.get_role(numpy.ndarray.clip)
'func'
For everything else the `constant` role is returned:
>>> Substituter.get_role(numpy.nan)
'const'
When analysing cython extension modules, set the option `cython`
flag to |True|. |Double| is correctly identified as a class:
>>> from hydpy.cythons import pointerutils
>>> Substituter.get_role(pointerutils.Double, cython=True)
'class'
Only with the `cython` flag being |True|, for everything else
the `function` text role is returned (doesn't make sense here,
but the |numpy| module is not something defined in module
|pointerutils| anyway):
>>> Substituter.get_role(pointerutils.numpy, cython=True)
'func'
"""
if inspect.isroutine(member) or isinstance(member, numpy.ufunc):
return 'func'
elif inspect.isclass(member):
return 'class'
elif cython:
return 'func'
return 'const'
|
Return the reStructuredText role `func`, `class`, or `const`
best describing the given member.
Some examples based on the site-package |numpy|. |numpy.clip|
is a function:
>>> from hydpy.core.autodoctools import Substituter
>>> import numpy
>>> Substituter.get_role(numpy.clip)
'func'
|numpy.ndarray| is a class:
>>> Substituter.get_role(numpy.ndarray)
'class'
|numpy.ndarray.clip| is a method, for which also the `function`
role is returned:
>>> Substituter.get_role(numpy.ndarray.clip)
'func'
For everything else the `constant` role is returned:
>>> Substituter.get_role(numpy.nan)
'const'
When analysing cython extension modules, set the option `cython`
flag to |True|. |Double| is correctly identified as a class:
>>> from hydpy.cythons import pointerutils
>>> Substituter.get_role(pointerutils.Double, cython=True)
'class'
Only with the `cython` flag being |True|, for everything else
the `function` text role is returned (doesn't make sense here,
but the |numpy| module is not something defined in module
|pointerutils| anyway):
>>> Substituter.get_role(pointerutils.numpy, cython=True)
'func'
|
entailment
|
def add_substitution(self, short, medium, long, module):
"""Add the given substitutions both as a `short2long` and a
`medium2long` mapping.
Assume `variable1` is defined in the hydpy module `module1` and the
short and medium descriptions are `var1` and `mod1.var1`:
>>> import types
>>> module1 = types.ModuleType('hydpy.module1')
>>> from hydpy.core.autodoctools import Substituter
>>> substituter = Substituter()
>>> substituter.add_substitution(
... 'var1', 'mod1.var1', 'module1.variable1', module1)
>>> print(substituter.get_commands())
.. var1 replace:: module1.variable1
.. mod1.var1 replace:: module1.variable1
Adding `variable2` of `module2` has no effect on the predefined
substitutions:
>>> module2 = types.ModuleType('hydpy.module2')
>>> substituter.add_substitution(
... 'var2', 'mod2.var2', 'module2.variable2', module2)
>>> print(substituter.get_commands())
.. var1 replace:: module1.variable1
.. var2 replace:: module2.variable2
.. mod1.var1 replace:: module1.variable1
.. mod2.var2 replace:: module2.variable2
But when adding `variable1` of `module2`, the `short2long` mapping
of `variable1` would become inconclusive, which is why the new
one (related to `module2`) is not stored and the old one (related
to `module1`) is removed:
>>> substituter.add_substitution(
... 'var1', 'mod2.var1', 'module2.variable1', module2)
>>> print(substituter.get_commands())
.. var2 replace:: module2.variable2
.. mod1.var1 replace:: module1.variable1
.. mod2.var1 replace:: module2.variable1
.. mod2.var2 replace:: module2.variable2
Adding `variable2` of `module2` accidentally again, does not
result in any undesired side-effects:
>>> substituter.add_substitution(
... 'var2', 'mod2.var2', 'module2.variable2', module2)
>>> print(substituter.get_commands())
.. var2 replace:: module2.variable2
.. mod1.var1 replace:: module1.variable1
.. mod2.var1 replace:: module2.variable1
.. mod2.var2 replace:: module2.variable2
In order to reduce the risk of name conflicts, only the
`medium2long` mapping is supported for modules not part of the
*HydPy* package:
>>> module3 = types.ModuleType('module3')
>>> substituter.add_substitution(
... 'var3', 'mod3.var3', 'module3.variable3', module3)
>>> print(substituter.get_commands())
.. var2 replace:: module2.variable2
.. mod1.var1 replace:: module1.variable1
.. mod2.var1 replace:: module2.variable1
.. mod2.var2 replace:: module2.variable2
.. mod3.var3 replace:: module3.variable3
The only exception to this rule is |builtins|, for which only
the `short2long` mapping is supported (note also, that the
module name `builtins` is removed from string `long`):
>>> import builtins
>>> substituter.add_substitution(
... 'str', 'blt.str', ':func:`~builtins.str`', builtins)
>>> print(substituter.get_commands())
.. str replace:: :func:`str`
.. var2 replace:: module2.variable2
.. mod1.var1 replace:: module1.variable1
.. mod2.var1 replace:: module2.variable1
.. mod2.var2 replace:: module2.variable2
.. mod3.var3 replace:: module3.variable3
"""
name = module.__name__
if 'builtin' in name:
self._short2long[short] = long.split('~')[0] + long.split('.')[-1]
else:
if ('hydpy' in name) and (short not in self._blacklist):
if short in self._short2long:
if self._short2long[short] != long:
self._blacklist.add(short)
del self._short2long[short]
else:
self._short2long[short] = long
self._medium2long[medium] = long
|
Add the given substitutions both as a `short2long` and a
`medium2long` mapping.
Assume `variable1` is defined in the hydpy module `module1` and the
short and medium descriptions are `var1` and `mod1.var1`:
>>> import types
>>> module1 = types.ModuleType('hydpy.module1')
>>> from hydpy.core.autodoctools import Substituter
>>> substituter = Substituter()
>>> substituter.add_substitution(
... 'var1', 'mod1.var1', 'module1.variable1', module1)
>>> print(substituter.get_commands())
.. var1 replace:: module1.variable1
.. mod1.var1 replace:: module1.variable1
Adding `variable2` of `module2` has no effect on the predefined
substitutions:
>>> module2 = types.ModuleType('hydpy.module2')
>>> substituter.add_substitution(
... 'var2', 'mod2.var2', 'module2.variable2', module2)
>>> print(substituter.get_commands())
.. var1 replace:: module1.variable1
.. var2 replace:: module2.variable2
.. mod1.var1 replace:: module1.variable1
.. mod2.var2 replace:: module2.variable2
But when adding `variable1` of `module2`, the `short2long` mapping
of `variable1` would become inconclusive, which is why the new
one (related to `module2`) is not stored and the old one (related
to `module1`) is removed:
>>> substituter.add_substitution(
... 'var1', 'mod2.var1', 'module2.variable1', module2)
>>> print(substituter.get_commands())
.. var2 replace:: module2.variable2
.. mod1.var1 replace:: module1.variable1
.. mod2.var1 replace:: module2.variable1
.. mod2.var2 replace:: module2.variable2
Adding `variable2` of `module2` accidentally again, does not
result in any undesired side-effects:
>>> substituter.add_substitution(
... 'var2', 'mod2.var2', 'module2.variable2', module2)
>>> print(substituter.get_commands())
.. var2 replace:: module2.variable2
.. mod1.var1 replace:: module1.variable1
.. mod2.var1 replace:: module2.variable1
.. mod2.var2 replace:: module2.variable2
In order to reduce the risk of name conflicts, only the
`medium2long` mapping is supported for modules not part of the
*HydPy* package:
>>> module3 = types.ModuleType('module3')
>>> substituter.add_substitution(
... 'var3', 'mod3.var3', 'module3.variable3', module3)
>>> print(substituter.get_commands())
.. var2 replace:: module2.variable2
.. mod1.var1 replace:: module1.variable1
.. mod2.var1 replace:: module2.variable1
.. mod2.var2 replace:: module2.variable2
.. mod3.var3 replace:: module3.variable3
The only exception to this rule is |builtins|, for which only
the `short2long` mapping is supported (note also, that the
module name `builtins` is removed from string `long`):
>>> import builtins
>>> substituter.add_substitution(
... 'str', 'blt.str', ':func:`~builtins.str`', builtins)
>>> print(substituter.get_commands())
.. str replace:: :func:`str`
.. var2 replace:: module2.variable2
.. mod1.var1 replace:: module1.variable1
.. mod2.var1 replace:: module2.variable1
.. mod2.var2 replace:: module2.variable2
.. mod3.var3 replace:: module3.variable3
|
entailment
|
def add_module(self, module, cython=False):
"""Add the given module, its members, and their submembers.
The first examples are based on the site-package |numpy|, which
is passed to method |Substituter.add_module|:
>>> from hydpy.core.autodoctools import Substituter
>>> substituter = Substituter()
>>> import numpy
>>> substituter.add_module(numpy)
Firstly, the module itself is added:
>>> substituter.find('|numpy|')
|numpy| :mod:`~numpy`
Secondly, constants like |numpy.nan| are added:
>>> substituter.find('|numpy.nan|')
|numpy.nan| :const:`~numpy.nan`
Thirdly, functions like |numpy.clip| are added:
>>> substituter.find('|numpy.clip|')
|numpy.clip| :func:`~numpy.clip`
Fourthly, classes like |numpy.ndarray| are added:
>>> substituter.find('|numpy.ndarray|')
|numpy.ndarray| :class:`~numpy.ndarray`
When adding Cython modules, the `cython` flag should be set |True|:
>>> from hydpy.cythons import pointerutils
>>> substituter.add_module(pointerutils, cython=True)
>>> substituter.find('set_pointer')
|PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
|pointerutils.PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
"""
name_module = module.__name__.split('.')[-1]
short = ('|%s|'
% name_module)
long = (':mod:`~%s`'
% module.__name__)
self._short2long[short] = long
for (name_member, member) in vars(module).items():
if self.consider_member(
name_member, member, module):
role = self.get_role(member, cython)
short = ('|%s|'
% name_member)
medium = ('|%s.%s|'
% (name_module,
name_member))
long = (':%s:`~%s.%s`'
% (role,
module.__name__,
name_member))
self.add_substitution(short, medium, long, module)
if inspect.isclass(member):
for name_submember, submember in vars(member).items():
if self.consider_member(
name_submember, submember, module, member):
role = self.get_role(submember, cython)
short = ('|%s.%s|'
% (name_member,
name_submember))
medium = ('|%s.%s.%s|'
% (name_module,
name_member,
name_submember))
long = (':%s:`~%s.%s.%s`'
% (role,
module.__name__,
name_member,
name_submember))
self.add_substitution(short, medium, long, module)
|
Add the given module, its members, and their submembers.
The first examples are based on the site-package |numpy|, which
is passed to method |Substituter.add_module|:
>>> from hydpy.core.autodoctools import Substituter
>>> substituter = Substituter()
>>> import numpy
>>> substituter.add_module(numpy)
Firstly, the module itself is added:
>>> substituter.find('|numpy|')
|numpy| :mod:`~numpy`
Secondly, constants like |numpy.nan| are added:
>>> substituter.find('|numpy.nan|')
|numpy.nan| :const:`~numpy.nan`
Thirdly, functions like |numpy.clip| are added:
>>> substituter.find('|numpy.clip|')
|numpy.clip| :func:`~numpy.clip`
Fourthly, classes like |numpy.ndarray| are added:
>>> substituter.find('|numpy.ndarray|')
|numpy.ndarray| :class:`~numpy.ndarray`
When adding Cython modules, the `cython` flag should be set |True|:
>>> from hydpy.cythons import pointerutils
>>> substituter.add_module(pointerutils, cython=True)
>>> substituter.find('set_pointer')
|PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
|pointerutils.PPDouble.set_pointer| \
:func:`~hydpy.cythons.autogen.pointerutils.PPDouble.set_pointer`
|
entailment
|
def add_modules(self, package):
"""Add the modules of the given package without their members."""
for name in os.listdir(package.__path__[0]):
if name.startswith('_'):
continue
name = name.split('.')[0]
short = '|%s|' % name
long = ':mod:`~%s.%s`' % (package.__package__, name)
self._short2long[short] = long
|
Add the modules of the given package without their members.
|
entailment
|
def update_masters(self):
"""Update all `master` |Substituter| objects.
If a |Substituter| object is passed to the constructor of another
|Substituter| object, they become `master` and `slave`:
>>> from hydpy.core.autodoctools import Substituter
>>> sub1 = Substituter()
>>> from hydpy.core import devicetools
>>> sub1.add_module(devicetools)
>>> sub2 = Substituter(sub1)
>>> sub3 = Substituter(sub2)
>>> sub3.master.master is sub1
True
>>> sub2 in sub1.slaves
True
During initialization, all mappings handled by the master object
are passed to its new slave:
>>> sub3.find('Node|')
|Node| :class:`~hydpy.core.devicetools.Node`
|devicetools.Node| :class:`~hydpy.core.devicetools.Node`
Updating a slave does not affect its master directly:
>>> from hydpy.core import hydpytools
>>> sub3.add_module(hydpytools)
>>> sub3.find('HydPy|')
|HydPy| :class:`~hydpy.core.hydpytools.HydPy`
|hydpytools.HydPy| :class:`~hydpy.core.hydpytools.HydPy`
>>> sub2.find('HydPy|')
Through calling |Substituter.update_masters|, the `medium2long`
mappings are passed from the slave to its master:
>>> sub3.update_masters()
>>> sub2.find('HydPy|')
|hydpytools.HydPy| :class:`~hydpy.core.hydpytools.HydPy`
Then each master object updates its own master object also:
>>> sub1.find('HydPy|')
|hydpytools.HydPy| :class:`~hydpy.core.hydpytools.HydPy`
In reverse, subsequent updates of master objects do not affect
their slaves directly:
>>> from hydpy.core import masktools
>>> sub1.add_module(masktools)
>>> sub1.find('Masks|')
|Masks| :class:`~hydpy.core.masktools.Masks`
|masktools.Masks| :class:`~hydpy.core.masktools.Masks`
>>> sub2.find('Masks|')
Through calling |Substituter.update_slaves|, the `medium2long`
mappings are passed from the master to all of its slaves:
>>> sub1.update_slaves()
>>> sub2.find('Masks|')
|masktools.Masks| :class:`~hydpy.core.masktools.Masks`
>>> sub3.find('Masks|')
|masktools.Masks| :class:`~hydpy.core.masktools.Masks`
"""
if self.master is not None:
self.master._medium2long.update(self._medium2long)
self.master.update_masters()
|
Update all `master` |Substituter| objects.
If a |Substituter| object is passed to the constructor of another
|Substituter| object, they become `master` and `slave`:
>>> from hydpy.core.autodoctools import Substituter
>>> sub1 = Substituter()
>>> from hydpy.core import devicetools
>>> sub1.add_module(devicetools)
>>> sub2 = Substituter(sub1)
>>> sub3 = Substituter(sub2)
>>> sub3.master.master is sub1
True
>>> sub2 in sub1.slaves
True
During initialization, all mappings handled by the master object
are passed to its new slave:
>>> sub3.find('Node|')
|Node| :class:`~hydpy.core.devicetools.Node`
|devicetools.Node| :class:`~hydpy.core.devicetools.Node`
Updating a slave does not affect its master directly:
>>> from hydpy.core import hydpytools
>>> sub3.add_module(hydpytools)
>>> sub3.find('HydPy|')
|HydPy| :class:`~hydpy.core.hydpytools.HydPy`
|hydpytools.HydPy| :class:`~hydpy.core.hydpytools.HydPy`
>>> sub2.find('HydPy|')
Through calling |Substituter.update_masters|, the `medium2long`
mappings are passed from the slave to its master:
>>> sub3.update_masters()
>>> sub2.find('HydPy|')
|hydpytools.HydPy| :class:`~hydpy.core.hydpytools.HydPy`
Then each master object updates its own master object also:
>>> sub1.find('HydPy|')
|hydpytools.HydPy| :class:`~hydpy.core.hydpytools.HydPy`
In reverse, subsequent updates of master objects do not affect
their slaves directly:
>>> from hydpy.core import masktools
>>> sub1.add_module(masktools)
>>> sub1.find('Masks|')
|Masks| :class:`~hydpy.core.masktools.Masks`
|masktools.Masks| :class:`~hydpy.core.masktools.Masks`
>>> sub2.find('Masks|')
Through calling |Substituter.update_slaves|, the `medium2long`
mappings are passed from the master to all of its slaves:
>>> sub1.update_slaves()
>>> sub2.find('Masks|')
|masktools.Masks| :class:`~hydpy.core.masktools.Masks`
>>> sub3.find('Masks|')
|masktools.Masks| :class:`~hydpy.core.masktools.Masks`
|
entailment
|
def update_slaves(self):
"""Update all `slave` |Substituter| objects.
See method |Substituter.update_masters| for further information.
"""
for slave in self.slaves:
slave._medium2long.update(self._medium2long)
slave.update_slaves()
|
Update all `slave` |Substituter| objects.
See method |Substituter.update_masters| for further information.
|
entailment
|
def get_commands(self, source=None):
"""Return a string containing multiple `reStructuredText`
replacements with the substitutions currently defined.
Some examples based on the subpackage |optiontools|:
>>> from hydpy.core.autodoctools import Substituter
>>> substituter = Substituter()
>>> from hydpy.core import optiontools
>>> substituter.add_module(optiontools)
When calling |Substituter.get_commands| without the `source`
argument, the complete `short2long` and `medium2long` mappings
are translated into replacement commands (only a few of them
are shown):
>>> print(substituter.get_commands())
.. |Options.autocompile| replace:: \
:const:`~hydpy.core.optiontools.Options.autocompile`
.. |Options.checkseries| replace:: \
:const:`~hydpy.core.optiontools.Options.checkseries`
...
.. |optiontools.Options.warntrim| replace:: \
:const:`~hydpy.core.optiontools.Options.warntrim`
.. |optiontools.Options| replace:: \
:class:`~hydpy.core.optiontools.Options`
Through passing a string (usually the source code of a file
to be documented), only the replacement commands relevant for
this string are translated:
>>> from hydpy.core import objecttools
>>> import inspect
>>> source = inspect.getsource(objecttools)
>>> print(substituter.get_commands(source))
.. |Options.reprdigits| replace:: \
:const:`~hydpy.core.optiontools.Options.reprdigits`
"""
commands = []
for key, value in self:
if (source is None) or (key in source):
commands.append('.. %s replace:: %s' % (key, value))
return '\n'.join(commands)
|
Return a string containing multiple `reStructuredText`
replacements with the substitutions currently defined.
Some examples based on the subpackage |optiontools|:
>>> from hydpy.core.autodoctools import Substituter
>>> substituter = Substituter()
>>> from hydpy.core import optiontools
>>> substituter.add_module(optiontools)
When calling |Substituter.get_commands| without the `source`
argument, the complete `short2long` and `medium2long` mappings
are translated into replacement commands (only a few of them
are shown):
>>> print(substituter.get_commands())
.. |Options.autocompile| replace:: \
:const:`~hydpy.core.optiontools.Options.autocompile`
.. |Options.checkseries| replace:: \
:const:`~hydpy.core.optiontools.Options.checkseries`
...
.. |optiontools.Options.warntrim| replace:: \
:const:`~hydpy.core.optiontools.Options.warntrim`
.. |optiontools.Options| replace:: \
:class:`~hydpy.core.optiontools.Options`
Through passing a string (usually the source code of a file
to be documented), only the replacement commands relevant for
this string are translated:
>>> from hydpy.core import objecttools
>>> import inspect
>>> source = inspect.getsource(objecttools)
>>> print(substituter.get_commands(source))
.. |Options.reprdigits| replace:: \
:const:`~hydpy.core.optiontools.Options.reprdigits`
|
entailment
|
def find(self, text):
"""Print all substitutions that include the given text string."""
for key, value in self:
if (text in key) or (text in value):
print(key, value)
|
Print all substitutions that include the given text string.
|
entailment
|
def print_progress(wrapped, _=None, args=None, kwargs=None):
"""Add print commands time to the given function informing about
execution time.
To show how the |print_progress| decorator works, we need to modify the
functions used by |print_progress| to gain system time information
available in module |time|.
First, we mock the functions |time.strftime| and |time.perf_counter|:
>>> import time
>>> from unittest import mock
>>> strftime = time.strftime
>>> perf_counter = time.perf_counter
>>> strftime_mock = mock.MagicMock()
>>> time.strftime = strftime_mock
>>> time.perf_counter = mock.MagicMock()
The mock of |time.strftime| shall respond to two calls, as if the first
call to a decorated function occurs at quarter past eight, and the second
one two seconds later:
>>> time.strftime.side_effect = '20:15:00', '20:15:02'
The mock of |time.perf_counter| shall respond to four calls, as if the
subsequent calls by decorated functions occur at second 1, 3, 4, and 7:
>>> time.perf_counter.side_effect = 1, 3, 4, 7
Now we decorate two test functions. The first one does nothing; the
second one only calls the first one:
>>> from hydpy.core.printtools import print_progress
>>> @print_progress
... def test1():
... pass
>>> @print_progress
... def test2():
... test1()
The first example shows that the output is appropriately indented,
that the returned times are at the right place, that the calculated
execution time is correct, and that the mock of |time.strftime|
received a valid format string:
>>> from hydpy import pub
>>> pub.options.printprogress = True
>>> test2()
method test2 started at 20:15:00
method test1 started at 20:15:02
seconds elapsed: 1
seconds elapsed: 6
>>> strftime_mock.call_args
call('%H:%M:%S')
The second example verifies that resetting the indentation works:
>>> time.strftime.side_effect = '20:15:00', '20:15:02'
>>> time.perf_counter.side_effect = 1, 3, 4, 7
>>> test2()
method test2 started at 20:15:00
method test1 started at 20:15:02
seconds elapsed: 1
seconds elapsed: 6
The last example shows that disabling the |Options.printprogress|
option works as expected:
>>> pub.options.printprogress = False
>>> test2()
>>> time.strftime = strftime
>>> time.perf_counter = perf_counter
"""
global _printprogress_indentation
_printprogress_indentation += 4
try:
if hydpy.pub.options.printprogress:
blanks = ' ' * _printprogress_indentation
name = wrapped.__name__
time_ = time.strftime('%H:%M:%S')
with PrintStyle(color=34, font=1):
print(f'{blanks}method {name} started at {time_}')
seconds = time.perf_counter()
sys.stdout.flush()
wrapped(*args, **kwargs)
blanks = ' ' * (_printprogress_indentation+4)
seconds = time.perf_counter()-seconds
with PrintStyle(color=34, font=1):
print(f'{blanks}seconds elapsed: {seconds}')
sys.stdout.flush()
else:
wrapped(*args, **kwargs)
finally:
_printprogress_indentation -= 4
|
Add print commands to the given function informing about its
execution time.
To show how the |print_progress| decorator works, we need to modify the
functions used by |print_progress| to gain system time information
available in module |time|.
First, we mock the functions |time.strftime| and |time.perf_counter|:
>>> import time
>>> from unittest import mock
>>> strftime = time.strftime
>>> perf_counter = time.perf_counter
>>> strftime_mock = mock.MagicMock()
>>> time.strftime = strftime_mock
>>> time.perf_counter = mock.MagicMock()
The mock of |time.strftime| shall respond to two calls, as if the first
call to a decorated function occurs at quarter past eight, and the second
one two seconds later:
>>> time.strftime.side_effect = '20:15:00', '20:15:02'
The mock of |time.perf_counter| shall respond to four calls, as if the
subsequent calls by decorated functions occur at second 1, 3, 4, and 7:
>>> time.perf_counter.side_effect = 1, 3, 4, 7
Now we decorate two test functions. The first one does nothing; the
second one only calls the first one:
>>> from hydpy.core.printtools import print_progress
>>> @print_progress
... def test1():
... pass
>>> @print_progress
... def test2():
... test1()
The first example shows that the output is appropriately indented,
that the returned times are at the right place, that the calculated
execution time is correct, and that the mock of |time.strftime|
received a valid format string:
>>> from hydpy import pub
>>> pub.options.printprogress = True
>>> test2()
method test2 started at 20:15:00
method test1 started at 20:15:02
seconds elapsed: 1
seconds elapsed: 6
>>> strftime_mock.call_args
call('%H:%M:%S')
The second example verifies that resetting the indentation works:
>>> time.strftime.side_effect = '20:15:00', '20:15:02'
>>> time.perf_counter.side_effect = 1, 3, 4, 7
>>> test2()
method test2 started at 20:15:00
method test1 started at 20:15:02
seconds elapsed: 1
seconds elapsed: 6
The last example shows that disabling the |Options.printprogress|
option works as expected:
>>> pub.options.printprogress = False
>>> test2()
>>> time.strftime = strftime
>>> time.perf_counter = perf_counter
|
entailment
|
def progressbar(iterable, length=23):
"""Print a simple progress bar while processing the given iterable.
Function |progressbar| does print the progress bar when option
`printprogress` is activated:
>>> from hydpy import pub
>>> pub.options.printprogress = True
You can pass an iterable object. Say you want to calculate the sum
of all integer values from 1 to 100 and print the progress of the
calculation. Using function |range| (which returns a list in Python 2
and an iterator in Python 3, but both are fine), one just has to
interpose function |progressbar|:
>>> from hydpy.core.printtools import progressbar
>>> x_sum = 0
>>> for x in progressbar(range(1, 101)):
... x_sum += x
|---------------------|
***********************
>>> x_sum
5050
To prevent possible interim print commands from dismembering the status
bar, they are delayed until the status bar is complete. For intermediate
print outs of each fiftieth calculation, the result looks as follows:
>>> x_sum = 0
>>> for x in progressbar(range(1, 101)):
... x_sum += x
... if not x % 50:
... print(x, x_sum)
|---------------------|
***********************
50 1275
100 5050
The number of characters of the progress bar can be changed:
>>> for i in progressbar(range(100), length=50):
... continue
|------------------------------------------------|
**************************************************
But its maximum number of characters is restricted by the length of the
given iterable:
>>> for i in progressbar(range(10), length=50):
... continue
|--------|
**********
The smallest possible progress bar has two characters:
>>> for i in progressbar(range(2)):
... continue
||
**
For iterables of length one or zero, no progress bar is plotted:
>>> for i in progressbar(range(1)):
... continue
The same is true when the `printprogress` option is inactivated:
>>> pub.options.printprogress = False
>>> for i in progressbar(range(100)):
... continue
"""
if hydpy.pub.options.printprogress and (len(iterable) > 1):
temp_name = os.path.join(tempfile.gettempdir(),
'HydPy_progressbar_stdout')
temp_stdout = open(temp_name, 'w')
real_stdout = sys.stdout
try:
sys.stdout = temp_stdout
nmbstars = min(len(iterable), length)
nmbcounts = len(iterable)/nmbstars
indentation = ' '*max(_printprogress_indentation, 0)
with PrintStyle(color=36, font=1, file=real_stdout):
print(' %s|%s|\n%s ' % (indentation,
'-'*(nmbstars-2),
indentation),
end='',
file=real_stdout)
counts = 1.
for next_ in iterable:
counts += 1.
if counts >= nmbcounts:
print(end='*', file=real_stdout)
counts -= nmbcounts
yield next_
finally:
try:
temp_stdout.close()
except BaseException:
pass
sys.stdout = real_stdout
print()
with open(temp_name, 'r') as temp_stdout:
sys.stdout.write(temp_stdout.read())
sys.stdout.flush()
else:
for next_ in iterable:
yield next_
|
Print a simple progress bar while processing the given iterable.
Function |progressbar| does print the progress bar when option
`printprogress` is activated:
>>> from hydpy import pub
>>> pub.options.printprogress = True
You can pass an iterable object. Say you want to calculate the sum
of all integer values from 1 to 100 and print the progress of the
calculation. Using function |range| (which returns a list in Python 2
and an iterator in Python 3, but both are fine), one just has to
interpose function |progressbar|:
>>> from hydpy.core.printtools import progressbar
>>> x_sum = 0
>>> for x in progressbar(range(1, 101)):
... x_sum += x
|---------------------|
***********************
>>> x_sum
5050
To prevent possible interim print commands from dismembering the status
bar, they are delayed until the status bar is complete. For intermediate
print outs of each fiftieth calculation, the result looks as follows:
>>> x_sum = 0
>>> for x in progressbar(range(1, 101)):
... x_sum += x
... if not x % 50:
... print(x, x_sum)
|---------------------|
***********************
50 1275
100 5050
The number of characters of the progress bar can be changed:
>>> for i in progressbar(range(100), length=50):
... continue
|------------------------------------------------|
**************************************************
But its maximum number of characters is restricted by the length of the
given iterable:
>>> for i in progressbar(range(10), length=50):
... continue
|--------|
**********
The smallest possible progress bar has two characters:
>>> for i in progressbar(range(2)):
... continue
||
**
For iterables of length one or zero, no progress bar is plotted:
>>> for i in progressbar(range(1)):
... continue
The same is true when the `printprogress` option is inactivated:
>>> pub.options.printprogress = False
>>> for i in progressbar(range(100)):
... continue
|
entailment
|
def start_server(socket, projectname, xmlfilename: str) -> None:
"""Start the *HydPy* server using the given socket.
The folder with the given `projectname` must be available within the
current working directory. The XML configuration file must be placed
within the project folder unless `xmlfilename` is an absolute file path.
The XML configuration file must be valid concerning the schema file
`HydPyConfigMultipleRuns.xsd` (see method |ServerState.initialise|
for further information).
"""
state.initialise(projectname, xmlfilename)
server = http.server.HTTPServer(('', int(socket)), HydPyServer)
server.serve_forever()
|
Start the *HydPy* server using the given socket.
The folder with the given `projectname` must be available within the
current working directory. The XML configuration file must be placed
within the project folder unless `xmlfilename` is an absolute file path.
The XML configuration file must be valid concerning the schema file
`HydPyConfigMultipleRuns.xsd` (see method |ServerState.initialise|
for further information).
|
entailment
|
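For readers unfamiliar with the standard-library server used above, this self-contained sketch starts an `http.server.HTTPServer` with a trivial handler; the handler class and the port choice are illustrative, not HydPy's:
import http.server
import threading

class StatusHandler(http.server.BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b'status = ready')

server = http.server.HTTPServer(('', 0), StatusHandler)  # port 0 picks a free port
threading.Thread(target=server.serve_forever, daemon=True).start()
print('listening on port', server.server_address[1])
# Shut down later from another thread via server.shutdown() and
# server.server_close() (compare GET_close_server further below).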
def await_server(port, seconds):
"""Block the current process until either the *HydPy* server is responding
on the given `port` or the given number of `seconds` elapsed.
>>> from hydpy import run_subprocess, TestIO
>>> with TestIO(): # doctest: +ELLIPSIS
... run_subprocess('hyd.py await_server 8080 0.1')
Invoking hyd.py with arguments `...hyd.py, await_server, 8080, 0.1` \
resulted in the following error:
<urlopen error Waited for 0.1 seconds without response on port 8080.>
...
>>> from hydpy.core.examples import prepare_full_example_1
>>> prepare_full_example_1()
>>> with TestIO():
... process = run_subprocess(
... 'hyd.py start_server 8080 LahnH multiple_runs.xml',
... blocking=False, verbose=False)
... run_subprocess('hyd.py await_server 8080 10', verbose=False)
>>> from urllib import request
>>> _ = request.urlopen('http://localhost:8080/close_server')
>>> process.kill()
>>> _ = process.communicate()
"""
now = time.perf_counter()
end = now + float(seconds)
while now <= end:
try:
urllib.request.urlopen(f'http://localhost:{port}/status')
break
except urllib.error.URLError:
time.sleep(0.1)
now = time.perf_counter()
else:
raise urllib.error.URLError(
f'Waited for {seconds} seconds without response on port {port}.')
|
Block the current process until either the *HydPy* server is responding
on the given `port` or the given number of `seconds` elapsed.
>>> from hydpy import run_subprocess, TestIO
>>> with TestIO(): # doctest: +ELLIPSIS
... run_subprocess('hyd.py await_server 8080 0.1')
Invoking hyd.py with arguments `...hyd.py, await_server, 8080, 0.1` \
resulted in the following error:
<urlopen error Waited for 0.1 seconds without response on port 8080.>
...
>>> from hydpy.core.examples import prepare_full_example_1
>>> prepare_full_example_1()
>>> with TestIO():
... process = run_subprocess(
... 'hyd.py start_server 8080 LahnH multiple_runs.xml',
... blocking=False, verbose=False)
... run_subprocess('hyd.py await_server 8080 10', verbose=False)
>>> from urllib import request
>>> _ = request.urlopen('http://localhost:8080/close_server')
>>> process.kill()
>>> _ = process.communicate()
|
entailment
|
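The polling loop above relies on Python's `while ... else` construct, which is easy to misread: the `else` branch runs only when the loop condition becomes false without a `break`. A tiny standalone sketch of the same pattern:
import time

deadline = time.perf_counter() + 0.05  # illustrative 50 ms budget
server_responded = False               # stand-in for a successful urlopen call
while time.perf_counter() <= deadline:
    if server_responded:
        break
    time.sleep(0.01)
else:
    print('no response before the deadline, raise an error here')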
def initialise(self, projectname: str, xmlfile: str) -> None:
"""Initialise a *HydPy* project based on the given XML configuration
file agreeing with `HydPyConfigMultipleRuns.xsd`.
We use the `LahnH` project and its rather complex XML configuration
file `multiple_runs.xml` as an example (module |xmltools| provides
information on interpreting this file):
>>> from hydpy.core.examples import prepare_full_example_1
>>> prepare_full_example_1()
>>> from hydpy import print_values, TestIO
>>> from hydpy.exe.servertools import ServerState
>>> state = ServerState()
>>> with TestIO(): # doctest: +ELLIPSIS
... state.initialise('LahnH', 'multiple_runs.xml')
Start HydPy project `LahnH` (...).
Read configuration file `multiple_runs.xml` (...).
Interpret the defined options (...).
Interpret the defined period (...).
Read all network files (...).
Activate the selected network (...).
Read the required control files (...).
Read the required condition files (...).
Read the required time series files (...).
After initialisation, all defined exchange items are available:
>>> for item in state.parameteritems:
... print(item)
SetItem('alpha', 'hland_v1', 'control.alpha', 0)
SetItem('beta', 'hland_v1', 'control.beta', 0)
SetItem('lag', 'hstream_v1', 'control.lag', 0)
SetItem('damp', 'hstream_v1', 'control.damp', 0)
AddItem('sfcf_1', 'hland_v1', 'control.sfcf', 'control.rfcf', 0)
AddItem('sfcf_2', 'hland_v1', 'control.sfcf', 'control.rfcf', 0)
AddItem('sfcf_3', 'hland_v1', 'control.sfcf', 'control.rfcf', 1)
>>> for item in state.conditionitems:
... print(item)
SetItem('sm_lahn_2', 'hland_v1', 'states.sm', 0)
SetItem('sm_lahn_1', 'hland_v1', 'states.sm', 1)
SetItem('quh', 'hland_v1', 'logs.quh', 0)
>>> for item in state.getitems:
... print(item)
GetItem('hland_v1', 'fluxes.qt')
GetItem('hland_v1', 'fluxes.qt.series')
GetItem('hland_v1', 'states.sm')
GetItem('hland_v1', 'states.sm.series')
GetItem('nodes', 'nodes.sim.series')
The initialisation also memorises the initial conditions of
all elements:
>>> for element in state.init_conditions:
... print(element)
land_dill
land_lahn_1
land_lahn_2
land_lahn_3
stream_dill_lahn_2
stream_lahn_1_lahn_2
stream_lahn_2_lahn_3
Initialisation also prepares all selected series arrays and
reads the required input data:
>>> print_values(
... state.hp.elements.land_dill.model.sequences.inputs.t.series)
-0.298846, -0.811539, -2.493848, -5.968849, -6.999618
>>> state.hp.nodes.dill.sequences.sim.series
InfoArray([ nan, nan, nan, nan, nan])
"""
write = commandtools.print_textandtime
write(f'Start HydPy project `{projectname}`')
hp = hydpytools.HydPy(projectname)
write(f'Read configuration file `{xmlfile}`')
interface = xmltools.XMLInterface(xmlfile)
write('Interpret the defined options')
interface.update_options()
write('Interpret the defined period')
interface.update_timegrids()
write('Read all network files')
hp.prepare_network()
write('Activate the selected network')
hp.update_devices(interface.fullselection)
write('Read the required control files')
hp.init_models()
write('Read the required condition files')
interface.conditions_io.load_conditions()
write('Read the required time series files')
interface.series_io.prepare_series()
interface.exchange.prepare_series()
interface.series_io.load_series()
self.hp = hp
self.parameteritems = interface.exchange.parameteritems
self.conditionitems = interface.exchange.conditionitems
self.getitems = interface.exchange.getitems
self.conditions = {}
self.parameteritemvalues = collections.defaultdict(lambda: {})
self.modifiedconditionitemvalues = collections.defaultdict(lambda: {})
self.getitemvalues = collections.defaultdict(lambda: {})
self.init_conditions = hp.conditions
self.timegrids = {}
|
Initialise a *HydPy* project based on the given XML configuration
file agreeing with `HydPyConfigMultipleRuns.xsd`.
We use the `LahnH` project and its rather complex XML configuration
file `multiple_runs.xml` as an example (module |xmltools| provides
information on interpreting this file):
>>> from hydpy.core.examples import prepare_full_example_1
>>> prepare_full_example_1()
>>> from hydpy import print_values, TestIO
>>> from hydpy.exe.servertools import ServerState
>>> state = ServerState()
>>> with TestIO(): # doctest: +ELLIPSIS
... state.initialise('LahnH', 'multiple_runs.xml')
Start HydPy project `LahnH` (...).
Read configuration file `multiple_runs.xml` (...).
Interpret the defined options (...).
Interpret the defined period (...).
Read all network files (...).
Activate the selected network (...).
Read the required control files (...).
Read the required condition files (...).
Read the required time series files (...).
After initialisation, all defined exchange items are available:
>>> for item in state.parameteritems:
... print(item)
SetItem('alpha', 'hland_v1', 'control.alpha', 0)
SetItem('beta', 'hland_v1', 'control.beta', 0)
SetItem('lag', 'hstream_v1', 'control.lag', 0)
SetItem('damp', 'hstream_v1', 'control.damp', 0)
AddItem('sfcf_1', 'hland_v1', 'control.sfcf', 'control.rfcf', 0)
AddItem('sfcf_2', 'hland_v1', 'control.sfcf', 'control.rfcf', 0)
AddItem('sfcf_3', 'hland_v1', 'control.sfcf', 'control.rfcf', 1)
>>> for item in state.conditionitems:
... print(item)
SetItem('sm_lahn_2', 'hland_v1', 'states.sm', 0)
SetItem('sm_lahn_1', 'hland_v1', 'states.sm', 1)
SetItem('quh', 'hland_v1', 'logs.quh', 0)
>>> for item in state.getitems:
... print(item)
GetItem('hland_v1', 'fluxes.qt')
GetItem('hland_v1', 'fluxes.qt.series')
GetItem('hland_v1', 'states.sm')
GetItem('hland_v1', 'states.sm.series')
GetItem('nodes', 'nodes.sim.series')
The initialisation also memorises the initial conditions of
all elements:
>>> for element in state.init_conditions:
... print(element)
land_dill
land_lahn_1
land_lahn_2
land_lahn_3
stream_dill_lahn_2
stream_lahn_1_lahn_2
stream_lahn_2_lahn_3
Initialisation also prepares all selected series arrays and
reads the required input data:
>>> print_values(
... state.hp.elements.land_dill.model.sequences.inputs.t.series)
-0.298846, -0.811539, -2.493848, -5.968849, -6.999618
>>> state.hp.nodes.dill.sequences.sim.series
InfoArray([ nan, nan, nan, nan, nan])
|
entailment
|
def POST_evaluate(self) -> None:
"""Evaluate any valid Python expression with the *HydPy* server
process and get its result.
Method |HydPyServer.POST_evaluate| serves to test and debug, primarily.
The main documentation on module |servertools| explains its usage.
"""
for name, value in self._inputs.items():
result = eval(value)
self._outputs[name] = objecttools.flatten_repr(result)
|
Evaluate any valid Python expression with the *HydPy* server
process and get its result.
Method |HydPyServer.POST_evaluate| serves to test and debug, primarily.
The main documentation on module |servertools| explains its usage.
|
entailment
|
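Reduced to plain Python, the endpoint above evaluates each posted expression and stores a printable representation of the result; `repr` stands in here for HydPy's `objecttools.flatten_repr`, and the input dictionary is made up for the example:
# Evaluating arbitrary expressions is acceptable only because the endpoint
# is meant for testing and debugging, as the docstring above notes.
inputs = {'answer': '6 * 7', 'items': 'sorted({3, 1, 2})'}
outputs = {name: repr(eval(value)) for name, value in inputs.items()}
print(outputs)  # {'answer': '42', 'items': '[1, 2, 3]'}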
def GET_close_server(self) -> None:
"""Stop and close the *HydPy* server."""
def _close_server():
self.server.shutdown()
self.server.server_close()
shutter = threading.Thread(target=_close_server)
shutter.deamon = True
shutter.start()
|
Stop and close the *HydPy* server.
|
entailment
|
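The extra thread above is not an accident: `HTTPServer.shutdown` blocks until `serve_forever` returns, so calling it directly from a request handler (which runs inside `serve_forever`) would deadlock. A minimal illustration of the pattern outside HydPy:
import http.server
import threading
import time

server = http.server.HTTPServer(('', 0), http.server.BaseHTTPRequestHandler)
threading.Thread(target=server.serve_forever, daemon=True).start()
time.sleep(0.1)  # give the serving loop a moment to start

def close_server():
    server.shutdown()      # blocks until serve_forever has returned
    server.server_close()  # then release the socket

closer = threading.Thread(target=close_server)
closer.start()
closer.join()
print('server closed')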
def GET_parameteritemtypes(self) -> None:
"""Get the types of all current exchange items supposed to change
the values of |Parameter| objects."""
for item in state.parameteritems:
self._outputs[item.name] = self._get_itemtype(item)
|
Get the types of all current exchange items supposed to change
the values of |Parameter| objects.
|
entailment
|
def GET_conditionitemtypes(self) -> None:
"""Get the types of all current exchange items supposed to change
the values of |StateSequence| or |LogSequence| objects."""
for item in state.conditionitems:
self._outputs[item.name] = self._get_itemtype(item)
|
Get the types of all current exchange items supposed to change
the values of |StateSequence| or |LogSequence| objects.
|
entailment
|
def GET_getitemtypes(self) -> None:
"""Get the types of all current exchange items supposed to return
the values of |Parameter| or |Sequence| objects or the time series
of |IOSequence| objects."""
for item in state.getitems:
type_ = self._get_itemtype(item)
for name, _ in item.yield_name2value():
self._outputs[name] = type_
|
Get the types of all current exchange items supposed to return
the values of |Parameter| or |Sequence| objects or the time series
of |IOSequence| objects.
|
entailment
|
def POST_timegrid(self) -> None:
"""Change the current simulation |Timegrid|."""
init = hydpy.pub.timegrids.init
sim = hydpy.pub.timegrids.sim
sim.firstdate = self._inputs['firstdate']
sim.lastdate = self._inputs['lastdate']
state.idx1 = init[sim.firstdate]
state.idx2 = init[sim.lastdate]
|
Change the current simulation |Timegrid|.
|
entailment
|
def GET_parameteritemvalues(self) -> None:
"""Get the values of all |ChangeItem| objects handling |Parameter|
objects."""
for item in state.parameteritems:
self._outputs[item.name] = item.value
|
Get the values of all |ChangeItem| objects handling |Parameter|
objects.
|
entailment
|
def GET_conditionitemvalues(self) -> None:
"""Get the values of all |ChangeItem| objects handling |StateSequence|
or |LogSequence| objects."""
for item in state.conditionitems:
self._outputs[item.name] = item.value
|
Get the values of all |ChangeItem| objects handling |StateSequence|
or |LogSequence| objects.
|
entailment
|
def GET_getitemvalues(self) -> None:
"""Get the values of all |Variable| objects observed by the
current |GetItem| objects.
For |GetItem| objects observing time series,
|HydPyServer.GET_getitemvalues| returns only the values within
the current simulation period.
"""
for item in state.getitems:
for name, value in item.yield_name2value(state.idx1, state.idx2):
self._outputs[name] = value
|
Get the values of all |Variable| objects observed by the
current |GetItem| objects.
For |GetItem| objects observing time series,
|HydPyServer.GET_getitemvalues| returns only the values within
the current simulation period.
|
entailment
|
def GET_load_conditionvalues(self) -> None:
"""Assign the |StateSequence| or |LogSequence| object values available
for the current simulation start point to the current |HydPy| instance.
When the simulation start point is identical with the initialisation
time point and you did not save conditions for it beforehand, the
"original" initial conditions are used (normally those of the
conditions files of the respective *HydPy* project).
"""
try:
state.hp.conditions = state.conditions[self._id][state.idx1]
except KeyError:
if state.idx1:
self._statuscode = 500
raise RuntimeError(
f'Conditions for ID `{self._id}` and time point '
f'`{hydpy.pub.timegrids.sim.firstdate}` are required, '
f'but have not been calculated so far.')
else:
state.hp.conditions = state.init_conditions
|
Assign the |StateSequence| or |LogSequence| object values available
for the current simulation start point to the current |HydPy| instance.
When the simulation start point is identical with the initialisation
time point and you did not save conditions for it beforehand, the
"original" initial conditions are used (normally those of the
conditions files of the respective *HydPy* project).
|
entailment
|
def GET_save_conditionvalues(self) -> None:
"""Save the |StateSequence| and |LogSequence| object values of the
current |HydPy| instance for the current simulation endpoint."""
state.conditions[self._id] = state.conditions.get(self._id, {})
state.conditions[self._id][state.idx2] = state.hp.conditions
|
Save the |StateSequence| and |LogSequence| object values of the
current |HydPy| instance for the current simulation endpoint.
|
entailment
|
def GET_save_parameteritemvalues(self) -> None:
"""Save the values of those |ChangeItem| objects which are
handling |Parameter| objects."""
for item in state.parameteritems:
state.parameteritemvalues[self._id][item.name] = item.value.copy()
|
Save the values of those |ChangeItem| objects which are
handling |Parameter| objects.
|
entailment
|
def GET_savedparameteritemvalues(self) -> None:
"""Get the previously saved values of those |ChangeItem| objects
which are handling |Parameter| objects."""
dict_ = state.parameteritemvalues.get(self._id)
if dict_ is None:
self.GET_parameteritemvalues()
else:
for name, value in dict_.items():
self._outputs[name] = value
|
Get the previously saved values of those |ChangeItem| objects
which are handling |Parameter| objects.
|
entailment
|
def GET_save_modifiedconditionitemvalues(self) -> None:
"""ToDo: extend functionality and add tests"""
for item in state.conditionitems:
state.modifiedconditionitemvalues[self._id][item.name] = \
list(item.device2target.values())[0].value
|
ToDo: extend functionality and add tests
|
entailment
|
def GET_savedmodifiedconditionitemvalues(self) -> None:
"""ToDo: extend functionality and add tests"""
dict_ = state.modifiedconditionitemvalues.get(self._id)
if dict_ is None:
self.GET_conditionitemvalues()
else:
for name, value in dict_.items():
self._outputs[name] = value
|
ToDo: extend functionality and add tests
|
entailment
|
def GET_save_getitemvalues(self) -> None:
"""Save the values of all current |GetItem| objects."""
for item in state.getitems:
for name, value in item.yield_name2value(state.idx1, state.idx2):
state.getitemvalues[self._id][name] = value
|
Save the values of all current |GetItem| objects.
|
entailment
|
def GET_savedgetitemvalues(self) -> None:
"""Get the previously saved values of all |GetItem| objects."""
dict_ = state.getitemvalues.get(self._id)
if dict_ is None:
self.GET_getitemvalues()
else:
for name, value in dict_.items():
self._outputs[name] = value
|
Get the previously saved values of all |GetItem| objects.
|
entailment
|
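The GET_saved* handlers above all follow the same retrieval rule: return the values cached under the given ID if they exist, otherwise recompute them on the fly. A minimal sketch of that rule with invented names and values:

saved_values = {'id_1': {'alpha': 2.0}}

def recompute():
    # stand-in for the corresponding GET_*itemvalues handler
    return {'alpha': 1.0}

def get_saved_or_recomputed(id_):
    dict_ = saved_values.get(id_)
    if dict_ is None:
        return recompute()
    return dict(dict_)

print(get_saved_or_recomputed('id_1'))   # {'alpha': 2.0} (cached)
print(get_saved_or_recomputed('id_2'))   # {'alpha': 1.0} (recomputed)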
def GET_save_timegrid(self) -> None:
"""Save the current simulation period."""
state.timegrids[self._id] = copy.deepcopy(hydpy.pub.timegrids.sim)
|
Save the current simulation period.
|
entailment
|
def GET_savedtimegrid(self) -> None:
"""Get the previously saved simulation period."""
try:
self._write_timegrid(state.timegrids[self._id])
except KeyError:
self._write_timegrid(hydpy.pub.timegrids.init)
|
Get the previously saved simulation period.
|
entailment
|
def trim(self: 'Variable', lower=None, upper=None) -> None:
"""Trim the value(s) of a |Variable| instance.
Usually, users do not need to apply function |trim| directly.
Instead, some |Variable| subclasses implement their own `trim`
methods relying on function |trim|. Model developers should
implement individual `trim` methods for their |Parameter| or
|Sequence| subclasses when their boundary values depend on the
actual project configuration (one example is soil moisture;
its lowest possible value should possibly be zero in all cases,
but its highest possible value could depend on another parameter
defining the maximum storage capacity).
For the following examples, we prepare a simple (not fully
functional) |Variable| subclass, making use of function |trim|
without any modifications. Function |trim| works slightly
differently for variables handling |float|, |int|, and |bool|
values. We start with the most common content type |float|:
>>> from hydpy.core.variabletools import trim, Variable
>>> class Var(Variable):
... NDIM = 0
... TYPE = float
... SPAN = 1.0, 3.0
... trim = trim
... initinfo = 2.0, False
... __hydpy__connect_variable2subgroup__ = None
First, we enable the printing of warning messages raised by function
|trim|:
>>> from hydpy import pub
>>> pub.options.warntrim = True
When not passing boundary values, function |trim| extracts them from
class attribute `SPAN` of the given |Variable| instance, if available:
>>> var = Var(None)
>>> var.value = 2.0
>>> var.trim()
>>> var
var(2.0)
>>> var.value = 0.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `0.0` and `1.0`, respectively.
>>> var
var(1.0)
>>> var.value = 4.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `4.0` and `3.0`, respectively.
>>> var
var(3.0)
In the examples above, outlier values are set to the respective
boundary value, accompanied by suitable warning messages. For very
tiny deviations, which might be due to precision problems only,
outliers are trimmed but not reported:
>>> var.value = 1.0 - 1e-15
>>> var == 1.0
False
>>> trim(var)
>>> var == 1.0
True
>>> var.value = 3.0 + 1e-15
>>> var == 3.0
False
>>> var.trim()
>>> var == 3.0
True
Use arguments `lower` and `upper` to override the (possibly)
available `SPAN` entries:
>>> var.trim(lower=4.0)
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `3.0` and `4.0`, respectively.
>>> var.trim(upper=3.0)
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `4.0` and `3.0`, respectively.
Function |trim| interprets both |None| and |numpy.nan| values as if
no boundary value exists:
>>> import numpy
>>> var.value = 0.0
>>> var.trim(lower=numpy.nan)
>>> var.value = 5.0
>>> var.trim(upper=numpy.nan)
You can disable function |trim| via option |Options.trimvariables|:
>>> with pub.options.trimvariables(False):
... var.value = 5.0
... var.trim()
>>> var
var(5.0)
Alternatively, you can omit the warning messages only:
>>> with pub.options.warntrim(False):
... var.value = 5.0
... var.trim()
>>> var
var(3.0)
If a |Variable| subclass does not have (fixed) boundaries, give it
either no `SPAN` attribute or a |tuple| containing |None| values:
>>> del Var.SPAN
>>> var.value = 5.0
>>> var.trim()
>>> var
var(5.0)
>>> Var.SPAN = (None, None)
>>> var.trim()
>>> var
var(5.0)
The above examples deal with a 0-dimensional |Variable| subclass.
The following examples repeat the most relevant examples for a
2-dimensional subclass:
>>> Var.SPAN = 1.0, 3.0
>>> Var.NDIM = 2
>>> var.shape = 1, 3
>>> var.values = 2.0
>>> var.trim()
>>> var.values = 0.0, 1.0, 2.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 1. 2.]]` and `[[ 1. 1. 2.]]`, \
respectively.
>>> var
var([[1.0, 1.0, 2.0]])
>>> var.values = 2.0, 3.0, 4.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 2. 3. 4.]]` and `[[ 2. 3. 3.]]`, \
respectively.
>>> var
var([[2.0, 3.0, 3.0]])
>>> var.values = 1.0-1e-15, 2.0, 3.0+1e-15
>>> var.values == (1.0, 2.0, 3.0)
array([[False, True, False]], dtype=bool)
>>> var.trim()
>>> var.values == (1.0, 2.0, 3.0)
array([[ True, True, True]], dtype=bool)
>>> var.values = 0.0, 2.0, 4.0
>>> var.trim(lower=numpy.nan, upper=numpy.nan)
>>> var
var([[0.0, 2.0, 4.0]])
>>> var.trim(lower=[numpy.nan, 3.0, 3.0])
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 2. 4.]]` and `[[ 0. 3. 3.]]`, \
respectively.
>>> var.values = 0.0, 2.0, 4.0
>>> var.trim(upper=[numpy.nan, 1.0, numpy.nan])
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 2. 4.]]` and `[[ 1. 1. 4.]]`, \
respectively.
For |Variable| subclasses handling |float| values, setting outliers
to the respective boundary value might often be an acceptable approach.
However, this is often not the case for subclasses handling |int|
values, which often serve as option flags (e.g. to enable/disable
a certain hydrological process for different land-use types). Hence,
function |trim| raises an exception instead of a warning and does
not modify the wrong |int| value:
>>> Var.TYPE = int
>>> Var.NDIM = 0
>>> Var.SPAN = 1, 3
>>> var.value = 2
>>> var.trim()
>>> var
var(2)
>>> var.value = 0
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `0` of parameter `var` of element `?` is not valid.
>>> var
var(0)
>>> var.value = 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `4` of parameter `var` of element `?` is not valid.
>>> var
var(4)
>>> from hydpy import INT_NAN
>>> var.value = 0
>>> var.trim(lower=0)
>>> var.trim(lower=INT_NAN)
>>> var.value = 4
>>> var.trim(upper=4)
>>> var.trim(upper=INT_NAN)
>>> Var.SPAN = 1, None
>>> var.value = 0
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `0` of parameter `var` of element `?` is not valid.
>>> var
var(0)
>>> Var.SPAN = None, 3
>>> var.value = 0
>>> var.trim()
>>> var.value = 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `4` of parameter `var` of element `?` is not valid.
>>> del Var.SPAN
>>> var.value = 0
>>> var.trim()
>>> var.value = 4
>>> var.trim()
>>> Var.SPAN = 1, 3
>>> Var.NDIM = 2
>>> var.shape = (1, 3)
>>> var.values = 2
>>> var.trim()
>>> var.values = 0, 1, 2
>>> var.trim()
Traceback (most recent call last):
...
ValueError: At least one value of parameter `var` of element `?` \
is not valid.
>>> var
var([[0, 1, 2]])
>>> var.values = 2, 3, 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: At least one value of parameter `var` of element `?` \
is not valid.
>>> var
var([[2, 3, 4]])
>>> var.values = 0, 0, 2
>>> var.trim(lower=[0, INT_NAN, 2])
>>> var.values = 2, 4, 4
>>> var.trim(upper=[2, INT_NAN, 4])
For |bool| values, defining outliers does not make much sense,
which is why function |trim| does nothing when applied to
variables handling |bool| values:
>>> Var.TYPE = bool
>>> var.trim()
If function |trim| encounters an unmanageable type, it raises an
exception like the following:
>>> Var.TYPE = str
>>> var.trim()
Traceback (most recent call last):
...
NotImplementedError: Method `trim` can only be applied on parameters \
handling floating point, integer, or boolean values, but the "value type" \
of parameter `var` is `str`.
>>> pub.options.warntrim = False
"""
if hydpy.pub.options.trimvariables:
if lower is None:
lower = self.SPAN[0]
if upper is None:
upper = self.SPAN[1]
type_ = getattr(self, 'TYPE', float)
if type_ is float:
if self.NDIM == 0:
_trim_float_0d(self, lower, upper)
else:
_trim_float_nd(self, lower, upper)
elif type_ is int:
if self.NDIM == 0:
_trim_int_0d(self, lower, upper)
else:
_trim_int_nd(self, lower, upper)
elif type_ is bool:
pass
else:
raise NotImplementedError(
f'Method `trim` can only be applied on parameters '
f'handling floating point, integer, or boolean values, '
f'but the "value type" of parameter `{self.name}` is '
f'`{objecttools.classname(self.TYPE)}`.')
|
Trim the value(s) of a |Variable| instance.
Usually, users do not need to apply function |trim| directly.
Instead, some |Variable| subclasses implement their own `trim`
methods relying on function |trim|. Model developers should
implement individual `trim` methods for their |Parameter| or
|Sequence| subclasses when their boundary values depend on the
actual project configuration (one example is soil moisture;
its lowest possible value should possibly be zero in all cases,
but its highest possible value could depend on another parameter
defining the maximum storage capacity).
For the following examples, we prepare a simple (not fully
functional) |Variable| subclass, making use of function |trim|
without any modifications. Function |trim| works slightly
differently for variables handling |float|, |int|, and |bool|
values. We start with the most common content type |float|:
>>> from hydpy.core.variabletools import trim, Variable
>>> class Var(Variable):
... NDIM = 0
... TYPE = float
... SPAN = 1.0, 3.0
... trim = trim
... initinfo = 2.0, False
... __hydpy__connect_variable2subgroup__ = None
First, we enable the printing of warning messages raised by function
|trim|:
>>> from hydpy import pub
>>> pub.options.warntrim = True
When not passing boundary values, function |trim| extracts them from
class attribute `SPAN` of the given |Variable| instance, if available:
>>> var = Var(None)
>>> var.value = 2.0
>>> var.trim()
>>> var
var(2.0)
>>> var.value = 0.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `0.0` and `1.0`, respectively.
>>> var
var(1.0)
>>> var.value = 4.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `4.0` and `3.0`, respectively.
>>> var
var(3.0)
In the examples above, outlier values are set to the respective
boundary value, accompanied by suitable warning messages. For very
tiny deviations, which might be due to precision problems only,
outliers are trimmed but not reported:
>>> var.value = 1.0 - 1e-15
>>> var == 1.0
False
>>> trim(var)
>>> var == 1.0
True
>>> var.value = 3.0 + 1e-15
>>> var == 3.0
False
>>> var.trim()
>>> var == 3.0
True
Use arguments `lower` and `upper` to override the (possibly)
available `SPAN` entries:
>>> var.trim(lower=4.0)
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `3.0` and `4.0`, respectively.
>>> var.trim(upper=3.0)
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `4.0` and `3.0`, respectively.
Function |trim| interprets both |None| and |numpy.nan| values as if
no boundary value exists:
>>> import numpy
>>> var.value = 0.0
>>> var.trim(lower=numpy.nan)
>>> var.value = 5.0
>>> var.trim(upper=numpy.nan)
You can disable function |trim| via option |Options.trimvariables|:
>>> with pub.options.trimvariables(False):
... var.value = 5.0
... var.trim()
>>> var
var(5.0)
Alternatively, you can omit the warning messages only:
>>> with pub.options.warntrim(False):
... var.value = 5.0
... var.trim()
>>> var
var(3.0)
If a |Variable| subclass does not have (fixed) boundaries, give it
either no `SPAN` attribute or a |tuple| containing |None| values:
>>> del Var.SPAN
>>> var.value = 5.0
>>> var.trim()
>>> var
var(5.0)
>>> Var.SPAN = (None, None)
>>> var.trim()
>>> var
var(5.0)
The above examples deal with a 0-dimensional |Variable| subclass.
The following examples repeat the most relevant examples for a
2-dimensional subclass:
>>> Var.SPAN = 1.0, 3.0
>>> Var.NDIM = 2
>>> var.shape = 1, 3
>>> var.values = 2.0
>>> var.trim()
>>> var.values = 0.0, 1.0, 2.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 1. 2.]]` and `[[ 1. 1. 2.]]`, \
respectively.
>>> var
var([[1.0, 1.0, 2.0]])
>>> var.values = 2.0, 3.0, 4.0
>>> var.trim()
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 2. 3. 4.]]` and `[[ 2. 3. 3.]]`, \
respectively.
>>> var
var([[2.0, 3.0, 3.0]])
>>> var.values = 1.0-1e-15, 2.0, 3.0+1e-15
>>> var.values == (1.0, 2.0, 3.0)
array([[False, True, False]], dtype=bool)
>>> var.trim()
>>> var.values == (1.0, 2.0, 3.0)
array([[ True, True, True]], dtype=bool)
>>> var.values = 0.0, 2.0, 4.0
>>> var.trim(lower=numpy.nan, upper=numpy.nan)
>>> var
var([[0.0, 2.0, 4.0]])
>>> var.trim(lower=[numpy.nan, 3.0, 3.0])
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 2. 4.]]` and `[[ 0. 3. 3.]]`, \
respectively.
>>> var.values = 0.0, 2.0, 4.0
>>> var.trim(upper=[numpy.nan, 1.0, numpy.nan])
Traceback (most recent call last):
...
UserWarning: For variable `var` at least one value needed to be trimmed. \
The old and the new value(s) are `[[ 0. 2. 4.]]` and `[[ 1. 1. 4.]]`, \
respectively.
For |Variable| subclasses handling |float| values, setting outliers
to the respective boundary value might often be an acceptable approach.
However, this is often not the case for subclasses handling |int|
values, which often serve as option flags (e.g. to enable/disable
a certain hydrological process for different land-use types). Hence,
function |trim| raises an exception instead of a warning and does
not modify the wrong |int| value:
>>> Var.TYPE = int
>>> Var.NDIM = 0
>>> Var.SPAN = 1, 3
>>> var.value = 2
>>> var.trim()
>>> var
var(2)
>>> var.value = 0
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `0` of parameter `var` of element `?` is not valid.
>>> var
var(0)
>>> var.value = 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `4` of parameter `var` of element `?` is not valid.
>>> var
var(4)
>>> from hydpy import INT_NAN
>>> var.value = 0
>>> var.trim(lower=0)
>>> var.trim(lower=INT_NAN)
>>> var.value = 4
>>> var.trim(upper=4)
>>> var.trim(upper=INT_NAN)
>>> Var.SPAN = 1, None
>>> var.value = 0
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `0` of parameter `var` of element `?` is not valid.
>>> var
var(0)
>>> Var.SPAN = None, 3
>>> var.value = 0
>>> var.trim()
>>> var.value = 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: The value `4` of parameter `var` of element `?` is not valid.
>>> del Var.SPAN
>>> var.value = 0
>>> var.trim()
>>> var.value = 4
>>> var.trim()
>>> Var.SPAN = 1, 3
>>> Var.NDIM = 2
>>> var.shape = (1, 3)
>>> var.values = 2
>>> var.trim()
>>> var.values = 0, 1, 2
>>> var.trim()
Traceback (most recent call last):
...
ValueError: At least one value of parameter `var` of element `?` \
is not valid.
>>> var
var([[0, 1, 2]])
>>> var.values = 2, 3, 4
>>> var.trim()
Traceback (most recent call last):
...
ValueError: At least one value of parameter `var` of element `?` \
is not valid.
>>> var
var([[2, 3, 4]])
>>> var.values = 0, 0, 2
>>> var.trim(lower=[0, INT_NAN, 2])
>>> var.values = 2, 4, 4
>>> var.trim(upper=[2, INT_NAN, 4])
For |bool| values, defining outliers does not make much sense,
which is why function |trim| does nothing when applied to
variables handling |bool| values:
>>> Var.TYPE = bool
>>> var.trim()
If function |trim| encounters an unmanageable type, it raises an
exception like the following:
>>> Var.TYPE = str
>>> var.trim()
Traceback (most recent call last):
...
NotImplementedError: Method `trim` can only be applied on parameters \
handling floating point, integer, or boolean values, but the "value type" \
of parameter `var` is `str`.
>>> pub.options.warntrim = False
|
entailment
|
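The doctests above describe how 0-dimensional |float| values are clipped: clear outliers are set to the nearest boundary with a warning, while deviations within numerical precision are clipped silently. The following sketch is not the HydPy implementation, only a minimal reproduction of that rule:

def trim_float_0d_sketch(value, lower=None, upper=None):
    # tolerance of the order of the floating point precision of `value`
    tolerance = abs(value) * 1e-15
    if (lower is not None) and (value < lower):
        if (lower - value) > tolerance:
            print(f'warning: {value} trimmed to {lower}')
        return lower
    if (upper is not None) and (value > upper):
        if (value - upper) > tolerance:
            print(f'warning: {value} trimmed to {upper}')
        return upper
    return value

print(trim_float_0d_sketch(0.0, 1.0, 3.0))          # warns, returns 1.0
print(trim_float_0d_sketch(3.0 + 1e-15, 1.0, 3.0))  # silent, returns 3.0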
def _get_tolerance(values):
"""Return some "numerical accuracy" to be expected for the
given floating point value(s) (see method |trim|)."""
tolerance = numpy.abs(values*1e-15)
if hasattr(tolerance, '__setitem__'):
tolerance[numpy.isinf(tolerance)] = 0.
elif numpy.isinf(tolerance):
tolerance = 0.
return tolerance
|
Return some "numerical accuracy" to be expected for the
given floating point value(s) (see method |trim|).
|
entailment
|
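A short usage sketch of the tolerance helper defined above, repeated here so the example is self-contained; the scalar branch returns a relative tolerance, while infinite entries of arrays are mapped to a zero tolerance (the input values are arbitrary):

import numpy

def _get_tolerance(values):
    # identical to the helper above, repeated for self-containment
    tolerance = numpy.abs(values*1e-15)
    if hasattr(tolerance, '__setitem__'):
        tolerance[numpy.isinf(tolerance)] = 0.
    elif numpy.isinf(tolerance):
        tolerance = 0.
    return tolerance

print(_get_tolerance(2.0))                            # 2e-15
print(_get_tolerance(numpy.array([1.0, numpy.inf])))  # approx. [1.e-15 0.e+00]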
def _compare_variables_function_generator(
method_string, aggregation_func):
"""Return a function usable as a comparison method for class |Variable|.
    Pass the name of the specific comparison method (e.g. `__eq__`)
    as a string. Also pass either |numpy.all| or
|numpy.any| for aggregating multiple boolean values.
"""
def comparison_function(self, other):
"""Wrapper for comparison functions for class |Variable|."""
if self is other:
return method_string in ('__eq__', '__le__', '__ge__')
method = getattr(self.value, method_string)
try:
if hasattr(type(other), '__hydpy__get_value__'):
other = other.__hydpy__get_value__()
result = method(other)
if result is NotImplemented:
return result
return aggregation_func(result)
except BaseException:
objecttools.augment_excmessage(
f'While trying to compare variable '
f'{objecttools.elementphrase(self)} with object '
f'`{other}` of type `{objecttools.classname(other)}`')
return comparison_function
|
Return a function usable as a comparison method for class |Variable|.
Pass the name of the specific comparison method (e.g. `__eq__`)
as a string. Also pass either |numpy.all| or
|numpy.any| for aggregating multiple boolean values.
|
entailment
|
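A stripped-down analogue (not HydPy code) of the generator above: it attaches comparison dunders to a toy class whose `value` may be array-like, aggregating the element-wise results with the given numpy function; the class `Toy` is invented for the example:

import numpy

def make_comparison(method_string, aggregation_func):
    def comparison(self, other):
        # delegate to the numpy comparison and aggregate the boolean result
        result = getattr(numpy.asarray(self.value), method_string)(other)
        if result is NotImplemented:
            return result
        return bool(aggregation_func(result))
    return comparison

class Toy:
    def __init__(self, value):
        self.value = value
    __eq__ = make_comparison('__eq__', numpy.all)
    __lt__ = make_comparison('__lt__', numpy.any)

print(Toy([1, 2, 3]) == [1, 2, 3])   # True (all entries equal)
print(Toy([1, 2, 3]) < [0, 5, 0])    # True (at least one entry smaller)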
def to_repr(self: Variable, values, brackets1d: Optional[bool] = False) \
-> str:
"""Return a valid string representation for the given |Variable|
object.
    Function |to_repr| is intended for internal purposes only, more
specifically for defining string representations of subclasses
of class |Variable| like the following:
>>> from hydpy.core.variabletools import to_repr, Variable
>>> class Var(Variable):
... NDIM = 0
... TYPE = int
... __hydpy__connect_variable2subgroup__ = None
... initinfo = 1.0, False
>>> var = Var(None)
>>> var.value = 2
>>> var
var(2)
The following examples demonstrate all covered cases. Note that
option `brackets1d` allows choosing between a "vararg" and an
"iterable" string representation for 1-dimensional variables
(the first one being the default):
>>> print(to_repr(var, 2))
var(2)
>>> Var.NDIM = 1
>>> var = Var(None)
>>> var.shape = 3
>>> print(to_repr(var, range(3)))
var(0, 1, 2)
>>> print(to_repr(var, range(3), True))
var([0, 1, 2])
>>> print(to_repr(var, range(30)))
var(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29)
>>> print(to_repr(var, range(30), True))
var([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
>>> Var.NDIM = 2
>>> var = Var(None)
>>> var.shape = (2, 3)
>>> print(to_repr(var, [range(3), range(3, 6)]))
var([[0, 1, 2],
[3, 4, 5]])
>>> print(to_repr(var, [range(30), range(30, 60)]))
var([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59]])
"""
prefix = f'{self.name}('
if isinstance(values, str):
string = f'{self.name}({values})'
elif self.NDIM == 0:
string = f'{self.name}({objecttools.repr_(values)})'
elif self.NDIM == 1:
if brackets1d:
string = objecttools.assignrepr_list(values, prefix, 72) + ')'
else:
string = objecttools.assignrepr_values(
values, prefix, 72) + ')'
else:
string = objecttools.assignrepr_list2(values, prefix, 72) + ')'
return '\n'.join(self.commentrepr + [string])
|
Return a valid string representation for the given |Variable|
object.
Function |to_repr| is intended for internal purposes only, more
specifically for defining string representations of subclasses
of class |Variable| like the following:
>>> from hydpy.core.variabletools import to_repr, Variable
>>> class Var(Variable):
... NDIM = 0
... TYPE = int
... __hydpy__connect_variable2subgroup__ = None
... initinfo = 1.0, False
>>> var = Var(None)
>>> var.value = 2
>>> var
var(2)
The following examples demonstrate all covered cases. Note that
option `brackets1d` allows choosing between a "vararg" and an
"iterable" string representation for 1-dimensional variables
(the first one being the default):
>>> print(to_repr(var, 2))
var(2)
>>> Var.NDIM = 1
>>> var = Var(None)
>>> var.shape = 3
>>> print(to_repr(var, range(3)))
var(0, 1, 2)
>>> print(to_repr(var, range(3), True))
var([0, 1, 2])
>>> print(to_repr(var, range(30)))
var(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29)
>>> print(to_repr(var, range(30), True))
var([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
>>> Var.NDIM = 2
>>> var = Var(None)
>>> var.shape = (2, 3)
>>> print(to_repr(var, [range(3), range(3, 6)]))
var([[0, 1, 2],
[3, 4, 5]])
>>> print(to_repr(var, [range(30), range(30, 60)]))
var([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59]])
|
entailment
|
def verify(self) -> None:
"""Raises a |RuntimeError| if at least one of the required values
of a |Variable| object is |None| or |numpy.nan|. The descriptor
    `mask` defines which values are considered necessary.
Example on a 0-dimensional |Variable|:
>>> from hydpy.core.variabletools import Variable
>>> class Var(Variable):
... NDIM = 0
... TYPE = float
... __hydpy__connect_variable2subgroup__ = None
... initinfo = 0.0, False
>>> var = Var(None)
>>> import numpy
>>> var.shape = ()
>>> var.value = 1.0
>>> var.verify()
>>> var.value = numpy.nan
>>> var.verify()
Traceback (most recent call last):
...
RuntimeError: For variable `var`, 1 required value has not been set yet.
Example on a 2-dimensional |Variable|:
>>> Var.NDIM = 2
>>> var = Var(None)
>>> var.shape = (2, 3)
>>> var.value = numpy.ones((2,3))
>>> var.value[:, 1] = numpy.nan
>>> var.verify()
Traceback (most recent call last):
...
RuntimeError: For variable `var`, 2 required values \
have not been set yet.
>>> Var.mask = var.mask
>>> Var.mask[0, 1] = False
>>> var.verify()
Traceback (most recent call last):
...
RuntimeError: For variable `var`, 1 required value has not been set yet.
>>> Var.mask[1, 1] = False
>>> var.verify()
"""
nmbnan: int = numpy.sum(numpy.isnan(
numpy.array(self.value)[self.mask]))
if nmbnan:
if nmbnan == 1:
text = 'value has'
else:
text = 'values have'
raise RuntimeError(
f'For variable {objecttools.devicephrase(self)}, '
f'{nmbnan} required {text} not been set yet.')
|
Raises a |RuntimeError| if at least one of the required values
of a |Variable| object is |None| or |numpy.nan|. The descriptor
`mask` defines which values are considered necessary.
Example on a 0-dimensional |Variable|:
>>> from hydpy.core.variabletools import Variable
>>> class Var(Variable):
... NDIM = 0
... TYPE = float
... __hydpy__connect_variable2subgroup__ = None
... initinfo = 0.0, False
>>> var = Var(None)
>>> import numpy
>>> var.shape = ()
>>> var.value = 1.0
>>> var.verify()
>>> var.value = numpy.nan
>>> var.verify()
Traceback (most recent call last):
...
RuntimeError: For variable `var`, 1 required value has not been set yet.
Example on a 2-dimensional |Variable|:
>>> Var.NDIM = 2
>>> var = Var(None)
>>> var.shape = (2, 3)
>>> var.value = numpy.ones((2,3))
>>> var.value[:, 1] = numpy.nan
>>> var.verify()
Traceback (most recent call last):
...
RuntimeError: For variable `var`, 2 required values \
have not been set yet.
>>> Var.mask = var.mask
>>> Var.mask[0, 1] = False
>>> var.verify()
Traceback (most recent call last):
...
RuntimeError: For variable `var`, 1 required value has not been set yet.
>>> Var.mask[1, 1] = False
>>> var.verify()
|
entailment
|
def average_values(self, *args, **kwargs) -> float:
"""Average the actual values of the |Variable| object.
For 0-dimensional |Variable| objects, the result of method
|Variable.average_values| equals |Variable.value|. The
following example shows this for the sloppily defined class
`SoilMoisture`:
>>> from hydpy.core.variabletools import Variable
>>> class SoilMoisture(Variable):
... NDIM = 0
... TYPE = float
... refweigths = None
... availablemasks = None
... __hydpy__connect_variable2subgroup__ = None
... initinfo = None
>>> sm = SoilMoisture(None)
>>> sm.value = 200.0
>>> sm.average_values()
200.0
When the dimensionality of this class is increased to one,
applying method |Variable.average_values| results in the
following error:
>>> SoilMoisture.NDIM = 1
>>> import numpy
>>> SoilMoisture.shape = (3,)
>>> SoilMoisture.value = numpy.array([200.0, 400.0, 500.0])
>>> sm.average_values()
Traceback (most recent call last):
...
AttributeError: While trying to calculate the mean value \
of variable `soilmoisture`, the following error occurred: Variable \
`soilmoisture` does not define any weighting coefficients.
So model developers have to define another (in this case
1-dimensional) |Variable| subclass (usually a |Parameter|
subclass), and make the relevant object available via property
|Variable.refweights|:
>>> class Area(Variable):
... NDIM = 1
... shape = (3,)
... value = numpy.array([1.0, 1.0, 2.0])
... __hydpy__connect_variable2subgroup__ = None
... initinfo = None
>>> area = Area(None)
>>> SoilMoisture.refweights = property(lambda self: area)
>>> sm.average_values()
400.0
In the examples above, all single entries of `values` are relevant,
which is the default case. However, subclasses of |Variable| can
    define an alternative mask, which allows marking some entries as
    irrelevant. Assume, for example, that our `SoilMoisture` object
    contains three single values, each one associated with a specific
    hydrological response unit (hru). To indicate that soil moisture
    is undefined for the third unit (maybe because it is a water area),
we set the third entry of the verification mask to |False|:
>>> from hydpy.core.masktools import DefaultMask
>>> class Soil(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([True, True, False])
>>> SoilMoisture.mask = Soil()
>>> sm.average_values()
300.0
Alternatively, method |Variable.average_values| accepts additional
    masking information as positional or keyword arguments. To support this,
the corresponding model must implement some alternative masks,
which are provided by property |Variable.availablemasks|.
We mock this property with a new |Masks| object, handling one
mask for flat soils (only the first hru), one mask for deep soils
(only the second hru), and one mask for water areas (only the
third hru):
>>> class FlatSoil(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([True, False, False])
>>> class DeepSoil(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([False, True, False])
>>> class Water(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([False, False, True])
>>> from hydpy.core import masktools
>>> class Masks(masktools.Masks):
... CLASSES = (FlatSoil,
... DeepSoil,
... Water)
>>> SoilMoisture.availablemasks = Masks(None)
One can pass either the mask classes themselves or their names:
>>> sm.average_values(sm.availablemasks.flatsoil)
200.0
>>> sm.average_values('deepsoil')
400.0
Both variants can be combined:
>>> sm.average_values(sm.availablemasks.deepsoil, 'flatsoil')
300.0
The following error happens if the general mask of the variable
does not contain the given masks:
>>> sm.average_values('flatsoil', 'water')
Traceback (most recent call last):
...
ValueError: While trying to calculate the mean value of variable \
`soilmoisture`, the following error occurred: Based on the arguments \
`('flatsoil', 'water')` and `{}` the mask `CustomMask([ True, False, True])` \
has been determined, which is not a submask of `Soil([ True, True, False])`.
Applying masks with custom options is also supported. One can change
the behaviour of the following mask via the argument `complete`:
>>> class AllOrNothing(DefaultMask):
... @classmethod
... def new(cls, variable, complete):
... if complete:
... bools = [True, True, True]
... else:
... bools = [False, False, False]
... return cls.array2mask(bools)
>>> class Masks(Masks):
... CLASSES = (FlatSoil,
... DeepSoil,
... Water,
... AllOrNothing)
>>> SoilMoisture.availablemasks = Masks(None)
Again, one can apply the mask class directly (but note that one
    has to pass the relevant variable as the first argument):
>>> sm.average_values( # doctest: +ELLIPSIS
... sm.availablemasks.allornothing(sm, complete=True))
Traceback (most recent call last):
...
ValueError: While trying to...
Alternatively, one can pass the mask name as a keyword and pack
the mask's options into a |dict| object:
>>> sm.average_values(allornothing={'complete': False})
nan
You can combine all variants explained above:
>>> sm.average_values(
... 'deepsoil', flatsoil={}, allornothing={'complete': False})
300.0
"""
try:
if not self.NDIM:
return self.value
mask = self.get_submask(*args, **kwargs)
if numpy.any(mask):
weights = self.refweights[mask]
return numpy.sum(weights*self[mask])/numpy.sum(weights)
return numpy.nan
except BaseException:
objecttools.augment_excmessage(
f'While trying to calculate the mean value of variable '
f'{objecttools.devicephrase(self)}')
|
Average the actual values of the |Variable| object.
For 0-dimensional |Variable| objects, the result of method
|Variable.average_values| equals |Variable.value|. The
following example shows this for the sloppily defined class
`SoilMoisture`:
>>> from hydpy.core.variabletools import Variable
>>> class SoilMoisture(Variable):
... NDIM = 0
... TYPE = float
... refweigths = None
... availablemasks = None
... __hydpy__connect_variable2subgroup__ = None
... initinfo = None
>>> sm = SoilMoisture(None)
>>> sm.value = 200.0
>>> sm.average_values()
200.0
When the dimensionality of this class is increased to one,
applying method |Variable.average_values| results in the
following error:
>>> SoilMoisture.NDIM = 1
>>> import numpy
>>> SoilMoisture.shape = (3,)
>>> SoilMoisture.value = numpy.array([200.0, 400.0, 500.0])
>>> sm.average_values()
Traceback (most recent call last):
...
AttributeError: While trying to calculate the mean value \
of variable `soilmoisture`, the following error occurred: Variable \
`soilmoisture` does not define any weighting coefficients.
So model developers have to define another (in this case
1-dimensional) |Variable| subclass (usually a |Parameter|
subclass), and make the relevant object available via property
|Variable.refweights|:
>>> class Area(Variable):
... NDIM = 1
... shape = (3,)
... value = numpy.array([1.0, 1.0, 2.0])
... __hydpy__connect_variable2subgroup__ = None
... initinfo = None
>>> area = Area(None)
>>> SoilMoisture.refweights = property(lambda self: area)
>>> sm.average_values()
400.0
In the examples above, all single entries of `values` are relevant,
which is the default case. However, subclasses of |Variable| can
define an alternative mask, which allows marking some entries as
irrelevant. Assume, for example, that our `SoilMoisture` object
contains three single values, each one associated with a specific
hydrological response unit (hru). To indicate that soil moisture
is undefined for the third unit (maybe because it is a water area),
we set the third entry of the verification mask to |False|:
>>> from hydpy.core.masktools import DefaultMask
>>> class Soil(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([True, True, False])
>>> SoilMoisture.mask = Soil()
>>> sm.average_values()
300.0
Alternatively, method |Variable.average_values| accepts additional
masking information as positional or keyword arguments. To support this,
the corresponding model must implement some alternative masks,
which are provided by property |Variable.availablemasks|.
We mock this property with a new |Masks| object, handling one
mask for flat soils (only the first hru), one mask for deep soils
(only the second hru), and one mask for water areas (only the
third hru):
>>> class FlatSoil(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([True, False, False])
>>> class DeepSoil(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([False, True, False])
>>> class Water(DefaultMask):
... @classmethod
... def new(cls, variable, **kwargs):
... return cls.array2mask([False, False, True])
>>> from hydpy.core import masktools
>>> class Masks(masktools.Masks):
... CLASSES = (FlatSoil,
... DeepSoil,
... Water)
>>> SoilMoisture.availablemasks = Masks(None)
One can pass either the mask classes themselves or their names:
>>> sm.average_values(sm.availablemasks.flatsoil)
200.0
>>> sm.average_values('deepsoil')
400.0
Both variants can be combined:
>>> sm.average_values(sm.availablemasks.deepsoil, 'flatsoil')
300.0
The following error happens if the general mask of the variable
does not contain the given masks:
>>> sm.average_values('flatsoil', 'water')
Traceback (most recent call last):
...
ValueError: While trying to calculate the mean value of variable \
`soilmoisture`, the following error occurred: Based on the arguments \
`('flatsoil', 'water')` and `{}` the mask `CustomMask([ True, False, True])` \
has been determined, which is not a submask of `Soil([ True, True, False])`.
Applying masks with custom options is also supported. One can change
the behaviour of the following mask via the argument `complete`:
>>> class AllOrNothing(DefaultMask):
... @classmethod
... def new(cls, variable, complete):
... if complete:
... bools = [True, True, True]
... else:
... bools = [False, False, False]
... return cls.array2mask(bools)
>>> class Masks(Masks):
... CLASSES = (FlatSoil,
... DeepSoil,
... Water,
... AllOrNothing)
>>> SoilMoisture.availablemasks = Masks(None)
Again, one can apply the mask class directly (but note that one
has to pass the relevant variable as the first argument):
>>> sm.average_values( # doctest: +ELLIPSIS
... sm.availablemasks.allornothing(sm, complete=True))
Traceback (most recent call last):
...
ValueError: While trying to...
Alternatively, one can pass the mask name as a keyword and pack
the mask's options into a |dict| object:
>>> sm.average_values(allornothing={'complete': False})
nan
You can combine all variants explained above:
>>> sm.average_values(
... 'deepsoil', flatsoil={}, allornothing={'complete': False})
300.0
|
entailment
|
def get_submask(self, *args, **kwargs) -> masktools.CustomMask:
"""Get a sub-mask of the mask handled by the actual |Variable| object
based on the given arguments.
See the documentation on method |Variable.average_values| for
further information.
"""
if args or kwargs:
masks = self.availablemasks
mask = masktools.CustomMask(numpy.full(self.shape, False))
for arg in args:
mask = mask + self._prepare_mask(arg, masks)
for key, value in kwargs.items():
mask = mask + self._prepare_mask(key, masks, **value)
if mask not in self.mask:
raise ValueError(
f'Based on the arguments `{args}` and `{kwargs}` '
f'the mask `{repr(mask)}` has been determined, '
f'which is not a submask of `{repr(self.mask)}`.')
else:
mask = self.mask
return mask
|
Get a sub-mask of the mask handled by the actual |Variable| object
based on the given arguments.
See the documentation on method |Variable.average_values| for
further information.
|
entailment
|
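A plain-numpy illustration (independent of HydPy's mask classes) of the two steps performed above: the selected masks are combined by logical union, and the union must be a subset of the variable's general mask; the boolean arrays are made up for the example:

import numpy

general = numpy.array([True, True, False])   # general mask of the variable
flat = numpy.array([True, False, False])     # first selected mask
deep = numpy.array([False, True, False])     # second selected mask

combined = flat | deep                       # union of the selected masks
is_submask = bool(numpy.all(general[combined]))
print(combined, is_submask)                  # [ True  True False] True

water = numpy.array([False, False, True])
print(bool(numpy.all(general[flat | water])))  # False -> would raise ValueError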
def commentrepr(self) -> List[str]:
"""A list with comments for making string representations
more informative.
With option |Options.reprcomments| being disabled,
|Variable.commentrepr| is empty.
"""
if hydpy.pub.options.reprcomments:
return [f'# {line}' for line in
textwrap.wrap(objecttools.description(self), 72)]
return []
|
A list with comments for making string representations
more informative.
With option |Options.reprcomments| being disabled,
|Variable.commentrepr| is empty.
|
entailment
|
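A standalone sketch of the formatting step shown above: a (made-up) description string is wrapped at 72 characters and every line is prefixed as a comment:

import textwrap

description = ('Soil moisture content of the upper soil layer, defined '
               'separately for each hydrological response unit.')
print('\n'.join(f'# {line}' for line in textwrap.wrap(description, 72)))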
def AddTable(p_workSheet = None, p_headerDict = None, p_startColumn = 1, p_startRow = 1, p_headerHeight = None, p_data = None, p_mainTable = False, p_conditionalFormatting = None, p_tableStyleInfo = None, p_withFilters = True):
"""Insert a table in a given worksheet.
Args:
p_workSheet (openpyxl.worksheet.worksheet.Worksheet): the worksheet where the table will be inserted. Defaults to None.
p_headerDict (collections.OrderedDict): an ordered dict that contains table header columns.
Notes:
Each entry is in the following form:
Key: Name of the column to be searched in p_data.Columns.
Value: Spartacus.Report.Field instance.
Examples:
p_headerDict = collections.OrderedDict([
(
'field_one',
Field(
p_name = 'Code',
p_width = 15,
p_data = Data(
p_type = 'int'
)
)
),
(
'field_two',
Field(
p_name = 'Result',
p_width = 15,
p_data = Data(
p_type = 'int_formula'
)
)
)
])
p_startColumn (int): the column number where the table should start. Defaults to 1.
Notes:
Must be a positive integer.
p_startRow (int): the row number where the table should start. Defaults to 1.
Notes:
Must be a positive integer.
p_headerHeight (float): the header row height in pt. Defaults to None.
Notes:
Must be a non-negative number or None.
p_data (Spartacus.Database.DataTable): the datatable that contains the data that will be inserted into the excel table. Defaults to None.
Notes:
If the corresponding column data type in p_headerDict is some kind of formula, then below wildcards can be used:
#row#: the current row.
#column_columname#: will be replaced by the letter of the column.
Examples:
p_data = Spartacus.Database.DataTable that contains:
Columns: ['field_one', 'field_two'].
Rows: [
[
'HAHAHA',
'=if(#column_field_one##row# = "HAHAHA", 1, 0)'
],
[
'HEHEHE',
'=if(#column_field_one##row# = "HAHAHA", 1, 0)'
]
]
p_mainTable (bool): if this table is the main table of the current worksheet. Defaults to False.
Notes:
If it's the main table, then it will consider p_width, p_hidden and freeze panes in the first table row. The 3 parameters are ignored otherwise.
p_conditionalFormatting (Spartacus.Report.ConditionalFormatting): a conditional formatting that should be applied to data rows. Defaults to None.
Notes:
Will be applied to all data rows of this table.
A wildcard can be used and be replaced properly:
#row#: the current data row.
#column_columname#: will be replaced by the letter of the column.
Examples:
p_conditionalFormatting = ConditionalFormatting(
p_formula = '$Y#row# = 2',
p_differentialStyle = openpyxl.styles.differential.DifferentialStyle(
fill = openpyxl.styles.PatternFill(
bgColor = 'D3D3D3'
)
)
)
p_tableStyleInfo (openpyxl.worksheet.table.TableStyleInfo): a style to be applied to this table. Defaults to None.
Notes:
Will not be applied to summaries, if any.
Examples:
p_tableStyleInfo = openpyxl.worksheet.table.TableStyleInfo(
name = 'TableStyleMedium23',
showFirstColumn = True,
showLastColumn = True,
showRowStripes = True,
showColumnStripes = False
)
p_withFilters (bool): if the table must contain auto-filters.
Yields:
int: Every 1000 lines inserted into the table, yields actual line number.
Raises:
Spartacus.Report.Exception: custom exceptions occurred in this script.
"""
if not isinstance(p_workSheet, openpyxl.worksheet.worksheet.Worksheet):
raise Spartacus.Report.Exception('Error during execution of method "Static.AddTable": Parameter "p_workSheet" must be of type "openpyxl.worksheet.worksheet.Worksheet".')
if not isinstance(p_headerDict, collections.OrderedDict):
raise Spartacus.Report.Exception('Error during execution of method "Static.AddTable": Parameter "p_headerDict" must be of type "collections.OrderedDict".')
if not isinstance(p_startColumn, int):
raise Spartacus.Report.Exception('Error during execution of method "Static.AddTable": Parameter "p_startColumn" must be of type "int".')
if p_startColumn < 1:
raise Spartacus.Report.Exception('Error during execution of method "Static.AddTable": Parameter "p_startColumn" must be a positive integer.')
if not isinstance(p_startRow, int):
raise Spartacus.Report.Exception('Error during execution of method "Static.AddTable": Parameter "p_startRow" must be of type "int".')
if p_startRow < 1:
raise Spartacus.Report.Exception('Error during execution of method "Static.AddTable": Parameter "p_startRow" must be a positive integer.')
if p_headerHeight is not None and not isinstance(p_headerHeight, int) and not isinstance(p_headerHeight, float):
raise Spartacus.Report.Exception('Error during execution of method "Static.AddTable": Parameter "p_headerHeight" must be None or of type "int" or "float".')
if not isinstance(p_data, Spartacus.Database.DataTable):
raise Spartacus.Report.Exception('Error during execution of method "Static.AddTable": Parameter "p_data" must be of type "Spartacus.Database.DataTable".')
if not isinstance(p_mainTable, bool):
raise Spartacus.Report.Exception('Error during execution of method "Static.AddTable": Parameter "p_mainTable" must be of type "bool".')
if p_conditionalFormatting is not None and not isinstance(p_conditionalFormatting, ConditionalFormatting):
raise Spartacus.Report.Exception('Error during execution of method "Static.AddTable": Parameter "p_conditionalFormatting" must be None or of type "Spartacus.Report.ConditionalFormatting".')
if p_tableStyleInfo is not None and not isinstance(p_tableStyleInfo, openpyxl.worksheet.table.TableStyleInfo):
raise Spartacus.Report.Exception('Error during execution of method "Static.AddTable": Parameter "p_tableStyleInfo" must be None or of type "openpyxl.worksheet.table.TableStyleInfo".')
if p_withFilters is not None and not isinstance(p_withFilters, bool):
raise Spartacus.Report.Exception('Error during execution of method "Static.AddTable": Parameter "p_withFilters" must be None or of type "bool".')
#Format Header
if p_headerHeight is not None:
p_workSheet.row_dimensions[p_startRow].height = p_headerHeight
v_headerList = list(p_headerDict.keys())
for i in range(len(v_headerList)):
v_header = p_headerDict[v_headerList[i]]
v_letter = openpyxl.utils.get_column_letter(i + p_startColumn)
v_cell = p_workSheet['{0}{1}'.format(v_letter, p_startRow)]
v_cell.value = v_header.name
if p_mainTable:
p_workSheet.column_dimensions[v_letter].width = v_header.width
p_workSheet.column_dimensions[v_letter].hidden = v_header.hidden
if v_header.comment is not None:
v_cell.comment = v_header.comment
if v_header.border is not None:
v_cell.border = v_header.border
if v_header.font is not None:
v_cell.font = v_header.font
if v_header.fill is not None:
v_cell.fill = v_header.fill
if v_header.alignment is not None:
v_cell.alignment = v_header.alignment
if p_mainTable:
p_workSheet.freeze_panes = 'A{0}'.format(p_startRow + 1)
#used in formula fields, if it's the case
v_pattern = re.compile(r'#column_[^\n\r#]*#')
v_line = 0
#Fill content
for v_row in p_data.Rows:
v_line += 1
for i in range(len(v_headerList)):
v_headerData = p_headerDict[v_headerList[i]].data
v_letter = openpyxl.utils.get_column_letter(i + p_startColumn)
v_cell = p_workSheet['{0}{1}'.format(v_letter, v_line + p_startRow)] #Plus p_startRow to "jump" report header lines
if v_headerData.border is not None:
v_cell.border = v_headerData.border
if v_headerData.font is not None:
v_cell.font = v_headerData.font
if v_headerData.fill is not None:
v_cell.fill = v_headerData.fill
if v_headerData.alignment is not None:
v_cell.alignment = v_headerData.alignment
if v_headerData.type == 'int':
v_key = str(v_row[v_headerList[i]])
if v_key in v_headerData.valueMapping:
v_cell.value = v_headerData.valueMapping[v_key]
else:
try:
v_cell.value = int(v_row[v_headerList[i]])
except (Exception, TypeError, ValueError):
v_cell.value = v_row[v_headerList[i]] if v_row[v_headerList[i]] is not None else ''
v_cell.number_format = '0'
elif v_headerData.type == 'float':
v_key = str(v_row[v_headerList[i]])
if v_key in v_headerData.valueMapping:
v_cell.value = v_headerData.valueMapping[v_key]
else:
try:
v_cell.value = float(v_row[v_headerList[i]])
except (Exception, TypeError, ValueError):
v_cell.value = v_row[v_headerList[i]] if v_row[v_headerList[i]] is not None else ''
v_cell.number_format = '#,##0.00'
elif v_headerData.type == 'float4':
v_key = str(v_row[v_headerList[i]])
if v_key in v_headerData.valueMapping:
v_cell.value = v_headerData.valueMapping[v_key]
else:
try:
v_cell.value = float(v_row[v_headerList[i]])
except (Exception, TypeError, ValueError):
v_cell.value = v_row[v_headerList[i]] if v_row[v_headerList[i]] is not None else ''
v_cell.number_format = '#,##0.0000'
elif v_headerData.type == 'percent':
v_key = str(v_row[v_headerList[i]])
if v_key in v_headerData.valueMapping:
v_cell.value = v_headerData.valueMapping[v_key]
else:
try:
v_cell.value = float(v_row[v_headerList[i]])
except (Exception, TypeError, ValueError):
v_cell.value = v_row[v_headerList[i]] if v_row[v_headerList[i]] is not None else ''
v_cell.number_format = '0.00%'
elif v_headerData.type == 'date':
v_key = str(v_row[v_headerList[i]])
if v_key in v_headerData.valueMapping:
v_cell.value = v_headerData.valueMapping[v_key]
else:
v_cell.value = v_row[v_headerList[i]] if v_row[v_headerList[i]] is not None else ''
v_cell.number_format = 'DD/MM/YYYY'
elif v_headerData.type == 'str':
v_key = str(v_row[v_headerList[i]])
if v_key in v_headerData.valueMapping:
v_cell.value = v_headerData.valueMapping[v_key]
else:
v_cell.value = v_row[v_headerList[i]] if v_row[v_headerList[i]] is not None else ''
elif v_headerData.type == 'bool':
v_key = str(v_row[v_headerList[i]])
if v_key in v_headerData.valueMapping:
v_cell.value = v_headerData.valueMapping[v_key]
else:
try:
v_cell.value = bool(v_row[v_headerList[i]]) if v_row[v_headerList[i]] is not None and str(v_row[v_headerList[i]]).strip() != '' else ''
except (Exception, TypeError, ValueError):
v_cell.value = v_row[v_headerList[i]] if v_row[v_headerList[i]] is not None else ''
if v_headerData.type == 'int_formula':
v_value = v_row[v_headerList[i]].replace('#row#', str(p_startRow + v_line))
v_match = re.search(v_pattern, v_value)
while v_match is not None:
v_start = v_match.start()
v_end = v_match.end()
v_matchColumn = openpyxl.utils.get_column_letter(p_startColumn + v_headerList.index(v_value[v_start + 8 : v_end - 1])) #Discard starting #column_ and ending # in match
v_value = v_value[:v_start] + v_matchColumn + v_value[v_end:]
v_match = re.search(v_pattern, v_value)
v_cell.value = v_value
v_cell.number_format = '0'
elif v_headerData.type == 'float_formula':
v_value = v_row[v_headerList[i]].replace('#row#', str(p_startRow + v_line))
v_match = re.search(v_pattern, v_value)
while v_match is not None:
v_start = v_match.start()
v_end = v_match.end()
v_matchColumn = openpyxl.utils.get_column_letter(p_startColumn + v_headerList.index(v_value[v_start + 8 : v_end - 1])) #Discard starting #column_ and ending # in match
v_value = v_value[:v_start] + v_matchColumn + v_value[v_end:]
v_match = re.search(v_pattern, v_value)
v_cell.value = v_value
v_cell.number_format = '#,##0.00'
elif v_headerData.type == 'float4_formula':
v_value = v_row[v_headerList[i]].replace('#row#', str(p_startRow + v_line))
v_match = re.search(v_pattern, v_value)
while v_match is not None:
v_start = v_match.start()
v_end = v_match.end()
v_matchColumn = openpyxl.utils.get_column_letter(p_startColumn + v_headerList.index(v_value[v_start + 8 : v_end - 1])) #Discard starting #column_ and ending # in match
v_value = v_value[:v_start] + v_matchColumn + v_value[v_end:]
v_match = re.search(v_pattern, v_value)
v_cell.value = v_value
v_cell.number_format = '#,##0.0000'
elif v_headerData.type == 'percent_formula':
v_value = v_row[v_headerList[i]].replace('#row#', str(p_startRow + v_line))
v_match = re.search(v_pattern, v_value)
while v_match is not None:
v_start = v_match.start()
v_end = v_match.end()
v_matchColumn = openpyxl.utils.get_column_letter(p_startColumn + v_headerList.index(v_value[v_start + 8 : v_end - 1])) #Discard starting #column_ and ending # in match
v_value = v_value[:v_start] + v_matchColumn + v_value[v_end:]
v_match = re.search(v_pattern, v_value)
v_cell.value = v_value
v_cell.number_format = '0.00%'
elif v_headerData.type == 'date_formula':
v_value = v_row[v_headerList[i]].replace('#row#', str(p_startRow + v_line))
v_match = re.search(v_pattern, v_value)
while v_match is not None:
v_start = v_match.start()
v_end = v_match.end()
v_matchColumn = openpyxl.utils.get_column_letter(p_startColumn + v_headerList.index(v_value[v_start + 8 : v_end - 1])) #Discard starting #column_ and ending # in match
v_value = v_value[:v_start] + v_matchColumn + v_value[v_end:]
v_match = re.search(v_pattern, v_value)
v_cell.value = v_value
v_cell.number_format = 'DD/MM/YYYY'
elif v_headerData.type == 'str_formula':
v_value = v_row[v_headerList[i]].replace('#row#', str(p_startRow + v_line))
v_match = re.search(v_pattern, v_value)
while v_match is not None:
v_start = v_match.start()
v_end = v_match.end()
v_matchColumn = openpyxl.utils.get_column_letter(p_startColumn + v_headerList.index(v_value[v_start + 8 : v_end - 1])) #Discard starting #column_ and ending # in match
v_value = v_value[:v_start] + v_matchColumn + v_value[v_end:]
v_match = re.search(v_pattern, v_value)
v_cell.value = v_value
if v_line % 1000 == 0:
yield v_line
v_lastLine = len(p_data.Rows) + p_startRow
#Apply conditional formatting, if any
if p_conditionalFormatting is not None:
v_startLetter = openpyxl.utils.get_column_letter(p_startColumn)
v_finalLetter = openpyxl.utils.get_column_letter(len(v_headerList) + p_startColumn - 1)
v_formula = p_conditionalFormatting.formula.replace('#row#', str(p_startRow + 1))
v_match = re.search(v_pattern, v_formula)
while v_match is not None:
v_start = v_match.start()
v_end = v_match.end()
v_matchColumn = openpyxl.utils.get_column_letter(p_startColumn + v_headerList.index(v_formula[v_start + 8 : v_end - 1])) #Discard starting #column_ and ending # in match
v_formula = v_formula[:v_start] + v_matchColumn + v_formula[v_end:]
v_match = re.search(v_pattern, v_formula)
v_rule = openpyxl.formatting.rule.Rule(
type = 'expression',
formula = [v_formula],
dxf = p_conditionalFormatting.differentialStyle
)
p_workSheet.conditional_formatting.add(
'{0}{1}:{2}{3}'.format(v_startLetter, p_startRow + 1, v_finalLetter, v_lastLine),
v_rule
)
#Build Summary
for i in range(len(v_headerList)):
v_headerSummaryList = p_headerDict[v_headerList[i]].summaryList
for v_headerSummary in v_headerSummaryList:
v_letter = openpyxl.utils.get_column_letter(i + p_startColumn)
v_index = p_startRow - 1
if v_headerSummary.index < 0:
v_index = p_startRow + v_headerSummary.index
elif v_headerSummary.index > 0:
v_index = v_lastLine + v_headerSummary.index
v_value = v_headerSummary.function.replace('#column#', v_letter).replace('#start_row#', str(p_startRow + 1)).replace('#end_row#', str(v_lastLine))
v_match = re.search(v_pattern, v_value)
while v_match is not None:
v_start = v_match.start()
v_end = v_match.end()
v_matchColumn = openpyxl.utils.get_column_letter(p_startColumn + v_headerList.index(v_value[v_start + 8 : v_end - 1])) #Discard starting #column_ and ending # in match
v_value = v_value[:v_start] + v_matchColumn + v_value[v_end:]
v_match = re.search(v_pattern, v_value)
v_cell = p_workSheet['{0}{1}'.format(v_letter, v_index)]
v_cell.value = v_value
if v_headerSummary.border is not None:
v_cell.border = v_headerSummary.border
if v_headerSummary.font is not None:
v_cell.font = v_headerSummary.font
if v_headerSummary.fill is not None:
v_cell.fill = v_headerSummary.fill
if v_headerSummary.type == 'int':
v_cell.number_format = '0'
elif v_headerSummary.type == 'float':
v_cell.number_format = '#,##0.00'
elif v_headerSummary.type == 'float4':
v_cell.number_format = '#,##0.0000'
elif v_headerSummary.type == 'percent':
v_cell.number_format = '0.00%'
#Create a new table and add it to worksheet
v_name = 'Table_{0}_{1}'.format(p_workSheet.title.replace(' ', ''), len(p_workSheet._tables) + 1) #excel doesn't accept same displayName in more than one table.
v_name = ''.join([c for c in v_name if c.isalnum()]) #Excel doesn't accept non-alphanumeric characters.
v_table = openpyxl.worksheet.table.Table(
displayName = v_name,
ref = '{0}{1}:{2}{3}'.format(
openpyxl.utils.get_column_letter(p_startColumn),
p_startRow,
openpyxl.utils.get_column_letter(p_startColumn + len(v_headerList) - 1),
v_lastLine
)
)
if p_tableStyleInfo is not None:
v_table.tableStyleInfo = p_tableStyleInfo
if not p_withFilters:
v_table.headerRowCount = 0
p_workSheet.add_table(v_table)
|
Insert a table in a given worksheet.
Args:
p_workSheet (openpyxl.worksheet.worksheet.Worksheet): the worksheet where the table will be inserted. Defaults to None.
p_headerDict (collections.OrderedDict): an ordered dict that contains table header columns.
Notes:
Each entry is in the following form:
Key: Name of the column to be searched in p_data.Columns.
Value: Spartacus.Report.Field instance.
Examples:
p_headerDict = collections.OrderedDict([
(
'field_one',
Field(
p_name = 'Code',
p_width = 15,
p_data = Data(
p_type = 'int'
)
)
),
(
'field_two',
Field(
p_name = 'Result',
p_width = 15,
p_data = Data(
p_type = 'int_formula'
)
)
)
])
p_startColumn (int): the column number where the table should start. Defaults to 1.
Notes:
Must be a positive integer.
p_startRow (int): the row number where the table should start. Defaults to 1.
Notes:
Must be a positive integer.
p_headerHeight (float): the header row height in pt. Defaults to None.
Notes:
Must be a non-negative number or None.
p_data (Spartacus.Database.DataTable): the datatable that contains the data that will be inserted into the excel table. Defaults to None.
Notes:
If the corresponding column data type in p_headerDict is some kind of formula, then below wildcards can be used:
#row#: the current row.
#column_columname#: will be replaced by the letter of the column.
Examples:
p_data = Spartacus.Database.DataTable that contains:
Columns: ['field_one', 'field_two'].
Rows: [
[
'HAHAHA',
'=if(#column_field_one##row# = "HAHAHA", 1, 0)'
],
[
'HEHEHE',
'=if(#column_field_one##row# = "HAHAHA", 1, 0)'
]
]
p_mainTable (bool): if this table is the main table of the current worksheet. Defaults to False.
Notes:
If it's the main table, then it will consider p_width, p_hidden and freeze panes in the first table row. The 3 parameters are ignored otherwise.
p_conditionalFormatting (Spartacus.Report.ConditionalFormatting): a conditional formatting that should be applied to data rows. Defaults to None.
Notes:
Will be applied to all data rows of this table.
A wildcard can be used and be replaced properly:
#row#: the current data row.
#column_columnname#: will be replaced by the letter of the column.
Examples:
p_conditionalFormatting = ConditionalFormatting(
p_formula = '$Y#row# = 2',
p_differentialStyle = openpyxl.styles.differential.DifferentialStyle(
fill = openpyxl.styles.PatternFill(
bgColor = 'D3D3D3'
)
)
)
p_tableStyleInfo (openpyxl.worksheet.table.TableStyleInfo): a style to be applied to this table. Defaults to None.
Notes:
Will not be applied to summaries, if any.
Examples:
p_tableStyleInfo = openpyxl.worksheet.table.TableStyleInfo(
name = 'TableStyleMedium23',
showFirstColumn = True,
showLastColumn = True,
showRowStripes = True,
showColumnStripes = False
)
p_withFilters (bool): if the table must contain auto-filters.
Yields:
int: Every 1000 lines inserted into the table, yields the actual line number.
Raises:
Spartacus.Report.Exception: custom exceptions occurred in this script.
|
entailment
|
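The wildcard substitution described above (the #row# and #column_<name># placeholders) can be illustrated with a small standalone helper. The function name, the regex pattern and the example arguments below are assumptions chosen for illustration, not part of the Spartacus API:
import re
import openpyxl.utils

def resolve_formula(p_formula, p_headerList, p_startColumn, p_row):
    """Replace the #row# and #column_<name># wildcards with concrete cell references."""
    v_value = p_formula.replace('#row#', str(p_row))
    v_pattern = '#column_[a-zA-Z0-9_]+#'  #assumed wildcard pattern
    v_match = re.search(v_pattern, v_value)
    while v_match is not None:
        v_name = v_value[v_match.start() + 8 : v_match.end() - 1]  #strip '#column_' and the closing '#'
        v_letter = openpyxl.utils.get_column_letter(p_startColumn + p_headerList.index(v_name))
        v_value = v_value[:v_match.start()] + v_letter + v_value[v_match.end():]
        v_match = re.search(v_pattern, v_value)
    return v_value

#resolve_formula('=if(#column_field_one##row# = "HAHAHA", 1, 0)', ['field_one', 'field_two'], 1, 2)
#returns '=if(A2 = "HAHAHA", 1, 0)'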
def get_controlfileheader(
model: Union[str, 'modeltools.Model'],
parameterstep: timetools.PeriodConstrArg = None,
simulationstep: timetools.PeriodConstrArg = None) -> str:
"""Return the header of a regular or auxiliary parameter control file.
The header contains the default coding information, the import command
for the given model and the actual parameter and simulation step sizes.
The first example shows that, if you pass the model argument as a
string, you have to take care that this string makes sense:
>>> from hydpy.core.parametertools import get_controlfileheader, Parameter
>>> from hydpy import Period, prepare_model, pub, Timegrids, Timegrid
>>> print(get_controlfileheader(model='no model class',
... parameterstep='-1h',
... simulationstep=Period('1h')))
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.no model class import *
<BLANKLINE>
simulationstep('1h')
parameterstep('-1h')
<BLANKLINE>
<BLANKLINE>
The second example shows the safer option to pass the proper model
object. It also shows that function |get_controlfileheader| tries
to gain the parameter and simulation step sizes from the global
|Timegrids| object contained in the module |pub| when necessary:
>>> model = prepare_model('lland_v1')
>>> _ = Parameter.parameterstep('1d')
>>> pub.timegrids = '2000.01.01', '2001.01.01', '1h'
>>> print(get_controlfileheader(model=model))
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.lland_v1 import *
<BLANKLINE>
simulationstep('1h')
parameterstep('1d')
<BLANKLINE>
<BLANKLINE>
"""
with Parameter.parameterstep(parameterstep):
if simulationstep is None:
simulationstep = Parameter.simulationstep
else:
simulationstep = timetools.Period(simulationstep)
return (f"# -*- coding: utf-8 -*-\n\n"
f"from hydpy.models.{model} import *\n\n"
f"simulationstep('{simulationstep}')\n"
f"parameterstep('{Parameter.parameterstep}')\n\n")
|
Return the header of a regular or auxiliary parameter control file.
The header contains the default coding information, the import command
for the given model and the actual parameter and simulation step sizes.
The first example shows that, if you pass the model argument as a
string, you have to take care that this string makes sense:
>>> from hydpy.core.parametertools import get_controlfileheader, Parameter
>>> from hydpy import Period, prepare_model, pub, Timegrids, Timegrid
>>> print(get_controlfileheader(model='no model class',
... parameterstep='-1h',
... simulationstep=Period('1h')))
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.no model class import *
<BLANKLINE>
simulationstep('1h')
parameterstep('-1h')
<BLANKLINE>
<BLANKLINE>
The second example shows the safer option to pass the proper model
object. It also shows that function |get_controlfileheader| tries
to gain the parameter and simulation step sizes from the global
|Timegrids| object contained in the module |pub| when necessary:
>>> model = prepare_model('lland_v1')
>>> _ = Parameter.parameterstep('1d')
>>> pub.timegrids = '2000.01.01', '2001.01.01', '1h'
>>> print(get_controlfileheader(model=model))
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.lland_v1 import *
<BLANKLINE>
simulationstep('1h')
parameterstep('1d')
<BLANKLINE>
<BLANKLINE>
|
entailment
|
def _prepare_docstrings(self, frame):
"""Assign docstrings to the constants handled by |Constants|
to make them available in the interactive mode of Python."""
if config.USEAUTODOC:
filename = inspect.getsourcefile(frame)
with open(filename) as file_:
sources = file_.read().split('"""')
for code, doc in zip(sources[::2], sources[1::2]):
code = code.strip()
key = code.split('\n')[-1].split()[0]
value = self.get(key)
if value:
value.__doc__ = doc
|
Assign docstrings to the constants handled by |Constants|
to make them available in the interactive mode of Python.
|
entailment
|
def update(self) -> None:
"""Call method |Parameter.update| of all "secondary" parameters.
Directly after initialisation, neither the primary (`control`)
parameters nor the secondary (`derived`) parameters of
application model |hstream_v1| are ready for usage:
>>> from hydpy.models.hstream_v1 import *
>>> parameterstep('1d')
>>> simulationstep('1d')
>>> derived
nmbsegments(?)
c1(?)
c3(?)
c2(?)
Trying to update the values of the secondary parameters while the
primary ones are still not defined, raises errors like the following:
>>> model.parameters.update()
Traceback (most recent call last):
...
AttributeError: While trying to update parameter ``nmbsegments` \
of element `?``, the following error occurred: For variable `lag`, \
no value has been defined so far.
With proper values both for parameter |hstream_control.Lag| and
|hstream_control.Damp|, updating the derived parameters succeeds:
>>> lag(0.0)
>>> damp(0.0)
>>> model.parameters.update()
>>> derived
nmbsegments(0)
c1(0.0)
c3(0.0)
c2(1.0)
"""
for subpars in self.secondary_subpars:
for par in subpars:
try:
par.update()
except BaseException:
objecttools.augment_excmessage(
f'While trying to update parameter '
f'`{objecttools.elementphrase(par)}`')
|
Call method |Parameter.update| of all "secondary" parameters.
Directly after initialisation, neither the primary (`control`)
parameters nor the secondary (`derived`) parameters of
application model |hstream_v1| are ready for usage:
>>> from hydpy.models.hstream_v1 import *
>>> parameterstep('1d')
>>> simulationstep('1d')
>>> derived
nmbsegments(?)
c1(?)
c3(?)
c2(?)
Trying to update the values of the secondary parameters while the
primary ones are still not defined, raises errors like the following:
>>> model.parameters.update()
Traceback (most recent call last):
...
AttributeError: While trying to update parameter ``nmbsegments` \
of element `?``, the following error occurred: For variable `lag`, \
no value has been defined so far.
With proper values both for parameter |hstream_control.Lag| and
|hstream_control.Damp|, updating the derived parameters succeeds:
>>> lag(0.0)
>>> damp(0.0)
>>> model.parameters.update()
>>> derived
nmbsegments(0)
c1(0.0)
c3(0.0)
c2(1.0)
|
entailment
|
def save_controls(self, filepath: Optional[str] = None,
parameterstep: timetools.PeriodConstrArg = None,
simulationstep: timetools.PeriodConstrArg = None,
auxfiler: 'auxfiletools.Auxfiler' = None):
"""Write the control parameters to file.
Usually, a control file consists of a header (see the documentation
on the method |get_controlfileheader|) and the string representations
of the individual |Parameter| objects handled by the `control`
|SubParameters| object.
The main functionality of method |Parameters.save_controls| is
demonstrated in the documentation on the method |HydPy.save_controls|
of class |HydPy|, which one would apply to write the parameter
information of complete *HydPy* projects. However, to call
|Parameters.save_controls| on individual |Parameters| objects
offers the advantage of choosing an arbitrary file path, as shown
in the following example:
>>> from hydpy.models.hstream_v1 import *
>>> parameterstep('1d')
>>> simulationstep('1h')
>>> lag(1.0)
>>> damp(0.5)
>>> from hydpy import Open
>>> with Open():
... model.parameters.save_controls('otherdir/otherfile.py')
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
otherdir/otherfile.py
-------------------------------------
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1h')
parameterstep('1d')
<BLANKLINE>
lag(1.0)
damp(0.5)
<BLANKLINE>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Without a given file path and a proper project configuration,
method |Parameters.save_controls| raises the following error:
>>> model.parameters.save_controls()
Traceback (most recent call last):
...
RuntimeError: To save the control parameters of a model to a file, \
its filename must be known. This can be done by passing a filename to \
function `save_controls` directly. But in complete HydPy applications, \
it is usually assumed to be consistent with the name of the element \
handling the model.
"""
if self.control:
variable2auxfile = getattr(auxfiler, str(self.model), None)
lines = [get_controlfileheader(
self.model, parameterstep, simulationstep)]
with Parameter.parameterstep(parameterstep):
for par in self.control:
if variable2auxfile:
auxfilename = variable2auxfile.get_filename(par)
if auxfilename:
lines.append(
f"{par.name}(auxfile='{auxfilename}')\n")
continue
lines.append(repr(par) + '\n')
text = ''.join(lines)
if filepath:
with open(filepath, mode='w', encoding='utf-8') as controlfile:
controlfile.write(text)
else:
filename = objecttools.devicename(self)
if filename == '?':
raise RuntimeError(
'To save the control parameters of a model to a file, '
'its filename must be known. This can be done by '
'passing a filename to function `save_controls` '
'directly. But in complete HydPy applications, it is '
'usually assumed to be consistent with the name of the '
'element handling the model.')
hydpy.pub.controlmanager.save_file(filename, text)
|
Write the control parameters to file.
Usually, a control file consists of a header (see the documentation
on the method |get_controlfileheader|) and the string representations
of the individual |Parameter| objects handled by the `control`
|SubParameters| object.
The main functionality of method |Parameters.save_controls| is
demonstrated in the documentation on the method |HydPy.save_controls|
of class |HydPy|, which one would apply to write the parameter
information of complete *HydPy* projects. However, to call
|Parameters.save_controls| on individual |Parameters| objects
offers the advantage of choosing an arbitrary file path, as shown
in the following example:
>>> from hydpy.models.hstream_v1 import *
>>> parameterstep('1d')
>>> simulationstep('1h')
>>> lag(1.0)
>>> damp(0.5)
>>> from hydpy import Open
>>> with Open():
... model.parameters.save_controls('otherdir/otherfile.py')
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
otherdir/otherfile.py
-------------------------------------
# -*- coding: utf-8 -*-
<BLANKLINE>
from hydpy.models.hstream_v1 import *
<BLANKLINE>
simulationstep('1h')
parameterstep('1d')
<BLANKLINE>
lag(1.0)
damp(0.5)
<BLANKLINE>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Without a given file path and a proper project configuration,
method |Parameters.save_controls| raises the following error:
>>> model.parameters.save_controls()
Traceback (most recent call last):
...
RuntimeError: To save the control parameters of a model to a file, \
its filename must be known. This can be done by passing a filename to \
function `save_controls` directly. But in complete HydPy applications, \
it is usually assumed to be consistent with the name of the element \
handling the model.
|
entailment
|
def _get_values_from_auxiliaryfile(self, auxfile):
"""Try to return the parameter values from the auxiliary control file
with the given name.
Things are a little complicated here. To understand this method, you
should first take a look at the |parameterstep| function.
"""
try:
frame = inspect.currentframe().f_back.f_back
while frame:
namespace = frame.f_locals
try:
subnamespace = {'model': namespace['model'],
'focus': self}
break
except KeyError:
frame = frame.f_back
else:
raise RuntimeError(
'Cannot determine the corresponding model. Use the '
'`auxfile` keyword in usual parameter control files only.')
filetools.ControlManager.read2dict(auxfile, subnamespace)
try:
subself = subnamespace[self.name]
except KeyError:
raise RuntimeError(
f'The selected file does not define value(s) for '
f'parameter {self.name}')
return subself.values
except BaseException:
objecttools.augment_excmessage(
f'While trying to extract information for parameter '
f'`{self.name}` from file `{auxfile}`')
|
Try to return the parameter values from the auxiliary control file
with the given name.
Things are a little complicated here. To understand this method, you
should first take a look at the |parameterstep| function.
|
entailment
|
def initinfo(self) -> Tuple[Union[float, int, bool], bool]:
"""The actual initial value of the given parameter.
Some |Parameter| subclasses define another value for class
attribute `INIT` than |None| to provide a default value.
Let's define a parameter test class and prepare a function for
initialising it and connecting the resulting instance to a
|SubParameters| object:
>>> from hydpy.core.parametertools import Parameter, SubParameters
>>> class Test(Parameter):
... NDIM = 0
... TYPE = float
... TIME = None
... INIT = 2.0
>>> class SubGroup(SubParameters):
... CLASSES = (Test,)
>>> def prepare():
... subpars = SubGroup(None)
... test = Test(subpars)
... test.__hydpy__connect_variable2subgroup__()
... return test
By default, making use of the `INIT` attribute is disabled:
>>> test = prepare()
>>> test
test(?)
Enable it through setting |Options.usedefaultvalues| to |True|:
>>> from hydpy import pub
>>> pub.options.usedefaultvalues = True
>>> test = prepare()
>>> test
test(2.0)
When no `INIT` attribute is defined, enabling
|Options.usedefaultvalues| has no effect, of course:
>>> del Test.INIT
>>> test = prepare()
>>> test
test(?)
For time-dependent parameter values, the `INIT` attribute is assumed
to be related to a |Parameterstep| of one day:
>>> test.parameterstep = '2d'
>>> test.simulationstep = '12h'
>>> Test.INIT = 2.0
>>> Test.TIME = True
>>> test = prepare()
>>> test
test(4.0)
>>> test.value
1.0
"""
init = self.INIT
if (init is not None) and hydpy.pub.options.usedefaultvalues:
with Parameter.parameterstep('1d'):
return self.apply_timefactor(init), True
return variabletools.TYPE2MISSINGVALUE[self.TYPE], False
|
The actual initial value of the given parameter.
Some |Parameter| subclasses define another value for class
attribute `INIT` than |None| to provide a default value.
Let's define a parameter test class and prepare a function for
initialising it and connecting the resulting instance to a
|SubParameters| object:
>>> from hydpy.core.parametertools import Parameter, SubParameters
>>> class Test(Parameter):
... NDIM = 0
... TYPE = float
... TIME = None
... INIT = 2.0
>>> class SubGroup(SubParameters):
... CLASSES = (Test,)
>>> def prepare():
... subpars = SubGroup(None)
... test = Test(subpars)
... test.__hydpy__connect_variable2subgroup__()
... return test
By default, making use of the `INIT` attribute is disabled:
>>> test = prepare()
>>> test
test(?)
Enable it through setting |Options.usedefaultvalues| to |True|:
>>> from hydpy import pub
>>> pub.options.usedefaultvalues = True
>>> test = prepare()
>>> test
test(2.0)
When no `INIT` attribute is defined, enabling
|Options.usedefaultvalues| has no effect, of course:
>>> del Test.INIT
>>> test = prepare()
>>> test
test(?)
For time-dependent parameter values, the `INIT` attribute is assumed
to be related to a |Parameterstep| of one day:
>>> test.parameterstep = '2d'
>>> test.simulationstep = '12h'
>>> Test.INIT = 2.0
>>> Test.TIME = True
>>> test = prepare()
>>> test
test(4.0)
>>> test.value
1.0
|
entailment
|
def get_timefactor(cls) -> float:
"""Factor to adjust a new value of a time-dependent parameter.
For a time-dependent parameter, its effective value depends on the
simulation step size. Method |Parameter.get_timefactor| returns
the fraction between the current simulation step size and the
current parameter step size.
.. testsetup::
>>> from hydpy import pub
>>> del pub.timegrids
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.simulationstep.delete()
Period()
Method |Parameter.get_timefactor| raises the following error
when time information is not available:
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.get_timefactor()
Traceback (most recent call last):
...
RuntimeError: To calculate the conversion factor for adapting the \
values of the time-dependent parameters, you need to define both a \
parameter and a simulation time step size first.
One can define both time step sizes directly:
>>> _ = Parameter.parameterstep('1d')
>>> _ = Parameter.simulationstep('6h')
>>> Parameter.get_timefactor()
0.25
As usual, the "global" simulation step size of the |Timegrids|
object of module |pub| is preferred:
>>> from hydpy import pub
>>> pub.timegrids = '2000-01-01', '2001-01-01', '12h'
>>> Parameter.get_timefactor()
0.5
"""
try:
parfactor = hydpy.pub.timegrids.parfactor
except RuntimeError:
if not (cls.parameterstep and cls.simulationstep):
raise RuntimeError(
f'To calculate the conversion factor for adapting '
f'the values of the time-dependent parameters, '
f'you need to define both a parameter and a simulation '
f'time step size first.')
else:
date1 = timetools.Date('2000.01.01')
date2 = date1 + cls.simulationstep
parfactor = timetools.Timegrids(timetools.Timegrid(
date1, date2, cls.simulationstep)).parfactor
return parfactor(cls.parameterstep)
|
Factor to adjust a new value of a time-dependent parameter.
For a time-dependent parameter, its effective value depends on the
simulation step size. Method |Parameter.get_timefactor| returns
the fraction between the current simulation step size and the
current parameter step size.
.. testsetup::
>>> from hydpy import pub
>>> del pub.timegrids
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.simulationstep.delete()
Period()
Method |Parameter.get_timefactor| raises the following error
when time information is not available:
>>> from hydpy.core.parametertools import Parameter
>>> Parameter.get_timefactor()
Traceback (most recent call last):
...
RuntimeError: To calculate the conversion factor for adapting the \
values of the time-dependent parameters, you need to define both a \
parameter and a simulation time step size first.
One can define both time step sizes directly:
>>> _ = Parameter.parameterstep('1d')
>>> _ = Parameter.simulationstep('6h')
>>> Parameter.get_timefactor()
0.25
As usual, the "global" simulation step size of the |Timegrids|
object of module |pub| is preferred:
>>> from hydpy import pub
>>> pub.timegrids = '2000-01-01', '2001-01-01', '12h'
>>> Parameter.get_timefactor()
0.5
|
entailment
|
def apply_timefactor(cls, values):
"""Change and return the given value(s) in accordance with
|Parameter.get_timefactor| and the type of time-dependence
of the actual parameter subclass.
.. testsetup::
>>> from hydpy import pub
>>> del pub.timegrids
For the same conversion factor returned by method
|Parameter.get_timefactor|, method |Parameter.apply_timefactor|
behaves differently depending on the `TIME` attribute of the
respective |Parameter| subclass. We first prepare a parameter
test class and define both the parameter and simulation step size:
>>> from hydpy.core.parametertools import Parameter
>>> class Par(Parameter):
... TIME = None
>>> Par.parameterstep = '1d'
>>> Par.simulationstep = '6h'
|None| means the value(s) of the parameter are not time-dependent
(e.g. maximum storage capacity). Hence, |Parameter.apply_timefactor|
returns the original value(s):
>>> Par.apply_timefactor(4.0)
4.0
|True| means the effective parameter value is proportional to
the simulation step size (e.g. travel time). Hence,
|Parameter.apply_timefactor| returns a reduced value in the
next example (where the simulation step size is smaller than
the parameter step size):
>>> Par.TIME = True
>>> Par.apply_timefactor(4.0)
1.0
|False| means the effective parameter value is inversely
proportional to the simulation step size (e.g. storage
coefficient). Hence, |Parameter.apply_timefactor| returns
an increased value in the next example:
>>> Par.TIME = False
>>> Par.apply_timefactor(4.0)
16.0
"""
if cls.TIME is True:
return values * cls.get_timefactor()
if cls.TIME is False:
return values / cls.get_timefactor()
return values
|
Change and return the given value(s) in accordance with
|Parameter.get_timefactor| and the type of time-dependence
of the actual parameter subclass.
.. testsetup::
>>> from hydpy import pub
>>> del pub.timegrids
For the same conversion factor returned by method
|Parameter.get_timefactor|, method |Parameter.apply_timefactor|
behaves differently depending on the `TIME` attribute of the
respective |Parameter| subclass. We first prepare a parameter
test class and define both the parameter and simulation step size:
>>> from hydpy.core.parametertools import Parameter
>>> class Par(Parameter):
... TIME = None
>>> Par.parameterstep = '1d'
>>> Par.simulationstep = '6h'
|None| means the value(s) of the parameter are not time-dependent
(e.g. maximum storage capacity). Hence, |Parameter.apply_timefactor|
returns the original value(s):
>>> Par.apply_timefactor(4.0)
4.0
|True| means the effective parameter value is proportional to
the simulation step size (e.g. travel time). Hence,
|Parameter.apply_timefactor| returns a reduced value in the
next example (where the simulation step size is smaller than
the parameter step size):
>>> Par.TIME = True
>>> Par.apply_timefactor(4.0)
1.0
|False| means the effective parameter value is inversely
proportional to the simulation step size (e.g. storage
coefficient). Hence, |Parameter.apply_timefactor| returns
an increased value in the next example:
>>> Par.TIME = False
>>> Par.apply_timefactor(4.0)
16.0
|
entailment
|
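The conversion performed by |Parameter.apply_timefactor| boils down to one ratio: with a parameter step of one day and a simulation step of six hours, |Parameter.get_timefactor| yields 6/24 = 0.25; time-proportional values (`TIME = True`) are multiplied by this factor and inversely proportional values (`TIME = False`) are divided by it. A minimal sketch of that arithmetic, detached from the |Parameter| class hierarchy (the function names and the step sizes in hours are assumptions for the example):
def get_timefactor(simulationstep_h, parameterstep_h):
    """Fraction between the simulation step size and the parameter step size."""
    return simulationstep_h / parameterstep_h

def apply_timefactor(value, time, simulationstep_h=6.0, parameterstep_h=24.0):
    """Adjust `value` depending on `time`, mirroring the rules explained above."""
    factor = get_timefactor(simulationstep_h, parameterstep_h)   #6/24 = 0.25
    if time is True:      #value proportional to the step size (e.g. travel time)
        return value * factor
    if time is False:     #value inversely proportional (e.g. storage coefficient)
        return value / factor
    return value          #time-independent value

#apply_timefactor(4.0, None)   returns 4.0
#apply_timefactor(4.0, True)   returns 1.0
#apply_timefactor(4.0, False)  returns 16.0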
def revert_timefactor(cls, values):
"""The inverse version of method |Parameter.apply_timefactor|.
See the explanations on method |Parameter.apply_timefactor| to
understand the following examples:
.. testsetup::
>>> from hydpy import pub
>>> del pub.timegrids
>>> from hydpy.core.parametertools import Parameter
>>> class Par(Parameter):
... TIME = None
>>> Par.parameterstep = '1d'
>>> Par.simulationstep = '6h'
>>> Par.revert_timefactor(4.0)
4.0
>>> Par.TIME = True
>>> Par.revert_timefactor(4.0)
16.0
>>> Par.TIME = False
>>> Par.revert_timefactor(4.0)
1.0
"""
if cls.TIME is True:
return values / cls.get_timefactor()
if cls.TIME is False:
return values * cls.get_timefactor()
return values
|
The inverse version of method |Parameter.apply_timefactor|.
See the explanations on method |Parameter.apply_timefactor| to
understand the following examples:
.. testsetup::
>>> from hydpy import pub
>>> del pub.timegrids
>>> from hydpy.core.parametertools import Parameter
>>> class Par(Parameter):
... TIME = None
>>> Par.parameterstep = '1d'
>>> Par.simulationstep = '6h'
>>> Par.revert_timefactor(4.0)
4.0
>>> Par.TIME = True
>>> Par.revert_timefactor(4.0)
16.0
>>> Par.TIME = False
>>> Par.revert_timefactor(4.0)
1.0
|
entailment
|
def compress_repr(self) -> Optional[str]:
"""Try to find a compressed parameter value representation and
return it.
|Parameter.compress_repr| returns |None| when failing to find a
compressed representation.
.. testsetup::
>>> from hydpy import pub
>>> del pub.timegrids
For the following examples, we define a 1-dimensional sequence
handling time-dependent floating point values:
>>> from hydpy.core.parametertools import Parameter
>>> class Test(Parameter):
... NDIM = 1
... TYPE = float
... TIME = True
>>> test = Test(None)
Before and directly after defining the parameter shape, `?`
is returned:
>>> test.compress_repr()
'?'
>>> test
test(?)
>>> test.shape = 4
>>> test
test(?)
Due to the time-dependence of the values of our test class,
we need to specify a parameter and a simulation time step:
>>> test.parameterstep = '1d'
>>> test.simulationstep = '8h'
Compression succeeds when all required values are identical:
>>> test(3.0, 3.0, 3.0, 3.0)
>>> test.values
array([ 1., 1., 1., 1.])
>>> test.compress_repr()
'3.0'
>>> test
test(3.0)
Method |Parameter.compress_repr| returns |None| in case the
required values are not identical:
>>> test(1.0, 2.0, 3.0, 3.0)
>>> test.compress_repr()
>>> test
test(1.0, 2.0, 3.0, 3.0)
If some values are not required, indicate this by the `mask`
descriptor:
>>> import numpy
>>> test(3.0, 3.0, 3.0, numpy.nan)
>>> test
test(3.0, 3.0, 3.0, nan)
>>> Test.mask = numpy.array([True, True, True, False])
>>> test
test(3.0)
For a shape of zero, the string representation includes an empty list:
>>> test.shape = 0
>>> test.compress_repr()
'[]'
>>> test
test([])
Method |Parameter.compress_repr| works similarly for different
|Parameter| subclasses. The following examples focus on a
2-dimensional parameter handling integer values:
>>> from hydpy.core.parametertools import Parameter
>>> class Test(Parameter):
... NDIM = 2
... TYPE = int
... TIME = None
>>> test = Test(None)
>>> test.compress_repr()
'?'
>>> test
test(?)
>>> test.shape = (2, 3)
>>> test
test(?)
>>> test([[3, 3, 3],
... [3, 3, 3]])
>>> test
test(3)
>>> test([[3, 3, -999999],
... [3, 3, 3]])
>>> test
test([[3, 3, -999999],
[3, 3, 3]])
>>> Test.mask = numpy.array([
... [True, True, False],
... [True, True, True]])
>>> test
test(3)
>>> test.shape = (0, 0)
>>> test
test([[]])
"""
if not hasattr(self, 'value'):
return '?'
if not self:
return f"{self.NDIM * '['}{self.NDIM * ']'}"
unique = numpy.unique(self[self.mask])
if sum(numpy.isnan(unique)) == len(unique.flatten()):
unique = numpy.array([numpy.nan])
else:
unique = self.revert_timefactor(unique)
if len(unique) == 1:
return objecttools.repr_(unique[0])
return None
|
Try to find a compressed parameter value representation and
return it.
|Parameter.compress_repr| returns |None| when failing to find a
compressed representation.
.. testsetup::
>>> from hydpy import pub
>>> del pub.timegrids
For the following examples, we define a 1-dimensional sequence
handling time-dependent floating point values:
>>> from hydpy.core.parametertools import Parameter
>>> class Test(Parameter):
... NDIM = 1
... TYPE = float
... TIME = True
>>> test = Test(None)
Before and directly after defining the parameter shape, `?`
is returned:
>>> test.compress_repr()
'?'
>>> test
test(?)
>>> test.shape = 4
>>> test
test(?)
Due to the time-dependence of the values of our test class,
we need to specify a parameter and a simulation time step:
>>> test.parameterstep = '1d'
>>> test.simulationstep = '8h'
Compression succeeds when all required values are identical:
>>> test(3.0, 3.0, 3.0, 3.0)
>>> test.values
array([ 1., 1., 1., 1.])
>>> test.compress_repr()
'3.0'
>>> test
test(3.0)
Method |Parameter.compress_repr| returns |None| in case the
required values are not identical:
>>> test(1.0, 2.0, 3.0, 3.0)
>>> test.compress_repr()
>>> test
test(1.0, 2.0, 3.0, 3.0)
If some values are not required, indicate this by the `mask`
descriptor:
>>> import numpy
>>> test(3.0, 3.0, 3.0, numpy.nan)
>>> test
test(3.0, 3.0, 3.0, nan)
>>> Test.mask = numpy.array([True, True, True, False])
>>> test
test(3.0)
For a shape of zero, the string representation includes an empty list:
>>> test.shape = 0
>>> test.compress_repr()
'[]'
>>> test
test([])
Method |Parameter.compress_repr| works similarly for different
|Parameter| subclasses. The following examples focus on a
2-dimensional parameter handling integer values:
>>> from hydpy.core.parametertools import Parameter
>>> class Test(Parameter):
... NDIM = 2
... TYPE = int
... TIME = None
>>> test = Test(None)
>>> test.compress_repr()
'?'
>>> test
test(?)
>>> test.shape = (2, 3)
>>> test
test(?)
>>> test([[3, 3, 3],
... [3, 3, 3]])
>>> test
test(3)
>>> test([[3, 3, -999999],
... [3, 3, 3]])
>>> test
test([[3, 3, -999999],
[3, 3, 3]])
>>> Test.mask = numpy.array([
... [True, True, False],
... [True, True, True]])
>>> test
test(3)
>>> test.shape = (0, 0)
>>> test
test([[]])
|
entailment
|
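Stripped of the class machinery, the compression above amounts to: select the values marked by the mask, reduce them to their unique entries, and report a single value only if exactly one remains. A standalone |numpy| sketch of that rule (the helper name is an assumption; the time-factor handling is omitted):
import numpy

def try_compress(values, mask):
    """Return one representative value if all masked entries agree, else None."""
    unique = numpy.unique(numpy.asarray(values, dtype=float)[numpy.asarray(mask)])
    if numpy.isnan(unique).sum() == unique.size:   #only nan entries selected
        unique = numpy.array([numpy.nan])
    if len(unique) == 1:
        return unique[0]
    return None

#try_compress([3.0, 3.0, 3.0, numpy.nan], [True, True, True, False])  returns 3.0
#try_compress([1.0, 2.0, 3.0, 3.0], [True, True, True, True])         returns None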
def compress_repr(self) -> str:
"""Works as |Parameter.compress_repr|, but returns a
string with constant names instead of constant values.
See the main documentation on class |NameParameter| for
further information.
"""
string = super().compress_repr()
if string in ('?', '[]'):
return string
if string is None:
values = self.values
else:
values = [int(string)]
invmap = {value: key for key, value in
self.CONSTANTS.items()}
result = ', '.join(
invmap.get(value, repr(value)) for value in values)
if len(self) > 255:
result = f'[{result}]'
return result
|
Works as |Parameter.compress_repr|, but returns a
string with constant names instead of constant values.
See the main documentation on class |NameParameter| for
further information.
|
entailment
|
def compress_repr(self) -> Optional[str]:
"""Works as |Parameter.compress_repr|, but alternatively
tries to compress by following an external classification.
See the main documentation on class |ZipParameter| for
further information.
"""
string = super().compress_repr()
if string is not None:
return string
results = []
mask = self.mask
refindices = mask.refindices.values
for (key, value) in self.MODEL_CONSTANTS.items():
if value in mask.RELEVANT_VALUES:
unique = numpy.unique(self.values[refindices == value])
unique = self.revert_timefactor(unique)
length = len(unique)
if length == 1:
results.append(
f'{key.lower()}={objecttools.repr_(unique[0])}')
elif length > 1:
return None
return ', '.join(sorted(results))
|
Works as |Parameter.compress_repr|, but alternatively
tries to compress by following an external classification.
See the main documentation on class |ZipParameter| for
further information.
|
entailment
|
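The fallback used by |ZipParameter.compress_repr| groups the parameter values by an external classification array and succeeds only if every relevant class maps to exactly one value. The same grouping on plain |numpy| arrays (the constant names and data below are assumptions for the example):
import numpy

def compress_by_classes(values, refindices, constants):
    """Return 'name=value' pairs if each class maps to one value only, else None."""
    results = []
    for name, code in constants.items():
        unique = numpy.unique(numpy.asarray(values)[numpy.asarray(refindices) == code])
        if len(unique) > 1:
            return None
        if len(unique) == 1:
            results.append(f'{name.lower()}={unique[0]}')
    return ', '.join(sorted(results))

#compress_by_classes([1.0, 1.0, 2.0], [3, 3, 4], {'FIELD': 3, 'FOREST': 4})
#returns 'field=1.0, forest=2.0'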
def refresh(self) -> None:
"""Update the actual simulation values based on the toy-value pairs.
Usually, one does not need to call refresh explicitly. The
"magic" methods __call__, __setattr__, and __delattr__ invoke
it automatically, when required.
Instantiate a 1-dimensional |SeasonalParameter| object:
>>> from hydpy.core.parametertools import SeasonalParameter
>>> class Par(SeasonalParameter):
... NDIM = 1
... TYPE = float
... TIME = None
>>> par = Par(None)
>>> par.simulationstep = '1d'
>>> par.shape = (None,)
When a |SeasonalParameter| object does not contain any toy-value
pairs yet, the method |SeasonalParameter.refresh| sets all actual
simulation values to zero:
>>> par.values = 1.
>>> par.refresh()
>>> par.values[0]
0.0
When there is only one toy-value pair, its values are relevant
for all actual simulation values:
>>> par.toy_1 = 2. # calls refresh automatically
>>> par.values[0]
2.0
Method |SeasonalParameter.refresh| performs a linear interpolation
for the central time points of each simulation time step. Hence,
in the following example, the original values of the toy-value
pairs do not show up:
>>> par.toy_12_31 = 4.
>>> from hydpy import round_
>>> round_(par.values[0])
2.00274
>>> round_(par.values[-2])
3.99726
>>> par.values[-1]
3.0
If one wants to preserve the original values in this example, one
would have to set the corresponding toy instances in the middle of
some simulation step intervals:
>>> del par.toy_1
>>> del par.toy_12_31
>>> par.toy_1_1_12 = 2
>>> par.toy_12_31_12 = 4.
>>> par.values[0]
2.0
>>> round_(par.values[1])
2.005479
>>> round_(par.values[-2])
3.994521
>>> par.values[-1]
4.0
"""
if not self:
self.values[:] = 0.
elif len(self) == 1:
values = list(self._toy2values.values())[0]
self.values[:] = self.apply_timefactor(values)
else:
for idx, date in enumerate(
timetools.TOY.centred_timegrid(self.simulationstep)):
values = self.interp(date)
self.values[idx] = self.apply_timefactor(values)
|
Update the actual simulation values based on the toy-value pairs.
Usually, one does not need to call refresh explicitly. The
"magic" methods __call__, __setattr__, and __delattr__ invoke
it automatically, when required.
Instantiate a 1-dimensional |SeasonalParameter| object:
>>> from hydpy.core.parametertools import SeasonalParameter
>>> class Par(SeasonalParameter):
... NDIM = 1
... TYPE = float
... TIME = None
>>> par = Par(None)
>>> par.simulationstep = '1d'
>>> par.shape = (None,)
When a |SeasonalParameter| object does not contain any toy-value
pairs yet, the method |SeasonalParameter.refresh| sets all actual
simulation values to zero:
>>> par.values = 1.
>>> par.refresh()
>>> par.values[0]
0.0
When there is only one toy-value pair, its values are relevant
for all actual simulation values:
>>> par.toy_1 = 2. # calls refresh automatically
>>> par.values[0]
2.0
Method |SeasonalParameter.refresh| performs a linear interpolation
for the central time points of each simulation time step. Hence,
in the following example, the original values of the toy-value
pairs do not show up:
>>> par.toy_12_31 = 4.
>>> from hydpy import round_
>>> round_(par.values[0])
2.00274
>>> round_(par.values[-2])
3.99726
>>> par.values[-1]
3.0
If one wants to preserve the original values in this example, one
would have to set the corresponding toy instances in the middle of
some simulation step intervals:
>>> del par.toy_1
>>> del par.toy_12_31
>>> par.toy_1_1_12 = 2
>>> par.toy_12_31_12 = 4.
>>> par.values[0]
2.0
>>> round_(par.values[1])
2.005479
>>> round_(par.values[-2])
3.994521
>>> par.values[-1]
4.0
|
entailment
|
def interp(self, date: timetools.Date) -> float:
"""Perform a linear value interpolation for the given `date` and
return the result.
Instantiate a 1-dimensional |SeasonalParameter| object:
>>> from hydpy.core.parametertools import SeasonalParameter
>>> class Par(SeasonalParameter):
... NDIM = 1
... TYPE = float
... TIME = None
>>> par = Par(None)
>>> par.simulationstep = '1d'
>>> par.shape = (None,)
Define three toy-value pairs:
>>> par(_1=2.0, _2=5.0, _12_31=4.0)
Passing a |Date| object matching a |TOY| object exactly returns
the corresponding |float| value:
>>> from hydpy import Date
>>> par.interp(Date('2000.01.01'))
2.0
>>> par.interp(Date('2000.02.01'))
5.0
>>> par.interp(Date('2000.12.31'))
4.0
For all intermediate points, |SeasonalParameter.interp| performs
a linear interpolation:
>>> from hydpy import round_
>>> round_(par.interp(Date('2000.01.02')))
2.096774
>>> round_(par.interp(Date('2000.01.31')))
4.903226
>>> round_(par.interp(Date('2000.02.02')))
4.997006
>>> round_(par.interp(Date('2000.12.30')))
4.002994
Linear interpolation is also allowed between the first and the
last pair when they do not capture the endpoints of the year:
>>> par(_1_2=2.0, _12_30=4.0)
>>> round_(par.interp(Date('2000.12.29')))
3.99449
>>> par.interp(Date('2000.12.30'))
4.0
>>> round_(par.interp(Date('2000.12.31')))
3.333333
>>> round_(par.interp(Date('2000.01.01')))
2.666667
>>> par.interp(Date('2000.01.02'))
2.0
>>> round_(par.interp(Date('2000.01.03')))
2.00551
The following example briefly shows interpolation performed for
a 2-dimensional parameter:
>>> Par.NDIM = 2
>>> par = Par(None)
>>> par.shape = (None, 2)
>>> par(_1_1=[1., 2.], _1_3=[-3, 0.])
>>> result = par.interp(Date('2000.01.02'))
>>> round_(result[0])
-1.0
>>> round_(result[1])
1.0
"""
xnew = timetools.TOY(date)
xys = list(self)
for idx, (x_1, y_1) in enumerate(xys):
if x_1 > xnew:
x_0, y_0 = xys[idx-1]
break
else:
x_0, y_0 = xys[-1]
x_1, y_1 = xys[0]
return y_0+(y_1-y_0)/(x_1-x_0)*(xnew-x_0)
|
Perform a linear value interpolation for the given `date` and
return the result.
Instantiate a 1-dimensional |SeasonalParameter| object:
>>> from hydpy.core.parametertools import SeasonalParameter
>>> class Par(SeasonalParameter):
... NDIM = 1
... TYPE = float
... TIME = None
>>> par = Par(None)
>>> par.simulationstep = '1d'
>>> par.shape = (None,)
Define three toy-value pairs:
>>> par(_1=2.0, _2=5.0, _12_31=4.0)
Passing a |Date| object matching a |TOY| object exactly returns
the corresponding |float| value:
>>> from hydpy import Date
>>> par.interp(Date('2000.01.01'))
2.0
>>> par.interp(Date('2000.02.01'))
5.0
>>> par.interp(Date('2000.12.31'))
4.0
For all intermediate points, |SeasonalParameter.interp| performs
a linear interpolation:
>>> from hydpy import round_
>>> round_(par.interp(Date('2000.01.02')))
2.096774
>>> round_(par.interp(Date('2000.01.31')))
4.903226
>>> round_(par.interp(Date('2000.02.02')))
4.997006
>>> round_(par.interp(Date('2000.12.30')))
4.002994
Linear interpolation is also allowed between the first and the
last pair when they do not capture the endpoints of the year:
>>> par(_1_2=2.0, _12_30=4.0)
>>> round_(par.interp(Date('2000.12.29')))
3.99449
>>> par.interp(Date('2000.12.30'))
4.0
>>> round_(par.interp(Date('2000.12.31')))
3.333333
>>> round_(par.interp(Date('2000.01.01')))
2.666667
>>> par.interp(Date('2000.01.02'))
2.0
>>> round_(par.interp(Date('2000.01.03')))
2.00551
The following example briefly shows interpolation performed for
a 2-dimensional parameter:
>>> Par.NDIM = 2
>>> par = Par(None)
>>> par.shape = (None, 2)
>>> par(_1_1=[1., 2.], _1_3=[-3, 0.])
>>> result = par.interp(Date('2000.01.02'))
>>> round_(result[0])
-1.0
>>> round_(result[1])
1.0
|
entailment
|
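The interpolation above is plain linear interpolation between the two neighbouring (time of year, value) pairs, wrapping around the turn of the year when the query point lies before the first or after the last pair. A simplified sketch using day-of-year floats instead of |TOY| objects (an assumption made to keep the example self-contained; |TOY| relates to a leap year, hence the period of 366 days):
def interp_cyclic(pairs, x, period=366.0):
    """Linear interpolation between sorted (day of year, value) pairs on a cyclic axis."""
    for idx, (x_1, y_1) in enumerate(pairs):
        if x_1 > x:
            x_0, y_0 = pairs[idx - 1]     #idx == 0 wraps around to the last pair
            break
    else:
        x_0, y_0 = pairs[-1]
        x_1, y_1 = pairs[0]
    dx = (x_1 - x_0) % period or period   #forward distance on the cycle
    return y_0 + (y_1 - y_0) / dx * ((x - x_0) % period)

#interp_cyclic([(1.0, 2.0), (32.0, 5.0), (366.0, 4.0)], 2.0)   returns about 2.096774
#interp_cyclic([(2.0, 2.0), (365.0, 4.0)], 366.0)              returns about 3.333333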
def update(self) -> None:
"""Update subclass of |RelSubweightsMixin| based on `refweights`."""
mask = self.mask
weights = self.refweights[mask]
self[~mask] = numpy.nan
self[mask] = weights/numpy.sum(weights)
|
Update subclass of |RelSubweightsMixin| based on `refweights`.
|
entailment
|
def alternative_initvalue(self) -> Union[bool, int, float]:
"""A user-defined value to be used instead of the value of class
constant `INIT`.
See the main documentation on class |SolverParameter| for more
information.
"""
if self._alternative_initvalue is None:
raise AttributeError(
f'No alternative initial value for solver parameter '
f'{objecttools.elementphrase(self)} has been defined so far.')
else:
return self._alternative_initvalue
|
A user-defined value to be used instead of the value of class
constant `INIT`.
See the main documentation on class |SolverParameter| for more
information.
|
entailment
|
def update(self) -> None:
"""Reference the actual |Indexer.timeofyear| array of the
|Indexer| object available in module |pub|.
>>> from hydpy import pub
>>> pub.timegrids = '27.02.2004', '3.03.2004', '1d'
>>> from hydpy.core.parametertools import TOYParameter
>>> toyparameter = TOYParameter(None)
>>> toyparameter.update()
>>> toyparameter
toyparameter(57, 58, 59, 60, 61)
"""
indexarray = hydpy.pub.indexer.timeofyear
self.shape = indexarray.shape
self.values = indexarray
|
Reference the actual |Indexer.timeofyear| array of the
|Indexer| object available in module |pub|.
>>> from hydpy import pub
>>> pub.timegrids = '27.02.2004', '3.03.2004', '1d'
>>> from hydpy.core.parametertools import TOYParameter
>>> toyparameter = TOYParameter(None)
>>> toyparameter.update()
>>> toyparameter
toyparameter(57, 58, 59, 60, 61)
|
entailment
|
def get_premises_model():
"""
Support for custom company premises model
with developer-friendly validation.
"""
try:
app_label, model_name = PREMISES_MODEL.split('.')
except ValueError:
raise ImproperlyConfigured("OPENINGHOURS_PREMISES_MODEL must be of the"
" form 'app_label.model_name'")
premises_model = get_model(app_label=app_label, model_name=model_name)
if premises_model is None:
raise ImproperlyConfigured("OPENINGHOURS_PREMISES_MODEL refers to"
" model '%s' that has not been installed"
% PREMISES_MODEL)
return premises_model
|
Support for custom company premises model
with developer-friendly validation.
|
entailment
|
def get_now():
"""
Allows accessing the global request and reading a timestamp from the query.
"""
if not get_current_request:
return datetime.datetime.now()
request = get_current_request()
if request:
openinghours_now = request.GET.get('openinghours-now')
if openinghours_now:
return datetime.datetime.strptime(openinghours_now, '%Y%m%d%H%M%S')
return datetime.datetime.now()
|
Allows accessing the global request and reading a timestamp from the query.
|
entailment
|
def get_closing_rule_for_now(location):
"""
Returns QuerySet of ClosingRules that are currently valid
"""
now = get_now()
if location:
return ClosingRules.objects.filter(company=location,
start__lte=now, end__gte=now)
return Company.objects.first().closingrules_set.filter(start__lte=now,
end__gte=now)
|
Returns QuerySet of ClosingRules that are currently valid
|
entailment
|
def is_open(location, now=None):
"""
Is the company currently open? Pass "now" to test with a specific
timestamp. Can be used stand-alone or as a helper.
"""
if now is None:
now = get_now()
if has_closing_rule_for_now(location):
return False
now_time = datetime.time(now.hour, now.minute, now.second)
if location:
ohs = OpeningHours.objects.filter(company=location)
else:
ohs = Company.objects.first().openinghours_set.all()
for oh in ohs:
is_open = False
# start and end is on the same day
if (oh.weekday == now.isoweekday() and
oh.from_hour <= now_time and
now_time <= oh.to_hour):
is_open = oh
# start and end are not on the same day and we test on the start day
if (oh.weekday == now.isoweekday() and
oh.from_hour <= now_time and
((oh.to_hour < oh.from_hour) and
(now_time < datetime.time(23, 59, 59)))):
is_open = oh
# start and end are not on the same day and we test on the end day
if (oh.weekday == (now.isoweekday() - 1) % 7 and
oh.from_hour >= now_time and
oh.to_hour >= now_time and
oh.to_hour < oh.from_hour):
is_open = oh
# print " 'Special' case after midnight", oh
if is_open is not False:
return oh
return False
|
Is the company currently open? Pass "now" to test with a specific
timestamp. Can be used stand-alone or as a helper.
|
entailment
|
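The per-row test inside is_open distinguishes three situations: the interval lies within a single day, it crosses midnight and `now` falls on the start day, or it crosses midnight and `now` falls on the following day. A standalone reformulation of that idea (plain arguments instead of model instances and a simplified previous-day computation, both assumptions for the example rather than the library's exact code):
import datetime

def interval_contains(weekday, from_hour, to_hour, now):
    """Does the opening interval (ISO weekday, from, to) contain the datetime `now`?"""
    now_time = now.time()
    prev_weekday = now.isoweekday() - 1 or 7          #ISO weekdays: Monday=1 ... Sunday=7
    if to_hour >= from_hour:                          #interval lies within a single day
        return weekday == now.isoweekday() and from_hour <= now_time <= to_hour
    if weekday == now.isoweekday() and now_time >= from_hour:
        return True                                   #evening part on the start day
    return weekday == prev_weekday and now_time <= to_hour   #morning part on the next day

#Friday (5) 20:00-02:00 contains Saturday 01:30:
#interval_contains(5, datetime.time(20), datetime.time(2), datetime.datetime(2019, 1, 5, 1, 30))
#returns True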
def next_time_open(location):
"""
Returns the next possible opening hours object, or (False, None)
if the location is currently open or there is no such object.
I.e., when is the company next open?
"""
if not is_open(location):
now = get_now()
now_time = datetime.time(now.hour, now.minute, now.second)
found_opening_hours = False
for i in range(8):
l_weekday = (now.isoweekday() + i) % 7
ohs = OpeningHours.objects.filter(company=location,
weekday=l_weekday
).order_by('weekday',
'from_hour')
if ohs.count():
for oh in ohs:
future_now = now + datetime.timedelta(days=i)
# same day issue
tmp_now = datetime.datetime(future_now.year,
future_now.month,
future_now.day,
oh.from_hour.hour,
oh.from_hour.minute,
oh.from_hour.second)
if tmp_now < now:
tmp_now = now # be sure to set the bound correctly...
if is_open(location, now=tmp_now):
found_opening_hours = oh
break
if found_opening_hours is not False:
return found_opening_hours, tmp_now
return False, None
|
Returns the next possible opening hours object, or (False, None)
if the location is currently open or there is no such object.
I.e., when is the company next open?
|
entailment
|
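next_time_open walks through the coming days and, for each day, checks its opening-hours rows in chronological order until one of them yields an open timestamp. The same idea on plain data, without database queries (the tuple representation of the opening slots is an assumption for the example):
import datetime

def next_opening(now, hours):
    """Earliest datetime >= now at which one of the (ISO weekday, opening time) slots starts."""
    for day_offset in range(8):                       #today plus the next seven days
        day = now + datetime.timedelta(days=day_offset)
        for weekday, from_hour in sorted(hours, key=lambda slot: slot[1]):
            if weekday != day.isoweekday():
                continue
            candidate = datetime.datetime.combine(day.date(), from_hour)
            if candidate >= now:
                return candidate
    return None

#next_opening(datetime.datetime(2019, 1, 5, 23, 0),            #Saturday evening
#             [(1, datetime.time(9)), (3, datetime.time(9))])  #Monday and Wednesday, 09:00
#returns datetime.datetime(2019, 1, 7, 9, 0)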
def refweights(self):
"""A |numpy| |numpy.ndarray| with equal weights for all segment
junctions.
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> states.qjoints.shape = 5
>>> states.qjoints.refweights
array([ 0.2, 0.2, 0.2, 0.2, 0.2])
"""
# pylint: disable=unsubscriptable-object
# due to a pylint bug (see https://github.com/PyCQA/pylint/issues/870)
return numpy.full(self.shape, 1./self.shape[0], dtype=float)
|
A |numpy| |numpy.ndarray| with equal weights for all segment
junctions.
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> states.qjoints.shape = 5
>>> states.qjoints.refweights
array([ 0.2, 0.2, 0.2, 0.2, 0.2])
|
entailment
|
def add(self, directory, path=None) -> None:
"""Add a directory and optionally its path."""
objecttools.valid_variable_identifier(directory)
if path is None:
path = directory
setattr(self, directory, path)
|
Add a directory and optionally its path.
|
entailment
|
def basepath(self) -> str:
"""Absolute path pointing to the available working directories.
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import repr_, TestIO
>>> with TestIO():
... repr_(filemanager.basepath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename'
"""
return os.path.abspath(
os.path.join(self.projectdir, self.BASEDIR))
|
Absolute path pointing to the available working directories.
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import repr_, TestIO
>>> with TestIO():
... repr_(filemanager.basepath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename'
|
entailment
|
def availabledirs(self) -> Folder2Path:
"""Names and paths of the available working directories.
Available working directories are those being stored in the
base directory of the respective |FileManager| subclass.
Folders with names starting with an underscore are ignored
(use this for directories handling additional data files,
if you like). Zipped directories, which can be unpacked
on the fly, do also count as available directories:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> import os
>>> from hydpy import repr_, TestIO
>>> TestIO.clear()
>>> with TestIO():
... os.makedirs('projectname/basename/folder1')
... os.makedirs('projectname/basename/folder2')
... open('projectname/basename/folder3.zip', 'w').close()
... os.makedirs('projectname/basename/_folder4')
... open('projectname/basename/folder5.tar', 'w').close()
... filemanager.availabledirs # doctest: +ELLIPSIS
Folder2Path(folder1=.../projectname/basename/folder1,
folder2=.../projectname/basename/folder2,
folder3=.../projectname/basename/folder3.zip)
"""
directories = Folder2Path()
for directory in os.listdir(self.basepath):
if not directory.startswith('_'):
path = os.path.join(self.basepath, directory)
if os.path.isdir(path):
directories.add(directory, path)
elif directory.endswith('.zip'):
directories.add(directory[:-4], path)
return directories
|
Names and paths of the available working directories.
Available working directories are those being stored in the
base directory of the respective |FileManager| subclass.
Folders with names starting with an underscore are ignored
(use this for directories handling additional data files,
if you like). Zipped directories, which can be unpacked
on the fly, do also count as available directories:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> import os
>>> from hydpy import repr_, TestIO
>>> TestIO.clear()
>>> with TestIO():
... os.makedirs('projectname/basename/folder1')
... os.makedirs('projectname/basename/folder2')
... open('projectname/basename/folder3.zip', 'w').close()
... os.makedirs('projectname/basename/_folder4')
... open('projectname/basename/folder5.tar', 'w').close()
... filemanager.availabledirs # doctest: +ELLIPSIS
Folder2Path(folder1=.../projectname/basename/folder1,
folder2=.../projectname/basename/folder2,
folder3=.../projectname/basename/folder3.zip)
|
entailment
|
def currentdir(self) -> str:
"""Name of the current working directory containing the relevant files.
To show most of the functionality of |property|
|FileManager.currentdir| (unpacking zip files on the fly is
explained in the documentation on function
|FileManager.zip_currentdir|), we first prepare a |FileManager|
object corresponding to the |FileManager.basepath|
`projectname/basename`:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> import os
>>> from hydpy import repr_, TestIO
>>> TestIO.clear()
>>> with TestIO():
... os.makedirs('projectname/basename')
... repr_(filemanager.basepath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename'
At first, the base directory is empty and asking for the
current working directory results in the following error:
>>> with TestIO():
... filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: The current working directory of the FileManager object \
has not been defined manually and cannot be determined automatically: \
`.../projectname/basename` does not contain any available directories.
If only one directory exists, it is considered as the current
working directory automatically:
>>> with TestIO():
... os.mkdir('projectname/basename/dir1')
... filemanager.currentdir
'dir1'
|property| |FileManager.currentdir| memorises the name of the
current working directory, even if another directory is later
added to the base path:
>>> with TestIO():
... os.mkdir('projectname/basename/dir2')
... filemanager.currentdir
'dir1'
Set the value of |FileManager.currentdir| to |None| to let it
forget the memorised directory. After that, asking for the
current working directory now results in another error, as
it is not clear which directory to select:
>>> with TestIO():
... filemanager.currentdir = None
... filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: The current working directory of the FileManager object \
has not been defined manually and cannot be determined automatically: \
`....../projectname/basename` does contain multiple available directories \
(dir1 and dir2).
Setting |FileManager.currentdir| manually solves the problem:
>>> with TestIO():
... filemanager.currentdir = 'dir1'
... filemanager.currentdir
'dir1'
Remove the current working directory `dir1` with the `del` statement:
>>> with TestIO():
... del filemanager.currentdir
... os.path.exists('projectname/basename/dir1')
False
|FileManager| subclasses can define a default directory name.
When many directories exist and none is selected manually, the
default directory is selected automatically. The following
example shows an error message due to multiple directories
without any having the default name:
>>> with TestIO():
... os.mkdir('projectname/basename/dir1')
... filemanager.DEFAULTDIR = 'dir3'
... del filemanager.currentdir
... filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: The current working directory of the FileManager object \
has not been defined manually and cannot be determined automatically: The \
default directory (dir3) is not among the available directories (dir1 and dir2).
We can fix this by adding the required default directory manually:
>>> with TestIO():
... os.mkdir('projectname/basename/dir3')
... filemanager.currentdir
'dir3'
Setting the |FileManager.currentdir| to `dir4` not only overwrites
the default name, but also creates the required folder:
>>> with TestIO():
... filemanager.currentdir = 'dir4'
... filemanager.currentdir
'dir4'
>>> with TestIO():
... sorted(os.listdir('projectname/basename'))
['dir1', 'dir2', 'dir3', 'dir4']
Failed attempts to remove directories result in error messages
like the following one:
>>> import shutil
>>> from unittest.mock import patch
>>> with patch.object(shutil, 'rmtree', side_effect=AttributeError):
... with TestIO():
... del filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: While trying to delete the current working directory \
`.../projectname/basename/dir4` of the FileManager object, the following \
error occurred: ...
Then, the current working directory still exists and is remembered
by |FileManager.currentdir|:
>>> with TestIO():
... filemanager.currentdir
'dir4'
>>> with TestIO():
... sorted(os.listdir('projectname/basename'))
['dir1', 'dir2', 'dir3', 'dir4']
"""
if self._currentdir is None:
directories = self.availabledirs.folders
if len(directories) == 1:
self.currentdir = directories[0]
elif self.DEFAULTDIR in directories:
self.currentdir = self.DEFAULTDIR
else:
prefix = (f'The current working directory of the '
f'{objecttools.classname(self)} object '
f'has not been defined manually and cannot '
f'be determined automatically:')
if not directories:
raise RuntimeError(
f'{prefix} `{objecttools.repr_(self.basepath)}` '
f'does not contain any available directories.')
if self.DEFAULTDIR is None:
raise RuntimeError(
f'{prefix} `{objecttools.repr_(self.basepath)}` '
f'does contain multiple available directories '
f'({objecttools.enumeration(directories)}).')
raise RuntimeError(
f'{prefix} The default directory ({self.DEFAULTDIR}) '
f'is not among the available directories '
f'({objecttools.enumeration(directories)}).')
return self._currentdir
|
Name of the current working directory containing the relevant files.
To show most of the functionality of |property|
|FileManager.currentdir| (unpacking zip files on the fly is
explained in the documentation on function
|FileManager.zip_currentdir|), we first prepare a |FileManager|
object corresponding to the |FileManager.basepath|
`projectname/basename`:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> import os
>>> from hydpy import repr_, TestIO
>>> TestIO.clear()
>>> with TestIO():
... os.makedirs('projectname/basename')
... repr_(filemanager.basepath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename'
At first, the base directory is empty and asking for the
current working directory results in the following error:
>>> with TestIO():
... filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: The current working directory of the FileManager object \
has not been defined manually and cannot be determined automatically: \
`.../projectname/basename` does not contain any available directories.
If only one directory exists, it is considered as the current
working directory automatically:
>>> with TestIO():
... os.mkdir('projectname/basename/dir1')
... filemanager.currentdir
'dir1'
|property| |FileManager.currentdir| memorises the name of the
current working directory, even if another directory is later
added to the base path:
>>> with TestIO():
... os.mkdir('projectname/basename/dir2')
... filemanager.currentdir
'dir1'
Set the value of |FileManager.currentdir| to |None| to let it
forget the memorised directory. After that, asking for the
current working directory now results in another error, as
it is not clear which directory to select:
>>> with TestIO():
... filemanager.currentdir = None
... filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: The current working directory of the FileManager object \
has not been defined manually and cannot be determined automatically: \
`....../projectname/basename` does contain multiple available directories \
(dir1 and dir2).
Setting |FileManager.currentdir| manually solves the problem:
>>> with TestIO():
... filemanager.currentdir = 'dir1'
... filemanager.currentdir
'dir1'
Remove the current working directory `dir1` with the `del` statement:
>>> with TestIO():
... del filemanager.currentdir
... os.path.exists('projectname/basename/dir1')
False
|FileManager| subclasses can define a default directory name.
When many directories exist and none is selected manually, the
default directory is selected automatically. The following
example shows an error message due to multiple directories
without any having the default name:
>>> with TestIO():
... os.mkdir('projectname/basename/dir1')
... filemanager.DEFAULTDIR = 'dir3'
... del filemanager.currentdir
... filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
RuntimeError: The current working directory of the FileManager object \
has not been defined manually and cannot be determined automatically: The \
default directory (dir3) is not among the available directories (dir1 and dir2).
We can fix this by adding the required default directory manually:
>>> with TestIO():
... os.mkdir('projectname/basename/dir3')
... filemanager.currentdir
'dir3'
Setting the |FileManager.currentdir| to `dir4` not only overwrites
the default name, but also creates the required folder:
>>> with TestIO():
... filemanager.currentdir = 'dir4'
... filemanager.currentdir
'dir4'
>>> with TestIO():
... sorted(os.listdir('projectname/basename'))
['dir1', 'dir2', 'dir3', 'dir4']
Failed attempts to remove directories result in error messages
like the following one:
>>> import shutil
>>> from unittest.mock import patch
>>> with patch.object(shutil, 'rmtree', side_effect=AttributeError):
... with TestIO():
... del filemanager.currentdir # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: While trying to delete the current working directory \
`.../projectname/basename/dir4` of the FileManager object, the following \
error occurred: ...
Then, the current working directory still exists and is remembered
by |FileManager.currentdir|:
>>> with TestIO():
... filemanager.currentdir
'dir4'
>>> with TestIO():
... sorted(os.listdir('projectname/basename'))
['dir1', 'dir2', 'dir3', 'dir4']
|
entailment
|
def currentpath(self) -> str:
"""Absolute path of the current working directory.
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import repr_, TestIO
>>> with TestIO():
... filemanager.currentdir = 'testdir'
... repr_(filemanager.currentpath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename/testdir'
"""
return os.path.join(self.basepath, self.currentdir)
|
Absolute path of the current working directory.
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import repr_, TestIO
>>> with TestIO():
... filemanager.currentdir = 'testdir'
... repr_(filemanager.currentpath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename/testdir'
|
entailment
|
def filenames(self) -> List[str]:
"""Names of the files contained in the the current working directory.
Files names starting with underscores are ignored:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import TestIO
>>> with TestIO():
... filemanager.currentdir = 'testdir'
... open('projectname/basename/testdir/file1.txt', 'w').close()
... open('projectname/basename/testdir/file2.npy', 'w').close()
... open('projectname/basename/testdir/_file1.nc', 'w').close()
... filemanager.filenames
['file1.txt', 'file2.npy']
"""
return sorted(
fn for fn in os.listdir(self.currentpath)
if not fn.startswith('_'))
|
Names of the files contained in the current working directory.
File names starting with underscores are ignored:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import TestIO
>>> with TestIO():
... filemanager.currentdir = 'testdir'
... open('projectname/basename/testdir/file1.txt', 'w').close()
... open('projectname/basename/testdir/file2.npy', 'w').close()
... open('projectname/basename/testdir/_file1.nc', 'w').close()
... filemanager.filenames
['file1.txt', 'file2.npy']
|
entailment
|
def filepaths(self) -> List[str]:
"""Absolute path names of the files contained in the current
working directory.
File names starting with underscores are ignored:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import repr_, TestIO
>>> with TestIO():
... filemanager.currentdir = 'testdir'
... open('projectname/basename/testdir/file1.txt', 'w').close()
... open('projectname/basename/testdir/file2.npy', 'w').close()
... open('projectname/basename/testdir/_file1.nc', 'w').close()
... for filepath in filemanager.filepaths:
... repr_(filepath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename/testdir/file1.txt'
'...hydpy/tests/iotesting/projectname/basename/testdir/file2.npy'
"""
path = self.currentpath
return [os.path.join(path, name) for name in self.filenames]
|
Absolute path names of the files contained in the current
working directory.
File names starting with underscores are ignored:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import repr_, TestIO
>>> with TestIO():
... filemanager.currentdir = 'testdir'
... open('projectname/basename/testdir/file1.txt', 'w').close()
... open('projectname/basename/testdir/file2.npy', 'w').close()
... open('projectname/basename/testdir/_file1.nc', 'w').close()
... for filepath in filemanager.filepaths:
... repr_(filepath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename/testdir/file1.txt'
'...hydpy/tests/iotesting/projectname/basename/testdir/file2.npy'
|
entailment
|
def zip_currentdir(self) -> None:
"""Pack the current working directory in a `zip` file.
|FileManager| subclasses allow for manual packing and automatic
unpacking of working directories. The only supported format is `zip`.
To avoid possible inconsistencies, origin directories and zip
files are removed after packing or unpacking, respectively.
As an example scenario, we prepare a |FileManager| object with
the current working directory `folder` containing the files
`file1.txt` and `file2.txt`:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> import os
>>> from hydpy import repr_, TestIO
>>> TestIO.clear()
>>> basepath = 'projectname/basename'
>>> with TestIO():
... os.makedirs(basepath)
... filemanager.currentdir = 'folder'
... open(f'{basepath}/folder/file1.txt', 'w').close()
... open(f'{basepath}/folder/file2.txt', 'w').close()
... filemanager.filenames
['file1.txt', 'file2.txt']
The directories existing under the base path are identical
with the ones returned by property |FileManager.availabledirs|:
>>> with TestIO():
... sorted(os.listdir(basepath))
... filemanager.availabledirs # doctest: +ELLIPSIS
['folder']
Folder2Path(folder=.../projectname/basename/folder)
After packing the current working directory manually, it is
still counted as an available directory:
>>> with TestIO():
... filemanager.zip_currentdir()
... sorted(os.listdir(basepath))
... filemanager.availabledirs # doctest: +ELLIPSIS
['folder.zip']
Folder2Path(folder=.../projectname/basename/folder.zip)
Instead of the complete directory, only the contained files
are packed:
>>> from zipfile import ZipFile
>>> with TestIO():
... with ZipFile('projectname/basename/folder.zip', 'r') as zp:
... sorted(zp.namelist())
['file1.txt', 'file2.txt']
The zip file is unpacked again, as soon as `folder` becomes
the current working directory:
>>> with TestIO():
... filemanager.currentdir = 'folder'
... sorted(os.listdir(basepath))
... filemanager.availabledirs
... filemanager.filenames # doctest: +ELLIPSIS
['folder']
Folder2Path(folder=.../projectname/basename/folder)
['file1.txt', 'file2.txt']
"""
with zipfile.ZipFile(f'{self.currentpath}.zip', 'w') as zipfile_:
for filepath, filename in zip(self.filepaths, self.filenames):
zipfile_.write(filename=filepath, arcname=filename)
del self.currentdir
|
Pack the current working directory in a `zip` file.
|FileManager| subclasses allow for manual packing and automatic
unpacking of working directories. The only supported format is `zip`.
To avoid possible inconsistencies, origin directories and zip
files are removed after packing or unpacking, respectively.
As an example scenario, we prepare a |FileManager| object with
the current working directory `folder` containing the files
`file1.txt` and `file2.txt`:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> import os
>>> from hydpy import repr_, TestIO
>>> TestIO.clear()
>>> basepath = 'projectname/basename'
>>> with TestIO():
... os.makedirs(basepath)
... filemanager.currentdir = 'folder'
... open(f'{basepath}/folder/file1.txt', 'w').close()
... open(f'{basepath}/folder/file2.txt', 'w').close()
... filemanager.filenames
['file1.txt', 'file2.txt']
The directories existing under the base path are identical
with the ones returned by property |FileManager.availabledirs|:
>>> with TestIO():
... sorted(os.listdir(basepath))
... filemanager.availabledirs # doctest: +ELLIPSIS
['folder']
Folder2Path(folder=.../projectname/basename/folder)
After packing the current working directory manually, it is
still counted as an available directory:
>>> with TestIO():
... filemanager.zip_currentdir()
... sorted(os.listdir(basepath))
... filemanager.availabledirs # doctest: +ELLIPSIS
['folder.zip']
Folder2Path(folder=.../projectname/basename/folder.zip)
Instead of the complete directory, only the contained files
are packed:
>>> from zipfile import ZipFile
>>> with TestIO():
... with ZipFile('projectname/basename/folder.zip', 'r') as zp:
... sorted(zp.namelist())
['file1.txt', 'file2.txt']
The zip file is unpacked again, as soon as `folder` becomes
the current working directory:
>>> with TestIO():
... filemanager.currentdir = 'folder'
... sorted(os.listdir(basepath))
... filemanager.availabledirs
... filemanager.filenames # doctest: +ELLIPSIS
['folder']
Folder2Path(folder=.../projectname/basename/folder)
['file1.txt', 'file2.txt']
|
entailment
|
def load_files(self) -> selectiontools.Selections:
"""Read all network files of the current working directory, structure
their contents in a |selectiontools.Selections| object, and return it.
"""
devicetools.Node.clear_all()
devicetools.Element.clear_all()
selections = selectiontools.Selections()
for (filename, path) in zip(self.filenames, self.filepaths):
# Ensure both `Node` and `Element` start with a `fresh` memory.
devicetools.Node.extract_new()
devicetools.Element.extract_new()
try:
info = runpy.run_path(path)
except BaseException:
objecttools.augment_excmessage(
f'While trying to load the network file `{path}`')
try:
node: devicetools.Node = info['Node']
element: devicetools.Element = info['Element']
selections += selectiontools.Selection(
filename.split('.')[0],
node.extract_new(),
element.extract_new())
except KeyError as exc:
raise RuntimeError(
f'The class {exc.args[0]} cannot be loaded from the '
f'network file `{path}`.')
selections += selectiontools.Selection(
'complete',
info['Node'].query_all(),
info['Element'].query_all())
return selections
|
Read all network files of the current working directory, structure
their contents in a |selectiontools.Selections| object, and return it.
|
entailment
|
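A minimal usage sketch for the `load_files` method above. It assumes that the
method belongs to HydPy's `NetworkManager` class in `hydpy.core.filetools` and
that this manager's base directory is named `network`; the project layout and
the content of the network file are purely illustrative:

import os
from hydpy import TestIO
from hydpy.core import filetools

with TestIO():
    manager = filetools.NetworkManager()   # assumed manager class
    manager.projectdir = 'projectname'
    os.makedirs('projectname/network/default', exist_ok=True)
    # A tiny network file defining one node and one element:
    with open('projectname/network/default/headwaters.py', 'w') as file_:
        file_.write('from hydpy import Node, Element\n'
                    'Node("node1")\n'
                    'Element("land_a", outlets="node1")\n')
    # One selection per file plus the automatically added `complete` selection:
    for selection in manager.load_files():
        print(selection.name)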
def save_files(self, selections) -> None:
"""Save the |Selection| objects contained in the given |Selections|
instance to separate network files."""
try:
currentpath = self.currentpath
selections = selectiontools.Selections(selections)
for selection in selections:
if selection.name == 'complete':
continue
path = os.path.join(currentpath, selection.name+'.py')
selection.save_networkfile(filepath=path)
except BaseException:
objecttools.augment_excmessage(
'While trying to save selections `%s` into network files'
% selections)
|
Save the |Selection| objects contained in the given |Selections|
instance to separate network files.
|
entailment
|
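A complementary sketch for `save_files`, reusing the reading example above and
relying on the same assumptions (`NetworkManager` with base directory
`network`); the second working directory `calibration` is arbitrary:

import os
from hydpy import TestIO
from hydpy.core import filetools

with TestIO():
    manager = filetools.NetworkManager()   # assumed manager class
    manager.projectdir = 'projectname'
    os.makedirs('projectname/network/default', exist_ok=True)
    with open('projectname/network/default/headwaters.py', 'w') as file_:
        file_.write('from hydpy import Node, Element\n'
                    'Node("node1")\n'
                    'Element("land_a", outlets="node1")\n')
    selections = manager.load_files()
    # Write the loaded selections into a second working directory; the
    # `complete` selection is skipped by `save_files` itself:
    manager.currentdir = 'calibration'
    manager.save_files(selections)
    print(manager.filenames)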
def delete_files(self, selections) -> None:
"""Delete the network files corresponding to the given selections
(e.g. a |list| of |str| objects or a |Selections| object)."""
try:
currentpath = self.currentpath
for selection in selections:
name = str(selection)
if name == 'complete':
continue
if not name.endswith('.py'):
name += '.py'
path = os.path.join(currentpath, name)
os.remove(path)
except BaseException:
objecttools.augment_excmessage(
f'While trying to remove the network files of '
f'selections `{selections}`')
|
Delete the network files corresponding to the given selections
(e.g. a |list| of |str| objects or a |Selections| object).
|
entailment
|
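A short sketch for `delete_files`, which also accepts plain selection names;
`NetworkManager` and its `network` base directory are again assumptions:

import os
from hydpy import TestIO
from hydpy.core import filetools

with TestIO():
    manager = filetools.NetworkManager()   # assumed manager class
    manager.projectdir = 'projectname'
    os.makedirs('projectname/network/default', exist_ok=True)
    open('projectname/network/default/headwaters.py', 'w').close()
    manager.delete_files(['headwaters'])   # `.py` is appended automatically
    print(os.listdir('projectname/network/default'))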
def load_file(self, element=None, filename=None, clear_registry=True):
"""Return the namespace of the given file (and eventually of its
corresponding auxiliary subfiles) as a |dict|.
By default, the internal registry is cleared when a control file and
all its corresponding auxiliary files have been loaded. You can
change this behaviour by passing `False` for the `clear_registry`
argument. This might decrease model initialization times
significantly. But then it is your own responsibility to call
method |ControlManager.clear_registry| when necessary (before
reloading a changed control file).
"""
if not filename:
filename = element.name
type(self)._workingpath = self.currentpath
info = {}
if element:
info['element'] = element
try:
self.read2dict(filename, info)
finally:
type(self)._workingpath = '.'
if clear_registry:
self._registry.clear()
return info
|
Return the namespace of the given file (and possibly of its
corresponding auxiliary subfiles) as a |dict|.
By default, the internal registry is cleared when a control file and
all its corresponding auxiliary files have been loaded. You can
change this behaviour by passing `False` for the `clear_registry`
argument. This might decrease model initialization times
significantly. But then it is your own responsibility to call
method |ControlManager.clear_registry| when necessary (before
reloading a changed control file).
|
entailment
|
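A sketch of calling `load_file` with an explicit file name. It assumes that
the method belongs to HydPy's `ControlManager` with base directory `control`;
the control file below is a dummy that merely defines a `model` name so that
the consistency check of `read2dict` passes:

import os
from hydpy import TestIO
from hydpy.core import filetools

with TestIO():
    manager = filetools.ControlManager()   # assumed manager class
    manager.projectdir = 'projectname'
    os.makedirs('projectname/control', exist_ok=True)
    manager.currentdir = 'default'         # the setter creates the folder
    with open('projectname/control/default/land_a.py', 'w') as file_:
        file_.write('model = None   # dummy; real files build a Model object\n')
    info = manager.load_file(filename='land_a')
    print(sorted(info))                    # the returned namespace, e.g. ['model']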
def read2dict(cls, filename, info):
"""Read the control parameters from the given path (and its
auxiliary paths, where appropriate) and store them in the given
|dict| object `info`.
Note that the |dict| `info` can be used to feed information
into the execution of control files. Use this method only if you
are completely sure how the control parameter import of HydPy
works. Otherwise, you should most probably prefer to use
|ControlManager.load_file|.
"""
if not filename.endswith('.py'):
filename += '.py'
path = os.path.join(cls._workingpath, filename)
try:
if path not in cls._registry:
with open(path) as file_:
cls._registry[path] = file_.read()
exec(cls._registry[path], {}, info)
except BaseException:
objecttools.augment_excmessage(
'While trying to load the control file `%s`'
% path)
if 'model' not in info:
raise IOError(
'Model parameters cannot be loaded from control file `%s`. '
'Please refer to the HydPy documentation on how to prepare '
'control files properly.'
% path)
|
Read the control parameters from the given path (and its
auxiliary paths, where appropriate) and store them in the given
|dict| object `info`.
Note that the |dict| `info` can be used to feed information
into the execution of control files. Use this method only if you
are completely sure how the control parameter import of HydPy
works. Otherwise, you should most probably prefer to use
|ControlManager.load_file|.
|
entailment
|
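A sketch showing how `read2dict` can feed outside information into the
execution of a control file, as the docstring above mentions. Setting the
private `_workingpath` attribute directly is for demonstration only, and it is
assumed that `read2dict` is a classmethod of `ControlManager`:

import os
from hydpy import TestIO
from hydpy.core import filetools

with TestIO():
    os.makedirs('controlfiles', exist_ok=True)
    with open('controlfiles/dummy.py', 'w') as file_:
        # The control file can use names injected via the `info` dict:
        file_.write('model = 2 * factor\n')
    filetools.ControlManager._workingpath = 'controlfiles'
    info = {'factor': 21}
    filetools.ControlManager.read2dict('dummy', info)
    print(info['model'])   # -> 42
    filetools.ControlManager._workingpath = '.'   # restore the default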
def save_file(self, filename, text):
"""Save the given text under the given control filename and the
current path."""
if not filename.endswith('.py'):
filename += '.py'
path = os.path.join(self.currentpath, filename)
with open(path, 'w', encoding="utf-8") as file_:
file_.write(text)
|
Save the given text under the given control filename and the
current path.
|
entailment
|
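A minimal sketch for the control file writer above, relying on the same
`ControlManager` assumptions; the written text is only stored, not executed:

import os
from hydpy import TestIO
from hydpy.core import filetools

with TestIO():
    manager = filetools.ControlManager()   # assumed manager class
    manager.projectdir = 'projectname'
    os.makedirs('projectname/control', exist_ok=True)
    manager.currentdir = 'default'
    text = "from hydpy.models.lland_v1 import *\nparameterstep('1d')\n"
    manager.save_file('land_a', text)      # `.py` is appended automatically
    print(manager.filenames)               # -> ['land_a.py']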
def load_file(self, filename):
"""Read and return the content of the given file.
If the current directory is not defined explicitly, the directory
name is constructed from the actual simulation start date. If
such a directory does not exist, it is created immediately.
"""
_defaultdir = self.DEFAULTDIR
try:
if not filename.endswith('.py'):
filename += '.py'
try:
self.DEFAULTDIR = (
'init_' + hydpy.pub.timegrids.sim.firstdate.to_string('os'))
except KeyError:
pass
filepath = os.path.join(self.currentpath, filename)
with open(filepath) as file_:
return file_.read()
except BaseException:
objecttools.augment_excmessage(
'While trying to read the conditions file `%s`'
% filename)
finally:
self.DEFAULTDIR = _defaultdir
|
Read and return the content of the given file.
If the current directory is not defined explicitly, the directory
name is constructed from the actual simulation start date. If
such a directory does not exist, it is created immediately.
|
entailment
|
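A sketch for reading a condition file. `ConditionManager` and its base
directory `conditions` are assumptions; `pub.timegrids` is defined because the
method derives its default directory name from the simulation start date, and
the working directory is chosen manually so this naming is not relied upon:

import os
from hydpy import pub, TestIO
from hydpy.core import filetools

with TestIO():
    pub.timegrids = '01.01.2000', '02.01.2000', '1d'
    manager = filetools.ConditionManager()   # assumed manager class
    manager.projectdir = 'projectname'
    os.makedirs('projectname/conditions', exist_ok=True)
    manager.currentdir = 'init_manual'       # arbitrary name for this sketch
    with open('projectname/conditions/init_manual/land_a.py', 'w') as file_:
        file_.write('# initial conditions of element land_a\n')
    print(manager.load_file('land_a'))
    del pub.timegrids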
def save_file(self, filename, text):
"""Save the given text under the given condition filename and the
current path.
If the current directory is not defined explicitly, the directory
name is constructed from the actual simulation end date. If
such a directory does not exist, it is created immediately.
"""
_defaultdir = self.DEFAULTDIR
try:
if not filename.endswith('.py'):
filename += '.py'
try:
self.DEFAULTDIR = (
'init_' + hydpy.pub.timegrids.sim.lastdate.to_string('os'))
except AttributeError:
pass
path = os.path.join(self.currentpath, filename)
with open(path, 'w', encoding="utf-8") as file_:
file_.write(text)
except BaseException:
objecttools.augment_excmessage(
'While trying to write the conditions file `%s`'
% filename)
finally:
self.DEFAULTDIR = _defaultdir
|
Save the given text under the given condition filename and the
current path.
If the current directory is not defined explicitly, the directory
name is constructed from the actual simulation end date. If
such a directory does not exist, it is created immediately.
|
entailment
|
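The matching writer sketch, with the same assumptions as the reading sketch
above; again, the working directory is set manually so that the default
naming via the simulation end date is not required:

import os
from hydpy import pub, TestIO
from hydpy.core import filetools

with TestIO():
    pub.timegrids = '01.01.2000', '02.01.2000', '1d'
    manager = filetools.ConditionManager()   # assumed manager class
    manager.projectdir = 'projectname'
    os.makedirs('projectname/conditions', exist_ok=True)
    manager.currentdir = 'init_manual'
    manager.save_file('land_a', '# final conditions of element land_a\n')
    print(manager.filenames)                 # -> ['land_a.py']
    del pub.timegrids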
def load_file(self, sequence):
"""Load data from an "external" data file an pass it to
the given |IOSequence|."""
try:
if sequence.filetype_ext == 'npy':
sequence.series = sequence.adjust_series(
*self._load_npy(sequence))
elif sequence.filetype_ext == 'asc':
sequence.series = sequence.adjust_series(
*self._load_asc(sequence))
elif sequence.filetype_ext == 'nc':
self._load_nc(sequence)
except BaseException:
objecttools.augment_excmessage(
'While trying to load the external data of sequence %s'
% objecttools.devicephrase(sequence))
|
Load data from an "external" data file and pass it to
the given |IOSequence|.
|
entailment
|
def save_file(self, sequence, array=None):
"""Write the date stored in |IOSequence.series| of the given
|IOSequence| into an "external" data file. """
if array is None:
array = sequence.aggregate_series()
try:
if sequence.filetype_ext == 'nc':
self._save_nc(sequence, array)
else:
filepath = sequence.filepath_ext
if ((array is not None) and
(array.info['type'] != 'unmodified')):
filepath = (f'{filepath[:-4]}_{array.info["type"]}'
f'{filepath[-4:]}')
if not sequence.overwrite_ext and os.path.exists(filepath):
raise OSError(
f'Sequence {objecttools.devicephrase(sequence)} '
f'is not allowed to overwrite the existing file '
f'`{sequence.filepath_ext}`.')
if sequence.filetype_ext == 'npy':
self._save_npy(array, filepath)
elif sequence.filetype_ext == 'asc':
self._save_asc(array, filepath)
except BaseException:
objecttools.augment_excmessage(
'While trying to save the external data of sequence %s'
% objecttools.devicephrase(sequence))
|
Write the data stored in |IOSequence.series| of the given
|IOSequence| into an "external" data file.
|
entailment
|
def open_netcdf_reader(self, flatten=False, isolate=False, timeaxis=1):
"""Prepare a new |NetCDFInterface| object for reading data."""
self._netcdf_reader = netcdftools.NetCDFInterface(
flatten=bool(flatten),
isolate=bool(isolate),
timeaxis=int(timeaxis))
|
Prepare a new |NetCDFInterface| object for reading data.
|
entailment
|
def open_netcdf_writer(self, flatten=False, isolate=False, timeaxis=1):
"""Prepare a new |NetCDFInterface| object for writing data."""
self._netcdf_writer = netcdftools.NetCDFInterface(
flatten=bool(flatten),
isolate=bool(isolate),
timeaxis=int(timeaxis))
|
Prepare a new |NetCDFInterface| object for writing data.
|
entailment
|
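Both NetCDF helpers above only configure a fresh `NetCDFInterface` object with
the given options. A minimal sketch, assuming the methods belong to HydPy's
`SequenceManager` and that NetCDF support is installed:

from hydpy.core import filetools

manager = filetools.SequenceManager()            # assumed manager class
manager.open_netcdf_writer(flatten=True, isolate=True, timeaxis=0)
# Subsequent `save_file` calls for sequences with the `nc` file type are now
# routed to the internal interface stored in `_netcdf_writer`.
manager.open_netcdf_reader(flatten=True, isolate=True, timeaxis=0)
# Likewise, `load_file` calls for `nc` sequences use `_netcdf_reader`.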
def calc_nkor_v1(self):
"""Adjust the given precipitation values.
Required control parameters:
|NHRU|
|KG|
Required input sequence:
|Nied|
Calculated flux sequence:
|NKor|
Basic equation:
:math:`NKor = KG \\cdot Nied`
Example:
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(3)
>>> kg(0.8, 1.0, 1.2)
>>> inputs.nied = 10.0
>>> model.calc_nkor_v1()
>>> fluxes.nkor
nkor(8.0, 10.0, 12.0)
"""
con = self.parameters.control.fastaccess
inp = self.sequences.inputs.fastaccess
flu = self.sequences.fluxes.fastaccess
for k in range(con.nhru):
flu.nkor[k] = con.kg[k] * inp.nied
|
Adjust the given precipitation values.
Required control parameters:
|NHRU|
|KG|
Required input sequence:
|Nied|
Calculated flux sequence:
|NKor|
Basic equation:
:math:`NKor = KG \\cdot Nied`
Example:
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(3)
>>> kg(0.8, 1.0, 1.2)
>>> inputs.nied = 10.0
>>> model.calc_nkor_v1()
>>> fluxes.nkor
nkor(8.0, 10.0, 12.0)
|
entailment
|
def calc_tkor_v1(self):
"""Adjust the given air temperature values.
Required control parameters:
|NHRU|
|KT|
Required input sequence:
|TemL|
Calculated flux sequence:
|TKor|
Basic equation:
:math:`TKor = KT + TemL`
Example:
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(3)
>>> kt(-2.0, 0.0, 2.0)
>>> inputs.teml(1.)
>>> model.calc_tkor_v1()
>>> fluxes.tkor
tkor(-1.0, 1.0, 3.0)
"""
con = self.parameters.control.fastaccess
inp = self.sequences.inputs.fastaccess
flu = self.sequences.fluxes.fastaccess
for k in range(con.nhru):
flu.tkor[k] = con.kt[k] + inp.teml
|
Adjust the given air temperature values.
Required control parameters:
|NHRU|
|KT|
Required input sequence:
|TemL|
Calculated flux sequence:
|TKor|
Basic equation:
:math:`TKor = KT + TemL`
Example:
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(3)
>>> kt(-2.0, 0.0, 2.0)
>>> inputs.teml(1.)
>>> model.calc_tkor_v1()
>>> fluxes.tkor
tkor(-1.0, 1.0, 3.0)
|
entailment
|
def calc_et0_v1(self):
"""Calculate reference evapotranspiration after Turc-Wendling.
Required control parameters:
|NHRU|
|KE|
|KF|
|HNN|
Required input sequence:
|Glob|
Required flux sequence:
|TKor|
Calculated flux sequence:
|ET0|
Basic equation:
:math:`ET0 = KE \\cdot
\\frac{(8.64 \\cdot Glob+93 \\cdot KF) \\cdot (TKor+22)}
{165 \\cdot (TKor+123) \\cdot (1 + 0.00019 \\cdot min(HNN, 600))}`
Example:
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> simulationstep('12h')
>>> nhru(3)
>>> ke(1.1)
>>> kf(0.6)
>>> hnn(200.0, 600.0, 1000.0)
>>> inputs.glob = 200.0
>>> fluxes.tkor = 15.0
>>> model.calc_et0_v1()
>>> fluxes.et0
et0(3.07171, 2.86215, 2.86215)
"""
con = self.parameters.control.fastaccess
inp = self.sequences.inputs.fastaccess
flu = self.sequences.fluxes.fastaccess
for k in range(con.nhru):
flu.et0[k] = (con.ke[k]*(((8.64*inp.glob+93.*con.kf[k]) *
(flu.tkor[k]+22.)) /
(165.*(flu.tkor[k]+123.) *
(1.+0.00019*min(con.hnn[k], 600.)))))
|
Calculate reference evapotranspiration after Turc-Wendling.
Required control parameters:
|NHRU|
|KE|
|KF|
|HNN|
Required input sequence:
|Glob|
Required flux sequence:
|TKor|
Calculated flux sequence:
|ET0|
Basic equation:
:math:`ET0 = KE \\cdot
\\frac{(8.64 \\cdot Glob+93 \\cdot KF) \\cdot (TKor+22)}
{165 \\cdot (TKor+123) \\cdot (1 + 0.00019 \\cdot min(HNN, 600))}`
Example:
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> simulationstep('12h')
>>> nhru(3)
>>> ke(1.1)
>>> kf(0.6)
>>> hnn(200.0, 600.0, 1000.0)
>>> inputs.glob = 200.0
>>> fluxes.tkor = 15.0
>>> model.calc_et0_v1()
>>> fluxes.et0
et0(3.07171, 2.86215, 2.86215)
|
entailment
|
def calc_et0_wet0_v1(self):
"""Correct the given reference evapotranspiration and update the
corresponding log sequence.
Required control parameters:
|NHRU|
|KE|
|WfET0|
Required input sequence:
|PET|
Calculated flux sequence:
|ET0|
Updated log sequence:
|WET0|
Basic equations:
:math:`ET0_{new} = WfET0 \\cdot KE \\cdot PET +
(1-WfET0) \\cdot ET0_{alt}`
Example:
Prepare four hydrological response units with different value
combinations of parameters |KE| and |WfET0|:
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> simulationstep('12h')
>>> nhru(4)
>>> ke(0.8, 1.2, 0.8, 1.2)
>>> wfet0(2.0, 2.0, 0.2, 0.2)
Note that the actual value of the time-dependent parameter |WfET0|
is reduced due to the difference between the given parameter and
simulation time steps:
>>> from hydpy import round_
>>> round_(wfet0.values)
1.0, 1.0, 0.1, 0.1
For the first two hydrological response units, the given |PET|
value is modified by -0.4 mm and +0.4 mm, respectively. For the
other two response units, which weight the "new" evaporation
value with 10 %, |ET0| deviates from the old value of |WET0|
by only -0.04 mm and +0.04 mm:
>>> inputs.pet = 2.0
>>> logs.wet0 = 2.0
>>> model.calc_et0_wet0_v1()
>>> fluxes.et0
et0(1.6, 2.4, 1.96, 2.04)
>>> logs.wet0
wet0([[1.6, 2.4, 1.96, 2.04]])
"""
con = self.parameters.control.fastaccess
inp = self.sequences.inputs.fastaccess
flu = self.sequences.fluxes.fastaccess
log = self.sequences.logs.fastaccess
for k in range(con.nhru):
flu.et0[k] = (con.wfet0[k]*con.ke[k]*inp.pet +
(1.-con.wfet0[k])*log.wet0[0, k])
log.wet0[0, k] = flu.et0[k]
|
Correct the given reference evapotranspiration and update the
corresponding log sequence.
Required control parameters:
|NHRU|
|KE|
|WfET0|
Required input sequence:
|PET|
Calculated flux sequence:
|ET0|
Updated log sequence:
|WET0|
Basic equations:
:math:`ET0_{new} = WfET0 \\cdot KE \\cdot PET +
(1-WfET0) \\cdot ET0_{alt}`
Example:
Prepare four hydrological response units with different value
combinations of parameters |KE| and |WfET0|:
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> simulationstep('12h')
>>> nhru(4)
>>> ke(0.8, 1.2, 0.8, 1.2)
>>> wfet0(2.0, 2.0, 0.2, 0.2)
Note that the actual value of the time-dependent parameter |WfET0|
is reduced due to the difference between the given parameter and
simulation time steps:
>>> from hydpy import round_
>>> round_(wfet0.values)
1.0, 1.0, 0.1, 0.1
For the first two hydrological response units, the given |PET|
value is modified by -0.4 mm and +0.4 mm, respectively. For the
other two response units, which weight the "new" evaporation
value with 10 %, |ET0| deviates from the old value of |WET0|
by only -0.04 mm and +0.04 mm:
>>> inputs.pet = 2.0
>>> logs.wet0 = 2.0
>>> model.calc_et0_wet0_v1()
>>> fluxes.et0
et0(1.6, 2.4, 1.96, 2.04)
>>> logs.wet0
wet0([[1.6, 2.4, 1.96, 2.04]])
|
entailment
|
def calc_evpo_v1(self):
"""Calculate land use and month specific values of potential
evapotranspiration.
Required control parameters:
|NHRU|
|Lnk|
|FLn|
Required derived parameter:
|MOY|
Required flux sequence:
|ET0|
Calculated flux sequence:
|EvPo|
Additional requirements:
|Model.idx_sim|
Basic equation:
:math:`EvPo = FLn \\cdot ET0`
Example:
For clarity, this is more of an integration example.
Parameter |FLn| both depends on time (the actual month) and space
(the actual land use). Firstly, let us define an initialization
time period spanning the transition from June to July:
>>> from hydpy import pub
>>> pub.timegrids = '30.06.2000', '02.07.2000', '1d'
Secondly, assume that the considered subbasin is differentiated into
two HRUs, one primarily consisting of arable land and the other
one of deciduous forests:
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(2)
>>> lnk(ACKER, LAUBW)
Thirdly, set the |FLn|
values, one for each of the relevant months and land use classes:
>>> fln.acker_jun = 1.299
>>> fln.acker_jul = 1.304
>>> fln.laubw_jun = 1.350
>>> fln.laubw_jul = 1.365
Fourthly, the index array connecting the simulation time steps
defined above and the month indexes (0...11) can be retrieved
from the |pub| module. This can be done manually, but it is more
convenient to use its update method:
>>> derived.moy.update()
>>> derived.moy
moy(5, 6)
Finally, the actual method (with its simple equation) is applied
as usual:
>>> fluxes.et0 = 2.0
>>> model.idx_sim = 0
>>> model.calc_evpo_v1()
>>> fluxes.evpo
evpo(2.598, 2.7)
>>> model.idx_sim = 1
>>> model.calc_evpo_v1()
>>> fluxes.evpo
evpo(2.608, 2.73)
Reset module |pub| to avoid interfering with the following examples:
>>> del pub.timegrids
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
for k in range(con.nhru):
flu.evpo[k] = con.fln[con.lnk[k]-1, der.moy[self.idx_sim]] * flu.et0[k]
|
Calculate land use and month specific values of potential
evapotranspiration.
Required control parameters:
|NHRU|
|Lnk|
|FLn|
Required derived parameter:
|MOY|
Required flux sequence:
|ET0|
Calculated flux sequence:
|EvPo|
Additional requirements:
|Model.idx_sim|
Basic equation:
:math:`EvPo = FLn \\cdot ET0`
Example:
For clarity, this is more of an integration example.
Parameter |FLn| both depends on time (the actual month) and space
(the actual land use). Firstly, let us define an initialization
time period spanning the transition from June to July:
>>> from hydpy import pub
>>> pub.timegrids = '30.06.2000', '02.07.2000', '1d'
Secondly, assume that the considered subbasin is differentiated into
two HRUs, one primarily consisting of arable land and the other
one of deciduous forests:
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(2)
>>> lnk(ACKER, LAUBW)
Thirdly, set the |FLn|
values, one for each of the relevant months and land use classes:
>>> fln.acker_jun = 1.299
>>> fln.acker_jul = 1.304
>>> fln.laubw_jun = 1.350
>>> fln.laubw_jul = 1.365
Fourthly, the index array connecting the simulation time steps
defined above and the month indexes (0...11) can be retrieved
from the |pub| module. This can be done manually, but it is more
convenient to use its update method:
>>> derived.moy.update()
>>> derived.moy
moy(5, 6)
Finally, the actual method (with its simple equation) is applied
as usual:
>>> fluxes.et0 = 2.0
>>> model.idx_sim = 0
>>> model.calc_evpo_v1()
>>> fluxes.evpo
evpo(2.598, 2.7)
>>> model.idx_sim = 1
>>> model.calc_evpo_v1()
>>> fluxes.evpo
evpo(2.608, 2.73)
Reset module |pub| to avoid interfering with the following examples:
>>> del pub.timegrids
|
entailment
|
def calc_nbes_inzp_v1(self):
"""Calculate stand precipitation and update the interception storage
accordingly.
Required control parameters:
|NHRU|
|Lnk|
Required derived parameter:
|KInz|
Required flux sequence:
|NKor|
Calculated flux sequence:
|NBes|
Updated state sequence:
|Inzp|
Additional requirements:
|Model.idx_sim|
Basic equation:
:math:`NBes = \\Bigl \\lbrace
{
{NKor \\ | \\ Inzp = KInz}
\\atop
{0 \\ | \\ Inzp < KInz}
}`
Examples:
Initialize five HRUs with different land usages:
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(5)
>>> lnk(SIED_D, FEUCHT, GLETS, FLUSS, SEE)
Define the |KInz| values of the selected land usages for July directly:
>>> derived.kinz.sied_d_jul = 2.0
>>> derived.kinz.feucht_jul = 1.0
>>> derived.kinz.glets_jul = 0.0
>>> derived.kinz.fluss_jul = 1.0
>>> derived.kinz.see_jul = 1.0
Now we prepare a |MOY| object that assumes that the first, second,
and third simulation time steps are in June, July, and August
respectively (we make use of the value defined above for July, but
setting the values of parameter |MOY| this way allows for a more
rigorous testing of proper indexing):
>>> derived.moy.shape = 3
>>> derived.moy = 5, 6, 7
>>> model.idx_sim = 1
The dense settlement (|SIED_D|) and the wetland area (|FEUCHT|) start
with an initial interception storage of 1/2 mm, the glacier (|GLETS|)
starts with 0 mm, and both water areas (|FLUSS| and |SEE|) start with
1 mm. In the first example, actual precipitation
is 1 mm:
>>> states.inzp = 0.5, 0.5, 0.0, 1.0, 1.0
>>> fluxes.nkor = 1.0
>>> model.calc_nbes_inzp_v1()
>>> states.inzp
inzp(1.5, 1.0, 0.0, 0.0, 0.0)
>>> fluxes.nbes
nbes(0.0, 0.5, 1.0, 0.0, 0.0)
Only for the settled area, interception capacity is not exceeded,
meaning no stand precipitation occurs. Note that it is common to
define zero interception capacities for glacier areas, but not
mandatory. Also note that the |KInz|, |Inzp| and |NKor| values
given for both water areas are ignored completely, and |Inzp|
and |NBes| are simply set to zero.
If there is no precipitation, there is of course also no stand
precipitation and interception storage remains unchanged:
>>> states.inzp = 0.5, 0.5, 0.0, 0.0, 0.0
>>> fluxes.nkor = 0.
>>> model.calc_nbes_inzp_v1()
>>> states.inzp
inzp(0.5, 0.5, 0.0, 0.0, 0.0)
>>> fluxes.nbes
nbes(0.0, 0.0, 0.0, 0.0, 0.0)
Interception capacities change discontinuously between consecutive
months. This can result in little stand precipitation events in
periods without precipitation:
>>> states.inzp = 1.0, 0.0, 0.0, 0.0, 0.0
>>> derived.kinz.sied_d_jul = 0.6
>>> fluxes.nkor = 0.0
>>> model.calc_nbes_inzp_v1()
>>> states.inzp
inzp(0.6, 0.0, 0.0, 0.0, 0.0)
>>> fluxes.nbes
nbes(0.4, 0.0, 0.0, 0.0, 0.0)
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
sta = self.sequences.states.fastaccess
for k in range(con.nhru):
if con.lnk[k] in (WASSER, FLUSS, SEE):
flu.nbes[k] = 0.
sta.inzp[k] = 0.
else:
flu.nbes[k] = \
max(flu.nkor[k]+sta.inzp[k] -
der.kinz[con.lnk[k]-1, der.moy[self.idx_sim]], 0.)
sta.inzp[k] += flu.nkor[k]-flu.nbes[k]
|
Calculate stand precipitation and update the interception storage
accordingly.
Required control parameters:
|NHRU|
|Lnk|
Required derived parameter:
|KInz|
Required flux sequence:
|NKor|
Calculated flux sequence:
|NBes|
Updated state sequence:
|Inzp|
Additional requirements:
|Model.idx_sim|
Basic equation:
:math:`NBes = \\Bigl \\lbrace
{
{NKor \\ | \\ Inzp = KInz}
\\atop
{0 \\ | \\ Inzp < KInz}
}`
Examples:
Initialize five HRUs with different land usages:
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(5)
>>> lnk(SIED_D, FEUCHT, GLETS, FLUSS, SEE)
Define the |KInz| values of the selected land usages for July directly:
>>> derived.kinz.sied_d_jul = 2.0
>>> derived.kinz.feucht_jul = 1.0
>>> derived.kinz.glets_jul = 0.0
>>> derived.kinz.fluss_jul = 1.0
>>> derived.kinz.see_jul = 1.0
Now we prepare a |MOY| object that assumes that the first, second,
and third simulation time steps are in June, July, and August
respectively (we make use of the value defined above for July, but
setting the values of parameter |MOY| this way allows for a more
rigorous testing of proper indexing):
>>> derived.moy.shape = 3
>>> derived.moy = 5, 6, 7
>>> model.idx_sim = 1
The dense settlement (|SIED_D|) and the wetland area (|FEUCHT|) start
with an initial interception storage of 1/2 mm, the glacier (|GLETS|)
starts with 0 mm, and both water areas (|FLUSS| and |SEE|) start with
1 mm. In the first example, actual precipitation
is 1 mm:
>>> states.inzp = 0.5, 0.5, 0.0, 1.0, 1.0
>>> fluxes.nkor = 1.0
>>> model.calc_nbes_inzp_v1()
>>> states.inzp
inzp(1.5, 1.0, 0.0, 0.0, 0.0)
>>> fluxes.nbes
nbes(0.0, 0.5, 1.0, 0.0, 0.0)
Only for the settled area, interception capacity is not exceeded,
meaning no stand precipitation occurs. Note that it is common to
define zero interception capacities for glacier areas, but not
mandatory. Also note that the |KInz|, |Inzp| and |NKor| values
given for both water areas are ignored completely, and |Inzp|
and |NBes| are simply set to zero.
If there is no precipitation, there is of course also no stand
precipitation and interception storage remains unchanged:
>>> states.inzp = 0.5, 0.5, 0.0, 0.0, 0.0
>>> fluxes.nkor = 0.
>>> model.calc_nbes_inzp_v1()
>>> states.inzp
inzp(0.5, 0.5, 0.0, 0.0, 0.0)
>>> fluxes.nbes
nbes(0.0, 0.0, 0.0, 0.0, 0.0)
Interception capacities change discontinuously between consecutive
months. This can result in little stand precipitation events in
periods without precipitation:
>>> states.inzp = 1.0, 0.0, 0.0, 0.0, 0.0
>>> derived.kinz.sied_d_jul = 0.6
>>> fluxes.nkor = 0.0
>>> model.calc_nbes_inzp_v1()
>>> states.inzp
inzp(0.6, 0.0, 0.0, 0.0, 0.0)
>>> fluxes.nbes
nbes(0.4, 0.0, 0.0, 0.0, 0.0)
|
entailment
|