| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q16500
|
_get_trailing_whitespace
|
train
|
def _get_trailing_whitespace(marker, s):
    """Return the whitespace content trailing the given 'marker' in string 's',
    up to and including a newline.
    """
    suffix = ''
    start = s.index(marker) + len(marker)
    i = start
    while i < len(s):
        if s[i] in ' \t':
            suffix += s[i]
        elif s[i] in '\r\n':
            suffix += s[i]
            if s[i] == '\r' and i + 1 < len(s) and s[i + 1] == '\n':
                suffix += s[i + 1]
            break
        else:
            break
        i += 1
    return suffix
|
python
|
{
"resource": ""
}
|
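A quick usage sketch (the sample string is illustrative, not from the source): spaces and tabs after the marker are collected up to and including the newline, and a '\r\n' pair is kept together.

>>> _get_trailing_whitespace('foo', 'foo \t\r\nbar')
' \t\r\n'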
q16501
|
RawCmdln.cmd
|
train
|
def cmd(self, argv):
    """Run one command and exit.
    "argv" is the arglist for the command to run. argv[0] is the
    command to run. If argv is an empty list then the
    'emptyline' handler is run.
    Returns the return value from the command handler.
    """
    assert isinstance(argv, (list, tuple)), \
        "'argv' is not a sequence: %r" % argv
    retval = None
    try:
        argv = self.precmd(argv)
        retval = self.onecmd(argv)
        self.postcmd(argv)
    except:
        if not self.cmdexc(argv):
            raise
        retval = 1
    return retval
|
python
|
{
"resource": ""
}
|
q16502
|
RawCmdln.default
|
train
|
def default(self, argv):
    """Hook called to handle a command for which there is no handler.
    "argv" is the command and arguments to run.
    The default implementation writes an error message to stderr
    and returns an error exit status.
    Returns a numeric command exit status.
    """
    errmsg = self._str(self.unknowncmd % (argv[0], ))
    if self.cmdlooping:
        self.stderr.write(errmsg + "\n")
    else:
        self.stderr.write("%s: %s\nTry '%s help' for info.\n" %
                          (self._name_str, errmsg, self._name_str))
    self.stderr.flush()
    return 1
|
python
|
{
"resource": ""
}
|
q16503
|
RawCmdln.helpdefault
|
train
|
def helpdefault(self, cmd, known):
    """Hook called to handle help on a command for which there is no
    help handler.
    "cmd" is the command name on which help was requested.
    "known" is a boolean indicating if this command is known
    (i.e. if there is a handler for it).
    Returns a return code.
    """
    if known:
        msg = self._str(self.nohelp % (cmd, ))
        if self.cmdlooping:
            self.stderr.write(msg + '\n')
        else:
            self.stderr.write("%s: %s\n" % (self.name, msg))
    else:
        msg = self.unknowncmd % (cmd, )
        if self.cmdlooping:
            self.stderr.write(msg + '\n')
        else:
            self.stderr.write("%s: %s\n"
                              "Try '%s help' for info.\n" %
                              (self.name, msg, self.name))
    self.stderr.flush()
    return 1
|
python
|
{
"resource": ""
}
|
q16504
|
RawCmdln._help_reindent
|
train
|
def _help_reindent(self, help, indent=None):
    """Hook to re-indent help strings before writing to stdout.
    "help" is the help content to re-indent
    "indent" is a string with which to indent each line of the
        help content after normalizing. If unspecified or None
        then the default is to use the 'self.helpindent' class
        attribute. By default this is the empty string, i.e.
        no indentation.
    By default, all common leading whitespace is removed and then
    the lot is indented by 'self.helpindent'. When calculating the
    common leading whitespace the first line is ignored -- hence
    help content for Conan can be written as follows and have the
    expected indentation:
        def do_crush(self, ...):
            '''${cmd_name}: crush your enemies, see them driven before you...
            c.f. Conan the Barbarian'''
    """
    if indent is None:
        indent = self.helpindent
    lines = help.splitlines(0)
    _dedentlines(lines, skip_first_line=True)
    lines = [(indent + line).rstrip() for line in lines]
    return '\n'.join(lines)
|
python
|
{
"resource": ""
}
|
q16505
|
RawCmdln._help_preprocess
|
train
|
def _help_preprocess(self, help, cmdname):
    """Hook to preprocess a help string before writing to stdout.
    "help" is the help string to process.
    "cmdname" is the canonical sub-command name for which help
    is being given, or None if the help is not specific to a
    command.
    By default the following template variables are interpolated in
    help content. (Note: these are similar to Python 2.4's
    string.Template interpolation but not quite.)
    ${name}
        The tool's/shell's name, i.e. 'self.name'.
    ${option_list}
        A formatted table of options for this shell/tool.
    ${command_list}
        A formatted table of available sub-commands.
    ${help_list}
        A formatted table of additional help topics (i.e. 'help_*'
        methods with no matching 'do_*' method).
    ${cmd_name}
        The name (and aliases) for this sub-command formatted as:
        "NAME (ALIAS1, ALIAS2, ...)".
    ${cmd_usage}
        A formatted usage block inferred from the command function
        signature.
    ${cmd_option_list}
        A formatted table of options for this sub-command. (This is
        only available for commands using the optparse integration,
        i.e. using @cmdln.option decorators or manually setting the
        'optparser' attribute on the 'do_*' method.)
    Returns the processed help.
    """
    preprocessors = {
        "${name}": self._help_preprocess_name,
        "${option_list}": self._help_preprocess_option_list,
        "${command_list}": self._help_preprocess_command_list,
        "${help_list}": self._help_preprocess_help_list,
        "${cmd_name}": self._help_preprocess_cmd_name,
        "${cmd_usage}": self._help_preprocess_cmd_usage,
        "${cmd_option_list}": self._help_preprocess_cmd_option_list,
    }
    for marker, preprocessor in preprocessors.items():
        if marker in help:
            help = preprocessor(help, cmdname)
    return help
|
python
|
{
"resource": ""
}
|
q16506
|
RawCmdln._get_canonical_map
|
train
|
def _get_canonical_map(self):
    """Return a mapping of available command names and aliases to
    their canonical command name.
    """
    cacheattr = "_token2canonical"
    if not hasattr(self, cacheattr):
        # Get the list of commands and their aliases, if any.
        token2canonical = {}
        cmd2funcname = {}  # use a dict to strip duplicates
        for attr in self.get_names():
            if attr.startswith("do_"):
                cmdname = attr[3:]
            elif attr.startswith("_do_"):
                cmdname = attr[4:]
            else:
                continue
            cmd2funcname[cmdname] = attr
            token2canonical[cmdname] = cmdname
        for cmdname, funcname in cmd2funcname.items():  # add aliases
            func = getattr(self, funcname)
            aliases = getattr(func, "aliases", [])
            for alias in aliases:
                if alias in cmd2funcname:
                    import warnings
                    warnings.warn("'%s' alias for '%s' command conflicts "
                                  "with '%s' handler" %
                                  (alias, cmdname, cmd2funcname[alias]))
                    continue
                token2canonical[alias] = cmdname
        setattr(self, cacheattr, token2canonical)
    return getattr(self, cacheattr)
|
python
|
{
"resource": ""
}
|
q16507
|
_getRegisteredExecutable
|
train
|
def _getRegisteredExecutable(exeName):
    """Windows allows application paths to be registered in the registry."""
    registered = None
    if sys.platform.startswith('win'):
        if os.path.splitext(exeName)[1].lower() != '.exe':
            exeName += '.exe'
        import _winreg
        try:
            key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\" +\
                  exeName
            value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key)
            registered = (value, "from HKLM\\"+key)
        except _winreg.error:
            pass
        if registered and not os.path.exists(registered[0]):
            registered = None
    return registered
|
python
|
{
"resource": ""
}
|
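The `_winreg` module is Python 2 only; in Python 3 it was renamed to `winreg` with the same API. A minimal sketch of the same registry lookup on Python 3 (the executable name and result shown are illustrative):

>>> import winreg  # Python 3 name for _winreg
>>> key = r"SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\python.exe"
>>> winreg.QueryValue(winreg.HKEY_LOCAL_MACHINE, key)  # doctest: +SKIP
'C:\\Python39\\python.exe'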
q16508
|
whichall
|
train
|
def whichall(command, path=None, verbose=0, exts=None):
    """Return a list of full paths to all matches of the given command
    on the path.
    "command" is the name of the executable to search for.
    "path" is an optional alternate path list to search. The default is
        to use the PATH environment variable.
    "verbose", if true, will cause a 2-tuple to be returned for each
        match. The second element is a textual description of where the
        match was found.
    "exts" optionally allows one to specify a list of extensions to use
        instead of the standard list for this system. This can
        effectively be used as an optimization to, for example, avoid
        stat calls on "foo.vbs" when searching for "foo" and you know it
        is not a VisualBasic script but ".vbs" is on PATHEXT. This
        option is only supported on Windows.
    """
    return list(whichgen(command, path, verbose, exts))
|
python
|
{
"resource": ""
}
|
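A usage sketch (paths are illustrative; `whichgen` is the generator this wrapper exhausts):

>>> whichall("python")  # doctest: +SKIP
['/usr/local/bin/python', '/usr/bin/python']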
q16509
|
get_version
|
train
|
def get_version():
    """Get the python-manta version without having to import the manta package,
    which requires deps to already be installed.
    """
    _globals = {}
    _locals = {}
    exec(
        compile(
            open(TOP + "/manta/version.py").read(), TOP + "/manta/version.py",
            'exec'), _globals, _locals)
    return _locals["__version__"]
|
python
|
{
"resource": ""
}
|
q16510
|
fingerprint_from_ssh_pub_key
|
train
|
def fingerprint_from_ssh_pub_key(data):
    """Calculate the fingerprint of SSH public key data.
    >>> data = "ssh-rsa AAAAB3NzaC1y...4IEAA1Z4wIWCuk8F9Tzw== my key comment"
    >>> fingerprint_from_ssh_pub_key(data)
    '54:c7:4c:93:cf:ff:e3:32:68:bc:89:6e:5e:22:b5:9c'
    Adapted from <http://stackoverflow.com/questions/6682815/>
    and imgapi.js#fingerprintFromSshpubkey.
    """
    data = data.strip()
    # Let's accept either:
    # - just the base64 encoded data part, e.g.
    #   'AAAAB3NzaC1yc2EAAAABIwAA...2l24uq9Lfw=='
    # - the full ssh pub key file content, e.g.:
    #   'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAA...2l24uq9Lfw== my comment'
    if (re.search(r'^ssh-(?:rsa|dss) ', data) or
            re.search(r'^ecdsa-sha2-nistp(?:[0-9]+)', data)):
        data = data.split(None, 2)[1]
    key = base64.b64decode(data)
    fp_plain = hashlib.md5(key).hexdigest()
    return ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2]))
|
python
|
{
"resource": ""
}
|
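The final line groups the 32-character hex digest into colon-separated byte pairs; a standalone check of that idiom on a shortened digest:

>>> fp = '54c74c93'
>>> ':'.join(a + b for a, b in zip(fp[::2], fp[1::2]))
'54:c7:4c:93'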
q16511
|
agent_key_info_from_key_id
|
train
|
def agent_key_info_from_key_id(key_id):
    """Find a matching key in the ssh-agent.
    @param key_id {str} Either a private ssh key fingerprint, e.g.
        'b3:f0:a1:6c:18:3b:42:63:fd:6e:57:42:74:17:d4:bc', or the path to
        an ssh private key file (like ssh's IdentityFile config option).
    @return {dict} with these keys:
        - type: "agent"
        - agent_key: paramiko AgentKey
        - fingerprint: key fingerprint
        - algorithm: "rsa-sha1" (DSA agent signing is currently not
          supported)
    """
    # Need the fingerprint of the key we're using for signing. If it
    # is a path to a priv key, then we need to load it.
    if not FINGERPRINT_RE.match(key_id):
        ssh_key = load_ssh_key(key_id, True)
        fingerprint = ssh_key["fingerprint"]
    else:
        fingerprint = key_id
    # Look for a matching fingerprint in the ssh-agent keys.
    keys = Agent().get_keys()
    for key in keys:
        raw_key = key.blob
        # The MD5 fingerprint functions return the hexdigest without the hash
        # algorithm prefix ("MD5:"), and the SHA256 functions return the
        # fingerprint with the prefix ("SHA256:"). Ideally we'd want to
        # normalize these, but more importantly we don't want to break
        # backwards compatibility for either the SHA or MD5 users.
        md5_fp = fingerprint_from_raw_ssh_pub_key(raw_key)
        sha_fp = sha256_fingerprint_from_raw_ssh_pub_key(raw_key)
        if (sha_fp == fingerprint or
                md5_fp == fingerprint or
                "MD5:" + md5_fp == fingerprint):
            # Canonicalize it to the md5 fingerprint.
            md5_fingerprint = md5_fp
            break
    else:
        raise MantaError('no ssh-agent key with fingerprint "%s"' %
                         fingerprint)
    return {
        "type": "agent",
        "agent_key": key,
        "fingerprint": md5_fingerprint,
        "algorithm": ALGO_FROM_SSH_KEY_TYPE[key.name]
    }
|
python
|
{
"resource": ""
}
|
q16512
|
create_channel
|
train
|
def create_channel(
    target: str,
    options: Optional[List[Tuple[str, Any]]] = None,
    interceptors: Optional[List[ClientInterceptor]] = None,
) -> grpc.Channel:
    """Creates a gRPC channel
    The gRPC channel is created with the provided options and intercepts each
    invocation via the provided interceptors.
    The created channel is configured with the following default options:
    - "grpc.max_send_message_length": 100MB,
    - "grpc.max_receive_message_length": 100MB.
    :param target: the server address.
    :param options: optional list of key-value pairs to configure the channel.
    :param interceptors: optional list of client interceptors.
    :returns: a gRPC channel.
    """
    # The list of possible options is available here:
    # https://grpc.io/grpc/core/group__grpc__arg__keys.html
    options = (options or []) + [
        ("grpc.max_send_message_length", grpc_max_msg_size),
        ("grpc.max_receive_message_length", grpc_max_msg_size),
    ]
    interceptors = interceptors or []
    channel = grpc.insecure_channel(target, options)
    return grpc.intercept_channel(channel, *interceptors)
|
python
|
{
"resource": ""
}
|
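A usage sketch (the address and the extra channel option are illustrative; `grpc_max_msg_size` is assumed to be a module-level constant):

>>> channel = create_channel("localhost:50051",
...                          options=[("grpc.enable_retries", 0)])  # doctest: +SKIP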
q16513
|
create_server
|
train
|
def create_server(
    max_workers: int,
    options: Optional[List[Tuple[str, Any]]] = None,
    interceptors: Optional[List[grpc.ServerInterceptor]] = None,
) -> grpc.Server:
    """Creates a gRPC server
    The gRPC server is created with the provided options and intercepts each
    incoming RPC via the provided interceptors.
    The created server is configured with the following default options:
    - "grpc.max_send_message_length": 100MB,
    - "grpc.max_receive_message_length": 100MB.
    :param max_workers: the maximum number of workers to use in the underlying
        futures.ThreadPoolExecutor to be used by the Server to execute RPC
        handlers.
    :param options: optional list of key-value pairs to configure the channel.
    :param interceptors: optional list of server interceptors.
    :returns: a gRPC server.
    """
    # The list of possible options is available here:
    # https://grpc.io/grpc/core/group__grpc__arg__keys.html
    options = (options or []) + [
        ("grpc.max_send_message_length", grpc_max_msg_size),
        ("grpc.max_receive_message_length", grpc_max_msg_size),
    ]
    interceptors = [base.ServerInterceptorWrapper(i)
                    for i in (interceptors or [])]
    server = grpc.server(ThreadPoolExecutor(max_workers=max_workers),
                         options=options, interceptors=interceptors)
    for i in interceptors:
        i.bind(server)
    return server
|
python
|
{
"resource": ""
}
|
q16514
|
to_grpc_address
|
train
|
def to_grpc_address(target: str) -> str:
    """Converts a standard gRPC target to one that is supported by grpcio
    :param target: the server address.
    :returns: the converted address.
    """
    u = urlparse(target)
    if u.scheme == "dns":
        raise ValueError("dns:// not supported")
    if u.scheme == "unix":
        return "unix:" + u.path
    return u.netloc
|
python
|
{
"resource": ""
}
|
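A behavior sketch, following directly from the urlparse-based logic above (addresses are illustrative):

>>> to_grpc_address("unix:///tmp/manager.sock")
'unix:/tmp/manager.sock'
>>> to_grpc_address("ipv4://127.0.0.1:50051")
'127.0.0.1:50051'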
q16515
|
implement_switch_disconnector
|
train
|
def implement_switch_disconnector(mv_grid, node1, node2):
    """
    Install switch disconnector in grid topology
    The graph that represents the grid's topology is altered in such a way
    that it explicitly includes a switch disconnector.
    The switch disconnector is always located at ``node1``. Technically, it
    does not make any difference. This is just a convention ensuring
    consistency of multiple runs.
    The ring is still closed after manipulations of this function.
    Parameters
    ----------
    mv_grid : :class:`~.grid.grids.MVGrid`
        MV grid instance
    node1
        A node of the ring
    node2
        Another node of the ring
    """
    # Get disconnecting point's location
    line = mv_grid.graph.edge[node1][node2]['line']
    length_sd_line = .75e-3  # in km
    x_sd = node1.geom.x + (length_sd_line / line.length) * (
        node1.geom.x - node2.geom.x)
    y_sd = node1.geom.y + (length_sd_line / line.length) * (
        node1.geom.y - node2.geom.y)
    # Instantiate disconnecting point
    mv_dp_number = len(mv_grid.graph.nodes_by_attribute(
        'mv_disconnecting_point'))
    disconnecting_point = MVDisconnectingPoint(
        id=mv_dp_number + 1,
        geom=Point(x_sd, y_sd),
        grid=mv_grid)
    mv_grid.graph.add_node(disconnecting_point, type='mv_disconnecting_point')
    # Replace original line by a new line
    new_line_attr = {
        'line': Line(
            id=line.id,
            type=line.type,
            length=line.length - length_sd_line,
            grid=mv_grid),
        'type': 'line'}
    mv_grid.graph.remove_edge(node1, node2)
    mv_grid.graph.add_edge(disconnecting_point, node2, new_line_attr)
    # Add disconnecting line segment
    switch_disconnector_line_attr = {
        'line': Line(
            id="switch_disconnector_line_{}".format(
                str(mv_dp_number + 1)),
            type=line.type,
            length=length_sd_line,
            grid=mv_grid),
        'type': 'line'}
    mv_grid.graph.add_edge(node1, disconnecting_point,
                           switch_disconnector_line_attr)
    # Set line to switch disconnector
    disconnecting_point.line = mv_grid.graph.line_from_nodes(
        disconnecting_point, node1)
|
python
|
{
"resource": ""
}
|
q16516
|
select_cable
|
train
|
def select_cable(network, level, apparent_power):
    """Selects an appropriate cable type and quantity using given apparent
    power.
    Parameters
    ----------
    network : :class:`~.grid.network.Network`
        The eDisGo container object
    level : :obj:`str`
        Grid level ('mv' or 'lv')
    apparent_power : :obj:`float`
        Apparent power the cable must carry in kVA
    Returns
    -------
    :pandas:`pandas.Series<series>`
        Cable type
    :obj:`int`
        Cable count
    Notes
    ------
    Cable is selected to be able to carry the given `apparent_power`, no load
    factor is considered.
    """
    cable_count = 1
    if level == 'mv':
        available_cables = network.equipment_data['mv_cables'][
            network.equipment_data['mv_cables']['U_n'] ==
            network.mv_grid.voltage_nom]
        suitable_cables = available_cables[
            available_cables['I_max_th'] *
            network.mv_grid.voltage_nom > apparent_power]
        # increase cable count until appropriate cable type is found
        while suitable_cables.empty and cable_count < 20:
            cable_count += 1
            suitable_cables = available_cables[
                available_cables['I_max_th'] *
                network.mv_grid.voltage_nom *
                cable_count > apparent_power]
        if suitable_cables.empty and cable_count == 20:
            raise exceptions.MaximumIterationError(
                "Could not find a suitable cable for apparent power of "
                "{} kVA.".format(apparent_power))
        # pick the smallest suitable cable (label-based .loc lookup)
        cable_type = suitable_cables.loc[suitable_cables['I_max_th'].idxmin()]
    elif level == 'lv':
        suitable_cables = network.equipment_data['lv_cables'][
            network.equipment_data['lv_cables']['I_max_th'] *
            network.equipment_data['lv_cables']['U_n'] > apparent_power]
        # increase cable count until appropriate cable type is found
        while suitable_cables.empty and cable_count < 20:
            cable_count += 1
            suitable_cables = network.equipment_data['lv_cables'][
                network.equipment_data['lv_cables']['I_max_th'] *
                network.equipment_data['lv_cables']['U_n'] *
                cable_count > apparent_power]
        if suitable_cables.empty and cable_count == 20:
            raise exceptions.MaximumIterationError(
                "Could not find a suitable cable for apparent power of "
                "{} kVA.".format(apparent_power))
        cable_type = suitable_cables.loc[suitable_cables['I_max_th'].idxmin()]
    else:
        raise ValueError('Please supply a level (either \'mv\' or \'lv\').')
    return cable_type, cable_count
|
python
|
{
"resource": ""
}
|
q16517
|
get_gen_info
|
train
|
def get_gen_info(network, level='mvlv', fluctuating=False):
    """
    Gets all the installed generators with some additional information.
    Parameters
    ----------
    network : :class:`~.grid.network.Network`
        Network object holding the grid data.
    level : :obj:`str`
        Defines which generators are returned. Possible options are:
        * 'mv'
            Only generators connected to the MV grid are returned.
        * 'lv'
            Only generators connected to the LV grids are returned.
        * 'mvlv'
            All generators connected to the MV grid and LV grids are returned.
        Default: 'mvlv'.
    fluctuating : :obj:`bool`
        If True only returns fluctuating generators. Default: False.
    Returns
    --------
    :pandas:`pandas.DataFrame<dataframe>`
        Dataframe with all generators connected to the specified voltage
        level. Index of the dataframe are the generator objects of type
        :class:`~.grid.components.Generator`. Columns of the dataframe are:
        * 'gen_repr'
            The representative of the generator as :obj:`str`.
        * 'type'
            The generator type, e.g. 'solar' or 'wind' as :obj:`str`.
        * 'voltage_level'
            The voltage level the generator is connected to as :obj:`str`.
            Can either be 'mv' or 'lv'.
        * 'nominal_capacity'
            The nominal capacity of the generator as :obj:`float`.
        * 'weather_cell_id'
            The id of the weather cell the generator is located in as
            :obj:`int` (only applies to fluctuating generators).
    """
    gens_w_id = []
    if 'mv' in level:
        gens = network.mv_grid.generators
        gens_voltage_level = ['mv'] * len(gens)
        gens_type = [gen.type for gen in gens]
        gens_rating = [gen.nominal_capacity for gen in gens]
        for gen in gens:
            try:
                gens_w_id.append(gen.weather_cell_id)
            except AttributeError:
                gens_w_id.append(np.nan)
        gens_grid = [network.mv_grid] * len(gens)
    else:
        gens = []
        gens_voltage_level = []
        gens_type = []
        gens_rating = []
        gens_grid = []
    if 'lv' in level:
        for lv_grid in network.mv_grid.lv_grids:
            gens_lv = lv_grid.generators
            gens.extend(gens_lv)
            gens_voltage_level.extend(['lv'] * len(gens_lv))
            gens_type.extend([gen.type for gen in gens_lv])
            gens_rating.extend([gen.nominal_capacity for gen in gens_lv])
            for gen in gens_lv:
                try:
                    gens_w_id.append(gen.weather_cell_id)
                except AttributeError:
                    gens_w_id.append(np.nan)
            gens_grid.extend([lv_grid] * len(gens_lv))
    gen_df = pd.DataFrame({'gen_repr': list(map(lambda x: repr(x), gens)),
                           'generator': gens,
                           'type': gens_type,
                           'voltage_level': gens_voltage_level,
                           'nominal_capacity': gens_rating,
                           'weather_cell_id': gens_w_id,
                           'grid': gens_grid})
    gen_df.set_index('generator', inplace=True, drop=True)
    # filter fluctuating generators
    if fluctuating:
        gen_df = gen_df.loc[(gen_df.type == 'solar') | (gen_df.type == 'wind')]
    return gen_df
|
python
|
{
"resource": ""
}
|
q16518
|
assign_mv_feeder_to_nodes
|
train
|
def assign_mv_feeder_to_nodes(mv_grid):
    """
    Assigns an MV feeder to every generator, LV station, load, and branch tee
    Parameters
    -----------
    mv_grid : :class:`~.grid.grids.MVGrid`
    """
    mv_station_neighbors = mv_grid.graph.neighbors(mv_grid.station)
    # get all nodes in MV grid and remove MV station to get separate subgraphs
    mv_graph_nodes = mv_grid.graph.nodes()
    mv_graph_nodes.remove(mv_grid.station)
    subgraph = mv_grid.graph.subgraph(mv_graph_nodes)
    for neighbor in mv_station_neighbors:
        # determine feeder
        mv_feeder = mv_grid.graph.line_from_nodes(mv_grid.station, neighbor)
        # get all nodes in that feeder by doing a DFS in the disconnected
        # subgraph starting from the node adjacent to the MVStation `neighbor`
        subgraph_neighbor = nx.dfs_tree(subgraph, source=neighbor)
        for node in subgraph_neighbor.nodes():
            # in case of an LV station assign feeder to all nodes in that LV
            # grid
            if isinstance(node, LVStation):
                for lv_node in node.grid.graph.nodes():
                    lv_node.mv_feeder = mv_feeder
            else:
                node.mv_feeder = mv_feeder
|
python
|
{
"resource": ""
}
|
q16519
|
get_mv_feeder_from_line
|
train
|
def get_mv_feeder_from_line(line):
    """
    Determines MV feeder the given line is in.
    MV feeders are identified by the first line segment of the half-ring.
    Parameters
    ----------
    line : :class:`~.grid.components.Line`
        Line to find the MV feeder for.
    Returns
    -------
    :class:`~.grid.components.Line`
        MV feeder identifier (representative of the first line segment
        of the half-ring)
    """
    try:
        # get nodes of line
        nodes = line.grid.graph.nodes_from_line(line)
        # get feeders
        feeders = {}
        for node in nodes:
            # if one of the nodes is an MV station the line is an MV feeder
            # itself
            if isinstance(node, MVStation):
                feeders[repr(node)] = None
            else:
                feeders[repr(node)] = node.mv_feeder
        # return feeder that is not None
        feeder_1 = feeders[repr(nodes[0])]
        feeder_2 = feeders[repr(nodes[1])]
        if feeder_1 is not None and feeder_2 is not None:
            if feeder_1 == feeder_2:
                return feeder_1
            else:
                logging.warning('Different feeders for line {}.'.format(line))
                return None
        else:
            return feeder_1 if feeder_1 is not None else feeder_2
    except Exception as e:
        logging.warning('Failed to get MV feeder: {}.'.format(e))
        return None
|
python
|
{
"resource": ""
}
|
q16520
|
disconnect_storage
|
train
|
def disconnect_storage(network, storage):
    """
    Removes storage from network graph and pypsa representation.
    Parameters
    -----------
    network : :class:`~.grid.network.Network`
    storage : :class:`~.grid.components.Storage`
        Storage instance to be removed.
    """
    # only removes from network.pypsa, not from network.pypsa_lopf
    # remove from pypsa (buses, storage_units, storage_units_t, lines)
    neighbor = storage.grid.graph.neighbors(storage)[0]
    if network.pypsa is not None:
        line = storage.grid.graph.line_from_nodes(storage, neighbor)
        network.pypsa.storage_units = network.pypsa.storage_units.loc[
            network.pypsa.storage_units.index.drop(
                repr(storage)), :]
        network.pypsa.storage_units_t.p_set.drop([repr(storage)], axis=1,
                                                 inplace=True)
        network.pypsa.storage_units_t.q_set.drop([repr(storage)], axis=1,
                                                 inplace=True)
        network.pypsa.buses = network.pypsa.buses.loc[
            network.pypsa.buses.index.drop(
                '_'.join(['Bus', repr(storage)])), :]
        network.pypsa.lines = network.pypsa.lines.loc[
            network.pypsa.lines.index.drop(
                repr(line)), :]
    # delete line
    storage.grid.graph.remove_edge(storage, neighbor)
    # delete storage
    storage.grid.graph.remove_node(storage)
|
python
|
{
"resource": ""
}
|
q16521
|
Grid.weather_cells
|
train
|
def weather_cells(self):
    """
    Weather cells contained in grid
    Returns
    -------
    list
        list of weather cell ids contained in grid
    """
    if not self._weather_cells:
        # get all the weather cell ids
        self._weather_cells = []
        for gen in self.generators:
            if hasattr(gen, 'weather_cell_id'):
                self._weather_cells.append(gen.weather_cell_id)
        # drop duplicates
        self._weather_cells = list(set(self._weather_cells))
        # no need to check for Nones in the list because None in
        # gen.weather_cell_id is kicked out by the if hasattr() before
    return self._weather_cells
|
python
|
{
"resource": ""
}
|
q16522
|
Grid.peak_generation
|
train
|
def peak_generation(self):
    """
    Cumulative peak generation capacity of generators of this grid
    Returns
    -------
    float
        Ad-hoc calculated or cached peak generation capacity
    """
    if self._peak_generation is None:
        self._peak_generation = sum(
            [gen.nominal_capacity
             for gen in self.generators])
    return self._peak_generation
|
python
|
{
"resource": ""
}
|
q16523
|
Grid.peak_generation_per_technology
|
train
|
def peak_generation_per_technology(self):
    """
    Peak generation of each technology in the grid
    Returns
    -------
    :pandas:`pandas.Series<series>`
        Peak generation indexed by technology
    """
    peak_generation = defaultdict(float)
    for gen in self.generators:
        peak_generation[gen.type] += gen.nominal_capacity
    return pd.Series(peak_generation)
|
python
|
{
"resource": ""
}
|
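The method above is a compact group-and-sum idiom: accumulate into a `defaultdict(float)` keyed by type, then wrap it in a Series. A standalone sketch with made-up generator data:

>>> from collections import defaultdict
>>> import pandas as pd
>>> gens = [('solar', 30.0), ('wind', 50.0), ('solar', 20.0)]  # illustrative
>>> peak = defaultdict(float)
>>> for gen_type, capacity in gens:
...     peak[gen_type] += capacity
>>> pd.Series(peak).sort_index()
solar    50.0
wind     50.0
dtype: float64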
q16524
|
Grid.peak_generation_per_technology_and_weather_cell
|
train
|
def peak_generation_per_technology_and_weather_cell(self):
    """
    Peak generation of each technology and the
    corresponding weather cell in the grid
    Returns
    -------
    :pandas:`pandas.Series<series>`
        Peak generation indexed by technology and weather cell id
    """
    peak_generation = defaultdict(float)
    for gen in self.generators:
        if hasattr(gen, 'weather_cell_id'):
            if (gen.type, gen.weather_cell_id) in peak_generation.keys():
                peak_generation[gen.type, gen.weather_cell_id] += \
                    gen.nominal_capacity
            else:
                peak_generation[gen.type, gen.weather_cell_id] = \
                    gen.nominal_capacity
        else:
            message = 'No weather cell ID found for ' \
                      'generator {}.'.format(repr(gen))
            raise KeyError(message)
    series_index = pd.MultiIndex.from_tuples(list(peak_generation.keys()),
                                             names=['type', 'weather_cell_id'])
    return pd.Series(peak_generation, index=series_index)
|
python
|
{
"resource": ""
}
|
q16525
|
Grid.peak_load
|
train
|
def peak_load(self):
    """
    Cumulative peak load of loads of this grid
    Returns
    -------
    float
        Ad-hoc calculated or cached peak load
    """
    if self._peak_load is None:
        self._peak_load = sum(
            [_.peak_load.sum()
             for _ in self.graph.nodes_by_attribute('load')])
    return self._peak_load
|
python
|
{
"resource": ""
}
|
q16526
|
Grid.consumption
|
train
|
def consumption(self):
    """
    Consumption in kWh per sector for whole grid
    Returns
    -------
    :pandas:`pandas.Series<series>`
        Indexed by demand sector
    """
    consumption = defaultdict(float)
    for load in self.graph.nodes_by_attribute('load'):
        for sector, val in load.consumption.items():
            consumption[sector] += val
    return pd.Series(consumption)
|
python
|
{
"resource": ""
}
|
q16527
|
Grid.generators
|
train
|
def generators(self):
    """
    Connected Generators within the grid
    Returns
    -------
    list
        List of Generator Objects
    """
    if not self._generators:
        generators = list(self.graph.nodes_by_attribute('generator'))
        generators.extend(list(self.graph.nodes_by_attribute(
            'generator_aggr')))
        return generators
    else:
        return self._generators
|
python
|
{
"resource": ""
}
|
q16528
|
MVGrid.draw
|
train
|
def draw(self):
    """ Draw MV grid's graph using the geo data of nodes
    Notes
    -----
    This method uses the coordinates stored in the nodes' geoms which
    are usually conformal, not equidistant. Therefore, the plot might
    be distorted and does not (fully) reflect the real positions or
    distances between nodes.
    """
    # get nodes' positions
    nodes_pos = {}
    for node in self.graph.nodes():
        nodes_pos[node] = (node.geom.x, node.geom.y)
    plt.figure()
    nx.draw_networkx(self.graph, nodes_pos, node_size=16, font_size=8)
    plt.show()
|
python
|
{
"resource": ""
}
|
q16529
|
Graph.nodes_from_line
|
train
|
def nodes_from_line(self, line):
    """
    Get nodes adjacent to line
    Here, line refers to the object behind the key 'line' of the attribute
    dict attached to each edge.
    Parameters
    ----------
    line : edisgo.grid.components.Line
        An eDisGo line object
    Returns
    -------
    tuple
        Nodes adjacent to this edge
    """
    return dict([(v, k) for k, v in
                 nx.get_edge_attributes(self, 'line').items()])[line]
|
python
|
{
"resource": ""
}
|
q16530
|
Graph.line_from_nodes
|
train
|
def line_from_nodes(self, u, v):
    """
    Get line between two nodes ``u`` and ``v``.
    Parameters
    ----------
    u : :class:`~.grid.components.Component`
        One adjacent node
    v : :class:`~.grid.components.Component`
        The other adjacent node
    Returns
    -------
    Line
        Line segment connecting ``u`` and ``v``.
    """
    try:
        line = nx.get_edge_attributes(self, 'line')[(u, v)]
    except KeyError:
        try:
            line = nx.get_edge_attributes(self, 'line')[(v, u)]
        except KeyError:
            raise nx.NetworkXError('Line between ``u`` and ``v`` not '
                                   'included in the graph.')
    return line
|
python
|
{
"resource": ""
}
|
q16531
|
Graph.nodes_by_attribute
|
train
|
def nodes_by_attribute(self, attr_val, attr='type'):
    """
    Select Graph's nodes by attribute value
    Get all nodes that share the same attribute. By default, the attr 'type'
    is used to specify the nodes type (generator, load, etc.).
    Examples
    --------
    >>> import edisgo
    >>> G = edisgo.grids.Graph()
    >>> G.add_node(1, type='generator')
    >>> G.add_node(2, type='load')
    >>> G.add_node(3, type='generator')
    >>> G.nodes_by_attribute('generator')
    [1, 3]
    Parameters
    ----------
    attr_val : str
        Value of the `attr` nodes should be selected by
    attr : str, default: 'type'
        Attribute key which is 'type' by default
    Returns
    -------
    list
        A list containing nodes elements that match the given attribute
        value
    """
    temp_nodes = getattr(self, 'node')
    # a plain comprehension is clearer than filter(None, map(...)) and does
    # not silently drop falsy node objects
    nodes = [node for node in temp_nodes.keys()
             if temp_nodes[node][attr] == attr_val]
    return nodes
|
python
|
{
"resource": ""
}
|
q16532
|
Graph.lines_by_attribute
|
train
|
def lines_by_attribute(self, attr_val=None, attr='type'):
    """Returns a generator for iterating over Graph's lines by attribute value.
    Get all lines that share the same attribute. By default, the attr 'type'
    is used to specify the lines' type (line, agg_line, etc.).
    The edge of a graph is described by the two adjacent nodes and the line
    object itself, where the line object holds all relevant power system
    parameters.
    Examples
    --------
    >>> import edisgo
    >>> G = edisgo.grids.Graph()
    >>> G.add_node(1, type='generator')
    >>> G.add_node(2, type='load')
    >>> G.add_edge(1, 2, type='line')
    >>> lines = G.lines_by_attribute('line')
    >>> list(lines)[0]
    <class 'tuple'>: ((node1, node2), line)
    Parameters
    ----------
    attr_val : str
        Value of the `attr` lines should be selected by
    attr : str, default: 'type'
        Attribute key which is 'type' by default
    Returns
    -------
    generator of :obj:`dict`
        A generator yielding one dict per line element that matches the
        given attribute value
    Notes
    -----
    There are generator functions for nodes (`Graph.nodes()`) and edges
    (`Graph.edges()`) in NetworkX but unlike graph nodes, which can be
    represented by objects, branch objects can only be accessed by using an
    edge attribute ('line' is used here).
    To make access to attributes of the line objects simpler and more
    intuitive for the user, this generator yields a dictionary for each edge
    that contains information about adjacent nodes and the line object.
    Note, the construction of the dictionary highly depends on the structure
    of the incoming tuple (which is defined by the needs of NetworkX). If
    this changes, the code will break.
    Adapted from `Dingo <https://github.com/openego/dingo/blob/\
    ee237e37d4c228081e1e246d7e6d0d431c6dda9e/dingo/core/network/\
    __init__.py>`_.
    """
    # get all lines that have the attribute 'type' set
    lines_attributes = nx.get_edge_attributes(self, attr).items()
    # attribute value provided?
    if attr_val:
        # extract lines where 'type' == attr_val
        lines_attributes = [(k, self[k[0]][k[1]]['line'])
                            for k, v in lines_attributes if v == attr_val]
    else:
        # get all lines
        lines_attributes = [(k, self[k[0]][k[1]]['line'])
                            for k, v in lines_attributes]
    # sort them according to connected nodes
    lines_sorted = sorted(list(lines_attributes), key=lambda _: repr(_[1]))
    for line in lines_sorted:
        yield {'adj_nodes': line[0], 'line': line[1]}
|
python
|
{
"resource": ""
}
|
q16533
|
ServerInterceptorWrapper.intercept_service
|
train
|
def intercept_service(self, continuation, handler_call_details):
    """Intercepts incoming RPCs before handing them over to a handler
    See `grpc.ServerInterceptor.intercept_service`.
    """
    rpc_method_handler = self._get_rpc_handler(handler_call_details)
    if rpc_method_handler.response_streaming:
        if self._wrapped.is_streaming:
            # `self._wrapped` is a `StreamServerInterceptor`
            return self._wrapped.intercept_service(
                continuation, handler_call_details)
    else:
        if not self._wrapped.is_streaming:
            # `self._wrapped` is a `UnaryServerInterceptor`
            return self._wrapped.intercept_service(
                continuation, handler_call_details)
    # skip the interceptor due to type mismatch
    return continuation(handler_call_details)
|
python
|
{
"resource": ""
}
|
q16534
|
combine_mv_and_lv
|
train
|
def combine_mv_and_lv(mv, lv):
    """Combine MV and LV grid topology in PyPSA format
    """
    combined = {
        c: pd.concat([mv[c], lv[c]], axis=0) for c in list(lv.keys())
    }
    combined['Transformer'] = mv['Transformer']
    return combined
|
python
|
{
"resource": ""
}
|
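A behavior sketch with toy component tables (names are illustrative): each component DataFrame from the LV dict is concatenated below its MV counterpart, while the 'Transformer' table is taken from the MV side only.

>>> import pandas as pd
>>> mv = {'Bus': pd.DataFrame(index=['mv_bus']),
...       'Transformer': pd.DataFrame(index=['trafo'])}
>>> lv = {'Bus': pd.DataFrame(index=['lv_bus']),
...       'Transformer': pd.DataFrame(index=[])}
>>> list(combine_mv_and_lv(mv, lv)['Bus'].index)
['mv_bus', 'lv_bus']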
q16535
|
add_aggregated_lv_components
|
train
|
def add_aggregated_lv_components(network, components):
    """
    Aggregates LV load and generation at LV stations
    Use this function if you aim for MV calculation only. The respective
    DataFrames of `components` are extended by loads and generators
    representing the aggregated LV load and generation, grouped by
    technology type.
    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    components : dict of :pandas:`pandas.DataFrame<dataframe>`
        PyPSA components in tabular format
    Returns
    -------
    :obj:`dict` of :pandas:`pandas.DataFrame<dataframe>`
        The dictionary components passed to the function is returned altered.
    """
    generators = {}
    loads = {}
    # collect aggregated generation capacity by type and subtype
    # collect aggregated load grouped by sector
    for lv_grid in network.mv_grid.lv_grids:
        generators.setdefault(lv_grid, {})
        for gen in lv_grid.generators:
            generators[lv_grid].setdefault(gen.type, {})
            generators[lv_grid][gen.type].setdefault(gen.subtype, {})
            generators[lv_grid][gen.type][gen.subtype].setdefault(
                'capacity', 0)
            generators[lv_grid][gen.type][gen.subtype][
                'capacity'] += gen.nominal_capacity
            generators[lv_grid][gen.type][gen.subtype].setdefault(
                'name',
                '_'.join([gen.type,
                          gen.subtype,
                          'aggregated',
                          'LV_grid',
                          str(lv_grid.id)]))
        loads.setdefault(lv_grid, {})
        for lo in lv_grid.graph.nodes_by_attribute('load'):
            for sector, val in lo.consumption.items():
                loads[lv_grid].setdefault(sector, 0)
                loads[lv_grid][sector] += val
    # define dict for DataFrame creation of aggr. generation and load
    generator = {'name': [],
                 'bus': [],
                 'control': [],
                 'p_nom': [],
                 'type': []}
    load = {'name': [], 'bus': []}
    # fill generators dictionary for DataFrame creation
    for lv_grid_obj, lv_grid in generators.items():
        for _, gen_type in lv_grid.items():
            for _, gen_subtype in gen_type.items():
                generator['name'].append(gen_subtype['name'])
                generator['bus'].append(
                    '_'.join(['Bus', lv_grid_obj.station.__repr__('lv')]))
                generator['control'].append('PQ')
                generator['p_nom'].append(gen_subtype['capacity'])
                generator['type'].append("")
    # fill loads dictionary for DataFrame creation
    for lv_grid_obj, lv_grid in loads.items():
        for sector, val in lv_grid.items():
            load['name'].append('_'.join(['Load', sector, repr(lv_grid_obj)]))
            load['bus'].append(
                '_'.join(['Bus', lv_grid_obj.station.__repr__('lv')]))
    components['Generator'] = pd.concat(
        [components['Generator'], pd.DataFrame(generator).set_index('name')])
    components['Load'] = pd.concat(
        [components['Load'], pd.DataFrame(load).set_index('name')])
    return components
|
python
|
{
"resource": ""
}
|
q16536
|
_pypsa_bus_timeseries
|
train
|
def _pypsa_bus_timeseries(network, buses, timesteps):
    """
    Time series in PyPSA compatible format for bus instances
    Set all buses except for the slack bus to voltage of 1 pu (it is assumed
    this setting is entirely ignored during solving the power flow problem).
    The slack bus is set to an operational voltage which is typically greater
    than nominal voltage plus a control deviation.
    The control deviation is always added positively to the operational
    voltage. For example, the operational voltage (offset) is set to 1.025 pu
    plus the control deviation of 0.015 pu, which adds up to a set voltage of
    the slack bus of 1.04 pu.
    .. warning::
        Voltage settings for the slack bus defined by this function assume
        the feedin case (reverse power flow case) as the worst-case for the
        power system. Thus, the set point for the slack is always greater
        than 1.
    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    timesteps : array_like
        Timesteps is an array-like object with entries of type
        :pandas:`pandas.Timestamp<timestamp>` specifying which time steps
        to export to pypsa representation and use in power flow analysis.
    buses : list
        Buses names
    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        Time series table in PyPSA format
    """
    # get slack bus label
    slack_bus = '_'.join(
        ['Bus', network.mv_grid.station.__repr__(side='mv')])
    # set all buses (except slack bus) to nominal voltage
    v_set_dict = {bus: 1 for bus in buses if bus != slack_bus}
    # set slack bus to operational voltage (includes offset and control
    # deviation)
    control_deviation = network.config[
        'grid_expansion_allowed_voltage_deviations'][
        'hv_mv_trafo_control_deviation']
    if control_deviation != 0:
        control_deviation_ts = \
            network.timeseries.timesteps_load_feedin_case.case.apply(
                lambda _: control_deviation if _ == 'feedin_case'
                else -control_deviation)
    else:
        control_deviation_ts = 0
    slack_voltage_pu = control_deviation_ts + 1 + \
        network.config[
            'grid_expansion_allowed_voltage_deviations'][
            'hv_mv_trafo_offset']
    v_set_dict.update({slack_bus: slack_voltage_pu})
    # convert to PyPSA compatible dataframe
    v_set_df = pd.DataFrame(v_set_dict, index=timesteps)
    return v_set_df
|
python
|
{
"resource": ""
}
|
q16537
|
_pypsa_generator_timeseries_aggregated_at_lv_station
|
train
|
def _pypsa_generator_timeseries_aggregated_at_lv_station(network, timesteps):
    """
    Aggregates generator time series per generator subtype and LV grid.
    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    timesteps : array_like
        Timesteps is an array-like object with entries of type
        :pandas:`pandas.Timestamp<timestamp>` specifying which time steps
        to export to pypsa representation and use in power flow analysis.
    Returns
    -------
    tuple of :pandas:`pandas.DataFrame<dataframe>`
        Tuple of size two containing DataFrames that represent
        1. 'p_set' of aggregated Generation per subtype at each LV station
        2. 'q_set' of aggregated Generation per subtype at each LV station
    """
    generation_p = []
    generation_q = []
    for lv_grid in network.mv_grid.lv_grids:
        # determine aggregated generation at LV stations
        generation = {}
        for gen in lv_grid.generators:
            gen_name = '_'.join([gen.type,
                                 gen.subtype,
                                 'aggregated',
                                 'LV_grid',
                                 str(lv_grid.id)])
            generation.setdefault(gen.type, {})
            generation[gen.type].setdefault(gen.subtype, {})
            generation[gen.type][gen.subtype].setdefault('timeseries_p', [])
            generation[gen.type][gen.subtype].setdefault('timeseries_q', [])
            generation[gen.type][gen.subtype]['timeseries_p'].append(
                gen.pypsa_timeseries('p').rename(gen_name).to_frame().loc[
                    timesteps])
            generation[gen.type][gen.subtype]['timeseries_q'].append(
                gen.pypsa_timeseries('q').rename(gen_name).to_frame().loc[
                    timesteps])
        for k_type, v_type in generation.items():
            for k_subtype, v_subtype in v_type.items():
                col_name = v_subtype['timeseries_p'][0].columns[0]
                generation_p.append(
                    pd.concat(v_subtype['timeseries_p'],
                              axis=1).sum(axis=1).rename(col_name).to_frame())
                generation_q.append(
                    pd.concat(v_subtype['timeseries_q'], axis=1).sum(
                        axis=1).rename(col_name).to_frame())
    return generation_p, generation_q
|
python
|
{
"resource": ""
}
|
q16538
|
_pypsa_load_timeseries_aggregated_at_lv_station
|
train
|
def _pypsa_load_timeseries_aggregated_at_lv_station(network, timesteps):
    """
    Aggregates load time series per sector and LV grid.
    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    timesteps : array_like
        Timesteps is an array-like object with entries of type
        :pandas:`pandas.Timestamp<timestamp>` specifying which time steps
        to export to pypsa representation and use in power flow analysis.
    Returns
    -------
    tuple of :pandas:`pandas.DataFrame<dataframe>`
        Tuple of size two containing DataFrames that represent
        1. 'p_set' of aggregated Load per sector at each LV station
        2. 'q_set' of aggregated Load per sector at each LV station
    """
    # ToDo: Load.pypsa_timeseries is not differentiated by sector so this
    # function will not work (either change here and in
    # add_aggregated_lv_components or in Load class)
    load_p = []
    load_q = []
    for lv_grid in network.mv_grid.lv_grids:
        # determine aggregated load at LV stations
        load = {}
        for lo in lv_grid.graph.nodes_by_attribute('load'):
            for sector, val in lo.consumption.items():
                load.setdefault(sector, {})
                load[sector].setdefault('timeseries_p', [])
                load[sector].setdefault('timeseries_q', [])
                load[sector]['timeseries_p'].append(
                    lo.pypsa_timeseries('p').rename(repr(lo)).to_frame().loc[
                        timesteps])
                load[sector]['timeseries_q'].append(
                    lo.pypsa_timeseries('q').rename(repr(lo)).to_frame().loc[
                        timesteps])
        for sector, val in load.items():
            load_p.append(
                pd.concat(val['timeseries_p'], axis=1).sum(axis=1).rename(
                    '_'.join(['Load', sector, repr(lv_grid)])).to_frame())
            load_q.append(
                pd.concat(val['timeseries_q'], axis=1).sum(axis=1).rename(
                    '_'.join(['Load', sector, repr(lv_grid)])).to_frame())
    return load_p, load_q
|
python
|
{
"resource": ""
}
|
q16539
|
update_pypsa_timeseries
|
train
|
def update_pypsa_timeseries(network, loads_to_update=None,
                            generators_to_update=None,
                            storages_to_update=None,
                            timesteps=None):
    """
    Updates load, generator, storage and bus time series in pypsa network.
    See functions :func:`update_pypsa_load_timeseries`,
    :func:`update_pypsa_generator_timeseries`,
    :func:`update_pypsa_storage_timeseries`, and
    :func:`update_pypsa_bus_timeseries` for more information.
    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    loads_to_update : :obj:`list`, optional
        List with all loads (of type :class:`~.grid.components.Load`) that
        need to be updated. If None all loads are updated depending on mode.
        See :meth:`~.tools.pypsa_io.to_pypsa` for more information.
    generators_to_update : :obj:`list`, optional
        List with all generators (of type
        :class:`~.grid.components.Generator`) that need to be updated. If
        None all generators are updated depending on mode. See
        :meth:`~.tools.pypsa_io.to_pypsa` for more information.
    storages_to_update : :obj:`list`, optional
        List with all storages (of type :class:`~.grid.components.Storage`)
        that need to be updated. If None all storages are updated depending
        on mode. See :meth:`~.tools.pypsa_io.to_pypsa` for more information.
    timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
        Timesteps specifies which time steps of the load time series to
        export to pypsa representation and use in power flow analysis.
        If None all time steps currently existing in pypsa representation
        are updated. If not None current time steps are overwritten by given
        time steps. Default: None.
    """
    update_pypsa_load_timeseries(
        network, loads_to_update=loads_to_update, timesteps=timesteps)
    update_pypsa_generator_timeseries(
        network, generators_to_update=generators_to_update,
        timesteps=timesteps)
    update_pypsa_storage_timeseries(
        network, storages_to_update=storages_to_update, timesteps=timesteps)
    update_pypsa_bus_timeseries(network, timesteps=timesteps)
    # update pypsa snapshots
    if timesteps is None:
        timesteps = network.pypsa.buses_t.v_mag_pu_set.index
    network.pypsa.set_snapshots(timesteps)
|
python
|
{
"resource": ""
}
|
q16540
|
update_pypsa_load_timeseries
|
train
|
def update_pypsa_load_timeseries(network, loads_to_update=None,
                                 timesteps=None):
    """
    Updates load time series in pypsa representation.
    This function overwrites p_set and q_set of the loads_t attribute of the
    pypsa network.
    Be aware that if you call this function with `timesteps` and thus
    overwrite current time steps it may lead to inconsistencies in the pypsa
    network since only load time series are updated but none of the other
    time series or the snapshots attribute of the pypsa network. Use the
    function :func:`update_pypsa_timeseries` to change the time steps you
    want to analyse in the power flow analysis.
    This function will also raise an error when a load is passed that is
    currently not in the pypsa representation.
    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    loads_to_update : :obj:`list`, optional
        List with all loads (of type :class:`~.grid.components.Load`) that
        need to be updated. If None all loads are updated depending on mode.
        See :meth:`~.tools.pypsa_io.to_pypsa` for more information.
    timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
        Timesteps specifies which time steps of the load time series to
        export to pypsa representation. If None all time steps currently
        existing in pypsa representation are updated. If not None current
        time steps are overwritten by given time steps. Default: None.
    """
    _update_pypsa_timeseries_by_type(
        network, type='load', components_to_update=loads_to_update,
        timesteps=timesteps)
|
python
|
{
"resource": ""
}
|
q16541
|
update_pypsa_generator_timeseries
|
train
|
def update_pypsa_generator_timeseries(network, generators_to_update=None,
                                      timesteps=None):
    """
    Updates generator time series in pypsa representation.
    This function overwrites p_set and q_set of the generators_t attribute
    of the pypsa network.
    Be aware that if you call this function with `timesteps` and thus
    overwrite current time steps it may lead to inconsistencies in the pypsa
    network since only generator time series are updated but none of the
    other time series or the snapshots attribute of the pypsa network. Use
    the function :func:`update_pypsa_timeseries` to change the time steps
    you want to analyse in the power flow analysis.
    This function will also raise an error when a generator is passed that
    is currently not in the pypsa representation.
    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    generators_to_update : :obj:`list`, optional
        List with all generators (of type
        :class:`~.grid.components.Generator`) that need to be updated. If
        None all generators are updated depending on mode. See
        :meth:`~.tools.pypsa_io.to_pypsa` for more information.
    timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
        Timesteps specifies which time steps of the generator time series to
        export to pypsa representation. If None all time steps currently
        existing in pypsa representation are updated. If not None current
        time steps are overwritten by given time steps. Default: None.
    """
    _update_pypsa_timeseries_by_type(
        network, type='generator', components_to_update=generators_to_update,
        timesteps=timesteps)
|
python
|
{
"resource": ""
}
|
q16542
|
update_pypsa_storage_timeseries
|
train
|
def update_pypsa_storage_timeseries(network, storages_to_update=None,
                                    timesteps=None):
    """
    Updates storage time series in pypsa representation.
    This function overwrites p_set and q_set of the storage_units_t attribute
    of the pypsa network.
    Be aware that if you call this function with `timesteps` and thus
    overwrite current time steps it may lead to inconsistencies in the pypsa
    network since only storage time series are updated but none of the other
    time series or the snapshots attribute of the pypsa network. Use the
    function :func:`update_pypsa_timeseries` to change the time steps you
    want to analyse in the power flow analysis.
    This function will also raise an error when a storage is passed that is
    currently not in the pypsa representation.
    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    storages_to_update : :obj:`list`, optional
        List with all storages (of type :class:`~.grid.components.Storage`)
        that need to be updated. If None all storages are updated depending
        on mode. See :meth:`~.tools.pypsa_io.to_pypsa` for more information.
    timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
        Timesteps specifies which time steps of the storage time series to
        export to pypsa representation. If None all time steps currently
        existing in pypsa representation are updated. If not None current
        time steps are overwritten by given time steps. Default: None.
    """
    _update_pypsa_timeseries_by_type(
        network, type='storage', components_to_update=storages_to_update,
        timesteps=timesteps)
|
python
|
{
"resource": ""
}
|
q16543
|
update_pypsa_bus_timeseries
|
train
|
def update_pypsa_bus_timeseries(network, timesteps=None):
    """
    Updates buses voltage time series in pypsa representation.
    This function overwrites v_mag_pu_set of the buses_t attribute of the
    pypsa network.
    Be aware that if you call this function with `timesteps` and thus
    overwrite current time steps it may lead to inconsistencies in the pypsa
    network since only bus time series are updated but none of the other
    time series or the snapshots attribute of the pypsa network. Use the
    function :func:`update_pypsa_timeseries` to change the time steps you
    want to analyse in the power flow analysis.
    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
        Timesteps specifies which time steps of the time series to
        export to pypsa representation. If None all time steps currently
        existing in pypsa representation are updated. If not None current
        time steps are overwritten by given time steps. Default: None.
    """
    if timesteps is None:
        timesteps = network.pypsa.buses_t.v_mag_pu_set.index
    # check if timesteps is array-like, otherwise convert to list
    if not hasattr(timesteps, "__len__"):
        timesteps = [timesteps]
    buses = network.pypsa.buses.index
    v_mag_pu_set = _pypsa_bus_timeseries(network, buses, timesteps)
    network.pypsa.buses_t.v_mag_pu_set = v_mag_pu_set
|
python
|
{
"resource": ""
}
|
q16544
|
_update_pypsa_timeseries_by_type
|
train
|
def _update_pypsa_timeseries_by_type(network, type, components_to_update=None,
                                     timesteps=None):
    """
    Updates time series of specified component in pypsa representation.
    Be aware that if you call this function with `timesteps` and thus
    overwrite current time steps it may lead to inconsistencies in the pypsa
    network since only time series of the specified component are updated
    but none of the other time series or the snapshots attribute of the
    pypsa network.
    Use the function :func:`update_pypsa_timeseries` to change the time
    steps you want to analyse in the power flow analysis.
    This function will raise an error when a component is passed that is
    currently not in the pypsa representation.
    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    type : :obj:`str`
        Type specifies the type of component that is updated. Possible
        options are 'load', 'generator' and 'storage'.
    components_to_update : :obj:`list`, optional
        List with all components (either of type
        :class:`~.grid.components.Load`, :class:`~.grid.components.Generator`
        or :class:`~.grid.components.Storage`) that need to be updated.
        Components in the list must all be of the same type. If None all
        components specified by `type` are updated depending on the mode.
        See :meth:`~.tools.pypsa_io.to_pypsa` for more information on mode.
    timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
        Timesteps specifies which time steps of the time series to
        export to pypsa representation. If None all time steps currently
        existing in pypsa representation are updated. If not None current
        time steps are overwritten by given time steps. Default: None.
    """
    # pypsa dataframe to update
    if type == 'load':
        pypsa_ts = network.pypsa.loads_t
        components_in_pypsa = network.pypsa.loads.index
    elif type == 'generator':
        pypsa_ts = network.pypsa.generators_t
        components_in_pypsa = network.pypsa.generators.index
    elif type == 'storage':
        pypsa_ts = network.pypsa.storage_units_t
        components_in_pypsa = network.pypsa.storage_units.index
    else:
        raise ValueError('{} is not a valid type.'.format(type))
    # MV and LV loads
    if network.pypsa.edisgo_mode is None:
        # if no components are specified get all components of specified type
        # in whole grid
        if components_to_update is None:
            grids = [network.mv_grid] + list(network.mv_grid.lv_grids)
            if type == 'generator':
                components_to_update = list(itertools.chain(
                    *[grid.generators for grid in grids]))
            else:
                components_to_update = list(itertools.chain(
                    *[grid.graph.nodes_by_attribute(type) for grid in grids]))
        # if no time steps are specified update all time steps currently
        # contained in pypsa representation
        if timesteps is None:
            timesteps = pypsa_ts.p_set.index
        # check if timesteps is array-like, otherwise convert to list
        # (necessary to avoid getting a scalar using .loc)
        if not hasattr(timesteps, "__len__"):
            timesteps = [timesteps]
        p_set = pd.DataFrame()
        q_set = pd.DataFrame()
        for comp in components_to_update:
            if repr(comp) in components_in_pypsa:
                p_set[repr(comp)] = comp.pypsa_timeseries('p').loc[timesteps]
                q_set[repr(comp)] = comp.pypsa_timeseries('q').loc[timesteps]
            else:
                raise KeyError("Tried to update component {} but could not "
                               "find it in pypsa network.".format(comp))
        # overwrite pypsa time series
        pypsa_ts.p_set = p_set
        pypsa_ts.q_set = q_set
    # MV and aggregated LV loads
    elif network.pypsa.edisgo_mode == 'mv':
        raise NotImplementedError
    # LV only
    elif network.pypsa.edisgo_mode == 'lv':
        raise NotImplementedError
|
python
|
{
"resource": ""
}
|
q16545
|
fifty_fifty
|
train
|
def fifty_fifty(network, storage, feedin_threshold=0.5):
    """
    Operational mode where the storage operation depends on the actual power
    output of the generators. If cumulative generation exceeds 50% of
    nominal power, the storage is charged. Otherwise, the storage is
    discharged.
    The time series for active power is written into the storage.
    Parameters
    -----------
    network : :class:`~.grid.network.Network`
    storage : :class:`~.grid.components.Storage`
        Storage instance for which to generate time series.
    feedin_threshold : :obj:`float`
        Ratio of generation to installed power specifying when to charge or
        discharge the storage. If the feed-in threshold is e.g. 0.5, the
        storage will be charged when total generation exceeds 50% of the
        installed generator capacity and discharged when it falls below.
    """
    # determine generators cumulative apparent power output
    generators = network.mv_grid.generators + \
        [generators for lv_grid in
         network.mv_grid.lv_grids for generators in
         lv_grid.generators]
    generators_p = pd.concat([_.timeseries['p'] for _ in generators],
                             axis=1).sum(axis=1).rename('p')
    generators_q = pd.concat([_.timeseries['q'] for _ in generators],
                             axis=1).sum(axis=1).rename('q')
    generation = pd.concat([generators_p, generators_q], axis=1)
    generation['s'] = generation.apply(
        lambda x: sqrt(x['p'] ** 2 + x['q'] ** 2), axis=1)
    generators_nom_capacity = sum([_.nominal_capacity for _ in generators])
    feedin_bool = generation['s'] > (feedin_threshold *
                                     generators_nom_capacity)
    feedin = feedin_bool.apply(
        lambda x: storage.nominal_power if x
        else -storage.nominal_power).rename('p').to_frame()
    storage.timeseries = feedin
|
python
|
{
"resource": ""
}
|
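A standalone sketch of the threshold logic above (all numbers are illustrative): apparent power above the threshold maps to the storage's nominal power, below it to its negative.

>>> import pandas as pd
>>> s = pd.Series([10.0, 80.0])   # illustrative apparent power per time step
>>> nom = 100.0                   # illustrative installed capacity
>>> (s > 0.5 * nom).apply(lambda x: 5.0 if x else -5.0)
0   -5.0
1    5.0
dtype: float64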
q16546
|
connect_mv_generators
|
train
|
def connect_mv_generators(network):
"""Connect MV generators to existing grids.
This function searches for unconnected generators in MV grids and connects them.
It connects
* generators of voltage level 4
* to HV-MV station
* generators of voltage level 5
* with a nom. capacity of <=30 kW to LV loads of type residential
* with a nom. capacity of >30 kW and <=100 kW to LV loads of type
retail, industrial or agricultural
* to the MV-LV station if no appropriate load is available (fallback)
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
Notes
-----
Adapted from `Ding0 <https://github.com/openego/ding0/blob/\
21a52048f84ec341fe54e0204ac62228a9e8a32a/\
ding0/grid/mv_grid/mv_connect.py#L820>`_.
"""
# get params from config
buffer_radius = int(network.config[
'grid_connection']['conn_buffer_radius'])
buffer_radius_inc = int(network.config[
'grid_connection']['conn_buffer_radius_inc'])
# get standard equipment
std_line_type = network.equipment_data['mv_cables'].loc[
network.config['grid_expansion_standard_equipment']['mv_line']]
for geno in sorted(network.mv_grid.graph.nodes_by_attribute('generator'),
key=lambda _: repr(_)):
if nx.is_isolate(network.mv_grid.graph, geno):
# ===== voltage level 4: generator has to be connected to MV station =====
if geno.v_level == 4:
line_length = calc_geo_dist_vincenty(network=network,
node_source=geno,
node_target=network.mv_grid.station)
line = Line(id=random.randint(10**8, 10**9),
type=std_line_type,
kind='cable',
quantity=1,
length=line_length / 1e3,
grid=network.mv_grid)
network.mv_grid.graph.add_edge(network.mv_grid.station,
geno,
line=line,
type='line')
# add line to equipment changes to track costs
_add_cable_to_equipment_changes(network=network,
line=line)
# ===== voltage level 5: generator has to be connected to MV grid (next-neighbor) =====
elif geno.v_level == 5:
                # get branches within the predefined buffer radius
                # (config parameter `conn_buffer_radius`)
branches = calc_geo_lines_in_buffer(network=network,
node=geno,
grid=network.mv_grid,
radius=buffer_radius,
radius_inc=buffer_radius_inc)
# calc distance between generator and grid's lines -> find nearest line
conn_objects_min_stack = _find_nearest_conn_objects(network=network,
node=geno,
branches=branches)
# connect!
# go through the stack (from nearest to most far connection target object)
generator_connected = False
for dist_min_obj in conn_objects_min_stack:
target_obj_result = _connect_mv_node(network=network,
node=geno,
target_obj=dist_min_obj)
if target_obj_result is not None:
generator_connected = True
break
if not generator_connected:
logger.debug(
'Generator {0} could not be connected, try to '
'increase the parameter `conn_buffer_radius` in '
'config file `config_grid.cfg` to gain more possible '
'connection points.'.format(geno))
|
python
|
{
"resource": ""
}
|
q16547
|
_add_cable_to_equipment_changes
|
train
|
def _add_cable_to_equipment_changes(network, line):
"""Add cable to the equipment changes
All changes of equipment are stored in network.results.equipment_changes
which is used later to determine grid expansion costs.
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
    line : :class:`~.grid.components.Line`
        Line instance which is to be added
"""
network.results.equipment_changes = \
network.results.equipment_changes.append(
pd.DataFrame(
{'iteration_step': [0],
'change': ['added'],
'equipment': [line.type.name],
'quantity': [1]
},
index=[line]
)
)
|
python
|
{
"resource": ""
}
|
q16548
|
_del_cable_from_equipment_changes
|
train
|
def _del_cable_from_equipment_changes(network, line):
"""Delete cable from the equipment changes if existing
This is needed if a cable was already added to network.results.equipment_changes
but another node is connected later to this cable. Therefore, the cable needs to
be split which changes the id (one cable id -> 2 new cable ids).
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
    line : :class:`~.grid.components.Line`
        Line instance which is to be deleted
"""
if line in network.results.equipment_changes.index:
network.results.equipment_changes = \
network.results.equipment_changes.drop(line)
|
python
|
{
"resource": ""
}
|
q16549
|
_find_nearest_conn_objects
|
train
|
def _find_nearest_conn_objects(network, node, branches):
"""Searches all branches for the nearest possible connection object per branch
It picks out 1 object out of 3 possible objects: 2 branch-adjacent stations
and 1 potentially created branch tee on the line (using perpendicular projection).
The resulting stack (list) is sorted ascending by distance from node.
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
node : :class:`~.grid.components.Component`
Node to connect (e.g. :class:`~.grid.components.Generator`)
    branches : :obj:`list`
        List of branches (NetworkX branch objects)

    Returns
    -------
    :obj:`list` of :obj:`dict`
        List of connection objects (each object is represented by a dict with
        the eDisGo object, shapely object and distance to the node).
Notes
-----
Adapted from `Ding0 <https://github.com/openego/ding0/blob/\
21a52048f84ec341fe54e0204ac62228a9e8a32a/\
ding0/grid/mv_grid/mv_connect.py#L38>`_.
"""
# threshold which is used to determine if 2 objects are on the same position (see below for details on usage)
conn_diff_tolerance = network.config['grid_connection'][
'conn_diff_tolerance']
conn_objects_min_stack = []
node_shp = transform(proj2equidistant(network), node.geom)
for branch in branches:
stations = branch['adj_nodes']
# create shapely objects for 2 stations and line between them, transform to equidistant CRS
station1_shp = transform(proj2equidistant(network), stations[0].geom)
station2_shp = transform(proj2equidistant(network), stations[1].geom)
line_shp = LineString([station1_shp, station2_shp])
# create dict with DING0 objects (line & 2 adjacent stations), shapely objects and distances
conn_objects = {'s1': {'obj': stations[0],
'shp': station1_shp,
'dist': node_shp.distance(station1_shp) * 0.999},
's2': {'obj': stations[1],
'shp': station2_shp,
'dist': node_shp.distance(station2_shp) * 0.999},
'b': {'obj': branch,
'shp': line_shp,
'dist': node_shp.distance(line_shp)}}
        # Remove branch from the dict of possible conn. objects if it is too
        # close to a node. Without this, the target object would not be unique
        # for different runs (and neither would the resulting topology).
if (
abs(conn_objects['s1']['dist'] - conn_objects['b']['dist']) < conn_diff_tolerance
or abs(conn_objects['s2']['dist'] - conn_objects['b']['dist']) < conn_diff_tolerance
):
del conn_objects['b']
# remove MV station as possible connection point
if isinstance(conn_objects['s1']['obj'], MVStation):
del conn_objects['s1']
elif isinstance(conn_objects['s2']['obj'], MVStation):
del conn_objects['s2']
# find nearest connection point on given triple dict (2 branch-adjacent stations + cable dist. on line)
conn_objects_min = min(conn_objects.values(), key=lambda v: v['dist'])
conn_objects_min_stack.append(conn_objects_min)
# sort all objects by distance from node
    conn_objects_min_stack = sorted(conn_objects_min_stack,
                                    key=lambda x: x['dist'])
return conn_objects_min_stack
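# --- Illustrative sketch (assumed coordinates): shapely's distance() gives
# the perpendicular projection distance used for the 'b' entry above.
from shapely.geometry import LineString, Point

node = Point(2.0, 1.0)
line = LineString([(0.0, 0.0), (4.0, 0.0)])
print(node.distance(line))             # 1.0 -> perpendicular distance to line
print(node.distance(Point(0.0, 0.0)))  # ~2.236 -> distance to adjacent station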
|
python
|
{
"resource": ""
}
|
q16550
|
_get_griddistrict
|
train
|
def _get_griddistrict(ding0_filepath):
"""
Just get the grid district number from ding0 data file path
Parameters
----------
ding0_filepath : str
Path to ding0 data ending typically
`/path/to/ding0_data/"ding0_grids__" + str(``grid_district``) + ".xxx"`
Returns
-------
int
grid_district number
"""
grid_district = os.path.basename(ding0_filepath)
    grid_district_search = re.search(r'[_]+\d+', grid_district)
if grid_district_search:
grid_district = int(grid_district_search.group(0)[2:])
return grid_district
else:
        raise KeyError(
            'Grid District not found in {}.'.format(grid_district))
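# --- Illustrative usage (file path is made up): the regex extracts the
# trailing grid district number from a typical ding0 file name.
import os
import re

path = '/path/to/ding0_data/ding0_grids__76.pkl'
match = re.search(r'[_]+\d+', os.path.basename(path))
print(int(match.group(0)[2:]))  # 76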
|
python
|
{
"resource": ""
}
|
q16551
|
run_edisgo_basic
|
train
|
def run_edisgo_basic(ding0_filepath,
generator_scenario=None,
analysis='worst-case',
*edisgo_grid):
"""
Analyze edisgo grid extension cost as reference scenario
Parameters
----------
ding0_filepath : str
Path to ding0 data ending typically
`/path/to/ding0_data/"ding0_grids__" + str(``grid_district``) + ".xxx"`
analysis : str
Either 'worst-case' or 'timeseries'
generator_scenario : None or :obj:`str`
If provided defines which scenario of future generator park to use
and invokes import of these generators. Possible options are 'nep2035'
and 'ego100'.
Returns
-------
edisgo_grid : :class:`~.grid.network.EDisGo`
eDisGo network container
costs : :pandas:`pandas.Dataframe<dataframe>`
Cost of grid extension
grid_issues : dict
Grids resulting in an error including error message
"""
grid_district = _get_griddistrict(ding0_filepath)
grid_issues = {}
logging.info('Grid expansion for MV grid district {}'.format(grid_district))
if edisgo_grid: # if an edisgo_grid is passed in arg then ignore everything else
edisgo_grid = edisgo_grid[0]
else:
try:
if 'worst-case' in analysis:
edisgo_grid = EDisGo(ding0_grid=ding0_filepath,
worst_case_analysis=analysis)
            elif 'timeseries' in analysis:
                edisgo_grid = EDisGo(
                    ding0_grid=ding0_filepath,
                    timeseries_generation_fluctuating='oedb',
                    timeseries_load='demandlib')
            else:
                raise ValueError(
                    'Unknown analysis type: {}'.format(analysis))
except FileNotFoundError as e:
return None, pd.DataFrame(), {'grid': grid_district, 'msg': str(e)}
# Import generators
if generator_scenario:
logging.info('Grid expansion for scenario \'{}\'.'.format(generator_scenario))
edisgo_grid.import_generators(generator_scenario=generator_scenario)
else:
        logging.info('Grid expansion without import of new generators.')
try:
# Do grid reinforcement
edisgo_grid.reinforce()
# Get costs
costs_grouped = \
edisgo_grid.network.results.grid_expansion_costs.groupby(
['type']).sum()
costs = pd.DataFrame(costs_grouped.values,
columns=costs_grouped.columns,
index=[[edisgo_grid.network.id] * len(costs_grouped),
costs_grouped.index]).reset_index()
costs.rename(columns={'level_0': 'grid'}, inplace=True)
grid_issues['grid'] = None
grid_issues['msg'] = None
logging.info('SUCCESS!')
except MaximumIterationError:
grid_issues['grid'] = edisgo_grid.network.id
grid_issues['msg'] = str(edisgo_grid.network.results.unresolved_issues)
costs = pd.DataFrame()
logging.warning('Unresolved issues left after grid expansion.')
except Exception as e:
grid_issues['grid'] = edisgo_grid.network.id
grid_issues['msg'] = repr(e)
costs = pd.DataFrame()
        logging.exception(e)
return edisgo_grid, costs, grid_issues
|
python
|
{
"resource": ""
}
|
q16552
|
_attach_aggregated
|
train
|
def _attach_aggregated(network, grid, aggregated, ding0_grid):
"""Add Generators and Loads to MV station representing aggregated generation
capacity and load
Parameters
----------
grid: MVGrid
MV grid object
aggregated: dict
Information about aggregated load and generation capacity. For
information about the structure of the dict see ... .
ding0_grid: ding0.Network
Ding0 network container
Returns
-------
MVGrid
Altered instance of MV grid including aggregated load and generation
"""
aggr_line_type = ding0_grid.network._static_data['MV_cables'].iloc[
ding0_grid.network._static_data['MV_cables']['I_max_th'].idxmax()]
for la_id, la in aggregated.items():
# add aggregated generators
for v_level, val in la['generation'].items():
for type, val2 in val.items():
for subtype, val3 in val2.items():
if type in ['solar', 'wind']:
gen = GeneratorFluctuating(
id='agg-' + str(la_id) + '-' + '_'.join(
[str(_) for _ in val3['ids']]),
nominal_capacity=val3['capacity'],
weather_cell_id=val3['weather_cell_id'],
type=type,
subtype=subtype,
geom=grid.station.geom,
grid=grid,
v_level=4)
else:
gen = Generator(
id='agg-' + str(la_id) + '-' + '_'.join(
[str(_) for _ in val3['ids']]),
nominal_capacity=val3['capacity'],
type=type,
subtype=subtype,
geom=grid.station.geom,
grid=grid,
v_level=4)
grid.graph.add_node(gen, type='generator_aggr')
# backup reference of geno to LV geno list (save geno
# where the former LV genos are aggregated in)
                    network.dingo_import_data.loc[
                        network.dingo_import_data['id'].isin(val3['ids']),
                        'agg_geno'] = gen
# connect generator to MV station
line = Line(id='line_aggr_generator_la_' + str(la_id) + '_vlevel_{v_level}_'
'{subtype}'.format(
v_level=v_level,
subtype=subtype),
type=aggr_line_type,
kind='cable',
length=1e-3,
grid=grid)
grid.graph.add_edge(grid.station,
gen,
line=line,
type='line_aggr')
for sector, sectoral_load in la['load'].items():
load = Load(
geom=grid.station.geom,
consumption={sector: sectoral_load},
grid=grid,
id='_'.join(['Load_aggregated', sector, repr(grid), str(la_id)]))
grid.graph.add_node(load, type='load')
# connect aggregated load to MV station
line = Line(id='_'.join(['line_aggr_load_la_' + str(la_id), sector, str(la_id)]),
type=aggr_line_type,
kind='cable',
length=1e-3,
grid=grid)
grid.graph.add_edge(grid.station,
load,
line=line,
type='line_aggr')
|
python
|
{
"resource": ""
}
|
q16553
|
_validate_ding0_grid_import
|
train
|
def _validate_ding0_grid_import(mv_grid, ding0_mv_grid, lv_grid_mapping):
"""Cross-check imported data with original data source
Parameters
----------
mv_grid: MVGrid
eDisGo MV grid instance
ding0_mv_grid: MVGridDing0
Ding0 MV grid instance
lv_grid_mapping: dict
Translates Ding0 LV grids to associated, newly created eDisGo LV grids
"""
# Check number of components in MV grid
_validate_ding0_mv_grid_import(mv_grid, ding0_mv_grid)
# Check number of components in LV grid
_validate_ding0_lv_grid_import(mv_grid.lv_grids, ding0_mv_grid,
lv_grid_mapping)
# Check cumulative load and generation in MV grid district
_validate_load_generation(mv_grid, ding0_mv_grid)
|
python
|
{
"resource": ""
}
|
q16554
|
import_generators
|
train
|
def import_generators(network, data_source=None, file=None):
"""Import generator data from source.
The generator data include
* nom. capacity
* type ToDo: specify!
* timeseries
Additional data which can be processed (e.g. used in OEDB data) are
* location
* type
* subtype
* capacity
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
data_source: :obj:`str`
Data source. Supported sources:
* 'oedb'
file: :obj:`str`
File to import data from, required when using file-based sources.
"""
if data_source == 'oedb':
logging.warning('Right now only solar and wind generators can be '
'imported from the oedb.')
_import_genos_from_oedb(network=network)
network.mv_grid._weather_cells = None
if network.pypsa is not None:
pypsa_io.update_pypsa_generator_import(network)
elif data_source == 'pypsa':
_import_genos_from_pypsa(network=network, file=file)
else:
logger.error("Invalid option {} for generator import. Must either be "
"'oedb' or 'pypsa'.".format(data_source))
raise ValueError('The option you specified is not supported.')
|
python
|
{
"resource": ""
}
|
q16555
|
_build_generator_list
|
train
|
def _build_generator_list(network):
"""Builds DataFrames with all generators in MV and LV grids
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to MV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to LV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to aggregated LV generators
"""
    genos_mv = pd.DataFrame(columns=('id', 'obj'))
    genos_lv = pd.DataFrame(columns=('id', 'obj'))
    genos_lv_agg = pd.DataFrame(columns=('la_id', 'id', 'obj'))
# MV genos
for geno in network.mv_grid.graph.nodes_by_attribute('generator'):
genos_mv.loc[len(genos_mv)] = [int(geno.id), geno]
for geno in network.mv_grid.graph.nodes_by_attribute('generator_aggr'):
la_id = int(geno.id.split('-')[1].split('_')[-1])
genos_lv_agg.loc[len(genos_lv_agg)] = [la_id, geno.id, geno]
# LV genos
for lv_grid in network.mv_grid.lv_grids:
for geno in lv_grid.generators:
genos_lv.loc[len(genos_lv)] = [int(geno.id), geno]
return genos_mv, genos_lv, genos_lv_agg
|
python
|
{
"resource": ""
}
|
q16556
|
_build_lv_grid_dict
|
train
|
def _build_lv_grid_dict(network):
"""Creates dict of LV grids
LV grid ids are used as keys, LV grid references as values.
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
Returns
-------
:obj:`dict`
Format: {:obj:`int`: :class:`~.grid.grids.LVGrid`}
"""
lv_grid_dict = {}
for lv_grid in network.mv_grid.lv_grids:
lv_grid_dict[lv_grid.id] = lv_grid
return lv_grid_dict
|
python
|
{
"resource": ""
}
|
q16557
|
import_feedin_timeseries
|
train
|
def import_feedin_timeseries(config_data, weather_cell_ids):
"""
Import RES feed-in time series data and process
Parameters
----------
config_data : dict
Dictionary containing config data from config files.
    weather_cell_ids : :obj:`list`
        List of weather cell ids (integers) to obtain feed-in data for.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Feedin time series
"""
def _retrieve_timeseries_from_oedb(config_data, weather_cell_ids):
"""Retrieve time series from oedb
Parameters
----------
config_data : dict
Dictionary containing config data from config files.
        weather_cell_ids : :obj:`list`
            List of weather cell ids (integers) to obtain feed-in data for.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Feedin time series
"""
if config_data['data_source']['oedb_data_source'] == 'model_draft':
orm_feedin_name = config_data['model_draft']['res_feedin_data']
orm_feedin = model_draft.__getattribute__(orm_feedin_name)
            orm_feedin_version = 1 == 1  # no-op filter, model_draft is unversioned
else:
orm_feedin_name = config_data['versioned']['res_feedin_data']
orm_feedin = supply.__getattribute__(orm_feedin_name)
orm_feedin_version = orm_feedin.version == config_data['versioned']['version']
conn = connection(section=config_data['db_connection']['section'])
Session = sessionmaker(bind=conn)
session = Session()
# ToDo: add option to retrieve subset of time series
# ToDo: find the reference power class for mvgrid/w_id and insert instead of 4
feedin_sqla = session.query(
orm_feedin.w_id,
orm_feedin.source,
orm_feedin.feedin). \
filter(orm_feedin.w_id.in_(weather_cell_ids)). \
filter(orm_feedin.power_class.in_([0, 4])). \
filter(orm_feedin_version)
feedin = pd.read_sql_query(feedin_sqla.statement,
session.bind,
index_col=['source', 'w_id'])
feedin.sort_index(axis=0, inplace=True)
timeindex = pd.date_range('1/1/2011', periods=8760, freq='H')
recasted_feedin_dict = {}
for type_w_id in feedin.index:
recasted_feedin_dict[type_w_id] = feedin.loc[
type_w_id, :].values[0]
feedin = pd.DataFrame(recasted_feedin_dict, index=timeindex)
        # rename 'wind_onshore' to 'wind'
new_level = [_ if _ not in ['wind_onshore']
else 'wind' for _ in feedin.columns.levels[0]]
feedin.columns.set_levels(new_level, level=0, inplace=True)
feedin.columns.rename('type', level=0, inplace=True)
feedin.columns.rename('weather_cell_id', level=1, inplace=True)
return feedin
feedin = _retrieve_timeseries_from_oedb(config_data, weather_cell_ids)
return feedin
|
python
|
{
"resource": ""
}
|
q16558
|
import_load_timeseries
|
train
|
def import_load_timeseries(config_data, data_source, mv_grid_id=None,
year=None):
"""
Import load time series
Parameters
----------
config_data : dict
Dictionary containing config data from config files.
    data_source : str
        Specify type of data source. Available data sources are

         * 'demandlib'
            Determine a load time series with the use of the demandlib.
            This calculates standard load profiles for 4 different sectors.
         * 'oedb'
            Retrieve load time series from the oedb (currently not a valid
            option, see notes in `_import_load_timeseries_from_oedb`).
mv_grid_id : :obj:`str`
MV grid ID as used in oedb. Provide this if `data_source` is 'oedb'.
Default: None.
year : int
Year for which to generate load time series. Provide this if
`data_source` is 'demandlib'. Default: None.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Load time series
"""
def _import_load_timeseries_from_oedb(config_data, mv_grid_id):
"""
Retrieve load time series from oedb
Parameters
----------
config_data : dict
Dictionary containing config data from config files.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Load time series
Notes
------
This is currently not a valid option to retrieve load time series
since time series in the oedb are not differentiated by sector. An
issue concerning this has been created.
"""
if config_data['versioned']['version'] == 'model_draft':
orm_load_name = config_data['model_draft']['load_data']
orm_load = model_draft.__getattribute__(orm_load_name)
orm_load_areas_name = config_data['model_draft']['load_areas']
orm_load_areas = model_draft.__getattribute__(orm_load_areas_name)
            orm_load_version = 1 == 1  # no-op filter, model_draft is unversioned
else:
orm_load_name = config_data['versioned']['load_data']
# orm_load = supply.__getattribute__(orm_load_name)
# ToDo: remove workaround
orm_load = model_draft.__getattribute__(orm_load_name)
# orm_load_version = orm_load.version == config.data['versioned']['version']
orm_load_areas_name = config_data['versioned']['load_areas']
# orm_load_areas = supply.__getattribute__(orm_load_areas_name)
# ToDo: remove workaround
orm_load_areas = model_draft.__getattribute__(orm_load_areas_name)
# orm_load_areas_version = orm_load.version == config.data['versioned']['version']
orm_load_version = 1 == 1
conn = connection(section=config_data['db_connection']['section'])
Session = sessionmaker(bind=conn)
session = Session()
load_sqla = session.query( # orm_load.id,
orm_load.p_set,
orm_load.q_set,
orm_load_areas.subst_id). \
join(orm_load_areas, orm_load.id == orm_load_areas.otg_id). \
filter(orm_load_areas.subst_id == mv_grid_id). \
filter(orm_load_version). \
distinct()
load = pd.read_sql_query(load_sqla.statement,
session.bind,
index_col='subst_id')
return load
def _load_timeseries_demandlib(config_data, year):
"""
Get normalized sectoral load time series
Time series are normalized to 1 kWh consumption per year
Parameters
----------
config_data : dict
Dictionary containing config data from config files.
year : int
Year for which to generate load time series.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Load time series
"""
sectoral_consumption = {'h0': 1, 'g0': 1, 'i0': 1, 'l0': 1}
cal = Germany()
holidays = dict(cal.holidays(year))
e_slp = bdew.ElecSlp(year, holidays=holidays)
# multiply given annual demand with timeseries
elec_demand = e_slp.get_profile(sectoral_consumption)
# Add the slp for the industrial group
ilp = profiles.IndustrialLoadProfile(e_slp.date_time_index,
holidays=holidays)
# Beginning and end of workday, weekdays and weekend days, and scaling
# factors by default
elec_demand['i0'] = ilp.simple_profile(
sectoral_consumption['i0'],
am=datetime.time(config_data['demandlib']['day_start'].hour,
config_data['demandlib']['day_start'].minute, 0),
pm=datetime.time(config_data['demandlib']['day_end'].hour,
config_data['demandlib']['day_end'].minute, 0),
profile_factors=
{'week': {'day': config_data['demandlib']['week_day'],
'night': config_data['demandlib']['week_night']},
'weekend': {'day': config_data['demandlib']['weekend_day'],
'night': config_data['demandlib']['weekend_night']}})
        # Resample 15-minute values to hourly mean values
elec_demand = elec_demand.resample('H').mean()
return elec_demand
    if data_source == 'oedb':
        load = _import_load_timeseries_from_oedb(config_data, mv_grid_id)
    elif data_source == 'demandlib':
        load = _load_timeseries_demandlib(config_data, year)
        load.rename(columns={'g0': 'retail', 'h0': 'residential',
                             'l0': 'agricultural', 'i0': 'industrial'},
                    inplace=True)
    else:
        raise ValueError(
            'Invalid option {} for `data_source`.'.format(data_source))
return load
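# --- Illustrative usage (assumes a valid eDisGo config dict; not runnable
# standalone since it needs the demandlib and its BDEW profile data):
# load = import_load_timeseries(config_data, data_source='demandlib', year=2011)
# load[['residential', 'retail']].head()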
|
python
|
{
"resource": ""
}
|
q16559
|
feedin_proportional
|
train
|
def feedin_proportional(feedin, generators, curtailment_timeseries, edisgo,
curtailment_key, **kwargs):
"""
Implements curtailment methodology 'feedin-proportional'.
The curtailment that has to be met in each time step is allocated
equally to all generators depending on their share of total
feed-in in that time step.
Parameters
----------
feedin : :pandas:`pandas.DataFrame<dataframe>`
Dataframe holding the feed-in of each generator in kW for the
technology (and weather cell) specified in `curtailment_key` parameter.
Index of the dataframe is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`. Columns are the
representatives of the fluctuating generators.
generators : :pandas:`pandas.DataFrame<dataframe>`
Dataframe with all generators of the type (and in weather cell)
specified in `curtailment_key` parameter. See return value of
:func:`edisgo.grid.tools.get_gen_info` for more information.
curtailment_timeseries : :pandas:`pandas.Series<series>`
The curtailment in kW to be distributed amongst the generators in
`generators` parameter. Index of the series is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`.
edisgo : :class:`edisgo.grid.network.EDisGo`
curtailment_key::obj:`str` or :obj:`tuple` with :obj:`str`
The technology and weather cell ID if :obj:`tuple` or only
the technology if :obj:`str` the curtailment is specified for.
"""
# calculate curtailment in each time step of each generator
curtailment = feedin.divide(feedin.sum(axis=1), axis=0). \
multiply(curtailment_timeseries, axis=0)
    # replace NaNs resulting from division by zero (time steps without
    # feed-in) with 0
curtailment.fillna(0, inplace=True)
# check if curtailment target was met
_check_curtailment_target(curtailment, curtailment_timeseries,
curtailment_key)
# assign curtailment to individual generators
_assign_curtailment(curtailment, edisgo, generators, curtailment_key)
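# --- Illustrative sketch of the allocation rule only (toy numbers, no grid
# objects): each generator is curtailed according to its feed-in share.
import pandas as pd

feedin = pd.DataFrame({'gen_1': [60.0, 30.0], 'gen_2': [40.0, 10.0]})
curtailment_target = pd.Series([10.0, 4.0])
curtailment = feedin.divide(feedin.sum(axis=1), axis=0).multiply(
    curtailment_target, axis=0)
print(curtailment.values.tolist())  # [[6.0, 4.0], [3.0, 1.0]]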
|
python
|
{
"resource": ""
}
|
q16560
|
_check_curtailment_target
|
train
|
def _check_curtailment_target(curtailment, curtailment_target,
curtailment_key):
"""
Raises an error if curtailment target was not met in any time step.
Parameters
-----------
    curtailment : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing the curtailment in kW per generator and time step.
Index is a :pandas:`pandas.DatetimeIndex<datetimeindex>`, columns are
the generator representatives.
curtailment_target : :pandas:`pandas.Series<series>`
The curtailment in kW that was to be distributed amongst the
generators. Index of the series is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`.
curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
The technology and weather cell ID if :obj:`tuple` or only
the technology if :obj:`str` the curtailment was specified for.
"""
if not (abs(curtailment.sum(axis=1) - curtailment_target) < 1e-1).all():
message = 'Curtailment target not met for {}.'.format(curtailment_key)
logging.error(message)
        raise ValueError(message)
|
python
|
{
"resource": ""
}
|
q16561
|
_assign_curtailment
|
train
|
def _assign_curtailment(curtailment, edisgo, generators, curtailment_key):
"""
Helper function to write curtailment time series to generator objects.
This function also writes a list of the curtailed generators to curtailment
in :class:`edisgo.grid.network.TimeSeries` and
:class:`edisgo.grid.network.Results`.
Parameters
----------
curtailment : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing the curtailment in kW per generator and time step
for all generators of the type (and in weather cell) specified in
`curtailment_key` parameter. Index is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`, columns are the
generator representatives.
edisgo : :class:`edisgo.grid.network.EDisGo`
generators : :pandas:`pandas.DataFrame<dataframe>`
Dataframe with all generators of the type (and in weather cell)
specified in `curtailment_key` parameter. See return value of
:func:`edisgo.grid.tools.get_gen_info` for more information.
curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
The technology and weather cell ID if :obj:`tuple` or only
the technology if :obj:`str` the curtailment is specified for.
"""
gen_object_list = []
for gen in curtailment.columns:
# get generator object from representative
gen_object = generators.loc[generators.gen_repr == gen].index[0]
# assign curtailment to individual generators
gen_object.curtailment = curtailment.loc[:, gen]
gen_object_list.append(gen_object)
# set timeseries.curtailment
if edisgo.network.timeseries._curtailment:
edisgo.network.timeseries._curtailment.extend(gen_object_list)
edisgo.network.results._curtailment[curtailment_key] = \
gen_object_list
else:
edisgo.network.timeseries._curtailment = gen_object_list
# list needs to be copied, otherwise it will be extended every time
# a new key is added to results._curtailment
edisgo.network.results._curtailment = \
{curtailment_key: gen_object_list.copy()}
|
python
|
{
"resource": ""
}
|
q16562
|
add_basemap
|
train
|
def add_basemap(ax, zoom=12):
"""
    Adds a background map (web tiles) to a plot. Axis limits are expected in
    Web Mercator (EPSG:3857) coordinates.
"""
url = ctx.sources.ST_TONER_LITE
xmin, xmax, ymin, ymax = ax.axis()
basemap, extent = ctx.bounds2img(xmin, ymin, xmax, ymax,
zoom=zoom, url=url)
ax.imshow(basemap, extent=extent, interpolation='bilinear')
# restore original x/y limits
ax.axis((xmin, xmax, ymin, ymax))
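# --- Illustrative usage (assumes contextily's legacy bounds2img API and axis
# limits in Web Mercator, e.g. from a GeoDataFrame reprojected to EPSG:3857):
# fig, ax = plt.subplots()
# region.to_crs(epsg=3857).plot(ax=ax)
# add_basemap(ax, zoom=10)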
|
python
|
{
"resource": ""
}
|
q16563
|
get_grid_district_polygon
|
train
|
def get_grid_district_polygon(config, subst_id=None, projection=4326):
"""
Get MV grid district polygon from oedb for plotting.
"""
# make DB session
conn = connection(section=config['db_connection']['section'])
Session = sessionmaker(bind=conn)
session = Session()
    # get polygon from versioned schema
    if config['data_source']['oedb_data_source'] == 'versioned':
        version = config['versioned']['version']
        query = session.query(EgoDpMvGriddistrict.subst_id,
                              EgoDpMvGriddistrict.geom)
        regions = [(sid, shape.to_shape(geom)) for sid, geom in
                   query.filter(EgoDpMvGriddistrict.version == version,
                                EgoDpMvGriddistrict.subst_id == subst_id).all()]
    # get polygon from model_draft
    else:
        query = session.query(EgoGridMvGriddistrict.subst_id,
                              EgoGridMvGriddistrict.geom)
        regions = [(sid, shape.to_shape(geom)) for sid, geom in
                   query.filter(EgoGridMvGriddistrict.subst_id.in_(
                       subst_id)).all()]
    crs = {'init': 'epsg:3035'}
    region = gpd.GeoDataFrame(
        regions, columns=['subst_id', 'geometry'], crs=crs)
region = region.to_crs(epsg=projection)
return region
|
python
|
{
"resource": ""
}
|
q16564
|
Load.timeseries
|
train
|
def timeseries(self):
"""
Load time series
It returns the actual time series used in power flow analysis. If
:attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise,
:meth:`timeseries()` looks for time series of the according sector in
:class:`~.grid.network.TimeSeries` object.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
        DataFrame containing active power in kW in column 'p' and
        reactive power in kvar in column 'q'.
"""
if self._timeseries is None:
if isinstance(self.grid, MVGrid):
voltage_level = 'mv'
elif isinstance(self.grid, LVGrid):
voltage_level = 'lv'
ts_total = None
for sector in self.consumption.keys():
consumption = self.consumption[sector]
# check if load time series for MV and LV are differentiated
try:
ts = self.grid.network.timeseries.load[
sector, voltage_level].to_frame('p')
except KeyError:
try:
ts = self.grid.network.timeseries.load[
sector].to_frame('p')
except KeyError:
logger.exception(
"No timeseries for load of type {} "
"given.".format(sector))
raise
ts = ts * consumption
ts_q = self.timeseries_reactive
if ts_q is not None:
ts['q'] = ts_q.q
else:
ts['q'] = ts['p'] * self.q_sign * tan(
acos(self.power_factor))
if ts_total is None:
ts_total = ts
else:
ts_total.p += ts.p
ts_total.q += ts.q
return ts_total
else:
return self._timeseries
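# --- Illustrative sketch of the reactive power fallback above: with a fixed
# power factor, q follows from p via q = p * q_sign * tan(arccos(pf)).
from math import acos, tan

p = 100.0            # active power in kW
power_factor = 0.95  # assumed inductive load, q_sign = 1
q = p * 1 * tan(acos(power_factor))
print(round(q, 1))   # 32.9 kvar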
|
python
|
{
"resource": ""
}
|
q16565
|
Load.peak_load
|
train
|
def peak_load(self):
"""
Get sectoral peak load
"""
peak_load = pd.Series(self.consumption).mul(pd.Series(
self.grid.network.config['peakload_consumption_ratio']).astype(
float), fill_value=0)
return peak_load
|
python
|
{
"resource": ""
}
|
q16566
|
Load.power_factor
|
train
|
def power_factor(self):
"""
Power factor of load
Parameters
-----------
power_factor : :obj:`float`
Ratio of real power to apparent power.
Returns
--------
:obj:`float`
Ratio of real power to apparent power. If power factor is not set
it is retrieved from the network config object depending on the
grid level the load is in.
"""
if self._power_factor is None:
if isinstance(self.grid, MVGrid):
self._power_factor = self.grid.network.config[
'reactive_power_factor']['mv_load']
elif isinstance(self.grid, LVGrid):
self._power_factor = self.grid.network.config[
'reactive_power_factor']['lv_load']
return self._power_factor
|
python
|
{
"resource": ""
}
|
q16567
|
Load.reactive_power_mode
|
train
|
def reactive_power_mode(self):
"""
Power factor mode of Load.
This information is necessary to make the load behave in an inductive
or capacitive manner. Essentially this changes the sign of the reactive
power.
The convention used here in a load is that:
- when `reactive_power_mode` is 'inductive' then Q is positive
- when `reactive_power_mode` is 'capacitive' then Q is negative
Parameters
----------
reactive_power_mode : :obj:`str` or None
Possible options are 'inductive', 'capacitive' and
'not_applicable'. In the case of 'not_applicable' a reactive
power time series must be given.
Returns
-------
:obj:`str`
In the case that this attribute is not set, it is retrieved from
the network config object depending on the voltage level the load
is in.
"""
if self._reactive_power_mode is None:
if isinstance(self.grid, MVGrid):
self._reactive_power_mode = self.grid.network.config[
'reactive_power_mode']['mv_load']
elif isinstance(self.grid, LVGrid):
self._reactive_power_mode = self.grid.network.config[
'reactive_power_mode']['lv_load']
return self._reactive_power_mode
|
python
|
{
"resource": ""
}
|
q16568
|
Storage.timeseries
|
train
|
def timeseries(self):
"""
Time series of storage operation
Parameters
----------
ts : :pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power the storage is charged (negative)
and discharged (positive) with (on the grid side) in kW in column
'p' and reactive power in kvar in column 'q'. When 'q' is positive,
reactive power is supplied (behaving as a capacitor) and when 'q'
is negative reactive power is consumed (behaving as an inductor).
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
        See parameter `ts`.
"""
# check if time series for reactive power is given, otherwise
# calculate it
if 'q' in self._timeseries.columns:
return self._timeseries
else:
self._timeseries['q'] = abs(self._timeseries.p) * self.q_sign * \
tan(acos(self.power_factor))
return self._timeseries.loc[
self.grid.network.timeseries.timeindex, :]
|
python
|
{
"resource": ""
}
|
q16569
|
MVDisconnectingPoint.open
|
train
|
def open(self):
"""Toggle state to open switch disconnector"""
if self._state != 'open':
if self._line is not None:
self._state = 'open'
self._nodes = self.grid.graph.nodes_from_line(self._line)
self.grid.graph.remove_edge(
self._nodes[0], self._nodes[1])
else:
raise ValueError('``line`` is not set')
|
python
|
{
"resource": ""
}
|
q16570
|
MVDisconnectingPoint.close
|
train
|
def close(self):
    """Toggle state to closed switch disconnector

    Note that `open` has to have been called before so that the line and its
    adjacent nodes are known.
    """
    self._state = 'closed'
    self.grid.graph.add_edge(
        self._nodes[0], self._nodes[1], line=self._line)
|
python
|
{
"resource": ""
}
|
q16571
|
wrap_context
|
train
|
def wrap_context(func):
"""Wraps the provided servicer method by passing a wrapped context
The context is wrapped using `lookout.sdk.grpc.log_fields.LogFieldsContext`.
    :param func: the servicer method to wrap
:returns: the wrapped servicer method
"""
@functools.wraps(func)
def wrapper(self, request, context):
return func(self, request, LogFieldsContext(context))
return wrapper
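# --- Illustrative usage (hypothetical servicer class, following the pattern
# this decorator is written for):
# class AnalyzerServicer(pb.AnalyzerServicer):
#     @wrap_context
#     def NotifyReviewEvent(self, request, context):
#         # `context` arrives wrapped as a LogFieldsContext
#         ...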
|
python
|
{
"resource": ""
}
|
q16572
|
LogFieldsContext.pack_metadata
|
train
|
def pack_metadata(self) -> List[Tuple[str, Any]]:
"""Packs the log fields and the invocation metadata into a new metadata
The log fields are added in the new metadata with the key
`LOG_FIELDS_KEY_META`.
"""
metadata = [(k, v) for k, v in self._invocation_metadata.items()
if k != LOG_FIELDS_KEY_META]
metadata.append((LOG_FIELDS_KEY_META, self._log_fields.dumps()))
return metadata
|
python
|
{
"resource": ""
}
|
q16573
|
LogFields.from_metadata
|
train
|
def from_metadata(cls, metadata: Dict[str, Any]) -> 'LogFields':
"""Initialize the log fields from the provided metadata
The log fields are taken from the `LOG_FIELDS_KEY_META` key of the
provided metadata.
"""
return cls(fields=json.loads(metadata.get(LOG_FIELDS_KEY_META, '{}')))
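# --- Illustrative round trip (assumes LOG_FIELDS_KEY_META as used above and a
# JSON-serializing dumps() on LogFields):
# metadata = dict(some_context.pack_metadata())
# fields = LogFields.from_metadata(metadata)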
|
python
|
{
"resource": ""
}
|
q16574
|
EDisGoReimport.plot_mv_voltages
|
train
|
def plot_mv_voltages(self, **kwargs):
"""
Plots voltages in MV grid on grid topology plot.
For more information see :func:`edisgo.tools.plots.mv_grid_topology`.
"""
if self.network.pypsa is not None:
try:
v_res = self.network.results.v_res()
        except Exception:
logging.warning("Voltages `pfa_v_mag_pu` from power flow "
"analysis must be available to plot them.")
return
plots.mv_grid_topology(
self.network.pypsa, self.network.config,
timestep=kwargs.get('timestep', None),
node_color='voltage',
filename=kwargs.get('filename', None),
grid_district_geom=kwargs.get('grid_district_geom', True),
background_map=kwargs.get('background_map', True),
voltage=v_res,
limits_cb_nodes=kwargs.get('limits_cb_nodes', None),
xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None),
title=kwargs.get('title', ''))
else:
logging.warning("pypsa representation of MV grid needed to "
"plot voltages.")
|
python
|
{
"resource": ""
}
|
q16575
|
EDisGoReimport.plot_mv_grid_expansion_costs
|
train
|
def plot_mv_grid_expansion_costs(self, **kwargs):
"""
Plots costs per MV line.
For more information see :func:`edisgo.tools.plots.mv_grid_topology`.
"""
if self.network.pypsa is not None and \
self.network.results.grid_expansion_costs is not None:
if isinstance(self, EDisGo):
# convert index of grid expansion costs to str
grid_expansion_costs = \
self.network.results.grid_expansion_costs.reset_index()
grid_expansion_costs['index'] = \
grid_expansion_costs['index'].apply(lambda _: repr(_))
grid_expansion_costs.set_index('index', inplace=True)
else:
grid_expansion_costs = \
self.network.results.grid_expansion_costs
plots.mv_grid_topology(
self.network.pypsa, self.network.config,
line_color='expansion_costs',
grid_expansion_costs=grid_expansion_costs,
filename=kwargs.get('filename', None),
grid_district_geom=kwargs.get('grid_district_geom', True),
background_map=kwargs.get('background_map', True),
limits_cb_lines=kwargs.get('limits_cb_lines', None),
xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None),
lines_cmap=kwargs.get('lines_cmap', 'inferno_r'),
title=kwargs.get('title', ''),
scaling_factor_line_width=kwargs.get(
'scaling_factor_line_width', None)
)
else:
if self.network.pypsa is None:
logging.warning("pypsa representation of MV grid needed to "
"plot grid expansion costs.")
if self.network.results.grid_expansion_costs is None:
logging.warning("Grid expansion cost results needed to plot "
"them.")
|
python
|
{
"resource": ""
}
|
q16576
|
EDisGoReimport.plot_mv_storage_integration
|
train
|
def plot_mv_storage_integration(self, **kwargs):
"""
Plots storage position in MV grid of integrated storages.
For more information see :func:`edisgo.tools.plots.mv_grid_topology`.
"""
if self.network.pypsa is not None:
plots.mv_grid_topology(
self.network.pypsa, self.network.config,
node_color='storage_integration',
filename=kwargs.get('filename', None),
grid_district_geom=kwargs.get('grid_district_geom', True),
background_map=kwargs.get('background_map', True),
xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None),
title=kwargs.get('title', ''))
    else:
        logging.warning("pypsa representation of MV grid needed to "
                        "plot storage integration in MV grid.")
|
python
|
{
"resource": ""
}
|
q16577
|
EDisGoReimport.histogram_voltage
|
train
|
def histogram_voltage(self, timestep=None, title=True, **kwargs):
"""
Plots histogram of voltages.
For more information see :func:`edisgo.tools.plots.histogram`.
Parameters
----------
timestep : :pandas:`pandas.Timestamp<timestamp>` or None, optional
        Specifies time step histogram is plotted for. If timestep is None,
        all time steps for which voltages were calculated are used.
        Default: None.
title : :obj:`str` or :obj:`bool`, optional
Title for plot. If True title is auto generated. If False plot has
no title. If :obj:`str`, the provided title is used. Default: True.
"""
data = self.network.results.v_res()
if title is True:
if timestep is not None:
title = "Voltage histogram for time step {}".format(timestep)
else:
title = "Voltage histogram \nfor time steps {} to {}".format(
data.index[0], data.index[-1])
elif title is False:
title = None
plots.histogram(data=data, title=title, timeindex=timestep, **kwargs)
|
python
|
{
"resource": ""
}
|
q16578
|
EDisGoReimport.histogram_relative_line_load
|
train
|
def histogram_relative_line_load(self, timestep=None, title=True,
voltage_level='mv_lv', **kwargs):
"""
Plots histogram of relative line loads.
For more information see :func:`edisgo.tools.plots.histogram`.
    Parameters
    ----------
    timestep : :pandas:`pandas.Timestamp<timestamp>` or None, optional
        Specifies time step histogram is plotted for. If timestep is None,
        all time steps for which line loads were calculated are used.
        Default: None.
    title : :obj:`str` or :obj:`bool`, optional
        Title for plot. If True title is auto generated. If False plot has
        no title. If :obj:`str`, the provided title is used. Default: True.
    voltage_level : :obj:`str`
        Specifies which voltage level to plot the line load histogram for.
        Possible options are 'mv', 'lv' and 'mv_lv'. 'mv_lv' is also the
        fallback option in case of wrong input. Default: 'mv_lv'
"""
residual_load = tools.get_residual_load_from_pypsa_network(
self.network.pypsa)
case = residual_load.apply(
lambda _: 'feedin_case' if _ < 0 else 'load_case')
if timestep is not None:
timeindex = [timestep]
else:
timeindex = self.network.results.s_res().index
load_factor = pd.DataFrame(
data={'s_nom': [float(self.network.config[
'grid_expansion_load_factors'][
'mv_{}_line'.format(case.loc[_])])
for _ in timeindex]},
index=timeindex)
if voltage_level == 'mv':
lines = self.network.pypsa.lines.loc[
self.network.pypsa.lines.v_nom > 1]
elif voltage_level == 'lv':
lines = self.network.pypsa.lines.loc[
self.network.pypsa.lines.v_nom < 1]
else:
lines = self.network.pypsa.lines
s_res = self.network.results.s_res().loc[
timeindex, lines.index]
    # get allowed line load (load factor times nominal apparent power of the
    # selected lines)
    s_allowed = load_factor.dot(
        lines.s_nom.to_frame().T * 1e3)
    # relative line load from power flow results
    data = s_res.divide(s_allowed)
if title is True:
if timestep is not None:
title = "Relative line load histogram for time step {}".format(
timestep)
else:
title = "Relative line load histogram \nfor time steps " \
"{} to {}".format(data.index[0], data.index[-1])
elif title is False:
title = None
plots.histogram(data=data, title=title, **kwargs)
|
python
|
{
"resource": ""
}
|
q16579
|
EDisGo.curtail
|
train
|
def curtail(self, methodology, curtailment_timeseries, **kwargs):
"""
Sets up curtailment time series.
Curtailment time series are written into
:class:`~.grid.network.TimeSeries`. See
:class:`~.grid.network.CurtailmentControl` for more information on
parameters and methodologies.
"""
CurtailmentControl(edisgo=self, methodology=methodology,
curtailment_timeseries=curtailment_timeseries,
**kwargs)
|
python
|
{
"resource": ""
}
|
q16580
|
EDisGo.import_from_ding0
|
train
|
def import_from_ding0(self, file, **kwargs):
"""Import grid data from DINGO file
For details see
:func:`edisgo.data.import_data.import_from_ding0`
"""
import_from_ding0(file=file, network=self.network)
|
python
|
{
"resource": ""
}
|
q16581
|
EDisGo.reinforce
|
train
|
def reinforce(self, **kwargs):
"""
Reinforces the grid and calculates grid expansion costs.
See :meth:`edisgo.flex_opt.reinforce_grid` for more information.
"""
results = reinforce_grid(
self, max_while_iterations=kwargs.get(
'max_while_iterations', 10),
copy_graph=kwargs.get('copy_graph', False),
timesteps_pfa=kwargs.get('timesteps_pfa', None),
combined_analysis=kwargs.get('combined_analysis', False))
# add measure to Results object
if not kwargs.get('copy_graph', False):
self.network.results.measures = 'grid_expansion'
return results
|
python
|
{
"resource": ""
}
|
q16582
|
EDisGo.integrate_storage
|
train
|
def integrate_storage(self, timeseries, position, **kwargs):
"""
Integrates storage into grid.
See :class:`~.grid.network.StorageControl` for more information.
"""
StorageControl(edisgo=self, timeseries=timeseries,
position=position, **kwargs)
|
python
|
{
"resource": ""
}
|
q16583
|
Network._load_equipment_data
|
train
|
def _load_equipment_data(self):
"""Load equipment data for transformers, cables etc.
Returns
-------
:obj:`dict` of :pandas:`pandas.DataFrame<dataframe>`
"""
package_path = edisgo.__path__[0]
equipment_dir = self.config['system_dirs']['equipment_dir']
data = {}
equipment = {'mv': ['trafos', 'lines', 'cables'],
'lv': ['trafos', 'cables']}
for voltage_level, eq_list in equipment.items():
for i in eq_list:
equipment_parameters = self.config['equipment'][
'equipment_{}_parameters_{}'.format(voltage_level, i)]
data['{}_{}'.format(voltage_level, i)] = pd.read_csv(
os.path.join(package_path, equipment_dir,
equipment_parameters),
comment='#', index_col='name',
delimiter=',', decimal='.')
return data
|
python
|
{
"resource": ""
}
|
q16584
|
Config._load_config
|
train
|
def _load_config(config_path=None):
"""
Load config files.
Parameters
-----------
config_path : None or :obj:`str` or dict
See class definition for more information.
Returns
-------
:obj:`collections.OrderedDict`
eDisGo configuration data from config files.
"""
config_files = ['config_db_tables', 'config_grid',
'config_grid_expansion', 'config_timeseries']
# load configs
if isinstance(config_path, dict):
for conf in config_files:
config.load_config(filename='{}.cfg'.format(conf),
config_dir=config_path[conf],
copy_default_config=False)
else:
for conf in config_files:
config.load_config(filename='{}.cfg'.format(conf),
config_dir=config_path)
config_dict = config.cfg._sections
# convert numeric values to float
for sec, subsecs in config_dict.items():
for subsec, val in subsecs.items():
# try str -> float conversion
try:
config_dict[sec][subsec] = float(val)
            except (ValueError, TypeError):
pass
# convert to time object
config_dict['demandlib']['day_start'] = datetime.datetime.strptime(
config_dict['demandlib']['day_start'], "%H:%M")
config_dict['demandlib']['day_start'] = datetime.time(
config_dict['demandlib']['day_start'].hour,
config_dict['demandlib']['day_start'].minute)
config_dict['demandlib']['day_end'] = datetime.datetime.strptime(
config_dict['demandlib']['day_end'], "%H:%M")
config_dict['demandlib']['day_end'] = datetime.time(
config_dict['demandlib']['day_end'].hour,
config_dict['demandlib']['day_end'].minute)
return config_dict
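# --- Illustrative call (paths are made up): per-file config directories can
# be passed as a dict keyed by config file name.
# config_dict = _load_config(config_path={
#     'config_db_tables': '/path/a',
#     'config_grid': '/path/b',
#     'config_grid_expansion': '/path/c',
#     'config_timeseries': '/path/d'})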
|
python
|
{
"resource": ""
}
|
q16585
|
TimeSeriesControl._check_timeindex
|
train
|
def _check_timeindex(self):
"""
Check function to check if all feed-in and load time series contain
values for the specified time index.
"""
try:
self.timeseries.generation_fluctuating
self.timeseries.generation_dispatchable
self.timeseries.load
self.timeseries.generation_reactive_power
self.timeseries.load_reactive_power
    except Exception:
message = 'Time index of feed-in and load time series does ' \
'not match.'
logging.error(message)
raise KeyError(message)
|
python
|
{
"resource": ""
}
|
q16586
|
TimeSeriesControl._worst_case_generation
|
train
|
def _worst_case_generation(self, worst_case_scale_factors, modes):
"""
Define worst case generation time series for fluctuating and
dispatchable generators.
Parameters
----------
worst_case_scale_factors : dict
Scale factors defined in config file 'config_timeseries.cfg'.
        Scale factors describe the actual power to nominal power ratio in
        worst-case scenarios.
modes : list
List with worst-cases to generate time series for. Can be
'feedin_case', 'load_case' or both.
"""
self.timeseries.generation_fluctuating = pd.DataFrame(
{'solar': [worst_case_scale_factors[
'{}_feedin_pv'.format(mode)] for mode in modes],
'wind': [worst_case_scale_factors[
'{}_feedin_other'.format(mode)] for mode in modes]},
index=self.timeseries.timeindex)
self.timeseries.generation_dispatchable = pd.DataFrame(
{'other': [worst_case_scale_factors[
'{}_feedin_other'.format(mode)] for mode in modes]},
index=self.timeseries.timeindex)
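# --- Illustrative result (assumed scale factors of 1.0 for feed-in and 0.0
# for load): for modes = ['feedin_case', 'load_case'] the fluctuating frame is
#
#                       solar  wind
#   <feedin timestep>     1.0   1.0
#   <load timestep>       0.0   0.0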
|
python
|
{
"resource": ""
}
|
q16587
|
TimeSeriesControl._worst_case_load
|
train
|
def _worst_case_load(self, worst_case_scale_factors,
peakload_consumption_ratio, modes):
"""
Define worst case load time series for each sector.
Parameters
----------
worst_case_scale_factors : dict
Scale factors defined in config file 'config_timeseries.cfg'.
        Scale factors describe the actual power to nominal power ratio in
        worst-case scenarios.
peakload_consumption_ratio : dict
Ratios of peak load to annual consumption per sector, defined in
config file 'config_timeseries.cfg'
modes : list
List with worst-cases to generate time series for. Can be
'feedin_case', 'load_case' or both.
"""
sectors = ['residential', 'retail', 'industrial', 'agricultural']
lv_power_scaling = np.array(
[worst_case_scale_factors['lv_{}_load'.format(mode)]
for mode in modes])
mv_power_scaling = np.array(
[worst_case_scale_factors['mv_{}_load'.format(mode)]
for mode in modes])
lv = {(sector, 'lv'): peakload_consumption_ratio[sector] *
lv_power_scaling
for sector in sectors}
mv = {(sector, 'mv'): peakload_consumption_ratio[sector] *
mv_power_scaling
for sector in sectors}
self.timeseries.load = pd.DataFrame({**lv, **mv},
index=self.timeseries.timeindex)
|
python
|
{
"resource": ""
}
|
q16588
|
CurtailmentControl._check_timeindex
|
train
|
def _check_timeindex(self, curtailment_timeseries, network):
"""
Raises an error if time index of curtailment time series does not
comply with the time index of load and feed-in time series.
Parameters
-----------
curtailment_timeseries : :pandas:`pandas.Series<series>` or \
:pandas:`pandas.DataFrame<dataframe>`
See parameter `curtailment_timeseries` in class definition for more
information.
"""
if curtailment_timeseries is None:
message = 'No curtailment given.'
logging.error(message)
raise KeyError(message)
try:
curtailment_timeseries.loc[network.timeseries.timeindex]
    except Exception:
message = 'Time index of curtailment time series does not match ' \
'with load and feed-in time series.'
logging.error(message)
raise KeyError(message)
|
python
|
{
"resource": ""
}
|
q16589
|
CurtailmentControl._precheck
|
train
|
def _precheck(self, curtailment_timeseries, feedin_df, curtailment_key):
"""
Raises an error if the curtailment at any time step exceeds the
total feed-in of all generators curtailment can be distributed among
at that time.
Parameters
-----------
curtailment_timeseries : :pandas:`pandas.Series<series>`
Curtailment time series in kW for the technology (and weather
cell) specified in `curtailment_key`.
    feedin_df : :pandas:`pandas.DataFrame<dataframe>`
        Feed-in time series in kW for all generators of type (and in
        weather cell) specified in `curtailment_key`.
curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
Technology (and weather cell) curtailment is given for.
"""
if not feedin_df.empty:
feedin_selected_sum = feedin_df.sum(axis=1)
diff = feedin_selected_sum - curtailment_timeseries
# add tolerance (set small negative values to zero)
diff[diff.between(-1, 0)] = 0
if not (diff >= 0).all():
bad_time_steps = [_ for _ in diff.index
if diff[_] < 0]
message = 'Curtailment demand exceeds total feed-in in time ' \
'steps {}.'.format(bad_time_steps)
logging.error(message)
raise ValueError(message)
else:
bad_time_steps = [_ for _ in curtailment_timeseries.index
if curtailment_timeseries[_] > 0]
if bad_time_steps:
message = 'Curtailment given for time steps {} but there ' \
'are no generators to meet the curtailment target ' \
'for {}.'.format(bad_time_steps, curtailment_key)
logging.error(message)
raise ValueError(message)
|
python
|
{
"resource": ""
}
|
q16590
|
CurtailmentControl._postcheck
|
train
|
def _postcheck(self, network, feedin):
"""
Raises an error if the curtailment of a generator exceeds the
feed-in of that generator at any time step.
Parameters
-----------
network : :class:`~.grid.network.Network`
feedin : :pandas:`pandas.DataFrame<dataframe>`
DataFrame with feed-in time series in kW. Columns of the dataframe
are :class:`~.grid.components.GeneratorFluctuating`, index is
time index.
"""
curtailment = network.timeseries.curtailment
gen_repr = [repr(_) for _ in curtailment.columns]
feedin_repr = feedin.loc[:, gen_repr]
curtailment_repr = curtailment
curtailment_repr.columns = gen_repr
if not ((feedin_repr - curtailment_repr) > -1e-1).all().all():
message = 'Curtailment exceeds feed-in.'
logging.error(message)
        raise ValueError(message)
|
python
|
{
"resource": ""
}
|
q16591
|
StorageControl._integrate_storage
|
train
|
def _integrate_storage(self, timeseries, position, params, voltage_level,
reactive_power_timeseries, **kwargs):
"""
Integrate storage units in the grid.
Parameters
----------
    timeseries : :obj:`str` or :pandas:`pandas.Series<series>`
        Parameter used to obtain time series of active power the storage
        is charged (negative) or discharged (positive) with. Can either be
        a given time series or an operation strategy. See class definition
        for more information.
position : :obj:`str` or :class:`~.grid.components.Station` or :class:`~.grid.components.BranchTee` or :class:`~.grid.components.Generator` or :class:`~.grid.components.Load`
Parameter used to place the storage. See class definition for more
information.
params : :obj:`dict`
Dictionary with storage parameters for one storage. See class
definition for more information on what parameters must be
provided.
voltage_level : :obj:`str` or None
`voltage_level` defines which side of the LV station the storage is
connected to. Valid options are 'lv' and 'mv'. Default: None. See
class definition for more information.
reactive_power_timeseries : :pandas:`pandas.Series<series>` or None
Reactive power time series in kvar (generator sign convention).
Index of the series needs to be a
:pandas:`pandas.DatetimeIndex<datetimeindex>`.
"""
# place storage
params = self._check_nominal_power(params, timeseries)
if isinstance(position, Station) or isinstance(position, BranchTee) \
or isinstance(position, Generator) \
or isinstance(position, Load):
storage = storage_integration.set_up_storage(
node=position, parameters=params, voltage_level=voltage_level)
line = storage_integration.connect_storage(storage, position)
elif isinstance(position, str) \
and position == 'hvmv_substation_busbar':
storage, line = storage_integration.storage_at_hvmv_substation(
self.edisgo.network.mv_grid, params)
elif isinstance(position, str) \
and position == 'distribute_storages_mv':
# check active power time series
if not isinstance(timeseries, pd.Series):
raise ValueError(
"Storage time series needs to be a pandas Series if "
"`position` is 'distribute_storages_mv'.")
else:
timeseries = pd.DataFrame(data={'p': timeseries},
index=timeseries.index)
self._check_timeindex(timeseries)
# check reactive power time series
if reactive_power_timeseries is not None:
self._check_timeindex(reactive_power_timeseries)
timeseries['q'] = reactive_power_timeseries.loc[
timeseries.index]
else:
timeseries['q'] = 0
# start storage positioning method
storage_positioning.one_storage_per_feeder(
edisgo=self.edisgo, storage_timeseries=timeseries,
storage_nominal_power=params['nominal_power'], **kwargs)
return
else:
        message = 'Provided storage position option {} is not ' \
                  'valid.'.format(position)
logging.error(message)
raise KeyError(message)
# implement operation strategy (active power)
if isinstance(timeseries, pd.Series):
timeseries = pd.DataFrame(data={'p': timeseries},
index=timeseries.index)
self._check_timeindex(timeseries)
storage.timeseries = timeseries
elif isinstance(timeseries, str) and timeseries == 'fifty-fifty':
storage_operation.fifty_fifty(self.edisgo.network, storage)
else:
message = 'Provided storage timeseries option {} is not ' \
'valid.'.format(timeseries)
logging.error(message)
raise KeyError(message)
# reactive power
if reactive_power_timeseries is not None:
self._check_timeindex(reactive_power_timeseries)
storage.timeseries = pd.DataFrame(
{'p': storage.timeseries.p,
'q': reactive_power_timeseries.loc[storage.timeseries.index]},
index=storage.timeseries.index)
# update pypsa representation
if self.edisgo.network.pypsa is not None:
pypsa_io.update_pypsa_storage(
self.edisgo.network.pypsa,
storages=[storage], storages_lines=[line])
|
python
|
{
"resource": ""
}
|
q16592
|
StorageControl._check_nominal_power
|
train
|
def _check_nominal_power(self, storage_parameters, timeseries):
"""
Tries to assign a nominal power to the storage.
Checks if nominal power is provided through `storage_parameters`,
otherwise tries to return the absolute maximum of `timeseries`. Raises
an error if it cannot assign a nominal power.
Parameters
----------
timeseries : :obj:`str` or :pandas:`pandas.Series<series>`
See parameter `timeseries` in class definition for more
information.
storage_parameters : :obj:`dict`
See parameter `parameters` in class definition for more
information.
Returns
--------
:obj:`dict`
The given `storage_parameters` is returned extended by an entry for
'nominal_power', if it didn't already have that key.
"""
if storage_parameters.get('nominal_power', None) is None:
try:
storage_parameters['nominal_power'] = max(abs(timeseries))
        except (TypeError, ValueError):
raise ValueError("Could not assign a nominal power to the "
"storage. Please provide either a nominal "
"power or an active power time series.")
return storage_parameters
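# --- Illustrative sketch (toy values): without an explicit rating, the
# storage is sized to the absolute maximum of its dispatch series.
import pandas as pd

ts = pd.Series([-30.0, 10.0, 25.0])
print(max(abs(ts)))  # 30.0 -> inferred nominal power in kW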
|
python
|
{
"resource": ""
}
|
q16593
|
StorageControl._check_timeindex
|
train
|
def _check_timeindex(self, timeseries):
"""
Raises an error if time index of storage time series does not
comply with the time index of load and feed-in time series.
Parameters
-----------
timeseries : :pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power the storage is charged (negative)
and discharged (positive) with in kW in column 'p' and
reactive power in kVA in column 'q'.
"""
try:
timeseries.loc[self.edisgo.network.timeseries.timeindex]
        except KeyError:
            message = 'Time index of storage time series does not match ' \
                      'the time index of load and feed-in time series.'
logging.error(message)
raise KeyError(message)
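The `.loc` lookup fails as soon as any required time step is missing; a small self-contained illustration of the check:
import pandas as pd

full_index = pd.date_range('2011-01-01', periods=4, freq='H')
storage_ts = pd.DataFrame({'p': [10.0, 20.0, 30.0], 'q': [0.0, 0.0, 0.0]},
                          index=full_index[:3])  # one time step short

try:
    storage_ts.loc[full_index]  # same lookup as in the method above
except KeyError:
    print('storage time series does not cover the full time index')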
|
python
|
{
"resource": ""
}
|
q16594
|
Results.curtailment
|
train
|
def curtailment(self):
"""
Holds curtailment assigned to each generator per curtailment target.
Returns
-------
:obj:`dict` with :pandas:`pandas.DataFrame<dataframe>`
Keys of the dictionary are generator types (and weather cell ID)
curtailment targets were given for. E.g. if curtailment is provided
as a :pandas:`pandas.DataFrame<dataframe>` with
            :pandas:`pandas.MultiIndex` columns with levels 'type' and
            'weather cell ID', the dictionary key is a tuple of
            ('type', 'weather_cell_id').
Values of the dictionary are dataframes with the curtailed power in
kW per generator and time step. Index of the dataframe is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`. Columns are the
generators of type
:class:`edisgo.grid.components.GeneratorFluctuating`.
"""
if self._curtailment is not None:
result_dict = {}
for key, gen_list in self._curtailment.items():
curtailment_df = pd.DataFrame()
for gen in gen_list:
curtailment_df[gen] = gen.curtailment
result_dict[key] = curtailment_df
return result_dict
else:
return None
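A self-contained sketch of consuming the returned mapping; the key and column names are illustrative only:
import pandas as pd

idx = pd.date_range('2011-01-01', periods=3, freq='H')
curtailment = {('solar', 1): pd.DataFrame({'Gen_1': [5.0, 0.0, 12.0]},
                                          index=idx)}

for target, df in curtailment.items():
    # total curtailed power per target, summed over generators and steps
    print(target, df.sum().sum())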
|
python
|
{
"resource": ""
}
|
q16595
|
Results.storages
|
train
|
def storages(self):
"""
Gathers relevant storage results.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe containing all storages installed in the MV grid and
LV grids. Index of the dataframe are the storage representatives,
columns are the following:
nominal_power : :obj:`float`
Nominal power of the storage in kW.
            voltage_level : :obj:`str`
                Voltage level the storage is connected to. Can either be 'mv'
                or 'lv'.
            grid_connection_point
                Node the storage is connected to.
"""
grids = [self.network.mv_grid] + list(self.network.mv_grid.lv_grids)
storage_results = {}
storage_results['storage_id'] = []
storage_results['nominal_power'] = []
storage_results['voltage_level'] = []
storage_results['grid_connection_point'] = []
for grid in grids:
for storage in grid.graph.nodes_by_attribute('storage'):
storage_results['storage_id'].append(repr(storage))
storage_results['nominal_power'].append(storage.nominal_power)
storage_results['voltage_level'].append(
'mv' if isinstance(grid, MVGrid) else 'lv')
                # list() guards against networkx versions where neighbors()
                # returns an iterator rather than a list
                storage_results['grid_connection_point'].append(
                    list(grid.graph.neighbors(storage))[0])
return pd.DataFrame(storage_results).set_index('storage_id')
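The dict-of-lists accumulation used above, demonstrated standalone with made-up values:
import pandas as pd

records = {'storage_id': ['Storage_1', 'Storage_2'],
           'nominal_power': [100.0, 250.0],
           'voltage_level': ['lv', 'mv']}
print(pd.DataFrame(records).set_index('storage_id'))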
|
python
|
{
"resource": ""
}
|
q16596
|
Results.storages_timeseries
|
train
|
def storages_timeseries(self):
"""
        Returns two dataframes with storage active and reactive power time
        series.
        Returns
        -------
        (:pandas:`pandas.DataFrame<dataframe>`, :pandas:`pandas.DataFrame<dataframe>`)
            Tuple of two dataframes containing active power time series in kW
            and reactive power time series in kVA of all storages installed
            in the MV grid and LV grids. Index of each dataframe is a
            :pandas:`pandas.DatetimeIndex<datetimeindex>`. Columns are the
            storage representatives.
"""
storages_p = pd.DataFrame()
storages_q = pd.DataFrame()
grids = [self.network.mv_grid] + list(self.network.mv_grid.lv_grids)
for grid in grids:
for storage in grid.graph.nodes_by_attribute('storage'):
ts = storage.timeseries
storages_p[repr(storage)] = ts.p
storages_q[repr(storage)] = ts.q
return storages_p, storages_q
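Note that a tuple of two frames is returned; a short sketch computing apparent power from synthetic stand-ins for them:
import pandas as pd

idx = pd.date_range('2011-01-01', periods=3, freq='H')
storages_p = pd.DataFrame({'Storage_1': [-100.0, 0.0, 100.0]}, index=idx)
storages_q = pd.DataFrame({'Storage_1': [0.0, 50.0, 0.0]}, index=idx)

# apparent power per storage and time step in kVA
s = (storages_p ** 2 + storages_q ** 2) ** 0.5
print(s.max())  # per-storage peak apparent power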
|
python
|
{
"resource": ""
}
|
q16597
|
ResultsReimport.v_res
|
train
|
def v_res(self, nodes=None, level=None):
"""
Get resulting voltage level at node.
Parameters
----------
nodes : :obj:`list`
List of string representatives of grid topology components, e.g.
:class:`~.grid.components.Generator`. If not provided defaults to
all nodes available in grid level `level`.
level : :obj:`str`
            Either 'mv' or 'lv' or None (default), depending on which grid
            level's results you are interested in. Providing this argument is
            required to distinguish voltage levels at the primary and
            secondary side of the transformer/LV station.
            If not provided (respectively None), defaults to ['mv', 'lv'].
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
            Resulting voltage levels obtained from power flow analysis.
"""
# check if voltages are available:
if hasattr(self, 'pfa_v_mag_pu'):
self.pfa_v_mag_pu.sort_index(axis=1, inplace=True)
else:
message = "No voltage results available."
raise AttributeError(message)
if level is None:
level = ['mv', 'lv']
if nodes is None:
return self.pfa_v_mag_pu.loc[:, (level, slice(None))]
else:
not_included = [_ for _ in nodes
if _ not in list(self.pfa_v_mag_pu[level].columns)]
labels_included = [_ for _ in nodes if _ not in not_included]
if not_included:
logging.warning("Voltage levels for {nodes} are not returned "
"from PFA".format(nodes=not_included))
return self.pfa_v_mag_pu[level][labels_included]
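A usage sketch against a small synthetic result table with the same ('level', node) MultiIndex column layout the method expects:
import pandas as pd

cols = pd.MultiIndex.from_tuples([('mv', 'Bus_MV_1'), ('lv', 'Bus_LV_1')])
pfa_v_mag_pu = pd.DataFrame([[1.01, 0.99], [1.02, 0.98]],
                            index=pd.date_range('2011-01-01', periods=2,
                                                freq='H'),
                            columns=cols)

# equivalent of v_res(level='mv'): select on the top column level
print(pfa_v_mag_pu.loc[:, (['mv'], slice(None))])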
|
python
|
{
"resource": ""
}
|
q16598
|
set_up_storage
|
train
|
def set_up_storage(node, parameters,
voltage_level=None, operational_mode=None):
"""
Sets up a storage instance.
Parameters
----------
node : :class:`~.grid.components.Station` or :class:`~.grid.components.BranchTee`
Node the storage will be connected to.
    parameters : :obj:`dict`
Dictionary with storage parameters. Must at least contain
'nominal_power'. See :class:`~.grid.network.StorageControl` for more
information.
voltage_level : :obj:`str`, optional
This parameter only needs to be provided if `node` is of type
:class:`~.grid.components.LVStation`. In that case `voltage_level`
defines which side of the LV station the storage is connected to. Valid
options are 'lv' and 'mv'. Default: None.
operational_mode : :obj:`str`, optional
Operational mode. See :class:`~.grid.network.StorageControl` for
possible options and more information. Default: None.
"""
# if node the storage is connected to is an LVStation voltage_level
# defines which side the storage is connected to
if isinstance(node, LVStation):
if voltage_level == 'lv':
grid = node.grid
elif voltage_level == 'mv':
grid = node.mv_grid
else:
raise ValueError(
"{} is not a valid option for voltage_level.".format(
voltage_level))
else:
grid = node.grid
return Storage(operation=operational_mode,
id='{}_storage_{}'.format(grid,
len(grid.graph.nodes_by_attribute(
'storage')) + 1),
grid=grid,
geom=node.geom,
**parameters)
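The generated identifier follows the pattern '<grid>_storage_<n>'; the naming scheme in isolation, with stand-in values:
grid_repr = 'MVGrid_42'   # stand-in for the grid's string representation
existing_storages = 2     # stand-in for the storage node count lookup above
storage_id = '{}_storage_{}'.format(grid_repr, existing_storages + 1)
print(storage_id)  # MVGrid_42_storage_3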
|
python
|
{
"resource": ""
}
|
q16599
|
connect_storage
|
train
|
def connect_storage(storage, node):
"""
Connects storage to the given node.
    The storage is connected by a 1 m cable. The cable is selected to be able
    to carry the storage's nominal power plus an equal amount of reactive
    power, divided by the minimum of the feed-in and load case line load
    factors.
Parameters
----------
storage : :class:`~.grid.components.Storage`
Storage instance to be integrated into the grid.
node : :class:`~.grid.components.Station` or :class:`~.grid.components.BranchTee`
Node the storage will be connected to.
Returns
-------
:class:`~.grid.components.Line`
Newly added line to connect storage.
"""
# add storage itself to graph
storage.grid.graph.add_node(storage, type='storage')
    # add a 1 m connecting line to the node the storage is connected to
if isinstance(storage.grid, MVGrid):
voltage_level = 'mv'
else:
voltage_level = 'lv'
    # the apparent power the line must be able to carry is set to the
    # storage's nominal power plus an equal amount of reactive power,
    # divided by the minimum load factor
lf_dict = storage.grid.network.config['grid_expansion_load_factors']
lf = min(lf_dict['{}_feedin_case_line'.format(voltage_level)],
lf_dict['{}_load_case_line'.format(voltage_level)])
apparent_power_line = sqrt(2) * storage.nominal_power / lf
line_type, line_count = select_cable(storage.grid.network, voltage_level,
apparent_power_line)
line = Line(
id=storage.id,
type=line_type,
kind='cable',
length=1e-3,
grid=storage.grid,
quantity=line_count)
storage.grid.graph.add_edge(node, storage, line=line, type='line')
return line
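The sizing rule in isolation: with equal active and reactive power, |S| = sqrt(P^2 + Q^2) = sqrt(2) * P, then divided by the minimum load factor. A worked sketch with an assumed load factor value:
from math import sqrt

nominal_power = 300.0   # storage nominal power in kW
lf = 0.5                # assumed minimum line load factor
apparent_power_line = sqrt(2) * nominal_power / lf
print(round(apparent_power_line, 1))  # 848.5 kVA the cable must carry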
|
python
|
{
"resource": ""
}
|