code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
def load_ssh_key(key_id, skip_priv_key=False):
    """Load a local ssh private key (in PEM format). PEM format is the OpenSSH
    default format for private keys.

    See similar code in imgapi.js#loadSSHKey.

    @param key_id {str} An ssh public key fingerprint or ssh private key path.
    @param skip_priv_key {boolean} Optional. Default false. If true, then this
        will skip loading the private key file and `priv_key` will be `None`
        in the retval.
    @returns {dict} with these keys:
        - pub_key_path
        - fingerprint
        - priv_key_path
        - priv_key
        - algorithm
    """
    priv_key = None

    # If `key_id` is already a private key path, then easy.
    if not FINGERPRINT_RE.match(key_id):
        if not skip_priv_key:
            f = io.open(key_id, 'rb')
            try:
                priv_key = f.read()
            finally:
                f.close()
        # Public key is conventionally next to the private key with ".pub".
        pub_key_path = key_id + '.pub'
        f = io.open(pub_key_path, 'r')
        try:
            pub_key = f.read()
        finally:
            f.close()
        fingerprint = fingerprint_from_ssh_pub_key(pub_key)

        # XXX: pubkey should NOT be in PEM format.
        # First whitespace-separated token of an OpenSSH pubkey is the key
        # type (e.g. "ssh-rsa").
        try:
            algo = ALGO_FROM_SSH_KEY_TYPE[pub_key.split()[0]]
        except KeyError:
            raise MantaError("Unsupported key type for: {}".format(key_id))

        return dict(
            pub_key_path=pub_key_path,
            fingerprint=fingerprint,
            priv_key_path=key_id,
            priv_key=priv_key,
            algorithm=algo)

    # Else, look at all pub/priv keys in "~/.ssh" for a matching fingerprint.
    fingerprint = key_id
    pub_key_glob = expanduser('~/.ssh/*.pub')
    pub_key = None
    for pub_key_path in glob(pub_key_glob):
        try:
            f = io.open(pub_key_path, 'r')
        except IOError:
            # This can happen if the .pub file is a broken symlink.
            log.debug("could not open '%s', skip it", pub_key_path)
            continue
        try:
            pub_key = f.read()
        finally:
            f.close()

        # The MD5 fingerprint functions return the hexdigest without the hash
        # algorithm prefix ("MD5:"), and the SHA256 functions return the
        # fingerprint with the prefix ("SHA256:"). Ideally we'd want to
        # normalize these, but more importantly we don't want to break backwards
        # compatibility for either the SHA or MD5 users.
        md5_fp = fingerprint_from_ssh_pub_key(pub_key)
        sha256_fp = sha256_fingerprint_from_ssh_pub_key(pub_key)
        if (sha256_fp == fingerprint or
                md5_fp == fingerprint or
                "MD5:" + md5_fp == fingerprint):
            # if the user has given us sha256 fingerprint, canonicalize
            # it to the md5 fingerprint
            fingerprint = md5_fp
            break
    else:
        # for/else: no break happened, i.e. no key matched.
        raise MantaError(
            "no '~/.ssh/*.pub' key found with fingerprint '%s'"
            % fingerprint)

    # XXX: pubkey should NOT be in PEM format.
    try:
        algo = ALGO_FROM_SSH_KEY_TYPE[pub_key.split()[0]]
    except KeyError:
        raise MantaError("Unsupported key type for: {}".format(key_id))

    # Private key path is the pub key path without the ".pub" extension.
    priv_key_path = os.path.splitext(pub_key_path)[0]
    if not skip_priv_key:
        f = io.open(priv_key_path, 'rb')
        try:
            priv_key = f.read()
        finally:
            f.close()
    return dict(
        pub_key_path=pub_key_path,
        fingerprint=fingerprint,
        priv_key_path=priv_key_path,
        priv_key=priv_key,
        algorithm=algo)
def ssh_key_info_from_key_data(key_id, priv_key=None):
    """Get/load SSH key info necessary for signing.

    @param key_id {str} Either a private ssh key fingerprint, e.g.
        'b3:f0:a1:6c:18:3b:42:63:fd:6e:57:42:74:17:d4:bc', or the path to
        an ssh private key file (like ssh's IdentityFile config option).
    @param priv_key {str} Optional. SSH private key file data (PEM format).
    @return {dict} with these keys:
        - type: "ssh_key"
        - signer: Crypto signer class (a PKCS#1 v1.5 signer for RSA keys)
        - fingerprint: key md5 fingerprint
        - algorithm: See ALGO_FROM_SSH_KEY_TYPE for supported list.
        - ... some others added by `load_ssh_key()`
    """
    if FINGERPRINT_RE.match(key_id) and priv_key:
        # Key data was passed in directly; nothing to load from disk.
        key_info = {"fingerprint": key_id, "priv_key": priv_key}
    else:
        # Otherwise, we attempt to load necessary details from ~/.ssh.
        key_info = load_ssh_key(key_id)

    # Load a key signer.
    key = None
    try:
        key = serialization.load_pem_private_key(
            key_info["priv_key"],
            password=None,
            backend=default_backend())
    except TypeError as ex:
        # An encrypted key raises TypeError when loaded without a password;
        # prompt the user for a passphrase (up to 3 attempts).
        log.debug("could not import key without passphrase (will "
                  "try with passphrase): %s", ex)
        if "priv_key_path" in key_info:
            prompt = "Passphrase [%s]: " % key_info["priv_key_path"]
        else:
            prompt = "Passphrase: "
        for i in range(3):
            passphrase = getpass(prompt)
            if not passphrase:
                # Empty input: user gave up.
                break
            try:
                key = serialization.load_pem_private_key(
                    key_info["priv_key"],
                    password=passphrase,
                    backend=default_backend())
            except ValueError:
                # Wrong passphrase; ask again.
                continue
            else:
                break

    if not key:
        details = ""
        if "priv_key_path" in key_info:
            details = " (%s)" % key_info["priv_key_path"]
        raise MantaError("could not import key" + details)

    # If load_ssh_key() wasn't run, set the algorithm here.
    if 'algorithm' not in key_info:
        if isinstance(key, ec.EllipticCurvePrivateKey):
            key_info['algorithm'] = ECDSA_ALGO_FROM_KEY_SIZE[str(key.key_size)]
        elif isinstance(key, rsa.RSAPrivateKey):
            key_info['algorithm'] = RSA_STR
        else:
            raise MantaError("Unsupported key type for: {}".format(key_id))

    key_info["signer"] = key
    key_info["type"] = "ssh_key"
    return key_info
def agent_key_info_from_key_id(key_id):
    """Find a matching key in the ssh-agent.

    @param key_id {str} Either a private ssh key fingerprint, e.g.
        'b3:f0:a1:6c:18:3b:42:63:fd:6e:57:42:74:17:d4:bc', or the path to
        an ssh private key file (like ssh's IdentityFile config option).
    @return {dict} with these keys:
        - type: "agent"
        - agent_key: paramiko AgentKey
        - fingerprint: key md5 fingerprint
        - algorithm: See ALGO_FROM_SSH_KEY_TYPE for supported list.
    """
    # Need the fingerprint of the key we're using for signing. If it
    # is a path to a priv key, then we need to load it.
    if not FINGERPRINT_RE.match(key_id):
        # skip_priv_key=True: only the fingerprint is needed here.
        ssh_key = load_ssh_key(key_id, True)
        fingerprint = ssh_key["fingerprint"]
    else:
        fingerprint = key_id

    # Look for a matching fingerprint in the ssh-agent keys.
    keys = Agent().get_keys()
    for key in keys:
        raw_key = key.blob
        # The MD5 fingerprint functions return the hexdigest without the hash
        # algorithm prefix ("MD5:"), and the SHA256 functions return the
        # fingerprint with the prefix ("SHA256:"). Ideally we'd want to
        # normalize these, but more importantly we don't want to break backwards
        # compatibility for either the SHA or MD5 users.
        md5_fp = fingerprint_from_raw_ssh_pub_key(raw_key)
        sha_fp = sha256_fingerprint_from_raw_ssh_pub_key(raw_key)
        if (sha_fp == fingerprint or
                md5_fp == fingerprint or
                "MD5:" + md5_fp == fingerprint):
            # Canonicalize it to the md5 fingerprint.
            md5_fingerprint = md5_fp
            break
    else:
        # for/else: no agent key matched.
        raise MantaError('no ssh-agent key with fingerprint "%s"' %
                         fingerprint)

    return {
        "type": "agent",
        # `key` is the matching key from the loop above.
        "agent_key": key,
        "fingerprint": md5_fingerprint,
        "algorithm": ALGO_FROM_SSH_KEY_TYPE[key.name]
    }
if self._key_info_cache is None:
self._key_info_cache = ssh_key_info_from_key_data(self.key_id,
self.priv_key)
return self._key_info_cache | def _get_key_info(self) | Get key info appropriate for signing. | 3.942545 | 3.369508 | 1.170065 |
if self._key_info_cache is None:
self._key_info_cache = agent_key_info_from_key_id(self.key_id)
return self._key_info_cache | def _get_key_info(self) | Get key info appropriate for signing. | 3.557746 | 3.147337 | 1.130399 |
def _get_key_info(self):
    """Get key info appropriate for signing: either from the ssh agent
    or from a private key.

    Tries the ssh-agent first and falls back to loading the key from
    "~/.ssh/*". The result is cached on the instance. Raises MantaError
    listing all collected errors if neither source yields key info.
    """
    if self._key_info_cache is not None:
        return self._key_info_cache

    # Collect errors from each attempt so the final failure message can
    # explain both.
    errors = []

    # First try the agent.
    try:
        key_info = agent_key_info_from_key_id(self.key_id)
    except MantaError:
        # sys.exc_info() form (instead of `except ... as ex`) keeps `ex`
        # available after the except block on both py2 and py3.
        _, ex, _ = sys.exc_info()
        errors.append(ex)
    else:
        self._key_info_cache = key_info
        return self._key_info_cache

    # Try loading from "~/.ssh/*".
    try:
        key_info = ssh_key_info_from_key_data(self.key_id)
    except MantaError:
        _, ex, _ = sys.exc_info()
        errors.append(ex)
    else:
        self._key_info_cache = key_info
        return self._key_info_cache

    raise MantaError("could not find key info for signing: %s" %
                     "; ".join(map(str, errors)))
def create_channel(
    target: str,
    options: Optional[List[Tuple[str, Any]]] = None,
    interceptors: Optional[List[ClientInterceptor]] = None,
) -> grpc.Channel:
    """Creates a gRPC channel

    The gRPC channel is created with the provided options and intercepts each
    invocation via the provided interceptors.

    The created channel is configured with the following default options:
        - "grpc.max_send_message_length": 100MB,
        - "grpc.max_receive_message_length": 100MB.

    :param target: the server address.
    :param options: optional list of key-value pairs to configure the channel.
    :param interceptors: optional list of client interceptors.

    :returns: a gRPC channel.
    """
    # The list of possible options is available here:
    # https://grpc.io/grpc/core/group__grpc__arg__keys.html
    size_options = [
        ("grpc.max_send_message_length", grpc_max_msg_size),
        ("grpc.max_receive_message_length", grpc_max_msg_size),
    ]
    channel_options = list(options or []) + size_options
    plain_channel = grpc.insecure_channel(target, channel_options)
    return grpc.intercept_channel(plain_channel, *(interceptors or []))
def create_server(
    max_workers: int,
    options: Optional[List[Tuple[str, Any]]] = None,
    interceptors: Optional[List[grpc.ServerInterceptor]] = None,
) -> grpc.Server:
    """Creates a gRPC server

    The gRPC server is created with the provided options and intercepts each
    incoming RPCs via the provided interceptors.

    The created server is configured with the following default options:
        - "grpc.max_send_message_length": 100MB,
        - "grpc.max_receive_message_length": 100MB.

    :param max_workers: the maximum number of workers to use in the underlying
        futures.ThreadPoolExecutor to be used by the Server to execute RPC
        handlers.
    :param options: optional list of key-value pairs to configure the channel.
    :param interceptors: optional list of server interceptors.

    :returns: a gRPC server.
    """
    # The list of possible options is available here:
    # https://grpc.io/grpc/core/group__grpc__arg__keys.html
    size_options = [
        ("grpc.max_send_message_length", grpc_max_msg_size),
        ("grpc.max_receive_message_length", grpc_max_msg_size),
    ]
    server_options = list(options or []) + size_options
    wrapped_interceptors = [
        base.ServerInterceptorWrapper(interceptor)
        for interceptor in (interceptors or [])
    ]
    server = grpc.server(
        ThreadPoolExecutor(max_workers=max_workers),
        options=server_options,
        interceptors=wrapped_interceptors,
    )
    # Bind each wrapper to the server it serves.
    for wrapper in wrapped_interceptors:
        wrapper.bind(server)
    return server
def to_grpc_address(target: str) -> str:
    """Converts a standard gRPC target to one that is supported by grpcio

    :param target: the server address.

    :returns: the converted address.

    :raises ValueError: if the target uses the unsupported "dns" scheme.
    """
    parsed = urlparse(target)
    scheme = parsed.scheme
    if scheme == "dns":
        raise ValueError("dns:// not supported")
    if scheme == "unix":
        # grpcio expects "unix:<path>" rather than "unix://<path>".
        return "unix:" + parsed.path
    return parsed.netloc
def implement_switch_disconnector(mv_grid, node1, node2):
    """Install switch disconnector in grid topology

    The graph that represents the grid's topology is altered in such way that
    it explicitly includes a switch disconnector.
    The switch disconnector is always located at ``node1``. Technically, it
    does not make any difference. This is just an convention ensuring
    consistency of multiple runs.

    The ring is still closed after manipulations of this function.

    Parameters
    ----------
    mv_grid : :class:`~.grid.grids.MVGrid`
        MV grid instance
    node1
        A rings node
    node2
        Another rings node
    """
    # Get disconnecting point's location
    line = mv_grid.graph.edge[node1][node2]['line']

    length_sd_line = .75e-3  # in km
    # NOTE(review): this offsets the point along (node1 - node2), i.e. *away*
    # from node2; interpolating toward node2 would use (node2 - node1).
    # Confirm the intended direction of the 0.75 m offset.
    x_sd = node1.geom.x + (length_sd_line / line.length) * (
        node1.geom.x - node2.geom.x)
    y_sd = node1.geom.y + (length_sd_line / line.length) * (
        node1.geom.y - node2.geom.y)

    # Instantiate disconnecting point; id is 1-based and derived from the
    # number of disconnecting points already present.
    mv_dp_number = len(mv_grid.graph.nodes_by_attribute(
        'mv_disconnecting_point'))
    disconnecting_point = MVDisconnectingPoint(
        id=mv_dp_number + 1,
        geom=Point(x_sd, y_sd),
        grid=mv_grid)
    mv_grid.graph.add_node(disconnecting_point, type='mv_disconnecting_point')

    # Replace original line by a new line: disconnecting point <-> node2,
    # shortened by the disconnector segment length.
    new_line_attr = {
        'line': Line(
            id=line.id,
            type=line.type,
            length=line.length - length_sd_line,
            grid=mv_grid),
        'type': 'line'}
    mv_grid.graph.remove_edge(node1, node2)
    mv_grid.graph.add_edge(disconnecting_point, node2, new_line_attr)

    # Add disconnecting line segment: node1 <-> disconnecting point.
    switch_disconnector_line_attr = {
        'line': Line(
            id="switch_disconnector_line_{}".format(
                str(mv_dp_number + 1)),
            type=line.type,
            length=length_sd_line,
            grid=mv_grid),
        'type': 'line'}
    mv_grid.graph.add_edge(node1, disconnecting_point,
                           switch_disconnector_line_attr)

    # Set line to switch disconnector
    disconnecting_point.line = mv_grid.graph.line_from_nodes(
        disconnecting_point, node1)
def select_cable(network, level, apparent_power):
    """Selects an appropriate cable type and quantity using given apparent
    power.

    Considers load factor.

    Parameters
    ----------
    network : :class:`~.grid.network.Network`
        The eDisGo container object
    level : :obj:`str`
        Grid level ('mv' or 'lv')
    apparent_power : :obj:`float`
        Apparent power the cable must carry in kVA

    Returns
    -------
    :pandas:`pandas.Series<series>`
        Cable type
    :obj:`int`
        Cable count

    Raises
    ------
    ValueError
        If `level` is neither 'mv' nor 'lv'.
    exceptions.MaximumIterationError
        If no suitable cable is found for up to 20 parallel cables.

    Notes
    ------
    Cable is selected to be able to carry the given `apparent_power`, no load
    factor is considered.
    """
    # Determine candidate cables and the rating (kVA) of a single cable.
    if level == 'mv':
        # Only cables matching the MV grid's nominal voltage are eligible.
        available_cables = network.equipment_data['mv_cables'][
            network.equipment_data['mv_cables']['U_n'] ==
            network.mv_grid.voltage_nom]
        cable_rating = (available_cables['I_max_th'] *
                        network.mv_grid.voltage_nom)
    elif level == 'lv':
        available_cables = network.equipment_data['lv_cables']
        cable_rating = (available_cables['I_max_th'] *
                        available_cables['U_n'])
    else:
        raise ValueError('Please supply a level (either \'mv\' or \'lv\').')

    # Increase cable count until an appropriate cable type is found
    # (up to 20 parallel cables).
    cable_count = 1
    suitable_cables = available_cables[
        cable_rating * cable_count > apparent_power]
    while suitable_cables.empty and cable_count < 20:
        cable_count += 1
        suitable_cables = available_cables[
            cable_rating * cable_count > apparent_power]
    if suitable_cables.empty:
        raise exceptions.MaximumIterationError(
            "Could not find a suitable cable for apparent power of "
            "{} kVA.".format(apparent_power))

    # Pick the smallest cable (by thermal current limit) that suffices.
    # `.loc` replaces the deprecated `.ix` indexer (removed in pandas 1.0).
    cable_type = suitable_cables.loc[suitable_cables['I_max_th'].idxmin()]
    return cable_type, cable_count
def get_gen_info(network, level='mvlv', fluctuating=False):
    """Gets all the installed generators with some additional information.

    Parameters
    ----------
    network : :class:`~.grid.network.Network`
        Network object holding the grid data.
    level : :obj:`str`
        Defines which generators are returned. Possible options are:

        * 'mv'
          Only generators connected to the MV grid are returned.
        * 'lv'
          Only generators connected to the LV grids are returned.
        * 'mvlv'
          All generators connected to the MV grid and LV grids are returned.

        Default: 'mvlv'.
    fluctuating : :obj:`bool`
        If True only returns fluctuating generators. Default: False.

    Returns
    --------
    :pandas:`pandas.DataFrame<dataframe>`
        Dataframe with all generators connected to the specified voltage
        level. Index of the dataframe are the generator objects of type
        :class:`~.grid.components.Generator`. Columns of the dataframe are:

        * 'gen_repr'
          The representative of the generator as :obj:`str`.
        * 'type'
          The generator type, e.g. 'solar' or 'wind' as :obj:`str`.
        * 'voltage_level'
          The voltage level the generator is connected to as :obj:`str`. Can
          either be 'mv' or 'lv'.
        * 'nominal_capacity'
          The nominal capacity of the generator as :obj:`float`.
        * 'weather_cell_id'
          The id of the weather cell the generator is located in as
          :obj:`int` (only applies to fluctuating generators).
    """
    gens_w_id = []

    # 'mv' in 'mvlv' is also true, so this covers both 'mv' and 'mvlv'.
    if 'mv' in level:
        gens = network.mv_grid.generators
        # NOTE(review): `gens` is extended in place below when LV generators
        # are appended — confirm `mv_grid.generators` returns a fresh list.
        gens_voltage_level = ['mv'] * len(gens)
        gens_type = [gen.type for gen in gens]
        gens_rating = [gen.nominal_capacity for gen in gens]
        for gen in gens:
            try:
                gens_w_id.append(gen.weather_cell_id)
            except AttributeError:
                # Non-fluctuating generators carry no weather cell id.
                gens_w_id.append(np.nan)
        gens_grid = [network.mv_grid] * len(gens)
    else:
        gens = []
        gens_voltage_level = []
        gens_type = []
        gens_rating = []
        gens_grid = []

    if 'lv' in level:
        for lv_grid in network.mv_grid.lv_grids:
            gens_lv = lv_grid.generators
            gens.extend(gens_lv)
            gens_voltage_level.extend(['lv'] * len(gens_lv))
            gens_type.extend([gen.type for gen in gens_lv])
            gens_rating.extend([gen.nominal_capacity for gen in gens_lv])
            for gen in gens_lv:
                try:
                    gens_w_id.append(gen.weather_cell_id)
                except AttributeError:
                    gens_w_id.append(np.nan)
            gens_grid.extend([lv_grid] * len(gens_lv))

    gen_df = pd.DataFrame({'gen_repr': list(map(lambda x: repr(x), gens)),
                           'generator': gens,
                           'type': gens_type,
                           'voltage_level': gens_voltage_level,
                           'nominal_capacity': gens_rating,
                           'weather_cell_id': gens_w_id,
                           'grid': gens_grid})
    gen_df.set_index('generator', inplace=True, drop=True)

    # filter fluctuating generators
    if fluctuating:
        gen_df = gen_df.loc[(gen_df.type == 'solar') | (gen_df.type == 'wind')]

    return gen_df
def assign_mv_feeder_to_nodes(mv_grid):
    """Assigns an MV feeder to every generator, LV station, load, and branch tee

    The feeder assigned to a node is the line connecting the MV station to
    the first node of the respective half-ring.

    Parameters
    -----------
    mv_grid : :class:`~.grid.grids.MVGrid`
    """
    mv_station_neighbors = mv_grid.graph.neighbors(mv_grid.station)
    # get all nodes in MV grid and remove MV station to get separate subgraphs
    # NOTE(review): `.nodes()` must return a mutable list (networkx 1.x);
    # networkx 2.x returns a read-only view — confirm the pinned version.
    mv_graph_nodes = mv_grid.graph.nodes()
    mv_graph_nodes.remove(mv_grid.station)
    subgraph = mv_grid.graph.subgraph(mv_graph_nodes)

    for neighbor in mv_station_neighbors:
        # determine feeder: the line from the station to this neighbor
        mv_feeder = mv_grid.graph.line_from_nodes(mv_grid.station, neighbor)

        # get all nodes in that feeder by doing a DFS in the disconnected
        # subgraph starting from the node adjacent to the MVStation `neighbor`
        subgraph_neighbor = nx.dfs_tree(subgraph, source=neighbor)
        for node in subgraph_neighbor.nodes():
            # in case of an LV station assign feeder to all nodes in that LV
            # grid
            if isinstance(node, LVStation):
                for lv_node in node.grid.graph.nodes():
                    lv_node.mv_feeder = mv_feeder
            else:
                node.mv_feeder = mv_feeder
def get_mv_feeder_from_line(line):
    """Determines MV feeder the given line is in.

    MV feeders are identified by the first line segment of the half-ring.

    Parameters
    ----------
    line : :class:`~.grid.components.Line`
        Line to find the MV feeder for.

    Returns
    -------
    :class:`~.grid.components.Line` or None
        MV feeder identifier (representative of the first line segment
        of the half-ring); None if the feeder could not be determined or
        the two ends disagree.
    """
    try:
        # get nodes of line
        nodes = line.grid.graph.nodes_from_line(line)

        # get feeder of each adjacent node; an MV station end has no feeder
        # (the line is an MV feeder itself)
        feeders = {}
        for node in nodes:
            if isinstance(node, MVStation):
                feeders[repr(node)] = None
            else:
                feeders[repr(node)] = node.mv_feeder

        # return the feeder that is not None
        feeder_1 = feeders[repr(nodes[0])]
        feeder_2 = feeders[repr(nodes[1])]
        if feeder_1 is not None and feeder_2 is not None:
            if feeder_1 == feeder_2:
                return feeder_1
            # inconsistent feeder assignment on the two ends
            logging.warning('Different feeders for line {}.'.format(line))
            return None
        return feeder_1 if feeder_1 is not None else feeder_2
    except Exception as e:
        # best-effort lookup: log and signal "unknown feeder" rather than
        # propagate (matches callers that treat None as "not determinable")
        logging.warning('Failed to get MV feeder: {}.'.format(e))
        return None
def disconnect_storage(network, storage):
    """Removes storage from network graph and pypsa representation.

    Parameters
    -----------
    network : :class:`~.grid.network.Network`
    storage : :class:`~.grid.components.Storage`
        Storage instance to be removed.
    """
    # does only remove from network.pypsa, not from network.pypsa_lopf

    # remove from pypsa (buses, storage_units, storage_units_t, lines)
    # a storage is connected by exactly one line, so it has one neighbor
    neighbor = storage.grid.graph.neighbors(storage)[0]
    if network.pypsa is not None:
        line = storage.grid.graph.line_from_nodes(storage, neighbor)
        network.pypsa.storage_units = network.pypsa.storage_units.loc[
            network.pypsa.storage_units.index.drop(
                repr(storage)), :]
        network.pypsa.storage_units_t.p_set.drop([repr(storage)], axis=1,
                                                 inplace=True)
        network.pypsa.storage_units_t.q_set.drop([repr(storage)], axis=1,
                                                 inplace=True)
        # bus names follow the "Bus_<storage repr>" convention
        network.pypsa.buses = network.pypsa.buses.loc[
            network.pypsa.buses.index.drop(
                '_'.join(['Bus', repr(storage)])), :]
        network.pypsa.lines = network.pypsa.lines.loc[
            network.pypsa.lines.index.drop(
                repr(line)), :]

    # delete line
    # NOTE(review): `neighbor` is recomputed here although the value from
    # above is still valid; harmless but redundant.
    neighbor = storage.grid.graph.neighbors(storage)[0]
    storage.grid.graph.remove_edge(storage, neighbor)

    # delete storage
    storage.grid.graph.remove_node(storage)
def weather_cells(self):
    """Weather cells contained in grid

    Returns
    -------
    list
        list of weather cell ids contained in grid
    """
    if not self._weather_cells:
        # Collect ids from all generators that carry one; a set removes
        # duplicates. Generators without the attribute are skipped, so no
        # None filtering is needed afterwards.
        unique_ids = {gen.weather_cell_id for gen in self.generators
                      if hasattr(gen, 'weather_cell_id')}
        self._weather_cells = list(unique_ids)
    return self._weather_cells
def peak_generation(self):
    """Cumulative peak generation capacity of generators of this grid

    Returns
    -------
    float
        Ad-hoc calculated or cached peak generation capacity
    """
    if self._peak_generation is None:
        total = 0
        for gen in self.generators:
            total += gen.nominal_capacity
        self._peak_generation = total
    return self._peak_generation
def peak_generation_per_technology(self):
    """Peak generation of each technology in the grid

    Returns
    -------
    :pandas:`pandas.Series<series>`
        Peak generation index by technology
    """
    totals = {}
    for gen in self.generators:
        totals[gen.type] = totals.get(gen.type, 0.0) + gen.nominal_capacity
    return pd.Series(totals)
def peak_generation_per_technology_and_weather_cell(self):
    """Peak generation of each technology and the
    corresponding weather cell in the grid

    Returns
    -------
    :pandas:`pandas.Series<series>`
        Peak generation indexed by a MultiIndex of
        (type, weather_cell_id).

    Raises
    ------
    KeyError
        If a generator has no weather cell id.
    """
    peak_generation = defaultdict(float)
    for gen in self.generators:
        if not hasattr(gen, 'weather_cell_id'):
            message = 'No weather cell ID found for ' \
                      'generator {}.'.format(repr(gen))
            raise KeyError(message)
        # defaultdict(float) starts missing keys at 0.0, so the original
        # explicit key-existence check is unnecessary.
        peak_generation[gen.type, gen.weather_cell_id] += gen.nominal_capacity

    series_index = pd.MultiIndex.from_tuples(list(peak_generation.keys()),
                                             names=['type', 'weather_cell_id'])
    return pd.Series(peak_generation, index=series_index)
def peak_load(self):
    """Cumulative peak load of all loads of this grid

    Returns
    -------
    float
        Ad-hoc calculated or cached peak load
    """
    if self._peak_load is None:
        total = 0
        for load in self.graph.nodes_by_attribute('load'):
            total += load.peak_load.sum()
        self._peak_load = total
    return self._peak_load
def consumption(self):
    """Consumption in kWh per sector for whole grid

    Returns
    -------
    :pandas:`pandas.Series<series>`
        Indexed by demand sector
    """
    totals = {}
    for load in self.graph.nodes_by_attribute('load'):
        for sector, val in load.consumption.items():
            totals[sector] = totals.get(sector, 0.0) + val
    return pd.Series(totals)
def generators(self):
    """Connected Generators within the grid

    Returns
    -------
    list
        List of Generator Objects
    """
    if self._generators:
        return self._generators
    # Build a fresh list combining plain and aggregated generators.
    result = list(self.graph.nodes_by_attribute('generator'))
    result.extend(self.graph.nodes_by_attribute('generator_aggr'))
    return result
def draw(self):
    """Draw MV grid's graph using the geo data of nodes

    Notes
    -----
    This method uses the coordinates stored in the nodes' geoms which
    are usually conformal, not equidistant. Therefore, the plot might
    be distorted and does not (fully) reflect the real positions or
    distances between nodes.
    """
    # get nodes' positions (x/y of each node's geometry)
    nodes_pos = {}
    for node in self.graph.nodes():
        nodes_pos[node] = (node.geom.x, node.geom.y)

    # render and show the graph in a new matplotlib figure
    plt.figure()
    nx.draw_networkx(self.graph, nodes_pos, node_size=16, font_size=8)
    plt.show()
def nodes_from_line(self, line):
    """Get nodes adjacent to line

    Here, line refers to the object behind the key 'line' of the attribute
    dict attached to each edge.

    Parameters
    ----------
    line: edisgo.grid.components.Line
        A eDisGo line object

    Returns
    -------
    tuple
        Nodes adjacent to this edge
    """
    # Invert the edge -> line mapping to look up the node pair by line.
    edge_lines = nx.get_edge_attributes(self, 'line')
    line_to_nodes = {ln: nodes for nodes, ln in edge_lines.items()}
    return line_to_nodes[line]
def line_from_nodes(self, u, v):
    """Get line between two nodes ``u`` and ``v``.

    Parameters
    ----------
    u : :class:`~.grid.components.Component`
        One adjacent node
    v : :class:`~.grid.components.Component`
        The other adjacent node

    Returns
    -------
    Line
        Line segment connecting ``u`` and ``v``.

    Raises
    ------
    nx.NetworkXError
        If there is no edge between ``u`` and ``v``.
    """
    # Edges are stored undirected, but the attribute dict is keyed by a
    # specific (u, v) ordering, so try both orderings. Catch only KeyError
    # (the original bare `except:` also swallowed unrelated errors).
    lines = nx.get_edge_attributes(self, 'line')
    try:
        return lines[(u, v)]
    except KeyError:
        try:
            return lines[(v, u)]
        except KeyError:
            raise nx.NetworkXError('Line between ``u`` and ``v`` not '
                                   'included in the graph.')
def nodes_by_attribute(self, attr_val, attr='type'):
    """Select Graph's nodes by attribute value

    Get all nodes that share the same attribute. By default, the attr 'type'
    is used to specify the nodes type (generator, load, etc.).

    Examples
    --------
    >>> import edisgo
    >>> G = edisgo.grids.Graph()
    >>> G.add_node(1, type='generator')
    >>> G.add_node(2, type='load')
    >>> G.add_node(3, type='generator')
    >>> G.nodes_by_attribute('generator')
    [1, 3]

    Parameters
    ----------
    attr_val: str
        Value of the `attr` nodes should be selected by
    attr: str, default: 'type'
        Attribute key which is 'type' by default

    Returns
    -------
    list
        A list containing nodes elements that match the given attribute
        value
    """
    # `self.node` maps node -> attribute dict. A plain comprehension
    # replaces the original filter(None, map(...)) chain, which silently
    # dropped any matching node that was itself falsy (e.g. node 0).
    node_attrs = getattr(self, 'node')
    return [node for node, attrs in node_attrs.items()
            if attrs[attr] == attr_val]
def lines_by_attribute(self, attr_val=None, attr='type'):
    """Returns a generator for iterating over Graph's lines by attribute value.

    Get all lines that share the same attribute. By default, the attr 'type'
    is used to specify the lines' type (line, agg_line, etc.).

    The edge of a graph is described by the two adjacent nodes and the line
    object itself. Whereas the line object is used to hold all relevant
    power system parameters.

    Examples
    --------
    >>> import edisgo
    >>> G = edisgo.grids.Graph()
    >>> G.add_node(1, type='generator')
    >>> G.add_node(2, type='load')
    >>> G.add_edge(1, 2, type='line')
    >>> lines = G.lines_by_attribute('line')
    >>> list(lines)[0]
    <class 'tuple'>: ((node1, node2), line)

    Parameters
    ----------
    attr_val: str
        Value of the `attr` lines should be selected by
    attr: str, default: 'type'
        Attribute key which is 'type' by default

    Returns
    -------
    Generator of :obj:`dict`
        A list containing line elements that match the given attribute
        value

    Notes
    -----
    There are generator functions for nodes (`Graph.nodes()`) and edges
    (`Graph.edges()`) in NetworkX but unlike graph nodes, which can be
    represented by objects, branch objects can only be accessed by using an
    edge attribute ('line' is used here)

    To make access to attributes of the line objects simpler and more
    intuitive for the user, this generator yields a dictionary for each edge
    that contains information about adjacent nodes and the line object.

    Note, the construction of the dictionary highly depends on the structure
    of the in-going tuple (which is defined by the needs of networkX). If
    this changes, the code will break.
    """
    # get all lines that have the attribute `attr` set; items are
    # ((node_u, node_v), attr_value) pairs
    lines_attributes = nx.get_edge_attributes(self, attr).items()

    # attribute value provided?
    if attr_val:
        # extract lines where `attr` == attr_val
        lines_attributes = [(k, self[k[0]][k[1]]['line'])
                            for k, v in lines_attributes if v == attr_val]
    else:
        # get all lines
        lines_attributes = [(k, self[k[0]][k[1]]['line'])
                            for k, v in lines_attributes]

    # sort them according to connected nodes (repr of the line object)
    # for deterministic iteration order
    lines_sorted = sorted(list(lines_attributes), key=lambda _: repr(_[1]))

    for line in lines_sorted:
        yield {'adj_nodes': line[0], 'line': line[1]}
def intercept_service(self, continuation, handler_call_details):
    """Intercepts incoming RPCs before handing them over to a handler

    See `grpc.ServerInterceptor.intercept_service`.
    """
    handler = self._get_rpc_handler(handler_call_details)
    wrapped = self._wrapped
    # Apply the wrapped interceptor only when its kind matches the RPC's
    # response kind: a StreamServerInterceptor handles streaming responses,
    # a UnaryServerInterceptor handles unary ones.
    if handler.response_streaming == wrapped.is_streaming:
        return wrapped.intercept_service(continuation, handler_call_details)
    # skip the interceptor due to type mismatch
    return continuation(handler_call_details)
def combine_mv_and_lv(mv, lv):
    """Combine MV and LV grid topology in PyPSA format"""
    # Row-concatenate every component table present on the LV side with its
    # MV counterpart; transformers exist only on the MV side.
    combined = {}
    for component in lv:
        combined[component] = pd.concat([mv[component], lv[component]], axis=0)
    combined['Transformer'] = mv['Transformer']
    return combined
def add_aggregated_lv_components(network, components):
    """Aggregates LV load and generation at LV stations

    Use this function if you aim for MV calculation only. The according
    DataFrames of `components` are extended by load and generators representing
    these aggregated respecting the technology type.

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    components : dict of :pandas:`pandas.DataFrame<dataframe>`
        PyPSA components in tabular format

    Returns
    -------
    :obj:`dict` of :pandas:`pandas.DataFrame<dataframe>`
        The dictionary components passed to the function is returned altered.
    """
    generators = {}
    loads = {}

    # collect aggregated generation capacity by type and subtype
    # collect aggregated load grouped by sector
    for lv_grid in network.mv_grid.lv_grids:
        generators.setdefault(lv_grid, {})
        for gen in lv_grid.generators:
            # nested dict: lv_grid -> type -> subtype -> {capacity, name}
            generators[lv_grid].setdefault(gen.type, {})
            generators[lv_grid][gen.type].setdefault(gen.subtype, {})
            generators[lv_grid][gen.type][gen.subtype].setdefault(
                'capacity', 0)
            generators[lv_grid][gen.type][gen.subtype][
                'capacity'] += gen.nominal_capacity
            generators[lv_grid][gen.type][gen.subtype].setdefault(
                'name',
                '_'.join([gen.type,
                          gen.subtype,
                          'aggregated',
                          'LV_grid',
                          str(lv_grid.id)]))
        loads.setdefault(lv_grid, {})
        for lo in lv_grid.graph.nodes_by_attribute('load'):
            for sector, val in lo.consumption.items():
                loads[lv_grid].setdefault(sector, 0)
                loads[lv_grid][sector] += val

    # define dict for DataFrame creation of aggr. generation and load
    generator = {'name': [],
                 'bus': [],
                 'control': [],
                 'p_nom': [],
                 'type': []}
    load = {'name': [], 'bus': []}

    # fill generators dictionary for DataFrame creation; each aggregated
    # generator is attached to the bus of its LV grid's station
    for lv_grid_obj, lv_grid in generators.items():
        for _, gen_type in lv_grid.items():
            for _, gen_subtype in gen_type.items():
                generator['name'].append(gen_subtype['name'])
                generator['bus'].append(
                    '_'.join(['Bus', lv_grid_obj.station.__repr__('lv')]))
                generator['control'].append('PQ')
                generator['p_nom'].append(gen_subtype['capacity'])
                generator['type'].append("")

    # fill loads dictionary for DataFrame creation; one aggregated load per
    # (grid, sector), attached to the LV station bus
    for lv_grid_obj, lv_grid in loads.items():
        for sector, val in lv_grid.items():
            load['name'].append('_'.join(['Load', sector, repr(lv_grid_obj)]))
            load['bus'].append(
                '_'.join(['Bus', lv_grid_obj.station.__repr__('lv')]))

    components['Generator'] = pd.concat(
        [components['Generator'], pd.DataFrame(generator).set_index('name')])
    components['Load'] = pd.concat(
        [components['Load'], pd.DataFrame(load).set_index('name')])
    return components
def _pypsa_load_timeseries(network, timesteps, mode=None):
    """
    Time series in PyPSA compatible format for load instances.

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    timesteps : array_like
        Array-like object with entries of type
        :pandas:`pandas.Timestamp<timestamp>` specifying which time steps
        to export to pypsa representation and use in power flow analysis.
    mode : str, optional
        Specifically retrieve load time series for MV or LV grid level or
        both. Either choose 'mv' or 'lv'. Defaults to None, which returns
        time series for both MV and LV.

    Returns
    -------
    tuple of :pandas:`pandas.DataFrame<dataframe>`
        Active power ('p') and reactive power ('q') time series tables in
        PyPSA format.
    """
    mv_load_timeseries_p = []
    mv_load_timeseries_q = []
    lv_load_timeseries_p = []
    lv_load_timeseries_q = []

    # add MV grid loads
    # BUG FIX: strings must be compared with `==`, not `is`; identity
    # comparison only worked by accident through CPython string interning.
    if mode == 'mv' or mode is None:
        for load in network.mv_grid.graph.nodes_by_attribute('load'):
            mv_load_timeseries_q.append(load.pypsa_timeseries('q').rename(
                repr(load)).to_frame().loc[timesteps])
            mv_load_timeseries_p.append(load.pypsa_timeseries('p').rename(
                repr(load)).to_frame().loc[timesteps])
        if mode == 'mv':
            lv_load_timeseries_p, lv_load_timeseries_q = \
                _pypsa_load_timeseries_aggregated_at_lv_station(
                    network, timesteps)

    # add LV grid's loads
    if mode == 'lv' or mode is None:
        for lv_grid in network.mv_grid.lv_grids:
            for load in lv_grid.graph.nodes_by_attribute('load'):
                lv_load_timeseries_q.append(load.pypsa_timeseries('q').rename(
                    repr(load)).to_frame().loc[timesteps])
                lv_load_timeseries_p.append(load.pypsa_timeseries('p').rename(
                    repr(load)).to_frame().loc[timesteps])

    load_df_p = pd.concat(mv_load_timeseries_p + lv_load_timeseries_p, axis=1)
    load_df_q = pd.concat(mv_load_timeseries_q + lv_load_timeseries_q, axis=1)

    return load_df_p, load_df_q
def _pypsa_generator_timeseries(network, timesteps, mode=None):
    """
    Time series in PyPSA compatible format for generator instances.

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    timesteps : array_like
        Array-like object with entries of type
        :pandas:`pandas.Timestamp<timestamp>` specifying which time steps
        to export to pypsa representation and use in power flow analysis.
    mode : str, optional
        Specifically retrieve generator time series for MV or LV grid level
        or both. Either choose 'mv' or 'lv'. Defaults to None, which returns
        time series for both MV and LV.

    Returns
    -------
    tuple of :pandas:`pandas.DataFrame<dataframe>`
        Active power ('p') and reactive power ('q') time series tables in
        PyPSA format.
    """
    mv_gen_timeseries_q = []
    mv_gen_timeseries_p = []
    lv_gen_timeseries_q = []
    lv_gen_timeseries_p = []

    # MV generator timeseries
    # BUG FIX: strings must be compared with `==`, not `is` (identity).
    if mode == 'mv' or mode is None:
        for gen in network.mv_grid.generators:
            mv_gen_timeseries_q.append(gen.pypsa_timeseries('q').rename(
                repr(gen)).to_frame().loc[timesteps])
            mv_gen_timeseries_p.append(gen.pypsa_timeseries('p').rename(
                repr(gen)).to_frame().loc[timesteps])
        if mode == 'mv':
            lv_gen_timeseries_p, lv_gen_timeseries_q = \
                _pypsa_generator_timeseries_aggregated_at_lv_station(
                    network, timesteps)

    # LV generator timeseries
    if mode == 'lv' or mode is None:
        for lv_grid in network.mv_grid.lv_grids:
            for gen in lv_grid.generators:
                lv_gen_timeseries_q.append(gen.pypsa_timeseries('q').rename(
                    repr(gen)).to_frame().loc[timesteps])
                lv_gen_timeseries_p.append(gen.pypsa_timeseries('p').rename(
                    repr(gen)).to_frame().loc[timesteps])

    gen_df_p = pd.concat(mv_gen_timeseries_p + lv_gen_timeseries_p, axis=1)
    gen_df_q = pd.concat(mv_gen_timeseries_q + lv_gen_timeseries_q, axis=1)

    return gen_df_p, gen_df_q
def _pypsa_storage_timeseries(network, timesteps, mode=None):
    """
    Time series in PyPSA compatible format for storage instances.

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    timesteps : array_like
        Array-like object with entries of type
        :pandas:`pandas.Timestamp<timestamp>` specifying which time steps
        to export to pypsa representation and use in power flow analysis.
    mode : str, optional
        Specifically retrieve storage time series for MV or LV grid level
        or both. Either choose 'mv' or 'lv'. Defaults to None, which returns
        time series for both MV and LV.

    Returns
    -------
    tuple of :pandas:`pandas.DataFrame<dataframe>`
        Active power ('p') and reactive power ('q') time series tables in
        PyPSA format.
    """
    mv_storage_timeseries_q = []
    mv_storage_timeseries_p = []
    lv_storage_timeseries_q = []
    lv_storage_timeseries_p = []

    # MV storage time series
    # BUG FIX: strings must be compared with `==`, not `is` (identity).
    if mode == 'mv' or mode is None:
        for storage in network.mv_grid.graph.nodes_by_attribute('storage'):
            mv_storage_timeseries_q.append(
                storage.pypsa_timeseries('q').rename(
                    repr(storage)).to_frame().loc[timesteps])
            mv_storage_timeseries_p.append(
                storage.pypsa_timeseries('p').rename(
                    repr(storage)).to_frame().loc[timesteps])

    # LV storage time series
    if mode == 'lv' or mode is None:
        for lv_grid in network.mv_grid.lv_grids:
            for storage in lv_grid.graph.nodes_by_attribute('storage'):
                lv_storage_timeseries_q.append(
                    storage.pypsa_timeseries('q').rename(
                        repr(storage)).to_frame().loc[timesteps])
                lv_storage_timeseries_p.append(
                    storage.pypsa_timeseries('p').rename(
                        repr(storage)).to_frame().loc[timesteps])

    storage_df_p = pd.concat(
        mv_storage_timeseries_p + lv_storage_timeseries_p, axis=1)
    storage_df_q = pd.concat(
        mv_storage_timeseries_q + lv_storage_timeseries_q, axis=1)

    return storage_df_p, storage_df_q
def _pypsa_bus_timeseries(network, buses, timesteps):
    """
    Time series in PyPSA compatible format for bus instances.

    All buses except the slack bus are set to a voltage of 1 pu (this
    setting is assumed to be ignored while solving the power flow problem).
    The slack bus is set to an operational voltage: nominal voltage plus
    offset plus (optionally time-dependent) control deviation.

    .. warning::
        The slack set point assumes the feed-in (reverse power flow) case
        as the worst case, so it is always greater than 1 pu.

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    buses : list
        Bus names.
    timesteps : array_like
        Array-like object with entries of type
        :pandas:`pandas.Timestamp<timestamp>` specifying which time steps
        to export to pypsa representation and use in power flow analysis.

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        Voltage set-point time series table in PyPSA format.
    """
    voltage_cfg = network.config[
        'grid_expansion_allowed_voltage_deviations']

    # label of the slack bus (HV-MV station)
    slack_bus = '_'.join(
        ['Bus', network.mv_grid.station.__repr__(side='mv')])

    # every bus except the slack is set to nominal voltage (1 pu)
    v_set_dict = {bus: 1 for bus in buses if bus != slack_bus}

    # control deviation: positive in the feed-in case, negative otherwise
    control_deviation = voltage_cfg['hv_mv_trafo_control_deviation']
    if control_deviation != 0:
        control_deviation_ts = \
            network.timeseries.timesteps_load_feedin_case.case.apply(
                lambda case: control_deviation if case == 'feedin_case'
                else -control_deviation)
    else:
        control_deviation_ts = 0

    # slack operational voltage = nominal + offset + control deviation
    v_set_dict[slack_bus] = (control_deviation_ts + 1
                             + voltage_cfg['hv_mv_trafo_offset'])

    # convert to PyPSA compatible dataframe
    return pd.DataFrame(v_set_dict, index=timesteps)
def _pypsa_generator_timeseries_aggregated_at_lv_station(network, timesteps):
    """
    Aggregates generator time series per generator subtype and LV grid.

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    timesteps : array_like
        Array-like object with entries of type
        :pandas:`pandas.Timestamp<timestamp>` specifying which time steps
        to export to pypsa representation and use in power flow analysis.

    Returns
    -------
    tuple of :obj:`list` of :pandas:`pandas.DataFrame<dataframe>`
        Tuple of size two containing lists of DataFrames that represent

        1. 'p_set' of aggregated generation per subtype at each LV station
        2. 'q_set' of aggregated generation per subtype at each LV station
    """
    generation_p = []
    generation_q = []

    for lv_grid in network.mv_grid.lv_grids:
        # collect time series of all generators, grouped by type and subtype
        generation = {}
        for gen in lv_grid.generators:
            gen_name = '_'.join([gen.type,
                                 gen.subtype,
                                 'aggregated',
                                 'LV_grid',
                                 str(lv_grid.id)])

            generation.setdefault(gen.type, {})
            generation[gen.type].setdefault(gen.subtype, {})
            generation[gen.type][gen.subtype].setdefault('timeseries_p', [])
            generation[gen.type][gen.subtype].setdefault('timeseries_q', [])
            generation[gen.type][gen.subtype]['timeseries_p'].append(
                gen.pypsa_timeseries('p').rename(gen_name).to_frame().loc[
                    timesteps])
            generation[gen.type][gen.subtype]['timeseries_q'].append(
                gen.pypsa_timeseries('q').rename(gen_name).to_frame().loc[
                    timesteps])

        # sum up per (type, subtype) and keep the aggregate column name.
        # BUG FIX: the inner loop used to rebind `k_type`, shadowing the
        # outer loop variable; the keys were unused, so iterate values only.
        for subtype_dict in generation.values():
            for ts_dict in subtype_dict.values():
                col_name = ts_dict['timeseries_p'][0].columns[0]
                generation_p.append(
                    pd.concat(ts_dict['timeseries_p'],
                              axis=1).sum(axis=1).rename(col_name).to_frame())
                generation_q.append(
                    pd.concat(ts_dict['timeseries_q'], axis=1).sum(
                        axis=1).rename(col_name).to_frame())

    return generation_p, generation_q
def _pypsa_load_timeseries_aggregated_at_lv_station(network, timesteps):
    """
    Aggregates load time series per sector and LV grid.

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    timesteps : array_like
        Array-like object with entries of type
        :pandas:`pandas.Timestamp<timestamp>` specifying which time steps
        to export to pypsa representation and use in power flow analysis.

    Returns
    -------
    tuple of :obj:`list` of :pandas:`pandas.DataFrame<dataframe>`
        Tuple of size two containing lists of DataFrames that represent

        1. 'p_set' of aggregated load per sector at each LV station
        2. 'q_set' of aggregated load per sector at each LV station
    """
    # ToDo: Load.pypsa_timeseries is not differentiated by sector so this
    # function will not work (either change here and in
    # add_aggregated_lv_components or in Load class)
    load_p = []
    load_q = []

    for lv_grid in network.mv_grid.lv_grids:
        # collect time series of all loads in this LV grid, keyed by sector
        sector_ts = {}
        for lo in lv_grid.graph.nodes_by_attribute('load'):
            for sector in lo.consumption:
                entry = sector_ts.setdefault(
                    sector, {'timeseries_p': [], 'timeseries_q': []})
                entry['timeseries_p'].append(
                    lo.pypsa_timeseries('p').rename(
                        repr(lo)).to_frame().loc[timesteps])
                entry['timeseries_q'].append(
                    lo.pypsa_timeseries('q').rename(
                        repr(lo)).to_frame().loc[timesteps])

        # sum the collected series per sector into one aggregate column
        for sector, entry in sector_ts.items():
            agg_name = '_'.join(['Load', sector, repr(lv_grid)])
            load_p.append(
                pd.concat(entry['timeseries_p'], axis=1).sum(
                    axis=1).rename(agg_name).to_frame())
            load_q.append(
                pd.concat(entry['timeseries_q'], axis=1).sum(
                    axis=1).rename(agg_name).to_frame())

    return load_p, load_q
def update_pypsa_timeseries(network, loads_to_update=None,
                            generators_to_update=None, storages_to_update=None,
                            timesteps=None):
    """
    Updates load, generator, storage and bus time series in pypsa network.

    See :func:`update_pypsa_load_timeseries`,
    :func:`update_pypsa_generator_timeseries`,
    :func:`update_pypsa_storage_timeseries`, and
    :func:`update_pypsa_bus_timeseries` for details.

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    loads_to_update : :obj:`list`, optional
        Loads (of type :class:`~.grid.components.Load`) to update. If None
        all loads are updated depending on mode. See
        :meth:`~.tools.pypsa_io.to_pypsa` for more information.
    generators_to_update : :obj:`list`, optional
        Generators (of type :class:`~.grid.components.Generator`) to update.
        If None all generators are updated depending on mode.
    storages_to_update : :obj:`list`, optional
        Storages (of type :class:`~.grid.components.Storage`) to update.
        If None all storages are updated depending on mode.
    timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
        Time steps of the time series to export to pypsa representation.
        If None all time steps currently existing in the pypsa
        representation are updated; otherwise current time steps are
        overwritten by the given ones. Default: None.
    """
    # refresh component time series first, then the bus set points
    update_pypsa_load_timeseries(
        network, loads_to_update=loads_to_update, timesteps=timesteps)
    update_pypsa_generator_timeseries(
        network, generators_to_update=generators_to_update,
        timesteps=timesteps)
    update_pypsa_storage_timeseries(
        network, storages_to_update=storages_to_update, timesteps=timesteps)
    update_pypsa_bus_timeseries(network, timesteps=timesteps)

    # finally align the pypsa snapshots with the (possibly new) time steps
    snapshots = (network.pypsa.buses_t.v_mag_pu_set.index
                 if timesteps is None else timesteps)
    network.pypsa.set_snapshots(snapshots)
def update_pypsa_load_timeseries(network, loads_to_update=None,
                                 timesteps=None):
    """
    Updates load time series in pypsa representation.

    Overwrites p_set and q_set of the loads_t attribute of the pypsa
    network. Note that overwriting the current time steps via `timesteps`
    can leave the pypsa network inconsistent, since only load time series
    are touched — use :func:`update_pypsa_timeseries` to change the time
    steps analysed in the power flow. Raises an error if a load that is
    currently not in the pypsa representation is added.

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    loads_to_update : :obj:`list`, optional
        Loads (of type :class:`~.grid.components.Load`) to update. If None
        all loads are updated depending on mode. See
        :meth:`~.tools.pypsa_io.to_pypsa` for more information.
    timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
        Time steps of the load time series to export to pypsa
        representation. If None all time steps currently existing in the
        pypsa representation are updated. Default: None.
    """
    # delegate to the generic per-type updater
    _update_pypsa_timeseries_by_type(
        network, type='load',
        components_to_update=loads_to_update,
        timesteps=timesteps)
def update_pypsa_generator_timeseries(network, generators_to_update=None,
                                      timesteps=None):
    """
    Updates generator time series in pypsa representation.

    Overwrites p_set and q_set of the generators_t attribute of the pypsa
    network. Note that overwriting the current time steps via `timesteps`
    can leave the pypsa network inconsistent, since only generator time
    series are touched — use :func:`update_pypsa_timeseries` to change the
    time steps analysed in the power flow. Raises an error if a generator
    that is currently not in the pypsa representation is added.

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    generators_to_update : :obj:`list`, optional
        Generators (of type :class:`~.grid.components.Generator`) to
        update. If None all generators are updated depending on mode. See
        :meth:`~.tools.pypsa_io.to_pypsa` for more information.
    timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
        Time steps of the generator time series to export to pypsa
        representation. If None all time steps currently existing in the
        pypsa representation are updated. Default: None.
    """
    # delegate to the generic per-type updater
    _update_pypsa_timeseries_by_type(
        network, type='generator',
        components_to_update=generators_to_update,
        timesteps=timesteps)
def update_pypsa_storage_timeseries(network, storages_to_update=None,
                                    timesteps=None):
    """
    Updates storage time series in pypsa representation.

    Overwrites p_set and q_set of the storage_unit_t attribute of the pypsa
    network. Note that overwriting the current time steps via `timesteps`
    can leave the pypsa network inconsistent, since only storage time
    series are touched — use :func:`update_pypsa_timeseries` to change the
    time steps analysed in the power flow. Raises an error if a storage
    that is currently not in the pypsa representation is added.

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    storages_to_update : :obj:`list`, optional
        Storages (of type :class:`~.grid.components.Storage`) to update.
        If None all storages are updated depending on mode. See
        :meth:`~.tools.pypsa_io.to_pypsa` for more information.
    timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
        Time steps of the storage time series to export to pypsa
        representation. If None all time steps currently existing in the
        pypsa representation are updated. Default: None.
    """
    # delegate to the generic per-type updater
    _update_pypsa_timeseries_by_type(
        network, type='storage',
        components_to_update=storages_to_update,
        timesteps=timesteps)
def update_pypsa_bus_timeseries(network, timesteps=None):
    """
    Updates bus voltage time series in pypsa representation.

    Overwrites v_mag_pu_set of the buses_t attribute of the pypsa network.
    Note that overwriting the current time steps via `timesteps` can leave
    the pypsa network inconsistent, since only bus time series are touched —
    use :func:`update_pypsa_timeseries` to change the time steps analysed
    in the power flow.

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
        Time steps of the time series to export to pypsa representation.
        If None all time steps currently existing in the pypsa
        representation are updated. Default: None.
    """
    # default to the time steps currently present in the pypsa network
    if timesteps is None:
        timesteps = network.pypsa.buses_t.v_mag_pu_set.index
    # a single Timestamp is wrapped so downstream indexing sees a sequence
    if not hasattr(timesteps, "__len__"):
        timesteps = [timesteps]
    bus_names = network.pypsa.buses.index
    network.pypsa.buses_t.v_mag_pu_set = _pypsa_bus_timeseries(
        network, bus_names, timesteps)
def _update_pypsa_timeseries_by_type(network, type, components_to_update=None,
                                     timesteps=None):
    """
    Updates time series of the specified component type in the pypsa
    representation.

    Be aware that overwriting current time steps via `timesteps` may lead to
    inconsistencies in the pypsa network since only time series of the
    specified component type are updated. Use
    :func:`update_pypsa_timeseries` to change the time steps you want to
    analyse in the power flow analysis.

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    type : :obj:`str`
        Type of component to update; one of 'load', 'generator' or
        'storage'.
    components_to_update : :obj:`list`, optional
        Components (all of the same type) to update. If None all components
        of the given `type` are updated depending on the mode. See
        :meth:`~.tools.pypsa_io.to_pypsa` for more information on mode.
    timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
        Time steps to export to pypsa representation. If None all time
        steps currently existing in the pypsa representation are updated.
        Default: None.

    Raises
    ------
    ValueError
        If `type` is not one of 'load', 'generator' or 'storage'.
    KeyError
        If a component to update is not part of the pypsa representation.
    """
    # select the pypsa time series container and component index to update
    if type == 'load':
        pypsa_ts = network.pypsa.loads_t
        components_in_pypsa = network.pypsa.loads.index
    elif type == 'generator':
        pypsa_ts = network.pypsa.generators_t
        components_in_pypsa = network.pypsa.generators.index
    elif type == 'storage':
        pypsa_ts = network.pypsa.storage_units_t
        components_in_pypsa = network.pypsa.storage_units.index
    else:
        raise ValueError('{} is not a valid type.'.format(type))

    # MV and LV components
    # BUG FIX: edisgo_mode was compared with `is 'mv'`/`is 'lv'` (object
    # identity); strings must be compared with `==`.
    if network.pypsa.edisgo_mode is None:
        # if no components are specified get all components of specified
        # type in the whole grid
        if components_to_update is None:
            grids = [network.mv_grid] + list(network.mv_grid.lv_grids)
            if type == 'generator':
                components_to_update = list(itertools.chain(
                    *[grid.generators for grid in grids]))
            else:
                components_to_update = list(itertools.chain(
                    *[grid.graph.nodes_by_attribute(type) for grid in grids]))

        # if no time steps are specified update all time steps currently
        # contained in pypsa representation
        if timesteps is None:
            timesteps = pypsa_ts.p_set.index
        # check if timesteps is array-like, otherwise convert to list
        # (necessary to avoid getting a scalar using .loc)
        if not hasattr(timesteps, "__len__"):
            timesteps = [timesteps]

        p_set = pd.DataFrame()
        q_set = pd.DataFrame()
        for comp in components_to_update:
            if repr(comp) in components_in_pypsa:
                p_set[repr(comp)] = comp.pypsa_timeseries('p').loc[timesteps]
                q_set[repr(comp)] = comp.pypsa_timeseries('q').loc[timesteps]
            else:
                raise KeyError("Tried to update component {} but could not "
                               "find it in pypsa network.".format(comp))
        # overwrite pypsa time series
        pypsa_ts.p_set = p_set
        pypsa_ts.q_set = q_set
    # MV and aggregated LV components
    elif network.pypsa.edisgo_mode == 'mv':
        raise NotImplementedError
    # LV only
    elif network.pypsa.edisgo_mode == 'lv':
        raise NotImplementedError
def fifty_fifty(network, storage, feedin_threshold=0.5):
    """
    Operational mode where the storage operation depends on actual power
    output of generators. If cumulative generation exceeds
    `feedin_threshold` times the installed capacity, the storage is charged;
    otherwise it is discharged.

    The resulting active power time series is written to the storage.

    Parameters
    ----------
    network : :class:`~.grid.network.Network`
    storage : :class:`~.grid.components.Storage`
        Storage instance for which to generate the time series.
    feedin_threshold : :obj:`float`
        Ratio of generation to installed capacity above which the storage
        is charged (at nominal power) and below which it is discharged.
    """
    # collect all generators in the MV grid and all underlying LV grids
    generators = network.mv_grid.generators + \
        [gen for lv_grid in network.mv_grid.lv_grids
         for gen in lv_grid.generators]

    # cumulative active/reactive generation over all generators
    generators_p = pd.concat([gen.timeseries['p'] for gen in generators],
                             axis=1).sum(axis=1).rename('p')
    generators_q = pd.concat([gen.timeseries['q'] for gen in generators],
                             axis=1).sum(axis=1).rename('q')
    generation = pd.concat([generators_p, generators_q], axis=1)
    # apparent power; vectorized instead of a row-wise apply with math.sqrt
    generation['s'] = (generation['p'] ** 2 + generation['q'] ** 2) ** 0.5

    generators_nom_capacity = sum(gen.nominal_capacity for gen in generators)

    # charge (positive nominal power) while feed-in exceeds the threshold,
    # discharge (negative nominal power) otherwise
    feedin_bool = generation['s'] > (feedin_threshold *
                                     generators_nom_capacity)
    feedin = feedin_bool.apply(
        lambda x: storage.nominal_power if x
        else -storage.nominal_power).rename('p').to_frame()

    storage.timeseries = feedin
# Connect isolated (not yet connected) MV generators to the existing MV grid.
# get params from config
buffer_radius = int(network.config[
    'grid_connection']['conn_buffer_radius'])
buffer_radius_inc = int(network.config[
    'grid_connection']['conn_buffer_radius_inc'])
# get standard equipment
std_line_type = network.equipment_data['mv_cables'].loc[
    network.config['grid_expansion_standard_equipment']['mv_line']]
# sort by repr() so the iteration order (and thus the resulting topology)
# is deterministic across runs
for geno in sorted(network.mv_grid.graph.nodes_by_attribute('generator'),
                   key=lambda _: repr(_)):
    # only handle generators that are not yet connected to the grid graph
    if nx.is_isolate(network.mv_grid.graph, geno):

        # ===== voltage level 4: generator has to be connected to MV station =====
        if geno.v_level == 4:

            line_length = calc_geo_dist_vincenty(network=network,
                                                 node_source=geno,
                                                 node_target=network.mv_grid.station)

            # NOTE(review): line id is a random int in [10**8, 10**9] —
            # presumably unique enough; confirm no collision handling is
            # needed elsewhere.
            line = Line(id=random.randint(10**8, 10**9),
                        type=std_line_type,
                        kind='cable',
                        quantity=1,
                        length=line_length / 1e3,
                        grid=network.mv_grid)

            network.mv_grid.graph.add_edge(network.mv_grid.station,
                                           geno,
                                           line=line,
                                           type='line')

            # add line to equipment changes to track costs
            _add_cable_to_equipment_changes(network=network,
                                            line=line)

        # ===== voltage level 5: generator has to be connected to MV grid (next-neighbor) =====
        elif geno.v_level == 5:

            # get branches within a the predefined radius `generator_buffer_radius`
            branches = calc_geo_lines_in_buffer(network=network,
                                                node=geno,
                                                grid=network.mv_grid,
                                                radius=buffer_radius,
                                                radius_inc=buffer_radius_inc)

            # calc distance between generator and grid's lines -> find nearest line
            conn_objects_min_stack = _find_nearest_conn_objects(network=network,
                                                                node=geno,
                                                                branches=branches)

            # connect!
            # go through the stack (from nearest to most far connection target object)
            # and stop at the first candidate that accepts the connection
            generator_connected = False
            for dist_min_obj in conn_objects_min_stack:
                target_obj_result = _connect_mv_node(network=network,
                                                     node=geno,
                                                     target_obj=dist_min_obj)

                if target_obj_result is not None:
                    generator_connected = True
                    break

            if not generator_connected:
                logger.debug(
                    'Generator {0} could not be connected, try to '
                    'increase the parameter `conn_buffer_radius` in '
                    'config file `config_grid.cfg` to gain more possible '
                    'connection points.'.format(geno))
This function searches for unconnected generators in MV grids and connects them.
It connects
* generators of voltage level 4
* to HV-MV station
* generators of voltage level 5
* with a nom. capacity of <=30 kW to LV loads of type residential
* with a nom. capacity of >30 kW and <=100 kW to LV loads of type
retail, industrial or agricultural
* to the MV-LV station if no appropriate load is available (fallback)
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
Notes
-----
Adapted from `Ding0 <https://github.com/openego/ding0/blob/\
21a52048f84ec341fe54e0204ac62228a9e8a32a/\
ding0/grid/mv_grid/mv_connect.py#L820>`_. | 5.455114 | 5.227942 | 1.043453 |
network.results.equipment_changes = \
network.results.equipment_changes.append(
pd.DataFrame(
{'iteration_step': [0],
'change': ['added'],
'equipment': [line.type.name],
'quantity': [1]
},
index=[line]
)
) | def _add_cable_to_equipment_changes(network, line) | Add cable to the equipment changes
All changes of equipment are stored in network.results.equipment_changes
which is used later to determine grid expansion costs.
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
line : class:`~.grid.components.Line`
Line instance which is to be added | 5.286845 | 5.398735 | 0.979275 |
if line in network.results.equipment_changes.index:
network.results.equipment_changes = \
network.results.equipment_changes.drop(line) | def _del_cable_from_equipment_changes(network, line) | Delete cable from the equipment changes if existing
This is needed if a cable was already added to network.results.equipment_changes
but another node is connected later to this cable. Therefore, the cable needs to
be split which changes the id (one cable id -> 2 new cable ids).
Parameters
----------
network : :class:`~.grid.network.Network`
The eDisGo container object
line : class:`~.grid.components.Line`
Line instance which is to be deleted | 3.497159 | 3.503106 | 0.998302 |
def _find_nearest_conn_objects(network, node, branches):
    """
    Searches all branches for the nearest possible connection object per
    branch.

    Per branch, one object is picked out of three candidates: the two
    branch-adjacent stations and one potentially created branch tee on the
    line (using perpendicular projection). The resulting list is sorted
    ascending by distance from node.

    Parameters
    ----------
    network : :class:`~.grid.network.Network`
        The eDisGo container object
    node : :class:`~.grid.components.Component`
        Node to connect (e.g. :class:`~.grid.components.Generator`)
    branches :
        List of branches (NetworkX branch objects)

    Returns
    -------
    :obj:`list` of :obj:`dict`
        Connection objects, each represented by a dict with eDisGo object,
        shapely object and distance to node, sorted by distance.

    Notes
    -----
    Adapted from `Ding0 <https://github.com/openego/ding0/blob/\
    21a52048f84ec341fe54e0204ac62228a9e8a32a/\
    ding0/grid/mv_grid/mv_connect.py#L38>`_.
    """
    # threshold which is used to determine if 2 objects are on the same
    # position (see below for details on usage)
    conn_diff_tolerance = network.config['grid_connection'][
        'conn_diff_tolerance']

    conn_objects_min_stack = []

    node_shp = transform(proj2equidistant(network), node.geom)

    for branch in branches:
        stations = branch['adj_nodes']

        # shapely objects for both stations and the line between them,
        # transformed to an equidistant CRS
        station1_shp = transform(proj2equidistant(network), stations[0].geom)
        station2_shp = transform(proj2equidistant(network), stations[1].geom)
        line_shp = LineString([station1_shp, station2_shp])

        # candidate objects (line & 2 adjacent stations) with distances.
        # NOTE(review): station distances are scaled by 0.999 — presumably
        # to deterministically prefer a station over the line on near-ties;
        # confirm against upstream Ding0.
        conn_objects = {'s1': {'obj': stations[0],
                               'shp': station1_shp,
                               'dist': node_shp.distance(station1_shp) * 0.999},
                        's2': {'obj': stations[1],
                               'shp': station2_shp,
                               'dist': node_shp.distance(station2_shp) * 0.999},
                        'b': {'obj': branch,
                              'shp': line_shp,
                              'dist': node_shp.distance(line_shp)}}

        # Remove branch from the dict of possible conn. objects if it is
        # too close to a node. Without this solution, the target object is
        # not unique for different runs (and so were the topology)
        if (
                abs(conn_objects['s1']['dist'] - conn_objects['b']['dist'])
                < conn_diff_tolerance
                or abs(conn_objects['s2']['dist'] - conn_objects['b']['dist'])
                < conn_diff_tolerance
        ):
            del conn_objects['b']

        # remove MV station as possible connection point
        if isinstance(conn_objects['s1']['obj'], MVStation):
            del conn_objects['s1']
        elif isinstance(conn_objects['s2']['obj'], MVStation):
            del conn_objects['s2']

        # keep the nearest of the remaining candidates for this branch
        conn_objects_min = min(conn_objects.values(), key=lambda v: v['dist'])
        conn_objects_min_stack.append(conn_objects_min)

    # sort all objects by distance from node; IDIOM FIX: return sorted()
    # directly instead of wrapping it in a redundant list comprehension
    return sorted(conn_objects_min_stack, key=lambda x: x['dist'])
def _get_griddistrict(ding0_filepath):
    """
    Get the grid district number from a ding0 data file path.

    Parameters
    ----------
    ding0_filepath : str
        Path to ding0 data ending typically
        `/path/to/ding0_data/"ding0_grids__" + str(grid_district) + ".xxx"`

    Returns
    -------
    int
        Grid district number.

    Raises
    ------
    KeyError
        If no grid district number is found in the file name.
    """
    grid_district = os.path.basename(ding0_filepath)
    # file names look like 'ding0_grids__123.xxx': match the underscores
    # plus the trailing number, then strip the two leading underscores.
    # (raw string avoids the invalid-escape warning for '\d')
    grid_district_search = re.search(r'_+\d+', grid_district)
    if grid_district_search:
        return int(grid_district_search.group(0)[2:])
    else:
        # BUG FIX: the message previously lacked the '{}' placeholder, so
        # the offending file name was silently dropped from the error text.
        raise KeyError(
            'Grid District not found in {}'.format(grid_district))
grid_district = _get_griddistrict(ding0_filepath)
grid_issues = {}
logging.info('Grid expansion for MV grid district {}'.format(grid_district))
if edisgo_grid: # if an edisgo_grid is passed in arg then ignore everything else
edisgo_grid = edisgo_grid[0]
else:
try:
if 'worst-case' in analysis:
edisgo_grid = EDisGo(ding0_grid=ding0_filepath,
worst_case_analysis=analysis)
elif 'timeseries' in analysis:
edisgo_grid = EDisGo(ding0_grid=ding0_filepath,
timeseries_generation_fluctuating='oedb',
timeseries_load='demandlib')
except FileNotFoundError as e:
return None, pd.DataFrame(), {'grid': grid_district, 'msg': str(e)}
# Import generators
if generator_scenario:
logging.info('Grid expansion for scenario \'{}\'.'.format(generator_scenario))
edisgo_grid.import_generators(generator_scenario=generator_scenario)
else:
logging.info('Grid expansion with no generator imports based on scenario')
try:
# Do grid reinforcement
edisgo_grid.reinforce()
# Get costs
costs_grouped = \
edisgo_grid.network.results.grid_expansion_costs.groupby(
['type']).sum()
costs = pd.DataFrame(costs_grouped.values,
columns=costs_grouped.columns,
index=[[edisgo_grid.network.id] * len(costs_grouped),
costs_grouped.index]).reset_index()
costs.rename(columns={'level_0': 'grid'}, inplace=True)
grid_issues['grid'] = None
grid_issues['msg'] = None
logging.info('SUCCESS!')
except MaximumIterationError:
grid_issues['grid'] = edisgo_grid.network.id
grid_issues['msg'] = str(edisgo_grid.network.results.unresolved_issues)
costs = pd.DataFrame()
logging.warning('Unresolved issues left after grid expansion.')
except Exception as e:
grid_issues['grid'] = edisgo_grid.network.id
grid_issues['msg'] = repr(e)
costs = pd.DataFrame()
logging.exception()
return edisgo_grid, costs, grid_issues | def run_edisgo_basic(ding0_filepath,
generator_scenario=None,
analysis='worst-case',
*edisgo_grid) | Analyze edisgo grid extension cost as reference scenario
Parameters
----------
ding0_filepath : str
Path to ding0 data ending typically
`/path/to/ding0_data/"ding0_grids__" + str(``grid_district``) + ".xxx"`
analysis : str
Either 'worst-case' or 'timeseries'
generator_scenario : None or :obj:`str`
If provided defines which scenario of future generator park to use
and invokes import of these generators. Possible options are 'nep2035'
and 'ego100'.
Returns
-------
edisgo_grid : :class:`~.grid.network.EDisGo`
eDisGo network container
costs : :pandas:`pandas.Dataframe<dataframe>`
Cost of grid extension
grid_issues : dict
Grids resulting in an error including error message | 4.200492 | 3.549656 | 1.183352 |
# base case with no generator import
edisgo_grid, \
costs_before_geno_import, \
grid_issues_before_geno_import = run_edisgo_basic(*run_args)
if edisgo_grid:
# clear the pypsa object and results from edisgo_grid
edisgo_grid.network.results = Results(edisgo_grid.network)
edisgo_grid.network.pypsa = None
# case after generator import
# run_args = [ding0_filename]
# run_args.extend(run_args_opt)
run_args.append(edisgo_grid)
_, costs, \
grid_issues = run_edisgo_basic(*run_args)
return costs_before_geno_import, grid_issues_before_geno_import, \
costs, grid_issues
else:
return costs_before_geno_import, grid_issues_before_geno_import, \
costs_before_geno_import, grid_issues_before_geno_import | def run_edisgo_twice(run_args) | Run grid analysis twice on same grid: once w/ and once w/o new generators
First run without connection of new generators approves sufficient grid
hosting capacity. Otherwise, grid is reinforced.
The second run assesses grid extension needs in terms of RES integration.
Parameters
----------
run_args : list
Optional parameters for :func:`run_edisgo_basic`.
Returns
-------
all_costs_before_geno_import : :pandas:`pandas.Dataframe<dataframe>`
Grid extension cost before grid connection of new generators
all_grid_issues_before_geno_import : dict
Remaining overloading or over-voltage issues in grid
all_costs : :pandas:`pandas.Dataframe<dataframe>`
Grid extension cost due to grid connection of new generators
all_grid_issues : dict
Remaining overloading or over-voltage issues in grid | 4.414261 | 3.346959 | 1.318887 |
def collect_pool_results(result):
results.append(result)
results = []
pool = mp.Pool(workers,
maxtasksperchild=worker_lifetime)
for file in ding0_file_list:
edisgo_args = [file] + run_args_opt
pool.apply_async(func=run_edisgo_twice,
args=(edisgo_args,),
callback=collect_pool_results)
pool.close()
pool.join()
# process results data
all_costs_before_geno_import = [r[0] for r in results]
all_grid_issues_before_geno_import = [r[1] for r in results]
all_costs = [r[2] for r in results]
all_grid_issues = [r[3] for r in results]
return all_costs_before_geno_import, all_grid_issues_before_geno_import, \
all_costs, all_grid_issues | def run_edisgo_pool(ding0_file_list, run_args_opt,
workers=mp.cpu_count(), worker_lifetime=1) | Use python multiprocessing toolbox for parallelization
Several grids are analyzed in parallel.
Parameters
----------
ding0_file_list : list
Ding0 grid data file names
run_args_opt : list
eDisGo options, see :func:`run_edisgo_basic` and
:func:`run_edisgo_twice`
workers: int
Number of parallel process
worker_lifetime : int
Bunch of grids sequentially analyzed by a worker
Returns
-------
all_costs_before_geno_import : list
Grid extension cost before grid connection of new generators
all_grid_issues_before_geno_import : list
Remaining overloading or over-voltage issues in grid
all_costs : list
Grid extension cost due to grid connection of new generators
all_grid_issues : list
Remaining overloading or over-voltage issues in grid | 2.524279 | 1.809054 | 1.395359 |
def collect_pool_results(result):
results.update({result.network.id: result})
results = {}
pool = mp2.Pool(workers,
maxtasksperchild=worker_lifetime)
def error_callback(key):
return lambda o: results.update({key: o})
for ding0_id in ding0_id_list:
edisgo_args = (ding0_id, *func_arguments)
pool.apply_async(func=func,
args=edisgo_args,
callback=collect_pool_results,
error_callback=error_callback(ding0_id))
pool.close()
pool.join()
return results | def run_edisgo_pool_flexible(ding0_id_list, func, func_arguments,
workers=mp2.cpu_count(), worker_lifetime=1) | Use python multiprocessing toolbox for parallelization
Several grids are analyzed in parallel based on your custom function that
defines the specific application of eDisGo.
Parameters
----------
ding0_id_list : list of int
List of ding0 grid data IDs (also known as HV/MV substation IDs)
func : any function
Your custom function that shall be parallelized
func_arguments : tuple
Arguments to custom function ``func``
workers: int
Number of parallel process
worker_lifetime : int
Bunch of grids sequentially analyzed by a worker
Notes
-----
Please note, the following requirements for the custom function which is to
be executed in parallel
#. It must return an instance of the type :class:`~.edisgo.EDisGo`.
#. The first positional argument is the MV grid district id (as int). It is
prepended to the tuple of arguments ``func_arguments``
Returns
-------
containers : dict of :class:`~.edisgo.EDisGo`
Dict of EDisGo instances keyed by its ID | 3.120863 | 3.309153 | 0.9431 |
# when `file` is a string, it will be read by the help of pickle
if isinstance(file, str):
    ding0_nd = load_nd_from_pickle(filename=file)
# otherwise it is assumed the object is passed directly
else:
    ding0_nd = file
# Only the first MV grid district is used (see function notes: the
# NetworkDing0 object is assumed to contain exactly one district).
ding0_mv_grid = ding0_nd._mv_grid_districts[0].mv_grid
# Make sure circuit breakers (respectively the rings) are closed
ding0_mv_grid.close_circuit_breakers()
# Import medium-voltage grid data
network.mv_grid = _build_mv_grid(ding0_mv_grid, network)
# Import low-voltage grid data
lv_grids, lv_station_mapping, lv_grid_mapping = _build_lv_grid(
    ding0_mv_grid, network)
# Assign lv_grids to network
network.mv_grid.lv_grids = lv_grids
# Integrate disconnecting points
position_switch_disconnectors(network.mv_grid,
                              mode=network.config['disconnecting_point'][
                                  'position'])
# Check data integrity
_validate_ding0_grid_import(network.mv_grid, ding0_mv_grid,
                            lv_grid_mapping)
# Set data source
network.set_data_source('grid', 'dingo')
# Set more params
network._id = network.mv_grid.id
# Update the weather_cell_ids in mv_grid to include the ones in lv_grids
# ToDo: maybe get a better solution to push the weather_cell_ids in lv_grids but not in mv_grid but into the
# mv_grid.weather_cell_ids from within the Grid() object or the MVGrid() or LVGrid()
mv_weather_cell_id = network.mv_grid.weather_cells
for lvg in lv_grids:
    if lvg.weather_cells:
        for lv_w_id in lvg._weather_cells:
            if not (lv_w_id in mv_weather_cell_id):
                # NOTE(review): if `weather_cells` returns the underlying
                # `_weather_cells` list, this appends to the same list the
                # membership test reads -- presumably intended, but confirm.
                network.mv_grid._weather_cells.append(lv_w_id)
`Ding0 data <https://github.com/openego/ding0>`_.
This import method is specifically designed to load grid topology data in
the format as `Ding0 <https://github.com/openego/ding0>`_ provides it via
pickles.
The import of the grid topology includes
* the topology itself
* equipment parameter
* generators incl. location, type, subtype and capacity
* loads incl. location and sectoral consumption
Parameters
----------
file: :obj:`str` or :class:`ding0.core.NetworkDing0`
If a str is provided it is assumed it points to a pickle with Ding0
grid data. This file will be read.
If an object of the type :class:`ding0.core.NetworkDing0` data will be
used directly from this object.
network: :class:`~.grid.network.Network`
The eDisGo data container object
Notes
-----
Assumes :class:`ding0.core.NetworkDing0` provided by `file` contains
only data of one mv_grid_district. | 5.91456 | 5.488823 | 1.077564 |
# Use the MV cable type with the highest thermal current rating for the
# artificial lines that hook aggregated components to the station.
aggr_line_type = ding0_grid.network._static_data['MV_cables'].iloc[
    ding0_grid.network._static_data['MV_cables']['I_max_th'].idxmax()]
for la_id, la in aggregated.items():
    # add aggregated generators
    for v_level, val in la['generation'].items():
        for type, val2 in val.items():
            for subtype, val3 in val2.items():
                # Fluctuating technologies additionally carry a
                # weather cell id for feed-in time series lookup.
                if type in ['solar', 'wind']:
                    gen = GeneratorFluctuating(
                        id='agg-' + str(la_id) + '-' + '_'.join(
                            [str(_) for _ in val3['ids']]),
                        nominal_capacity=val3['capacity'],
                        weather_cell_id=val3['weather_cell_id'],
                        type=type,
                        subtype=subtype,
                        geom=grid.station.geom,
                        grid=grid,
                        v_level=4)
                else:
                    gen = Generator(
                        id='agg-' + str(la_id) + '-' + '_'.join(
                            [str(_) for _ in val3['ids']]),
                        nominal_capacity=val3['capacity'],
                        type=type,
                        subtype=subtype,
                        geom=grid.station.geom,
                        grid=grid,
                        v_level=4)
                grid.graph.add_node(gen, type='generator_aggr')
                # backup reference of geno to LV geno list (save geno
                # where the former LV genos are aggregated in)
                # NOTE(review): DataFrame.set_value is deprecated and
                # removed in modern pandas -- migrate to .loc on upgrade.
                network.dingo_import_data.set_value(network.dingo_import_data['id'].isin(val3['ids']),
                                                    'agg_geno',
                                                    gen)
                # connect generator to MV station with a very short line
                # (length 1e-3 -- presumably km, i.e. 1 m; confirm unit)
                line = Line(id='line_aggr_generator_la_' + str(la_id) + '_vlevel_{v_level}_'
                               '{subtype}'.format(
                                   v_level=v_level,
                                   subtype=subtype),
                            type=aggr_line_type,
                            kind='cable',
                            length=1e-3,
                            grid=grid)
                grid.graph.add_edge(grid.station,
                                    gen,
                                    line=line,
                                    type='line_aggr')
    # Add one aggregated load per consumption sector at the station.
    for sector, sectoral_load in la['load'].items():
        load = Load(
            geom=grid.station.geom,
            consumption={sector: sectoral_load},
            grid=grid,
            id='_'.join(['Load_aggregated', sector, repr(grid), str(la_id)]))
        grid.graph.add_node(load, type='load')
        # connect aggregated load to MV station
        line = Line(id='_'.join(['line_aggr_load_la_' + str(la_id), sector, str(la_id)]),
                    type=aggr_line_type,
                    kind='cable',
                    length=1e-3,
                    grid=grid)
        grid.graph.add_edge(grid.station,
                            load,
                            line=line,
                            type='line_aggr')
capacity and load
Parameters
----------
grid: MVGrid
MV grid object
aggregated: dict
Information about aggregated load and generation capacity. For
information about the structure of the dict see ... .
ding0_grid: ding0.Network
Ding0 network container
Returns
-------
MVGrid
Altered instance of MV grid including aggregated load and generation | 3.92834 | 3.82531 | 1.026934 |
# Cross-check each level of the imported grid against the Ding0 source;
# each helper raises on mismatch.
# Check number of components in MV grid
_validate_ding0_mv_grid_import(mv_grid, ding0_mv_grid)
# Check number of components in LV grid
_validate_ding0_lv_grid_import(mv_grid.lv_grids, ding0_mv_grid,
                               lv_grid_mapping)
# Check cumulative load and generation in MV grid district
_validate_load_generation(mv_grid, ding0_mv_grid)
Parameters
----------
mv_grid: MVGrid
eDisGo MV grid instance
ding0_mv_grid: MVGridDing0
Ding0 MV grid instance
lv_grid_mapping: dict
Translates Ding0 LV grids to associated, newly created eDisGo LV grids | 3.452701 | 3.404416 | 1.014183 |
# Component categories to cross-check between Ding0 and eDisGo
# (the 'line' check is currently disabled, see commented block below).
integrity_checks = ['branch_tee',
                    'disconnection_point', 'mv_transformer',
                    'lv_station'#,'line',
                    ]
data_integrity = {}
data_integrity.update({_: {'ding0': None, 'edisgo': None, 'msg': None}
                       for _ in integrity_checks})
# Check number of branch tees
data_integrity['branch_tee']['ding0'] = len(ding0_grid._cable_distributors)
data_integrity['branch_tee']['edisgo'] = len(
    grid.graph.nodes_by_attribute('branch_tee'))
# Check number of disconnecting points
data_integrity['disconnection_point']['ding0'] = len(
    ding0_grid._circuit_breakers)
data_integrity['disconnection_point']['edisgo'] = len(
    grid.graph.nodes_by_attribute('mv_disconnecting_point'))
# Check number of MV transformers
data_integrity['mv_transformer']['ding0'] = len(
    list(ding0_grid.station().transformers()))
data_integrity['mv_transformer']['edisgo'] = len(
    grid.station.transformers)
# Check number of LV stations in MV grid (graph); stations inside
# aggregated load areas are excluded on the Ding0 side because they are
# represented differently in eDisGo.
data_integrity['lv_station']['edisgo'] = len(grid.graph.nodes_by_attribute(
    'lv_station'))
data_integrity['lv_station']['ding0'] = len(
    [_ for _ in ding0_grid._graph.nodes()
     if (isinstance(_, LVStationDing0) and
         not _.grid.grid_district.lv_load_area.is_aggregated)])
# Check number of lines outside aggregated LA
# edges_w_la = grid.graph.lines()
# data_integrity['line']['edisgo'] = len([_ for _ in edges_w_la
#                                         if not (_['adj_nodes'][0] == grid.station or
#                                                 _['adj_nodes'][1] == grid.station) and
#                                         _['line']._length > .5])
# data_integrity['line']['ding0'] = len(
#     [_ for _ in ding0_grid.lines()
#      if not _['branch'].connects_aggregated])
# raise an error if data does not match
for c in integrity_checks:
    if data_integrity[c]['edisgo'] != data_integrity[c]['ding0']:
        raise ValueError(
            'Unequal number of objects for {c}. '
            '\n\tDing0:\t{ding0_no}'
            '\n\teDisGo:\t{edisgo_no}'.format(
                c=c,
                ding0_no=data_integrity[c]['ding0'],
                edisgo_no=data_integrity[c]['edisgo']))
return data_integrity
Parameters
----------
grid: MVGrid
MV Grid data (eDisGo)
ding0_grid: ding0.MVGridDing0
Ding0 MV grid object
Notes
-----
The data validation excludes grid components located in aggregated load
areas as these are represented differently in eDisGo.
Returns
-------
dict
Dict showing data integrity for each type of grid component | 3.798484 | 3.408723 | 1.114342 |
# Component categories to cross-check per LV grid.
integrity_checks = ['branch_tee', 'lv_transformer',
                    'generator', 'load','line']
data_integrity = {}
for grid in grids:
    data_integrity.update({grid:{_: {'ding0': None, 'edisgo': None, 'msg': None}
                                 for _ in integrity_checks}})
    # Check number of branch tees
    data_integrity[grid]['branch_tee']['ding0'] = len(
        lv_grid_mapping[grid]._cable_distributors)
    data_integrity[grid]['branch_tee']['edisgo'] = len(
        grid.graph.nodes_by_attribute('branch_tee'))
    # Check number of LV transformers
    data_integrity[grid]['lv_transformer']['ding0'] = len(
        list(lv_grid_mapping[grid].station().transformers()))
    data_integrity[grid]['lv_transformer']['edisgo'] = len(
        grid.station.transformers)
    # Check number of generators
    data_integrity[grid]['generator']['edisgo'] = len(
        grid.generators)
    data_integrity[grid]['generator']['ding0'] = len(
        list(lv_grid_mapping[grid].generators()))
    # Check number of loads
    data_integrity[grid]['load']['edisgo'] = len(
        grid.graph.nodes_by_attribute('load'))
    data_integrity[grid]['load']['ding0'] = len(
        list(lv_grid_mapping[grid].loads()))
    # Check number of lines; branches connecting aggregated load areas
    # are excluded on the Ding0 side.
    data_integrity[grid]['line']['edisgo'] = len(
        list(grid.graph.lines()))
    data_integrity[grid]['line']['ding0'] = len(
        [_ for _ in lv_grid_mapping[grid].graph_edges()
         if not _['branch'].connects_aggregated])
# raise an error if data does not match
for grid in grids:
    for c in integrity_checks:
        if data_integrity[grid][c]['edisgo'] != data_integrity[grid][c]['ding0']:
            raise ValueError(
                'Unequal number of objects in grid {grid} for {c}. '
                '\n\tDing0:\t{ding0_no}'
                '\n\teDisGo:\t{edisgo_no}'.format(
                    grid=grid,
                    c=c,
                    ding0_no=data_integrity[grid][c]['ding0'],
                    edisgo_no=data_integrity[grid][c]['edisgo']))
# NOTE(review): the docstring advertises a dict return value, but unlike
# the MV variant this function returns None -- confirm which is intended.
Parameters
----------
grids: list of LVGrid
LV Grid data (eDisGo)
ding0_grid: ding0.MVGridDing0
Ding0 MV grid object
lv_grid_mapping: dict
Defines relationship between Ding0 and eDisGo grid objects
Notes
-----
The data validation excludes grid components located in aggregated load
areas as these are represented differently in eDisGo.
Returns
-------
dict
Dict showing data integrity for each type of grid component | 3.028367 | 2.809408 | 1.077937 |
if data_source == 'oedb':
    # CONSISTENCY: use the module-level `logger` (as the error branch
    # below does) instead of the root logger via `logging.warning`.
    logger.warning('Right now only solar and wind generators can be '
                   'imported from the oedb.')
    _import_genos_from_oedb(network=network)
    # Invalidate the cached weather cells -- they are re-determined after
    # the generator import (TODO confirm lazy re-computation downstream).
    network.mv_grid._weather_cells = None
    if network.pypsa is not None:
        # Keep an already-built pypsa representation in sync with the
        # newly imported generators.
        pypsa_io.update_pypsa_generator_import(network)
elif data_source == 'pypsa':
    _import_genos_from_pypsa(network=network, file=file)
else:
    logger.error("Invalid option {} for generator import. Must either be "
                 "'oedb' or 'pypsa'.".format(data_source))
    raise ValueError('The option you specified is not supported.')
The generator data include
* nom. capacity
* type ToDo: specify!
* timeseries
Additional data which can be processed (e.g. used in OEDB data) are
* location
* type
* subtype
* capacity
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
data_source: :obj:`str`
Data source. Supported sources:
* 'oedb'
file: :obj:`str`
File to import data from, required when using file-based sources.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
List of generators | 5.439495 | 5.57397 | 0.975874 |
genos_mv = pd.DataFrame(columns=
('id', 'obj'))
genos_lv = pd.DataFrame(columns=
('id', 'obj'))
genos_lv_agg = pd.DataFrame(columns=
('la_id', 'id', 'obj'))
# MV genos
for geno in network.mv_grid.graph.nodes_by_attribute('generator'):
genos_mv.loc[len(genos_mv)] = [int(geno.id), geno]
for geno in network.mv_grid.graph.nodes_by_attribute('generator_aggr'):
la_id = int(geno.id.split('-')[1].split('_')[-1])
genos_lv_agg.loc[len(genos_lv_agg)] = [la_id, geno.id, geno]
# LV genos
for lv_grid in network.mv_grid.lv_grids:
for geno in lv_grid.generators:
genos_lv.loc[len(genos_lv)] = [int(geno.id), geno]
return genos_mv, genos_lv, genos_lv_agg | def _build_generator_list(network) | Builds DataFrames with all generators in MV and LV grids
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to MV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to LV generators
:pandas:`pandas.DataFrame<dataframe>`
A DataFrame with id of and reference to aggregated LV generators | 2.79019 | 2.437738 | 1.144581 |
lv_grid_dict = {}
for lv_grid in network.mv_grid.lv_grids:
lv_grid_dict[lv_grid.id] = lv_grid
return lv_grid_dict | def _build_lv_grid_dict(network) | Creates dict of LV grids
LV grid ids are used as keys, LV grid references as values.
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
Returns
-------
:obj:`dict`
Format: {:obj:`int`: :class:`~.grid.grids.LVGrid`} | 2.758379 | 2.963569 | 0.930763 |
def _retrieve_timeseries_from_oedb(config_data, weather_cell_ids):
    # Pick the ORM table depending on the configured data source:
    # unversioned model_draft schema vs. a versioned schema.
    if config_data['data_source']['oedb_data_source'] == 'model_draft':
        orm_feedin_name = config_data['model_draft']['res_feedin_data']
        orm_feedin = model_draft.__getattribute__(orm_feedin_name)
        # model_draft has no versioning -> always-true filter clause
        orm_feedin_version = 1 == 1
    else:
        orm_feedin_name = config_data['versioned']['res_feedin_data']
        orm_feedin = supply.__getattribute__(orm_feedin_name)
        orm_feedin_version = orm_feedin.version == config_data['versioned']['version']
    conn = connection(section=config_data['db_connection']['section'])
    Session = sessionmaker(bind=conn)
    session = Session()
    # ToDo: add option to retrieve subset of time series
    # ToDo: find the reference power class for mvgrid/w_id and insert instead of 4
    feedin_sqla = session.query(
        orm_feedin.w_id,
        orm_feedin.source,
        orm_feedin.feedin). \
        filter(orm_feedin.w_id.in_(weather_cell_ids)). \
        filter(orm_feedin.power_class.in_([0, 4])). \
        filter(orm_feedin_version)
    feedin = pd.read_sql_query(feedin_sqla.statement,
                               session.bind,
                               index_col=['source', 'w_id'])
    feedin.sort_index(axis=0, inplace=True)
    # Hourly index over one full year (8760 steps); 2011 is presumably
    # the weather year of the feed-in data -- confirm against the oedb.
    timeindex = pd.date_range('1/1/2011', periods=8760, freq='H')
    # Re-cast: one column per (source, weather cell), time as the index.
    recasted_feedin_dict = {}
    for type_w_id in feedin.index:
        recasted_feedin_dict[type_w_id] = feedin.loc[
            type_w_id, :].values[0]
    feedin = pd.DataFrame(recasted_feedin_dict, index=timeindex)
    # rename 'wind_onshore' and 'wind_offshore' to 'wind'
    # NOTE(review): only 'wind_onshore' is actually renamed below;
    # 'wind_offshore' is left untouched -- confirm whether intended.
    new_level = [_ if _ not in ['wind_onshore']
                 else 'wind' for _ in feedin.columns.levels[0]]
    feedin.columns.set_levels(new_level, level=0, inplace=True)
    feedin.columns.rename('type', level=0, inplace=True)
    feedin.columns.rename('weather_cell_id', level=1, inplace=True)
    return feedin
feedin = _retrieve_timeseries_from_oedb(config_data, weather_cell_ids)
return feedin
Parameters
----------
config_data : dict
Dictionary containing config data from config files.
weather_cell_ids : :obj:`list`
List of weather cell id's (integers) to obtain feed-in data for.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Feedin time series | 3.244127 | 3.319261 | 0.977364 |
# Each generator's share of total feed-in per time step, scaled by the
# curtailment target of that time step.
feedin_share = feedin.divide(feedin.sum(axis=1), axis=0)
curtailment = feedin_share.multiply(curtailment_timeseries, axis=0)
# Time steps with zero total feed-in produce 0/0 = NaN -> no feed-in
# means nothing can be curtailed, so set those entries to zero.
curtailment = curtailment.fillna(0)
# Verify the allocated curtailment sums up to the requested target.
_check_curtailment_target(curtailment, curtailment_timeseries,
                          curtailment_key)
# Write the per-generator curtailment time series back to the objects.
_assign_curtailment(curtailment, edisgo, generators, curtailment_key)
curtailment_key, **kwargs) | Implements curtailment methodology 'feedin-proportional'.
The curtailment that has to be met in each time step is allocated
equally to all generators depending on their share of total
feed-in in that time step.
Parameters
----------
feedin : :pandas:`pandas.DataFrame<dataframe>`
Dataframe holding the feed-in of each generator in kW for the
technology (and weather cell) specified in `curtailment_key` parameter.
Index of the dataframe is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`. Columns are the
representatives of the fluctuating generators.
generators : :pandas:`pandas.DataFrame<dataframe>`
Dataframe with all generators of the type (and in weather cell)
specified in `curtailment_key` parameter. See return value of
:func:`edisgo.grid.tools.get_gen_info` for more information.
curtailment_timeseries : :pandas:`pandas.Series<series>`
The curtailment in kW to be distributed amongst the generators in
`generators` parameter. Index of the series is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`.
edisgo : :class:`edisgo.grid.network.EDisGo`
curtailment_key::obj:`str` or :obj:`tuple` with :obj:`str`
The technology and weather cell ID if :obj:`tuple` or only
the technology if :obj:`str` the curtailment is specified for. | 3.470765 | 3.432173 | 1.011244 |
# Allow a small numerical tolerance (< 0.1 kW) between allocated and
# requested curtailment in every time step.
deviation = (curtailment.sum(axis=1) - curtailment_target).abs()
if not (deviation < 1e-1).all():
    message = 'Curtailment target not met for {}.'.format(curtailment_key)
    logging.error(message)
    # TypeError is kept for backward compatibility with existing callers.
    raise TypeError(message)
curtailment_key) | Raises an error if curtailment target was not met in any time step.
Parameters
-----------
curtailment : :pandas:`pandas:DataFrame<dataframe>`
Dataframe containing the curtailment in kW per generator and time step.
Index is a :pandas:`pandas.DatetimeIndex<datetimeindex>`, columns are
the generator representatives.
curtailment_target : :pandas:`pandas.Series<series>`
The curtailment in kW that was to be distributed amongst the
generators. Index of the series is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`.
curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
The technology and weather cell ID if :obj:`tuple` or only
the technology if :obj:`str` the curtailment was specified for. | 3.356536 | 3.401092 | 0.986899 |
# Collect the generator objects while attaching each one's curtailment
# time series.
gen_object_list = []
for gen in curtailment.columns:
    # get generator object from its string representative via the
    # `gen_repr` column of the generators dataframe
    gen_object = generators.loc[generators.gen_repr == gen].index[0]
    # assign curtailment to individual generators
    gen_object.curtailment = curtailment.loc[:, gen]
    gen_object_list.append(gen_object)
# set timeseries.curtailment
if edisgo.network.timeseries._curtailment:
    edisgo.network.timeseries._curtailment.extend(gen_object_list)
    edisgo.network.results._curtailment[curtailment_key] = \
        gen_object_list
else:
    # First assignment: timeseries keeps the list itself, ...
    edisgo.network.timeseries._curtailment = gen_object_list
    # list needs to be copied, otherwise it will be extended every time
    # a new key is added to results._curtailment
    edisgo.network.results._curtailment = \
        {curtailment_key: gen_object_list.copy()}
This function also writes a list of the curtailed generators to curtailment
in :class:`edisgo.grid.network.TimeSeries` and
:class:`edisgo.grid.network.Results`.
Parameters
----------
curtailment : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing the curtailment in kW per generator and time step
for all generators of the type (and in weather cell) specified in
`curtailment_key` parameter. Index is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`, columns are the
generator representatives.
edisgo : :class:`edisgo.grid.network.EDisGo`
generators : :pandas:`pandas.DataFrame<dataframe>`
Dataframe with all generators of the type (and in weather cell)
specified in `curtailment_key` parameter. See return value of
:func:`edisgo.grid.tools.get_gen_info` for more information.
curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
The technology and weather cell ID if :obj:`tuple` or only
the technology if :obj:`str` the curtailment is specified for. | 3.242624 | 2.886088 | 1.123536 |
# get parameters for standard transformer
try:
    standard_transformer = network.equipment_data['lv_trafos'].loc[
        network.config['grid_expansion_standard_equipment'][
            'mv_lv_transformer']]
except KeyError:
    # NOTE(review): swallowing the KeyError leaves `standard_transformer`
    # unbound; if the else-branch below is reached this fails with a
    # NameError. Consider re-raising with a clear message instead.
    print('Standard MV/LV transformer is not in equipment list.')
transformers_changes = {'added': {}, 'removed': {}}
for station in critical_stations.index:
    # list of maximum power of each transformer in the station
    s_max_per_trafo = [_.type.S_nom for _ in station.transformers]
    # maximum station load from power flow analysis
    s_station_pfa = critical_stations.s_pfa[station]
    # determine missing transformer power to solve overloading issue;
    # the allowed load factor depends on the load/feed-in case of the
    # time step where the over-loading occurred
    case = network.timeseries.timesteps_load_feedin_case.case[
        critical_stations.time_index[station]]
    load_factor = network.config['grid_expansion_load_factors'][
        'lv_{}_transformer'.format(case)]
    s_trafo_missing = s_station_pfa - (sum(s_max_per_trafo) * load_factor)
    # check if second transformer of the same kind is sufficient
    # if true install second transformer, otherwise install as many
    # standard transformers as needed
    if max(s_max_per_trafo) >= s_trafo_missing:
        # if station has more than one transformer install a new
        # transformer of the same kind as the transformer that best
        # meets the missing power demand
        duplicated_transformer = min(
            [_ for _ in station.transformers
             if _.type.S_nom > s_trafo_missing],
            key=lambda j: j.type.S_nom - s_trafo_missing)
        new_transformer = Transformer(
            id='LVStation_{}_transformer_{}'.format(
                str(station.id), str(len(station.transformers) + 1)),
            geom=duplicated_transformer.geom,
            mv_grid=duplicated_transformer.mv_grid,
            grid=duplicated_transformer.grid,
            voltage_op=duplicated_transformer.voltage_op,
            type=copy.deepcopy(duplicated_transformer.type))
        # add transformer to station and return value
        station.add_transformer(new_transformer)
        transformers_changes['added'][station] = [new_transformer]
    else:
        # get any transformer to get attributes for new transformer from
        station_transformer = station.transformers[0]
        # calculate how many parallel standard transformers are needed
        number_transformers = math.ceil(
            s_station_pfa / standard_transformer.S_nom)
        # add transformer to station; all existing transformers are
        # replaced by the new set of standard transformers
        new_transformers = []
        for i in range(number_transformers):
            new_transformer = Transformer(
                id='LVStation_{}_transformer_{}'.format(
                    str(station.id), str(i + 1)),
                geom=station_transformer.geom,
                mv_grid=station_transformer.mv_grid,
                grid=station_transformer.grid,
                voltage_op=station_transformer.voltage_op,
                type=copy.deepcopy(standard_transformer))
            new_transformers.append(new_transformer)
        transformers_changes['added'][station] = new_transformers
        transformers_changes['removed'][station] = station.transformers
        station.transformers = new_transformers
return transformers_changes
In a first step a parallel transformer of the same kind is installed.
If this is not sufficient as many standard transformers as needed are
installed.
Parameters
----------
network : :class:`~.grid.network.Network`
critical_stations : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded MV/LV stations, their apparent power
at maximal over-loading and the corresponding time step.
Index of the dataframe are the over-loaded stations of type
:class:`~.grid.components.LVStation`. Columns are 's_pfa'
containing the apparent power at maximal over-loading as float and
'time_index' containing the corresponding time step the over-loading
occured in as :pandas:`pandas.Timestamp<timestamp>`. See
:func:`~.flex_opt.check_tech_constraints.mv_lv_station_load` for more
information.
Returns
-------
dict
Dictionary with lists of added and removed transformers. | 4.02694 | 3.665482 | 1.098611 |
# get parameters for standard transformer
try:
standard_transformer = network.equipment_data['lv_trafos'].loc[
network.config['grid_expansion_standard_equipment'][
'mv_lv_transformer']]
except KeyError:
print('Standard MV/LV transformer is not in equipment list.')
transformers_changes = {'added': {}}
for grid in critical_stations.keys():
# get any transformer to get attributes for new transformer from
station_transformer = grid.station.transformers[0]
new_transformer = Transformer(
id='LVStation_{}_transformer_{}'.format(
str(grid.station.id), str(len(grid.station.transformers) + 1)),
geom=station_transformer.geom,
mv_grid=station_transformer.mv_grid,
grid=station_transformer.grid,
voltage_op=station_transformer.voltage_op,
type=copy.deepcopy(standard_transformer))
# add standard transformer to station and return value
grid.station.add_transformer(new_transformer)
transformers_changes['added'][grid.station] = [new_transformer]
if transformers_changes['added']:
logger.debug("==> {} LV station(s) has/have been reinforced ".format(
str(len(transformers_changes['added']))) +
"due to overloading issues.")
return transformers_changes | def extend_distribution_substation_overvoltage(network, critical_stations) | Reinforce MV/LV substations due to voltage issues.
A parallel standard transformer is installed.
Parameters
----------
network : :class:`~.grid.network.Network`
critical_stations : :obj:`dict`
Dictionary with :class:`~.grid.grids.LVGrid` as key and a
:pandas:`pandas.DataFrame<dataframe>` with its critical station and
maximum voltage deviation as value.
Index of the dataframe is the :class:`~.grid.components.LVStation`
with over-voltage issues. Columns are 'v_mag_pu' containing the
maximum voltage deviation as float and 'time_index' containing the
corresponding time step the over-voltage occurred in as
:pandas:`pandas.Timestamp<timestamp>`.
Returns
-------
Dictionary with lists of added transformers. | 6.567576 | 5.776654 | 1.136917 |
# load standard line data
try:
standard_line_lv = network.equipment_data['lv_cables'].loc[
network.config['grid_expansion_standard_equipment']['lv_line']]
except KeyError:
print('Chosen standard LV line is not in equipment list.')
try:
standard_line_mv = network.equipment_data['mv_cables'].loc[
network.config['grid_expansion_standard_equipment']['mv_line']]
except KeyError:
print('Chosen standard MV line is not in equipment list.')
lines_changes = {}
for crit_line in crit_lines.index:
rel_overload = crit_lines.max_rel_overload[crit_line]
# check if line is in LV or MV and set standard line accordingly
if isinstance(crit_line.grid, LVGrid):
standard_line = standard_line_lv
else:
standard_line = standard_line_mv
if crit_line.type.name == standard_line.name:
# check how many parallel standard lines are needed
number_parallel_lines = math.ceil(
rel_overload * crit_line.quantity)
lines_changes[crit_line] = (number_parallel_lines -
crit_line.quantity)
crit_line.quantity = number_parallel_lines
else:
# check if parallel line of the same kind is sufficient
if (crit_line.quantity == 1 and rel_overload <= 2
and crit_line.kind == 'cable'):
crit_line.quantity = 2
lines_changes[crit_line] = 1
else:
number_parallel_lines = math.ceil(
crit_line.type['I_max_th'] * rel_overload /
standard_line['I_max_th'])
lines_changes[crit_line] = number_parallel_lines
crit_line.type = standard_line.copy()
crit_line.quantity = number_parallel_lines
crit_line.kind = 'cable'
if not crit_lines.empty:
logger.debug('==> {} branche(s) was/were reinforced '.format(
crit_lines.shape[0]) + 'due to over-loading issues.')
return lines_changes | def reinforce_branches_overloading(network, crit_lines) | Reinforce MV or LV grid due to overloading.
Parameters
----------
network : :class:`~.grid.network.Network`
crit_lines : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing over-loaded lines, their maximum relative
over-loading and the corresponding time step.
Index of the dataframe are the over-loaded lines of type
:class:`~.grid.components.Line`. Columns are 'max_rel_overload'
containing the maximum relative over-loading as float and 'time_index'
containing the corresponding time step the over-loading occurred in as
:pandas:`pandas.Timestamp<timestamp>`.
Returns
-------
Dictionary with :class:`~.grid.components.Line` and the number of Lines
added.
Notes
-----
Reinforce measures:
1. Install parallel line of the same type as the existing line (Only if
line is a cable, not an overhead line. Otherwise a standard equipment
cable is installed right away.)
2. Remove old line and install as many parallel standard lines as
needed. | 3.570007 | 2.973044 | 1.200792 |
url = ctx.sources.ST_TONER_LITE
xmin, xmax, ymin, ymax = ax.axis()
basemap, extent = ctx.bounds2img(xmin, ymin, xmax, ymax,
zoom=zoom, url=url)
ax.imshow(basemap, extent=extent, interpolation='bilinear')
# restore original x/y limits
ax.axis((xmin, xmax, ymin, ymax)) | def add_basemap(ax, zoom=12) | Adds map to a plot. | 4.752239 | 5.157954 | 0.921342 |
    # make DB session
    conn = connection(section=config['db_connection']['section'])
    Session = sessionmaker(bind=conn)
    session = Session()
    # get polygon from versioned schema
    if config['data_source']['oedb_data_source'] == 'versioned':
        version = config['versioned']['version']
        query = session.query(EgoDpMvGriddistrict.subst_id,
                              EgoDpMvGriddistrict.geom)
        # NOTE(review): this branch filters with `subst_id == subst_id`
        # while the model_draft branch uses `.in_(subst_id)` -- confirm
        # whether `subst_id` is a scalar or a list of IDs here.
        Regions = [(subst_id, shape.to_shape(geom)) for subst_id, geom in
                   query.filter(EgoDpMvGriddistrict.version == version,
                                EgoDpMvGriddistrict.subst_id == subst_id).all()
                   ]
    # get polygon from model_draft
    else:
        query = session.query(EgoGridMvGriddistrict.subst_id,
                              EgoGridMvGriddistrict.geom)
        Regions = [(subst_id, shape.to_shape(geom)) for subst_id, geom in
                   query.filter(EgoGridMvGriddistrict.subst_id.in_(
                       subst_id)).all()]
    # geometries are stored in EPSG:3035; re-project to the requested CRS
    crs = {'init': 'epsg:3035'}
    region = gpd.GeoDataFrame(
        Regions, columns=['subst_id', 'geometry'], crs=crs)
    region = region.to_crs(epsg=projection)
    # NOTE(review): the DB session is never closed -- consider calling
    # session.close() before returning.
    return region
mv_load_timeseries_p = []
lv_load_timeseries_p = []
# add MV grid loads
if mode is 'mv' or mode is None:
for load in network.mv_grid.graph.nodes_by_attribute('load'):
mv_load_timeseries_p.append(load.pypsa_timeseries('p').rename(
repr(load)).to_frame().loc[timesteps])
# add LV grid's loads
if mode is 'lv' or mode is None:
for lv_grid in network.mv_grid.lv_grids:
for load in lv_grid.graph.nodes_by_attribute('load'):
for sector in list(load.consumption.keys()):
# for sector in list(list(load.consumption.keys())[0]):
# ToDo: remove consideration of only industrial sector
# now, if a load object has consumption in multiple sectors
# (like currently only industrial/retail) the consumption is
# implicitly assigned to the industrial sector when being
# exported to pypsa.
# ToDo: resolve this in the importer
if sector != 'retail':
# lv_load_timeseries_q.append(
# load.pypsa_timeseries('q').rename(
# repr(load)).to_frame().loc[timesteps])
lv_load_timeseries_p.append(
load.pypsa_timeseries('p').rename(
repr(load)).to_frame().loc[timesteps])
load_df_p = pd.concat(mv_load_timeseries_p + lv_load_timeseries_p, axis=1)
return load_df_p | def _pypsa_load_timeseries(network, timesteps, mode=None) | Time series in PyPSA compatible format for load instances
Parameters
----------
network : Network
The eDisGo grid topology model overall container
timesteps : array_like
Timesteps is an array-like object with entries of type
:pandas:`pandas.Timestamp<timestamp>` specifying which time steps
to export to pypsa representation and use in power flow analysis.
mode : str, optional
Specifically retrieve load time series for MV or LV grid level or both.
Either choose 'mv' or 'lv'.
Defaults to None, which returns both timeseries for MV and LV in a
single DataFrame.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Time series table in PyPSA format | 4.058821 | 3.996188 | 1.015673 |
mv_gen_timeseries_p_min = []
mv_gen_timeseries_p_max = []
lv_gen_timeseries_p_min = []
lv_gen_timeseries_p_max = []
# MV generator timeseries
if mode is 'mv' or mode is None:
for gen in network.mv_grid.graph.nodes_by_attribute('generator') + \
network.mv_grid.graph.nodes_by_attribute('generator_aggr'):
mv_gen_timeseries_p_min.append(gen.pypsa_timeseries('p').rename(
repr(gen)).to_frame().loc[timesteps] / gen.nominal_capacity)
mv_gen_timeseries_p_max.append(gen.pypsa_timeseries('p').rename(
repr(gen)).to_frame().loc[timesteps] / gen.nominal_capacity)
# LV generator timeseries
if mode is 'lv' or mode is None:
for lv_grid in network.mv_grid.lv_grids:
for gen in lv_grid.graph.nodes_by_attribute('generator'):
lv_gen_timeseries_p_min.append(gen.pypsa_timeseries('p').rename(
repr(gen)).to_frame().loc[timesteps] / gen.nominal_capacity)
lv_gen_timeseries_p_max.append(gen.pypsa_timeseries('p').rename(
repr(gen)).to_frame().loc[timesteps] / gen.nominal_capacity)
# Slack time series
lv_gen_timeseries_p_min.append(
pd.Series([-1] * len(timesteps), index=timesteps).rename(
"Generator_slack").to_frame())
lv_gen_timeseries_p_max.append(
pd.Series([1] * len(timesteps), index=timesteps).rename(
"Generator_slack").to_frame())
gen_df_p_max = pd.concat(
mv_gen_timeseries_p_max + lv_gen_timeseries_p_max, axis=1)
gen_df_p_min = pd.concat(
mv_gen_timeseries_p_min + lv_gen_timeseries_p_min, axis=1)
return gen_df_p_min, gen_df_p_max | def _pypsa_generator_timeseries(network, timesteps, mode=None) | Timeseries in PyPSA compatible format for generator instances
Parameters
----------
network : Network
The eDisGo grid topology model overall container
timesteps : array_like
Timesteps is an array-like object with entries of type
:pandas:`pandas.Timestamp<timestamp>` specifying which time steps
to export to pypsa representation and use in power flow analysis.
mode : str, optional
Specifically retrieve generator time series for MV or LV grid level or
both. Either choose 'mv' or 'lv'.
Defaults to None, which returns both timeseries for MV and LV in a
single DataFrame.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Time series table in PyPSA format | 1.972297 | 1.921593 | 1.026386 |
mv_storage_timeseries_p_min = []
mv_storage_timeseries_p_max = []
# MV storage time series
if mode is 'mv' or mode is None:
for storage in network.mv_grid.graph.nodes_by_attribute('storage'):
mv_storage_timeseries_p_min.append(
storage.timeseries.p.rename(repr(
storage)).to_frame().loc[timesteps])
mv_storage_timeseries_p_max.append(
storage.timeseries.p.rename(repr(
storage)).to_frame().loc[timesteps])
storage_df_p_min = pd.concat(
mv_storage_timeseries_p_min, axis=1)
storage_df_p_max = pd.concat(
mv_storage_timeseries_p_max, axis=1)
return storage_df_p_min, storage_df_p_max | def _pypsa_storage_timeseries(network, timesteps, mode=None) | Timeseries in PyPSA compatible format for storage instances
Parameters
----------
network : Network
The eDisGo grid topology model overall container
timesteps : array_like
Timesteps is an array-like object with entries of type
:pandas:`pandas.Timestamp<timestamp>` specifying which time steps
to export to pypsa representation and use in power flow analysis.
mode : str, optional
Specifically retrieve generator time series for MV or LV grid level or
both. Either choose 'mv' or 'lv'.
Defaults to None, which returns both timeseries for MV and LV in a
single DataFrame.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Time series table in PyPSA format | 2.824572 | 2.824403 | 1.00006 |
        # Return an explicitly set time series if present; otherwise build
        # one from the sectoral demand series in the network's TimeSeries.
        if self._timeseries is None:
            # voltage level decides which (possibly differentiated) demand
            # series applies to this load
            if isinstance(self.grid, MVGrid):
                voltage_level = 'mv'
            elif isinstance(self.grid, LVGrid):
                voltage_level = 'lv'
            # NOTE(review): `voltage_level` stays unbound if the grid is
            # neither MVGrid nor LVGrid -- confirm this cannot happen
            ts_total = None
            for sector in self.consumption.keys():
                consumption = self.consumption[sector]
                # check if load time series for MV and LV are differentiated
                try:
                    ts = self.grid.network.timeseries.load[
                        sector, voltage_level].to_frame('p')
                except KeyError:
                    try:
                        ts = self.grid.network.timeseries.load[
                            sector].to_frame('p')
                    except KeyError:
                        logger.exception(
                            "No timeseries for load of type {} "
                            "given.".format(sector))
                        raise
                # scale normalized series by the sector's annual consumption
                ts = ts * consumption
                ts_q = self.timeseries_reactive
                if ts_q is not None:
                    ts['q'] = ts_q.q
                else:
                    # derive reactive power from the power factor convention
                    ts['q'] = ts['p'] * self.q_sign * tan(
                        acos(self.power_factor))
                # accumulate active and reactive power over all sectors
                if ts_total is None:
                    ts_total = ts
                else:
                    ts_total.p += ts.p
                    ts_total.q += ts.q
            return ts_total
        else:
            return self._timeseries
It returns the actual time series used in power flow analysis. If
:attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise,
:meth:`timeseries()` looks for time series of the according sector in
:class:`~.grid.network.TimeSeries` object.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power in kW in column 'p' and
reactive power in kVA in column 'q'. | 4.435693 | 3.668672 | 1.209073 |
        if self._timeseries_reactive is None:
            # if normalized reactive power time series are given, they are
            # scaled by the annual consumption; if none are given reactive
            # power time series are calculated timeseries getter using a given
            # power factor
            if self.grid.network.timeseries.load_reactive_power is not None:
                # explicit series provided -> power-factor handling no
                # longer applies to this load
                self.power_factor = 'not_applicable'
                self.reactive_power_mode = 'not_applicable'
                ts_total = None
                for sector in self.consumption.keys():
                    consumption = self.consumption[sector]
                    try:
                        ts = self.grid.network.timeseries.load_reactive_power[
                            sector].to_frame('q')
                    except KeyError:
                        logger.exception(
                            "No timeseries for load of type {} "
                            "given.".format(sector))
                        raise
                    # scale normalized series by the sector's annual
                    # consumption and accumulate over all sectors
                    ts = ts * consumption
                    if ts_total is None:
                        ts_total = ts
                    else:
                        ts_total.q += ts.q
                return ts_total
            else:
                return None
        else:
            return self._timeseries_reactive
Parameters
-----------
timeseries_reactive : :pandas:`pandas.Series<series>`
Series containing reactive power in kvar.
Returns
-------
:pandas:`pandas.Series<series>` or None
Series containing reactive power time series in kvar. If it is not
set it is tried to be retrieved from `load_reactive_power`
attribute of global TimeSeries object. If that is not possible
None is returned. | 5.688306 | 5.403619 | 1.052684 |
peak_load = pd.Series(self.consumption).mul(pd.Series(
self.grid.network.config['peakload_consumption_ratio']).astype(
float), fill_value=0)
return peak_load | def peak_load(self) | Get sectoral peak load | 8.210471 | 8.086493 | 1.015331 |
if self._power_factor is None:
if isinstance(self.grid, MVGrid):
self._power_factor = self.grid.network.config[
'reactive_power_factor']['mv_load']
elif isinstance(self.grid, LVGrid):
self._power_factor = self.grid.network.config[
'reactive_power_factor']['lv_load']
return self._power_factor | def power_factor(self) | Power factor of load
Parameters
-----------
power_factor : :obj:`float`
Ratio of real power to apparent power.
Returns
--------
:obj:`float`
Ratio of real power to apparent power. If power factor is not set
it is retrieved from the network config object depending on the
grid level the load is in. | 3.243488 | 2.850022 | 1.138057 |
if self._reactive_power_mode is None:
if isinstance(self.grid, MVGrid):
self._reactive_power_mode = self.grid.network.config[
'reactive_power_mode']['mv_load']
elif isinstance(self.grid, LVGrid):
self._reactive_power_mode = self.grid.network.config[
'reactive_power_mode']['lv_load']
return self._reactive_power_mode | def reactive_power_mode(self) | Power factor mode of Load.
This information is necessary to make the load behave in an inductive
or capacitive manner. Essentially this changes the sign of the reactive
power.
The convention used here in a load is that:
- when `reactive_power_mode` is 'inductive' then Q is positive
- when `reactive_power_mode` is 'capacitive' then Q is negative
Parameters
----------
reactive_power_mode : :obj:`str` or None
Possible options are 'inductive', 'capacitive' and
'not_applicable'. In the case of 'not_applicable' a reactive
power time series must be given.
Returns
-------
:obj:`str`
In the case that this attribute is not set, it is retrieved from
the network config object depending on the voltage level the load
is in. | 2.677644 | 2.772825 | 0.965674 |
        if self._timeseries is None:
            # calculate time series for active and reactive power
            try:
                # normalized dispatch series for this technology type
                timeseries = \
                    self.grid.network.timeseries.generation_dispatchable[
                        self.type].to_frame('p')
            except KeyError:
                try:
                    # fall back to the generic 'other' technology type
                    timeseries = \
                        self.grid.network.timeseries.generation_dispatchable[
                            'other'].to_frame('p')
                except KeyError:
                    logger.exception("No time series for type {} "
                                     "given.".format(self.type))
                    raise
            # scale the normalized series to this generator's size
            timeseries = timeseries * self.nominal_capacity
            if self.timeseries_reactive is not None:
                timeseries['q'] = self.timeseries_reactive
            else:
                # derive reactive power from active power via the power
                # factor; q_sign encodes inductive/capacitive behaviour
                timeseries['q'] = timeseries['p'] * self.q_sign * tan(acos(
                    self.power_factor))
            return timeseries
        else:
            return self._timeseries.loc[
                self.grid.network.timeseries.timeindex, :]
It returns the actual dispatch time series used in power flow analysis.
If :attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise,
:meth:`timeseries` looks for time series of the according type of
technology in :class:`~.grid.network.TimeSeries`. If the reactive
power time series is provided through :attr:`_timeseries_reactive`,
this is added to :attr:`_timeseries`. When :attr:`_timeseries_reactive`
is not set, the reactive power is also calculated in
:attr:`_timeseries` using :attr:`power_factor` and
:attr:`reactive_power_mode`. The :attr:`power_factor` determines the
magnitude of the reactive power based on the power factor and active
power provided and the :attr:`reactive_power_mode` determines if the
reactive power is either consumed (inductive behaviour) or provided
(capacitive behaviour).
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power in kW in column 'p' and
reactive power in kvar in column 'q'. | 4.649827 | 3.647581 | 1.27477 |
if self._timeseries_reactive is None:
if self.grid.network.timeseries.generation_reactive_power \
is not None:
try:
timeseries = \
self.grid.network.timeseries.generation_reactive_power[
self.type].to_frame('q')
except (KeyError, TypeError):
try:
timeseries = \
self.grid.network.timeseries.generation_reactive_power[
'other'].to_frame('q')
except:
logger.warning(
"No reactive power time series for type {} given. "
"Reactive power time series will be calculated from "
"assumptions in config files and active power "
"timeseries.".format(self.type))
return None
self.power_factor = 'not_applicable'
self.reactive_power_mode = 'not_applicable'
return timeseries * self.nominal_capacity
else:
return None
else:
return self._timeseries_reactive.loc[
self.grid.network.timeseries.timeindex, :] | def timeseries_reactive(self) | Reactive power time series in kvar.
Parameters
-----------
timeseries_reactive : :pandas:`pandas.Series<series>`
Series containing reactive power in kvar.
Returns
-------
:pandas:`pandas.Series<series>` or None
Series containing reactive power time series in kvar. If it is not
set it is tried to be retrieved from `generation_reactive_power`
attribute of global TimeSeries object. If that is not possible
None is returned. | 4.451702 | 4.034074 | 1.103525 |
        if self._timeseries is None:
            # get time series for active power depending on if they are
            # differentiated by weather cell ID or not
            if isinstance(self.grid.network.timeseries.generation_fluctuating.
                          columns, pd.MultiIndex):
                # NOTE(review): a weather cell ID of 0 would be treated as
                # missing here -- confirm IDs are always truthy
                if self.weather_cell_id:
                    try:
                        timeseries = self.grid.network.timeseries.\
                            generation_fluctuating[
                                self.type, self.weather_cell_id].to_frame('p')
                    except KeyError:
                        logger.exception("No time series for type {} and "
                                         "weather cell ID {} given.".format(
                                self.type, self.weather_cell_id))
                        raise
                else:
                    logger.exception("No weather cell ID provided for "
                                     "fluctuating generator {}.".format(
                            repr(self)))
                    raise KeyError
            else:
                try:
                    timeseries = self.grid.network.timeseries.\
                        generation_fluctuating[self.type].to_frame('p')
                except KeyError:
                    logger.exception("No time series for type {} "
                                     "given.".format(self.type))
                    raise
            # normalized feed-in -> scale to this generator's size
            timeseries = timeseries * self.nominal_capacity
            # subtract curtailment
            if self.curtailment is not None:
                timeseries = timeseries.join(
                    self.curtailment.to_frame('curtailment'), how='left')
                timeseries.p = timeseries.p - timeseries.curtailment.fillna(0)
            if self.timeseries_reactive is not None:
                timeseries['q'] = self.timeseries_reactive
            else:
                # derive reactive power from the power factor convention
                timeseries['q'] = timeseries['p'] * self.q_sign * tan(acos(
                    self.power_factor))
            return timeseries
        else:
            return self._timeseries.loc[
                self.grid.network.timeseries.timeindex, :]
It returns the actual time series used in power flow analysis. If
:attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise,
:meth:`timeseries` looks for generation and curtailment time series
of the according type of technology (and weather cell) in
:class:`~.grid.network.TimeSeries`.
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power in kW in column 'p' and
reactive power in kVA in column 'q'. | 3.802877 | 3.264061 | 1.165075 |
        if self._timeseries_reactive is None:
            # try to get time series for reactive power depending on if they
            # are differentiated by weather cell ID or not
            # raise warning if no time series for generator type (and weather
            # cell ID) can be retrieved
            if self.grid.network.timeseries.generation_reactive_power \
                    is not None:
                if isinstance(
                        self.grid.network.timeseries.generation_reactive_power.
                        columns, pd.MultiIndex):
                    # series differentiated by technology and weather cell
                    # NOTE(review): a weather cell ID of 0 is treated as
                    # missing here -- confirm IDs are always truthy
                    if self.weather_cell_id:
                        try:
                            timeseries = self.grid.network.timeseries. \
                                generation_reactive_power[
                                    self.type, self.weather_cell_id].to_frame('q')
                            # normalized series -> scale to generator size
                            return timeseries * self.nominal_capacity
                        except (KeyError, TypeError):
                            logger.warning("No time series for type {} and "
                                           "weather cell ID {} given. "
                                           "Reactive power time series will "
                                           "be calculated from assumptions "
                                           "in config files and active power "
                                           "timeseries.".format(
                                self.type, self.weather_cell_id))
                            return None
                    else:
                        raise ValueError(
                            "No weather cell ID provided for fluctuating "
                            "generator {}, but reactive power is given as a "
                            "MultiIndex suggesting that it is differentiated "
                            "by weather cell ID.".format(repr(self)))
                else:
                    # series differentiated by technology only
                    try:
                        timeseries = self.grid.network.timeseries. \
                            generation_reactive_power[self.type].to_frame('q')
                        return timeseries * self.nominal_capacity
                    except (KeyError, TypeError):
                        logger.warning("No reactive power time series for "
                                       "type {} given. Reactive power time "
                                       "series will be calculated from "
                                       "assumptions in config files and "
                                       "active power timeseries.".format(
                            self.type))
                        return None
            else:
                return None
        else:
            return self._timeseries_reactive.loc[
                self.grid.network.timeseries.timeindex, :]
Parameters
-------
:pandas:`pandas.Series<series>`
Series containing reactive power time series in kvar.
Returns
----------
:pandas:`pandas.DataFrame<dataframe>` or None
Series containing reactive power time series in kvar. If it is not
set it is tried to be retrieved from `generation_reactive_power`
attribute of global TimeSeries object. If that is not possible
None is returned. | 3.51899 | 3.275979 | 1.07418 |
        # An explicitly set curtailment series takes precedence.
        if self._curtailment is not None:
            return self._curtailment
        elif isinstance(self.grid.network.timeseries._curtailment,
                        pd.DataFrame):
            # NOTE(review): the type check reads the private `_curtailment`
            # attribute while the lookups below use the public `curtailment`
            # property -- confirm both refer to the same data
            if isinstance(self.grid.network.timeseries.curtailment.
                          columns, pd.MultiIndex):
                # curtailment differentiated by technology and weather cell
                if self.weather_cell_id:
                    try:
                        return self.grid.network.timeseries.curtailment[
                            self.type, self.weather_cell_id]
                    except KeyError:
                        logger.exception("No curtailment time series for type "
                                         "{} and weather cell ID {} "
                                         "given.".format(self.type,
                                                         self.weather_cell_id))
                        raise
                else:
                    logger.exception("No weather cell ID provided for "
                                     "fluctuating generator {}.".format(
                            repr(self)))
                    raise KeyError
            else:
                # curtailment differentiated by technology only
                try:
                    return self.grid.network.timeseries.curtailment[self.type]
                except KeyError:
                    logger.exception("No curtailment time series for type "
                                     "{} given.".format(self.type))
                    raise
        else:
            return None
return None | def curtailment(self) | Parameters
----------
curtailment_ts : :pandas:`pandas.Series<series>`
See class definition for details.
Returns
-------
:pandas:`pandas.Series<series>`
If self._curtailment is set it returns that. Otherwise, if
curtailment in :class:`~.grid.network.TimeSeries` for the
corresponding technology type (and if given, weather cell ID)
is set this is returned. | 2.823672 | 2.458565 | 1.148504 |
# check if time series for reactive power is given, otherwise
# calculate it
if 'q' in self._timeseries.columns:
return self._timeseries
else:
self._timeseries['q'] = abs(self._timeseries.p) * self.q_sign * \
tan(acos(self.power_factor))
return self._timeseries.loc[
self.grid.network.timeseries.timeindex, :] | def timeseries(self) | Time series of storage operation
Parameters
----------
ts : :pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power the storage is charged (negative)
and discharged (positive) with (on the grid side) in kW in column
'p' and reactive power in kvar in column 'q'. When 'q' is positive,
reactive power is supplied (behaving as a capacitor) and when 'q'
is negative reactive power is consumed (behaving as an inductor).
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
See parameter `timeseries`. | 9.053712 | 8.021444 | 1.128688 |
if self.reactive_power_mode.lower() == 'inductive':
return -1
elif self.reactive_power_mode.lower() == 'capacitive':
return 1
else:
raise ValueError("Unknown value {} in reactive_power_mode".format(
self.reactive_power_mode)) | def q_sign(self) | Get the sign reactive power based on the
:attr: `_reactive_power_mode`
Returns
-------
:obj: `int` : +1 or -1 | 3.414972 | 2.724074 | 1.253626 |
if self._state != 'open':
if self._line is not None:
self._state = 'open'
self._nodes = self.grid.graph.nodes_from_line(self._line)
self.grid.graph.remove_edge(
self._nodes[0], self._nodes[1])
else:
raise ValueError('``line`` is not set') | def open(self) | Toggle state to open switch disconnector | 4.52685 | 4.570426 | 0.990466 |
self._state = 'closed'
self.grid.graph.add_edge(
self._nodes[0], self._nodes[1], {'line': self._line}) | def close(self) | Toggle state to closed switch disconnector | 8.593439 | 7.870216 | 1.091894 |
adj_nodes = self._grid._graph.nodes_from_line(self)
return LineString([adj_nodes[0].geom, adj_nodes[1].geom]) | def geom(self) | Provide :shapely:`Shapely LineString object<linestrings>` geometry of
:class:`Line` | 11.2879 | 9.166615 | 1.231414 |
@functools.wraps(func)
def wrapper(self, request, context):
return func(self, request, LogFieldsContext(context))
return wrapper | def wrap_context(func) | Wraps the provided servicer method by passing a wrapped context
The context is wrapped using `lookout.sdk.grpc.log_fields.LogFieldsContext`.
:param func: the servicer method to wrap_context
:returns: the wrapped servicer method | 4.302364 | 4.080059 | 1.054486 |
self._log_fields.add_fields(fields) | def add_log_fields(self, fields: Dict[str, Any]) | Add the provided log fields
If a key is already present, then it is ignored.
:param fields: the log fields to add | 13.331674 | 12.452669 | 1.070588 |
metadata = [(k, v) for k, v in self._invocation_metadata.items()
if k != LOG_FIELDS_KEY_META]
metadata.append((LOG_FIELDS_KEY_META, self._log_fields.dumps()))
return metadata | def pack_metadata(self) -> List[Tuple[str, Any]] | Packs the log fields and the invocation metadata into a new metadata
The log fields are added in the new metadata with the key
`LOG_FIELDS_KEY_META`. | 5.021659 | 2.81858 | 1.781627 |
return cls(fields=json.loads(metadata.get(LOG_FIELDS_KEY_META, '{}'))) | def from_metadata(cls, metadata: Dict[str, Any]) -> 'LogFields' | Initialize the log fields from the provided metadata
The log fields are taken from the `LOG_FIELDS_KEY_META` key of the
provided metadata. | 8.594823 | 4.835995 | 1.777261 |
for k, v in fields.items():
if k not in self._fields:
self._fields[k] = v | def add_fields(self, fields) | Add the provided log fields
If a key is already present, then it is ignored.
:param fields: the log fields to add | 2.774429 | 3.303904 | 0.839743 |
if self.network.pypsa is None:
try:
timesteps = self.network.timeseries.timeindex
self.network.pypsa = pypsa_io.to_pypsa(
self.network, mode=None, timesteps=timesteps)
except:
logging.warning(
"pypsa representation of MV grid needed to plot MV "
"grid topology.")
if self.network.pypsa is not None:
plots.mv_grid_topology(
self.network.pypsa, self.network.config,
node_color='technology' if technologies is True else None,
filename=kwargs.get('filename', None),
grid_district_geom=kwargs.get('grid_district_geom', True),
background_map=kwargs.get('background_map', True),
xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None),
title=kwargs.get('title', '')) | def plot_mv_grid_topology(self, technologies=False, **kwargs) | Plots plain MV grid topology and optionally nodes by technology type
(e.g. station or generator).
Parameters
----------
technologies : :obj:`Boolean`
If True plots stations, generators, etc. in the grid in different
colors. If False does not plot any nodes. Default: False.
For more information see :func:`edisgo.tools.plots.mv_grid_topology`. | 3.705071 | 3.594008 | 1.030902 |
if self.network.pypsa is not None:
try:
v_res = self.network.results.v_res()
except:
logging.warning("Voltages `pfa_v_mag_pu` from power flow "
"analysis must be available to plot them.")
return
plots.mv_grid_topology(
self.network.pypsa, self.network.config,
timestep=kwargs.get('timestep', None),
node_color='voltage',
filename=kwargs.get('filename', None),
grid_district_geom=kwargs.get('grid_district_geom', True),
background_map=kwargs.get('background_map', True),
voltage=v_res,
limits_cb_nodes=kwargs.get('limits_cb_nodes', None),
xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None),
title=kwargs.get('title', ''))
else:
logging.warning("pypsa representation of MV grid needed to "
"plot voltages.") | def plot_mv_voltages(self, **kwargs) | Plots voltages in MV grid on grid topology plot.
For more information see :func:`edisgo.tools.plots.mv_grid_topology`. | 5.263297 | 4.727306 | 1.113382 |
if self.network.pypsa is not None and \
self.network.results.i_res is not None:
plots.mv_grid_topology(
self.network.pypsa, self.network.config,
timestep=kwargs.get('timestep', None),
line_color='loading',
node_color=kwargs.get('node_color', None),
line_load=self.network.results.s_res(),
filename=kwargs.get('filename', None),
arrows=kwargs.get('arrows', None),
grid_district_geom=kwargs.get('grid_district_geom', True),
background_map=kwargs.get('background_map', True),
voltage=self.network.results.v_res(),
limits_cb_lines=kwargs.get('limits_cb_lines', None),
limits_cb_nodes=kwargs.get('limits_cb_nodes', None),
xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None),
lines_cmap=kwargs.get('lines_cmap', 'inferno_r'),
title=kwargs.get('title', ''),
scaling_factor_line_width=kwargs.get(
'scaling_factor_line_width', None))
else:
if self.network.pypsa is None:
logging.warning("pypsa representation of MV grid needed to "
"plot line loading.")
if self.network.results.i_res is None:
logging.warning("Currents `i_res` from power flow analysis "
"must be available to plot line loading.") | def plot_mv_line_loading(self, **kwargs) | Plots relative line loading (current from power flow analysis to
allowed current) of MV lines.
For more information see :func:`edisgo.tools.plots.mv_grid_topology`. | 3.778459 | 3.425702 | 1.102973 |
if self.network.pypsa is not None and \
self.network.results.grid_expansion_costs is not None:
if isinstance(self, EDisGo):
# convert index of grid expansion costs to str
grid_expansion_costs = \
self.network.results.grid_expansion_costs.reset_index()
grid_expansion_costs['index'] = \
grid_expansion_costs['index'].apply(lambda _: repr(_))
grid_expansion_costs.set_index('index', inplace=True)
else:
grid_expansion_costs = \
self.network.results.grid_expansion_costs
plots.mv_grid_topology(
self.network.pypsa, self.network.config,
line_color='expansion_costs',
grid_expansion_costs=grid_expansion_costs,
filename=kwargs.get('filename', None),
grid_district_geom=kwargs.get('grid_district_geom', True),
background_map=kwargs.get('background_map', True),
limits_cb_lines=kwargs.get('limits_cb_lines', None),
xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None),
lines_cmap=kwargs.get('lines_cmap', 'inferno_r'),
title=kwargs.get('title', ''),
scaling_factor_line_width=kwargs.get(
'scaling_factor_line_width', None)
)
else:
if self.network.pypsa is None:
logging.warning("pypsa representation of MV grid needed to "
"plot grid expansion costs.")
if self.network.results.grid_expansion_costs is None:
logging.warning("Grid expansion cost results needed to plot "
"them.") | def plot_mv_grid_expansion_costs(self, **kwargs) | Plots costs per MV line.
For more information see :func:`edisgo.tools.plots.mv_grid_topology`. | 3.18045 | 2.899644 | 1.096842 |
if self.network.pypsa is not None:
plots.mv_grid_topology(
self.network.pypsa, self.network.config,
node_color='storage_integration',
filename=kwargs.get('filename', None),
grid_district_geom=kwargs.get('grid_district_geom', True),
background_map=kwargs.get('background_map', True),
xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None),
title=kwargs.get('title', ''))
else:
if self.network.pypsa is None:
logging.warning("pypsa representation of MV grid needed to "
"plot storage integration in MV grid.") | def plot_mv_storage_integration(self, **kwargs) | Plots storage position in MV grid of integrated storages.
For more information see :func:`edisgo.tools.plots.mv_grid_topology`. | 4.754109 | 4.102881 | 1.158725 |
data = self.network.results.v_res()
if title is True:
if timestep is not None:
title = "Voltage histogram for time step {}".format(timestep)
else:
title = "Voltage histogram \nfor time steps {} to {}".format(
data.index[0], data.index[-1])
elif title is False:
title = None
plots.histogram(data=data, title=title, timeindex=timestep, **kwargs) | def histogram_voltage(self, timestep=None, title=True, **kwargs) | Plots histogram of voltages.
For more information see :func:`edisgo.tools.plots.histogram`.
Parameters
----------
timestep : :pandas:`pandas.Timestamp<timestamp>` or None, optional
Specifies time step histogram is plotted for. If timestep is None
all time steps voltages are calculated for are used. Default: None.
title : :obj:`str` or :obj:`bool`, optional
Title for plot. If True title is auto generated. If False plot has
no title. If :obj:`str`, the provided title is used. Default: True. | 3.746971 | 3.862722 | 0.970034 |
residual_load = tools.get_residual_load_from_pypsa_network(
self.network.pypsa)
case = residual_load.apply(
lambda _: 'feedin_case' if _ < 0 else 'load_case')
if timestep is not None:
timeindex = [timestep]
else:
timeindex = self.network.results.s_res().index
load_factor = pd.DataFrame(
data={'s_nom': [float(self.network.config[
'grid_expansion_load_factors'][
'mv_{}_line'.format(case.loc[_])])
for _ in timeindex]},
index=timeindex)
if voltage_level == 'mv':
lines = self.network.pypsa.lines.loc[
self.network.pypsa.lines.v_nom > 1]
elif voltage_level == 'lv':
lines = self.network.pypsa.lines.loc[
self.network.pypsa.lines.v_nom < 1]
else:
lines = self.network.pypsa.lines
s_res = self.network.results.s_res().loc[
timeindex, lines.index]
# get allowed line load
s_allowed = load_factor.dot(
self.network.pypsa.lines.s_nom.to_frame().T * 1e3)
# get line load from pf
data = s_res.divide(s_allowed)
if title is True:
if timestep is not None:
title = "Relative line load histogram for time step {}".format(
timestep)
else:
title = "Relative line load histogram \nfor time steps " \
"{} to {}".format(data.index[0], data.index[-1])
elif title is False:
title = None
plots.histogram(data=data, title=title, **kwargs) | def histogram_relative_line_load(self, timestep=None, title=True,
voltage_level='mv_lv', **kwargs) | Plots histogram of relative line loads.
For more information see :func:`edisgo.tools.plots.histogram`.
Parameters
----------
Parameters
----------
timestep : :pandas:`pandas.Timestamp<timestamp>` or None, optional
Specifies time step histogram is plotted for. If timestep is None
all time steps voltages are calculated for are used. Default: None.
title : :obj:`str` or :obj:`bool`, optional
Title for plot. If True title is auto generated. If False plot has
no title. If :obj:`str`, the provided title is used. Default: True.
voltage_level : :obj:`str`
Specifies which voltage level to plot voltage histogram for.
Possible options are 'mv', 'lv' and 'mv_lv'. 'mv_lv' is also the
fallback option in case of wrong input. Default: 'mv_lv' | 3.675094 | 3.653773 | 1.005835 |
CurtailmentControl(edisgo=self, methodology=methodology,
curtailment_timeseries=curtailment_timeseries,
**kwargs) | def curtail(self, methodology, curtailment_timeseries, **kwargs) | Sets up curtailment time series.
Curtailment time series are written into
:class:`~.grid.network.TimeSeries`. See
:class:`~.grid.network.CurtailmentControl` for more information on
parameters and methodologies. | 6.966642 | 6.807575 | 1.023366 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.