Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
Grid2Op | Grid2Op-master/docs/_static/style.css | /* this stuff uses a couple of themes as a base with some custom stuff added
In particular thanks to:
- Alabaster for being a good base
- Which thanks Flask + KR theme
- Sphinx Readable Theme
- Which also proved to be a great base
- Rapptz from which this is copy pasted
*/
@import url('basic.css');
body {
font-family: 'Georgia', 'Yu Gothic', 'Noto Sans CJK JP Regular', serif;
font-size: 16px;
margin: 0;
padding: 0;
}
p {
margin-bottom: 8px;
}
div.document {
margin: 10px auto 0 auto;
max-width: 1200px; /* page width */
}
div.body {
max-width: 960px;
}
div.documentwrapper {
float: left;
width: 100%;
}
div.bodywrapper {
margin: 0 0 0 240px; /* sidebar width */
}
div.body {
background-color: #ffffff;
color: #3e4349;
padding: 0 30px 30px 30px;
}
div.footer {
color: #555;
font-size: 14px;
margin: 20px auto 30px auto;
text-align: right;
max-width: 1200px; /* page width */
}
div.footer a {
color: #444;
text-decoration: underline;
}
div.related {
padding: 10px 10px;
width: auto;
}
div.sphinxsidebar {
float: left;
font-size: 14px;
line-height: 1.5em;
margin-left: -100%;
width: 240px; /* sidebar width */
}
div.sphinxsidebarwrapper {
font-size: 14px;
line-height: 1.5em;
padding: 10px 0 10px 10px;
/* sticky sidebar */
position: fixed;
width: 240px; /* sidebar width */
height: 90%;
overflow: hidden;
}
/* show scrollbar on hover */
div.sphinxsidebarwrapper:hover {
overflow: auto;
}
div.sphinxsidebar h3,
div.sphinxsidebar h4 {
color: #333;
font-size: 24px;
font-weight: normal;
margin: 0 0 5px 0;
padding: 0;
}
div.sphinxsidebar h4 {
font-size: 1.1em;
}
div.sphinxsidebar h3 a {
color: #333;
}
div.sphinxsidebar p {
color: #888;
}
div.sphinxsidebar p.searchtip {
line-height: 1.4em;
}
div.sphinxsidebar ul {
color: #000;
margin: 10px 0 20px;
padding: 0;
}
div.sphinxsidebar a {
color: #444;
}
div.sphinxsidebar input {
border: 1px solid #ccc;
font-family: sans-serif;
font-size: 1em;
margin-top: 10px;
}
/* -- body styles --------------------------------------------------------- */
a {
color: #2591c4;
text-decoration: none;
}
a:hover {
color: #0b3a44;
text-decoration: underline;
}
hr {
border: 1px solid #b1b4b6;
}
div.body h1,
div.body h2,
div.body h3,
div.body h4,
div.body h5,
div.body h6 { font-weight: normal; }
div.body h1,
div.body h2,
div.body h3,
div.body h4 { color: #212224; }
div.body h5 { color: #000; }
div.body h6 { color: #777; }
div.body h1 { margin: 0 0 10px 0; }
div.body h2,
div.body h3 { margin: 10px 0px 10px 0px; }
div.body h4,
div.body h5,
div.body h6 { margin: 20px 0px 10px 0px; }
div.body h1 { padding: 0 0 10px 0; }
div.body h2,
div.body h3 { padding: 10px 0 10px 0; }
div.body h4 { padding: 10px 0 10px 0; }
div.body h5,
div.body h6 { padding: 10px 0 0 0; }
div.body h1,
div.body h2,
div.body h3 { border-bottom: 1px solid #ddd; }
div.body h4 { border-bottom: 1px solid #e5e5e5; }
div.body h1 { font-size: 230%; }
div.body h2 { font-size: 180%; }
div.body h3 { font-size: 130%; }
div.body h4 { font-size: 110%; }
div.body h5 { font-size: 105%; }
div.body h6 { font-size: 100%; }
a.headerlink {
color: #3e4349;
font-size: 0.8em;
padding: 0 4px 0 4px;
text-decoration: none;
}
a.headerlink:hover {
background-color: #3e4349;
color: #fff;
}
div.body ul {
list-style: disc;
margin: 1em 0;
padding-left: 1.3em;
}
div.body ul ul, div.body ol ul {
margin: .2em 0;
padding-left: 1.2em;
}
div.body ul li {
padding: 2px 0;
}
div.body ul.search li {
padding: 5px 0 5px 20px;
}
div.body ol {
counter-reset: li;
margin-left: 0;
padding-left: 0;
}
div.body ol ol {
margin: .2em 0;
}
div.body ol > li {
list-style: none;
margin: 0 0 0 1.9em;
padding: 2px 1px;
position: relative;
}
div.body ol > li:before {
content: counter(li) ".";
counter-increment: li;
top: -2px;
left: -1.9em;
width: 1.9em;
padding: 4px 0;
position: absolute;
text-align: left;
}
div.body p,
div.body dd,
div.body li {
line-height: 1.4em;
}
/* weird margins */
li > p {
margin: 2px;
}
li > blockquote {
margin: 10px;
}
div.admonition p.admonition-title + p {
display: inline;
}
div.highlight {
background-color: #fff;
}
div.important, div.note, div.hint, div.tip {
background-color: #eee;
border: 1px solid #ccc;
}
div.attention, div.warning, div.caution, div.seealso {
background-color: #fef9e9;
border: 1px solid #fbe091;
}
/* no disgusting background in the FAQ */
div.topic {
background-color: transparent;
border: none;
}
/* don't link-ify the FAQ page */
a.toc-backref {
text-decoration: none;
color: #3e4349;
}
/* bold and fix the Parameter, Raises, etc. */
dl.field-list > dt {
font-weight: bold;
}
/* remove flex from field lists */
dl.field-list {
display: block;
}
/* internal references are forced to bold for some reason */
a.reference.internal > strong {
font-weight: unset;
font-family: monospace;
}
div.danger, div.error {
background-color: #ffe4e4;
border: 1px solid #f66;
}
div.admonition {
padding: 10px;
}
p.admonition-title {
display: inline;
}
p.admonition-title:after {
content: ':';
}
/* helpful admonitions */
div.helpful {
background-color: #e4f2ff;
border: 1px solid #66b3ff;
}
div.helpful > p.admonition-title {
display: block;
}
div.helpful > p.admonition-title:after {
content: unset;
}
/* exception hierarchy */
.exception-hierarchy-content dd,
.exception-hierarchy-content dl {
margin: 0px 2px;
}
.exception-hierarchy-content {
margin-left: 0.5em;
}
.exception-hierarchy-content ul {
list-style: '»' !important;
}
pre {
background-color: #f5f5f5;
border: 1px solid #C6C9CB;
color: #222;
font-size: 0.75em;
line-height: 1.5em;
margin: 1.5em 0 1.5em 0;
padding: 10px;
}
pre, tt, code {
font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
font-size: 0.9em;
}
tt, code {
background-color: #ecf0f3;
}
tt.descname, code.descname {
font-size: 0.95em;
}
tt.xref, a tt, code.xref, a code {
font-weight: normal;
}
span.pre {
padding: 0 2px;
}
dl.class {
margin-bottom: 50px;
}
dl.describe > dt,
dl.function > dt,
dl.attribute > dt,
dl.classmethod > dt,
dl.method > dt,
dl.class > dt,
dl.exception > dt {
background-color: #f5f5f5;
padding: 1px 10px;
}
dd {
margin-top: 10px;
}
.container.operations {
padding: 10px;
border: 1px solid #ddd;
margin-bottom: 20px;
}
.container.operations::before {
content: 'Supported Operations';
color: #212224;
display: block;
padding-bottom: 5px;
}
.container.operations > dl.describe > dt {
background-color: #f8f8f8;
}
table.docutils {
width: 100%;
}
table.docutils.footnote {
width: auto;
}
table.docutils thead,
table.docutils tfoot {
background: #f5f5f5;
}
table.docutils thead tr th {
color: #000;
font-weight: normal;
padding: 7px 5px;
vertical-align: middle;
}
table.docutils tbody tr th,
table.docutils tbody tr td {
border-bottom: 0;
border-top: solid 1px #ddd;
padding: 7px 5px;
vertical-align: top;
}
table.docutils tbody tr:last-child th,
table.docutils tbody tr:last-child td {
border-bottom: solid 1px #ddd;
}
table.docutils thead tr td p,
table.docutils tfoot tr td p,
table.docutils tbody tr td p,
table.docutils thead tr td ul,
table.docutils tfoot tr td ul,
table.docutils tbody tr td ul,
table.docutils thead tr td ol,
table.docutils tfoot tr td ol,
table.docutils tbody tr td ol {
margin: 0 0 .5em;
}
table.docutils thead tr td p.last,
table.docutils tfoot tr td p.last,
table.docutils tbody tr td p.last,
table.docutils thead tr td ul.last,
table.docutils tfoot tr td ul.last,
table.docutils tbody tr td ul.last,
table.docutils thead tr td ol.last,
table.docutils tfoot tr td ol.last,
table.docutils tbody tr td ol.last {
margin-bottom: 0;
}
.viewcode-back {
font-family: Arial, sans-serif;
}
div.viewcode-block:target {
background-color: #fef9e9;
border-top: 1px solid #fbe091;
border-bottom: 1px solid #fbe091;
}
/* hide the welcome text */
div#welcome-to-discord-py > h1 {
display: none;
}
.active {
background-color: #dbdbdb;
border-left: 5px solid #dbdbdb;
}
div.code-block-caption {
font-size: medium;
font-weight: bold;
}
@media screen and (max-width: 870px) {
div.document {
width: auto;
margin: 0;
}
div.documentwrapper {
float: none;
}
div.bodywrapper {
margin: 0;
}
div.body {
min-height: 0;
padding: 0 20px 30px 20px;
}
div.footer {
background-color: #333;
color: #888;
margin: 0;
padding: 10px 20px 20px;
text-align: left;
width: auto;
}
div.footer a {
color: #bbb;
}
div.footer a:hover {
color: #fff;
}
div.sphinxsidebar {
background-color: #333;
color: #fff;
float: none;
margin: 0;
padding: 10px 20px;
width: auto;
}
/* sticky sidebar */
div.sphinxsidebarwrapper {
position: relative;
}
div.sphinxsidebar h3,
div.sphinxsidebar h4,
div.sphinxsidebar p,
div.sphinxsidebar h3 a {
color: #fff;
}
div.sphinxsidebar ul {
color: #999;
}
div.sphinxsidebar a {
color: #aaa;
}
div.sphinxsidebar a:hover {
color: #fff;
}
.active {
background-color: transparent;
border-left: none;
}
}
| 9,320 | 15.098446 | 95 | css |
Grid2Op | Grid2Op-master/docs/_templates/genindex.html | {%- extends "basic/genindex.html" %}
{% block body %}
{{ super() }}
<!-- Inject some JavaScript to convert the index names into something useful. -->
<script>
let elements = document.querySelectorAll("table.indextable a");
// this is pretty finicky but it should work.
for(let el of elements) {
let key = el.getAttribute('href').split('#', 2)[1]
if(!key.startsWith('discord.')) {
continue;
}
if(key.startsWith('discord.ext.')) {
key = key.substr(12); // discord.ext.
}
if(el.textContent.indexOf('()') !== -1) {
key = key + '()'
}
el.textContent = key;
}
document.querySelectorAll("td").forEach(el => el.style.width = 'auto');
</script>
{% endblock %}
<!--this stuff uses a couple of themes copy pasted from Rapptz -->
| 823 | 26.466667 | 83 | html |
Grid2Op | Grid2Op-master/docs/_templates/layout.html | {%- extends "basic/layout.html" %}
{% set show_source = False %}
{% set style = 'style.css' %}
{%- block extrahead %}
{{ super() }}
<meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9">
{% endblock %}
{%- block relbar2 %}{% endblock %}
{% block header %}
{{ super() }}
{% if pagename == 'index' %}
<div class="indexwrapper">
{% endif %}
{% endblock %}
{%- block footer %}
<div class="footer">
© Copyright {{ copyright }}.
{% trans sphinx_version=sphinx_version|e %}Created using <a href="http://sphinx-doc.org/">Sphinx</a> {{ sphinx_version }}.{% endtrans %}
</div>
{% if pagename == 'index' %}
</div>
{% endif %}
{%- if READTHEDOCS %}
<script>
if (typeof READTHEDOCS_DATA !== 'undefined') {
if (!READTHEDOCS_DATA.features) {
READTHEDOCS_DATA.features = {};
}
READTHEDOCS_DATA.features.docsearch_disabled = true;
}
</script>
{%- endif %}
{%- endblock %}
<!--this stuff uses a couple of themes copy pasted from Rapptz -->
| 1,054 | 23.534884 | 140 | html |
Grid2Op | Grid2Op-master/docs/_templates/relations.html | <!-- purposefully empty --> | 27 | 27 | 27 | html |
Grid2Op | Grid2Op-master/examples/backend_integration/Step0_make_env.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
In this script we will create a function to make a proper grid2Op environment that will use
to test / check our backend.
Grid2op supports lots of functions. Here we mainly spend our time to deactivate them in
order to have something has clean and predictable as possible.
The most notable thing in this script is the use of `BackendConverter`. This class allows to
manipulate (agent, time series etc.) data as if they come for the `source_backend_class` but
internally, grid2op uses the `target_backend_class` (in this case the default PandaPowerBackend)
to carry out the computation.
This is especially usefull when you want to write a new "backend" because:
1) as the "source_backend_class" is never really used, you are not forced
to implement everything at once before being able to make some tests
2) it provides a default "mapping" from elements in the grid you load that
might have different names (*eg* "load_1_2" in PandaPowerBackend and
"load_2_1" in your backend) and you are still able to read the time
series provided in the grid2op package.
We recommend not to spend much time looking at this code but keep in mind that
when backends are developed, BackendConverter might be a usefull tools.
"""
import warnings
import grid2op
from grid2op.Action import CompleteAction
from grid2op.Converter import BackendConverter
from grid2op.Backend import PandaPowerBackend
from grid2op.Reward import ConstantReward
from grid2op.Opponent import BaseOpponent
def make_env_for_backend(env_name, backend_class):
# env_name: one of:
# - rte_case5_example: the grid in the documentation (completely fake grid)
# - l2rpn_case14_sandbox: inspired from IEEE 14
# - l2rpn_neurips_2020_track1: inspired from IEEE 118 (only a third of it)
# - l2rpn_wcci_2022_dev: inspired from IEEE 118 (entire grid)
if env_name == "rte_case5_example":
pass
elif env_name == "l2rpn_case14_sandbox":
pass
elif env_name == "l2rpn_neurips_2020_track1":
pass
elif env_name == "l2rpn_wcci_2022_dev":
pass
else:
raise RuntimeError(f"Unknown grid2op environment name {env_name} used when developping a new backend.")
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
env = grid2op.make(env_name,
test=True,
action_class=CompleteAction, # we tell grid2op we will manipulate all type of actions
reward_class=ConstantReward, # we don't have yet redispatching data, that might be use by the reward
opponent_class=BaseOpponent, # we deactivate the opponents
backend=BackendConverter(source_backend_class=backend_class,
target_backend_class=PandaPowerBackend,
use_target_backend_name=True)
)
obs = env.reset()
return env, obs
| 3,480 | 46.040541 | 128 | py |
Grid2Op | Grid2Op-master/examples/backend_integration/Step1_loading.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
This script :
1) provide a possible implementation of the "load_grid" function based on pandapower, see
the implementation of `def load_grid(self, path, filename=None):`
2) emulate the behaviour of grid2op when this funciton is called, see what happens after the
`if __name__ == "__main__":`
I can also be used as "test" to make sure your backend can be loaded properly for example.
Of course, this script is highly inefficient and can be optimized in multiple ways. Its only
purpose is to be as clear and minimal as possible.
"""
import os
import numpy as np
import grid2op
from grid2op.Backend import Backend # required
# to serve as an example
import pandapower as pp
class CustomBackend_Step1(Backend):
def load_grid(self, path, filename=None):
# first load the grid from the file
full_path = path
if filename is not None:
full_path = os.path.join(full_path, filename)
self._grid = pp.from_json(full_path)
# then fill the "n_sub" and "sub_info"
self.n_sub = self._grid.bus.shape[0]
# then fill the number and location of loads
self.n_load = self._grid.load.shape[0]
self.load_to_subid = np.zeros(self.n_load, dtype=int)
for load_id in range(self.n_load):
self.load_to_subid[load_id] = self._grid.load.iloc[load_id]["bus"]
# then fill the number and location of generators
self.n_gen = self._grid.gen.shape[0]
self.gen_to_subid = np.zeros(self.n_gen, dtype=int)
for gen_id in range(self.n_gen):
self.gen_to_subid[gen_id] = self._grid.gen.iloc[gen_id]["bus"]
# then fill the number and location of storage units
# self.n_storage = self._grid.storage.shape[0]
# self.storage_to_subid = np.zeros(self.n_storage, dtype=int)
# for storage_id in range(self.n_storage):
# self.storage_to_subid[storage_id] = self._grid.storage.iloc[storage_id]["bus"]
# WARNING
# for storage, their description is loaded in a different file (see
# the doc of Backend.load_storage_data)
# to start we recommend you to ignore the storage unit of your grid with:
self.set_no_storage()
# finally handle powerlines
# NB: grid2op considers that trafos are powerlines.
# so we decide here to say: first n "powerlines" of grid2Op
# will be pandapower powerlines and
# last k "powerlines" of grid2op will be the trafos of pandapower.
self.n_line = self._grid.line.shape[0] + self._grid.trafo.shape[0]
self.line_or_to_subid = np.zeros(self.n_line, dtype=int)
self.line_ex_to_subid = np.zeros(self.n_line, dtype=int)
for line_id in range(self._grid.line.shape[0]):
self.line_or_to_subid[line_id] = self._grid.line.iloc[line_id]["from_bus"]
self.line_ex_to_subid[line_id] = self._grid.line.iloc[line_id]["to_bus"]
nb_powerline = self._grid.line.shape[0]
for trafo_id in range(self._grid.trafo.shape[0]):
self.line_or_to_subid[trafo_id + nb_powerline] = self._grid.trafo.iloc[trafo_id]["hv_bus"]
self.line_ex_to_subid[trafo_id + nb_powerline] = self._grid.trafo.iloc[trafo_id]["lv_bus"]
# and now the thermal limit
self.thermal_limit_a = 1000. * np.concatenate(
(
self._grid.line["max_i_ka"].values,
self._grid.trafo["sn_mva"].values
/ (np.sqrt(3) * self._grid.trafo["vn_hv_kv"].values),
)
)
self._compute_pos_big_topo()
def apply_action(self, action):
raise NotImplementedError("Will be detailed in another example script")
def runpf(self, is_dc=False):
raise NotImplementedError("Will be detailed in another example script")
def get_topo_vect(self):
raise NotImplementedError("Will be detailed in another example script")
def generators_info(self):
raise NotImplementedError("Will be detailed in another example script")
def loads_info(self):
raise NotImplementedError("Will be detailed in another example script")
def lines_or_info(self):
raise NotImplementedError("Will be detailed in another example script")
def lines_ex_info(self):
raise NotImplementedError("Will be detailed in another example script")
if __name__ == "__main__":
path_grid2op = grid2op.__file__
path_data_test = os.path.join(os.path.split(path_grid2op)[0], "data")
env_name = "rte_case5_example"
# one of:
# - rte_case5_example: the grid in the documentation (completely fake grid)
# - l2rpn_case14_sandbox: inspired from IEEE 14
# - l2rpn_neurips_2020_track1: inspired from IEEE 118 (only a third of it)
# - l2rpn_wcci_2022_dev: inspired from IEEE 118 (entire grid)
a_grid = os.path.join(path_data_test, env_name, "grid.json")
backend = CustomBackend_Step1()
backend.load_grid(a_grid)
# grid2op then performs basic check to make sure that the grid is "consistent"
backend.assert_grid_correct()
# and you can check all the attribute that are required by grid2op (exhaustive list in the
# GridObjects class)
# name_load
# name_gen
# name_line
# name_sub
# name_storage
# to which substation is connected each element
# load_to_subid
# gen_to_subid
# line_or_to_subid
# line_ex_to_subid
# storage_to_subid
# # which index has this element in the substation vector
# load_to_sub_pos
# gen_to_sub_pos
# line_or_to_sub_pos
# line_ex_to_sub_pos
# storage_to_sub_pos
# # which index has this element in the topology vector
# load_pos_topo_vect
# gen_pos_topo_vect
# line_or_pos_topo_vect
# line_ex_pos_topo_vect
# storage_pos_topo_vect
# for example
print(type(backend).name_load)
print(type(backend).load_to_subid)
print(type(backend).load_to_sub_pos)
print(type(backend).load_pos_topo_vect)
| 6,622 | 37.730994 | 112 | py |
Grid2Op | Grid2Op-master/examples/backend_integration/Step2_modify_load.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
This script provides a possible implementation, based on pandapower of the "change the load" part of the "grid2op backend loop".
It get back the loading function from Step1, implements the "apply_action" relevant for the "change_load", the "runpf" method
(to compute the powerflow) and then the "loads_info"
"""
import numpy as np
import pandapower as pp
from Step1_loading import CustomBackend_Step1
class CustomBackend_Step2(CustomBackend_Step1):
def apply_action(self, action):
# the following few lines are highly recommended
if action is None:
return
(
active_bus,
(prod_p, prod_v, load_p, load_q, storage),
_,
shunts__,
) = action()
# change the active values of the loads
for load_id, new_p in load_p:
self._grid.load["p_mw"].iloc[load_id] = new_p
# change the reactive values of the loads
for load_id, new_q in load_q:
self._grid.load["q_mvar"].iloc[load_id] = new_q
def runpf(self, is_dc=False):
# possible implementation of the runpf function
try:
if is_dc:
pp.rundcpp(self._grid, check_connectivity=False)
else:
pp.runpp(self._grid, check_connectivity=False)
return self._grid.converged, None
except pp.powerflow.LoadflowNotConverged as exc_:
# of the powerflow has not converged, results are Nan
return False, exc_
def loads_info(self):
# carefull with copy / deep copy
load_p = self._grid.res_load["p_mw"].values # in MW
load_q = self._grid.res_load["q_mvar"].values # in MVAr
# load_v is the voltage magnitude at the bus at which the load is connected.
# in pandapower this is not straightforward. We first need to retrieve the
# voltage in per unit of the bus to which each load is connected.
# And then we convert the pu to kV. This is what is done below.
load_v = self._grid.res_bus.iloc[self._grid.load["bus"].values]["vm_pu"].values # in pu
load_v *= self._grid.bus.iloc[self._grid.load["bus"].values]["vn_kv"].values # in kv
return load_p, load_q, load_v
if __name__ == "__main__":
import grid2op
import os
from Step0_make_env import make_env_for_backend
path_grid2op = grid2op.__file__
path_data_test = os.path.join(os.path.split(path_grid2op)[0], "data")
env_name = "rte_case5_example"
# one of:
# - rte_case5_example: the grid in the documentation (completely fake grid)
# - l2rpn_case14_sandbox: inspired from IEEE 14
# - l2rpn_neurips_2020_track1: inspired from IEEE 118 (only a third of it)
# - l2rpn_wcci_2022_dev: inspired from IEEE 118 (entire grid)
env, obs = make_env_for_backend(env_name, CustomBackend_Step2)
a_grid = os.path.join(path_data_test, env_name, "grid.json")
# we highly recommend to do these 3 steps (this is done automatically by grid2op... of course. See an example of the "complete"
# backend)
backend = CustomBackend_Step2()
backend.load_grid(a_grid)
backend.assert_grid_correct()
#########
# this is how "user" manipute the grid
new_load_p = obs.load_p * 1.1
new_load_q = obs.load_q * 0.9
action = env.action_space({"injection": {"load_p": new_load_p,
"load_q": new_load_q}})
# we could have written
# > action = env.action_space({"injection": {"load_p": [9. , 7.9, 7.7],
# > "load_q": [6.3, 5.5, 5.4]}})
# for the environment "rte_case5_example" but we want this script to be usable
# with the other environments that have different number of loads (so you need different
# vector of different size... this is why we use the obs.load_p and obs.load_q that already
# have the proper size)
# this is technical to grid2op (done internally)
bk_act = env._backend_action_class()
bk_act += action
#############
# this is what the backend receive:
backend.apply_action(bk_act)
# now run a powerflow
conv, exc_ = backend.runpf()
assert conv, "powerflow has diverged"
# and retrieve the results
load_p, load_q, load_v = backend.loads_info()
print(f"{load_p = }")
print(f"{load_q = }")
print(f"{load_v = }")
assert np.isclose(np.sum(load_p), np.sum(new_load_p))
assert np.isclose(np.sum(load_q), np.sum(new_load_q))
| 5,065 | 39.528 | 132 | py |
Grid2Op | Grid2Op-master/examples/backend_integration/Step3_modify_gen.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
This script provides a possible implementation, based on pandapower of
the "change the generators" part of the "grid2op backend loop".
It get back the loading function from Step2, implements
the "apply_action" relevant for the "change generators"
and the "generators_info".
NB: the "runpf" is taken from CustomBackend_Step2
"""
import numpy as np
from Step2_modify_load import CustomBackend_Step2
class CustomBackend_Step3(CustomBackend_Step2):
def apply_action(self, action):
# the following few lines are highly recommended
if action is None:
return
# loads are modified in the previous script
super().apply_action(action)
(
active_bus,
(prod_p, prod_v, load_p, load_q, storage),
_,
shunts__,
) = action()
# change the active value of generators
for gen_id, new_p in prod_p:
self._grid.gen["p_mw"].iloc[gen_id] = new_p
# for the voltage magnitude, pandapower expects pu but grid2op provides kV,
# so we need a bit of change
for gen_id, new_v in prod_v:
self._grid.gen["vm_pu"].iloc[gen_id] = new_v # but new_v is not pu !
self._grid.gen["vm_pu"].iloc[gen_id] /= self._grid.bus["vn_kv"][
self.gen_to_subid[gen_id]
] # now it is :-)
def generators_info(self):
prod_p = self._grid.res_gen["p_mw"].values # in MW
prod_q = self._grid.res_gen["q_mvar"].values # in MVAr
# same as for load, gen_v is not directly accessible in pandapower
# we first retrieve the per unit voltage, then convert it to kV
prod_v = self._grid.res_gen["vm_pu"].values
prod_v *= (
self._grid.bus["vn_kv"].iloc[self.gen_to_subid].values
) # in kV
return prod_p, prod_q, prod_v
if __name__ == "__main__":
import grid2op
import os
from Step0_make_env import make_env_for_backend
path_grid2op = grid2op.__file__
path_data_test = os.path.join(os.path.split(path_grid2op)[0], "data")
env_name = "rte_case5_example"
# one of:
# - rte_case5_example: the grid in the documentation (completely fake grid)
# - l2rpn_case14_sandbox: inspired from IEEE 14
# - l2rpn_neurips_2020_track1: inspired from IEEE 118 (only a third of it)
# - l2rpn_wcci_2022_dev: inspired from IEEE 118 (entire grid)
env, obs = make_env_for_backend(env_name, CustomBackend_Step3)
a_grid = os.path.join(path_data_test, env_name, "grid.json")
# we highly recommend to do these 3 steps (this is done automatically by grid2op... of course. See an example of the "complete"
# backend)
backend = CustomBackend_Step3()
backend.load_grid(a_grid)
backend.assert_grid_correct()
#########
new_gen_p = obs.gen_p * 1.1
new_gen_v = obs.gen_v * 1.05
# this is how "user" manipute the grid
action = env.action_space({"injection": {"prod_p": new_gen_p,
"prod_v": new_gen_v}})
# we could have written
# > action = env.action_space({"injection": {"prod_p": [ 0.99 , 29.688126],
# > "prod_v": [107.1, 107.1]}})
# for the environment "rte_case5_example" but we want this script to be usable
# with the other environments that have different number of generators (so you need different
# vector of different size... this is why we use the obs.gen_p and obs.gen_v that already
# have the proper size)
# this is technical to grid2op (done internally)
bk_act = env._backend_action_class()
bk_act += action
#############
# this is what the backend receive:
backend.apply_action(bk_act)
# now run a powerflow
conv, exc_ = backend.runpf()
assert conv, "powerflow has diverged"
# and retrieve the results
gen_p, gen_q, gen_v = backend.generators_info()
print(f"{gen_p = }")
print(f"{gen_q = }")
print(f"{gen_v = }")
# some gen_p might be slightly different than the setpoint
# due to the slack ! (this is why we cannot assert things based on gen_p...)
assert np.allclose(gen_v, new_gen_v)
# the assertion above works because there is no limit on reactive power absorbed / produced by generators in pandapower.
| 4,886 | 38.096 | 132 | py |
Grid2Op | Grid2Op-master/examples/backend_integration/Step4_modify_line_status.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
This script provides a possible implementation, based on pandapower of
the "change the line status" part of the "grid2op backend loop".
It get back the loading function from Step3, implements
the "apply_action" relevant for the "change line status"
and the "lines_or_info" and "lines_ex_info".
NB: the "runpf" is taken from CustomBackend_Step2
"""
import numpy as np
from Step3_modify_gen import CustomBackend_Step3
class CustomBackend_Step4(CustomBackend_Step3):
def apply_action(self, action):
# the following few lines are highly recommended
if action is None:
return
# loads and generators are modified in the previous script
super().apply_action(action)
# disconnected powerlines are indicated because they are
# connected to bus "-1" in the `get_lines_or_bus()` and
# `get_lines_ex_bus()`
# NB : at time of writing, grid2op side a powerline disconnected
# on a side (*eg* "or" side or "ext" side) is
# disconnected on both.
# the only difficulty here is that grid2op considers that
# trafo are also powerline.
# We already "solved" that by saying that the "k" last "lines"
# from grid2op point of view will indeed be trafos.
n_line_pp = self._grid.line.shape[0]
# handle the disconnection on "or" side
lines_or_bus = action.get_lines_or_bus()
for line_id, new_bus in lines_or_bus:
if line_id < n_line_pp:
# a pandapower powerline has bee disconnected in grid2op
dt = self._grid.line
line_id_db = line_id
else:
# a pandapower trafo has bee disconnected in grid2op
dt = self._grid.trafo
line_id_db = line_id - n_line_pp
if new_bus == -1:
# element was disconnected
dt["in_service"].iloc[line_id_db] = False
else:
# element was connected
dt["in_service"].iloc[line_id_db] = True
lines_ex_bus = action.get_lines_ex_bus()
for line_id, new_bus in lines_ex_bus:
if line_id < n_line_pp:
# a pandapower powerline has bee disconnected in grid2op
dt = self._grid.line
line_id_db = line_id
else:
# a pandapower trafo has bee disconnected in grid2op
dt = self._grid.trafo
line_id_db = line_id - n_line_pp
if new_bus == -1:
# element was disconnected
dt["in_service"].iloc[line_id_db] = False
else:
# element was connected
dt["in_service"].iloc[line_id_db] = True
def _aux_get_line_info(self, colname_powerline, colname_trafo):
"""
concatenate the information of powerlines and trafo using
the convention that "powerlines go first" then trafo
"""
res = np.concatenate(
(
self._grid.res_line[colname_powerline].values,
self._grid.res_trafo[colname_trafo].values,
)
)
return res
def lines_or_info(self):
"""
Main method to retrieve the information at the "origin" side of the powerlines and transformers.
We simply need to follow the convention we adopted:
- origin side (grid2op) will be "from" side for pandapower powerline
- origin side (grid2op) will be "hv" side for pandapower trafo
- we chose to first have powerlines, then transformers
(convention chosen in :func:`EducPandaPowerBackend.load_grid`)
"""
p_or = self._aux_get_line_info("p_from_mw", "p_hv_mw")
q_or = self._aux_get_line_info("q_from_mvar", "q_hv_mvar")
v_or = self._aux_get_line_info("vm_from_pu", "vm_hv_pu") # in pu
a_or = self._aux_get_line_info("i_from_ka", "i_hv_ka") * 1000 # grid2op expects amps (A) pandapower returns kilo-amps (kA)
# get the voltage in kV (and not in pu)
bus_id = np.concatenate(
(
self._grid.line["from_bus"].values,
self._grid.trafo["hv_bus"].values,
)
)
v_or *= self._grid.bus.iloc[bus_id]["vn_kv"].values
# there would be a bug in v_or because of the way pandapower
# internally looks at the extremity of powerlines / trafos.
# we fix it here:
status = np.concatenate(
(
self._grid.line["in_service"].values,
self._grid.trafo["in_service"].values,
)
)
v_or[~status] = 0.
return p_or, q_or, v_or, a_or
def lines_ex_info(self):
"""
Main method to retrieve the information at the "extremity" side of the powerlines and transformers.
We simply need to follow the convention we adopted:
- extremity side (grid2op) will be "to" side for pandapower powerline
- extremity side (grid2op) will be "lv" side for pandapower trafo
- we chose to first have powerlines, then transformers
(convention chosen in function :func:`EducPandaPowerBackend.load_grid`)
"""
p_ex = self._aux_get_line_info("p_to_mw", "p_lv_mw")
q_ex = self._aux_get_line_info("q_to_mvar", "q_lv_mvar")
v_ex = self._aux_get_line_info("vm_to_pu", "vm_lv_pu") # in pu
a_ex = self._aux_get_line_info("i_to_ka", "i_lv_ka") * 1000 # grid2op expects amps (A) pandapower returns kilo-amps (kA)
# get the voltage in kV (and not in pu)
bus_id = np.concatenate(
(
self._grid.line["to_bus"].values,
self._grid.trafo["lv_bus"].values,
)
)
v_ex *= self._grid.bus.iloc[bus_id]["vn_kv"].values
# there would be a bug in v_ex because of the way pandapower
# internally looks at the extremity of powerlines / trafos.
# we fix it here:
status = np.concatenate(
(
self._grid.line["in_service"].values,
self._grid.trafo["in_service"].values,
)
)
v_ex[~status] = 0.
return p_ex, q_ex, v_ex, a_ex
if __name__ == "__main__":
    import grid2op
    import os
    from Step0_make_env import make_env_for_backend

    path_grid2op = grid2op.__file__
    path_data_test = os.path.join(os.path.split(path_grid2op)[0], "data")

    env_name = "rte_case5_example"
    # one of:
    # - rte_case5_example: the grid in the documentation (completely fake grid)
    # - l2rpn_case14_sandbox: inspired from IEEE 14
    # - l2rpn_neurips_2020_track1: inspired from IEEE 118 (only a third of it)
    # - l2rpn_wcci_2022_dev: inspired from IEEE 118 (entire grid)
    env, obs = make_env_for_backend(env_name, CustomBackend_Step4)

    a_grid = os.path.join(path_data_test, env_name, "grid.json")

    # we highly recommend to do these 3 steps (this is done automatically by grid2op... of course. See an example of the "complete"
    # backend)
    backend = CustomBackend_Step4()
    backend.load_grid(a_grid)
    backend.assert_grid_correct()
    #########

    # this is how a "user" manipulates the grid
    # here I disconnect powerline 0
    action = env.action_space({"set_line_status": [(0, -1)]})

    # this is technical to grid2op
    bk_act = env._backend_action_class()
    bk_act += action
    #############

    # this is what the backend receives:
    backend.apply_action(bk_act)

    # now run a powerflow
    conv, exc_ = backend.runpf()
    assert conv, "powerflow has diverged"

    # and retrieve the results
    p_or, q_or, v_or, a_or = backend.lines_or_info()
    print("After disconnecting powerline 0: ")
    print(f"{p_or = }")
    print(f"{q_or = }")
    print(f"{v_or = }")
    print(f"{a_or = }")
    assert p_or[0] == 0.
    assert q_or[0] == 0.
    assert v_or[0] == 0.
    assert a_or[0] == 0.

    # this is how a "user" manipulates the grid
    # here I reconnect powerline 0
    action = env.action_space({"set_line_status": [(0, 1)]})
    # this is technical to grid2op (done internally)
    bk_act = env._backend_action_class()
    bk_act += action
    #############
    # this is what the backend receives:
    backend.apply_action(bk_act)
    # now run a powerflow
    # BUG FIX: in the original, `assert conv` was placed *before* `runpf()`,
    # so it only re-checked the stale result of the previous powerflow and
    # the new powerflow was never checked. Run first, then assert.
    conv, exc_ = backend.runpf()
    assert conv, "powerflow has diverged"
    # and retrieve the results
    p_or, q_or, v_or, a_or = backend.lines_or_info()
    print("\nAfter reconnecting powerline 0: ")
    print(f"{p_or = }")
    print(f"{q_or = }")
    print(f"{v_or = }")
    print(f"{a_or = }")

    # this is how a "user" manipulates the grid
    # here I disconnect the last powerline
    line_id = env.n_line - 1
    action = env.action_space({"set_line_status": [(line_id, -1)]})
    # this is technical to grid2op (done internally)
    bk_act = env._backend_action_class()
    bk_act += action
    #############
    # this is what the backend receives:
    backend.apply_action(bk_act)
    # now run a powerflow
    conv, exc_ = backend.runpf()
    assert conv, "powerflow has diverged"
    # and retrieve the results
    p_or, q_or, v_or, a_or = backend.lines_or_info()
    print(f"\nAfter disconnecting powerline id {line_id}")
    print(f"{p_or = }")
    print(f"{q_or = }")
    print(f"{v_or = }")
    print(f"{a_or = }")
    assert p_or[line_id] == 0.
    assert q_or[line_id] == 0.
    assert v_or[line_id] == 0.
    assert a_or[line_id] == 0.
| 10,169 | 34.559441 | 132 | py |
Grid2Op | Grid2Op-master/examples/backend_integration/Step5_modify_topology.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
This script provides a possible implementation, based on pandapower of
the "change topology part of the "grid2op backend loop".
It get back the loading function from Step3, implements
the "apply_action" relevant for the "change topology" (and copy paste
from Step 4 the "change status").
This script also implement the final "getter" the "get_topo_vect".
NB: the "runpf" is taken from CustomBackend_Step2
"""
import copy
import pandas as pd
import numpy as np
from Step4_modify_line_status import CustomBackend_Step4
class CustomBackend_Minimal(CustomBackend_Step4):
    # We need to perform a "trick" for this functionality to work properly. This is because
    # in the "pandapower representation" there is no explicit substation. You only have "bus".
    # But in grid2op, we assume that it is possible to have two buses per substations.
    # Implementing this using pandapower is rather easy: we will double the number of buses
    # available and say that the first half will be the "bus 1" (at their respective substation)
    # and the second half will be the "bus 2" (at their respective substation)
    # This is what we do in the "load_grid" function below.
    def load_grid(self, path, filename=None):
        """Load the grid file, then duplicate every bus (see the "trick" described above)."""
        # loads and generators are modified in the previous script
        super().load_grid(path, filename)

        # please read the note above, this part is specific to pandapower !
        # the copied buses (the second half) represent "bus 2" of each
        # substation and start their life out of service
        add_topo = copy.deepcopy(self._grid.bus)
        add_topo.index += add_topo.shape[0]
        add_topo["in_service"] = False
        self._grid.bus = pd.concat((self._grid.bus, add_topo))

    def _aux_change_bus_or_disconnect(self, new_bus, dt, key, el_id):
        # disconnect element `el_id` of the pandapower table `dt` when
        # `new_bus` is -1, otherwise reconnect it and move it to the (global)
        # bus `new_bus` (stored in column `key`)
        if new_bus == -1:
            dt["in_service"].iloc[el_id] = False
        else:
            dt["in_service"].iloc[el_id] = True
            dt[key].iloc[el_id] = new_bus

    # As a "bonus" (see the comments above the "load_grid" function), we can also use the
    # grid2op built-in "***_global()" functions that allows to retrieve the global id
    # (from 0 to n_total_bus-1) directly (instead of manipulating local bus id that
    # are either 1 or 2)
    def apply_action(self, action):
        """Apply the "topology" part of `action` (a grid2op `_BackendAction`) on the pandapower grid."""
        # the following few lines are highly recommended
        if action is None:
            return

        # loads and generators are modified in the previous script
        super().apply_action(action)

        # handle the load (see the comment above the definition of this
        # function as to why it's possible to use the get_loads_bus_global)
        loads_bus = action.get_loads_bus_global()
        for load_id, new_bus in loads_bus:
            self._aux_change_bus_or_disconnect(new_bus,
                                               self._grid.load,
                                               "bus",
                                               load_id)

        # handle the generators (see the comment above the definition of this
        # function as to why it's possible to use the get_loads_bus_global)
        gens_bus = action.get_gens_bus_global()
        for gen_id, new_bus in gens_bus:
            self._aux_change_bus_or_disconnect(new_bus,
                                               self._grid.gen,
                                               "bus",
                                               gen_id)

        # handle the powerlines (largely inspired from the Step4...) (see the comment above the definition of this
        # function as to why it's possible to use the get_lines_or_bus_global)
        n_line_pp = self._grid.line.shape[0]

        lines_or_bus = action.get_lines_or_bus_global()
        for line_id, new_bus in lines_or_bus:
            if line_id < n_line_pp:
                dt = self._grid.line
                key = "from_bus"
                line_id_pp = line_id
            else:
                dt = self._grid.trafo
                key = "hv_bus"
                line_id_pp = line_id - n_line_pp

            self._aux_change_bus_or_disconnect(new_bus,
                                               dt,
                                               key,
                                               line_id_pp)

        lines_ex_bus = action.get_lines_ex_bus_global()
        for line_id, new_bus in lines_ex_bus:
            if line_id < n_line_pp:
                dt = self._grid.line
                key = "to_bus"
                line_id_pp = line_id
            else:
                dt = self._grid.trafo
                key = "lv_bus"
                line_id_pp = line_id - n_line_pp

            self._aux_change_bus_or_disconnect(new_bus,
                                               dt,
                                               key,
                                               line_id_pp)

        # and now handle the bus data frame status (specific to pandapower)
        # we reuse the fact that there is n_sub substation on the grid,
        # the bus (pandapower) for substation i will be bus i and bus i + n_sub
        # as we explained.
        (
            active_bus,
            (prod_p, prod_v, load_p, load_q, storage),
            _,
            shunts__,
        ) = action()
        bus_is = self._grid.bus["in_service"]
        for i, (bus1_status, bus2_status) in enumerate(active_bus):
            bus_is[i] = bus1_status
            bus_is[i + type(self).n_sub] = bus2_status

    def _aux_get_topo_vect(self, res, dt, key, pos_topo_vect, add_id=0):
        # we loop through each element of the table
        # (each table representing either the loads, or the generators or the powerlines or the trafos)
        # then we assign the right bus (local - eg 1 or 2) to the right
        # component of the vector "res" (the component is given by the "pos_topo_vect" - eg self.load_pos_topo_vect
        # when we look at the loads)
        el_id = 0
        for (status, bus_id) in dt[["in_service", key]].values:
            my_pos_topo_vect = pos_topo_vect[el_id + add_id]
            if status:
                local_bus = self.global_bus_to_local_int(bus_id, my_pos_topo_vect)
            else:
                # disconnected elements are reported on "bus" -1
                local_bus = -1
            res[my_pos_topo_vect] = local_bus
            el_id += 1

    # it should return, in the correct order, on which bus each element is connected
    def get_topo_vect(self):
        """Return, for every element (in grid2op order), the local bus (1, 2 or -1) it is connected to."""
        res = np.full(self.dim_topo, fill_value=-2, dtype=int)

        # read results for load
        self._aux_get_topo_vect(res, self._grid.load, "bus", self.load_pos_topo_vect)
        # then for generators
        self._aux_get_topo_vect(res, self._grid.gen, "bus", self.gen_pos_topo_vect)
        # then each side of powerlines
        self._aux_get_topo_vect(res, self._grid.line, "from_bus", self.line_or_pos_topo_vect)
        self._aux_get_topo_vect(res, self._grid.line, "to_bus", self.line_ex_pos_topo_vect)

        # then for the trafos, but remember pandapower trafos are powerlines in grid2Op....
        # so we need to trick it a bit
        # (we can do this trick because we put the trafo "at the end" of the powerline in grid2op
        # in the Step1_loading.py)
        n_line_pp = self._grid.line.shape[0]
        self._aux_get_topo_vect(res, self._grid.trafo, "hv_bus", self.line_or_pos_topo_vect, add_id=n_line_pp)
        self._aux_get_topo_vect(res, self._grid.trafo, "lv_bus", self.line_ex_pos_topo_vect, add_id=n_line_pp)

        return res
if __name__ == "__main__":
    import grid2op
    import os
    from Step0_make_env import make_env_for_backend

    path_grid2op = grid2op.__file__
    path_data_test = os.path.join(os.path.split(path_grid2op)[0], "data")

    env_name = "l2rpn_wcci_2022_dev"
    # one of:
    # - rte_case5_example: the grid in the documentation (completely fake grid)
    # - l2rpn_case14_sandbox: inspired from IEEE 14
    # - l2rpn_neurips_2020_track1: inspired from IEEE 118 (only a third of it)
    # - l2rpn_wcci_2022_dev: inspired from IEEE 118 (entire grid)
    env, obs = make_env_for_backend(env_name, CustomBackend_Minimal)

    a_grid = os.path.join(path_data_test, env_name, "grid.json")

    # we highly recommend to do these 3 steps (this is done automatically by grid2op... of course. See an example of the "complete"
    # backend)
    backend = CustomBackend_Minimal()
    backend.load_grid(a_grid)
    backend.assert_grid_correct()
    #########

    # this is how a "user" manipulates the grid:
    # pick a substation id and a valid local topology for the chosen environment
    if env_name == "rte_case5_example":
        sub_id = 0
        local_topo = (1, 2, 1, 2, 1, 2)
    elif env_name == "l2rpn_case14_sandbox":
        sub_id = 2
        local_topo = (1, 2, 1, 2)
    elif env_name == "l2rpn_neurips_2020_track1":
        sub_id = 1
        local_topo = (1, 2, 1, 2, 1, 2)
    elif env_name == "l2rpn_wcci_2022_dev":
        sub_id = 3
        local_topo = (1, 2, 1, 2, 1)
    else:
        raise RuntimeError(f"Unknown grid2op environment name {env_name}")
    action = env.action_space({"set_bus": {"substations_id": [(sub_id, local_topo)]}})

    #############################
    # this is technical to grid2op
    bk_act = env._backend_action_class()
    bk_act += action
    ####################################

    # this is what the backend receives:
    backend.apply_action(bk_act)

    # now run a powerflow
    conv, exc_ = backend.runpf()
    assert conv, f"Power flow diverged with error:\n\t{exc_}"

    # and retrieve the results
    p_or, q_or, v_or, a_or = backend.lines_or_info()
    print(f"{p_or = }")
    print(f"{q_or = }")
    print(f"{v_or = }")
    print(f"{a_or = }")

    # check the topology action has really been applied at the right substation
    topo_vect = backend.get_topo_vect()
    beg_ = np.sum(env.sub_info[:sub_id])
    end_ = beg_ + env.sub_info[sub_id]
    assert np.all(topo_vect[beg_:end_] == local_topo)

    # and you can also make a "more powerful" test
    # that tests if, from grid2op point of view, the KCL are met or not
    p_subs, q_subs, p_bus, q_bus, diff_v_bus = backend.check_kirchoff()
    # p_subs: active power mismatch at the substation level [shape: nb_substation]
    # q_subs: reactive power mismatch at the substation level [shape: nb_substation]
    # p_bus: active power mismatch at the bus level [shape: (nb substation, 2)]
    # q_bus: reactive power mismatch at the bus level [shape: (nb substation, 2)]
    # diff_v_bus: difference between the highest voltage level and the lowest voltage level
    # among all elements connected to the same bus
    # if your "solver" meets the KCL then it should all be 0. (*ie* less than a small tolerance)
    tol = 1e-4
    assert np.all(p_subs <= tol)
    # assert np.all(q_subs <= tol)  # does not work if there are shunts on the grid (not yet coded in the backend)
    assert np.all(p_bus <= tol)
    # assert np.all(q_bus <= tol)  # does not work if there are shunts on the grid (not yet coded in the backend)
    assert np.all(diff_v_bus <= tol)
| 11,537 | 44.070313 | 132 | py |
Grid2Op | Grid2Op-master/examples/backend_integration/Step6_integration.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
This script provides, given the implementation of a (at least minimal) backend
some standard usage of said backend with grid2op, the way people "normally"
interacts with it.
"""
from Step5_modify_topology import CustomBackend_Minimal
if __name__ == "__main__":
    import grid2op
    from grid2op.Action import CompleteAction
    import os
    import warnings
    from Step0_make_env import make_env_for_backend
    from grid2op.Agent import RecoPowerlineAgent
    from grid2op.Reward import L2RPNReward

    path_grid2op = grid2op.__file__
    path_data_test = os.path.join(os.path.split(path_grid2op)[0], "data")

    env_name = "rte_case5_example"
    # one of:
    # - rte_case5_example: the grid in the documentation (completely fake grid)
    # - l2rpn_case14_sandbox: inspired from IEEE 14
    # - l2rpn_neurips_2020_track1: inspired from IEEE 118 (only a third of it)
    # - l2rpn_wcci_2022_dev: inspired from IEEE 118 (entire grid)
    converter_env, _ = make_env_for_backend(env_name, CustomBackend_Minimal)

    # "real" usecase that corresponds to a realistic use of a
    # backend for grid2op. (note that users are totally not aware of what's
    # going on behind the scene)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        env = grid2op.make(env_name,
                           test=True,
                           action_class=CompleteAction,
                           backend=CustomBackend_Minimal(),
                           reward_class=L2RPNReward,  # we use this mainly for the "greedy agent" (see below)
                           # this is because the load / gen / line names might be different than the one read by pandapower.
                           # so we tell grid2op to use the names it founds in the time series (for loads and generators)
                           # and map them to name found on the grid (as defined in the backend)
                           # but as we don't want to come-up with this dictionnary by hands (which would be
                           # for a real usecase THE ONLY way to go) we simply rely on the grid2op automatic
                           # conversion offered by the converter
                           names_chronics_to_grid=converter_env.backend.names_target_to_source
                           )

    obs = env.reset()

    ########### First "test": perform nothing and see what it gives
    done = False
    nb_step = 0
    while True:
        obs, reward, done, info = env.step(env.action_space())
        if done:
            break
        nb_step += 1
    print(f"{nb_step} steps have been made with your backend with do nothing")

    ########## Second "test": perform random actions every now and then
    env.seed(0)
    obs = env.reset()
    done = False
    nb_step = 0
    while True:
        if nb_step % 10 == 9:
            # do a random action sometimes
            act = env.action_space.sample()
        else:
            # do nothing most of the time
            act = env.action_space()
        obs, reward, done, info = env.step(act)
        if done:
            break
        nb_step += 1
    print(f"{nb_step} steps have been made with your backend with random actions")

    ########### Third "test": using an "agent" that "does smart actions" (greedy agent)
    done = False
    nb_step = 0
    obs = env.reset()
    reward = 0.
    agent = RecoPowerlineAgent(env.action_space)
    while True:
        act = agent.act(obs, reward)
        obs, reward, done, info = env.step(act)
        if done:
            break
        nb_step += 1
    print(f"{nb_step} steps have been made with the greedy agent")
| 4,171 | 40.306931 | 124 | py |
Grid2Op | Grid2Op-master/examples/backend_integration/readme.md | # What it does ?
In this example, we show explicitly the different steps performed when grid2op loads an "environment"
from the backend point of view.
It can be usefull for people wanting to implement a new backend for the grid2op platform.
Please refer to the documentation here https://grid2op.readthedocs.io/en/latest/createbackend.html
for more information.
Basically, the typical "grid2op use" is:
```python
# called once
backend.load_grid(...)
# called for each "step", thousands of times
backend.apply_action() # modify the topology, load, generation etc.
backend.runpf() # run the solver
backend.get_topo_vect() # retrieve the results
backend.loads_info() # retrieve the results
backend.generators_info() # retrieve the results
backend.lines_or_info() # retrieve the results
backend.lines_ex_info() # retrieve the results
```
## Reminder
Grid2op is totally agnostic from the grid equations. In grid2op agents only manipulate "high level" objects
connected to a powergrid (for example "loads", "generators", "side of powerlines" etc.)
The way these objects behave and the equations they follow are totally irrelevant from the grid2op perspective. The task of making sure the proper equations are solved is carried out by the "backend".
Traditionally, the "Backend" relies on another tool that carries out the computation, implements the equations, solves them, etc. In this setting, the "Backend" is some "glue code" that maps the representation of your solver to the functions grid2op expects. Some examples of backends include:
- [PandapowerBackend](https://grid2op.readthedocs.io/en/latest/backend.html#grid2op.Backend.PandaPowerBackend): which is the default backend
- [EducPandaPowerBackend](https://github.com/rte-france/Grid2Op/blob/master/grid2op/Backend/EducPandaPowerBackend.py): which is a "simplification" of the previous backend for education purpose. So we highly recommend you to check it out :-)
- [lightsim2grid](https://lightsim2grid.readthedocs.io/en/latest/lightsimbackend.html#lightsim2grid.lightSimBackend.LightSimBackend) which is a backend that uses a port of some function of pandapower in c++ for speed.
We are also aware that some powerflows such as [Hades2](https://github.com/rte-france/hades2-distribution) and other commercial solvers such as PowerFactory are already connected with grid2op, so not open source at the moment.
Hopefully, more "powergrid solvers" can be connected.
## Note on static / dynamic, steady state / transient
At time of writing, only "steady state / static" solvers are connected to grid2op but that does not mean it cannot be different.
Grid2op only expects the "backend" to output some "state" about elements of the grid (for example "active flow at a given end of a powerline" or "voltage magnitude at the bus at which a generator is connected"). The way these "states" are computed is not important for grid2op nor for the backend. The only requirement is that these "states" can be accessed and retrieved python side.
## Alternative use
This example describe the "full" integration with grid2op directly. If your code can be linked against in c++ and you don't want to "worry" about grid2op "representation" / "encoding" / etc. (which is the topic of this example) you might directly want to compute the "complex voltage vector V" solution to "the equations". And in this case, if you can implement the function:
```cpp
bool compute_pf(const Eigen::SparseMatrix<cplx_type> & Ybus, // the admittance matrix of your system
CplxVect & V, // store the results of the powerflow and the Vinit !
const CplxVect & Sbus, // the injection vector
const Eigen::VectorXi & ref, // bus id participating to the distributed slack
const RealVect & slack_weights, // slack weights for each bus
const Eigen::VectorXi & pv, // (might be ignored) index of the components of Sbus should be computed
const Eigen::VectorXi & pq, // (might be ignored) index of the components of |V| should be computed
int max_iter, // maximum number of iteration (might be ignored)
real_type tol // solver tolerance
);
```
then it should be relatively simple to use it with lightsim2grid.
**NB** this is not the preferred solution.
# Main "functions" to implement
We suppose that you already have a "solver" that is able to read a file describing a powergrid, retrieve the parameters needed, compute a solution to the equations and for which you can read the results. For example a "powerflow solver".
Once you have that, implementing a backend can be done in 4 different steps, each described in a subsection below.
## Step 1: loading the grid, exporting it grid2op side
This step is called only ONCE, when the grid2op environment is created. In this step, you read a grid file (in the format that you want) and the backend should inform grid2op about the "objects" on this powergrid and their location.
This is done by the method:
```python
def load_grid(self, path, filename=None):
TODO !
```
Basically, once you have loaded the file in your solver you should first fill `self.n_sub` (number of substations on your grid)
Then for each type of elements (among "load", "gen", "line_or", "line_ex" and "storage"), you fill :
- (optional) `self.name_$el` (*eg* self.name_load, self.name_line, self.name_storage) : the name of the elements (*eg* `self.name_load[5]` is the name of the load 5.). You can ignore it if you want and if that is the case, grid2op will automatically assign such names transparently.
- `self.n_$el` (*eg* self.n_load, self.n_gen, self.n_line, self.n_storage): the number of element. For example self.n_load will be the total number of loads on your grid.
- `self.$el_to_subid` (*eg* self.load_to_subid, self.gen_to_subid, self.line_or_to_subid, self.line_ex_to_subid, self.storage_to_subid): the id of the substation to which this given element is connected. For example `self.load_to_subid[2] = 5` informs grid2op that the load with id 2 is connected to substation with id 5 and `self.line_or_to_subid[7] = 9` informs grid2op that the origin side of line 7 is connected to substation with id 9.
You need to fill :
- integers: self.n_load, self.n_gen, self.n_storage, self.n_line,
- vectos: self.load_to_subid, self.gen_to_subid, self.line_or_to_subid, self.line_ex_to_subid
Then you call `self._compute_pos_big_topo()` and this will assign all the right vectors required by grid2op for you.
An example is given in the [Step1_loading](Step1_loading.py) script.
**NB** A "transformer" (from a powergrid perspective) is a "powerline" from a grid2op perspective.
## Step 2: modifying the state
This step is "first step" of the "grid2Op backend loop" (which is summarized by: "modify", "run the model", "retrieve the state of the elemtns", repeat).
It is implemented in the method `apply_action` (that does not return anything):
```python
def apply_action(self, action=None):
TODO !
```
Classically, you can divide this method into different modifications:
- continuous modifications: change the active / reactive consumption of loads or storage units, the active power at generators or the voltage setpoint at these generators.
- discrete / topological modifications: connect / disconnect powerlines or change the bus to which an element is connected.
To implement it, you simply need to implement all of the above. Detailed examples are provided in the scripts "StepK_change_load.py" and "StepK_change_gen.py". Indeed, we did not find it convenient to test "simply" that the setpoint has been modified. We prefer testing that the setpoint can be changed and then that the results can be read back (see steps 3 and 4 below).
**NB** the "action" here is NOT a grid2op.Action.BaseAction. It is a grid2op.Action._BackendAction !
## Step 3: solves the equations
This is the second step of the "grid2op backend loop" (which is still "modify", "run", "retrieve the results"). It is implemented in the function:
```python
def runpf(self, is_dc: bool=False) -> Tuple[bool, Union[None, Exception]]:
TODO
return has_converged, exception_if_diverged_otherwise_None
```
This is probably the most straightforward function to implement as you only need to call something like 'compute()' or 'run_pf' or 'solve' on your underlying model.
Detailed examples are provided in the scripts "StepK_change_load.py" and "StepK_change_gen.py". Indeed, we did not find it convenient to test "simply" that the setpoint has been modified. We prefer testing that the setpoint can be changed and then that the results can be read back (see step 4 below).
## Step 4: reading the states
This is the third and final "call" of the "grid2op backend loop". At this stage, you are expected to export the results of your computation python side. Results should follow some given convention (*eg* units).
It is implemented in the functions:
```python
def get_topo_vect(self):
TODO
def loads_info(self):
TODO
def generators_info(self):
TODO
def lines_or_info(self):
TODO
def lines_ex_info(self):
TODO
```
Detailed examples are provided in the scripts "StepK_change_load.py" or "StepK_change_gen.py" for examples where the whole "backend loop" is exposed "element by element".
More explicitely:
- **get_topo_vect(self):** returns the topology vector
- **generators_info(self):** returns gen_p (in MW), gen_q (in MVAr), gen_v (in kV) [gen_v is the voltage magnitude at the bus to which the generator is connected]
- **loads_info(self):** returns load_p (in MW), load_q (in MVAr), load_v (in kV) [load_v is the voltage magnitude at the bus to which the generator is connected]
- **lines_or_info(self):** returns p_or (in MW), q_or (in MVAr), v_or (in kV), a_or (in A) [all the flows at the origin side of the powerline (remember it includes "trafo") + the voltage magnitude at the bus to which the origin side of the powerline is connected]
- **lines_or_info(self):** returns p_ex (in MW), q_ex (in MVAr), v_ex (in kV), a_ex (in A) [all the flows at the extremity side of the powerline (remember it includes "trafo") + the voltage magnitude at the bus to which the extremity side of the powerline is connected]
## Breakpoint :-)
At this stage, you can already use your backend with grid2op and all its eco system, even though some functionalities might still be missing (seed the "advanced" features below). The scripts `Step0` to `Step6` propose a possible way to split the coding
of all these functions into different independant tasks and to have basic "tests" (more preciselys examples of what could be some tests).
More precisely:
- [Step0_make_env](./Step0_make_env.py): create a grid2op environment that you can use even if your backend is not completely coded.
It does that by relying on the computation of the powerflow by the default backend (of course it assumes the same grid can be loaded
by your backend and by Pandapower, which might involve converting some data from pandapower format (json specific representation)
to your format. You can use some utilities of pandapower for such purpose, see *eg* https://pandapower.readthedocs.io/en/latest/converter.html)
- [Step1_loading](./Step1_loading.py): gives and example on how to implement the "load the grid from a file and define everything needed by grid2op"
- [Step2_modify_load](./Step2_modify_load.py): gives and example on how to implement the powerflow and on how to modify the load setpoints
- [Step3_modify_gen](./Step3_modify_gen.py): gives and example on how to modify the generator setpoints
- [Step4_modify_line_status](./Step4_modify_line_status.py): gives and example on how to modify powerline status (disconnect / reconnect) powerlines
- [Step5_modify_topology](./Step5_modify_topology.py): gives some examples on the topology changes connect object to different busbars
at the substation they are connected to.
- [Step6_integration](./Step6_integration.py): gives some examples of agents interacting on the grid (and powerflow are carried out by your backend)
## (advanced): automatic testing
TODO
How to use grid2op tests to test your backend in depth?
## (advanced): handling of storage units
TODO (do not forget the storage description file !)
## (advanced): handling of shunts
TODO
## (advanced): handling of other generators attributes (cost, ramps, pmin / pmax etc.)
TODO
## (advanced): copy
TODO | 12,527 | 57.269767 | 440 | md |
Grid2Op | Grid2Op-master/getting_started/ml_agent.py | """
This piece of code is provided as an example of what can be achieved when training deep learning agents when using
grid2op. This code is not optimize for performances (use of computational resources) nor for achieve state of the
art results, but rather to serve as example.
Documentation is rather poor and we encourage the read to check the indicated website on each model to have
more informations.
"""
from collections import deque
import random
import numpy as np
import pdb
import os
#tf2.0 friendly
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow.keras
import tensorflow.keras.backend as K
from tensorflow.keras.models import load_model, Sequential, Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense, subtract, add
from tensorflow.keras.layers import Input, Lambda, Concatenate
import grid2op
from grid2op.Agent import AgentWithConverter
from grid2op.Converter import IdToAct
class TrainingParam(object):
    """
    Container for the hyper-parameters used when training the models
    (these values were hard coded in notebook 3).
    """
    def __init__(self,
                 DECAY_RATE=0.9,
                 BUFFER_SIZE=40000,
                 MINIBATCH_SIZE=64,
                 TOT_FRAME=3000000,
                 EPSILON_DECAY=10000,
                 MIN_OBSERVATION=50,  # 5000
                 FINAL_EPSILON=1/300,  # have on average 1 random action per scenario of approx 287 time steps
                 INITIAL_EPSILON=0.1,
                 TAU=0.01,
                 ALPHA=1,
                 NUM_FRAMES=1,
                 ):
        # every hyper-parameter is simply stored on the instance under
        # exactly the same name as the corresponding constructor argument
        _values = locals()
        for _name in ("DECAY_RATE", "BUFFER_SIZE", "MINIBATCH_SIZE",
                      "TOT_FRAME", "EPSILON_DECAY", "MIN_OBSERVATION",
                      "FINAL_EPSILON", "INITIAL_EPSILON", "TAU",
                      "NUM_FRAMES", "ALPHA"):
            setattr(self, _name, _values[_name])
# Credit Abhinav Sagar:
# https://github.com/abhinavsagar/Reinforcement-Learning-Tutorial
# Code under MIT license, available at:
# https://github.com/abhinavsagar/Reinforcement-Learning-Tutorial/blob/master/LICENSE
class ReplayBuffer:
    """Fixed-capacity FIFO storage of past transitions, with uniform random
    sampling of mini-batches."""

    def __init__(self, buffer_size):
        self.buffer_size = buffer_size
        self.count = 0
        self.buffer = deque()

    def add(self, s, a, r, d, s2):
        """Store the transition (state, action, reward, done, next state),
        evicting the oldest transition when the buffer is full."""
        # transitions containing non finite observations are silently discarded
        if not (np.all(np.isfinite(s)) and np.all(np.isfinite(s2))):
            # TODO proper handling of infinite values somewhere !!!!
            return
        if self.count < self.buffer_size:
            self.count += 1
        else:
            self.buffer.popleft()
        self.buffer.append((s, a, r, d, s2))

    def size(self):
        """Number of transitions currently stored."""
        return self.count

    def sample(self, batch_size):
        """Samples ``batch_size`` transitions uniformly at random (all of them
        when fewer are stored) and returns them as 5 stacked numpy arrays
        (states, actions, rewards, dones, next states)."""
        nb_sampled = min(self.count, batch_size)
        batch = random.sample(self.buffer, nb_sampled)
        s_batch, a_batch, r_batch, d_batch, s2_batch = (np.array(col) for col in zip(*batch))
        return s_batch, a_batch, r_batch, d_batch, s2_batch

    def clear(self):
        """Empties the buffer."""
        self.buffer.clear()
        self.count = 0
# refactorization of the code in a base class to avoid copy paste.
class RLQvalue(object):
    """
    This class aims at representing the Q value (or more in case of SAC) parametrization by
    a neural network.

    It is composed of 2 different networks:

    - model: which is the main model
    - target_model: which has the same architecture and same initial weights as "model" but is updated less frequently
      to stabilize training

    It has basic methods to make predictions, to train the model, and train the target model.
    """
    def __init__(self, action_size, observation_size,
                 lr=1e-5,
                 training_param=None):
        # TODO add more flexibilities when building the deep Q networks, with a "NNParam" for example.
        # NB: a fresh TrainingParam is built per instance; the previous mutable
        # default argument was shared across every instance.
        if training_param is None:
            training_param = TrainingParam()
        self.action_size = action_size
        self.observation_size = observation_size
        self.lr_ = lr
        # history of the q values of the chosen actions (for monitoring)
        self.qvalue_evolution = np.zeros((0,))
        self.training_param = training_param
        self.model = None
        self.target_model = None

    def construct_q_network(self):
        """To be implemented by subclasses: builds self.model and self.target_model."""
        raise NotImplementedError("Not implemented")

    def predict_movement(self, data, epsilon):
        """Epsilon-greedy action selection: for each row of ``data`` return the
        greedy action (argmax of |Q|), replaced by a uniformly random action
        with probability ``epsilon``, together with its q value."""
        batch_size = data.shape[0]
        rand_val = np.random.random(batch_size)
        q_actions = self.model.predict(data)
        opt_policy = np.argmax(np.abs(q_actions), axis=-1)
        opt_policy[rand_val < epsilon] = np.random.randint(0, self.action_size, size=(np.sum(rand_val < epsilon)))
        # bug fix: each row must be indexed with its own chosen action; the
        # previous code read q_actions[0, opt_policy], ie always the first row
        q_chosen = q_actions[np.arange(batch_size), opt_policy]
        self.qvalue_evolution = np.concatenate((self.qvalue_evolution, q_chosen))
        return opt_policy, q_chosen

    def train(self, s_batch, a_batch, r_batch, d_batch, s2_batch, observation_num):
        """Performs one gradient step on the main network to fit the Bellman targets."""
        batch_idx = np.arange(s_batch.shape[0])
        targets = self.model.predict(s_batch)
        fut_action = self.target_model.predict(s2_batch)
        # bug fix: targets[:, a_batch] assigned whole columns; each sampled
        # transition must only modify its own (row, action) entry
        targets[batch_idx, a_batch] = r_batch
        # bug fix: the discounted future value is added for *non* terminal
        # transitions (d_batch is True when the episode ended); this matches the
        # "(1 - d_batch)" convention used by SAC.train in this same file
        not_done = ~np.asarray(d_batch, dtype=bool)
        targets[batch_idx[not_done], a_batch[not_done]] += \
            self.training_param.DECAY_RATE * np.max(fut_action[not_done], axis=-1)
        loss = self.model.train_on_batch(s_batch, targets)
        # Print the loss every 100 iterations.
        if observation_num % 100 == 0:
            print("We had a loss equal to ", loss)
        return np.all(np.isfinite(loss))

    @staticmethod
    def _get_path_model(path, name=None):
        """Returns the (extension-less) paths of the model and of its target copy."""
        if name is None:
            path_model = path
        else:
            path_model = os.path.join(path, name)
        path_target_model = "{}_target".format(path_model)
        return path_model, path_target_model

    def save_network(self, path, name=None, ext="h5"):
        """Saves both networks at the specified path (as "ext" files, h5 by default)."""
        path_model, path_target_model = self._get_path_model(path, name)
        self.model.save('{}.{}'.format(path_model, ext))
        self.target_model.save('{}.{}'.format(path_target_model, ext))
        print("Successfully saved network.")

    def load_network(self, path, name=None, ext="h5"):
        """Loads both networks from the specified path."""
        path_model, path_target_model = self._get_path_model(path, name)
        self.model = load_model('{}.{}'.format(path_model, ext))
        self.target_model = load_model('{}.{}'.format(path_target_model, ext))
        print("Succesfully loaded network.")

    def target_train(self):
        """Soft update of the target network: target <- TAU * model + (1 - TAU) * target."""
        model_weights = self.model.get_weights()
        target_model_weights = self.target_model.get_weights()
        for i in range(len(model_weights)):
            target_model_weights[i] = self.training_param.TAU * model_weights[i] + (1 - self.training_param.TAU) * \
                                      target_model_weights[i]
        self.target_model.set_weights(target_model_weights)
# Credit Abhinav Sagar:
# https://github.com/abhinavsagar/Reinforcement-Learning-Tutorial
# Code under MIT license, available at:
# https://github.com/abhinavsagar/Reinforcement-Learning-Tutorial/blob/master/LICENSE
class DeepQ(RLQvalue):
    """Deep Q network: dense layers replacing the convolutions of the DeepMind
    paper, with input / output sizes adapted to the grid2op spaces."""
    def __init__(self,
                 action_size,
                 observation_size,
                 lr=1e-5,
                 training_param=None):
        # resolve the default here (instead of a shared mutable default argument)
        if training_param is None:
            training_param = TrainingParam()
        RLQvalue.__init__(self, action_size, observation_size, lr, training_param)
        self.construct_q_network()

    def _make_network(self):
        """Builds and compiles one *independent* copy of the Q network."""
        input_layer = Input(shape=(self.observation_size * self.training_param.NUM_FRAMES,))
        layer1 = Dense(self.observation_size * self.training_param.NUM_FRAMES)(input_layer)
        layer1 = Activation('relu')(layer1)
        layer2 = Dense(self.observation_size)(layer1)
        layer2 = Activation('relu')(layer2)
        layer3 = Dense(self.observation_size)(layer2)
        layer3 = Activation('relu')(layer3)
        layer4 = Dense(2 * self.action_size)(layer3)
        layer4 = Activation('relu')(layer4)
        output = Dense(self.action_size)(layer4)
        model = Model(inputs=[input_layer], outputs=[output])
        model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
        return model

    def construct_q_network(self):
        """Creates the main network and its target copy.

        Bug fix: the original code built ``model`` and ``target_model`` on the
        *same* layer instances, so both "networks" shared their weights and the
        target network could never lag behind the trained one. Two independent
        networks are now built and the target is initialized from the main one.
        """
        self.model = self._make_network()
        self.target_model = self._make_network()
        self.target_model.set_weights(self.model.get_weights())
class DuelQ(RLQvalue):
    """Dueling deep Q network: separate value and advantage streams combined as
    Q(s, a) = V(s) + A(s, a) - mean_a A(s, a)."""
    def __init__(self, action_size, observation_size,
                 lr=0.00001,
                 training_param=None):
        # resolve the default here (instead of a shared mutable default argument)
        if training_param is None:
            training_param = TrainingParam()
        RLQvalue.__init__(self, action_size, observation_size, lr, training_param)
        self.construct_q_network()

    def _make_network(self):
        """Builds and compiles one *independent* dueling network."""
        input_layer = Input(shape=(self.observation_size * self.training_param.NUM_FRAMES,))
        lay1 = Dense(self.observation_size * self.training_param.NUM_FRAMES)(input_layer)
        lay1 = Activation('relu')(lay1)
        lay2 = Dense(self.observation_size)(lay1)
        lay2 = Activation('relu')(lay2)
        lay3 = Dense(2 * self.action_size)(lay2)
        lay3 = Activation('relu')(lay3)
        # advantage stream A(s, a)
        fc1 = Dense(self.action_size)(lay3)
        advantage = Dense(self.action_size)(fc1)
        # value stream V(s)
        fc2 = Dense(self.action_size)(lay3)
        value = Dense(1)(fc2)
        # Q(s, a) = V(s) + A(s, a) - mean_a A(s, a)
        meaner = Lambda(lambda x: K.mean(x, axis=1))
        mn_ = meaner(advantage)
        tmp = subtract([advantage, mn_])
        policy = add([tmp, value])
        model = Model(inputs=[input_layer], outputs=[policy])
        model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
        return model

    def construct_q_network(self):
        """Creates the main network and its target copy.

        Bug fix: as in DeepQ, the original code built both "networks" on the
        same layer instances (shared weights, useless target network) and never
        synchronized the target weights. Two independent networks are now built
        and the target is initialized from the main one.
        """
        self.model = self._make_network()
        self.target_model = self._make_network()
        self.target_model.set_weights(self.model.get_weights())
        print("Successfully constructed networks.")
# This class implements the "Soft Actor Critic" (SAC) model.
# It is a custom implementation, courtesy of Clement Goubet
# The original paper is: https://arxiv.org/abs/1801.01290
class SAC(RLQvalue):
    """Soft actor critic (see https://arxiv.org/abs/1801.01290): double Q
    networks, a state value network (with a target copy) and a policy network."""
    def __init__(self, action_size, observation_size, lr=1e-5,
                 training_param=None):
        # resolve the default here (instead of a shared mutable default argument)
        if training_param is None:
            training_param = TrainingParam()
        RLQvalue.__init__(self, action_size, observation_size, lr, training_param)
        # TODO add as meta param the number of "Q" you want to use (here 2)
        # TODO add as meta param size and types of the networks
        self.average_reward = 0
        self.life_spent = 1  # counts training steps, used to anneal the entropy temperature
        self.qvalue_evolution = np.zeros((0,))
        self.Is_nan = False
        self.model_value_target = None
        self.model_value = None
        self.model_Q = None
        self.model_Q2 = None
        self.model_policy = None
        self.construct_q_network()

    def _build_q_NN(self):
        """Builds one Q network: Q(s, a) from the concatenation of the state and
        a one-hot encoding of the action."""
        input_states = Input(shape=(self.observation_size,))
        input_action = Input(shape=(self.action_size,))
        input_layer = Concatenate()([input_states, input_action])
        lay1 = Dense(self.observation_size)(input_layer)
        lay1 = Activation('relu')(lay1)
        lay2 = Dense(self.observation_size)(lay1)
        lay2 = Activation('relu')(lay2)
        lay3 = Dense(2 * self.action_size)(lay2)
        lay3 = Activation('relu')(lay3)
        advantage = Dense(1, activation='linear')(lay3)
        model = Model(inputs=[input_states, input_action], outputs=[advantage])
        model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
        return model

    def _build_model_value(self):
        """Builds one state value network V(s)."""
        input_states = Input(shape=(self.observation_size,))
        lay1 = Dense(self.observation_size)(input_states)
        lay1 = Activation('relu')(lay1)
        lay3 = Dense(2 * self.action_size)(lay1)
        lay3 = Activation('relu')(lay3)
        advantage = Dense(self.action_size, activation='relu')(lay3)
        state_value = Dense(1, activation='linear')(advantage)
        model = Model(inputs=[input_states], outputs=[state_value])
        model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
        return model

    def construct_q_network(self):
        """Builds the 5 networks: 2 Q networks, the value network (+ target copy)
        and the policy network."""
        # construct double Q networks
        self.model_Q = self._build_q_NN()
        self.model_Q2 = self._build_q_NN()
        # state value function approximation (and its target copy)
        self.model_value = self._build_model_value()
        self.model_value_target = self._build_model_value()
        self.model_value_target.set_weights(self.model_value.get_weights())
        # policy function approximation: proba of choosing action a under policy pi
        input_states = Input(shape=(self.observation_size,))
        lay1 = Dense(self.observation_size)(input_states)
        lay1 = Activation('relu')(lay1)
        lay2 = Dense(self.observation_size)(lay1)
        lay2 = Activation('relu')(lay2)
        lay3 = Dense(2 * self.action_size)(lay2)
        lay3 = Activation('relu')(lay3)
        soft_proba = Dense(self.action_size, activation="softmax", kernel_initializer='uniform')(lay3)
        self.model_policy = Model(inputs=[input_states], outputs=[soft_proba])
        self.model_policy.compile(loss='categorical_crossentropy', optimizer=Adam(lr=self.lr_))
        print("Successfully constructed networks.")

    def predict_movement(self, data, epsilon):
        """Epsilon-greedy action selection based on the policy network output."""
        batch_size = data.shape[0]
        rand_val = np.random.random(batch_size)
        p_actions = self.model_policy.predict(data)
        opt_policy_orig = np.argmax(np.abs(p_actions), axis=-1)
        opt_policy = 1.0 * opt_policy_orig
        opt_policy[rand_val < epsilon] = np.random.randint(0, self.action_size, size=(np.sum(rand_val < epsilon)))
        # store the qvalue_evolution (lots of computation time maybe here)
        tmp = np.zeros((batch_size, self.action_size))
        tmp[np.arange(batch_size), opt_policy_orig] = 1.0
        q_actions0 = self.model_Q.predict([data, tmp])
        q_actions2 = self.model_Q2.predict([data, tmp])
        q_actions = np.fmin(q_actions0, q_actions2).reshape(-1)
        self.qvalue_evolution = np.concatenate((self.qvalue_evolution, q_actions))
        # above is not mandatory for predicting a movement so, might need to be moved somewhere else...
        # bug fixes: np.int was removed from recent numpy (use the builtin), and
        # each row must be indexed with its own action (p_actions[:, opt_policy]
        # returned a (batch, batch) shaped array)
        opt_policy = opt_policy.astype(int)
        return opt_policy, p_actions[np.arange(batch_size), opt_policy]

    def train(self, s_batch, a_batch, r_batch, d_batch, s2_batch, observation_num):
        """Trains the 2 Q networks, the policy network and the value network."""
        batch_size = s_batch.shape[0]
        batch_idx = np.arange(batch_size)
        target = np.zeros((batch_size, 1))
        # train the double Q networks on r + gamma * V_target(s') for the action
        # actually played (bug fix: "last_action" was left all zeros, so the Q
        # networks never saw which action had been taken)
        last_action = np.zeros((batch_size, self.action_size))
        last_action[batch_idx, a_batch] = 1.0
        fut_action = self.model_value_target.predict(s2_batch).reshape(-1)
        target[:, 0] = r_batch + (1 - d_batch) * self.training_param.DECAY_RATE * fut_action
        loss = self.model_Q.train_on_batch([s_batch, last_action], target)
        loss_2 = self.model_Q2.train_on_batch([s_batch, last_action], target)
        # anneal the entropy temperature with "time"
        self.life_spent += 1
        temp = 1 / np.log(self.life_spent) / 2
        tiled_batch = np.tile(s_batch, (self.action_size, 1))
        # tiled_batch: output something like: batch, batch, batch
        # TODO save that somewhere not to compute it each time, you can even save this in the
        # TODO tensorflow graph!
        # bug fix: np.int was removed from recent numpy versions
        tmp = np.repeat(np.eye(self.action_size), batch_size * np.ones(self.action_size, dtype=int), axis=0)
        # tmp is something like [1,0,0] (batch size times), [0,1,0,...] batch size time etc.
        action_v1_orig = self.model_Q.predict([tiled_batch, tmp]).reshape(batch_size, -1)
        action_v2_orig = self.model_Q2.predict([tiled_batch, tmp]).reshape(batch_size, -1)
        # train the policy network on a softmax of the (max-normalized) Q values
        action_v1 = action_v1_orig - np.amax(action_v1_orig, axis=-1).reshape(batch_size, 1)
        new_proba = np.exp(action_v1 / temp) / np.sum(np.exp(action_v1 / temp), axis=-1).reshape(batch_size, 1)
        loss_policy = self.model_policy.train_on_batch(s_batch, new_proba)
        # train the value function on min_i Q_i(s, a) - log pi(a | s)
        target_pi = self.model_policy.predict(s_batch)
        # bug fixes: index each row with its own action (instead of row 0 only)
        # and sum the entropy per sample (axis=-1) instead of over the whole batch
        value_target = np.fmin(action_v1_orig[batch_idx, a_batch], action_v2_orig[batch_idx, a_batch]) - \
            np.sum(target_pi * np.log(target_pi + 1e-6), axis=-1)
        loss_value = self.model_value.train_on_batch(s_batch, value_target.reshape(-1, 1))
        self.Is_nan = np.isnan(loss) + np.isnan(loss_2) + np.isnan(loss_policy) + np.isnan(loss_value)
        # Print the loss every 100 iterations.
        if observation_num % 100 == 0:
            print("We had a loss equal to ", loss, loss_2, loss_policy, loss_value)
        return np.all(np.isfinite(loss)) & np.all(np.isfinite(loss_2)) & np.all(np.isfinite(loss_policy)) & \
            np.all(np.isfinite(loss_value))

    @staticmethod
    def _get_path_model(path, name=None):
        """Returns the (extension-less) paths of the 5 saved networks."""
        if name is None:
            path_model = path
        else:
            path_model = os.path.join(path, name)
        path_target_model = "{}_target".format(path_model)
        path_modelQ = "{}_Q".format(path_model)
        path_modelQ2 = "{}_Q2".format(path_model)
        path_policy = "{}_policy".format(path_model)
        return path_model, path_target_model, path_modelQ, path_modelQ2, path_policy

    def save_network(self, path, name=None, ext="h5"):
        """Saves the 5 networks at the specified path (as "ext" files, h5 by default)."""
        path_model, path_target_model, path_modelQ, path_modelQ2, path_policy = self._get_path_model(path, name)
        self.model_value.save('{}.{}'.format(path_model, ext))
        self.model_value_target.save('{}.{}'.format(path_target_model, ext))
        self.model_Q.save('{}.{}'.format(path_modelQ, ext))
        self.model_Q2.save('{}.{}'.format(path_modelQ2, ext))
        self.model_policy.save('{}.{}'.format(path_policy, ext))
        print("Successfully saved network.")

    def load_network(self, path, name=None, ext="h5"):
        """Loads the 5 networks from the specified path."""
        path_model, path_target_model, path_modelQ, path_modelQ2, path_policy = self._get_path_model(path, name)
        self.model_value = load_model('{}.{}'.format(path_model, ext))
        self.model_value_target = load_model('{}.{}'.format(path_target_model, ext))
        self.model_Q = load_model('{}.{}'.format(path_modelQ, ext))
        self.model_Q2 = load_model('{}.{}'.format(path_modelQ2, ext))
        self.model_policy = load_model('{}.{}'.format(path_policy, ext))
        print("Succesfully loaded network.")

    def target_train(self):
        """Soft update of the value target: target <- TAU * model + (1 - TAU) * target."""
        model_weights = self.model_value.get_weights()
        target_model_weights = self.model_value_target.get_weights()
        for i in range(len(model_weights)):
            target_model_weights[i] = self.training_param.TAU * model_weights[i] + \
                (1 - self.training_param.TAU) * target_model_weights[i]
        # bug fix: the blended weights were computed but ``model_weights`` was
        # written to the target network, turning the soft update into a hard copy
        self.model_value_target.set_weights(target_model_weights)
class DeepQAgent(AgentWithConverter):
    """Agent relying on one of the deep Q models above (DQN / DDQN / SAC);
    actions are converted from / to integer ids thanks to the IdToAct converter."""
    def __init__(self, action_space, mode="DDQN", lr=1e-5, training_param=None):
        # this function has been adapted.
        # to built a AgentWithConverter, we need an action_space.
        # No problem, we add it in the constructor.
        AgentWithConverter.__init__(self, action_space, action_space_converter=IdToAct)
        # resolve the default here (instead of a shared mutable default argument)
        if training_param is None:
            training_param = TrainingParam()
        # and now back to the origin implementation
        self.replay_buffer = ReplayBuffer(training_param.BUFFER_SIZE)
        # compare to original implementation, i don't know the observation space size.
        # Because it depends on the component of the observation we want to look at. So these neural network will
        # be initialized the first time an observation is observed (see init_deep_q).
        self.deep_q = None
        self.mode = mode
        self.lr = lr
        self.training_param = training_param

    def convert_obs(self, observation):
        """Selects the components of the observation that are fed to the neural networks."""
        return np.concatenate((observation.rho, observation.line_status, observation.topo_vect))

    def my_act(self, transformed_observation, reward, done=False):
        """Greedy action (epsilon=0.0) given the (converted) observation."""
        if self.deep_q is None:
            self.init_deep_q(transformed_observation)
        predict_movement_int, *_ = self.deep_q.predict_movement(transformed_observation.reshape(1, -1), epsilon=0.0)
        return int(predict_movement_int)

    def init_deep_q(self, transformed_observation):
        """Builds the proper neural network (once the size of the converted
        observation is known, ie the first time an observation is seen)."""
        if self.deep_q is None:
            if self.mode == "DQN":
                cls = DeepQ
            elif self.mode == "DDQN":
                cls = DuelQ
            elif self.mode == "SAC":
                cls = SAC
            else:
                raise RuntimeError("Unknown neural network named \"{}\". Supported types are \"DQN\", \"DDQN\" and "
                                   "\"SAC\"".format(self.mode))
            self.deep_q = cls(self.action_space.size(), observation_size=transformed_observation.shape[-1], lr=self.lr)
| 22,615 | 43.171875 | 138 | py |
Grid2Op | Grid2Op-master/getting_started/test_episodereplay.py | import os
import warnings
import grid2op
from grid2op.Plot import EpisodeReplay
from grid2op.Agent import GreedyAgent, RandomAgent
from grid2op.Runner import Runner
from tqdm import tqdm
# directory where the agents' runner logs will be written
path_agents = "getting_started/study_agent_getting_started"
# if i start from grid2op/getting started (eg cd ~/Documents/grid2op/getting_started)
# NOTE(review): the assignment below overwrites the path above with the literal
# "path_agents/" directory — confirm which of the two locations is intended
path_agents = "path_agents/"
# build the environment, silencing the warnings emitted while it is created
with warnings.catch_warnings():
    warnings.filterwarnings("ignore")
    env = grid2op.make("rte_case14_realistic")
class CustomRandom(RandomAgent):
    """Random agent that only samples a random action on every 10th call to
    :func:`my_act`; otherwise it returns action id 0 ("do nothing")."""
    def __init__(self, action_space):
        RandomAgent.__init__(self, action_space)
        self.i = 0  # number of calls to my_act so far

    def my_act(self, transformed_observation, reward, done=False):
        # one random action every 10 steps, "do nothing" (id 0) otherwise
        res = self.action_space.sample() if self.i % 10 == 0 else 0
        self.i += 1
        return res
# run 2 (seeded) episodes with the custom random agent, saving the logs so they
# can be replayed afterwards
runner = Runner(**env.get_params_for_runner(), agentClass=CustomRandom)
path_agent = os.path.join(path_agents, "awesome_agent_logs")
res = runner.run(nb_episode=2, path_save=path_agent, pbar=tqdm, agent_seeds=[0, 1])
# replay (without displaying it) every episode that has just been saved
ep_replay = EpisodeReplay(agent_path=path_agent)
for _, chron_name, cum_reward, nb_time_step, max_ts in res:
    ep_replay.replay_episode(chron_name,
                             display=False)
# intentionally disabled: example of how to export a replay as a gif / video
if False:
    plot_epi = EpisodeReplay(path_agent)
    #plot_epi.replay_episode("001", max_fps=5, video_name="test.mp4")
    plot_epi.replay_episode(res[0][1], max_fps=2, video_name="random_agent.gif")
Grid2Op | Grid2Op-master/getting_started/test_renderer_14.py | import grid2op
from grid2op.Agent import DoNothingAgent
from grid2op.Agent import GreedyAgent, RandomAgent
import numpy as np
import pdb
import warnings
# build the environment, silencing the warnings emitted while it is created
with warnings.catch_warnings():
    warnings.filterwarnings("ignore")
    env = grid2op.make("case14_realistic")
class MyExpertAgent(GreedyAgent):
    """Greedy agent simulating a hand-crafted list of expert actions (line
    reconnections / disconnections and a few substation reconfigurations) and
    playing the one with the highest simulated reward."""
    def __init__(self, action_space):
        GreedyAgent.__init__(self, action_space)
        # history of ((best_action, expected_reward), [(action, reward), ...])
        self.saved_score = []

    def act(self, observation, reward, done=False):
        """
        By definition, all "greedy" agents are acting the same way. The only thing that can differentiate multiple
        agents is the actions that are tested.
        These actions are defined in the method :func:`._get_tested_action`. This :func:`.act` method implements the
        greedy logic: take the actions that maximizes the instantaneous reward on the simulated action.

        Parameters
        ----------
        observation: :class:`grid2op.BaseObservation.BaseObservation`
            The current observation of the :class:`grid2op.Environment`
        reward: ``float``
            The current reward. This is the reward obtained by the previous action
        done: ``bool``
            Whether the episode has ended or not. Used to maintain gym compatibility

        Returns
        -------
        res: :class:`grid2op.BaseAction.BaseAction`
            The action chosen by the bot / controller / agent.
        """
        self.tested_action = self._get_tested_action(observation)
        if len(self.tested_action) > 1:
            # bug fix: np.float / np.NaN were removed from recent numpy versions
            all_rewards = np.full(shape=len(self.tested_action), fill_value=np.nan, dtype=float)
            for i, action in enumerate(self.tested_action):
                simul_obs, simul_reward, simul_has_error, simul_info = observation.simulate(action)
                all_rewards[i] = simul_reward
            reward_idx = np.argmax(all_rewards)  # index of the best simulated reward
            expected_reward = np.max(all_rewards)
            best_action = self.tested_action[reward_idx]
        else:
            all_rewards = [None]
            expected_reward = None
            best_action = self.tested_action[0]
        self.saved_score.append(((best_action, expected_reward),
                                 [el for el in zip(self.tested_action, all_rewards)]))
        return best_action

    def _get_tested_action(self, observation):
        """Builds the list of candidate actions that :func:`act` will simulate."""
        res = [self.action_space({})]  # add the do nothing
        for i, el in enumerate(observation.line_status):
            # try to reconnect powerlines
            if not el:
                # bug fix: np.int was removed from recent numpy versions
                tmp = np.zeros(self.action_space.n_line, dtype=int)
                tmp[i] = 1
                action = self.action_space({"set_line_status": tmp})
                action = action.update({"set_bus": {"lines_or_id": [(i, 1)], "lines_ex_id": [(i, 1)]}})
                res.append(action)
        # disconnect the powerlines
        ## 12 to 13, 10 to 9 # 5 to 12, 5 to 10,
        for i in [19, 17]:  # , 10 ,12 <- with that it takes action that leads to divergence, check that!
            # bug fix: np.bool was removed from recent numpy versions
            tmp = np.full(self.action_space.n_line, fill_value=False, dtype=bool)
            tmp[i] = True
            action = self.action_space({"change_line_status": tmp})
            if not observation.line_status[i]:
                # so the action consisted in reconnecting the powerline
                # i need to say on which bus
                action = action.update({"set_bus": {"lines_or_id": [(i, 1)], "lines_ex_id": [(i, 1)]}})
            res.append(action)
        # play with the topology
        ## i put powerlines going from 1 to 4 with powerline going from 3 to 4 at substation 4
        action = self.action_space({"change_bus":
                                        {"substations_id": [(4, np.array([False, True, True, False, False]))]}})
        res.append(action)
        ## i put powerline from 5 to 12 with powerline from 5 to 10 at substation 5
        action = self.action_space({"change_bus":
                                        {"substations_id": [(5, np.array([False, True, False, True, False, False]))]}})
        res.append(action)
        ## i put powerline from 1 to 4 with powerline from 1 to 3 with at substation 1
        action = self.action_space({"change_bus":
                                        {"substations_id": [(1, np.array([False, False, True, True, False, False]))]}})
        res.append(action)
        return res
# interact with the environment using the expert agent defined above,
# rendering the grid at every time step until the episode is over
my_agent = MyExpertAgent(env.action_space)
# my_agent = RandomAgent(env.action_space)
print("Total unitary action possible: {}".format(my_agent.action_space.n))
all_obs = []  # keeps every observation seen during the episode
obs = env.reset()
all_obs.append(obs)
reward = env.reward_range[0]  # lowest possible reward, used for the very first call to act
done = False
nb_step = 0
while True:
    env.render()
    action = my_agent.act(obs, reward, done)
    obs, reward, done, _ = env.step(action)
    print("Rendering timestep {}".format(nb_step))
    if done:
        # episode over (game over or end of the chronics)
        break
    all_obs.append(obs)
    nb_step += 1
Grid2Op | Grid2Op-master/grid2op/Parameters.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import json
import warnings
from grid2op.dtypes import dt_int, dt_float, dt_bool
class Parameters:
"""
Main classes representing the parameters of the game. The main parameters are described bellow.
Note that changing the values of these parameters might not be enough. If these _parameters are not used in the
:class:`grid2op.Rules.RulesChecker`, then modifying them will have no impact at all.
Attributes
----------
NO_OVERFLOW_DISCONNECTION: ``bool``
If set to ``True`` then the :class:`grid2op.Environment.Environment` will not disconnect powerline above their
thermal
limit. Default is ``False``
NB_TIMESTEP_OVERFLOW_ALLOWED: ``int``
Number of timesteps for which a soft overflow is allowed, default 2. This means that a powerline will be
disconnected (if :attr:`.NO_OVERFLOW_DISCONNECTION` is set to ``False``) after 2 time steps above its thermal
limit. This is called a "soft overflow".
NB_TIMESTEP_RECONNECTION: ``int``
Number of timesteps a powerline disconnected for security motives (for example due to
:attr:`.NB_TIMESTEP_POWERFLOW_ALLOWED` or :attr:`.HARD_OVERFLOW_THRESHOLD`) will remain disconnected.
It's set to 10 timestep by default.
NB_TIMESTEP_COOLDOWN_LINE: ``int``
When someone acts on a powerline by changing its status (connected / disconnected) this number indicates
how many timesteps the :class:`grid2op.Agent.BaseAgent` has to wait before being able to modify this status
again.
For example, if this is 1, this means that a BaseAgent can act on the status of a powerline 1 out of 2 time steps (1
time step it acts, another one it cools down, and the next one it can act again). Having it at 0 it equivalent
to deactivate this feature (default).
NB_TIMESTEP_COOLDOWN_SUB: ``int``
When someone changes the topology of a substations, this number indicates how many timesteps the
:class:`grid2op.Agent.BaseAgent` has to wait before being able to modify the topology on this same substation. It
has the same behaviour as :attr:`Parameters.NB_TIMESTEP_LINE_STATUS_REMODIF`. To deactivate this feature,
put it at 0 (default).
HARD_OVERFLOW_THRESHOLD: ``float``
If a the powerflow on a line is above HARD_OVERFLOW_THRESHOLD * thermal limit (and
:attr:`Parameters.NO_OVERFLOW_DISCONNECTION` is set to ``False``) then it is automatically disconnected,
regardless of
the number of timesteps it is on overflow). This is called a "hard overflow". This is expressed in relative
value of the thermal limits, for example, if for a powerline the `thermal_limit` is 150 and the
HARD_OVERFLOW_THRESHOLD is 2.0, then if the flow on the powerline reaches 2 * 150 = 300.0 the powerline
the powerline is automatically disconnected.
ENV_DC: ``bool``
Whether or not to make the simulations of the environment in the "direct current" approximation. This can be
useful for early training of agents, as this mode is much faster to compute than the corresponding
"alternative current" powerflow. It is also less precise. The default is ``False``
FORECAST_DC: ``bool``
DEPRECATED. Please use the "change_forecast_param" function of the environment
Whether to use the direct current approximation in the :func:`grid2op.Observation.BaseObservation.simulate`
method. Default is ``False``. Setting :attr:`FORECAST_DC` to `True` can speed up the computation of the
`simulate` function, but will make the results less accurate.
MAX_SUB_CHANGED: ``int``
Maximum number of substations that can be reconfigured between two consecutive timesteps by an
:class:`grid2op.Agent.BaseAgent`. Default value is 1.
MAX_LINE_STATUS_CHANGED: ``int``
Maximum number of powerlines statuses that can be changed between two consecutive timesteps by an
:class:`grid2op.Agent.BaseAgent`. Default value is 1.
IGNORE_MIN_UP_DOWN_TIME: ``bool``
Whether or not to ignore the attributes `gen_min_uptime` and `gen_min_downtime`. Basically setting this
parameter to ``True``
LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION: ``bool``
If set to ``True`` (NOT the default) the environment will automatically limit the curtailment / storage actions that otherwise
would lead to infeasible state.
For example the maximum ramp up of generator at a given step is 100 MW / step (
ie you can increase the production of these generators of maximum 100 MW at this step) but if you cumul the storage
action and the curtailment action, you ask + 110 MW at this step (for example you curtail 100MW of renewables).
In this example, if param.LIMIT_INFEASIBLE_CURTAILMENT_ACTION is ``False`` (default) this is a game over. If it's ``True``
then the curtailment action is limited so that it does not exceed 100 MW.
Setting it to ``True`` might help the learning of agent using redispatching.
If you want a similar behaviour, when you don't have access to the parameters of the environment, you can
have a look at :func:`grid2op.Aciton.BaseAction.limit_curtail_storage`.
.. note::
This argument and the :func:`grid2op.Action.BaseAction.limit_curtail_storage` have the same objective:
prevent an agent to do some curtailment too strong for the grid.
When using this parameter, the environment will do it knowing exactly what will happen next (
its a bit "cheating") and limit exactly the action to exactly right amount.
Using :func:`grid2op.Aciton.BaseAction.limit_curtail_storage` is always feasible, but less precise.
INIT_STORAGE_CAPACITY: ``float``
Between 0. and 1. Specify, at the beginning of each episode, what is the storage capacity of each storage unit.
The storage capacity will be expressed as fraction of storage_Emax. For example, if `INIT_STORAGE_CAPACITY` is
0.5 then at the beginning of every episode, all storage unit will have a storage capacity of
0.5 * `storage_Emax`. By default: `0.5`
ACTIVATE_STORAGE_LOSS: ``bool``
You can set it to ``False`` to not take into account the loss in the storage units.
This deactivates the "loss amount per time step" (`storage_loss`) and has also the effect to set
to do **as if** the
storage units were perfect (as if `storage_charging_efficiency=1.` and `storage_discharging_efficiency=1.`.
**NB** it does **as if** it were the case. But the parameters `storage_loss`, `storage_charging_efficiency`
and storage_discharging_efficiency` are not affected by this.
Default: ``True``
ALARM_BEST_TIME: ``int``
Number of step for which it's best to send an alarm BEFORE a game over
ALARM_WINDOW_SIZE: ``int``
Number of steps for which it's worth it to give an alarm (if an alarm is send outside of the window
`[ALARM_BEST_TIME - ALARM_WINDOW_SIZE, ALARM_BEST_TIME + ALARM_WINDOW_SIZE]` then it does not grant anything
ALERT_TIME_WINDOW : ``int``
Number of steps for which it's worth it to give an alert after an attack. If the alert is sent before, the assistant
score doesn't take into account that an alert is raised.
MAX_SIMULATE_PER_STEP: ``int``
Maximum number of calls to `obs.simuate(...)` allowed per step (reset each "env.step(...)"). Defaults to -1 meaning "as much as you want".
MAX_SIMULATE_PER_EPISODE: ``int``
Maximum number of calls to `obs.simuate(...)` allowed per episode (reset each "env.simulate(...)"). Defaults to -1 meaning "as much as you want".
"""
def __init__(self, parameters_path=None):
"""
Build an object representing the _parameters of the game.
Parameters
----------
parameters_path: ``str``, optional
Path where to look for parameters.
"""
# if True, then it will not disconnect lines above their thermal limits
self.NO_OVERFLOW_DISCONNECTION = False
# number of timestep before powerline with an overflow is automatically disconnected
self.NB_TIMESTEP_OVERFLOW_ALLOWED = dt_int(2)
# number of timestep before a line can be reconnected if it has suffer a forced disconnection
self.NB_TIMESTEP_RECONNECTION = dt_int(10)
# number of timestep before a substation topology can be modified again
self.NB_TIMESTEP_COOLDOWN_LINE = dt_int(0)
self.NB_TIMESTEP_COOLDOWN_SUB = dt_int(0)
# threshold above which a powerline is instantly disconnected by protections
# this is expressed in relative value of the thermal limits
# for example setting "HARD_OVERFLOW_THRESHOLD = 2" is equivalent, if a powerline has a thermal limit of
# 243 A, to disconnect it instantly if it has a powerflow higher than 2 * 243 = 486 A
self.HARD_OVERFLOW_THRESHOLD = dt_float(2.0)
# are the powerflow performed by the environment in DC mode (dc powerflow) or AC (ac powerflow)
self.ENV_DC = False
# same as above, but for the forecast states
self.FORECAST_DC = False # DEPRECATED use "change_forecast_parameters(new_param)" with "new_param.ENV_DC=..."
# maximum number of substations that can be change in one action
self.MAX_SUB_CHANGED = dt_int(1)
# maximum number of powerline status that can be changed in one action
self.MAX_LINE_STATUS_CHANGED = dt_int(1)
# ignore the min_uptime and downtime for the generators: allow them to be connected / disconnected
# at will
self.IGNORE_MIN_UP_DOWN_TIME = True
# allow dispatch on turned off generator (if ``True`` you can actually dispatch a turned on geenrator)
self.ALLOW_DISPATCH_GEN_SWITCH_OFF = True
# if a curtailment action is "too strong" it will limit it to the "maximum feasible"
# not to break the whole system
self.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = False
# storage capacity (NOT in pct so 0.5 = 50%)
self.INIT_STORAGE_CAPACITY = 0.5
# do i take into account the storage loss in the step function
self.ACTIVATE_STORAGE_LOSS = True
# alarms
self.ALARM_BEST_TIME = 12
self.ALARM_WINDOW_SIZE = 12
# alert
self.ALERT_TIME_WINDOW = 12
# number of simulate
self.MAX_SIMULATE_PER_STEP = dt_int(-1)
self.MAX_SIMULATE_PER_EPISODE = dt_int(-1)
if parameters_path is not None:
if os.path.isfile(parameters_path):
self.init_from_json(parameters_path)
else:
warn_msg = "Parameters: the file {} is not found. Continuing with default parameters."
warnings.warn(warn_msg.format(parameters_path))
@staticmethod
def _isok_txt(arg):
if isinstance(arg, type(True)):
return arg
if isinstance(arg, type("")):
arg = arg.strip('"')
elif isinstance(arg, type(1)):
arg = "{}".format(arg)
res = False
if (
arg == "True"
or arg == "T"
or arg == "true"
or arg == "t"
or str(arg) == "1"
):
res = True
elif (
arg == "False"
or arg == "F"
or arg == "false"
or arg == "f"
or str(arg) == "0"
):
res = False
else:
msg = (
"It's ambiguous where an argument is True or False. "
'Please only provide "True" or "False" and not {}'
)
raise RuntimeError(msg.format(arg))
return res
def init_from_dict(self, dict_):
"""
Initialize the object given a dictionary. All keys are optional. If a key is not present in the dictionary,
the default parameters is used.
Parameters
----------
dict_: ``dict``
The dictionary representing the parameters to load.
"""
if "NO_OVERFLOW_DISCONNECTION" in dict_:
self.NO_OVERFLOW_DISCONNECTION = Parameters._isok_txt(
dict_["NO_OVERFLOW_DISCONNECTION"]
)
if "IGNORE_MIN_UP_DOWN_TIME" in dict_:
self.IGNORE_MIN_UP_DOWN_TIME = Parameters._isok_txt(
dict_["IGNORE_MIN_UP_DOWN_TIME"]
)
if "ALLOW_DISPATCH_GEN_SWITCH_OFF" in dict_:
self.ALLOW_DISPATCH_GEN_SWITCH_OFF = Parameters._isok_txt(
dict_["ALLOW_DISPATCH_GEN_SWITCH_OFF"]
)
if "LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION" in dict_:
self.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = Parameters._isok_txt(
dict_["LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION"]
)
if "NB_TIMESTEP_POWERFLOW_ALLOWED" in dict_:
self.NB_TIMESTEP_OVERFLOW_ALLOWED = dt_int(
dict_["NB_TIMESTEP_POWERFLOW_ALLOWED"]
)
if "NB_TIMESTEP_OVERFLOW_ALLOWED" in dict_:
self.NB_TIMESTEP_OVERFLOW_ALLOWED = dt_int(
dict_["NB_TIMESTEP_OVERFLOW_ALLOWED"]
)
if "NB_TIMESTEP_RECONNECTION" in dict_:
self.NB_TIMESTEP_RECONNECTION = dt_int(dict_["NB_TIMESTEP_RECONNECTION"])
if "HARD_OVERFLOW_THRESHOLD" in dict_:
self.HARD_OVERFLOW_THRESHOLD = dt_float(dict_["HARD_OVERFLOW_THRESHOLD"])
if "ENV_DC" in dict_:
self.ENV_DC = Parameters._isok_txt(dict_["ENV_DC"])
if "FORECAST_DC" in dict_:
new_val = Parameters._isok_txt(dict_["FORECAST_DC"])
if new_val != self.FORECAST_DC:
warnings.warn(
"The FORECAST_DC attributes is deprecated. Please change the parameters of the "
'"forecast" backend with "env.change_forecast_parameters(new_param)" function '
'with "new_param.ENV_DC=..." '
)
self.FORECAST_DC = new_val
if "MAX_SUB_CHANGED" in dict_:
self.MAX_SUB_CHANGED = dt_int(dict_["MAX_SUB_CHANGED"])
if "MAX_LINE_STATUS_CHANGED" in dict_:
self.MAX_LINE_STATUS_CHANGED = dt_int(dict_["MAX_LINE_STATUS_CHANGED"])
if "NB_TIMESTEP_TOPOLOGY_REMODIF" in dict_:
# for backward compatibility (in case of old dataset)
self.NB_TIMESTEP_COOLDOWN_SUB = dt_int(
dict_["NB_TIMESTEP_TOPOLOGY_REMODIF"]
)
if "NB_TIMESTEP_COOLDOWN_SUB" in dict_:
self.NB_TIMESTEP_COOLDOWN_SUB = dt_int(dict_["NB_TIMESTEP_COOLDOWN_SUB"])
if "NB_TIMESTEP_LINE_STATUS_REMODIF" in dict_:
# for backward compatibility (in case of old dataset)
self.NB_TIMESTEP_COOLDOWN_LINE = dt_int(
dict_["NB_TIMESTEP_LINE_STATUS_REMODIF"]
)
if "NB_TIMESTEP_COOLDOWN_LINE" in dict_:
self.NB_TIMESTEP_COOLDOWN_LINE = dt_int(dict_["NB_TIMESTEP_COOLDOWN_LINE"])
# storage parameters
if "INIT_STORAGE_CAPACITY" in dict_:
self.INIT_STORAGE_CAPACITY = dt_float(dict_["INIT_STORAGE_CAPACITY"])
if "ACTIVATE_STORAGE_LOSS" in dict_:
self.ACTIVATE_STORAGE_LOSS = Parameters._isok_txt(
dict_["ACTIVATE_STORAGE_LOSS"]
)
# alarm parameters
if "ALARM_BEST_TIME" in dict_:
self.ALARM_BEST_TIME = dt_int(dict_["ALARM_BEST_TIME"])
if "ALARM_WINDOW_SIZE" in dict_:
self.ALARM_WINDOW_SIZE = dt_int(dict_["ALARM_WINDOW_SIZE"])
# alert parameters
if "ALERT_TIME_WINDOW" in dict_:
self.ALERT_TIME_WINDOW = dt_int(dict_["ALERT_TIME_WINDOW"])
if "MAX_SIMULATE_PER_STEP" in dict_:
self.MAX_SIMULATE_PER_STEP = dt_int(dict_["MAX_SIMULATE_PER_STEP"])
if "MAX_SIMULATE_PER_EPISODE" in dict_:
self.MAX_SIMULATE_PER_EPISODE = dt_int(dict_["MAX_SIMULATE_PER_EPISODE"])
authorized_keys = set(self.__dict__.keys())
authorized_keys = authorized_keys | {
"NB_TIMESTEP_POWERFLOW_ALLOWED",
"NB_TIMESTEP_TOPOLOGY_REMODIF",
"NB_TIMESTEP_LINE_STATUS_REMODIF",
}
ignored_keys = dict_.keys() - authorized_keys
if len(ignored_keys):
warnings.warn(
'Parameters: The _parameters "{}" used to build the Grid2Op.Parameters '
"class are not recognized and will be ignored.".format(ignored_keys)
)
def to_dict(self):
"""
Serialize all the _parameters as a dictionnary; Useful to write it in json format.
Returns
-------
res: ``dict``
A representation of these _parameters in the form of a dictionnary.
"""
res = {}
res["NO_OVERFLOW_DISCONNECTION"] = bool(self.NO_OVERFLOW_DISCONNECTION)
res["IGNORE_MIN_UP_DOWN_TIME"] = bool(self.IGNORE_MIN_UP_DOWN_TIME)
res["ALLOW_DISPATCH_GEN_SWITCH_OFF"] = bool(self.ALLOW_DISPATCH_GEN_SWITCH_OFF)
res["LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION"] = bool(
self.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION
)
res["NB_TIMESTEP_OVERFLOW_ALLOWED"] = int(self.NB_TIMESTEP_OVERFLOW_ALLOWED)
res["NB_TIMESTEP_RECONNECTION"] = int(self.NB_TIMESTEP_RECONNECTION)
res["HARD_OVERFLOW_THRESHOLD"] = float(self.HARD_OVERFLOW_THRESHOLD)
res["ENV_DC"] = bool(self.ENV_DC)
res["FORECAST_DC"] = bool(self.FORECAST_DC)
res["MAX_SUB_CHANGED"] = int(self.MAX_SUB_CHANGED)
res["MAX_LINE_STATUS_CHANGED"] = int(self.MAX_LINE_STATUS_CHANGED)
res["NB_TIMESTEP_COOLDOWN_LINE"] = int(self.NB_TIMESTEP_COOLDOWN_LINE)
res["NB_TIMESTEP_COOLDOWN_SUB"] = int(self.NB_TIMESTEP_COOLDOWN_SUB)
res["INIT_STORAGE_CAPACITY"] = float(self.INIT_STORAGE_CAPACITY)
res["ACTIVATE_STORAGE_LOSS"] = bool(self.ACTIVATE_STORAGE_LOSS)
res["ALARM_BEST_TIME"] = int(self.ALARM_BEST_TIME)
res["ALARM_WINDOW_SIZE"] = int(self.ALARM_WINDOW_SIZE)
res["ALERT_TIME_WINDOW"] = int(self.ALERT_TIME_WINDOW)
res["MAX_SIMULATE_PER_STEP"] = int(self.MAX_SIMULATE_PER_STEP)
res["MAX_SIMULATE_PER_EPISODE"] = int(self.MAX_SIMULATE_PER_EPISODE)
return res
def init_from_json(self, json_path):
"""
Set member attributes from a json file
Parameters
----------
json_path: ``str``
The complete (*ie.* path + filename) where the json file is located.
"""
try:
with open(json_path) as f:
dict_ = json.load(f)
self.init_from_dict(dict_)
except Exception as exc_:
warn_msg = (
"Could not load from {}\n"
'Continuing with default parameters. \n\nThe error was "{}"'
)
warnings.warn(warn_msg.format(json_path, exc_))
def __eq__(self, other):
this_dict = self.to_dict()
other_dict = other.to_dict()
return this_dict == other_dict
@staticmethod
def from_json(json_path):
"""
Create instance of a Parameters from a path where is a json is saved.
Parameters
----------
json_path: ``str``
The complete (*ie.* path + filename) where the json file is located.
Returns
-------
res: :class:`Parameters`
The _parameters initialized
"""
res = Parameters(json_path)
return res
def check_valid(self):
"""
check the parameter is valid (ie it checks that all the values are of correct types and within the
correct range.
Raises
-------
An exception if the parameter is not valid
"""
try:
if not isinstance(self.NO_OVERFLOW_DISCONNECTION, (bool, dt_bool)):
raise RuntimeError("NO_OVERFLOW_DISCONNECTION should be a boolean")
self.NO_OVERFLOW_DISCONNECTION = dt_bool(self.NO_OVERFLOW_DISCONNECTION)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert NO_OVERFLOW_DISCONNECTION to bool with error \n:"{exc_}"'
)
try:
self.NB_TIMESTEP_OVERFLOW_ALLOWED = int(
self.NB_TIMESTEP_OVERFLOW_ALLOWED
) # to raise if numpy array
self.NB_TIMESTEP_OVERFLOW_ALLOWED = dt_int(
self.NB_TIMESTEP_OVERFLOW_ALLOWED
)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert NB_TIMESTEP_OVERFLOW_ALLOWED to int with error \n:"{exc_}"'
)
if self.NB_TIMESTEP_OVERFLOW_ALLOWED < 0:
raise RuntimeError(
"NB_TIMESTEP_OVERFLOW_ALLOWED < 0., this should be >= 0."
)
try:
self.NB_TIMESTEP_RECONNECTION = int(
self.NB_TIMESTEP_RECONNECTION
) # to raise if numpy array
self.NB_TIMESTEP_RECONNECTION = dt_int(self.NB_TIMESTEP_RECONNECTION)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert NB_TIMESTEP_RECONNECTION to int with error \n:"{exc_}"'
)
if self.NB_TIMESTEP_RECONNECTION < 0:
raise RuntimeError("NB_TIMESTEP_RECONNECTION < 0., this should be >= 0.")
try:
self.NB_TIMESTEP_COOLDOWN_LINE = int(self.NB_TIMESTEP_COOLDOWN_LINE)
self.NB_TIMESTEP_COOLDOWN_LINE = dt_int(self.NB_TIMESTEP_COOLDOWN_LINE)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert NB_TIMESTEP_COOLDOWN_LINE to int with error \n:"{exc_}"'
)
if self.NB_TIMESTEP_COOLDOWN_LINE < 0:
raise RuntimeError("NB_TIMESTEP_COOLDOWN_LINE < 0., this should be >= 0.")
try:
self.NB_TIMESTEP_COOLDOWN_SUB = int(
self.NB_TIMESTEP_COOLDOWN_SUB
) # to raise if numpy array
self.NB_TIMESTEP_COOLDOWN_SUB = dt_int(self.NB_TIMESTEP_COOLDOWN_SUB)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert NB_TIMESTEP_COOLDOWN_SUB to int with error \n:"{exc_}"'
)
if self.NB_TIMESTEP_COOLDOWN_SUB < 0:
raise RuntimeError("NB_TIMESTEP_COOLDOWN_SUB < 0., this should be >= 0.")
try:
self.HARD_OVERFLOW_THRESHOLD = float(
self.HARD_OVERFLOW_THRESHOLD
) # to raise if numpy array
self.HARD_OVERFLOW_THRESHOLD = dt_float(self.HARD_OVERFLOW_THRESHOLD)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert HARD_OVERFLOW_THRESHOLD to float with error \n:"{exc_}"'
)
if self.HARD_OVERFLOW_THRESHOLD < 1.0:
raise RuntimeError(
"HARD_OVERFLOW_THRESHOLD < 1., this should be >= 1. (use env.set_thermal_limit "
"to modify the thermal limit)"
)
try:
if not isinstance(self.ENV_DC, (bool, dt_bool)):
raise RuntimeError("NO_OVERFLOW_DISCONNECTION should be a boolean")
self.ENV_DC = dt_bool(self.ENV_DC)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert ENV_DC to bool with error \n:"{exc_}"'
)
try:
self.MAX_SUB_CHANGED = int(self.MAX_SUB_CHANGED) # to raise if numpy array
self.MAX_SUB_CHANGED = dt_int(self.MAX_SUB_CHANGED)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert MAX_SUB_CHANGED to int with error \n:"{exc_}"'
)
if self.MAX_SUB_CHANGED < 0:
raise RuntimeError(
"MAX_SUB_CHANGED should be >=0 (or -1 if you want to be able to change every "
"substation at once)"
)
try:
self.MAX_LINE_STATUS_CHANGED = int(
self.MAX_LINE_STATUS_CHANGED
) # to raise if numpy array
self.MAX_LINE_STATUS_CHANGED = dt_int(self.MAX_LINE_STATUS_CHANGED)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert MAX_LINE_STATUS_CHANGED to int with error \n:"{exc_}"'
)
if self.MAX_LINE_STATUS_CHANGED < 0:
raise RuntimeError(
"MAX_LINE_STATUS_CHANGED should be >=0 "
"(or -1 if you want to be able to change every powerline at once)"
)
try:
if not isinstance(self.IGNORE_MIN_UP_DOWN_TIME, (bool, dt_bool)):
raise RuntimeError("IGNORE_MIN_UP_DOWN_TIME should be a boolean")
self.IGNORE_MIN_UP_DOWN_TIME = dt_bool(self.IGNORE_MIN_UP_DOWN_TIME)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert IGNORE_MIN_UP_DOWN_TIME to bool with error \n:"{exc_}"'
)
try:
if not isinstance(self.ALLOW_DISPATCH_GEN_SWITCH_OFF, (bool, dt_bool)):
raise RuntimeError("ALLOW_DISPATCH_GEN_SWITCH_OFF should be a boolean")
self.ALLOW_DISPATCH_GEN_SWITCH_OFF = dt_bool(
self.ALLOW_DISPATCH_GEN_SWITCH_OFF
)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert ALLOW_DISPATCH_GEN_SWITCH_OFF to bool with error \n:"{exc_}"'
)
try:
if not isinstance(
self.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION, (bool, dt_bool)
):
raise RuntimeError(
"LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION should be a boolean"
)
self.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = dt_bool(
self.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION
)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION to bool with error \n:"{exc_}"'
)
try:
self.INIT_STORAGE_CAPACITY = float(
self.INIT_STORAGE_CAPACITY
) # to raise if numpy array
self.INIT_STORAGE_CAPACITY = dt_float(self.INIT_STORAGE_CAPACITY)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert INIT_STORAGE_CAPACITY to float with error \n:"{exc_}"'
)
if self.INIT_STORAGE_CAPACITY < 0.0:
raise RuntimeError(
"INIT_STORAGE_CAPACITY < 0., this should be within range [0., 1.]"
)
if self.INIT_STORAGE_CAPACITY > 1.0:
raise RuntimeError(
"INIT_STORAGE_CAPACITY > 1., this should be within range [0., 1.]"
)
try:
if not isinstance(self.ACTIVATE_STORAGE_LOSS, (bool, dt_bool)):
raise RuntimeError("ACTIVATE_STORAGE_LOSS should be a boolean")
self.ACTIVATE_STORAGE_LOSS = dt_bool(self.ACTIVATE_STORAGE_LOSS)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert ACTIVATE_STORAGE_LOSS to bool with error \n:"{exc_}"'
)
try:
self.ALARM_WINDOW_SIZE = dt_int(self.ALARM_WINDOW_SIZE)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert ALARM_WINDOW_SIZE to int with error \n:"{exc_}"'
)
try:
self.ALARM_BEST_TIME = dt_int(self.ALARM_BEST_TIME)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert ALARM_BEST_TIME to int with error \n:"{exc_}"'
)
try:
self.ALERT_TIME_WINDOW = dt_int(self.ALERT_TIME_WINDOW)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert ALERT_TIME_WINDOW to int with error \n:"{exc_}"'
)
if self.ALARM_WINDOW_SIZE <= 0:
raise RuntimeError("self.ALARM_WINDOW_SIZE should be a positive integer !")
if self.ALARM_BEST_TIME <= 0:
raise RuntimeError("self.ALARM_BEST_TIME should be a positive integer !")
if self.ALERT_TIME_WINDOW <= 0:
raise RuntimeError("self.ALERT_TIME_WINDOW should be a positive integer !")
try:
self.MAX_SIMULATE_PER_STEP = int(
self.MAX_SIMULATE_PER_STEP
) # to raise if numpy array
self.MAX_SIMULATE_PER_STEP = dt_int(self.MAX_SIMULATE_PER_STEP)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert MAX_SIMULATE_PER_STEP to int with error \n:"{exc_}"'
)
if self.MAX_SIMULATE_PER_STEP <= -2:
raise RuntimeError(
f"self.MAX_SIMULATE_PER_STEP should be a positive integer or -1, we found {self.MAX_SIMULATE_PER_STEP}"
)
try:
self.MAX_SIMULATE_PER_EPISODE = int(
self.MAX_SIMULATE_PER_EPISODE
) # to raise if numpy array
self.MAX_SIMULATE_PER_EPISODE = dt_int(self.MAX_SIMULATE_PER_EPISODE)
except Exception as exc_:
raise RuntimeError(
f'Impossible to convert MAX_SIMULATE_PER_EPISODE to int with error \n:"{exc_}"'
)
if self.MAX_SIMULATE_PER_EPISODE <= -2:
raise RuntimeError(
f"self.MAX_SIMULATE_PER_EPISODE should be a positive integer or -1, we found {self.MAX_SIMULATE_PER_EPISODE}"
)
| 30,456 | 43.789706 | 153 | py |
Grid2Op | Grid2Op-master/grid2op/__init__.py |
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
Grid2Op
"""
# version of the grid2op package, exposed as ``grid2op.__version__``
__version__ = '1.9.1'
# names exported by ``from grid2op import *``: the sub-packages plus the
# utility functions re-imported below from grid2op.MakeEnv
__all__ = [
    "Action",
    "Agent",
    "Backend",
    "Chronics",
    "Environment",
    "Exceptions",
    "Observation",
    "Parameters",
    "Rules",
    "Reward",
    "Runner",
    "Plot",
    "PlotGrid",
    "Episode",
    "Download",
    "VoltageControler",
    "tests",
    "main",
    "command_line",
    "utils",
    # utility functions
    "list_available_remote_env",
    "list_available_local_env",
    "get_current_local_dir",
    "change_local_dir",
    "list_available_test_env",
    "update_env",
    "make"
,]
from grid2op.MakeEnv import make_old, make, make_from_dataset_path
from grid2op.MakeEnv import update_env
from grid2op.MakeEnv import (
list_available_remote_env,
list_available_local_env,
get_current_local_dir,
)
from grid2op.MakeEnv import change_local_dir, list_available_test_env
| 1,332 | 23.236364 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/_glop_platform_info.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
# this module check on which platform grid2op is currently running. This is important for multiprocessing that
# is not handled the same way in all platform.
import sys
# platform flags derived from the ``sys.platform`` prefix; used elsewhere to
# pick the multiprocessing behavior suited to the current operating system
_IS_WINDOWS = sys.platform.startswith("win")
_IS_LINUX = sys.platform.startswith("linux")
_IS_MACOS = sys.platform.startswith("darwin")
| 770 | 44.352941 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/command_line.py | #!/usr/bin/env python3
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import warnings
import os
import unittest
import sys
from grid2op.main import main_cli as mainEntryPoint
from grid2op.Download.download import main as downloadEntryPoint
# test files discovered and run by the ``testinstall`` entry point below;
# entries are commented out when they require optional dependencies or
# additional data, as noted next to each of them
__LI_FILENAME_TESTS = [
    "test_Action.py",
    # "test_Action_iadd.py",
    "test_ActionProperties.py",
    "test_Observation.py",
    "test_AgentsFast.py",
    "test_RunnerFast.py",
    "test_attached_envs.py",
    # "test_GymConverter.py", # requires gym
    # "test_Reward.py",
    # "test_issue_126.py",
    # "test_issue_131.py",
    # "test_issue_140.py",
    # "test_issue_146.py",
    # "test_issue_147.py",
    # # "test_issue_148.py", # requires additional data
    # "test_issue_151.py",
    # "test_issue_153.py",
    # "test_issue_164.py",
]
def main():
    """Console entry point forwarding to :func:`grid2op.main.main_cli`."""
    mainEntryPoint()
def download():
    """Console entry point forwarding to the dataset download script (:mod:`grid2op.Download.download`)."""
    downloadEntryPoint()
def replay():
    """Console entry point for replaying a stored episode.

    The replay feature relies on optional dependencies; when they are missing
    a warning is emitted instead of crashing.
    """
    try:
        # imported lazily so that grid2op stays usable without the optional extras
        from grid2op.Episode.EpisodeReplay import main as replay_entry
        replay_entry()
    except ImportError as import_err:
        template = (
            "\nEpisode replay is missing an optional dependency\n"
            "Please run pip3 install grid2op[optional].\n The error was {}"
        )
        warnings.warn(template.format(import_err))
def testinstall():
    """
    Performs basic tests to make sure grid2op is properly installed and working.

    It's not because these tests pass that grid2op will be fully functional however.
    """
    # discover the first test file, then append the suites of the remaining ones
    test_loader = unittest.TestLoader()
    this_directory = os.path.abspath(os.path.dirname(__file__))
    test_suite = test_loader.discover(
        os.path.join(this_directory, "tests"), pattern=__LI_FILENAME_TESTS[0]
    )
    for file_name in __LI_FILENAME_TESTS[1:]:
        test_suite.addTest(
            test_loader.discover(
                os.path.join(this_directory, "tests"), pattern=file_name
            )
        )
    # run with a plain-text result object so failures can be printed below
    results = unittest.TextTestResult(stream=sys.stderr, descriptions=True, verbosity=1)
    test_suite.run(results)
    if results.wasSuccessful():
        sys.exit(0)
    else:
        # dump the traceback of every error and failure before raising
        for _, str_ in results.errors:
            print(str_)
            print("-------------------------\n")
        for _, str_ in results.failures:
            print(str_)
            print("-------------------------\n")
        raise RuntimeError("Test not successful !")
| 2,773 | 29.483516 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/dtypes.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from packaging import version
# ``distribution`` is used below to query the installed numpy version;
# ``importlib.metadata`` only exists from python 3.8 onward
try:
    from importlib.metadata import distribution
except ModuleNotFoundError:
    # not available in python 3.7
    from importlib_metadata import distribution
# installed numpy version, compared to the threshold below to decide whether
# the old ``np.int`` alias can still be referenced safely
NUMPY_VERSION = version.parse(distribution('numpy').version)
_MAX_NUMPY_VERSION_NPINT = version.parse("1.20.0")
dt_int = (
    np.int32
)  # dtype('int64') or dtype('int32') depending on platform => i force it to int32
dt_float = (
    np.float32
)  # dtype('float64') or dtype('float32') depending on platform => i force it to float32
dt_bool = np.bool_  # mandatory for numpy >= 1.24
# types accepted as "integers" throughout grid2op; ``np.int`` is only added
# for numpy versions where the alias is still usable (<= 1.20.0)
int_types = (int, dt_int, np.int64)
if NUMPY_VERSION <= _MAX_NUMPY_VERSION_NPINT:
    int_types = (int, dt_int, np.int64, np.int)
| 1,196 | 37.612903 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/main.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
TODO documentation of this function!
"""
import os
import argparse
from grid2op.Observation import CompleteObservation
from grid2op.Chronics import Multifolder
from grid2op.Reward import FlatReward
from grid2op.Agent import DoNothingAgent
from grid2op.Backend import PandaPowerBackend
from grid2op.Rules import AlwaysLegal
from grid2op.Runner import Runner
def main_run(
    path_casefile=None,
    path_chronics=None,
    path_parameters=None,
    chronics_class=Multifolder,
    backend_class=PandaPowerBackend,
    agent_class=DoNothingAgent,
    reward_class=FlatReward,
    observation_class=CompleteObservation,
    legalAct_class=AlwaysLegal,
    nb_episode=3,
    nb_process=1,
    path_save=None,
    names_chronics_to_backend=None,
    gridStateclass_kwargs=None,
):
    """
    Build a :class:`grid2op.Runner.Runner` from the given paths / classes, run it
    and return the results.

    Parameters
    ----------
    path_casefile: ``str``
        Path to the file describing the powergrid.
    path_chronics: ``str``
        Path to the chronics (temporal variations of loads and productions).
    path_parameters: ``str``, optional
        Path where the _parameters of the game are stored.
    chronics_class, backend_class, agent_class, reward_class, observation_class, legalAct_class: ``type``
        Classes used to build the runner (see :class:`grid2op.Runner.Runner`).
    nb_episode: ``int``
        Number of episodes to play.
    nb_process: ``int``
        Number of processes used for the evaluation.
    path_save: ``str``, optional
        Where to store the logs of the experience (``None`` -> nothing stored).
    names_chronics_to_backend: ``dict``, optional
        Mapping between the names used in the chronics and in the backend.
    gridStateclass_kwargs: ``dict``, optional
        Extra key-word arguments forwarded to the chronics class. Defaults to
        an empty dict.

    Returns
    -------
    res:
        The value returned by :func:`grid2op.Runner.Runner.run`.
    """
    # NOTE: this parameter used to default to a shared mutable ``{}`` (a classic
    # python pitfall); use ``None`` as sentinel so every call gets a fresh dict.
    if gridStateclass_kwargs is None:
        gridStateclass_kwargs = {}
    init_grid_path = os.path.abspath(path_casefile)
    path_chron = os.path.abspath(path_chronics)
    parameters_path = path_parameters
    runner = Runner(
        init_grid_path=init_grid_path,
        path_chron=path_chron,
        parameters_path=parameters_path,
        names_chronics_to_backend=names_chronics_to_backend,
        gridStateclass=chronics_class,
        gridStateclass_kwargs=gridStateclass_kwargs,
        backendClass=backend_class,
        rewardClass=reward_class,
        agentClass=agent_class,
        observationClass=observation_class,
        legalActClass=legalAct_class,
    )
    res = runner.run(nb_episode=nb_episode, nb_process=nb_process, path_save=path_save)
    return res
def cli_main():
    """Build the command line parser of the grid2op evaluation and parse ``sys.argv``.

    Returns
    -------
    args: :class:`argparse.Namespace`
        The parsed command line arguments.
    """
    cli_parser = argparse.ArgumentParser(
        description='Launch the evaluation of the Grid2Op ("Grid To Operate") code.'
    )
    # optional arguments
    cli_parser.add_argument(
        "--path_save",
        default=None,
        help="The path where the log of the experience will be stored (default: None -> nothing stored)",
    )
    cli_parser.add_argument(
        "--nb_process",
        type=int,
        default=1,
        help="The number of process used for each evaluation (note that if nb_process > nb_episode then nb_episode is used.",
    )
    cli_parser.add_argument(
        "--nb_episode",
        type=int,
        default=3,
        help="The number of episode to play (default 3)",
    )
    # mandatory arguments: where the grid and its time series live
    cli_parser.add_argument(
        "--path_casefile",
        type=str,
        required=True,
        help="Path where the case file is located (casefile is the file describing the powergrid)",
    )
    cli_parser.add_argument(
        "--path_chronics",
        type=str,
        required=True,
        help="Path where the chronics (temporal variation of loads and production usually are located)",
    )
    cli_parser.add_argument(
        "--path_parameters",
        default=None,
        help="Path where the _parameters of the game are stored",
    )
    return cli_parser.parse_args()
def main_cli(args=None):
    """Run the grid2op evaluation from (possibly pre-parsed) command line arguments.

    Parameters
    ----------
    args: :class:`argparse.Namespace`, optional
        Already-parsed arguments; when ``None`` they are read from the command line.
    """
    if args is None:
        args = cli_main()

    path_save = str(args.path_save) if args.path_save is not None else None
    path_parameter = (
        str(args.path_parameters) if args.path_parameters is not None else None
    )
    names_chronics_to_backend = None

    # describe what is about to be run
    banner = 'Running Grid2Op:\n\t- on case file at "{case_file}"\n\t- with data located at "{data}"'
    banner += "\n\t- using {process} process(es)\n\t- for {nb_episode} episodes"
    if args.path_save is None:
        banner += "\n\t- results will not be saved"
    else:
        banner += '\n\t- results will be saved in "{}"'.format(args.path_save)
    print(
        banner.format(
            case_file=args.path_casefile,
            data=args.path_chronics,
            process=args.nb_process,
            nb_episode=args.nb_episode,
        )
    )

    res = main_run(
        path_save=path_save,
        nb_process=args.nb_process,
        nb_episode=args.nb_episode,
        path_casefile=args.path_casefile,
        path_chronics=args.path_chronics,
        path_parameters=path_parameter,
        names_chronics_to_backend=names_chronics_to_backend,
    )

    # print a short summary for every episode that was played
    print("The results are:")
    for chron_name, _, cum_reward, nb_time_step, max_ts in res:
        summary = "\tFor chronics located at {}\n".format(chron_name)
        summary += "\t\t - cumulative reward: {:.2f}\n".format(cum_reward)
        summary += "\t\t - number of time steps completed: {:.0f} / {:.0f}".format(
            nb_time_step, max_ts
        )
        print(summary)
if __name__ == "__main__":
    # direct execution: parse the command line, then run the evaluation
    args = cli_main()
    main_cli(args)
| 4,975 | 29.906832 | 125 | py |
Grid2Op | Grid2Op-master/grid2op/Action/ActionSpace.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import warnings
import copy
from grid2op.Action.BaseAction import BaseAction
from grid2op.Action.SerializableActionSpace import SerializableActionSpace
class ActionSpace(SerializableActionSpace):
"""
:class:`ActionSpace` should be created by an :class:`grid2op.Environment.Environment`
with its parameters coming from a properly
set up :class:`grid2op.Backend.Backend` (ie a Backend instance with a loaded powergrid.
See :func:`grid2op.Backend.Backend.load_grid` for
more information).
It will allow, thanks to its :func:`ActionSpace.__call__` method to create valid :class:`BaseAction`. It is the
the preferred way to create an object of class :class:`BaseAction` in this package.
On the contrary to the :class:`BaseAction`, it is NOT recommended to overload this helper. If more flexibility is
needed on the type of :class:`BaseAction` created, it is recommended to pass a different "*actionClass*" argument
when it's built. Note that it's mandatory that the class used in the "*actionClass*" argument derived from the
:class:`BaseAction`.
Attributes
----------
legal_action: :class:`grid2op.RulesChecker.BaseRules`
Class specifying the rules of the game used to check the legality of the actions.
"""
def __init__(
self,
gridobj,
legal_action,
actionClass=BaseAction, # need to be a base grid2op type (and not a type generated on the fly)
):
"""
INTERNAL USE ONLY
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
The actions space is created by the environment. Do not attempt to create one yourself.
All parameters (name_gen, name_load, name_line, sub_info, etc.) are used to fill the attributes having the
same name. See :class:`ActionSpace` for more information.
Parameters
----------
gridobj: :class:`grid2op.Space.GridObjects`
The representation of the powergrid.
actionClass: ``type``
Note that this parameter expected a class and not an object of the class. It is used to return the
appropriate action type.
legal_action: :class:`grid2op.RulesChecker.BaseRules`
Class specifying the rules of the game used to check the legality of the actions.
"""
actionClass._add_shunt_data()
actionClass._update_value_set()
SerializableActionSpace.__init__(self, gridobj, actionClass=actionClass)
self.legal_action = legal_action
def __call__(
self, dict_: dict = None, check_legal: bool = False, env: "BaseEnv" = None
) -> BaseAction:
"""
This utility allows you to build a valid action, with the proper sizes if you provide it with a valid
dictionary.
More information about this dictionary can be found in the :func:`Action.update` help. This dictionary
is not changed in this method.
**NB** This is the only recommended way to make a valid, with proper dimension :class:`Action` object:
Examples
--------
Here is a short example on how to make a action. For more detailed examples see :func:`Action.update`
.. code-block:: python
import grid2op
# create a simple environment
env = grid2op.make()
act = env.action_space({})
# act is now the "do nothing" action, that doesn't modify the grid.
Parameters
----------
dict_ : ``dict``
see :func:`Action.__call__` documentation for an extensive help about this parameter
check_legal: ``bool``
is there a test performed on the legality of the action. **NB** When an object of class :class:`Action` is
used, it is automatically tested for ambiguity. If this parameter is set to ``True`` then a legality test
is performed. An action can be illegal if the environment doesn't allow it, for example if an agent tries
to reconnect a powerline during a maintenance.
env: :class:`grid2op.Environment.Environment`, optional
An environment used to perform a legality check.
Returns
-------
res: :class:`BaseAction`
An action that is valid and corresponds to what the agent want to do with the formalism defined in
see :func:`Action.udpate`.
"""
res = self.actionClass()
# update the action
res.update(dict_)
if check_legal:
is_legal, reason = self._is_legal(res, env)
if not is_legal:
raise reason
return res
def _is_legal(self, action, env):
"""
INTERNAL USE ONLY
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Whether an action is legal or not is checked by the environment at each call
to `env.step`
Parameters
----------
action: :class:`BaseAction`
The action to test
env: :class:`grid2op.Environment.Environment`
The current environment
Returns
-------
res: ``bool``
``True`` if the action is legal, ie is allowed to be performed by the rules of the game. ``False``
otherwise.
"""
if env is None:
warnings.warn(
"Cannot performed legality check because no environment is provided."
)
return True, None
is_legal, reason = self.legal_action(action, env)
return is_legal, reason
    def _custom_deepcopy_for_copy(self, new_obj):
        """implements a faster "res = copy.deepcopy(self)" to use
        in "self.copy"

        Fills ``new_obj`` (an empty, freshly allocated instance) in place:
        first all the attributes handled by the parent classes, then the
        attributes specific to this class.

        Do not use it anywhere else...
        """
        # TODO clean that after it is working... (ie make this method per class...)
        # fill the super classes
        super()._custom_deepcopy_for_copy(new_obj)
        # now fill my class
        new_obj.legal_action = copy.deepcopy(self.legal_action)
def copy(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Perform a deep copy of the Observation space.
"""
# performs the copy
# res = copy.deepcopy(self) # painfully slow...
# create an empty "me"
my_cls = type(self)
res = my_cls.__new__(my_cls)
self._custom_deepcopy_for_copy(res)
return res
    def close(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Make sure all references to possible backends are closed. This is not used here in general but might be for some specific cases.
        """
        # nothing to clean up in this class; the method exists so that callers
        # can invoke close() uniformly on every kind of space
        pass
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import numpy as np
import warnings
from typing import Tuple
from grid2op.dtypes import dt_int, dt_bool, dt_float
from grid2op.Exceptions import *
from grid2op.Space import GridObjects
# TODO time delay somewhere (eg action is implemented after xxx timestep, and not at the time where it's proposed)
# TODO have the "reverse" action, that does the opposite of an action. Will be hard but who know ? :eyes:
# TODO ie: action + (rev_action) = do_nothing_action
# TODO consistency in names gen_p / prod_p and in general gen_* prod_*
class BaseAction(GridObjects):
"""
This is a base class for each :class:`BaseAction` objects.
As stated above, an action represents conveniently the modifications that will affect a powergrid.
It is not recommended to instantiate an action from scratch. The recommended way to get an action is either by
modifying an existing one using the method :func:`BaseAction.update` or to call and :class:`ActionSpace` object that
has been properly set up by an :class:`grid2op.Environment`.
BaseAction can be fully converted to and back from a numpy array with a **fixed** size.
An action can modify the grid in multiple ways.
It can change :
- the production and voltage setpoint of the generator units
- the amount of power consumed (for both active and reactive part) for load
- disconnect powerlines
- change the topology of the _grid.
To be valid, an action should be convertible to a tuple of 5 elements:
- the first element is the "injections" vector: representing the way generator units and loads are modified
- It is, in turn, a dictionary with the following keys (optional)
- "load_p" a vector of the same size of the load, giving the modification of the loads active consumption
- "load_q" a vector of the same size of the load, giving the modification of the loads reactive consumption
- "prod_p" a vector of the same size of the generators, giving the modification of the productions active
setpoint production
- "prod_v" a vector of the same size of the generators, giving the modification of the productions voltage
setpoint
- the second element is made of force line status. It is made of a vector of size :attr:`BaseAction._n_lines`
(the number of lines in the powergrid) and is interpreted as:
- -1 force line disconnection
- +1 force line reconnection
- 0 do nothing to this line
- the third element is the switch line status vector. It is made of a vector of size :attr:`BaseAction.n_line`
and is
interpreted as:
- ``True``: change the line status
- ``False``: don't do anything
- the fourth element set the buses to which the object is connected. It's a vector of integers with the following
interpretation:
- 0 -> don't change
- 1 -> connect to bus 1
- 2 -> connect to bus 2
- -1 -> disconnect the object.
- the fifth element changes the buses to which the object is connected. It's a boolean vector interpreted as:
- ``False``: nothing is done
- ``True``: change the bus eg connect it to bus 1 if it was connected to bus 2 or connect it to bus 2 if it was
connected to bus 1. NB this is only active if the system has only 2 buses per substation (that's the case for
the L2RPN challenge).
- the sixth element is a vector, representing the redispatching. Component of this vector is added to the
generators active setpoint value (if set) of the first elements.
**NB** the difference between :attr:`BaseAction._set_topo_vect` and :attr:`BaseAction._change_bus_vect` is the
following:
- If a component of :attr:`BaseAction._set_topo_vect` is 1, then the object (load, generator or powerline)
will be moved to bus 1 of the substation to which it is connected. If it is already to bus 1 nothing will be
done.
If it's on another bus it will connect it to bus 1. It's disconnected, it will reconnect it and connect it
to bus 1.
- If a component of :attr:`BaseAction._change_bus_vect` is True, then the object will be moved from one bus to
another.
If the object were on bus 1
it will be moved on bus 2, and if it were on bus 2, it will be moved on bus 1. If the object were
disconnected,
then this does nothing.
The conversion to the action into an understandable format by the backend is performed by the "update" method,
that takes into account a dictionary and is responsible to convert it into this format.
It is possible to overload this class as long as the overloaded :func:`BaseAction.__call__` operator returns the
specified format, and the :func:`BaseAction.__init__` method has the same signature.
This format is then digested by the backend and the powergrid is modified accordingly.
Attributes
----------
_set_line_status: :class:`numpy.ndarray`, dtype:int
For each powerline, it gives the effect of the action on the status of it. It should be understood as:
- -1: disconnect the powerline
- 0: don't affect the powerline
- +1: reconnect the powerline
_switch_line_status: :class:`numpy.ndarray`, dtype:bool
For each powerline, it informs whether the action will switch the status of a powerline of not. It should be
understood as followed:
- ``False``: the action doesn't affect the powerline
- ``True``: the action affects the powerline. If it was connected, it will disconnect it. If it was
disconnected, it will reconnect it.
_dict_inj: ``dict``
Represents the modification of the injection (productions and loads) of the power _grid. This dictionary can
have the optional keys:
- "load_p" to set the active load values (this is a numpy array with the same size as the number of load
in the power _grid with Nan: don't change anything, else set the value
- "load_q": same as above but for the load reactive values
- "prod_p": same as above but for the generator active setpoint values. It has the size corresponding
to the number of generators in the test case.
- "prod_v": same as above but set the voltage setpoint of generator units.
_set_topo_vect: :class:`numpy.ndarray`, dtype:int
Similar to :attr:`BaseAction._set_line_status` but instead of affecting the status of powerlines, it affects the
bus connectivity at a substation. It has the same size as the full topological vector
(:attr:`BaseAction._dim_topo`)
and for each element it should be understood as:
- 0 -> don't change
- 1 -> connect to bus 1
- 2 -> connect to bus 2
- -1 -> disconnect the object.
_change_bus_vect: :class:`numpy.ndarray`, dtype:bool
Similar to :attr:`BaseAction._switch_line_status` but it affects the topology at substations instead of the
status of
the powerline. It has the same size as the full topological vector (:attr:`BaseAction._dim_topo`) and each
component should mean:
- ``False``: the object is not affected
- ``True``: the object will be moved to another bus. If it was on bus 1 it will be moved on bus 2, and if
it was on bus 2 it will be moved on bus 1.
authorized_keys: :class:`set`
The set indicating which keys the actions can understand when calling :func:`BaseAction.update`
_subs_impacted: :class:`numpy.ndarray`, dtype:bool
This attributes is either not initialized (set to ``None``) or it tells, for each substation, if it is impacted
by the action (in this case :attr:`BaseAction._subs_impacted`\[sub_id\] is ``True``) or not
(in this case :attr:`BaseAction._subs_impacted`\[sub_id\] is ``False``)
_lines_impacted: :class:`numpy.ndarray`, dtype:bool
This attributes is either not initialized (set to ``None``) or it tells, for each powerline, if it is impacted
by the action (in this case :attr:`BaseAction._lines_impacted`\[line_id\] is ``True``) or not
(in this case :attr:`BaseAction._subs_impacted`\[line_id\] is ``False``)
attr_list_vect: ``list``, static
The authorized key that are processed by :func:`BaseAction.__call__` to modify the injections
attr_list_vect_set: ``set``, static
The authorized key that is processed by :func:`BaseAction.__call__` to modify the injections
_redispatch: :class:`numpy.ndarray`, dtype:float
Amount of redispatching that this action will perform. Redispatching will increase the generator's active
setpoint
value. This will be added to the value of the generators. The Environment will make sure that every physical
constraint is met. This means that the agent provides a setpoint, but there is no guarantee that the setpoint
will be achievable. Redispatching action is cumulative, this means that if at a given timestep you ask +10 MW
on a generator, and on another you ask +10 MW then the total setpoint for this generator that the environment
will try to implement is +20MW.
_storage_power: :class:`numpy.ndarray`, dtype:float
Amount of power you want each storage units to produce / absorbs. Storage units are in "loads"
convention. This means that if you ask for a positive number, the storage unit will absorb
power from the grid (=it will charge) and if you ask for a negative number, the storage unit
will inject power on the grid (storage unit will discharge).
_curtail: :class:`numpy.ndarray`, dtype:float
For each renewable generator, allows you to give a maximum value (as ratio of Pmax, *eg* 0.5 =>
you limit the production of this generator to 50% of its Pmax) to renewable generators.
.. warning::
In grid2op we decided that the "curtailment" type of actions consists in directly providing the
upper bound you the agent allowed for a given generator. It does not reflect the amount
of MW that will be "curtailed" but will rather provide a limit on the number of
MW a given generator can produce.
Examples
--------
Here are example on how to use the action, for more information on what will be the effect of each,
please refer to the explanatory notebooks.
You have two main methods to build actions, as showed here:
.. code-block:: python
import grid2op
env_name = "l2rpn_case14_sandbox" # or any other name
env = grid2op.make(env_name)
# first method:
action_description = {...} # see below
act = env.action_space(action_description)
# second method
act = env.action_space()
act.PROPERTY = MODIF
The description of action as a dictionary is the "historical" method. The method using the properties
has been added to simplify the API.
To connect / disconnect powerline, using the "set" action, you can:
.. code-block:: python
# method 1
act = env.action_space({"set_line_status": [(line_id, new_status), (line_id, new_status), ...]})
# method 2
act = env.action_space()
act.line_set_status = [(line_id, new_status), (line_id, new_status), ...]
typically: 0 <= line_id <= env.n_line and new_status = 1 or -1
To connect / disconnect powerline using the "change" action type, you can:
.. code-block:: python
# method 1
act = env.action_space({"change_line_status": [line_id, line_id, ...]})
# method 2
act = env.action_space()
act.line_change_status = [line_id, line_id, ...]
typically: 0 <= line_id <= env.n_line
To modify the busbar at which an element is connected you can (if using set, to use "change" instead
    replace "set_bus" in the text below by "change_bus" **eg** `env.action_space({"change_bus": ...})`
or `act.load_change_bus = ...` ):
.. code-block:: python
# method 1
act = env.action_space({"set_bus":
{"lines_or_id": [(line_id, new_bus), (line_id, new_bus), ...],
"lines_ex_id": [(line_id, new_bus), (line_id, new_bus), ...],
"loads_id": [(load_id, new_bus), (load_id, new_bus), ...],
"generators_id": [(gen_id, new_bus), (gen_id, new_bus), ...],
"storages_id": [(storage_id, new_bus), (storage_id, new_bus), ...]
}
})
# method 2
act = env.action_space()
act.line_or_set_bus = [(line_id, new_bus), (line_id, new_bus), ...]
act.line_ex_set_bus = [(line_id, new_bus), (line_id, new_bus), ...]
act.load_set_bus = [(load_id, new_bus), (load_id, new_bus), ...]
act.gen_set_bus = [(gen_id, new_bus), (gen_id, new_bus), ...]
act.storage_set_bus = [(storage_id, new_bus), (storage_id, new_bus), ...]
Of course you can modify one type of object at a time (you don't have to specify all "lines_or_id",
"lines_ex_id", "loads_id", "generators_id", "storages_id"
You can also give the topologies you want at each substations with:
.. code-block:: python
# method 1
act = env.action_space({"set_bus":{
"substations_id": [(sub_id, topo_sub), (sub_id, topo_sub), ...]
}})
# method 2
act = env.action_space()
act.sub_set_bus = [(sub_id, topo_sub), (sub_id, topo_sub), ...]
In the above typically 0 <= sub_id < env.n_sub and topo_sub is a vector having the right dimension (
so if a substation has 4 elements, then topo_sub should have 4 elements)
It has to be noted that `act.sub_set_bus` will return a 1d vector representing the topology
of the grid as "set" by the action, with the convention, -1 => disconnect, 0 => don't change,
1=> set to bus 1 and 2 => set object to bus 2.
In order to perform redispatching you can do as follow:
.. code-block:: python
# method 1
act = env.action_space({"redispatch": [(gen_id, amount), (gen_id, amount), ...]})
# method 2
act = env.action_space()
act.redispatch = [(gen_id, amount), (gen_id, amount), ...]
Typically 0<= gen_id < env.n_gen and `amount` is a floating point between gen_max_ramp_down and
gen_min_ramp_down for the generator modified.
In order to perform action on storage units, you can:
.. code-block:: python
# method 1
act = env.action_space({"set_storage": [(storage_id, amount), (storage_id, amount), ...]})
# method 2
act = env.action_space()
act.set_storage = [(storage_id, amount), (storage_id, amount), ...]
Typically `0 <= storage_id < env.n_storage` and `amount` is a floating point between the maximum
power and minimum power the storage unit can absorb / produce.
Finally, in order to perform curtailment action on renewable generators, you can:
.. code-block:: python
# method 1
act = env.action_space({"curtail": [(gen_id, amount), (gen_id, amount), ...]})
# method 2
act = env.action_space()
act.curtail = [(gen_id, amount), (gen_id, amount), ...]
Typically `0 <= gen_id < env.n_gen` and `amount` is a floating point between the 0. and 1.
giving the limit of power you allow each renewable generator to produce (expressed in ratio of
Pmax). For example if `gen_id=1` and `amount=0.7` it means you limit the production of
generator 1 to 70% of its Pmax.
"""
authorized_keys = {
"injection",
"hazards",
"maintenance",
"set_line_status",
"change_line_status",
"set_bus",
"change_bus",
"redispatch",
"set_storage",
"curtail",
"raise_alarm",
"raise_alert",
}
attr_list_vect = [
"prod_p",
"prod_v",
"load_p",
"load_q",
"_redispatch",
"_set_line_status",
"_switch_line_status",
"_set_topo_vect",
"_change_bus_vect",
"_hazards",
"_maintenance",
"_storage_power",
"_curtail",
"_raise_alarm",
"_raise_alert",
]
attr_nan_list_set = set()
attr_list_set = set(attr_list_vect)
shunt_added = False
_line_or_str = "line (origin)"
_line_ex_str = "line (extremity)"
ERR_ACTION_CUT = 'The action added to me will be cut, because i don\'t support modification of "{}"'
ERR_NO_STOR_SET_BUS = 'Impossible to modify the storage bus (with "set") with this action type.'
    def __init__(self):
        """
        INTERNAL USE ONLY

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        **It is NOT recommended** to create an action with this method, use the action space
        of the environment :attr:`grid2op.Environment.Environment.action_space` instead.

        This is used to create a BaseAction instance. Preferably, :class:`BaseAction` should be created with
        :class:`ActionSpace`.

        IMPORTANT: Use :func:`ActionSpace.__call__` or :func:`ActionSpace.sample` to generate a valid action.
        """
        GridObjects.__init__(self)

        # line status "set": -1 disconnect / 0 do nothing / +1 reconnect
        # False(line is disconnected) / True(line is connected)
        self._set_line_status = np.full(shape=self.n_line, fill_value=0, dtype=dt_int)
        # line status "change": True means toggle the status of that line
        self._switch_line_status = np.full(
            shape=self.n_line, fill_value=False, dtype=dt_bool
        )

        # injection change (keys: "load_p", "load_q", "prod_p", "prod_v")
        self._dict_inj = {}

        # topology changed: "set" (-1/0/1/2) and "change" (toggle bus) vectors
        self._set_topo_vect = np.full(shape=self.dim_topo, fill_value=0, dtype=dt_int)
        self._change_bus_vect = np.full(
            shape=self.dim_topo, fill_value=False, dtype=dt_bool
        )

        # add the hazards and maintenance useful for saving.
        self._hazards = np.full(shape=self.n_line, fill_value=False, dtype=dt_bool)
        self._maintenance = np.full(shape=self.n_line, fill_value=False, dtype=dt_bool)

        # redispatching vector (MW added to each generator setpoint)
        self._redispatch = np.full(shape=self.n_gen, fill_value=0.0, dtype=dt_float)

        # storage unit vector (MW, load convention: >0 charge, <0 discharge)
        self._storage_power = np.full(
            shape=self.n_storage, fill_value=0.0, dtype=dt_float
        )

        # curtailment of renewable energy (ratio of Pmax; -1.0 means "no curtailment asked")
        self._curtail = np.full(shape=self.n_gen, fill_value=-1.0, dtype=dt_float)

        # caches, lazily computed
        self._vectorized = None
        self._lines_impacted = None
        self._subs_impacted = None

        # shunts (NaN / 0 mean "do not modify")
        if self.shunts_data_available:
            self.shunt_p = np.full(
                shape=self.n_shunt, fill_value=np.NaN, dtype=dt_float
            )
            self.shunt_q = np.full(
                shape=self.n_shunt, fill_value=np.NaN, dtype=dt_float
            )
            self.shunt_bus = np.full(shape=self.n_shunt, fill_value=0, dtype=dt_int)
        else:
            self.shunt_p = None
            self.shunt_q = None
            self.shunt_bus = None

        self._single_act = True

        self._raise_alarm = np.full(
            shape=self.dim_alarms, dtype=dt_bool, fill_value=False
        )  # TODO

        self._raise_alert = np.full(
            shape=self.dim_alerts, dtype=dt_bool, fill_value=False
        )  # TODO

        # flags telling which "category" of modification this action carries
        # change the stuff
        self._modif_inj = False
        self._modif_set_bus = False
        self._modif_change_bus = False
        self._modif_set_status = False
        self._modif_change_status = False
        self._modif_redispatch = False
        self._modif_storage = False
        self._modif_curtailment = False
        self._modif_alarm = False
        self._modif_alert = False
@classmethod
def process_shunt_satic_data(cls):
if not cls.shunts_data_available:
# this is really important, otherwise things from grid2op base types will be affected
cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect)
cls.attr_list_set = copy.deepcopy(cls.attr_list_set)
# remove the shunts from the list to vector
for el in ["shunt_p", "shunt_q", "shunt_bus"]:
if el in cls.attr_list_vect:
try:
cls.attr_list_vect.remove(el)
except ValueError:
pass
cls.attr_list_set = set(cls.attr_list_vect)
return super().process_shunt_satic_data()
    def copy(self) -> "BaseAction":
        """Return a deep copy of this action (alias for :func:`BaseAction.__deepcopy__`)."""
        # sometimes this method is used...
        return self.__deepcopy__()
def _aux_copy(self, other):
attr_simple = [
"_modif_inj",
"_modif_set_bus",
"_modif_change_bus",
"_modif_set_status",
"_modif_change_status",
"_modif_redispatch",
"_modif_storage",
"_modif_curtailment",
"_modif_alarm",
"_modif_alert",
"_single_act",
]
attr_vect = [
"_set_line_status",
"_switch_line_status",
"_set_topo_vect",
"_change_bus_vect",
"_hazards",
"_maintenance",
"_redispatch",
"_storage_power",
"_curtail",
"_raise_alarm",
"_raise_alert",
]
if self.shunts_data_available:
attr_vect += ["shunt_p", "shunt_q", "shunt_bus"]
for attr_nm in attr_simple:
setattr(other, attr_nm, getattr(self, attr_nm))
for attr_nm in attr_vect:
getattr(other, attr_nm)[:] = getattr(self, attr_nm)
def __copy__(self) -> "BaseAction":
res = type(self)()
self._aux_copy(other=res)
# handle dict_inj
for k, el in self._dict_inj.items():
res._dict_inj[k] = copy.copy(el)
# just copy
res._vectorized = self._vectorized
res._lines_impacted = self._lines_impacted
res._subs_impacted = self._subs_impacted
return res
    @classmethod
    def process_shunt_satic_data(cls):
        # NOTE(review): this redefinition shadows the earlier
        # ``process_shunt_satic_data`` declared above in this class, so the
        # shunt-attribute cleanup performed there is never executed --
        # confirm whether this second definition is intentional
        return super().process_shunt_satic_data()
def __deepcopy__(self, memodict={}) -> "BaseAction":
res = type(self)()
self._aux_copy(other=res)
# handle dict_inj
for k, el in self._dict_inj.items():
res._dict_inj[k] = copy.deepcopy(el, memodict)
# just copy
res._vectorized = copy.deepcopy(self._vectorized, memodict)
res._lines_impacted = copy.deepcopy(self._lines_impacted, memodict)
res._subs_impacted = copy.deepcopy(self._subs_impacted, memodict)
return res
def _aux_serialize_add_key_change(self, attr_nm, dict_key, res):
tmp_ = [int(id_) for id_, val in enumerate(getattr(self, attr_nm)) if val]
if tmp_:
res[dict_key] = tmp_
def _aux_serialize_add_key_set(self, attr_nm, dict_key, res):
tmp_ = [(int(id_), int(val)) for id_, val in enumerate(getattr(self, attr_nm)) if val != 0.]
if tmp_:
res[dict_key] = tmp_
    def as_serializable_dict(self) -> dict:
        """
        This method returns the action as a dictionary that can be serialized using the "json" module.

        It can be used to store the action in a grid2op independent format (the default action serialization, for speed, writes actions to numpy array.
        The size of these arrays can change depending on grid2op versions, especially if some different types of actions are implemented).

        Once you have these dictionaries, you can use them to build back the action from the action space.

        Examples
        ---------
        It can be used like:

        .. code-block:: python

            import grid2op
            env_name = "l2rpn_case14_sandbox"  # or anything else
            env = grid2op.make(env_name)

            act = env.action_space(...)
            dict_ = act.as_serializable_dict()  # you can save this dict with the json library
            act2 = env.action_space(dict_)
            act == act2

        """
        res = {}
        # bool elements
        if self._modif_alert:
            res["raise_alert"] = [
                int(id_) for id_, val in enumerate(self._raise_alert) if val
            ]
        if self._modif_alarm:
            res["raise_alarm"] = [
                int(id_) for id_, val in enumerate(self._raise_alarm) if val
            ]
        if self._modif_change_bus:
            res["change_bus"] = {}
            self._aux_serialize_add_key_change("load_change_bus", "loads_id", res["change_bus"])
            self._aux_serialize_add_key_change("gen_change_bus", "generators_id", res["change_bus"])
            self._aux_serialize_add_key_change("line_or_change_bus", "lines_or_id", res["change_bus"])
            self._aux_serialize_add_key_change("line_ex_change_bus", "lines_ex_id", res["change_bus"])
            self._aux_serialize_add_key_change("storage_change_bus", "storages_id", res["change_bus"])
        if self._modif_change_status:
            res["change_line_status"] = [
                int(id_) for id_, val in enumerate(self._switch_line_status) if val
            ]

        # int elements
        if self._modif_set_bus:
            res["set_bus"] = {}
            self._aux_serialize_add_key_set("load_set_bus", "loads_id", res["set_bus"])
            self._aux_serialize_add_key_set("gen_set_bus", "generators_id", res["set_bus"])
            self._aux_serialize_add_key_set("line_or_set_bus", "lines_or_id", res["set_bus"])
            self._aux_serialize_add_key_set("line_ex_set_bus", "lines_ex_id", res["set_bus"])
            self._aux_serialize_add_key_set("storage_set_bus", "storages_id", res["set_bus"])
        if self._modif_set_status:
            res["set_line_status"] = [
                (int(id_), int(val))
                for id_, val in enumerate(self._set_line_status)
                if val != 0
            ]

        # float elements
        if self._modif_redispatch:
            res["redispatch"] = [
                (int(id_), float(val))
                for id_, val in enumerate(self._redispatch)
                if val != 0.0
            ]
        if self._modif_storage:
            res["set_storage"] = [
                (int(id_), float(val))
                for id_, val in enumerate(self._storage_power)
                if val != 0.0
            ]
        if self._modif_curtailment:
            # -1 is the "no curtailment" sentinel: only other values are exported
            res["curtail"] = [
                (int(id_), float(val))
                for id_, val in enumerate(self._curtail)
                if val != -1
            ]

        # more advanced options
        if self._modif_inj:
            res["injection"] = {}
            for ky in ["prod_p", "prod_v", "load_p", "load_q"]:
                if ky in self._dict_inj:
                    res["injection"][ky] = [float(val) for val in self._dict_inj[ky]]
            if not res["injection"]:
                del res["injection"]

        # shunts: only the finite (p, q) and non-zero (bus) entries are exported
        if type(self).shunts_data_available:
            res["shunt"] = {}
            if np.any(np.isfinite(self.shunt_p)):
                res["shunt"]["shunt_p"] = [
                    (int(sh_id), float(val)) for sh_id, val in enumerate(self.shunt_p) if np.isfinite(val)
                ]
            if np.any(np.isfinite(self.shunt_q)):
                res["shunt"]["shunt_q"] = [
                    (int(sh_id), float(val)) for sh_id, val in enumerate(self.shunt_q) if np.isfinite(val)
                ]
            if np.any(self.shunt_bus != 0):
                res["shunt"]["shunt_bus"] = [
                    (int(sh_id), int(val))
                    for sh_id, val in enumerate(self.shunt_bus)
                    if val != 0
                ]
            if not res["shunt"]:
                del res["shunt"]
        return res
@classmethod
def _add_shunt_data(cls):
if cls.shunt_added is False and cls.shunts_data_available:
cls.shunt_added = True
cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect)
cls.attr_list_vect += ["shunt_p", "shunt_q", "shunt_bus"]
cls.authorized_keys = copy.deepcopy(cls.authorized_keys)
cls.authorized_keys.add("shunt")
cls.attr_nan_list_set.add("shunt_p")
cls.attr_nan_list_set.add("shunt_q")
cls._update_value_set()
def alarm_raised(self) -> np.ndarray:
"""
INTERNAL
.. warning::
/!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\
This function is used to know if the given action aimed at raising an alarm or not.
Returns
-------
res: numpy array
The indexes of the areas where the agent has raised an alarm.
"""
return np.where(self._raise_alarm)[0]
def alert_raised(self) -> np.ndarray:
"""
INTERNAL
This function is used to know if the given action aimed at raising an alert or not.
Returns
-------
res: numpy array
The indexes of the lines where the agent has raised an alert.
"""
return np.where(self._raise_alert)[0]
    @classmethod
    def process_grid2op_compat(cls):
        """Adapt the class attributes so that data saved with an older grid2op
        version (without storage / curtailment / alarms / alerts) can still be loaded."""
        if cls.glop_version == cls.BEFORE_COMPAT_VERSION:
            # oldest version: no storage and no curtailment available

            # this is really important, otherwise things from grid2op base types will be affected
            cls.authorized_keys = copy.deepcopy(cls.authorized_keys)
            cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect)
            cls.attr_list_set = copy.deepcopy(cls.attr_list_set)

            # deactivate storage
            cls.set_no_storage()
            if "set_storage" in cls.authorized_keys:
                cls.authorized_keys.remove("set_storage")
            if "_storage_power" in cls.attr_list_vect:
                cls.attr_list_vect.remove("_storage_power")
                cls.attr_list_set = set(cls.attr_list_vect)

            # remove the curtailment
            if "curtail" in cls.authorized_keys:
                cls.authorized_keys.remove("curtail")
            if "_curtail" in cls.attr_list_vect:
                cls.attr_list_vect.remove("_curtail")
                cls.attr_list_set = set(cls.attr_list_vect)

        # NOTE(review): the comparisons below are lexicographic string
        # comparisons, which misorder versions once a component reaches two
        # digits (e.g. "1.10.0" < "1.6.0" is True) -- confirm against how
        # glop_version strings are produced
        if cls.glop_version < "1.6.0":
            # this feature did not exist before.
            cls.dim_alarms = 0

        if cls.glop_version < "1.9.1":
            # this feature did not exist before.
            cls.dim_alerts = 0
def _reset_modified_flags(self):
self._modif_inj = False
self._modif_set_bus = False
self._modif_change_bus = False
self._modif_set_status = False
self._modif_change_status = False
self._modif_redispatch = False
self._modif_storage = False
self._modif_curtailment = False
self._modif_alarm = False
self._modif_alert = False
def can_affect_something(self) -> bool:
"""
This functions returns True if the current action has any chance to change the grid.
Notes
-----
This does not say however if the action will indeed modify something somewhere !
"""
return (
self._modif_inj
or self._modif_set_bus
or self._modif_change_bus
or self._modif_set_status
or self._modif_change_status
or self._modif_redispatch
or self._modif_storage
or self._modif_curtailment
or self._modif_alarm
or self._modif_alert
)
def _get_array_from_attr_name(self, attr_name):
if hasattr(self, attr_name):
res = super()._get_array_from_attr_name(attr_name)
else:
if attr_name in self._dict_inj:
res = self._dict_inj[attr_name]
else:
if attr_name == "prod_p" or attr_name == "prod_v":
res = np.full(self.n_gen, fill_value=0.0, dtype=dt_float)
elif attr_name == "load_p" or attr_name == "load_q":
res = np.full(self.n_load, fill_value=0.0, dtype=dt_float)
else:
raise Grid2OpException(
'Impossible to find the attribute "{}" '
'into the BaseAction of type "{}"'.format(attr_name, type(self))
)
return res
    def _post_process_from_vect(self):
        """Recompute every ``_modif_*`` flag from the underlying vectors
        (called after the action has been filled from its vector representation)."""
        self._modif_inj = self._dict_inj != {}
        self._modif_set_bus = np.any(self._set_topo_vect != 0)
        self._modif_change_bus = np.any(self._change_bus_vect)
        self._modif_set_status = np.any(self._set_line_status != 0)
        self._modif_change_status = np.any(self._switch_line_status)
        # non-finite redispatch entries do not count as a modification
        self._modif_redispatch = np.any(
            np.isfinite(self._redispatch) & (self._redispatch != 0.0)
        )
        self._modif_storage = np.any(self._storage_power != 0.0)
        # -1.0 is the "no curtailment" sentinel value
        self._modif_curtailment = np.any(self._curtail != -1.0)
        self._modif_alarm = np.any(self._raise_alarm)
        self._modif_alert = np.any(self._raise_alert)
def _assign_attr_from_name(self, attr_nm, vect):
if hasattr(self, attr_nm):
if attr_nm not in type(self).attr_list_set:
raise AmbiguousAction(
f"Impossible to modify attribute {attr_nm} with this action type."
)
super()._assign_attr_from_name(attr_nm, vect)
self._post_process_from_vect()
else:
if np.any(np.isfinite(vect)):
if np.any(vect != 0.0):
self._dict_inj[attr_nm] = vect
    def check_space_legit(self):
        """
        This method allows to check if this method is ambiguous **per se** (defined more formally as:
        whatever the observation at time *t*, and the changes that can occur between *t* and *t+1*, this
        action will be ambiguous).

        For example, an action that try to assign something to busbar 3 will be ambiguous *per se*. An action
        that tries to dispatch a non dispatchable generator will be also ambiguous *per se*.

        However, an action that "switch" (change) the status (connected / disconnected) of a powerline can be
        ambiguous and it will not be detected here. This is because the ambiguity depends on the current state
        of the powerline:

        - if the powerline is disconnected, changing its status means reconnecting it. And we cannot reconnect a
          powerline without specifying on which bus.
        - on the contrary if the powerline is connected, changing its status means disconnecting it, which is
          always feasible.

        In case of "switch" as we see here, the action can be ambiguous, but not ambiguous *per se*. This method
        will **never** throw any error in this case.

        Raises
        -------
        :class:`grid2op.Exceptions.AmbiguousAction`
            Or any of its more precise subclasses, depending on which assumption is not met.
        """
        # delegate to the full ambiguity check (raises on failure, returns nothing)
        self._check_for_ambiguity()
def get_set_line_status_vect(self) -> np.ndarray:
"""
Computes and returns a vector that can be used in the :func:`BaseAction.__call__` with the keyword
"set_status" if building an :class:`BaseAction`.
**NB** this vector is not the internal vector of this action but corresponds to "do nothing" action.
Returns
-------
res: :class:`numpy.array`, dtype:dt_int
A vector that doesn't affect the grid, but can be used in :func:`BaseAction.__call__` with the keyword
"set_status" if building an :class:`BaseAction`.
"""
return np.full(shape=self.n_line, fill_value=0, dtype=dt_int)
def get_change_line_status_vect(self) -> np.ndarray:
    """
    Build a "do nothing" vector suitable for the "change_status" keyword of
    :func:`BaseAction.__call__` when building a :class:`BaseAction`.

    **NB** this is NOT the internal vector of this action: it is a fresh,
    all-``False`` (no change) vector.

    Returns
    -------
    res: :class:`numpy.array`, dtype:dt_bool
        A vector of ``False`` (one entry per powerline) that does not affect the grid.
    """
    return np.zeros(self.n_line, dtype=dt_bool)
def __eq__(self, other) -> bool:
    """
    Test the equality of two actions.

    2 actions are said to be identical if they have the same impact on the powergrid.
    This is unrelated to their respective class. For example, if an Action is of class
    :class:`Action` and doesn't act on the injection, it can be equal to an Action of
    the derived class :class:`TopologyAction` (if the topological modifications are the
    same of course).

    This implies that the attributes :attr:`Action.authorized_keys` is not checked in
    this method.

    Note that if 2 actions don't act on the same powergrid, or on the same backend
    (eg number of loads, or generators are not the same in *self* and *other*, or they
    are not in the same order) then action will be declared as different.

    **Known issue** if two backends are different, but the description of the _grid are
    identical (ie all n_gen, n_load, n_line, sub_info, dim_topo, all vectors
    \*_to_subid, and \*_pos_topo_vect are identical) then this method will not detect
    the backend are different, and the action could be declared as identical. For now,
    this is only a theoretical behavior: if everything is the same, then probably, up
    to the naming convention, then the power grids are identical too.

    Parameters
    ----------
    other: :class:`BaseAction`
        An instance of class Action to which "self" will be compared.

    Returns
    -------
    res: ``bool``
        Whether the actions are equal or not.
    """
    # comparing against None (e.g. unset action) is always "different"
    if other is None:
        return False
    # check that the underlying grid is the same in both instances
    same_grid = type(self).same_grid_class(type(other))
    if not same_grid:
        return False
    # _grid is the same, now I test that the injections modifications are the same
    same_action = self._modif_inj == other._modif_inj
    same_action = same_action and self._dict_inj.keys() == other._dict_inj.keys()
    if not same_action:
        return False
    # all injections are the same: NaN entries mean "not modified", so values are
    # only compared where both sides are finite (and the finite masks must match)
    for el in self._dict_inj.keys():
        me_inj = self._dict_inj[el]
        other_inj = other._dict_inj[el]
        tmp_me = np.isfinite(me_inj)
        tmp_other = np.isfinite(other_inj)
        if not np.all(tmp_me == tmp_other) or not np.all(
            me_inj[tmp_me] == other_inj[tmp_other]
        ):
            return False
    # same line status (both the "set" and the "change" parts)
    if (self._modif_set_status != other._modif_set_status) or not np.all(
        self._set_line_status == other._set_line_status
    ):
        return False
    if (self._modif_change_status != other._modif_change_status) or not np.all(
        self._switch_line_status == other._switch_line_status
    ):
        return False
    # redispatching is same
    if (self._modif_redispatch != other._modif_redispatch) or not np.all(
        self._redispatch == other._redispatch
    ):
        return False
    # storage is same (NaN-aware comparison, as for injections)
    me_inj = self._storage_power
    other_inj = other._storage_power
    tmp_me = np.isfinite(me_inj)
    tmp_other = np.isfinite(other_inj)
    if not np.all(tmp_me == tmp_other) or not np.all(
        me_inj[tmp_me] == other_inj[tmp_other]
    ):
        return False
    # curtailment
    if (self._modif_curtailment != other._modif_curtailment) or not np.array_equal(
        self._curtail, other._curtail
    ):
        return False
    # alarm
    if (self._modif_alarm != other._modif_alarm) or not np.array_equal(
        self._raise_alarm, other._raise_alarm
    ):
        return False
    # alert
    if (self._modif_alert != other._modif_alert) or not np.array_equal(
        self._raise_alert, other._raise_alert
    ):
        return False
    # same topology changes
    if (self._modif_set_bus != other._modif_set_bus) or not np.all(
        self._set_topo_vect == other._set_topo_vect
    ):
        return False
    if (self._modif_change_bus != other._modif_change_bus) or not np.all(
        self._change_bus_vect == other._change_bus_vect
    ):
        return False
    # shunts are the same (NaN entries mean "not modified" for p and q)
    if self.shunts_data_available:
        if self.n_shunt != other.n_shunt:
            return False
        is_ok_me = np.isfinite(self.shunt_p)
        is_ok_ot = np.isfinite(other.shunt_p)
        if np.any(is_ok_me != is_ok_ot):
            return False
        if not np.all(self.shunt_p[is_ok_me] == other.shunt_p[is_ok_ot]):
            return False
        is_ok_me = np.isfinite(self.shunt_q)
        is_ok_ot = np.isfinite(other.shunt_q)
        if np.any(is_ok_me != is_ok_ot):
            return False
        if not np.all(self.shunt_q[is_ok_me] == other.shunt_q[is_ok_ot]):
            return False
        if not np.all(self.shunt_bus == other.shunt_bus):
            return False
    return True
def _dont_affect_topology(self) -> bool:
return (
(not self._modif_set_bus)
and (not self._modif_change_bus)
and (not self._modif_set_status)
and (not self._modif_change_status)
)
def get_topological_impact(self, powerline_status=None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Gives information about the element being impacted by this action.

    **NB** The impacted elements can be used by :class:`grid2op.BaseRules` to determine
    whether or not an action is legal or not.

    **NB** The impacted are the elements that can potentially be impacted by the
    action. This does not mean they will be impacted. For examples:

    * If an action from an :class:`grid2op.BaseAgent` reconnect a powerline, but this
      powerline is being disconnected by a hazard at the same time step, then this
      action will not be implemented on the grid. However, it this powerline couldn't
      be reconnected for some reason (for example it was already out of order) the
      action will still be declared illegal, even if it has NOT impacted the powergrid.
    * If an action tries to disconnect a powerline already disconnected, it will
      "impact" this powergrid. This means that even if the action will do nothing, it
      disconnecting this powerline is against the rules, then the action will be
      illegal.
    * If an action tries to change the topology of a substation, but this substation
      is already at the target topology, the same mechanism applies. The action will
      "impact" the substation, even if, in the end, it consists of doing nothing.

    Any such "change" that would be illegal is declared as "illegal" regardless of the
    real impact of this action on the powergrid.

    Parameters
    ----------
    powerline_status: :class:`numpy.ndarray`, dtype:dt_bool, optional
        Current connection status of each powerline (``True`` = connected). When
        ``None``, every line is treated as if it were disconnected (the conservative
        assumption, see the body below).

    Returns
    -------
    lines_impacted: :class:`numpy.ndarray`, dtype:dt_bool
        A vector with the same size as the number of powerlines in the grid
        (:attr:`BaseAction.n_line`) with for each component ``True`` if the line
        STATUS is impacted by the action, and ``False`` otherwise. See
        :attr:`BaseAction._lines_impacted` for more information.

    subs_impacted: :class:`numpy.ndarray`, dtype:dt_bool
        A vector with the same size as the number of substations in the grid with for
        each component ``True`` if the substation is impacted by the action, and
        ``False`` otherwise. See :attr:`BaseAction._subs_impacted` for more
        information.

    Examples
    --------
    You can use this function like;

    .. code-block:: python

        import grid2op
        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)

        # get an action
        action = env.action_space.sample()
        # inspect its impact
        lines_impacted, subs_impacted = action.get_topological_impact()

        for line_id in np.where(lines_impacted)[0]:
            print(f"The line {env.name_line[line_id]} with id {line_id} is impacted by this action")

        print(action)
    """
    if self._dont_affect_topology():
        # action does not impact the topology at all:
        # both result vectors are all-False
        self._lines_impacted = np.full(
            shape=self.n_line, fill_value=False, dtype=dt_bool
        )
        self._subs_impacted = np.full(
            shape=self.sub_info.shape, fill_value=False, dtype=dt_bool
        )
        return self._lines_impacted, self._subs_impacted

    if powerline_status is None:
        # unknown grid state: conservatively treat every line as disconnected
        isnotconnected = np.full(self.n_line, fill_value=True, dtype=dt_bool)
    else:
        isnotconnected = ~powerline_status

    # a line is impacted if its status is "changed" or explicitly "set"
    self._lines_impacted = self._switch_line_status | (self._set_line_status != 0)
    self._subs_impacted = np.full(
        shape=self.sub_info.shape, fill_value=False, dtype=dt_bool
    )

    # compute the changes of the topo vector
    effective_change = self._change_bus_vect | (self._set_topo_vect != 0)

    # remove the change due to powerline only: bus modifications at the ends of a
    # line whose status is already impacted (and which is not connected) do not
    # count as a substation change
    effective_change[
        self.line_or_pos_topo_vect[self._lines_impacted & isnotconnected]
    ] = False
    effective_change[
        self.line_ex_pos_topo_vect[self._lines_impacted & isnotconnected]
    ] = False

    # i can change also the status of a powerline by acting on its extremity
    # first sub case i connected the powerline by setting origin OR extremity to positive stuff
    if powerline_status is not None:
        # if we don't know the state of the grid, we don't consider
        # these "improvments": we consider a powerline is never
        # affected if its bus is modified at any of its ends.
        connect_set_or = (self._set_topo_vect[self.line_or_pos_topo_vect] > 0) & (
            isnotconnected
        )
        self._lines_impacted |= connect_set_or
        effective_change[self.line_or_pos_topo_vect[connect_set_or]] = False
        effective_change[self.line_ex_pos_topo_vect[connect_set_or]] = False
        connect_set_ex = (self._set_topo_vect[self.line_ex_pos_topo_vect] > 0) & (
            isnotconnected
        )
        self._lines_impacted |= connect_set_ex
        effective_change[self.line_or_pos_topo_vect[connect_set_ex]] = False
        effective_change[self.line_ex_pos_topo_vect[connect_set_ex]] = False
        # second sub case i disconnected the powerline by setting origin or extremity to negative stuff
        disco_set_or = (self._set_topo_vect[self.line_or_pos_topo_vect] < 0) & (
            powerline_status
        )
        self._lines_impacted |= disco_set_or
        effective_change[self.line_or_pos_topo_vect[disco_set_or]] = False
        effective_change[self.line_ex_pos_topo_vect[disco_set_or]] = False
        disco_set_ex = (self._set_topo_vect[self.line_ex_pos_topo_vect] < 0) & (
            powerline_status
        )
        self._lines_impacted |= disco_set_ex
        effective_change[self.line_or_pos_topo_vect[disco_set_ex]] = False
        effective_change[self.line_ex_pos_topo_vect[disco_set_ex]] = False

    # a substation is impacted if any of its remaining effective changes survive
    self._subs_impacted[self._topo_vect_to_sub[effective_change]] = True
    return self._lines_impacted, self._subs_impacted
def remove_line_status_from_topo(self,
                                 obs: "grid2op.Observation.BaseObservation" = None,
                                 check_cooldown: bool = True):
    """
    .. versionadded:: 1.8.0

    This function prevent an action to act on a powerline status if
    through the "set_bus" and "change_bus" part if a cooldown applies (
    see :ref:`action_powerline_status` for cases where this can apply)

    For example:

    .. code-block:: python

        import grid2op
        import numpy as np
        env_name = "l2rpn_icaps_2021_small"
        env = grid2op.make(env_name)
        env.set_id(0)
        env.seed(0)
        obs = env.reset()
        act = env.action_space({"set_bus": {"substations_id": [(27, [1, -1, 2, 2, 1])]}})
        obs, reward, done, info = env.step(act)
        act_sub28 = env.action_space({"set_bus": {"substations_id": [(28, [1, 2, 2, 1, 1])]}})
        obs, reward, done, info = env.step(act_sub28)
        # >>> info["exception"] : IllegalAction('Powerline with ids [42] have been modified illegally (cooldown)')

    This is because in the second action, the powerline 42 is assigned to bus 2, so it
    would be reconnected, which is not possible due to the cooldown.

    The behaviour is (for all powerlines where a cooldown applies *ie*
    `obs.time_before_cooldown_sub > 0`):

    - if this line is disconnected and is assigned to a bus 1 or 2 at a substation for
      one of its end, then this part of the action is ignored (it has not effect: bus
      will NOT be set)
    - if this line is connected and it is assigned to bus "-1" at one of its side
      (extremity or origin side) then this part of the action is ignored (bus will NOT
      be "set")
    - if this line is disconnected and the bus to one of its side is "changed", then
      this part is ignored: bus will NOT be changed

    And regardless of cooldowns it also:

    - if a powerline is affected to a certain bus at one of its end with `set_bus`
      (for example `set_bus` to 1 or 2) and at the same time disconnected
      (`set_line_status` is -1) then the `set_bus` part is ignore to avoid
      `AmbiguousAction`
    - if a powerline is disconnect from its bus at one of its end with `set_bus` (for
      example `set_bus` to -1) and at the same time reconnected (`set_line_status` is
      1) then the `set_bus` part is ignore to avoid `AmbiguousAction`
    - if a powerline is affected to a certain bus at one of its end with `change_bus`
      (`change_bus` is ``True``) and at the same time disconnected (`set_line_status`
      is -1) then the `change_bus` part is ignore to avoid `AmbiguousAction`

    .. warning::
        This modifies the action in-place, especially the "set_bus" and "change_bus"
        attributes.

    .. note::
        This function does not check the cooldowns if you specify
        `check_cooldown=False`

    .. note::
        As from version 1.9.0 you are no longer forced to provide an observation if
        `check_cooldown=False`

    Examples
    ---------
    To avoid the issue explained above, you can now do:

    .. code-block:: python

        import grid2op
        import numpy as np
        env_name = "l2rpn_icaps_2021_small"
        env = grid2op.make(env_name)
        env.set_id(0)
        env.seed(0)
        obs = env.reset()
        act = env.action_space({"set_bus": {"substations_id": [(27, [1, -1, 2, 2, 1])]}})
        obs, reward, done, info = env.step(act)
        act_sub28_clean = env.action_space({"set_bus": {"substations_id": [(28, [1, 2, 2, 1, 1])]}})
        act_sub28_clean.remove_line_status_from_topo(obs)
        print(act_sub28_clean)
        # This action will:
        #   - NOT change anything to the injections
        #   - NOT perform any redispatching action
        #   - NOT modify any storage capacity
        #   - NOT perform any curtailment
        #   - NOT force any line status
        #   - NOT switch any line status
        #   - NOT switch anything in the topology
        #   - Set the bus of the following element(s):
        #     - Assign bus 1 to line (extremity) id 41 [on substation 28]
        #     - Assign bus 2 to line (origin) id 44 [on substation 28]
        #     - Assign bus 1 to line (extremity) id 57 [on substation 28]
        #     - Assign bus 1 to generator id 16 [on substation 28]
        #   - NOT raise any alarm
        #   - NOT raise any alert
        obs, reward, done, info = env.step(act_sub28_clean)
        # >>> info["exception"] : []

    .. note::
        The part of the action `act_sub28_clean` that would
        "*- Assign bus 2 to line (extremity) id 42 [on substation 28]*" has been
        removed because powerline 42 is disconnected in the observation and under a
        cooldown.

    Parameters
    ----------
    obs: :class:`grid2op.Observation.BaseObservation`
        The current observation

    check_cooldown: `bool`, optional
        If `True` (default) will modify the action only for the powerline impacted by
        a cooldown. Otherwise will modify all the powerlines.
    """
    if not check_cooldown:
        # every line is considered "under cooldown" so all of them are filtered
        line_under_cooldown = np.full(self.n_line, fill_value=True, dtype=dt_bool)
        if obs is None:
            # no observation: assume every line is simultaneously "connected" and
            # "disconnected" so that all three filters below apply everywhere
            connected = np.full(self.n_line, fill_value=True, dtype=dt_bool)
            disconnected = np.full(self.n_line, fill_value=True, dtype=dt_bool)
        else:
            connected = obs.line_status
            disconnected = ~obs.line_status
    else:
        line_under_cooldown = obs.time_before_cooldown_line > 0
        connected = obs.line_status
        disconnected = ~obs.line_status

    cls = type(self)
    # remove the "set" part that would cause a reconnection
    mask_reco = np.full(cls.dim_topo, fill_value=False)
    reco_or_ = np.full(cls.n_line, fill_value=False)
    reco_or_[(self._set_topo_vect[cls.line_or_pos_topo_vect] > 0) &
             disconnected & line_under_cooldown] = True
    mask_reco[cls.line_or_pos_topo_vect] = reco_or_

    reco_ex_ = np.full(cls.n_line, fill_value=False)
    reco_ex_[(self._set_topo_vect[cls.line_ex_pos_topo_vect] > 0) &
             disconnected & line_under_cooldown] = True
    mask_reco[cls.line_ex_pos_topo_vect] = reco_ex_
    self._set_topo_vect[mask_reco] = 0

    # remove the "set" that would cause a disconnection
    mask_disco = np.full(cls.dim_topo, fill_value=False)
    reco_or_ = np.full(cls.n_line, fill_value=False)
    reco_or_[(self._set_topo_vect[cls.line_or_pos_topo_vect] < 0) &
             connected & line_under_cooldown] = True
    mask_disco[cls.line_or_pos_topo_vect] = reco_or_

    reco_ex_ = np.full(cls.n_line, fill_value=False)
    reco_ex_[(self._set_topo_vect[cls.line_ex_pos_topo_vect] < 0) &
             connected & line_under_cooldown] = True
    mask_disco[cls.line_ex_pos_topo_vect] = reco_ex_
    self._set_topo_vect[mask_disco] = 0

    # remove the "change" part when powerlines is disconnected
    mask_disco = np.full(cls.dim_topo, fill_value=False)
    reco_or_ = np.full(cls.n_line, fill_value=False)
    reco_or_[self._change_bus_vect[cls.line_or_pos_topo_vect] &
             disconnected & line_under_cooldown] = True
    mask_disco[cls.line_or_pos_topo_vect] = reco_or_

    reco_ex_ = np.full(cls.n_line, fill_value=False)
    reco_ex_[self._change_bus_vect[cls.line_ex_pos_topo_vect] &
             disconnected & line_under_cooldown] = True
    mask_disco[cls.line_ex_pos_topo_vect] = reco_ex_
    self._change_bus_vect[mask_disco] = False
    return self
def reset(self):
    """
    INTERNAL USE ONLY

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Reset the action to the "do nothing" state: every modification vector is put
    back to its neutral value and every "modified" flag is cleared.
    """
    # line status: 0 means "do not change" (neutral value for "set")
    self._set_line_status[:] = 0
    self._switch_line_status[:] = False

    # injection change: an empty dict means "no injection modified"
    self._dict_inj = {}

    # topology changed
    self._set_topo_vect[:] = 0
    self._change_bus_vect[:] = False

    # add the hazards and maintenance usefull for saving.
    self._hazards[:] = False
    self._maintenance[:] = False

    # redispatching vector
    self._redispatch[:] = 0.0

    # storage
    self._storage_power[:] = 0.0

    # curtailment (-1.0 is the "no curtailment" sentinel)
    self._curtail[:] = -1.0

    # cached derived data is invalidated
    self._vectorized = None
    self._lines_impacted = None
    self._subs_impacted = None

    # shunts
    if self.shunts_data_available:
        # np.nan means "do not modify" for shunt p / q
        # (np.NaN was removed in NumPy 2.0; np.nan is the supported spelling)
        self.shunt_p[:] = np.nan
        self.shunt_q[:] = np.nan
        self.shunt_bus[:] = 0

    # alarm
    self._raise_alarm[:] = False

    # alert
    self._raise_alert[:] = False

    self._reset_modified_flags()
def _assign_iadd_or_warn(self, attr_name, new_value):
if attr_name not in self.attr_list_set:
old_value = getattr(self, attr_name)
new_is_finite = np.isfinite(new_value)
old_is_finite = np.isfinite(old_value)
new_finite = new_value[new_is_finite | old_is_finite]
old_finite = old_value[new_is_finite | old_is_finite]
if np.any(new_finite != old_finite):
warnings.warn(
type(self).ERR_ACTION_CUT.format(attr_name)
)
else:
getattr(self, attr_name)[:] = new_value
def __iadd__(self, other):
    """
    Add an action to this one.

    Adding an action to myself is equivalent to perform myself, and then perform
    other (but at the same step).

    Parameters
    ----------
    other: :class:`BaseAction`
        The action whose effects are merged into this one.

    Examples
    --------

    .. code-block:: python

        import grid2op
        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)

        act1 = env.action_space()
        act1.set_bus = ...  # for example
        print("before += :")
        print(act1)

        act2 = env.action_space()
        act2.redispatch = ...  # for example
        print(act2)

        act1 += act2
        print("after += ")
        print(act1)
    """
    # deal with injections: "other" overwrites "self" wherever it holds finite values
    for el in self.attr_list_vect:
        if el in other._dict_inj:
            if el not in self._dict_inj:
                self._dict_inj[el] = other._dict_inj[el]
            else:
                val = other._dict_inj[el]
                ok_ind = np.isfinite(val)
                self._dict_inj[el][ok_ind] = val[ok_ind]
    # warning if the action cannot be added
    for el in other._dict_inj:
        if el not in self.attr_list_set:
            warnings.warn(
                type(self).ERR_ACTION_CUT.format(el)
            )

    # redispatching: amounts accumulate
    redispatching = other._redispatch
    if np.any(redispatching != 0.0):
        if "_redispatch" not in self.attr_list_set:
            warnings.warn(
                type(self).ERR_ACTION_CUT.format("_redispatch")
            )
        else:
            ok_ind = np.isfinite(redispatching)
            self._redispatch[ok_ind] += redispatching[ok_ind]

    # storage: setpoints accumulate
    set_storage = other._storage_power
    # NB `np.any(...)` here is a scalar: if any setpoint is non-zero, every finite
    # entry is added (adding the zero entries is harmless)
    ok_ind = np.isfinite(set_storage) & np.any(set_storage != 0.0)
    if np.any(ok_ind):
        if "_storage_power" not in self.attr_list_set:
            warnings.warn(
                type(self).ERR_ACTION_CUT.format("_storage_power")
            )
        else:
            self._storage_power[ok_ind] += set_storage[ok_ind]

    # curtailment
    curtailment = other._curtail
    ok_ind = np.isfinite(curtailment) & (curtailment != -1.0)
    if np.any(ok_ind):
        if "_curtail" not in self.attr_list_set:
            warnings.warn(
                type(self).ERR_ACTION_CUT.format("_curtail")
            )
        else:
            # new curtailment of the results should be
            # the curtailment of rhs, only when rhs acts
            # on curtailment
            self._curtail[ok_ind] = curtailment[ok_ind]

    # set and change status
    other_set = other._set_line_status
    other_change = other._switch_line_status
    me_set = 1 * self._set_line_status
    me_change = copy.deepcopy(self._switch_line_status)

    # i change, but so does the other, i do nothing
    canceled_change = other_change & me_change
    # i dont change, the other change, i change
    update_change = other_change & ~me_change
    # Deferred apply to prevent conflicts
    me_change[canceled_change] = False
    me_change[update_change] = True

    # i change, but the other set, it's erased
    # NB: previously written `other_set != 0 & me_change`, which parses as
    # `other_set != (0 & me_change)` due to `&` binding tighter than `!=`;
    # the parenthesized form below is the intended (and equivalent) meaning
    me_change[(other_set != 0) & me_change] = False

    # i set, but the other change, set to the opposite
    inverted_set = other_change & (me_set != 0)
    # so change +1 becomes -1 and -1 becomes +1
    me_set[inverted_set] *= -1
    # Has been inverted, cancel change
    me_change[inverted_set] = False

    # i set, the other set
    me_set[other_set != 0] = other_set[other_set != 0]

    self._assign_iadd_or_warn("_set_line_status", me_set)
    self._assign_iadd_or_warn("_switch_line_status", me_change)

    # set and change bus
    other_set = other._set_topo_vect
    other_change = other._change_bus_vect
    me_set = 1 * self._set_topo_vect
    me_change = copy.deepcopy(self._change_bus_vect)

    # i change, but so does the other, i do nothing
    canceled_change = other_change & me_change
    # i dont change, the other change, i change
    update_change = other_change & ~me_change
    # Deferred apply to prevent conflicts
    me_change[canceled_change] = False
    me_change[update_change] = True

    # i change, but the other set, it's erased
    # (same precedence fix as above)
    me_change[(other_set != 0) & me_change] = False

    # i set, but the other change, set to the opposite
    inverted_set = other_change & (me_set > 0)
    # so change +1 becomes +2 and +2 becomes +1
    me_set[inverted_set] -= 1  # 1 becomes 0 and 2 becomes 1
    me_set[inverted_set] *= -1  # 1 is 0 and 2 becomes -1
    me_set[inverted_set] += 2  # 1 is 2 and 2 becomes 1
    # Has been inverted, cancel change
    me_change[inverted_set] = False

    # i set, the other set
    me_set[other_set != 0] = other_set[other_set != 0]

    self._assign_iadd_or_warn("_set_topo_vect", me_set)
    self._assign_iadd_or_warn("_change_bus_vect", me_change)

    # shunts: "other" overwrites "self" wherever it holds a modification
    if self.shunts_data_available:
        val = other.shunt_p
        ok_ind = np.isfinite(val)
        shunt_p = 1.0 * self.shunt_p
        shunt_p[ok_ind] = val[ok_ind]
        self._assign_iadd_or_warn("shunt_p", shunt_p)

        val = other.shunt_q
        ok_ind = np.isfinite(val)
        shunt_q = 1.0 * self.shunt_q
        shunt_q[ok_ind] = val[ok_ind]
        self._assign_iadd_or_warn("shunt_q", shunt_q)

        val = other.shunt_bus
        ok_ind = val != 0
        shunt_bus = 1 * self.shunt_bus
        shunt_bus[ok_ind] = val[ok_ind]
        self._assign_iadd_or_warn("shunt_bus", shunt_bus)

    # alarm feature
    self._raise_alarm[other._raise_alarm] = True

    # line alert feature
    self._raise_alert[other._raise_alert] = True

    # the modif flags
    self._modif_change_bus = self._modif_change_bus or other._modif_change_bus
    self._modif_set_bus = self._modif_set_bus or other._modif_set_bus
    self._modif_change_status = (
        self._modif_change_status or other._modif_change_status
    )
    self._modif_set_status = self._modif_set_status or other._modif_set_status
    self._modif_inj = self._modif_inj or other._modif_inj
    self._modif_redispatch = self._modif_redispatch or other._modif_redispatch
    self._modif_storage = self._modif_storage or other._modif_storage
    self._modif_curtailment = self._modif_curtailment or other._modif_curtailment
    self._modif_alarm = self._modif_alarm or other._modif_alarm
    self._modif_alert = self._modif_alert or other._modif_alert

    return self
def __add__(self, other) -> "BaseAction":
"""
Implements the `+` operator for the action using the `+=` definition.
This function is not commutative !
Notes
-------
Be careful if two actions do not share the same type (for example you want to add act1
of type :class:`TopologyAction` to act2 of type :class:`DispatchAction`) the results of
`act1 + act2` might differ from what you expect.
The result will always of the same type as act1. In the above case, it means that the `dispatch`
part of `act2`will be ignored (because it is ignored in :class:`TopologyAction`).
This is why we recommend to using this class directly with the :class:`PlayableAction` or
from action directly generated with `env.action_space()`
"""
res = type(self)()
res += self
res += other
return res
def __call__(self) -> Tuple[dict, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, dict]:
    """
    INTERNAL USE ONLY

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    This method is used to return the effect of the current action in a format
    understandable by the backend. This format is detailed below.

    This function must also integrate the redispatching strategy for the BaseAction.

    It also performs a check of whether or not an action is "Ambiguous", eg an action
    that reconnect a powerline but doesn't specify on which bus to reconnect it is
    said to be ambiguous.

    If this :func:`BaseAction.__call__` is overloaded, the call of
    :func:`BaseAction._check_for_ambiguity` must be ensured by this the derived class.

    Returns
    -------
    dict_injection: :class:`dict`
        This dictionnary is :attr:`BaseAction._dict_inj`

    set_line_status: :class:`numpy.ndarray`, dtype:int
        This array is :attr:`BaseAction._set_line_status`

    switch_line_status: :class:`numpy.ndarray`, dtype:bool
        This array is :attr:`BaseAction._switch_line_status`

    set_topo_vect: :class:`numpy.ndarray`, dtype:int
        This array is :attr:`BaseAction._set_topo_vect`

    change_bus_vect: :class:`numpy.ndarray`, dtype:bool
        This array is :attr:`BaseAction._change_bus_vect`

    redispatch: :class:`numpy.ndarray`, dtype:float
        This array, that has the same size as the number of generators indicates for
        each generator the amount of redispatching performed by the action.

    storage_power: :class:`numpy.ndarray`, dtype:float
        Indicates, for all storage units, what is the production / absorbtion setpoint

    shunts: ``dict``
        A dictionary containing the shunts data, with keys: "shunt_p", "shunt_q" and
        "shunt_bus" and the convention, for "shun_p" and "shunt_q" that Nan means
        "don't change" and for shunt_bus: -1 => disconnect 0 don't change, and 1 / 2
        connect to bus 1 / 2

    Raises
    -------
    :class:`grid2op.Exceptions.AmbiguousAction`
        Or one of its derivate class.
    """
    self._check_for_ambiguity()

    dict_inj = self._dict_inj
    set_line_status = self._set_line_status
    switch_line_status = self._switch_line_status
    set_topo_vect = self._set_topo_vect
    change_bus_vect = self._change_bus_vect
    redispatch = self._redispatch
    storage_power = self._storage_power
    # remark: curtailment is handled by an algorithm in the environment, so it is
    # deliberately NOT part of the returned tuple (which therefore has 8 elements,
    # matching the return annotation above)
    shunts = {}
    if self.shunts_data_available:
        shunts["shunt_p"] = self.shunt_p
        shunts["shunt_q"] = self.shunt_q
        shunts["shunt_bus"] = self.shunt_bus
    # other remark: alarm and alert are not handled in the backend, this is why it does not appear here !
    return (
        dict_inj,
        set_line_status,
        switch_line_status,
        set_topo_vect,
        change_bus_vect,
        redispatch,
        storage_power,
        shunts,
    )
def _digest_shunt(self, dict_):
    """Load the "shunt" part of an action dictionary into the shunt vectors.

    Accepted sub-keys are "shunt_p", "shunt_q", "shunt_bus" and "set_bus"
    (the latter being an alias that also writes into ``self.shunt_bus``).
    Each value can be a full numpy vector, a list of ``(shunt_id, value)``
    pairs, or ``None`` (ignored).
    """
    if not self.shunts_data_available:
        return
    if "shunt" in dict_:
        ddict_ = dict_["shunt"]

        key_shunt_reco = {"set_bus", "shunt_p", "shunt_q", "shunt_bus"}
        for k in ddict_:
            if k not in key_shunt_reco:
                warn = "The key {} is not recognized by BaseAction when trying to modify the shunt.".format(
                    k
                )
                warn += " Recognized keys are {}".format(sorted(key_shunt_reco))
                warnings.warn(warn)
        # NB the zip below intentionally maps BOTH "shunt_bus" and "set_bus"
        # onto the same target vector (self.shunt_bus)
        for key_n, vect_self in zip(
            ["shunt_bus", "shunt_p", "shunt_q", "set_bus"],
            [self.shunt_bus, self.shunt_p, self.shunt_q, self.shunt_bus],
        ):
            if key_n in ddict_:
                tmp = ddict_[key_n]
                if isinstance(tmp, np.ndarray):
                    # complete shunt vector is provided
                    vect_self[:] = tmp
                elif isinstance(tmp, list):
                    # expected a list: (id shunt, new bus)
                    for (sh_id, new_bus) in tmp:
                        if sh_id < 0:
                            raise AmbiguousAction(
                                "Invalid shunt id {}. Shunt id should be positive".format(
                                    sh_id
                                )
                            )
                        if sh_id >= self.n_shunt:
                            raise AmbiguousAction(
                                "Invalid shunt id {}. Shunt id should be less than the number "
                                "of shunt {}".format(sh_id, self.n_shunt)
                            )
                        vect_self[sh_id] = new_bus
                elif tmp is None:
                    # explicit None means "leave untouched"
                    pass
                else:
                    raise AmbiguousAction(
                        "Invalid way to modify {} for shunts. It should be a numpy array or a "
                        "dictionary.".format(key_n)
                    )
def _digest_injection(self, dict_):
# I update the action
if "injection" in dict_:
if dict_["injection"] is not None:
tmp_d = dict_["injection"]
self._modif_inj = True
for k in tmp_d:
if k in self.attr_list_set:
self._dict_inj[k] = np.array(tmp_d[k]).astype(dt_float)
# TODO check the size based on the input data !
else:
warn = (
"The key {} is not recognized by BaseAction when trying to modify the injections."
"".format(k)
)
warnings.warn(warn)
def _digest_setbus(self, dict_):
if "set_bus" in dict_:
self._modif_set_bus = True
if dict_["set_bus"] is None:
# no real action has been made
return
if isinstance(dict_["set_bus"], dict):
ddict_ = dict_["set_bus"]
handled = False
if "loads_id" in ddict_:
self.load_set_bus = ddict_["loads_id"]
handled = True
if "generators_id" in ddict_:
self.gen_set_bus = ddict_["generators_id"]
handled = True
if "lines_or_id" in ddict_:
self.line_or_set_bus = ddict_["lines_or_id"]
handled = True
if "lines_ex_id" in ddict_:
self.line_ex_set_bus = ddict_["lines_ex_id"]
handled = True
if "storages_id" in ddict_:
self.storage_set_bus = ddict_["storages_id"]
handled = True
if "substations_id" in ddict_:
self.sub_set_bus = ddict_["substations_id"]
handled = True
if not handled:
msg = 'Invalid way to set the topology. When dict_["set_bus"] is a dictionary it should have'
msg += (
' at least one of "loads_id", "generators_id", "lines_or_id", '
)
msg += '"lines_ex_id" or "substations_id"'
msg += " as keys. None where found. Current used keys are: "
msg += "{}".format(sorted(ddict_.keys()))
raise AmbiguousAction(msg)
else:
self.set_bus = dict_["set_bus"]
def _digest_change_bus(self, dict_):
if "change_bus" in dict_:
self._modif_change_bus = True
if dict_["change_bus"] is None:
# no real action has been made
return
if isinstance(dict_["change_bus"], dict):
ddict_ = dict_["change_bus"]
handled = False
if "loads_id" in ddict_:
self.load_change_bus = ddict_["loads_id"]
handled = True
if "generators_id" in ddict_:
self.gen_change_bus = ddict_["generators_id"]
handled = True
if "lines_or_id" in ddict_:
self.line_or_change_bus = ddict_["lines_or_id"]
handled = True
if "lines_ex_id" in ddict_:
self.line_ex_change_bus = ddict_["lines_ex_id"]
handled = True
if "storages_id" in ddict_:
self.storage_change_bus = ddict_["storages_id"]
handled = True
if "substations_id" in ddict_:
self.sub_change_bus = ddict_["substations_id"]
handled = True
if not handled:
msg = 'Invalid way to change the topology. When dict_["set_bus"] is a dictionary it should have'
msg += (
' at least one of "loads_id", "generators_id", "lines_or_id", '
)
msg += '"lines_ex_id" or "substations_id"'
msg += " as keys. None where found. Current used keys are: "
msg += "{}".format(sorted(ddict_.keys()))
raise AmbiguousAction(msg)
else:
self.change_bus = dict_["change_bus"]
def _digest_set_status(self, dict_):
if "set_line_status" in dict_:
# this action can both disconnect or reconnect a powerlines
self.line_set_status = dict_["set_line_status"]
def _digest_hazards(self, dict_):
if "hazards" in dict_:
# set the values of the power lines to "disconnected" for element being "False"
# does nothing to the others
# an hazard will never reconnect a powerline
if dict_["hazards"] is not None:
self._modif_set_status = True
tmp = dict_["hazards"]
try:
tmp = np.array(tmp)
except Exception as exc_:
raise AmbiguousAction(
f'You ask to perform hazard on powerlines, this can only be done if "hazards" can be casted '
f"into a numpy ndarray with error {exc_}"
)
if np.issubdtype(tmp.dtype, np.dtype(bool).type):
if len(tmp) != self.n_line:
raise InvalidNumberOfLines(
'This "hazards" action acts on {} lines while there are {} in the _grid'.format(
len(tmp), self.n_line
)
)
elif not np.issubdtype(tmp.dtype, np.dtype(int).type):
raise AmbiguousAction(
"You can only ask hazards with int or boolean numpy array vector."
)
self._set_line_status[tmp] = -1
self._hazards[tmp] = True
# force ignore of any topological actions
self._ignore_topo_action_if_disconnection(tmp)
def _digest_maintenance(self, dict_):
if "maintenance" in dict_:
# set the values of the power lines to "disconnected" for element being "False"
# does nothing to the others
# a _maintenance operation will never reconnect a powerline
if dict_["maintenance"] is not None:
self._modif_set_status = True
tmp = dict_["maintenance"]
try:
tmp = np.array(tmp)
except Exception as exc_:
raise AmbiguousAction(
f'You ask to perform maintenance on powerlines, this can only be done if "maintenance" can '
f"be casted into a numpy ndarray with error {exc_}"
)
if np.issubdtype(tmp.dtype, np.dtype(bool).type):
if len(tmp) != self.n_line:
raise InvalidNumberOfLines(
'This "maintenance" action acts on {} lines while there are {} in the _grid'.format(
len(tmp), self.n_line
)
)
elif not np.issubdtype(tmp.dtype, np.dtype(int).type):
raise AmbiguousAction(
"You can only ask to perform lines maintenance with int or boolean numpy array vector."
)
self._set_line_status[tmp] = -1
self._maintenance[tmp] = True
self._ignore_topo_action_if_disconnection(tmp)
def _digest_change_status(self, dict_):
if "change_line_status" in dict_:
# the action will switch the status of the powerline
# for each element equal to 1 in this dict_["change_line_status"]
# if the status is "disconnected" it will be transformed into "connected"
# and if the status is "connected" it will be switched to "disconnected"
# Lines with "0" in this vector are not impacted.
if dict_["change_line_status"] is not None:
self.line_change_status = dict_["change_line_status"]
def _digest_redispatching(self, dict_):
if "redispatch" in dict_:
self.redispatch = dict_["redispatch"]
def _digest_storage(self, dict_):
if "set_storage" in dict_:
self.storage_p = dict_["set_storage"]
def _digest_curtailment(self, dict_):
if "curtail" in dict_:
self.curtail = dict_["curtail"]
def _digest_alarm(self, dict_):
"""
.. warning::
/!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\"""
if "raise_alarm" in dict_:
self.raise_alarm = dict_["raise_alarm"]
def _digest_alert(self, dict_):
if "raise_alert" in dict_:
self.raise_alert = dict_["raise_alert"]
def _reset_vect(self):
"""
INTERNAL USE ONLY
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Need to be called when update is called !
"""
self._vectorized = None
self._subs_impacted = None
self._lines_impacted = None
    def update(self, dict_):
        """
        Update the action with a comprehensible format specified by a dictionary.

        Preferably, if a key of the argument *dict_* is not found in :attr:`Action.authorized_keys` it should throw a
        warning. This argument will be completely ignored.

        This method also resets the attributes :attr:`Action._vectorized` :attr:`Action._lines_impacted` and
        :attr:`Action._subs_impacted` to ``None`` regardless of the argument in input.

        If an action consists of "reconnecting" a powerline, and this same powerline is affected by maintenance or a
        hazard, it will be erased without any warning. "hazards" and "maintenance" have the priority. This is a
        requirement for all proper :class:`Action` subclass.

        Parameters
        ----------
        dict_: :class:`dict`
            If it's ``None`` or empty it does nothing. Otherwise, it can contain the following (optional) keys:

            - "*injection*" if the action will modify the injections (generator setpoint/load value - active or
              reactive) of the powergrid. It has optionally one of the following keys:

                - "load_p": to set the active load values (this is a numpy array with the same size as the number of
                  load in the power _grid with Nan: don't change anything, else set the value
                - "load_q": same as above but for the load reactive values
                - "prod_p": same as above but for the generator active setpoint values. It has the size
                  corresponding to the number of generators in the test case.
                - "prod_v": same as above but set the voltage setpoint of generator units.

            - "*hazards*": represents the hazards that the line might suffer (boolean vector) False: no hazard, nothing
              is done, True: a hazard, the powerline is disconnected
            - "*maintenance*": represents the maintenance operation performed on each powerline (boolean vector) False:
              no maintenance, nothing is done, True: a maintenance is scheduled, the powerline is disconnected
            - "*set_line_status*": a vector (int or float) to set the status of the powerline status (connected /
              disconnected) with the following interpretation:

                - 0: nothing is changed,
                - -1: disconnect the powerline,
                - +1: reconnect the powerline. If an action consists in "reconnecting" a powerline, and this same
                  powerline is affected by a maintenance or a hazard, it will be erased without any warning. "hazards"
                  and "maintenance" have the priority.

            - "change_line_status": a vector (bool) to change the status of the powerline. This vector should be
              interpreted as:

                - ``False``: do nothing
                - ``True``: change the status of the powerline: disconnect it if it was connected, connect it if it was
                  disconnected

            - "set_bus": (numpy int vector or dictionary) will set the buses to which the objects are connected. It
              follows a similar interpretation than the line status vector:

                - 0 -> don't change anything
                - +1 -> set to bus 1,
                - +2 -> set to bus 2, etc.
                - -1: You can use this method to disconnect an object by setting the value to -1.

            - "change_bus": (numpy bool vector or dictionary) will change the bus to which the object is connected.
              True will
              change it (eg switch it from bus 1 to bus 2 or from bus 2 to bus 1). NB this is only active if the system
              has only 2 buses per substation.
            - "redispatch": the best use of this is to specify either the numpy array of the redispatch vector you want
              to apply (that should have the size of the number of generators on the grid) or to specify a list of
              tuple, each tuple being 2 elements: first the generator ID, second the amount of redispatching,
              for example `[(1, -23), (12, +17)]`
            - "set_storage": the best use of this is to specify either the numpy array of the storage units vector
              you want
              to apply (that should have the size of the number of storage units on the grid) or to specify a list of
              tuple, each tuple being 2 elements: first the storage ID, second the amount of power you want to
              produce / absorb, for example `[(1, -23), (12, +17)]`

            **NB** the difference between "set_bus" and "change_bus" is the following:

            - If "set_bus" is 1, then the object (load, generator or powerline) will be moved to bus 1 of the
              substation to which it is connected. If it is already to bus 1 nothing will be done. If it's on another
              bus it will connect it to bus 1. It's disconnected, it will reconnect it and connect it to bus 1.
            - If "change_bus" is True, then objects will be moved from one bus to another. If the object were on bus 1
              then it will be moved on bus 2, and if it were on bus 2, it will be moved on bus 1. If the object is
              disconnected then the action is ambiguous, and calling it will throw an AmbiguousAction exception.

            **NB**: CHANGES: you can reconnect a powerline without specifying on which bus you reconnect it at both its
            ends. In that case the last known bus id for each of its ends is used.

            **NB**: if for a given powerline, both switch_line_status and set_line_status is set, the action will not
            be usable.
            This will lead to an :class:`grid2op.Exception.AmbiguousAction` exception.

            **NB**: The length of vectors provided here is NOT checked in this function. This method can be "chained" and
            only on the final action, when used, eg. in the Backend, is checked.

            **NB**: If a powerline is disconnected, on maintenance, or suffer an outage, the associated "set_bus" will
            be ignored.
            Disconnection has the priority on anything. This priority is given because, in case of hazard, the hazard
            has the priority over the possible actions.

        Examples
        --------
        Here are short examples on how to update an action *eg.* how to create a valid :class:`Action` object that
        be used to modify a :class:`grid2op.Backend.Backend`.

        In all the following examples, we suppose that a valid grid2op environment is created, for example with:

        .. code-block:: python

            import grid2op
            # create a simple environment
            # and make sure every type of action can be used.
            env = grid2op.make(action_class=grid2op.Action.Action)

        *Example 1*: modify the load active values to set them all to 1. You can replace "load_p" by "load_q",
        "prod_p" or "prod_v" to change the load reactive value, the generator active setpoint or the generator
        voltage magnitude setpoint.

        .. code-block:: python

            new_load = np.ones(env.action_space.n_load)
            modify_load_active_value = env.action_space({"injection": {"load_p": new_load}})
            print(modify_load_active_value)

        *Example 2*: disconnect the powerline of id 1:

        .. code-block:: python

            disconnect_powerline = env.action_space({"set_line_status": [(1, -1)]})
            print(disconnect_powerline)
            # there is a shortcut to do that:
            disconnect_powerline2 = env.disconnect_powerline(line_id=1)

        *Example 3*: force the reconnection of the powerline of id 5 by connecting it to bus 1 on its origin end and
        bus 2 on its extremity end.

        .. code-block:: python

            reconnect_powerline = env.action_space({"set_line_status": [(5, 1)],
                                                    "set_bus": {"lines_or_id": [(5, 1)],
                                                                "lines_ex_id": [(5, 2)]}
                                                    })
            print(reconnect_powerline)
            # and the shorter method:
            reconnect_powerline = env.action_space.reconnect_powerline(line_id=5, bus_or=1, bus_ex=2)

        *Example 4*: change the bus to which load 4 is connected:

        .. code-block:: python

            change_load_bus = env.action_space({"set_bus": {"loads_id": [(4, 1)]} })
            print(change_load_bus)

        *Example 5*: reconfigure completely substation 5, and connect the first 3 elements to bus 1 and the last 3
        elements to bus 2

        .. code-block:: python

            sub_id = 5
            target_topology = np.ones(env.sub_info[sub_id], dtype=dt_int)
            target_topology[3:] = 2
            reconfig_sub = env.action_space({"set_bus": {"substations_id": [(sub_id, target_topology)] } })
            print(reconfig_sub)

        *Example 6*: apply redispatching of +17.42 MW at generator with id 23 and -27.8 at generator with id 1

        .. code-block:: python

            redisp_act = env.action_space({"redispatch": [(23, +17.42), (1, -27.8)]})
            print(redisp_act)

        *Example 7*: apply an action on a storage unit: have the storage unit of id 0 produce 1.5MW

        .. code-block:: python

            storage_act = env.action_space({"set_storage": [(0, -1.5)]})
            print(storage_act)

        *Example 8*: apply an action of type curtailment: limit the production to a renewable energy unit
        (in the example the generator with id 2)
        at 80% of its maximum capacity

        .. code-block:: python

            renewable_energy_source = 2
            curtail_act = env.action_space({"curtail": [(renewable_energy_source, 0.8)]})
            print(curtail_act)

        Returns
        -------
        self: :class:`BaseAction`
            Return the modified instance. This is handy to chain modifications if needed.
        """
        # invalidate the cached vector / impact representation of this action
        self._reset_vect()
        if dict_ is not None:
            # warn about (and ignore) keys this action type does not support
            for kk in dict_.keys():
                if kk not in self.authorized_keys:
                    warn = 'The key "{}" used to update an action will be ignored. Valid keys are {}'
                    warn = warn.format(kk, self.authorized_keys)
                    warnings.warn(warn)
            # each "_digest_xxx" handles one family of keys of dict_; order matters:
            # status / hazards / maintenance are digested last so that
            # disconnections can override earlier topological modifications
            self._digest_shunt(dict_)
            self._digest_injection(dict_)
            self._digest_redispatching(dict_)
            self._digest_storage(dict_)  # ADDED for battery
            self._digest_curtailment(dict_)  # ADDED for curtailment
            self._digest_setbus(dict_)
            self._digest_change_bus(dict_)
            self._digest_set_status(dict_)
            self._digest_hazards(dict_)
            self._digest_maintenance(dict_)
            self._digest_change_status(dict_)
            self._digest_alarm(dict_)
            self._digest_alert(dict_)
        return self
def is_ambiguous(self) -> Tuple[bool, AmbiguousAction]:
"""
Says if the action, as defined is ambiguous *per se* or not.
See definition of :func:`BaseAction.check_space_legit` for more details about *ambiguity per se*.
Returns
-------
res: ``True`` if the action is ambiguous, ``False`` otherwise.
info: ``dict`` or not
More information about the error. If the action is not ambiguous, it values to ``None``
"""
try:
self._check_for_ambiguity()
res = False
info = None
except AmbiguousAction as exc_:
info = exc_
res = True
return res, info
    def _check_for_correct_modif_flags(self):
        """Ensure the ``_modif_*`` flags are consistent with the stored modification data.

        For every family of modification (injection, change_bus, set_bus,
        set_line_status, change_line_status, redispatch, storage, curtailment,
        alarm, alert) this checks that:

        - if the underlying data is modified, the matching ``_modif_*`` flag is
          set (otherwise the action was built outside the official API and an
          ``AmbiguousAction`` is raised);
        - the modification belongs to :attr:`BaseAction.authorized_keys`
          (otherwise an ``IllegalAction`` is raised).
        """
        # injections
        if self._dict_inj:
            if not self._modif_inj:
                raise AmbiguousAction(
                    "A action on the injection is performed while the appropriate flag is not "
                    "set. Please use the official grid2op action API to modify the injections."
                )
            if "injection" not in self.authorized_keys:
                raise IllegalAction("You illegally act on the injection")
        # bus modified with "change"
        if np.any(self._change_bus_vect):
            if not self._modif_change_bus:
                raise AmbiguousAction(
                    "A action of type change_bus is performed while the appropriate flag is not "
                    "set. Please use the official grid2op action API to modify the bus using "
                    "'change'."
                )
            if "change_bus" not in self.authorized_keys:
                raise IllegalAction("You illegally act on the bus (using change)")
        # bus modified with "set"
        if np.any(self._set_topo_vect != 0):
            if not self._modif_set_bus:
                raise AmbiguousAction(
                    "A action of type set_bus is performed while the appropriate flag is not "
                    "set. Please use the official grid2op action API to modify the bus using "
                    "'set'."
                )
            if "set_bus" not in self.authorized_keys:
                raise IllegalAction("You illegally act on the bus (using set)")
        # powerline status modified with "set"
        if np.any(self._set_line_status != 0):
            if not self._modif_set_status:
                raise AmbiguousAction(
                    "A action of type line_set_status is performed while the appropriate flag is not "
                    "set. Please use the official grid2op action API to modify the status of "
                    "powerline using "
                    "'set'."
                )
            if "set_line_status" not in self.authorized_keys:
                raise IllegalAction(
                    "You illegally act on the powerline status (using set)"
                )
        # powerline status modified with "change"
        if np.any(self._switch_line_status):
            if not self._modif_change_status:
                raise AmbiguousAction(
                    "A action of type line_change_status is performed while the appropriate flag "
                    "is not "
                    "set. Please use the official grid2op action API to modify the status of "
                    "powerlines using 'change'."
                )
            if "change_line_status" not in self.authorized_keys:
                raise IllegalAction(
                    "You illegally act on the powerline status (using change)"
                )
        # redispatching
        if np.any(self._redispatch != 0.0):
            if not self._modif_redispatch:
                raise AmbiguousAction(
                    "A action of type redispatch is performed while the appropriate flag "
                    "is not "
                    "set. Please use the official grid2op action API to perform redispatching "
                    "action."
                )
            if "redispatch" not in self.authorized_keys:
                raise IllegalAction("You illegally act on the redispatching")
        # storage units
        if np.any(self._storage_power != 0.0):
            if not self._modif_storage:
                raise AmbiguousAction(
                    "A action on the storage unit is performed while the appropriate flag "
                    "is not "
                    "set. Please use the official grid2op action API to perform "
                    "action on storage unit."
                )
            if "set_storage" not in self.authorized_keys:
                raise IllegalAction("You illegally act on the storage unit")
        # curtailment (-1.0 means "no curtailment" for a generator)
        if np.any(self._curtail != -1.0):
            if not self._modif_curtailment:
                raise AmbiguousAction(
                    "A curtailment is performed while the action is not supposed to have done so. "
                    "Please use the official grid2op action API to perform curtailment action."
                )
            if "curtail" not in self.authorized_keys:
                raise IllegalAction("You illegally act on the curtailment")
        # alarms
        if np.any(self._raise_alarm):
            if not self._modif_alarm:
                raise AmbiguousAction(
                    "Incorrect way to raise some alarm, the appropriate flag is not "
                    "modified properly."
                )
            if "raise_alarm" not in self.authorized_keys:
                raise IllegalAction("You illegally send an alarm.")
        # alerts
        if np.any(self._raise_alert):
            if not self._modif_alert:
                raise AmbiguousActionRaiseAlert(
                    "Incorrect way to raise some alert, the appropriate flag is not "
                    "modified properly."
                )
            if "raise_alert" not in self.authorized_keys:
                raise IllegalAction("You illegally send an alert.")
def _check_for_ambiguity(self):
"""
This method checks if an action is ambiguous or not. If the instance is ambiguous, an
:class:`grid2op.Exceptions.AmbiguousAction` is raised.
An action can be ambiguous in the following context:
- It incorrectly affects the injections:
- :code:`self._dict_inj["load_p"]` doesn't have the same size as the number of loads on the _grid.
- :code:`self._dict_inj["load_q"]` doesn't have the same size as the number of loads on the _grid.
- :code:`self._dict_inj["prod_p"]` doesn't have the same size as the number of loads on the _grid.
- :code:`self._dict_inj["prod_v"]` doesn't have the same size as the number of loads on the _grid.
- It affects the powerline in an incorrect manner:
- :code:`self._switch_line_status` has not the same size as the number of powerlines
- :code:`self._set_line_status` has not the same size as the number of powerlines
- the status of some powerline is both *changed* (:code:`self._switch_line_status[i] == True` for some *i*)
and *set* (:code:`self._set_line_status[i]` for the same *i* is not 0)
- a powerline is both connected at one end (ex. its origin is set to bus 1) and disconnected at another
(its extremity is set to bus -1)
- It has an ambiguous behavior concerning the topology of some substations
- the state of some bus for some element is both *changed* (:code:`self._change_bus_vect[i] = True` for
some *i*) and *set* (:code:`self._set_topo_vect[i]` for the same *i* is not 0)
- :code:`self._set_topo_vect` has not the same dimension as the number of elements on the powergrid
- :code:`self._change_bus_vect` has not the same dimension as the number of elements on the powergrid
- For redispatching, Ambiguous actions can come from:
- Some redispatching action is active, yet
:attr:`grid2op.Space.GridObjects.redispatching_unit_commitment_availble` is set to ``False``
- the length of the redispatching vector :attr:`BaseAction._redispatching` is not compatible with the number
of generators.
- some redispatching are above the maximum ramp up :attr:`grid2op.Space.GridObjects.gen_max_ramp_up`
- some redispatching are below the maximum ramp down :attr:`grid2op.Space.GridObjects.gen_max_ramp_down`
- the redispatching action affect non dispatchable generators
- the redispatching and the production setpoint, if added, are above pmax for at least a generator
- the redispatching and the production setpoint, if added, are below pmin for at least a generator
In case of need to overload this method, it is advise to still call this one from the base :class:`BaseAction`
with ":code:`super()._check_for_ambiguity()`" or ":code:`BaseAction._check_for_ambiguity(self)`".
Raises
-------
:class:`grid2op.Exceptions.AmbiguousAction`
Or any of its more precise subclasses, depending on which assumption is not met.
"""
# check that the correct flags are properly computed
self._check_for_correct_modif_flags()
if (
self._modif_change_status
and self._modif_set_status
and np.any(self._set_line_status[self._switch_line_status] != 0)
):
raise InvalidLineStatus(
"You asked to change the status (connected / disconnected) of a powerline by"
' using the keyword "change_status" and set this same line state in '
'"set_status" '
'(or "hazard" or "maintenance"). This ambiguous behaviour is not supported'
)
# check size
if self._modif_inj:
if "load_p" in self._dict_inj:
if len(self._dict_inj["load_p"]) != self.n_load:
raise InvalidNumberOfLoads(
"This action acts on {} loads while there are {} "
"in the _grid".format(
len(self._dict_inj["load_p"]), self.n_load
)
)
if "load_q" in self._dict_inj:
if len(self._dict_inj["load_q"]) != self.n_load:
raise InvalidNumberOfLoads(
"This action acts on {} loads while there are {} in "
"the _grid".format(len(self._dict_inj["load_q"]), self.n_load)
)
if "prod_p" in self._dict_inj:
if len(self._dict_inj["prod_p"]) != self.n_gen:
raise InvalidNumberOfGenerators(
"This action acts on {} generators while there are {} in "
"the _grid".format(len(self._dict_inj["prod_p"]), self.n_gen)
)
if "prod_v" in self._dict_inj:
if len(self._dict_inj["prod_v"]) != self.n_gen:
raise InvalidNumberOfGenerators(
"This action acts on {} generators while there are {} in "
"the _grid".format(len(self._dict_inj["prod_v"]), self.n_gen)
)
if len(self._switch_line_status) != self.n_line:
raise InvalidNumberOfLines(
"This action acts on {} lines while there are {} in "
"the _grid".format(len(self._switch_line_status), self.n_line)
)
if len(self._set_topo_vect) != self.dim_topo:
raise InvalidNumberOfObjectEnds(
"This action acts on {} ends of object while there are {} "
"in the _grid".format(len(self._set_topo_vect), self.dim_topo)
)
if len(self._change_bus_vect) != self.dim_topo:
raise InvalidNumberOfObjectEnds(
"This action acts on {} ends of object while there are {} "
"in the _grid".format(len(self._change_bus_vect), self.dim_topo)
)
if len(self._redispatch) != self.n_gen:
raise InvalidNumberOfGenerators(
"This action acts on {} generators (redispatching= while "
"there are {} in the grid".format(len(self._redispatch), self.n_gen)
)
# redispatching specific check
if self._modif_redispatch:
if "redispatch" not in self.authorized_keys:
raise AmbiguousAction(
'Action of type "redispatch" are not supported by this action type'
)
if not self.redispatching_unit_commitment_availble:
raise UnitCommitorRedispachingNotAvailable(
"Impossible to use a redispatching action in this "
"environment. Please set up the proper costs for generator"
)
if np.any(self._redispatch[~self.gen_redispatchable] != 0.0):
raise InvalidRedispatching(
"Trying to apply a redispatching action on a non redispatchable generator"
)
if self._single_act:
if np.any(self._redispatch > self.gen_max_ramp_up):
raise InvalidRedispatching(
"Some redispatching amount are above the maximum ramp up"
)
if np.any(-self._redispatch > self.gen_max_ramp_down):
raise InvalidRedispatching(
"Some redispatching amount are bellow the maximum ramp down"
)
if "prod_p" in self._dict_inj:
new_p = self._dict_inj["prod_p"]
tmp_p = new_p + self._redispatch
indx_ok = np.isfinite(new_p)
if np.any(tmp_p[indx_ok] > self.gen_pmax[indx_ok]):
raise InvalidRedispatching(
"Some redispatching amount, cumulated with the production setpoint, "
"are above pmax for some generator."
)
if np.any(tmp_p[indx_ok] < self.gen_pmin[indx_ok]):
raise InvalidRedispatching(
"Some redispatching amount, cumulated with the production setpoint, "
"are below pmin for some generator."
)
# storage specific checks:
self._is_storage_ambiguous()
# curtailment specific checks:
self._is_curtailment_ambiguous()
# topological action
if (
self._modif_set_bus
and self._modif_change_bus
and np.any(self._set_topo_vect[self._change_bus_vect] != 0)
):
raise InvalidBusStatus(
"You asked to change the bus of an object with"
' using the keyword "change_bus" and set this same object state in "set_bus"'
". This ambiguous behaviour is not supported"
)
if self._modif_set_bus and np.any(self._set_topo_vect < -1):
raise InvalidBusStatus(
"Invalid set_bus. Buses should be either -1 (disconnect), 0 (change nothing),"
"1 (assign this object to bus one) or 2 (assign this object to bus"
"2). A negative number has been found."
)
if self._modif_set_bus and np.any(self._set_topo_vect > 2):
raise InvalidBusStatus(
"Invalid set_bus. Buses should be either -1 (disconnect), 0 (change nothing),"
"1 (assign this object to bus one) or 2 (assign this object to bus"
"2). A number higher than 2 has been found: substations with more than 2 busbars"
"are not supported by grid2op at the moment. Do not hesitate to fill a feature "
"request on github if you need this feature."
)
if False:
# TODO find an elegant way to disable that
# now it's possible.
for q_id, status in enumerate(self._set_line_status):
if status == 1:
# i reconnect a powerline, i need to check that it's connected on both ends
if (
self._set_topo_vect[self.line_or_pos_topo_vect[q_id]] == 0
or self._set_topo_vect[self.line_ex_pos_topo_vect[q_id]] == 0
):
raise InvalidLineStatus(
"You ask to reconnect powerline {} yet didn't tell on"
" which bus.".format(q_id)
)
if self._modif_set_bus:
disco_or = self._set_topo_vect[self.line_or_pos_topo_vect] == -1
if np.any(self._set_topo_vect[self.line_ex_pos_topo_vect][disco_or] > 0):
raise InvalidLineStatus(
"A powerline is connected (set to a bus at extremity end) and "
"disconnected (set to bus -1 at origin end)"
)
disco_ex = self._set_topo_vect[self.line_ex_pos_topo_vect] == -1
if np.any(self._set_topo_vect[self.line_or_pos_topo_vect][disco_ex] > 0):
raise InvalidLineStatus(
"A powerline is connected (set to a bus at origin end) and "
"disconnected (set to bus -1 at extremity end)"
)
# if i disconnected of a line, but i modify also the bus where it's connected
if self._modif_set_bus or self._modif_change_bus:
idx = self._set_line_status == -1
id_disc = np.where(idx)[0]
idx2 = self._set_line_status == 1
id_reco = np.where(idx2)[0]
if self._modif_set_bus:
if "set_bus" not in self.authorized_keys:
raise AmbiguousAction(
'Action of type "set_bus" are not supported by this action type'
)
if np.any(
self._set_topo_vect[self.line_or_pos_topo_vect[id_disc]] > 0
) or np.any(self._set_topo_vect[self.line_ex_pos_topo_vect[id_disc]] > 0):
raise InvalidLineStatus(
"You ask to disconnect a powerline but also to connect it "
"to a certain bus."
)
if np.any(
self._set_topo_vect[self.line_or_pos_topo_vect[id_reco]] == -1
) or np.any(self._set_topo_vect[self.line_ex_pos_topo_vect[id_reco]] == -1):
raise InvalidLineStatus(
"You ask to reconnect a powerline but also to disconnect it "
"from a certain bus."
)
if self._modif_change_bus:
if "change_bus" not in self.authorized_keys:
raise AmbiguousAction(
'Action of type "change_bus" are not supported by this action type'
)
if np.any(
self._change_bus_vect[self.line_or_pos_topo_vect[id_disc]] > 0
) or np.any(self._change_bus_vect[self.line_ex_pos_topo_vect[id_disc]] > 0):
raise InvalidLineStatus(
"You ask to disconnect a powerline but also to change its bus."
)
if np.any(
self._change_bus_vect[
self.line_or_pos_topo_vect[self._set_line_status == 1]
]
):
raise InvalidLineStatus(
"You ask to connect an origin powerline but also to *change* the bus to which "
"it is connected. This is ambiguous. You must *set* this bus instead."
)
if np.any(
self._change_bus_vect[
self.line_ex_pos_topo_vect[self._set_line_status == 1]
]
):
raise InvalidLineStatus(
"You ask to connect an extremity powerline but also to *change* the bus to "
"which it is connected. This is ambiguous. You must *set* this bus instead."
)
if self.shunts_data_available:
if self.shunt_p.shape[0] != self.n_shunt:
raise IncorrectNumberOfElements(
"Incorrect number of shunt (for shunt_p) in your action."
)
if self.shunt_q.shape[0] != self.n_shunt:
raise IncorrectNumberOfElements(
"Incorrect number of shunt (for shunt_q) in your action."
)
if self.shunt_bus.shape[0] != self.n_shunt:
raise IncorrectNumberOfElements(
"Incorrect number of shunt (for shunt_bus) in your action."
)
if self.n_shunt > 0:
if np.max(self.shunt_bus) > 2:
raise AmbiguousAction(
"Some shunt is connected to a bus greater than 2"
)
if np.min(self.shunt_bus) < -1:
raise AmbiguousAction(
"Some shunt is connected to a bus smaller than -1"
)
else:
# shunt is not available
if self.shunt_p is not None:
raise AmbiguousAction(
"Attempt to modify a shunt (shunt_p) while shunt data is not handled by backend"
)
if self.shunt_q is not None:
raise AmbiguousAction(
"Attempt to modify a shunt (shunt_q) while shunt data is not handled by backend"
)
if self.shunt_bus is not None:
raise AmbiguousAction(
"Attempt to modify a shunt (shunt_bus) while shunt data is not handled "
"by backend"
)
if self._modif_alarm:
if self._raise_alarm.shape[0] != self.dim_alarms:
raise AmbiguousAction(
f"Wrong number of alarm raised: {self._raise_alarm.shape[0]} raised, expecting "
f"{self.dim_alarms}"
)
else:
if np.any(self._raise_alarm):
raise AmbiguousAction(
f"Unrecognize alarm action: an action acts on the alarm, yet it's not tagged "
f"as doing so. Expect wrong behaviour."
)
if self._modif_alert:
if self._raise_alert.shape[0] != self.dim_alerts:
raise AmbiguousActionRaiseAlert(
f"Wrong number of alert raised: {self._raise_alert.shape[0]} raised, expecting "
f"{self.dim_alerts}"
)
else:
if np.any(self._raise_alert):
raise AmbiguousActionRaiseAlert(
f"Unrecognize alert action: an action acts on the alert, yet it's not tagged "
f"as doing so. Expect wrong behaviour."
)
def _is_storage_ambiguous(self):
"""check if storage actions are ambiguous"""
if self._modif_storage:
if "set_storage" not in self.authorized_keys:
raise AmbiguousAction(
'Action of type "set_storage" are not supported by this action type'
)
if self.n_storage == 0:
raise InvalidStorage(
"Attempt to modify a storage unit while there is none on the grid"
)
if self._storage_power.shape[0] != self.n_storage:
raise InvalidStorage(
"self._storage_power.shape[0] != self.n_storage: wrong number of storage "
"units affected"
)
if np.any(self._storage_power < -self.storage_max_p_prod):
where_bug = np.where(self._storage_power < -self.storage_max_p_prod)[0]
raise InvalidStorage(
f"you asked a storage unit to absorb more than what it can: "
f"self._storage_power[{where_bug}] < -self.storage_max_p_prod[{where_bug}]."
)
if np.any(self._storage_power > self.storage_max_p_absorb):
where_bug = np.where(self._storage_power > self.storage_max_p_absorb)[0]
raise InvalidStorage(
f"you asked a storage unit to produce more than what it can: "
f"self._storage_power[{where_bug}] > self.storage_max_p_absorb[{where_bug}]."
)
if "_storage_power" not in self.attr_list_set:
if np.any(self._set_topo_vect[self.storage_pos_topo_vect] > 0):
raise InvalidStorage("Attempt to modify bus (set) of a storage unit")
if np.any(self._change_bus_vect[self.storage_pos_topo_vect]):
raise InvalidStorage("Attempt to modify bus (change) of a storage unit")
def _is_curtailment_ambiguous(self):
"""check if curtailment action is ambiguous"""
if self._modif_curtailment:
if "curtail" not in self.authorized_keys:
raise AmbiguousAction(
'Action of type "curtail" are not supported by this action type'
)
if not self.redispatching_unit_commitment_availble:
raise UnitCommitorRedispachingNotAvailable(
"Impossible to use a redispatching action in this "
"environment. Please set up the proper costs for generator. "
"This also means curtailment feature is not available."
)
if self._curtail.shape[0] != self.n_gen:
raise InvalidCurtailment(
"self._curtail.shape[0] != self.n_gen: wrong number of generator "
"units affected"
)
if np.any((self._curtail < 0.0) & (self._curtail != -1.0)):
where_bug = np.where((self._curtail < 0.0) & (self._curtail != -1.0))[0]
raise InvalidCurtailment(
f"you asked to perform a negative curtailment: "
f"self._curtail[{where_bug}] < 0. "
f"Curtailment should be a real number between 0.0 and 1.0"
)
if np.any(self._curtail > 1.0):
where_bug = np.where(self._curtail > 1.0)[0]
raise InvalidCurtailment(
f"you asked a storage unit to produce more than what it can: "
f"self._curtail[{where_bug}] > 1. "
f"Curtailment should be a real number between 0.0 and 1.0"
)
if np.any(self._curtail[~self.gen_renewable] != -1.0):
raise InvalidCurtailment(
"Trying to apply a curtailment on a non renewable generator"
)
def _ignore_topo_action_if_disconnection(self, sel_):
# force ignore of any topological actions
self._set_topo_vect[np.array(self.line_or_pos_topo_vect[sel_])] = 0
self._change_bus_vect[np.array(self.line_or_pos_topo_vect[sel_])] = False
self._set_topo_vect[np.array(self.line_ex_pos_topo_vect[sel_])] = 0
self._change_bus_vect[np.array(self.line_ex_pos_topo_vect[sel_])] = False
def _obj_caract_from_topo_id(self, id_):
obj_id = None
objt_type = None
array_subid = None
for l_id, id_in_topo in enumerate(self.load_pos_topo_vect):
if id_in_topo == id_:
obj_id = l_id
objt_type = "load"
array_subid = self.load_to_subid
if obj_id is None:
for l_id, id_in_topo in enumerate(self.gen_pos_topo_vect):
if id_in_topo == id_:
obj_id = l_id
objt_type = "generator"
array_subid = self.gen_to_subid
if obj_id is None:
for l_id, id_in_topo in enumerate(self.line_or_pos_topo_vect):
if id_in_topo == id_:
obj_id = l_id
objt_type = self._line_or_str
array_subid = self.line_or_to_subid
if obj_id is None:
for l_id, id_in_topo in enumerate(self.line_ex_pos_topo_vect):
if id_in_topo == id_:
obj_id = l_id
objt_type = self._line_ex_str
array_subid = self.line_ex_to_subid
if obj_id is None:
for l_id, id_in_topo in enumerate(self.storage_pos_topo_vect):
if id_in_topo == id_:
obj_id = l_id
objt_type = "storage"
array_subid = self.storage_to_subid
substation_id = array_subid[obj_id]
return obj_id, objt_type, substation_id
    def __str__(self) -> str:
        """
        This utility allows printing in a human-readable format what objects will be impacted by the action.

        Most sections are derived from :func:`BaseAction.impact_on_objects`; the
        redispatching / storage / curtailment / alarm / alert sections read the
        internal vectors directly.

        Returns
        -------
        str: :class:`str`
            The string representation of an :class:`BaseAction` in a human-readable format.

        Examples
        ---------
        It is simply the "print" function:

        .. code-block:: python

            action = env.action_space(...)
            print(action)

        """
        res = ["This action will:"]
        impact = self.impact_on_objects()
        # injections
        injection_impact = impact["injection"]
        if injection_impact["changed"]:
            for change in injection_impact["impacted"]:
                res.append("\t - Set {} to {}".format(change["set"], change["to"]))
        else:
            res.append("\t - NOT change anything to the injections")
        # redispatch: only generators with a non-zero setpoint are listed
        if self._modif_redispatch:
            res.append(
                "\t - Modify the generators with redispatching in the following way:"
            )
            for gen_idx in range(self.n_gen):
                if self._redispatch[gen_idx] != 0.0:
                    gen_name = self.name_gen[gen_idx]
                    r_amount = self._redispatch[gen_idx]
                    res.append(
                        '\t \t - Redispatch "{}" of {:.2f} MW'.format(
                            gen_name, r_amount
                        )
                    )
        else:
            res.append("\t - NOT perform any redispatching action")
        # storage: 0.0 or non-finite power means "not modified"
        if self._modif_storage:
            res.append("\t - Modify the storage units in the following way:")
            for stor_idx in range(self.n_storage):
                amount_ = self._storage_power[stor_idx]
                if np.isfinite(amount_) and amount_ != 0.0:
                    name_ = self.name_storage[stor_idx]
                    res.append(
                        '\t \t - Ask unit "{}" to {} {:.2f} MW (setpoint: {:.2f} MW)'
                        "".format(
                            name_,
                            "absorb" if amount_ > 0.0 else "produce",
                            np.abs(amount_),
                            amount_,
                        )
                    )
        else:
            res.append("\t - NOT modify any storage capacity")
        # curtailment: -1.0 is the sentinel for "no curtailment on this generator"
        if self._modif_curtailment:
            res.append("\t - Perform the following curtailment:")
            for gen_idx in range(self.n_gen):
                amount_ = self._curtail[gen_idx]
                if np.isfinite(amount_) and amount_ != -1.0:
                    name_ = self.name_gen[gen_idx]
                    res.append(
                        '\t \t - Limit unit "{}" to {:.1f}% of its Pmax (setpoint: {:.3f})'
                        "".format(name_, 100.0 * amount_, amount_)
                    )
        else:
            res.append("\t - NOT perform any curtailment")
        # force line status
        force_line_impact = impact["force_line"]
        if force_line_impact["changed"]:
            reconnections = force_line_impact["reconnections"]
            if reconnections["count"] > 0:
                res.append(
                    "\t - Force reconnection of {} powerlines ({})".format(
                        reconnections["count"], reconnections["powerlines"]
                    )
                )
            disconnections = force_line_impact["disconnections"]
            if disconnections["count"] > 0:
                res.append(
                    "\t - Force disconnection of {} powerlines ({})".format(
                        disconnections["count"], disconnections["powerlines"]
                    )
                )
        else:
            res.append("\t - NOT force any line status")
        # switch line status
        swith_line_impact = impact["switch_line"]
        if swith_line_impact["changed"]:
            res.append(
                "\t - Switch status of {} powerlines ({})".format(
                    swith_line_impact["count"], swith_line_impact["powerlines"]
                )
            )
        else:
            res.append("\t - NOT switch any line status")
        # topology ("change" part)
        bus_switch_impact = impact["topology"]["bus_switch"]
        if len(bus_switch_impact) > 0:
            res.append("\t - Change the bus of the following element(s):")
            for switch in bus_switch_impact:
                res.append(
                    "\t \t - Switch bus of {} id {} [on substation {}]".format(
                        switch["object_type"], switch["object_id"], switch["substation"]
                    )
                )
        else:
            res.append("\t - NOT switch anything in the topology")
        # topology ("set" part): assignments and disconnections are reported together
        assigned_bus_impact = impact["topology"]["assigned_bus"]
        disconnect_bus_impact = impact["topology"]["disconnect_bus"]
        if len(assigned_bus_impact) > 0 or len(disconnect_bus_impact) > 0:
            if assigned_bus_impact:
                res.append("\t - Set the bus of the following element(s):")
                for assigned in assigned_bus_impact:
                    res.append(
                        "\t \t - Assign bus {} to {} id {} [on substation {}]".format(
                            assigned["bus"],
                            assigned["object_type"],
                            assigned["object_id"],
                            assigned["substation"],
                        )
                    )
            if disconnect_bus_impact:
                res.append("\t - Disconnect the following element(s):")
                for disconnected in disconnect_bus_impact:
                    res.append(
                        "\t \t - Disconnect {} id {} [on substation {}]".format(
                            disconnected["object_type"],
                            disconnected["object_id"],
                            disconnected["substation"],
                        )
                    )
        else:
            res.append("\t - NOT force any particular bus configuration")
        # alarms / alerts are only mentioned when the action class supports them
        my_cls = type(self)
        if my_cls.dim_alarms > 0:
            if self._modif_alarm:
                li_area = np.array(my_cls.alarms_area_names)[
                    np.where(self._raise_alarm)[0]
                ]
                if len(li_area) == 1:
                    area_str = ": " + li_area[0]
                else:
                    area_str = "s: \n\t \t - " + "\n\t \t - ".join(li_area)
                res.append(f"\t - Raise an alarm on area" f"{area_str}")
            else:
                res.append("\t - Not raise any alarm")
        if my_cls.dim_alerts > 0:
            if self._modif_alert:
                i_alert = np.where(self._raise_alert)[0]
                li_line = np.array(my_cls.alertable_line_names)[i_alert]
                if len(li_line) == 1:
                    line_str = f": {i_alert[0]} (on line {li_line[0]})"
                else:
                    line_str = "s: \n\t \t - " + "\n\t \t - ".join(
                        [f": {i} (on line {l})" for i,l in zip(i_alert,li_line)])
                res.append(f"\t - Raise alert(s) " f"{line_str}")
            else:
                res.append("\t - Not raise any alert")
        return "\n".join(res)
    def impact_on_objects(self) -> dict:
        """
        This will return a dictionary which contains details on objects that will be impacted by the action.

        Returns
        -------
        dict: :class:`dict`
            The dictionary representation of an action impact on objects with keys, "has_impact", "injection",
            "force_line", "switch_line", "topology", "redispatch", "storage", "curtailment".

            "has_impact" is ``True`` as soon as any of the other sections reports a change.
        """
        # handles actions on injections
        has_impact = False
        inject_detail = {"changed": False, "count": 0, "impacted": []}
        for k in ["load_p", "prod_p", "load_q", "prod_v"]:
            if k in self._dict_inj:
                inject_detail["changed"] = True
                has_impact = True
                inject_detail["count"] += 1
                inject_detail["impacted"].append({"set": k, "to": self._dict_inj[k]})
        # handles actions on force line status (+1 = reconnect, -1 = disconnect)
        force_line_status = {
            "changed": False,
            "reconnections": {"count": 0, "powerlines": []},
            "disconnections": {"count": 0, "powerlines": []},
        }
        if np.any(self._set_line_status == 1):
            force_line_status["changed"] = True
            has_impact = True
            force_line_status["reconnections"]["count"] = np.sum(
                self._set_line_status == 1
            )
            force_line_status["reconnections"]["powerlines"] = np.where(
                self._set_line_status == 1
            )[0]
        if np.any(self._set_line_status == -1):
            force_line_status["changed"] = True
            has_impact = True
            force_line_status["disconnections"]["count"] = np.sum(
                self._set_line_status == -1
            )
            force_line_status["disconnections"]["powerlines"] = np.where(
                self._set_line_status == -1
            )[0]
        # handles action on switch line status
        switch_line_status = {"changed": False, "count": 0, "powerlines": []}
        if np.sum(self._switch_line_status):
            switch_line_status["changed"] = True
            has_impact = True
            switch_line_status["count"] = np.sum(self._switch_line_status)
            switch_line_status["powerlines"] = np.where(self._switch_line_status)[0]
        topology = {
            "changed": False,
            "bus_switch": [],
            "assigned_bus": [],
            "disconnect_bus": [],
        }
        # handles topology: "change bus" entries first
        if np.any(self._change_bus_vect):
            for id_, k in enumerate(self._change_bus_vect):
                if k:
                    obj_id, objt_type, substation_id = self._obj_caract_from_topo_id(
                        id_
                    )
                    topology["bus_switch"].append(
                        {
                            # NOTE(review): k is always True here (boolean "change"
                            # flag), so "bus" carries no bus number for a switch
                            "bus": k,
                            "object_type": objt_type,
                            "object_id": obj_id,
                            "substation": substation_id,
                        }
                    )
            topology["changed"] = True
            has_impact = True
        # then "set bus" entries: k > 0 assigns a busbar, k < 0 disconnects
        if np.any(self._set_topo_vect != 0):
            for id_, k in enumerate(self._set_topo_vect):
                if k > 0:
                    obj_id, objt_type, substation_id = self._obj_caract_from_topo_id(
                        id_
                    )
                    topology["assigned_bus"].append(
                        {
                            "bus": k,
                            "object_type": objt_type,
                            "object_id": obj_id,
                            "substation": substation_id,
                        }
                    )
                if k < 0:
                    obj_id, objt_type, substation_id = self._obj_caract_from_topo_id(
                        id_
                    )
                    topology["disconnect_bus"].append(
                        {
                            "bus": k,
                            "object_type": objt_type,
                            "object_id": obj_id,
                            "substation": substation_id,
                        }
                    )
            topology["changed"] = True
            has_impact = True
        # handle redispatching (only non-zero generators are reported)
        redispatch = {"changed": False, "generators": []}
        if np.any(self._redispatch != 0.0):
            for gen_idx in range(self.n_gen):
                if self._redispatch[gen_idx] != 0.0:
                    gen_name = self.name_gen[gen_idx]
                    r_amount = self._redispatch[gen_idx]
                    redispatch["generators"].append(
                        {"gen_id": gen_idx, "gen_name": gen_name, "amount": r_amount}
                    )
            redispatch["changed"] = True
            has_impact = True
        # storage setpoints: non-finite power means "not modified"
        storage = {"changed": False, "capacities": []}
        if self._modif_storage:
            for str_idx in range(self.n_storage):
                tmp = self._storage_power[str_idx]
                if np.isfinite(tmp):
                    name_ = self.name_storage[str_idx]
                    new_capacity = tmp
                    storage["capacities"].append(
                        {
                            "storage_id": str_idx,
                            "storage_name": name_,
                            "new_capacity": new_capacity,
                        }
                    )
                    storage["changed"] = True
                    has_impact = True
        # curtailment: -1 is the sentinel for "no curtailment on this generator"
        curtailment = {"changed": False, "limit": []}
        if self._modif_curtailment:
            for gen_idx in range(self.n_gen):
                tmp = self._curtail[gen_idx]
                if np.isfinite(tmp) and tmp != -1:
                    name_ = self.name_gen[gen_idx]
                    new_max = tmp
                    curtailment["limit"].append(
                        {
                            "generator_id": gen_idx,
                            "generator_name": name_,
                            "amount": new_max,
                        }
                    )
                    curtailment["changed"] = True
                    has_impact = True
        return {
            "has_impact": has_impact,
            "injection": inject_detail,
            "force_line": force_line_status,
            "switch_line": switch_line_status,
            "topology": topology,
            "redispatch": redispatch,
            "storage": storage,
            "curtailment": curtailment,
        }
def as_dict(self) -> dict:
"""
Represent an action "as a" dictionary. This dictionary is useful to further inspect on which elements
the actions had an impact. It is not recommended to use it as a way to serialize actions. The "do nothing"
action should always be represented by an empty dictionary.
The following keys (all optional) are present in the results:
* `load_p`: if the action modifies the active loads.
* `load_q`: if the action modifies the reactive loads.
* `prod_p`: if the action modifies the active productions of generators.
* `prod_v`: if the action modifies the voltage setpoint of generators.
* `set_line_status` if the action tries to **set** the status of some powerlines. If present, this is a
a dictionary with keys:
* `nb_connected`: number of powerlines that are reconnected
* `nb_disconnected`: number of powerlines that are disconnected
* `connected_id`: the id of the powerlines reconnected
* `disconnected_id`: the ids of the powerlines disconnected
* `change_line_status`: if the action tries to **change** the status of some powerlines. If present, this
is a dictionary with keys:
* `nb_changed`: number of powerlines having their status changed
* `changed_id`: the ids of the powerlines that are changed
* `change_bus_vect`: if the action tries to **change** the topology of some substations. If present, this
is a dictionary with keys:
* `nb_modif_subs`: number of substations impacted by the action
* `modif_subs_id`: ids of the substations impacted by the action
* `change_bus_vect`: details the objects that are modified. It is itself a dictionary that represents for
each impacted substations (keys) the modification of the objects connected to it.
* `set_bus_vect`: if the action tries to **set** the topology of some substations. If present, this is a
dictionary with keys:
* `nb_modif_subs`: number of substations impacted by the action
* `modif_subs_id`: the ids of the substations impacted by the action
* `set_bus_vect`: details the objects that are modified. It is also a dictionary that represents for
each impacted substations (keys) how the elements connected to it are impacted (their "new" bus)
* `hazards` if the action is composed of some hazards. In this case, it's simply the index of the powerlines
that are disconnected because of them.
* `nb_hazards` the number of hazards the "action" implemented (eg number of powerlines disconnected because of
hazards.
* `maintenance` if the action is composed of some maintenance. In this case, it's simply the index of the
powerlines that are affected by maintenance operation at this time step.
that are disconnected because of them.
* `nb_maintenance` the number of maintenance the "action" implemented eg the number of powerlines
disconnected because of maintenance operations.
* `redispatch` the redispatching action (if any). It gives, for each generator (all generator, not just the
dispatchable one) the amount of power redispatched in this action.
* `storage_power`: the setpoint for production / consumption for all storage units
* `curtailment`: the curtailment performed on all generator
Returns
-------
res: ``dict``
The action represented as a dictionary. See above for a description of it.
"""
res = {}
# saving the injections
for k in ["load_p", "prod_p", "load_q", "prod_v"]:
if k in self._dict_inj:
res[k] = 1.0 * self._dict_inj[k]
# handles actions on force line status
if np.any(self._set_line_status != 0):
res["set_line_status"] = {}
res["set_line_status"]["nb_connected"] = np.sum(self._set_line_status == 1)
res["set_line_status"]["nb_disconnected"] = np.sum(
self._set_line_status == -1
)
res["set_line_status"]["connected_id"] = np.where(
self._set_line_status == 1
)[0]
res["set_line_status"]["disconnected_id"] = np.where(
self._set_line_status == -1
)[0]
# handles action on swtich line status
if np.sum(self._switch_line_status):
res["change_line_status"] = {}
res["change_line_status"]["nb_changed"] = np.sum(self._switch_line_status)
res["change_line_status"]["changed_id"] = np.where(
self._switch_line_status
)[0]
# handles topology change
if np.any(self._change_bus_vect):
res["change_bus_vect"] = {}
res["change_bus_vect"]["nb_modif_objects"] = np.sum(self._change_bus_vect)
all_subs = set()
for id_, k in enumerate(self._change_bus_vect):
if k:
obj_id, objt_type, substation_id = self._obj_caract_from_topo_id(
id_
)
sub_id = "{}".format(substation_id)
if not sub_id in res["change_bus_vect"]:
res["change_bus_vect"][sub_id] = {}
res["change_bus_vect"][sub_id]["{}".format(obj_id)] = {
"type": objt_type
}
all_subs.add(sub_id)
res["change_bus_vect"]["nb_modif_subs"] = len(all_subs)
res["change_bus_vect"]["modif_subs_id"] = sorted(all_subs)
# handles topology set
if np.any(self._set_topo_vect):
res["set_bus_vect"] = {}
res["set_bus_vect"]["nb_modif_objects"] = np.sum(self._set_topo_vect)
all_subs = set()
for id_, k in enumerate(self._set_topo_vect):
if k != 0:
obj_id, objt_type, substation_id = self._obj_caract_from_topo_id(
id_
)
sub_id = "{}".format(substation_id)
if not sub_id in res["set_bus_vect"]:
res["set_bus_vect"][sub_id] = {}
res["set_bus_vect"][sub_id]["{}".format(obj_id)] = {
"type": objt_type,
"new_bus": k,
}
all_subs.add(sub_id)
res["set_bus_vect"]["nb_modif_subs"] = len(all_subs)
res["set_bus_vect"]["modif_subs_id"] = sorted(all_subs)
if np.any(self._hazards):
res["hazards"] = np.where(self._hazards)[0]
res["nb_hazards"] = np.sum(self._hazards)
if np.any(self._maintenance):
res["maintenance"] = np.where(self._maintenance)[0]
res["nb_maintenance"] = np.sum(self._maintenance)
if np.any(self._redispatch != 0.0):
res["redispatch"] = 1.0 * self._redispatch
if self._modif_storage:
res["storage_power"] = 1.0 * self._storage_power
if self._modif_curtailment:
res["curtailment"] = 1.0 * self._curtail
return res
def get_types(self) -> Tuple[bool, bool, bool, bool, bool, bool, bool]:
"""
Shorthand to get the type of an action. The type of an action is among:
- "injection": does this action modifies load or generator active values
- "voltage": does this action modifies the generator voltage setpoint or the shunts
- "topology": does this action modifies the topology of the grid (*ie* set or switch some buses)
- "line": does this action modifies the line status
- "redispatching" does this action modifies the redispatching
- "storage" does this action impact the production / consumption of storage units
- "curtailment" does this action impact the non renewable generators through curtailment
Notes
------
A single action can be of multiple types.
The `do nothing` has no type at all (all flags are ``False``)
If a line only set / change the status of a powerline then it does not count as a topological
modification.
If the bus to which a storage unit is connected is modified, but there is no setpoint for
the production / consumption of any storage units, then the action is **NOT** taged as
an action on the storage units.
Returns
-------
injection: ``bool``
Does it affect load or generator active value
voltage: ``bool``
Does it affect the voltage
topology: ``bool``
Does it affect the topology (line status change / switch are **NOT** counted as topology)
line: ``bool``
Does it affect the line status (line status change / switch are **NOT** counted as topology)
redispatching: ``bool``
Does it performs (explicitly) any redispatching
storage: ``bool``
Does it performs (explicitly) any action on the storage production / consumption
curtailment: ``bool``
Does it performs (explicitly) any action on renewable generator
"""
injection = "load_p" in self._dict_inj or "prod_p" in self._dict_inj
voltage = "prod_v" in self._dict_inj
if self.shunts_data_available:
voltage = voltage or np.any(np.isfinite(self.shunt_p))
voltage = voltage or np.any(np.isfinite(self.shunt_q))
voltage = voltage or np.any(self.shunt_bus != 0)
lines_impacted, subs_impacted = self.get_topological_impact()
topology = np.any(subs_impacted)
line = np.any(lines_impacted)
redispatching = np.any(self._redispatch != 0.0)
storage = self._modif_storage
curtailment = self._modif_curtailment
return injection, voltage, topology, line, redispatching, storage, curtailment
def _aux_effect_on_load(self, load_id):
if load_id >= self.n_load:
raise Grid2OpException(
f"There are only {self.n_load} loads on the grid. Cannot check impact on "
f"`load_id={load_id}`"
)
if load_id < 0:
raise Grid2OpException(f"`load_id` should be positive.")
res = {"new_p": np.NaN, "new_q": np.NaN, "change_bus": False, "set_bus": 0}
if "load_p" in self._dict_inj:
res["new_p"] = self._dict_inj["load_p"][load_id]
if "load_q" in self._dict_inj:
res["new_q"] = self._dict_inj["load_q"][load_id]
my_id = self.load_pos_topo_vect[load_id]
res["change_bus"] = self._change_bus_vect[my_id]
res["set_bus"] = self._set_topo_vect[my_id]
return res
def _aux_effect_on_gen(self, gen_id):
if gen_id >= self.n_gen:
raise Grid2OpException(
f"There are only {self.n_gen} gens on the grid. Cannot check impact on "
f"`gen_id={gen_id}`"
)
if gen_id < 0:
raise Grid2OpException(f"`gen_id` should be positive.")
res = {"new_p": np.NaN, "new_v": np.NaN, "set_bus": 0, "change_bus": False}
if "prod_p" in self._dict_inj:
res["new_p"] = self._dict_inj["prod_p"][gen_id]
if "prod_v" in self._dict_inj:
res["new_v"] = self._dict_inj["prod_v"][gen_id]
my_id = self.gen_pos_topo_vect[gen_id]
res["change_bus"] = self._change_bus_vect[my_id]
res["set_bus"] = self._set_topo_vect[my_id]
res["redispatch"] = self._redispatch[gen_id]
res["curtailment"] = self._curtail[gen_id]
return res
def _aux_effect_on_line(self, line_id):
if line_id >= self.n_line:
raise Grid2OpException(
f"There are only {self.n_line} powerlines on the grid. Cannot check impact on "
f"`line_id={line_id}`"
)
if line_id < 0:
raise Grid2OpException(f"`line_id` should be positive.")
res = {}
# origin topology
my_id = self.line_or_pos_topo_vect[line_id]
res["change_bus_or"] = self._change_bus_vect[my_id]
res["set_bus_or"] = self._set_topo_vect[my_id]
# extremity topology
my_id = self.line_ex_pos_topo_vect[line_id]
res["change_bus_ex"] = self._change_bus_vect[my_id]
res["set_bus_ex"] = self._set_topo_vect[my_id]
# status
res["set_line_status"] = self._set_line_status[line_id]
res["change_line_status"] = self._switch_line_status[line_id]
return res
def _aux_effect_on_storage(self, storage_id):
if storage_id >= self.n_storage:
raise Grid2OpException(
f"There are only {self.n_storage} storage units on the grid. "
f"Cannot check impact on "
f"`storage_id={storage_id}`"
)
if storage_id < 0:
raise Grid2OpException(f"`storage_id` should be positive.")
res = {"power": np.NaN, "set_bus": 0, "change_bus": False}
my_id = self.storage_pos_topo_vect[storage_id]
res["change_bus"] = self._change_bus_vect[my_id]
res["set_bus"] = self._set_topo_vect[my_id]
res["power"] = self._storage_power[storage_id]
return res
def _aux_effect_on_substation(self, substation_id):
if substation_id >= self.n_sub:
raise Grid2OpException(
f"There are only {self.n_sub} substations on the grid. "
f"Cannot check impact on "
f"`substation_id={substation_id}`"
)
if substation_id < 0:
raise Grid2OpException(f"`substation_id` should be positive.")
res = {}
beg_ = int(np.sum(self.sub_info[:substation_id]))
end_ = int(beg_ + self.sub_info[substation_id])
res["change_bus"] = self._change_bus_vect[beg_:end_]
res["set_bus"] = self._set_topo_vect[beg_:end_]
return res
def effect_on(
self,
_sentinel=None,
load_id=None,
gen_id=None,
line_id=None,
substation_id=None,
storage_id=None,
) -> dict:
"""
Return the effect of this action on a unique given load, generator unit, powerline or substation.
Only one of load, gen, line or substation should be filled.
The query of these objects can only be done by id here (ie by giving the integer of the object in the backed).
The :class:`ActionSpace` has some utilities to access them by name too.
Parameters
----------
_sentinel: ``None``
Used to prevent positional parameters. Internal, **do not use**.
load_id: ``int``
The ID of the load we want to inspect
gen_id: ``int``
The ID of the generator we want to inspect
line_id: ``int``
The ID of the powerline we want to inspect
substation_id: ``int``
The ID of the substation we want to inspect
storage_id: ``int``
The ID of the storage unit we want to inspect
Returns
-------
res: :class:`dict`
A dictionary with keys and value depending on which object needs to be inspected:
- if a load is inspected, then the keys are:
- "new_p" the new load active value (or NaN if it doesn't change),
- "new_q" the new load reactive value (or Nan if nothing has changed from this point of view)
- "set_bus" the new bus where the load will be moved (int: id of the bus, 0 no change, -1 disconnected)
- "change_bus" whether or not this load will be moved from one bus to another (for example is an action
asked it to go from bus 1 to bus 2)
- if a generator is inspected, then the keys are:
- "new_p" the new generator active setpoint value (or NaN if it doesn't change),
- "new_v" the new generator voltage setpoint value (or Nan if nothing has changed from this point of
view)
- "set_bus" the new bus where the load will be moved (int: id of the bus, 0 no change, -1 disconnected)
- "change_bus" whether or not this load will be moved from one bus to another (for example is an action
asked it to go from bus 1 to bus 2)
- "redispatch" the amount of power redispatched for this generator.
- "curtailment": the amount of curtailment on this generator
- if a powerline is inspected then the keys are:
- "change_bus_or": whether or not the origin end will be moved from one bus to another
- "change_bus_ex": whether or not the extremity end will be moved from one bus to another
- "set_bus_or": the new bus where the origin will be moved
- "set_bus_ex": the new bus where the extremity will be moved
- "set_line_status": the new status of the power line
- "change_line_status": whether or not to switch the status of the powerline
- if a substation is inspected, it returns the topology to this substation in a dictionary with keys:
- "change_bus"
- "set_bus"
- if a storage unit is inspected, it returns a dictionary with:
- "change_bus"
- "set_bus"
- "power" : the power you want to produce / absorb with the storage unit ( if < 0 the power is
produced, if > 0 then power is absorbed)
NB the difference between "set_bus" and "change_bus" is the following:
- If "set_bus" is 1, then the object (load, generator or powerline) will be moved to bus 1 of the substation
to which it is connected. If it is already to bus 1 nothing will be done. If it's on another bus it will
connect it to bus 1. It's disconnected, it will reconnect it and connect it to bus 1.
- If "change_bus" is True, then the object will be moved from one bus to another. If the object were on
bus 1
then it will be moved on bus 2, and if it were on bus 2, it will be moved on bus 1. If the object were
disconnected, then it will be connected to the affected bus.
Raises
------
:class:`grid2op.Exception.Grid2OpException`
If _sentinel is modified, or if none of the arguments are set or alternatively if 2 or more of the
parameters are being set.
"""
EXCEPT_TOO_MUCH_ELEMENTS = (
"You can only the inspect the effect of an action on one single element"
)
if _sentinel is not None:
raise Grid2OpException(
"action.effect_on should only be called with named argument."
)
if (
load_id is None
and gen_id is None
and line_id is None
and storage_id is None
and substation_id is None
):
raise Grid2OpException(
"You ask the effect of an action on something, without provided anything"
)
if load_id is not None:
if (
gen_id is not None
or line_id is not None
or storage_id is not None
or substation_id is not None
):
raise Grid2OpException(EXCEPT_TOO_MUCH_ELEMENTS)
res = self._aux_effect_on_load(load_id)
elif gen_id is not None:
if (
line_id is not None
or storage_id is not None
or substation_id is not None
):
raise Grid2OpException(EXCEPT_TOO_MUCH_ELEMENTS)
res = self._aux_effect_on_gen(gen_id)
elif line_id is not None:
if storage_id is not None or substation_id is not None:
raise Grid2OpException(EXCEPT_TOO_MUCH_ELEMENTS)
res = self._aux_effect_on_line(line_id)
elif storage_id is not None:
if substation_id is not None:
raise Grid2OpException(
"You can only the inspect the effect of an action on one single element"
)
res = self._aux_effect_on_storage(storage_id)
else:
res = self._aux_effect_on_substation(substation_id)
return res
def get_storage_modif(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Retrieve the modification that will be performed on all the storage unit
Returns
-------
storage_power: ``np.ndarray``
New storage power target (Nan = not modified, otherwise the setpoint given) [in MW]
storage_set_bus: ``np.ndarray``
New bus of the storage units, affected with "set_bus" command (0 = not affected, -1 = disconnected)
storage_change_bus: ``np.ndarray``
New bus of the storage units, affected with "change_bus" command
"""
storage_power = 1.0 * self._storage_power
storage_set_bus = 1 * self._set_topo_vect[self.storage_pos_topo_vect]
storage_change_bus = copy.deepcopy(
self._change_bus_vect[self.storage_pos_topo_vect]
)
return storage_power, storage_set_bus, storage_change_bus
def get_load_modif(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Retrieve the modification that will be performed on all the loads
Returns
-------
load_p: ``np.ndarray``
New load p (Nan = not modified) [in MW]
load_q: ``np.ndarray``
New load q (Nan = not modified) [in MVaR]
load_set_bus: ``np.ndarray``
New bus of the loads, affected with "set_bus" command
load_change_bus: ``np.ndarray``
New bus of the loads, affected with "change_bus" command
"""
load_p = np.full(self.n_load, fill_value=np.NaN, dtype=dt_float)
if "load_p" in self._dict_inj:
load_p[:] = self._dict_inj["load_p"]
load_q = 1.0 * load_p
if "load_q" in self._dict_inj:
load_q[:] = self._dict_inj["load_q"]
load_set_bus = 1 * self._set_topo_vect[self.load_pos_topo_vect]
load_change_bus = copy.deepcopy(self._change_bus_vect[self.load_pos_topo_vect])
return load_p, load_q, load_set_bus, load_change_bus
def get_gen_modif(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Retrieve the modification that will be performed on all the generators
TODO add curtailment and redispatching
Returns
-------
gen_p: ``np.ndarray``
New gen p (Nan = not modified) [in MW]
gen_v: ``np.ndarray``
New gen v setpoint (Nan = not modified) [in kV]
gen_set_bus: ``np.ndarray``
New bus of the generators, affected with "set_bus" command
gen_change_bus: ``np.ndarray``
New bus of the generators, affected with "change_bus" command
"""
gen_p = np.full(self.n_load, fill_value=np.NaN, dtype=dt_float)
if "prod_p" in self._dict_inj:
gen_p[:] = self._dict_inj["prod_p"]
gen_v = 1.0 * gen_p
if "prod_v" in self._dict_inj:
gen_v[:] = self._dict_inj["prod_v"]
gen_set_bus = 1 * self._set_topo_vect[self.gen_pos_topo_vect]
gen_change_bus = copy.deepcopy(self._change_bus_vect[self.gen_pos_topo_vect])
return gen_p, gen_v, gen_set_bus, gen_change_bus
# TODO do the get_line_modif, get_line_or_modif and get_line_ex_modif
    def _aux_affect_object_int(
        self,
        values,
        name_el,
        nb_els,
        name_els,
        inner_vect,
        outer_vect,
        min_val=-1,
        max_val=2,
    ):
        """
        Internal helper: write integer "set" values (e.g. busbars) into ``outer_vect``
        at the positions given by ``inner_vect``, accepting many input formats
        (tuple, full numpy vector, list of pairs, dict). It recurses to reduce the
        list/dict cases to the tuple case.

        NB : this do not set the _modif_set_bus attribute. It is expected to be set in the property setter.
        This is not set here, because it's recursive and if it fails at a point, it would be set for nothing

        values: the new values to set
        name_el: "load"
        nb_els: self.n_load
        inner_vect: self.load_pos_topo_vect
        name_els: self.name_load
        outer_vect: self._set_topo_vect

        will modify outer_vect[inner_vect]
        """
        if isinstance(values, tuple):
            # i provide a tuple: load_id, new_bus
            if len(values) != 2:
                raise IllegalAction(
                    f"when set with tuple, this tuple should have size 2 and be: {name_el}_id, new_bus "
                    f"eg. (3, {max_val})"
                )
            el_id, new_bus = values
            try:
                new_bus = int(new_bus)
            except Exception as exc_:
                raise IllegalAction(
                    f'new_bus should be convertible to integer. Error was : "{exc_}"'
                )
            # validate the bus value first, then the element id
            if new_bus < min_val:
                raise IllegalAction(
                    f"new_bus should be between {min_val} and {max_val}"
                )
            if new_bus > max_val:
                raise IllegalAction(
                    f"new_bus should be between {min_val} and {max_val}"
                )
            # reject float / bool / str ids explicitly before the int() coercion,
            # because int() would silently accept floats and booleans
            if isinstance(el_id, (float, dt_float, np.float64)):
                raise IllegalAction(
                    f"{name_el}_id should be integers you provided float!"
                )
            if isinstance(el_id, (bool, dt_bool)):
                raise IllegalAction(
                    f"{name_el}_id should be integers you provided bool!"
                )
            if isinstance(el_id, str):
                raise IllegalAction(
                    f"{name_el}_id should be integers you provided string "
                    f"(hint: you can use a dictionary to set the bus by name eg. "
                    f"act.{name_el}_set_bus = {{act.name_{name_el}[0] : 1, act.name_{name_el}[1] : "
                    f"{max_val}}} )!"
                )
            try:
                el_id = int(el_id)
            except Exception as exc_:
                raise IllegalAction(
                    f'{name_el}_id should be convertible to integer. Error was : "{exc_}"'
                )
            if el_id < 0:
                raise IllegalAction(
                    f"Impossible to set the bus of a {name_el} with negative id"
                )
            if el_id >= nb_els:
                raise IllegalAction(
                    f"Impossible to set a {name_el} id {el_id} because there are only "
                    f"{nb_els} on the grid (and in python id starts at 0)"
                )
            outer_vect[inner_vect[el_id]] = new_bus
            return
        elif isinstance(values, np.ndarray):
            # a full vector: one value per element, aligned with inner_vect
            # NOTE(review): `isinstance(values.dtype, float)` is always False
            # (a numpy dtype object is not a float instance); the `==` comparisons
            # that follow are what actually catch float/bool dtypes — confirm
            if (
                isinstance(values.dtype, float)
                or values.dtype == dt_float
                or values.dtype == np.float64
            ):
                raise IllegalAction(
                    f"{name_el}_id should be integers you provided float!"
                )
            if isinstance(values.dtype, bool) or values.dtype == dt_bool:
                raise IllegalAction(
                    f"{name_el}_id should be integers you provided boolean!"
                )
            try:
                values = values.astype(dt_int)
            except Exception as exc_:
                raise IllegalAction(
                    f'{name_el}_id should be convertible to integer. Error was : "{exc_}"'
                )
            if np.any(values < min_val):
                raise IllegalAction(
                    f"new_bus should be between {min_val} and {max_val}, found a value < {min_val}"
                )
            if np.any(values > max_val):
                raise IllegalAction(
                    f"new_bus should be between {min_val} and {max_val}, found a value > {max_val}"
                )
            outer_vect[inner_vect] = values
            return
        elif isinstance(values, list):
            # 2 cases: list of tuple, or list (convertible to numpy array)
            if len(values) == nb_els:
                # 2 cases: either i set all loads in the form [(0,..), (1,..), (2,...)]
                # or i should have converted the list to np array
                if isinstance(values[0], tuple):
                    # list of tuple, handled below
                    # TODO can be somewhat "hacked" if the type of the object on the list is not always the same
                    pass
                else:
                    # get back to case where it's a full vector
                    values = np.array(values)
                    self._aux_affect_object_int(
                        values,
                        name_el,
                        nb_els,
                        name_els,
                        inner_vect=inner_vect,
                        outer_vect=outer_vect,
                        min_val=min_val,
                        max_val=max_val,
                    )
                    return
            # expected list of tuple, each tuple is a pair with load_id, new_load_bus: example: [(0, 1), (2,2)]
            for el in values:
                if len(el) != 2:
                    raise IllegalAction(
                        f"If input is a list, it should be a list of pair (el_id, new_bus) "
                        f"eg. [(0, {max_val}), (2, {min_val})]"
                    )
                el_id, new_bus = el
                # names are resolved to ids before recursing on the tuple case
                if isinstance(el_id, str) and name_els is not None:
                    tmp = np.where(name_els == el_id)[0]
                    if len(tmp) == 0:
                        raise IllegalAction(f"No known {name_el} with name {el_id}")
                    el_id = tmp[0]
                self._aux_affect_object_int(
                    (el_id, new_bus),
                    name_el,
                    nb_els,
                    name_els,
                    inner_vect=inner_vect,
                    outer_vect=outer_vect,
                    min_val=min_val,
                    max_val=max_val,
                )
        elif isinstance(values, dict):
            # 2 cases: either key = load_id and value = new_bus or key = load_name and value = new bus
            for key, new_bus in values.items():
                if isinstance(key, str) and name_els is not None:
                    tmp = np.where(name_els == key)[0]
                    if len(tmp) == 0:
                        raise IllegalAction(f"No known {name_el} with name {key}")
                    key = tmp[0]
                self._aux_affect_object_int(
                    (key, new_bus),
                    name_el,
                    nb_els,
                    name_els,
                    inner_vect=inner_vect,
                    outer_vect=outer_vect,
                    min_val=min_val,
                    max_val=max_val,
                )
        else:
            raise IllegalAction(
                f"Impossible to modify the {name_el} bus with inputs {values}. "
                f"Please see the documentation."
            )
    @property
    def load_set_bus(self) -> np.ndarray:
        """
        Allows to retrieve (and affect) the busbars at which each load is **set**.

        It behaves similarly as :attr:`BaseAction.gen_set_bus`. See the help there for more information.
        """
        res = self.set_bus[self.load_pos_topo_vect]
        # returned array is read-only: modifications must go through the setter
        res.flags.writeable = False
        return res
@load_set_bus.setter
def load_set_bus(self, values):
if "set_bus" not in self.authorized_keys:
raise IllegalAction(
'Impossible to modify the load bus (with "set") with this action type.'
)
orig_ = self.load_set_bus
try:
self._aux_affect_object_int(
values,
"load",
self.n_load,
self.name_load,
self.load_pos_topo_vect,
self._set_topo_vect,
)
self._modif_set_bus = True
except Exception as exc_:
self._aux_affect_object_int(
orig_,
"load",
self.n_load,
self.name_load,
self.load_pos_topo_vect,
self._set_topo_vect,
)
raise IllegalAction(
f"Impossible to modify the load bus with your input. Please consult the documentation. "
f'The error was "{exc_}"'
)
    @property
    def gen_set_bus(self) -> np.ndarray:
        """
        Allows to retrieve (and affect) the busbars at which the action **set** the generator units.

        Returns
        -------
        res:
            A vector of integer, of size `act.n_gen` indicating what type of action is performed for
            each generator unit with the convention:

            * 0 the action do not act on this generator
            * -1 the action disconnect the generator
            * 1 the action set the generator to busbar 1
            * 2 the action set the generator to busbar 2

        Examples
        --------
        To retrieve the impact of the action on the generator units, you can do:

        .. code-block:: python

            gen_buses = act.gen_set_bus

        To modify these buses with **set** you can do:

        .. code-block:: python

            # create an environment where i can modify everything
            import numpy as np
            import grid2op
            from grid2op.Action import CompleteAction
            env = grid2op.make("educ_case14_storage", test=True, action_class=CompleteAction)

            # create an action
            act = env.action_space()

            # method 1 : provide the full vector
            act.gen_set_bus = np.ones(act.n_gen, dtype=int)

            # method 2: provide the index of the unit you want to modify
            act.gen_set_bus = (1, 2)

            # method 3: provide a list of the units you want to modify
            act.gen_set_bus = [(1, 2), (0, -1)]

            # method 4: change the generators by their name with a dictionary
            act.gen_set_bus = {"gen_1_0": 2}

        .. note:: The "rule of thumb" to modify an object using "set" method it to provide always
            the ID of an object AND its value. The ID should be an integer (or a name in some cases)
            and the value an integer representing on which busbar to put the new element.

        Notes
        -----
        It is a "property", you don't have to use parenthesis to access it:

        .. code-block:: python

            # valid code
            gen_buses = act.gen_set_bus

            # invalid code, it will crash, do not run
            gen_buses = act.gen_set_bus()
            # end do not run

        And neither should you uses parenthesis to modify it:

        .. code-block:: python

            # valid code
            act.gen_set_bus = [(1, 2), (0, -1)]

            # invalid code, it will crash, do not run
            act.gen_set_bus([(1, 2), (0, -1)])
            # end do not run

        Property cannot be set "directly", you have to use the `act.XXX = ...` syntax. For example:

        .. code-block:: python

            # valid code
            act.gen_set_bus = [(1, 2), (0, -1)]

            # invalid code, it will raise an error, and even if it did not it would have not effect
            # do not run
            act.gen_set_bus[1] = 2
            # end do not run

        .. note:: Be careful not to mix "change" and "set". For "change" you only need to provide the ID of the elements
            you want to change, for "set" you need to provide the ID **AND** where you want to set them.
        """
        res = self.set_bus[self.gen_pos_topo_vect]
        res.flags.writeable = False
        return res
@gen_set_bus.setter
def gen_set_bus(self, values):
if "set_bus" not in self.authorized_keys:
raise IllegalAction(
'Impossible to modify the gen bus (with "set") with this action type.'
)
orig_ = self.gen_set_bus
try:
self._aux_affect_object_int(
values,
"gen",
self.n_gen,
self.name_gen,
self.gen_pos_topo_vect,
self._set_topo_vect,
)
self._modif_set_bus = True
except Exception as exc_:
self._aux_affect_object_int(
orig_,
"gen",
self.n_gen,
self.name_gen,
self.gen_pos_topo_vect,
self._set_topo_vect,
)
raise IllegalAction(
f"Impossible to modify the gen bus with your input. Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
@property
def storage_set_bus(self) -> np.ndarray:
"""
Allows to retrieve (and affect) the busbars at which each storage unit is **set**.
It behaves similarly as :attr:`BaseAction.gen_set_bus`. See the help there for more information.
"""
if "set_storage" not in self.authorized_keys:
raise IllegalAction(type(self).ERR_NO_STOR_SET_BUS)
res = self.set_bus[self.storage_pos_topo_vect]
res.flags.writeable = False
return res
@storage_set_bus.setter
def storage_set_bus(self, values):
if "set_bus" not in self.authorized_keys:
raise IllegalAction(type(self).ERR_NO_STOR_SET_BUS)
if "set_storage" not in self.authorized_keys:
raise IllegalAction(type(self).ERR_NO_STOR_SET_BUS)
orig_ = self.storage_set_bus
try:
self._aux_affect_object_int(
values,
"storage",
self.n_storage,
self.name_storage,
self.storage_pos_topo_vect,
self._set_topo_vect,
)
self._modif_set_bus = True
except Exception as exc_:
self._aux_affect_object_int(
orig_,
"storage",
self.n_storage,
self.name_storage,
self.storage_pos_topo_vect,
self._set_topo_vect,
)
raise IllegalAction(
f"Impossible to modify the storage bus with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
@property
def line_or_set_bus(self) -> np.ndarray:
"""
Allows to retrieve (and affect) the busbars at which the origin side of each powerline is **set**.
It behaves similarly as :attr:`BaseAction.gen_set_bus`. See the help there for more information.
"""
res = self.set_bus[self.line_or_pos_topo_vect]
res.flags.writeable = False
return res
@line_or_set_bus.setter
def line_or_set_bus(self, values):
if "set_bus" not in self.authorized_keys:
raise IllegalAction(
'Impossible to modify the line (origin) bus (with "set") with this action type.'
)
orig_ = self.line_or_set_bus
try:
self._aux_affect_object_int(
values,
self._line_or_str,
self.n_line,
self.name_line,
self.line_or_pos_topo_vect,
self._set_topo_vect,
)
self._modif_set_bus = True
except Exception as exc_:
self._aux_affect_object_int(
orig_,
self._line_or_str,
self.n_line,
self.name_line,
self.line_or_pos_topo_vect,
self._set_topo_vect,
)
raise IllegalAction(
f"Impossible to modify the line origin bus with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
@property
def line_ex_set_bus(self) -> np.ndarray:
"""
Allows to retrieve (and affect) the busbars at which the extremity side of each powerline is **set**.
It behaves similarly as :attr:`BaseAction.gen_set_bus`. See the help there for more information.
"""
res = self.set_bus[self.line_ex_pos_topo_vect]
res.flags.writeable = False
return res
@line_ex_set_bus.setter
def line_ex_set_bus(self, values):
if "set_bus" not in self.authorized_keys:
raise IllegalAction(
'Impossible to modify the line (ex) bus (with "set") with this action type.'
)
orig_ = self.line_ex_set_bus
try:
self._aux_affect_object_int(
values,
self._line_ex_str,
self.n_line,
self.name_line,
self.line_ex_pos_topo_vect,
self._set_topo_vect,
)
self._modif_set_bus = True
except Exception as exc_:
self._aux_affect_object_int(
orig_,
self._line_ex_str,
self.n_line,
self.name_line,
self.line_ex_pos_topo_vect,
self._set_topo_vect,
)
raise IllegalAction(
f"Impossible to modify the line extrmity bus with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
    @property
    def set_bus(self) -> np.ndarray:
        """
        Allows to retrieve (and affect) the busbars at which any element is **set**.

        It behaves similarly as :attr:`BaseAction.gen_set_bus` and can be use to modify any elements type
        as opposed to the more specific :attr:`BaseAction.gen_set_bus`, :attr:`BaseAction.load_set_bus`,
        :attr:`BaseAction.line_or_set_bus`, :attr:`BaseAction.line_ex_set_bus` or
        :attr:`BaseAction.storage_set_bus` that are specific to a certain type of objects.

        Notes
        -----
        For performance reasons, it do not allow to modify the elements by there names.

        The order of each elements are given in the :attr:`grid2op.Space.GridObjects.gen_pos_topo_vect`,
        :attr:`grid2op.Space.GridObjects.load_pos_topo_vect`,
        :attr:`grid2op.Space.GridObjects.line_or_pos_topo_vect`,
        :attr:`grid2op.Space.GridObjects.line_ex_pos_topo_vect` or
        :attr:`grid2op.Space.GridObjects.storage_pos_topo_vect`

        For example:

        .. code-block:: python

            act.set_bus = [(0, 1), (1, -1), (3, 2)]

        Will:

        * set to bus 1 the (unique) element for which \*_pos_topo_vect is 0
        * disconnect the (unique) element for which \*_pos_topo_vect is 1
        * set to bus 2 the (unique) element for which \*_pos_topo_vect is 3

        You can use the documentation page :ref:`modeled-elements-module` for more information about which
        element correspond to what component of this vector.
        """
        if "set_bus" not in self.authorized_keys:
            raise IllegalAction(
                'Impossible to modify the bus (with "set") with this action type.'
            )
        # `1 *` makes a copy so that callers cannot mutate the internal vector
        res = 1 * self._set_topo_vect
        res.flags.writeable = False
        return res
@set_bus.setter
def set_bus(self, values):
if "set_bus" not in self.authorized_keys:
raise IllegalAction(
'Impossible to modify the bus (with "set") with this action type.'
)
orig_ = self.set_bus
try:
self._aux_affect_object_int(
values,
"",
self.dim_topo,
None,
np.arange(self.dim_topo),
self._set_topo_vect,
)
self._modif_set_bus = True
except Exception as exc_:
self._aux_affect_object_int(
orig_,
"",
self.dim_topo,
None,
np.arange(self.dim_topo),
self._set_topo_vect,
)
raise IllegalAction(
f"Impossible to modify the bus with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
    @property
    def line_set_status(self) -> np.ndarray:
        """
        Property to set the status of the powerline.

        It behave similarly than :attr:`BaseAction.gen_set_bus` but with the following convention:

        * 0 still means it is not affected
        * +1 means that we force the connection on a powerline
        * -1 means we force the disconnection of a powerline

        Notes
        -----
        Setting a status of a powerline to +2 will raise an error.

        Examples
        ---------
        For example:

        .. code-block:: python

            act.line_set_status = [(0, 1), (1, -1), (3, 1)]

        Will force the reconnection of lines id 0 and 3 and force the disconnection of line id 1.
        """
        if "set_line_status" not in self.authorized_keys:
            raise IllegalAction(
                'Impossible to modify the status of powerlines (with "set") with this action type.'
            )
        # `1 *` makes a copy so that callers cannot mutate the internal vector
        res = 1 * self._set_line_status
        res.flags.writeable = False
        return res
@line_set_status.setter
def line_set_status(self, values):
if "set_line_status" not in self.authorized_keys:
raise IllegalAction(
'Impossible to modify the status of powerlines (with "set") with this action type.'
)
orig_ = 1 * self._set_line_status
try:
self._aux_affect_object_int(
values,
"line status",
self.n_line,
self.name_line,
np.arange(self.n_line),
self._set_line_status,
max_val=1,
)
self._modif_set_status = True
except Exception as exc_:
self._aux_affect_object_int(
orig_,
"line status",
self.n_line,
self.name_line,
np.arange(self.n_line),
self._set_line_status,
max_val=1,
)
raise IllegalAction(
f"Impossible to modify the line status with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
    @property
    def set_line_status(self) -> np.ndarray:
        """Another name for :attr:`BaseAction.line_set_status`."""
        return self.line_set_status
    @set_line_status.setter
    def set_line_status(self, values):
        # alias setter: delegates to the canonical property `line_set_status`
        self.line_set_status = values
    @property
    def change_line_status(self) -> np.ndarray:
        """Another name for :attr:`BaseAction.line_change_status`."""
        return self.line_change_status
    @change_line_status.setter
    def change_line_status(self, values):
        # alias setter: delegates to the canonical property `line_change_status`
        self.line_change_status = values
def _aux_affect_object_bool(
self, values, name_el, nb_els, name_els, inner_vect, outer_vect
):
"""
NB : this do not set the _modif_set_bus attribute. It is expected to be set in the property setter.
This is not set here, because it's recursive and if it fails at a point, it would be set for nothing
values: the new values to set
name_el: "load"
nb_els: self.n_load
inner_vect: self.load_pos_topo_vect
name_els: self.name_load
outer_vect: self._change_bus_vect
will modify outer_vect[inner_vect]
"""
if isinstance(values, bool):
# to make it explicit, tuple modifications are deactivated
raise IllegalAction(
f"Impossible to change a {name_el} with a tuple input. Accepted inputs are:"
f"int, list of int, list of string, array of int, array of bool, set of int,"
f"set of string"
)
elif isinstance(values, float):
# to make it explicit, tuple modifications are deactivated
raise IllegalAction(
f"Impossible to change a {name_el} with a tuple input. Accepted inputs are:"
f"int, list of int, list of string, array of int, array of bool, set of int,"
f"set of string"
)
elif isinstance(values, (int, dt_int, np.int64)):
# i provide an int: load_id
try:
el_id = int(values)
except Exception as exc_:
raise IllegalAction(
f'{name_el}_id should be convertible to integer. Error was : "{exc_}"'
)
if el_id < 0:
raise IllegalAction(
f"Impossible to change a negative {name_el} with negative id"
)
if el_id >= nb_els:
raise IllegalAction(
f"Impossible to change a {name_el} id {el_id} because there are only "
f"{nb_els} on the grid (and in python id starts at 0)"
)
outer_vect[inner_vect[el_id]] = not outer_vect[inner_vect[el_id]]
return
elif isinstance(values, tuple):
# to make it explicit, tuple modifications are deactivated
raise IllegalAction(
f"Impossible to change a {name_el} with a tuple input. Accepted inputs are:"
f"int, list of int, list of string, array of int, array of bool, set of int,"
f"set of string"
)
elif isinstance(values, np.ndarray):
# either the int id i need to change or the full value.
if (
isinstance(values.dtype, bool)
or values.dtype == dt_bool
or values.dtype == bool
):
# so i change by giving the full vector
if values.shape[0] != nb_els:
raise IllegalAction(
f"If provided with bool array, the number of components of the vector"
f"should match the total number of {name_el}. You provided a vector "
f"with size {values.shape[0]} and there are {nb_els} {name_el} "
f"on the grid."
)
outer_vect[inner_vect[values]] = ~outer_vect[inner_vect[values]]
return
# this is the case where i give the integers i want to change
try:
values = values.astype(dt_int)
except Exception as exc_:
raise IllegalAction(
f'{name_el}_id should be convertible to integer. Error was : "{exc_}"'
)
if np.any(values < 0):
raise IllegalAction(
f"Impossible to change a negative {name_el} with negative id"
)
if np.any(values > nb_els):
raise IllegalAction(
f"Impossible to change a {name_el} id because there are only "
f"{nb_els} on the grid and you wanted to change an element with an "
f"id > {nb_els} (in python id starts at 0)"
)
outer_vect[inner_vect[values]] = ~outer_vect[inner_vect[values]]
return
elif isinstance(values, list):
# 1 case only: list of int
# (note: i cannot convert to numpy array other I could mix types...)
for el_id_or_name in values:
if isinstance(el_id_or_name, str):
tmp = np.where(name_els == el_id_or_name)[0]
if len(tmp) == 0:
raise IllegalAction(
f'No known {name_el} with name "{el_id_or_name}"'
)
el_id = tmp[0]
elif isinstance(el_id_or_name, (bool, dt_bool)):
# somehow python considers bool are int...
raise IllegalAction(
f"If a list is provided, it is only valid with integer found "
f"{type(el_id_or_name)}."
)
elif isinstance(el_id_or_name, (int, dt_int, np.int64)):
el_id = el_id_or_name
else:
raise IllegalAction(
f"If a list is provided, it is only valid with integer found "
f"{type(el_id_or_name)}."
)
el_id = int(el_id)
self._aux_affect_object_bool(
el_id,
name_el,
nb_els,
name_els,
inner_vect=inner_vect,
outer_vect=outer_vect,
)
elif isinstance(values, set):
# 2 cases: either set of load_id or set of load_name
values = list(values)
self._aux_affect_object_bool(
values,
name_el,
nb_els,
name_els,
inner_vect=inner_vect,
outer_vect=outer_vect,
)
else:
raise IllegalAction(
f"Impossible to modify the {name_el} with inputs {values}. "
f"Please see the documentation."
)
    @property
    def change_bus(self) -> np.ndarray:
        """
        Allows to retrieve (and affect) the busbars at which any element is **change**.

        It behaves similarly as :attr:`BaseAction.gen_change_bus` and can be use to modify any elements type
        as opposed to the more specific :attr:`BaseAction.gen_change_bus`, :attr:`BaseAction.load_change_bus`,
        :attr:`BaseAction.line_or_change_bus`, :attr:`BaseAction.line_ex_change_bus` or
        :attr:`BaseAction.storage_change_bus` that are specific to a certain type of objects.

        Notes
        -----
        For performance reasons, it do not allow to modify the elements by there names.

        The order of each elements are given in the :attr:`grid2op.Space.GridObjects.gen_pos_topo_vect`,
        :attr:`grid2op.Space.GridObjects.load_pos_topo_vect`,
        :attr:`grid2op.Space.GridObjects.line_or_pos_topo_vect`,
        :attr:`grid2op.Space.GridObjects.line_ex_pos_topo_vect` or
        :attr:`grid2op.Space.GridObjects.storage_pos_topo_vect`

        For example:

        .. code-block:: python

            act.change_bus = [0, 1, 3]

        Will:

        * change the bus of the (unique) element for which \*_pos_topo_vect is 0
        * change the bus of (unique) element for which \*_pos_topo_vect is 1
        * change the bus of (unique) element for which \*_pos_topo_vect is 3

        You can use the documentation page :ref:`modeled-elements-module` for more information about which
        element correspond to what component of this "vector".
        """
        # deepcopy so callers cannot mutate the internal vector
        res = copy.deepcopy(self._change_bus_vect)
        res.flags.writeable = False
        return res
@change_bus.setter
def change_bus(self, values):
orig_ = self.change_bus
try:
self._aux_affect_object_bool(
values,
"",
self.dim_topo,
None,
np.arange(self.dim_topo),
self._change_bus_vect,
)
self._modif_change_bus = True
except Exception as exc_:
self._aux_affect_object_bool(
orig_,
"",
self.dim_topo,
None,
np.arange(self.dim_topo),
self._change_bus_vect,
)
raise IllegalAction(
f"Impossible to modify the bus with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
@property
def load_change_bus(self) -> np.ndarray:
"""
Allows to retrieve (and affect) the busbars at which the loads is **changed**.
It behaves similarly as :attr:`BaseAction.gen_change_bus`. See the help there for more information.
"""
res = self.change_bus[self.load_pos_topo_vect]
res.flags.writeable = False
return res
@load_change_bus.setter
def load_change_bus(self, values):
if "change_bus" not in self.authorized_keys:
raise IllegalAction(
'Impossible to modify the load bus (with "change") with this action type.'
)
orig_ = self.load_change_bus
try:
self._aux_affect_object_bool(
values,
"load",
self.n_load,
self.name_load,
self.load_pos_topo_vect,
self._change_bus_vect,
)
self._modif_change_bus = True
except Exception as exc_:
self._change_bus_vect[self.load_pos_topo_vect] = orig_
raise IllegalAction(
f"Impossible to modify the load bus with your input. Please consult the documentation. "
f'The error was "{exc_}"'
)
    @property
    def gen_change_bus(self) -> np.ndarray:
        """
        Allows to retrieve (and affect) the busbars at which the action **change** the generator units.

        Returns
        -------
        res:
            A vector of bool, of size `act.n_gen` indicating what type of action is performed for
            each generator unit with the convention:

            * ``False`` this generator is not affected by any "change" action
            * ``True`` this generator bus is affected by a "change" action. If it was
              on bus 1, it will be moved to bus 2, if it was on bus 2 it will be moved to bus 1 (
              and if it was disconnected it will stay disconnected)

        Examples
        --------
        To retrieve the impact of the action on the generator units, you can do:

        .. code-block:: python

            gen_buses = act.gen_change_bus

        To modify these buses you can do:

        .. code-block:: python

            # create an environment where i can modify everything
            import numpy as np
            import grid2op
            from grid2op.Action import CompleteAction
            env = grid2op.make("educ_case14_storage", test=True, action_class=CompleteAction)

            # create an action
            act = env.action_space()

            # method 1 : provide the full vector
            act.gen_change_bus = np.ones(act.n_gen, dtype=bool)

            # method 2: provide the index of the unit you want to modify
            act.gen_change_bus = 1

            # method 3: provide a list of the units you want to modify
            act.gen_change_bus = [1, 2]

            # method 4: change the generators by their name with a set
            act.gen_change_bus = {"gen_1_0"}

        .. note:: The "rule of thumb" to modify an object using "change" method it to provide always
            the ID of an object. The ID should be an integer (or a name in some cases). It does not
            make any sense to provide a "value" associated to an ID: either you change it, or not.

        Notes
        -----
        It is a "property", you don't have to use parenthesis to access it:

        .. code-block:: python

            # valid code
            gen_buses = act.gen_change_bus

            # invalid code, it will crash, do not run
            gen_buses = act.gen_change_bus()
            # end do not run

        And neither should you uses parenthesis to modify it:

        .. code-block:: python

            # valid code
            act.gen_change_bus = [1, 2, 3]

            # invalid code, it will crash, do not run
            act.gen_change_bus([1, 2, 3])
            # end do not run

        Property cannot be set "directly", you have to use the `act.XXX = ..` syntax. For example:

        .. code-block:: python

            # valid code
            act.gen_change_bus = [1, 3, 4]

            # invalid code, it will raise an error, and even if it did not it would have not effect
            # do not run
            act.gen_change_bus[1] = True
            # end do not run

        .. note:: Be careful not to mix "change" and "set". For "change" you only need to provide the ID of the elements
            you want to change, for "set" you need to provide the ID **AND** where you want to set them.
        """
        res = self.change_bus[self.gen_pos_topo_vect]
        res.flags.writeable = False
        return res
@gen_change_bus.setter
def gen_change_bus(self, values):
if "change_bus" not in self.authorized_keys:
raise IllegalAction(
'Impossible to modify the gen bus (with "change") with this action type.'
)
orig_ = self.gen_change_bus
try:
self._aux_affect_object_bool(
values,
"gen",
self.n_gen,
self.name_gen,
self.gen_pos_topo_vect,
self._change_bus_vect,
)
self._modif_change_bus = True
except Exception as exc_:
self._change_bus_vect[self.gen_pos_topo_vect] = orig_
raise IllegalAction(
f"Impossible to modify the gen bus with your input. Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
@property
def storage_change_bus(self) -> np.ndarray:
"""
Allows to retrieve (and affect) the busbars at which the storage units are **changed**.
It behaves similarly as :attr:`BaseAction.gen_change_bus`. See the help there for more information.
"""
res = self.change_bus[self.storage_pos_topo_vect]
res.flags.writeable = False
return res
@storage_change_bus.setter
def storage_change_bus(self, values):
if "change_bus" not in self.authorized_keys:
raise IllegalAction(
'Impossible to modify the storage bus (with "change") with this action type.'
)
if "set_storage" not in self.authorized_keys:
raise IllegalAction(
"Impossible to modify the storage units with this action type."
)
orig_ = self.storage_change_bus
try:
self._aux_affect_object_bool(
values,
"storage",
self.n_storage,
self.name_storage,
self.storage_pos_topo_vect,
self._change_bus_vect,
)
self._modif_change_bus = True
except Exception as exc_:
self._change_bus_vect[self.storage_pos_topo_vect] = orig_
raise IllegalAction(
f"Impossible to modify the storage bus with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
@property
def line_or_change_bus(self) -> np.ndarray:
"""
Allows to retrieve (and affect) the busbars at which the origin side of powerlines are **changed**.
It behaves similarly as :attr:`BaseAction.gen_change_bus`. See the help there for more information.
"""
res = self.change_bus[self.line_or_pos_topo_vect]
res.flags.writeable = False
return res
@line_or_change_bus.setter
def line_or_change_bus(self, values):
if "change_bus" not in self.authorized_keys:
raise IllegalAction(
'Impossible to modify the line (origin) bus (with "change") with this action type.'
)
orig_ = self.line_or_change_bus
try:
self._aux_affect_object_bool(
values,
self._line_or_str,
self.n_line,
self.name_line,
self.line_or_pos_topo_vect,
self._change_bus_vect,
)
self._modif_change_bus = True
except Exception as exc_:
self._change_bus_vect[self.line_or_pos_topo_vect] = orig_
raise IllegalAction(
f"Impossible to modify the line origin bus with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
@property
def line_ex_change_bus(self) -> np.ndarray:
"""
Allows to retrieve (and affect) the busbars at which the extremity side of powerlines are **changed**.
It behaves similarly as :attr:`BaseAction.gen_change_bus`. See the help there for more information.
"""
res = self.change_bus[self.line_ex_pos_topo_vect]
res.flags.writeable = False
return res
@line_ex_change_bus.setter
def line_ex_change_bus(self, values):
if "change_bus" not in self.authorized_keys:
raise IllegalAction(
'Impossible to modify the line (ex) bus (with "change") with this action type.'
)
orig_ = self.line_ex_change_bus
try:
self._aux_affect_object_bool(
values,
self._line_ex_str,
self.n_line,
self.name_line,
self.line_ex_pos_topo_vect,
self._change_bus_vect,
)
self._modif_change_bus = True
except Exception as exc_:
self._change_bus_vect[self.line_ex_pos_topo_vect] = orig_
raise IllegalAction(
f"Impossible to modify the line extrmity bus with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
@property
def line_change_status(self) -> np.ndarray:
"""
Property to set the status of the powerline.
It behave similarly than :attr:`BaseAction.gen_change_bus` but with the following convention:
* ``False`` will not affect the powerline
* ``True`` will change the status of the powerline. If it was connected, it will attempt to
disconnect it, if it was disconnected, it will attempt to reconnect it.
"""
res = copy.deepcopy(self._switch_line_status)
res.flags.writeable = False
return res
@line_change_status.setter
def line_change_status(self, values):
if "change_line_status" not in self.authorized_keys:
raise IllegalAction(
'Impossible to modify the status of powerlines (with "change") with this action type.'
)
orig_ = 1 * self._switch_line_status
try:
self._aux_affect_object_bool(
values,
"line status",
self.n_line,
self.name_line,
np.arange(self.n_line),
self._switch_line_status,
)
self._modif_change_status = True
except Exception as exc_:
self._switch_line_status[:] = orig_
raise IllegalAction(
f"Impossible to modify the line status with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
    @property
    def raise_alarm(self) -> np.ndarray:
        """
        .. warning::
            /!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\

        Property to raise alarm.

        If you set it to ``True`` an alarm is raised for the given area, otherwise None are raised.

        Notes
        -----
        In order to be able to "cancel" an alarm properly, if you set "two consecutive alarm" on the same area
        it will behave as if you had set none:

        .. code-block:: python

            import grid2op
            env_name = "l2rpn_icaps_2021"  # chose an environment that supports the alarm feature
            env = grid2op.make(env_name)
            act = env.action_space()

            act.raise_alarm = [0]
            # this act will raise an alarm on the area 0

            act.raise_alarm = [0]
            # this second call will "cancel" the alarm for convenience

        This might be counter intuitive
        """
        # return a locked copy: callers must use the setter to modify alarms
        res = copy.deepcopy(self._raise_alarm)
        res.flags.writeable = False
        return res
@raise_alarm.setter
def raise_alarm(self, values):
"""
.. warning::
/!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\
"""
if "raise_alarm" not in self.authorized_keys:
raise IllegalAction("Impossible to send alarms with this action type.")
orig_ = copy.deepcopy(self._raise_alarm)
try:
self._aux_affect_object_bool(
values,
"raise alarm",
self.dim_alarms,
self.alarms_area_names,
np.arange(self.dim_alarms),
self._raise_alarm,
)
self._modif_alarm = True
except Exception as exc_:
self._raise_alarm[:] = orig_
raise IllegalAction(
f"Impossible to modify the alarm with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
    @property
    def raise_alert(self) -> np.ndarray:
        """
        Property to raise alert.

        If you set it to ``True`` an alert is raised for the given line, otherwise no alert is raised.

        Notes
        -----
        .. code-block:: python

            import grid2op
            env_name = "l2rpn_idf_2023"  # chose an environment that supports the alert feature
            env = grid2op.make(env_name)
            act = env.action_space()

            act.raise_alert = [0]
            # this act will raise an alert on the powerline attackable 0 (powerline concerned will be action.alertable_line_ids[0])
        """
        # return a locked copy: callers must use the setter to modify alerts
        res = copy.deepcopy(self._raise_alert)
        res.flags.writeable = False
        return res
@raise_alert.setter
def raise_alert(self, values):
if "raise_alert" not in self.authorized_keys:
raise IllegalAction("Impossible to send alerts with this action type.")
orig_ = copy.deepcopy(self._raise_alert)
try:
self._aux_affect_object_bool(
values,
"raise alert",
self.dim_alerts,
self.alertable_line_names,
np.arange(self.dim_alerts),
self._raise_alert,
)
self._modif_alert = True
except Exception as exc_:
self._raise_alert[:] = orig_
raise IllegalAction(
f"Impossible to modify the alert with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
def _aux_affect_object_float(
self,
values,
name_el,
nb_els,
name_els,
inner_vect,
outer_vect,
):
"""
INTERNAL USE ONLY
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
NB : this do not set the _modif_set_bus attribute. It is expected to be set in the property setter.
This is not set here, because it's recursive and if it fails at a point, it would be set for nothing
values: the new values to set
name_el: "load"
nb_els: self.n_load
inner_vect: self.load_pos_topo_vect
name_els: self.name_load
outer_vect: self._set_topo_vect
will modify outer_vect[inner_vect]
"""
if isinstance(values, (bool, dt_bool)):
raise IllegalAction(
f"Impossible to set {name_el} values with a single boolean."
)
elif isinstance(values, (int, dt_int, np.int64)):
raise IllegalAction(
f"Impossible to set {name_el} values with a single integer."
)
elif isinstance(values, (float, dt_float, np.float64)):
raise IllegalAction(
f"Impossible to set {name_el} values with a single float."
)
elif isinstance(values, tuple):
# i provide a tuple: load_id, new_vals
if len(values) != 2:
raise IllegalAction(
f"when set with tuple, this tuple should have size 2 and be: {name_el}_id, new_bus "
f"eg. (3, 0.0)"
)
el_id, new_val = values
if isinstance(new_val, (bool, dt_bool)):
raise IllegalAction(
f"new_val should be a float. A boolean was provided"
)
try:
new_val = float(new_val)
except Exception as exc_:
raise IllegalAction(
f'new_val should be convertible to a float. Error was : "{exc_}"'
)
if isinstance(el_id, (float, dt_float, np.float64)):
raise IllegalAction(
f"{name_el}_id should be integers you provided float!"
)
if isinstance(el_id, (bool, dt_bool)):
raise IllegalAction(
f"{name_el}_id should be integers you provided bool!"
)
if isinstance(el_id, str):
raise IllegalAction(
f"{name_el}_id should be integers you provided string "
f"(hint: you can use a dictionary to set the bus by name eg. "
f"act.{name_el}_set_bus = {{act.name_{name_el}[0] : 1, act.name_{name_el}[1] : "
f"0.0}} )!"
)
try:
el_id = int(el_id)
except Exception as exc_:
raise IllegalAction(
f'{name_el}_id should be convertible to integer. Error was : "{exc_}"'
)
if el_id < 0:
raise IllegalAction(
f"Impossible to set the bus of a {name_el} with negative id"
)
if el_id >= nb_els:
raise IllegalAction(
f"Impossible to set a {name_el} id {el_id} because there are only "
f"{nb_els} on the grid (and in python id starts at 0)"
)
if np.isfinite(new_val):
outer_vect[inner_vect[el_id]] = new_val
return
elif isinstance(values, np.ndarray):
if (
isinstance(values.dtype, int)
or values.dtype == dt_int
or values.dtype == np.int64
):
# for this the user explicitly casted it as integer, this won't work.
raise IllegalAction(f"{name_el}_id should be floats you provided int!")
if isinstance(values.dtype, bool) or values.dtype == dt_bool:
raise IllegalAction(
f"{name_el}_id should be floats you provided boolean!"
)
try:
values = values.astype(dt_float)
except Exception as exc_:
raise IllegalAction(
f'{name_el}_id should be convertible to float. Error was : "{exc_}"'
)
indx_ok = np.isfinite(values)
outer_vect[inner_vect[indx_ok]] = values[indx_ok]
return
elif isinstance(values, list):
# 2 cases: list of tuple, or list (convertible to numpy array)
if len(values) == nb_els:
# 2 cases: either i set all loads in the form [(0,..), (1,..), (2,...)]
# or i should have converted the list to np array
if isinstance(values, (bool, dt_bool)):
raise IllegalAction(
f"Impossible to set {name_el} values with a single boolean."
)
elif isinstance(values, (int, dt_int, np.int64)):
raise IllegalAction(
f"Impossible to set {name_el} values with a single integer."
)
elif isinstance(values, (float, dt_float, np.float64)):
raise IllegalAction(
f"Impossible to set {name_el} values with a single float."
)
elif isinstance(values[0], tuple):
# list of tuple, handled below
# TODO can be somewhat "hacked" if the type of the object on the list is not always the same
pass
else:
# get back to case where it's a full vector
values = np.array(values)
self._aux_affect_object_float(
values,
name_el,
nb_els,
name_els,
inner_vect=inner_vect,
outer_vect=outer_vect,
)
return
# expected list of tuple, each tuple is a pair with load_id, new_vals: example: [(0, -1.0), (2,2.7)]
for el in values:
if len(el) != 2:
raise IllegalAction(
f"If input is a list, it should be a list of pair (el_id, new_val) "
f"eg. [(0, 1.0), (2, 2.7)]"
)
el_id, new_val = el
if isinstance(el_id, str):
tmp = np.where(name_els == el_id)[0]
if len(tmp) == 0:
raise IllegalAction(f"No known {name_el} with name {el_id}")
el_id = tmp[0]
self._aux_affect_object_float(
(el_id, new_val),
name_el,
nb_els,
name_els,
inner_vect=inner_vect,
outer_vect=outer_vect,
)
elif isinstance(values, dict):
# 2 cases: either key = load_id and value = new_bus or key = load_name and value = new bus
for key, new_val in values.items():
if isinstance(key, str):
tmp = np.where(name_els == key)[0]
if len(tmp) == 0:
raise IllegalAction(f"No known {name_el} with name {key}")
key = tmp[0]
self._aux_affect_object_float(
(key, new_val),
name_el,
nb_els,
name_els,
inner_vect=inner_vect,
outer_vect=outer_vect,
)
else:
raise IllegalAction(
f"Impossible to modify the {name_el} with inputs {values}. "
f"Please see the documentation."
)
@property
def redispatch(self) -> np.ndarray:
"""
Allows to retrieve (and affect) the redispatching setpoint of the generators.
Returns
-------
res:
A vector of integer, of size `act.n_gen` indicating what type of action is performed for
each generator units. Note that these are the setpoint. The actual redispatching that will
be available might be different. See :ref:`generator-mod-el` for more information.
Examples
--------
To retrieve the impact of the action on the generator unit, you can do:
.. code-block:: python
redisp = act.redispatch
For each generator it will give the amount of redispatch this action wants to perform.
To change the setpoint of the redispatching, you can do:
.. code-block:: python
# create an environment where i can modify everything
import numpy as np
import grid2op
from grid2op.Action import CompleteAction
env = grid2op.make("educ_case14_storage", test=True, action_class=CompleteAction)
# create an action
act = env.action_space()
# method 1 : provide the full vector
act.redispatch = np.ones(act.n_gen, dtype=float) # only floats are accepted !
# method 2: provide the index of the unit you want to modify
act.redispatch = (1, 2.5)
# method 3: provide a list of the units you want to modify
act.redispatch = [(1, 2.5), (0, -1.3)]
# method 4: change the generators by their name with a dictionary
act.redispatch = {"gen_1_0": 2.0}
.. note:: The "rule of thumb" to perform redispatching is to provide always
the ID of an object AND its value. The ID should be an integer (or a name in some cases)
and the value a float representing what amount of redispatching you want to perform on the
unit with the associated ID.
Notes
-----
It is a "property", you don't have to use parenthesis to access it:
.. code-block:: python
# valid code
redisp = act.redispatch
# invalid code, it will crash, do not run
redisp = act.redispatch()
# end do not run
And neither should you uses parenthesis to modify it:
.. code-block:: python
# valid code
act.redispatch = [(1, 2.5), (0, -1.3)]
# invalid code, it will crash, do not run
act.redispatch([(1, 2.5), (0, -1.3)])
# end do not run
Property cannot be set "directly", you have to use the `act.XXX = ..` syntax. For example:
.. code-block:: python
# valid code
act.redispatch = [(1, 2.5), (0, -1.3)]
# invalid code, it will raise an error, and even if it did not it would have not effect
# do not run
act.redispatch[1] = 2.5
# end do not run
.. note:: Be careful not to mix action to set something on a bus bar (where the values are integer,
like "set_bus" or "set_status")
and continuous action (where the values are float, like "redispatch" or "storage_p")
"""
res = 1.0 * self._redispatch
res.flags.writeable = False
return res
@redispatch.setter
def redispatch(self, values):
if "redispatch" not in self.authorized_keys:
raise IllegalAction(
"Impossible to perform redispatching with this action type."
)
orig_ = self.redispatch
try:
self._aux_affect_object_float(
values,
"redispatching",
self.n_gen,
self.name_gen,
np.arange(self.n_gen),
self._redispatch,
)
self._modif_redispatch = True
except Exception as exc_:
self._redispatch[:] = orig_
raise IllegalAction(
f"Impossible to modify the redispatching with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
@property
def storage_p(self) -> np.ndarray:
"""
Allows to modify the setpoint of the storage units.
It behaves similarly as :attr:`BaseAction.redispatch`. See the help there for more information.
Notes
------
The "load convention" is used for storage units. This means that:
- if you ask a positive value, the storage unit will charge, power will be "taken" from the
grid to the unit. The unit in this case will behave like a *load*
- if you ask a negative value, the storage unit will discharge, power will be injected from
the unit to the grid. The unit, in this case, will behave like a *generator*.
For more information, feel free to consult the documentation :ref:`storage-mod-el` where more
details are given about the modeling ot these storage units.
"""
res = 1.0 * self._storage_power
res.flags.writeable = False
return res
@storage_p.setter
def storage_p(self, values):
if "set_storage" not in self.authorized_keys:
raise IllegalAction(
"Impossible to perform storage action with this action type."
)
if self.n_storage == 0:
raise IllegalAction(
"Impossible to perform storage action with this grid (no storage unit"
"available)"
)
orig_ = self.storage_p
try:
self._aux_affect_object_float(
values,
"storage",
self.n_storage,
self.name_storage,
np.arange(self.n_storage),
self._storage_power,
)
self._modif_storage = True
except Exception as exc_:
self._storage_power[:] = orig_
raise IllegalAction(
f"Impossible to modify the storage active power with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
@property
def set_storage(self) -> np.ndarray:
"""Another name for the property :func:`BaseAction.storage_p`"""
return self.storage_p
@set_storage.setter
def set_storage(self, values):
self.storage_p = values
@property
def curtail(self) -> np.ndarray:
"""
Allows to perfom some curtailment on some generators
It behaves similarly as :attr:`BaseAction.redispatch`. See the help there for more information.
For more information, feel free to consult the documentation :ref:`generator-mod-el` where more
details are given about the modeling ot these storage units.
"""
res = 1.0 * self._curtail
res.flags.writeable = False
return res
@curtail.setter
def curtail(self, values):
if "curtail" not in self.authorized_keys:
raise IllegalAction(
"Impossible to perform curtailment action with this action type."
)
if not self.redispatching_unit_commitment_availble:
raise IllegalAction(
"Impossible to perform curtailment as it is not possible to compute redispatching. "
'Your backend do not support "redispatching_unit_commitment_availble"'
)
orig_ = self.curtail
try:
self._aux_affect_object_float(
values,
"curtailment",
self.n_gen,
self.name_gen,
np.arange(self.n_gen),
self._curtail,
)
self._modif_curtailment = True
except Exception as exc_:
self._curtail[:] = orig_
raise IllegalAction(
f"Impossible to perform curtailment with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
def _aux_aux_convert_and_check_np_array(self, array_):
try:
array_ = np.array(array_)
except Exception as exc_:
raise IllegalAction(
f"When setting the topology by substation and by giving a tuple, the "
f"second element of the tuple should be convertible to a numpy "
f'array of type int. Error was: "{exc_}"'
)
if (
isinstance(array_.dtype, (bool, dt_bool))
or array_.dtype == dt_bool
or array_.dtype == bool
):
raise IllegalAction(
"To set substation topology, you need a vector of integers, and not a vector "
"of bool."
)
elif (
isinstance(array_.dtype, (float, dt_float))
or array_.dtype == dt_float
or array_.dtype == float
):
raise IllegalAction(
"To set substation topology, you need a vector of integers, and not a vector "
"of float."
)
array_ = array_.astype(dt_int)
if np.any(array_ < -1):
raise IllegalAction(
f"Impossible to set element to bus {np.min(array_)}. Buses must be "
f"-1, 0, 1 or 2."
)
if np.any(array_ > 2):
raise IllegalAction(
f"Impossible to set element to bus {np.max(array_)}. Buses must be "
f"-1, 0, 1 or 2."
)
return array_
def _aux_set_bus_sub(self, values):
if isinstance(values, (bool, dt_bool)):
raise IllegalAction(
"Impossible to modify bus by substation with a single bool."
)
elif isinstance(values, (int, dt_int, np.int64)):
raise IllegalAction(
"Impossible to modify bus by substation with a single integer."
)
elif isinstance(values, (float, dt_float, np.float64)):
raise IllegalAction(
"Impossible to modify bus by substation with a single float."
)
elif isinstance(values, np.ndarray):
# full topo vect
if values.shape[0] != self.dim_topo:
raise IllegalAction(
"Impossible to modify bus when providing a full topology vector "
"that has not the right "
)
if values.dtype == dt_bool or values.dtype == bool:
raise IllegalAction(
"When using a full vector for setting the topology, it should be "
"of integer types"
)
values = self._aux_aux_convert_and_check_np_array(values)
self._set_topo_vect[:] = values
elif isinstance(values, tuple):
# should be a tuple (sub_id, new_topo)
sub_id, topo_repr, nb_el = self._check_for_right_vectors_sub(values)
topo_repr = self._aux_aux_convert_and_check_np_array(topo_repr)
start_ = np.sum(self.sub_info[:sub_id])
end_ = start_ + nb_el
self._set_topo_vect[start_:end_] = topo_repr
elif isinstance(values, list):
if len(values) == self.dim_topo:
# if list is the size of the full topo vect, it's a list representing it
values = self._aux_aux_convert_and_check_np_array(values)
self._aux_set_bus_sub(values)
return
# otherwise it should be a list of tuples: [(sub_id, topo), (sub_id, topo)]
for el in values:
if not isinstance(el, tuple):
raise IllegalAction(
"When provided a list, it should be a list of tuples: "
"[(sub_id, topo), (sub_id, topo), ... ] "
)
self._aux_set_bus_sub(el)
elif isinstance(values, dict):
for sub_id, topo_repr in values.items():
sub_id = self._aux_sub_when_dict_get_id(sub_id)
self._aux_set_bus_sub((sub_id, topo_repr))
else:
raise IllegalAction(
"Impossible to set the topology by substation with your input."
"Please consult the documentation."
)
@property
def sub_set_bus(self) -> np.ndarray:
# TODO doc
res = 1 * self.set_bus
res.flags.writeable = False
return res
@sub_set_bus.setter
def sub_set_bus(self, values):
if "set_bus" not in self.authorized_keys:
raise IllegalAction(
'Impossible to modify the substation bus (with "set") with this action type.'
)
orig_ = self.sub_set_bus
try:
self._aux_set_bus_sub(values)
self._modif_set_bus = True
except Exception as exc_:
self._set_topo_vect[:] = orig_
raise IllegalAction(
f"Impossible to modify the substation bus with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
def _aux_aux_convert_and_check_np_array_change(self, array_):
try:
array_ = np.array(array_)
except Exception as exc_:
raise IllegalAction(
f"When setting the topology by substation and by giving a tuple, the "
f"second element of the tuple should be convertible to a numpy "
f'array of type int. Error was: "{exc_}"'
)
if (
isinstance(array_.dtype, (int, dt_int))
or array_.dtype == dt_int
or array_.dtype == int
):
raise IllegalAction(
"To change substation topology, you need a vector of bools, and not a vector "
"of int."
)
elif (
isinstance(array_.dtype, (float, dt_float))
or array_.dtype == dt_float
or array_.dtype == float
):
raise IllegalAction(
"To change substation topology, you need a vector of bools, and not a vector "
"of float."
)
array_ = array_.astype(dt_bool)
return array_
def _check_for_right_vectors_sub(self, values):
if len(values) != 2:
raise IllegalAction(
"Impossible to set the topology of a substation with a tuple which "
"has not a size of 2 (substation_id, topology_representation)"
)
sub_id, topo_repr = values
if isinstance(sub_id, (bool, dt_bool)):
raise IllegalAction("Substation id should be integer")
if isinstance(sub_id, (float, dt_float, np.float64)):
raise IllegalAction("Substation id should be integer")
try:
el_id = int(sub_id)
except Exception as exc_:
raise IllegalAction(
f"Substation id should be convertible to integer. "
f'Error was "{exc_}"'
)
try:
size_ = len(topo_repr)
except Exception as exc_:
raise IllegalAction(
f"Topology cannot be set with your input." f'Error was "{exc_}"'
)
nb_el = self.sub_info[el_id]
if size_ != nb_el:
raise IllegalAction(
f"To set topology of a substation, you must provide the full list of the "
f"elements you want to modify. You provided a vector with {size_} components "
f"while there are {self.sub_info[el_id]} on the substation."
)
return sub_id, topo_repr, nb_el
    def _aux_change_bus_sub(self, values):
        # Dispatch helper for the ``sub_change_bus`` setter: interprets ``values``
        # (full topology vector, (sub_id, topo) tuple, list thereof, or dict keyed
        # by substation name / id) and writes the corresponding "change_bus"
        # flags into ``self._change_bus_vect``. Raises IllegalAction otherwise.
        if isinstance(values, (bool, dt_bool)):
            raise IllegalAction(
                "Impossible to modify bus by substation with a single bool."
            )
        elif isinstance(values, (int, dt_int, np.int64)):
            raise IllegalAction(
                "Impossible to modify bus by substation with a single integer."
            )
        elif isinstance(values, (float, dt_float, np.float64)):
            raise IllegalAction(
                "Impossible to modify bus by substation with a single float."
            )
        elif isinstance(values, np.ndarray):
            # full topo vect
            if values.shape[0] != self.dim_topo:
                raise IllegalAction(
                    "Impossible to modify bus when providing a full topology vector "
                    "that has not the right size."
                )
            if values.dtype == dt_int or values.dtype == int:
                raise IllegalAction(
                    "When using a full vector for setting the topology, it should be "
                    "of bool types"
                )
            values = self._aux_aux_convert_and_check_np_array_change(values)
            self._change_bus_vect[:] = values
        elif isinstance(values, tuple):
            # should be a tuple (sub_id, new_topo)
            sub_id, topo_repr, nb_el = self._check_for_right_vectors_sub(values)
            topo_repr = self._aux_aux_convert_and_check_np_array_change(topo_repr)
            # elements of a substation are stored contiguously in the topo vector
            start_ = np.sum(self.sub_info[:sub_id])
            end_ = start_ + nb_el
            self._change_bus_vect[start_:end_] = topo_repr
        elif isinstance(values, list):
            if len(values) == self.dim_topo:
                # if list is the size of the full topo vect, it's a list representing it
                values = self._aux_aux_convert_and_check_np_array_change(values)
                self._aux_change_bus_sub(values)
                return
            # otherwise it should be a list of tuples: [(sub_id, topo), (sub_id, topo)]
            for el in values:
                if not isinstance(el, tuple):
                    raise IllegalAction(
                        "When provided a list, it should be a list of tuples: "
                        "[(sub_id, topo), (sub_id, topo), ... ] "
                    )
                self._aux_change_bus_sub(el)
        elif isinstance(values, dict):
            # keys may be substation names or substation ids
            for sub_id, topo_repr in values.items():
                sub_id = self._aux_sub_when_dict_get_id(sub_id)
                self._aux_change_bus_sub((sub_id, topo_repr))
        else:
            raise IllegalAction(
                "Impossible to set the topology by substation with your input."
                "Please consult the documentation."
            )
def _aux_sub_when_dict_get_id(self, sub_id):
if isinstance(sub_id, str):
tmp = np.where(self.name_sub == sub_id)[0]
if len(tmp) == 0:
raise IllegalAction(f"No substation named {sub_id}")
sub_id = tmp[0]
elif not isinstance(sub_id, int):
raise IllegalAction(
f"When using a dictionary it should be either with key = name of the "
f"substation or key = id of the substation. You provided neither string nor"
f"int but {type(sub_id)}."
)
return sub_id
@property
def sub_change_bus(self) -> np.ndarray:
res = copy.deepcopy(self.change_bus)
res.flags.writeable = False
return res
@sub_change_bus.setter
def sub_change_bus(self, values):
if "change_bus" not in self.authorized_keys:
raise IllegalAction(
'Impossible to modify the substation bus (with "change") with this action type.'
)
orig_ = self.sub_change_bus
try:
self._aux_change_bus_sub(values)
self._modif_change_bus = True
except Exception as exc_:
self._change_bus_vect[:] = orig_
raise IllegalAction(
f"Impossible to modify the substation bus with your input. "
f"Please consult the documentation. "
f'The error was:\n"{exc_}"'
)
def curtailment_mw_to_ratio(self, curtailment_mw) -> np.ndarray:
"""
Transform a "curtailment" given as maximum MW to the grid2op formalism (in ratio of gen_pmax)
Parameters
----------
curtailment_mw:
Same type of inputs you can use in `act.curtail = ...`
Returns
-------
A proper input to `act.curtail` with the converted input expressed in ratio of gen_pmax
Examples
--------
If you want to limit the production of generator 1 (suppose its renewable) at 1.5MW
then you can do:
.. code-block:: python
gen_id = 1
amount_max = 1.5
act.curtail = act.curtailment_mw_to_ratio([(gen_id, amount_max)])
"""
values = self._curtail * self.gen_pmax
self._aux_affect_object_float(
curtailment_mw,
"curtailment",
self.n_gen,
self.name_gen,
np.arange(self.n_gen),
values,
)
values /= self.gen_pmax
values[values >= 1.0] = 1.0
values[values < 0.0] = -1.0
return values
@property
def curtail_mw(self) -> np.ndarray:
"""
Allows to perfom some curtailment on some generators in MW (by default in grid2Op it should be expressed
in ratio of gen_pmax)
It behaves similarly as :attr:`BaseAction.redispatch`. See the help there for more information.
For more information, feel free to consult the documentation :ref:`generator-mod-el` where more
details are given about the modeling ot these storage units.
.. warnings:
We remind that "curtailment" will limit the number of MW produce by renewable energy sources. The agent
is asked to provide the limit it wants and not the amount of MW it wants the generator to be cut off.
For example, if a generator with a Pmax of 100 produces 55MW and you ask to "curtail_mw = 15" for this generator,
its production will be limited to 15 MW (then droping from 55MW to 15MW) so loosing 40MW (and not 15 !)
"""
res = 1.0 * self._curtail * self.gen_pmax
res[res < 0.0] = -1.0
res.flags.writeable = False
return res
@curtail_mw.setter
def curtail_mw(self, values_mw):
self.curtail = self.curtailment_mw_to_ratio(values_mw)
    def limit_curtail_storage(self,
                              obs: "BaseObservation",
                              margin: float=10.,
                              do_copy: bool=False,
                              _tol_equal : float=0.01) -> Tuple["BaseAction", np.ndarray, np.ndarray]:
        """
        This function tries to limit the possibility to end up
        with a "game over" because actions on curtailment or storage units (see the "Notes" section
        for more information).
        It will modify the action (unless `do_copy` is `True`) from a given observation `obs`.
        It limits the curtailment / storage unit to ensure that the
        amount of MW curtailed / taken to-from the storage units
        are within `-sum(obs.gen_margin_down)` and `sum(obs.gen_margin_up)`
        The `margin` parameter is here to allows to "take into account" the uncertainties. Indeed, if you
        limit only to `-sum(obs.gen_margin_down)` and `sum(obs.gen_margin_up)`, because you don't know
        how much the production will vary (due to loads, or intrisinc variability of
        renewable energy sources). The higher `margin` the less likely you will end up with
        a "game over" but the more your action will possibly be affected. The lower
        this parameter, the more likely you will end up with a game over but the less
        your action will be impacted. It represents a certain amount of `MW`.
        Notes
        -------
        At each time, the environment ensures that the following equations are met:
        1) for each controlable generators $p^{(c)}_{min} <= p^{(c)}_t <= p^{(c)}_{max}$
        2) for each controlable generators $-ramp_{min}^{(c)} <= p^{(c)}_t - p^{(c)}_{t-1} <= ramp_{max}^{(c)}$
        3) at each step the sum of MW curtailed and the total contribution of storage units
        is absorbed by the controlable generators so that the total amount of power injected
        at this step does not change:
        $\sum_{\text{all generators } g} p^{(g, scenario)}_t = \sum_{\text{controlable generators } c} p^{(c)}_t + \sum_{\text{storage unit } s} p^{s}_t + \sum_{\text{renewable generator} r} p^{(r)}_t$
        where $p^{(g)}_t$ denotes the productions of generator $g$ in the input data "scenario"
        (*ie* "in the current episode", "before any modification", "decided by the market / central authority").
        In the above equations, `\sum_{\text{storage unit } s} p^{s}_t` are controled by the action (thanks to the storage units)
        and `\sum_{\text{renewable generator} r} p^{(r)}_t` are controlled by the curtailment.
        `\sum_{\text{all generators } g} p^{(g, scenario)}_t` are input data from the environment (that cannot be modify).
        The exact value of each `p^{(c)}_t` (for each controlable generator) is computed by an internal routine of the
        environment.
        The constraint comes from the fact that `\sum_{\text{controlable generators } c} p^{(c)}_t` is determined by the last equation
        above but at the same time the values of each `p^{(c)}_t` (for each controllable generator) is heavily constrained
        by equations 1) and 2).
        .. note::
            This argument and the :func:`grid2op.Parameters.Parameters.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION` have the same objective:
            prevent an agent to do some curtailment too strong for the grid.
            When using :func:`grid2op.Parameters.Parameters.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION`,
            the environment will do it knowing exactly what will happen next (its a bit "cheating") and limit
            exactly the action to exactly right amount.
            Using :func:`grid2op.Aciton.BaseAction.limit_curtail_storage` is always feasible, but less precise
            and subject to uncertainties.
        .. warning::
            If the action has no effect (for example you give a limit of the curtailment above the
            actual production of renewable generators) then regardless of the "margin" parameter
            your action will be declared "legal" which may cause unfeasibility in the future.
        Parameters
        ----------
        obs : ``Observation``
            The current observation. The main attributes used for the observation are
            `obs.gen_margin_down` and `obs.gen_margin_up`.
        margin : ``float``, optional
            The "margin" taken from the controlable generators "margin" to
            "take into account" when limiting the action
            (see description for more information), by default 10.
        do_copy : ``bool``, optional
            Whether to make a copy of the current action (if set to ``True``) or to modify the
            action "in-place" (default, when ``False``)
        Returns
        -------
        `Action`, np.ndarray, np.ndarray:
            - `act`: the action after the storage unit / curtailment are modified (by default it's also `self`)
            - `res_add_curtailed`: the modification made to the curtailment
            - `res_add_storage`: the modification made to the storage units
        """
        cls = type(self)
        if do_copy:
            res = copy.deepcopy(self)
        else:
            res = self
        # modifications applied to curtailment / storage, returned for information
        res_add_storage = np.zeros(cls.n_storage, dtype=dt_float)
        res_add_curtailed = np.zeros(cls.n_gen, dtype=dt_float)
        # total downward / upward headroom of the controlable generators (in MW)
        max_down = np.sum(obs.gen_margin_down)
        max_up = np.sum(obs.gen_margin_up)
        # storage
        # NOTE(review): both quantities below are the same sum; kept as-is
        total_mw_storage = np.sum(res._storage_power)
        total_storage_consumed = np.sum(res._storage_power)
        # curtailment
        # renewable generators actually affected by this action's curtailment
        gen_curtailed = (res._curtail != -1) & cls.gen_renewable
        gen_curtailed &= ( (obs.gen_p > res._curtail * cls.gen_pmax) | (obs.gen_p_before_curtail > obs.gen_p ))
        # curtailment limit expressed in MW for the curtailed generators
        gen_p_after_max = (res._curtail * cls.gen_pmax)[gen_curtailed]
        # I might have a problem because curtailment decreases too rapidly (ie i set a limit too low)
        prod_after_down = np.minimum(gen_p_after_max, obs.gen_p[gen_curtailed])
        # I might have a problem because curtailment increase too rapidly (limit was low and I set it too high too
        # rapidly)
        prod_after_up = np.minimum(gen_p_after_max, obs.gen_p_before_curtail[gen_curtailed])
        gen_p_after = np.maximum(prod_after_down, prod_after_up)
        # MW removed (>0) or restored (<0) by the curtailment, per generator
        mw_curtailed = obs.gen_p[gen_curtailed] - gen_p_after
        mw_curtailed_down = 1.0 * mw_curtailed
        mw_curtailed_down[mw_curtailed_down < 0.] = 0.
        mw_curtailed_up = -1.0 * mw_curtailed
        mw_curtailed_up[mw_curtailed_up < 0.] = 0.
        total_mw_curtailed_down = np.sum(mw_curtailed_down)
        total_mw_curtailed_up = np.sum(mw_curtailed_up)
        total_mw_curtailed = total_mw_curtailed_down - total_mw_curtailed_up
        # net amount of MW the controlable generators would have to compensate
        total_mw_act = total_mw_curtailed + total_mw_storage
        if (total_mw_act > 0) and (total_mw_act > max_up - margin):
            # controlable generators should be asked to increase their production too much, I need to limit
            # the storage unit (consume too much) or the curtailment (curtailment too strong)
            if max_up < margin + _tol_equal:
                # not enough ramp up anyway so I don't do anything
                res_add_storage[:] = -res._storage_power
                res_add_curtailed[gen_curtailed] = obs.gen_p[gen_curtailed] / obs.gen_pmax[gen_curtailed] - res._curtail[gen_curtailed]
                res._storage_power[:] = 0.  # don't act on storage
                res._curtail[gen_curtailed] = -1  # reset curtailment
            else:
                # amount of MW to remove from the action, split proportionally
                # between curtailment and storage
                remove_mw = total_mw_act - (max_up - margin)
                # fix curtailment
                if total_mw_curtailed_down > 0.:
                    remove_curtail_mw = remove_mw * total_mw_curtailed_down / (total_mw_curtailed_down + total_mw_storage)
                    tmp_ = mw_curtailed_down / total_mw_curtailed_down * remove_curtail_mw / cls.gen_pmax[gen_curtailed]
                    res_add_curtailed[gen_curtailed] = tmp_
                    res._curtail[gen_curtailed] += tmp_
                # fix storage
                if total_storage_consumed > 0.:
                    # only consider storage units that consume something (do not attempt to modify the others)
                    do_storage_consum = res._storage_power > 0.
                    remove_storage_mw = remove_mw * total_mw_storage / (total_mw_curtailed_down + total_mw_storage)
                    tmp_ = -(res._storage_power[do_storage_consum] *
                             remove_storage_mw / np.sum(res._storage_power[do_storage_consum]))
                    res._storage_power[do_storage_consum] += tmp_
                    res_add_storage[do_storage_consum] = tmp_
        elif (total_mw_act < 0) and (total_mw_act < -max_down + margin):
            # controlable generators should be asked to decrease their production too much, I need to limit
            # the storage unit (produce too much) or the curtailment (curtailment too little)
            if max_down < margin + _tol_equal:
                # not enough ramp down anyway so I don't do anything
                res_add_storage[:] = -res._storage_power
                res_add_curtailed[gen_curtailed] = obs.gen_p[gen_curtailed] / obs.gen_pmax[gen_curtailed] - res._curtail[gen_curtailed]
                res._storage_power[:] = 0.  # don't act on storage
                res._curtail[gen_curtailed] = -1  # reset curtailment
            else:
                # amount of MW to add back, split between curtailment and storage
                add_mw = -(total_mw_act + (max_down - margin))
                # fix curtailment => does not work at all !
                if total_mw_curtailed_up > 0.:
                    add_curtail_mw = add_mw * total_mw_curtailed_up / (total_mw_curtailed_up + total_mw_storage)
                    tmp_ = (obs.gen_p_before_curtail[gen_curtailed] * res._curtail[gen_curtailed] - mw_curtailed_up / total_mw_curtailed_up * add_curtail_mw )/ cls.gen_pmax[gen_curtailed]
                    res_add_curtailed[gen_curtailed] = tmp_ - res._curtail[gen_curtailed]
                    res._curtail[gen_curtailed] = tmp_
                # fix storage
                if total_storage_consumed < 0.:
                    # only consider storage units that consume something (do not attempt to modify the others)
                    do_storage_prod = res._storage_power < 0.
                    remove_storage_mw = add_mw * total_mw_storage / (total_mw_curtailed_up + total_mw_storage)
                    tmp_ = (res._storage_power[do_storage_prod] *
                            remove_storage_mw / np.sum(res._storage_power[do_storage_prod]))
                    res._storage_power[do_storage_prod] += tmp_
                    res_add_storage[do_storage_prod] = tmp_
        return res, res_add_curtailed, res_add_storage
def _aux_decompose_as_unary_actions_change(self, cls, group_topo, res):
if group_topo:
tmp = cls()
tmp._modif_change_bus = True
tmp._change_bus_vect = copy.deepcopy(self._change_bus_vect)
res["change_bus"] = [tmp]
else:
subs_changed = cls.grid_objects_types[self._change_bus_vect, cls.SUB_COL]
subs_changed = np.unique(subs_changed)
res["change_bus"] = []
for sub_id in subs_changed:
tmp = cls()
tmp._modif_change_bus = True
mask_sub = cls.grid_objects_types[:, cls.SUB_COL] == sub_id
tmp._change_bus_vect[mask_sub] = self._change_bus_vect[mask_sub]
res["change_bus"].append(tmp)
def _aux_decompose_as_unary_actions_change_ls(self, cls, group_line_status, res):
if group_line_status:
tmp = cls()
tmp._modif_change_status = True
tmp._switch_line_status = copy.deepcopy(self._switch_line_status)
res["change_line_status"] = [tmp]
else:
lines_changed = np.where(self._switch_line_status)[0]
res["change_line_status"] = []
for l_id in lines_changed:
tmp = cls()
tmp._modif_change_status = True
tmp._switch_line_status[l_id] = True
res["change_line_status"].append(tmp)
def _aux_decompose_as_unary_actions_set(self, cls, group_topo, res):
if group_topo:
tmp = cls()
tmp._modif_set_bus = True
tmp._set_topo_vect = 1 * self._set_topo_vect
res["set_bus"] = [tmp]
else:
subs_changed = cls.grid_objects_types[self._set_topo_vect != 0, cls.SUB_COL]
subs_changed = np.unique(subs_changed)
res["set_bus"] = []
for sub_id in subs_changed:
tmp = cls()
tmp._modif_set_bus = True
mask_sub = cls.grid_objects_types[:, cls.SUB_COL] == sub_id
tmp._set_topo_vect[mask_sub] = self._set_topo_vect[mask_sub]
res["set_bus"].append(tmp)
def _aux_decompose_as_unary_actions_set_ls(self, cls, group_line_status, res):
if group_line_status:
tmp = cls()
tmp._modif_set_status = True
tmp._set_line_status = 1 * self._set_line_status
res["set_line_status"] = [tmp]
else:
lines_changed = np.where(self._set_line_status != 0)[0]
res["set_line_status"] = []
for l_id in lines_changed:
tmp = cls()
tmp._modif_set_status = True
tmp._set_line_status[l_id] = self._set_line_status[l_id]
res["set_line_status"].append(tmp)
def _aux_decompose_as_unary_actions_redisp(self, cls, group_redispatch, res):
if group_redispatch:
tmp = cls()
tmp._modif_redispatch = True
tmp._redispatch = 1. * self._redispatch
res["redispatch"] = [tmp]
else:
gen_changed = np.where(self._redispatch != 0.)[0]
res["redispatch"] = []
for g_id in gen_changed:
tmp = cls()
tmp._modif_redispatch = True
tmp._redispatch[g_id] = self._redispatch[g_id]
res["redispatch"].append(tmp)
def _aux_decompose_as_unary_actions_storage(self, cls, group_storage, res):
if group_storage:
tmp = cls()
tmp._modif_storage = True
tmp._storage_power = 1. * self._storage_power
res["set_storage"] = [tmp]
else:
sto_changed = np.where(self._storage_power != 0.)[0]
res["set_storage"] = []
for s_id in sto_changed:
tmp = cls()
tmp._modif_storage = True
tmp._storage_power[s_id] = self._storage_power[s_id]
res["set_storage"].append(tmp)
def _aux_decompose_as_unary_actions_curtail(self, cls, group_curtailment, res):
if group_curtailment:
tmp = cls()
tmp._modif_curtailment = True
tmp._curtail = 1. * self._curtail
res["curtail"] = [tmp]
else:
gen_changed = np.where(self._curtail != -1.)[0]
res["curtail"] = []
for g_id in gen_changed:
tmp = cls()
tmp._modif_curtailment = True
tmp._curtail[g_id] = self._curtail[g_id]
res["curtail"].append(tmp)
def decompose_as_unary_actions(self,
group_topo=False,
group_line_status=False,
group_redispatch=True,
group_storage=True,
group_curtail=True) -> dict:
"""This function allows to split a possibly "complex" action into its
"unary" counterpart.
By "unary" action here we mean "action that acts on only
one type". For example an action that only `set_line_status` is
unary but an action that acts on `set_line_status` AND `set_bus` is
not. Also, note that an action that acts on `set_line_status`
and `change_line_status` is not considered as "unary" by this method.
This functions output a dictionnary with up to 7 keys:
- "change_bus" if the action affects the grid with `change_bus`.
In this case the value associated with this key is a list containing
only action that performs `change_bus`
- "set_bus" if the action affects the grid with`set_bus`.
In this case the value associated with this key is a list containing
only action that performs `set_bus`
- "change_line_status" if the action affects the grid with `change_line_status`
In this case the value associated with this key is a list containing
only action that performs `change_line_status`
- "set_line_status" if the action affects the grid with `set_line_status`
In this case the value associated with this key is a list containing
only action that performs `set_line_status`
- "redispatch" if the action affects the grid with `redispatch`
In this case the value associated with this key is a list containing
only action that performs `redispatch`
- "set_storage" if the action affects the grid with `set_storage`
In this case the value associated with this key is a list containing
only action that performs `set_storage`
- "curtail" if the action affects the grid with `curtail`
In this case the value associated with this key is a list containing
only action that performs `curtail`
**NB** if the action is a "do nothing" action type, then this function will
return an empty dictionary.
.. versionadded:: 1.9.1
Notes
-------
If the action is not ambiguous (ie it is valid and can be correctly
understood by grid2op) and if you sum all the actions in all
the lists of all the keys of the
dictionnary returned by this function, you will retrieve exactly the
current action.
For example:
.. code-block:: python
import grid2op
env_name = "l2rpn_case14_sandbox"
env = grid2op.make(env_name, ...)
act = env.action_space({"curtail": [(4, 0.8), (5, 0.7)],
"set_storage": [(0, +1.), (1, -1.)],
"redispatch": [(0, +1.), (1, -1.)],
"change_line_status": [2, 3],
"set_line_status": [(0, -1), (1, -1)],
"set_bus": {"loads_id": [(0, 2), (1, 2)],
"generators_id": [(0, 2)]},
"change_bus": {"loads_id": [2, 3],
"generators_id": [1]}
})
res = act.decompose_as_unary_actions()
tmp = env.action_space()
for k, v in res.items():
for a in v:
tmp += a
assert tmp == act
Parameters
----------
group_topo : bool, optional
This flag allows you to control the size of the `change_bus` and
`set_bus` values. If it's ``True`` then the values
associated with this keys will be unique (made of one single element)
that will affect all the elements affected by this action (grouping them
all together)
Otherwise, it will counts as many elements as the number of
substations affected by a `change_bus` or a `set_bus`. Each action
returned by this will then act on only one substation. By default False (meaning there will be as many element
in `change_bus` as the number of substations affected by a `change_bus`
action [same for `set_bus`])
group_line_status : bool, optional
Whether to group the line status in one single action (so the values associated
with the keys `set_line_status` and `change_line_status` will count
exactly one element - if present) or not. By default False (meaning there will be as many element
in `change_line_status` as the number of lines affected by a
`change_line_status` action [same for `set_line_status`] : if
the original action `set` the status of two powerlines, then the
value associated with `set_line_status` will count 2 elements:
the first action will `set` the status of the first line affected by
the action, the second will... `set` the status of the
second line affected by the action)
group_redispatch : bool, optional
same behaviour as `group_line_status` but for "generators" and
"redispatching" instead of "powerline" and `set_line_status`, by default True (meaning the value associated with
the key `redispatch` will be a list of one element performing
a redispatching action on all generators modified by the current action)
group_storage : bool, optional
same behaviour as `group_line_status` but for "storage units" and
"set setpoint" instead of "powerline" and `set_line_status`, by default True (meaning the value associated with
the key `set_storage` will be a list of one element performing
a set point action on all storage units modified by the current action)
group_curtail : bool, optional
same behaviour as `group_line_status` but for "generators" and
"curtailment" instead of "powerline" and `set_line_status`, , by default True (meaning the value associated with
the key `curtail` will be a list of one element performing
a curtailment on all storage generators modified by the current action)
Returns
-------
dict
See description for further information.
"""
res = {}
cls = type(self)
if self._modif_change_bus:
self._aux_decompose_as_unary_actions_change(cls, group_topo, res)
if self._modif_set_bus:
self._aux_decompose_as_unary_actions_set(cls, group_topo, res)
if self._modif_change_status:
self._aux_decompose_as_unary_actions_change_ls(cls, group_line_status, res)
if self._modif_set_status:
self._aux_decompose_as_unary_actions_set_ls(cls, group_line_status, res)
if self._modif_redispatch:
self._aux_decompose_as_unary_actions_redisp(cls, group_redispatch, res)
if self._modif_storage:
self._aux_decompose_as_unary_actions_storage(cls, group_storage, res)
if self._modif_curtailment:
self._aux_decompose_as_unary_actions_curtail(cls, group_curtail, res)
return res
| 262,431 | 41.657997 | 205 | py |
Grid2Op | Grid2Op-master/grid2op/Action/CompleteAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action.BaseAction import BaseAction
class CompleteAction(BaseAction):
    """
    Action type that grants access to every modification grid2op supports.

    This class is not meant for agents (agents are restricted to
    :class:`PlayableAction`). It is reserved for internal components such as
    the chronics, the environment, the opponent or the voltage controler.
    """

    def __init__(self):
        super().__init__()
| 903 | 42.047619 | 118 | py |
Grid2Op | Grid2Op-master/grid2op/Action/DispatchAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action.PlayableAction import PlayableAction
class DispatchAction(PlayableAction):
    """
    A :class:`PlayableAction` restricted to the "redispatch" keyword only.

    Any other keyword passed to :func:`DispatchAction.update` is ignored and
    has no effect on the grid.
    """

    authorized_keys = {"redispatch"}

    attr_list_vect = ["_redispatch"]
    attr_list_set = set(attr_list_vect)

    def __init__(self):
        PlayableAction.__init__(self)
| 953 | 34.333333 | 116 | py |
Grid2Op | Grid2Op-master/grid2op/Action/DontAct.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action.PlayableAction import PlayableAction
class DontAct(PlayableAction):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Action class that cannot express any modification of the grid.

    It differs from the "do nothing" action: "do nothing" is a regular action
    that happens to modify nothing, whereas this class is unable to represent
    any modification at all. Every attempt to update it is discarded.
    """

    authorized_keys = set()
    attr_list_vect = []

    def __init__(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Nothing beyond :func:`PlayableAction.__init__` happens here; see
        :class:`BaseAction` for the general constructor documentation.
        """
        super().__init__()

    def update(self, dict_):
        """
        Does nothing: the `dict_` argument is deliberately ignored.

        Parameters
        ----------
        dict_: ``dict``
            Ignored by this specific class.

        Returns
        -------
        self: :class:`DontAct`
            This action, left unchanged.
        """
        return self
| 1,711 | 29.035088 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/Action/PlayableAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import warnings
from grid2op.Exceptions import AmbiguousAction
from grid2op.Action.BaseAction import BaseAction
class PlayableAction(BaseAction):
    """
    Common base of every action an agent is allowed to perform.

    Concrete agent action types (for example :class:`TopologyAndDispatchAction`
    or :class:`TopologyAction`) all derive from this class. Subclasses narrow
    the set of usable keywords by overriding :attr:`authorized_keys`.
    """

    authorized_keys = {
        "set_line_status",
        "change_line_status",
        "set_bus",
        "change_bus",
        "redispatch",
        "set_storage",
        "curtail",
        "raise_alarm",
        "raise_alert"
    }

    # order matters: this list drives the to_vect / from_vect serialization
    attr_list_vect = [
        "_set_line_status",
        "_switch_line_status",
        "_set_topo_vect",
        "_change_bus_vect",
        "_redispatch",
        "_storage_power",
        "_curtail",
        "_raise_alarm",
        "_raise_alert"
    ]
    attr_list_set = set(attr_list_vect)
    shunt_added = True  # no shunt here

    def __init__(self):
        BaseAction.__init__(self)
        # maps each authorized dictionary key to the method that digests it
        self.authorized_keys_to_digest = {
            "set_line_status": self._digest_set_status,
            "change_line_status": self._digest_change_status,
            "set_bus": self._digest_setbus,
            "change_bus": self._digest_change_bus,
            "redispatch": self._digest_redispatching,
            "set_storage": self._digest_storage,
            "curtail": self._digest_curtailment,
            "raise_alarm": self._digest_alarm,
            "raise_alert": self._digest_alert,
        }

    def __call__(self):
        """
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Same as the ancestor :func:`BaseAction.__call__`, except that
        injection (and shunt) modifications are forbidden for playable
        actions: the corresponding entries of the returned tuple are always
        empty dictionaries.

        Returns
        -------
        dict_injection: ``dict``
            Always empty for this class.

        set_line_status: :class:`numpy.ndarray`, dtype:int
            :attr:`BaseAction._set_line_status`

        switch_line_status: :class:`numpy.ndarray`, dtype:bool
            :attr:`BaseAction._switch_line_status`

        set_topo_vect: :class:`numpy.ndarray`, dtype:int
            :attr:`BaseAction._set_topo_vect`

        change_bus_vect: :class:`numpy.ndarray`, dtype:bool
            :attr:`BaseAction._change_bus_vect`

        redispatch: :class:`numpy.ndarray`, dtype:float
            :attr:`BaseAction._redispatch`

        storage_power: :class:`numpy.ndarray`, dtype:float
            :attr:`BaseAction._storage_power`

        shunts: ``dict``
            Always empty for this class.

        Raises
        ------
        AmbiguousAction
            If the action contains injection modifications or is ambiguous.
        """
        if self._dict_inj:
            raise AmbiguousAction("Injections actions are not playable.")
        self._check_for_ambiguity()
        no_injection = {}
        no_shunt = {}
        return (
            no_injection,
            self._set_line_status,
            self._switch_line_status,
            self._set_topo_vect,
            self._change_bus_vect,
            self._redispatch,
            self._storage_power,
            no_shunt,
        )

    def update(self, dict_):
        """
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Like :func:`BaseAction.update`, restricted to the keys listed in
        :attr:`PlayableAction.authorized_keys`. Unknown keys trigger a
        warning and are skipped.

        Parameters
        ----------
        dict_: :class:`dict`
            See the help of :func:`BaseAction.update` for a detailed
            explanation. ``None`` is accepted and leaves the action empty.

        Returns
        -------
        self: :class:`PlayableAction`
            This action, allowing multiple calls to "update" to be chained.
        """
        self._reset_vect()
        if dict_ is None:
            return self
        template = (
            'The key "{}" used to update an action will be ignored. Valid keys are {}'
        )
        for key in dict_.keys():
            # membership is tested on self.authorized_keys (which subclasses
            # narrow), NOT on the digest dict (which always holds all keys)
            if key in self.authorized_keys:
                self.authorized_keys_to_digest[key](dict_)
            else:
                warnings.warn(template.format(key, self.authorized_keys))
        return self
| 4,692 | 31.365517 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/Action/PowerlineChangeAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action.PlayableAction import PlayableAction
class PowerlineChangeAction(PlayableAction):
    """
    A :class:`PlayableAction` restricted to the "change_line_status"
    keyword only.

    Any other keyword passed to this action is ignored and has no effect
    on the grid.
    """

    authorized_keys = {"change_line_status"}

    attr_list_vect = ["_switch_line_status"]
    attr_list_set = set(attr_list_vect)

    def __init__(self):
        PlayableAction.__init__(self)
| 982 | 34.107143 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Action/PowerlineChangeAndDispatchAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action.PlayableAction import PlayableAction
class PowerlineChangeAndDispatchAction(PlayableAction):
    """
    A :class:`PlayableAction` combining powerline switching and
    redispatching.

    Only the "change_line_status" and "redispatch" keywords are accepted;
    anything else is ignored and has no effect on the grid.
    """

    authorized_keys = {"change_line_status", "redispatch"}

    attr_list_vect = ["_switch_line_status", "_redispatch"]
    attr_list_set = set(attr_list_vect)

    def __init__(self):
        PlayableAction.__init__(self)
| 1,106 | 37.172414 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Action/PowerlineChangeDispatchAndStorageAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action.PlayableAction import PlayableAction
class PowerlineChangeDispatchAndStorageAction(PlayableAction):
    """
    A :class:`PlayableAction` combining powerline switching, redispatching
    and storage setpoints.

    Only the "change_line_status", "redispatch" and "set_storage" keywords
    are accepted; anything else is ignored and has no effect on the grid.
    """

    authorized_keys = {"change_line_status", "redispatch", "set_storage"}

    attr_list_vect = ["_switch_line_status", "_redispatch", "_storage_power"]
    attr_list_set = set(attr_list_vect)

    def __init__(self):
        super().__init__()
| 869 | 33.8 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Action/PowerlineSetAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action.PlayableAction import PlayableAction
class PowerlineSetAction(PlayableAction):
    """
    A :class:`PlayableAction` restricted to the "set_line_status" keyword
    only.

    Any other keyword passed to this action is ignored and has no effect
    on the grid.
    """

    authorized_keys = {"set_line_status"}

    attr_list_vect = ["_set_line_status"]
    attr_list_set = set(attr_list_vect)

    def __init__(self):
        PlayableAction.__init__(self)
| 978 | 33.964286 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Action/PowerlineSetAndDispatchAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action.PlayableAction import PlayableAction
class PowerlineSetAndDispatchAction(PlayableAction):
    """
    A :class:`PlayableAction` combining forced powerline status and
    redispatching.

    Only the "set_line_status" and "redispatch" keywords are accepted;
    anything else is ignored and has no effect on the grid.
    """

    authorized_keys = {"set_line_status", "redispatch"}

    attr_list_vect = ["_set_line_status", "_redispatch"]
    attr_list_set = set(attr_list_vect)

    def __init__(self):
        PlayableAction.__init__(self)
| 1,189 | 30.315789 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Action/SerializableActionSpace.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import warnings
import numpy as np
import itertools
from typing import Dict, List
from grid2op.dtypes import dt_int, dt_float, dt_bool
from grid2op.Exceptions import AmbiguousAction, Grid2OpException
from grid2op.Space import SerializableSpace
from grid2op.Action.BaseAction import BaseAction
class SerializableActionSpace(SerializableSpace):
"""
This class allows serializing/ deserializing the action space.
It should not be used inside an :attr:`grid2op.Environment.Environment` , as some functions of the action might not
be compatible with the serialization, especially the checking of whether or not an action is legal or not.
Attributes
----------
actionClass: ``type``
Type used to build the :attr:`SerializableActionSpace.template_act`
_template_act: :class:`BaseAction`
An instance of the "*actionClass*" provided used to provide higher level utilities, such as the size of the
action (see :func:`Action.size`) or to sample a new Action (see :func:`grid2op.Action.Action.sample`)
"""
SET_STATUS_ID = 0
CHANGE_STATUS_ID = 1
SET_BUS_ID = 2
CHANGE_BUS_ID = 3
REDISPATCHING_ID = 4
STORAGE_POWER_ID = 5
RAISE_ALARM_ID = 6
RAISE_ALERT_ID = 7
ERR_MSG_WRONG_TYPE = ('The action to update using `ActionSpace` is of type "{}" '
'"which is not the type of action handled by this action space "'
'("{}")')
def __init__(self, gridobj, actionClass=BaseAction, _init_grid=True):
"""
INTERNAL USE ONLY
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
The :class:`grid2op.Environment.Environment` is responsible for the creation of the
action space. Do not attempt to make one yourself.
Parameters
----------
gridobj: :class:`grid2op.Space.GridObjects`
Representation of the underlying powergrid.
actionClass: ``type``
Type of action used to build :attr:`Space.SerializableSpace._template_obj`. It should derived from
:class:`BaseAction`.
"""
SerializableSpace.__init__(
self, gridobj=gridobj, subtype=actionClass, _init_grid=_init_grid
)
self.actionClass = self.subtype
self._template_act = self.actionClass()
@staticmethod
def from_dict(dict_):
"""
INTERNAL USE ONLY
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Allows the de-serialization of an object stored as a dictionary (for example in the case of JSON saving).
Parameters
----------
dict_: ``dict``
Representation of an BaseAction Space (aka SerializableActionSpace) as a dictionary.
Returns
-------
res: :class:``SerializableActionSpace``
An instance of an action space matching the dictionary.
"""
tmp = SerializableSpace.from_dict(dict_)
CLS = SerializableActionSpace.init_grid(tmp)
res = CLS(gridobj=tmp, actionClass=tmp.subtype, _init_grid=False)
return res
def _get_possible_action_types(self):
rnd_types = []
cls = type(self)
if "set_line_status" in self.actionClass.authorized_keys:
rnd_types.append(cls.SET_STATUS_ID)
if "change_line_status" in self.actionClass.authorized_keys:
rnd_types.append(cls.CHANGE_STATUS_ID)
if "set_bus" in self.actionClass.authorized_keys:
rnd_types.append(cls.SET_BUS_ID)
if "change_bus" in self.actionClass.authorized_keys:
rnd_types.append(cls.CHANGE_BUS_ID)
if "redispatch" in self.actionClass.authorized_keys:
rnd_types.append(cls.REDISPATCHING_ID)
if self.n_storage > 0 and "storage_power" in self.actionClass.authorized_keys:
rnd_types.append(cls.STORAGE_POWER_ID)
if self.dim_alarms > 0 and "raise_alarm" in self.actionClass.authorized_keys:
rnd_types.append(cls.RAISE_ALARM_ID)
if self.dim_alerts > 0 and "raise_alert" in self.actionClass.authorized_keys:
rnd_types.append(cls.RAISE_ALERT_ID)
return rnd_types
def supports_type(self, action_type):
"""
Returns if the current action_space supports the current action type.
Parameters
----------
action_type: ``str``
One of "set_line_status", "change_line_status", "set_bus", "change_bus", "redispatch",
"storage_power", "set_storage", "curtail" or "curtail_mw"
A string representing the action types you want to inspect.
Returns
-------
``True`` if you can use the `action_type` to create an action, ``False`` otherwise.
Examples
---------
To know if you can use the `act.set_bus` property to change the bus of an element, you can use:
.. code-block:: python
import grid2op
from grid2op.Converter import ConnectivityConverter
env = grid2op.make("rte_case14_realistic", test=True)
can_i_use_set_bus = env.action_space.supports_type("set_bus") # this is True
env2 = grid2op.make("educ_case14_storage", test=True)
can_i_use_set_bus = env2.action_space.supports_type("set_bus") # this is False
# this environment do not allow for topological changes but only action on storage units and redispatching
"""
name_action_types = [
"set_line_status",
"change_line_status",
"set_bus",
"change_bus",
"redispatch",
"storage_power",
"set_storage",
"curtail",
"curtail_mw",
"raise_alarm",
"raise_alert"
]
assert action_type in name_action_types, (
f"The action type provided should be in {name_action_types}. "
f"You provided {action_type} which is not supported."
)
if action_type == "storage_power":
return (self.n_storage > 0) and (
"set_storage" in self.actionClass.authorized_keys
)
elif action_type == "set_storage":
return (self.n_storage > 0) and (
"set_storage" in self.actionClass.authorized_keys
)
elif action_type == "curtail_mw":
return "curtail" in self.actionClass.authorized_keys
else:
return action_type in self.actionClass.authorized_keys
def _sample_set_line_status(self, rnd_update=None):
if rnd_update is None:
rnd_update = {}
rnd_line = self.space_prng.randint(self.n_line)
rnd_status = self.space_prng.choice([1, -1])
rnd_update["set_line_status"] = [(rnd_line, rnd_status)]
return rnd_update
def _sample_change_line_status(self, rnd_update=None):
if rnd_update is None:
rnd_update = {}
rnd_line = self.space_prng.randint(self.n_line)
rnd_update["change_line_status"] = [rnd_line]
return rnd_update
def _sample_set_bus(self, rnd_update=None):
if rnd_update is None:
rnd_update = {}
rnd_sub = self.space_prng.randint(self.n_sub)
sub_size = self.sub_info[rnd_sub]
rnd_topo = self.space_prng.choice([-1, 0, 1, 2], sub_size)
rnd_update["set_bus"] = {"substations_id": [(rnd_sub, rnd_topo)]}
return rnd_update
def _sample_change_bus(self, rnd_update=None):
if rnd_update is None:
rnd_update = {}
rnd_sub = self.space_prng.randint(self.n_sub)
sub_size = self.sub_info[rnd_sub]
rnd_topo = self.space_prng.choice([0, 1], sub_size).astype(dt_bool)
rnd_update["change_bus"] = {"substations_id": [(rnd_sub, rnd_topo)]}
return rnd_update
def _sample_redispatch(self, rnd_update=None):
if rnd_update is None:
rnd_update = {}
gens = np.arange(self.n_gen)[self.gen_redispatchable]
rnd_gen = self.space_prng.choice(gens)
rd = -self.gen_max_ramp_down[rnd_gen]
ru = self.gen_max_ramp_up[rnd_gen]
rnd_gen_disp = (ru - rd) * self.space_prng.random() + rd
rnd_disp = np.zeros(self.n_gen)
rnd_disp[rnd_gen] = rnd_gen_disp
rnd_update["redispatch"] = rnd_disp
rnd_update["redispatch"] = rnd_update["redispatch"].astype(dt_float)
return rnd_update
def _sample_storage_power(self, rnd_update=None):
if rnd_update is None:
rnd_update = {}
stor_unit = np.arange(self.n_storage)
rnd_sto = self.space_prng.choice(stor_unit)
rd = -self.storage_max_p_prod[rnd_sto]
ru = self.storage_max_p_absorb[rnd_sto]
rnd_sto_prod = (ru - rd) * self.space_prng.random() + rd
res = np.zeros(self.n_gen)
res[rnd_sto] = rnd_sto_prod
rnd_update["storage_power"] = res
rnd_update["storage_power"] = rnd_update["storage_power"].astype(dt_float)
return rnd_update
def _sample_raise_alarm(self, rnd_update=None):
""".. warning::
/!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\
"""
if rnd_update is None:
rnd_update = {}
rnd_area = self.space_prng.randint(self.dim_alarms)
rnd_update["raise_alarm"] = [rnd_area]
return rnd_update
def _sample_raise_alert(self, rnd_update=None):
if rnd_update is None:
rnd_update = {}
rnd_alerted_lines = self.space_prng.choice([True, False], self.dim_alerts).astype(dt_bool)
rnd_update["raise_alert"] = rnd_alerted_lines
return rnd_update
def sample(self):
"""
A utility used to sample a new random :class:`BaseAction`.
The sampled action is unitary: It has an impact on a single line/substation/generator.
There is no guarantee concerning the "legality" of the action (see the description of the
Action module for more information about illegal action).
It will only act by doing action supported by the action space. For example, if the action space
does not support "redispatching" then this method will NOT sample any redispatching action.
Returns
-------
res: :class:`BaseAction`
A random action sampled from the :attr:`ActionSpace.actionClass`
Examples
---------
The first usage is to sample uniform **unary** actions, you can do this with the
following:
.. code-block:: python
import grid2op
env = grid2op.make()
# and now you can sample from the action space
random_action = env.action_space.sample()
*Note* that the random action can be illegal depending on the game rules defined in the
rules :class:`grid2op.Rules`
If for some reason you want to sample more complex actions, you can do this the following way:
.. code-block:: python
import grid2op
env = grid2op.make()
# and now you can sample from the action space
random_action = env.action_space()
for i in range(5):
# my resulting action will be a complex action
# that will be the results of applying 5 random actions
random_action += env.action_space.sample()
print(random_action)
"""
rnd_act = self.actionClass()
# get the type of actions I am allowed to perform
rnd_types = self._get_possible_action_types()
# Cannot sample this space, return do nothing
if not len(rnd_types):
return rnd_act
# this sampling
rnd_type = self.space_prng.choice(rnd_types)
if rnd_type == self.SET_STATUS_ID:
rnd_update = self._sample_set_line_status()
elif rnd_type == self.CHANGE_STATUS_ID:
rnd_update = self._sample_change_line_status()
elif rnd_type == self.SET_BUS_ID:
rnd_update = self._sample_set_bus()
elif rnd_type == self.CHANGE_BUS_ID:
rnd_update = self._sample_change_bus()
elif rnd_type == self.REDISPATCHING_ID:
rnd_update = self._sample_redispatch()
elif rnd_type == self.STORAGE_POWER_ID:
rnd_update = self._sample_storage_power()
elif rnd_type == self.RAISE_ALARM_ID:
rnd_update = self._sample_raise_alarm()
elif rnd_type == self.RAISE_ALERT_ID:
rnd_update = self._sample_raise_alert()
else:
raise Grid2OpException(
"Impossible to sample action of type {}".format(rnd_type)
)
rnd_act.update(rnd_update)
return rnd_act
def disconnect_powerline(self, line_id=None, line_name=None, previous_action=None):
"""
Utilities to disconnect a powerline more easily.
Parameters
----------
line_id: ``int``
The powerline to be disconnected.
line_name: ``str``
Name of the powerline. Note that either line_id or line_name should be provided. If both are provided, it is
an error, if none are provided it is an error.
previous_action: :class:`BaseAction`
If you want to stack up multiple actions.
Returns
-------
res: :class:`BaseAction`
The action that will disconnect the powerline.
Notes
------
If you use `previous_action` it will modify the action **in place** which means that
`previous_action` will be modified by this method.
Examples
---------
You can use it this way:
.. code-block:: python
import grid2op
env = grid2op.make()
# and now you can disconnect line 0
disco_line_0 = env.action_space.disconnect_powerline(line_id=0)
# or line with name "0_4_1"
disco_line_1 = env.action_space.disconnect_powerline(line_name="0_4_1")
# and you can disconnect both line 2 and 3 with:
disco_line_2 = env.action_space.disconnect_powerline(line_id=2)
disco_line_2_and_3 = env.action_space.disconnect_powerline(line_id=3, previous_action=disco_line_2)
print(disco_line_2_and_3)
# be careful, "disco_line_2" is affected and is in fact equal to "disco_line_2_and_3"
# after the last call!
"""
if line_id is None and line_name is None:
raise AmbiguousAction(
'You need to provide either the "line_id" or the "line_name" of the powerline '
"you want to disconnect"
)
if line_id is not None and line_name is not None:
raise AmbiguousAction(
'You need to provide only of the "line_id" or the "line_name" of the powerline '
"you want to disconnect"
)
if line_id is None:
line_id = np.where(self.name_line == line_name)[0]
if not len(line_id):
raise AmbiguousAction(
'Line with name "{}" is not on the grid. The powerlines names are:\n{}'
"".format(line_name, self.name_line)
)
if previous_action is None:
res = self.actionClass()
else:
if not isinstance(previous_action, self.actionClass):
raise AmbiguousAction(
type(self).ERR_MSG_WRONG_TYPE.format(type(previous_action), self.actionClass)
)
res = previous_action
if line_id > self.n_line:
raise AmbiguousAction(
"You asked to disconnect powerline of id {} but this id does not exist. The "
"grid counts only {} powerline".format(line_id, self.n_line)
)
res.update({"set_line_status": [(line_id, -1)]})
return res
def reconnect_powerline(
self, bus_or, bus_ex, line_id=None, line_name=None, previous_action=None
):
"""
Utilities to reconnect a powerline more easily.
Note that in case "bus_or" or "bus_ex" are not the current bus to which the powerline is connected, they
will be affected by this action.
Notes
------
This utility requires you to specify on which bus you want to connect each end
("*origin*" or "*extremity*") of the powerline you want to reconnect.
If you don't want to specify them, you can set them to ``0`` and it will reconnect them
to the last known buses to which they were connected (this is automatically done by the
Environment since version `0.8.0`).
If you use `previous_action` it will modify the action **in place** which means that
`previous_action` will be modified by this method.
Parameters
----------
line_id: ``int``
The powerline to be disconnected.
bus_or: ``int``
On which bus to reconnect the powerline at its origin end
bus_ex: ``int``
On which bus to reconnect the powerline at its extremity end
previous_action
Returns
-------
res: :class:`BaseAction`
The action that will reconnect the powerline.
Examples
---------
You can use it this way:
.. code-block:: python
import grid2op
env = grid2op.make()
# and now you can reconnect line 0
reco_line_0 = env.action_space.reconnect_powerline(line_id=0, bus_or=1, bus_ex=0)
# or line with name "0_4_1" to bus 1 on its "origin" end and bus 2 on its "extremity" end
reco_line_1 = env.action_space.reconnect_powerline(line_name="0_4_1", bus_or=1, bus_ex=2)
# and you can reconnect both line 2 and 3 with:
reco_line_2 = env.action_space.reconnect_powerline(line_id=2, bus_or=1, bus_ex=2)
reco_line_2_and_3 = env.action_space.reconnect_powerline(line_id=3,
bus_or=0, bus_ex=1,
previous_action=reco_line_2)
print(reco_line_2_and_3)
# be careful, "reco_line_2" is affected and is in fact equal to "reco_line_2_and_3"
# after the last call!
"""
if line_id is None and line_name is None:
raise AmbiguousAction(
'You need to provide either the "line_id" or the "line_name" of the powerline '
"you want to reconnect"
)
if line_id is not None and line_name is not None:
raise AmbiguousAction(
'You need to provide only of the "line_id" or the "line_name" of the powerline '
"you want to reconnect"
)
if line_id is None:
line_id = np.where(self.name_line == line_name)[0]
if previous_action is None:
res = self.actionClass()
else:
if not isinstance(previous_action, self.actionClass):
raise AmbiguousAction(
type(self).ERR_MSG_WRONG_TYPE.format(type(previous_action), self.actionClass)
)
res = previous_action
if line_id > self.n_line:
raise AmbiguousAction(
"You asked to disconnect powerline of id {} but this id does not exist. The "
"grid counts only {} powerline".format(line_id, self.n_line)
)
res.update(
{
"set_line_status": [(line_id, 1)],
"set_bus": {
"lines_or_id": [(line_id, bus_or)],
"lines_ex_id": [(line_id, bus_ex)],
},
}
)
return res
def change_bus(
self,
name_element,
extremity=None,
substation=None,
type_element=None,
previous_action=None,
):
"""
Utilities to change the bus of a single element if you give its name. **NB** Changing a bus has the effect to
assign the object to bus 1 if it was before that connected to bus 2, and to assign it to bus 2 if it was
connected to bus 1. It should not be mixed up with :func:`ActionSpace.set_bus`.
If the parameter "*previous_action*" is not ``None``, then the action given to it is updated (in place) and
returned.
Parameters
----------
name_element: ``str``
The name of the element you want to change the bus
extremity: ``str``
"or" or "ex" for origin or extremity, ignored if an element is not a powerline.
substation: ``int``, optional
Its substation ID, if you know it will increase the performance. Otherwise, the method will search for it.
type_element: ``str``, optional
Type of the element to look for. It is here to speed up the computation. One of "line", "gen" or "load"
previous_action: :class:`Action`, optional
The (optional) action to update. It should be of the same type as :attr:`ActionSpace.actionClass`
Notes
------
If you use `previous_action` it will modify the action **in place** which means that
`previous_action` will be modified by this method.
Returns
-------
res: :class:`BaseAction`
The action with the modification implemented
Raises
------
res :class:`grid2op.Exception.AmbiguousAction`
If *previous_action* has not the same type as :attr:`ActionSpace.actionClass`.
Examples
---------
You can use it this way:
.. code-block:: python
import grid2op
env = grid2op.make()
# change bus of element named 'gen_1_0'
change_gen_0 = env.action_space.change_bus('gen_1_0', type_element="gen")
# you are not forced to specify the element types
change_load_1 = env.action_space.change_bus('load_2_1')
# dealing with powerline, you can affect one of its extremity
# (handy when you don't know on which substation it is located)
change_line_8_or = env.action_space.change_bus('5_11_8', extremity="or")
# and you can combine the action with
change_line_14_ex = env.action_space.change_bus('12_13_14', extremity="ex")
change_line_14_ex_load_2 = env.action_space.change_bus("load_3_2",
previous_action=change_line_14_ex)
print(change_line_14_ex_load_2)
# be careful, "change_line_14_ex" is affected and is in fact equal to
# "change_line_14_ex_load_2"
# after the last call!
"""
if previous_action is None:
res = self.actionClass()
else:
if not isinstance(previous_action, self.actionClass):
raise AmbiguousAction(
type(self).ERR_MSG_WRONG_TYPE.format(type(previous_action), self.actionClass)
)
res = previous_action
dict_, to_sub_pos, my_id, my_sub_id = self._extract_dict_action(
name_element, extremity, substation, type_element, res
)
arr_ = dict_["change_bus"]
me_id_ = to_sub_pos[my_id]
arr_[me_id_] = True
res.update({"change_bus": {"substations_id": [(my_sub_id, arr_)]}})
return res
def _extract_database_powerline(self, extremity):
if extremity[:2] == "or":
to_subid = self.line_or_to_subid
to_sub_pos = self.line_or_to_sub_pos
to_name = self.name_line
elif extremity[:2] == "ex":
to_subid = self.line_ex_to_subid
to_sub_pos = self.line_ex_to_sub_pos
to_name = self.name_line
elif extremity is None:
raise Grid2OpException(
"It is mandatory to know on which ends you want to change the bus of the powerline"
)
else:
raise Grid2OpException(
'unknown extremity specifier "{}". Extremity should be "or" or "ex"'
"".format(extremity)
)
return to_subid, to_sub_pos, to_name
def _extract_dict_action(
self,
name_element,
extremity=None,
substation=None,
type_element=None,
action=None,
):
to_subid = None
to_sub_pos = None
to_name = None
if type_element is None:
# i have to look through all the objects to find it
if name_element in self.name_load:
to_subid = self.load_to_subid
to_sub_pos = self.load_to_sub_pos
to_name = self.name_load
elif name_element in self.name_gen:
to_subid = self.gen_to_subid
to_sub_pos = self.gen_to_sub_pos
to_name = self.name_gen
elif name_element in self.name_line:
to_subid, to_sub_pos, to_name = self._extract_database_powerline(
extremity
)
else:
AmbiguousAction(
'Element "{}" not found in the powergrid'.format(name_element)
)
elif type_element == "line":
to_subid, to_sub_pos, to_name = self._extract_database_powerline(extremity)
elif type_element[:3] == "gen" or type_element[:4] == "prod":
to_subid = self.gen_to_subid
to_sub_pos = self.gen_to_sub_pos
to_name = self.name_gen
elif type_element == "load":
to_subid = self.load_to_subid
to_sub_pos = self.load_to_sub_pos
to_name = self.name_load
else:
raise AmbiguousAction(
'unknown type_element specifier "{}". type_element should be "line" or "load" '
'or "gen"'.format(extremity)
)
my_id = None
for i, nm in enumerate(to_name):
if nm == name_element:
my_id = i
break
if my_id is None:
raise AmbiguousAction(
'Element "{}" not found in the powergrid'.format(name_element)
)
my_sub_id = to_subid[my_id]
dict_ = action.effect_on(substation_id=my_sub_id)
return dict_, to_sub_pos, my_id, my_sub_id
def set_bus(
self,
name_element,
new_bus,
extremity=None,
substation=None,
type_element=None,
previous_action=None,
):
"""
Utilities to set the bus of a single element if you give its name. **NB** Setting a bus has the effect to
assign the object to this bus. If it was before that connected to bus 1, and you assign it to bus 1 (*new_bus*
= 1) it will stay on bus 1. If it was on bus 2 (and you still assign it to bus 1) it will be moved to bus 2.
1. It should not be mixed up with :func:`ActionSpace.change_bus`.
If the parameter "*previous_action*" is not ``None``, then the action given to it is updated (in place) and
returned.
Parameters
----------
name_element: ``str``
The name of the element you want to change the bus
new_bus: ``int``
Id of the new bus to connect the object to.
extremity: ``str``
"or" or "ext" for origin or extremity, ignored if the element is not a powerline.
substation: ``int``, optional
Its substation ID, if you know it will increase the performance. Otherwise, the method will search for it.
type_element: ``str``, optional
Type of the element to look for. It is here to speed up the computation. One of "line", "gen" or "load"
previous_action: :class:`Action`, optional
The (optional) action to update. It should be of the same type as :attr:`ActionSpace.actionClass`
Returns
-------
res: :class:`BaseAction`
The action with the modification implemented
Raises
------
AmbiguousAction
If *previous_action* has not the same type as :attr:`ActionSpace.actionClass`.
Examples
---------
You can use it this way:
.. code-block:: python
import grid2op
env = grid2op.make()
# set bus of element named 'gen_1_0' to bus 2
setbus_gen_0 = env.action_space.set_bus('gen_1_0', new_bus=2, type_element="gen")
# are not forced to specify the element types (example with load set to bus 1)
setbus_load_1 = env.action_space.set_bus('load_2_1', new_bus=1)
# dealing with powerline, you can affect one of its extremity
# (handy when you don't know on which substation it is located)
setbus_line_8_or = env.action_space.set_bus('5_11_8', new_bus=1, extremity="or")
# and you can combine the actions with:
setbus_line_14_ex = env.action_space.set_bus('12_13_14', new_bus=2, extremity="ex")
setbus_line_14_ex_load_2 = env.action_space.set_bus("load_3_2", new_bus=1,
previous_action=setbus_line_14_ex)
print(setbus_line_14_ex_load_2)
# be careful, "setbus_line_14_ex" is affected and is in fact equal to
# "setbus_line_14_ex_load_2"
# after the last call!
"""
if previous_action is None:
res = self.actionClass()
else:
res = previous_action
dict_, to_sub_pos, my_id, my_sub_id = self._extract_dict_action(
name_element, extremity, substation, type_element, res
)
dict_["set_bus"][to_sub_pos[my_id]] = new_bus
res.update({"set_bus": {"substations_id": [(my_sub_id, dict_["set_bus"])]}})
return res
def get_set_line_status_vect(self):
"""
Computes and returns a vector that can be used in the "set_status" keyword if building an :class:`BaseAction`
Returns
-------
res: :class:`numpy.array`, dtype:dt_int
A vector that doesn't affect the grid, but can be used in "set_line_status"
"""
return self._template_act.get_set_line_status_vect()
def get_change_line_status_vect(self):
"""
Computes and return a vector that can be used in the "change_line_status" keyword if building an :class:`BaseAction`
Returns
-------
res: :class:`numpy.array`, dtype:dt_bool
A vector that doesn't affect the grid, but can be used in "change_line_status"
"""
return self._template_act.get_change_line_status_vect()
@staticmethod
def get_all_unitary_line_set(action_space):
"""
Return all unitary actions that "set" powerline status.
For each powerline, there are 5 such actions:
- disconnect it
- connected it origin at bus 1 and extremity at bus 1
- connected it origin at bus 1 and extremity at bus 2
- connected it origin at bus 2 and extremity at bus 1
- connected it origin at bus 2 and extremity at bus 2
Parameters
----------
action_space: :class:`grid2op.BaseAction.ActionSpace`
The action space used.
Returns
-------
res: ``list``
The list of all "set" action acting on powerline status
"""
res = []
# powerline switch: disconnection
for i in range(action_space.n_line):
res.append(action_space.disconnect_powerline(line_id=i))
# powerline switch: reconnection
for bus_or in [1, 2]:
for bus_ex in [1, 2]:
for i in range(action_space.n_line):
act = action_space.reconnect_powerline(
line_id=i, bus_ex=bus_ex, bus_or=bus_or
)
res.append(act)
return res
@staticmethod
def get_all_unitary_line_set_simple(action_space):
"""
Return all unitary actions that "set" powerline status but in a
more simple way than :func:`SerializableActionSpace.get_all_unitary_line_set`
For each powerline, there are 2 such actions:
- disconnect it
- connected it (implicitly to the last known busbar where each
side used to be connected)
It has the main advantages to "only" add 2 actions per powerline
instead of 5.
Parameters
----------
action_space: :class:`grid2op.BaseAction.ActionSpace`
The action space used.
Returns
-------
res: ``list``
The list of all "set" action acting on powerline status
"""
res = []
# powerline set: disconnection
for i in range(action_space.n_line):
res.append(action_space({"set_line_status": [(i,-1)]}))
# powerline set: reconnection
for i in range(action_space.n_line):
res.append(action_space({"set_line_status": [(i, +1)]}))
return res
@staticmethod
def get_all_unitary_alarm(action_space):
"""
.. warning::
/!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\
"""
res = []
for i in range(action_space.dim_alarms):
status = np.full(action_space.dim_alarms, fill_value=False, dtype=dt_bool)
status[i] = True
res.append(action_space({"raise_alarm": status}))
return res
@staticmethod
def get_all_unitary_alert(action_space):
"""
Return all unitary actions that raise an alert on powerlines.
.. warning:: There will be one action per combination of attackable lines, so basically, if
you can raise alerts on 10 powerline, you will end up with 2**10 actions.
If you got 22 attackable lines, then you got 2**22 actions... probably a TERRIBLE IDEA !
"""
res = []
possible_values = [False, True]
if action_space.dim_alerts:
for status in itertools.product(possible_values, repeat=type(action_space).dim_alerts):
res.append(action_space({"raise_alert": np.array(status, dtype=dt_bool)}))
return res
@staticmethod
def get_all_unitary_line_change(action_space):
"""
Return all unitary actions that "change" powerline status.
For each powerline, there is only one such action that consist in change its status.
Parameters
----------
action_space: :class:`grid2op.BaseAction.ActionSpace`
The action space used.
Returns
-------
res: ``list``
The list of all "change" action acting on powerline status
"""
res = []
for i in range(action_space.n_line):
status = action_space.get_change_line_status_vect()
status[i] = True
res.append(action_space({"change_line_status": status}))
return res
    @staticmethod
    def get_all_unitary_topologies_change(action_space, sub_id=None):
        """
        This methods allows to compute and return all the unitary topological changes that can be performed on a
        powergrid.
        The changes will be performed using the "change_bus" method. It excludes the "do nothing" action
        Parameters
        ----------
        action_space: :class:`grid2op.BaseAction.ActionSpace`
            The action space used.
        sub_id: ``int``, optional
            The substation ID. If ``None`` it is done for all substations.
        Notes
        -----
        This might take a long time on large grid (possibly 10-15 mins for the IEEE 118 for example)
        Returns
        -------
        res: ``list``
            The list of all the topological actions that can be performed.
        Examples
        ---------
        You can use it this way:
        .. code-block:: python
            import grid2op
            env = grid2op.make()
            # all "change bus" action for all the substations
            all_change_actions = env.action_space.get_all_unitary_topologies_change(env.action_space)
            # you can only study "change_bus" action for a given substation (can dramatically improve the computation time)
            all_change_actions_sub4 = env.action_space.get_all_unitary_topologies_change(env.action_space, sub_id=4)
        """
        res = []
        S = [0, 1]
        for sub_id_, num_el in enumerate(action_space.sub_info):
            if sub_id is not None:
                if sub_id_ != sub_id:
                    # a specific substation was requested: skip every other one
                    continue
            already_set = set()
            # remove the "do nothing" action, which is either equivalent to not change anything
            # or to change everything
            already_set.add(tuple([1 for _ in range(num_el)]))
            already_set.add(tuple([0 for _ in range(num_el)]))
            # enumerate every 0/1 "toggle" pattern over the `num_el` elements of this
            # substation; a pattern and its complement lead to the same final topology,
            # so each pair is emitted only once (the complement goes to `already_set`)
            for tup_ in itertools.product(S, repeat=num_el):
                if tup_ not in already_set:
                    indx = np.full(shape=num_el, fill_value=False, dtype=dt_bool)
                    # tup = np.array((0, *tup)).astype(dt_bool) # add a zero to first element -> break symmetry
                    tup = np.array(tup_).astype(
                        dt_bool
                    )  # boolean mask of the elements whose bus is toggled
                    # boolean-mask assignment: indx becomes True exactly where tup is True
                    indx[tup] = True
                    action = action_space(
                        {"change_bus": {"substations_id": [(sub_id_, indx)]}}
                    )
                    already_set.add(tup_)
                    already_set.add(tuple([1 - el for el in tup_]))
                    res.append(action)
                # otherwise, the change has already been added (NB by symmetry, if there are A, B, C and D in
                # a substation, changing A,B or changing C,D always has the same effect.
        return res
    @staticmethod
    def get_all_unitary_topologies_set(action_space, sub_id=None):
        """
        This methods allows to compute and return all the unitary topological changes that can be performed on a
        powergrid.
        The changes will be performed using the "set_bus" method. The "do nothing" action will be counted once
        per substation in the grid.
        Parameters
        ----------
        action_space: :class:`grid2op.BaseAction.ActionHelper`
            The action space used.
        sub_id: ``int``, optional
            The substation ID. If ``None`` it is done for all substations.
        Notes
        -----
        This might take a long time on large grid (possibly 10-15 mins for the IEEE 118 for example)
        Returns
        -------
        res: ``list``
            The list of all the topological actions that can be performed.
        Examples
        ---------
        You can use it this way:
        .. code-block:: python
            import grid2op
            env = grid2op.make()
            # all "set_bus" actions
            all_change_actions = env.action_space.get_all_unitary_topologies_set(env.action_space)
            # you can only study "set_bus" action for a given substation (can dramatically improve the computation time)
            all_change_actions_sub4 = env.action_space.get_all_unitary_topologies_set(env.action_space, sub_id=4)
        """
        res = []
        S = [0, 1]
        for sub_id_, num_el in enumerate(action_space.sub_info):
            tmp = []
            if sub_id is not None:
                if sub_id_ != sub_id:
                    # a specific substation was requested: skip every other one
                    continue
            # reference topology: every element of the substation on bus 1
            new_topo = np.full(shape=num_el, fill_value=1, dtype=dt_int)
            # perform the action "set everything on bus 1"
            action = action_space(
                {"set_bus": {"substations_id": [(sub_id_, new_topo)]}}
            )
            tmp.append(action)
            # positions (within the substation) of every powerline end connected to it,
            # used below to reject topologies with a powerline-free bus
            powerlines_or_id = action_space.line_or_to_sub_pos[
                action_space.line_or_to_subid == sub_id_
            ]
            powerlines_ex_id = action_space.line_ex_to_sub_pos[
                action_space.line_ex_to_subid == sub_id_
            ]
            powerlines_id = np.concatenate((powerlines_or_id, powerlines_ex_id))
            # computes all the topologies at 2 buses for this substation;
            # only 2**(num_el - 1) splits are enumerated because the first element
            # is pinned to the bus-1 group (swapping bus 1 / bus 2 is equivalent)
            for tup in itertools.product(S, repeat=num_el - 1):
                indx = np.full(shape=num_el, fill_value=False, dtype=dt_bool)
                tup = np.array((0, *tup)).astype(
                    dt_bool
                )  # add a zero to first element -> break symmetry
                indx[tup] = True
                if np.sum(indx) >= 2 and np.sum(~indx) >= 2:
                    # i need 2 elements on each bus at least (almost all the times, except when a powerline
                    # is alone on its bus)
                    new_topo = np.full(shape=num_el, fill_value=1, dtype=dt_int)
                    new_topo[~indx] = 2
                    if (
                        np.sum(indx[powerlines_id]) == 0
                        or np.sum(~indx[powerlines_id]) == 0
                    ):
                        # if there is a "node" without a powerline, the topology is not valid
                        continue
                    action = action_space(
                        {"set_bus": {"substations_id": [(sub_id_, new_topo)]}}
                    )
                    tmp.append(action)
                else:
                    # i need to take into account the case where 1 powerline is alone on a bus too
                    # (allowed only when each of the two groups keeps at least one powerline end)
                    if (
                        np.sum(indx[powerlines_id]) >= 1
                        and np.sum(~indx[powerlines_id]) >= 1
                    ):
                        new_topo = np.full(shape=num_el, fill_value=1, dtype=dt_int)
                        new_topo[~indx] = 2
                        action = action_space(
                            {"set_bus": {"substations_id": [(sub_id_, new_topo)]}}
                        )
                        tmp.append(action)
            if len(tmp) >= 2:
                # if i have only one single topology on this substation, it doesn't make any action
                # i cannot change the topology is there is only one.
                res += tmp
        return res
@staticmethod
def get_all_unitary_redispatch(
action_space, num_down=5, num_up=5, max_ratio_value=1.0
):
"""
Redispatching action are continuous action. This method is an helper to convert the continuous
action into discrete action (by rounding).
The number of actions is equal to num_down + num_up (by default 10) per dispatchable generator.
This method acts as followed:
- it will divide the interval [-gen_max_ramp_down, 0] into `num_down`, each will make
a distinct action (then counting `num_down` different action, because 0.0 is removed)
- it will do the same for [0, gen_maw_ramp_up]
Parameters
----------
action_space: :class:`grid2op.BaseAction.ActionHelper`
The action space used.
num_down: ``int``
In how many intervals the "redispatch down" will be split
num_up: ``int``
In how many intervals the "redispatch up" will be split
max_ratio_value: ``float``
Expressed in terms of ratio of `gen_max_ramp_up` / `gen_max_ramp_down`, it gives the maximum value
that will be used to generate the actions. For example, if `max_ratio_value=0.5`, then it will not
generate actions that attempts to redispatch more than `0.5 * gen_max_ramp_up` (or less than
`- 0.5 * gen_max_ramp_down`). This helps reducing the instability that can be caused by redispatching.
Returns
-------
res: ``list``
The list of all discretized redispatching actions.
"""
res = []
n_gen = len(action_space.gen_redispatchable)
for gen_idx in range(n_gen):
# Skip non-dispatchable generators
if not action_space.gen_redispatchable[gen_idx]:
continue
# Create evenly spaced positive interval
ramps_up = np.linspace(
0.0, max_ratio_value * action_space.gen_max_ramp_up[gen_idx], num=num_up
)
ramps_up = ramps_up[1:] # Exclude redispatch of 0MW
# Create evenly spaced negative interval
ramps_down = np.linspace(
-max_ratio_value * action_space.gen_max_ramp_down[gen_idx],
0.0,
num=num_down,
)
ramps_down = ramps_down[:-1] # Exclude redispatch of 0MW
# Merge intervals
ramps = np.append(ramps_up, ramps_down)
# Create ramp up actions
for ramp in ramps:
action = action_space({"redispatch": [(gen_idx, ramp)]})
res.append(action)
return res
@staticmethod
def get_all_unitary_curtail(action_space, num_bin=10, min_value=0.5):
"""
Curtailment action are continuous action. This method is an helper to convert the continuous
action into discrete action (by rounding).
The number of actions is equal to num_bin (by default 10) per renewable generator
(remember that only renewable generator can be curtailed in grid2op).
This method acts as followed:
- it will divide the interval [0, 1] into `num_bin`, each will make
a distinct action (then counting `num_bin` different action, because 0.0 is removed)
Parameters
----------
action_space: :class:`grid2op.BaseAction.ActionHelper`
The action space used.
num_bin: ``int``
Number of actions for each renewable generator
min_value: ``float``
Between 0. and 1.: minimum value allow for the curtailment. FOr example if you set this
value to be 0.2 then no curtailment will be done to limit the generator below 20% of its maximum capacity
Returns
-------
res: ``list``
The list of all discretized curtailment actions.
"""
res = []
n_gen = len(action_space.gen_renewable)
for gen_idx in range(n_gen):
# Skip non-renewable generators (they cannot be curtail)
if not action_space.gen_renewable[gen_idx]:
continue
# Create evenly spaced interval
ramps = np.linspace(min_value, 1.0, num=num_bin)
# Create ramp up actions
for ramp in ramps:
action = action_space({"curtail": [(gen_idx, ramp)]})
res.append(action)
return res
@staticmethod
def get_all_unitary_storage(action_space, num_down=5, num_up=5):
"""
Storage action are continuous action. This method is an helper to convert the continuous
action into discrete action (by rounding).
The number of actions is equal to num_down + num_up (by default 10) per storage unit.
This method acts as followed:
- it will divide the interval [-storage_max_p_prod, 0] into `num_down`, each will make
a distinct action (then counting `num_down` different action, because 0.0 is removed)
- it will do the same for [0, storage_max_p_absorb]
Parameters
----------
action_space: :class:`grid2op.BaseAction.ActionHelper`
The action space used.
Returns
-------
res: ``list``
The list of all discretized actions on storage units.
"""
res = []
n_stor = action_space.n_storage
for stor_idx in range(n_stor):
# Create evenly spaced positive interval
ramps_up = np.linspace(
0.0, action_space.storage_max_p_absorb[stor_idx], num=num_up
)
ramps_up = ramps_up[1:] # Exclude action of 0MW
# Create evenly spaced negative interval
ramps_down = np.linspace(
-action_space.storage_max_p_prod[stor_idx], 0.0, num=num_down
)
ramps_down = ramps_down[:-1] # Exclude action of 0MW
# Merge intervals
ramps = np.append(ramps_up, ramps_down)
# Create ramp up actions
for ramp in ramps:
action = action_space({"set_storage": [(stor_idx, ramp)]})
res.append(action)
return res
    def _custom_deepcopy_for_copy(self, new_obj):
        # Fill `new_obj` (a freshly created, empty instance) with this space's
        # state; the base class copies the generic part first.
        super()._custom_deepcopy_for_copy(new_obj)
        # SerializableObservationSpace
        # NOTE(review): `subtype` presumably holds the concrete action class on the
        # base serializable space (it is assigned to `actionClass` here) — confirm
        # against the base-class definition.
        new_obj.actionClass = self.subtype
        new_obj._template_act = self.actionClass()
def _aux_get_back_to_ref_state_curtail(self, res, obs):
is_curtailed = obs.curtailment_limit != 1.0
if np.any(is_curtailed):
res["curtailment"] = []
if not self.supports_type("curtail"):
warnings.warn(
"A generator is is curtailed, but you cannot perform curtailment action. Impossible to get back to the original topology."
)
return
curtail = np.full(obs.n_gen, fill_value=np.NaN)
curtail[is_curtailed] = 1.0
act = self.actionClass()
act.curtail = curtail
res["curtailment"].append(act)
def _aux_get_back_to_ref_state_line(self, res, obs):
disc_lines = ~obs.line_status
if np.any(disc_lines):
li_disc = np.where(disc_lines)[0]
res["powerline"] = []
for el in li_disc:
act = self.actionClass()
if self.supports_type("set_line_status"):
act.set_line_status = [(el, +1)]
elif self.supports_type("change_line_status"):
act.change_line_status = [el]
else:
warnings.warn(
"A powerline is disconnected by you cannot reconnect it with your action space. Impossible to get back to the original topology"
)
break
res["powerline"].append(act)
def _aux_get_back_to_ref_state_sub(self, res, obs):
not_on_bus_1 = obs.topo_vect > 1 # disconnected lines are handled above
if np.any(not_on_bus_1):
res["substation"] = []
subs_changed = type(self).grid_objects_types[
not_on_bus_1, type(self).SUB_COL
]
for sub_id in set(subs_changed):
nb_el: int = type(self).sub_info[sub_id]
act = self.actionClass()
if self.supports_type("set_bus"):
act.sub_set_bus = [(sub_id, np.ones(nb_el, dtype=dt_int))]
elif self.supports_type("change_bus"):
arr_ = np.full(nb_el, fill_value=False, dtype=dt_bool)
changed = obs.state_of(substation_id=sub_id)["topo_vect"] >= 1
arr_[changed] = True
act.sub_change_bus = [(sub_id, arr_)]
else:
warnings.warn(
"A substation is is not on its original topology (all at bus 1) and your action type does not allow to change it. "
"Impossible to get back to the original topology."
)
break
res["substation"].append(act)
def _aux_get_back_to_ref_state_redisp(self, res, obs, precision=1e-5):
# TODO this is ugly, probably slow and could definitely be optimized
notredisp_setpoint = obs.target_dispatch != 0.0
if np.any(notredisp_setpoint):
need_redisp = np.where(notredisp_setpoint)[0]
res["redispatching"] = []
# combine generators and do not exceed ramps (up or down)
rem = np.zeros(self.n_gen, dtype=dt_float)
nb_ = np.zeros(self.n_gen, dtype=dt_int)
for gen_id in need_redisp:
if obs.target_dispatch[gen_id] > 0.0:
div_ = obs.target_dispatch[gen_id] / obs.gen_max_ramp_down[gen_id]
else:
div_ = -obs.target_dispatch[gen_id] / obs.gen_max_ramp_up[gen_id]
div_ = np.round(div_, precision)
nb_[gen_id] = int(div_)
if div_ != int(div_):
if obs.target_dispatch[gen_id] > 0.0:
rem[gen_id] = (
obs.target_dispatch[gen_id]
- obs.gen_max_ramp_down[gen_id] * nb_[gen_id]
)
else:
rem[gen_id] = (
-obs.target_dispatch[gen_id]
- obs.gen_max_ramp_up[gen_id] * nb_[gen_id]
)
nb_[gen_id] += 1
# now create the proper actions
for nb_act in range(np.max(nb_)):
act = self.actionClass()
if not self.supports_type("redispatch"):
warnings.warn(
"Some redispatching are set, but you cannot modify it with your action type. Impossible to get back to the original topology."
)
break
reds = np.zeros(self.n_gen, dtype=dt_float)
for gen_id in need_redisp:
if nb_act >= nb_[gen_id]:
# nothing to add for this generator in this case
continue
if obs.target_dispatch[gen_id] > 0.0:
if nb_act < nb_[gen_id] - 1 or (
rem[gen_id] == 0.0 and nb_act == nb_[gen_id] - 1
):
reds[gen_id] = -obs.gen_max_ramp_down[gen_id]
else:
reds[gen_id] = -rem[gen_id]
else:
if nb_act < nb_[gen_id] - 1 or (
rem[gen_id] == 0.0 and nb_act == nb_[gen_id] - 1
):
reds[gen_id] = obs.gen_max_ramp_up[gen_id]
else:
reds[gen_id] = rem[gen_id]
act.redispatch = [
(gen_id, red_) for gen_id, red_ in zip(need_redisp, reds)
]
res["redispatching"].append(act)
def _aux_get_back_to_ref_state_storage(
self, res, obs, storage_setpoint, precision=5
):
# TODO this is ugly, probably slow and could definitely be optimized
# TODO refacto with the redispatching
notredisp_setpoint = obs.storage_charge / obs.storage_Emax != storage_setpoint
delta_time_hour = dt_float(obs.delta_time / 60.0)
if np.any(notredisp_setpoint):
need_ajust = np.where(notredisp_setpoint)[0]
res["storage"] = []
# combine storage units and do not exceed maximum power
rem = np.zeros(self.n_storage, dtype=dt_float)
nb_ = np.zeros(self.n_storage, dtype=dt_int)
current_state = obs.storage_charge - storage_setpoint * obs.storage_Emax
for stor_id in need_ajust:
if current_state[stor_id] > 0.0:
div_ = current_state[stor_id] / (
obs.storage_max_p_prod[stor_id] * delta_time_hour
)
else:
div_ = -current_state[stor_id] / (
obs.storage_max_p_absorb[stor_id] * delta_time_hour
)
div_ = np.round(div_, precision)
nb_[stor_id] = int(div_)
if div_ != int(div_):
if current_state[stor_id] > 0.0:
rem[stor_id] = (
current_state[stor_id] / delta_time_hour
- obs.storage_max_p_prod[stor_id] * nb_[stor_id]
)
else:
rem[stor_id] = (
-current_state[stor_id] / delta_time_hour
- obs.storage_max_p_absorb[stor_id] * nb_[stor_id]
)
nb_[stor_id] += 1
# now create the proper actions
for nb_act in range(np.max(nb_)):
act = self.actionClass()
if not self.supports_type("set_storage"):
warnings.warn(
"Some storage are modififed, but you cannot modify them with your action type. Impossible to get back to the original topology."
)
break
reds = np.zeros(self.n_storage, dtype=dt_float)
for stor_id in need_ajust:
if nb_act >= nb_[stor_id]:
# nothing to add in this case
continue
if current_state[stor_id] > 0.0:
if nb_act < nb_[stor_id] - 1 or (
rem[stor_id] == 0.0 and nb_act == nb_[stor_id] - 1
):
reds[stor_id] = -obs.storage_max_p_prod[stor_id]
else:
reds[stor_id] = -rem[stor_id]
else:
if nb_act < nb_[stor_id] - 1 or (
rem[stor_id] == 0.0 and nb_act == nb_[stor_id] - 1
):
reds[stor_id] = obs.storage_max_p_absorb[stor_id]
else:
reds[stor_id] = rem[stor_id]
act.storage_p = [
(stor_id, red_) for stor_id, red_ in zip(need_ajust, reds)
]
res["storage"].append(act)
def get_back_to_ref_state(
self,
obs: "grid2op.Observation.BaseObservation",
storage_setpoint=0.5,
precision=5,
) -> Dict[str, List[BaseAction]]:
"""
This function returns the list of unary actions that you can perform in order to get back to the "fully meshed" / "initial" topology.
Parameters
----------
observation:
The current observation (the one you want to know actions to set it back ot)
Notes
-----
In this context a "unary" action, is (exclusive or):
- an action that acts on a single powerline
- an action on a single substation
- a redispatching action
- a storage action
The list might be relatively long, in the case where lots of actions are needed. Depending on the rules of the game (for example limiting the
action on one single substation), in order to get back to this topology, multiple consecutive actions will need to be implemented.
It is returned as a dictionnary of list. This dictionnary has 4 keys:
- "powerline" for the list of actions needed to set back the powerlines in a proper state (connected). They can be of type "change_line" or "set_line".
- "substation" for the list of actions needed to set back each substation in its initial state (everything connected to bus 1). They can be
implemented as "set_bus" or "change_bus"
- "redispatching": for the redispatching action (there can be multiple redispatching actions needed because of the ramps of the generator)
- "storage": for action on storage units (you might need to perform multiple storage actions because of the maximum power these units can absorb / produce )
- "curtailment": for curtailment action (usually at most one such action is needed)
After receiving these lists, the agent has the choice for the order in which to apply these actions as well as how to best combine them (you can most
of the time combine action of different types in grid2op.)
.. warning::
It does not presume anything on the availability of the objects. For examples, this funciton ignores completely the cooldowns on lines and substations.
.. warning::
For the storage units, it tries to set their current setpoint to `storage_setpoint` % of their storage total capacity. Applying these actions
at different times might not fully set back the storage to this capacity in case of storage losses !
.. warning::
See section :ref:`action_powerline_status` for note on the powerline status. It might happen that you modify a powerline status using a "set_bus" (ie
tagged as "susbtation" by this function).
.. warning::
It can raise warnings in case it's not possible, with your action space, to get back to the original / fully meshed topology
Examples
--------
TODO
"""
from grid2op.Observation.baseObservation import BaseObservation
if not isinstance(obs, BaseObservation):
raise AmbiguousAction(
"You need to provide a grid2op Observation for this function to work correctly."
)
res = {}
# powerline actions
self._aux_get_back_to_ref_state_line(res, obs)
# substations
self._aux_get_back_to_ref_state_sub(res, obs)
# redispatching
self._aux_get_back_to_ref_state_redisp(res, obs, precision=precision)
# storage
self._aux_get_back_to_ref_state_storage(
res, obs, storage_setpoint, precision=precision
)
# curtailment
self._aux_get_back_to_ref_state_curtail(res, obs)
return res
| 62,836 | 38.470477 | 164 | py |
Grid2Op | Grid2Op-master/grid2op/Action/TopologyAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action.PlayableAction import PlayableAction
class TopologyAction(PlayableAction):
    """
    A restricted :class:`PlayableAction` exposing only the topological
    modifications of the grid.

    The supported keys are "set_line_status", "change_line_status",
    "set_bus" and "change_bus". Every other key is not supported and
    any attempt to use one has no effect.
    """

    authorized_keys = {"set_line_status", "change_line_status", "set_bus", "change_bus"}

    # NB: the order of this list matters, it defines how the action is
    # serialized to / deserialized from a vector
    attr_list_vect = [
        "_set_line_status",
        "_switch_line_status",
        "_set_topo_vect",
        "_change_bus_vect",
    ]
    attr_list_set = set(attr_list_vect)

    def __init__(self):
        """Build an empty ("do nothing") topology action."""
        super().__init__()
| 1,255 | 29.634146 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Action/TopologyAndDispatchAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action.PlayableAction import PlayableAction
class TopologyAndDispatchAction(PlayableAction):
    """
    A :class:`PlayableAction` restricted to topological modifications
    and redispatching.

    The supported keys are "set_line_status", "change_line_status",
    "set_bus", "change_bus" and "redispatch". Every other key is not
    supported and any attempt to use one has no effect.
    """

    authorized_keys = {
        "set_line_status",
        "change_line_status",
        "set_bus",
        "change_bus",
        "redispatch",
    }

    # NB: the order of this list matters, it defines the vector
    # serialization order of the action
    attr_list_vect = [
        "_set_line_status",
        "_set_topo_vect",
        "_change_bus_vect",
        "_switch_line_status",
        "_redispatch",
    ]
    attr_list_set = set(attr_list_vect)

    def __init__(self):
        """Build an empty ("do nothing") action."""
        super().__init__()
| 1,338 | 30.139535 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Action/TopologyChangeAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action.PlayableAction import PlayableAction
class TopologyChangeAction(PlayableAction):
    """
    A :class:`PlayableAction` restricted to "change" topological
    operations.

    The supported keys are "change_line_status" and "change_bus".
    Every other key is not supported and any attempt to use one has
    no effect.
    """

    authorized_keys = {"change_line_status", "change_bus"}

    # NB: the order of this list matters, it defines the vector
    # serialization order of the action
    attr_list_vect = [
        "_change_bus_vect",
        "_switch_line_status",
    ]
    attr_list_set = set(attr_list_vect)

    def __init__(self):
        """Build an empty ("do nothing") action."""
        super().__init__()
| 1,084 | 36.413793 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Action/TopologyChangeAndDispatchAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action.PlayableAction import PlayableAction
class TopologyChangeAndDispatchAction(PlayableAction):
    """
    A :class:`PlayableAction` restricted to "change" topological
    operations and redispatching.

    The supported keys are "change_line_status", "change_bus" and
    "redispatch". Every other key is not supported and any attempt
    to use one has no effect.
    """

    authorized_keys = {"change_line_status", "change_bus", "redispatch"}

    # NB: the order of this list matters, it defines the vector
    # serialization order of the action
    attr_list_vect = [
        "_change_bus_vect",
        "_switch_line_status",
        "_redispatch",
    ]
    attr_list_set = set(attr_list_vect)

    def __init__(self):
        """Build an empty ("do nothing") action."""
        super().__init__()
| 1,203 | 33.4 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Action/TopologySetAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action.PlayableAction import PlayableAction
class TopologySetAction(PlayableAction):
    """
    A :class:`PlayableAction` restricted to "set" topological
    operations.

    The supported keys are "set_line_status" and "set_bus". Every
    other key is not supported and any attempt to use one has no
    effect.
    """

    authorized_keys = {"set_line_status", "set_bus"}

    # NB: the order of this list matters, it defines the vector
    # serialization order of the action
    attr_list_vect = [
        "_set_line_status",
        "_set_topo_vect",
    ]
    attr_list_set = set(attr_list_vect)

    def __init__(self):
        """Build an empty ("do nothing") action."""
        super().__init__()
| 1,062 | 34.433333 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Action/TopologySetAndDispatchAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action.PlayableAction import PlayableAction
class TopologySetAndDispatchAction(PlayableAction):
    """
    A :class:`PlayableAction` restricted to "set" topological
    operations and redispatching.

    The supported keys are "set_line_status", "set_bus" and
    "redispatch". Every other key is not supported and any attempt
    to use one has no effect.
    """

    authorized_keys = {"set_line_status", "set_bus", "redispatch"}

    # NB: the order of this list matters, it defines the vector
    # serialization order of the action
    attr_list_vect = [
        "_set_line_status",
        "_set_topo_vect",
        "_redispatch",
    ]
    attr_list_set = set(attr_list_vect)

    def __init__(self):
        """Build an empty ("do nothing") action."""
        super().__init__()
| 1,144 | 38.482759 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Action/VoltageOnlyAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import warnings
from grid2op.Exceptions import AmbiguousAction
from grid2op.Action.BaseAction import BaseAction
class VoltageOnlyAction(BaseAction):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Base class used by the voltage controller. It only allows modifying
    the generator voltage setpoint: actions of type "injection" with the
    "prod_v" keyword (and the shunts, when shunt data are available).
    """

    authorized_keys = {"injection"}
    attr_list_vect = ["prod_v"]
    attr_list_set = set(attr_list_vect)
    # class-level guards: the shunt attributes are appended at most once,
    # the first time an instance is built on a grid exposing shunt data
    _shunt_added = False
    _first_init = True

    def __init__(self):
        """Build the action, see :func:`BaseAction.__init__` for the details."""
        BaseAction.__init__(self)
        if not VoltageOnlyAction._shunt_added and self.shunts_data_available:
            VoltageOnlyAction.attr_list_vect += ["shunt_p", "shunt_q", "shunt_bus"]
            VoltageOnlyAction.authorized_keys.add("shunt")
            VoltageOnlyAction._shunt_added = True
        if VoltageOnlyAction._first_init:
            self._update_value_set()
            VoltageOnlyAction._first_init = False

    def _check_dict(self):
        """
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Raise an :class:`AmbiguousAction` if anything beside the allowed
        injection keys has been set on this action.
        """
        if self._dict_inj:
            for key in self._dict_inj:
                if key not in self.attr_list_vect:
                    raise AmbiguousAction(
                        'Impossible to modify something different than "prod_v" using '
                        '"VoltageOnlyAction" action.'
                    )

    def update(self, dict_):
        """
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Update this action from a dictionary, as in :func:`BaseAction.update`,
        but only the keys listed in :attr:`VoltageOnlyAction.authorized_keys`
        are taken into account; a warning is emitted for (and the action
        ignores) every other key.

        Parameters
        ----------
        dict_: :class:`dict`
            See the help of :func:`BaseAction.update` for a detailed
            explanation of the accepted content.

        Returns
        -------
        self:
            The action itself, which allows chaining calls to "update".
        """
        self._reset_vect()
        if dict_ is not None:
            for kk in dict_:
                if kk not in self.authorized_keys:
                    warnings.warn(
                        f'The key "{kk}" used to update an action will be ignored. '
                        f"Valid keys are {self.authorized_keys}"
                    )
            self._digest_injection(dict_)
            self._digest_shunt(dict_)
            self._check_dict()
        return self
| 3,912 | 36.266667 | 127 | py |
Grid2Op | Grid2Op-master/grid2op/Action/_BackendAction.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import numpy as np
from grid2op.dtypes import dt_int, dt_bool, dt_float
from grid2op.Space import GridObjects
# TODO see if it can be done in c++ easily
class ValueStore:
    """
    INTERNAL USE ONLY

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Stores a vector of values (a topology vector or an injection vector)
    together with a boolean mask (``changed``) flagging which components
    have been modified. Iterating over a ValueStore yields the
    ``(index, value)`` pairs of the modified components only.
    """

    def __init__(self, size, dtype):
        ## TODO at the init it's mandatory to have everything at "1" here
        # if topo is not "fully connected" it will not work
        self.values = np.ones(size, dtype=dtype)
        self.changed = np.full(size, dtype=dt_bool, fill_value=False)
        self.last_index = 0  # cursor used by the iterator protocol
        self.__size = size

        # the "set" / "change" semantics depend on the dtype:
        # integers are used for buses, floats for injections
        if issubclass(dtype, dt_int):
            self.set_val = self._set_val_int
            self.change_val = self._change_val_int
        elif issubclass(dtype, dt_float):
            self.set_val = self._set_val_float
            self.change_val = self._change_val_float

    def _set_val_float(self, newvals):
        """Overwrite the components where ``newvals`` is finite (NaN means "no modification")."""
        changed_ = np.isfinite(newvals)
        self.changed[changed_] = True
        self.values[changed_] = newvals[changed_]

    def _set_val_int(self, newvals):
        """Overwrite the components where ``newvals`` is non zero (0 means "no modification")."""
        changed_ = newvals != 0
        self.changed[changed_] = True
        self.values[changed_] = newvals[changed_]

    def _change_val_int(self, newvals):
        """Toggle the bus (1 <-> 2) of the connected components flagged in ``newvals``."""
        changed_ = newvals & (self.values > 0)
        self.changed[changed_] = True
        self.values[changed_] = (1 - self.values[changed_]) + 2

    def _change_val_float(self, newvals):
        """Add ``newvals`` (interpreted as a delta) to the components where it is non zero."""
        changed_ = newvals != 0.0
        self.changed[changed_] = True
        self.values[changed_] += newvals[changed_]

    def reset(self):
        """Clear all the "modified" flags and rewind the iterator."""
        self.changed[:] = False
        self.last_index = 0

    def change_status(self, switch, lineor_id, lineex_id, old_vect):
        """Toggle the status of the powerlines flagged in ``switch``: a connected
        line is disconnected (bus -1) and a disconnected line is reconnected to
        the buses stored in ``old_vect``."""
        if not np.any(switch):
            # nothing is modified so i stop here
            return

        # changed
        changed_ = switch

        # make it to ids
        id_chg_or = lineor_id[changed_]
        id_chg_ex = lineex_id[changed_]

        self.changed[id_chg_or] = True
        self.changed[id_chg_ex] = True

        # disconnect the powerlines
        me_or_bus = self.values[id_chg_or]
        me_ex_bus = self.values[id_chg_ex]
        was_connected = (me_or_bus > 0) | (me_ex_bus > 0)
        was_disco = ~was_connected

        # it was connected, i disconnect it
        self.values[id_chg_or[was_connected]] = -1
        self.values[id_chg_ex[was_connected]] = -1

        # it was disconnected, i reconnect it
        reco_or = id_chg_or[was_disco]
        reco_ex = id_chg_ex[was_disco]
        self.values[reco_or] = old_vect[reco_or]
        self.values[reco_ex] = old_vect[reco_ex]

    def set_status(self, set_status, lineor_id, lineex_id, old_vect):
        """Apply a "set" status vector (+1 reconnect, -1 disconnect, 0 do nothing)
        to both extremities of the powerlines; reconnection uses the last known
        buses stored in ``old_vect`` unless the topology already set a bus."""
        id_or = lineor_id
        id_ex = lineex_id

        # disco
        disco_ = set_status == -1
        reco_ = set_status == 1

        # make it to ids
        id_reco_or = id_or[reco_]
        id_reco_ex = id_ex[reco_]
        id_disco_or = id_or[disco_]
        id_disco_ex = id_ex[disco_]

        self.changed[id_reco_or] = True
        self.changed[id_reco_ex] = True
        self.changed[id_disco_or] = True
        self.changed[id_disco_ex] = True

        # disconnect the powerlines
        self.values[id_disco_or] = -1
        self.values[id_disco_ex] = -1

        # reconnect the powerlines
        # don't consider powerlines that have been already changed with topology
        # ie reconnect to the old bus only powerline from which we don't know the status
        id_reco_or = id_reco_or[self.values[id_reco_or] < 0]
        id_reco_ex = id_reco_ex[self.values[id_reco_ex] < 0]

        self.values[id_reco_or] = old_vect[id_reco_or]
        self.values[id_reco_ex] = old_vect[id_reco_ex]

    def get_line_status(self, lineor_id, lineex_id):
        """Return the (origin buses, extremity buses) of the powerlines."""
        return self.values[lineor_id], self.values[lineex_id]

    def update_connected(self, current_values):
        """Copy from ``current_values`` the buses of the elements that are connected (> 0)."""
        indx_conn = current_values.values > 0
        self.values[indx_conn] = current_values.values[indx_conn]

    def all_changed(self):
        """Mark every single component as modified."""
        self.reset()
        self.changed[:] = True

    def __getitem__(self, item):
        return self.values[item]

    def __setitem__(self, key, value):
        self.values[key] = value
        # bugfix: the mask must be set to True (the component IS modified),
        # not to the truthy / falsy conversion of ``value`` itself: assigning
        # e.g. 0 or 0.0 would otherwise wrongly mark it as "not changed"
        self.changed[key] = True

    def __iter__(self):
        return self

    def __next__(self):
        # yield the next (index, value) pair among the modified components
        res = None
        while self.last_index < self.values.shape[0]:
            if self.changed[self.last_index]:
                res = (self.last_index, self.values[self.last_index])
            self.last_index += 1
            if res is not None:
                break
        if res is not None:
            return res
        else:
            raise StopIteration

    def __len__(self):
        return self.__size

    def reorder(self, new_order):
        """reorder the element modified, this is use when converting backends only and should not be use
        outside of this usecase"""
        self.changed[:] = self.changed[new_order]
        self.values[:] = self.values[new_order]

    def copy_from_index(self, ref, index):
        """Reset this store then copy the components of ``ref`` located at ``index``."""
        self.reset()
        self.changed[:] = ref.changed[index]
        self.values[:] = ref.values[index]

    def __copy__(self):
        res = type(self)(self.values.shape[0], self.values.dtype.type)
        res.values[:] = self.values
        res.changed[:] = self.changed
        res.last_index = self.last_index
        res.__size = self.__size
        return res

    def __deepcopy__(self, memodict={}):
        res = type(self)(self.values.shape[0], self.values.dtype.type)
        res.values[:] = self.values
        res.changed[:] = self.changed
        res.last_index = self.last_index
        res.__size = self.__size
        return res
class _BackendAction(GridObjects):
    """
    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Internal class, use at your own risk.

    This class "digest" the players / environment / opponent / voltage controlers "action",
    and transform it to setpoint for the backend.
    """

    def __init__(self):
        GridObjects.__init__(self)
        # last connected registered (last known bus of each element while connected)
        self.last_topo_registered = ValueStore(self.dim_topo, dtype=dt_int)

        # topo at time t
        self.current_topo = ValueStore(self.dim_topo, dtype=dt_int)

        # injection at time t
        self.prod_p = ValueStore(self.n_gen, dtype=dt_float)
        self.prod_v = ValueStore(self.n_gen, dtype=dt_float)
        self.load_p = ValueStore(self.n_load, dtype=dt_float)
        self.load_q = ValueStore(self.n_load, dtype=dt_float)
        self.storage_power = ValueStore(self.n_storage, dtype=dt_float)

        # which of the (at most) 2 buses of each substation is in service
        self.activated_bus = np.full((self.n_sub, 2), dtype=dt_bool, fill_value=False)
        # maps every position of the topology vector to its substation id
        self.big_topo_to_subid = np.repeat(
            list(range(self.n_sub)), repeats=self.sub_info
        )

        # shunts
        if self.shunts_data_available:
            self.shunt_p = ValueStore(self.n_shunt, dtype=dt_float)
            self.shunt_q = ValueStore(self.n_shunt, dtype=dt_float)
            self.shunt_bus = ValueStore(self.n_shunt, dtype=dt_int)

        # line status at the origin / extremity side, before and after digesting an action
        self._status_or_before = np.ones(self.n_line, dtype=dt_int)
        self._status_ex_before = np.ones(self.n_line, dtype=dt_int)
        self._status_or = np.ones(self.n_line, dtype=dt_int)
        self._status_ex = np.ones(self.n_line, dtype=dt_int)

        # lazily-built per-element bus caches (see the get_*_bus methods)
        self._loads_bus = None
        self._gens_bus = None
        self._lines_or_bus = None
        self._lines_ex_bus = None
        self._storage_bus = None

    def __deepcopy__(self, memodict={}):
        """Deep copy: every ValueStore is duplicated."""
        res = type(self)()
        # last connected registered
        res.last_topo_registered = copy.deepcopy(self.last_topo_registered)
        res.current_topo = copy.deepcopy(self.current_topo)
        res.prod_p = copy.deepcopy(self.prod_p)
        res.prod_v = copy.deepcopy(self.prod_v)
        res.load_p = copy.deepcopy(self.load_p)
        res.load_q = copy.deepcopy(self.load_q)
        res.storage_power = copy.deepcopy(self.storage_power)
        res.activated_bus[:] = self.activated_bus
        res.big_topo_to_subid[:] = self.big_topo_to_subid
        if self.shunts_data_available:
            res.shunt_p = copy.deepcopy(self.shunt_p)
            res.shunt_q = copy.deepcopy(self.shunt_q)
            res.shunt_bus = copy.deepcopy(self.shunt_bus)

        res._status_or_before[:] = self._status_or_before
        res._status_ex_before[:] = self._status_ex_before
        res._status_or[:] = self._status_or
        res._status_ex[:] = self._status_ex

        res._loads_bus = copy.deepcopy(self._loads_bus)
        res._gens_bus = copy.deepcopy(self._gens_bus)
        res._lines_or_bus = copy.deepcopy(self._lines_or_bus)
        res._lines_ex_bus = copy.deepcopy(self._lines_ex_bus)
        res._storage_bus = copy.deepcopy(self._storage_bus)

        return res

    def __copy__(self):
        """Shallow copy (ValueStore.__copy__ still duplicates the underlying arrays)."""
        res = type(self)()
        # last connected registered
        res.last_topo_registered = copy.copy(self.last_topo_registered)
        res.current_topo = copy.copy(self.current_topo)
        res.prod_p = copy.copy(self.prod_p)
        res.prod_v = copy.copy(self.prod_v)
        res.load_p = copy.copy(self.load_p)
        res.load_q = copy.copy(self.load_q)
        res.storage_power = copy.copy(self.storage_power)
        res.activated_bus[:] = self.activated_bus
        res.big_topo_to_subid[:] = self.big_topo_to_subid
        if self.shunts_data_available:
            res.shunt_p = copy.copy(self.shunt_p)
            res.shunt_q = copy.copy(self.shunt_q)
            res.shunt_bus = copy.copy(self.shunt_bus)

        res._status_or_before[:] = self._status_or_before
        res._status_ex_before[:] = self._status_ex_before
        res._status_or[:] = self._status_or
        res._status_ex[:] = self._status_ex

        res._loads_bus = copy.copy(self._loads_bus)
        res._gens_bus = copy.copy(self._gens_bus)
        res._lines_or_bus = copy.copy(self._lines_or_bus)
        res._lines_ex_bus = copy.copy(self._lines_ex_bus)
        res._storage_bus = copy.copy(self._storage_bus)

        return res

    def reorder(self, no_load, no_gen, no_topo, no_storage, no_shunt):
        """
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        reorder the element modified, this is use when converting backends only and should not be use
        outside of this usecase

        no_* stands for "new order"
        """
        self.last_topo_registered.reorder(no_topo)
        self.current_topo.reorder(no_topo)
        self.prod_p.reorder(no_gen)
        self.prod_v.reorder(no_gen)
        self.load_p.reorder(no_load)
        self.load_q.reorder(no_load)

        self.storage_power.reorder(no_storage)

        if self.shunts_data_available:
            self.shunt_p.reorder(no_shunt)
            self.shunt_q.reorder(no_shunt)
            self.shunt_bus.reorder(no_shunt)

    def reset(self):
        """Clear every "modified" flag; storage power is forced back to 0. MW each step."""
        # last topo
        self.last_topo_registered.reset()

        # topo at time t
        self.current_topo.reset()

        # injection at time t
        self.prod_p.reset()
        self.prod_v.reset()
        self.load_p.reset()
        self.load_q.reset()
        self.storage_power.reset()
        # storage unit have their power reset to 0. each step
        self.storage_power.changed[:] = True
        self.storage_power.values[:] = 0.0

        # shunts
        if self.shunts_data_available:
            self.shunt_p.reset()
            self.shunt_q.reset()
            self.shunt_bus.reset()

    def all_changed(self):
        """Flag every component of every store as modified (used to force a full update)."""
        # last topo
        self.last_topo_registered.all_changed()

        # topo at time t
        self.current_topo.all_changed()

        # injection at time t
        self.prod_p.all_changed()
        self.prod_v.all_changed()
        self.load_p.all_changed()
        self.load_q.all_changed()
        self.storage_power.all_changed()

        # TODO handle shunts
        # shunts
        # if self.shunts_data_available:
        #     self.shunt_p.all_changed()
        #     self.shunt_q.all_changed()
        #     self.shunt_bus.all_changed()

    def set_redispatch(self, new_redispatching):
        """Apply a redispatching vector (a delta, in MW) to the generator active setpoints."""
        self.prod_p.change_val(new_redispatching)

    def __iadd__(self, other):
        """
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Digest a grid2op action: fold its injections, redispatching, storage,
        shunt, line status and topology modifications into this backend setpoint.

        Parameters
        ----------
        other: :class:`grid2op.Action.BaseAction.BaseAction`

        Returns
        -------
        self

        """

        dict_injection = other._dict_inj
        set_status = other._set_line_status
        switch_status = other._switch_line_status
        set_topo_vect = other._set_topo_vect
        switch_topo_vect = other._change_bus_vect
        redispatching = other._redispatch
        storage_power = other._storage_power

        # I deal with injections
        # Ia set the injection
        if other._modif_inj:
            if "load_p" in dict_injection:
                tmp = dict_injection["load_p"]
                self.load_p.set_val(tmp)
            if "load_q" in dict_injection:
                tmp = dict_injection["load_q"]
                self.load_q.set_val(tmp)
            if "prod_p" in dict_injection:
                tmp = dict_injection["prod_p"]
                self.prod_p.set_val(tmp)
            if "prod_v" in dict_injection:
                tmp = dict_injection["prod_v"]
                self.prod_v.set_val(tmp)

        # Ib change the injection aka redispatching
        if other._modif_redispatch:
            self.prod_p.change_val(redispatching)

        # Ic storage unit
        if other._modif_storage:
            self.storage_power.set_val(storage_power)

        # II shunts
        if self.shunts_data_available:
            shunts = {}
            if other.shunts_data_available:
                shunts["shunt_p"] = other.shunt_p
                shunts["shunt_q"] = other.shunt_q
                shunts["shunt_bus"] = other.shunt_bus

            arr_ = shunts["shunt_p"]
            self.shunt_p.set_val(arr_)
            arr_ = shunts["shunt_q"]
            self.shunt_q.set_val(arr_)
            arr_ = shunts["shunt_bus"]
            self.shunt_bus.set_val(arr_)

        # III line status
        # this need to be done BEFORE the topology, as a connected powerline will be connected to their old bus.
        # regardless if the status is changed in the action or not.
        if other._modif_change_status:
            self.current_topo.change_status(
                switch_status,
                self.line_or_pos_topo_vect,
                self.line_ex_pos_topo_vect,
                self.last_topo_registered,
            )
        if other._modif_set_status:
            self.current_topo.set_status(
                set_status,
                self.line_or_pos_topo_vect,
                self.line_ex_pos_topo_vect,
                self.last_topo_registered,
            )

        # if other._modif_change_status or other._modif_set_status:
        (
            self._status_or_before[:],
            self._status_ex_before[:],
        ) = self.current_topo.get_line_status(
            self.line_or_pos_topo_vect, self.line_ex_pos_topo_vect
        )

        # IV topo
        if other._modif_change_bus:
            self.current_topo.change_val(switch_topo_vect)
        if other._modif_set_bus:
            self.current_topo.set_val(set_topo_vect)

        # V Force disconnected status
        # of disconnected powerlines extremities
        self._status_or[:], self._status_ex[:] = self.current_topo.get_line_status(
            self.line_or_pos_topo_vect, self.line_ex_pos_topo_vect
        )

        # At least one disconnected extremity
        if other._modif_change_bus or other._modif_set_bus:
            disco_or = (self._status_or_before == -1) | (self._status_or == -1)
            disco_ex = (self._status_ex_before == -1) | (self._status_ex == -1)
            disco_now = (
                disco_or | disco_ex
            )  # a powerline is disconnected if at least one of its extremity is

            # added
            reco_or = (self._status_or_before == -1) & (self._status_or >= 1)
            # bugfix: the extremity side must be compared to its OWN previous
            # status (_status_ex_before) and not, by copy-paste, to the origin
            # side's previous status (mirrors the disco_or / disco_ex pattern)
            reco_ex = (self._status_ex_before == -1) & (self._status_ex >= 1)
            reco_now = reco_or | reco_ex

            # Set nothing
            set_now = np.zeros_like(self._status_or)
            # Force some disconnections
            set_now[disco_now] = -1
            set_now[reco_now] = 1

            self.current_topo.set_status(
                set_now,
                self.line_or_pos_topo_vect,
                self.line_ex_pos_topo_vect,
                self.last_topo_registered,
            )

        return self

    def __call__(self):
        """Return the setpoint to apply to the backend:
        (active buses, injections tuple, topology store, shunts tuple or None)."""
        injections = (
            self.prod_p,
            self.prod_v,
            self.load_p,
            self.load_q,
            self.storage_power,
        )
        topo = self.current_topo
        shunts = None
        if self.shunts_data_available:
            shunts = self.shunt_p, self.shunt_q, self.shunt_bus
        self._get_active_bus()
        return self.activated_bus, injections, topo, shunts

    def get_loads_bus(self):
        """Return (lazily building the cache) the local bus of each load."""
        if self._loads_bus is None:
            self._loads_bus = ValueStore(self.n_load, dtype=dt_int)
        self._loads_bus.copy_from_index(self.current_topo, self.load_pos_topo_vect)
        return self._loads_bus

    def _aux_to_global(self, value_store, to_subid):
        """Convert a ValueStore of local bus ids to global bus ids (deep-copied)."""
        value_store = copy.deepcopy(value_store)
        value_store.values = type(self).local_bus_to_global(value_store.values, to_subid)
        return value_store

    def get_loads_bus_global(self):
        """Return the global bus of each load."""
        tmp_ = self.get_loads_bus()
        return self._aux_to_global(tmp_, self.load_to_subid)

    def get_gens_bus(self):
        """Return (lazily building the cache) the local bus of each generator."""
        if self._gens_bus is None:
            self._gens_bus = ValueStore(self.n_gen, dtype=dt_int)
        self._gens_bus.copy_from_index(self.current_topo, self.gen_pos_topo_vect)
        return self._gens_bus

    def get_gens_bus_global(self):
        """Return the global bus of each generator."""
        tmp_ = copy.deepcopy(self.get_gens_bus())
        return self._aux_to_global(tmp_, self.gen_to_subid)

    def get_lines_or_bus(self):
        """Return (lazily building the cache) the local bus of each line origin side."""
        if self._lines_or_bus is None:
            self._lines_or_bus = ValueStore(self.n_line, dtype=dt_int)
        self._lines_or_bus.copy_from_index(
            self.current_topo, self.line_or_pos_topo_vect
        )
        return self._lines_or_bus

    def get_lines_or_bus_global(self):
        """Return the global bus of each line origin side."""
        tmp_ = self.get_lines_or_bus()
        return self._aux_to_global(tmp_, self.line_or_to_subid)

    def get_lines_ex_bus(self):
        """Return (lazily building the cache) the local bus of each line extremity side."""
        if self._lines_ex_bus is None:
            self._lines_ex_bus = ValueStore(self.n_line, dtype=dt_int)
        self._lines_ex_bus.copy_from_index(
            self.current_topo, self.line_ex_pos_topo_vect
        )
        return self._lines_ex_bus

    def get_lines_ex_bus_global(self):
        """Return the global bus of each line extremity side."""
        tmp_ = self.get_lines_ex_bus()
        return self._aux_to_global(tmp_, self.line_ex_to_subid)

    def get_storages_bus(self):
        """Return (lazily building the cache) the local bus of each storage unit."""
        if self._storage_bus is None:
            self._storage_bus = ValueStore(self.n_storage, dtype=dt_int)
        self._storage_bus.copy_from_index(self.current_topo, self.storage_pos_topo_vect)
        return self._storage_bus

    def get_storages_bus_global(self):
        """Return the global bus of each storage unit."""
        tmp_ = self.get_storages_bus()
        return self._aux_to_global(tmp_, self.storage_to_subid)

    def _get_active_bus(self):
        """Recompute, for every substation, which buses have at least one element connected."""
        self.activated_bus[:] = False
        tmp = self.current_topo.values - 1  # local bus id -> column index
        self.activated_bus[self.big_topo_to_subid, tmp] = True

    def update_state(self, powerline_disconnected):
        """
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Update the internal state. Should be called after the cascading failures

        """
        if np.any(powerline_disconnected >= 0):
            arr_ = np.zeros(powerline_disconnected.shape, dtype=dt_int)
            arr_[powerline_disconnected >= 0] = -1
            self.current_topo.set_status(
                arr_,
                self.line_or_pos_topo_vect,
                self.line_ex_pos_topo_vect,
                self.last_topo_registered,
            )
        self.last_topo_registered.update_connected(self.current_topo)
        self.current_topo.reset()
| 20,936 | 34.486441 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Action/__init__.py | __all__ = [
# Internals
"BaseAction",
"PlayableAction",
"ActionSpace",
"SerializableActionSpace",
# Usable
"VoltageOnlyAction",
"CompleteAction",
"DontAct",
"PowerlineSetAction",
"PowerlineChangeAction",
"PowerlineSetAndDispatchAction",
"PowerlineChangeAndDispatchAction",
"PowerlineChangeDispatchAndStorageAction",
"TopologyAction",
"TopologyAndDispatchAction",
"TopologySetAction",
"TopologySetAndDispatchAction",
"TopologyChangeAction",
"TopologyChangeAndDispatchAction",
"DispatchAction",
]
# Internals
from grid2op.Action.BaseAction import BaseAction
from grid2op.Action.PlayableAction import PlayableAction
from grid2op.Action.VoltageOnlyAction import VoltageOnlyAction
from grid2op.Action.CompleteAction import CompleteAction
from grid2op.Action.ActionSpace import ActionSpace
from grid2op.Action.SerializableActionSpace import SerializableActionSpace
from grid2op.Action.DontAct import DontAct
from grid2op.Action.PowerlineSetAction import PowerlineSetAction
from grid2op.Action.PowerlineChangeAction import PowerlineChangeAction
from grid2op.Action.PowerlineSetAndDispatchAction import PowerlineSetAndDispatchAction
from grid2op.Action.PowerlineChangeAndDispatchAction import (
PowerlineChangeAndDispatchAction,
)
from grid2op.Action.PowerlineChangeDispatchAndStorageAction import (
PowerlineChangeDispatchAndStorageAction,
)
from grid2op.Action.TopologyAction import TopologyAction
from grid2op.Action.TopologyAndDispatchAction import TopologyAndDispatchAction
from grid2op.Action.TopologySetAction import TopologySetAction
from grid2op.Action.TopologySetAndDispatchAction import TopologySetAndDispatchAction
from grid2op.Action.TopologyChangeAction import TopologyChangeAction
from grid2op.Action.TopologyChangeAndDispatchAction import (
TopologyChangeAndDispatchAction,
)
from grid2op.Action.DispatchAction import DispatchAction
| 1,939 | 36.307692 | 86 | py |
Grid2Op | Grid2Op-master/grid2op/Agent/__init__.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Game, Grid2Game a gamified platform to interact with grid2op environments.
__all__ = [
"BaseAgent",
"DoNothingAgent",
"OneChangeThenNothing",
"GreedyAgent",
"PowerLineSwitch",
"TopologyGreedy",
"AgentWithConverter",
"RandomAgent",
"DeltaRedispatchRandomAgent",
"MLAgent",
"RecoPowerlineAgent",
"FromActionsListAgent",
"RecoPowerlinePerArea"
]
from grid2op.Agent.baseAgent import BaseAgent
from grid2op.Agent.doNothing import DoNothingAgent
from grid2op.Agent.oneChangeThenNothing import OneChangeThenNothing
from grid2op.Agent.greedyAgent import GreedyAgent
from grid2op.Agent.powerlineSwitch import PowerLineSwitch
from grid2op.Agent.topologyGreedy import TopologyGreedy
from grid2op.Agent.agentWithConverter import AgentWithConverter
from grid2op.Agent.randomAgent import RandomAgent
from grid2op.Agent.deltaRedispatchRandomAgent import DeltaRedispatchRandomAgent
from grid2op.Agent.mlAgent import MLAgent
from grid2op.Agent.recoPowerlineAgent import RecoPowerlineAgent
from grid2op.Agent.fromActionsListAgent import FromActionsListAgent
from grid2op.Agent.recoPowerLinePerArea import RecoPowerlinePerArea
| 1,539 | 39.526316 | 102 | py |
Grid2Op | Grid2Op-master/grid2op/Agent/agentWithConverter.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from abc import abstractmethod
from grid2op.Converter import Converter
from grid2op.Exceptions import Grid2OpException
from grid2op.Agent.baseAgent import BaseAgent
class AgentWithConverter(BaseAgent):
    """
    Compared to a regular BaseAgent, these types of Agents are able to deal with a different representation of
    :class:`grid2op.Action.BaseAction` and :class:`grid2op.Observation.BaseObservation`.

    As any other Agents, AgentWithConverter will implement the :func:`BaseAgent.act` method. But for them, it's slightly
    different.

    They receive in this method an observation, as an object (ie an instance of
    :class:`grid2op.Observation.BaseObservation`). This
    object can then be converted to any other object with the method :func:`AgentWithConverter.convert_obs`.

    Then, this `transformed_observation` is pass to the method :func:`AgentWithConverter.my_act` that is supposed
    to be defined for each agents. This function outputs an `encoded_act` which can be whatever you want to be.

    Finally, the `encoded_act` is decoded into a proper action, object of class :class:`grid2op.Action.BaseAction`,
    thanks to the method :func:`AgentWithConverter.convert_act`.

    This allows, for example, to represent actions as integers to train more easily standard discrete control algorithm
    used to solve atari games for example.

    **NB** It is possible to define :func:`AgentWithConverter.convert_obs` and :func:`AgentWithConverter.convert_act`
    or to define a :class:`grid2op.Converters.Converter` and feed it to the `action_space_converter` parameters
    used to initialise the class. The second option is preferred, as the :attr:`AgentWithConverter.action_space`
    will then directly be this converter. Such an BaseAgent will really behave as if the actions are encoded the way he
    wants.

    Examples
    --------
    For example, imagine an BaseAgent uses a neural networks to take its decision.

    Suppose also that, after some
    features engineering, it's best for the neural network to use only the load active values
    (:attr:`grid2op.Observation.BaseObservation.load_p`) and the sum of the
    relative flows (:attr:`grid2op.Observation.BaseObservation.rho`) with the active flow
    (:attr:`grid2op.Observation.BaseObservation.p_or`) [**NB** that agent would not make sense a priori, but who knows]

    Suppose that this neural network can be accessed with a class `AwesomeNN` (not available...) that can
    predict some actions. It can be loaded with the "load" method and make predictions with the
    "predict" method.

    For the sake of the examples, we will suppose that this agent only predicts powerline status (so 0 or 1) that
    are represented as vector. So we need to take extra care to convert this vector from a numpy array to a valid
    action.

    This is done below:

    .. code-block:: python

        import grid2op
        import AwesomeNN # this does not exists!
        # create a simple environment
        env = grid2op.make()

        # define the class above
        class AgentCustomObservation(AgentWithConverter):
            def __init__(self, action_space, path):
                AgentWithConverter.__init__(self, action_space)
                self.my_neural_network = AwesomeNN()
                self.my_neural_network.load(path)

            def convert_obs(self, observation):
                # convert the observation
                return np.concatenate((observation.load_p, observation.rho + observation.p_or))

            def convert_act(self, encoded_act):
                # convert back the action, output from the NN "self.my_neural_network"
                # to a valid action
                act = self.action_space({"set_status": encoded_act})
                return act

            def my_act(self, transformed_observation, reward, done=False):
                act_predicted = self.my_neural_network(transformed_observation)
                return act_predicted

        # make the agent that behaves as expected.
        my_agent = AgentCustomObservation(action_space=env.action_space, path=".")

        # this agent is perfectly working :-) You can use it as any other agents.

    Attributes
    ----------
    action_space_converter: :class:`grid2op.Converters.Converter`
        The converter that is used to represents the BaseAgent action space. Might be set to ``None`` if not initialized

    init_action_space: :class:`grid2op.Action.ActionSpace`
        The initial action space. This corresponds to the action space of the :class:`grid2op.Environment.Environment`.

    action_space: :class:`grid2op.Converters.ActionSpace`
        If a converter is used, then this action space represents is this converter. The agent will behave as if
        the action space is directly encoded the way it wants.

    """

    def __init__(self, action_space, action_space_converter=None, **kwargs_converter):
        self.action_space_converter = action_space_converter
        self.init_action_space = action_space
        if action_space_converter is None:
            # no converter provided: the agent works directly on the raw action space
            BaseAgent.__init__(self, action_space)
        else:
            if isinstance(action_space_converter, type):
                # a converter *class* was provided: specialize it to this grid and
                # instantiate it from the environment action space
                if issubclass(action_space_converter, Converter):
                    action_space_converter_this_env_class = (
                        action_space_converter.init_grid(action_space)
                    )
                    this_action_space = action_space_converter_this_env_class(
                        action_space
                    )
                    BaseAgent.__init__(self, this_action_space)
                else:
                    raise Grid2OpException(
                        "Impossible to make an BaseAgent with a converter of type {}. "
                        "Please use a converter deriving from grid2op.ActionSpaceConverter.Converter."
                        "".format(action_space_converter)
                    )
            elif isinstance(action_space_converter, Converter):
                # a converter *instance* was provided: only accept it if it produces
                # the same type of actions as the environment action space
                if isinstance(
                    action_space_converter._template_act,
                    self.init_action_space.actionClass,
                ):
                    BaseAgent.__init__(self, action_space_converter)
                else:
                    raise Grid2OpException(
                        "Impossible to make an BaseAgent with the provided converter of type {}. "
                        "It doesn't use the same type of action as the BaseAgent's action space."
                        "".format(action_space_converter)
                    )
            else:
                raise Grid2OpException(
                    'You try to initialize and BaseAgent with an invalid converter "{}". It must'
                    'either be a type deriving from "Converter", or an instance of a class'
                    "deriving from it."
                    "".format(action_space_converter)
                )

            # forward the extra keyword arguments to the converter initialization
            self.action_space.init_converter(**kwargs_converter)

    def convert_obs(self, observation):
        """
        This function convert the observation, that is an object of class :class:`grid2op.Observation.BaseObservation`
        into a representation understandable by the BaseAgent.

        For example, and agent could only want to look at the relative flows
        :attr:`grid2op.Observation.BaseObservation.rho`
        to take his decision. This is possible by overloading this method.

        This method can also be used to scale the observation such that each components has mean 0 and variance 1
        for example.

        Parameters
        ----------
        observation: :class:`grid2op.Observation.Observation`
            Initial observation received by the agent in the :func:`BaseAgent.act` method.

        Returns
        -------
        res: ``object``
            Anything that will be used by the BaseAgent to take decisions.

        """
        return self.action_space.convert_obs(observation)

    def convert_act(self, encoded_act):
        """
        This function will convert an "encoded action" that can be of any type, to a valid action that can be ingested
        by the environment.

        Parameters
        ----------
        encoded_act: ``object``
            Anything that represents an action.

        Returns
        -------
        act: :class:`grid2op.Action.BaseAction`
            A valid actions, represented as a class, that corresponds to the encoded action given as input.

        """
        res = self.action_space.convert_act(encoded_act)
        return res

    def act(self, observation, reward, done=False):
        """
        Standard method of an :class:`BaseAgent`. There is no need to overload this function.

        It chains the three steps: convert the observation, let :func:`AgentWithConverter.my_act`
        choose an encoded action, then decode it into a valid grid2op action.

        Parameters
        ----------
        observation: :class:`grid2op.Observation.Observation`
            The current observation of the :class:`grid2op.Environment.Environment`

        reward: ``float``
            The current reward. This is the reward obtained by the previous action

        done: ``bool``
            Whether the episode has ended or not. Used to maintain gym compatibility

        Returns
        -------
        res: :class:`grid2op.Action.Action`
            The action chosen by the bot / controler / agent.

        """
        transformed_observation = self.convert_obs(observation)
        encoded_act = self.my_act(transformed_observation, reward, done)
        return self.convert_act(encoded_act)

    def seed(self, seed):
        """
        Seed the agent AND the associated converter if it needs to be seeded.

        See a more detailed explanation in :func:`BaseAgent.seed` for more information about seeding.
        """
        super().seed(seed)
        if self.action_space_converter is not None:
            # the action space IS the converter in that case, and it may use randomness too
            self.action_space.seed(seed)

    @abstractmethod
    def my_act(self, transformed_observation, reward, done=False):
        """
        This method should be overridden if this class is used. It is an "abstract" method.

        If someone wants to make a agent that handles different kinds of actions an observation.

        Parameters
        ----------
        transformed_observation: ``object``
            Anything that will be used to create an action. This is the results to the call of
            :func:`AgentWithConverter.convert_obs`. This is likely a numpy array.

        reward: ``float``
            The current reward. This is the reward obtained by the previous action

        done: ``bool``
            Whether the episode has ended or not. Used to maintain gym compatibility

        Returns
        -------
        res: ``object``
            A representation of an action in any possible format. This action will then be ingested and formatted into
            a valid action with the :func:`AgentWithConverter.convert_act` method.

        """
        transformed_action = None
        return transformed_action
| 11,402 | 42.357414 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/Agent/baseAgent.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import os
from abc import ABC, abstractmethod
from grid2op.Space import RandomObject
from grid2op.Observation import BaseObservation
from grid2op.Action import BaseAction, ActionSpace
class BaseAgent(RandomObject, ABC):
    """
    Base class of every bot / controller / agent used with the Grid2Op simulator.

    Concrete agents must derive from this class and implement :func:`BaseAgent.act`.
    It is advised to create the agent after the :class:`grid2op.Environment` has been
    built, reusing :attr:`grid2op.Environment.Environment.action_space`.

    Attributes
    -----------
    action_space: :class:`grid2op.Action.ActionSpace`
        A tool to build valid actions. Note that a "valid" action can still be illegal
        or ambiguous (leading to a "game over" or an error), but it will at least have
        a proper size.
    """

    def __init__(self, action_space: ActionSpace):
        RandomObject.__init__(self)
        # private deep copy: the agent's action space must not alias the environment's
        self.action_space = copy.deepcopy(action_space)

    def reset(self, obs: BaseObservation):
        """
        Called at the beginning of every new episode.

        Override it to reset any internal state your agent keeps between steps.

        Parameters
        -----------
        obs: :class:`grid2op.Observation.BaseObservation`
            The first observation, corresponding to the initial state of the environment.
        """
        pass

    def seed(self, seed: int) -> None:
        """
        Make the pseudo random numbers generated and used by this agent deterministic.

        Provided the recommendations in :func:`BaseAgent.act` are followed, seeding
        guarantees the agent produces the same sequence of actions when it faces the
        same observations in the same order — particularly important for random agents.

        You may override this, but then do not forget to call ``super().seed(seed)``.

        Parameters
        ----------
        seed: ``int``
            The seed used

        Returns
        -------
        seed: ``tuple``
            a tuple of seed used
        """
        # seed the agent's own PRNG first, then the action space's (order matters
        # only for reproducibility of the returned tuple)
        own_seed = super().seed(seed)
        space_seed = self.action_space.seed(seed)
        return own_seed, space_seed

    @abstractmethod
    def act(self, observation: BaseObservation, reward: float, done : bool=False) -> BaseAction:
        """
        Main decision method of a BaseAgent: given the current observation and the reward
        obtained by the previous action, return the next action to play.

        Notes
        -----
        To be reproducible and to benefit from :func:`BaseAgent.seed`, never use the
        ``random`` module nor ``np.random`` (they will not be seeded): use
        ``self.space_prng`` instead. For example replace ``np.random.randint(1, 5)``
        by ``self.space_prng.randint(1, 5)`` and ``np.random.normal()`` by
        ``self.space_prng.normal()``. See :func:`RandomAgent.my_act` for an example.

        If you really need other sources of randomness (for example tensorflow or
        torch), we strongly recommend overloading :func:`BaseAgent.seed` accordingly.

        Parameters
        ----------
        observation: :class:`grid2op.Observation.BaseObservation`
            The current observation of the :class:`grid2op.Environment.Environment`

        reward: ``float``
            The current reward. This is the reward obtained by the previous action

        done: ``bool``
            Whether the episode has ended or not. Used to maintain gym compatibility

        Returns
        -------
        res: :class:`grid2op.Action.PlaybleAction`
            The action chosen by the bot / controler / agent.
        """
        # default implementation (for subclasses that call super): "do nothing"
        return self.action_space()

    def save_state(self, savestate_path :os.PathLike):
        """
        Optional hook to persist the internal state of the agent.

        A state saved here can later be restored with :func:`BaseAgent.load_state`,
        e.g. to repeat a Grid2Op time step with exactly the same internal
        parameterization and analyze why the agent took certain actions.
        Concept developed by Fraunhofer IEE KES.

        Notes
        -----
        Simple attributes of :class:`grid2op.Agent.BaseAgent` and of its
        ``action_space`` (e.g. PRNG parameterization and seeds) can be read with
        :func:`getattr` and stored in a common format such as `.npy`. Custom
        attributes (e.g. a vector of line indices) can be saved the same way.
        Modules such as `Tensorflow` that do not support plain :func:`getattr`
        usually provide their own persistence methods (e.g. ``save_weights``)
        that you can integrate in your implementation.

        Parameters
        ----------
        savestate_path: ``string``
            The path to which your agent state variables should be saved
        """
        pass

    def load_state(self, loadstate_path :os.PathLike):
        """
        Optional hook to restore an internal state saved with :func:`BaseAgent.save_state`.

        Useful to re-set the agent to an earlier simulation time step and reproduce
        past experiments with Grid2Op. Concept developed by Fraunhofer IEE KES.

        Notes
        -----
        Simple attributes of :class:`grid2op.Agent.BaseAgent` and of its
        ``action_space`` can be restored with :func:`setattr`; the same goes for
        custom attributes. Modules such as `Tensorflow` usually provide their own
        loading methods (e.g. ``load_weights``) that you can integrate in your
        implementation.

        Parameters
        ----------
        savestate_path: ``string``
            The path from which your agent state variables should be loaded
        """
        pass
| 7,719 | 43.114286 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/Agent/deltaRedispatchRandomAgent.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Agent.baseAgent import BaseAgent
class DeltaRedispatchRandomAgent(BaseAgent):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Used for test. Prefer using a random agent by selecting only the redispatching
        action that you want.

    This agent performs, at random, a redispatch of a fixed amount (in both directions)
    on a few randomly selected dispatchable generators, or does nothing.

    Parameters
    ----------
    action_space: :class:`grid2op.Action.ActionSpace`
        the Grid2Op action space

    n_gens_to_redispatch: `int`
        The maximum number of dispatchable generators to play with

    redispatching_delta: `float`
        The redispatching MW value used in both directions
    """

    def __init__(self, action_space, n_gens_to_redispatch=2, redispatching_delta=1.0):
        super().__init__(action_space)
        # keep only dispatchable generators, capped to the requested count
        every_gen = np.arange(self.action_space.n_gen, dtype=int)
        playable_gens = every_gen[self.action_space.gen_redispatchable][
            :n_gens_to_redispatch
        ]
        # candidate pool: "do nothing" first, then -delta / +delta per generator
        # (order is preserved so seeded sampling stays reproducible)
        self.desired_actions = [self.action_space({})]
        amount = float(redispatching_delta)
        for gen_id in playable_gens:
            down = self.action_space({"redispatch": [(gen_id, -amount)]})
            up = self.action_space({"redispatch": [(gen_id, amount)]})
            self.desired_actions.append(down)
            self.desired_actions.append(up)

    def act(self, observation, reward, done=False):
        # pick uniformly at random among the precomputed candidates
        return self.space_prng.choice(self.desired_actions)
| 2,590 | 34.493151 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Agent/doNothing.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Agent.baseAgent import BaseAgent
class DoNothingAgent(BaseAgent):
    """
    The most basic BaseAgent: purely passive, it never modifies the grid.

    As opposed to most reinforcement learning environments, in grid2op doing
    nothing is often the best solution.
    """

    def __init__(self, action_space):
        super().__init__(action_space)

    def act(self, observation, reward, done=False):
        """
        Return the "do nothing" action at every step.

        As explained in the documentation of :func:`grid2op.BaseAction.update` and
        :func:`grid2op.BaseAction.ActionSpace.__call__`, the preferred way to build an
        action is to call the action space with a dictionary describing it; here the
        "do nothing" action is represented by the empty dictionary.

        Parameters
        ----------
        observation: :class:`grid2op.Observation.Observation`
            The current observation of the :class:`grid2op.Environment.Environment`

        reward: ``float``
            The current reward. This is the reward obtained by the previous action

        done: ``bool``
            Whether the episode has ended or not. Used to maintain gym compatibility

        Returns
        -------
        res: :class:`grid2op.Action.Action`
            The action chosen by the bot / controller / agent.
        """
        do_nothing = self.action_space({})
        return do_nothing
| 1,941 | 36.346154 | 118 | py |
Grid2Op | Grid2Op-master/grid2op/Agent/fromActionsListAgent.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
from collections.abc import Iterable
from grid2op.Action import BaseAction
from grid2op.Exceptions import AgentError
from grid2op.Agent.baseAgent import BaseAgent
class FromActionsListAgent(BaseAgent):
    """This type of agent will perform some actions based on a provided list of actions.

    If no action is provided for a given step (for example because it survives for more steps than the
    length of the provided action list), it will do nothing.

    Notes
    -----
    No check are performed to make sure the action types is compatible with the environment. For example, the
    environment might prevent to perform redispatching, but, at the creation of the agent, we do not ensure
    that no actions performing redispatching are performed.

    Raises
    ------
    AgentError
        If ``action_list`` is not iterable, contains a non-action element, or contains
        an action built for a different environment.
    """

    def __init__(self, action_space, action_list=None):
        BaseAgent.__init__(self, action_space=action_space)
        if action_list is None:
            self._action_list = []
        elif isinstance(action_list, Iterable):
            self._action_list = copy.deepcopy(action_list)
        else:
            raise AgentError(
                'Impossible to create a "FromActionsListAgent" without providing a valid list of '
                'actions. Make sure that "action_list" parameters is iterable.'
            )

        # reference description of this environment's action class, used to detect
        # actions coming from another environment
        my_dict = copy.deepcopy(type(self.action_space()).cls_to_dict())
        self.__clean_dict_for_compare(my_dict)
        for act_nb, act in enumerate(self._action_list):
            if not isinstance(act, BaseAction):
                raise AgentError(
                    f'Impossible to create a "FromActionsListAgent" with a list that does not '
                    f"contain an action. We found {act} at position {act_nb}, which is NOT a valid "
                    f"grid2op action."
                )
            this_dict = copy.deepcopy(type(act).cls_to_dict())
            self.__clean_dict_for_compare(this_dict)
            if this_dict != my_dict:
                raise AgentError(
                    f'Impossible to create a "FromActionsListAgent" with a list that contains '
                    f"actions from a different environment. Please check action at position {act_nb}."
                )

    def act(self, observation, reward, done=False):
        """Return the pre-recorded action for the current step, or "do nothing" once past the end of the list."""
        if observation.current_step < len(self._action_list):
            return self._action_list[observation.current_step]
        return self.action_space()

    def __clean_dict_for_compare(self, dict_):
        # strip the keys that legitimately differ between two copies of the same
        # environment, so the remaining description can be compared for equality
        for key in ("glop_version", "_PATH_ENV", "env_name"):
            dict_.pop(key, None)
| 3,254 | 42.986486 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Agent/greedyAgent.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from abc import abstractmethod
import numpy as np
from grid2op.Agent.baseAgent import BaseAgent
from grid2op.dtypes import dt_float
class GreedyAgent(BaseAgent):
    """
    This is a class of "Greedy BaseAgent". Greedy agents are all executing the same kind of algorithm to take action:

      1. They :func:`grid2op.Observation.Observation.simulate` all actions in a given set
      2. They take the action that maximise the simulated reward among all these actions

    This class is an abstract class (object of this class cannot be created). To create "GreedyAgent" one must
    override this class. Examples are provided with :class:`PowerLineSwitch` and :class:`TopologyGreedy`.

    Attributes
    ----------
    tested_action: ``list``
        The candidate actions returned by :func:`GreedyAgent._get_tested_action` at the last step.

    resulting_rewards: ``numpy.ndarray``
        The simulated reward of each candidate action at the last step (``None`` before the
        first step, unchanged when only one candidate was tested).
    """

    def __init__(self, action_space):
        BaseAgent.__init__(self, action_space)
        self.tested_action = None
        self.resulting_rewards = None

    def act(self, observation, reward, done=False):
        """
        By definition, all "greedy" agents are acting the same way. The only thing that can differentiate multiple
        agents is the actions that are tested.

        These actions are defined in the method :func:`._get_tested_action`. This :func:`.act` method implements the
        greedy logic: take the actions that maximizes the instantaneous reward on the simulated action.

        Parameters
        ----------
        observation: :class:`grid2op.Observation.Observation`
            The current observation of the :class:`grid2op.Environment.Environment`

        reward: ``float``
            The current reward. This is the reward obtained by the previous action

        done: ``bool``
            Whether the episode has ended or not. Used to maintain gym compatibility

        Returns
        -------
        res: :class:`grid2op.Action.Action`
            The action chosen by the bot / controller / agent.
        """
        self.tested_action = self._get_tested_action(observation)
        if len(self.tested_action) > 1:
            # simulate every candidate and record the reward it would yield
            # NOTE: np.nan (not the np.NaN alias, removed in NumPy 2.0) is used as a
            # "not yet simulated" marker; every slot is overwritten in the loop below
            self.resulting_rewards = np.full(
                shape=len(self.tested_action), fill_value=np.nan, dtype=dt_float
            )
            for i, action in enumerate(self.tested_action):
                (
                    simul_obs,
                    simul_reward,
                    simul_has_error,
                    simul_info,
                ) = observation.simulate(action)
                self.resulting_rewards[i] = simul_reward
            reward_idx = int(
                np.argmax(self.resulting_rewards)
            )  # rewards.index(max(rewards))
            best_action = self.tested_action[reward_idx]
        else:
            # a single candidate: no need to simulate anything
            best_action = self.tested_action[0]
        return best_action

    @abstractmethod
    def _get_tested_action(self, observation):
        """
        Returns the list of all the candidate actions.

        From this list, the one that achieve the best "simulated reward" is used.

        Parameters
        ----------
        observation: :class:`grid2op.Observation.Observation`
            The current observation of the :class:`grid2op.Environment.Environment`

        Returns
        -------
        res: ``list``
            A list of all candidate :class:`grid2op.BaseAction.BaseAction`
        """
        pass
| 3,717 | 38.136842 | 117 | py |
Grid2Op | Grid2Op-master/grid2op/Agent/mlAgent.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Converter import ToVect
from grid2op.Agent.agentWithConverter import AgentWithConverter
class MLAgent(AgentWithConverter):
    """
    An agent that manipulates only vectors: observations are converted into vectors and
    :func:`MLAgent.my_act` is expected to return a vector that can be directly converted
    back into a valid action.

    By default :func:`MLAgent.my_act` returns the "do nothing" vector, so it needs to be
    overridden to do anything useful.
    """

    def __init__(self, action_space, action_space_converter=ToVect, **kwargs_converter):
        AgentWithConverter.__init__(
            self, action_space, action_space_converter, **kwargs_converter
        )
        # vectorized "do nothing" action, returned by the default policy below
        self.do_nothing_vect = action_space({}).to_vect()

    def my_act(self, transformed_observation, reward, done=False):
        """
        Default policy: always return the "do nothing" action as a vector. Override
        this method to implement a smarter behaviour.

        Parameters
        ----------
        transformed_observation: ``numpy.ndarray``, dtype=float
            The observation transformed into a 1d numpy array of float. All components of the observation are kept.

        reward: ``float``
            Reward of the previous action

        done: ``bool``
            Whether the episode is over or not.

        Returns
        -------
        res: ``numpy.ndarray``, dtype=float
            The action taken represented as a vector.
        """
        return self.do_nothing_vect

    def convert_from_vect(self, act):
        """
        Helper to convert an action represented as a numpy array into a proper
        :class:`grid2op.Action.BaseAction` instance.

        Parameters
        ----------
        act: ``numpy.ndarray``
            The vector representation of the action.

        Returns
        -------
        res: :class:`grid2op.Action.Action`
            The `act` parameters converted into a proper :class:`grid2op.BaseAction.BaseAction` object.
        """
        decoded = self.action_space({})
        decoded.from_vect(act)
        return decoded
| 2,488 | 36.149254 | 116 | py |
Grid2Op | Grid2Op-master/grid2op/Agent/oneChangeThenNothing.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Agent.baseAgent import BaseAgent
class OneChangeThenNothing(BaseAgent):
    """
    This is a specific kind of BaseAgent. It does an BaseAction (possibly non empty) at the first time step and then
    does nothing.

    This class is an abstract class and cannot be instantiated (ie no object of this class can be created). It must
    be overridden and the method :func:`OneChangeThenNothing._get_dict_act` be defined. Basically, it must know
    what action to do.

    Attributes
    ------------
    my_dict: ``dict`` (class member)
        Representation, as a dictionary, of the only action that this Agent will do at the first time step.

    Examples
    ---------
    We advise to use this class as following

    .. code-block:: python

        import grid2op
        from grid2op.Agent import OneChangeThenNothing
        from grid2op.Runner import Runner

        acts_dict_ = [{}, {"set_line_status": [(0, -1)]}]  # list of dictionaries. Each dictionary
        # represents a valid action

        env = grid2op.make()  # create an environment
        for act_as_dict in acts_dict_:
            # generate the proper class that will perform the action encoded by act_as_dict
            agent_class = OneChangeThenNothing.gen_next(act_as_dict)
            # start a runner with this agent
            runner = Runner(**env.get_params_for_runner(), agentClass=agent_class)
            # run 2 episode with it
            res_2 = runner.run(nb_episode=2)

    """

    # action (as a dictionary) performed at the first step; overwritten by gen_next()
    my_dict = {}

    def __init__(self, action_space):
        BaseAgent.__init__(self, action_space)
        # becomes True once the one-shot action has been played
        self.has_changed = False
        self.do_nothing_action = self.action_space({})

    def act(self, observation, reward, done=False):
        # play the configured action exactly once, then "do nothing" forever
        if self.has_changed:
            res = self.do_nothing_action
        else:
            res = self.action_space(self._get_dict_act())
            self.has_changed = True
        return res

    def reset(self, obs):
        # new episode: the one-shot action can be played again
        self.has_changed = False

    def _get_dict_act(self):
        """
        Function that need to be overridden to indicate which action to perform.

        Returns
        -------
        res: ``dict``
            A dictionary that can be converted into a valid :class:`grid2op.BaseAction.BaseAction`. See the help of
            :func:`grid2op.BaseAction.ActionSpace.__call__` for more information.
        """
        return self.my_dict

    @classmethod
    def gen_next(cls, dict_):
        """
        This function allows to change the dictionary of the action that the agent will perform.

        See the class level documentation for an example on how to use this.

        Parameters
        ----------
        dict_: ``dict``
            A dictionary representing an action. This dictionary is assumed to be convertible into an action.
            No check is performed at this stage.

        """
        cls.my_dict = dict_
        return cls
| 3,369 | 34.104167 | 121 | py |
Grid2Op | Grid2Op-master/grid2op/Agent/powerlineSwitch.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.dtypes import dt_bool
from grid2op.Agent.greedyAgent import GreedyAgent
class PowerLineSwitch(GreedyAgent):
    """
    This is a :class:`GreedyAgent` example, which will attempt to disconnect powerlines.

    At each step it chooses, among:

      - doing nothing
      - changing the status of one powerline

    the action that maximizes the simulated reward. All powerlines are tested at each step,
    so if `n` is the number of powerlines on the grid, this performs `n` + 1 calls to
    "simulate" per step (one for doing nothing, one per powerline status change).
    """

    def __init__(self, action_space):
        super().__init__(action_space)

    def _get_tested_action(self, observation):
        n_line = self.action_space.n_line
        # always keep "do nothing" as a candidate
        candidates = [self.action_space({})]
        for line_id in range(n_line):
            flip = np.zeros(n_line, dtype=dt_bool)
            flip[line_id] = True
            act = self.action_space({"change_line_status": flip})
            if not observation.line_status[line_id]:
                # the change is a reconnection: a bus must be specified on both ends
                # (this type of agent always picks bus 1)
                act = act.update(
                    {"set_bus": {"lines_or_id": [(line_id, 1)], "lines_ex_id": [(line_id, 1)]}}
                )
            candidates.append(act)
        return candidates
| 1,936 | 40.212766 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Agent/randomAgent.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Converter import IdToAct
from grid2op.Agent.agentWithConverter import AgentWithConverter
class RandomAgent(AgentWithConverter):
    """
    An agent acting uniformly at random on the powergrid.

    It uses the :class:`grid2op.Converters.IdToAct` converter to enumerate all the
    possible (unary) actions of the environment, then picks one of them at random.

    Notes
    ------
    Sampling is uniform among *unary* actions only. For example, even if the game
    rules allow actions that both disconnect a powerline AND modify the topology of
    a substation, such a combined action will never be sampled by this class.

    This agent is not equivalent to calling `env.action_space.sample()`: the
    sampling is not done the same manner (see
    :func:`grid2op.Action.SerializableActionSpace.sample` for details on the later).
    """

    def __init__(
        self, action_space, action_space_converter=IdToAct, **kwargs_converter
    ):
        AgentWithConverter.__init__(
            self, action_space, action_space_converter, **kwargs_converter
        )

    def my_act(self, transformed_observation, reward, done=False):
        """
        Draw an integer uniformly at random between 0 and the number of actions and
        return it: this is equivalent to drawing a feasible action uniformly at random.

        Notes
        -----
        To work as intended, this method must not rely on any source of "pseudo
        randomness" other than :attr:`grid2op.Space.RandomObject.space_prng`.

        In particular, avoid `np.random.XXXX` and the `random` python module. Any
        call to `np.random.XXX` can be replaced by `self.space_prng.XXX` (**eg**
        `np.random.randint(1,5)` becomes `self.space_prng.randint(1,5)`).

        If you really need other sources of randomness (for example tensorflow or
        torch), we strongly recommend overloading :func:`BaseAgent.seed` accordingly.
        """
        return self.space_prng.randint(self.action_space.n)
| 2,702 | 44.813559 | 119 | py |
Grid2Op | Grid2Op-master/grid2op/Agent/recoPowerLinePerArea.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Agent import BaseAgent
from grid2op.Observation import BaseObservation
from grid2op.Action import ActionSpace
from grid2op.Exceptions import AgentError
class RecoPowerlinePerArea(BaseAgent):
    """This class acts like the :class:`RecoPowerlineAgent` but it is able
    to reconnect multiple lines at the same steps (one line per area).

    The "areas" are defined by a list of list of substation id provided as input.
    They should match the areas used by the rules of the game: otherwise the agent
    might try to reconnect two powerlines "in the same area for the environment",
    which will of course lead to an illegal action.

    You can use it like:

    .. code-block::

        import grid2op
        from grid2op.Agent import RecoPowerlinePerArea

        env_name = "l2rpn_idf_2023" # (or any other env name supporting the feature)
        env = grid2op.make(env_name)
        agent = RecoPowerlinePerArea(env.action_space, env._game_rules.legal_action.substations_id_by_area)
    """

    def __init__(self, action_space: ActionSpace, areas_by_sub_id: dict):
        super().__init__(action_space)
        gridobj_cls = type(action_space)
        # for each powerline: the id of the area of its "origin" substation (-1 = unknown)
        self.lines_to_area_id = np.zeros(gridobj_cls.n_line, dtype=int) - 1
        for area_idx, (area_nm, subs_this_area) in enumerate(areas_by_sub_id.items()):
            for l_id, subor_id in enumerate(gridobj_cls.line_or_to_subid):
                if subor_id in subs_this_area:
                    self.lines_to_area_id[l_id] = area_idx
        if np.any(self.lines_to_area_id == -1):
            raise AgentError("some powerline have no area id")
        self.nb_area = len(areas_by_sub_id)

    def act(self, observation: BaseObservation, reward: float, done: bool = False):
        disconnected = ~observation.line_status
        reconnectable = disconnected & (observation.time_before_cooldown_line == 0)
        if not np.any(reconnectable):
            # no line to reconnect
            return self.action_space()
        # greedily pick at most one reconnectable line per area
        area_taken = np.full(self.nb_area, fill_value=False, dtype=bool)
        to_reconnect = []
        for l_id in np.where(reconnectable)[0]:
            area = self.lines_to_area_id[l_id]
            if not area_taken[area]:
                to_reconnect.append(l_id)
                area_taken[area] = True
        return self.action_space(
            {"set_line_status": [(l_id, +1) for l_id in to_reconnect]}
        )
| 2,963 | 43.909091 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Agent/recoPowerlineAgent.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Agent.greedyAgent import GreedyAgent
class RecoPowerlineAgent(GreedyAgent):
    """
    Example of a :class:`GreedyAgent` that attempts to reconnect powerlines: each
    disconnected powerline that can be reconnected is tested through simulation,
    and the reconnection leading to the highest simulated reward is chosen.
    """

    def __init__(self, action_space):
        GreedyAgent.__init__(self, action_space)

    def _get_tested_action(self, observation):
        # default candidate set: the "do nothing" action
        res = [self.action_space({})]
        can_be_reco = ~observation.line_status & (
            observation.time_before_cooldown_line == 0
        )
        if np.any(can_be_reco):
            # NOTE: when at least one line is reconnectable, the "do nothing"
            # candidate is intentionally *replaced* by the reconnection candidates
            res = [
                self.action_space({"set_line_status": [(id_, +1)]})
                for id_ in np.where(can_be_reco)[0]
            ]
        return res
| 1,412 | 40.558824 | 119 | py |
Grid2Op | Grid2Op-master/grid2op/Agent/topologyGreedy.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Agent.greedyAgent import GreedyAgent
class TopologyGreedy(GreedyAgent):
    """
    Example of a :class:`GreedyAgent` that attempts to reconfigure the substations
    connectivity. It chooses among:

      - doing nothing
      - changing the topology of one substation

    by simulating the outcome of every candidate and keeping the one leading to
    the best reward.
    """

    def __init__(self, action_space):
        GreedyAgent.__init__(self, action_space)
        # candidates are expensive to enumerate: built lazily, once, then cached
        self.tested_action = None

    def _get_tested_action(self, observation):
        if self.tested_action is None:
            candidates = [self.action_space({})]  # add the do nothing
            # better use "get_all_unitary_topologies_set" and not "get_all_unitary_topologies_change"
            # maybe "change" are still "bugged" (in the sens they don't count all topologies exactly once)
            candidates += self.action_space.get_all_unitary_topologies_set(
                self.action_space
            )
            self.tested_action = candidates
        return self.tested_action
| 1,530 | 40.378378 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/Backend/Backend.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import os
import sys
import warnings
import json
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
from grid2op.dtypes import dt_int, dt_float, dt_bool
from grid2op.Exceptions import (
EnvError,
DivergingPowerFlow,
IncorrectNumberOfElements,
IncorrectNumberOfLoads,
)
from grid2op.Exceptions import (
IncorrectNumberOfGenerators,
BackendError,
IncorrectNumberOfLines,
)
from grid2op.Space import GridObjects
from grid2op.Exceptions import Grid2OpException
# TODO method to get V and theta at each bus, could be in the same shape as check_kirchoff
class Backend(GridObjects, ABC):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Unless if you want to code yourself a backend this is not recommend to alter it
or use it directly in any way.
If you want to code a backend, an example is given in :class:`PandaPowerBackend` (
or in the repository lightsim2grid on github)
This documentation is present mainly for exhaustivity. It is not recommended to manipulate a Backend
directly. Prefer using an :class:`grid2op.Environment.Environment`
This is a base class for each :class:`Backend` object.
It allows to run power flow smoothly, and abstract the method of computing cascading failures.
This class allow the user or the agent to interact with an power flow calculator, while relying on dedicated
methods to change the power grid behaviour.
It is NOT recommended to use this class outside the Environment.
An example of a valid backend is provided in the :class:`PandapowerBackend`.
All the abstract methods (that need to be implemented for a backend to work properly) are (more information given
in the :ref:`create-backend-module` page):
- :func:`Backend.load_grid`
- :func:`Backend.apply_action`
- :func:`Backend.runpf`
- :func:`Backend.get_topo_vect`
- :func:`Backend.generators_info`
- :func:`Backend.loads_info`
- :func:`Backend.lines_or_info`
- :func:`Backend.lines_ex_info`
And optionally:
- :func:`Backend.close` (this is mandatory if your backend implementation (`self._grid`) is relying on some
c / c++ code that do not free memory automatically.
- :func:`Backend.copy` (not that this is mandatory if your backend implementation (in `self._grid`) cannot be
deep copied using the python copy.deepcopy function) [as of grid2op >= 1.7.1 it is no more
required. If not implemented, you won't be able to use some of grid2op feature however]
- :func:`Backend.get_line_status`: the default implementation uses the "get_topo_vect()" and then check
if buses at both ends of powerline are positive. This is rather slow and can most likely be optimized.
- :func:`Backend.get_line_flow`: the default implementation will retrieve all powerline information
at the "origin" side and just return the "a_or" vector. You want to do something smarter here.
- :func:`Backend._disconnect_line`: has a default slow implementation using "apply_action" that might
can most likely be optimized in your backend.
- :func:`Backend.reset` will reload the powergrid from the hard drive by default. This is rather slow and we
recommend to overload it.
And, if the flag :attr:Backend.shunts_data_available` is set to ``True`` the method :func:`Backend.shunt_info`
should also be implemented.
.. note:: Backend also support "shunts" information if the `self.shunts_data_available` flag is set to
``True`` in that case, you also need to implement all the relevant shunt information (attributes `n_shunt`,
`shunt_to_subid`, `name_shunt` and function `shunt_info` and handle the modification of shunts
bus, active value and reactive value in the "apply_action" function).
In order to be valid and carry out some computations, you should call :func:`Backend.load_grid` and later
:func:`grid2op.Spaces.GridObjects.assert_grid_correct`. It is also more than recommended to call
:func:`Backend.assert_grid_correct_after_powerflow` after the first powerflow. This is all carried ou in the
environment properly.
Attributes
----------
detailed_infos_for_cascading_failures: :class:`bool`
Whether to be verbose when computing a cascading failure.
thermal_limit_a: :class:`numpy.array`, dtype:float
Thermal limit of the powerline in amps for each powerline. Thie thermal limit is relevant on only one
side of the powerline: the same side returned by :func:`Backend.get_line_overflow`
comp_time: ``float``
Time to compute the powerflow (might be unset, ie stay at 0.0)
"""
IS_BK_CONVERTER = False
env_name = "unknown"
# action to set me
my_bk_act_class = None
_complete_action_class = None
ERR_INIT_POWERFLOW = "Power cannot be computed on the first time step, please check your data."
def __init__(self,
             detailed_infos_for_cascading_failures: bool=False,
             can_be_copied: bool=True,
             **kwargs):
    """
    Initialize an instance of Backend. This does nothing per se: only the call to
    :func:`Backend.load_grid` guarantees the backend is properly configured.

    :param detailed_infos_for_cascading_failures: Whether to be detailed (but slow) when computing cascading failures
    :type detailed_infos_for_cascading_failures: :class:`bool`
    """
    GridObjects.__init__(self)

    # controls the verbosity when computing a cascading failure: if True, all
    # intermediate _grid states are returned, which can slow down the computation!
    self.detailed_infos_for_cascading_failures = detailed_infos_for_cascading_failures

    # the power _grid manipulated; one powergrid per backend
    self._grid = None

    # thermal limit, in ampere, at the same "side" of the powerline than self.get_line_overflow
    self.thermal_limit_a = None

    # shunts only (if supported): for each shunt, nominal voltage of the bus it is
    # connected to; without it "get_action_to_set" might not behave correctly
    self._sh_vnkv = None

    self.comp_time = 0.0
    self.can_output_theta = False

    # prevents the use of the same backend instance in different environments
    self._is_loaded = False

    self._can_be_copied = can_be_copied
    # kwargs needed to rebuild an identical backend later on
    self._my_kwargs = {
        "detailed_infos_for_cascading_failures": detailed_infos_for_cascading_failures,
        "can_be_copied": self._can_be_copied,
    }
    self._my_kwargs.update(kwargs)
@property
def is_loaded(self):
    # Read-only flag: whether this backend instance is already attached to an
    # environment (set through the setter below, which only accepts True).
    return self._is_loaded

@is_loaded.setter
def is_loaded(self, value):
    # The flag can only be raised, never cleared: a backend cannot be "detached"
    # from its environment once loaded.
    if value is True:
        self._is_loaded = True
    else:
        raise BackendError('Impossible to unset the "is_loaded" status.')
@abstractmethod
def load_grid(self, path, filename=None):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        This is called once, at the loading of the powergrid.

    Load the powergrid: it should first define ``self._grid``, and then fill all
    the helpers used by the backend, *eg.* all the attributes of
    :class:`Space.GridObjects`.

    After a call to :func:`Backend.load_grid`, the backend should be in such a
    state that its :class:`grid2op.Space.GridObjects` is properly set up. See the
    description of :class:`grid2op.Space.GridObjects` to know which attributes
    should be set here and which should not.

    :param path: the path to find the powergrid
    :type path: :class:`string`

    :param filename: the filename of the powergrid
    :type filename: :class:`string`, optional

    :return: ``None``
    """
    pass
@abstractmethod
def apply_action(self, action):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Don't attempt to apply an action directly to a backend: this is called by
        the environment to modify the powergrid state given the action in input.

    One of the core functions to implement when coding a backend. The action is
    mainly topological when sent by the agent, but it can also affect productions
    and loads when made by the environment. The help of
    :func:`grid2op.BaseAction.BaseAction.__call__` and the code in the
    BaseAction.py file give more information about the implementation of this method.

    :param action: the action to be implemented on the powergrid.
    :type action: :class:`grid2op.Action._BackendAction._BackendAction`

    :return: ``None``
    """
    pass
@abstractmethod
def runpf(self, is_dc=False):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        This is called by :func:`Backend.next_grid_state` (that computes some kind
        of cascading failures).

    One of the core functions to implement when coding a backend: run a power flow
    on the underlying _grid, either AC (``is_dc=False``) or DC (``is_dc=True``).

    :param is_dc: is the powerflow run in DC or in AC
    :type is_dc: :class:`bool`

    :return: ``True`` if it has converged, ``False`` otherwise. In case of
        non convergence, no flows can be inspected on the _grid.
    :rtype: :class:`bool`

    :return: an exception in case of divergence (or none if no particular info are available)
    :rtype: `Exception`
    """
    pass
@abstractmethod
def get_topo_vect(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Prefer using :attr:`grid2op.Observation.BaseObservation.topo_vect`

    Get the topology vector from :attr:`Backend._grid`: for each object of the
    powergrid (production, load, line extremity) it gives the bus this object is
    connected to inside its substation, or -1 if it is disconnected.

    For example, if the first element of this vector describes the load of id 1,
    then ``res[0] = 2`` means that this load is connected to the second bus of its
    substation.

    The position of each object inside this vector is given by the
    `*_pos_topo_vect` attributes (*eg.*
    :attr:`grid2op.Space.GridObjects.load_pos_topo_vect`).

    As any function of the backend, it is not advised to use it directly: prefer
    :attr:`grid2op.Observation.Observation.topo_vect` instead.

    Returns
    --------
    res: ``numpy.ndarray`` dtype: ``int``
        An array saying to which bus the object is connected.
    """
    pass
@abstractmethod
def generators_info(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Prefer using :attr:`grid2op.Observation.BaseObservation.gen_p`,
        :attr:`grid2op.Observation.BaseObservation.gen_q` and
        :attr:`grid2op.Observation.BaseObservation.gen_v` instead.

    Retrieve information about the generators: active and reactive production, and
    voltage magnitude of the bus to which each one is connected.

    .. note::
        The values returned here are the values AFTER the powerflow has been
        computed and not the target values.

    Returns
    -------
    prod_p ``numpy.ndarray``
        The active power production for each generator (in MW)
    prod_q ``numpy.ndarray``
        The reactive power production for each generator (in MVAr)
    prod_v ``numpy.ndarray``
        The voltage magnitude of the bus to which each generators is connected (in kV)
    """
    pass
@abstractmethod
def loads_info(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Prefer using :attr:`grid2op.Observation.BaseObservation.load_p`,
        :attr:`grid2op.Observation.BaseObservation.load_q` and
        :attr:`grid2op.Observation.BaseObservation.load_v` instead.

    Retrieve information about the loads: active and reactive consumption, and
    voltage magnitude of the bus to which each one is connected.

    .. note::
        The values returned here are the values AFTER the powerflow has been
        computed and not the target values.

    Returns
    -------
    load_p ``numpy.ndarray``
        The active power consumption for each load (in MW)
    load_q ``numpy.ndarray``
        The reactive power consumption for each load (in MVAr)
    load_v ``numpy.ndarray``
        The voltage magnitude of the bus to which each load is connected (in kV)
    """
    pass
@abstractmethod
def lines_or_info(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Prefer using :attr:`grid2op.Observation.BaseObservation.p_or`,
        :attr:`grid2op.Observation.BaseObservation.q_or`,
        :attr:`grid2op.Observation.BaseObservation.a_or` and,
        :attr:`grid2op.Observation.BaseObservation.v_or` instead

    Return the information extracted from the _grid at the origin end of each
    powerline. For assumptions about the ordering of these vectors, see the help
    of the :func:`Backend.get_line_status` method.

    Returns
    -------
    p_or ``numpy.ndarray``
        the origin active power flowing on the lines (in MW)
    q_or ``numpy.ndarray``
        the origin reactive power flowing on the lines (in MVAr)
    v_or ``numpy.ndarray``
        the voltage magnitude at the origin of each powerlines (in kV)
    a_or ``numpy.ndarray``
        the current flow at the origin of each powerlines (in A)
    """
    pass
@abstractmethod
def lines_ex_info(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Prefer using :attr:`grid2op.Observation.BaseObservation.p_ex`,
        :attr:`grid2op.Observation.BaseObservation.q_ex`,
        :attr:`grid2op.Observation.BaseObservation.a_ex` and,
        :attr:`grid2op.Observation.BaseObservation.v_ex` instead

    Return the information extracted from the _grid at the extremity end of each
    powerline. For assumptions about the ordering of these vectors, see the help
    of the :func:`Backend.get_line_status` method.

    Returns
    -------
    p_ex ``numpy.ndarray``
        the extremity active power flowing on the lines (in MW)
    q_ex ``numpy.ndarray``
        the extremity reactive power flowing on the lines (in MVAr)
    v_ex ``numpy.ndarray``
        the voltage magnitude at the extremity of each powerlines (in kV)
    a_ex ``numpy.ndarray``
        the current flow at the extremity of each powerlines (in A)
    """
    pass
def close(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        This is called by `env.close()`; do not attempt to use it otherwise.

    Called when the environment is over. Afterwards the backend might not behave
    properly, and in any case it should not be used again before another call to
    :func:`Backend.load_grid` is performed.
    """
    pass
def reset(self, grid_path, grid_filename=None):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        This is done in the `env.reset()` method and should not be performed otherwise.

    Reload the power grid. For backwards compatibility this default implementation
    simply calls `Backend.load_grid` (and clears the computation timer), but
    overloading it in subclasses is encouraged as reloading from disk is slow.
    """
    self.comp_time = 0.0
    self.load_grid(grid_path, filename=grid_filename)
def copy(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    .. note::
        As of grid2op 1.7.1 it is not mandatory to implement this function when
        creating a backend.

        If it is not available, then grid2op will automatically deactivate the
        forecast capability and will not use the "backend.copy()" function.

        When this function is not implemented, you will not be able to use (for
        example) :func:`grid2op.Observation.BaseObservation.simulate` nor the
        :class:`grid2op.simulator.Simulator`.

    Performs a deep copy of the backend.

    We explicitly call the deepcopy operator on `self._grid` to make the error
    message more explicit in case there is a problem with this part. The
    implementation is **equivalent** to ``copy.deepcopy(self)``.

    :return: An instance of Backend equal to :attr:`self`, but deep copied.
    :rtype: :class:`Backend`

    Raises
    ------
    BackendError
        If this backend was created with ``can_be_copied=False``.
    """
    if not self._can_be_copied:
        raise BackendError("This backend cannot be copied.")

    start_grid = self._grid
    # detach the grid so deepcopy(self) only copies the "light" attributes;
    # the grid itself is copied separately below for a clearer error message
    self._grid = None
    try:
        res = copy.deepcopy(self)
        res.__class__ = type(self)  # somehow deepcopy forget the init class... weird
        res._grid = copy.deepcopy(start_grid)
    finally:
        # BUG FIX: re-attach the original grid even if one of the deepcopy calls
        # raises; previously a failure left ``self._grid`` set to None.
        self._grid = start_grid
    res._is_loaded = False  # i can reload a copy of an environment
    return res
def save_file(self, full_path):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Save the current power _grid in a human readable format supported by the
    backend (the format is not modified by this wrapper). This function is not
    mandatory and, if implemented, is used only for debugging purposes.

    :param full_path: the full path (path + file name + extension) where *self._grid* is stored.
    :type full_path: :class:`string`

    :return: ``None``
    """
    # default: saving is unsupported unless a subclass overrides this method
    raise RuntimeError("Class {} does not allow for saving file.".format(self))
def get_line_status(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Prefer using :attr:`grid2op.Observation.BaseObservation.line_status` instead

    Return the status of each line (connected: ``True`` / disconnected: ``False``).

    The order of the powerlines is assumed fixed: if the status of powerline "l1"
    is the 42nd element of the returned vector, it must always be the 42nd element,
    and every other backend method returning per-line information (flows, etc.)
    must respect the same ordering.

    The default implementation reads the topology vector and declares a line
    connected when both of its extremities are assigned to a (non-negative) bus.

    :return: an array with the line status of each powerline
    :rtype: np.array, dtype:bool
    """
    topo_vect = self.get_topo_vect()
    or_connected = topo_vect[self.line_or_pos_topo_vect] >= 0
    ex_connected = topo_vect[self.line_ex_pos_topo_vect] >= 0
    return or_connected & ex_connected
def get_line_flow(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Prefer using :attr:`grid2op.Observation.BaseObservation.a_or` or
        :attr:`grid2op.Observation.BaseObservation.a_ex` for example

    Return the current flow in each line of the powergrid (one value per line).

    In AC mode, this should be the current at the end of the powerline where the
    protection sits. There is in general no loss of generality in assuming all
    protections are on the "origin" end, so this default implementation returns
    all origin current flows. A specific application may instead return, say, the
    maximum flow between both ends.

    For assumptions about the ordering of this vector, see the help of the
    :func:`Backend.get_line_status` method.

    :return: an array with the line flows of each powerline
    :rtype: np.array, dtype:float
    """
    # only the origin current (4th item of lines_or_info) is needed here
    _, _, _, a_or = self.lines_or_info()
    return a_or
def set_thermal_limit(self, limits):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        You can set the thermal limit directly in the environment.

    This function is used as a convenience function to set the thermal limits
    :attr:`Backend.thermal_limit_a` in amperes.

    It can be used at the beginning of an episode if the thermal limit are not
    present in the original data files or alternatively if the thermal limits
    depends on the period of the year (one in winter and one in summer for example).

    Parameters
    ----------
    limits: ``object``
        It can be understood differently according to its type:

        - If it's a ``numpy.ndarray``, then it is assumed the thermal limits are given in amperes in the same order
          as the powerlines computed in the backend. In that case it modifies all the thermal limits of all
          the powerlines at once.
        - If it's a ``dict`` it must have:

          - as key the powerline names (not all names are mandatory, in that case only the powerlines with the name
            in this dictionnary will be modified)
          - as value the new thermal limit (should be a strictly positive float).

    Raises
    ------
    BackendError
        If a powerline name is unknown, or a value cannot be converted into a
        strictly positive float.
    """
    if isinstance(limits, np.ndarray):
        # array form: replaces all the limits at once (silently ignored if the
        # size does not match the number of powerlines, as historically done)
        if limits.shape[0] == self.n_line:
            self.thermal_limit_a = 1.0 * limits.astype(dt_float)
    elif isinstance(limits, dict):
        # first validate every key before modifying anything
        for el in limits.keys():
            if not el in self.name_line:
                raise BackendError(
                    'You asked to modify the thermal limit of powerline named "{}" that is not '
                    "on the grid. Names of powerlines are {}".format(
                        el, self.name_line
                    )
                )
        # BUG FIX: the original code iterated "for i, el in self.name_line" which
        # tries to unpack each *name* (a string) into (i, el) and crashes;
        # enumerate is required to get the powerline index alongside its name.
        for i, el in enumerate(self.name_line):
            if el in limits:
                try:
                    tmp = dt_float(limits[el])
                except Exception as exc_:
                    raise BackendError(
                        'Impossible to convert data ({}) for powerline named "{}" into float '
                        "values".format(limits[el], el)
                    ) from exc_
                if tmp <= 0:
                    raise BackendError(
                        'New thermal limit for powerlines "{}" is not positive ({})'
                        "".format(el, tmp)
                    )
                self.thermal_limit_a[i] = tmp
def update_thermal_limit_from_vect(self, thermal_limit_a):
    """You can use it if your backend stores the thermal limits
    of the grid in a vector (see PandaPowerBackend for example)

    .. warning::
        This is not called by the environment and cannot be used to
        model Dynamic Line Rating. For such purpose please use `update_thermal_limit`

    This function is used to create a "Simulator" from a backend for example.

    Parameters
    ----------
    thermal_limit_a : np.ndarray
        The thermal limits (in A)
    """
    # convert to the expected float dtype, then write in place into the
    # existing thermal limit vector
    new_limits = np.array(thermal_limit_a).astype(dt_float)
    self.thermal_limit_a[:] = new_limits
def update_thermal_limit(self, env):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        This is done in a call to `env.step` in case of DLR for example.

        If you don't want this feature, do not implement it.

    Update the thermal limits, for example for Dynamic Line Rating
    (`DLR <https://en.wikipedia.org/wiki/Dynamic_line_rating_for_electric_utilities>`_).
    By default it does nothing.

    When implemented, this function gives the thermal limit for a given time step
    provided the flows and weather conditions are accessible by the backend; no
    assumption is made on the method used to derive these limits.

    Parameters
    ----------
    env: :class:`grid2op.Environment.Environment`
        The environment used to compute the thermal limit
    """
    pass
def get_thermal_limit(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Retrieve the thermal limit directly from the environment instead (with a call
to :func:`grid2op.Environment.BaseEnc.get_thermal_limit` for example)
Gives the thermal limit (in amps) for each powerline of the _grid. Only one value per powerline is returned.
It is assumed that both :func:`Backend.get_line_flow` and *_get_thermal_limit* gives the value of the same
end of the powerline.
See the help of *_get_line_flow* for a more detailed description of this problem.
For assumption about the order of the powerline flows return in this vector, see the help of the
:func:`Backend.get_line_status` method.
:return: An array giving the thermal limit of the powerlines.
:rtype: np.array, dtype:float
"""
return self.thermal_limit_a
def get_relative_flow(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Prefer using :attr:`grid2op.Observation.BaseObservation.rho`
This method return the relative flows, *eg.* the current flow divided by the thermal limits. It has a pretty
straightforward default implementation, but it can be overriden for example for transformer if the limits are
on the lower voltage side or on the upper voltage level.
Returns
-------
res: ``numpy.ndarray``, dtype: float
The relative flow in each powerlines of the grid.
"""
num_ = self.get_line_flow()
denom_ = self.get_thermal_limit()
res = np.divide(num_, denom_)
return res
def get_line_overflow(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Prefer using :attr:`grid2op.Observation.BaseObservation.rho` and
check whether or not the flow is higher tha 1. or have a look at
:attr:`grid2op.Observation.BaseObservation.timestep_overflow` and check the
non zero index.
Prefer using the attribute of the :class:`grid2op.Observation.BaseObservation`
faster accessor to the line that are on overflow.
For assumption about the order of the powerline flows return in this vector, see the help of the
:func:`Backend.get_line_status` method.
:return: An array saying if a powerline is overflow or not
:rtype: np.array, dtype:bool
"""
th_lim = self.get_thermal_limit()
flow = self.get_line_flow()
return flow > th_lim
def shunt_info(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
This method is optional. If implemented, it should return the proper information about the shunt in the
powergrid.
If not implemented it returns empty list.
Note that if there are shunt on the powergrid, it is recommended that this method should be implemented before
calling :func:`Backend.check_kirchoff`.
If this method is implemented AND :func:`Backend.check_kirchoff` is called, the method
:func:`Backend.sub_from_bus_id` should also be implemented preferably.
Returns
-------
shunt_p: ``numpy.ndarray``
For each shunt, the active power it withdraw at the bus to which it is connected.
shunt_q: ``numpy.ndarray``
For each shunt, the reactive power it withdraw at the bus to which it is connected.
shunt_v: ``numpy.ndarray``
For each shunt, the voltage magnitude of the bus to which it is connected.
shunt_bus: ``numpy.ndarray``
For each shunt, the bus id to which it is connected.
"""
return [], [], [], []
def get_theta(self):
"""
Notes
-----
Don't forget to set the flag :attr:`Backend.can_output_theta` to ``True`` in the
:func:`Bakcend.load_grid` if you support this feature.
Returns
-------
line_or_theta: ``numpy.ndarray``
For each origin side of powerline, gives the voltage angle
line_ex_theta: ``numpy.ndarray``
For each extremity side of powerline, gives the voltage angle
load_theta: ``numpy.ndarray``
Gives the voltage angle to the bus at which each load is connected
gen_theta: ``numpy.ndarray``
Gives the voltage angle to the bus at which each generator is connected
storage_theta: ``numpy.ndarray``
Gives the voltage angle to the bus at which each storage unit is connected
"""
raise NotImplementedError(
"Your backend does not support the retrieval of the voltage angle theta."
)
def sub_from_bus_id(self, bus_id):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Optional method that allows to get the substation if the bus id is provided.
Parameters
----------
bus_id: ``int``
The id of the bus where you want to know to which substation it belongs
Returns
-------
The substation to which an object connected to bus with id `bus_id` is connected to.
"""
raise BackendError(
"This backend doesn't allow to get the substation from the bus id."
)
def _disconnect_line(self, id_):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Prefer using the action space to disconnect a powerline.
Disconnect the line of id "id\\_ " in the backend.
In this scenario, the *id\\_* of a powerline is its position (counted starting from O) in the vector returned by
:func:`Backend.get_line_status` or :func:`Backend.get_line_flow` for example.
For example, if the current flow on powerline "l1" is the 42nd element of the vector returned by
:func:`Backend.get_line_flow`
then :func:`Backend._disconnect_line(42)` will disconnect this same powerline "l1".
For assumption about the order of the powerline flows return in this vector, see the help of the
:func:`Backend.get_line_status` method.
:param id_: id of the powerline to be disconnected
:type id_: int
"""
my_cls = type(self)
action = my_cls._complete_action_class()
action.update({"set_line_status": [(id_, -1)]})
bk_act = my_cls.my_bk_act_class()
bk_act += action
self.apply_action(bk_act)
def _runpf_with_diverging_exception(self, is_dc):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Computes a power flow on the _grid and raises an exception in case of diverging power flow, or any other
exception that can be thrown by the backend.
:param is_dc: mode of the power flow. If *is_dc* is True, then the powerlow is run using the DC
approximation otherwise it uses the AC powerflow.
:type is_dc: bool
Raises
------
exc_: :class:`grid2op.Exceptions.DivergingPowerFlow`
In case of divergence of the powerflow
"""
conv = False
exc_me = None
try:
conv, exc_me = self.runpf(is_dc=is_dc) # run powerflow
except Grid2OpException as exc_:
exc_me = exc_
except Exception as exc_:
exc_me = DivergingPowerFlow(
f" An unexpected error occurred during the computation of the powerflow."
f"The error is: \n {exc_} \n. This is game over"
)
if not conv and exc_me is None:
exc_me = DivergingPowerFlow(
"GAME OVER: Powerflow has diverged during computation "
"or a load has been disconnected or a generator has been disconnected."
)
return exc_me
    def next_grid_state(self, env, is_dc=False):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        This is called by `env.step`

        This method is called by the environment to compute the next\\_grid\\_states.
        It allows to compute the powerline and approximate the "cascading failures" if there are some overflows.

        Parameters
        ----------
        env: :class:`grid2op.Environment.Environment`
            the environment in which the powerflow is ran.
        is_dc: ``bool``
            mode of power flow (AC : False, DC: is_dc is True)

        Returns
        --------
        disconnected_during_cf: ``numpy.ndarray``, dtype=int
            For each powerline, the iteration (0-based, within this call) at which it has been
            disconnected due to a cascading failure, or -1 if it has not been disconnected here.
        infos: ``list``
            If :attr:`Backend.detailed_infos_for_cascading_failures` is ``True`` then it returns the different
            state computed by the powerflow (can drastically slow down this function, as it requires
            deep copy of backend object). Otherwise the list is always empty.
        conv_: ``Exception`` or ``None``
            ``None`` if every powerflow converged, otherwise the exception describing the failure
            (as returned by :func:`Backend._runpf_with_diverging_exception`).
        """
        infos = []
        # -1 means "this line was not disconnected during the cascading failure"
        disconnected_during_cf = np.full(self.n_line, fill_value=-1, dtype=dt_int)
        conv_ = self._runpf_with_diverging_exception(is_dc)
        if env._no_overflow_disconnection or conv_ is not None:
            # either the cascading failure feature is deactivated, or the
            # initial powerflow already diverged: nothing more to simulate
            return disconnected_during_cf, infos, conv_
        # the environment disconnect some powerlines
        init_time_step_overflow = copy.deepcopy(env._timestep_overflow)
        ts = 0
        while True:
            # simulate the cascading failure
            lines_flows = 1.0 * self.get_line_flow()
            thermal_limits = self.get_thermal_limit()
            lines_status = self.get_line_status()
            # a) disconnect lines on hard overflow (that are still connected)
            to_disc = (
                lines_flows > env._hard_overflow_threshold * thermal_limits
            ) & lines_status
            # b) deals with soft overflow (disconnect them if lines still connected)
            init_time_step_overflow[(lines_flows >= thermal_limits) & lines_status] += 1
            to_disc[
                (init_time_step_overflow > env._nb_timestep_overflow_allowed)
                & lines_status
            ] = True
            # disconnect the current power lines
            if np.sum(to_disc[lines_status]) == 0:
                # no powerlines have been disconnected at this time step, i stop the computation there
                break
            disconnected_during_cf[to_disc] = ts
            # perform the disconnection action
            for i, el in enumerate(to_disc):
                if el:
                    self._disconnect_line(i)
            # start a powerflow on this new state
            conv_ = self._runpf_with_diverging_exception(is_dc)
            if self.detailed_infos_for_cascading_failures:
                infos.append(self.copy())
            if conv_ is not None:
                # the powerflow diverged after the disconnections: stop here
                break
            ts += 1
        return disconnected_during_cf, infos, conv_
def storages_info(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Prefer using :attr:`grid2op.Observation.BaseObservation.storage_power` instead.
This method is used to retrieve information about the storage units (active, reactive consumption
and voltage magnitude of the bus to which it is connected).
Returns
-------
storage_p ``numpy.ndarray``
The active power consumption for each load (in MW)
storage_q ``numpy.ndarray``
The reactive power consumption for each load (in MVAr)
storage_v ``numpy.ndarray``
The voltage magnitude of the bus to which each load is connected (in kV)
"""
if self.n_storage > 0:
raise BackendError(
"storages_info method is not implemented yet there is batteries on the grid."
)
def storage_deact_for_backward_comaptibility(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
This function is called under a very specific condition: an old environment has been loaded that
do not take into account the storage units, even though they were possibly some modeled by the backend.
This function is supposed to "remove" from the backend any reference to the storage units.
Overloading this function is not necessary (when developing a new backend). If it is not overloaded however,
some "backward compatibility" (for grid2op <= 1.4.0) might not be working properly depending on
your backend.
"""
pass
    def check_kirchoff(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Check that the powergrid respects kirchhoff's law.
        This function can be called at any moment (after a powerflow has been run)
        to make sure a powergrid is in a consistent state, or to perform
        some tests for example.

        In order to function properly, this method requires that :func:`Backend.shunt_info` and
        :func:`Backend.sub_from_bus_id` are properly defined. Otherwise the results might be wrong, especially
        for reactive values (q_subs and q_bus bellow)

        Returns
        -------
        p_subs ``numpy.ndarray``
            sum of injected active power at each substations (MW)
        q_subs ``numpy.ndarray``
            sum of injected reactive power at each substations (MVAr)
        p_bus ``numpy.ndarray``
            sum of injected active power at each buses. It is given in form of a matrix, with number of substations as
            row, and number of columns equal to the maximum number of buses for a substation (MW)
        q_bus ``numpy.ndarray``
            sum of injected reactive power at each buses. It is given in form of a matrix, with number of substations as
            row, and number of columns equal to the maximum number of buses for a substation (MVAr)
        diff_v_bus: ``numpy.ndarray`` (2d array)
            difference between maximum voltage and minimum voltage (computed for each elements)
            at each bus. It is an array of two dimension:

            - first dimension represents the the substation (between 1 and self.n_sub)
            - second element represents the busbar in the substation (0 or 1 usually)
        """
        # gather the state of every kind of element as seen by the backend
        p_or, q_or, v_or, *_ = self.lines_or_info()
        p_ex, q_ex, v_ex, *_ = self.lines_ex_info()
        p_gen, q_gen, v_gen = self.generators_info()
        p_load, q_load, v_load = self.loads_info()
        if self.n_storage > 0:
            p_storage, q_storage, v_storage = self.storages_info()
        # fist check the "substation law" : nothing is created at any substation
        p_subs = np.zeros(self.n_sub, dtype=dt_float)
        q_subs = np.zeros(self.n_sub, dtype=dt_float)
        # check for each bus
        p_bus = np.zeros((self.n_sub, 2), dtype=dt_float)
        q_bus = np.zeros((self.n_sub, 2), dtype=dt_float)
        # -1.0 marks "no element seen yet on this bus" in the min/max tracker
        v_bus = (
            np.zeros((self.n_sub, 2, 2), dtype=dt_float) - 1.0
        )  # sub, busbar, [min,max]
        topo_vect = self.get_topo_vect()
        # bellow i'm "forced" to do a loop otherwise, numpy do not compute the "+=" the way I want it to.
        # for example, if two powerlines are such that line_or_to_subid is equal (eg both connected to substation 0)
        # then numpy do not guarantee that `p_subs[self.line_or_to_subid] += p_or` will add the two "corresponding p_or"
        # TODO this can be vectorized with matrix product, see example in obs.flow_bus_matrix (BaseObervation.py)
        for i in range(self.n_line):
            sub_or_id = self.line_or_to_subid[i]
            sub_ex_id = self.line_ex_to_subid[i]
            # topo_vect uses 1-based bus ids, hence the -1 for local bus index
            loc_bus_or = topo_vect[self.line_or_pos_topo_vect[i]] - 1
            loc_bus_ex = topo_vect[self.line_ex_pos_topo_vect[i]] - 1
            # for substations
            p_subs[sub_or_id] += p_or[i]
            p_subs[sub_ex_id] += p_ex[i]
            q_subs[sub_or_id] += q_or[i]
            q_subs[sub_ex_id] += q_ex[i]
            # for bus
            p_bus[sub_or_id, loc_bus_or] += p_or[i]
            q_bus[sub_or_id, loc_bus_or] += q_or[i]
            p_bus[ sub_ex_id, loc_bus_ex] += p_ex[i]
            q_bus[sub_ex_id, loc_bus_ex] += q_ex[i]
            # fill the min / max voltage per bus (initialization)
            if (v_bus[sub_or_id,loc_bus_or,][0] == -1):
                v_bus[sub_or_id,loc_bus_or,][0] = v_or[i]
            if (v_bus[sub_ex_id,loc_bus_ex,][0] == -1):
                v_bus[sub_ex_id,loc_bus_ex,][0] = v_ex[i]
            if (v_bus[sub_or_id, loc_bus_or,][1]== -1):
                v_bus[sub_or_id,loc_bus_or,][1] = v_or[i]
            if (v_bus[sub_ex_id,loc_bus_ex,][1]== -1):
                v_bus[sub_ex_id,loc_bus_ex,][1] = v_ex[i]
            # now compute the correct stuff
            if v_or[i] > 0.0:
                # line is connected
                v_bus[sub_or_id,loc_bus_or,][0] = min(v_bus[sub_or_id,loc_bus_or,][0],v_or[i],)
                v_bus[sub_or_id,loc_bus_or,][1] = max(v_bus[sub_or_id,loc_bus_or,][1],v_or[i],)
            if v_ex[i] > 0:
                # line is connected
                v_bus[sub_ex_id,loc_bus_ex,][0] = min(v_bus[sub_ex_id,loc_bus_ex,][0],v_ex[i],)
                v_bus[sub_ex_id,loc_bus_ex,][1] = max(v_bus[sub_ex_id,loc_bus_ex,][1],v_ex[i],)
        # generators inject power, hence the minus sign below
        for i in range(self.n_gen):
            # for substations
            p_subs[self.gen_to_subid[i]] -= p_gen[i]
            q_subs[self.gen_to_subid[i]] -= q_gen[i]
            # for bus
            p_bus[
                self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1
            ] -= p_gen[i]
            q_bus[
                self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1
            ] -= q_gen[i]
            # compute max and min values
            if v_gen[i]:
                # but only if gen is connected
                v_bus[self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1][
                    0
                ] = min(
                    v_bus[
                        self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1
                    ][0],
                    v_gen[i],
                )
                v_bus[self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1][
                    1
                ] = max(
                    v_bus[
                        self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1
                    ][1],
                    v_gen[i],
                )
        # loads withdraw power, hence the plus sign below
        for i in range(self.n_load):
            # for substations
            p_subs[self.load_to_subid[i]] += p_load[i]
            q_subs[self.load_to_subid[i]] += q_load[i]
            # for buses
            p_bus[
                self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1
            ] += p_load[i]
            q_bus[
                self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1
            ] += q_load[i]
            # compute max and min values
            if v_load[i]:
                # but only if load is connected
                v_bus[self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1][
                    0
                ] = min(
                    v_bus[
                        self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1
                    ][0],
                    v_load[i],
                )
                v_bus[self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1][
                    1
                ] = max(
                    v_bus[
                        self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1
                    ][1],
                    v_load[i],
                )
        # storage units (loop body never runs when n_storage == 0, so the
        # p_storage/q_storage/v_storage names are always bound when used)
        for i in range(self.n_storage):
            p_subs[self.storage_to_subid[i]] += p_storage[i]
            q_subs[self.storage_to_subid[i]] += q_storage[i]
            p_bus[
                self.storage_to_subid[i], topo_vect[self.storage_pos_topo_vect[i]] - 1
            ] += p_storage[i]
            q_bus[
                self.storage_to_subid[i], topo_vect[self.storage_pos_topo_vect[i]] - 1
            ] += q_storage[i]
            # compute max and min values
            if v_storage[i] > 0:
                # the storage unit is connected
                v_bus[
                    self.storage_to_subid[i],
                    topo_vect[self.storage_pos_topo_vect[i]] - 1,
                ][0] = min(
                    v_bus[
                        self.storage_to_subid[i],
                        topo_vect[self.storage_pos_topo_vect[i]] - 1,
                    ][0],
                    v_storage[i],
                )
                v_bus[
                    self.storage_to_subid[i],
                    topo_vect[self.storage_pos_topo_vect[i]] - 1,
                ][1] = max(
                    v_bus[
                        self.storage_to_subid[i],
                        topo_vect[self.storage_pos_topo_vect[i]] - 1,
                    ][1],
                    v_storage[i],
                )
        if self.shunts_data_available:
            p_s, q_s, v_s, bus_s = self.shunt_info()
            for i in range(self.n_shunt):
                # for substations
                p_subs[self.shunt_to_subid[i]] += p_s[i]
                q_subs[self.shunt_to_subid[i]] += q_s[i]
                # for buses
                p_bus[self.shunt_to_subid[i], bus_s[i] - 1] += p_s[i]
                q_bus[self.shunt_to_subid[i], bus_s[i] - 1] += q_s[i]
                # compute max and min values
                v_bus[self.shunt_to_subid[i], bus_s[i] - 1][0] = min(
                    v_bus[self.shunt_to_subid[i], bus_s[i] - 1][0], v_s[i]
                )
                v_bus[self.shunt_to_subid[i], bus_s[i] - 1][1] = max(
                    v_bus[self.shunt_to_subid[i], bus_s[i] - 1][1], v_s[i]
                )
        else:
            warnings.warn(
                "Backend.check_kirchoff Impossible to get shunt information. Reactive information might be "
                "incorrect."
            )
        # spread between max and min voltage seen on each bus
        diff_v_bus = np.zeros((self.n_sub, 2), dtype=dt_float)
        diff_v_bus[:, :] = v_bus[:, :, 1] - v_bus[:, :, 0]
        return p_subs, q_subs, p_bus, q_bus, diff_v_bus
def load_redispacthing_data(self, path, name="prods_charac.csv"):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
This method will load everything needed for the redispatching and unit commitment problem.
We don't recommend at all to modify this function.
Parameters
----------
path: ``str``
Location of the dataframe containing the redispatching data. This dataframe (csv, coma separated)
should have at least the columns (other columns are ignored, order of the colums do not matter):
- "name": identifying the name of the generator (should match the names in self.name_gen)
- "type": one of "thermal", "nuclear", "wind", "solar" or "hydro" representing the type of the generator
- "pmax": the maximum value the generator can produce (in MW)
- "pmin": the minimum value the generator can produce (in MW)
- "max_ramp_up": maximum value the generator can increase its production between two consecutive
steps TODO make it independant from the duration of the step
- "max_ramp_down": maximum value the generator can decrease its production between two consecutive
steps (is positive) TODO make it independant from the duration of the step
- "start_cost": starting cost of the generator in $ (or any currency you want)
- "shut_down_cost": cost associated to the shut down of the generator in $ (or any currency you want)
- "marginal_cost": "average" marginal cost of the generator. For now we don't allow it to vary across
different steps or episode in $/(MW.time step duration) and NOT $/MWh (TODO change that)
- "min_up_time": minimum time a generator need to stay "connected" before we can disconnect it (
measured in time step) (TODO change that)
- "min_down_time": minimum time a generator need to stay "disconnected" before we can connect it again.(
measured in time step) (TODO change that)
name: ``str``
Name of the dataframe containing the redispatching data. Defaults to 'prods_charac.csv', we don't advise
to change it.
"""
self._fill_names()
self.redispatching_unit_commitment_availble = False
# for redispatching
fullpath = os.path.join(path, name)
if not os.path.exists(fullpath):
return
try:
df = pd.read_csv(fullpath, sep=",")
except Exception as exc_:
warnings.warn(
f'Impossible to load the redispatching data for this environment with error:\n"{exc_}"\n'
f"Redispatching will be unavailable.\n"
f"Please make sure \"{name}\" file is a csv (coma ',') separated file."
)
return
mandatory_columns = [
"type",
"Pmax",
"Pmin",
"max_ramp_up",
"max_ramp_down",
"start_cost",
"shut_down_cost",
"marginal_cost",
"min_up_time",
"min_down_time",
]
for el in mandatory_columns:
if el not in df.columns:
warnings.warn(
f"Impossible to load the redispatching data for this environment because"
f"one of the mandatory column is not present ({el}). Please check the file "
f'"{name}" contains all the mandatory columns: {mandatory_columns}'
)
return
gen_info = {}
for _, row in df.iterrows():
gen_info[row["name"]] = {
"type": row["type"],
"pmax": row["Pmax"],
"pmin": row["Pmin"],
"max_ramp_up": row["max_ramp_up"],
"max_ramp_down": row["max_ramp_down"],
"start_cost": row["start_cost"],
"shut_down_cost": row["shut_down_cost"],
"marginal_cost": row["marginal_cost"],
"min_up_time": row["min_up_time"],
"min_down_time": row["min_down_time"],
}
self.redispatching_unit_commitment_availble = True
self.gen_type = np.full(self.n_gen, fill_value="aaaaaaaaaa")
self.gen_pmin = np.full(self.n_gen, fill_value=1.0, dtype=dt_float)
self.gen_pmax = np.full(self.n_gen, fill_value=1.0, dtype=dt_float)
self.gen_redispatchable = np.full(self.n_gen, fill_value=False, dtype=dt_bool)
self.gen_max_ramp_up = np.full(self.n_gen, fill_value=0.0, dtype=dt_float)
self.gen_max_ramp_down = np.full(self.n_gen, fill_value=0.0, dtype=dt_float)
self.gen_min_uptime = np.full(self.n_gen, fill_value=-1, dtype=dt_int)
self.gen_min_downtime = np.full(self.n_gen, fill_value=-1, dtype=dt_int)
self.gen_cost_per_MW = np.full(
self.n_gen, fill_value=1.0, dtype=dt_float
) # marginal cost
self.gen_startup_cost = np.full(
self.n_gen, fill_value=1.0, dtype=dt_float
) # start cost
self.gen_shutdown_cost = np.full(
self.n_gen, fill_value=1.0, dtype=dt_float
) # shutdown cost
self.gen_renewable = np.full(self.n_gen, fill_value=False, dtype=dt_bool)
for i, gen_nm in enumerate(self.name_gen):
try:
tmp_gen = gen_info[gen_nm]
except KeyError as exc_:
raise BackendError(
f"Impossible to load the redispatching data. The generator {i} with name {gen_nm} "
f'could not be located on the description file "{name}".'
)
self.gen_type[i] = str(tmp_gen["type"])
self.gen_pmin[i] = self._aux_check_finite_float(
tmp_gen["pmin"], f' for gen. "{gen_nm}" and column "pmin"'
)
self.gen_pmax[i] = self._aux_check_finite_float(
tmp_gen["pmax"], f' for gen. "{gen_nm}" and column "pmax"'
)
self.gen_redispatchable[i] = dt_bool(
tmp_gen["type"] not in ["wind", "solar"]
)
tmp = dt_float(tmp_gen["max_ramp_up"])
if np.isfinite(tmp):
self.gen_max_ramp_up[i] = tmp
tmp = dt_float(tmp_gen["max_ramp_down"])
if np.isfinite(tmp):
self.gen_max_ramp_down[i] = tmp
self.gen_min_uptime[i] = dt_int(tmp_gen["min_up_time"])
self.gen_min_downtime[i] = dt_int(tmp_gen["min_down_time"])
self.gen_cost_per_MW[i] = dt_float(tmp_gen["marginal_cost"])
self.gen_startup_cost[i] = dt_float(tmp_gen["start_cost"])
self.gen_shutdown_cost[i] = dt_float(tmp_gen["shut_down_cost"])
self.gen_renewable[i] = dt_bool(tmp_gen["type"] in ["wind", "solar"])
self.redispatching_unit_commitment_availble = True
def load_storage_data(self, path, name="storage_units_charac.csv"):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
This method will load everything needed in presence of storage unit on the grid.
We don't recommend at all to modify this function.
Parameters
----------
path: ``str``
Location of the dataframe containing the storage unit data. This dataframe (csv, coma separated)
should have at least the columns. It is mandatory to have it if there are storage units on the grid,
but it is ignored if not:
- "name": identifying the name of the unit storage (should match the names in self.name_storage)
- "type": one of "battery", "pumped_storage" representing the type of the unit storage
- "Emax": the maximum energy capacity the unit can store (in MWh)
- "Emin": the minimum energy capacity the unit can store (in MWh) [it can be >0 if a battery cannot be
completely empty for example]
- "max_p_prod": maximum flow the battery can absorb in MW
- "max_p_absorb": maximum flow the battery can produce in MW
- "marginal_cost": cost in $ (or any currency, really) of usage of the battery.
- "power_discharge_loss" (optional): power loss in the battery in MW (the capacity will decrease constantly
of this amount). Set it to 0.0 to deactivate it. If not present, it is set to 0.
- "charging_efficiency" (optional):
Float between 0. and 1. 1. means that if the grid provides 1MW (for ex. 1MW for 1h) to the storage
capacity, then the
state of charge of the battery will increase of 1MWh. If this efficiency is 0.5 then if 1MWh
if provided by the grid, then only 0.5MWh will be stored.
- "discharging_efficiency" (optional): battery efficiency when it is discharged. 1.0 means if you want to
get 1MWh on the grid, the battery state of charge will decrease by 1MWh. If this is 33% then it
means if you want to get (grid point of view) 1MWh on the grid, you need to decrease the
state of charge of 3MWh.
name: ``str``
Name of the dataframe containing the redispatching data. Defaults to 'prods_charac.csv', we don't advise
to change it.
Notes
-----
The battery efficiency defined as the "AC-AC" round trip efficiency is, with the convention above, defined
as `charging_efficiency * discharging_efficency` (see
https://www.greeningthegrid.org/news/new-resource-grid-scale-battery-storage-frequently-asked-questions-1
for further references)
"""
if self.n_storage == 0:
# set the "no battery state" if there are none
type(self).set_no_storage()
return
# for storage unit information
fullpath = os.path.join(path, name)
if not os.path.exists(fullpath):
raise BackendError(
f"There are storage unit on the grid, yet we could not locate their description."
f'Please make sure to have a file "{name}" where the environment data are located.'
f'For this environment the location is "{path}"'
)
try:
df = pd.read_csv(fullpath)
except Exception as exc_:
raise BackendError(
f"There are storage unit on the grid, yet we could not locate their description."
f'Please make sure to have a file "{name}" where the environment data are located.'
f'For this environment the location is "{path}"'
)
mandatory_colnames = [
"name",
"type",
"Emax",
"Emin",
"max_p_prod",
"max_p_absorb",
"marginal_cost",
]
for el in mandatory_colnames:
if el not in df.columns:
raise BackendError(
f"There are storage unit on the grid, yet we could not properly load their "
f"description. Please make sure the csv {name} contains all the columns "
f"{mandatory_colnames}"
)
stor_info = {}
for _, row in df.iterrows():
stor_info[row["name"]] = {
"name": row["name"],
"type": row["type"],
"Emax": row["Emax"],
"Emin": row["Emin"],
"max_p_prod": row["max_p_prod"],
"max_p_absorb": row["max_p_absorb"],
"marginal_cost": row["marginal_cost"],
}
if "power_loss" in row:
stor_info[row["name"]]["power_loss"] = row["power_loss"]
else:
stor_info[row["name"]]["power_loss"] = 0.0
if "charging_efficiency" in row:
stor_info[row["name"]]["charging_efficiency"] = row[
"charging_efficiency"
]
else:
stor_info[row["name"]]["charging_efficiency"] = 1.0
if "discharging_efficiency" in row:
stor_info[row["name"]]["discharging_efficiency"] = row[
"discharging_efficiency"
]
else:
stor_info[row["name"]]["discharging_efficiency"] = 1.0
self.storage_type = np.full(self.n_storage, fill_value="aaaaaaaaaa")
self.storage_Emax = np.full(self.n_storage, fill_value=1.0, dtype=dt_float)
self.storage_Emin = np.full(self.n_storage, fill_value=0.0, dtype=dt_float)
self.storage_max_p_prod = np.full(
self.n_storage, fill_value=1.0, dtype=dt_float
)
self.storage_max_p_absorb = np.full(
self.n_storage, fill_value=1.0, dtype=dt_float
)
self.storage_marginal_cost = np.full(
self.n_storage, fill_value=1.0, dtype=dt_float
)
self.storage_loss = np.full(self.n_storage, fill_value=0.0, dtype=dt_float)
self.storage_charging_efficiency = np.full(
self.n_storage, fill_value=1.0, dtype=dt_float
)
self.storage_discharging_efficiency = np.full(
self.n_storage, fill_value=1.0, dtype=dt_float
)
for i, sto_nm in enumerate(self.name_storage):
try:
tmp_sto = stor_info[sto_nm]
except KeyError as exc_:
raise BackendError(
f"Impossible to load the storage data. The storage unit {i} with name {sto_nm} "
f'could not be located on the description file "{name}" with error : \n'
f"{exc_}."
)
self.storage_type[i] = str(tmp_sto["type"])
self.storage_Emax[i] = self._aux_check_finite_float(
tmp_sto["Emax"], f' for {sto_nm} and column "Emax"'
)
self.storage_Emin[i] = self._aux_check_finite_float(
tmp_sto["Emin"], f' for {sto_nm} and column "Emin"'
)
self.storage_max_p_prod[i] = self._aux_check_finite_float(
tmp_sto["max_p_prod"], f' for {sto_nm} and column "max_p_prod"'
)
self.storage_max_p_absorb[i] = self._aux_check_finite_float(
tmp_sto["max_p_absorb"], f' for {sto_nm} and column "max_p_absorb"'
)
self.storage_marginal_cost[i] = self._aux_check_finite_float(
tmp_sto["marginal_cost"], f' for {sto_nm} and column "marginal_cost"'
)
self.storage_loss[i] = self._aux_check_finite_float(
tmp_sto["power_loss"], f' for {sto_nm} and column "power_loss"'
)
self.storage_charging_efficiency[i] = self._aux_check_finite_float(
tmp_sto["charging_efficiency"],
f' for {sto_nm} and column "charging_efficiency"',
)
self.storage_discharging_efficiency[i] = self._aux_check_finite_float(
tmp_sto["discharging_efficiency"],
f' for {sto_nm} and column "discharging_efficiency"',
)
def _aux_check_finite_float(self, nb_, str_=""):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
check and returns if correct that a number is convertible to `dt_float` and that it's finite
"""
tmp = dt_float(nb_)
if not np.isfinite(tmp):
raise BackendError(
f"Infinite number met for a number that should be finite. Please check your data {str_}"
)
return tmp
def load_grid_layout(self, path, name="grid_layout.json"):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
We don't recommend at all to modify this function.
This function loads the layout (eg the coordinates of each substation) for the powergrid.
Parameters
----------
path: ``str``
TODO
name: ``str``
TODO
"""
full_fn = os.path.join(path, name)
if not os.path.exists(full_fn):
return Exception("File {} does not exist".format(full_fn))
try:
with open(full_fn, "r") as f:
dict_ = json.load(f)
except Exception as e:
return e
new_grid_layout = {}
for el in self.name_sub:
if el not in dict_:
return Exception("substation named {} not in layout".format(el))
tmp = dict_[el]
try:
x, y = tmp
x = dt_float(x)
y = dt_float(y)
new_grid_layout[el] = (x, y)
except Exception as e_:
return Exception(
"fail to convert coordinates for {} into list of coordinates with error {}"
"".format(el, e_)
)
self.attach_layout(grid_layout=new_grid_layout)
return None
def _aux_get_line_status_to_set(self, line_status):
line_status = 2 * line_status - 1
line_status = line_status.astype(dt_int)
return line_status
def get_action_to_set(self):
"""
Get the action to set another backend to represent the internal state of this current backend.
It handles also the information about the shunts if available
Returns
-------
res: :class:`grid2op.Action.CompleteAction`
The complete action to set a backend to the internal state of `self`
"""
line_status = self._aux_get_line_status_to_set(self.get_line_status())
topo_vect = self.get_topo_vect()
if np.all(topo_vect == -1):
raise RuntimeError(
"The get_action_to_set should not be used after a divergence of the powerflow"
)
prod_p, _, prod_v = self.generators_info()
load_p, load_q, _ = self.loads_info()
set_me = self._complete_action_class()
dict_ = {
"set_line_status": line_status,
"set_bus": 1 * topo_vect,
"injection": {
"prod_p": prod_p,
"prod_v": prod_v,
"load_p": load_p,
"load_q": load_q,
},
}
if self.shunts_data_available:
p_s, q_s, sh_v, bus_s = self.shunt_info()
dict_["shunt"] = {"shunt_bus": bus_s}
if np.sum(bus_s >= 1):
p_s *= (self._sh_vnkv / sh_v) ** 2
q_s *= (self._sh_vnkv / sh_v) ** 2
p_s[bus_s == -1] = np.NaN
q_s[bus_s == -1] = np.NaN
dict_["shunt"]["shunt_p"] = p_s
dict_["shunt"]["shunt_q"] = q_s
if self.n_storage > 0:
sto_p, *_ = self.storages_info()
dict_["set_storage"] = 1.0 * sto_p
set_me.update(dict_)
return set_me
def update_from_obs(self, obs, force_update=False):
    """
    Takes an observation as input and update the internal state of `self` to match the state of the backend
    that produced this observation.

    Only the "line_status", "topo_vect", "prod_p", "prod_v", "load_p" and "load_q" attributes of the
    observations are used.

    Notes
    -----
    If the observation is not perfect (for example with noise, or partial) this method will not work. You need
    to pass it a complete observation.

    For example, you might want to consider to have a state estimator if that is the case.

    Parameters
    ----------
    obs: :class:`grid2op.Observation.CompleteObservation`
        A complete observation describing the state of the grid you want this backend to be in.
    force_update: ``bool``
        If ``True``, skip the check that `obs` really is a
        :class:`grid2op.Observation.CompleteObservation` (use at your own risk).

    Raises
    ------
    BackendError
        If `obs` is not a complete observation (and `force_update` is ``False``), or
        if shunt data are required but missing from the observation.
    """
    # lazy loading to prevent circular references
    from grid2op.Observation import CompleteObservation

    if (not force_update) and (not isinstance(obs, CompleteObservation)):
        raise BackendError(
            "Impossible to set a backend to a state not represented by a "
            '"grid2op.Observation.CompleteObservation".'
        )

    backend_action = self.my_bk_act_class()
    act = self._complete_action_class()
    line_status = self._aux_get_line_status_to_set(obs.line_status)
    # skip the action part and update directly the backend action !
    dict_ = {
        "set_bus": obs.topo_vect,
        "set_line_status": line_status,
        "injection": {
            "prod_p": obs.prod_p,
            "prod_v": obs.prod_v,
            "load_p": obs.load_p,
            "load_q": obs.load_q,
        },
    }

    if self.shunts_data_available and obs.shunts_data_available:
        if "_shunt_bus" not in type(obs).attr_list_set:
            raise BackendError(
                "Impossible to set the backend to the state given by the observation: shunts data "
                "are not present in the observation."
            )
        dict_["shunt"] = {"shunt_bus": obs._shunt_bus}
        shunt_co = obs._shunt_bus >= 1
        if np.sum(shunt_co):
            # rescale the shunt power back to nominal voltage conditions
            mults = (self._sh_vnkv / obs._shunt_v) ** 2
            sh_p = obs._shunt_p * mults
            sh_q = obs._shunt_q * mults
            # np.nan and not the np.NaN alias: the latter was removed in numpy 2.0
            sh_p[~shunt_co] = np.nan
            sh_q[~shunt_co] = np.nan
            dict_["shunt"]["shunt_p"] = sh_p
            dict_["shunt"]["shunt_q"] = sh_q
    act.update(dict_)
    backend_action += act
    self.apply_action(backend_action)
def assert_grid_correct(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        This is done as it should be by the Environment
    """
    # lazy loading (prevents circular imports)
    from grid2op.Action import CompleteAction
    from grid2op.Action._BackendAction import _BackendAction

    orig_type = type(self)
    if orig_type.my_bk_act_class is None:
        # the grid-dependent class attributes have NOT been initialized yet:
        # build the grid-specific subclass and register it in its module
        self._init_class_attr()
        # hack due to changing class of imported module in the module itself
        self.__class__ = type(self).init_grid(
            type(self), force_module=type(self).__module__
        )
        setattr(
            sys.modules[type(self).__module__],
            self.__class__.__name__,
            self.__class__,
        )
        # reset the attribute of the grid2op.Backend.Backend class
        # that can be messed up with depending on the initialization of the backend
        Backend._clear_class_attribute()
        orig_type._clear_class_attribute()
    # build the action classes specialized for this grid and run the
    # class-level consistency checks
    my_cls = type(self)
    my_cls.my_bk_act_class = _BackendAction.init_grid(my_cls)
    my_cls._complete_action_class = CompleteAction.init_grid(my_cls)
    my_cls._complete_action_class._add_shunt_data()
    my_cls._complete_action_class._update_value_set()
    my_cls.assert_grid_correct_cls()
def assert_grid_correct_after_powerflow(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        This is done as it should be by the Environment

    This method is called by the environment. It ensure that the backend remains consistent even after a powerflow
    has be run with :func:`Backend.runpf` method.

    :raise: :class:`grid2op.Exceptions.EnvError` and possibly all of its derived class.
    """

    def _check_line_vect(vect, fun_name):
        # helper: a per-powerline vector must have n_line entries, all finite
        if vect.shape[0] != self.n_line:
            raise IncorrectNumberOfLines(
                'returned by "backend.{}()"'.format(fun_name)
            )
        if np.any(~np.isfinite(vect)):
            # NOTE(review): builtin EnvironmentError (alias of OSError) is raised here
            # even though the docstring announces grid2op's EnvError -- kept as-is
            # for backward compatibility; confirm before changing.
            raise EnvironmentError(type(self).ERR_INIT_POWERFLOW)

    def _check_info_tuple(tpl, fun_name, nb_el, descr, nb_obj, exc_cls):
        # helper: a "*_info()" getter must return nb_el vectors of size nb_obj
        if len(tpl) != nb_el:
            raise EnvError(
                '"{}()" should return a tuple with {} elements: {}'.format(
                    fun_name, nb_el, descr
                )
            )
        for el in tpl:
            if el.shape[0] != nb_obj:
                raise exc_cls('returned by "backend.{}()"'.format(fun_name))

    # per-powerline vectors: right size and finite values
    _check_line_vect(self.get_line_status(), "get_line_status")
    _check_line_vect(self.get_line_flow(), "get_line_flow")
    _check_line_vect(self.get_thermal_limit(), "get_thermal_limit")
    _check_line_vect(self.get_line_overflow(), "get_line_overflow")

    # the "info" getters: right tuple length and right vector sizes
    _check_info_tuple(
        self.generators_info(), "generators_info", 3, "p, q and v",
        self.n_gen, IncorrectNumberOfGenerators,
    )
    _check_info_tuple(
        self.loads_info(), "loads_info", 3, "p, q and v",
        self.n_load, IncorrectNumberOfLoads,
    )
    _check_info_tuple(
        self.lines_or_info(), "lines_or_info", 4, "p, q, v and a",
        self.n_line, IncorrectNumberOfLines,
    )
    _check_info_tuple(
        self.lines_ex_info(), "lines_ex_info", 4, "p, q, v and a",
        self.n_line, IncorrectNumberOfLines,
    )
    if self.n_storage > 0:
        # the original code raises IncorrectNumberOfLines for storage units too:
        # kept for backward compatibility with callers catching that type
        _check_info_tuple(
            self.storages_info(), "storages_info", 3, "p, q and v",
            self.n_storage, IncorrectNumberOfLines,
        )

    # finally the topology vector: one entry per element of the grid
    tmp = self.get_topo_vect()
    if tmp.shape[0] != np.sum(self.sub_info):
        raise IncorrectNumberOfElements('returned by "backend.get_topo_vect()"')
    if np.any(~np.isfinite(tmp)):
        raise EnvError(
            'Some components of "backend.get_topo_vect()" are not finite. This should be integer.'
        )
| 77,496 | 40.508838 | 132 | py |
Grid2Op | Grid2Op-master/grid2op/Backend/EducPandaPowerBackend.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os # load the python os default module
import sys # laod the python sys default module
import copy
import warnings
import numpy as np
import pandas as pd
import pandapower as pp
import scipy
from grid2op.dtypes import dt_int, dt_float, dt_bool
from grid2op.Backend.Backend import Backend
from grid2op.Exceptions import *
class EducPandaPowerBackend(Backend):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        This class does not work.
        It is mainly presented for educational purpose as example on coding your own backend.

        It is derived from PandaPowerBackend, but without all the "optimization" that could make the
        resulting backend harder to read.

    This module presents an example of an implementation of a `grid2op.Backend` when using the powerflow
    implementation "pandapower" available at `PandaPower <https://www.pandapower.org/>`_ for more details about
    this backend. This file is provided as an example of a proper :class:`grid2op.Backend.Backend` implementation.

    This backend currently does not work with 3 winding transformers and other exotic object.

    As explained in the `grid2op.Backend` module, every module must inherit the `grid2op.Backend` class.

    We illustrate here how to set up a backend with what we think is "rather standard" in the powersystem
    eco system.

    Please consult the documentation at :ref:`create-backend-module` for more information.

    You have at your disposal:

    - a tool that is able to compute power flows from a given grid in a given format (in this case call
      `pandapower.runpf(pandapower_grid)`)
    - a tool that is able to load a powergrid from a file store on the hard drive.

    We try to find a good compromise between the size of the code (for clarity) and the "closeness to a working
    code".

    For a complete working example, relatively optimized (but much less readable) please have a look at the
    real :class:`grid2op.Backend.PandaPowerBackend` class.
    """
def __init__(self,
             detailed_infos_for_cascading_failures=False,
             can_be_copied=True):
    """
    Minimal constructor: delegate everything to :class:`grid2op.Backend.Backend`
    and warn the user that this implementation is for teaching purpose only.

    Parameters
    ----------
    detailed_infos_for_cascading_failures: ``bool``
        See the documentation of :class:`grid2op.Backend.Backend.__init__` for more information
    can_be_copied: ``bool``
        Whether this backend supports being deep copied.
    """
    # extra keyword arguments given to the base class are stored there (without
    # copy) and reused whenever another backend has to be built, for example by
    # the Runner.
    super().__init__(
        detailed_infos_for_cascading_failures=detailed_infos_for_cascading_failures,
        can_be_copied=can_be_copied,
    )
    warnings.warn(
        "This backend is used for demonstration purpose only, you should not use it under any "
        "circumstances. Please use grid2op.Backend.PandaPowerBackend instead"
    )
    # number of "real" pandapower powerlines (as opposed to trafos); set by load_grid
    self._nb_real_line_pandapower = None
    # NB: this instance of backend is here for academic purpose only. For clarity, it does not handle
    # neither shunt nor storage unit.
####### load the grid
def load_grid(self, path=None, filename=None):
    """
    Demonstration on how you can load a powergrid and then initialize the proper grid2op attributes.

    The only thing we have to do here is to "order" the objects in each substation. Note that we don't even do it
    implicitly here (relying on default grid2op ordering).

    The only decision we had to make was concerning "grid2op powerlines" which represents both
    "pandapower transformers" and "pandapower powerlines".

    We decided that:

    - powerline are "before" trafo (so in the grid2op line id I will put first all powerlines, then all trafo)
    - grid2op "origin" side will be "from" side for pandapower powerline and "hv" side for pandapower trafo
    - grid2op "extremity" side will be "to" side for pandapower powerline and "lv" side for pandapower trafo

    .. note:: We use one "trick" here. Pandapower grid (as it will be the case for most format) will have one "bus"
        per substation. For grid2op, we want at least 2 busbar per substation. So we simply copy and paste the grid.

        And we will deactivate the busbar that are not connected (no element connected to it).

        This "coding" allows for easily mapping the bus id (each bus is represented with an id in pandapower)
        and whether its busbar 1 or busbar 2 (grid2op side). More precisely: busbar 1 of substation with
        id `sub_id` will have id `sub_id` and busbar 2 of the same substation will have id `sub_id + n_sub`
        (recall that n_sub is the number of substation on the grid).

        This "coding" is not optimal in the ram it takes. But we recommend you to adopt a similar one. It's
        pretty easy to change the topology using this trick, much easier than if you rely on "switches" for
        example. (But of course you can still use switches if you really want to)
    """
    # first, handles different kind of path:
    if path is None and filename is None:
        raise RuntimeError(
            "You must provide at least one of path or file to load a powergrid."
        )
    if path is None:
        full_path = filename
    elif filename is None:
        full_path = path
    else:
        full_path = os.path.join(path, filename)
    if not os.path.exists(full_path):
        raise RuntimeError('There is no powergrid at "{}"'.format(full_path))

    # then load the grid located at the full path and store it in `self._grid`
    # raise an exception if it can't be loaded
    try:
        with warnings.catch_warnings():
            # remove deprecation warnings for old version of pandapower
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            self._grid = pp.from_json(full_path)
    except Exception as exc_:
        raise BackendError(
            f'Impossible to load the powergrid located at "{full_path}". Please '
            f"check the file exist and that the file represent a valid pandapower "
            f"grid. For your information, the error is:\n{exc_}"
        )

    ######################################################################
    # this part is due to the "modeling" of the topology FOR THIS EXAMPLE
    # remember (see docstring of this function) that we "duplicate the buses" to code more easily the
    # topology modification (instead of relying on the `switches`)
    # first we remember the number of substation
    self.n_sub = self._grid.bus.shape[0]
    # and then we duplicate the bus (the copies start "out of service": they are
    # the "busbar 2" of each substation)
    add_topo = copy.deepcopy(self._grid.bus)
    add_topo.index += add_topo.shape[0]
    add_topo["in_service"] = False
    self._grid.bus = pd.concat((self._grid.bus, add_topo))
    self._nb_real_line_pandapower = self._grid.line.shape[0]
    # i do a powerflow to initialize the "results" dataframes
    # this last step is internal to pandapower
    pp.runpp(self._grid, check_connectivity=False)
    ######################################################################

    # and now we initialize the number of each of the elements
    self.n_line = (
        self._grid.line.shape[0] + self._grid.trafo.shape[0]
    )  # trafo are powerline for grid2op !
    self.n_gen = self._grid.gen.shape[0]
    self.n_load = self._grid.load.shape[0]
    # self.n_sub  # already initialize above

    # initialize the number of elements per substation
    # now export to grid2op the substation to which objects are connected
    self.load_to_subid = copy.deepcopy(self._grid.load["bus"])
    self.gen_to_subid = copy.deepcopy(self._grid.gen["bus"])
    # here we just decide (but that is a convention we could have done it differently)
    # that "origin side" (grid2op) corresponds to "from_bus" from pandapower line and "hv_bus" for
    # pandapower trafo.
    self.line_or_to_subid = np.concatenate(
        (
            copy.deepcopy(self._grid.line["from_bus"]),
            copy.deepcopy(self._grid.trafo["hv_bus"]),
        )
    )
    self.line_ex_to_subid = np.concatenate(
        (
            copy.deepcopy(self._grid.line["to_bus"]),
            copy.deepcopy(self._grid.trafo["lv_bus"]),
        )
    )

    # and now we don't forget to initialize the rest
    self._compute_pos_big_topo()  # we highly recommend you to call this !

    # and now the thermal limit (in A); for trafos it is derived from the rated
    # apparent power and the hv nominal voltage: I = S / (sqrt(3) * U)
    self.thermal_limit_a = 1000. * np.concatenate(
        (
            self._grid.line["max_i_ka"].values,
            self._grid.trafo["sn_mva"].values
            / (np.sqrt(3) * self._grid.trafo["vn_hv_kv"].values),
        )
    )
    self.thermal_limit_a = self.thermal_limit_a.astype(dt_float)

    # NB: this instance of backend is here for academic purpose only. For clarity, it does not handle
    # neither shunt nor storage unit.
    type(self).shunts_data_available = False
    type(self).set_no_storage()
###### modify the grid
def apply_action(self, backendAction=None):
    """
    Here the implementation of the "modify the grid" function.

    From the documentation, it's pretty straightforward, even though the implementation takes ~70 lines of code.
    Most of them being direct copy paste from the examples in the documentation.
    """
    if backendAction is None:
        return

    # unpack the aggregated view of the action: which buses are active and the
    # new injection setpoints (storage and shunts are not handled by this backend)
    (
        active_bus,
        (prod_p, prod_v, load_p, load_q, storage),
        _,
        shunts__,
    ) = backendAction()

    # injections: each iterator yields (element_id, new_value) for changed values only
    for gen_id, new_p in prod_p:
        self._grid.gen["p_mw"].iloc[gen_id] = new_p
    for gen_id, new_v in prod_v:
        self._grid.gen["vm_pu"].iloc[gen_id] = new_v  # but new_v is not pu !
        self._grid.gen["vm_pu"].iloc[gen_id] /= self._grid.bus["vn_kv"][
            self.gen_to_subid[gen_id]
        ]  # now it is :-)
    for load_id, new_p in load_p:
        self._grid.load["p_mw"].iloc[load_id] = new_p
    for load_id, new_q in load_q:
        self._grid.load["q_mvar"].iloc[load_id] = new_q

    # now i deal with the topology
    loads_bus = backendAction.get_loads_bus()
    for load_id, new_bus in loads_bus:
        if new_bus == -1:
            # bus -1 means: disconnect the element
            self._grid.load["in_service"][load_id] = False
        else:
            self._grid.load["in_service"][load_id] = True
            # this formula is really convenient because we decided to duplicated buses in each substation.
            # and decided that: bus 1 of a substation with id `sub_id` will have id `sub_id` and
            # bus 2 of the same substation will have id `sub_id + n_substation`
            self._grid.load["bus"][load_id] = (
                self.load_to_subid[load_id] + (new_bus - 1) * self.n_sub
            )

    gens_bus = backendAction.get_gens_bus()
    for gen_id, new_bus in gens_bus:
        if new_bus == -1:
            self._grid.gen["in_service"][gen_id] = False
        else:
            self._grid.gen["in_service"][gen_id] = True
            self._grid.gen["bus"][gen_id] = (
                self.gen_to_subid[gen_id] + (new_bus - 1) * self.n_sub
            )

    # powerlines: grid2op line ids below _nb_real_line_pandapower are pandapower
    # "line" rows, the others are "trafo" rows (shifted by that offset)
    lines_or_bus = backendAction.get_lines_or_bus()
    for line_id, new_bus in lines_or_bus:
        if line_id < self._nb_real_line_pandapower:
            dt = self._grid.line
            key = "from_bus"
            line_id_db = line_id
        else:
            dt = self._grid.trafo
            key = "hv_bus"
            line_id_db = line_id - self._nb_real_line_pandapower
        if new_bus == -1:
            dt["in_service"][line_id_db] = False
        else:
            dt["in_service"][line_id_db] = True
            dt[key][line_id_db] = (
                self.line_or_to_subid[line_id] + (new_bus - 1) * self.n_sub
            )

    lines_ex_bus = backendAction.get_lines_ex_bus()
    for line_id, new_bus in lines_ex_bus:
        if line_id < self._nb_real_line_pandapower:
            dt = self._grid.line
            key = "to_bus"
            line_id_db = line_id
        else:
            dt = self._grid.trafo
            key = "lv_bus"
            line_id_db = line_id - self._nb_real_line_pandapower
        if new_bus == -1:
            dt["in_service"][line_id_db] = False
        else:
            dt["in_service"][line_id_db] = True
            dt[key][line_id_db] = (
                self.line_ex_to_subid[line_id] + (new_bus - 1) * self.n_sub
            )

    # finally (de)activate the buses themselves: bus i is busbar 1 of substation i,
    # bus i + n_sub is its busbar 2
    bus_is = self._grid.bus["in_service"]
    for i, (bus1_status, bus2_status) in enumerate(active_bus):
        bus_is[i] = bus1_status  # no iloc for bus, don't ask me why please :-/
        bus_is[i + self.n_sub] = bus2_status
###### computes powerflow
def runpf(self, is_dc=False):
    """
    Run a powerflow on the internal pandapower grid.

    Pandapower exposes one entry point per mode (``rundcpp`` for DC, ``runpp``
    for AC); the extra lines only silence warnings and turn the "not converged"
    exception into a ``(False, exception)`` return value.
    """
    solver = pp.rundcpp if is_dc else pp.runpp
    try:
        with warnings.catch_warnings():
            # a non connex grid raises a MatrixRankWarning; in that case the
            # load flow simply reports "not converged", so silence the warning
            warnings.filterwarnings(
                "ignore", category=scipy.sparse.linalg.MatrixRankWarning
            )
            warnings.filterwarnings("ignore", category=RuntimeWarning)
            solver(self._grid, check_connectivity=False)
        return self._grid.converged, None
    except pp.powerflow.LoadflowNotConverged as exc_:
        # the powerflow has not converged: results are NaN
        return False, exc_
###### getters
def get_topo_vect(self):
    """
    Build the complete topology vector from the buses stored in the pandapower grid.

    Thanks to the "two explicit buses per substation" encoding, an element sits on
    busbar 1 exactly when its pandapower bus id equals its substation id (and on
    busbar 2 otherwise). Both sides of a disconnected powerline are marked -1.
    """
    res = np.full(self.dim_topo, fill_value=np.nan, dtype=dt_int)
    line_status = self.get_line_status()
    nb = self._nb_real_line_pandapower

    # "real" powerlines first...
    for i, (bus_or_id, bus_ex_id) in enumerate(
        self._grid.line[["from_bus", "to_bus"]].values
    ):
        if line_status[i]:
            res[self.line_or_pos_topo_vect[i]] = (
                1 if bus_or_id == self.line_or_to_subid[i] else 2
            )
            res[self.line_ex_pos_topo_vect[i]] = (
                1 if bus_ex_id == self.line_ex_to_subid[i] else 2
            )
        else:
            res[self.line_or_pos_topo_vect[i]] = -1
            res[self.line_ex_pos_topo_vect[i]] = -1

    # ... then trafos, whose grid2op line ids are shifted by `nb`
    for i, (bus_or_id, bus_ex_id) in enumerate(
        self._grid.trafo[["hv_bus", "lv_bus"]].values
    ):
        j = i + nb
        if line_status[j]:
            res[self.line_or_pos_topo_vect[j]] = (
                1 if bus_or_id == self.line_or_to_subid[j] else 2
            )
            res[self.line_ex_pos_topo_vect[j]] = (
                1 if bus_ex_id == self.line_ex_to_subid[j] else 2
            )
        else:
            res[self.line_or_pos_topo_vect[j]] = -1
            res[self.line_ex_pos_topo_vect[j]] = -1

    for i, bus_id in enumerate(self._grid.gen["bus"].values):
        res[self.gen_pos_topo_vect[i]] = 1 if bus_id == self.gen_to_subid[i] else 2
    for i, bus_id in enumerate(self._grid.load["bus"].values):
        res[self.load_pos_topo_vect[i]] = (
            1 if bus_id == self.load_to_subid[i] else 2
        )
    return res
def generators_info(self):
    """
    Return ``(p, q, v)`` for every generator; grid2op order is pandapower order.

    ``v`` is returned in kV: pandapower reports it in pu, so it is multiplied by
    the nominal voltage of the bus each generator is connected to.
    """
    res_gen = self._grid.res_gen
    prod_p = res_gen["p_mw"].values.astype(dt_float)
    prod_q = res_gen["q_mvar"].values.astype(dt_float)
    vn_kv = self._grid.bus["vn_kv"].iloc[self.gen_to_subid].values.astype(dt_float)
    prod_v = res_gen["vm_pu"].values.astype(dt_float) * vn_kv
    return prod_p, prod_q, prod_v
def loads_info(self):
    """
    Return ``(p, q, v)`` for every load; grid2op order is pandapower order.

    ``v`` is returned in kV: the per-unit magnitude at the load bus is multiplied
    by that bus nominal voltage.
    """
    load_buses = self._grid.load["bus"].values
    load_p = self._grid.res_load["p_mw"].values.astype(dt_float)
    load_q = self._grid.res_load["q_mvar"].values.astype(dt_float)
    v_pu = self._grid.res_bus.loc[load_buses]["vm_pu"].values.astype(dt_float)
    vn_kv = self._grid.bus.loc[load_buses]["vn_kv"].values.astype(dt_float)
    return load_p, load_q, v_pu * vn_kv
def _aux_get_line_info(self, colname_powerline, colname_trafo):
"""
concatenate the information of powerlines and trafo using the convention that "powerlines go first"
"""
res = np.concatenate(
(
self._grid.res_line[colname_powerline].values,
self._grid.res_trafo[colname_trafo].values,
)
)
return res
def lines_or_info(self):
    """
    Information at the "origin" side of powerlines and transformers.

    Conventions (chosen in :func:`EducPandaPowerBackend.load_grid`):

    - origin side (grid2op) is the "from" side of pandapower powerlines
    - origin side (grid2op) is the "hv" side of pandapower trafos
    - powerlines come first, then transformers

    The current is converted from kA (pandapower) to A (grid2op).
    """
    p_or, q_or, v_or, a_or = (
        self._aux_get_line_info(col_line, col_trafo)
        for col_line, col_trafo in (
            ("p_from_mw", "p_hv_mw"),
            ("q_from_mvar", "q_hv_mvar"),
            ("vm_from_pu", "vm_hv_pu"),
            ("i_from_ka", "i_hv_ka"),
        )
    )
    return p_or, q_or, v_or, 1000 * a_or
def lines_ex_info(self):
    """
    Information at the "extremity" side of powerlines and transformers.

    Conventions (chosen in :func:`EducPandaPowerBackend.load_grid`):

    - extremity side (grid2op) is the "to" side of pandapower powerlines
    - extremity side (grid2op) is the "lv" side of pandapower trafos
    - powerlines come first, then transformers

    The current is converted from kA (pandapower) to A (grid2op).
    """
    p_ex, q_ex, v_ex, a_ex = (
        self._aux_get_line_info(col_line, col_trafo)
        for col_line, col_trafo in (
            ("p_to_mw", "p_lv_mw"),
            ("q_to_mvar", "q_lv_mvar"),
            ("vm_to_pu", "vm_lv_pu"),
            ("i_to_ka", "i_lv_ka"),
        )
    )
    return p_ex, q_ex, v_ex, 1000 * a_ex
# other less important method that you will need to implement
def get_line_status(self):
    """
    Connectivity status (``True`` = in service) of every powerline, real
    powerlines first then trafos.

    .. warning:: /!\\\\ This is a not a "main method" but you might want to implement
        it for a new backend (default implementation most likely not efficient at all). /!\\\\
    """
    status = np.concatenate(
        (
            self._grid.line["in_service"].values,
            self._grid.trafo["in_service"].values,
        )
    )
    return status.astype(dt_bool)
def _disconnect_line(self, id_):
"""
you might consider implementing it
.. warning:: /!\\\\ This is a not a "main method" but you might want to implement
it for a new backend (default implementation most likely not efficient at all). /!\\\\
"""
if id_ < self._nb_real_line_pandapower:
self._grid.line["in_service"].iloc[id_] = False
else:
self._grid.trafo["in_service"].iloc[
id_ - self._nb_real_line_pandapower
] = False
def copy(self):
    """
    Return a deep copy of this backend.

    .. warning:: /!\\\\ This is a not a "main method" but you might want to implement
        it for a new backend (default implementation most likely not efficient at all). /!\\\\

    Pandapower grids are pure python objects, so :func:`copy.deepcopy` on the whole
    backend (including :attr:`_grid`) is perfectly suited for the task.
    """
    return copy.deepcopy(self)
def reset(self, path=None, grid_filename=None):
    """
    you might consider implementing it

    .. warning:: /!\\\\ This is a not a "main method" but you might want to implement
        it for a new backend (default implementation most likely not efficient at all). /!\\\\

    Reset the grid to the original state
    """
    # set everything to its proper bus (this is because we used a specific way to represent
    # the topology for this example by choosing not to use switches, but to double the number of
    # buses per "substation"
    self._grid.line["from_bus"].iloc[:] = self.line_or_to_subid[
        : self._nb_real_line_pandapower
    ]
    self._grid.trafo["hv_bus"].iloc[:] = self.line_or_to_subid[
        self._nb_real_line_pandapower :
    ]
    self._grid.line["to_bus"].iloc[:] = self.line_ex_to_subid[
        : self._nb_real_line_pandapower
    ]
    self._grid.trafo["lv_bus"].iloc[:] = self.line_ex_to_subid[
        self._nb_real_line_pandapower :
    ]
    self._grid.load["bus"].iloc[:] = self.load_to_subid
    self._grid.gen["bus"].iloc[:] = self.gen_to_subid
    # originally everything is in service
    self._grid.line["in_service"].iloc[:] = True
    self._grid.trafo["in_service"].iloc[:] = True
    self._grid.load["in_service"].iloc[:] = True
    self._grid.gen["in_service"].iloc[:] = True
    # only "busbar 1" of each substation (the first n_sub buses) is active initially
    self._grid.bus["in_service"].iloc[: self.n_sub] = True
    self._grid.bus["in_service"].iloc[self.n_sub :] = False
def close(self):
    """
    Release the grid.

    .. warning:: /!\\\\ This is a not a "main method" but you might want to implement
        it for a new backend (default implementation most likely not efficient at all). /!\\\\

    Called when the :class:`grid2op;Environment` has terminated; this function only
    resets the backend to a state where no grid is loaded.
    """
    # drop the reference then leave a None placeholder so any later use fails
    # loudly instead of touching a stale grid
    del self._grid
    self._grid = None
| 24,433 | 40.767521 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/Backend/PandaPowerBackend.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os # load the python os default module
import sys # laod the python sys default module
import copy
import warnings
import numpy as np
import pandas as pd
import pandapower as pp
import scipy
from grid2op.dtypes import dt_int, dt_float, dt_bool
from grid2op.Backend.Backend import Backend
from grid2op.Action import BaseAction
from grid2op.Exceptions import *
# numba is an optional dependency: everything works without it, but the user is
# told how to install it because it can speed things up a lot
try:
    import numba

    NUMBA_ = True
except (ImportError, ModuleNotFoundError):
    NUMBA_ = False
    warnings.warn(
        "Numba cannot be loaded. You will gain possibly massive speed if installing it by "
        "\n\t{} -m pip install numba\n".format(sys.executable)
    )
class PandaPowerBackend(Backend):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        If you want to code a backend to use grid2op with another powerflow, you can get inspired
        from this class. Note However that implies knowing the behaviour
        of PandaPower.

    This module presents an example of an implementation of a `grid2op.Backend` when using the powerflow
    implementation "pandapower" available at `PandaPower <https://www.pandapower.org/>`_ for more details about
    this backend. This file is provided as an example of a proper :class:`grid2op.Backend.Backend` implementation.

    This backend currently does not work with 3 winding transformers and other exotic object.

    As explained in the `grid2op.Backend` module, every module must inherit the `grid2op.Backend` class.

    This class have more attributes that are used internally for faster information retrieval.

    Attributes
    ----------
    prod_pu_to_kv: :class:`numpy.array`, dtype:float
        The ratio that allow the conversion from per unit to kv for the generators

    load_pu_to_kv: :class:`numpy.array`, dtype:float
        The ratio that allow the conversion from per unit to kv for the loads

    lines_or_pu_to_kv: :class:`numpy.array`, dtype:float
        The ratio that allow the conversion from per unit to kv for the origin end of the powerlines

    lines_ex_pu_to_kv: :class:`numpy.array`, dtype:float
        The ratio that allow the conversion from per unit to kv for the extremity end of the powerlines

    p_or: :class:`numpy.array`, dtype:float
        The active power flowing at the origin end of each powerline

    q_or: :class:`numpy.array`, dtype:float
        The reactive power flowing at the origin end of each powerline

    v_or: :class:`numpy.array`, dtype:float
        The voltage magnitude at the origin bus of the powerline

    a_or: :class:`numpy.array`, dtype:float
        The current flowing at the origin end of each powerline

    p_ex: :class:`numpy.array`, dtype:float
        The active power flowing at the extremity end of each powerline

    q_ex: :class:`numpy.array`, dtype:float
        The reactive power flowing at the extremity end of each powerline

    a_ex: :class:`numpy.array`, dtype:float
        The current flowing at the extremity end of each powerline

    v_ex: :class:`numpy.array`, dtype:float
        The voltage magnitude at the extremity bus of the powerline

    Examples
    ---------
    The only recommended way to use this class is by passing an instance of a Backend into the "make"
    function of grid2op. Do not attempt to use a backend outside of this specific usage.

    .. code-block:: python

        import grid2op
        from grid2op.Backend import PandaPowerBackend
        backend = PandaPowerBackend()

        env = grid2op.make(backend=backend)
        # and use "env" as any open ai gym environment.

    """
def __init__(
    self,
    detailed_infos_for_cascading_failures=False,
    lightsim2grid=False,  # use lightsim2grid as pandapower powerflow solver
    dist_slack=False,
    max_iter=10,
    can_be_copied=True,
    with_numba=NUMBA_,
):
    """Build the backend and initialize every internal buffer to its "not loaded" value."""
    # the extra kwargs given to Backend.__init__ are stored (without copy) in the
    # base class and reused when another backend has to be created, e.g. by the Runner
    Backend.__init__(
        self,
        detailed_infos_for_cascading_failures=detailed_infos_for_cascading_failures,
        can_be_copied=can_be_copied,
        lightsim2grid=lightsim2grid,
        dist_slack=dist_slack,
        max_iter=max_iter,
        with_numba=with_numba
    )
    self.with_numba = with_numba

    # per-element ratios used to convert per unit voltages back to kV
    self.prod_pu_to_kv = None
    self.load_pu_to_kv = None
    self.lines_or_pu_to_kv = None
    self.lines_ex_pu_to_kv = None
    self.storage_pu_to_kv = None

    # cached powerflow results (filled after each call to runpf)
    self.p_or = None
    self.q_or = None
    self.v_or = None
    self.a_or = None
    self.p_ex = None
    self.q_ex = None
    self.v_ex = None
    self.a_ex = None
    self.load_p = None
    self.load_q = None
    self.load_v = None
    self.storage_p = None
    self.storage_q = None
    self.storage_v = None
    self.prod_p = None
    self.prod_q = None
    self.prod_v = None
    self.line_status = None

    # NOTE(review): the "flat" assignment is dead code, it is immediately
    # overwritten by "results" on the next line
    self._pf_init = "flat"
    self._pf_init = "results"
    self._nb_bus_before = None  # number of active bus at the preceding step

    self.thermal_limit_a = None

    self._iref_slack = None
    self._id_bus_added = None
    self._fact_mult_gen = -1
    self._what_object_where = None
    self._number_true_line = -1
    self._corresp_name_fun = {}
    self._get_vector_inj = {}
    self.dim_topo = -1
    self._vars_action = BaseAction.attr_list_vect
    self._vars_action_set = BaseAction.attr_list_vect
    self.cst_1 = dt_float(1.0)
    self._topo_vect = None
    self.slack_id = None

    # function to restore some information
    self.__nb_bus_before = None  # number of substation in the powergrid
    self.__nb_powerline = (
        None  # number of powerline (real powerline, not transformer)
    )
    self._init_bus_load = None
    self._init_bus_gen = None
    self._init_bus_lor = None
    self._init_bus_lex = None
    # NOTE(review): _get_vector_inj was already set to {} above; this second
    # assignment resets it to None (presumably filled later by load_grid -- confirm)
    self._get_vector_inj = None
    self._big_topo_to_obj = None
    self._big_topo_to_backend = None
    self.__pp_backend_initial_grid = None  # initial state to facilitate the "reset"

    # Mapping some fun to apply bus updates
    self._type_to_bus_set = [
        self._apply_load_bus,
        self._apply_gen_bus,
        self._apply_lor_bus,
        self._apply_trafo_hv,
        self._apply_lex_bus,
        self._apply_trafo_lv,
    ]

    self.tol = None  # this is NOT the pandapower tolerance !!!! this is used to check if a storage unit
    # produce / absorbs anything

    # TODO storage doc (in grid2op rst) of the backend
    self.can_output_theta = True  # I support the voltage angle
    self.theta_or = None
    self.theta_ex = None
    self.load_theta = None
    self.gen_theta = None
    self.storage_theta = None

    self._lightsim2grid = lightsim2grid
    self._dist_slack = dist_slack
    self._max_iter = max_iter
def _check_for_non_modeled_elements(self):
"""This function check for elements in the pandapower grid that will have no impact on grid2op.
See the full list of grid2op modeled elements in :ref:`modeled-elements-module`
"""
for el_nm in [
"trafo3w",
"sgen",
"switch",
"motor",
"asymmetric_load",
"asymmetric_sgen",
"impedance",
"ward",
"xward",
"dcline",
"measurement",
]:
if el_nm in self._grid:
if self._grid[el_nm].shape[0]:
warnings.warn(
f'There are "{el_nm}" in the pandapower grid. These '
f"elements are not modeled on grid2op side (the environment will "
f"work, but you won't be able to modify them)."
)
def get_theta(self):
    """
    Return the voltage angles (in degree) computed by the last powerflow.

    Returns
    -------
    theta_or: ``numpy.ndarray``
        For each orgin side of powerline, gives the voltage angle (in degree)
    theta_ex: ``numpy.ndarray``
        For each extremity side of powerline, gives the voltage angle (in degree)
    load_theta: ``numpy.ndarray``
        Gives the voltage angle (in degree) to the bus at which each load is connected
    gen_theta: ``numpy.ndarray``
        Gives the voltage angle (in degree) to the bus at which each generator is connected
    storage_theta: ``numpy.ndarray``
        Gives the voltage angle (in degree) to the bus at which each storage unit is connected
    """
    # multiplying by cst_1 (== 1.0) hands a fresh object to the caller,
    # so the internal arrays cannot be mutated from outside
    internal_angles = (
        self.theta_or,
        self.theta_ex,
        self.load_theta,
        self.gen_theta,
        self.storage_theta,
    )
    return tuple(self.cst_1 * angle for angle in internal_angles)
def get_nb_active_bus(self):
    """
    INTERNAL
    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
    Compute the amount of buses "in service" eg with at least a powerline connected to it.

    Returns
    -------
    res: :class:`int`
        The total number of active buses.
    """
    # "in_service" is a boolean column: summing it counts the active buses
    in_service_flags = self._grid.bus["in_service"]
    return np.sum(in_service_flags)
@staticmethod
def _load_grid_load_p_mw(grid):
return grid.load["p_mw"]
@staticmethod
def _load_grid_load_q_mvar(grid):
return grid.load["q_mvar"]
@staticmethod
def _load_grid_gen_p_mw(grid):
return grid.gen["p_mw"]
@staticmethod
def _load_grid_gen_vm_pu(grid):
return grid.gen["vm_pu"]
def reset(self, path=None, grid_filename=None):
    """
    INTERNAL
    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
    Reload the grid.
    For pandapower it is faster to deep-copy the pristine snapshot saved at the
    end of ``load_grid`` than to call ``load_grid`` (and parse the file) again.
    """
    with warnings.catch_warnings():
        # deepcopy of a pandapower net triggers FutureWarning on some
        # pandapower / pandas version combinations
        warnings.simplefilter("ignore", FutureWarning)
        self._grid = copy.deepcopy(self.__pp_backend_initial_grid)
    # invalidate every cached powerflow result
    self._reset_all_nan()
    self._topo_vect[:] = self._get_topo_vect()
    self.comp_time = 0.0
def load_grid(self, path=None, filename=None):
    """
    INTERNAL
    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
    Load the _grid, and initialize all the member of the class. Note that in order to perform topological
    modification of the substation of the underlying powergrid, some buses are added to the test case loaded. They
    are set as "out of service" unless a topological action acts on these specific substations.
    """
    # resolve the full path of the grid file from the two (optional) arguments
    if path is None and filename is None:
        raise RuntimeError(
            "You must provide at least one of path or file to load a powergrid."
        )
    if path is None:
        full_path = filename
    elif filename is None:
        full_path = path
    else:
        full_path = os.path.join(path, filename)
    if not os.path.exists(full_path):
        raise RuntimeError('There is no powergrid at "{}"'.format(full_path))
    with warnings.catch_warnings():
        # remove deprecationg warnings for old version of pandapower
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        warnings.filterwarnings("ignore", category=FutureWarning)
        self._grid = pp.from_json(full_path)
    self._check_for_non_modeled_elements()
    # add the slack bus that is often not modeled as a generator, but i need it for this backend to work
    bus_gen_added = None
    i_ref = None
    self._iref_slack = None
    self._id_bus_added = None
    # run an initial powerflow: it fills self._grid._ppc, which is needed
    # below to detect whether the slack is modeled as a generator
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            pp.runpp(
                self._grid,
                numba=self.with_numba,
                lightsim2grid=self._lightsim2grid,
                distributed_slack=self._dist_slack,
                max_iteration=self._max_iter,
            )
        except pp.powerflow.LoadflowNotConverged:
            # fall back on a DC powerflow when the AC one diverges
            pp.rundcpp(
                self._grid,
                numba=self.with_numba,
                lightsim2grid=self._lightsim2grid,
                distributed_slack=self._dist_slack,
                max_iteration=self._max_iter,
            )
    # the "slack_weight" column only exists in recent pandapower versions
    new_pp_version = False
    if not "slack_weight" in self._grid.gen:
        self._grid.gen["slack_weight"] = 1.0
    else:
        new_pp_version = True
    if np.all(~self._grid.gen["slack"]):
        # there are not defined slack bus on the data, i need to hack it up a little bit
        pd2ppc = self._grid._pd2ppc_lookups["bus"]  # pd2ppc[pd_id] = ppc_id
        ppc2pd = np.argsort(pd2ppc)  # ppc2pd[ppc_id] = pd_id
        for gen_id_pp, el in enumerate(self._grid._ppc["gen"][:, 0]):
            # a ppc generator whose bus does not match any pandapower gen bus
            # is (presumably) the slack of an "ext_grid" — TODO confirm for
            # every grid format pandapower can read
            if (
                int(el)
                not in self._grid._pd2ppc_lookups["bus"][
                    self._grid.gen["bus"].values
                ]
            ):
                if bus_gen_added is not None:
                    # TODO handle better when distributed slack bus
                    # raise RuntimeError("Impossible to recognize the powergrid")
                    warnings.warn(
                        "Your grid has a distributed slack bus. Just so you know, it is not"
                        "fully supported at the moment. (it will be converted to a single slack bus)"
                    )
                bus_gen_added = ppc2pd[int(el)]
                # see https://matpower.org/docs/ref/matpower5.0/idx_gen.html for details on the comprehension of self._grid._ppc
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore")
                    # some warnings are issued depending on pp and pandas version
                    if new_pp_version:
                        id_added = pp.create_gen(
                            self._grid,
                            bus_gen_added,
                            p_mw=self._grid._ppc["gen"][gen_id_pp, 1],
                            vm_pu=self._grid._ppc["gen"][gen_id_pp, 5],
                            min_p_mw=self._grid._ppc["gen"][gen_id_pp, 9],
                            max_p_mw=self._grid._ppc["gen"][gen_id_pp, 8],
                            max_q_mvar=self._grid._ppc["gen"][gen_id_pp, 3],
                            min_q_mvar=self._grid._ppc["gen"][gen_id_pp, 4],
                            slack=i_ref is None,
                            slack_weight=1.0,
                            controllable=True,
                        )
                    else:
                        id_added = pp.create_gen(
                            self._grid,
                            bus_gen_added,
                            p_mw=self._grid._ppc["gen"][gen_id_pp, 1],
                            vm_pu=self._grid._ppc["gen"][gen_id_pp, 5],
                            min_p_mw=self._grid._ppc["gen"][gen_id_pp, 9],
                            max_p_mw=self._grid._ppc["gen"][gen_id_pp, 8],
                            max_q_mvar=self._grid._ppc["gen"][gen_id_pp, 3],
                            min_q_mvar=self._grid._ppc["gen"][gen_id_pp, 4],
                            slack=i_ref is None,
                            controllable=True,
                        )
                if i_ref is None:
                    i_ref = gen_id_pp
        self._iref_slack = i_ref
        self._id_bus_added = id_added  # self._grid.gen.shape[0]
        # TODO here i force the distributed slack bus too, by removing the other from the ext_grid...
        self._grid.ext_grid = self._grid.ext_grid.iloc[:1]
    else:
        # the grid already models its slack as a generator: just remember it
        self.slack_id = np.where(self._grid.gen["slack"])[0]
    # run a second powerflow, now that the slack is properly modeled
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            pp.runpp(
                self._grid,
                numba=self.with_numba,
                lightsim2grid=self._lightsim2grid,
                distributed_slack=self._dist_slack,
                max_iteration=self._max_iter,
            )
        except pp.powerflow.LoadflowNotConverged:
            pp.rundcpp(
                self._grid,
                numba=self.with_numba,
                lightsim2grid=self._lightsim2grid,
                distributed_slack=self._dist_slack,
                max_iteration=self._max_iter,
            )
    self.__nb_bus_before = self._grid.bus.shape[0]
    self.__nb_powerline = self._grid.line.shape[0]
    # remember the original bus of every element: used in "apply_action" to
    # convert local bus ids (1 / 2) into pandapower bus ids
    self._init_bus_load = self.cst_1 * self._grid.load["bus"].values
    self._init_bus_gen = self.cst_1 * self._grid.gen["bus"].values
    self._init_bus_lor = self.cst_1 * self._grid.line["from_bus"].values
    self._init_bus_lex = self.cst_1 * self._grid.line["to_bus"].values
    t_for = self.cst_1 * self._grid.trafo["hv_bus"].values
    t_fex = self.cst_1 * self._grid.trafo["lv_bus"].values
    # trafos are appended after the real powerlines ("lines" in grid2op = lines + trafos)
    self._init_bus_lor = np.concatenate((self._init_bus_lor, t_for)).astype(dt_int)
    self._init_bus_lex = np.concatenate((self._init_bus_lex, t_fex)).astype(dt_int)
    self._grid["ext_grid"]["va_degree"] = 0.0
    # this has the effect to divide by 2 the active power in the added generator, if this generator and the "slack bus"
    # one are connected to the same bus.
    # if not, it must not be done. So basically, i create a vector for which p and q for generator must be multiply
    self._fact_mult_gen = np.ones(self._grid.gen.shape[0])
    # self._fact_mult_gen[-1] += 1
    # now extract the powergrid
    self.n_line = copy.deepcopy(self._grid.line.shape[0]) + copy.deepcopy(
        self._grid.trafo.shape[0]
    )
    # element names: read them from the grid when fully provided, otherwise
    # generate deterministic names from the connectivity
    if (
        "name" in self._grid.line.columns
        and not self._grid.line["name"].isnull().values.any()
    ):
        self.name_line = [name for name in self._grid.line["name"]]
    else:
        self.name_line = [
            "{from_bus}_{to_bus}_{id_powerline_me}".format(**row, id_powerline_me=i)
            for i, (_, row) in enumerate(self._grid.line.iterrows())
        ]
    if (
        "name" in self._grid.trafo.columns
        and not self._grid.trafo["name"].isnull().values.any()
    ):
        self.name_line += [name_traf for name_traf in self._grid.trafo["name"]]
    else:
        transfo = [
            ("{hv_bus}".format(**row), "{lv_bus}".format(**row))
            for i, (_, row) in enumerate(self._grid.trafo.iterrows())
        ]
        transfo = [sorted(el) for el in transfo]
        self.name_line += [
            "{}_{}_{}".format(*el, i + self._grid.line.shape[0])
            for i, el in enumerate(transfo)
        ]
    self.name_line = np.array(self.name_line)
    self.n_gen = copy.deepcopy(self._grid.gen.shape[0])
    if (
        "name" in self._grid.gen.columns
        and not self._grid.gen["name"].isnull().values.any()
    ):
        self.name_gen = [name_g for name_g in self._grid.gen["name"]]
    else:
        self.name_gen = [
            "gen_{bus}_{index_gen}".format(**row, index_gen=i)
            for i, (_, row) in enumerate(self._grid.gen.iterrows())
        ]
    self.name_gen = np.array(self.name_gen)
    self.n_load = copy.deepcopy(self._grid.load.shape[0])
    if (
        "name" in self._grid.load.columns
        and not self._grid.load["name"].isnull().values.any()
    ):
        self.name_load = [nl for nl in self._grid.load["name"]]
    else:
        self.name_load = [
            "load_{bus}_{index_load}".format(**row, index_load=i)
            for i, (_, row) in enumerate(self._grid.load.iterrows())
        ]
    self.name_load = np.array(self.name_load)
    self.n_storage = copy.deepcopy(self._grid.storage.shape[0])
    if self.n_storage == 0:
        # grids without storage units are supported through "set_no_storage"
        self.set_no_storage()
    else:
        if (
            "name" in self._grid.storage.columns
            and not self._grid.storage["name"].isnull().values.any()
        ):
            self.name_storage = [nl for nl in self._grid.storage["name"]]
        else:
            self.name_storage = [
                "storage_{bus}_{index_sto}".format(**row, index_sto=i)
                for i, (_, row) in enumerate(self._grid.storage.iterrows())
            ]
        self.name_storage = np.array(self.name_storage)
    self.n_sub = copy.deepcopy(self._grid.bus.shape[0])
    self.name_sub = ["sub_{}".format(i) for i, row in self._grid.bus.iterrows()]
    self.name_sub = np.array(self.name_sub)
    # "hack" to handle topological changes, for now only 2 buses per substation
    # the duplicated buses (ids offset by n_sub) represent "busbar 2" of each substation
    add_topo = copy.deepcopy(self._grid.bus)
    add_topo.index += add_topo.shape[0]
    add_topo["in_service"] = False
    self._grid.bus = pd.concat((self._grid.bus, add_topo))
    self._init_private_attrs()
def _init_private_attrs(self):
    """Compute the private bookkeeping of the backend (element to substation
    mapping, per-unit conversion factors, result placeholders, big-topo lookup
    tables) once the pandapower grid is loaded. Called at the end of
    ``load_grid``.
    """
    # number of elements per substation
    self.sub_info = np.zeros(self.n_sub, dtype=dt_int)
    self.load_to_subid = np.zeros(self.n_load, dtype=dt_int)
    self.gen_to_subid = np.zeros(self.n_gen, dtype=dt_int)
    self.line_or_to_subid = np.zeros(self.n_line, dtype=dt_int)
    self.line_ex_to_subid = np.zeros(self.n_line, dtype=dt_int)
    self.load_to_sub_pos = np.zeros(self.n_load, dtype=dt_int)
    self.gen_to_sub_pos = np.zeros(self.n_gen, dtype=dt_int)
    self.line_or_to_sub_pos = np.zeros(self.n_line, dtype=dt_int)
    self.line_ex_to_sub_pos = np.zeros(self.n_line, dtype=dt_int)
    if self.n_storage > 0:
        self.storage_to_subid = np.zeros(self.n_storage, dtype=dt_int)
        self.storage_to_sub_pos = np.zeros(self.n_storage, dtype=dt_int)
    # next free position within each substation; incremented as elements are assigned
    pos_already_used = np.zeros(self.n_sub, dtype=dt_int)
    self._what_object_where = [[] for _ in range(self.n_sub)]
    # real powerlines first...
    for i, (_, row) in enumerate(self._grid.line.iterrows()):
        sub_or_id = int(row["from_bus"])
        sub_ex_id = int(row["to_bus"])
        self.sub_info[sub_or_id] += 1
        self.sub_info[sub_ex_id] += 1
        self.line_or_to_subid[i] = sub_or_id
        self.line_ex_to_subid[i] = sub_ex_id
        self.line_or_to_sub_pos[i] = pos_already_used[sub_or_id]
        pos_already_used[sub_or_id] += 1
        self.line_ex_to_sub_pos[i] = pos_already_used[sub_ex_id]
        pos_already_used[sub_ex_id] += 1
        self._what_object_where[sub_or_id].append(("line", "from_bus", i))
        self._what_object_where[sub_ex_id].append(("line", "to_bus", i))
    # ...then transformers, seen by grid2op as extra "powerlines" whose ids
    # are offset by the number of real lines
    lag_transfo = self._grid.line.shape[0]
    self._number_true_line = copy.deepcopy(self._grid.line.shape[0])
    for i, (_, row) in enumerate(self._grid.trafo.iterrows()):
        sub_or_id = int(row["hv_bus"])
        sub_ex_id = int(row["lv_bus"])
        self.sub_info[sub_or_id] += 1
        self.sub_info[sub_ex_id] += 1
        self.line_or_to_subid[i + lag_transfo] = sub_or_id
        self.line_ex_to_subid[i + lag_transfo] = sub_ex_id
        self.line_or_to_sub_pos[i + lag_transfo] = pos_already_used[sub_or_id]
        pos_already_used[sub_or_id] += 1
        self.line_ex_to_sub_pos[i + lag_transfo] = pos_already_used[sub_ex_id]
        pos_already_used[sub_ex_id] += 1
        self._what_object_where[sub_or_id].append(("trafo", "hv_bus", i))
        self._what_object_where[sub_ex_id].append(("trafo", "lv_bus", i))
    for i, (_, row) in enumerate(self._grid.gen.iterrows()):
        sub_id = int(row["bus"])
        self.sub_info[sub_id] += 1
        self.gen_to_subid[i] = sub_id
        self.gen_to_sub_pos[i] = pos_already_used[sub_id]
        pos_already_used[sub_id] += 1
        self._what_object_where[sub_id].append(("gen", "bus", i))
    for i, (_, row) in enumerate(self._grid.load.iterrows()):
        sub_id = int(row["bus"])
        self.sub_info[sub_id] += 1
        self.load_to_subid[i] = sub_id
        self.load_to_sub_pos[i] = pos_already_used[sub_id]
        pos_already_used[sub_id] += 1
        self._what_object_where[sub_id].append(("load", "bus", i))
    if self.n_storage > 0:
        for i, (_, row) in enumerate(self._grid.storage.iterrows()):
            sub_id = int(row["bus"])
            self.sub_info[sub_id] += 1
            self.storage_to_subid[i] = sub_id
            self.storage_to_sub_pos[i] = pos_already_used[sub_id]
            pos_already_used[sub_id] += 1
            self._what_object_where[sub_id].append(("storage", "bus", i))
    self.dim_topo = np.sum(self.sub_info)
    self._compute_pos_big_topo()
    # utilities for imeplementing apply_action
    self._corresp_name_fun = {}
    self._get_vector_inj = {}
    self._get_vector_inj[
        "load_p"
    ] = self._load_grid_load_p_mw  # lambda grid: grid.load["p_mw"]
    self._get_vector_inj[
        "load_q"
    ] = self._load_grid_load_q_mvar  # lambda grid: grid.load["q_mvar"]
    self._get_vector_inj[
        "prod_p"
    ] = self._load_grid_gen_p_mw  # lambda grid: grid.gen["p_mw"]
    self._get_vector_inj[
        "prod_v"
    ] = self._load_grid_gen_vm_pu  # lambda grid: grid.gen["vm_pu"]
    # per-unit -> kV conversion factors: the nominal voltage of the
    # substation each element is connected to
    self.load_pu_to_kv = self._grid.bus["vn_kv"][self.load_to_subid].values.astype(
        dt_float
    )
    self.prod_pu_to_kv = self._grid.bus["vn_kv"][self.gen_to_subid].values.astype(
        dt_float
    )
    self.lines_or_pu_to_kv = self._grid.bus["vn_kv"][
        self.line_or_to_subid
    ].values.astype(dt_float)
    self.lines_ex_pu_to_kv = self._grid.bus["vn_kv"][
        self.line_ex_to_subid
    ].values.astype(dt_float)
    self.storage_pu_to_kv = self._grid.bus["vn_kv"][
        self.storage_to_subid
    ].values.astype(dt_float)
    # thermal limit in A: lines give it directly (max_i_ka in kA, hence the
    # 1000 factor), trafos are converted from their MVA rating
    self.thermal_limit_a = 1000 * np.concatenate(
        (
            self._grid.line["max_i_ka"].values,
            self._grid.trafo["sn_mva"].values
            / (np.sqrt(3) * self._grid.trafo["vn_hv_kv"].values),
        )
    )
    self.thermal_limit_a = self.thermal_limit_a.astype(dt_float)
    # placeholders for the powerflow results, filled by "runpf"
    self.p_or = np.full(self.n_line, dtype=dt_float, fill_value=np.NaN)
    self.q_or = np.full(self.n_line, dtype=dt_float, fill_value=np.NaN)
    self.v_or = np.full(self.n_line, dtype=dt_float, fill_value=np.NaN)
    self.a_or = np.full(self.n_line, dtype=dt_float, fill_value=np.NaN)
    self.p_ex = np.full(self.n_line, dtype=dt_float, fill_value=np.NaN)
    self.q_ex = np.full(self.n_line, dtype=dt_float, fill_value=np.NaN)
    self.v_ex = np.full(self.n_line, dtype=dt_float, fill_value=np.NaN)
    self.a_ex = np.full(self.n_line, dtype=dt_float, fill_value=np.NaN)
    self.line_status = np.full(self.n_line, dtype=dt_bool, fill_value=np.NaN)
    self.load_p = np.full(self.n_load, dtype=dt_float, fill_value=np.NaN)
    self.load_q = np.full(self.n_load, dtype=dt_float, fill_value=np.NaN)
    self.load_v = np.full(self.n_load, dtype=dt_float, fill_value=np.NaN)
    self.prod_p = np.full(self.n_gen, dtype=dt_float, fill_value=np.NaN)
    self.prod_v = np.full(self.n_gen, dtype=dt_float, fill_value=np.NaN)
    self.prod_q = np.full(self.n_gen, dtype=dt_float, fill_value=np.NaN)
    self.storage_p = np.full(self.n_storage, dtype=dt_float, fill_value=np.NaN)
    self.storage_q = np.full(self.n_storage, dtype=dt_float, fill_value=np.NaN)
    self.storage_v = np.full(self.n_storage, dtype=dt_float, fill_value=np.NaN)
    self._nb_bus_before = None
    # shunts data
    self.n_shunt = self._grid.shunt.shape[0]
    self.shunt_to_subid = np.zeros(self.n_shunt, dtype=dt_int) - 1
    name_shunt = []
    # TODO read name from the grid if provided
    for i, (_, row) in enumerate(self._grid.shunt.iterrows()):
        bus = int(row["bus"])
        name_shunt.append("shunt_{bus}_{index_shunt}".format(**row, index_shunt=i))
        self.shunt_to_subid[i] = bus
    self.name_shunt = np.array(name_shunt)
    self._sh_vnkv = self._grid.bus["vn_kv"][self.shunt_to_subid].values.astype(
        dt_float
    )
    self.shunts_data_available = True
    # store the topoid -> objid
    self._big_topo_to_obj = [(None, None) for _ in range(self.dim_topo)]
    nm_ = "load"
    for load_id, pos_big_topo in enumerate(self.load_pos_topo_vect):
        self._big_topo_to_obj[pos_big_topo] = (load_id, nm_)
    nm_ = "gen"
    for gen_id, pos_big_topo in enumerate(self.gen_pos_topo_vect):
        self._big_topo_to_obj[pos_big_topo] = (gen_id, nm_)
    nm_ = "lineor"
    for l_id, pos_big_topo in enumerate(self.line_or_pos_topo_vect):
        self._big_topo_to_obj[pos_big_topo] = (l_id, nm_)
    nm_ = "lineex"
    for l_id, pos_big_topo in enumerate(self.line_ex_pos_topo_vect):
        self._big_topo_to_obj[pos_big_topo] = (l_id, nm_)
    # store the topoid -> objid
    # last element of each tuple is an index into self._type_to_bus_set
    # (0=load, 1=gen, 2=line or, 3=trafo hv, 4=line ex, 5=trafo lv)
    self._big_topo_to_backend = [(None, None, None) for _ in range(self.dim_topo)]
    for load_id, pos_big_topo in enumerate(self.load_pos_topo_vect):
        self._big_topo_to_backend[pos_big_topo] = (load_id, load_id, 0)
    for gen_id, pos_big_topo in enumerate(self.gen_pos_topo_vect):
        self._big_topo_to_backend[pos_big_topo] = (gen_id, gen_id, 1)
    for l_id, pos_big_topo in enumerate(self.line_or_pos_topo_vect):
        if l_id < self.__nb_powerline:
            self._big_topo_to_backend[pos_big_topo] = (l_id, l_id, 2)
        else:
            self._big_topo_to_backend[pos_big_topo] = (
                l_id,
                l_id - self.__nb_powerline,
                3,
            )
    for l_id, pos_big_topo in enumerate(self.line_ex_pos_topo_vect):
        if l_id < self.__nb_powerline:
            self._big_topo_to_backend[pos_big_topo] = (l_id, l_id, 4)
        else:
            self._big_topo_to_backend[pos_big_topo] = (
                l_id,
                l_id - self.__nb_powerline,
                5,
            )
    self.theta_or = np.full(self.n_line, fill_value=np.NaN, dtype=dt_float)
    self.theta_ex = np.full(self.n_line, fill_value=np.NaN, dtype=dt_float)
    self.load_theta = np.full(self.n_load, fill_value=np.NaN, dtype=dt_float)
    self.gen_theta = np.full(self.n_gen, fill_value=np.NaN, dtype=dt_float)
    self.storage_theta = np.full(self.n_storage, fill_value=np.NaN, dtype=dt_float)
    self._topo_vect = self._get_topo_vect()
    self.tol = 1e-5  # this is NOT the pandapower tolerance !!!! this is used to check if a storage unit
    # produce / absorbs anything
    # Create a deep copy of itself in the initial state
    # Store it under super private attribute
    with warnings.catch_warnings():
        # raised on some versions of pandapower / pandas
        warnings.simplefilter("ignore", FutureWarning)
        self.__pp_backend_initial_grid = copy.deepcopy(
            self._grid
        )  # will be initialized in the "assert_grid_correct"
def storage_deact_for_backward_comaptibility(self):
    """Recompute the private attributes after storage units have been removed
    (used when loading environments that predate storage-unit support).
    NOTE: the typo in the name ("comaptibility") is part of the public
    interface and is kept on purpose — renaming it would break callers.
    """
    self._init_private_attrs()
def _convert_id_topo(self, id_big_topo):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
convert an id of the big topo vector into:
- the id of the object in its "only object" (eg if id_big_topo represents load 2, then it will be 2)
- the type of object among: "load", "gen", "lineor" and "lineex"
"""
return self._big_topo_to_obj[id_big_topo]
def apply_action(self, backendAction=None):
    """
    INTERNAL
    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
    Specific implementation of the method to apply an action modifying a powergrid in the pandapower format.
    """
    if backendAction is None:
        return
    cls = type(self)
    # unpack the "backend action": bus activation flags, injections,
    # topology changes and shunt modifications
    (
        active_bus,
        (prod_p, prod_v, load_p, load_q, storage),
        topo__,
        shunts__,
    ) = backendAction()
    # handle bus status
    # busbar 1 of substation i is pandapower bus i, busbar 2 is bus i + n_sub
    bus_is = self._grid.bus["in_service"]
    for i, (bus1_status, bus2_status) in enumerate(active_bus):
        bus_is[i] = bus1_status  # no iloc for bus, don't ask me why please :-/
        bus_is[i + self.__nb_bus_before] = bus2_status
    # injections: only write the entries flagged as "changed" by the action
    tmp_prod_p = self._get_vector_inj["prod_p"](self._grid)
    if np.any(prod_p.changed):
        tmp_prod_p.iloc[prod_p.changed] = prod_p.values[prod_p.changed]
    tmp_prod_v = self._get_vector_inj["prod_v"](self._grid)
    if np.any(prod_v.changed):
        # setpoints are given in kV by grid2op, pandapower expects p.u.
        tmp_prod_v.iloc[prod_v.changed] = (
            prod_v.values[prod_v.changed] / self.prod_pu_to_kv[prod_v.changed]
        )
    if self._id_bus_added is not None and prod_v.changed[self._id_bus_added]:
        # handling of the slack bus, where "2" generators are present.
        self._grid["ext_grid"]["vm_pu"] = 1.0 * tmp_prod_v[self._id_bus_added]
    tmp_load_p = self._get_vector_inj["load_p"](self._grid)
    if np.any(load_p.changed):
        tmp_load_p.iloc[load_p.changed] = load_p.values[load_p.changed]
    tmp_load_q = self._get_vector_inj["load_q"](self._grid)
    if np.any(load_q.changed):
        tmp_load_q.iloc[load_q.changed] = load_q.values[load_q.changed]
    if self.n_storage > 0:
        # active setpoint
        tmp_stor_p = self._grid.storage["p_mw"]
        if np.any(storage.changed):
            tmp_stor_p.iloc[storage.changed] = storage.values[storage.changed]
        # topology of the storage
        stor_bus = backendAction.get_storages_bus()
        new_bus_id = stor_bus.values[stor_bus.changed]  # id of the busbar 1 or 2 if
        activated = new_bus_id > 0  # mask of storage that have been activated
        new_bus_num = (
            self.storage_to_subid[stor_bus.changed] + (new_bus_id - 1) * self.n_sub
        )  # bus number
        # deactivated storages stay attached to their original bus
        new_bus_num[~activated] = self.storage_to_subid[stor_bus.changed][
            ~activated
        ]
        self._grid.storage["in_service"].values[stor_bus.changed] = activated
        self._grid.storage["bus"].values[stor_bus.changed] = new_bus_num
        self._topo_vect[self.storage_pos_topo_vect[stor_bus.changed]] = new_bus_num
        # -1 in the topo vector flags a disconnected element
        self._topo_vect[
            self.storage_pos_topo_vect[stor_bus.changed][~activated]
        ] = -1
    if type(backendAction).shunts_data_available:
        shunt_p, shunt_q, shunt_bus = shunts__
        if np.any(shunt_p.changed):
            self._grid.shunt["p_mw"].iloc[shunt_p.changed] = shunt_p.values[
                shunt_p.changed
            ]
        if np.any(shunt_q.changed):
            self._grid.shunt["q_mvar"].iloc[shunt_q.changed] = shunt_q.values[
                shunt_q.changed
            ]
        if np.any(shunt_bus.changed):
            sh_service = shunt_bus.values[shunt_bus.changed] != -1
            self._grid.shunt["in_service"].iloc[shunt_bus.changed] = sh_service
            # NOTE(review): sh_service is indexed over the changed shunts only
            # while shunt_bus.changed covers all shunts — verify this "&" is
            # well-defined when only a subset of shunts changed
            chg_and_in_service = sh_service & shunt_bus.changed
            self._grid.shunt["bus"].loc[chg_and_in_service] = cls.local_bus_to_global(shunt_bus.values[chg_and_in_service],
                                                                                      cls.shunt_to_subid[chg_and_in_service])
    # i made at least a real change, so i implement it in the backend
    for id_el, new_bus in topo__:
        id_el_backend, id_topo, type_obj = self._big_topo_to_backend[id_el]
        if type_obj is not None:
            # storage unit are handled elsewhere
            self._type_to_bus_set[type_obj](new_bus, id_el_backend, id_topo)
def _apply_load_bus(self, new_bus, id_el_backend, id_topo):
new_bus_backend = type(self).local_bus_to_global_int(
new_bus, self._init_bus_load[id_el_backend]
)
if new_bus_backend >= 0:
self._grid.load["bus"].iat[id_el_backend] = new_bus_backend
self._grid.load["in_service"].iat[id_el_backend] = True
else:
self._grid.load["in_service"].iat[id_el_backend] = False
self._grid.load["bus"].iat[id_el_backend] = -1
def _apply_gen_bus(self, new_bus, id_el_backend, id_topo):
new_bus_backend = type(self).local_bus_to_global_int(
new_bus, self._init_bus_gen[id_el_backend]
)
if new_bus_backend >= 0:
self._grid.gen["bus"].iat[id_el_backend] = new_bus_backend
self._grid.gen["in_service"].iat[id_el_backend] = True
# remember in this case slack bus is actually 2 generators for pandapower !
if (
id_el_backend == (self._grid.gen.shape[0] - 1)
and self._iref_slack is not None
):
self._grid.ext_grid["bus"].iat[0] = new_bus_backend
else:
self._grid.gen["in_service"].iat[id_el_backend] = False
self._grid.gen["bus"].iat[id_el_backend] = -1
# in this case the slack bus cannot be disconnected
def _apply_lor_bus(self, new_bus, id_el_backend, id_topo):
new_bus_backend = type(self).local_bus_to_global_int(
new_bus, self._init_bus_lor[id_el_backend]
)
self.change_bus_powerline_or(id_el_backend, new_bus_backend)
def change_bus_powerline_or(self, id_powerline_backend, new_bus_backend):
    """Move the "from" side of a powerline; a negative bus disconnects the line."""
    line_table = self._grid.line
    if new_bus_backend < 0:
        line_table["in_service"].iat[id_powerline_backend] = False
    else:
        line_table["in_service"].iat[id_powerline_backend] = True
        line_table["from_bus"].iat[id_powerline_backend] = new_bus_backend
def _apply_lex_bus(self, new_bus, id_el_backend, id_topo):
new_bus_backend = type(self).local_bus_to_global_int(
new_bus, self._init_bus_lex[id_el_backend]
)
self.change_bus_powerline_ex(id_el_backend, new_bus_backend)
def change_bus_powerline_ex(self, id_powerline_backend, new_bus_backend):
    """Move the "to" side of a powerline; a negative bus disconnects the line."""
    line_table = self._grid.line
    if new_bus_backend < 0:
        line_table["in_service"].iat[id_powerline_backend] = False
    else:
        line_table["in_service"].iat[id_powerline_backend] = True
        line_table["to_bus"].iat[id_powerline_backend] = new_bus_backend
def _apply_trafo_hv(self, new_bus, id_el_backend, id_topo):
new_bus_backend = type(self).local_bus_to_global_int(
new_bus, self._init_bus_lor[id_el_backend]
)
self.change_bus_trafo_hv(id_topo, new_bus_backend)
def change_bus_trafo_hv(self, id_powerline_backend, new_bus_backend):
    """Move the hv side of a transformer; a negative bus disconnects it."""
    trafo_table = self._grid.trafo
    if new_bus_backend < 0:
        trafo_table["in_service"].iat[id_powerline_backend] = False
    else:
        trafo_table["in_service"].iat[id_powerline_backend] = True
        trafo_table["hv_bus"].iat[id_powerline_backend] = new_bus_backend
def _apply_trafo_lv(self, new_bus, id_el_backend, id_topo):
new_bus_backend = type(self).local_bus_to_global_int(
new_bus, self._init_bus_lex[id_el_backend]
)
self.change_bus_trafo_lv(id_topo, new_bus_backend)
def change_bus_trafo_lv(self, id_powerline_backend, new_bus_backend):
    """Move the lv side of a transformer; a negative bus disconnects it."""
    trafo_table = self._grid.trafo
    if new_bus_backend < 0:
        trafo_table["in_service"].iat[id_powerline_backend] = False
    else:
        trafo_table["in_service"].iat[id_powerline_backend] = True
        trafo_table["lv_bus"].iat[id_powerline_backend] = new_bus_backend
def _aux_get_line_info(self, colname1, colname2):
res = np.concatenate(
(
self._grid.res_line[colname1].values,
self._grid.res_trafo[colname2].values,
)
)
return res
def runpf(self, is_dc=False):
    """
    INTERNAL
    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
    Run a power flow on the underlying _grid. This implements an optimization of the powerflow
    computation: if the number of
    buses has not changed between two calls, the previous results are re used. This speeds up the computation
    in case of "do nothing" action applied.

    Parameters
    ----------
    is_dc: ``bool``
        Whether to run a DC (instead of AC) powerflow.

    Returns
    -------
    converged: ``bool``
        Whether the powerflow converged.
    exc: ``Exception`` or ``None``
        ``None`` on success, otherwise a :class:`DivergingPowerFlow` describing the failure.
    """
    nb_bus = self.get_nb_active_bus()
    try:
        with warnings.catch_warnings():
            # remove the warning if _grid non connex. And it that case load flow as not converged
            warnings.filterwarnings(
                "ignore", category=scipy.sparse.linalg.MatrixRankWarning
            )
            warnings.filterwarnings("ignore", category=RuntimeWarning)
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            # initialize the solver from "dc" the very first time, from the
            # previous results when the topology did not change, "auto" otherwise
            if self._nb_bus_before is None:
                self._pf_init = "dc"
            elif nb_bus == self._nb_bus_before:
                self._pf_init = "results"
            else:
                self._pf_init = "auto"
            if np.any(~self._grid.load["in_service"]):
                # TODO see if there is a better way here -> do not handle this here, but rather in Backend._next_grid_state
                raise pp.powerflow.LoadflowNotConverged("Disconnected load: for now grid2op cannot handle properly"
                                                        " disconnected load. If you want to disconnect one, say it"
                                                        " consumes 0. instead. Please check loads: "
                                                        f"{np.where(~self._grid.load['in_service'])[0]}"
                                                        )
            if np.any(~self._grid.gen["in_service"]):
                # TODO see if there is a better way here -> do not handle this here, but rather in Backend._next_grid_state
                raise pp.powerflow.LoadflowNotConverged("Disconnected gen: for now grid2op cannot handle properly"
                                                        " disconnected generators. If you want to disconnect one, say it"
                                                        " produces 0. instead. Please check generators: "
                                                        f"{np.where(~self._grid.gen['in_service'])[0]}"
                                                        )
            if is_dc:
                pp.rundcpp(self._grid, check_connectivity=False, init="flat")
                self._nb_bus_before = (
                    None  # if dc i start normally next time i call an ac powerflow
                )
            else:
                pp.runpp(
                    self._grid,
                    check_connectivity=False,
                    init=self._pf_init,
                    numba=self.with_numba,
                    lightsim2grid=self._lightsim2grid,
                    max_iteration=self._max_iter,
                    distributed_slack=self._dist_slack,
                )
            # stores the computation time
            if "_ppc" in self._grid:
                if "et" in self._grid["_ppc"]:
                    self.comp_time += self._grid["_ppc"]["et"]
            if self._grid.res_gen.isnull().values.any():
                # TODO see if there is a better way here -> do not handle this here, but rather in Backend._next_grid_state
                # sometimes pandapower does not detect divergence and put Nan.
                raise pp.powerflow.LoadflowNotConverged("Divergence due to Nan values in res_gen table.")
            (
                self.prod_p[:],
                self.prod_q[:],
                self.prod_v[:],
                self.gen_theta[:],
            ) = self._gens_info()
            (
                self.load_p[:],
                self.load_q[:],
                self.load_v[:],
                self.load_theta[:],
            ) = self._loads_info()
            if not is_dc:
                if not np.all(np.isfinite(self.load_v)):
                    # TODO see if there is a better way here
                    # some loads are disconnected: it's a game over case!
                    raise pp.powerflow.LoadflowNotConverged("Isolated load")
            else:
                # fix voltages magnitude that are always "nan" for dc case
                # self._grid.res_bus["vm_pu"] is always nan when computed in DC
                self.load_v[:] = self.load_pu_to_kv  # TODO
                # need to assign the correct value when a generator is present at the same bus
                # TODO optimize this ugly loop
                # see https://github.com/e2nIEE/pandapower/issues/1996 for a fix
                for l_id in range(self.n_load):
                    if self.load_to_subid[l_id] in self.gen_to_subid:
                        ind_gens = np.where(
                            self.gen_to_subid == self.load_to_subid[l_id]
                        )[0]
                        for g_id in ind_gens:
                            if (
                                self._topo_vect[self.load_pos_topo_vect[l_id]]
                                == self._topo_vect[self.gen_pos_topo_vect[g_id]]
                            ):
                                self.load_v[l_id] = self.prod_v[g_id]
                                break
            self.line_status[:] = self._get_line_status()
            # I retrieve the data once for the flows, so has to not re read multiple dataFrame
            self.p_or[:] = self._aux_get_line_info("p_from_mw", "p_hv_mw")
            self.q_or[:] = self._aux_get_line_info("q_from_mvar", "q_hv_mvar")
            self.v_or[:] = self._aux_get_line_info("vm_from_pu", "vm_hv_pu")
            self.a_or[:] = self._aux_get_line_info("i_from_ka", "i_hv_ka") * 1000
            self.theta_or[:] = self._aux_get_line_info(
                "va_from_degree", "va_hv_degree"
            )
            self.a_or[~np.isfinite(self.a_or)] = 0.0
            self.v_or[~np.isfinite(self.v_or)] = 0.0
            self.p_ex[:] = self._aux_get_line_info("p_to_mw", "p_lv_mw")
            self.q_ex[:] = self._aux_get_line_info("q_to_mvar", "q_lv_mvar")
            self.v_ex[:] = self._aux_get_line_info("vm_to_pu", "vm_lv_pu")
            self.a_ex[:] = self._aux_get_line_info("i_to_ka", "i_lv_ka") * 1000
            self.theta_ex[:] = self._aux_get_line_info(
                "va_to_degree", "va_lv_degree"
            )
            self.a_ex[~np.isfinite(self.a_ex)] = 0.0
            self.v_ex[~np.isfinite(self.v_ex)] = 0.0
            # it seems that pandapower does not take into account disconencted powerline for their voltage
            self.v_or[~self.line_status] = 0.0
            self.v_ex[~self.line_status] = 0.0
            self.v_or[:] *= self.lines_or_pu_to_kv
            self.v_ex[:] *= self.lines_ex_pu_to_kv
            # see issue https://github.com/rte-france/Grid2Op/issues/389
            self.theta_or[~np.isfinite(self.theta_or)] = 0.0
            self.theta_ex[~np.isfinite(self.theta_ex)] = 0.0
            self._nb_bus_before = None
            # BUGFIX: only reset the p setpoint of the generator that was
            # artificially added in "load_grid" to model the slack. Without
            # this guard, when the grid already had a proper slack generator
            # (self._iref_slack is None), indexing with None acts as
            # np.newaxis and the assignment wrongly zeroes entries of the
            # ppc "gen" table.
            if self._iref_slack is not None:
                # a gen has been added to represent the slack: its production
                # is given by the "ext_grid"
                self._grid._ppc["gen"][self._iref_slack, 1] = 0.0
            # handle storage units
            # note that we have to look ourselves for disconnected storage
            (
                self.storage_p[:],
                self.storage_q[:],
                self.storage_v[:],
                self.storage_theta[:],
            ) = self._storages_info()
            deact_storage = ~np.isfinite(self.storage_v)
            if np.any(np.abs(self.storage_p[deact_storage]) > self.tol):
                raise pp.powerflow.LoadflowNotConverged(
                    "Isolated storage set to absorb / produce something"
                )
            self.storage_p[deact_storage] = 0.0
            self.storage_q[deact_storage] = 0.0
            self.storage_v[deact_storage] = 0.0
            self._grid.storage["in_service"].values[deact_storage] = False
            self._topo_vect[:] = self._get_topo_vect()
            return self._grid.converged, None
    except pp.powerflow.LoadflowNotConverged as exc_:
        # of the powerflow has not converged, results are Nan
        self._reset_all_nan()
        msg = exc_.__str__()
        return False, DivergingPowerFlow(f'powerflow diverged with error :"{msg}"')
def assert_grid_correct(self):
    """
    INTERNAL
    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
    Consistency check of the loaded grid; called by the Environment as it should be.
    """
    # the whole check is implemented in the base class, nothing
    # pandapower-specific to add here
    super().assert_grid_correct()
def _reset_all_nan(self):
self.p_or[:] = np.NaN
self.q_or[:] = np.NaN
self.v_or[:] = np.NaN
self.a_or[:] = np.NaN
self.p_ex[:] = np.NaN
self.q_ex[:] = np.NaN
self.v_ex[:] = np.NaN
self.a_ex[:] = np.NaN
self.prod_p[:] = np.NaN
self.prod_q[:] = np.NaN
self.prod_v[:] = np.NaN
self.load_p[:] = np.NaN
self.load_q[:] = np.NaN
self.load_v[:] = np.NaN
self.storage_p[:] = np.NaN
self.storage_q[:] = np.NaN
self.storage_v[:] = np.NaN
self._nb_bus_before = None
self.theta_or[:] = np.NaN
self.theta_ex[:] = np.NaN
self.load_theta[:] = np.NaN
self.gen_theta[:] = np.NaN
self.storage_theta[:] = np.NaN
def copy(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Performs a deep copy of the power :attr:`_grid`.
As pandapower is pure python, the deep copy operator is perfectly suited for the task.
"""
# res = copy.deepcopy(self) # this was really slow...
res = type(self)(**self._my_kwargs)
# copy from base class (backend)
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
# warnings depending on pandas version and pp version
res._grid = copy.deepcopy(self._grid)
res.thermal_limit_a = copy.deepcopy(self.thermal_limit_a)
res._sh_vnkv = copy.deepcopy(self._sh_vnkv)
res.comp_time = self.comp_time
res.can_output_theta = self.can_output_theta
res._is_loaded = self._is_loaded
# copy all attributes from myself
res.prod_pu_to_kv = copy.deepcopy(self.prod_pu_to_kv)
res.load_pu_to_kv = copy.deepcopy(self.load_pu_to_kv)
res.lines_or_pu_to_kv = copy.deepcopy(self.lines_or_pu_to_kv)
res.lines_ex_pu_to_kv = copy.deepcopy(self.lines_ex_pu_to_kv)
res.storage_pu_to_kv = copy.deepcopy(self.storage_pu_to_kv)
res.p_or = copy.deepcopy(self.p_or)
res.q_or = copy.deepcopy(self.q_or)
res.v_or = copy.deepcopy(self.v_or)
res.a_or = copy.deepcopy(self.a_or)
res.p_ex = copy.deepcopy(self.p_ex)
res.q_ex = copy.deepcopy(self.q_ex)
res.v_ex = copy.deepcopy(self.v_ex)
res.a_ex = copy.deepcopy(self.a_ex)
res.load_p = copy.deepcopy(self.load_p)
res.load_q = copy.deepcopy(self.load_q)
res.load_v = copy.deepcopy(self.load_v)
res.storage_p = copy.deepcopy(self.storage_p)
res.storage_q = copy.deepcopy(self.storage_q)
res.storage_v = copy.deepcopy(self.storage_v)
res.prod_p = copy.deepcopy(self.prod_p)
res.prod_q = copy.deepcopy(self.prod_q)
res.prod_v = copy.deepcopy(self.prod_v)
res.line_status = copy.deepcopy(self.line_status)
res._pf_init = self._pf_init
res._nb_bus_before = self._nb_bus_before
res.thermal_limit_a = copy.deepcopy(self.thermal_limit_a)
res._iref_slack = self._iref_slack
res._id_bus_added = self._id_bus_added
res._fact_mult_gen = copy.deepcopy(self._fact_mult_gen)
res._what_object_where = copy.deepcopy(self._fact_mult_gen)
res._number_true_line = self._number_true_line
res._corresp_name_fun = copy.deepcopy(self._corresp_name_fun)
res.dim_topo = self.dim_topo
res.cst_1 = self.cst_1
res._topo_vect = copy.deepcopy(self._topo_vect)
res.slack_id = self.slack_id
# function to rstore some information
res.__nb_bus_before = (
self.__nb_bus_before
) # number of substation in the powergrid
res.__nb_powerline = (
self.__nb_powerline
) # number of powerline (real powerline, not transformer)
res._init_bus_load = copy.deepcopy(self._init_bus_load)
res._init_bus_gen = copy.deepcopy(self._init_bus_gen)
res._init_bus_lor = copy.deepcopy(self._init_bus_lor)
res._init_bus_lex = copy.deepcopy(self._init_bus_lex)
res._get_vector_inj = copy.deepcopy(self._get_vector_inj)
res._big_topo_to_obj = copy.deepcopy(self._big_topo_to_obj)
res._big_topo_to_backend = copy.deepcopy(self._big_topo_to_backend)
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
res.__pp_backend_initial_grid = copy.deepcopy(self.__pp_backend_initial_grid)
res.tol = (
self.tol
) # this is NOT the pandapower tolerance !!!! this is used to check if a storage unit
# produce / absorbs anything
# TODO storage doc (in grid2op rst) of the backend
res.can_output_theta = self.can_output_theta # I support the voltage angle
res.theta_or = copy.deepcopy(self.theta_or)
res.theta_ex = copy.deepcopy(self.theta_ex)
res.load_theta = copy.deepcopy(self.load_theta)
res.gen_theta = copy.deepcopy(self.gen_theta)
res.storage_theta = copy.deepcopy(self.storage_theta)
return res
    def close(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Called when the :class:`grid2op;Environment` has terminated, this function only reset the grid to a state
        where it has not been loaded.
        """
        # drop the working pandapower network then rebind the attribute so
        # later accesses see ``None`` instead of raising AttributeError
        del self._grid
        self._grid = None
        # same for the pristine copy kept for reset()
        del self.__pp_backend_initial_grid
        self.__pp_backend_initial_grid = None
    def save_file(self, full_path):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        You might want to use it for debugging purpose only, and only if you develop yourself a backend.

        Save the current pandapower network to ``full_path``, in pandapower's
        json format.

        :param full_path: destination file path for the json dump
        :return: ``None``
        """
        pp.to_json(self._grid, full_path)
    def get_line_status(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        As all the functions related to powerline, pandapower split them into multiple dataframe (some for transformers,
        some for 3 winding transformers etc.). We make sure to get them all here.

        Returns the cached status vector (updated by ``runpf`` /
        ``_disconnect_line`` / ``_reconnect_line``); see ``_get_line_status``
        for the version recomputed from the pandapower tables.
        """
        return self.line_status
def _get_line_status(self):
return np.concatenate(
(
self._grid.line["in_service"].values,
self._grid.trafo["in_service"].values,
)
).astype(dt_bool)
    def get_line_flow(self):
        # current flow (origin side, in A) cached by the last powerflow
        return self.a_or
def _disconnect_line(self, id_):
if id_ < self._number_true_line:
self._grid.line["in_service"].iloc[id_] = False
else:
self._grid.trafo["in_service"].iloc[id_ - self._number_true_line] = False
self._topo_vect[self.line_or_pos_topo_vect[id_]] = -1
self._topo_vect[self.line_ex_pos_topo_vect[id_]] = -1
self.line_status[id_] = False
def _reconnect_line(self, id_):
if id_ < self._number_true_line:
self._grid.line["in_service"].iloc[id_] = True
else:
self._grid.trafo["in_service"].iloc[id_ - self._number_true_line] = True
self.line_status[id_] = True
    def get_topo_vect(self):
        # cached topology vector, refreshed by runpf (via _get_topo_vect)
        # and mutated in place by _disconnect_line
        return self._topo_vect
def _get_topo_vect(self):
res = np.full(self.dim_topo, fill_value=np.iinfo(dt_int).max, dtype=dt_int)
line_status = self.get_line_status()
i = 0
for row in self._grid.line[["from_bus", "to_bus"]].values:
bus_or_id = row[0]
bus_ex_id = row[1]
if line_status[i]:
res[self.line_or_pos_topo_vect[i]] = (
1 if bus_or_id == self.line_or_to_subid[i] else 2
)
res[self.line_ex_pos_topo_vect[i]] = (
1 if bus_ex_id == self.line_ex_to_subid[i] else 2
)
else:
res[self.line_or_pos_topo_vect[i]] = -1
res[self.line_ex_pos_topo_vect[i]] = -1
i += 1
nb = self._number_true_line
i = 0
for row in self._grid.trafo[["hv_bus", "lv_bus"]].values:
bus_or_id = row[0]
bus_ex_id = row[1]
j = i + nb
if line_status[j]:
res[self.line_or_pos_topo_vect[j]] = (
1 if bus_or_id == self.line_or_to_subid[j] else 2
)
res[self.line_ex_pos_topo_vect[j]] = (
1 if bus_ex_id == self.line_ex_to_subid[j] else 2
)
else:
res[self.line_or_pos_topo_vect[j]] = -1
res[self.line_ex_pos_topo_vect[j]] = -1
i += 1
i = 0
for bus_id in self._grid.gen["bus"].values:
res[self.gen_pos_topo_vect[i]] = 1 if bus_id == self.gen_to_subid[i] else 2
i += 1
i = 0
for bus_id in self._grid.load["bus"].values:
res[self.load_pos_topo_vect[i]] = (
1 if bus_id == self.load_to_subid[i] else 2
)
i += 1
if self.n_storage:
# storage can be deactivated by the environment for backward compatibility
i = 0
for bus_id in self._grid.storage["bus"].values:
status = self._grid.storage["in_service"].values[i]
if status:
res[self.storage_pos_topo_vect[i]] = (
1 if bus_id == self.storage_to_subid[i] else 2
)
else:
res[self.storage_pos_topo_vect[i]] = -1
i += 1
return res
def _gens_info(self):
prod_p = self.cst_1 * self._grid.res_gen["p_mw"].values.astype(dt_float)
prod_q = self.cst_1 * self._grid.res_gen["q_mvar"].values.astype(dt_float)
prod_v = (
self.cst_1
* self._grid.res_gen["vm_pu"].values.astype(dt_float)
* self.prod_pu_to_kv
)
prod_theta = self.cst_1 * self._grid.res_gen["va_degree"].values.astype(
dt_float
)
if self._iref_slack is not None:
# slack bus and added generator are on same bus. I need to add power of slack bus to this one.
# if self._grid.gen["bus"].iloc[self._id_bus_added] == self.gen_to_subid[self._id_bus_added]:
if "gen" in self._grid._ppc["internal"]:
prod_p[self._id_bus_added] += self._grid._ppc["internal"]["gen"][
self._iref_slack, 1
]
prod_q[self._id_bus_added] += self._grid._ppc["internal"]["gen"][
self._iref_slack, 2
]
return prod_p, prod_q, prod_v, prod_theta
def _loads_info(self):
load_p = self.cst_1 * self._grid.res_load["p_mw"].values.astype(dt_float)
load_q = self.cst_1 * self._grid.res_load["q_mvar"].values.astype(dt_float)
load_v = (
self._grid.res_bus.loc[self._grid.load["bus"].values][
"vm_pu"
].values.astype(dt_float)
* self.load_pu_to_kv
)
load_theta = self._grid.res_bus.loc[self._grid.load["bus"].values][
"va_degree"
].values.astype(dt_float)
return load_p, load_q, load_v, load_theta
def generators_info(self):
return (
self.cst_1 * self.prod_p,
self.cst_1 * self.prod_q,
self.cst_1 * self.prod_v,
)
def loads_info(self):
return (
self.cst_1 * self.load_p,
self.cst_1 * self.load_q,
self.cst_1 * self.load_v,
)
def lines_or_info(self):
return (
self.cst_1 * self.p_or,
self.cst_1 * self.q_or,
self.cst_1 * self.v_or,
self.cst_1 * self.a_or,
)
def lines_ex_info(self):
return (
self.cst_1 * self.p_ex,
self.cst_1 * self.q_ex,
self.cst_1 * self.v_ex,
self.cst_1 * self.a_ex,
)
def shunt_info(self):
shunt_p = self.cst_1 * self._grid.res_shunt["p_mw"].values.astype(dt_float)
shunt_q = self.cst_1 * self._grid.res_shunt["q_mvar"].values.astype(dt_float)
shunt_v = (
self._grid.res_bus["vm_pu"]
.loc[self._grid.shunt["bus"].values]
.values.astype(dt_float)
)
shunt_v *= (
self._grid.bus["vn_kv"]
.loc[self._grid.shunt["bus"].values]
.values.astype(dt_float)
)
shunt_bus = type(self).global_bus_to_local(self._grid.shunt["bus"].values, self.shunt_to_subid)
shunt_v[~self._grid.shunt["in_service"].values] = 0
shunt_bus[~self._grid.shunt["in_service"].values] = -1
# handle shunt alone on a bus (in this case it should probably diverge...)
alone = ~np.isfinite(shunt_v)
shunt_v[alone] = 0
shunt_bus[alone] = -1
return shunt_p, shunt_q, shunt_v, shunt_bus
def storages_info(self):
return (
self.cst_1 * self.storage_p,
self.cst_1 * self.storage_q,
self.cst_1 * self.storage_v,
)
def _storages_info(self):
if self.n_storage:
# this is because we support "backward comaptibility" feature. So the storage can be
# deactivated from the Environment...
p_storage = self._grid.res_storage["p_mw"].values.astype(dt_float)
q_storage = self._grid.res_storage["q_mvar"].values.astype(dt_float)
v_storage = (
self._grid.res_bus.loc[self._grid.storage["bus"].values][
"vm_pu"
].values.astype(dt_float)
* self.storage_pu_to_kv
)
theta_storage = (
self._grid.res_bus.loc[self._grid.storage["bus"].values][
"vm_pu"
].values.astype(dt_float)
* self.storage_pu_to_kv
)
else:
p_storage = np.zeros(shape=0, dtype=dt_float)
q_storage = np.zeros(shape=0, dtype=dt_float)
v_storage = np.zeros(shape=0, dtype=dt_float)
theta_storage = np.zeros(shape=0, dtype=dt_float)
return p_storage, q_storage, v_storage, theta_storage
def sub_from_bus_id(self, bus_id):
if bus_id >= self._number_true_line:
return bus_id - self._number_true_line
return bus_id
| 65,425 | 41.155928 | 132 | py |
Grid2Op | Grid2Op-master/grid2op/Backend/__init__.py | __all__ = ["Backend", "PandaPowerBackend"]
from grid2op.Backend.Backend import Backend
from grid2op.Backend.PandaPowerBackend import PandaPowerBackend
| 152 | 29.6 | 63 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/GSFFWFWM.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import json
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from grid2op.dtypes import dt_bool, dt_int
from grid2op.Exceptions import Grid2OpException
from grid2op.Chronics.gridStateFromFileWithForecasts import (
GridStateFromFileWithForecasts,
)
class GridStateFromFileWithForecastsWithMaintenance(GridStateFromFileWithForecasts):
    """
    An extension of :class:`GridStateFromFileWithForecasts` that implements the maintenance chronic generator
    on the fly (maintenance are not read from files, but are rather generated when the chronics is created).

    Attributes
    ----------
    maintenance_starting_hour: ``int``
        The hour at which every maintenance will start

    maintenance_ending_hour: ``int``
        The hour at which every maintenance will end (we suppose mainteance end on same day for now

    line_to_maintenance: ``array``, dtype: ``string``
        Array used to store the name of the lines that can happen to be in maintenance

    daily_proba_per_month_maintenance: ``array``, dtype: ``float``
        Array used to store probability each line can be in maintenance on a day for a given month

    max_daily_number_per_month_maintenance: ``array``, dtype: ``int``
        Array used to store maximum number of maintenance per day for each month

    """

    # this chronics class represents one single scenario, not a collection
    MULTI_CHRONICS = False

    def __init__(
        self,
        path,
        sep=";",
        time_interval=timedelta(minutes=5),
        max_iter=-1,
        chunk_size=None,
        h_forecast=(5, )
    ):
        # loads / productions / forecasts are still read from disk by the
        # parent class; only maintenance is generated on the fly
        super().__init__(
            path=path,
            sep=sep,
            time_interval=time_interval,
            max_iter=max_iter,
            chunk_size=chunk_size,
            h_forecast=h_forecast
        )
        # maintenance parameters; they stay ``None`` until ``initialize``
        # reads them from "maintenance_meta.json" located in ``path``
        self.maintenance_starting_hour = None
        self.maintenance_ending_hour = None
        self.daily_proba_per_month_maintenance = None
        self.max_daily_number_per_month_maintenance = None
        self.line_to_maintenance = None

    def initialize(
        self,
        order_backend_loads,
        order_backend_prods,
        order_backend_lines,
        order_backend_subs,
        names_chronics_to_backend=None,
    ):
        """Load the maintenance parameters (only once) then defer to the parent class."""
        self.name_line = order_backend_lines

        # properties of maintenance
        # self.maintenance_duration= 8*(self.time_interval.total_seconds()*60*60)#8h, 9am to 5pm
        # 8h duration, 9am to 5pm
        if (
            self.maintenance_starting_hour is None
            or self.maintenance_ending_hour is None
            or self.daily_proba_per_month_maintenance is None
            or self.line_to_maintenance is None
            or self.max_daily_number_per_month_maintenance is None
        ):
            # initialize the parameters from the json
            with open(
                os.path.join(self.path, "maintenance_meta.json"), "r", encoding="utf-8"
            ) as f:
                dict_ = json.load(f)

            self.maintenance_starting_hour = dict_["maintenance_starting_hour"]
            # self.maintenance_duration= 8*(self.time_interval.total_seconds()*60*60) # not used for now, could be used later
            self.maintenance_ending_hour = dict_["maintenance_ending_hour"]

            # stored as a set for fast membership tests against line names
            self.line_to_maintenance = set(dict_["line_to_maintenance"])

            # frequencies of maintenance
            self.daily_proba_per_month_maintenance = dict_[
                "daily_proba_per_month_maintenance"
            ]

            self.max_daily_number_per_month_maintenance = dict_[
                "max_daily_number_per_month_maintenance"
            ]

        super().initialize(
            order_backend_loads,
            order_backend_prods,
            order_backend_lines,
            order_backend_subs,
            names_chronics_to_backend,
        )

    def _init_attrs(
        self, load_p, load_q, prod_p, prod_v, hazards=None, maintenance=None,
        is_init=False
    ):
        # NB: the ``maintenance`` argument (read from disk) is deliberately
        # discarded (``maintenance=None`` below): maintenance is generated
        # randomly instead, once per episode.
        super()._init_attrs(
            load_p, load_q, prod_p, prod_v, hazards=hazards, maintenance=None,
            is_init=is_init
        )

        if is_init:
            # ignore the maitenance but keep hazards
            self._sample_maintenance()

    # sampled only at the initialization of the episode, and not at each chunk !
    def _sample_maintenance(self):
        """Draw the whole-episode maintenance schedule, then derive the
        time / duration views expected by the rest of grid2op."""
        ########
        # new method to introduce generated maintenance
        self.maintenance = self._generate_maintenance()  #

        ##########
        # same as before in GridStateFromFileWithForecasts
        GridStateFromFileWithForecastsWithMaintenance._fix_maintenance_format(self)

    @staticmethod
    def _fix_maintenance_format(obj_with_maintenance):
        """Derive ``maintenance_time`` / ``maintenance_duration`` from the raw
        maintenance matrix of ``obj_with_maintenance`` and cast the matrix to a
        boolean mask. Static so that other chronics objects can reuse it.
        """
        # -1 means "no upcoming maintenance" for maintenance_time
        obj_with_maintenance.maintenance_time = (
            np.zeros(shape=(obj_with_maintenance.maintenance.shape[0], obj_with_maintenance.n_line), dtype=dt_int) - 1
        )
        obj_with_maintenance.maintenance_duration = np.zeros(
            shape=(obj_with_maintenance.maintenance.shape[0], obj_with_maintenance.n_line), dtype=dt_int
        )

        # test that with chunk size
        for line_id in range(obj_with_maintenance.n_line):
            obj_with_maintenance.maintenance_time[:, line_id] = obj_with_maintenance.get_maintenance_time_1d(
                obj_with_maintenance.maintenance[:, line_id]
            )
            obj_with_maintenance.maintenance_duration[:, line_id] = obj_with_maintenance.get_maintenance_duration_1d(
                obj_with_maintenance.maintenance[:, line_id]
            )

        # there are _maintenance and hazards only if the value in the file is not 0.
        obj_with_maintenance.maintenance = obj_with_maintenance.maintenance != 0.0
        obj_with_maintenance.maintenance = obj_with_maintenance.maintenance.astype(dt_bool)

    @staticmethod
    def _generate_matenance_static(name_line,
                                   n_,
                                   line_to_maintenance,
                                   time_interval,
                                   start_datetime,
                                   maintenance_starting_hour,
                                   maintenance_ending_hour,
                                   daily_proba_per_month_maintenance,
                                   max_daily_number_per_month_maintenance,
                                   space_prng,
                                   ):
        """Sample a ``(n_, n_line)`` maintenance matrix (1.0 where a line is in
        maintenance, 0.0 elsewhere).

        For each working day (Mon-Fri) of the scenario, each maintainable line
        is drawn in maintenance with the month-dependent probability, capped
        at the month-dependent daily maximum; a maintenance spans
        ``maintenance_starting_hour`` to ``maintenance_ending_hour`` that day.
        NB the name "matenance" is a historical misspelling kept for
        backward compatibility with existing callers.
        """
        # define maintenance dataframe with size (nbtimesteps,nlines)
        columnsNames = name_line
        nbTimesteps = n_
        res = np.zeros((nbTimesteps, len(name_line)))

        # read the maintenance line
        idx_line_maintenance = np.array(
            [el in line_to_maintenance for el in columnsNames]
        )
        nb_line_maint = np.sum(idx_line_maintenance)
        if nb_line_maint == 0:
            # no maintainable line on this grid: nothing to sample
            # TODO log something there !
            return res

        if nb_line_maint != len(line_to_maintenance):
            raise Grid2OpException(
                "Lines that are suppose to be in maintenance are:\n{}\nand lines in the grid "
                "are\n{}\nCheck that all lines in maintenance are in the grid."
                "".format(line_to_maintenance, name_line)
            )

        # identify the timestamps of the chronics to find out the month and day of the week
        freq = (
            str(int(time_interval.total_seconds())) + "s"
        )  # should be in the timedelta frequency format in pandas
        datelist = pd.date_range(start_datetime, periods=nbTimesteps, freq=freq)
        datelist = np.unique(np.array([el.date() for el in datelist]))
        # the last (possibly partial) day is not considered
        datelist = datelist[:-1]

        n_lines_maintenance = len(line_to_maintenance)

        _24_h = timedelta(seconds=86400)
        nb_rows = int(86400 / time_interval.total_seconds())
        selected_rows_beg = int(
            maintenance_starting_hour * 3600 / time_interval.total_seconds()
        )
        selected_rows_end = int(
            maintenance_ending_hour * 3600 / time_interval.total_seconds()
        )

        # TODO this is INSANELY slow for now. find a way to make it faster
        # HINT: vectorize everything into one single numpy array, everything can be vectorized there...
        month = 0
        maintenance_daily_proba = -1
        maxDailyMaintenance = -1
        for nb_day_since_beg, this_day in enumerate(datelist):
            dayOfWeek = this_day.weekday()
            if dayOfWeek < 5:  # only maintenance starting on working days
                month = this_day.month
                maintenance_me = np.zeros((nb_rows, nb_line_maint))

                # Careful: month start at 1 but inidces start at 0 in python
                maintenance_daily_proba = daily_proba_per_month_maintenance[
                    (month - 1)
                ]
                maxDailyMaintenance = max_daily_number_per_month_maintenance[
                    (month - 1)
                ]
                # now for each line in self.line_to_maintenance, sample to know if we generate a maintenance
                # for line in self.line_to_maintenance:
                are_lines_in_maintenance = space_prng.choice(
                    [False, True],
                    p=[(1.0 - maintenance_daily_proba), maintenance_daily_proba],
                    size=n_lines_maintenance,
                )

                n_Generated_Maintenance = np.sum(are_lines_in_maintenance)

                # check if the number of maintenance is not above the max allowed. otherwise randomly pick up the right
                # number
                if n_Generated_Maintenance > maxDailyMaintenance:
                    # we pick up only maxDailyMaintenance elements
                    not_chosen = space_prng.choice(
                        n_Generated_Maintenance,
                        replace=False,
                        size=n_Generated_Maintenance - maxDailyMaintenance,
                    )
                    are_lines_in_maintenance[
                        np.where(are_lines_in_maintenance)[0][not_chosen]
                    ] = False

                # mark the chosen lines as in maintenance for the daily window
                maintenance_me[
                    selected_rows_beg:selected_rows_end, are_lines_in_maintenance
                ] = 1.0

                # handle last iteration
                n_max = res[
                    (nb_day_since_beg * nb_rows) : ((nb_day_since_beg + 1) * nb_rows),
                    idx_line_maintenance,
                ].shape[0]
                res[
                    (nb_day_since_beg * nb_rows) : ((nb_day_since_beg + 1) * nb_rows),
                    idx_line_maintenance,
                ] = maintenance_me[:n_max, :]

        return res

    def _generate_maintenance(self):
        """Sample this scenario's maintenance matrix with this object's prng."""
        return GridStateFromFileWithForecastsWithMaintenance._generate_matenance_static(
            self.name_line,
            self.n_,
            self.line_to_maintenance,
            self.time_interval,
            self.start_datetime,
            self.maintenance_starting_hour,
            self.maintenance_ending_hour,
            self.daily_proba_per_month_maintenance,
            self.max_daily_number_per_month_maintenance,
            self.space_prng
        )
| 11,654 | 40.038732 | 125 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/Settings_5busExample.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
This file contains the settings (path to the case file, chronics converter etc.) that allows to make a simple
environment with a powergrid of only 5 buses, 3 laods, 2 generators and 8 powerlines.
"""
import os
from pathlib import Path
# locate the data packaged with grid2op: <grid2op root>/data/rte_case5_example
file_dir = Path(__file__).parent.absolute()
grid2op_root = file_dir.parent.absolute()
grid2op_root = str(grid2op_root)
dat_dir = os.path.abspath(os.path.join(grid2op_root, "data"))
case_dir = "rte_case5_example"
grid_file = "grid.json"
# full path to the grid description used by the backend
EXAMPLE_CASEFILE = os.path.join(dat_dir, case_dir, grid_file)
# full path to the time series ("chronics") shipped with this example
EXAMPLE_CHRONICSPATH = os.path.join(dat_dir, case_dir, "chronics")
# (x, y) coordinates of the 5 substations, used for plotting
CASE_5_GRAPH_LAYOUT = [(0, 0), (0, 400), (200, 400), (400, 400), (400, 0)]
| 1,145 | 41.444444 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/Settings_L2RPN2019.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
This file contains the settings (path to the case file, chronics converter etc. that allows to run
the competition "L2RPN 2019" that took place on the pypownet plateform.
It is present to reproduce this competition.
"""
import os
import warnings
from pathlib import Path
import numpy as np
from grid2op.Action import BaseAction
from grid2op.Exceptions import AmbiguousAction, IncorrectNumberOfElements
from grid2op.Chronics.readPypowNetData import (
ReadPypowNetData,
) # imported by another module
# locate the data packaged with grid2op: <grid2op root>/data/l2rpn_2019
file_dir = Path(__file__).parent.absolute()
grid2op_root = file_dir.parent.absolute()
grid2op_root = str(grid2op_root)
dat_dir = os.path.abspath(os.path.join(grid2op_root, "data"))
case_dir = "l2rpn_2019"
grid_file = "grid.json"
# full path to the grid description used by the backend
L2RPN2019_CASEFILE = os.path.join(dat_dir, case_dir, grid_file)
# full path to the time series ("chronics") of the 2019 competition
L2RPN2019_CHRONICSPATH = os.path.join(dat_dir, case_dir, "chronics")

# (x, y) coordinates of the 14 substations, used for plotting
# (NB: deliberately bound to both names for backward compatibility)
CASE_14_L2RPN2019_LAYOUT = graph_layout = [
    (-280, -81),
    (-100, -270),
    (366, -270),
    (366, -54),
    (-64, -54),
    (-64, 54),
    (450, 0),
    (550, 0),
    (326, 54),
    (222, 108),
    (79, 162),
    (-170, 270),
    (-64, 270),
    (222, 216),
]

# names of object of the grid were not in the same order as the default one
# mapping: name used in the 2019 (pypownet) data -> grid2op default name
L2RPN2019_DICT_NAMES = {
    "loads": {
        "2_C-10.61": "load_1_0",
        "3_C151.15": "load_2_1",
        "14_C63.6": "load_13_10",
        "4_C-9.47": "load_3_2",
        "5_C201.84": "load_4_3",
        "6_C-6.27": "load_5_4",
        "9_C130.49": "load_8_5",
        "10_C228.66": "load_9_6",
        "11_C-138.89": "load_10_7",
        "12_C-27.88": "load_11_8",
        "13_C-13.33": "load_12_9",
    },
    "lines": {
        "1_2_1": "0_1_0",
        "1_5_2": "0_4_1",
        "9_10_16": "8_9_16",
        "9_14_17": "8_13_15",
        "10_11_18": "9_10_17",
        "12_13_19": "11_12_18",
        "13_14_20": "12_13_19",
        "2_3_3": "1_2_2",
        "2_4_4": "1_3_3",
        "2_5_5": "1_4_4",
        "3_4_6": "2_3_5",
        "4_5_7": "3_4_6",
        "6_11_11": "5_10_12",
        "6_12_12": "5_11_11",
        "6_13_13": "5_12_10",
        "4_7_8": "3_6_7",
        "4_9_9": "3_8_8",
        "5_6_10": "4_5_9",
        "7_8_14": "6_7_13",
        "7_9_15": "6_8_14",
    },
    "prods": {
        "1_G137.1": "gen_0_4",
        "3_G36.31": "gen_1_0",
        "6_G63.29": "gen_2_1",
        "2_G-56.47": "gen_5_2",
        "8_G40.43": "gen_7_3",
    },
}
# class of the action didn't implement the "set" part. Only change was present.
# Beside when reconnected, objects were always reconnected on bus 1.
# This is not used at the moment.
class L2RPN2019_Action(BaseAction):
    """
    This class is here to model only a subpart of Topological actions, the one consisting in topological switching.
    It will throw an "AmbiguousAction" error it someone attempt to change injections in any ways.

    It has the same attributes as its base class :class:`BaseAction`.

    It is also here to show an example on how to implement a valid class deriving from :class:`BaseAction`.

    **NB** This class doesn't allow to connect object to other buses than their original bus. In this case,
    reconnecting a powerline cannot be considered "ambiguous".
    """

    def __init__(self):
        """
        See the definition of :func:`BaseAction.__init__` and of :class:`BaseAction` for more information. Nothing more is done
        in this constructor.
        """
        BaseAction.__init__(self)

        # Only "change"-style keys are authorized: injections, "set_bus" and
        # "set_line_status" are filtered out (attempting to use them emits a
        # warning in update()).
        # Bug fix: the original filter ended with ``and "set_line_status"`` --
        # a truthy string literal instead of the comparison
        # ``k != "set_line_status"`` -- so "set_line_status" was wrongly kept.
        self.authorized_keys = set(
            [
                k
                for k in self.authorized_keys
                if k != "injection" and k != "set_bus" and k != "set_line_status"
            ]
        )

    def __call__(self):
        """
        Compare to the ancestor :func:`BaseAction.__call__` this type of BaseAction doesn't allow to change the injections.
        The only difference is in the returned value *dict_injection* that is always an empty dictionnary.

        Returns
        -------
        dict_injection: :class:`dict`
            This dictionnary is always empty

        set_line_status: :class:`numpy.array`, dtype:int
            This array is :attr:`BaseAction._set_line_status`

        switch_line_status: :class:`numpy.array`, dtype:bool
            This array is :attr:`BaseAction._switch_line_status`, it is never modified

        set_topo_vect: :class:`numpy.array`, dtype:int
            This array is :attr:`BaseAction._set_topo_vect`, it is never modified

        change_bus_vect: :class:`numpy.array`, dtype:bool
            This array is :attr:`BaseAction._change_bus_vect`, it is never modified

        Raises
        ------
        AmbiguousAction
            If an injection modification has been requested on this action.
        """
        if self._dict_inj:
            raise AmbiguousAction(
                'You asked to modify the injection with an action of class "TopologyAction".'
            )
        self._check_for_ambiguity()
        return (
            {},
            self._set_line_status,
            self._switch_line_status,
            self._set_topo_vect,
            self._change_bus_vect,
        )

    def update(self, dict_):
        """
        As its original implementation, this method allows to modify the way a dictionnary can be mapped to a valid
        :class:`BaseAction`.

        It has only minor modifications compared to the original :func:`BaseAction.update` implementation, most notably, it
        doesn't update the :attr:`BaseAction._dict_inj`. It raises a warning if attempting to change them.

        Parameters
        ----------
        dict_: :class:`dict`
            See the help of :func:`BaseAction.update` for a detailed explanation. **NB** all the explanations concerning the
            "injection", "change bus", "set bus", or "change line status" are irrelevant for this subclass.

        Returns
        -------
        self: :class:`PowerLineSet`
            Return object itself thus allowing mutiple call to "update" to be chained.
        """
        # invalidate the cached vector representation (rebuilt by to_vect)
        self.as_vect = None
        if dict_ is not None:
            for kk in dict_.keys():
                if kk not in self.authorized_keys:
                    warn = 'The key "{}" used to update an action will be ignored. Valid keys are {}'
                    warn = warn.format(kk, self.authorized_keys)
                    warnings.warn(warn)

            # only "change bus", hazards, maintenance and "change status" are digested
            self._digest_change_bus(dict_)
            self._digest_hazards(dict_)
            self._digest_maintenance(dict_)
            self._digest_change_status(dict_)
        # self.disambiguate_reconnection()

        return self

    def size(self):
        """
        Compare to the base class, this action has a shorter size, as all information about injections are ignored.

        Returns
        -------
        size: ``int``
            The size of :class:`PowerLineSet` converted to an array.
        """
        # one entry per powerline (switch status) + one per topology position (change bus)
        return self.n_line + self.dim_topo

    def to_vect(self):
        """
        See :func:`BaseAction.to_vect` for a detailed description of this method.

        This method has the same behaviour as its base class, except it doesn't require any information about the
        injections to be sent, thus being more efficient from a memory footprint perspective.

        Returns
        -------
        _vectorized: :class:`numpy.array`, dtype:float
            The instance of this action converted to a vector.
        """
        if self.as_vect is None:
            # bug fix: ``np.float`` (a deprecated alias of the builtin
            # ``float``) was removed in NumPy 1.24; ``float`` is equivalent.
            self.as_vect = np.concatenate(
                (
                    self._switch_line_status.flatten().astype(float),
                    self._change_bus_vect.flatten().astype(float),
                )
            )

            if self.as_vect.shape[0] != self.size():
                raise AmbiguousAction("L2RPN2019_Action has not the proper shape.")

        return self.as_vect

    def from_vect(self, vect):
        """
        See :func:`BaseAction.from_vect` for a detailed description of this method.

        Nothing more is made except the initial vector is (much) smaller.

        Parameters
        ----------
        vect: :class:`numpy.array`, dtype:float
            A vector reprenseting an instance of :class:`.`

        Raises
        ------
        IncorrectNumberOfElements
            If ``vect`` does not have exactly :func:`size` entries.
        """
        self.reset()
        if vect.shape[0] != self.size():
            # bug fix: the message used ``vect.shape[1]`` which does not exist
            # on a 1-d vector and raised IndexError instead of the intended
            # exception
            raise IncorrectNumberOfElements(
                'Incorrect number of elements found while loading a "TopologyAction" from a vector. Found {} elements instead of {}'.format(
                    vect.shape[0], self.size()
                )
            )
        prev_ = 0
        next_ = self.n_line
        self._switch_line_status = vect[prev_:next_]
        self._switch_line_status = self._switch_line_status.astype(np.bool_)
        prev_ = next_
        next_ += self.dim_topo
        self._change_bus_vect = vect[prev_:next_]
        self._change_bus_vect = self._change_bus_vect.astype(np.bool_)
        # self.disambiguate_reconnection()

        self._check_for_ambiguity()

    def sample(self, space_prng):
        """
        Sample a PowerlineSwitch BaseAction.

        By default, this sampling will act on one random powerline, and it will either
        disconnect it or reconnect it each with equal probability.

        Parameters
        ----------
        space_prng: ``numpy.random.RandomState``
            The pseudo random number generator of the action space, used for
            reproducible sampling.

        Returns
        -------
        res: :class:`PowerLineSwitch`
            The sampled action
        """
        self.reset()
        # Bug fix: use the provided ``space_prng`` (the global ``np.random``
        # broke reproducibility) and draw a *powerline* id:
        # ``_set_line_status`` only has ``n_line`` entries, the previous
        # upper bound ``self.size()`` indexed out of range.
        i = space_prng.randint(0, self.n_line)  # the powerline on which we can act
        val = (
            2 * space_prng.randint(0, 2) - 1
        )  # the action: +1 reconnect it, -1 disconnect it
        self._set_line_status[i] = val
        if val == 1:
            # reconnection: both extremities are put back on bus 1
            self._set_topo_vect[self.line_ex_pos_topo_vect[i]] = 1
            self._set_topo_vect[self.line_or_pos_topo_vect[i]] = 1
        return self
| 10,360 | 33.194719 | 140 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/Settings_case14_realistic.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
This file contains the settings (path to the case file, chronics converter etc.) that allows to make a simple
environment with a powergrid of only 5 buses, 3 laods, 2 generators and 8 powerlines.
"""
import os
import numpy as np
from pathlib import Path
# locate the data packaged with grid2op: <grid2op root>/data/rte_case14_realistic
file_dir = Path(__file__).parent.absolute()
grid2op_root = file_dir.parent.absolute()
grid2op_root = str(grid2op_root)
dat_dir = os.path.abspath(os.path.join(grid2op_root, "data"))
case_dir = "rte_case14_realistic"
grid_file = "grid.json"
# full path to the grid description used by the backend
case14_real_CASEFILE = os.path.join(dat_dir, case_dir, grid_file)
# full path to the time series ("chronics") of this environment
case14_real_CHRONICSPATH = os.path.join(dat_dir, case_dir, "chronics")
# thermal limit of each powerline (one entry per line, in A)
case14_real_TH_LIM = np.array(
    [
        384.900179,
        384.900179,
        380.0,
        380.0,
        157.0,
        380.0,
        380.0,
        1077.7205012,
        461.8802148,
        769.80036,
        269.4301253,
        384.900179,
        760.0,
        380.0,
        760.0,
        384.900179,
        230.9401074,
        170.79945452,
        3402.24266,
        3402.24266,
    ]
)
| 1,511 | 28.647059 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/Settings_case14_redisp.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
This file contains the settings (path to the case file, chronics converter etc.) that allows to make a simple
environment with a powergrid of only 5 buses, 3 laods, 2 generators and 8 powerlines.
"""
import os
import numpy as np
from pathlib import Path
file_dir = Path(__file__).parent.absolute()
grid2op_root = file_dir.parent.absolute()
grid2op_root = str(grid2op_root)
dat_dir = os.path.abspath(os.path.join(grid2op_root, "data"))
case_dir = "rte_case14_redisp"
grid_file = "grid.json"
case14_redisp_CASEFILE = os.path.join(dat_dir, case_dir, grid_file)
case14_redisp_CHRONICSPATH = os.path.join(dat_dir, case_dir, "chronics")
case14_redisp_TH_LIM = np.array(
[
3.84900179e02,
3.84900179e02,
2.28997102e05,
2.28997102e05,
2.28997102e05,
1.52664735e04,
2.28997102e05,
3.84900179e02,
3.84900179e02,
1.83285800e02,
3.84900179e02,
3.84900179e02,
2.28997102e05,
2.28997102e05,
6.93930612e04,
3.84900179e02,
3.84900179e02,
2.40562612e02,
3.40224266e03,
3.40224266e03,
]
)
| 1,608 | 30.54902 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/Settings_case14_test.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
This file contains the settings (path to the case file, chronics converter etc.) that allows to make a simple
environment with a powergrid of only 5 buses, 3 laods, 2 generators and 8 powerlines.
"""
import os
import numpy as np
from pathlib import Path
file_dir = Path(__file__).parent.absolute()
grid2op_root = file_dir.parent.absolute()
grid2op_root = str(grid2op_root)
dat_dir = os.path.abspath(os.path.join(grid2op_root, "data"))
case_dir = "rte_case14_test"
grid_file = "case14_test.json"
case14_test_CASEFILE = os.path.join(dat_dir, case_dir, grid_file)
case14_test_CHRONICSPATH = os.path.join(dat_dir, case_dir, "chronics")
case14_test_TH_LIM = np.array(
[
352.8251645,
352.8251645,
183197.68156979,
183197.68156979,
183197.68156979,
12213.17877132,
183197.68156979,
352.8251645,
352.8251645,
352.8251645,
352.8251645,
352.8251645,
183197.68156979,
183197.68156979,
183197.68156979,
352.8251645,
352.8251645,
352.8251645,
2721.79412618,
2721.79412618,
]
)
| 1,602 | 30.431373 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/__init__.py | __all__ = [
"handlers",
"ChronicsHandler",
"GridValue",
"ChangeNothing",
"Multifolder",
"MultifolderWithCache",
"GridStateFromFile",
"GridStateFromFileWithForecasts",
"GridStateFromFileWithForecastsWithMaintenance",
"GridStateFromFileWithForecastsWithoutMaintenance",
"FromNPY",
"FromChronix2grid",
"FromHandlers"
]
from grid2op.Chronics.chronicsHandler import ChronicsHandler
from grid2op.Chronics.changeNothing import ChangeNothing
from grid2op.Chronics.gridValue import GridValue
from grid2op.Chronics.gridStateFromFile import GridStateFromFile
from grid2op.Chronics.gridStateFromFileWithForecasts import (
GridStateFromFileWithForecasts,
)
from grid2op.Chronics.multiFolder import Multifolder
from grid2op.Chronics.GSFFWFWM import GridStateFromFileWithForecastsWithMaintenance
from grid2op.Chronics.fromFileWithoutMaintenance import (
GridStateFromFileWithForecastsWithoutMaintenance,
)
from grid2op.Chronics.multifolderWithCache import MultifolderWithCache
from grid2op.Chronics.fromNPY import FromNPY
from grid2op.Chronics.fromChronix2grid import FromChronix2grid
from grid2op.Chronics.time_series_from_handlers import FromHandlers
| 1,199 | 35.363636 | 83 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/changeNothing.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from datetime import datetime, timedelta
from grid2op.dtypes import dt_int
from grid2op.Chronics.gridValue import GridValue
class ChangeNothing(GridValue):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Objects of this class are built by the environment at its creation: do
        not instantiate it manually. The *class* itself can be passed to
        ``grid2op.make`` (see the examples below), e.g. to write a custom
        "chronics" class.

    Simplest possible implementation of :class:`GridValue`: the grid is never
    modified. Each call to :func:`ChangeNothing.load_next` only advances
    :attr:`GridValue.current_datetime` by one :attr:`GridValue.time_interval`
    and increments the iteration counter; no injection, maintenance or hazard
    is ever produced.

    Examples
    --------
    Usage example, for what you don't really have to do:

    .. code-block:: python

        import grid2op
        from grid2op.Chronics import ChangeNothing

        env_name = "l2rpn_case14_sandbox"  # or any other name

        # env = grid2op.make(env_name, data_feeding_kwargs={"gridvalueClass": ChangeNothing})
        env = grid2op.make(env_name, chronics_class=ChangeNothing)

    It can also be used with the "blank" environment:

    .. code-block:: python

        import grid2op
        from grid2op.Chronics import ChangeNothing
        env = grid2op.make("blank",
                           test=True,
                           grid_path=EXAMPLE_CASEFILE,
                           chronics_class=ChangeNothing,
                           action_class=TopologyAndDispatchAction)

    """

    MULTI_CHRONICS = False

    def __init__(
        self,
        time_interval=timedelta(minutes=5),
        max_iter=-1,
        start_datetime=datetime(year=2019, month=1, day=1),
        chunk_size=None,
        **kwargs
    ):
        GridValue.__init__(
            self,
            time_interval=time_interval,
            max_iter=max_iter,
            start_datetime=start_datetime,
            chunk_size=chunk_size,
        )
        # grid dimensions and the constant "no event" vectors are only known
        # once :func:`ChangeNothing.initialize` has been called
        self.n_gen = None
        self.n_load = None
        self.n_line = None
        self.maintenance_time = None
        self.maintenance_duration = None
        self.hazard_duration = None

    def initialize(
        self,
        order_backend_loads,
        order_backend_prods,
        order_backend_lines,
        order_backend_subs,
        names_chronics_to_backend=None,
    ):
        """Record the grid dimensions and build the constant "no event" vectors."""
        self.n_gen = len(order_backend_prods)
        self.n_load = len(order_backend_loads)
        self.n_line = len(order_backend_lines)
        self.curr_iter = 0
        # -1 encodes "no next maintenance"; durations of 0 encode "no event"
        self.maintenance_time = np.full(self.n_line, fill_value=-1, dtype=dt_int)
        self.maintenance_duration = np.full(self.n_line, fill_value=0, dtype=dt_int)
        self.hazard_duration = np.full(self.n_line, fill_value=0, dtype=dt_int)

    def load_next(self):
        """Advance time by one step and return an empty grid modification."""
        self.current_datetime += self.time_interval
        self.curr_iter += 1
        return (
            self.current_datetime,
            {},  # no injection change at all
            self.maintenance_time,
            self.maintenance_duration,
            self.hazard_duration,
            None,  # no generator voltage setpoint either
        )

    def check_validity(self, backend):
        """Doing nothing is always valid, whatever the backend."""
        return True

    def next_chronics(self):
        """Restart the (empty) time series from its beginning."""
        self.current_datetime = self.start_datetime
        self.curr_iter = 0
| 3,792 | 30.87395 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/chronicsHandler.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import os
import numpy as np
from datetime import timedelta
from grid2op.dtypes import dt_int
from grid2op.Exceptions import Grid2OpException, ChronicsError
from grid2op.Space import RandomObject
from grid2op.Chronics.gridValue import GridValue
from grid2op.Chronics.changeNothing import ChangeNothing
class ChronicsHandler(RandomObject):
    """
    Represents a Chronics handler that returns a grid state.

    As stated previously, it is not recommended to make directly an object from the class :class:`GridValue`. This
    utility will ensure that the creation of such objects are properly made.

    The types of chronics used can be specified in the :attr:`ChronicsHandler.chronicsClass` attribute.

    Attributes
    ----------
    chronicsClass: ``type``, optional
        Type of chronics that will be loaded and generated. Default is :class:`ChangeNothing` (*NB* the class, and not
        an object / instance of the class should be send here.) This should be a derived class from :class:`GridValue`.

    kwargs: ``dict``, optional
        key word arguments that will be used to build new chronics.

    max_iter: ``int``, optional
        Maximum number of iterations per episode.

    real_data: :class:`GridValue`
        An instance of type given by :attr:`ChronicsHandler.chronicsClass`.

    path: ``str`` (or None)
        path where the data are located.

    """

    def __init__(
        self,
        chronicsClass=ChangeNothing,
        time_interval=timedelta(minutes=5),
        max_iter=-1,
        **kwargs
    ):
        RandomObject.__init__(self)
        # "chronicsClass" must be the class itself, not an instance of it
        if not isinstance(chronicsClass, type):
            raise Grid2OpException(
                'Parameter "chronicsClass" used to build the ChronicsHandler should be a type '
                "(a class) and not an object (an instance of a class). It is currently "
                '"{}"'.format(type(chronicsClass))
            )
        if not issubclass(chronicsClass, GridValue):
            raise ChronicsError(
                'ChronicsHandler: the "chronicsClass" argument should be a derivative of the '
                '"Grid2Op.GridValue" type and not {}.'.format(type(chronicsClass))
            )
        self.chronicsClass = chronicsClass
        self._kwargs = kwargs
        self.max_iter = max_iter
        self.path = None
        if "path" in kwargs:
            self.path = kwargs["path"]
        self._real_data = None
        try:
            self._real_data = self.chronicsClass(
                time_interval=time_interval, max_iter=self.max_iter, **self.kwargs
            )
        except TypeError as exc_:
            raise ChronicsError(
                "Impossible to build a chronics of type {} with arguments in "
                "{}".format(chronicsClass, self.kwargs)
            ) from exc_

    @property
    def kwargs(self):
        """Keyword arguments used to build the chronics (copy, possibly completed by the underlying data)."""
        res = copy.deepcopy(self._kwargs)
        if self._real_data is not None:
            self._real_data.get_kwargs(res)
        return res

    @kwargs.setter
    def kwargs(self, new_value):
        raise ChronicsError('Impossible to set the "kwargs" attribute')

    @property
    def real_data(self):
        """The underlying :class:`GridValue` instance that actually produces the data."""
        return self._real_data

    def next_time_step(self):
        """
        This method returns the modification of the powergrid at the next time step for the same episode.

        See definition of :func:`GridValue.load_next` for more information about this method.

        """
        res = self._real_data.load_next()
        return res

    def max_episode_duration(self):
        """
        Returns
        -------
        max_duration: ``int``
            The maximum duration of the current episode

        Notes
        -----
        Using this function (which we do not recommend) you will receive "-1" for "infinite duration" otherwise
        you will receive a positive integer

        """
        tmp = self.max_iter
        if tmp == -1:
            # tmp = -1 means "infinite duration" but in this case, i can have a limit
            # due to the data used (especially if read from files)
            tmp = self._real_data.max_timestep()
        else:
            # i can also have a limit on the maximum number of data in the chronics (especially if read from files)
            tmp = min(tmp, self._real_data.max_timestep())
        return tmp

    def get_name(self):
        """
        This method retrieve a unique name that is used to serialize episode data on
        disk.

        See definition of :mod:`EpisodeData` for more information about this method.

        """
        return str(os.path.split(self.get_id())[-1])

    def set_max_iter(self, max_iter: int):
        """
        This function is used to set the maximum number of
        iterations possible before the chronics ends.

        You can reset this by setting it to `-1`.

        Parameters
        ----------
        max_iter: ``int``
            The maximum number of steps that can be done before reaching
            the end of the episode

        Raises
        ------
        Grid2OpException
            If ``max_iter`` is not an ``int`` or is an invalid value (0 or < -1).

        """
        if not isinstance(max_iter, int):
            # NOTE(fix): the previous message here was a sentence copy-pasted from the
            # docstring and did not describe the error at all
            raise Grid2OpException(
                'ChronicsHandler: "max_iter" should be an `int`, you provided an object '
                'of type "{}"'.format(type(max_iter))
            )
        if max_iter == 0 or max_iter < -1:
            # merged the two previously duplicated branches raising the same message
            raise Grid2OpException(
                "The maximum number of iteration should be > 0 (or -1 if you mean "
                '"don\'t limit it")'
            )
        self.max_iter = max_iter
        self._real_data.max_iter = max_iter

    def seed(self, seed):
        """
        Seed the chronics handler and the :class:`GridValue` that is used to generate the chronics.

        Parameters
        ----------
        seed: ``int``
            Set the seed for this instance and for the data it holds

        Returns
        -------
        seed: ``int``
            The seed used for this object

        seed_chronics: ``int``
            The seed used for the real data

        """
        super().seed(seed)
        max_int = np.iinfo(dt_int).max
        # derive a (deterministic) seed for the underlying data from this object's prng
        seed_chronics = self.space_prng.randint(max_int)
        self._real_data.seed(seed_chronics)
        return seed, seed_chronics

    def __getattr__(self, name):
        # delegate any unknown attribute to the underlying GridValue instance
        if name in ["__getstate__", "__setstate__"]:
            # otherwise there is a recursion depth exceeded in multiprocessing
            # https://github.com/matplotlib/matplotlib/issues/7852/
            return object.__getattr__(self, name)
        return getattr(self._real_data, name)
| 7,166 | 33.291866 | 119 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/fromChronix2grid.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import json
from typing import Optional, Union
import numpy as np
import hashlib
from datetime import datetime, timedelta
import grid2op
from grid2op.dtypes import dt_bool, dt_int
from grid2op.Chronics import GridValue, ChangeNothing
from grid2op.Chronics.GSFFWFWM import GridStateFromFileWithForecastsWithMaintenance
from grid2op.Chronics.fromNPY import FromNPY
from grid2op.Exceptions import ChronicsError
class FromChronix2grid(GridValue):
    """This class of "chronix" allows to use the `chronix2grid` package to generate data "on the fly" rather
    than having to read it from the hard drive.

    .. versionadded:: 1.6.6

    .. warning::
        It requires the `chronix2grid` package to be installed, please install it with :

        `pip install grid2op[chronix2grid]`

        And visit https://github.com/bdonnot/chronix2grid#installation for more installation details (in particular
        you need the coinor-cbc software on your machine)

    As of writing, this class is really slow compared to reading data from the hard drive. Indeed to generate a week of data
    at the 5 mins time resolution (*ie* to generate the data for a "standard" episode) it takes roughly 40/45 s for
    the `l2rpn_wcci_2022` environment (based on the IEEE 118).

    Notes
    ------
    It requires lots of extra metadata to use this class. As of writing, only the `l2rpn_wcci_2022` is compatible with it.

    Examples
    ----------
    To use it (though we do not recommend to use it) you can do:

    .. code-block:: python

        import grid2op
        from grid2op.Chronics import FromChronix2grid
        env_nm = "l2rpn_wcci_2022"  # only compatible environment at time of writing
        env = grid2op.make(env_nm,
                           chronics_class=FromChronix2grid,
                           data_feeding_kwargs={"env_path": os.path.join(grid2op.get_current_local_dir(), env_nm),
                                                "with_maintenance": True,  # whether to include maintenance (optional)
                                                "max_iter": 2 * 288,  # duration (in number of steps) of the data generated (optional)
                                                }
                           )

    Before using it, please consult the :ref:`generate_data_flow` section of the document, that provides a much faster way
    to do this.

    """

    # files that must be present in the environment directory for chronix2grid
    # to be able to generate data
    REQUIRED_FILES = ["loads_charac.csv", "params.json", "params_load.json",
                      "params_loss.json", "params_opf.json", "params_res.json",
                      "prods_charac.csv", "scenario_params.json"]
    MULTI_CHRONICS = False

    def __init__(self,
                 env_path: os.PathLike,
                 with_maintenance: bool,
                 with_loss: bool = True,
                 time_interval: timedelta = timedelta(minutes=5),
                 max_iter: int = 2016,  # set to one week (default)
                 start_datetime: datetime = datetime(year=2019, month=1, day=1),
                 chunk_size: Optional[int] = None,
                 **kwargs):
        # fail fast if the environment does not ship the metadata chronix2grid needs
        for el in type(self).REQUIRED_FILES:
            tmp_ = os.path.join(env_path, el)
            if not (os.path.exists(tmp_) and os.path.isfile(tmp_)):
                raise ChronicsError(f"The file \"{el}\" is required but is missing from your environment. "
                                    f"Check data located at \"env_path={env_path}\" and make sure you "
                                    f"can use this environment to generate data.")
        GridValue.__init__(
            self,
            time_interval=time_interval,
            max_iter=max_iter,
            start_datetime=start_datetime,
            chunk_size=chunk_size,
        )
        import grid2op
        # internal "do nothing" environment used by chronix2grid as a grid description
        self.env = grid2op.make(env_path,
                                _add_to_name="_fromChronix2grid",
                                chronics_class=ChangeNothing,
                                data_feeding_kwargs={"max_iter": 5}  # otherwise for some opponent I might run into trouble
                                )
        # required parameters
        with open(os.path.join(self.env.get_path_env(), "scenario_params.json"), "r", encoding="utf-8") as f:
            self.dict_ref = json.load(f)
        self.dt = self.dict_ref["dt"]
        # list of possible starting dates; one is drawn at random for each new episode
        self.li_months = self.dict_ref["all_dates"]
        self.current_index = 0
        # generated time series, filled by :func:`FromChronix2grid.next_chronics`
        self._load_p = None
        self._load_q = None
        self._gen_p = None
        self._gen_v = None
        self.has_maintenance = with_maintenance
        if with_maintenance:
            # initialize the parameters from the json
            # TODO copy paste from GridStateFromFileWithForecastWithMaintenance
            with open(
                os.path.join(env_path, "maintenance_meta.json"), "r", encoding="utf-8"
            ) as f:
                dict_ = json.load(f)
            self.maintenance_starting_hour = dict_["maintenance_starting_hour"]
            self.maintenance_ending_hour = dict_["maintenance_ending_hour"]
            self.line_to_maintenance = set(dict_["line_to_maintenance"])
            # frequencies of maintenance
            self.daily_proba_per_month_maintenance = dict_[
                "daily_proba_per_month_maintenance"
            ]
            self.max_daily_number_per_month_maintenance = dict_[
                "max_daily_number_per_month_maintenance"
            ]
        self.maintenance = None  # TODO
        self.maintenance_time = None
        self.maintenance_duration = None
        self.maintenance_time_nomaint = None
        self.maintenance_duration_nomaint = None
        self.hazards = None  # TODO
        self.has_hazards = False  # TODO
        self.hazard_duration_nohaz = None
        self._forecasts = None  # TODO
        # episode identity: a (seed, starting date) pair, see get_id / tell_id
        self._init_datetime = None
        self._seed_used_for_chronix2grid = None
        self._reuse_seed = False
        self._with_loss = with_loss

    def _generate_one_episode(self, *args, **kwargs):
        """Thin wrapper around ``chronix2grid.grid2op_utils.generate_one_episode``."""
        # here to prevent circular import
        try:
            from chronix2grid.grid2op_utils import generate_one_episode
        except ImportError as exc_:
            raise ChronicsError(
                f"Chronix2grid package is not installed. Install it with `pip install grid2op[chronix2grid]`"
                f"Please visit https://github.com/bdonnot/chronix2grid#installation "
                f"for further install instructions."
            ) from exc_
        return generate_one_episode(*args, **kwargs)

    def check_validity(
        self, backend: Optional["grid2op.Backend.backend.Backend"]
    ) -> None:
        """Currently a no-op: the generated data is not checked against the backend."""
        pass
        # TODO also do some checks here !

    def initialize(
        self,
        order_backend_loads,
        order_backend_prods,
        order_backend_lines,
        order_backend_subs,
        names_chronics_to_backend=None,
    ):
        """Record the grid dimensions, build "no event" vectors and generate a first episode."""
        self.n_line = len(order_backend_lines)
        # -1 encodes "no next maintenance"; 0 durations encode "no event"
        self.maintenance_time_nomaint = np.zeros(shape=(self.n_line,), dtype=dt_int) - 1
        self.maintenance_duration_nomaint = np.zeros(shape=(self.n_line,), dtype=dt_int)
        self.hazard_duration_nohaz = np.zeros(shape=(self.n_line,), dtype=dt_int)
        self.next_chronics()
        # TODO perform the checks: number of loads, name of the loads etc.

    def get_id(self) -> str:
        """Unique id of the current episode: "<seed>@<starting date>"."""
        # get the seed
        return f"{self._seed_used_for_chronix2grid}@{self._init_datetime}"

    def tell_id(self, id_, previous=False):
        """Force the next episode to reuse the seed / starting date encoded in ``id_`` (see get_id)."""
        _seed_used_for_chronix2grid, datetime_ = id_.split("@")
        self._seed_used_for_chronix2grid = int(_seed_used_for_chronix2grid)
        self._init_datetime = datetime_
        self._reuse_seed = True

    def load_next(self):  # TODO refacto with fromNPY
        """Return the grid modification for the next step of the generated episode."""
        self.current_index += 1
        if self.current_index >= self._load_p.shape[0]:
            # no more generated data available
            raise StopIteration
        res = {}
        prod_v = FromNPY._create_dict_inj(res, self)
        maintenance_time, maintenance_duration, hazard_duration = FromNPY._create_dict_maintenance_hazards(res, self)
        self.current_datetime += self.time_interval
        self.curr_iter += 1
        return (
            self.current_datetime,
            res,
            maintenance_time,
            maintenance_duration,
            hazard_duration,
            prod_v,
        )

    def max_timestep(self):
        """Maximum number of steps of an episode (the requested generation length)."""
        return self._max_iter

    def forecasts(self):
        """
        By default, forecasts are only made 1 step ahead.

        We could change that. Do not hesitate to make a feature request
        (https://github.com/rte-france/Grid2Op/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=) if that is necessary for you.
        """
        # TODO implement that and maybe refacto with fromNPY ?
        if self._forecasts is None:
            return []
        self._forecasts.current_index = self.current_index - 1
        dt, dict_, *rest = self._forecasts.load_next()
        return [(self.current_datetime + self.time_interval, dict_)]

    def done(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Compare to :func:`GridValue.done` an episode can be over for 2 main reasons:

          - :attr:`GridValue.max_iter` has been reached
          - There are no data in the numpy array.
          - i_end has been reached

        The episode is done if one of the above condition is met.

        Returns
        -------
        res: ``bool``
            Whether the episode has reached its end or not.

        """
        res = False
        if self.current_index >= self._load_p.shape[0]:
            res = True
        elif self._max_iter > 0:
            if self.curr_iter > self._max_iter:
                res = True
        return res

    def next_chronics(self):
        """Generate (with chronix2grid) the data of the next episode."""
        # generate the next possible chronics
        if not self._reuse_seed:
            # fresh episode: draw a starting date and a generation seed
            self._init_datetime = self.space_prng.choice(self.li_months, 1)[0]
            self._seed_used_for_chronix2grid = self.space_prng.randint(np.iinfo(dt_int).max)
        self._reuse_seed = False
        self.current_datetime = datetime.strptime(self._init_datetime, "%Y-%m-%d")
        self.curr_iter = 0
        self.current_index = self.curr_iter
        res_gen = self._generate_one_episode(self.env, self.dict_ref, self.dt, self._init_datetime,
                                             seed=self._seed_used_for_chronix2grid,
                                             with_loss=self._with_loss,
                                             nb_steps=self._max_iter)
        # generated series: loads, generation, and their forecasted counterparts
        self._load_p = res_gen[0].values
        self._load_p_forecasted = res_gen[1].values
        self._load_q = res_gen[2].values
        self._load_q_forecasted = res_gen[3].values
        self._gen_p = res_gen[4].values
        self._gen_p_forecasted = res_gen[5].values
        if self.has_maintenance:
            # draw the maintenance schedule for this episode
            self.maintenance = GridStateFromFileWithForecastsWithMaintenance._generate_matenance_static(
                self.env.name_line,
                self._load_p.shape[0],
                self.line_to_maintenance,
                self.time_interval,
                self.current_datetime,
                self.maintenance_starting_hour,
                self.maintenance_ending_hour,
                self.daily_proba_per_month_maintenance,
                self.max_daily_number_per_month_maintenance,
                self.space_prng
            )

            ##########
            # same as before in GridStateFromFileWithForecasts
            GridStateFromFileWithForecastsWithMaintenance._fix_maintenance_format(self)
        self.check_validity(backend=None)
| 12,474 | 38.983974 | 153 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/fromFileWithoutMaintenance.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import json
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from grid2op.dtypes import dt_bool, dt_int
from grid2op.Exceptions import Grid2OpException
from grid2op.Chronics.gridStateFromFileWithForecasts import (
GridStateFromFileWithForecasts,
)
class GridStateFromFileWithForecastsWithoutMaintenance(GridStateFromFileWithForecasts):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        This class is mainly intended for debugging, and it is not well tested.

    Behaves exactly like :class:`GridStateFromFileWithForecasts`, except that
    every maintenance and hazard found in the data is discarded before being
    handed to the environment.

    Examples
    ---------
    You can use it as follow:

    .. code-block:: python

        import grid2op
        from grid2op.Chronics import GridStateFromFileWithForecastsWithoutMaintenance

        env= make(ENV_NAME,
                  data_feeding_kwargs={"gridvalueClass": GridStateFromFileWithForecastsWithoutMaintenance},
                  )
        # even if there are maintenance in the environment, they will not be used.

    """

    MULTI_CHRONICS = False

    def __init__(
        self,
        path,
        sep=";",
        time_interval=timedelta(minutes=5),
        max_iter=-1,
        chunk_size=None,
        h_forecast=(5, ),
    ):
        super().__init__(
            path,
            sep=sep,
            time_interval=time_interval,
            max_iter=max_iter,
            chunk_size=chunk_size,
            h_forecast=h_forecast,
        )
        # grid dimensions and the constant "no event" vectors are only known
        # once :func:`initialize` has been called
        self.n_gen = None
        self.n_load = None
        self.n_line = None
        self.maintenance_time_no_maint = None
        self.maintenance_duration_no_maint = None
        self.hazard_duration_no_haz = None

    def initialize(
        self,
        order_backend_loads,
        order_backend_prods,
        order_backend_lines,
        order_backend_subs,
        names_chronics_to_backend=None,
    ):
        """Initialize as the parent class does, then build the "no event" vectors."""
        self.n_gen = len(order_backend_prods)
        self.n_load = len(order_backend_loads)
        self.n_line = len(order_backend_lines)
        super().initialize(
            order_backend_loads,
            order_backend_prods,
            order_backend_lines,
            order_backend_subs,
            names_chronics_to_backend=names_chronics_to_backend,
        )
        # -1 encodes "no next maintenance"; 0 durations encode "no event"
        self.maintenance_time_no_maint = np.full(self.n_line, fill_value=-1, dtype=dt_int)
        self.maintenance_duration_no_maint = np.full(self.n_line, fill_value=0, dtype=dt_int)
        self.hazard_duration_no_haz = np.full(self.n_line, fill_value=0, dtype=dt_int)

    def load_next(self):
        """Read the next step normally, then strip any maintenance / hazard from it."""
        current_datetime, res, _, _, _, prod_v = super().load_next()
        # drop the keys if present; a no-op otherwise
        res.pop("maintenance", None)
        res.pop("hazards", None)
        return (
            current_datetime,
            res,
            self.maintenance_time_no_maint,
            self.maintenance_duration_no_maint,
            self.hazard_duration_no_haz,
            prod_v,
        )
| 3,737 | 28.203125 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/fromNPY.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from typing import Optional, Union
import numpy as np
import hashlib
from datetime import datetime, timedelta
import grid2op
from grid2op.dtypes import dt_int
from grid2op.Chronics.gridValue import GridValue
from grid2op.Exceptions import ChronicsError
class FromNPY(GridValue):
"""
This class allows to generate some chronics compatible with grid2op if the data are provided in numpy format.
It also enables the use of the starting the chronics at different time than the original time and to end it before the end
of the chronics.
It is then much more flexible in its usage than the defaults chronics. But it is also much more error prone. For example, it does not check
the order of the loads / generators that you provide.
.. warning::
It assume the order of the elements are consistent with the powergrid backend ! It will not attempt to reorder the columns of the dataset
.. note::
The effect if "i_start" and "i_end" are persistant. If you set it once, it affects the object even after "env.reset()" is called.
If you want to modify them, you need to use the :func:`FromNPY.chronics.change_i_start` and :func:`FromNPY.chronics.change_i_end` methods
(and call `env.reset()`!)
TODO implement methods to change the loads / production "based on sampling" (online sampling instead of only reading data)
TODO implement the possibility to simulate maintenance / hazards "on the fly"
TODO implement hazards !
Examples
--------
Usage example, for what you don't really have to do:
.. code-block:: python
import grid2op
from grid2op.Chronics import FromNPY
# first retrieve the data that you want, the easiest wayt is to create an environment and read the data from it.
env_name = "l2rpn_case14_sandbox" # for example
env_ref = grid2op.make(env_name)
# retrieve the data
load_p = 1.0 * env_ref.chronics_handler.real_data.data.load_p
load_q = 1.0 * env_ref.chronics_handler.real_data.data.load_q
prod_p = 1.0 * env_ref.chronics_handler.real_data.data.prod_p
prod_v = 1.0 * env_ref.chronics_handler.real_data.data.prod_v
# now create an environment with these chronics:
env = grid2op.make(env_name,
chronics_class=FromNPY,
data_feeding_kwargs={"i_start": 5, # start at the "step" 5 NB first step is first observation, available with `obs = env.reset()`
"i_end": 18, # end index: data after that will not be considered (excluded as per python convention)
"load_p": load_p,
"load_q": load_q,
"prod_p": prod_p,
"prod_v": prod_v
# other parameters includes
# maintenance
# load_p_forecast
# load_q_forecast
# prod_p_forecast
# prod_v_forecast
})
# you can use env normally, including in runners
obs = env.reset()
# obs.load_p is load_p[5] (because you set "i_start" = 5, by default it's 0)
You can, after creation, change the data with:
.. code-block:: python
# create env as above
# retrieve some new values that you would like
new_load_p = ...
new_load_q = ...
new_prod_p = ...
new_prod_v = ...
# change the values
env.chronics_handler.real_data.change_chronics(new_load_p, new_load_q, new_prod_p, new_prod_v)
obs = env.reset() # mandatory if you want the change to be taken into account
# obs.load_p is new_load_p[5] (or rather load_p[env.chronics_handler.real_data._i_start])
.. seealso::
More usage examples in:
- :func:`FromNPY.change_chronics`
- :func:`FromNPY.change_forecasts`
- :func:`FromNPY.change_i_start`
- :func:`FromNPY.change_i_end`
Attributes
----------
TODO
"""
MULTI_CHRONICS = False
def __init__(
self,
load_p: np.ndarray,
load_q: np.ndarray,
prod_p: np.ndarray,
prod_v: Optional[np.ndarray] = None,
hazards: Optional[np.ndarray] = None,
maintenance: Optional[np.ndarray] = None,
load_p_forecast: Optional[np.ndarray] = None, # TODO forecasts !!
load_q_forecast: Optional[np.ndarray] = None,
prod_p_forecast: Optional[np.ndarray] = None,
prod_v_forecast: Optional[np.ndarray] = None,
time_interval: timedelta = timedelta(minutes=5),
max_iter: int = -1,
start_datetime: datetime = datetime(year=2019, month=1, day=1),
chunk_size: Optional[int] = None,
i_start: Optional[int] = None,
i_end: Optional[int] = None, # excluded, as always in python
**kwargs
):
GridValue.__init__(
self,
time_interval=time_interval,
max_iter=max_iter,
start_datetime=start_datetime,
chunk_size=chunk_size,
)
self._i_start: int = i_start if i_start is not None else 0
self.__new_istart: Optional[int] = i_start
self.n_gen: int = prod_p.shape[1]
self.n_load: int = load_p.shape[1]
self.n_line: Union[int, None] = None
self._load_p: np.ndarray = 1.0 * load_p
self._load_q: np.ndarray = 1.0 * load_q
self._prod_p: np.ndarray = 1.0 * prod_p
self._prod_v = None
if prod_v is not None:
self._prod_v = 1.0 * prod_v
self.__new_load_p: Optional[np.ndarray] = None
self.__new_prod_p: Optional[np.ndarray] = None
self.__new_prod_v: Optional[np.ndarray] = None
self.__new_load_q: Optional[np.ndarray] = None
self._i_end: int = i_end if i_end is not None else load_p.shape[0]
self.__new_iend: Optional[int] = i_end
self.has_maintenance = False
self.maintenance = None
self.maintenance_duration = None
self.maintenance_time = None
if maintenance is not None:
self.has_maintenance = True
self.n_line = maintenance.shape[1]
assert load_p.shape[0] == maintenance.shape[0]
self.maintenance = maintenance # TODO copy
self.maintenance_time = (
np.zeros(shape=(self.maintenance.shape[0], self.n_line), dtype=dt_int)
- 1
)
self.maintenance_duration = np.zeros(
shape=(self.maintenance.shape[0], self.n_line), dtype=dt_int
)
for line_id in range(self.n_line):
self.maintenance_time[:, line_id] = self.get_maintenance_time_1d(
self.maintenance[:, line_id]
)
self.maintenance_duration[
:, line_id
] = self.get_maintenance_duration_1d(self.maintenance[:, line_id])
self.has_hazards = False
self.hazards = None
self.hazard_duration = None
if hazards is not None:
raise ChronicsError(
"This feature is not available at the moment. Fill a github issue at "
"https://github.com/rte-france/Grid2Op/issues/new?assignees=&labels=enhancement&template=feature_request.md&title="
)
# self.has_hazards = True
# if self.n_line is None:
# self.n_line = hazards.shape[1]
# else:
# assert self.n_line == hazards.shape[1]
# assert load_p.shape[0] == hazards.shape[0]
# self.hazards = hazards # TODO copy !
# self.hazard_duration = np.zeros(shape=(self.hazards.shape[0], self.n_line), dtype=dt_int)
# for line_id in range(self.n_line):
# self.hazard_duration[:, line_id] = self.get_hazard_duration_1d(self.hazards[:, line_id])
self._forecasts = None
if load_p_forecast is not None:
assert load_q_forecast is not None
assert prod_p_forecast is not None
self._forecasts = FromNPY(
load_p=load_p_forecast,
load_q=load_q_forecast,
prod_p=prod_p_forecast,
prod_v=prod_v_forecast,
load_p_forecast=None,
load_q_forecast=None,
prod_p_forecast=None,
prod_v_forecast=None,
i_start=i_start,
i_end=i_end,
)
elif load_q_forecast is not None:
raise ChronicsError(
"if load_q_forecast is not None, then load_p_forecast should not be None"
)
elif prod_p_forecast is not None:
raise ChronicsError(
"if prod_p_forecast is not None, then load_p_forecast should not be None"
)
    def initialize(
        self,
        order_backend_loads,
        order_backend_prods,
        order_backend_lines,
        order_backend_subs,
        names_chronics_to_backend=None,
    ):
        """
        INTERNAL: called by the environment at its creation.

        Checks that the arrays given at construction time are consistent with
        the backend (number of generators / loads / powerlines), initializes
        the "no maintenance" / "no hazard" placeholder vectors and resets the
        reading position.

        Parameters
        ----------
        order_backend_loads:
            Names of the loads, in the order used by the backend.
        order_backend_prods:
            Names of the generators, in the order used by the backend.
        order_backend_lines:
            Names of the powerlines, in the order used by the backend.
        order_backend_subs:
            Names of the substations, in the order used by the backend.
        names_chronics_to_backend:
            Optional mapping between names used in the data and names used in
            the backend (only forwarded to the forecast data, if any).
        """
        assert len(order_backend_prods) == self.n_gen
        assert len(order_backend_loads) == self.n_load
        # n_line may still be unknown at this point (e.g. no maintenance array
        # was provided at construction time)
        if self.n_line is None:
            self.n_line = len(order_backend_lines)
        else:
            assert len(order_backend_lines) == self.n_line
        if self._forecasts is not None:
            # the object holding the forecast data is initialized the same way
            self._forecasts.initialize(
                order_backend_loads,
                order_backend_prods,
                order_backend_lines,
                order_backend_subs,
                names_chronics_to_backend,
            )
        # placeholder vectors returned when no maintenance / hazard is present
        # at a given step (-1 is the convention for "no maintenance scheduled")
        self.maintenance_time_nomaint = np.zeros(shape=(self.n_line,), dtype=dt_int) - 1
        self.maintenance_duration_nomaint = np.zeros(shape=(self.n_line,), dtype=dt_int)
        self.hazard_duration_nohaz = np.zeros(shape=(self.n_line,), dtype=dt_int)
        self.curr_iter = 0
        # -1 because load_next() increments the index *before* reading
        self.current_index = self._i_start - 1
def _get_long_hash(self, hash_: hashlib.blake2b = None):
# get the "long hash" from blake2b
if hash_ is None:
hash_ = (
hashlib.blake2b()
) # should be faster than md5 ! (and safer, but we only care about speed here)
hash_.update(self._load_p.tobytes())
hash_.update(self._load_q.tobytes())
hash_.update(self._prod_p.tobytes())
if self._prod_v is not None:
hash_.update(self._prod_v.tobytes())
if self.maintenance is not None:
hash_.update(self.maintenance.tobytes())
if self.hazards is not None:
hash_.update(self.hazards.tobytes())
if self._forecasts:
self._forecasts._get_long_hash(hash_)
return hash_.digest()
def get_id(self) -> str:
"""
To return a unique ID of the chronics, we use a hash function (black2b), but it outputs a name too big (64 characters or so).
So we hash it again with md5 to get a reasonable length id (32 characters)
Returns:
str: the hash of the arrays (load_p, load_q, etc.) in the chronics
"""
long_hash_byte = self._get_long_hash()
# now shorten it with md5
short_hash = hashlib.md5(long_hash_byte)
return short_hash.hexdigest()
@staticmethod
def _create_dict_inj(res, obj_with_inj_data):
dict_ = {}
prod_v = None
if obj_with_inj_data._load_p is not None:
dict_["load_p"] = 1.0 * obj_with_inj_data._load_p[obj_with_inj_data.current_index, :]
if obj_with_inj_data._load_q is not None:
dict_["load_q"] = 1.0 * obj_with_inj_data._load_q[obj_with_inj_data.current_index, :]
array_gen_p = obj_with_inj_data._gen_p if hasattr(obj_with_inj_data, "_gen_p") else obj_with_inj_data._prod_p
if array_gen_p is not None:
dict_["prod_p"] = 1.0 * array_gen_p[obj_with_inj_data.current_index, :]
array_gen_v = obj_with_inj_data._gen_v if hasattr(obj_with_inj_data, "_gen_v") else obj_with_inj_data._prod_v
if array_gen_v is not None:
prod_v = 1.0 * array_gen_v[obj_with_inj_data.current_index, :]
if dict_:
res["injection"] = dict_
return prod_v
@staticmethod
def _create_dict_maintenance_hazards(res, obj_with_inj_data):
if obj_with_inj_data.maintenance is not None and obj_with_inj_data.has_maintenance:
res["maintenance"] = obj_with_inj_data.maintenance[obj_with_inj_data.current_index, :]
if obj_with_inj_data.hazards is not None and obj_with_inj_data.has_hazards:
res["hazards"] = obj_with_inj_data.hazards[obj_with_inj_data.current_index, :]
if (
obj_with_inj_data.maintenance_time is not None
and obj_with_inj_data.maintenance_duration is not None
and obj_with_inj_data.has_maintenance
):
maintenance_time = dt_int(1 * obj_with_inj_data.maintenance_time[obj_with_inj_data.current_index, :])
maintenance_duration = dt_int(
1 * obj_with_inj_data.maintenance_duration[obj_with_inj_data.current_index, :]
)
else:
maintenance_time = obj_with_inj_data.maintenance_time_nomaint
maintenance_duration = obj_with_inj_data.maintenance_duration_nomaint
if obj_with_inj_data.hazard_duration is not None and obj_with_inj_data.has_hazards:
hazard_duration = 1 * obj_with_inj_data.hazard_duration[obj_with_inj_data.current_index, :]
else:
hazard_duration = obj_with_inj_data.hazard_duration_nohaz
return maintenance_time, maintenance_duration, hazard_duration
    def load_next(self):
        """
        INTERNAL: advance one step in the data and return it.

        Increments ``current_index`` first, then reads the data at that index.

        Returns
        -------
        tuple:
            ``(current_datetime, injection dict, maintenance_time,
            maintenance_duration, hazard_duration, prod_v)``

        Raises
        ------
        StopIteration
            When going past ``_i_end`` or past the available rows of data.
        """
        self.current_index += 1
        # stop when past the requested end, or past the available data
        if (
            self.current_index > self._i_end
            or self.current_index >= self._load_p.shape[0]
        ):
            raise StopIteration
        res = {}
        prod_v = FromNPY._create_dict_inj(res, self)
        maintenance_time, maintenance_duration, hazard_duration = FromNPY._create_dict_maintenance_hazards(res, self)
        self.current_datetime += self.time_interval
        self.curr_iter += 1
        return (
            self.current_datetime,
            res,
            maintenance_time,
            maintenance_duration,
            hazard_duration,
            prod_v,
        )
    def check_validity(
        self, backend: Optional["grid2op.Backend.backend.Backend"]
    ) -> None:
        """
        INTERNAL: check that all the arrays have consistent shapes.

        Every temporal array must have the same number of rows as ``_load_p``,
        and the maintenance / hazards related arrays the right number of
        columns (the number of powerlines). The check is applied recursively
        to the forecast data, if any.

        Parameters
        ----------
        backend:
            Only forwarded to the forecasts' own ``check_validity``
            (may be ``None``).
        """
        # TODO raise the proper errors from ChronicsError here rather than AssertError
        assert self._load_p.shape[0] == self._load_q.shape[0]
        assert self._load_p.shape[0] == self._prod_p.shape[0]
        if self._prod_v is not None:
            assert self._load_p.shape[0] == self._prod_v.shape[0]
        if self.hazards is not None:
            assert self.hazards.shape[1] == self.n_line
        if self.maintenance is not None:
            assert self.maintenance.shape[1] == self.n_line
        if self.maintenance_duration is not None:
            assert self.n_line == self.maintenance_duration.shape[1]
        if self.maintenance_time is not None:
            assert self.n_line == self.maintenance_time.shape[1]
        # TODO forecast
        if self._forecasts is not None:
            # forecast data must describe the same grid...
            assert self._forecasts.n_line == self.n_line
            assert self._forecasts.n_gen == self.n_gen
            assert self._forecasts.n_load == self.n_load
            # ...over the same number of steps
            assert self._load_p.shape[0] == self._forecasts._load_p.shape[0]
            assert self._load_q.shape[0] == self._forecasts._load_q.shape[0]
            assert self._prod_p.shape[0] == self._forecasts._prod_p.shape[0]
            if self._prod_v is not None and self._forecasts._prod_v is not None:
                assert self._prod_v.shape[0] == self._forecasts._prod_v.shape[0]
            self._forecasts.check_validity(backend=backend)
    def next_chronics(self):
        """
        INTERNAL: restart the chronics (read it again from the beginning).

        This is where the changes registered through
        :func:`FromNPY.change_chronics`, :func:`FromNPY.change_i_start` and
        :func:`FromNPY.change_i_end` actually take effect: each pending
        ``__new_...`` value, when set, is moved into the corresponding active
        attribute (and the pending slot is cleared).
        """
        # restart the chronics: read it again !
        self.current_datetime = self.start_datetime
        self.curr_iter = 0
        if self.__new_istart is not None:
            self._i_start = self.__new_istart
        else:
            self._i_start = 0
        self.current_index = self._i_start
        if self.__new_load_p is not None:
            self._load_p = self.__new_load_p
            self.__new_load_p = None
        if self.__new_load_q is not None:
            self._load_q = self.__new_load_q
            self.__new_load_q = None
        if self.__new_prod_p is not None:
            self._prod_p = self.__new_prod_p
            self.__new_prod_p = None
        if self.__new_prod_v is not None:
            self._prod_v = self.__new_prod_v
            self.__new_prod_v = None
        # NB: contrary to the data above, i_end is re-applied at every reset
        # (it is not cleared), defaulting to the full length of the new data
        if self.__new_iend is None:
            self._i_end = self._load_p.shape[0]
        else:
            self._i_end = self.__new_iend
        if self._forecasts is not None:
            # update the forecast
            self._forecasts.next_chronics()
        self.check_validity(backend=None)
def done(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Compare to :func:`GridValue.done` an episode can be over for 2 main reasons:
- :attr:`GridValue.max_iter` has been reached
- There are no data in the numpy array.
- i_end has been reached
The episode is done if one of the above condition is met.
Returns
-------
res: ``bool``
Whether the episode has reached its end or not.
"""
res = False
if (
self.current_index >= self._i_end
or self.current_index >= self._load_p.shape[0]
):
res = True
elif self._max_iter > 0:
if self.curr_iter > self._max_iter:
res = True
return res
def forecasts(self):
"""
By default, forecasts are only made 1 step ahead.
We could change that. Do not hesitate to make a feature request
(https://github.com/rte-france/Grid2Op/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=) if that is necessary for you.
"""
if self._forecasts is None:
return []
self._forecasts.current_index = self.current_index - 1
dt, dict_, *rest = self._forecasts.load_next()
return [(self.current_datetime + self.time_interval, dict_)]
    def change_chronics(
        self,
        new_load_p: np.ndarray = None,
        new_load_q: np.ndarray = None,
        new_prod_p: np.ndarray = None,
        new_prod_v: np.ndarray = None,
    ):
        """
        Allows to change the data used by this class.

        .. warning::
            This has an effect only after "env.reset" has been called !

        Args:
            new_load_p (np.ndarray, optional): change the load_p. Defaults to None (= do not change).
            new_load_q (np.ndarray, optional): change the load_q. Defaults to None (= do not change).
            new_prod_p (np.ndarray, optional): change the prod_p. Defaults to None (= do not change).
            new_prod_v (np.ndarray, optional): change the prod_v. Defaults to None (= do not change).

        Examples
        ---------
        .. code-block:: python

            import grid2op
            from grid2op.Chronics import FromNPY

            # create an environment as in this class description (in short: )
            load_p = ...  # find somehow a suitable "load_p" array: rows represent time, columns the individual load
            load_q = ...
            prod_p = ...
            prod_v = ...

            # now create an environment with these chronics:
            env = grid2op.make(env_name,
                               chronics_class=FromNPY,
                               data_feeding_kwargs={"load_p": load_p,
                                                    "load_q": load_q,
                                                    "prod_p": prod_p,
                                                    "prod_v": prod_v}
                               )
            obs = env.reset()  # obs.load_p is load_p[0] (or rather load_p[env.chronics_handler.real_data._i_start])

            new_load_p = ...  # find somehow a new suitable "load_p"
            new_load_q = ...
            new_prod_p = ...
            new_prod_v = ...

            env.chronics_handler.real_data.change_chronics(new_load_p, new_load_q, new_prod_p, new_prod_v)
            # has no effect at this stage

            obs = env.reset()  # now has some effect !
            # obs.load_p is new_load_p[0] (or rather load_p[env.chronics_handler.real_data._i_start])
        """
        # each value is stored in a "pending" attribute, applied at the next
        # reset by next_chronics(); "1.0 *" makes a floating point copy so
        # that later in-place changes made by the caller have no effect here
        if new_load_p is not None:
            self.__new_load_p = 1.0 * new_load_p
        if new_load_q is not None:
            self.__new_load_q = 1.0 * new_load_q
        if new_prod_p is not None:
            self.__new_prod_p = 1.0 * new_prod_p
        if new_prod_v is not None:
            self.__new_prod_v = 1.0 * new_prod_v
def change_forecasts(
self,
new_load_p: np.ndarray = None,
new_load_q: np.ndarray = None,
new_prod_p: np.ndarray = None,
new_prod_v: np.ndarray = None,
):
"""
Allows to change the data used by this class in the "obs.simulate" function.
.. warning::
This has an effect only after "env.reset" has been called !
Args:
new_load_p (np.ndarray, optional): change the load_p_forecast. Defaults to None (= do not change).
new_load_q (np.ndarray, optional): change the load_q_forecast. Defaults to None (= do not change).
new_prod_p (np.ndarray, optional): change the prod_p_forecast. Defaults to None (= do not change).
new_prod_v (np.ndarray, optional): change the prod_v_forecast. Defaults to None (= do not change).
Examples
---------
.. code-block:: python
import grid2op
from grid2op.Chronics import FromNPY
# create an environment as in this class description (in short: )
load_p = ... # find somehow a suitable "load_p" array: rows represent time, columns the individual load
load_q = ...
prod_p = ...
prod_v = ...
load_p_forecast = ...
load_q_forecast = ...
prod_p_forecast = ...
prod_v_forecast = ...
env = grid2op.make(env_name,
chronics_class=FromNPY,
data_feeding_kwargs={"load_p": load_p,
"load_q": load_q,
"prod_p": prod_p,
"prod_v": prod_v,
"load_p_forecast": load_p_forecast
"load_q_forecast": load_q_forecast
"prod_p_forecast": prod_p_forecast
"prod_v_forecast": prod_v_forecast
})
new_load_p_forecast = ... # find somehow a new suitable "load_p"
new_load_q_forecast = ...
new_prod_p_forecast = ...
new_prod_v_forecast = ...
env.chronics_handler.real_data.change_forecasts(new_load_p_forecast, new_load_q_forecast, new_prod_p_forecast, new_prod_v_forecast)
# has no effect at this stage
obs = env.reset() # now has some effect !
sim_o, *_ = obs.simulate() # sim_o.load_p has the values of new_load_p_forecast[0]
"""
if self._forecasts is None:
raise ChronicsError(
"You cannot change the forecast for this chronics are there are no forecasts enabled"
)
self._forecasts.change_chronics(
new_load_p=new_load_p,
new_load_q=new_load_q,
new_prod_p=new_prod_p,
new_prod_v=new_prod_v,
)
def max_timestep(self):
if self._max_iter >= 0:
return min(self._max_iter, self._load_p.shape[0], self._i_end)
return min(self._load_p.shape[0], self._i_end)
    def change_i_start(self, new_i_start: Union[int, None]):
        """
        Allows to change the "i_start".

        .. warning::
            It only has an effect after "env.reset()" is called.

        Args:
            new_i_start: the new starting row in the data arrays, or ``None``
                to go back to the default (start at row 0).

        Examples
        --------
        .. code-block:: python

            import grid2op
            from grid2op.Chronics import FromNPY

            # create an environment as in this class description (in short: )
            load_p = ...  # find somehow a suitable "load_p" array: rows represent time, columns the individual load
            load_q = ...
            prod_p = ...
            prod_v = ...

            # now create an environment with these chronics:
            env = grid2op.make(env_name,
                               chronics_class=FromNPY,
                               data_feeding_kwargs={"load_p": load_p,
                                                    "load_q": load_q,
                                                    "prod_p": prod_p,
                                                    "prod_v": prod_v}
                               )
            obs = env.reset()  # obs.load_p is load_p[0] (or rather load_p[env.chronics_handler.real_data._i_start])

            env.chronics_handler.real_data.change_i_start(10)
            obs = env.reset()  # obs.load_p is load_p[10]
            # indeed `env.chronics_handler.real_data._i_start` has been changed to 10.

            # to undo all changes (and use the defaults) you can:
            # env.chronics_handler.real_data.change_i_start(None)
        """
        # stored as a "pending" value, applied by next_chronics() at reset time
        if new_i_start is not None:
            self.__new_istart = int(new_i_start)
        else:
            self.__new_istart = None
    def change_i_end(self, new_i_end: Union[int, None]):
        """
        Allows to change the "i_end".

        .. warning::
            It only has an effect after "env.reset()" is called.

        Args:
            new_i_end: the new last row (exclusive end) in the data arrays, or
                ``None`` to go back to the default (use all available rows).

        Examples
        --------
        .. code-block:: python

            import grid2op
            from grid2op.Chronics import FromNPY

            # create an environment as in this class description (in short: )
            load_p = ...  # find somehow a suitable "load_p" array: rows represent time, columns the individual load
            load_q = ...
            prod_p = ...
            prod_v = ...

            # now create an environment with these chronics:
            env = grid2op.make(env_name,
                               chronics_class=FromNPY,
                               data_feeding_kwargs={"load_p": load_p,
                                                    "load_q": load_q,
                                                    "prod_p": prod_p,
                                                    "prod_v": prod_v}
                               )
            obs = env.reset()

            env.chronics_handler.real_data.change_i_end(150)
            obs = env.reset()
            # indeed `env.chronics_handler.real_data._i_end` has been changed to 150.
            # scenario length will be at most 150 !

            # to undo all changes (and use the defaults) you can:
            # env.chronics_handler.real_data.change_i_end(None)
        """
        # stored as a "pending" value, applied by next_chronics() at reset time
        if new_i_end is not None:
            self.__new_iend = int(new_i_end)
        else:
            self.__new_iend = None
| 28,565 | 39.634424 | 157 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/gridStateFromFile.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import copy
import numpy as np
import pandas as pd
import warnings
from datetime import datetime, timedelta
from grid2op.dtypes import dt_int, dt_float, dt_bool
from grid2op.Exceptions import (
IncorrectNumberOfElements,
ChronicsError,
ChronicsNotFoundError,
)
from grid2op.Exceptions import (
IncorrectNumberOfLoads,
IncorrectNumberOfGenerators,
IncorrectNumberOfLines,
)
from grid2op.Exceptions import EnvError, InsufficientData
from grid2op.Chronics.gridValue import GridValue
class GridStateFromFile(GridValue):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Do not attempt to create an object of this class. This is initialized by the environment
at its creation.
Read the injections values from a file stored on hard drive. More detailed about the files is provided in the
:func:`GridStateFromFile.initialize` method.
This class reads only files stored as csv. The header of the csv is mandatory and should represent the name of
the objects. This names should either be matched to the name of the same object in the backend using the
`names_chronics_to_backend` argument pass into the :func:`GridStateFromFile.initialize` (see
:func:`GridValue.initialize` for more information) or match the names of the object in the backend.
When the grid value is initialized, all present csv are read, sorted in order compatible with the backend and
extracted as numpy array.
For now, the current date and times are not read from file. It is mandatory that the chronics starts at 00:00 and
its first time stamps is corresponds to January, 1st 2019.
Chronics read from this files don't implement the "forecast" value.
In this values, only 1 episode is stored. If the end of the episode is reached and another one should start, then
it will loop from the beginning.
It reads the following files from the "path" location specified:
- "prod_p.csv": for each time steps, this file contains the value for the active production of
each generators of the grid (it counts as many rows as the number of time steps - and its header)
and as many columns as the number of generators on the grid. The header must contains the names of
the generators used to map their value on the grid. Values must be convertible to floating point and the
column separator of this file should be semi-colon `;` (unless you specify a "sep" when loading this class)
- "prod_v.csv": same as "prod_p.csv" but for the production voltage setpoint.
- "load_p.csv": same as "prod_p.csv" but for the load active value (number of columns = number of loads)
- "load_q.csv": same as "prod_p.csv" but for the load reactive value (number of columns = number of loads)
- "maintenance.csv": that contains whether or not there is a maintenance for a given powerline (column) at
each time step (row).
- "hazards.csv": that contains whether or not there is a hazard for a given powerline (column) at
each time step (row).
- "start_datetime.info": the time stamp (date and time) at which the chronic is starting.
- "time_interval.info": the amount of time between two consecutive steps (*e.g.* 5 mins, or 1h)
If a file is missing, it is understood as "this value will not be modified". For example, if the file
"prod_v.csv" is not present, it will be equivalent as not modifying the production voltage setpoint, never.
Except if the attribute :attr:`GridStateFromFile.sep` is modified, the above tables should be "semi colon" (;)
separated.
Attributes
----------
path: ``str``
The path of the folder where the data are stored. It is recommended to set absolute path, and not relative
paths.
load_p: ``numpy.ndarray``, dtype: ``float``
All the values of the load active values
load_q: ``numpy.ndarray``, dtype: ``float``
All the values of the load reactive values
prod_p: ``numpy.ndarray``, dtype: ``float``
All the productions setpoint active values.
prod_v: ``numpy.ndarray``, dtype: ``float``
All the productions setpoint voltage magnitude values.
hazards: ``numpy.ndarray``, dtype: ``bool``
This vector represents the possible hazards. It is understood as: ``True`` there is a hazard
for the given powerline, ``False`` there is not.
maintenance: ``numpy.ndarray``, dtype: ``bool``
This vector represents the possible maintenance. It is understood as: ``True`` there is a maintenance
for the given powerline, ``False`` there is not.
current_index: ``int``
The index of the last observation sent to the :class:`grid2op.Environment`.
sep: ``str``, optional
The csv columns separator. By defaults it's ";"
names_chronics_to_backend: ``dict``
This directory matches the name of the objects (line extremity, generator or load) to the same object in the
backed. See the help of :func:`GridValue.initialize` for more information).
"""
MULTI_CHRONICS = False
    def __init__(
        self,
        path,
        sep=";",
        time_interval=timedelta(minutes=5),
        max_iter=-1,
        start_datetime=datetime(year=2019, month=1, day=1),
        chunk_size=None,
    ):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

            Do not attempt to create an object of this class. This is initialized by the environment
            at its creation.

        Build an instance of GridStateFromFile. Such an instance should be built before an :class:`grid2op.Environment`
        is created.

        Parameters
        ----------
        path: ``str``
            Used to initialize :attr:`GridStateFromFile.path`

        sep: ``str``, optional
            Used to initialize :attr:`GridStateFromFile.sep`

        time_interval: ``datetime.timedelta``
            Used to initialize :attr:`GridValue.time_interval`

        max_iter: int, optional
            Used to initialize :attr:`GridValue.max_iter`

        start_datetime: ``datetime.datetime``, optional
            Date and time of the first time step of the chronics (may be
            overridden by a "start_datetime.info" file, see
            :func:`GridStateFromFile.initialize`).

        chunk_size: ``int``, optional
            Number of rows read from the csv files at once; ``None`` (default)
            reads each file entirely in one go.
        """
        GridValue.__init__(
            self,
            time_interval=time_interval,
            max_iter=max_iter,
            start_datetime=start_datetime,
            chunk_size=chunk_size,
        )
        self.path = path
        self.n_ = None  # maximum number of rows of the array
        self.tmp_max_index = None  # size maximum of the current tables in memory
        self.load_p = None  # numpy array corresponding to the current active load values in the power _grid. It has the same size as the number of loads
        self.load_q = None  # numpy array corresponding to the current reactive load values in the power _grid. It has the same size as the number of loads
        self.prod_p = None  # numpy array corresponding to the current active production values in the power _grid. It has the same size as the number of generators
        self.prod_v = None  # numpy array corresponding to the current voltage production setpoint values in the power _grid. It has the same size as the number of generators

        # for the two following vector, the convention is the following: False(line is disconnected) / True(line is connected)
        self.hazards = None  # numpy array representing the outage (unplanned), same size as the number of powerlines on the _grid.
        self.maintenance = None  # numpy array representing the _maintenance (planned withdrawal of a powerline), same size as the number of powerlines on the _grid.
        self.maintenance_time = None
        self.maintenance_duration = None

        self.current_index = -1
        self.sep = sep

        self.names_chronics_to_backend = None

        # added to provide an easier access to read data in chunk
        self.chunk_size = chunk_size
        self._data_chunk = {}
        self._order_load_p = None
        self._order_load_q = None
        self._order_prod_p = None
        self._order_prod_v = None
        self._order_hazards = None
        self._order_maintenance = None

        # order of the names in the backend
        self._order_backend_loads = None
        self._order_backend_prods = None
        self._order_backend_lines = None
def _clear(self):
self.n_ = None # maximum number of rows of the array
self.tmp_max_index = None # size maximum of the current tables in memory
self.load_p = None # numpy array corresponding to the current active load values in the power _grid. It has the same size as the number of loads
self.load_q = None # numpy array corresponding to the current reactive load values in the power _grid. It has the same size as the number of loads
self.prod_p = None # numpy array corresponding to the current active production values in the power _grid. It has the same size as the number of generators
self.prod_v = None # numpy array corresponding to the current voltage production setpoint values in the power _grid. It has the same size as the number of generators
# for the two following vector, the convention is the following: False(line is disconnected) / True(line is connected)
self.hazards = None # numpy array representing the outage (unplanned), same size as the number of powerlines on the _grid.
self.maintenance = None # numpy array representing the _maintenance (planned withdrawal of a powerline), same size as the number of powerlines on the _grid.
self.maintenance_time = None
self.maintenance_duration = None
self.current_index = -1
self.names_chronics_to_backend = None
# added to provide an easier access to read data in chunk
self._data_chunk = {}
self._order_load_p = None
self._order_load_q = None
self._order_prod_p = None
self._order_prod_v = None
self._order_hazards = None
self._order_maintenance = None
# order of the names in the backend
self._order_backend_loads = None
self._order_backend_prods = None
self._order_backend_lines = None
def _assert_correct(self, dict_convert, order_backend):
len_backend = len(order_backend)
len_dict_keys = len(dict_convert)
vals = set(dict_convert.values())
lend_dict_values = len(vals)
if len_dict_keys != len_backend:
err_msg = "Conversion mismatch between backend data {} elements and converter data {} (keys)"
raise IncorrectNumberOfElements(err_msg.format(len_backend, len_dict_keys))
if lend_dict_values != len_backend:
err_msg = "Conversion mismatch between backend data {} elements and converter data {} (values)"
raise IncorrectNumberOfElements(
err_msg.format(len_backend, lend_dict_values)
)
for el in order_backend:
if not el in vals:
raise ChronicsError(
'Impossible to find element "{}" in the original converter data'.format(
el
)
)
def _assert_correct_second_stage(self, pandas_name, dict_convert, key, extra=""):
for i, el in enumerate(pandas_name):
if not el in dict_convert[key]:
raise ChronicsError(
"Element named {} is found in the data (column {}) but it is not found on the "
'powergrid for data of type "{}".\nData in files are: {}\n'
"Converter data are: {}".format(
el,
i + 1,
key,
sorted(list(pandas_name)),
sorted(list(dict_convert[key].keys())),
)
)
def _init_date_time(self):
if os.path.exists(os.path.join(self.path, "start_datetime.info")):
with open(os.path.join(self.path, "start_datetime.info"), "r") as f:
a = f.read().rstrip().lstrip()
try:
tmp = datetime.strptime(a, "%Y-%m-%d %H:%M")
except ValueError:
tmp = datetime.strptime(a, "%Y-%m-%d")
except Exception:
raise ChronicsNotFoundError(
'Impossible to understand the content of "start_datetime.info". Make sure '
'it\'s composed of only one line with a datetime in the "%Y-%m-%d %H:%M"'
"format."
)
self.start_datetime = tmp
self.current_datetime = tmp
if os.path.exists(os.path.join(self.path, "time_interval.info")):
with open(os.path.join(self.path, "time_interval.info"), "r") as f:
a = f.read().rstrip().lstrip()
try:
tmp = datetime.strptime(a, "%H:%M")
except ValueError:
tmp = datetime.strptime(a, "%M")
except Exception:
raise ChronicsNotFoundError(
'Impossible to understand the content of "time_interval.info". Make sure '
'it\'s composed of only one line with a datetime in the "%H:%M"'
"format."
)
self.time_interval = timedelta(hours=tmp.hour, minutes=tmp.minute)
def _get_fileext(self, data_name):
read_compressed = ".csv"
if not os.path.exists(os.path.join(self.path, "{}.csv".format(data_name))):
# try to read compressed data
if os.path.exists(os.path.join(self.path, "{}.csv.bz2".format(data_name))):
read_compressed = ".csv.bz2"
elif os.path.exists(os.path.join(self.path, "{}.zip".format(data_name))):
read_compressed = ".zip"
elif os.path.exists(
os.path.join(self.path, "{}.csv.gzip".format(data_name))
):
read_compressed = ".csv.gzip"
elif os.path.exists(os.path.join(self.path, "{}.csv.xz".format(data_name))):
read_compressed = ".csv.xz"
else:
read_compressed = None
# raise ChronicsNotFoundError(
# "GridStateFromFile: unable to locate the data files that should be at \"{}\"".format(self.path))
return read_compressed
def _get_data(self, data_name, chunksize=-1, nrows=None):
file_ext = self._get_fileext(data_name)
if nrows is None:
if self._max_iter > 0:
nrows = self._max_iter + 1
if file_ext is not None:
if chunksize == -1:
chunksize = self.chunk_size
res = pd.read_csv(
os.path.join(self.path, "{}{}".format(data_name, file_ext)),
sep=self.sep,
chunksize=chunksize,
nrows=nrows,
)
else:
res = None
return res
def _get_orders(
self,
load_p,
load_q,
prod_p,
prod_v,
hazards,
maintenance,
order_backend_loads,
order_backend_prods,
order_backend_lines,
):
order_chronics_load_p = None
order_backend_load_q = None
order_backend_prod_p = None
order_backend_prod_v = None
order_backend_hazards = None
order_backend_maintenance = None
if load_p is not None:
self._assert_correct_second_stage(
load_p.columns, self.names_chronics_to_backend, "loads", "active"
)
order_chronics_load_p = np.array(
[
order_backend_loads[self.names_chronics_to_backend["loads"][el]]
for el in load_p.columns
]
).astype(dt_int)
if load_q is not None:
self._assert_correct_second_stage(
load_q.columns, self.names_chronics_to_backend, "loads", "reactive"
)
order_backend_load_q = np.array(
[
order_backend_loads[self.names_chronics_to_backend["loads"][el]]
for el in load_q.columns
]
).astype(dt_int)
if prod_p is not None:
self._assert_correct_second_stage(
prod_p.columns, self.names_chronics_to_backend, "prods", "active"
)
order_backend_prod_p = np.array(
[
order_backend_prods[self.names_chronics_to_backend["prods"][el]]
for el in prod_p.columns
]
).astype(dt_int)
if prod_v is not None:
self._assert_correct_second_stage(
prod_v.columns,
self.names_chronics_to_backend,
"prods",
"voltage magnitude",
)
order_backend_prod_v = np.array(
[
order_backend_prods[self.names_chronics_to_backend["prods"][el]]
for el in prod_v.columns
]
).astype(dt_int)
if hazards is not None:
self._assert_correct_second_stage(
hazards.columns, self.names_chronics_to_backend, "lines", "hazards"
)
order_backend_hazards = np.array(
[
order_backend_lines[self.names_chronics_to_backend["lines"][el]]
for el in hazards.columns
]
).astype(dt_int)
if maintenance is not None:
self._assert_correct_second_stage(
maintenance.columns,
self.names_chronics_to_backend,
"lines",
"maintenance",
)
order_backend_maintenance = np.array(
[
order_backend_lines[self.names_chronics_to_backend["lines"][el]]
for el in maintenance.columns
]
).astype(dt_int)
return (
order_chronics_load_p,
order_backend_load_q,
order_backend_prod_p,
order_backend_prod_v,
order_backend_hazards,
order_backend_maintenance,
)
def _get_next_chunk(self):
load_p = None
load_q = None
prod_p = None
prod_v = None
if self._data_chunk["load_p"] is not None:
load_p = next(self._data_chunk["load_p"])
self.tmp_max_index = load_p.shape[0]
if self._data_chunk["load_q"] is not None:
load_q = next(self._data_chunk["load_q"])
self.tmp_max_index = load_q.shape[0]
if self._data_chunk["prod_p"] is not None:
prod_p = next(self._data_chunk["prod_p"])
self.tmp_max_index = prod_p.shape[0]
if self._data_chunk["prod_v"] is not None:
prod_v = next(self._data_chunk["prod_v"])
self.tmp_max_index = prod_v.shape[0]
return load_p, load_q, prod_p, prod_v
    def initialize(
        self,
        order_backend_loads,
        order_backend_prods,
        order_backend_lines,
        order_backend_subs,
        names_chronics_to_backend=None,
    ):
        """
        INTERNAL
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        Called at the creation of the environment.
        In this function, the numpy arrays are read from the csv using the panda.dataframe engine.
        In order to be valid, the folder located at :attr:`GridStateFromFile.path` can contain:
        - a file named "load_p.csv" used to initialize :attr:`GridStateFromFile.load_p`
        - a file named "load_q.csv" used to initialize :attr:`GridStateFromFile.load_q`
        - a file named "prod_p.csv" used to initialize :attr:`GridStateFromFile.prod_p`
        - a file named "prod_v.csv" used to initialize :attr:`GridStateFromFile.prod_v`
        - a file named "hazards.csv" used to initialize :attr:`GridStateFromFile.hazards`
        - a file named "maintenance.csv" used to initialize :attr:`GridStateFromFile.maintenance`
        All these csv must have the same separator specified by :attr:`GridStateFromFile.sep`.
        If one of these file is missing, it is equivalent to "change nothing" class.
        If a file named "start_datetime.info" is present, then it will be used to initialized
        :attr:`GridStateFromFile.start_datetime`. If this file exists, it should count only one row, with the
        initial datetime in the "%Y-%m-%d %H:%M" format.
        If a file named "time_interval.info" is present, then it will be used to initialized the
        :attr:`GridStateFromFile.time_interval` attribute. If this file exists, it should count only one row, with the
        initial datetime in the "%H:%M" format. Only timedelta composed of hours and minutes are supported (time delta
        cannot go above 23 hours 55 minutes and cannot be smaller than 0 hour 1 minutes)
        The first row of these csv is understood as the name of the object concerned by the column. Either this name is
        present in the :class:`grid2op.Backend`, in this case no modification is performed, or in case the name
        is not found in the backend and in this case it must be specified in the "names_chronics_to_backend"
        parameters how to understand it. See the help of :func:`GridValue.initialize` for more information
        about this dictionnary.
        All files should have the same number of rows.
        Parameters
        ----------
        See help of :func:`GridValue.initialize` for a detailed help about the parameters.
        """
        # element counts are deduced from the backend orderings
        self.n_gen = len(order_backend_prods)
        self.n_load = len(order_backend_loads)
        self.n_line = len(order_backend_lines)
        self._order_backend_loads = order_backend_loads
        self._order_backend_prods = order_backend_prods
        self._order_backend_lines = order_backend_lines
        # mapping "name in the chronics files" -> "name in the backend";
        # defaults to the identity mapping for every object type
        self.names_chronics_to_backend = copy.deepcopy(names_chronics_to_backend)
        if self.names_chronics_to_backend is None:
            self.names_chronics_to_backend = {}
        if not "loads" in self.names_chronics_to_backend:
            self.names_chronics_to_backend["loads"] = {
                k: k for k in order_backend_loads
            }
        else:
            self._assert_correct(
                self.names_chronics_to_backend["loads"], order_backend_loads
            )
        if not "prods" in self.names_chronics_to_backend:
            self.names_chronics_to_backend["prods"] = {
                k: k for k in order_backend_prods
            }
        else:
            self._assert_correct(
                self.names_chronics_to_backend["prods"], order_backend_prods
            )
        if not "lines" in self.names_chronics_to_backend:
            self.names_chronics_to_backend["lines"] = {
                k: k for k in order_backend_lines
            }
        else:
            self._assert_correct(
                self.names_chronics_to_backend["lines"], order_backend_lines
            )
        if not "subs" in self.names_chronics_to_backend:
            self.names_chronics_to_backend["subs"] = {k: k for k in order_backend_subs}
        else:
            self._assert_correct(
                self.names_chronics_to_backend["subs"], order_backend_subs
            )
        self._init_date_time()
        # read the data (iterators when chunking is enabled, dataframes otherwise)
        load_p_iter = self._get_data("load_p")
        load_q_iter = self._get_data("load_q")
        prod_p_iter = self._get_data("prod_p")
        prod_v_iter = self._get_data("prod_v")
        read_compressed = self._get_fileext("hazards")
        nrows = None
        if self._max_iter > 0:
            # +1 because the first row encodes the initial grid state,
            # which does not count as a time step
            nrows = self._max_iter + 1
        # hazards / maintenance are always read in full here (never by chunk)
        if read_compressed is not None:
            hazards = pd.read_csv(
                os.path.join(self.path, "hazards{}".format(read_compressed)),
                sep=self.sep,
                nrows=nrows,
            )
        else:
            hazards = None
        read_compressed = self._get_fileext("maintenance")
        if read_compressed is not None:
            maintenance = pd.read_csv(
                os.path.join(self.path, "maintenance{}".format(read_compressed)),
                sep=self.sep,
                nrows=nrows,
            )
        else:
            maintenance = None
        # put the proper name in order: backend name -> column position
        order_backend_loads = {el: i for i, el in enumerate(order_backend_loads)}
        order_backend_prods = {el: i for i, el in enumerate(order_backend_prods)}
        order_backend_lines = {el: i for i, el in enumerate(order_backend_lines)}
        if self.chunk_size is None:
            load_p = load_p_iter
            load_q = load_q_iter
            prod_p = prod_p_iter
            prod_v = prod_v_iter
            if load_p is not None:
                self.tmp_max_index = load_p.shape[0]
            elif load_q is not None:
                self.tmp_max_index = load_q.shape[0]
            elif prod_p is not None:
                self.tmp_max_index = prod_p.shape[0]
            elif prod_v is not None:
                self.tmp_max_index = prod_v.shape[0]
            else:
                raise ChronicsError(
                    'No files are found in directory "{}". If you don\'t want to load any chronics,'
                    ' use "ChangeNothing" and not "{}" to load chronics.'
                    "".format(self.path, type(self))
                )
        else:
            # chunk mode: keep the iterators and only materialize the first chunk
            self._data_chunk = {
                "load_p": load_p_iter,
                "load_q": load_q_iter,
                "prod_p": prod_p_iter,
                "prod_v": prod_v_iter,
            }
            load_p, load_q, prod_p, prod_v = self._get_next_chunk()
        # get the chronics in order
        (
            order_chronics_load_p,
            order_backend_load_q,
            order_backend_prod_p,
            order_backend_prod_v,
            order_backend_hazards,
            order_backend_maintenance,
        ) = self._get_orders(
            load_p,
            load_q,
            prod_p,
            prod_v,
            hazards,
            maintenance,
            order_backend_loads,
            order_backend_prods,
            order_backend_lines,
        )
        # now "sort" the columns of each chunk of data: argsort inverts the
        # permutation so the csv columns can be rearranged into backend order
        # (the permutation is applied in _init_attrs)
        self._order_load_p = np.argsort(order_chronics_load_p)
        self._order_load_q = np.argsort(order_backend_load_q)
        self._order_prod_p = np.argsort(order_backend_prod_p)
        self._order_prod_v = np.argsort(order_backend_prod_v)
        self._order_hazards = np.argsort(order_backend_hazards)
        self._order_maintenance = np.argsort(order_backend_maintenance)
        # retrieve total number of rows (injection files take precedence over
        # hazards / maintenance)
        if maintenance is not None:
            n_ = maintenance.shape[0]
        elif hazards is not None:
            n_ = hazards.shape[0]
        else:
            n_ = None
        for fn in ["prod_p", "load_p", "prod_v", "load_q"]:
            ext_ = self._get_fileext(fn)
            if ext_ is not None:
                n_ = self._file_len(
                    os.path.join(self.path, "{}{}".format(fn, ext_)), ext_
                )
                break
        if n_ is None:
            raise ChronicsError(
                'No files are found in directory "{}". If you don\'t want to load any chronics,'
                ' use "ChangeNothing" and not "{}" to load chronics.'
                "".format(self.path, type(self))
            )
        self.n_ = n_  # the -1 is present because the initial grid state doesn't count as a "time step"
        if self._max_iter > 0:
            if self.n_ is not None:
                if self._max_iter >= self.n_:
                    self._max_iter = self.n_ - 1
                    # TODO: issue warning in this case
                self.n_ = self._max_iter + 1
        else:
            # if the number of maximum time step is not set yet, we set it to be the number of
            # data in the chronics (number of rows of the files) -1.
            # the -1 is present because the initial grid state doesn't count as a "time step" but is read
            # from these data.
            self._max_iter = self.n_ - 1
        self._init_attrs(
            load_p, load_q, prod_p, prod_v, hazards=hazards, maintenance=maintenance,
            is_init=True
        )
        self.curr_iter = 0
@staticmethod
def _file_len(fname, ext_):
res = pd.read_csv(fname, sep="@", dtype=str).shape[0]
return res
    def _init_attrs(
        self, load_p, load_q, prod_p, prod_v, hazards=None, maintenance=None,
        is_init=False
    ):
        """Store the dataframes as float arrays, columns reordered for the backend.

        This is called at the initialization but also each time more data
        should be read from the disk (at the end of each chunk for example).
        ``hazards`` and ``maintenance`` are only provided at initialization
        (``is_init=True``) since they are never read by chunk.
        """
        self.load_p = None
        self.load_q = None
        self.prod_p = None
        self.prod_v = None
        if is_init:
            # hazard / maintenance state survives chunk reloads, so it is
            # only reset on the very first call
            self.hazards = None
            self.hazard_duration = None
            self.maintenance = None
            self.maintenance_time = None
            self.maintenance_duration = None
        # apply the column permutations computed in ``initialize``
        if load_p is not None:
            self.load_p = copy.deepcopy(
                load_p.values[:, self._order_load_p].astype(dt_float)
            )
        if load_q is not None:
            self.load_q = copy.deepcopy(
                load_q.values[:, self._order_load_q].astype(dt_float)
            )
        if prod_p is not None:
            self.prod_p = copy.deepcopy(
                prod_p.values[:, self._order_prod_p].astype(dt_float)
            )
        if prod_v is not None:
            self.prod_v = copy.deepcopy(
                prod_v.values[:, self._order_prod_v].astype(dt_float)
            )
        # TODO optimize this piece of code, and the whole laoding process if hazards.csv and maintenance.csv are
        # provided in the proper format.
        if hazards is not None:
            # hazards and maintenance cannot be computed by chunk. So we need to differenciate their behaviour
            self.hazards = copy.deepcopy(hazards.values[:, self._order_hazards])
            # per-line values computed by get_hazard_duration_1d
            # (defined on the base GridValue class, not visible here)
            self.hazard_duration = np.zeros(
                shape=(self.hazards.shape[0], self.n_line), dtype=dt_int
            )
            for line_id in range(self.n_line):
                self.hazard_duration[:, line_id] = self.get_hazard_duration_1d(
                    self.hazards[:, line_id]
                )
            # keep only the boolean "is there a hazard" mask
            self.hazards = self.hazards != 0.0
        if maintenance is not None:
            self.maintenance = copy.deepcopy(
                maintenance.values[:, self._order_maintenance]
            )
            # initialized at -1 (matches the fill value used in load_next
            # when maintenance data is absent)
            self.maintenance_time = (
                np.zeros(shape=(self.maintenance.shape[0], self.n_line), dtype=dt_int)
                - 1
            )
            self.maintenance_duration = np.zeros(
                shape=(self.maintenance.shape[0], self.n_line), dtype=dt_int
            )
            # TODO test that with chunk size
            for line_id in range(self.n_line):
                self.maintenance_time[:, line_id] = self.get_maintenance_time_1d(
                    self.maintenance[:, line_id]
                )
                self.maintenance_duration[
                    :, line_id
                ] = self.get_maintenance_duration_1d(self.maintenance[:, line_id])
            # there are _maintenance and hazards only if the value in the file is not 0.
            self.maintenance = self.maintenance != 0.0
            self.maintenance = self.maintenance.astype(dt_bool)
def done(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Compare to :func:`GridValue.done` an episode can be over for 2 main reasons:
- :attr:`GridValue.max_iter` has been reached
- There are no data in the csv.
The episode is done if one of the above condition is met.
Returns
-------
res: ``bool``
Whether the episode has reached its end or not.
"""
res = False
# if self.current_index+1 >= self.tmp_max_index:
if self.current_index > self.n_:
res = True
elif self._max_iter > 0:
if self.curr_iter > self._max_iter:
res = True
return res
    @property
    def max_iter(self):
        # maximum number of time steps the episode can last; -1 means
        # "not limited" (all the available data will be used, see the setter
        # and max_timestep)
        return self._max_iter
@max_iter.setter
def max_iter(self, value : int):
if value == -1:
self._max_iter = self.n_ - 1
else:
self._max_iter = int(value)
def max_timestep(self):
if self._max_iter == -1:
return self.n_ - 1
return self._max_iter
def _data_in_memory(self):
if self.chunk_size is None:
# if i don't use chunk, all the data are in memory alreay
return True
if self.current_index == 0:
# data are loaded the first iteration
return True
if self.current_index % self.chunk_size != 0:
# data are already in ram
return True
return False
def _load_next_chunk_in_memory(self):
# print("I loaded another chunk")
# i load the next chunk as dataframes
load_p, load_q, prod_p, prod_v = self._get_next_chunk()
# i put these dataframes in the right order (columns)
self._init_attrs(load_p, load_q, prod_p, prod_v)
# i don't forget to reset the reading index to 0
self.current_index = 0
    def load_next(self):
        """Advance one time step and return the grid modifications for it.

        Returns the tuple ``(current_datetime, res, maintenance_time,
        maintenance_duration, hazard_duration, prod_v)`` where ``res`` may
        contain the keys "injection", "maintenance" and "hazards".

        Raises
        ------
        StopIteration
            When the data (or the allowed number of iterations) is exhausted.
        """
        self.current_index += 1  # index in the chunk
        # for the "global" index use self.curr_iter
        if not self._data_in_memory():
            try:
                self._load_next_chunk_in_memory()
            except StopIteration as exc_:
                raise StopIteration from exc_
        if self.current_index >= self.tmp_max_index:
            raise StopIteration
        if self._max_iter > 0:
            if self.curr_iter > self._max_iter:
                raise StopIteration
        res = {}
        dict_ = {}
        prod_v = None
        # the "1.0 *" makes a fresh copy so callers cannot mutate the cache
        if self.load_p is not None:
            dict_["load_p"] = 1.0 * self.load_p[self.current_index, :]
        if self.load_q is not None:
            dict_["load_q"] = 1.0 * self.load_q[self.current_index, :]
        if self.prod_p is not None:
            dict_["prod_p"] = 1.0 * self.prod_p[self.current_index, :]
        if self.prod_v is not None:
            prod_v = 1.0 * self.prod_v[self.current_index, :]
        if dict_:
            res["injection"] = dict_
        # maintenance / hazards are stored full length (never chunked),
        # hence the global ``curr_iter`` index
        if self.maintenance is not None:
            res["maintenance"] = self.maintenance[self.curr_iter, :]
        if self.hazards is not None:
            res["hazards"] = self.hazards[self.curr_iter, :]
        if self.maintenance_time is not None:
            # copy then cast the current rows to the integer dtype
            maintenance_time = dt_int(1 * self.maintenance_time[self.curr_iter, :])
            maintenance_duration = dt_int(
                1 * self.maintenance_duration[self.curr_iter, :]
            )
        else:
            maintenance_time = np.full(self.n_line, fill_value=-1, dtype=dt_int)
            maintenance_duration = np.full(self.n_line, fill_value=0, dtype=dt_int)
        if self.hazard_duration is not None:
            # NOTE(review): indexed with the chunk-local ``current_index``
            # while hazards above use the global ``curr_iter`` — looks
            # inconsistent when chunking is enabled; confirm intended.
            hazard_duration = 1 * self.hazard_duration[self.current_index, :]
        else:
            hazard_duration = np.full(self.n_line, fill_value=-1, dtype=dt_int)
        self.current_datetime += self.time_interval
        self.curr_iter += 1
        return (
            self.current_datetime,
            res,
            maintenance_time,
            maintenance_duration,
            hazard_duration,
            prod_v,
        )
def check_validity(self, backend):
at_least_one = False
if self.load_p is not None:
if self.load_p.shape[1] != backend.n_load:
msg_err = "for the active part. It should be {} but is in fact {}"
raise IncorrectNumberOfLoads(
msg_err.format(backend.n_load, self.load_p.shape[1])
)
at_least_one = True
if self.load_q is not None:
if self.load_q.shape[1] != backend.n_load:
msg_err = "for the reactive part. It should be {} but is in fact {}"
raise IncorrectNumberOfLoads(
msg_err.format(backend.n_load, self.load_q.shape[1])
)
at_least_one = True
if self.prod_p is not None:
if self.prod_p.shape[1] != backend.n_gen:
msg_err = "for the active part. It should be {} but is in fact {}"
raise IncorrectNumberOfGenerators(
msg_err.format(backend.n_gen, self.prod_p.shape[1])
)
at_least_one = True
if self.prod_v is not None:
if self.prod_v.shape[1] != backend.n_gen:
msg_err = "for the voltage part. It should be {} but is in fact {}"
raise IncorrectNumberOfGenerators(
msg_err.format(backend.n_gen, self.prod_v.shape[1])
)
at_least_one = True
if self.hazards is not None:
if self.hazards.shape[1] != backend.n_line:
msg_err = "for the outage. It should be {} but is in fact {}"
raise IncorrectNumberOfLines(
msg_err.format(backend.n_line, self.hazards.shape[1])
)
at_least_one = True
if self.maintenance is not None:
if self.maintenance.shape[1] != backend.n_line:
msg_err = "for the maintenance. It should be {} but is in fact {}"
raise IncorrectNumberOfLines(
msg_err.format(backend.n_line, self.maintenance.shape[1])
)
at_least_one = True
if self.maintenance_time is not None:
if self.maintenance_time.shape[1] != backend.n_line:
msg_err = "for the maintenance times. It should be {} but is in fact {}"
raise IncorrectNumberOfLines(
msg_err.format(backend.n_line, self.maintenance_time.shape[1])
)
at_least_one = True
if self.maintenance_duration is not None:
if self.maintenance_duration.shape[1] != backend.n_line:
msg_err = (
"for the maintenance durations. It should be {} but is in fact {}"
)
raise IncorrectNumberOfLines(
msg_err.format(backend.n_line, self.maintenance_duration.shape[1])
)
at_least_one = True
if self.hazard_duration is not None:
if self.hazard_duration.shape[1] != backend.n_line:
msg_err = "for the hazard durations. It should be {} but is in fact {}"
raise IncorrectNumberOfLines(
msg_err.format(backend.n_line, self.hazard_duration.shape[1])
)
at_least_one = True
if not at_least_one:
raise ChronicsError(
'No files are found in directory "{}". If you don\'t want to load any chronics, use '
'"ChangeNothing" and not "{}" to load chronics.'
"".format(self.path, type(self))
)
for name_arr, arr in zip(
[
"load_q",
"load_p",
"prod_v",
"prod_p",
"maintenance",
"hazards",
"maintenance time",
"maintenance duration",
"hazard duration",
],
[
self.load_q,
self.load_p,
self.prod_v,
self.prod_p,
self.maintenance,
self.hazards,
self.maintenance_time,
self.maintenance_duration,
self.hazard_duration,
],
):
if arr is not None:
if self.chunk_size is None:
if arr.shape[0] != self.n_:
msg_err = (
"Array {} has not the same number of rows ({}) than the maintenance ({}). "
"The chronics cannot be loaded properly."
)
raise EnvError(msg_err.format(name_arr, arr.shape[0], self.n_))
if self._max_iter > 0:
if self._max_iter > self.n_:
msg_err = "Files count {} rows and you ask this episode to last at {} timestep."
raise InsufficientData(msg_err.format(self.n_, self._max_iter))
def next_chronics(self):
self.current_datetime = self.start_datetime
self.current_index = -1
self.curr_iter = 0
if self.chunk_size is not None:
self._clear() # remove previously loaded data [only needed if chunk size is set, I assume]
    def get_id(self) -> str:
        # chronics are identified by the folder they are read from
        return self.path
    def set_chunk_size(self, new_chunk_size):
        # change how many rows are read from disk at once; ``None`` disables
        # chunking (everything is loaded in memory at initialization)
        self.chunk_size = new_chunk_size
def _convert_datetime(self, datetime_beg):
res = datetime_beg
if not isinstance(datetime_beg, datetime):
try:
res = datetime.strptime(datetime_beg, "%Y-%m-%d %H:%M")
except:
try:
res = datetime.strptime(datetime_beg, "%Y-%m-%d")
except:
raise ChronicsError(
'Impossible to convert "{}" to a valid datetime. Accepted format is '
'"%Y-%m-%d %H:%M"'.format(datetime_beg)
)
return res
def _extract_array(self, nm):
var = self.__dict__[nm]
if var is None:
return None
else:
return var[self.current_index, :]
def _save_array(self, array_, path_out, name, colnames):
if array_ is None:
return
tmp = pd.DataFrame(array_)
tmp.columns = colnames
tmp.to_csv(os.path.join(path_out, name), index=False, sep=self.sep)
def _init_res_split(self, nb_rows):
res_prod_p = None
res_prod_v = None
res_load_p = None
res_load_q = None
res_maintenance = None
res_hazards = None
if self.prod_p is not None:
res_prod_p = np.zeros((nb_rows, self.n_gen), dtype=dt_float)
if self.prod_v is not None:
res_prod_v = np.zeros((nb_rows, self.n_gen), dtype=dt_float)
if self.load_p is not None:
res_load_p = np.zeros((nb_rows, self.n_load), dtype=dt_float)
if self.load_q is not None:
res_load_q = np.zeros((nb_rows, self.n_load), dtype=dt_float)
if self.maintenance is not None:
res_maintenance = np.zeros((nb_rows, self.n_line), dtype=dt_float)
if self.hazards is not None:
res_hazards = np.zeros((nb_rows, self.n_line), dtype=dt_float)
return (
res_prod_p,
res_prod_v,
res_load_p,
res_load_q,
res_maintenance,
res_hazards,
)
def _update_res_split(self, i, tmp, *arrays):
(
res_prod_p,
res_prod_v,
res_load_p,
res_load_q,
res_maintenance,
res_hazards,
) = arrays
if res_prod_p is not None:
res_prod_p[i, :] = tmp._extract_array("prod_p")
if res_prod_v is not None:
res_prod_v[i, :] = tmp._extract_array("prod_v")
if res_load_p is not None:
res_load_p[i, :] = tmp._extract_array("load_p")
if res_load_q is not None:
res_load_q[i, :] = tmp._extract_array("load_q")
if res_maintenance is not None:
res_maintenance[i, :] = tmp._extract_array("maintenance")
if res_hazards is not None:
res_hazards[i, :] = tmp._extract_array("hazards")
def _clean_arrays(self, i, *arrays):
(
res_prod_p,
res_prod_v,
res_load_p,
res_load_q,
res_maintenance,
res_hazards,
) = arrays
if res_prod_p is not None:
res_prod_p = res_prod_p[:i, :]
if res_prod_v is not None:
res_prod_v = res_prod_v[:i, :]
if res_load_p is not None:
res_load_p = res_load_p[:i, :]
if res_load_q is not None:
res_load_q = res_load_q[:i, :]
if res_maintenance is not None:
res_maintenance = res_maintenance[:i, :]
if res_hazards is not None:
res_hazards = res_hazards[:i, :]
return (
res_prod_p,
res_prod_v,
res_load_p,
res_load_q,
res_maintenance,
res_hazards,
)
    def _get_name_arrays_for_saving(self):
        # base file names (without extension) of the csv written by
        # ``split_and_save``
        return ["prod_p", "prod_v", "load_p", "load_q", "maintenance", "hazards"]
    def _get_colorder_arrays_for_saving(self):
        # column headers for each file listed by
        # ``_get_name_arrays_for_saving`` (kept in the same order)
        return [
            self._order_backend_prods,
            self._order_backend_prods,
            self._order_backend_loads,
            self._order_backend_loads,
            self._order_backend_lines,
            self._order_backend_lines,
        ]
    def split_and_save(self, datetime_beg, datetime_end, path_out):
        """
        You can use this function to save the values of the chronics in a format that will be loadable
        by :class:`GridStateFromFile`
        Notes
        -----
        Prefer using the :func:`Multifolder.split_and_save` that handles different chronics
        Parameters
        ----------
        datetime_beg: ``str``
            Time stamp of the beginning of the data you want to save (time stamp in "%Y-%m-%d %H:%M"
            format)
        datetime_end: ``str``
            Time stamp of the end of the data you want to save (time stamp in "%Y-%m-%d %H:%M"
            format)
        path_out: ``str``
            Location where to save the data
        """
        # work on a copy of myself
        tmp = copy.deepcopy(self)
        datetime_beg = self._convert_datetime(datetime_beg)
        datetime_end = self._convert_datetime(datetime_end)
        # number of time steps between the two stamps (inclusive)
        nb_rows = datetime_end - datetime_beg
        nb_rows = nb_rows.total_seconds()
        nb_rows = int(nb_rows / self.time_interval.total_seconds()) + 1
        if nb_rows <= 0:
            raise ChronicsError(
                'Invalid time step to be extracted. Make sure "datetime_beg" is lower than '
                '"datetime_end" {} - {}'.format(datetime_beg, datetime_end)
            )
        # prepare folder
        if not os.path.exists(path_out):
            os.mkdir(path_out)
        # skip until datetime_beg starts
        curr_dt = tmp.current_datetime
        if curr_dt > datetime_beg:
            warnings.warn(
                "split_and_save: you ask for a beginning of the extraction of the chronics after the "
                "current datetime of it. If they ever existed, the data in the chronics prior to {}"
                "will be ignored".format(curr_dt)
            )
        # in the chronics we load the first row to initialize the data, so here we stop just a bit before that
        datetime_start = datetime_beg - self.time_interval
        while curr_dt < datetime_start:
            curr_dt, *_ = tmp.load_next()
        real_init_dt = curr_dt
        # allocate the output buffers then copy the requested rows one by one
        arrays = self._init_res_split(nb_rows)
        i = 0
        while curr_dt < datetime_end:
            self._update_res_split(i, tmp, *arrays)
            curr_dt, *_ = tmp.load_next()
            i += 1
        if i < nb_rows:
            # NOTE(review): "troncated" is a typo ("truncated") in this
            # user-facing warning; fix separately.
            warnings.warn(
                "split_and_save: chronics goes up to {} but you want to split it up to {}. Results "
                "has been troncated".format(curr_dt, datetime_end)
            )
        # drop the unused tail of the buffers, then write one compressed csv
        # per data type
        arrays = self._clean_arrays(i, *arrays)
        nms = self._get_name_arrays_for_saving()
        orders_columns = self._get_colorder_arrays_for_saving()
        for el, nm, colnames in zip(arrays, nms, orders_columns):
            nm = "{}{}".format(nm, ".csv.bz2")
            self._save_array(el, path_out, nm, colnames)
        with open(os.path.join(path_out, "start_datetime.info"), "w") as f:
            f.write("{:%Y-%m-%d %H:%M}\n".format(real_init_dt))
        # serialize the time interval through a dummy datetime (timedelta has
        # no "%H:%M" formatting support)
        tmp_for_time_delta = (
            datetime(year=2018, month=1, day=1, hour=0, minute=0, second=0)
            + self.time_interval
        )
        with open(os.path.join(path_out, "time_interval.info"), "w") as f:
            f.write("{:%H:%M}\n".format(tmp_for_time_delta))
| 49,470 | 39.285831 | 174 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/gridStateFromFileWithForecasts.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import copy
import numpy as np
import pandas as pd
from datetime import timedelta
from grid2op.dtypes import dt_float, dt_bool
from grid2op.Exceptions import (
EnvError,
IncorrectNumberOfLoads,
IncorrectNumberOfLines,
IncorrectNumberOfGenerators,
)
from grid2op.Exceptions import ChronicsError
from grid2op.Chronics.gridStateFromFile import GridStateFromFile
class GridStateFromFileWithForecasts(GridStateFromFile):
"""
An extension of :class:`GridStateFromFile` that implements the "forecast" functionality.
Forecast are also read from a file. For this class, only 1 forecast per timestep is read. The "forecast"
present in the file at row $i$ is the one available at the corresponding time step, so valid for the grid state
at the next time step.
To have more advanced forecasts, this class could be overridden.
Attributes
----------
load_p_forecast: ``numpy.ndarray``, dtype: ``float``
Array used to store the forecasts of the load active values.
load_q_forecast: ``numpy.ndarray``, dtype: ``float``
Array used to store the forecasts of the load reactive values.
prod_p_forecast: ``numpy.ndarray``, dtype: ``float``
Array used to store the forecasts of the generator active production setpoint.
prod_v_forecast: ``numpy.ndarray``, dtype: ``float``
Array used to store the forecasts of the generator voltage magnitude setpoint.
"""
MULTI_CHRONICS = False
def __init__(
self,
path,
sep=";",
time_interval=timedelta(minutes=5),
max_iter=-1,
chunk_size=None,
h_forecast=(5, ),
):
self.load_p_forecast = None
self.load_q_forecast = None
self.prod_p_forecast = None
self.prod_v_forecast = None
# for when you read data in chunk
self._order_load_p_forecasted = None
self._order_load_q_forecasted = None
self._order_prod_p_forecasted = None
self._order_prod_v_forecasted = None
self._data_already_in_mem = False # says if the "main" value from the base class had to be reloaded (used for chunk)
self._nb_forecast = len(h_forecast)
self._h_forecast = copy.deepcopy(h_forecast)
self._check_hs_consistent(self._h_forecast, time_interval)
# init base class
GridStateFromFile.__init__(
self,
path,
sep=sep,
time_interval=time_interval,
max_iter=max_iter,
chunk_size=chunk_size,
)
def _clear(self):
super()._clear()
self.load_p_forecast = None
self.load_q_forecast = None
self.prod_p_forecast = None
self.prod_v_forecast = None
# for when you read data in chunk
self._order_load_p_forecasted = None
self._order_load_q_forecasted = None
self._order_prod_p_forecasted = None
self._order_prod_v_forecasted = None
self._data_already_in_mem = False # says if the "main" value from the base class had to be reloaded (used for chunk)
def _check_hs_consistent(self, h_forecast, time_interval):
prev = timedelta(minutes=0)
for i, h in enumerate(h_forecast):
prev += time_interval
if prev.total_seconds() // 60 != h:
raise ChronicsError("For now you cannot build non contiuguous forecast. "
"Forecast should look like [5, 10, 15, 20] "
"but not [10, 15, 20] (missing h=5mins) or [5, 10, 20] (missing h=15)")
def _get_next_chunk_forecasted(self):
load_p = None
load_q = None
prod_p = None
prod_v = None
if self._data_chunk["load_p_forecasted"] is not None:
load_p = next(self._data_chunk["load_p_forecasted"])
if self._data_chunk["load_q_forecasted"] is not None:
load_q = next(self._data_chunk["load_q_forecasted"])
if self._data_chunk["prod_p_forecasted"] is not None:
prod_p = next(self._data_chunk["prod_p_forecasted"])
if self._data_chunk["prod_v_forecasted"] is not None:
prod_v = next(self._data_chunk["prod_v_forecasted"])
return load_p, load_q, prod_p, prod_v
def _data_in_memory(self):
res = super()._data_in_memory()
self._data_already_in_mem = res
return res
def initialize(
self,
order_backend_loads,
order_backend_prods,
order_backend_lines,
order_backend_subs,
names_chronics_to_backend=None,
):
"""
The same condition as :class:`GridStateFromFile.initialize` applies also for
:attr:`GridStateFromFileWithForecasts.load_p_forecast`, :attr:`GridStateFromFileWithForecasts.load_q_forecast`,
:attr:`GridStateFromFileWithForecasts.prod_p_forecast`, and
:attr:`GridStateFromFileWithForecasts.prod_v_forecast`.
Parameters
----------
See help of :func:`GridValue.initialize` for a detailed help about the _parameters.
"""
super().initialize(
order_backend_loads,
order_backend_prods,
order_backend_lines,
order_backend_subs,
names_chronics_to_backend,
)
if self.chunk_size is not None:
chunk_size = self.chunk_size * self._nb_forecast
else:
chunk_size = None
if self._max_iter > 0:
nrows_to_load = (self._max_iter + 1) * self._nb_forecast
load_p_iter = self._get_data("load_p_forecasted",
chunk_size, nrows_to_load)
load_q_iter = self._get_data("load_q_forecasted",
chunk_size, nrows_to_load)
prod_p_iter = self._get_data("prod_p_forecasted",
chunk_size, nrows_to_load)
prod_v_iter = self._get_data("prod_v_forecasted",
chunk_size, nrows_to_load)
hazards = None # no hazards in forecast
maintenance = None # maintenance are read from the real data and propagated in the chronics
if self.chunk_size is None:
load_p = load_p_iter
load_q = load_q_iter
prod_p = prod_p_iter
prod_v = prod_v_iter
else:
self._data_chunk["load_p_forecasted"] = load_p_iter
self._data_chunk["load_q_forecasted"] = load_q_iter
self._data_chunk["prod_p_forecasted"] = prod_p_iter
self._data_chunk["prod_v_forecasted"] = prod_v_iter
load_p, load_q, prod_p, prod_v = self._get_next_chunk_forecasted()
order_backend_loads = {el: i for i, el in enumerate(order_backend_loads)}
order_backend_prods = {el: i for i, el in enumerate(order_backend_prods)}
order_backend_lines = {el: i for i, el in enumerate(order_backend_lines)}
(
order_chronics_load_p,
order_backend_load_q,
order_backend_prod_p,
order_backend_prod_v,
order_backend_hazards,
order_backend_maintenance,
) = self._get_orders(
load_p,
load_q,
prod_p,
prod_v,
hazards,
maintenance,
order_backend_loads,
order_backend_prods,
order_backend_lines,
)
self._order_load_p_forecasted = np.argsort(order_chronics_load_p)
self._order_load_q_forecasted = np.argsort(order_backend_load_q)
self._order_prod_p_forecasted = np.argsort(order_backend_prod_p)
self._order_prod_v_forecasted = np.argsort(order_backend_prod_v)
self._init_attrs_forecast(
load_p, load_q, prod_p, prod_v
)
def _init_attrs_forecast(self, load_p, load_q, prod_p, prod_v):
# TODO refactor that with _init_attrs from super()
self.load_p_forecast = None
self.load_q_forecast = None
self.prod_p_forecast = None
self.prod_v_forecast = None
if load_p is not None:
self.load_p_forecast = copy.deepcopy(
load_p.values[:, self._order_load_p_forecasted].astype(dt_float)
)
if load_q is not None:
self.load_q_forecast = copy.deepcopy(
load_q.values[:, self._order_load_q_forecasted].astype(dt_float)
)
if prod_p is not None:
self.prod_p_forecast = copy.deepcopy(
prod_p.values[:, self._order_prod_p_forecasted].astype(dt_float)
)
if prod_v is not None:
self.prod_v_forecast = copy.deepcopy(
prod_v.values[:, self._order_prod_v_forecasted].astype(dt_float)
)
def check_validity(self, backend):
super(GridStateFromFileWithForecasts, self).check_validity(backend)
at_least_one = False
if self.load_p_forecast is not None:
if self.load_p_forecast.shape[1] != backend.n_load:
raise IncorrectNumberOfLoads(
"for the active part. It should be {} but is in fact {}"
"".format(backend.n_load, len(self.load_p))
)
at_least_one = True
if self.load_q_forecast is not None:
if self.load_q_forecast.shape[1] != backend.n_load:
raise IncorrectNumberOfLoads(
"for the reactive part. It should be {} but is in fact {}"
"".format(backend.n_load, len(self.load_q))
)
at_least_one = True
if self.prod_p_forecast is not None:
if self.prod_p_forecast.shape[1] != backend.n_gen:
raise IncorrectNumberOfGenerators(
"for the active part. It should be {} but is in fact {}"
"".format(backend.n_gen, len(self.prod_p))
)
at_least_one = True
if self.prod_v_forecast is not None:
if self.prod_v_forecast.shape[1] != backend.n_gen:
raise IncorrectNumberOfGenerators(
"for the voltage part. It should be {} but is in fact {}"
"".format(backend.n_gen, len(self.prod_v))
)
at_least_one = True
if not at_least_one:
raise ChronicsError(
"You used a class that read forecasted data, yet there is no forecasted data in"
'"{}". Please fall back to using class "GridStateFromFile" instead of '
'"{}"'.format(self.path, type(self))
)
for name_arr, arr in zip(
["load_q", "load_p", "prod_v", "prod_p"],
[
self.load_q_forecast,
self.load_p_forecast,
self.prod_v_forecast,
self.prod_p_forecast
],
):
if arr is not None:
if self.chunk_size is None:
if arr.shape[0] < self.n_ * self._nb_forecast:
raise EnvError(
"Array for forecast {}_forecasted as not the same number of rows of {} x nb_forecast. "
"The chronics cannot be loaded properly.".format(name_arr, name_arr)
)
def _load_next_chunk_in_memory_forecast(self):
# i load the next chunk as dataframes
load_p, load_q, prod_p, prod_v = self._get_next_chunk_forecasted()
# i put these dataframes in the right order (columns)
self._init_attrs_forecast(load_p, load_q, prod_p, prod_v)
# resetting the index has been done in _load_next_chunk_in_memory, or at least it should have
def forecasts(self):
"""
This is the major difference between :class:`GridStateFromFileWithForecasts` and :class:`GridStateFromFile`.
It returns non empty forecasts.
As explained in the :func:`GridValue.forecasts`, forecasts are made of list of tuple. Each tuple having
exactly 2 elements:
1. Is the time stamp of the forecast
2. An :class:`grid2op.BaseAction` representing the modification of the powergrid after the forecast.
For this class, only the forecast of the next time step is given, and only for the injections and maintenance.
Returns
-------
See :func:`GridValue.forecasts` for more information.
"""
if not self._data_already_in_mem:
try:
self._load_next_chunk_in_memory_forecast()
except StopIteration as exc_:
raise exc_
res = []
for h_id, h in enumerate(self._h_forecast):
res_d = {}
dict_ = {}
indx_to_look = self._nb_forecast * self.current_index + h_id
if self.load_p_forecast is not None:
dict_["load_p"] = dt_float(
1.0 * self.load_p_forecast[indx_to_look, :]
)
if self.load_q_forecast is not None:
dict_["load_q"] = dt_float(
1.0 * self.load_q_forecast[indx_to_look, :]
)
if self.prod_p_forecast is not None:
dict_["prod_p"] = dt_float(
1.0 * self.prod_p_forecast[indx_to_look, :]
)
if self.prod_v_forecast is not None:
dict_["prod_v"] = dt_float(
1.0 * self.prod_v_forecast[indx_to_look, :]
)
if dict_:
res_d["injection"] = dict_
forecast_datetime = self.current_datetime + timedelta(minutes=h)
res.append((forecast_datetime, res_d))
return res
    def get_id(self) -> str:
        """Return the path of the folder from which the chronics data are read (unique id of this episode)."""
        return self.path
def _init_res_split(self, nb_rows):
res_load_p_f = None
res_load_q_f = None
res_prod_p_f = None
res_prod_v_f = None
if self.prod_p_forecast is not None:
res_prod_p_f = np.zeros((nb_rows, self.n_gen), dtype=dt_float)
if self.prod_v_forecast is not None:
res_prod_v_f = np.zeros((nb_rows, self.n_gen), dtype=dt_float)
if self.load_p_forecast is not None:
res_load_p_f = np.zeros((nb_rows, self.n_load), dtype=dt_float)
if self.load_q_forecast is not None:
res_load_q_f = np.zeros((nb_rows, self.n_load), dtype=dt_float)
res = super()._init_res_split(nb_rows)
res += tuple(
[res_prod_p_f, res_prod_v_f, res_load_p_f, res_load_q_f]
)
return res
def _update_res_split(self, i, tmp, *arrays):
(
*args_super,
res_prod_p_f,
res_prod_v_f,
res_load_p_f,
res_load_q_f
) = arrays
super()._update_res_split(i, tmp, *args_super)
if res_prod_p_f is not None:
res_prod_p_f[i, :] = tmp._extract_array("prod_p_forecast")
if res_prod_v_f is not None:
res_prod_v_f[i, :] = tmp._extract_array("prod_v_forecast")
if res_load_p_f is not None:
res_load_p_f[i, :] = tmp._extract_array("load_p_forecast")
if res_load_q_f is not None:
res_load_q_f[i, :] = tmp._extract_array("load_q_forecast")
def _clean_arrays(self, i, *arrays):
(
*args_super,
res_prod_p_f,
res_prod_v_f,
res_load_p_f,
res_load_q_f
) = arrays
res = super()._clean_arrays(i, *args_super)
if res_prod_p_f is not None:
res_prod_p_f = res_prod_p_f[:i, :]
if res_prod_v_f is not None:
res_prod_v_f = res_prod_v_f[:i, :]
if res_load_p_f is not None:
res_load_p_f = res_load_p_f[:i, :]
if res_load_q_f is not None:
res_load_q_f = res_load_q_f[:i, :]
res += tuple(
[res_prod_p_f, res_prod_v_f, res_load_p_f, res_load_q_f]
)
return res
def _get_name_arrays_for_saving(self):
res = super()._get_name_arrays_for_saving()
res += [
"prod_p_forecasted",
"prod_v_forecasted",
"load_p_forecasted",
"load_q_forecasted"
]
return res
def _get_colorder_arrays_for_saving(self):
res = super()._get_colorder_arrays_for_saving()
res += tuple(
[
self._order_backend_prods,
self._order_backend_prods,
self._order_backend_loads,
self._order_backend_loads
]
)
return res
| 17,222 | 37.444196 | 125 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/gridValue.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
import warnings
from datetime import datetime, timedelta
from abc import ABC, abstractmethod
from grid2op.dtypes import dt_int
from grid2op.Space import RandomObject
from grid2op.Exceptions import EnvError
# TODO sous echantillonner ou sur echantilloner les scenario: need to modify everything that affect the number
# TODO of time steps there, for example "Space.gen_min_time_on" or "params.NB_TIMESTEP_POWERFLOW_ALLOWED" for
# TODO example. And more generally, it would be better to have all of this attributes exported / imported in
# TODO time interval, instead of time steps.
# TODO add a class to sample "online" the data.
class GridValue(RandomObject, ABC):
"""
This is the base class for every kind of data for the _grid.
It allows the :class:`grid2op.Environment` to perform powergrid modification that make the "game" time dependant.
It is not recommended to directly create :class:`GridValue` object, but to use the
:attr:`grid2op.Environment.chronics_handler" for such a purpose. This is made in an attempt to make sure the
:func:`GridValue.initialize` is called. Before this initialization, it is not recommended to use any
:class:`GridValue` object.
The method :func:`GridValue.next_chronics` should be used between two epoch of the game. If there are no more
data to be generated from this object, then :func:`GridValue.load_next` should raise a :class:`StopIteration`
exception and a call to :func:`GridValue.done` should return True.
In grid2op, the production and loads (and hazards or maintenance) can be stored in this type of
of "GridValue". This class will map things generated (or read from a file) and assign the given element
of the powergrid with its proper value at each time steps.
Attributes
----------
time_interval: :class:`.datetime.timedelta`
Time interval between 2 consecutive timestamps. Default 5 minutes.
start_datetime: :class:`datetime.datetime`
The datetime of the first timestamp of the scenario.
current_datetime: :class:`datetime.datetime`
The timestamp of the current scenario.
max_iter: ``int``
Number maximum of data to generate for one episode.
curr_iter: ``int``
Duration of the current episode.
maintenance_time: ``numpy.ndarray``, dtype:``int``
Number of time steps the next maintenance will take place with the following convention:
- -1 no maintenance are planned for the forseeable future
- 0 a maintenance is taking place
- 1, 2, 3 ... a maintenance will take place in 1, 2, 3, ... time step
Some examples are given in :func:`GridValue.maintenance_time_1d`.
maintenance_duration: ``numpy.ndarray``, dtype:``int``
Duration of the next maintenance. 0 means no maintenance is happening. If a maintenance is planned for a
given powerline, this number decreases each time step, up until arriving at 0 when the maintenance is over. Note
that if a maintenance is planned (see :attr:`GridValue.maintenance_time`) this number indicates how long
the maintenance will last, and does not suppose anything on the maintenance taking place or not (= there can be
positive number here without a powerline being removed from the grid for maintenance reason). Some examples are
given in :func:`GridValue.maintenance_duration_1d`.
hazard_duration: ``numpy.ndarray``, dtype:``int``
Duration of the next hzard. 0 means no maintenance is happening. If a hazard is taking place for a
given powerline, this number decreases each time step, up until arriving at 0 when the maintenance is over. On
the contrary to :attr:`GridValue.maintenance_duration`, if a component of this vector is higher than 1, it
means that the powerline is out of service. Some examples are
given in :func:`GridValue.get_hazard_duration_1d`.
"""
NAN_BUT_IN_INT = -9999999
    def __init__(
        self,
        time_interval=timedelta(minutes=5),
        max_iter=-1,
        start_datetime=datetime(year=2019, month=1, day=1),
        chunk_size=None,
    ):
        # NOTE(review): ``chunk_size`` is accepted for subclass-signature compatibility
        # but is not stored here -- subclasses supporting chunked reads handle it.
        RandomObject.__init__(self)
        self.time_interval = time_interval  # duration between 2 consecutive time steps
        self.current_datetime = start_datetime  # timestamp of the current step
        self.start_datetime = start_datetime  # timestamp of the first step of the scenario
        self._max_iter = max_iter  # maximum number of steps for one episode (-1: unlimited)
        self.curr_iter = 0  # number of steps already produced in the current episode
        # next-maintenance / maintenance-duration / hazard-duration vectors,
        # see the class docstring for their conventions; set by ``initialize``
        self.maintenance_time = None
        self.maintenance_duration = None
        self.hazard_duration = None
    def get_kwargs(self, dict_):
        """
        Overload this function if you want to pass some data when building
        a new instance of this class.

        Parameters
        ----------
        dict_: ``dict``
            Dictionary of keyword arguments -- presumably updated in place by
            overloads; this base implementation adds nothing.
        """
        pass
    @property
    def max_iter(self):
        # maximum number of iterations for one episode (-1 means "no limit")
        return self._max_iter

    @max_iter.setter
    def max_iter(self, value : int):
        # cast to int so that comparisons with ``curr_iter`` stay well defined
        self._max_iter = int(value)
    @abstractmethod
    def initialize(
        self,
        order_backend_loads,
        order_backend_prods,
        order_backend_lines,
        order_backend_subs,
        names_chronics_to_backend,
    ):
        """
        This function is used to initialize the data generator.

        It can be used to load scenarios, or to initialize noise if scenarios are generated on
        the fly. It must also initialize :attr:`GridValue.maintenance_time`,
        :attr:`GridValue.maintenance_duration` and :attr:`GridValue.hazard_duration`.

        This function should also increment :attr:`GridValue.curr_iter` of 1 each time it is
        called.

        The :class:`GridValue` is what makes the connection between the data (generally in the
        shape of files on the hard drive) and the power grid. One of the main advantages of the
        Grid2Op package is its ability to change the tool that computes the load flows.
        Generally, such a :class:`grid2op.Backend` expects data in a specific format that is
        given by the way its internal powergrid is represented, and in particular the "same"
        objects can have different names and different positions. To ensure that the same
        chronics produce the same results on every backend (**ie** regardless of the order in
        which the Backend expects the data, the outcome of the powerflow is the same) we
        encourage the user to provide a file that maps the name of each object in the chronics
        to the name of the same object in the backend.

        This is done with the "names_chronics_to_backend" dictionary that has the following
        keys:

        - "loads"
        - "prods"
        - "lines"

        The value associated to each of these keys is in turn a mapping dictionary from the
        chronics to the backend: each *key* of these sub-dictionaries is the name of one column
        in the files, and each *value* is the corresponding name of this same object in the
        backend. An example is provided below.

        Parameters
        ----------
        order_backend_loads: ``numpy.ndarray``, dtype:str
            Ordered names, in the Backend, of the loads. It is required that a
            :class:`grid2op.Backend` object always outputs the information in the same order.
            This array gives the names of the loads following this order. See the documentation
            of :mod:`grid2op.Backend` for more information about this.

        order_backend_prods: ``numpy.ndarray``, dtype:str
            Same as order_backend_loads, but for generators.

        order_backend_lines: ``numpy.ndarray``, dtype:str
            Same as order_backend_loads, but for powerlines.

        order_backend_subs: ``numpy.ndarray``, dtype:str
            Same as order_backend_loads, but for substations.

        names_chronics_to_backend: ``dict``
            See the description of the method for more information about its format.

        Examples
        --------
        Suppose we have a :class:`grid2op.Backend` with:

        - substation ids starting from 0 to N-1 (N being the number of substations)
        - loads named "load_i" with "i" the substation to which the load is connected
        - generator units named "gen_i" (i still the substation id)
        - powerlines named "i_j" if connecting substation i to substation j

        And on the other side, some files with the following conventions:

        - substations are numbered from 1 to N
        - loads are named "i_C", generators "i_G" (i the 1-based substation id)
        - powerlines are named "i_j_k" with i the origin substation, j the extremity
          substation and "k" a unique identifier of this powerline

        In this case, instead of renaming the powergrid (in the backend) or the data files, it
        is advised to build the ordered name arrays and a mapping such as:

        .. code-block:: python

            # Note: this code won't execute because "GridValue" is an abstract class
            gridval = GridValue()
            order_backend_loads = ['load_1', 'load_2', ...]   # names as seen by the backend
            order_backend_prods = ['gen_1', 'gen_2', ...]
            order_backend_lines = ['0_1', '0_4', ...]
            order_backend_subs = ['sub_0', 'sub_1', ...]
            names_chronics_to_backend = {
                "loads": {"2_C": 'load_1', "3_C": 'load_2', ...},   # chronics name -> backend name
                "lines": {'1_2_1': '0_1', '1_5_2': '0_4', ...},
                "prods": {"1_G": 'gen_0', "2_G": "gen_1", ...},
            }
            gridval.initialize(order_backend_loads, order_backend_prods,
                               order_backend_lines, names_chronics_to_backend)
        """
        self.curr_iter += 1
        self.current_datetime += self.time_interval
@staticmethod
def get_maintenance_time_1d(maintenance):
"""
This function allows to transform a 1d numpy aarray maintenance, where is specify:
- 0 there is no maintenance at this time step
- 1 there is a maintenance at this time step
Into the representation in terms of "next maintenance time" as specified in
:attr:`GridValue.maintenance_time` which is:
- `-1` no foreseeable maintenance operation will be performed
- `0` a maintenance operation is being performed
- `1`, `2` etc. is the number of time step the next maintenance will be performed.
Parameters
----------
maintenance: ``numpy.ndarray``
1 dimensional array representing the time series of the maintenance (0 there is no maintenance, 1 there
is a maintenance at this time step)
Returns
-------
maintenance_duration: ``numpy.ndarray``
Array representing the time series of the duration of the next maintenance forseeable.
Examples
--------
If no maintenance are planned:
.. code-block:: python
maintenance_time = GridValue.get_maintenance_time_1d(np.array([0 for _ in range(10)]))
assert np.all(maintenance_time == np.array([-1 for _ in range(10)]))
If a maintenance planned of 3 time steps starting at timestep 6 (index 5 - index starts at 0)
.. code-block:: python
maintenance = np.array([0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0])
maintenance_time = GridValue.get_maintenance_time_1d(maintenance)
assert np.all(maintenance_time == np.array([5,4,3,2,1,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1]))
If a maintenance planned of 3 time steps starting at timestep 6
(index 5 - index starts at 0), and a second one for 2 time steps at time step 13
.. code-block:: python
maintenance = np.array([0,0,0,0,0,1,1,1,0,0,0,0,1,1,0,0,0])
maintenance_time = GridValue.get_maintenance_time_1d(maintenance)
assert np.all(maintenance_time == np.array([5,4,3,2,1,0,0,0,4,3,2,1,0,0,-1,-1,-1]))
"""
res = np.full(maintenance.shape, fill_value=GridValue.NAN_BUT_IN_INT, dtype=dt_int)
maintenance = np.concatenate((maintenance, (0, 0)))
a = np.diff(maintenance)
# +1 is because numpy does the diff `t+1` - `t` so to get index of the initial array
# I need to "+1"
start = np.where(a == 1)[0] + 1 # start of maintenance
end = np.where(a == -1)[0] + 1 # end of maintenance
prev_ = 0
# it's efficient here as i do a loop only on the number of time there is a maintenance
# and maintenance are quite rare
for beg_, end_ in zip(start, end):
res[prev_:beg_] = list(range(beg_ - prev_, 0, -1))
res[beg_:end_] = 0
prev_ = end_
# no maintenance are planned in the forseeable future
res[prev_:] = -1
return res
@staticmethod
def get_maintenance_duration_1d(maintenance):
"""
This function allows to transform a 1d numpy aarray maintenance (or hazards), where is specify:
- 0 there is no maintenance at this time step
- 1 there is a maintenance at this time step
Into the representation in terms of "next maintenance duration" as specified in
:attr:`GridValue.maintenance_duration` which is:
- `0` no forseeable maintenance operation will be performed
- `1`, `2` etc. is the number of time step the next maintenance will last (it can be positive even in the
case that no maintenance is currently being performed.
Parameters
----------
maintenance: ``numpy.ndarray``
1 dimensional array representing the time series of the maintenance (0 there is no maintenance, 1 there
is a maintenance at this time step)
Returns
-------
maintenance_duration: ``numpy.ndarray``
Array representing the time series of the duration of the next maintenance forseeable.
Examples
--------
If no maintenance are planned:
.. code-block:: python
maintenance = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
maintenance_duration = GridValue.get_maintenance_duration_1d(maintenance)
assert np.all(maintenance_duration == np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]))
If a maintenance planned of 3 time steps starting at timestep 6 (index 5 - index starts at 0)
.. code-block:: python
maintenance = np.array([0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0])
maintenance_duration = GridValue.get_maintenance_duration_1d(maintenance)
assert np.all(maintenance_duration == np.array([3,3,3,3,3,3,2,1,0,0,0,0,0,0,0,0]))
If a maintenance planned of 3 time steps starting at timestep 6
(index 5 - index starts at 0), and a second one for 2 time steps at time step 13
.. code-block:: python
maintenance = np.array([0,0,0,0,0,1,1,1,0,0,0,0,1,1,0,0,0])
maintenance_duration = GridValue.get_maintenance_duration_1d(maintenance)
assert np.all(maintenance_duration == np.array([3,3,3,3,3,3,2,1,2,2,2,2,2,1,0,0,0]))
"""
res = np.full(maintenance.shape, fill_value=GridValue.NAN_BUT_IN_INT, dtype=dt_int)
maintenance = np.concatenate((maintenance, (0, 0)))
a = np.diff(maintenance)
# +1 is because numpy does the diff `t+1` - `t` so to get index of the initial array
# I need to "+1"
start = np.where(a == 1)[0] + 1 # start of maintenance
end = np.where(a == -1)[0] + 1 # end of maintenance
prev_ = 0
# it's efficient here as i do a loop only on the number of time there is a maintenance
# and maintenance are quite rare
for beg_, end_ in zip(start, end):
res[prev_:beg_] = end_ - beg_
res[beg_:end_] = list(range(end_ - beg_, 0, -1))
prev_ = end_
# no maintenance are planned in the foreseeable future
res[prev_:] = 0
return res
@staticmethod
def get_hazard_duration_1d(hazard):
"""
This function allows to transform a 1d numpy aarray maintenance (or hazards), where is specify:
- 0 there is no maintenance at this time step
- 1 there is a maintenance at this time step
Into the representation in terms of "hzard duration" as specified in
:attr:`GridValue.maintenance_duration` which is:
- `0` no forseeable hazard operation will be performed
- `1`, `2` etc. is the number of time step the next hzard will last (it is positive only when a hazard
affect a given powerline)
Compared to :func:`GridValue.get_maintenance_duration_1d` we only know when the hazard occurs how long it
will last.
Parameters
----------
hazard: ``numpy.ndarray``
1 dimensional array representing the time series of the hazards (0 there is no hazard, 1 there
is a hazard at this time step)
Returns
-------
hazard_duration: ``numpy.ndarray``
Array representing the time series of the duration of the next hazard forseeable.
Examples
--------
If no maintenance are planned:
.. code-block:: python
hazard = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
hazard_duration = GridValue.get_hazard_duration_1d(hazard)
assert np.all(hazard_duration == np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]))
If a maintenance planned of 3 time steps starting at timestep 6 (index 5 - index starts at 0)
.. code-block:: python
hazard = np.array([0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0])
hazard_duration = GridValue.get_hazard_duration_1d(hazard)
assert np.all(hazard_duration == np.array([0,0,0,0,0,3,2,1,0,0,0,0,0,0,0,0]))
If a maintenance planned of 3 time steps starting at timestep 6
(index 5 - index starts at 0), and a second one for 2 time steps at time step 13
.. code-block:: python
hazard = np.array([0,0,0,0,0,1,1,1,0,0,0,0,1,1,0,0,0])
hazard_duration = GridValue.get_hazard_duration_1d(hazard)
assert np.all(hazard_duration == np.array([0,0,0,0,0,3,2,1,0,0,0,0,2,1,0,0,0]))
"""
res = np.full(hazard.shape, fill_value=GridValue.NAN_BUT_IN_INT, dtype=dt_int)
hazard = np.concatenate((hazard, (0, 0)))
a = np.diff(hazard)
# +1 is because numpy does the diff `t+1` - `t` so to get index of the initial array
# I need to "+1"
start = np.where(a == 1)[0] + 1 # start of maintenance
end = np.where(a == -1)[0] + 1 # end of maintenance
prev_ = 0
# it's efficient here as i do a loop only on the number of time there is a maintenance
# and maintenance are quite rare
for beg_, end_ in zip(start, end):
res[prev_:beg_] = 0
res[(beg_):(end_)] = list(range(end_ - beg_, 0, -1))
prev_ = end_
# no maintenance are planned in the forseeable future
res[prev_:] = 0
return res
    @abstractmethod
    def load_next(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

            This is automatically called by the "env.step" function. It loads the next
            information about the grid state (load p and load q, prod p and prod v as well as
            some maintenance or hazards information).

        Generate the next values, either by reading from a file, or by generating on the fly,
        and return a dictionary compatible with the :class:`grid2op.BaseAction` class allowed
        for the :class:`Environment`.

        More information about this dictionary can be found at :func:`grid2op.BaseAction.update`.

        As a (quick) reminder, this dictionary has for keys:

        - "injection" (optional): a dictionary with (optional) keys "load_p", "load_q",
          "prod_p", "prod_v"
        - "hazards" (optional): the outages suffered by the grid
        - "maintenance" (optional): the maintenance operations planned on the grid for the
          current time step

        Returns
        -------
        timestamp: ``datetime.datetime``
            The current timestamp for which the modifications have been generated.

        dict_: ``dict``
            Always empty in this base implementation, indicating no modification.

        maintenance_time: ``numpy.ndarray``, dtype:``int``
            Information about the next planned maintenance. See
            :attr:`GridValue.maintenance_time` for more information.

        maintenance_duration: ``numpy.ndarray``, dtype:``int``
            Information about the duration of the next planned maintenance. See
            :attr:`GridValue.maintenance_duration` for more information.

        hazard_duration: ``numpy.ndarray``, dtype:``int``
            Information about the current hazard. See :attr:`GridValue.hazard_duration`
            for more information.

        prod_v: ``numpy.ndarray``, dtype:``float``
            The (stored) value of the generator voltage setpoints (``None`` here).

        Raises
        ------
        StopIteration
            If the chronics is over.
        """
        self.current_datetime += self.time_interval
        return (
            self.current_datetime,
            {},
            self.maintenance_time,
            self.maintenance_duration,
            self.hazard_duration,
            None,
        )
    @abstractmethod
    def check_validity(self, backend):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

            This is called at the creation of the environment to ensure the Backend and the
            chronics are consistent with one another.

        A call to this method ensures that the actions that will be sent to the current
        :class:`grid2op.Environment` can be properly implemented by its
        :class:`grid2op.Backend`. This specific method checks that the dimensions of all
        vectors are consistent.

        Parameters
        ----------
        backend: :class:`grid2op.Backend.Backend`
            The backend used by the :class:`grid2op.Environment.Environment`

        Raises
        ------
        EnvError
            Always raised by this base implementation; concrete subclasses must override it.
        """
        raise EnvError("check_validity not implemented")
def done(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Use the :class:`ChroncisHandler` for such purpose
Whether the episode is over or not.
Returns
-------
done: ``bool``
``True`` means the episode has arrived to the end (no more data to generate) ``False`` means that the episode
is not over yet.
"""
if self.max_iter >= 0:
return self.curr_iter >= self.max_iter
else:
return False
    def forecasts(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

            Use the :class:`ChronicsHandler` for such purpose

        This method is used to generate the forecasts that are made available to the
        :class:`grid2op.BaseAgent`. These forecasts behave the same way as a list of tuples
        such as the one returned by the :func:`GridValue.load_next` method.

        The way they are generated depends on the GridValue class. If no forecasts are made
        available, then the empty list should be returned (as this base implementation does).

        Returns
        -------
        res: ``list``
            Each element of this list has the same type as what is returned by
            :func:`GridValue.load_next`.
        """
        return []
    @abstractmethod
    def next_chronics(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

            Move to the next "chronics", representing the next "level" if we make the parallel
            with video games.

        A call to this function should at least restart:

        - :attr:`GridValue.current_datetime` to its origin value
        - :attr:`GridValue.curr_iter`
        """
        pass
    def tell_id(self, id_num, previous=False):
        """
        Tell the chronics to use one folder in particular. This method is mainly used when the
        GridValue object can deal with many folders. In this case, it is used by the
        :class:`grid2op.Runner` to indicate which chronics to load for the current simulated
        episode.

        This is important to ensure reproducibility, especially in parallel computation
        settings.

        This should also be used in case of generation "on the fly" of the chronics to ensure
        the same property.

        By default it does nothing (it only emits a warning).

        .. note::
            As of grid2op 1.6.4, this function now accepts the return value of `self.get_id()`.
        """
        warnings.warn(
            'Class {} doesn\'t handle different input folder. "tell_id" method has no impact.'
            "".format(type(self).__name__)
        )
    def get_id(self) -> str:
        """
        Utility to get the current name of the path of the data being looked at, if data are
        files.

        This could also be used to return a unique identifier of the generated chronics even
        when they are generated on the fly, for example by returning a hash of the seed.

        By default it warns and returns an empty string.

        Returns
        -------
        res: ``str``
            A unique identifier of the chronics generated for this episode. For example, if
            the chronics come from a specific folder, this could be the path to this folder.
        """
        warnings.warn(
            'Class {} doesn\'t handle different input folder. "get_id" method will return "".'
            "".format(type(self).__name__)
        )
        return ""
    def max_timestep(self):
        """
        This method returns the maximum number of time steps the current episode can last.

        Note that if the :class:`grid2op.BaseAgent` performs a bad action that leads to a game
        over, then the episode can last less.

        Returns
        -------
        res: ``int``
            -1 if possibly infinite length, or a positive integer representing the maximum
            duration of this episode
        """
        return self.max_iter
    def shuffle(self, shuffler=None):
        """
        This method can be overridden if the data represented by this object need to be
        shuffled.

        By default it does nothing.

        Parameters
        ----------
        shuffler: ``object``
            Any function that can be used to shuffle the data (ignored by this base
            implementation).
        """
        pass
    def sample_next_chronics(self, probabilities=None):
        """
        This is used to sample the next chronics with the given probabilities.

        By default it does nothing (returns -1); subclasses handling multiple chronics may
        override it.

        Parameters
        -----------
        probabilities: ``np.ndarray``
            Array of numbers with the same size as the number of chronics in the cache.
            If it does not sum to one, it is rescaled such that it sums to one.

        Returns
        -------
        selected: ``int``
            The id of the chronics that was selected (-1 here, meaning "not supported").

        Examples
        --------
        Let's assume the chronics folder names are "Scenario_august_dummy" and
        "Scenario_february_dummy", and that we want the environment to load
        75% of the time the month of february and 25% of the time the month of august:

        .. code-block:: python

            import grid2op
            env = grid2op.make("l2rpn_neurips_2020_track1", test=True)  # don't add "test=True" if
            # you don't want to perform a test.

            env.seed(0)  # for reproducible experiments
            for i in range(10):
                _ = env.chronics_handler.sample_next_chronics([0.25, 0.75])
                obs = env.reset()
                print(obs.month)
                # prints "2" with probability 0.75 and "8" with probability 0.25
        """
        return -1
    def set_filter(self, filter_fun):
        """
        Assign a filtering function to remove some chronics from the cache the next time
        "reset" is called.

        **NB** filter_fun is applied to all elements of :attr:`Multifolder.subpaths`. If
        ``True`` the data will be put in the cache, if ``False`` it will NOT be.

        **NB** this has no effect until :attr:`Multifolder.reset` is called.

        Notes
        ------
        As of now, this has no effect unless the chronics are generated using
        :class:`Multifolder` or :class:`MultifolderWithCache` (this base implementation only
        emits a warning).

        Examples
        --------
        Let's assume the chronics folder names are "Scenario_august_dummy" and
        "Scenario_february_dummy", and that we want the environment to loop only through the
        month of february:

        .. code-block:: python

            import re
            import grid2op
            env = grid2op.make("l2rpn_neurips_2020_track1", test=True)  # don't add "test=True" if
            # you don't want to perform a test.

            # keep only the month of february
            env.chronics_handler.set_filter(lambda path: re.match(".*february.*", path) is not None)
            env.chronics_handler.reset()  # if you don't do that it will not have any effect

            for i in range(10):
                obs = env.reset()
                print(obs.month)
                # it always prints "2" (representing february)
        """
        warnings.warn(
            f'Calling this function has no effect for chronics generated from "{type(self)}"'
        )
    def set_chunk_size(self, new_chunk_size):
        """
        This parameter allows to set, if the data generation process supports it, the amount
        of data read at the same time. It can help speed up the computation process by adding
        more control on the io operations.

        By default it does nothing.

        Parameters
        ----------
        new_chunk_size: ``int``
            The chunk size (ie the number of rows that will be read from each data set at the
            same time)
        """
        pass
    def fast_forward(self, nb_timestep):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

            Prefer using :func:`grid2op.Environment.BaseEnv.fast_forward_chronics`

        This method allows you to skip some time steps at the beginning of the chronics.

        This is useful at the beginning of the training, if you want your agent to learn on
        more diverse scenarios. Indeed, the data provided in the chronics usually always start
        at the same date and time (for example Jan 1st at 00:00). This can lead to suboptimal
        exploration, as during this phase only a few time steps are managed by the agent, so
        in general these few time steps will correspond to grid states around Jan 1st at 00:00.

        Parameters
        ----------
        nb_timestep: ``int``
            Number of time steps to "fast forward"
        """
        # simply consume ``nb_timestep`` entries of the data stream
        for _ in range(nb_timestep):
            self.load_next()
| 34,207 | 41.600249 | 123 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/multiFolder.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import json
import warnings
import numpy as np
from datetime import timedelta, datetime
from grid2op.dtypes import dt_int
from grid2op.Exceptions import *
from grid2op.Chronics.gridValue import GridValue
from grid2op.Chronics.gridStateFromFile import GridStateFromFile
class Multifolder(GridValue):
"""
The classes :class:`GridStateFromFile` and :class:`GridStateFromFileWithForecasts` implemented the reading of a
single folder representing a single episode.
This class is here to "loop" between different episode. Each one being stored in a folder readable by
:class:`GridStateFromFile` or one of its derivate (eg. :class:`GridStateFromFileWithForecasts`).
Chronics are always read in the alpha-numeric order for this class. This means that if the folder is not modified,
the data are always loaded in the same order, regardless of the :class:`grid2op.Backend`, :class:`grid2op.BaseAgent` or
:class:`grid2op.Environment`.
.. note::
Most grid2op environments, by default, use this type of "chronix", read from the hard drive.
Attributes
-----------
gridvalueClass: ``type``, optional
Type of class used to read the data from the disk. It defaults to :class:`GridStateFromFile`.
data: :class:`GridStateFromFile`
Data that will be loaded and used to produced grid state and forecasted values.
path: ``str``
Path where the folders of the episodes are stored.
sep: ``str``
Columns separtor, forwarded to :attr:`Multifolder.data` when it's built at the beginning of each episode.
subpaths: ``list``
List of all the episode that can be "played". It's a sorted list of all the directory in
:attr:`Multifolder.path`. Each one should contain data in a format that is readable by
:attr:`MultiFolder.gridvalueClass`.
"""
MULTI_CHRONICS = True
    def __init__(
        self,
        path,
        time_interval=timedelta(minutes=5),
        start_datetime=datetime(year=2019, month=1, day=1),
        gridvalueClass=GridStateFromFile,
        sep=";",
        max_iter=-1,
        chunk_size=None,
        filter_func=None,
        **kwargs
    ):
        """Build the multi-folder time series reader.

        Extra keyword arguments (``kwargs``) are stored and forwarded to
        ``gridvalueClass`` each time a new episode reader is built (see ``initialize``).
        """
        self._kwargs = kwargs
        GridValue.__init__(
            self,
            time_interval=time_interval,
            max_iter=max_iter,
            chunk_size=chunk_size,
            start_datetime=start_datetime,
        )
        self.gridvalueClass = gridvalueClass
        self.data = None  # reader of the current episode, built in `initialize`
        self.path = os.path.abspath(path)
        self.sep = sep
        # scan `self.path` for the candidate episode folders (fills `self.subpaths`)
        self.init_subpath()
        if len(self.subpaths) == 0:
            raise ChronicsNotFoundError(
                'Not chronics are found in "{}". Make sure there are at least '
                "1 chronics folder there.".format(self.path)
            )
        # TODO clarify that
        # np.random.shuffle(self.subpaths)
        self.chunk_size = chunk_size
        # for saving
        self._order_backend_loads = None
        self._order_backend_prods = None
        self._order_backend_lines = None
        self._order_backend_subs = None
        self._names_chronics_to_backend = None
        # improving looping strategy
        if filter_func is None:
            self._filter = self._default_filter
        else:
            if not callable(filter_func):
                raise ChronicsError(
                    "The filtering function you provided ("
                    "kwargs: filter_func) is not callable."
                )
            self._filter = filter_func
        self._prev_cache_id = 0  # index (in `_order`) of the episode played next
        self._order = None  # indices of the kept subpaths, built lazily by `reset()`
def init_subpath(self):
"""
Read the content of the main directory and initialize the `subpaths`
where the data could be located.
This is usefull, for example, if you generated data and want to be able to use them.
**NB** this has no effect until :attr:`Multifolder.reset` is called.
.. warning::
By default, it will only consider data that are present at creation time. If you add data after, you need
to call this function (and do a reset)
Examples
---------
A "typical" usage of this function can be the following workflow.
Start a script to train an agent (say "train_agent.py"):
.. code-block:: python
import os
import grid2op
from lightsim2grid import LightSimBackend # highly recommended for speed !
env_name = "l2rpn_wcci_2022" # only compatible with what comes next (at time of writing)
env = grid2op.make(env_name, backend=LightSimBackend())
# now train an agent
# see l2rpn_baselines package for more information, for example
# l2rpn-baselines.readthedocs.io/
from l2rpn_baselines.PPO_SB3 import train
nb_iter = 10000 # train for that many iterations
agent_name = "WhaetverIWant" # or any other name
agent_path = os.path.expand("~") # or anywhere else on your computer
trained_agent = train(env,
iterations=nb_iter,
name=agent_name,
save_path=agent_path)
On another script (say "generate_data.py"), you can generate more data:
.. code-block:: python
import grid2op
env_name = "l2rpn_wcci_2022" # only compatible with what comes next (at time of writing)
env = grid2op.make(env_name)
env.generate_data(nb_year=50) # generates 50 years of data
# (takes roughly 50s per week, around 45mins per year, in this case 50 * 45 mins = lots of minutes)
Let the script to generate the data run normally (don't interupt it).
And from time to time, in the script "train_agent.py" you can do:
.. code-block:: python
# reload the generated data
env.chronics_handler.init_subpath()
env.chronics_handler.reset()
# retrain the agent taking into account new data
trained_agent = train(env,
iterations=nb_iter,
name=agent_name,
save_path=agent_path,
load_path=agent_path
)
# the script to generate data is still running, you can reload some data again
env.chronics_handler.init_subpath()
env.chronics_handler.reset()
# retrain the agent
trained_agent = train(env,
iterations=nb_iter,
name=agent_name,
save_path=agent_path,
load_path=agent_path
)
# etc.
Both scripts you run "at the same time" for it to work efficiently.
To recap:
- script "generate_data.py" will... generate data
- these data will be reloaded from time to time by the script "train_agent.py"
.. warning::
Do not delete data between calls to `env.chronics_handler.init_subpath()` and `env.chronics_handler.reset()`,
and even less so during training !
If you want to delete data (for example not to overload your hard drive) you should remove them
right before calling `env.chronics_handler.init_subpath()`.
"""
try:
self.subpaths = [
os.path.join(self.path, el)
for el in os.listdir(self.path)
if os.path.isdir(os.path.join(self.path, el))
]
self.subpaths.sort()
self.subpaths = np.array(self.subpaths)
except FileNotFoundError as exc_:
raise ChronicsError(
'Path "{}" doesn\'t exists.'.format(self.path)
) from exc_
self._order = None # to trigger a "reset" when chronix will next be loaded
def get_kwargs(self, dict_):
if self._filter != self._default_filter:
dict_["filter_func"] = self._filter
def available_chronics(self):
"""return the list of available chronics.
Examples
--------
# TODO
"""
return self.subpaths[self._order]
def _default_filter(self, x):
"""
default filter used at the initialization. It keeps only the first data encountered.
"""
return True
def set_filter(self, filter_fun):
"""
Assign a filtering function to remove some chronics from the next time a call to "reset_cache" is called.
**NB** filter_fun is applied to all element of :attr:`Multifolder.subpaths`. If ``True`` then it will
be put in cache, if ``False`` this data will NOT be put in the cache.
**NB** this has no effect until :attr:`Multifolder.reset` is called.
Examples
--------
Let's assume in your chronics, the folder names are "Scenario_august_dummy", and
"Scenario_february_dummy". For the sake of the example, we want the environment to loop
only through the month of february, because why not. Then we can do the following:
.. code-block:: python
import re
import grid2op
env = grid2op.make("l2rpn_neurips_2020_track1", test=True) # don't add "test=True" if
# you don't want to perform a test.
# check at which month will belong each observation
for i in range(10):
obs = env.reset()
print(obs.month)
# it always alternatively prints "8" (if chronics if from august) or
# "2" if chronics is from february)
# to see where the chronics are located
print(env.chronics_handler.subpaths)
# keep only the month of february
env.chronics_handler.set_filter(lambda path: re.match(".*february.*", path) is not None)
env.chronics_handler.reset() # if you don't do that it will not have any effect
for i in range(10):
obs = env.reset()
print(obs.month)
# it always prints "2" (representing february)
"""
self._filter = filter_fun
def next_chronics(self):
self._prev_cache_id += 1
# TODO implement the shuffling indeed.
# if self._prev_cache_id >= len(self._order):
# self.space_prng.shuffle(self._order)
self._prev_cache_id %= len(self._order)
def sample_next_chronics(self, probabilities=None):
"""
This function should be called before "next_chronics".
It can be used to sample non uniformly for the next next chronics.
Parameters
-----------
probabilities: ``np.ndarray``
Array of integer with the same size as the number of chronics in the cache.
If it does not sum to one, it is rescaled such that it sums to one.
Returns
-------
selected: ``int``
The integer that was selected.
Examples
--------
Let's assume in your chronics, the folder names are "Scenario_august_dummy", and
"Scenario_february_dummy". For the sake of the example, we want the environment to loop
75% of the time to the month of february and 25% of the time to the month of august.
.. code-block:: python
import grid2op
env = grid2op.make("l2rpn_neurips_2020_track1", test=True) # don't add "test=True" if
# you don't want to perform a test.
# check at which month will belong each observation
for i in range(10):
obs = env.reset()
print(obs.month)
# it always alternatively prints "8" (if chronics if from august) or
# "2" if chronics is from february) with a probability of 50% / 50%
env.seed(0) # for reproducible experiment
for i in range(10):
_ = env.chronics_handler.sample_next_chronics([0.25, 0.75])
obs = env.reset()
print(obs.month)
# it prints "2" with probability 0.75 and "8" with probability 0.25
"""
self._prev_cache_id = -1
if probabilities is None:
probabilities = np.ones(self._order.shape[0])
# make sure it sums to 1
probabilities /= np.sum(probabilities)
# take one at "random" among these
selected = self.space_prng.choice(self._order, p=probabilities)
id_sel = np.where(self._order == selected)[0]
self._prev_cache_id = selected - 1
return id_sel
def reset(self):
"""
Rebuilt the :attr:`Multifolder._order`. This should be called after a call to :func:`Multifolder.set_filter`
is performed.
.. warning:: This "reset" is different from the `env.reset`. It should be only called after the function to set
the filtering function has been called.
This "reset" only reset which chronics are used for the environment.
Returns
-------
new_order: ``numpy.ndarray``, dtype: str
The selected chronics paths after a call to this method.
Notes
-----
Except explicitly mentioned, for example by :func:`Multifolder.set_filter` you should not use this
function. This will erased every selection of chronics, every shuffle etc.
"""
self._order = []
self._prev_cache_id = 0
for i, path in enumerate(self.subpaths):
if not self._filter(path):
continue
self._order.append(i)
if len(self._order) == 0:
raise RuntimeError(
'Impossible to initialize the Multifolder. Your "filter_fun" filters out all the '
"possible scenarios."
)
self._order = np.array(self._order)
return self.subpaths[self._order]
    def initialize(
        self,
        order_backend_loads,
        order_backend_prods,
        order_backend_lines,
        order_backend_subs,
        names_chronics_to_backend=None,
    ):
        """Start a new episode: build the per-episode reader (``gridvalueClass``)
        for the scenario currently pointed to by ``_prev_cache_id`` and initialize
        it with the backend element ordering.
        """
        self._order_backend_loads = order_backend_loads
        self._order_backend_prods = order_backend_prods
        self._order_backend_lines = order_backend_lines
        self._order_backend_subs = order_backend_subs
        self._names_chronics_to_backend = names_chronics_to_backend
        self.n_gen = len(order_backend_prods)
        self.n_load = len(order_backend_loads)
        self.n_line = len(order_backend_lines)
        if self._order is None:
            # initialize the cache
            self.reset()
        id_scenario = self._order[self._prev_cache_id]
        this_path = self.subpaths[id_scenario]
        self.data = self.gridvalueClass(
            time_interval=self.time_interval,
            sep=self.sep,
            path=this_path,
            max_iter=self.max_iter,
            chunk_size=self.chunk_size,
            **self._kwargs
        )
        if self.seed is not None:
            # derive a reproducible per-episode seed from this object's prng
            max_int = np.iinfo(dt_int).max
            seed_chronics = self.space_prng.randint(max_int)
            self.data.seed(seed_chronics)
        self.data.initialize(
            order_backend_loads,
            order_backend_prods,
            order_backend_lines,
            order_backend_subs,
            names_chronics_to_backend=names_chronics_to_backend,
        )
    def done(self):
        """
        Tells the :class:`grid2op.Environment` whether the episode is over.

        Returns
        -------
        res: ``bool``
            Whether or not the episode, represented by :attr:`MultiFolder.data`, is over.
        """
        # delegates to the reader of the current episode (built in `initialize`)
        return self.data.done()
    def load_next(self):
        """
        Load the next time step of the current episode.

        Returns
        -------
        See the return type of :class:`GridStateFromFile.load_next` (or of :attr:`MultiFolder.gridvalueClass` if it
        has been changed) for more information.
        """
        # delegates to the reader of the current episode (built in `initialize`)
        return self.data.load_next()
    def check_validity(self, backend):
        """
        Checks that the loaded data can be properly read and understood by the :class:`grid2op.Backend`.

        Parameters
        ----------
        backend: :class:`grid2op.Backend`
            The backend used for the experiment.

        Returns
        -------
        See the return type of :class:`GridStateFromFile.check_validity` (or of :attr:`MultiFolder.gridvalueClass` if it
        has been changed) for more information.
        """
        # delegates to the reader of the current episode (built in `initialize`)
        return self.data.check_validity(backend)
    def forecasts(self):
        """
        The representation of the forecasted grid state(s), if any.

        Returns
        -------
        See the return type of :class:`GridStateFromFile.forecasts` (or of :attr:`MultiFolder.gridvalueClass` if it
        has been changed) for more information.
        """
        # delegates to the reader of the current episode (built in `initialize`)
        return self.data.forecasts()
def tell_id(self, id_num, previous=False):
"""
This tells this chronics to load for the next episode.
By default, if id_num is greater than the number of episode, it is equivalent at restarting from the first
one: episode are played indefinitely in the same order.
Parameters
----------
id_num: ``int`` | ``str``
Id of the chronics to load.
previous:
Do you want to set to the previous value of this one or not (note that in general you want to set to
the previous value, as calling this function as an impact only after `env.reset()` is called)
"""
import pdb
if isinstance(id_num, str):
# new accepted behaviour starting 1.6.4
# new in version 1.6.5: you only need to specify the chronics folder id and not the full path
found = False
for internal_id_, number in enumerate(self._order):
if (
self.subpaths[number] == id_num
or os.path.join(self.path, id_num) == self.subpaths[number]
):
self._prev_cache_id = internal_id_
found = True
if not found:
raise ChronicsError(
f'Impossible to find the chronics with id "{id_num}". The call to '
f"`env.chronics_handler.tell_id(...)` cannot be performed."
)
else:
# default behaviour prior to 1.6.4
self._prev_cache_id = id_num
self._prev_cache_id %= len(self._order)
if previous:
self._prev_cache_id -= 1
self._prev_cache_id %= len(self._order)
    def get_id(self) -> str:
        """
        Full absolute path of the folder used for the current episode.

        Returns
        -------
        res: ``str``
            Path from which the data are generated for the current episode.
        """
        # `_prev_cache_id` points into `_order`, which indexes into `subpaths`
        return self.subpaths[self._order[self._prev_cache_id]]
    def max_timestep(self):
        """Maximum number of time steps of the current episode (delegated to the current reader)."""
        return self.data.max_timestep()
def shuffle(self, shuffler=None):
"""
This method is used to have a better control on the order in which the subfolder containing the episode are
processed.
It can focus the evaluation on one specific folder, shuffle the folders, use only a subset of them etc. See the
examples for more information.
Parameters
----------
shuffler: ``object``
Shuffler should be a function that is called on :attr:`MultiFolder.subpaths` that will shuffle them.
It can also be used to remove some path if needed (see example).
Returns
--------
new_order: ``numpy.ndarray``, dtype: str
The order in which the chronics will be looped through
Examples
---------
If you want to simply shuffle the data you can do:
.. code-block:: python
# create an environment
import numpy as np
import grid2op
env_name = "l2rpn_case14_sandbox"
env = grid2op.make(env_name)
# shuffle the chronics (uniformly at random, without duplication)
env.chronics_handler.shuffle()
# use the environment as you want, here do 10 episode with the selected data
for i in range(10):
obs = env.reset()
print(f"Path of the chronics used: {env.chronics_handler.data.path}")
done = False
while not done:
act = ...
obs, reward, done, info = env.step(act)
# re shuffle them (still uniformly at random, without duplication)
env.chronics_handler.shuffle()
# use the environment as you want, here do 10 episode with the selected data
for i in range(10):
obs = env.reset()
print(f"Path of the chronics used: {env.chronics_handler.data.path}")
done = False
while not done:
act = ...
obs, reward, done, info = env.step(act)
If you want to use only a subset of the path, say for example the path with index 1, 5, and 6
.. code-block:: python
# create an environment
import numpy as np
import grid2op
env_name = "l2rpn_case14_sandbox"
env = grid2op.make(env_name)
# select the chronics (here 5 at random amongst the 10 "last" chronics of the environment)
nb_chron = len(env.chronics_handler.chronics_used)
chron_id_to_keep = np.random.choice(np.arange(nb_chron - 10, nb_chron), size=5, replace=True)
env.chronics_handler.shuffle(lambda x: chron_id_to_keep)
# use the environment as you want, here do 10 episode with the selected data
for i in range(10):
obs = env.reset()
print(f"Path of the chronics used: {env.chronics_handler.data.path}")
done = False
while not done:
act = ...
obs, reward, done, info = env.step(act)
# re shuffle them (uniformly at random, without duplication, among the chronics "selected" above.)
env.chronics_handler.shuffle()
# use the environment as you want, here do 10 episode with the selected data
for i in range(10):
obs = env.reset()
print(f"Path of the chronics used: {env.chronics_handler.data.path}")
done = False
while not done:
act = ...
obs, reward, done, info = env.step(act)
.. warning:: Though it is possible to use this "shuffle" function to only use some chronics, we highly
recommend you to have a look at the sections :ref:`environment-module-chronics-info` or
:ref:`environment-module-train-val-test`. It is likely that you will find better way to do
what you want to do there. Use this last example with care then.
.. warning:: As stated on the :func:`MultiFolder.reset`, any call to `env.chronics_handler.reset`
will remove anything related to shuffling, including the selection of chronics !
"""
if shuffler is None:
def shuffler(x):
return x[self.space_prng.choice(len(x), size=len(x), replace=False)]
self._order = 1 * shuffler(self._order)
return self.subpaths[self._order]
    @property
    def chronics_used(self):
        """return the full path of the chronics currently in use."""
        # `_order` holds the indices (into `subpaths`) kept by the current filter/shuffle
        return self.subpaths[self._order]
    def set_chunk_size(self, new_chunk_size):
        """Set the number of rows read at once by the per-episode readers; used when
        `initialize` builds the reader for the next episode."""
        self.chunk_size = new_chunk_size
def split_and_save(self, datetime_beg, datetime_end, path_out):
"""
This function allows you to split the data (keeping only the data between datetime_beg and datetime_end) and to
save it on your local machine. This is espacially handy if you want to extract only a piece of the dataset we
provide for example.
Parameters
----------
datetime_beg: ``dict``
Keys are the name id of the scenarios you want to save. Values
are the corresponding starting date and time (in "%Y-%m-ùd %H:%M"
format). See example for more information.
datetime_end: ``dict``
keys must be the same as in the "datetime_beg" argument.
See example for more information
path_out: ``str``
The path were the data will be stored.
Examples
---------
Here is a short example on how to use it
.. code-block:: python
import grid2op
import os
env = grid2op.make()
env.chronics_handler.real_data.split_and_save({"004": "2019-01-08 02:00",
"005": "2019-01-30 08:00",
"006": "2019-01-17 00:00",
"007": "2019-01-17 01:00",
"008": "2019-01-21 09:00",
"009": "2019-01-22 12:00",
"010": "2019-01-27 19:00",
"011": "2019-01-15 12:00",
"012": "2019-01-08 13:00",
"013": "2019-01-22 00:00"},
{"004": "2019-01-11 02:00",
"005": "2019-02-01 08:00",
"006": "2019-01-18 00:00",
"007": "2019-01-18 01:00",
"008": "2019-01-22 09:00",
"009": "2019-01-24 12:00",
"010": "2019-01-29 19:00",
"011": "2019-01-17 12:00",
"012": "2019-01-10 13:00",
"013": "2019-01-24 00:00"},
path_out=os.path.join("/tmp"))
"""
if not isinstance(datetime_beg, dict):
datetime_beg_orig = datetime_beg
datetime_beg = {}
for subpath in self.subpaths:
id_this_chron = os.path.split(subpath)[-1]
datetime_beg[id_this_chron] = datetime_beg_orig
if not isinstance(datetime_end, dict):
datetime_end_orig = datetime_end
datetime_end = {}
for subpath in self.subpaths:
id_this_chron = os.path.split(subpath)[-1]
datetime_end[id_this_chron] = datetime_end_orig
seed_chronics_all = {}
for subpath in self.subpaths:
id_this_chron = os.path.split(subpath)[-1]
if not id_this_chron in datetime_beg:
continue
tmp = self.gridvalueClass(
time_interval=self.time_interval,
sep=self.sep,
path=subpath,
max_iter=self._max_iter,
chunk_size=self.chunk_size,
)
seed_chronics = None
if self.seed is not None:
max_int = np.iinfo(dt_int).max
seed_chronics = self.space_prng.randint(max_int)
tmp.seed(seed_chronics)
seed_chronics_all[subpath] = seed_chronics
tmp.initialize(
self._order_backend_loads,
self._order_backend_prods,
self._order_backend_lines,
self._order_backend_subs,
self._names_chronics_to_backend,
)
path_out_chron = os.path.join(path_out, id_this_chron)
tmp.split_and_save(
datetime_beg[id_this_chron], datetime_end[id_this_chron], path_out_chron
)
meta_params = {}
meta_params["datetime_beg"] = datetime_beg
meta_params["datetime_end"] = datetime_end
meta_params["path_out"] = path_out
meta_params["all_seeds"] = seed_chronics_all
try:
with open(
os.path.join(path_out, "split_and_save_meta_params.json"),
"w",
encoding="utf-8",
) as f:
json.dump(obj=meta_params, fp=f, sort_keys=True, indent=4)
except Exception as exc_:
warnings.warn(
'Impossible to save the "metadata" for the chronics with error:\n"{}"'
"".format(exc_)
)
    def fast_forward(self, nb_timestep):
        """Skip `nb_timestep` steps at the beginning of the current episode
        (delegated to the current reader)."""
        self.data.fast_forward(nb_timestep)
| 30,244 | 38.076227 | 123 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/multifolderWithCache.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from datetime import timedelta, datetime
from grid2op.dtypes import dt_int
from grid2op.Chronics.multiFolder import Multifolder
from grid2op.Chronics.gridStateFromFile import GridStateFromFile
from grid2op.Exceptions import ChronicsError
class MultifolderWithCache(Multifolder):
"""
This class is a particular type of :class:`Multifolder` that, instead of reading is all from disk each time
stores it into memory.
For now it's only compatible (because it only present some kind of interest) with :class:`GridValue` class
inheriting from :class:`GridStateFromFile`.
The function :func:`MultifolderWithCache.reset` will redo the cache from scratch. You can filter which
type of data will be cached or not with the :func:`MultifolderWithCache.set_filter` function.
**NB** Efficient use of this class can dramatically increase the speed of the learning algorithm, especially at
the beginning where lots of data are read from the hard drive and the agent games over after a few time steps (
typically, data are given by months, so 30*288 >= 8600 time steps, while during exploration an agent usually
performs less than a few dozen of steps leading to more time spent reading 8600 rows than computing the
few dozen of steps.
.. danger::
When you create an environment with this chronics class (*eg* by doing
`env = make(...,chronics_class=MultifolderWithCache)`), the "cache" is not
pre loaded, only the first scenario is loaded in memory (to save loading time).
In order to load everything, you NEED to call `env.chronics_handler.reset()`, which,
by default, will load every scenario into memory. If you want to filter some
data, for example by reading only the scenario of decembre, you can use the
`set_filter` method.
A typical workflow (at the start of your program) when using this class is then:
1) create the environment: `env = make(...,chronics_class=MultifolderWithCache)`
2) (optional but recommended) select some scenarios:
`env.chronics_handler.real_data.set_filter(lambda x: re.match(".*december.*", x) is not None)`
3) load the data in memory: `env.chronics_handler.reset()`
4) do whatever you want using `env`
.. note::
After creation (anywhere in your code),
you can use other scenarios by calling the `set_filter` function again:
1) select other scenarios:
`env.chronics_handler.real_data.set_filter(lambda x: re.match(".*january.*", x) is not None)`
2) load the data in memory: `env.chronics_handler.reset()`
3) do whatever you want using `env`
Examples
---------
This is how this class can be used:
.. code-block:: python
import re
from grid2op import make
from grid2op.Chronics import MultifolderWithCache
env = make(...,chronics_class=MultifolderWithCache)
# set the chronics to limit to one week of data (lower memory footprint)
env.chronics_handler.set_max_iter(7*288)
# assign a filter, use only chronics that have "december" in their name
env.chronics_handler.real_data.set_filter(lambda x: re.match(".*december.*", x) is not None)
# create the cache
env.chronics_handler.reset()
# and now you can use it as you would do any gym environment:
my_agent = ...
obs = env.reset()
done = False
reward = env.reward_range[0]
while not done:
act = my_agent.act(obs, reward, done)
obs, reward, done, info = env.step(act) # and step will NOT load any data from disk.
"""
MULTI_CHRONICS = True
ERROR_MSG_NOT_LOADED = ("We detected a misusage of the `MultifolderWithCache` class: the cache "
"has not been loaded in memory which will most likely cause issues "
"with your environment. Do not forget to call "
"`env.chronics_handler.set_filter(...)` to tell which time series "
"you want to keep and then `env.chronics_handler.reset()` "
"to load them. \nFor more information consult the documentation:\n"
"https://grid2op.readthedocs.io/en/latest/chronics.html#grid2op.Chronics.MultifolderWithCache")
    def __init__(
        self,
        path,
        time_interval=timedelta(minutes=5),
        start_datetime=datetime(year=2019, month=1, day=1),
        gridvalueClass=GridStateFromFile,
        sep=";",
        max_iter=-1,
        chunk_size=None,
        filter_func=None,
        **kwargs,
    ):
        """Build the caching multi-folder reader.

        The ``_DONTUSE_*`` kwargs carry over the internal guard counters when an
        instance is re-created from ``get_kwargs`` (they are not user-facing).
        """
        # below: counter to prevent use without explicit call to `env.chronics.handler.reset()`
        if "_DONTUSE_nb_reset_called" in kwargs:
            self.__nb_reset_called = int(kwargs["_DONTUSE_nb_reset_called"])
            del kwargs["_DONTUSE_nb_reset_called"]
        else:
            self.__nb_reset_called = -1
        if "_DONTUSE_nb_step_called" in kwargs:
            self.__nb_step_called = int(kwargs["_DONTUSE_nb_step_called"])
            del kwargs["_DONTUSE_nb_step_called"]
        else:
            self.__nb_step_called = -1
        if "_DONTUSE_nb_init_called" in kwargs:
            self.__nb_init_called = int(kwargs["_DONTUSE_nb_init_called"])
            del kwargs["_DONTUSE_nb_init_called"]
        else:
            self.__nb_init_called = -1
        # now init the data
        # NOTE(review): the user-provided `chunk_size` is NOT forwarded to the parent
        # (None is hard-coded, episodes being cached whole) — confirm this is intended
        Multifolder.__init__(
            self,
            path=path,
            time_interval=time_interval,
            start_datetime=start_datetime,
            gridvalueClass=gridvalueClass,
            sep=sep,
            max_iter=max_iter,
            chunk_size=None,
            filter_func=filter_func,
            **kwargs
        )
        self._cached_data = None  # list aligned with `subpaths`, filled by `reset()`
        self.cache_size = 0
        if not issubclass(self.gridvalueClass, GridStateFromFile):
            raise RuntimeError(
                'MultifolderWithCache does not work when "gridvalueClass" does not inherit from '
                '"GridStateFromFile".'
            )
        self.__i = 0  # counter used by `_default_filter` to keep only the first scenario
def _default_filter(self, x):
"""
default filter used at the initialization. It keeps only the first data encountered.
"""
if self.__i > 0:
return False
else:
self.__i += 1
return True
    def reset(self):
        """
        Rebuilt the cache as if it were built from scratch.
        This call might take a while to process.

        .. danger::
            You NEED to call this function (with `env.chronics_handler.reset()`)
            if you use the `MultiFolderWithCache` class in your experiments.

        .. warning::
            If a seed is set (see :func:`MultiFolderWithCache.seed`) then
            all the data in the cache are also seeded when this
            method is called.
        """
        self._cached_data = [None for _ in self.subpaths]
        self.__i = 0  # re-arm the default filter ("keep only first")
        # select the right paths, and store their id in "_order"
        super().reset()
        self.cache_size = 0
        max_int = np.iinfo(dt_int).max
        for i in self._order:
            # everything in "_order" need to be put in cache
            path = self.subpaths[i]
            data = self.gridvalueClass(
                time_interval=self.time_interval,
                sep=self.sep,
                path=path,
                max_iter=self.max_iter,
                chunk_size=None,
            )
            if self.seed_used is not None:
                # derive a per-scenario seed from this object's prng
                seed_chronics = self.space_prng.randint(max_int)
                data.seed(seed_chronics)
            data.initialize(
                self._order_backend_loads,
                self._order_backend_prods,
                self._order_backend_lines,
                self._order_backend_subs,
                self._names_chronics_to_backend,
            )
            self._cached_data[i] = data
            self.cache_size += 1
        if self.cache_size == 0:
            raise RuntimeError("Impossible to initialize the new cache.")
        self.__nb_reset_called += 1
        return self.subpaths[self._order]
    def initialize(
        self,
        order_backend_loads,
        order_backend_prods,
        order_backend_lines,
        order_backend_subs,
        names_chronics_to_backend=None,
    ):
        """Start a new episode using the pre-loaded reader from the cache (no disk
        read). Raises :class:`ChronicsError` if the cache was never loaded via
        ``reset()`` (except for the very first call, made at env creation).
        """
        self.__nb_init_called += 1
        if self.__nb_reset_called <= 0:
            if self.__nb_init_called != 0:
                # authorize the creation of the environment but nothing more
                raise ChronicsError(type(self).ERROR_MSG_NOT_LOADED)
        self._order_backend_loads = order_backend_loads
        self._order_backend_prods = order_backend_prods
        self._order_backend_lines = order_backend_lines
        self._order_backend_subs = order_backend_subs
        self._names_chronics_to_backend = names_chronics_to_backend
        self.n_gen = len(order_backend_prods)
        self.n_load = len(order_backend_loads)
        self.n_line = len(order_backend_lines)
        if self._cached_data is None:
            # initialize the cache
            self.reset()
        id_scenario = self._order[self._prev_cache_id]
        self.data = self._cached_data[id_scenario]
        self.data.next_chronics()
    @property
    def max_iter(self):
        # maximum number of iterations per episode (backed by `_max_iter`)
        return self._max_iter
    @max_iter.setter
    def max_iter(self, value : int):
        self._max_iter = int(value)
        # propagate the new limit to every reader already in the cache
        # NOTE(review): the raw `value` (not `int(value)`) is forwarded here — confirm intended
        for el in self._cached_data:
            if el is None:
                continue
            el.max_iter = value
    def max_timestep(self):
        """Maximum number of time steps of the current (cached) episode."""
        return self.data.max_timestep()
    def seed(self, seed : int):
        """This seeds both the MultiFolderWithCache
        (which has an impact for example on :func:`MultiFolder.sample_next_chronics`)
        and each data present in the cache.

        Parameters
        ----------
        seed : int
            The seed to use
        """
        res = super().seed(seed)
        max_int = np.iinfo(dt_int).max
        for i in self._order:
            data = self._cached_data[i]
            if data is None:
                continue
            # each cached reader gets its own derived seed, drawn from this object's prng
            seed_ts = self.space_prng.randint(max_int)
            data.seed(seed_ts)
        return res
    def load_next(self):
        """Return the next time step of the cached episode, guarding against use of
        the class without a prior ``reset()`` (cache load)."""
        self.__nb_step_called += 1
        if self.__nb_reset_called <= 0:
            if self.__nb_step_called != 0:
                # authorize the creation of the environment but nothing more
                raise ChronicsError(type(self).ERROR_MSG_NOT_LOADED)
        return super().load_next()
    def set_filter(self, filter_fun):
        """Assign a new scenario filter (see :func:`Multifolder.set_filter`) and
        re-arm the "cache not loaded" guards: a ``reset()`` is required afterwards."""
        self.__nb_reset_called = 0
        self.__nb_step_called = 0
        self.__nb_init_called = 0
        return super().set_filter(filter_fun)
    def get_kwargs(self, dict_):
        """Store in *dict_* the internal guard counters, so that a re-created
        instance does not trip the "cache not loaded" checks.

        NOTE(review): the parent's ``get_kwargs`` has no explicit return, so this
        returns ``None`` — confirm callers rely only on the mutation of *dict_*.
        """
        dict_["_DONTUSE_nb_reset_called"] = self.__nb_reset_called
        dict_["_DONTUSE_nb_step_called"] = self.__nb_step_called
        dict_["_DONTUSE_nb_init_called"] = self.__nb_init_called
        return super().get_kwargs(dict_)
| 11,746 | 38.820339 | 123 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/readPypowNetData.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import copy
import warnings
from datetime import timedelta, datetime
import numpy as np
import pandas as pd
from grid2op.dtypes import dt_int
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Exceptions import ChronicsError
# Names of the csv were not the same
class ReadPypowNetData(GridStateFromFileWithForecasts):
"""
DEPRECATED, this class is no longer used nor tested.
"""
MULTI_CHRONICS = False
def __init__(
self,
path,
sep=";",
time_interval=timedelta(minutes=5),
max_iter=-1,
chunk_size=None,
):
GridStateFromFileWithForecasts.__init__(
self,
path,
sep=sep,
time_interval=time_interval,
max_iter=max_iter,
chunk_size=chunk_size,
)
    def initialize(
        self,
        order_backend_loads,
        order_backend_prods,
        order_backend_lines,
        order_backend_subs,
        names_chronics_to_backend=None,
    ):
        """Read the pypownet-named csv files located in ``self.path`` and reorder
        their columns to match the backend element order.

        The ``order_backend_*`` arguments give the element names in backend
        order; ``names_chronics_to_backend`` optionally maps csv column names to
        backend names (identity mapping when omitted).
        """
        self.n_gen = len(order_backend_prods)
        self.n_load = len(order_backend_loads)
        self.n_line = len(order_backend_lines)
        # build (or validate) the csv-column-name -> backend-name mapping,
        # one sub dictionary per element type
        self.names_chronics_to_backend = copy.deepcopy(names_chronics_to_backend)
        if self.names_chronics_to_backend is None:
            self.names_chronics_to_backend = {}
        if not "loads" in self.names_chronics_to_backend:
            self.names_chronics_to_backend["loads"] = {
                k: k for k in order_backend_loads
            }
        else:
            self._assert_correct(
                self.names_chronics_to_backend["loads"], order_backend_loads
            )
        if not "prods" in self.names_chronics_to_backend:
            self.names_chronics_to_backend["prods"] = {
                k: k for k in order_backend_prods
            }
        else:
            self._assert_correct(
                self.names_chronics_to_backend["prods"], order_backend_prods
            )
        if not "lines" in self.names_chronics_to_backend:
            self.names_chronics_to_backend["lines"] = {
                k: k for k in order_backend_lines
            }
        else:
            self._assert_correct(
                self.names_chronics_to_backend["lines"], order_backend_lines
            )
        if not "subs" in self.names_chronics_to_backend:
            self.names_chronics_to_backend["subs"] = {k: k for k in order_backend_subs}
        else:
            self._assert_correct(
                self.names_chronics_to_backend["subs"], order_backend_subs
            )
        # print(os.listdir(self.path))
        # detect which (possibly compressed) extension the data files use,
        # probing with the "_N_loads_p" file
        read_compressed = ".csv"
        if not os.path.exists(os.path.join(self.path, "_N_loads_p.csv")):
            # try to read compressed data
            if os.path.exists(os.path.join(self.path, "_N_loads_p.csv.bz2")):
                read_compressed = ".csv.bz2"
            elif os.path.exists(os.path.join(self.path, "_N_loads_p.zip")):
                read_compressed = ".zip"
            elif os.path.exists(os.path.join(self.path, "_N_loads_p.csv.gzip")):
                read_compressed = ".csv.gzip"
            elif os.path.exists(os.path.join(self.path, "_N_loads_p.csv.xz")):
                read_compressed = ".csv.xz"
            else:
                raise RuntimeError(
                    'GridStateFromFile: unable to locate the data files that should be at "{}"'.format(
                        self.path
                    )
                )
        # --- read the "real time" (environment) data ---
        load_p = pd.read_csv(
            os.path.join(self.path, "_N_loads_p{}".format(read_compressed)),
            sep=self.sep,
        )
        load_q = pd.read_csv(
            os.path.join(self.path, "_N_loads_q{}".format(read_compressed)),
            sep=self.sep,
        )
        prod_p = pd.read_csv(
            os.path.join(self.path, "_N_prods_p{}".format(read_compressed)),
            sep=self.sep,
        )
        prod_v = pd.read_csv(
            os.path.join(self.path, "_N_prods_v{}".format(read_compressed)),
            sep=self.sep,
        )
        hazards = pd.read_csv(
            os.path.join(self.path, "hazards{}".format(read_compressed)), sep=self.sep
        )
        maintenance = pd.read_csv(
            os.path.join(self.path, "maintenance{}".format(read_compressed)),
            sep=self.sep,
        )
        # element name -> position in the backend
        order_backend_loads = {el: i for i, el in enumerate(order_backend_loads)}
        order_backend_prods = {el: i for i, el in enumerate(order_backend_prods)}
        order_backend_lines = {el: i for i, el in enumerate(order_backend_lines)}
        # for each csv, backend position of each of its columns
        order_chronics_load_p = np.array(
            [
                order_backend_loads[self.names_chronics_to_backend["loads"][el]]
                for el in load_p.columns
            ]
        ).astype(dt_int)
        order_backend_load_q = np.array(
            [
                order_backend_loads[self.names_chronics_to_backend["loads"][el]]
                for el in load_q.columns
            ]
        ).astype(dt_int)
        order_backend_prod_p = np.array(
            [
                order_backend_prods[self.names_chronics_to_backend["prods"][el]]
                for el in prod_p.columns
            ]
        ).astype(dt_int)
        order_backend_prod_v = np.array(
            [
                order_backend_prods[self.names_chronics_to_backend["prods"][el]]
                for el in prod_v.columns
            ]
        ).astype(dt_int)
        order_backend_hazards = np.array(
            [
                order_backend_lines[self.names_chronics_to_backend["lines"][el]]
                for el in hazards.columns
            ]
        ).astype(dt_int)
        order_backend_maintenance = np.array(
            [
                order_backend_lines[self.names_chronics_to_backend["lines"][el]]
                for el in maintenance.columns
            ]
        ).astype(dt_int)
        # reorder every column into backend order (argsort of the positions)
        self.load_p = copy.deepcopy(load_p.values[:, np.argsort(order_chronics_load_p)])
        self.load_q = copy.deepcopy(load_q.values[:, np.argsort(order_backend_load_q)])
        self.prod_p = copy.deepcopy(prod_p.values[:, np.argsort(order_backend_prod_p)])
        self.prod_v = copy.deepcopy(prod_v.values[:, np.argsort(order_backend_prod_v)])
        self.hazards = copy.deepcopy(
            hazards.values[:, np.argsort(order_backend_hazards)]
        )
        self.maintenance = copy.deepcopy(
            maintenance.values[:, np.argsort(order_backend_maintenance)]
        )
        # date and time
        datetimes_ = pd.read_csv(
            os.path.join(self.path, "_N_datetimes{}".format(read_compressed)),
            sep=self.sep,
        )
        # NOTE(review): "%Y-%b-%d" uses the abbreviated month NAME (%b), which is
        # locale dependent — TODO confirm the files are always written in the C locale.
        self.start_datetime = datetime.strptime(datetimes_.iloc[0, 0], "%Y-%b-%d")
        # there are maintenance and hazards only if the value in the file is not 0.
        self.maintenance = self.maintenance != 0.0
        self.hazards = self.hazards != 0.0
        self.curr_iter = 0
        if self.max_iter == -1:
            # if the number of maximum time step is not set yet, we set it to be the number of
            # data in the chronics (number of rows of the files) -1.
            # the -1 is present because the initial grid state doesn't count as a "time step" but is read
            # from these data.
            self.max_iter = self.load_p.shape[0] - 1
        # --- read the forecast ("planned") data, rebinding the same local names ---
        load_p = pd.read_csv(
            os.path.join(self.path, "_N_loads_p_planned{}".format(read_compressed)),
            sep=self.sep,
        )
        load_q = pd.read_csv(
            os.path.join(self.path, "_N_loads_q_planned{}".format(read_compressed)),
            sep=self.sep,
        )
        prod_p = pd.read_csv(
            os.path.join(self.path, "_N_prods_p_planned{}".format(read_compressed)),
            sep=self.sep,
        )
        prod_v = pd.read_csv(
            os.path.join(self.path, "_N_prods_v_planned{}".format(read_compressed)),
            sep=self.sep,
        )
        maintenance = pd.read_csv(
            os.path.join(self.path, "maintenance{}".format(read_compressed)),
            sep=self.sep,
        )
        # NOTE(review): the order_backend_* variables are already dicts at this point
        # (converted above); re-converting enumerates their keys, which happens to be
        # idempotent only because dicts preserve insertion order — TODO confirm intent.
        order_backend_loads = {el: i for i, el in enumerate(order_backend_loads)}
        order_backend_prods = {el: i for i, el in enumerate(order_backend_prods)}
        order_backend_lines = {el: i for i, el in enumerate(order_backend_lines)}
        order_chronics_load_p = np.array(
            [
                order_backend_loads[self.names_chronics_to_backend["loads"][el]]
                for el in load_p.columns
            ]
        ).astype(dt_int)
        order_backend_load_q = np.array(
            [
                order_backend_loads[self.names_chronics_to_backend["loads"][el]]
                for el in load_q.columns
            ]
        ).astype(dt_int)
        order_backend_prod_p = np.array(
            [
                order_backend_prods[self.names_chronics_to_backend["prods"][el]]
                for el in prod_p.columns
            ]
        ).astype(dt_int)
        order_backend_prod_v = np.array(
            [
                order_backend_prods[self.names_chronics_to_backend["prods"][el]]
                for el in prod_v.columns
            ]
        ).astype(dt_int)
        order_backend_maintenance = np.array(
            [
                order_backend_lines[self.names_chronics_to_backend["lines"][el]]
                for el in maintenance.columns
            ]
        ).astype(dt_int)
        self.load_p_forecast = copy.deepcopy(
            load_p.values[:, np.argsort(order_chronics_load_p)]
        )
        self.load_q_forecast = copy.deepcopy(
            load_q.values[:, np.argsort(order_backend_load_q)]
        )
        self.prod_p_forecast = copy.deepcopy(
            prod_p.values[:, np.argsort(order_backend_prod_p)]
        )
        self.prod_v_forecast = copy.deepcopy(
            prod_v.values[:, np.argsort(order_backend_prod_v)]
        )
        self.maintenance_forecast = copy.deepcopy(
            maintenance.values[:, np.argsort(order_backend_maintenance)]
        )
        # there are maintenance and hazards only if the value in the file is not 0.
        # per-line schedules derived from the boolean maintenance / hazards arrays
        self.maintenance_time = (
            np.zeros(shape=(self.load_p.shape[0], self.n_line), dtype=dt_int) - 1
        )
        self.maintenance_duration = np.zeros(
            shape=(self.load_p.shape[0], self.n_line), dtype=dt_int
        )
        self.hazard_duration = np.zeros(
            shape=(self.load_p.shape[0], self.n_line), dtype=dt_int
        )
        for line_id in range(self.n_line):
            self.maintenance_time[:, line_id] = self.get_maintenance_time_1d(
                self.maintenance[:, line_id]
            )
            self.maintenance_duration[:, line_id] = self.get_maintenance_duration_1d(
                self.maintenance[:, line_id]
            )
            self.hazard_duration[:, line_id] = self.get_maintenance_duration_1d(
                self.hazards[:, line_id]
            )
        # NOTE(review): this overwrites the "planned" maintenance read above with the
        # real-time maintenance booleans — TODO confirm this is intended.
        self.maintenance_forecast = self.maintenance != 0.0
        self.curr_iter = 0
        # determine the number of rows available (n_)
        if self.maintenance is not None:
            n_ = self.maintenance.shape[0]
        elif self.hazards is not None:
            n_ = self.hazards.shape[0]
        else:
            n_ = None
        # NOTE(review): _get_fileext probes "prod_p"/"load_p"/... file names, which do
        # not follow the pypownet "_N_*" naming used above — presumably this always
        # falls back to the maintenance shape here; TODO confirm.
        for fn in ["prod_p", "load_p", "prod_v", "load_q"]:
            ext_ = self._get_fileext(fn)
            if ext_ is not None:
                n_ = self._file_len(
                    os.path.join(self.path, "{}{}".format(fn, ext_)), ext_
                )
                break
        if n_ is None:
            raise ChronicsError(
                'No files are found in directory "{}". If you don\'t want to load any chronics,'
                ' use "ChangeNothing" and not "{}" to load chronics.'
                "".format(self.path, type(self))
            )
        self.n_ = n_  # the -1 is present because the initial grid state doesn't count as a "time step"
        self.tmp_max_index = load_p.shape[0]
| 12,493 | 37.801242 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/time_series_from_handlers.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from datetime import datetime, timedelta
import os
import numpy as np
import copy
from typing import Optional
from grid2op.Exceptions import (
ChronicsNotFoundError, HandlerError
)
from grid2op.Chronics.gridValue import GridValue
from grid2op.Chronics.handlers import BaseHandler
from grid2op.dtypes import dt_int, dt_float
class FromHandlers(GridValue):
"""This class allows to use the :class:`grid2op.Chronics.handlers.BaseHandler`
(and all the derived class, see :ref:`tshandler-module`) to
generate the "input time series" of the environment.
This class does nothing in particular beside making sure the "formalism" of the
Handlers can be adapted to generate compliant grid2op data.
.. seealso::
:ref:`tshandler-module` for more information
In order to use the handlers you need to:
- tell grid2op that you are going to generate time series from "handlers" by using `FromHandlers` class
- for each type of data ("gen_p", "gen_v", "load_p", "load_q", "maintenance", "gen_p_forecasted",
"load_p_forecasted", "load_q_forecasted" and "load_v_forecasted") you need to provide a way to
"handle" this type of data: you need a specific handler.
You need at least to provide handlers for the environment data types ("gen_p", "gen_v", "load_p", "load_q").
If you do not provide handlers for some data (*e.g* for "maintenance", "gen_p_forecasted",
"load_p_forecasted", "load_q_forecasted" and "load_v_forecasted") then it will be treated like "change nothing":
- there will be no maintenance if you do not provide a handler for maintenance
- for forecast it's a bit different... You will benefit from forecast if at least one handler generates
some (**though we do not recommend to do it**). And in that case, the "missing handlers" will be treated as
"no data available, keep as it was last time"
.. warning::
You cannot mix up all types of handler with each other. We wrote in the description of each Handlers
some conditions for them to work well.
Examples
---------
You can use the handers this way:
.. code-block:: python
import grid2op
from grid2op.Chronics import FromHandlers
from grid2op.Chronics.handlers import CSVHandler, DoNothingHandler, PerfectForecastHandler
env_name = "l2rpn_case14_sandbox"
env = grid2op.make(env_name,
data_feeding_kwargs={"gridvalueClass": FromHandlers,
"gen_p_handler": CSVHandler("prod_p"),
"load_p_handler": CSVHandler("load_p"),
"gen_v_handler": DoNothingHandler("prod_v"),
"load_q_handler": CSVHandler("load_q"),
"gen_p_for_handler": PerfectForecastHandler("prod_p_forecasted"),
"load_p_for_handler": PerfectForecastHandler("load_p_forecasted"),
"load_q_for_handler": PerfectForecastHandler("load_q_forecasted"),
}
)
obs = env.reset()
# and now you can use "env" as any grid2op environment.
More examples are given in the :ref:`tshandler-module` .
Notes
------
For the environment, data, the handler are called in the order: "load_p", "load_q", "gen_p" and finally "gen_v".
They are called once per step (per handler) at most.
Then the maintenance (and hazards) data are generated with the appropriate handler.
Finally, the forecast data are called after the environment data (and the maintenance data) once per step and per horizon.
Horizon are called "in the order" (all data "for 5 minutes", all data "for 10 minutes", all data for "15 minutes" etc.). And
for a given horizon, like the environment it is called in the order: "load_p", "load_q", "gen_p" and "gen_v".
About the seeding, the handlers are seeded in the order:
- load_p
- load_q
- gen_p
- gen_v
- maintenance
- hazards
- load_p_for
- load_q_for
- gen_p_for
- gen_v_for
Each individual handler will have its own pseudo random generator and the same seed will be used regardless of
the presence / absence of other handlers.
For example, regardless of the fact that you have a `maintenance_handler`, if you type `env.seed(0)` the
`load_p_for_handler` will behave exactly the same (it will generate the same numbers whether or not you have
maintenance or not.)
"""
MULTI_CHRONICS = False
    def __init__(
        self,
        path,  # can be None !
        load_p_handler,
        load_q_handler,
        gen_p_handler,
        gen_v_handler,
        maintenance_handler=None,
        hazards_handler=None,
        load_p_for_handler=None,
        load_q_for_handler=None,
        gen_p_for_handler=None,
        gen_v_for_handler=None,
        time_interval=timedelta(minutes=5),
        sep=";",  # here for compatibility with grid2op, but not used
        max_iter=-1,
        start_datetime=datetime(year=2019, month=1, day=1),
        chunk_size=None,
        h_forecast=(5,),
    ):
        """Build the chronics object from the individual handlers.

        Only the 4 environment handlers (load_p/load_q/gen_p/gen_v) are
        mandatory; maintenance, hazards and the forecast handlers are optional
        (``None`` means "no such data"). ``h_forecast`` lists the forecast
        horizons (in minutes) shared by all forecast handlers.
        """
        GridValue.__init__(
            self,
            time_interval=time_interval,
            max_iter=max_iter,
            start_datetime=start_datetime,
            chunk_size=chunk_size,
        )
        self.path = path
        if self.path is not None:
            # read the optional "*.info" files to get start date / resolution
            self._init_date_time()

        # all my "handlers" (I need to perform a deepcopy otherwise data are kept between episode...)
        self.gen_p_handler : BaseHandler = copy.deepcopy(gen_p_handler)
        self.gen_v_handler : BaseHandler = copy.deepcopy(gen_v_handler)
        self.load_p_handler : BaseHandler = copy.deepcopy(load_p_handler)
        self.load_q_handler : BaseHandler = copy.deepcopy(load_q_handler)
        self.maintenance_handler : Optional[BaseHandler] = copy.deepcopy(maintenance_handler)
        self.hazards_handler : Optional[BaseHandler] = copy.deepcopy(hazards_handler)
        self.gen_p_for_handler : Optional[BaseHandler] = copy.deepcopy(gen_p_for_handler)
        self.gen_v_for_handler : Optional[BaseHandler] = copy.deepcopy(gen_v_for_handler)
        self.load_p_for_handler : Optional[BaseHandler] = copy.deepcopy(load_p_for_handler)
        self.load_q_for_handler : Optional[BaseHandler] = copy.deepcopy(load_q_for_handler)

        # when there are no maintenance / hazards, build this only once
        self._no_mh_time = None
        self._no_mh_duration = None

        # define the active handlers (only the non-None ones are registered;
        # registration order matters for the loops that iterate this list)
        self._active_handlers = [self.gen_p_handler, self.gen_v_handler, self.load_p_handler, self.load_q_handler]
        self._forcast_handlers = []
        if self.maintenance_handler is not None:
            self._active_handlers.append(self.maintenance_handler)
        if self.hazards_handler is not None:
            self._active_handlers.append(self.hazards_handler)
        if self.gen_p_for_handler is not None:
            self._active_handlers.append(self.gen_p_for_handler)
            self._forcast_handlers.append(self.gen_p_for_handler)
        if self.gen_v_for_handler is not None:
            self._active_handlers.append(self.gen_v_for_handler)
            self._forcast_handlers.append(self.gen_v_for_handler)
        if self.load_p_for_handler is not None:
            self._active_handlers.append(self.load_p_for_handler)
            self._forcast_handlers.append(self.load_p_for_handler)
        if self.load_q_for_handler is not None:
            self._active_handlers.append(self.load_q_for_handler)
            self._forcast_handlers.append(self.load_q_for_handler)
        self._check_types()

        # now synch all handlers
        for handl in self._forcast_handlers:
            handl.set_h_forecast(h_forecast)

        # set the current path of the time series
        self._set_path(self.path)

        if chunk_size is not None:
            self.set_chunk_size(chunk_size)

        if max_iter != -1:
            self.set_max_iter(max_iter)

        self.init_datetime()
        # last dictionary of "real time" injections, used by the forecast handlers
        self.current_inj = None
def _check_types(self):
for handl in self._active_handlers:
if not isinstance(handl, BaseHandler):
raise HandlerError("One of the \"handler\" used in your time series does not "
"inherit from `BaseHandler`. This is not supported.")
    def initialize(
        self,
        order_backend_loads,
        order_backend_prods,
        order_backend_lines,
        order_backend_subs,
        names_chronics_to_backend=None,
    ):
        """Initialize every handler with the backend element names.

        Environment handlers are initialized first, then the forecast handlers,
        then maintenance / hazards; ``_update_max_iter`` is called between the
        stages because later handlers may rely on the episode length.
        """
        # set the current path of the time series
        self._set_path(self.path)
        # give the right date and times to the "handlers"
        self.init_datetime()
        self.n_gen = len(order_backend_prods)
        self.n_load = len(order_backend_loads)
        self.n_line = len(order_backend_lines)
        self.curr_iter = 0
        self.current_inj = None

        self.gen_p_handler.initialize(order_backend_prods, names_chronics_to_backend)
        self.gen_v_handler.initialize(order_backend_prods, names_chronics_to_backend)
        self.load_p_handler.initialize(order_backend_loads, names_chronics_to_backend)
        self.load_q_handler.initialize(order_backend_loads, names_chronics_to_backend)

        self._update_max_iter()  # might be used in the forecast
        if self.gen_p_for_handler is not None:
            self.gen_p_for_handler.initialize(order_backend_prods, names_chronics_to_backend)
        if self.gen_v_for_handler is not None:
            self.gen_v_for_handler.initialize(order_backend_prods, names_chronics_to_backend)
        if self.load_p_for_handler is not None:
            self.load_p_for_handler.initialize(order_backend_loads, names_chronics_to_backend)
        if self.load_q_for_handler is not None:
            self.load_q_for_handler.initialize(order_backend_loads, names_chronics_to_backend)

        self._update_max_iter()  # might be used in the maintenance
        if self.maintenance_handler is not None:
            self.maintenance_handler.initialize(order_backend_lines, names_chronics_to_backend)
        if self.hazards_handler is not None:
            self.hazards_handler.initialize(order_backend_lines, names_chronics_to_backend)

        # when there are no maintenance / hazards, build this only once
        self._no_mh_time = np.full(self.n_line, fill_value=-1, dtype=dt_int)
        self._no_mh_duration = np.full(self.n_line, fill_value=0, dtype=dt_int)

        self._update_max_iter()
def load_next(self):
self.current_datetime += self.time_interval
self.curr_iter += 1
res = {}
# load the injection
dict_inj, prod_v = self._load_injection()
res["injection"] = dict_inj
# load maintenance
if self.maintenance_handler is not None:
tmp_ = self.maintenance_handler.load_next(res)
if tmp_ is not None:
res["maintenance"] = tmp_
maintenance_time, maintenance_duration = self.maintenance_handler.load_next_maintenance()
else:
maintenance_time = self._no_mh_time
maintenance_duration = self._no_mh_duration
# load hazards
if self.hazard_duration is not None:
res["hazards"] = self.hazards_handler.load_next(res)
hazard_duration = self.hazards_handler.load_next_hazard()
else:
hazard_duration = self._no_mh_duration
self.current_inj = res
return (
self.current_datetime,
res,
maintenance_time,
maintenance_duration,
hazard_duration,
prod_v,
)
    def max_timestep(self):
        """Return the maximum number of time steps this chronics can produce
        (``-1`` means "no limit"), as computed by :func:`_update_max_iter`."""
        return self.max_iter
def next_chronics(self):
self.current_datetime = self.start_datetime
self.curr_iter = 0
for el in self._active_handlers:
el.next_chronics()
self._update_max_iter()
def done(self):
# I am done if the part I control is "over"
if self._max_iter > 0 and self.curr_iter > self._max_iter:
return True
# or if any of the handler is "done"
for handl in self._active_handlers:
if handl.done():
return True
return False
def check_validity(self, backend):
for el in self._active_handlers:
el.check_validity(backend)
# TODO other things here maybe ???
return True
def _aux_forecasts(self, h_id, dict_, key,
for_handler, base_handler, handlers):
if for_handler is not None:
tmp_ = for_handler.forecast(h_id, self.current_inj, dict_, base_handler, handlers)
if tmp_ is not None:
dict_[key] = dt_float(1.0) * tmp_
def forecasts(self):
res = []
if not self._forcast_handlers:
# nothing to handle forecast in this class
return res
handlers = (self.load_p_handler, self.load_q_handler, self.gen_p_handler, self.gen_v_handler)
for h_id, h in enumerate(self._forcast_handlers[0].get_available_horizons()):
dict_ = {}
self._aux_forecasts(h_id, dict_, "load_p", self.load_p_for_handler, self.load_p_handler, handlers)
self._aux_forecasts(h_id, dict_, "load_q", self.load_q_for_handler, self.load_q_handler, handlers)
self._aux_forecasts(h_id, dict_, "prod_p", self.gen_p_for_handler, self.gen_p_handler, handlers)
self._aux_forecasts(h_id, dict_, "prod_v", self.gen_v_for_handler, self.gen_v_handler, handlers)
res_d = {}
if dict_:
res_d["injection"] = dict_
forecast_datetime = self.current_datetime + timedelta(minutes=h)
res.append((forecast_datetime, res_d))
return res
def get_kwargs(self, dict_):
dict_["gen_p_handler"] = copy.deepcopy(self.gen_p_handler)._clear() if self.gen_p_handler is not None else None
dict_["gen_v_handler"] = copy.deepcopy(self.gen_v_handler)._clear() if self.gen_v_handler is not None else None
dict_["load_p_handler"] = copy.deepcopy(self.load_p_handler)._clear() if self.load_p_handler is not None else None
dict_["load_q_handler"] = copy.deepcopy(self.load_q_handler)._clear() if self.load_q_handler is not None else None
dict_["maintenance_handler"] = copy.deepcopy(self.maintenance_handler)._clear() if self.maintenance_handler is not None else None
dict_["hazards_handler"] = copy.deepcopy(self.hazards_handler)._clear() if self.hazards_handler is not None else None
dict_["gen_p_for_handler"] = copy.deepcopy(self.gen_p_for_handler)._clear() if self.gen_p_for_handler is not None else None
dict_["gen_v_for_handler"] = copy.deepcopy(self.gen_v_for_handler)._clear() if self.gen_v_for_handler is not None else None
dict_["load_p_for_handler"] = copy.deepcopy(self.load_p_for_handler)._clear() if self.load_p_for_handler is not None else None
dict_["load_q_for_handler"] = copy.deepcopy(self.load_q_for_handler)._clear() if self.load_q_for_handler is not None else None
return dict_
def get_id(self) -> str:
if self.path is not None:
return self.path
else:
# TODO
raise NotImplementedError()
    def shuffle(self, shuffler=None):
        """Not implemented for handler-based chronics: currently a no-op."""
        # TODO
        pass
    def sample_next_chronics(self, probabilities=None):
        """Not implemented for handler-based chronics: currently a no-op."""
        # TODO
        pass
def set_chunk_size(self, new_chunk_size):
# TODO
for el in self._active_handlers:
el.set_chunk_size(new_chunk_size)
def set_max_iter(self, max_iter):
self.max_iter = int(max_iter)
for el in self._active_handlers:
el.set_max_iter(max_iter)
def init_datetime(self):
for handl in self._active_handlers:
handl.set_times(self.start_datetime, self.time_interval)
    def seed(self, seed):
        """Seed this object and every handler.

        Ten sub-seeds are always drawn (one slot per possible handler), so a
        given handler receives the same seed whether or not the other handlers
        are present. Returns a tuple with the main seed followed by each
        handler's seed (``None`` for absent handlers).
        """
        super().seed(seed)
        max_seed = np.iinfo(dt_int).max
        seeds = self.space_prng.randint(max_seed, size=10)
        # this way of doing ensure the same seed given by the environment is
        # used even if some "handlers" are missing
        # (if env.seed(0) is called, then regardless of maintenance_handler or not,
        # gen_p_for_handler will always be seeded with the same number)
        lp_seed = self.load_p_handler.seed(seeds[0])
        lq_seed = self.load_q_handler.seed(seeds[1])
        gp_seed = self.gen_p_handler.seed(seeds[2])
        gv_seed = self.gen_v_handler.seed(seeds[3])
        maint_seed = None
        if self.maintenance_handler is not None:
            maint_seed = self.maintenance_handler.seed(seeds[4])
        haz_seed = None
        if self.hazards_handler is not None:
            haz_seed = self.hazards_handler.seed(seeds[5])
        lpf_seed = None
        if self.load_p_for_handler is not None:
            lpf_seed = self.load_p_for_handler.seed(seeds[6])
        lqf_seed = None
        if self.load_q_for_handler is not None:
            lqf_seed = self.load_q_for_handler.seed(seeds[7])
        gpf_seed = None
        if self.gen_p_for_handler is not None:
            gpf_seed = self.gen_p_for_handler.seed(seeds[8])
        gvf_seed = None
        if self.gen_v_for_handler is not None:
            gvf_seed = self.gen_v_for_handler.seed(seeds[9])
        # note the return order differs from the seeding order above
        return (seed, gp_seed, gv_seed, lp_seed, lq_seed,
                maint_seed, haz_seed, gpf_seed, gvf_seed,
                lpf_seed, lqf_seed)
def _set_path(self, path):
"""tell the handler where this chronics is located"""
if path is None:
return
for el in self._active_handlers:
el.set_path(path)
def set_max_episode_duration(self, max_ep_dur):
for handl in self._active_handlers:
handl.set_max_episode_duration(max_ep_dur)
    def _update_max_iter(self):
        """Recompute ``self.max_iter`` as the most restrictive limit among the
        handlers, this object, and the handlers' known episode durations, then
        broadcast the result back to every handler (``-1`` means "no limit")."""
        # get the max iter from the handlers
        max_iters = [el.get_max_iter() for el in self._active_handlers]
        max_iters = [el for el in max_iters if el != -1]
        # get the max iter from myself
        # NOTE(review): the test reads ``_max_iter`` but appends ``max_iter`` —
        # presumably ``max_iter`` is a property over ``_max_iter`` on the GridValue
        # base class; TODO confirm they always agree here.
        if self._max_iter != -1:
            max_iters.append(self.max_iter)
        # prevent empty list
        if not max_iters:
            max_iters.append(self.max_iter)
        # take the minimum
        self.max_iter = np.min(max_iters)
        # update everyone with the "new" max iter
        max_ep_dur = [el.max_episode_duration for el in self._active_handlers]
        max_ep_dur = [el for el in max_ep_dur if el is not None]
        if max_ep_dur:
            if self.max_iter == -1:
                self.max_iter = np.min(max_ep_dur)
            else:
                self.max_iter = min(self.max_iter, np.min(max_ep_dur))
        if self.max_iter != -1:
            self.set_max_episode_duration(self.max_iter)
def _load_injection(self):
dict_ = {}
prod_v = None
if self.load_p_handler is not None:
tmp_ = self.load_p_handler.load_next(dict_)
if tmp_ is not None:
dict_["load_p"] = dt_float(1.0) * tmp_
if self.load_q_handler is not None:
tmp_ = self.load_q_handler.load_next(dict_)
if tmp_ is not None:
dict_["load_q"] = dt_float(1.0) * tmp_
if self.gen_p_handler is not None:
tmp_ = self.gen_p_handler.load_next(dict_)
if tmp_ is not None:
dict_["prod_p"] = dt_float(1.0) * tmp_
if self.gen_v_handler is not None:
tmp_ = self.gen_v_handler.load_next(dict_)
if tmp_ is not None:
prod_v = dt_float(1.0) * tmp_
return dict_, prod_v
def _init_date_time(self): # in csv handler
if os.path.exists(os.path.join(self.path, "start_datetime.info")):
with open(os.path.join(self.path, "start_datetime.info"), "r") as f:
a = f.read().rstrip().lstrip()
try:
tmp = datetime.strptime(a, "%Y-%m-%d %H:%M")
except ValueError:
tmp = datetime.strptime(a, "%Y-%m-%d")
except Exception:
raise ChronicsNotFoundError(
'Impossible to understand the content of "start_datetime.info". Make sure '
'it\'s composed of only one line with a datetime in the "%Y-%m-%d %H:%M"'
"format."
)
self.start_datetime = tmp
self.current_datetime = tmp
if os.path.exists(os.path.join(self.path, "time_interval.info")):
with open(os.path.join(self.path, "time_interval.info"), "r") as f:
a = f.read().rstrip().lstrip()
try:
tmp = datetime.strptime(a, "%H:%M")
except ValueError:
tmp = datetime.strptime(a, "%M")
except Exception:
raise ChronicsNotFoundError(
'Impossible to understand the content of "time_interval.info". Make sure '
'it\'s composed of only one line with a datetime in the "%H:%M"'
"format."
)
self.time_interval = timedelta(hours=tmp.hour, minutes=tmp.minute)
def fast_forward(self, nb_timestep):
for _ in range(nb_timestep):
self.load_next()
# for this class I suppose the real data AND the forecast are read each step
self.forecasts()
| 22,704 | 42.330153 | 137 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/handlers/__init__.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
__all__ = ["BaseHandler",
"CSVHandler",
"CSVForecastHandler",
"DoNothingHandler",
"CSVMaintenanceHandler",
"JSONMaintenanceHandler",
"PersistenceForecastHandler",
"PerfectForecastHandler",
"NoisyForecastHandler",
"LoadQFromPHandler",
]
from .baseHandler import BaseHandler
from .csvHandler import CSVHandler
from .do_nothing_handler import DoNothingHandler
from .csvForecastHandler import CSVForecastHandler
from .csvMaintenanceHandler import CSVMaintenanceHandler
from .jsonMaintenanceHandler import JSONMaintenanceHandler
from .persitenceForecastHandler import PersistenceForecastHandler
from .perfectForecastHandler import PerfectForecastHandler
from .noisyForecastHandler import NoisyForecastHandler
from .load_q_from_p_handler import LoadQFromPHandler
| 1,331 | 41.967742 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/handlers/baseHandler.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import os
import numpy as np
from typing import Optional, Tuple
from grid2op.Space import RandomObject
from datetime import timedelta, datetime
# TODO logger !
class BaseHandler(RandomObject):
"""This is the base class that represents a time series "handler".
.. versionadded:: 1.9.0
Each "handler" will be reponsible to produce the data for "one single type of elements"
of the grid. For example you will have 1 handler for "load_p", one for "load_q", another
one for "load_p_forecasted" etc. This allows some great flexibility to the way you want
to retrieve data, but can be quite verbose as every type of data needs to be "handled".
A "handler" will, for a certain type of data (*eg* load_p or maintenance etc.)
handle the way this data type is generated.
To be a valid "handler" an class must first inherit from :class:`BaseHandler` and
implements (for all types of handlers):
- :func:`BaseHandler.initialize` : to initialize the handler from the environment
data (number of loads, lines etc.)
- :func:`BaseHandler.done` : whether or not this "handler" is over or not.
- :func:`BaseHandler.check_validity` : check if the input data are valid with the backend,
for example if you read from a csv
number of columns should match number of element
- :func:`BaseHandler.next_chronics` : called just before the start of a scenario.
If the data represents "real time" data (*ie* the data seen by the agent in real
time in the observation) then it needs also to implement:
- :func:`BaseHandler.load_next` : to "generate" data for the next steps
If the data represents "forecast data" (*ie* the data accessed by the agent when it uses
:func:`grid2op.Observation.BaseObservation.simulate` or :class:`grid2op.simulator.Simulator`
or :func:`grid2op.Observation.BaseObservation.get_forecast_env`) then it needs to implement:
- :func:`BaseHandler.forecast` : to retrieve the forecast at a given horizon
And if the "handler" represents maintenance data, then it needs to implement:
- :func:`BaseHandler.load_next` : that returns a boolean vector for whether or not
each powerline is in maintenance
- :func:`BaseHandler.load_next_maintenance` : to "generate" data for the next steps
.. seealso::
:class:`grid2op.Chronics.FromHandlers` which is the things that "consumes" the handlers to output
the data read by the :class:`grid2op.Environment.Environment`
"""
    def __init__(self, array_name, max_iter=-1, h_forecast=(5, )):
        """Build the handler.

        Parameters
        ----------
        array_name : str
            Name of the grid2op quantity this handler produces (*e.g.* "load_p").
        max_iter : int
            Maximum number of iterations this handler can produce (-1 means "forever").
        h_forecast : tuple
            Forecast horizons (in minutes), when this handler produces forecasts.
        """
        super().__init__()
        # maximum number of steps this handler can generate (-1: unlimited)
        self.max_iter : int = max_iter
        # start date/time and resolution, filled later through set_times()
        self.init_datetime : Optional[datetime] = None
        self.time_interval : Optional[timedelta] = None
        # which grid2op quantity this handler is responsible for
        self.array_name : str = array_name
        # deep copy so the horizons are not shared with the caller
        self._h_forecast : tuple = copy.deepcopy(h_forecast)
        # on-disk location of the data, filled later through set_path()
        self.path : Optional[os.PathLike] = None
        # environment-wide episode length, filled later (None: unknown)
        self.max_episode_duration : Optional[int] = None
def set_max_iter(self, max_iter: Optional[int]) -> None:
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
The the maximum number of iteration this handler is able to produce.
`-1` means "forever". This is set by the :class:`grid2op.Chronics.FromHandlers`
Parameters
----------
max_iter : Optional[int]
Maximum number of iterations
"""
if max_iter is not None:
self.max_iter = int(max_iter)
else:
self.max_iter = -1
def set_max_episode_duration(self, max_episode_duration: Optional[int]) -> None:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Set the maximum number of iterations the environment will use. It is a way
    to synchronize all the handlers of the same environment.

    Parameters
    ----------
    max_episode_duration : Optional[int]
        Maximum number of iterations for the current grid2op environment
        (``None`` means "not known / not limited").
    """
    if max_episode_duration is None:
        self.max_episode_duration = None
    else:
        self.max_episode_duration = int(max_episode_duration)
def get_max_iter(self) -> int:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Returns
    -------
    int
        The maximum number of iterations this particular handler can generate:
        `-1` means "forever", otherwise a > 0 integer.
    """
    return self.max_iter
def set_path(self, path: os.PathLike) -> None:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Used by the :class:`grid2op.Chronics.FromHandlers` to tell this handler
    where the data it needs can be found on disk.

    Parameters
    ----------
    path : os.PathLike
        The path to look for the data.
    """
    self.path = path
def set_chunk_size(self, chunk_size: Optional[int]) -> None:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Optional: when data are read from the hard drive
    (*eg* :class:`grid2op.Chronics.handlers.CSVHandler`) this informs the handler
    about the number of rows to process at each 'step'.

    .. note::
        Do not use this function directly, it should be used only from the environment.

    .. seealso::
        This can be set by a call to `env.chronics_handler.set_chunk_size(chunk_size)`

    Parameters
    ----------
    chunk_size : Optional[int]
        The desired chunk size
    """
    # chunk size is part of the public API but is meaningless for data
    # not read from a disk: the base implementation ignores it on purpose
    return None
def set_times(self,
              init_datetime : datetime,
              time_interval : timedelta) -> None:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Used by the :class:`grid2op.Chronics.FromHandlers` to inform this handler
    about the initial datetime of the episode and the duration between two steps.

    .. note::
        Do not use this function directly, it should be used only from the environment.

    Parameters
    ----------
    init_datetime : datetime
        The initial datetime.
    time_interval : timedelta
        The time between two steps of the environment.
    """
    self.time_interval = time_interval
    self.init_datetime = init_datetime
def _clear(self):
self.init_datetime = None
self.time_interval = None
def get_kwargs(self, dict_ : dict) -> None:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    If some extra parameters are needed to build your handler "from scratch",
    copy them into `dict_` here. This is used when creating a runner for example.

    Parameters
    ----------
    dict_ : dict
        The dictionnary to update with the parameters.
    """
    # the base class has no special kwargs worth remembering
    return None
def set_h_forecast(self, h_forecast : Tuple[int]) -> None:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Used by the :class:`grid2op.Chronics.FromHandlers` to inform this handler
    about the different forecast horizons available.

    .. seealso::
        :func:`BaseHandler.get_available_horizons`

    Parameters
    ----------
    h_forecast : Tuple[int]
        The forecast horizons, in minutes. For example
        `handler.set_h_forecast((5, 10))` tells this handler that forecasts are
        available for 5 and 10 minutes ahead.
    """
    # deep copy: the handler keeps its own private version of the horizons
    self._h_forecast = copy.deepcopy(h_forecast)
def get_available_horizons(self) -> Tuple:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Returns the forecast horizons (in minutes) known by this handler.

    .. seealso::
        :func:`BaseHandler.set_h_forecast`

    Returns
    -------
    Tuple
        The different forecast horizons available, in minutes (*eg* `(5, 10)`
        for forecasts 5 and 10 minutes ahead).
    """
    # return a deep copy so callers cannot mutate the internal state
    res = copy.deepcopy(self._h_forecast)
    return res
def initialize(self,
               order_backend_arrays,
               names_chronics_to_backend) -> None:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Called by the :class:`grid2op.Chronics.FromHandlers` right after this
    handler is created, to initialize it with the relevant data from the
    environment. For example, if this handler represents "load_p" then
    `order_backend_arrays` will be the name of each load in the environment and
    `names_chronics_to_backend` a dictionnary mapping the names in the data to
    the names as read by the grid simulator / the backend.

    Must be overridden by concrete handlers.

    Parameters
    ----------
    order_backend_arrays : np.ndarray
        numpy array representing the name of the element in the grid
    names_chronics_to_backend : dict
        mapping between the names in `order_backend_arrays` and the names found in the data.
    """
    raise NotImplementedError()
def done(self) -> bool:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Whether or not this handler has finished generating data. It can be "done"
    for example when it reads data from a csv and the bottom line of the csv
    has been reached.

    Must be overridden by concrete handlers.

    Returns
    -------
    bool
        Whether it is "done" or not.
    """
    raise NotImplementedError()
def load_next(self, dict_: dict) -> Optional[np.ndarray]:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Called by the :class:`grid2op.Chronics.FromHandlers`, exactly once per step,
    for the environment handlers ("load_p", "load_q", "prod_p" and "prod_v" only).
    It should return the next state of the type of data this handler is
    responsible for, or ``None`` if the previous state should not be modified.

    Must be overridden by concrete handlers.

    Parameters
    ----------
    dict_ : dict
        A dictionnary with the other "previous" data types. This function is
        always called in the same order: 1) "load_p", 2) "load_q", 3) "gen_p",
        4) "gen_v". So if your handler is reponsible for "gen_p" this
        dictionnary might contain 2 items: "load_p" and "load_q" with the
        values for the environment at the same step (if the values are
        modified by the relevant handlers).

    Returns
    -------
    Optional[np.ndarray]
        The new values (if any)
    """
    raise NotImplementedError()
def check_validity(self, backend):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Called by the :class:`grid2op.Chronics.FromHandlers` after all the handlers
    have been initialized, and after each "env.reset()" call. Its role is to
    make sure every handler can "handle" the data of the environment smoothly.

    Must be overridden by concrete handlers.

    Parameters
    ----------
    backend : :class:`grid2op.Backend.Backend`
        The backend used in the environment.
    """
    raise NotImplementedError()
def load_next_maintenance(self) -> Tuple[np.ndarray, np.ndarray]:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Used only when the handler is reponsible for "maintenance". Called exactly
    once per step.

    Must be overridden by concrete handlers.

    Returns
    -------
    Tuple[np.ndarray, np.ndarray]
        maintenance time: np.ndarray
            Time for next maintenance for each powerline
        maintenance duration: np.ndarray
            Duration of the next maintenance, for each powerline
    """
    raise NotImplementedError()
def load_next_hazard(self) -> np.ndarray:
    """Not implemented yet: hazards are not supported by handlers (TODO)."""
    raise NotImplementedError()
def forecast(self,
             forecast_horizon_id : int,
             inj_dict_env : dict,
             inj_dict_previous_forecast : dict,
             # eg gen_p_handler if this is set to gen_p_for_handler:
             env_handler : "BaseHandler",
             # list of the 4 env handlers: (load_p_handler, load_q_handler, gen_p_handler, gen_v_handler)
             env_handlers : Tuple["BaseHandler", "BaseHandler", "BaseHandler", "BaseHandler"]
             ) -> Optional[np.ndarray] :
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Called by the :class:`grid2op.Chronics.FromHandlers` only for the handlers
    responsible for some "forecasts" ("load_p_forecasted", "load_q_forecasted",
    "prod_p_forecasted", "prod_v_forecasted"), exactly once per step and per
    horizon. Similar to :func:`BaseHandler.load_next` but with richer inputs
    (forecasts are more complicated than just real time data).

    Must be overridden by concrete handlers.

    .. seealso:: :func:`BaseHandler.load_next`

    Parameters
    ----------
    forecast_horizon_id : int
        The index of the current horizon in the list :attr:`BaseHandler._h_forecast`
    inj_dict_env : dict
        The data of the environment (not the forecast) if data have been
        modified by the relevant handlers.
    inj_dict_previous_forecast : dict
        Similar to the `dict_` parameters of :func:`BaseHandler.load_next`
    env_handler : BaseHandler
        The handler of the same type as this one, but for the environment
        (*eg* the "load_q" handler if this one deals with "load_q_forecasted").
    env_handlers : Tuple[:class:`BaseHandler`, :class:`BaseHandler`, :class:`BaseHandler`, :class:`BaseHandler`]
        All the environment handlers, in the order: "load_p", "load_q", "prod_p", "prod_v".

    Returns
    -------
    Optional[np.ndarray]
        The forecast (as a numpy array) or None if nothing should be returned.
    """
    raise NotImplementedError()
def get_future_data(self, horizon: int, quiet_warnings : bool=False) -> Optional[np.ndarray]:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Used for example by
    :class:`grid2op.Chronics.handlers.PerfectForecastHandler`: to generate a
    "perfect forecast" that class will "have a look" into the future through
    this function. Implemented for example in
    :class:`grid2op.Chronics.handlers.CSVHandler`.

    The base class has no data to look into, hence returns ``None``.

    Parameters
    ----------
    horizon : int
        The horizon (in minutes) to which we want the data.
    quiet_warnings: bool
        Whether to issue a warning (default, if quiet_warnings is False) or not

    Returns
    -------
    Optional[np.ndarray]
        The data that will be generated in `horizon` minutes.
    """
    return None
def next_chronics(self) -> None:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Called by the :class:`grid2op.Chronics.FromHandlers` at the end of each
    episode when the next episode is loaded. The base class has nothing to do.
    """
    return None
| 18,447 | 37.273859 | 140 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/handlers/csvForecastHandler.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from typing import Tuple
from grid2op.Exceptions import (
HandlerError
)
from grid2op.Chronics.handlers.baseHandler import BaseHandler
from grid2op.Chronics.handlers.csvHandler import CSVHandler
class CSVForecastHandler(CSVHandler):
    """Reads and produce time series if given by a csv file (possibly compressed).
    The separator used can be specified as input.

    The file name should match the "array_name":
    for example if the data you want to use for "load_p_forecasted" in the environment
    are in the file "my_load_p_forecast.csv.bz2" you should name this handler
    "my_load_p_forecast" and not "load_p" nor "my_load_p_forecast.csv" nor
    "my_load_p_forecast.csv.bz2"

    The csv should be structured as follow:

    - it should not have any "index" or anything, only data used directly
      by grid2op (so only "active loads" if this handler is responsible
      for the generation of "load_p")
    - Each element (for example a load) is represented by a `column`.
    - It should have a header with the name of the elements it "handles" and
      this name should match the one in the environment. For example
      if "load_1_0" is the name of a load and you read data for "load_p"
      or "load_q" then one column of your csv should be named "load_1_0".
    - only floating point numbers should be present in the data (no bool, string
      and integers will be casted to float)

    The structuration of the rows is a bit different than for :class:`CSVHandler`
    because this class can read "multiple steps ahead forecast", provided that
    it knows for how many different horizons forecasts are made.

    For example, if forecasts are available for h = 5, 10 and 15 minutes ahead:

    - the first row (not counting the header) is the forecast made
      for h = 5 at the first step: the forecasts available at t=0 for t=5mins
    - the second row is the forecast made for h = 10 at the first step
    - the third row is the forecast made for h = 15 at the first step
    - the fourth row is the forecast made for h = 5 at the second step
    - the fifth row is the forecast made for h = 10 at the second step
    - etc.

    .. warning::
        Use this class only for the FORECAST data ("load_p_forecasted",
        "load_q_forecasted", "prod_p_forecasted" or "prod_v_forecasted") and
        not for maintenance (in this case
        use :class:`CSVMaintenanceHandler`) nor for
        environment data (in this case use :class:`CSVHandler`)

    This is the default way to provide data to grid2op and its used for
    most l2rpn environments when forecasts are available.

    .. note::
        The current implementation heavily relies on the fact that the
        :func:`CSVForecastHandler.forecast` method is called
        exactly once per horizon and per step.
    """
    def __init__(self,
                 array_name,
                 sep=";",
                 chunk_size=None,
                 max_iter=-1) -> None:
        super().__init__(array_name, sep, chunk_size, max_iter)
        self._h_forecast = None
        # number of csv rows that make up a single environment step
        # (one row per forecast horizon); updated by `set_h_forecast`
        self._nb_row_per_step = 1

    def load_next(self, dict_):
        """Not available for forecast data: raises a :class:`HandlerError`."""
        raise HandlerError("You should only use this class for FORECAST data, and not for ENVIRONMENT data. "
                           "Please consider using `CSVHandler` (`from grid2op.Chronics.handlers import CSVHandler`) "
                           "for your environment data.")

    def set_chunk_size(self, chunk_size):
        # scale the chunk size: one environment step spans `_nb_row_per_step`
        # csv rows (one per forecast horizon).
        # NB: `None` means "no chunking" (read everything at once); the base
        # class accepts Optional[int], so do not crash on `int(None)`.
        if chunk_size is None:
            self.chunk_size = None
        else:
            super().set_chunk_size(self._nb_row_per_step * int(chunk_size))

    def set_max_iter(self, max_iter):
        # scale the maximum number of iterations the same way as the chunk
        # size; `None` and -1 both mean "no limit" and need no scaling
        if max_iter is None or int(max_iter) == -1:
            super().set_max_iter(max_iter)
        else:
            super().set_max_iter(self._nb_row_per_step * int(max_iter))

    def set_h_forecast(self, h_forecast):
        super().set_h_forecast(h_forecast)
        # one csv row per available horizon makes up one environment step
        self._nb_row_per_step = len(self._h_forecast)

    def get_available_horizons(self):
        # skip the definition in CSVHandler (which raises) to jump to the
        # level "above" (BaseHandler) which returns the stored horizons
        return super(CSVHandler, self).get_available_horizons()

    def forecast(self,
                 forecast_horizon_id : int,
                 inj_dict_env : dict,
                 inj_dict_previous_forecast : dict,
                 # eg gen_p_handler if this is set to gen_p_for_handler:
                 env_handler : "BaseHandler",
                 # list of the 4 env handlers: (load_p_handler, load_q_handler, gen_p_handler, gen_v_handler)
                 env_handlers : Tuple["BaseHandler", "BaseHandler", "BaseHandler", "BaseHandler"]):
        # each call consumes the next csv row, which is the forecast for the
        # next horizon of the current step (hence "exactly once per horizon
        # and per step" in the class documentation)
        res = super().load_next(inj_dict_previous_forecast)
        return res
| 5,400 | 45.965217 | 117 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/handlers/csvHandler.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import pandas as pd
import numpy as np
import copy
from typing import Tuple
from grid2op.Exceptions import (
ChronicsError, HandlerError
)
from grid2op.dtypes import dt_int, dt_float
from grid2op.Chronics.handlers.baseHandler import BaseHandler
class CSVHandler(BaseHandler):
    """Reads and produce time series if given by a csv file (possibly compressed).
    The separator used can be specified as input.

    The file name should match the "array_name":
    for example if the data you want to use for "load_p" in the environment
    are in the file "my_load_p_data.csv.bz2" you should name this handler
    "my_load_p_data" and not "load_p" nor "my_load_p_data.csv" nor
    "my_load_p_data.csv.bz2"

    The csv should be structured as follow:

    - it should not have any "index" or anything, only data used by
      grid2op will be used
    - Each element (for example a load) is represented by a `column`.
    - It should have a header with the name of the elements it "handles" and
      this name should match the one in the environment. For example
      if "load_1_0" is the name of a load and you read data for "load_p"
      or "load_q" then one column of your csv should be named "load_1_0".
    - each time step is represented as a `row` and in order. For example
      (removing the header), row 1 (first row) will be step 1, row 2 will
      be step 2 etc.
    - only floating point numbers should be present in the data (no bool, string
      and integers will be casted to float)

    .. warning::
        Use this class only for the ENVIRONMENT data ("load_p", "load_q",
        "prod_p" or "prod_v") and not for maintenance (in this case
        use :class:`CSVMaintenanceHandler`) nor for
        forecast (in this case use :class:`CSVForecastHandler`)

    This is the default way to provide data to grid2op and its used for
    most l2rpn environments.
    """
    def __init__(self,
                 array_name,  # eg "load_p"
                 sep=";",
                 chunk_size=None,
                 max_iter=-1) -> None:
        super().__init__(array_name, max_iter)
        self.path = None
        self._file_ext = None
        self.tmp_max_index = None  # size maximum of the current tables in memory
        self.array = None  # numpy array corresponding to the current active load values in the power _grid.
        self.current_index = -1
        self.sep = sep
        self.names_chronics_to_backend = None

        # added to provide an easier access to read data in chunk
        self.chunk_size = chunk_size
        self._data_chunk = {}
        self._order_array = None
        self._order_backend_arrays = None

        # number of csv rows that make up one environment step
        self._nb_row_per_step = 1

    def _clear(self):
        """reset to a state as if it was just created"""
        super()._clear()
        self.path = None
        self._file_ext = None
        self.tmp_max_index = None
        self.array = None
        self.current_index = - 1
        self.names_chronics_to_backend = None
        self._data_chunk = {}
        self._order_array = None
        self._order_backend_arrays = None
        return self

    def set_path(self, path):
        # resolve the file extension first (csv / csv.bz2 / zip / ...),
        # then remember the full path to the data file
        self._file_ext = self._get_fileext(path)
        self.path = os.path.join(path, f"{self.array_name}{self._file_ext}")

    def initialize(self, order_backend_arrays, names_chronics_to_backend):
        self._order_backend_arrays = copy.deepcopy(order_backend_arrays)
        self.names_chronics_to_backend = copy.deepcopy(names_chronics_to_backend)

        # read the data
        array_iter = self._get_data()

        if not self.names_chronics_to_backend:
            # no mapping provided: assume data names match backend names
            self.names_chronics_to_backend = {}
            self.names_chronics_to_backend[self.array_name] = {
                k: k for k in self._order_backend_arrays
            }

        # put the proper name in order
        order_backend_arrays = {el: i for i, el in enumerate(order_backend_arrays)}

        if self.chunk_size is None:
            array = array_iter
            if array is not None:
                self.tmp_max_index = array.shape[0]
            else:
                raise HandlerError(
                    'No files are found in directory "{}". If you don\'t want to load any chronics,'
                    ' use "DoNothingHandler" (`from grid2op.Chronics.handlers import DoNothingHandler`) '
                    'and not "{}" to load chronics.'
                    "".format(self.path, type(self))
                )
        else:
            # chunked reading: keep the pandas iterator and pull the first chunk
            self._data_chunk = {
                self.array_name: array_iter,
            }
            array = self._get_next_chunk()

        # get the chronics in order
        order_chronics = self._get_orders(array, order_backend_arrays)

        # now "sort" the columns of each chunk of data
        self._order_array = np.argsort(order_chronics)

        self._init_attrs(array)
        self.curr_iter = 0
        if self.chunk_size is None:
            # everything is in memory: the episode length is known right away
            self.max_episode_duration = self.array.shape[0] - 1

    def done(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Compare to :func:`GridValue.done` an episode can be over for 2 main reasons:

        - :attr:`GridValue.max_iter` has been reached
        - There are no data in the csv.

        The episode is done if one of the above condition is met.

        Returns
        -------
        res: ``bool``
            Whether the episode has reached its end or not.
        """
        if self.max_iter > 0 and self.curr_iter > self.max_iter:
            return True
        if self.chunk_size is None and self.current_index >= self.array.shape[0]:
            return True
        return False

    def load_next(self, dict_):
        self.current_index += 1

        if not self._data_in_memory():
            try:
                self._load_next_chunk_in_memory()
            except StopIteration as exc_:
                raise StopIteration from exc_

        if self.current_index > self.tmp_max_index:
            raise StopIteration

        if self.max_iter > 0:
            if self.curr_iter >= self.max_iter:
                raise StopIteration
        # defensive copy so the caller cannot mutate the stored data
        return copy.deepcopy(self.array[self.current_index, :])

    def get_max_iter(self):
        if self.max_iter != -1:
            return self.max_iter
        if self.max_episode_duration is not None:
            return self.max_episode_duration
        if self.chunk_size is None and self.array is not None:
            return self.array.shape[0] - 1
        if self.array is None:
            return -1
        import warnings
        warnings.warn("Unable to read the 'max_iter' when there is a chunk size set and no \"max_iter\"")
        return -1  # TODO

    def check_validity(self, backend):
        # TODO
        return True

    def _init_attrs(
        self, array
    ):
        # sort the columns to match the backend order and cast to float
        self.array = None
        if array is not None:
            self.array = copy.deepcopy(
                array.values[:, self._order_array].astype(dt_float)
            )

    def _get_fileext(self, path_tmp):  # in csvhandler
        # figure out which (possibly compressed) variant of the file exists
        read_compressed = ".csv"
        if not os.path.exists(os.path.join(path_tmp, "{}.csv".format(self.array_name))):
            # try to read compressed data
            if os.path.exists(os.path.join(path_tmp, "{}.csv.bz2".format(self.array_name))):
                read_compressed = ".csv.bz2"
            elif os.path.exists(os.path.join(path_tmp, "{}.zip".format(self.array_name))):
                read_compressed = ".zip"
            elif os.path.exists(
                os.path.join(path_tmp, "{}.csv.gzip".format(self.array_name))
            ):
                read_compressed = ".csv.gzip"
            elif os.path.exists(os.path.join(path_tmp, "{}.csv.xz".format(self.array_name))):
                read_compressed = ".csv.xz"
            else:
                read_compressed = None
        return read_compressed

    def _get_data(self, chunksize=-1, nrows=None):  # in csvhandler
        if nrows is None:
            if self.max_iter > 0:
                # read one extra "step" so the final iteration has data
                nrows = self.max_iter + self._nb_row_per_step

        if self._file_ext is not None:
            if chunksize == -1:
                chunksize = self.chunk_size
            res = pd.read_csv(
                self.path,
                sep=self.sep,
                chunksize=chunksize,
                nrows=nrows,
            )
        else:
            res = None
        return res

    def _get_orders(
        self,
        array,  # eg load_p
        order_arrays,  # eg order_backend_loads
    ):
        # map each data column to the index of the matching backend element
        order_chronics_arrays = None
        if array is not None:
            self._assert_correct_second_stage(
                array.columns, self.names_chronics_to_backend
            )
            order_chronics_arrays = np.array(
                [
                    order_arrays[self.names_chronics_to_backend[self.array_name][el]]
                    for el in array.columns
                ]
            ).astype(dt_int)
        return order_chronics_arrays

    def _assert_correct_second_stage(self, pandas_name, dict_convert):
        # every column of the csv must be known to the grid, otherwise fail loudly
        for i, el in enumerate(pandas_name):
            if not el in dict_convert[self.array_name]:
                raise ChronicsError(
                    "Element named {} is found in the data (column {}) but it is not found on the "
                    'powergrid for data of type "{}".\nData in files are: {}\n'
                    "Converter data are: {}".format(
                        el,
                        i + 1,
                        self.array_name,
                        sorted(list(pandas_name)),
                        sorted(list(dict_convert[self.array_name].keys())),
                    )
                )

    def set_chunk_size(self, chunk_size):
        self.chunk_size = int(chunk_size)

    def _data_in_memory(self):
        if self.chunk_size is None:
            # if i don't use chunk, all the data are in memory alreay
            return True
        if self.current_index == 0:
            # data are loaded the first iteration
            return True
        if self.current_index % self.chunk_size != 0:
            # data are already in ram
            return True
        return False

    def _load_next_chunk_in_memory(self):
        # i load the next chunk as dataframes
        array = self._get_next_chunk()  # array: load_p
        # i put these dataframes in the right order (columns)
        self._init_attrs(array)
        # i don't forget to reset the reading index to 0
        self.current_index = 0

    # NOTE: this class used to define `_get_next_chunk` twice; the earlier
    # variant (which did not update `tmp_max_index`) was dead code shadowed
    # by this one and has been removed.
    def _get_next_chunk(self):
        array = None
        if self._data_chunk[self.array_name] is not None:
            array = next(self._data_chunk[self.array_name])
            self.tmp_max_index = array.shape[0]
        return array

    def forecast(self,
                 forecast_horizon_id : int,
                 inj_dict_env : dict,
                 inj_dict_previous_forecast : dict,
                 # eg gen_p_handler if this is set to gen_p_for_handler:
                 env_handler : "BaseHandler",
                 # list of the 4 env handlers: (load_p_handler, load_q_handler, gen_p_handler, gen_v_handler)
                 env_handlers : Tuple["BaseHandler", "BaseHandler", "BaseHandler", "BaseHandler"]):
        raise HandlerError(f"forecast {self.array_name}: You should only use this class for ENVIRONMENT data, and not for FORECAST data. "
                           "Please consider using `CSVForecastHandler` (`from grid2op.Chronics.handlers import CSVForecastHandler`) "
                           "for your forecast data.")

    def get_available_horizons(self):
        raise HandlerError(f"get_available_horizons {self.array_name}: You should only use this class for ENVIRONMENT data, and not for FORECAST data. "
                           "Please consider using `CSVForecastHandler` (`from grid2op.Chronics.handlers import CSVForecastHandler`) "
                           "for your forecast data.")

    def load_next_maintenance(self):
        raise HandlerError(f"load_next_maintenance {self.array_name}: You should only use this class for ENVIRONMENT data, and not for FORECAST data nor MAINTENANCE data. "
                           "Please consider using `CSVMaintenanceHandler` (`from grid2op.Chronics.handlers import CSVMaintenanceHandler`) "
                           "for your maintenance data.")

    def load_next_hazard(self):
        raise HandlerError(f"load_next_hazard {self.array_name}: You should only use this class for ENVIRONMENT data, and not for FORECAST "
                           "data nor MAINTENANCE nor HAZARDS data. (NB HAZARDS data are not yet supported) "
                           "by handlers.")

    def next_chronics(self):
        self.current_index = -1
        self.curr_iter = 0
        if self.chunk_size is not None:
            self._clear()  # we should have to reload everything if all data have been already loaded

    def get_future_data(self, horizon: int, quiet_warnings : bool=False):
        # translate the horizon (minutes) into a number of rows ahead
        horizon = int(horizon)
        tmp_index = self.current_index + horizon // (self.time_interval.total_seconds() // 60)
        tmp_index = int(tmp_index)
        if tmp_index < self.array.shape[0]:
            res = self.array[tmp_index, :]
        else:
            # no more data: return the last known row (optionally warning)
            if not quiet_warnings:
                import warnings
                warnings.warn(f"{type(self)} {self.array_name}: No more data to get, the last known data is returned.")
            res = self.array[-1, :]
        return copy.deepcopy(res)
| 14,544 | 37.580902 | 172 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/handlers/csvMaintenanceHandler.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from typing import Tuple
import pandas as pd
import numpy as np
from grid2op.dtypes import dt_int, dt_float, dt_bool
from grid2op.Chronics.gridValue import GridValue
from grid2op.Chronics.handlers.csvHandler import CSVHandler
class CSVMaintenanceHandler(CSVHandler):
    """Reads maintenance time series from a csv file (possibly compressed).
    The separator used can be specified as input.

    The file name should match the "array_name". If you want to use
    the maintenance file present in the file "my_maintenance_file.csv.gz"
    then you should create a CSVMaintenanceHandler with
    `array_name="my_maintenance_file"`.

    The csv should be structured as follow:

    - it should not have any "index" or anything, only data used by
      grid2op will be used
    - each powerline is represented by a `column`
    - it should have a header with the names of the powerlines that
      match the ones in the environment (*eg* a column called "0_1_0"
      if "0_1_0" is the name of a powerline in your environment)
    - each time step is represented as a `row`, in order (row 1 is step 1,
      row 2 is step 2 etc.)
    - only binary data (0 or 1) should be present in the file. No "bool",
      no string etc.

    .. warning::
        Use this class only for MAINTENANCE data and not for environment
        data (in this case use :class:`CSVHandler`) nor for
        forecast (in this case use :class:`CSVForecastHandler`)
    """
    def __init__(self,
                 array_name="maintenance",
                 sep=";",
                 max_iter=-1) -> None:
        # chunked reading is not supported for maintenance, hence
        # chunk_size=None is forced in the parent constructor
        super().__init__(array_name, sep, None, max_iter)

    def _init_attrs(self, array):
        super()._init_attrs(array)
        n_rows, n_line = self.array.shape

        # per (step, line): number of steps until the next maintenance (-1: none)
        self.maintenance_time = np.full((n_rows, n_line), -1, dtype=dt_int)
        # per (step, line): duration (in steps) of the next maintenance
        self.maintenance_duration = np.zeros((n_rows, n_line), dtype=dt_int)

        # test that with chunk size
        for line_id in range(n_line):
            col = self.array[:, line_id]
            self.maintenance_time[:, line_id] = GridValue.get_maintenance_time_1d(col)
            self.maintenance_duration[:, line_id] = GridValue.get_maintenance_duration_1d(col)

        # a maintenance happens only where the value in the file is not 0
        self.array = self.array != 0.0
        self.array = self.array.astype(dt_bool)

    def load_next_maintenance(self) -> Tuple[np.ndarray, np.ndarray]:
        """Return (time until next maintenance, duration) for the current step,
        as fresh copies so callers cannot mutate the internal tables."""
        row = self.current_index
        maint_time = 1 * self.maintenance_time[row, :]
        maint_duration = 1 * self.maintenance_duration[row, :]
        return maint_time, maint_duration

    def set_chunk_size(self, chunk_size):
        # skip the definition in CSVHandler to jump to the level "above"
        # (BaseHandler): chunking is a no-op for maintenance data
        return super(CSVHandler, self).set_chunk_size(chunk_size)
| 3,851 | 40.419355 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/handlers/do_nothing_handler.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Chronics.handlers.baseHandler import BaseHandler
class DoNothingHandler(BaseHandler):
    """This is the specific type of handler that does nothing.

    You can use it for any data type that you want left untouched.
    """
    def __init__(self, array_name="do nothing") -> None:
        super().__init__(array_name)

    def initialize(self, order_backend_prods, names_chronics_to_backend):
        """Nothing to initialize for this handler."""
        return None

    def check_validity(self, backend):
        """Always valid: there is nothing to check."""
        return None

    def load_next(self, dict):
        """Never modifies the previous state (returns ``None``)."""
        return None

    def done(self):
        """This handler never runs out of data."""
        return False

    def forecast(self,
                 forecast_horizon_id,
                 inj_dict_env,
                 inj_dict_previous_forecast,
                 # eg gen_p_handler if this is set to gen_p_for_handler:
                 env_handler,
                 # list of the 4 env handlers: (load_p_handler, load_q_handler, gen_p_handler, gen_v_handler)
                 env_handlers):
        """No forecast is ever produced by this handler."""
        return None

    def load_next_maintenance(self):
        """No maintenance: neither a time nor a duration is produced."""
        return (None, None)

    def load_next_hazard(self):
        """No hazard is ever produced by this handler."""
        return None
| 1,872 | 34.339623 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/handlers/jsonMaintenanceHandler.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import json
import os
from grid2op.Chronics.GSFFWFWM import GridStateFromFileWithForecastsWithMaintenance
from grid2op.Chronics.gridValue import GridValue
from grid2op.Chronics.handlers.baseHandler import BaseHandler
class JSONMaintenanceHandler(BaseHandler):
    """This type of handlers will generate maintenance based on some json files.

    Maintenance generated with this class will be stochastic: some different
    maintenance time / duration will be generated for each new episode (of course
    you can seed your environment for a purely deterministic process)

    The json file it will read should be called `json_file_name` (by default
    `"maintenance_meta.json"`)

    It should contain the data:

    - "line_to_maintenance": the list of the name of the powerline that can be
      "in maintenance" for this episode
    - "maintenance_starting_hour" : the starting hour for all maintenance
    - "maintenance_ending_hour" : the hour at which each maintenance ends
    - "daily_proba_per_month_maintenance" : it's a list having 12 elements (one
      for each month of the year) that gives, for each month the probability
      for any given line to be in maintenance. For example if
      `daily_proba_per_month_maintenance[6] = 0.1` it means that for the
      6th month of the year (june) there is a 10% for each powerline to be in
      maintenance
    - "max_daily_number_per_month_maintenance": maximum number of powerlines
      allowed in maintenance at the same time.
    """
    def __init__(self,
                 array_name="maintenance",
                 json_file_name="maintenance_meta.json",
                 max_iter=-1,
                 _duration_episode_default=24*12,  # if max_iter is not set, then maintenance are computed for a whole day
                 ):
        super().__init__(array_name, max_iter)
        self.json_file_name = json_file_name
        # metadata loaded from the json file (keys documented in the class docstring)
        self.dict_meta_data = None
        # sampled schedule: bool matrix (step, line) plus derived time/duration matrices
        self.maintenance = None
        self.maintenance_time = None
        self.maintenance_duration = None
        self.n_line = None  # used in one of the GridStateFromFileWithForecastsWithMaintenance functions
        self._duration_episode_default = _duration_episode_default
        self.current_index = 0

    def get_maintenance_time_1d(self, maintenance):
        # delegate to the generic implementation of GridValue
        return GridValue.get_maintenance_time_1d(maintenance)

    def get_maintenance_duration_1d(self, maintenance):
        # delegate to the generic implementation of GridValue
        return GridValue.get_maintenance_duration_1d(maintenance)

    def _create_maintenance_arrays(self, current_datetime):
        # create the self.maintenance, self.maintenance_time and self.maintenance_duration
        self.maintenance = GridStateFromFileWithForecastsWithMaintenance._generate_matenance_static(
            self._order_backend_arrays,
            self.max_episode_duration if self.max_episode_duration is not None else self._duration_episode_default,
            self.dict_meta_data["line_to_maintenance"],
            self.time_interval,
            current_datetime,
            self.dict_meta_data["maintenance_starting_hour"],
            self.dict_meta_data["maintenance_ending_hour"],
            self.dict_meta_data["daily_proba_per_month_maintenance"],
            self.dict_meta_data["max_daily_number_per_month_maintenance"],
            self.space_prng
        )
        # NOTE: this static method mutates `self` in place (it derives
        # maintenance_time / maintenance_duration from self.maintenance)
        GridStateFromFileWithForecastsWithMaintenance._fix_maintenance_format(self)

    def initialize(self, order_backend_arrays, names_chronics_to_backend):
        """Read the json description file and sample a first maintenance schedule."""
        self._order_backend_arrays = copy.deepcopy(order_backend_arrays)
        self.names_chronics_to_backend = copy.deepcopy(names_chronics_to_backend)
        self.n_line = len(self._order_backend_arrays)
        self.current_index = 0

        # read the description file
        with open(os.path.join(self.path, self.json_file_name), "r", encoding="utf-8") as f:
            self.dict_meta_data = json.load(f)

        # and now sample the maintenance
        self._create_maintenance_arrays(self.init_datetime)

    def check_validity(self, backend):
        # TODO
        pass

    def load_next_maintenance(self):
        """Return copies of the maintenance time / duration for the current step."""
        maint_time = 1 * self.maintenance_time[self.current_index, :]
        maint_duration = 1 * self.maintenance_duration[self.current_index, :]
        return maint_time, maint_duration

    def load_next(self, dict_):
        """Advance one step; re-sample a fresh schedule once the current one is exhausted."""
        self.current_index += 1
        if self.current_index >= self.maintenance.shape[0]:
            # regenerate some maintenance if needed
            self.current_index = 0
            self.init_datetime += self.maintenance.shape[0] * self.time_interval
            self._create_maintenance_arrays(self.init_datetime)
        return copy.deepcopy(self.maintenance[self.current_index, :])

    def _clear(self):
        """Reset this handler to its pristine (just-built) state."""
        super()._clear()
        self.dict_meta_data = None
        self.maintenance = None
        self.maintenance_time = None
        self.maintenance_duration = None
        self.n_line = None
        self.current_index = 0

    def done(self):
        # maintenance can be generated on the fly so they are never "done"
        return False
Grid2Op | Grid2Op-master/grid2op/Chronics/handlers/load_q_from_p_handler.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Exceptions import (
HandlerError
)
from grid2op.dtypes import dt_float
from grid2op.Chronics.handlers.baseHandler import BaseHandler
class LoadQFromPHandler(BaseHandler):
    """Handler dedicated to "load_q" type of data.

    It can serve both environment data ("load_q") and forecast data
    ("load_q_forecasted"): whenever called it returns (when possible)
    ``load_q = ratio * load_p`` with a "q over p" ratio given at building
    time.

    .. note::
        The implementation relies heavily on the fact that when the
        "load_q" / "load_q_forecasted" handlers are called, the
        "load_p" / "load_p_forecasted" data are already known.
    """
    def __init__(self,
                 array_name="load_q",
                 qp_ratio: float=0.7,
                 max_iter=-1):
        super().__init__(array_name, max_iter)
        # the ratio can be a scalar (shared by all loads) or one value per load
        if isinstance(qp_ratio, np.ndarray):
            self._qp_ratio = np.array(qp_ratio, dtype=dt_float)
        else:
            self._qp_ratio = dt_float(qp_ratio)

    def done(self):
        # never "done" by itself: it lives as long as there is a "load_p"
        return False

    def load_next(self, dict_):
        """Return ``ratio * load_p`` when called with load data, ``None`` otherwise."""
        is_load_data = ("load_p" in dict_
                        and "prod_p" not in dict_
                        and "prod_v" not in dict_)
        if not is_load_data:
            return None
        load_p = dict_["load_p"]
        if load_p is None:
            return None
        return self._qp_ratio * load_p

    def initialize(self, order_backend_prods, names_chronics_to_backend):
        # nothing to do for this particular handler
        pass

    def check_validity(self, backend):
        """When a vector ratio is used, it must have one entry per load of the grid."""
        if not isinstance(self._qp_ratio, np.ndarray):
            return
        if backend.n_load != self._qp_ratio.shape[0]:
            raise HandlerError(f"{self.array_name}: qp_ratio should either be a single float "
                               "or a numpy array with as many loads as there are loads on the grid. "
                               f"You provided {self._qp_ratio.shape[0]} ratios but there are "
                               f"{backend.n_load} loads on the grid.")

    def load_next_maintenance(self):
        raise HandlerError(f"load_next_maintenance {self.array_name}: You should only "
                           "use this class for ENVIRONMENT data, and not for FORECAST data nor MAINTENANCE data. "
                           )

    def load_next_hazard(self):
        raise HandlerError(f"load_next_hazard {self.array_name}: You should only use "
                           "this class for ENVIRONMENT data, and not for FORECAST ")

    def forecast(self,
                 forecast_horizon_id,
                 inj_dict_env,
                 inj_dict_previous_forecast,
                 # eg gen_p_handler if this is set to gen_p_for_handler:
                 env_handler,
                 # list of the 4 env handlers: (load_p_handler, load_q_handler, gen_p_handler, gen_v_handler)
                 env_handlers):
        """Apply the same q = ratio * p rule to the previous forecast data."""
        return self.load_next(inj_dict_previous_forecast)
| 3,587 | 40.72093 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/handlers/noisyForecastHandler.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from typing import Union, Callable, Iterable, Tuple
try:
from typing import Literal
except ImportError:
# Literal not available in python 3.7
from typing_extensions import Literal
from grid2op.dtypes import dt_float
from grid2op.Exceptions import HandlerError
from grid2op.Chronics.handlers.baseHandler import BaseHandler
from grid2op.Chronics.handlers.perfectForecastHandler import PerfectForecastHandler
class NoisyForecastHandler(PerfectForecastHandler):
    """Generate noisy multi-step-ahead forecasts for a given environment.

    The handler first retrieves the "perfect forecast" (see
    :class:`PerfectForecastHandler`) and then multiplies every component by
    an independent sample of a LogNormal distribution:
    ``output = lognormal(0., sigma) * input``, where ``sigma`` (the standard
    deviation of the noise) depends on the forecast horizon.

    ``sigma`` can be provided either as a callable mapping a horizon
    (in minutes) to a standard deviation, or as an iterable giving one
    standard deviation per forecast horizon. By default the noise is
    ``sqrt(horizon) * 0.01`` (an error of roughly 8% one hour ahead).

    .. seealso::
        The class :class:`PerfectForecastHandler`

    .. warning::
        This class has the same limitation as :class:`PerfectForecastHandler`:
        it only works if the environment handlers implement
        :func:`BaseHandler.get_future_data`.

    Notes
    -----
    The noise is applied independently for each variable, each horizon and
    each step: there is no correlation of any kind between the samples. For
    better quality forecasts you should use a dedicated tool such as
    "chronix2grid".

    Caveats: the noise actually applied to "gen_p" / "prod_p" differs
    slightly from the requested one because the generation is rescaled to
    roughly match the total demand plus losses (ratio 1.02 by default,
    re-estimated from real-time data when possible). There might also be
    pmin / pmax and ramp violations for the generated generators, and the
    higher the noise, the more trouble you will encounter.
    """
    def __init__(self,
                 array_name,
                 sigma: Union[Callable, Iterable]=None,
                 noise_type : Literal["mult"] = "mult",  # TO BE ADDED LATER
                 quiet_warnings : bool=False,
                 max_iter=-1):
        super().__init__(array_name, max_iter, quiet_warnings)
        self.noise_type = noise_type
        self.sigma = sigma
        # one standard deviation per forecast horizon, filled by `set_h_forecast`
        self._my_noise = None

    @staticmethod
    def _default_noise(horizon: int):
        """horizon in minutes"""
        return np.sqrt(1.0 * horizon) * 0.01  # error of ~8% at 1h

    def _get_list(self, sigma: Union[Callable, Iterable]):
        """Normalize ``sigma`` into a list with one std per forecast horizon."""
        if sigma is None:
            return [type(self)._default_noise(h) for h in self._h_forecast]
        if callable(sigma):
            return [sigma(h) for h in self._h_forecast]
        try:
            iter(sigma)
        except TypeError as exc_:
            raise HandlerError(f"{type(self)} ({self.array_name}): make "
                               "sure the sigma are either callable or iterable") from exc_
        res = [float(el) for el in sigma]
        if len(res) < len(self._h_forecast):
            # pad a too-short list by repeating its last element
            last_el = res[-1]
            import warnings
            warnings.warn(f"{type(self)} ({self.array_name}): a list too short was provided "
                          f"for one of the sigma_*** parameter ({len(res)} elements "
                          f"given but forecasts are made for {len(self._h_forecast)} horizons)")
            for _ in range(len(self._h_forecast) - len(res)):
                res.append(last_el)
        return res

    def set_h_forecast(self, h_forecast):
        """Store the horizons and (re)compute one noise std per horizon."""
        super().set_h_forecast(h_forecast)
        self._my_noise = self._get_list(self.sigma)

    def _env_loss_ratio(self, inj_dict_env):
        """Estimate the generation / load ratio from real-time data (1.02 fallback)."""
        load_p = inj_dict_env.get("load_p", None)
        prod_p = inj_dict_env.get("prod_p", None)
        if load_p is not None and prod_p is not None:
            return prod_p.sum() / load_p.sum()
        return 1.02

    def forecast(self,
                 forecast_horizon_id : int,
                 inj_dict_env : dict,
                 inj_dict_previous_forecast : dict,
                 # eg gen_p_handler if this is set to gen_p_for_handler:
                 env_handler : BaseHandler,
                 # list of the 4 env handlers: (load_p_handler, load_q_handler, gen_p_handler, gen_v_handler)
                 env_handlers : Tuple[BaseHandler, BaseHandler, BaseHandler, BaseHandler]
                 ):
        """Retrieve the perfect forecast, multiply it by the noise, rescale generation."""
        res = super().forecast(forecast_horizon_id, inj_dict_env, inj_dict_previous_forecast, env_handler, env_handlers)
        if res is None:
            return None
        if self.noise_type != "mult":
            raise HandlerError(f"{self.array_name}: the type of noise {self.noise_type} is not supported. "
                               f"Only multiplicative noise are supported at the moment")
        res *= self.space_prng.lognormal(sigma=self._my_noise[forecast_horizon_id])

        # this handler serves the generation iff the load data is already in the
        # forecast dict but the generation is not
        is_gen_handler = ("load_p" in inj_dict_previous_forecast
                          and "load_q" in inj_dict_previous_forecast
                          and "prod_p" not in inj_dict_previous_forecast)
        if is_gen_handler and inj_dict_previous_forecast["load_p"] is not None:
            # scale the generation to be slightly above the total load
            res *= inj_dict_previous_forecast["load_p"].sum() / res.sum()
            res *= self._env_loss_ratio(inj_dict_env)
        # TODO ramps, pmin, pmax !
        return res.astype(dt_float)
| 10,768 | 49.088372 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/handlers/perfectForecastHandler.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from typing import Tuple
import warnings
from grid2op.Exceptions import HandlerError
from grid2op.Chronics.handlers.baseHandler import BaseHandler
class PerfectForecastHandler(BaseHandler):
    """Handler generating "perfect forecasts": the agent will know the exact
    production, loads etc. of the near future.

    This is of course a strong, unrealistic assumption. For something slightly
    more realistic you can use the :class:`NoisyForecastHandler`, which is
    itself far from perfect: more "research" is needed in this area and any
    contribution is more than welcome!

    As the name suggests, use this class only for FORECAST data, never for
    environment or maintenance data.

    .. warning::
        It only works if the environment handlers implement
        :func:`BaseHandler.get_future_data`.
    """
    def __init__(self, array_name, max_iter=-1, quiet_warnings : bool=False):
        super().__init__(array_name, max_iter)
        # when True, suppress the warnings emitted while retrieving future data
        self.quiet_warnings = quiet_warnings

    def initialize(self, order_backend_arrays, names_chronics_to_backend):
        """Nothing particular to do at initialization."""
        pass

    def done(self):
        # never "done" by itself: only the "real data" it mirrors can end
        return False

    def load_next(self, dict_):
        raise HandlerError("You should only use this class for FORECAST data, and not for ENVIRONMENT data. "
                           "You might want to use the `DoNothingHandler` that will `do_nothing` for "
                           "the environment data (see `from grid2op.Chronics.handlers import DoNothingHandler`)")

    def check_validity(self, backend):
        return True

    def load_next_maintenance(self):
        raise HandlerError("You should only use this class for FORECAST data and not for MAINTENANCE data")

    def load_next_hazard(self):
        raise HandlerError("You should only use this class for FORECAST data and not for HAZARDS data")

    def forecast(self,
                 forecast_horizon_id : int,
                 inj_dict_env : dict,
                 inj_dict_previous_forecast : dict,
                 # eg gen_p_handler if this is set to gen_p_for_handler:
                 env_handler : BaseHandler,
                 # list of the 4 env handlers: (load_p_handler, load_q_handler, gen_p_handler, gen_v_handler)
                 env_handlers : Tuple[BaseHandler, BaseHandler, BaseHandler, BaseHandler]
                 ):
        """Ask the environment handler for the exact data at the requested horizon."""
        horizon = self._h_forecast[forecast_horizon_id]
        return env_handler.get_future_data(horizon, self.quiet_warnings)
| 3,244 | 44.704225 | 121 | py |
Grid2Op | Grid2Op-master/grid2op/Chronics/handlers/persitenceForecastHandler.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import re
from typing import Tuple
from grid2op.Exceptions import HandlerError
from grid2op.Chronics.handlers.baseHandler import BaseHandler
class PersistenceForecastHandler(BaseHandler):
    """
    Handler producing "persistence" forecasts: it simply copy-pastes the last
    known data of the environment as the forecast.

    As the name suggests, use it only for FORECAST data, never for
    environment data.
    """
    # injection keys a handler name can map to in the real-time dict
    INJ_KEYS = ("load_p", "load_q", "prod_p", "prod_v")

    def __init__(self, array_name, max_iter=-1):
        super().__init__(array_name, max_iter)
        # try to infer the injection key directly from the handler name,
        # e.g. "gen_p_forecasted" -> "prod_p"
        base_name = re.sub("_for.*$", "", array_name).replace("gen", "prod")
        self._possible_key = base_name if base_name in type(self).INJ_KEYS else None

    def initialize(self, order_backend_arrays, names_chronics_to_backend):
        """Nothing particular to do at initialization."""
        pass

    def done(self):
        # never "done" by itself: only the "real data" it copies can end
        return False

    def load_next(self, dict_):
        raise HandlerError("You should only use this class for FORECAST data, and not for ENVIRONMENT data. "
                           "You might want to use the `DoNothingHandler` that will `do_nothing` for "
                           "the environment data (see `from grid2op.Chronics.handlers import DoNothingHandler`)")

    def check_validity(self, backend):
        return True

    def load_next_maintenance(self):
        raise HandlerError("You should only use this class for FORECAST data and not for MAINTENANCE data")

    def load_next_hazard(self):
        raise HandlerError("You should only use this class for FORECAST data and not for HAZARDS data")

    def _aux_get_inj_key(self, env_handler):
        """Find which key of the real-time injection dict matches ``env_handler``."""
        if self._possible_key is not None:
            return self._possible_key
        name_to_key = {
            "load_p": "load_p",
            "load_q": "load_q",
            "prod_p": "prod_p",
            "prod_v": "prod_v",
            "gen_p": "prod_p",
            "gen_v": "prod_v",
        }
        return name_to_key.get(env_handler.array_name, None)

    def forecast(self,
                 forecast_horizon_id : int,
                 inj_dict_env : dict,
                 inj_dict_previous_forecast : dict,
                 # eg gen_p_handler if this is set to gen_p_for_handler:
                 env_handler : "BaseHandler",
                 # list of the 4 env handlers: (load_p_handler, load_q_handler, gen_p_handler, gen_v_handler)
                 env_handlers : Tuple["BaseHandler", "BaseHandler", "BaseHandler", "BaseHandler"]
                 ):
        """Return the last known environment value for the matching injection key."""
        key = self._aux_get_inj_key(env_handler)
        if key is None:
            raise HandlerError("Impossible to find which key to look for in the dictionary for "
                               "the real time data. Please change the name of the PersistenceHandler "
                               f"currently named {self.array_name} to something more specific "
                               "such as \"gen_p\" or \"load_p_forecast\"")
        if key not in inj_dict_env["injection"]:
            raise HandlerError(f"Please the remove the handler {self.array_name}. Indeed there is no data for "
                               f"{key} in any of the environment handler, so I cannot \"continue to copy\" "
                               "this data in the forecast (NB for \"prod_v\" environment data are handled "
                               "somewhere else, namely in the VoltageControler, so they cannot be 'copied' "
                               "from the time series).")
        return inj_dict_env["injection"][key]
| 4,560 | 46.020619 | 113 | py |
Grid2Op | Grid2Op-master/grid2op/Converter/AnalogStateConverter.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
import math
from grid2op.Converter import Converter
class AnalogStateConverter(Converter):
    """
    Converter that can be used with analog representation of the grid state.
    Details are provided in convert_obs and convert_act

    The grid2op observation is converted into a 1d normalized array
    The grid2op action is created from a set of real valued arrays

    It can not yet be converted to / from gym space. If this feature is interesting for you, you can
    reply to the issue posted at https://github.com/rte-france/Grid2Op/issues/16
    """

    def __init__(self, action_space, bias=0.0):
        super().__init__(action_space)
        self.__class__ = AnalogStateConverter.init_grid(action_space)
        # BUG FIX: ``bias`` used to be silently ignored (``self.__bias`` was
        # hard-coded to 0.0). The default value 0.0 preserves the previous
        # behavior for callers that did not pass a bias.
        self.__bias = bias

    @staticmethod
    def to_norm_vect(inputv, pad_v=0.0, scale_v=1.0):
        """Scale ``inputv`` by ``1 / scale_v``, replacing NaN / +-inf by ``pad_v``.

        Returns a ``np.float32`` 1D array.
        """
        v = np.asarray(inputv)
        v = v / scale_v
        vsafe = np.nan_to_num(v, nan=pad_v, posinf=pad_v, neginf=pad_v)
        return vsafe.astype(np.float32)

    @staticmethod
    def _buses_of(topo, positions):
        """Bus of each element at ``positions`` in ``topo``; disconnected (<= 0) -> 0."""
        return np.maximum(topo[positions], 0)

    def convert_obs(self, obs):
        """
        This converter will convert the observation into a 1D vector,
        with all values normalized, plus bias (if provided)

        Parameters
        ----------
        obs: :class:`grid2op.Observation.Observation`
            The input observation.

        Returns
        -------
        ``np.array`` 1D array of np.float32 normalized values
        """
        # Store the obs for action conversion (used later by convert_act)
        self.__obs = obs

        # Store some shortcuts
        topo = obs.topo_vect
        g_pos = obs.gen_pos_topo_vect
        l_pos = obs.load_pos_topo_vect
        lor_pos = obs.line_or_pos_topo_vect
        lex_pos = obs.line_ex_pos_topo_vect

        # Get time data
        time_li = [
            obs.month / 12.0,
            obs.day / 31.0,
            obs.day_of_week / 7.0,
            obs.hour_of_day / 24.0,
            obs.minute_of_hour / 60.0,
        ]
        time_v = self.to_norm_vect(time_li)
        time_line_cd = self.to_norm_vect(
            obs.time_before_cooldown_line, pad_v=-1.0, scale_v=10.0
        )
        time_line_nm = self.to_norm_vect(obs.time_next_maintenance, scale_v=10.0)
        time_sub_cd = self.to_norm_vect(
            obs.time_before_cooldown_sub, pad_v=-1.0, scale_v=10.0
        )

        # Get generators info
        g_p = self.to_norm_vect(obs.prod_p, scale_v=1000.0)
        g_q = self.to_norm_vect(obs.prod_q, scale_v=1000.0)
        g_v = self.to_norm_vect(obs.prod_v, scale_v=1000.0)
        g_tr = self.to_norm_vect(obs.target_dispatch, scale_v=150.0)
        g_ar = self.to_norm_vect(obs.actual_dispatch, scale_v=150.0)
        g_cost = self.to_norm_vect(obs.gen_cost_per_MW, pad_v=0.0, scale_v=1.0)
        g_bus = self.to_norm_vect(self._buses_of(topo, g_pos), pad_v=-1.0, scale_v=3.0)

        # Get loads info
        l_p = self.to_norm_vect(obs.load_p, scale_v=1000.0)
        l_q = self.to_norm_vect(obs.load_q, scale_v=1000.0)
        l_v = self.to_norm_vect(obs.load_v, scale_v=1000.0)
        l_bus = self.to_norm_vect(self._buses_of(topo, l_pos), pad_v=-1.0, scale_v=3.0)

        # Get lines origin info
        or_p = self.to_norm_vect(obs.p_or, scale_v=1000.0)
        or_q = self.to_norm_vect(obs.q_or, scale_v=1000.0)
        or_v = self.to_norm_vect(obs.v_or, scale_v=1000.0)
        or_bus = self.to_norm_vect(self._buses_of(topo, lor_pos), pad_v=-1.0, scale_v=3.0)
        or_rho = self.to_norm_vect(obs.rho, pad_v=-1.0)

        # Get lines extremities info
        ex_p = self.to_norm_vect(obs.p_ex, scale_v=1000.0)
        ex_q = self.to_norm_vect(obs.q_ex, scale_v=1000.0)
        ex_v = self.to_norm_vect(obs.v_ex, scale_v=1000.0)
        ex_bus = self.to_norm_vect(self._buses_of(topo, lex_pos), pad_v=-1.0, scale_v=3.0)
        ex_rho = self.to_norm_vect(obs.rho, pad_v=-1.0)

        res = np.concatenate(
            [
                # Time
                time_v,
                time_line_cd,
                time_sub_cd,
                time_line_nm,
                # Gens
                g_p,
                g_q,
                g_v,
                g_ar,
                g_tr,
                g_bus,
                g_cost,
                # Loads
                l_p,
                l_q,
                l_v,
                l_bus,
                # Origins
                or_p,
                or_q,
                or_v,
                or_bus,
                or_rho,
                # Extremities
                ex_p,
                ex_q,
                ex_v,
                ex_bus,
                ex_rho,
            ]
        )
        return res + self.__bias

    def convert_act(self, netstate):
        """
        Create a grid2op action based on the last observation and
        the real valued state vectors in parameters

        Parameters
        ----------
        netstate: ``tuple``
            A tuple containing the following (3) elements:

            - netbus: ``np.array``
              A numpy array of dimension n_bus(2) x dim_topo and range [0.0; 1.0].
              Where the first axis represent the bus, the second the elements.
              Then, for element i, netbus[bus_index][i] represent the probability
              element i should be on bus_index + 1.
              The buses are then picked using argmax across dimension 0

            - netline: ``np.array``
              A numpy array of dimension n_line and range [0.0; 1.0]
              Each element representing a line status:
              0 meaning disconnected and > 0.0 connected

            - netdisp: ``np.array``
              A numpy array of dimension n_gen and range[-1.0;1.0]
              Each generator redispatch setpoint is then
              rescaled to the range [-rdown;+rup].
              This is cumulative over time, as per grid2op convention.

        Returns
        -------
        res: :class:`grid2op.Action.Action`
            An action that will change the last observation (current state)
            To the state described in parameters
        """
        netbus = netstate[0]
        netline = netstate[1]
        netdisp = netstate[2]

        # requires convert_obs to have been called first (self.__obs)
        act_setbus = self.netbus_to_act_setbus(self.__obs, netbus)
        act_setstatus = self.netline_to_act_setstatus(self.__obs, netline)
        act_redispatch = self.netdisp_to_act_redispatch(self.__obs, netdisp)

        act = self.__call__(
            {
                "set_bus": act_setbus,
                "set_line_status": act_setstatus,
                "redispatch": act_redispatch,
            }
        )
        return act

    @staticmethod
    def size_obs(obs):
        """Total length of the vector produced by :func:`convert_obs` for ``obs``."""
        dims = np.array(
            [
                # Time
                5,  # Timestamp
                2 * obs.n_line,
                obs.n_sub,
                # Gen
                obs.n_gen * 7,
                # Load
                obs.n_load * 4,
                # Line origins
                obs.n_line * 5,
                # Line extremities
                obs.n_line * 5,
            ]
        )
        return np.sum(dims)

    @staticmethod
    def netbus_to_act_setbus(obs, net_bus):
        # n_bus x dim_topo x p([0.0; 1.0]) ->
        # -> dim_topo x [0 unchanged; 1: bus_1; 2 bus_2 ]

        # Pick the buses
        act_setbus = np.argmax(net_bus, axis=0) + 1
        # Don't set disconnected elements
        act_setbus[obs.topo_vect <= 0] = 0
        # Don't set elements already on the correct bus
        act_setbus[act_setbus == obs.topo_vect] = 0
        return act_setbus

    @staticmethod
    def netline_to_act_setstatus(obs, net_line):
        # [0.0 Disconnect; > 0.0 Connect] ->
        # -> [0.0 Unchanged; -1.0 Disconnect; 1.0 Connect]
        act_setstatus = np.copy(net_line)
        act_setstatus[net_line <= 0.0] = -1
        act_setstatus[net_line > 0.0] = 1
        # Do not 'set' lines already in the requested state
        # (explicit masks replacing the previous boolean-equality trick,
        # which produced the same result in two convoluted steps):
        # already connected lines asked to connect ...
        act_setstatus[obs.line_status & (act_setstatus == 1)] = 0
        # ... and already disconnected lines asked to disconnect
        act_setstatus[(~obs.line_status) & (act_setstatus == -1)] = 0
        return act_setstatus

    @staticmethod
    def netdisp_to_act_redispatch(obs, net_disp):
        # [-1.0;1.0] -> [-ramp_down;+ramp_up]
        act_redispatch = np.zeros(obs.n_gen)
        for i, d in enumerate(net_disp):
            if math.isclose(d, 0.0):  # Skip if 0.0
                continue
            rmin = obs.gen_max_ramp_down[i]
            rmax = obs.gen_max_ramp_up[i]
            r = np.interp(d, [-1.0, 1.0], [-rmin, rmax])
            act_redispatch[i] = round(r)  # Round at 1MW
        return act_redispatch

    # Helpers to generate random actions
    @staticmethod
    def netbus_rnd(obs, n_bus=2):
        """Random analog bus target: current topology with a few random flips."""
        # Copy obs state
        rnd_topo = np.zeros((n_bus, obs.dim_topo))
        rnd_topo[0][obs.topo_vect == 1] = 1.0
        rnd_topo[1][obs.topo_vect == 2] = 1.0

        # Pick a random substation
        rnd_sub = np.random.randint(obs.n_sub)
        n_elem = obs.sub_info[rnd_sub]
        # Pick a random number of elements to change
        rnd_n_changes = np.random.randint(n_elem + 1)
        # Pick the elements to change at random
        rnd_sub_elems = np.random.randint(0, n_elem, rnd_n_changes)
        # Set the topo vect
        sub_topo_pos = np.sum(obs.sub_info[0:rnd_sub])
        for elem_pos in rnd_sub_elems:
            rnd_bus = np.random.randint(n_bus)
            rnd_topo[rnd_bus][sub_topo_pos + elem_pos] = 1.0
            # Set the other buses to 0.0
            for b in range(n_bus):
                if b == rnd_bus:
                    continue
                rnd_topo[b][sub_topo_pos + elem_pos] = 0.0
        return rnd_topo

    @staticmethod
    def netline_rnd(obs):
        """Random analog line-status target: current statuses with one line toggled."""
        rnd_lines = obs.line_status.astype(np.float32)
        rnd_lineid = np.random.randint(obs.n_line)
        rnd_linestatus = not obs.line_status[rnd_lineid]
        rnd_lines[rnd_lineid] = np.int32(rnd_linestatus)
        return rnd_lines

    @staticmethod
    def netdisp_rnd(obs):
        """Random analog redispatch target: one random generator, random setpoint."""
        disp_rnd = np.zeros(obs.n_gen)
        # Take random gen to disp
        rnd_gen = np.random.randint(obs.n_gen)
        # Take a random disp
        rnd_ramp = np.random.uniform(-1.0, 1.0)
        disp_rnd[rnd_gen] = rnd_ramp
        return disp_rnd
| 11,496 | 34.928125 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Converter/BackendConverter.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
import copy
import warnings
from grid2op.dtypes import dt_float, dt_int
from grid2op.Backend import Backend
from grid2op.Exceptions import Grid2OpException, BackendError
# Error message templates shared by the consistency checks of BackendConverter
ERROR_NB_ELEMENTS = (
    "Impossible to make a BackendConverter with backends having different number of {}."
)
# {0}: element type, {1}: target substation id, {2}: source substation id
ERROR_ELEMENT_CONNECTED = (
    "No {0} connected at substation {1} for the target backend while a {0} is connected "
    "at substation {2} for the source backend"
)
ERROR_INVALID_VECTOR = "invalid vector: some element are not found in either source or target"
class BackendConverter(Backend):
"""
    Convert two instances of backend to "align" them. This can be useful if you have time series
from a given "type of backend" (with certain names) but want to use another type
of backend to run the computation.
    Another use case is if you have agents trained with a certain backend type (so having loads,
generators etc. in certain order) but want to use a different backend to run the powerflow. You train
your agent with a "simple and fast" powerflow but want to test them with a "more realistic" one.
This means that grid2op will behave exactly as is the "source backend" class is used everywhere, but
the powerflow computation will be carried out by the "target backend".
# TODO: rename: "source backend" as "user backend" and "target backend" as "powerflow backend" or something
like that.
This means that from grid2op point of view, and from the agent point of view, line will be the order given
by "source backend", load will be in the order of "source backend", topology will be given with the
one from "source backend" etc. etc.
Be careful, the BackendAction will also need to be transformed. Backend action is given with the order
of the "source backend" and will have to be modified when transmitted to the "target backend".
On the other end, no powerflow at all (except if some powerflows are performed at the initialization) will
be computed using the source backend, only the target backend is relevant for the powerflow computations.
Note that these backend need to access the grid description file from both "source backend" and "target backend"
class. The underlying grid must be the same.
# TODO: have a "mapping" from source name to target name in the constructor. If not provided it's the current
behaviour (everything is automatic) but if present it maps the things according to this mapping. This mapping
could be like `names_chronics_to_backend`
Examples
---------
Here is a (dummy and useless) example of how to use this class.
.. code-block:: python
import grid2op
from grid2op.Converter import BackendConverter
from grid2op.Backend import PandaPowerBackend
from lightsim2grid import LightSimBackend
backend = BackendConverter(source_backend_class=PandaPowerBackend,
target_backend_class=LightSimBackend,
target_backend_grid_path=None)
# and now your environment behaves as if PandaPowerBackend did the computation (same load order, same
generator order etc.) but real computation are made with LightSimBackend.
# NB: for this specific example it is useless to do it because LightSimBackend and PandaPowerBackend have
# by default the same order etc. This is just an illustration here
# NB as of now you cannot use a runner with this method (yet)
env = grid2op.make(..., backend=backend)
# do regular computations here
"""
    # marker telling grid2op this backend is a converter wrapping two other backends
    IS_BK_CONVERTER = True
    def __init__(
        self,
        source_backend_class,
        target_backend_class,
        target_backend_grid_path=None,
        sub_source_target=None,
        detailed_infos_for_cascading_failures=False,
        use_target_backend_name=False,
        kwargs_target_backend=None,
        kwargs_source_backend=None,
    ):
        """Build the converter from the two backend classes.

        Parameters
        ----------
        source_backend_class:
            Backend class whose element ordering is exposed to grid2op (no
            powerflow is run with it once initialization is done).
        target_backend_class:
            Backend class actually carrying out the powerflow computations.
        target_backend_grid_path: ``str``, optional
            Path to a dedicated grid description file for the target backend,
            when it cannot read the same file as the source backend.
        sub_source_target: ``dict``, optional
            Mapping from substation names of the source backend to substation
            names of the target backend. ``None`` means automatic matching
            (names must then be identical).
        detailed_infos_for_cascading_failures: ``bool``
            Forwarded to both underlying backends.
        use_target_backend_name: ``bool``
            Which backend's names to use when loading time series data.
        kwargs_target_backend: ``dict``, optional
            Extra keyword arguments used to build the target backend.
        kwargs_source_backend: ``dict``, optional
            Extra keyword arguments used to build the source backend.
        """
        Backend.__init__(
            self,
            detailed_infos_for_cascading_failures=detailed_infos_for_cascading_failures,
            use_target_backend_name=use_target_backend_name,
            kwargs_target_backend=kwargs_target_backend,
            kwargs_source_backend=kwargs_source_backend,
        )
        difcf = detailed_infos_for_cascading_failures
        if kwargs_source_backend is None:
            kwargs_source_backend = {}
        self.source_backend = source_backend_class(
            detailed_infos_for_cascading_failures=difcf,
            **kwargs_source_backend
        ) # the one for the order of the elements
        if kwargs_target_backend is None:
            kwargs_target_backend = {}
        self.target_backend = target_backend_class(
            detailed_infos_for_cascading_failures=difcf,
            **kwargs_target_backend
        ) # the one to computes powerflow
        # if the target backend (the one performing the powerflows) needs a different file
        self.target_backend_grid_path = target_backend_grid_path
        # key: name in the source backend, value name in the target backend, for the substations
        self.sub_source_target = sub_source_target
        # correspondence vectors, filled later by `_init_myself`:
        # if tmp is from the target backend, then tmp[self._line_tg2sr] is ordered according to the source backend
        self._sub_tg2sr = None
        self._sub_sr2tg = None
        self._line_tg2sr = None
        self._line_sr2tg = None
        self._gen_tg2sr = None
        self._gen_sr2tg = None
        self._load_tg2sr = None
        self._load_sr2tg = None
        self._shunt_tg2sr = None
        self._shunt_sr2tg = None
        self._topo_tg2sr = None
        self._topo_sr2tg = None
        self._storage_tg2sr = None
        self._storage_sr2tg = None
        # for redispatching data
        self.path_redisp = None
        self.name_redisp = None
        self.path_grid_layout = None
        self.name_grid_layout = None
        self.path_storage_data = None
        self.name_storage_data = None
        # for easier copy of np array
        self.cst1 = dt_float(1.0)
        # which name to use when loading the data (if names_chronics_to_backend has not been set)
        self.use_target_backend_name = use_target_backend_name
        self.names_target_to_source = None
# TODO storage check all this class ! + the doc of the backend
def load_grid(self, path=None, filename=None):
self.source_backend.load_grid(path, filename)
# and now i load the target backend
if self.target_backend_grid_path is not None:
self.target_backend.load_grid(path=self.target_backend_grid_path)
else:
# both source and target backend understands the same format
self.target_backend.load_grid(path, filename)
def _assert_same_grid(self):
"""basic assertion that self and the target backend have the same grid
but not necessarily the same object at the same place of course"""
cls = type(self)
tg_cls = type(self.target_backend)
if cls.n_sub != tg_cls.n_sub:
raise Grid2OpException(ERROR_NB_ELEMENTS.format("substations"))
if cls.n_gen != tg_cls.n_gen:
raise Grid2OpException(ERROR_NB_ELEMENTS.format("generators"))
if cls.n_load != tg_cls.n_load:
raise Grid2OpException(ERROR_NB_ELEMENTS.format("loads"))
if cls.n_line != tg_cls.n_line:
raise Grid2OpException(ERROR_NB_ELEMENTS.format("lines"))
if cls.n_storage > 0:
if cls.n_storage != tg_cls.n_storage:
raise Grid2OpException(ERROR_NB_ELEMENTS.format("storages"))
else:
# a possible reason is that the "source backend" do not support storage
# but the target one does. In this case I issue a warning,
# and make sure I have not storage.
if tg_cls.n_storage > 0:
warnings.warn("BackendConverter: the source backend does not appear to support storage units, "
"but the target one does (and there are some storage units on the grid). "
"Be aware that the converted backend will NOT support storage units.")
    def _init_myself(self):
        """Build every source<->target correspondence vector (substations,
        loads, generators, powerlines, topology, storages and shunts).
        """
        # shortcut to set all information related to the class, except the name of the environment
        # this should been done when the source backend is fully initialized only
        self._assert_same_grid()
        # and now init all the converting vectors
        # a) for substation
        self._sub_tg2sr = np.full(self.n_sub, fill_value=-1, dtype=dt_int)
        self._sub_sr2tg = np.full(self.n_sub, fill_value=-1, dtype=dt_int)
        if self.sub_source_target is None:
            # automatic mode
            # I can only do it if the names matches
            if np.all(
                sorted(self.source_backend.name_sub)
                == sorted(self.target_backend.name_sub)
            ):
                for id_source, nm_source in enumerate(self.source_backend.name_sub):
                    id_target = np.where(self.target_backend.name_sub == nm_source)[0]
                    self._sub_tg2sr[id_source] = id_target
                    self._sub_sr2tg[id_target] = id_source
        else:
            # an explicit source name -> target name mapping was provided
            for id_source, nm_source in enumerate(self.source_backend.name_sub):
                nm_target = self.sub_source_target[nm_source]
                id_target = np.where(self.target_backend.name_sub == nm_target)[0]
                self._sub_tg2sr[id_source] = id_target
                self._sub_sr2tg[id_target] = id_source
        # b) for load
        self._load_tg2sr = np.full(self.n_load, fill_value=-1, dtype=dt_int)
        self._load_sr2tg = np.full(self.n_load, fill_value=-1, dtype=dt_int)
        # automatic mode
        self._auto_fill_vect_load_gen_shunt(
            n_element=self.n_load,
            source_2_id_sub=self.source_backend.load_to_subid,
            target_2_id_sub=self.target_backend.load_to_subid,
            tg2sr=self._load_tg2sr,
            sr2tg=self._load_sr2tg,
            nm="load",
        )
        # c) for generator
        self._gen_tg2sr = np.full(self.n_gen, fill_value=-1, dtype=dt_int)
        self._gen_sr2tg = np.full(self.n_gen, fill_value=-1, dtype=dt_int)
        # automatic mode
        self._auto_fill_vect_load_gen_shunt(
            n_element=self.n_gen,
            source_2_id_sub=self.source_backend.gen_to_subid,
            target_2_id_sub=self.target_backend.gen_to_subid,
            tg2sr=self._gen_tg2sr,
            sr2tg=self._gen_sr2tg,
            nm="gen",
        )
        # d) for powerline
        self._line_tg2sr = np.full(self.n_line, fill_value=-1, dtype=dt_int)
        self._line_sr2tg = np.full(self.n_line, fill_value=-1, dtype=dt_int)
        # automatic
        self._auto_fill_vect_powerline()
        # e) and now the topology vectors.
        self._topo_tg2sr = np.full(self.source_backend.dim_topo, fill_value=-1, dtype=dt_int)
        self._topo_sr2tg = np.full(self.target_backend.dim_topo, fill_value=-1, dtype=dt_int)
        self._auto_fill_vect_topo()
        # f) for the storage units
        self._storage_tg2sr = np.full(self.n_storage, fill_value=-1, dtype=dt_int)
        self._storage_sr2tg = np.full(self.n_storage, fill_value=-1, dtype=dt_int)
        # automatic mode
        if self.n_storage:
            self._auto_fill_vect_load_gen_shunt(
                n_element=self.n_storage,
                source_2_id_sub=self.source_backend.storage_to_subid,
                target_2_id_sub=self.target_backend.storage_to_subid,
                tg2sr=self._storage_tg2sr,
                sr2tg=self._storage_sr2tg,
                nm="storage",
            )
        # shunt are available if both source and target provide it
        self.shunts_data_available = (
            self.source_backend.shunts_data_available
            and self.target_backend.shunts_data_available
        )
        if self.shunts_data_available:
            self._shunt_tg2sr = np.full(self.n_shunt, fill_value=-1, dtype=dt_int)
            self._shunt_sr2tg = np.full(self.n_shunt, fill_value=-1, dtype=dt_int)
            # automatic mode
            self._auto_fill_vect_load_gen_shunt(
                n_element=self.n_shunt,
                source_2_id_sub=self.source_backend.shunt_to_subid,
                target_2_id_sub=self.target_backend.shunt_to_subid,
                tg2sr=self._shunt_tg2sr,
                sr2tg=self._shunt_sr2tg,
                nm="shunt",
            )
        else:
            self.n_shunt = 0
            self.name_shunt = np.empty(0, dtype=str)
        # thermal limits come from the target backend, reordered to source order
        self._thermal_limit_a = 1.0 * self.target_backend.thermal_limit_a
        self.set_thermal_limit(self.target_backend.thermal_limit_a[self._line_tg2sr])
def _get_possible_target_ids(self, id_source, source_2_id_sub, target_2_id_sub, nm):
id_sub_source = source_2_id_sub[id_source]
id_sub_target = self._sub_tg2sr[id_sub_source]
ids_target = np.where(target_2_id_sub == id_sub_target)[0]
if ids_target.shape[0] == 0:
raise RuntimeError(
ERROR_ELEMENT_CONNECTED.format(nm, id_sub_target, id_sub_source)
)
return id_sub_target, ids_target
def _auto_fill_vect_load_gen_shunt(
self, n_element, source_2_id_sub, target_2_id_sub, tg2sr, sr2tg, nm
):
nb_load_per_sub = np.zeros(self.n_sub, dtype=dt_int)
if source_2_id_sub.shape[0] != n_element:
raise RuntimeError(
"Impossible to convert backend that do not have the same number of objects"
)
if target_2_id_sub.shape[0] != n_element:
raise RuntimeError(
"Impossible to convert backend that do not have the same number of objects"
)
for id_source in range(n_element):
id_sub_target, id_target = self._get_possible_target_ids(
id_source, source_2_id_sub, target_2_id_sub, nm
)
id_target = id_target[nb_load_per_sub[id_sub_target]]
# TODO no no no use the "to_sub_pos" to compute that, and even better raise an error in this case
# this means automatic is failing here !
nb_load_per_sub[id_sub_target] += 1
tg2sr[id_source] = id_target
sr2tg[id_target] = id_source
def _auto_fill_vect_powerline(self):
# automatic matching
nb_load_per_sub = np.zeros((self.n_sub, self.n_sub), dtype=dt_int)
n_element = self.n_line
source_or_2_id_sub = self.source_backend.line_or_to_subid
target_or_2_id_sub = self.target_backend.line_or_to_subid
source_ex_2_id_sub = self.source_backend.line_ex_to_subid
target_ex_2_id_sub = self.target_backend.line_ex_to_subid
nm = "powerline"
tg2sr = self._line_tg2sr
sr2tg = self._line_sr2tg
for id_source in range(n_element):
idor_sub_source = source_or_2_id_sub[id_source]
idor_sub_target = self._sub_tg2sr[idor_sub_source]
idex_sub_source = source_ex_2_id_sub[id_source]
idex_sub_target = self._sub_tg2sr[idex_sub_source]
ids_target = np.where(
(target_or_2_id_sub == idor_sub_target)
& (target_ex_2_id_sub == idex_sub_target)
)[0]
if ids_target.shape[0] == 0:
raise RuntimeError(
ERROR_ELEMENT_CONNECTED.format(
nm,
"{}->{}".format(idor_sub_target, idex_sub_target),
"{}->{}".format(idor_sub_source, idex_sub_source),
)
)
id_target = ids_target[nb_load_per_sub[idor_sub_target, idex_sub_target]]
# TODO no no no use the "to_sub_pos" to compute that, and even better raise an error in this case
# this means automatic is failing here !
nb_load_per_sub[idor_sub_target, idex_sub_target] += 1
tg2sr[id_source] = id_target
sr2tg[id_target] = id_source
def _auto_fill_vect_topo(self):
self._auto_fill_vect_topo_aux(
self.n_load,
self.source_backend.load_pos_topo_vect,
self.target_backend.load_pos_topo_vect,
self._load_sr2tg,
)
self._auto_fill_vect_topo_aux(
self.n_gen,
self.source_backend.gen_pos_topo_vect,
self.target_backend.gen_pos_topo_vect,
self._gen_sr2tg,
)
self._auto_fill_vect_topo_aux(
self.n_line,
self.source_backend.line_or_pos_topo_vect,
self.target_backend.line_or_pos_topo_vect,
self._line_sr2tg,
)
self._auto_fill_vect_topo_aux(
self.n_line,
self.source_backend.line_ex_pos_topo_vect,
self.target_backend.line_ex_pos_topo_vect,
self._line_sr2tg,
)
if self.n_storage:
self._auto_fill_vect_topo_aux(
self.n_storage,
self.source_backend.storage_pos_topo_vect,
self.target_backend.storage_pos_topo_vect,
self._storage_sr2tg,
)
    def _auto_fill_vect_topo_aux(self, n_elem, source_pos, target_pos, sr2tg):
        # map the topology positions: the element at target position
        # target_pos[k] corresponds to source position source_pos[sr2tg[k]]
        # TODO that might not be working as intented... it always says it's the identity...
        self._topo_tg2sr[source_pos[sr2tg]] = target_pos
        self._topo_sr2tg[target_pos] = source_pos[sr2tg]
def assert_grid_correct(self):
# this is done before a call to this function, by the environment
tg_cls = type(self.target_backend)
sr_cls = type(self.source_backend)
env_name = type(self).env_name
tg_cls.set_env_name(env_name)
sr_cls.set_env_name(env_name)
# handle specifc case of shunt data:
if not self.target_backend.shunts_data_available:
# disable the shunt data in grid2op.
self.source_backend.shunts_data_available = False
self.source_backend.n_shunt = None
self.source_backend.name_shunt = np.empty(0, dtype=str)
self._init_class_attr(obj=self.source_backend)
if self.path_redisp is not None:
# redispatching data were available
try:
super().load_redispacthing_data(self.path_redisp, name=self.name_redisp)
self.source_backend.load_redispacthing_data(
self.path_redisp, name=self.name_redisp
)
except BackendError as exc_:
self.redispatching_unit_commitment_availble = False
warnings.warn(f"Impossible to load redispatching data. This is not an error but you will not be able "
f"to use all grid2op functionalities. "
f"The error was: \"{exc_}\"")
if self.path_storage_data is not None:
super().load_storage_data(self.path_storage_data, self.name_storage_data)
self.source_backend.load_storage_data(
self.path_storage_data, name=self.name_storage_data
)
self.target_backend.load_storage_data(
self.path_storage_data, name=self.name_storage_data
)
if self.path_grid_layout is not None:
# grid layout data were available
super().load_grid_layout(self.path_grid_layout, self.name_grid_layout)
self.source_backend.load_grid_layout(
self.path_redisp, name=self.name_redisp
)
# init the target backend (the one that does the computation and that is initialized)
self.target_backend.assert_grid_correct()
# initialize the other one, because, well the grid should be seen from both backend
self.source_backend._init_class_attr(obj=self)
self.source_backend.assert_grid_correct()
# and this should be called after all the rest
super().assert_grid_correct()
# everything went well, so i can properly terminate my initialization
self._init_myself()
# redefine this as the class changed after "assert grid correct"
tg_cls = type(self.target_backend)
sr_cls = type(self.source_backend)
cls = type(self)
if self.sub_source_target is None:
# automatic mode for substations, names must match
assert np.all(
self.target_backend.name_sub[self._sub_tg2sr]
== self.source_backend.name_sub
)
assert np.all(
self.source_backend.name_sub[self._sub_sr2tg]
== self.target_backend.name_sub
)
# check that all corresponding vectors are valid (and properly initialized, like every component above 0 etc.)
self._check_both_consistent(self._line_tg2sr, self._line_sr2tg)
self._check_both_consistent(self._load_tg2sr, self._load_sr2tg)
self._check_both_consistent(self._gen_tg2sr, self._gen_sr2tg)
self._check_both_consistent(self._sub_tg2sr, self._sub_sr2tg)
if cls.n_storage == tg_cls.n_storage:
# both source and target supports storage units
self._check_both_consistent(self._topo_tg2sr, self._topo_sr2tg)
elif self.n_storage == 0:
# n_storage == 0 and there are storage units on the source backend
# this means that the target_backend supports storage but not
# the source one
assert np.all(self._topo_sr2tg[self._topo_tg2sr] >= 0)
assert np.all(sorted(self._topo_sr2tg[self._topo_tg2sr]) == np.arange(self.dim_topo))
topo_sr2tg_without_storage = self._topo_sr2tg[self._topo_sr2tg >= 0]
assert np.sum(self._topo_sr2tg == -1) == tg_cls.n_storage
assert np.all(self._topo_tg2sr[topo_sr2tg_without_storage] >= 0)
target_without_storage = np.array([i for i in range(tg_cls.dim_topo)
if not i in tg_cls.storage_pos_topo_vect])
assert np.all(sorted(self._topo_tg2sr[topo_sr2tg_without_storage]) == target_without_storage)
self._topo_sr2tg = topo_sr2tg_without_storage
if self.shunts_data_available:
self._check_both_consistent(self._shunt_tg2sr, self._shunt_sr2tg)
# finally check that powergrids are identical (up to the env name)
tg_cls.same_grid_class(sr_cls)
# once everything is done, make the converter for the names
d_loads = {tg_cls.name_load[i]: sr_cls.name_load[self._load_sr2tg[i]]
for i in range(cls.n_load)}
d_gens = {tg_cls.name_gen[i]: sr_cls.name_gen[self._gen_sr2tg[i]]
for i in range(cls.n_gen)}
d_lines = {tg_cls.name_line[i]: sr_cls.name_line[self._line_sr2tg[i]]
for i in range(cls.n_line)}
d_subs = {tg_cls.name_sub[i]: sr_cls.name_sub[self._sub_sr2tg[i]]
for i in range(cls.n_sub)}
dict_ = {"loads": d_loads, "lines": d_lines, "prods": d_gens, "subs": d_subs}
self.names_target_to_source = dict_
def _check_vect_valid(self, vect):
assert np.all(
vect >= 0
), ERROR_INVALID_VECTOR
assert sorted(np.unique(vect)) == sorted(
vect
), ERROR_INVALID_VECTOR
if vect.shape[0] > 0:
assert (
np.max(vect) == vect.shape[0] - 1
), ERROR_INVALID_VECTOR
def _check_both_consistent(self, tg2sr, sr2tg):
self._check_vect_valid(tg2sr)
self._check_vect_valid(sr2tg)
res = np.arange(tg2sr.shape[0])
assert np.all(tg2sr[sr2tg] == res)
assert np.all(sr2tg[tg2sr] == res)
    def assert_grid_correct_after_powerflow(self):
        """Check consistency once a powerflow has been run.

        We don't assert that
        `self.source_backend.assert_grid_correct_after_powerflow()`
        because obviously no powerflow are run using the source backend.
        """
        self.target_backend.assert_grid_correct_after_powerflow()
        super().assert_grid_correct_after_powerflow()
        # only the target backend (the one that computed) knows this value
        self._sh_vnkv = self.target_backend._sh_vnkv
    def reset(self, grid_path, grid_filename=None):
        """
        Reload the power grid.
        For backwards compatibility this method calls `Backend.load_grid`.
        But it is encouraged to overload it in the subclasses.
        """
        # NOTE(review): only the target backend (the one running the
        # powerflows) is reset here; the source backend keeps its state --
        # confirm this is intended.
        self.target_backend.reset(grid_path, grid_filename=grid_filename)
    def close(self):
        # release the resources held by both underlying backends
        self.source_backend.close()
        self.target_backend.close()
    def apply_action(self, action):
        # action is from the source backend
        # reorder its components to the target backend order before applying
        action_target = self._transform_action(action)
        self.target_backend.apply_action(action_target)
    def runpf(self, is_dc=False):
        # powerflows are always carried out by the target backend
        return self.target_backend.runpf(is_dc=is_dc)
    def copy(self):
        # the two underlying backends cannot be deep-copied blindly: detach
        # them, deep-copy the (light) converter shell, then copy each backend
        # with its own `copy` method and re-attach the originals to `self`
        source_backend_sv = self.source_backend
        target_backend_sv = self.target_backend
        self.source_backend = None
        self.target_backend = None
        res = copy.deepcopy(self)
        res.source_backend = source_backend_sv.copy()
        res.target_backend = target_backend_sv.copy()
        self.source_backend = source_backend_sv
        self.target_backend = target_backend_sv
        return res
    def save_file(self, full_path):
        # persist the grid of both underlying backends
        self.target_backend.save_file(full_path)
        self.source_backend.save_file(full_path)
    def get_line_status(self):
        # read from the target backend, reorder to the source backend order
        tmp = self.target_backend.get_line_status()
        return tmp[self._line_tg2sr]
    def get_line_flow(self):
        # `self.cst1 *` forces a fresh array so callers cannot mutate the
        # target backend internals
        tmp = self.target_backend.get_line_flow()
        return self.cst1 * tmp[self._line_tg2sr]
    def set_thermal_limit(self, limits):
        # `limits` is expressed in the source backend order
        super().set_thermal_limit(limits=limits)
        self.source_backend.set_thermal_limit(limits=limits)
        if limits is not None:
            # reorder to the target backend order before forwarding
            self.target_backend.set_thermal_limit(limits=limits[self._line_sr2tg])
    def get_thermal_limit(self):
        # thermal limits, in source backend order
        tmp = self.target_backend.get_thermal_limit()
        return self.cst1 * tmp[self._line_tg2sr]
    def get_topo_vect(self):
        # topology vector, reordered to the source backend conventions
        tmp = self.target_backend.get_topo_vect()
        return tmp[self._topo_tg2sr]
    def generators_info(self):
        # p, q and v of every generator, in source backend order
        prod_p, prod_q, prod_v = self.target_backend.generators_info()
        return (
            self.cst1 * prod_p[self._gen_tg2sr],
            self.cst1 * prod_q[self._gen_tg2sr],
            self.cst1 * prod_v[self._gen_tg2sr],
        )
    def loads_info(self):
        # p, q and v of every load, in source backend order
        load_p, load_q, load_v = self.target_backend.loads_info()
        return (
            self.cst1 * load_p[self._load_tg2sr],
            self.cst1 * load_q[self._load_tg2sr],
            self.cst1 * load_v[self._load_tg2sr],
        )
    def lines_or_info(self):
        # p, q, v and a at the origin side of each line, in source backend order
        p_, q_, v_, a_ = self.target_backend.lines_or_info()
        return (
            self.cst1 * p_[self._line_tg2sr],
            self.cst1 * q_[self._line_tg2sr],
            self.cst1 * v_[self._line_tg2sr],
            self.cst1 * a_[self._line_tg2sr],
        )
    def lines_ex_info(self):
        # p, q, v and a at the extremity side of each line, in source backend order
        p_, q_, v_, a_ = self.target_backend.lines_ex_info()
        return (
            self.cst1 * p_[self._line_tg2sr],
            self.cst1 * q_[self._line_tg2sr],
            self.cst1 * v_[self._line_tg2sr],
            self.cst1 * a_[self._line_tg2sr],
        )
    def storages_info(self):
        # p, q and v of every storage unit, in source backend order
        p_, q_, v_ = self.target_backend.storages_info()
        return (
            self.cst1 * p_[self._storage_tg2sr],
            self.cst1 * q_[self._storage_tg2sr],
            self.cst1 * v_[self._storage_tg2sr],
        )
    def shunt_info(self):
        # p, q, v and bus of every shunt, in source backend order
        if self._shunt_tg2sr is not None:
            # shunts are supported by both source and target backend
            sh_p, sh_q, sh_v, sh_bus = self.target_backend.shunt_info()
            return (
                sh_p[self._shunt_tg2sr],
                sh_q[self._shunt_tg2sr],
                sh_v[self._shunt_tg2sr],
                sh_bus[self._shunt_tg2sr],
            )
        # shunt are not supported by either source or target backend
        return [], [], [], []
    def sub_from_bus_id(self, bus_id):
        # not supported because the bus_id is given into the source backend,
        # and i need to convert it to the target backend, not sure how to do that atm
        raise Grid2OpException(
            "This backend doesn't allow to get the substation from the bus id."
        )
    def _disconnect_line(self, id_):
        # `id_` is a source backend line id; `_line_tg2sr[id_source]` stores
        # the matching target backend id (see `_auto_fill_vect_powerline`),
        # hence this conversion
        id_target = int(
            self._line_tg2sr[id_]
        ) # not sure why, but it looks to work this way
        self.target_backend._disconnect_line(id_target)
    def _transform_action(self, source_action):
        # transform the source action into the target backend action
        # source_action: a backend action!
        # deep copy so the caller's action is never modified
        target_action = copy.deepcopy(source_action)
        # consistent with TestLoadingBackendFunc, otherwise it's not correct
        target_action.reorder(
            no_load=self._load_sr2tg,
            no_gen=self._gen_sr2tg,
            no_topo=self._topo_sr2tg,
            no_shunt=self._shunt_sr2tg,
            no_storage=self._storage_sr2tg,
        )
        return target_action
    def load_redispacthing_data(self, path, name="prods_charac.csv"):
        # data are loaded with the name of the source backend, i need to map it to the target backend too
        # actual loading is deferred to `assert_grid_correct`: only remember
        # where the data live for now
        self.path_redisp = path
        self.name_redisp = name
    def load_storage_data(self, path, name="storage_units_charac.csv"):
        # loading deferred as well (see `assert_grid_correct`)
        self.path_storage_data = path
        self.name_storage_data = name
    def load_grid_layout(self, path, name="grid_layout.json"):
        # loading deferred as well (see `assert_grid_correct`)
        self.path_grid_layout = path
        self.name_grid_layout = name
def get_action_to_set(self):
act = self.target_backend.get_action_to_set()
line_vect = self._line_sr2tg
gen_vect = self._gen_sr2tg
load_vect = self._load_sr2tg
topo_vect = self._topo_sr2tg
dict_ = act._dict_inj
if "prod_p" in dict_:
dict_["dict_"] = dict_["prod_p"][gen_vect]
if "prod_v" in dict_:
dict_["dict_"] = dict_["prod_v"][gen_vect]
if "load_p" in dict_:
dict_["dict_"] = dict_["load_p"][load_vect]
if "load_q" in dict_:
dict_["dict_"] = dict_["load_q"][load_vect]
act._set_topo_vect[:] = act._set_topo_vect[topo_vect]
act._change_bus_vect[:] = act._change_bus_vect[topo_vect]
act._hazards[:] = act._hazards[line_vect]
act._maintenance[:] = act._hazards[line_vect]
act._redispatch[:] = act._redispatch[gen_vect]
act._set_line_status[:] = act._set_line_status[line_vect]
act._switch_line_status[:] = act._switch_line_status[line_vect]
if act.shunt_added and act.shunts_data_available:
shunt_vect = self._shunt_sr2tg
act.shunt_p[:] = act.shunt_p[shunt_vect]
act.shunt_q[:] = act.shunt_q[shunt_vect]
act.shunt_bus[:] = act.shunt_bus[shunt_vect]
return act
    def update_thermal_limit(self, env):
        # intentionally a no-op for now: dynamic thermal limits are not
        # forwarded to the target backend
        # TODO
        # env has the powerline stored in the order of the source backend, but i need
        # to have them stored in the order of the target backend for such function
        pass
# TODO update_from_obs too, maybe ?
| 32,167 | 42.825613 | 118 | py |
Grid2Op | Grid2Op-master/grid2op/Converter/ConnectivityConverter.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Converter.Converters import Converter
from grid2op.dtypes import dt_int, dt_float
# TODO: use the "last_obs" and the "change_bus" in case of "set_bus" not available
class ConnectivityConverter(Converter):
"""
In this converter, you have as many output as pairs of object that can be connected, and your model is asked
to output 0 if he wants these elements disconnected and 1 if he wants them connected.
This type of modeling is rather hard to "get working" the first time, especially because some "conflict" might
    appear. For example, consider three objects (line for example) on a given substation. You can choose to "connect
    A and B", connect "B and C" but "**not connect** A and C". In this case you need an algorithm to disambiguate your
action.
The section "examples" below provides a concrete example on what we mean by that and how to make it
working.
It can not yet be converted to / from gym space. If this feature is interesting for you, you can
reply to the issue posted at https://github.com/rte-france/Grid2Op/issues/16
**NB** compare to :class:`IdToAct` this converter allows for a smaller size. If you have N elements connected at
a substation, you end up with `N*(N-1)/2` different action. Compare to IdToAct though, it is expected that your
algorithm produces more than 1 outputs.
**VERY IMPORTANT** : for this converter to work, it needs to remember the previous state of the grid, so you
absolutely need to call its method :func:`ConnectivityConverter.convert_obs` a each observation.
.. note:: This converter does not allow to affect the status (connected / disconnected) of the objects, neither
to perform redispatching actions, neither to perform actions on storage units.
Examples
--------
TODO: documentation in progress
The idea of this converter is to allow to provide an interface if you want to provide action with what elements
should be connected together.
This is useful if an agent should reason on the target graph of the grid rather than reasoning on which
elements are connected on which busbar.
This converters then expects a vector of floats, all in [0., 1.]. The number of components of this vector
is determined once and for all at the initialization and is accessible with `converter.n`. This is determined
with the following rule. A pair of element of the grid el_i, el_j (elements here is: load, generator, storage
unit, origin side of a powerline, extremity side of a powerline):
- el_i and el_j belongs at the same substation
- the substation to which el_i and el_j belongs counts 4 or more elements
You can access which pair of elements is encoded for each component of this vector with
:func:`ConnectivityConverter.which_pairs`.
To create use the connectivity converter, you can:
.. code-block:: python
import grid2op
import numpy as np
from grid2op.Converter import ConnectivityConverter
env = grid2op.make("rte_case14_realistic", test=True)
converter = ConnectivityConverter(env.action_space)
# it's a good practice to seed the element that can be, for reproducibility
converter.seed(0)
# to avoid creating illegal actions affecting more than the allowed number of parameters
converter.init_converter(max_sub_changed=env.parameters.MAX_SUB_CHANGED)
This converter is expected to receive a vector of the proper size with components being floats, representing:
- -1.000...: the pairs should not be connected
- 1.000...: the pairs should be connected
- 0.000...: i have no opinion on this pairs of objects
It uses an heuristic (greedy) to compute a resulting target topology (the vector `act.set_bus`) that tries to
minimize the "disagreement" between the connectivity provided and the topology computed.
More concretely, say you have 4 objects el1, el2, el3 and el4 connected on a substation. You want:
- el1 connected to el2 with score of 0.7
In the above example, we can change the connectivity of 77 pairs of elements, being:
.. code-block:: python
print(f"The connectivity of {converter.n} pairs of elements can be affected")
for i in range(converter.n):
sub_id, (type0, id0), (type1, id1) = converter.which_pairs(i)
print(f"You can decide to connect / disconnect the \"{type0} id {id0}\" and the \"{type1} id {id1}\" at "
f"substation {sub_id} by action on component {i}")
For example, if you want, at substation 1 to have:
- "line_ex id 0", "line_or id 2" and "load id 0" on the same busbar
- "line_or id 3", "line_or id 4" and "gen id 0" on the other one
You can (this is one of the possible way to do it):
.. code-block:: python
encoded_act = np.zeros(converter.n)
encoded_act[0] = 1 # i want to connect "line_ex id 0" and the "line_or id 2"
encoded_act[1] = -1 # i don't want to connect "line_ex id 0" and the "line_or id 3"
encoded_act[2] = -1 # i don't want to connect "line_ex id 0" and the "line_or id 4"
encoded_act[3] = -1 # i don't want to connect "line_ex id 0" and the "gen id 0"
encoded_act[4] = 1 # i want to connect "line_ex id 0" and the "load id 0"
# and now retrieve the corresponding grid2op action:
grid2op_act = converter.convert_act(encoded_act)
print(grid2op_act)
Another one, to express exactly the same action:
.. code-block:: python
encoded_act2 = np.zeros(converter.n)
encoded_act2[0] = 1 # i want to connect "line_ex id 0" and the "line_or id 2"
encoded_act2[4] = 1 # i want to connect "line_ex id 0" and the "load id 0"
encoded_act2[9] = 1 # i want to connect "line_or id 3" and the "line_or id 4"
encoded_act2[10] = 1 # i want to connect "line_or id 3" and the "gen id 0"
encoded_act2[14] = -1 # i don't want to connect "gen id 0" and the "load id 0"
# and now retrieve the corresponding grid2op action:
grid2op_act2 = converter.convert_act(encoded_act2)
print(grid2op_act2)
In most cases, "something" (*eg* a neural network) is responsible to predict the "encoded action" and this
converter can then be used to convert it to a valid grid2op action.
Notes
------
This converter does not allow to connect / disconnect any object. This feature might be added in the future.
This converter takes as input a vector of (-1, 1) each component representing the "score" of the corresponding
pairs of element on the grid to be connected or disconnected.
A perfect converter would minimize (the variables are the component of `act.set_bus` vector that can
be either 0 (i dont change) 1 or 2) the sum, for
all index `i` fo pairs of elements in the grid el_k, el_j (that are encoded at position `i`)
`1 - encoded_act[i]` if the pairs of elements el_k, el_j are on the same
busbar {*i.e* iif (`act.set_bus[el_k] == 1` and `act.set_bus[el_j] == 1`) or
(`act.set_bus[el_k] == 2` and `act.set_bus[el_j] == 2`)} and `1 + encoded_act[i]` otherwise
{*i.e* iif (`act.set_bus[el_k] == 1` and `act.set_bus[el_j] == 2`) or
(`act.set_bus[el_k] == 2` and `act.set_bus[el_j] == 1`)}.
For now a heuristic based on a greedy approach is used. This is far from giving an "optimal" solution.
This heuristic tries to act on as little elements as possible.
"""
def __init__(self, action_space):
    """Build the converter on top of `action_space`, which must support "set_bus" actions."""
    if not action_space.supports_type("set_bus"):
        raise RuntimeError(
            "It is not possible to use the connectivity converter if the action space do not "
            'support the "set_bus" argument.'
        )
    Converter.__init__(self, action_space)
    self.__class__ = ConnectivityConverter.init_grid(action_space)
    # description of every pair of elements that can be put together / split apart
    self.obj_type = []
    self.subs_ids = np.array([], dtype=dt_int)
    self.pos_topo = np.array([], dtype=dt_int)
    # size of the encoded action; stays -1 until init_converter() is called
    self.n = -1
    self.last_obs = None
    self.max_sub_changed = self.n_sub
    self.last_disagreement = None
    # index of the solution retained when "explore" is used in convert_act
    self.indx_sel = None
def init_converter(self, all_actions=None, **kwargs):
    """Enumerate all pairs of elements that can be connected together.

    Only substations with at least 4 elements are considered (below that, the
    topology cannot really be changed in an interesting way). For each such
    substation, every unordered pair of its elements becomes one component of
    the encoded action.

    Parameters
    ----------
    all_actions:
        Unused by this converter (kept for interface compatibility).
    kwargs:
        May contain "max_sub_changed" (``int``) to limit the number of
        substations modified by a single converted action.
    """
    # compute all pairs of elements that can be connected together
    self.pos_topo = []
    self.subs_ids = []
    # bug fix: obj_type must be reset too, otherwise calling init_converter twice
    # duplicates the pair descriptions and desynchronizes obj_type from pos_topo
    self.obj_type = []
    for sub_id, nb_element in enumerate(self.sub_info):
        if nb_element < 4:
            continue
        # ids (grid-wide) of each kind of element connected to this substation
        c_id = np.where(self.load_to_subid == sub_id)[0]
        g_id = np.where(self.gen_to_subid == sub_id)[0]
        lor_id = np.where(self.line_or_to_subid == sub_id)[0]
        lex_id = np.where(self.line_ex_to_subid == sub_id)[0]
        storage_id = np.where(self.storage_to_subid == sub_id)[0]
        # positions (within the substation) of each kind of element
        c_pos = self.load_to_sub_pos[self.load_to_subid == sub_id]
        g_pos = self.gen_to_sub_pos[self.gen_to_subid == sub_id]
        lor_pos = self.line_or_to_sub_pos[self.line_or_to_subid == sub_id]
        lex_pos = self.line_ex_to_sub_pos[self.line_ex_to_subid == sub_id]
        storage_pos = self.storage_to_sub_pos[self.storage_to_subid == sub_id]
        my_types = []
        pos_topo = []
        # walk the substation positions in order, attributing the next free id
        # of the matching element kind to each position
        next_load_ = 0
        next_gen_ = 0
        next_lor_ = 0
        next_lex_ = 0
        next_storage_ = 0
        next_load = c_id[next_load_] if c_id.shape[0] > 0 else None
        next_gen = g_id[next_gen_] if g_id.shape[0] > 0 else None
        next_lor = lor_id[next_lor_] if lor_id.shape[0] > 0 else None
        next_lex = lex_id[next_lex_] if lex_id.shape[0] > 0 else None
        next_storage = (
            storage_id[next_storage_] if storage_id.shape[0] > 0 else None
        )
        for id_i in range(nb_element):
            type_i, id_obj_i = self._get_id_from_obj(
                id_i,
                c_pos,
                g_pos,
                lor_pos,
                lex_pos,
                storage_pos,
                next_load,
                next_gen,
                next_lor,
                next_lex,
                next_storage,
            )
            if type_i == "load":
                next_load_ += 1
                next_load = c_id[next_load_] if c_id.shape[0] > next_load_ else None
            elif type_i == "gen":
                next_gen_ += 1
                next_gen = g_id[next_gen_] if g_id.shape[0] > next_gen_ else None
            elif type_i == "line_or":
                next_lor_ += 1
                next_lor = (
                    lor_id[next_lor_] if lor_id.shape[0] > next_lor_ else None
                )
            elif type_i == "line_ex":
                next_lex_ += 1
                next_lex = (
                    lex_id[next_lex_] if lex_id.shape[0] > next_lex_ else None
                )
            elif type_i == "storage":
                next_storage_ += 1
                next_storage = (
                    storage_id[next_storage_]
                    if storage_id.shape[0] > next_storage_
                    else None
                )
            else:
                raise RuntimeError(f"Unsupported object type: {type_i}")
            my_types.append((type_i, id_obj_i))
            pos_topo.append(self._get_pos_topo(type_i, id_obj_i))
        # register every unordered pair (id_i, id_j) with id_i < id_j
        for id_i in range(nb_element):
            id_i_ = my_types[id_i]
            pos_topo_i = pos_topo[id_i]
            for id_j in range(id_i + 1, nb_element):
                id_j_ = my_types[id_j]
                pos_topo_j = pos_topo[id_j]
                self.obj_type.append((sub_id, id_i_, id_j_))
                self.pos_topo.append((pos_topo_i, pos_topo_j))
                self.subs_ids.append(sub_id)
    self.pos_topo = np.array(self.pos_topo)
    self.subs_ids = np.array(self.subs_ids)
    # one component of the encoded action per registered pair
    self.n = self.subs_ids.shape[0]
    if "max_sub_changed" in kwargs:
        self.max_sub_changed = int(kwargs["max_sub_changed"])
def _get_id_from_obj(
self,
id_,
c_pos,
g_pos,
lor_pos,
lex_pos,
storage_pos,
next_load,
next_gen,
next_lor,
next_lex,
next_storage,
):
if id_ in c_pos:
type_ = "load"
id_obj_ = next_load
elif id_ in g_pos:
type_ = "gen"
id_obj_ = next_gen
elif id_ in lor_pos:
type_ = "line_or"
id_obj_ = next_lor
elif id_ in lex_pos:
type_ = "line_ex"
id_obj_ = next_lex
elif id_ in storage_pos:
type_ = "storage"
id_obj_ = next_storage
else:
raise RuntimeError("Invalid grid")
return type_, id_obj_
def _get_pos_topo(self, type_, id_obj):
if type_ == "load":
res = self.load_pos_topo_vect[id_obj]
elif type_ == "gen":
res = self.gen_pos_topo_vect[id_obj]
elif type_ == "line_or":
res = self.line_or_pos_topo_vect[id_obj]
elif type_ == "line_ex":
res = self.line_ex_pos_topo_vect[id_obj]
elif type_ == "storage":
res = self.storage_pos_topo_vect[id_obj]
else:
raise RuntimeError("Invalid grid")
return res
def convert_obs(self, obs):
    """
    This function is used to convert an observation into something that is easier to manipulate.

    **VERY IMPORTANT**: for this converter to work, it needs to remember the previous state of the grid, so
    you absolutely need to call its method :func:`ConnectivityConverter.convert_obs` at each observation.

    Parameters
    ----------
    obs: :class:`grid2op.Observation.Observation`
        The input observation.

    Returns
    -------
    transformed_obs: ``object``
        A different representation of the input observation, typically represented as a 1d vector that can
        be processed by a neural network.
    """
    # memorize the grid state; the conversion itself is the identity
    self.last_obs = obs
    return self.last_obs
def convert_act(self, encoded_act, explore=None):
    """
    For this converter, encoded_act is a vector, with the same size as there are possible ways to reconfigure
    the grid.

    And it finds a consistent state that does not break too much the connectivity asked.

    NOTE: there might be better ways to do it... This is computed with a greedy approach for now.

    Parameters
    ----------
    encoded_act: ``numpy.ndarray``
        This action should have the same size as the number of pairs of element that can be connected. A number
        close to -1 means you don't want to connect the pair together, a number close to +1 means you want the
        pairs to be connected together.

    explore: ``int``
        Defaults to ``None`` to be purely greedy. The higher `explore` the closer the returned solution will be
        to the "global optimum", but the longer it will take. ``None`` will return the greedy approach. Note
        that this is definitely not optimized for performance, and casting this problem into an optimization
        problem and solving this combinatorial optimization would definitely make this converter more useful.

    Returns
    -------
    act: :class:`grid2op.Action.BaseAction`
        The action that is usable by grid2op (after conversion) [the action space must be compatible with
        the "set_bus" key word]

    Raises
    ------
    RuntimeError
        If `encoded_act` has the wrong shape, has a component outside [-1, 1], or if `explore` is neither
        ``None`` nor an ``int``.
    """
    encoded_act = np.array(encoded_act).astype(dt_float)
    if encoded_act.shape[0] != self.n:
        raise RuntimeError(
            f"Invalid encoded_act shape provided it should be {self.n}"
        )
    if np.any((encoded_act < -1.0) | (encoded_act > 1.0)):
        errors = (encoded_act < -1.0) | (encoded_act > 1.0)
        indexes = np.where(errors)[0]
        raise RuntimeError(
            f'All elements of "encoded_act" must be in range [-1, 1]. Please check your '
            f"encoded action at positions {indexes[:5]}... (only first 5 displayed)"
        )
    # only the non-zero components express a preference
    act_want_change = encoded_act != 0.0
    encoded_act_filtered = encoded_act[act_want_change]
    if encoded_act_filtered.shape[0] == 0:
        # do nothing action in this case
        return super().__call__()
    # treat the strongest preferences (largest |score|) first
    argsort_changed = np.argsort(-np.abs(encoded_act_filtered))
    argsort = np.where(act_want_change)[0][argsort_changed]
    act, disag = self._aux_act_from_order(argsort, encoded_act)
    self.indx_sel = 0
    if explore is None:
        pass
    elif isinstance(explore, int):
        # TODO better way here without a doubt! (combinatorial optimization, google OR-tools for example)
        for nb_exp in range(explore):
            # shuffle a bit the order in which i will build the action
            # NB: "1 *" makes a copy so that `argsort` itself is never shuffled
            this_order = 1 * argsort
            self.space_prng.shuffle(this_order)
            # and now compute the action and the disagreement
            tmp_act, tmp_disag = self._aux_act_from_order(this_order, encoded_act)
            # if disagreement is lower than previous one, then take this action instead
            if tmp_disag < disag:
                self.indx_sel = nb_exp + 1
                act = tmp_act
                disag = tmp_disag
    else:
        raise RuntimeError('Unknown parameters "explore" provided.')
    self.last_disagreement = disag
    return act
def _aux_act_from_order(self, order, encoded_act):
    """Greedily build a "set_bus" action following `order`, and score it.

    `order` contains indices of pairs (rows of ``self.pos_topo``), typically
    sorted by decreasing |encoded_act|. Returns ``(act, disagreement)``.
    """
    # TODO some part should be able to be vectorize i imagine
    topo_vect = np.zeros(self.dim_topo, dtype=dt_int)
    subs_added = np.full(self.n_sub, fill_value=False)
    sub_changed = 0
    order_id = (
        []
    )  # id of the pairs i have the right to modify (i can't always modify everything due to
    # limit on self.max_sub_changed
    for el in order:
        my_sub = self.subs_ids[el]
        if not subs_added[my_sub]:
            if sub_changed < self.max_sub_changed:
                subs_added[my_sub] = True
                topo_vect[
                    self.pos_topo[el, 0]
                ] = 1  # assign to +1 the first element of the substation met
                sub_changed += 1
                order_id.append(el)  # i need to modify this element later on:
                # because it's the first element of a substation and i have the right to modify the substation
        else:
            # i need to modify this element later on:
            # because i modify its substation already.
            order_id.append(el)
    order = np.array(order_id)
    # propagate the bus assignments until every retained pair is resolved
    while order.shape[0] > 0:
        new_order = []
        for el in order:
            bus_1_id = self.pos_topo[el, 0]
            bus_2_id = self.pos_topo[el, 1]
            # an element "needs" a bus when nothing has been assigned to it yet
            need_1 = topo_vect[bus_1_id] <= 0
            need_2 = topo_vect[bus_2_id] <= 0
            val = encoded_act[el]
            if need_2 and not need_1:
                if val > 0.0:
                    # they are likely on same bus
                    topo_vect[bus_2_id] = topo_vect[bus_1_id]
                elif val < 0.0:
                    # they are likely on different bus (3 - x swaps bus 1 <-> bus 2)
                    topo_vect[bus_2_id] = 1 - topo_vect[bus_1_id] + 2
            elif need_1 and not need_2:
                if val > 0.0:
                    # they are likely on same bus
                    topo_vect[bus_1_id] = topo_vect[bus_2_id]
                elif val < 0.0:
                    # they are likely on different bus
                    topo_vect[bus_1_id] = 1 - topo_vect[bus_2_id] + 2
            elif need_1 and need_2:
                # i don't have enough information yet to find a good placement for these
                new_order.append(el)
        if set(new_order) == set(order):
            # no progress was made: i don't have constraints to solve the problem,
            # so i add something artificially to break the tie
            topo_vect[self.pos_topo[new_order[0], 0]] = 1
        order = np.array(new_order)
    act = super().__call__({"set_bus": topo_vect})
    dis_ = self._compute_disagreement(encoded_act, topo_vect)
    return act, dis_
def _compute_disagreement(self, encoded_act, topo_vect):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Computes the disagreement between the encoded act and the proposed topo_vect
**NB** if encoded act is random uniform, and topo_vect is full of 1, then disagreement is, on average 0.5.
Lower disagreement is always better.
"""
set_component = encoded_act != 0.0
bus_el1 = topo_vect[self.pos_topo[:, 0]]
bus_el2 = topo_vect[self.pos_topo[:, 1]]
# for the element that will connected
together = 1 - encoded_act[(bus_el1 == bus_el2) & (bus_el1 > 0) & set_component]
# for the element that will be disconnected
split = (
1
+ encoded_act[
(bus_el1 != bus_el2) & (bus_el1 > 0) & (bus_el2 > 0) & set_component
]
)
# for the elements that are not affected by the action (i don't know where they will be: maximum penalty)
not_set = np.full(
np.sum(((bus_el1 == 0) | (bus_el2 == 0)) & set_component),
fill_value=2,
dtype=dt_int,
)
# total disagreement
raw_disag = together.sum() + split.sum() + not_set.sum()
scaled_disag = raw_disag / self.n * 0.5 # to have something between 0 and 1
return scaled_disag
def sample(self):
    """Draw a random encoded action (components uniform in [-1, 1)) and convert it."""
    random_scores = 2.0 * self.space_prng.rand(self.n) - 1.0
    return self.convert_act(random_scores)
def which_pairs(self, pair_id):
    """
    Returns a description of the pair of elements that is encoded at position `pair_id` of the
    `encoded_act`.

    Parameters
    ----------
    pair_id: ``int``
        Index (in [0, self.n)) of the component of the encoded action.

    Returns
    -------
    res: ``tuple``
        Tuple of 3 elements containing:

        - `sub_id` the id of the substation affected by the component `pair_id`
        - (obj_type_1, obj_id_1) the type (*eg* "load", "gen", ...) and id of the first element of the pair
        - (obj_type_2, obj_id_2) the type and id of the second element of the pair

    Raises
    ------
    RuntimeError
        If `pair_id` cannot be converted to an integer, is negative, or is out of range.
    """
    try:
        pair_id = int(pair_id)
    except Exception as exc_:
        # chain the original exception so the root cause shows up in the traceback
        raise RuntimeError(
            f'Invalid "pair_id" provided, it should be of integer type. Error was: \n"{exc_}"'
        ) from exc_
    if pair_id < 0:
        raise RuntimeError(f'"pair_id" should be positive. You provided {pair_id}')
    if pair_id >= self.n:
        raise RuntimeError(
            f'"pair_id" should be lower than the size of the action space, in this case '
            f"{self.n}. You provided {pair_id}"
        )
    return self.obj_type[pair_id]
def do_nothing_encoded_act(self):
    """Return the encoded action (an all-zero vector) that asks for no change at all."""
    res = np.zeros(shape=self.n, dtype=dt_float)
    return res
| 23,976 | 42.201802 | 118 | py |
Grid2Op | Grid2Op-master/grid2op/Converter/Converters.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action import ActionSpace
class Converter(ActionSpace):
    """
    This base class should be used to implement any converter.

    A converter is an :class:`grid2op.Action.ActionSpace` that translates between the grid2op
    representation of observations / actions and another, "encoded" representation (for example
    integer ids, or plain vectors) that is easier to use with machine learning models. Subclasses
    are expected to override :func:`Converter.convert_obs` and :func:`Converter.convert_act`
    (both are the identity by default), and optionally the gym-related methods.
    """

    def __init__(self, action_space):
        # wrap the given action space; the converter shares its pseudo random
        # generator (and seed) so that seeding the environment also seeds the converter
        ActionSpace.__init__(
            self, action_space, action_space.legal_action, action_space.subtype
        )
        self.space_prng = action_space.space_prng
        self.seed_used = action_space.seed_used

    def init_converter(self, **kwargs):
        """Initialize the converter. Does nothing by default; subclasses may override."""
        pass

    def convert_obs(self, obs):
        """
        This function is used to convert an observation into something that is easier to manipulate.

        Parameters
        ----------
        obs: :class:`grid2op.Observation.Observation`
            The input observation.

        Returns
        -------
        transformed_obs: ``object``
            An different representation of the input observation, typically represented as a 1d vector that can be
            processed by a neural networks.

        """
        # identity by default: subclasses override to provide a real encoding
        transformed_obs = obs
        return transformed_obs

    def convert_act(self, encoded_act):
        """
        This function will transform the action, encoded somehow (for example identified by an id, represented by
        an integer) to a valid actions that can be processed by the environment.

        Parameters
        ----------
        encoded_act: ``object``
            Representation of an action, as a vector or an integer etc.

        Returns
        -------
        regular_act: :class:`grid2op.Action.Action`
            The action corresponding to the `encoded_action` above converted into a format that can be processed
            by the environment.

        """
        # identity by default: subclasses override to provide a real decoding
        regular_act = encoded_act
        return regular_act

    def get_gym_dict(self, cls_gym):
        """
        To convert this space into a open ai gym space. This function returns a dictionnary used
        to initialize such a converter.

        It should not be used directly. Prefer to use the :class:`grid2op.Converter.GymConverter`

        cls_gym represents either :class:`grid2op.gym_compat.LegacyGymActionSpace` or
        :class:`grid2op.gym_compat.GymnasiumActionSpace`
        """
        # gym support is opt-in: only subclasses that implement it override this
        raise NotImplementedError(
            'Impossible to convert the converter "{}" automatically '
            "into a gym space (or gym is not installed on your machine)."
            "".format(self)
        )

    def convert_action_from_gym(self, gymlike_action):
        """
        Convert the action (represented as a gym object, in fact an ordered dict) as an action
        compatible with this converter.

        This is not compatible with all converters and you need to install gym for it to work.

        Parameters
        ----------
        gymlike_action:
            the action to be converted to an action compatible with the action space representation

        Returns
        -------
        res:
            The action converted to be understandable by this converter.

        Examples
        ---------
        Here is an example on how to use this feature with the :class:`grid2op.Converter.IdToAct`
        converter (imports are not shown here).

        .. code-block:: python

            # create the environment
            env = grid2op.make()
            # create the converter
            converter = IdToAct(env.action_space)
            # create the gym action space
            gym_action_space = GymObservationSpace(action_space=converter)

            gym_action = gym_action_space.sample()
            converter_action = converter.convert_action_from_gym(gym_action)  # this represents the same action
            grid2op_action = converter.convert_act(converter_action)

        """
        raise NotImplementedError(
            "Impossible to convert the gym-like action automatically "
            'into the converter representation for "{}" '
            "".format(self)
        )

    def convert_action_to_gym(self, action):
        """
        Convert the action (compatible with this converter) into a "gym action" (ie an OrderedDict)

        This is not compatible with all converters and you need to install gym for it to work.

        Parameters
        ----------
        action:
            the action to be converted to an action compatible with the action space representation

        Returns
        -------
        res:
            The action converted to a "gym" model (can be used by a machine learning model)

        Examples
        ---------
        Here is an example on how to use this feature with the :class:`grid2op.Converter.IdToAct`
        converter (imports are not shown here).

        .. code-block:: python

            # create the environment
            env = grid2op.make()
            # create the converter
            converter = IdToAct(env.action_space)
            # create the gym action space
            gym_action_space = GymObservationSpace(action_space=converter)

            converter_action = converter.sample()
            gym_action = converter.to_gym(converter_action)  # this represents the same action

        """
        raise NotImplementedError(
            "Impossible to convert the gym-like action automatically "
            'into the converter representation for "{}" '
            "".format(self)
        )
| 5,788 | 33.664671 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/Converter/GymConverter.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
class GymObservationSpace:
    """Deprecated placeholder: the real class now lives in ``grid2op.gym_compat``."""

    def __init__(self, env):
        # instantiating this legacy name is always an error pointing to the new location
        message = (
            'The "GymObservationSpace" has been moved to "grid2op.gym" module instead.\n'
            'Note to update: use "from grid2op.gym_compat import GymObservationSpace"'
        )
        raise RuntimeError(message)
class GymActionSpace:
    """Deprecated placeholder: the real class now lives in ``grid2op.gym_compat``."""

    def __init__(self, action_space):
        # instantiating this legacy name is always an error pointing to the new location
        message = (
            'The "GymActionSpace" has been moved to "grid2op.gym" module instead.\n'
            'Note to update: use "from grid2op.gym_compat import GymActionSpace"'
        )
        raise RuntimeError(message)
| 1,002 | 40.791667 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Converter/IdToAct.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import numpy as np
from collections import OrderedDict
from grid2op.Action import BaseAction
from grid2op.Converter.Converters import Converter
from grid2op.Exceptions.Grid2OpException import Grid2OpException
from grid2op.dtypes import dt_float, dt_int, int_types
class IdToAct(Converter):
"""
This type of converter allows to represent action with unique id. Instead of manipulating complex objects, it allows
to manipulate only positive integer.
The list of all actions can either be the list of all possible unary actions (see below for a complete
description) or by a given pre computed list.
A "unary action" is an action that consists only in acting on one "concept" it includes:
- disconnecting a single powerline
- reconnecting a single powerline and connect it to bus xxx on its origin end and yyy on its extremity end
- changing the topology of a single substation
- performing redispatching on a single generator
- performing curtailment on a single generator
- performing action on a single storage unit
Examples of non unary actions include:
- disconnection / reconnection of 2 or more powerlines
- change of the configuration of 2 or more substations
- disconnection / reconnection of a single powerline and change of the configration of a single substation
**NB** All the actions created automatically are unary. For the L2RPN 2019, agent could be allowed to act with non
unary actions, for example by disconnecting a powerline and reconfiguring a substation. This class would not
allow to do such action at one time step.
**NB** The actions that are initialized by default uses the "set" way and not the "change" way (see the description
of :class:`grid2op.BaseAction.BaseAction` for more information).
For each powerline, 5 different actions will be computed:
- disconnect it
- reconnect it and connect it to bus 1 on "origin" end ann bus 1 on "extremity" end
- reconnect it and connect it to bus 1 on "origin" end ann bus 2 on "extremity" end
- reconnect it and connect it to bus 2 on "origin" end ann bus 1 on "extremity" end
- reconnect it and connect it to bus 2 on "origin" end ann bus 2 on "extremity" end
Actions corresponding to all topologies are also used by default. See
:func:`grid2op.BaseAction.ActionSpace.get_all_unitary_topologies_set` for more information.
In this converter:
- `encoded_act` are positive integer, representing the index of the actions.
- `transformed_obs` are regular observations.
**NB** The number of actions in this converter can be especially big. For example, if a substation counts N elements
there are roughly 2^(N-1) possible actions in this substation. This means if there are a single substation with
more than N = 15 or 16 elements, the amount of actions (for this substation alone) will be higher than 16.000
which makes it rather difficult to handle for most machine learning algorithm. Be carefull with that !
"""
def __init__(self, action_space):
    """Build the converter; the action list initially contains only "do nothing"."""
    Converter.__init__(self, action_space)
    self.__class__ = IdToAct.init_grid(action_space)
    # the "do nothing" action is always available, at id 0
    self.all_actions = [super().__call__()]
    self.n = 1
    self._init_size = action_space.size()
    self.kwargs_init = {}
def init_converter(self, all_actions=None, **kwargs):
    """
    This function is used to initialized the converter. When the converter is created, this method should be
    called otherwise the converter might be in an unstable state.

    Parameters
    ----------
    all_actions: ``None``, ``list``, ``str``, ``np.ndarray``
        See the example section for more information.

        if `all_actions` is:

        - ``None``: the action space will be built from scratch using the provided key word arguments.
        - a ``list``: The (ordered) list of all actions that the agent will be able to perform.
          If given a number ``i`` the
          converter will return action ``all_actions[i]``. In the "pacman" game, this vector could be
          ["up", "down", "left", "right"], in this case "up" would be encode by 0, "down" by 1, "left" by 2 and
          "right" by 3. If nothing is provided, the converter will output all the unary actions possible for
          the environment. Be careful, computing all these actions might take some time.
        - a ``str`` this will be considered as a path where a previous converter has been saved. You need to
          provide the full path, including the filename and its extension. It gives something like:
          "/path/where/it/is/saved/action_space_vect.npy"

    kwargs:
        other keyword arguments (all considered to be ``True`` by default) that can be:

        set_line_status: ``bool``
            Whether you want to include the "set line status" in your action space
            (in case the original action space allows it)

        change_line_status: ``bool``
            Whether you want to include the "change line status" in your action space
            (in case the original action space allows it)

        set_topo_vect: ``bool``
            Whether you want to include the "set_bus" in your action space
            (in case the original action space allows it)

        change_bus_vect: ``bool``
            Whether you want to include the "change_bus" in your action space
            (in case the original action space allows it)

        redispatch: ``bool``
            Whether you want to include the "redispatch" in your action space
            (in case the original action space allows it)

        curtail: ``bool``
            Whether you want to include the "curtailment" in your action space
            (in case the original action space allows it)

        storage: ``bool``
            Whether you want to include the "storage unit" in your action space
            (in case the original action space allows it)

    Examples
    --------
    Here is an example of a code that will: make a converter by selecting some action. Save it, and then restore
    its original state to be used elsewhere.

    .. code-block:: python

        import grid2op
        from grid2op.Converter import IdToAct
        env = grid2op.make()
        converter = IdToAct(env.action_space)

        # the path were will save it
        path_ = "/path/where/it/is/saved/"
        name_file = "tmp_convert.npy"

        # init the converter, the first time, here by passing some key word arguments, to not consider
        # redispatching for example
        converter.init_converter(redispatch=False)
        converter.save(path_, name_file)

        # i just do an action, for example the number 27... whatever it does does not matter here
        act = converter.convert_act(27)

        converter2 = IdToAct(self.env.action_space)
        converter2.init_converter(all_actions=os.path.join(path_, name_file))
        act2 = converter2.convert_act(27)

        assert act == act2  # this is ``True`` the converter has properly been saved.

    """
    self.kwargs_init = kwargs
    if all_actions is None:
        self.all_actions = []
        # add the do nothing action, always
        self.all_actions.append(super().__call__())
        # each family of unary actions is included only if the underlying action
        # space supports it AND the corresponding kwarg is not set to False
        if "_set_line_status" in self._template_act.attr_list_vect:
            # lines 'set'
            include_ = True
            if "set_line_status" in kwargs:
                include_ = kwargs["set_line_status"]
            if include_:
                self.all_actions += self.get_all_unitary_line_set(self)
        if "_switch_line_status" in self._template_act.attr_list_vect:
            # lines 'change'
            include_ = True
            if "change_line_status" in kwargs:
                include_ = kwargs["change_line_status"]
            if include_:
                self.all_actions += self.get_all_unitary_line_change(self)
        if "_set_topo_vect" in self._template_act.attr_list_vect:
            # topologies 'set'
            include_ = True
            if "set_topo_vect" in kwargs:
                include_ = kwargs["set_topo_vect"]
            if include_:
                self.all_actions += self.get_all_unitary_topologies_set(self)
        if "_change_bus_vect" in self._template_act.attr_list_vect:
            # topologies 'change'
            include_ = True
            if "change_bus_vect" in kwargs:
                include_ = kwargs["change_bus_vect"]
            if include_:
                self.all_actions += self.get_all_unitary_topologies_change(self)
        if "_redispatch" in self._template_act.attr_list_vect:
            # redispatch (transformed to discrete variables)
            include_ = True
            if "redispatch" in kwargs:
                include_ = kwargs["redispatch"]
            if include_:
                self.all_actions += self.get_all_unitary_redispatch(self)
        if "_curtail" in self._template_act.attr_list_vect:
            # curtailment (transformed to discrete variables)
            include_ = True
            if "curtail" in kwargs:
                include_ = kwargs["curtail"]
            if include_:
                self.all_actions += self.get_all_unitary_curtail(self)
        if "_storage_power" in self._template_act.attr_list_vect:
            # storage units (transformed to discrete variables)
            include_ = True
            if "storage" in kwargs:
                include_ = kwargs["storage"]
            if include_:
                self.all_actions += self.get_all_unitary_storage(self)
    elif isinstance(all_actions, str):
        # load the actions from the path provided
        if not os.path.exists(all_actions):
            raise FileNotFoundError(
                'No file located at "{}" where the actions should have been stored.'
                "".format(all_actions)
            )
        try:
            all_act = np.load(all_actions)
        except Exception as e:
            raise RuntimeError(
                'Impossible to load the data located at "{}" with error\n{}.'
                "".format(all_actions, e)
            )
        try:
            self.all_actions = np.array([self.__call__() for _ in all_act])
            for i, el in enumerate(all_act):
                self.all_actions[i].from_vect(el)
        except Exception as e:
            raise RuntimeError(
                'Impossible to convert the data located at "{}" into valid grid2op action. '
                "The error was:\n{}".format(all_actions, e)
            )
    elif isinstance(all_actions, (list, np.ndarray)):
        # assign the action to my actions
        possible_act = all_actions[0]
        if isinstance(possible_act, BaseAction):
            # list of grid2op action
            self.all_actions = np.array(all_actions)
        elif isinstance(possible_act, dict):
            # list of dictionnary (obtained with `act.as_serializable_dict()`)
            self.all_actions = np.array([self.__call__(el) for el in all_actions])
        else:
            # should be an array !
            try:
                self.all_actions = np.array([self.__call__() for _ in all_actions])
                for i, el in enumerate(all_actions):
                    self.all_actions[i].from_vect(el)
            except Exception as exc_:
                # bug fix: the message previously referenced `e`, which is undefined
                # in this handler (the exception is bound to `exc_`), causing a
                # NameError instead of the intended Grid2OpException
                raise Grid2OpException(
                    'Impossible to convert the data provided in "all_actions" into valid '
                    "grid2op action. The error was:\n{}".format(exc_)
                ) from exc_
    else:
        raise RuntimeError("Impossible to load the action provided.")
    self.n = len(self.all_actions)
def filter_action(self, filtering_fun):
    """
    This function allows you to "easily" filter generated actions.

    **NB** the action space will change after a call to this function, especially its size. It is NOT
    recommended to apply it once training has started.

    Parameters
    ----------
    filtering_fun: ``function``
        This takes an action as input and should retrieve ``True`` meaning "this action will be kept" or
        ``False`` meaning "this action will be dropped.

    """
    kept_actions = [action for action in self.all_actions if filtering_fun(action)]
    self.all_actions = np.array(kept_actions)
    self.n = len(self.all_actions)
def save(self, path, name="action_space_vect.npy"):
    """
    Save the action space as a numpy array that can be reloaded afterwards with the
    :func:`IdToAct.init_converter` function by setting argument `all_actions` to
    `os.path.join(path, name)`.

    The resulting object will be a numpy array of float. Each row of this array will be an action of the
    action space.

    Parameters
    ----------
    path: ``str``
        The path were to save the action space

    name: ``str``, optional
        The name of the numpy array stored on disk. By default its "action_space_vect.npy"

    Examples
    --------
    Here is an example of a code that will: make a converter by selecting some action. Save it, and then
    restore its original state to be used elsewhere.

    .. code-block:: python

        import grid2op
        from grid2op.Converter import IdToAct
        env = grid2op.make()
        converter = IdToAct(env.action_space)

        # the path were will save it
        path_ = "/path/where/it/is/saved/"
        name_file = "tmp_convert.npy"

        # init the converter, the first time, here by passing some key word arguments, to not consider
        # redispatching for example
        converter.init_converter(redispatch=False)
        converter.save(path_, name_file)

        # i just do an action, for example the number 27... whatever it does does not matter here
        act = converter.convert_act(27)

        converter2 = IdToAct(self.env.action_space)
        converter2.init_converter(all_actions=os.path.join(path_, name_file))
        act2 = converter2.convert_act(27)

        assert act == act2  # this is ``True`` the converter has properly been saved.

    """
    if not os.path.exists(path):
        raise FileNotFoundError(
            'Impossible to save the action space as the directory "{}" does not exist.'
            "".format(path)
        )
    if not os.path.isdir(path):
        raise NotADirectoryError(
            'The path to save the action space provided "{}" is not a directory.'
            "".format(path)
        )
    # one row per action: serialize each action then stack them as float
    all_vects = [action.to_vect() for action in self.all_actions]
    saved_npy = np.array(all_vects).astype(dtype=dt_float).reshape(self.n, -1)
    np.save(file=os.path.join(path, name), arr=saved_npy)
def sample(self):
    """
    Having defined a complete set of actions an agent can do, sampling from it is now made easy:
    one action among the n possible actions is drawn uniformly at random.

    Returns
    -------
    res: ``int``
        An id of an action.

    """
    return self.space_prng.randint(0, self.n, dtype=dt_int)
def convert_act(self, encoded_act):
    """
    In this converter, we suppose that "encoded_act" is an id of an action stored in the
    :attr:`IdToAct.all_actions` list.

    Converting an id of an action (here called "act") into a valid action is then easy: we just need to take
    the "act"-th element of :attr:`IdToAct.all_actions`.

    Parameters
    ----------
    encoded_act: ``int``
        The id of the action

    Returns
    -------
    action: :class:`grid2op.Action.Action`
        The action corresponding to id "act"

    """
    # simple table lookup: the id is the index in the precomputed action list
    action = self.all_actions[encoded_act]
    return action
def get_gym_dict(self, cls_gym):
    """
    Transform this converter into a dictionary that can be used to initialized a :class:`gym.spaces.Dict`.

    The converter is modeled as a "Discrete" gym space with as many elements as the number
    of different actions handled by this converter.

    This is available as the "action" keys of the spaces.Dict gym action space build from it.

    This function should not be used "as is", but rather through :class:`grid2op.Converter.GymConverter`

    cls_gym represents either :class:`grid2op.gym_compat.LegacyGymActionSpace` or
    :class:`grid2op.gym_compat.GymnasiumActionSpace`
    """
    # one discrete dimension, one choice per known action
    return {"action": cls_gym._DiscreteType(n=self.n)}
def convert_action_from_gym(self, gymlike_action):
"""
Convert the action (represented as a gym object, in fact an ordered dict) as an action
compatible with this converter.
This is not compatible with all converters and you need to install gym for it to work.
Parameters
----------
gymlike_action:
the action to be converted to an action compatible with the action space representation
Returns
-------
res:
The action converted to be understandable by this converter.
Examples
---------
Here is an example on how to use this feature with the :class:`grid2op.Converter.IdToAct`
converter (imports are not shown here).
.. code-block:: python
# create the environment
env = grid2op.make()
# create the converter
converter = IdToAct(env.action_space)
# create the gym action space
gym_action_space = GymObservationSpace(action_space=converter)
gym_action = gym_action_space.sample()
converter_action = converter.from_gym(gym_action) # this represents the same action
grid2op_action = converter.convert_act(converter_action) # this is a grid2op action
"""
res = gymlike_action["action"]
if not isinstance(res, int_types):
raise RuntimeError("TODO")
return int(res)
def convert_action_to_gym(self, action):
"""
Convert the action (compatible with this converter) into a "gym action" (ie an OrderedDict)
This is not compatible with all converters and you need to install gym for it to work.
Parameters
----------
action:
the action to be converted to an action compatible with the action space representation
Returns
-------
res:
The action converted to a "gym" model (can be used by a machine learning model)
Examples
---------
Here is an example on how to use this feature with the :class:`grid2op.Converter.IdToAct`
converter (imports are not shown here).
.. code-block:: python
# create the environment
env = grid2op.make()
# create the converter
converter = IdToAct(env.action_space)
# create the gym action space
gym_action_space = GymObservationSpace(action_space=converter)
converter_action = converter.sample()
gym_action = converter.to_gym(converter_action) # this represents the same action
"""
res = OrderedDict({"action": int(action)})
return res
| 20,808 | 41.123482 | 120 | py |
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from collections import OrderedDict
import numpy as np
from grid2op.Converter.Converters import Converter
from grid2op.dtypes import dt_float, dt_int
class ToVect(Converter):
    """
    This converter allows to manipulate the vector representation of the actions and observations.

    In this converter:

    - `encoded_act` are numpy ndarray
    - `transformed_obs` are numpy ndarray
      (read more about these concepts by looking at the documentation of
      :class:`grid2op.Converter.Converters`)

    It is convertible to a gym representation (like the original action space) in the form of a spaces.Box
    representing a continuous action space (even though most components are probably discrete).
    Note that if converted to a gym space, it is unlikely the method "sample" will yield valid results.
    Most of the time it should generate Ambiguous actions that will not be handled by grid2op.

    **NB** the conversion to a gym space should be done thanks to the :class:`grid2op.Converter.GymActionSpace`.
    """

    def __init__(self, action_space):
        Converter.__init__(self, action_space)
        self.init_action_space = action_space
        self.__class__ = ToVect.init_grid(action_space)
        # vector representation of the "do nothing" action, used as a template
        self.do_nothing_vect = action_space({}).to_vect()

        # attributes used for the gym conversion, lazily built by _init_gym_converter
        self.__gym_action_space = None
        self.__dict_space = None
        self.__order_gym = None
        self.__dtypes_gym = None
        self.__shapes_gym = None
        self.__order_gym_2_me = None
        self.__order_me_2_gym = None

    def convert_obs(self, obs):
        """
        This converter will match the observation to a vector, using the
        :func:`grid2op.Observation.BaseObservation.to_vect`
        function.

        Parameters
        ----------
        obs: :class:`grid2op.Observation.Observation`
            The observation, that will be processed into a numpy ndarray vector.

        Returns
        -------
        transformed_obs: ``numpy.ndarray``
            The vector representation of the observation.

        """
        return obs.to_vect()

    def convert_act(self, encoded_act):
        """
        In this converter `encoded_act` is a numpy ndarray. This function transforms it back to a
        valid action.

        Parameters
        ----------
        encoded_act: ``numpy.ndarray``
            The action, represented as a vector

        Returns
        -------
        regular_act: :class:`grid2op.Action.Action`
            The corresponding action transformed with the :func:`grid2op.Action.BaseAction.from_vect`.

        """
        res = self.__call__({})
        # check_legit=False: the vector is trusted here, ambiguity is handled by the environment
        res.from_vect(encoded_act, check_legit=False)
        return res

    def _init_gym_converter(self, cls_gym):
        """
        Lazily build (only once) the mapping between the gym ordering of the action
        attributes and the grid2op ordering (which follows ``attr_list_vect``).
        """
        if self.__gym_action_space is None:
            # i do that not to duplicate the code of the low / high bounds
            gym_action_space = cls_gym(self.init_action_space)
            low = tuple()
            high = tuple()
            order_gym = []
            dtypes = []
            shapes = []
            sizes = []  # for each gym attribute, the indices it occupies in the gym vector
            prev = 0
            for k, v in gym_action_space.spaces.items():
                order_gym.append(k)
                dtypes.append(v.dtype)
                if isinstance(v, cls_gym._MultiBinaryType):
                    low += tuple([0 for _ in range(v.n)])
                    high += tuple([1 for _ in range(v.n)])
                    my_size = v.n
                elif isinstance(v, cls_gym._BoxType):
                    low += tuple(v.low)
                    high += tuple(v.high)
                    my_size = v.low.shape[0]
                else:
                    # fixed: the original message contained a "{}" placeholder but
                    # never called .format(), so the type was not displayed
                    raise RuntimeError(
                        "Impossible to convert this converter to gym. Type {} of data "
                        "encountered while only MultiBinary and Box are supported for now."
                        "".format(type(v))
                    )
                shapes.append(my_size)
                sizes.append(np.arange(my_size) + prev)
                prev += my_size

            self.__gym_action_space = gym_action_space
            my_type = cls_gym._BoxType(low=np.array(low), high=np.array(high), dtype=dt_float)

            # build the index permutations between the two orderings
            order_me = []
            _order_gym_2_me = np.zeros(my_type.shape[0], dtype=dt_int) - 1
            _order_me_2_gym = np.zeros(my_type.shape[0], dtype=dt_int) - 1
            for el in self.init_action_space.attr_list_vect:
                order_me.append(cls_gym.keys_grid2op_2_human[el])

            prev = 0
            order_gym = list(gym_action_space.spaces.keys())
            for id_me, nm_attr in enumerate(order_me):
                id_gym = order_gym.index(nm_attr)
                index_me = np.arange(shapes[id_gym]) + prev
                _order_gym_2_me[sizes[id_gym]] = index_me
                _order_me_2_gym[index_me] = sizes[id_gym]
                prev += shapes[id_gym]
            self.__order_gym_2_me = _order_gym_2_me
            self.__order_me_2_gym = _order_me_2_gym
            self.__dict_space = {"action": my_type}
            self.__order_gym = order_gym
            self.__dtypes_gym = dtypes
            self.__shapes_gym = shapes

    def get_gym_dict(self, cls_gym):
        """
        Convert this action space into a "gym" action space represented by a dictionary (spaces.Dict)

        This dictionary counts only one key which is "action", and inside it is a Box space with
        the same bounds as the underlying grid2op action vector.

        cls_gym represents either :class:`grid2op.gym_compat.LegacyGymActionSpace` or
        :class:`grid2op.gym_compat.GymnasiumActionSpace`
        """
        self._init_gym_converter(cls_gym)
        return self.__dict_space

    def convert_action_from_gym(self, gymlike_action):
        """
        Convert a gym-like action (ie a Ordered dictionary with one key being only "action") to an
        action compatible with this converter (in this case a vectorized action).
        """
        vect = gymlike_action["action"]
        # reorder the components from the gym ordering to the grid2op ordering
        return vect[self.__order_gym_2_me]

    def convert_action_to_gym(self, action):
        """
        Convert an action of this converter (ie a numpy array) into an action that is usable with
        an open ai gym (ie a Ordered dictionary with one key being only "action")
        """
        # reorder the components from the grid2op ordering to the gym ordering
        res = OrderedDict({"action": action[self.__order_me_2_gym]})
        return res
| 6,833 | 39.43787 | 112 | py |
__all__ = [
    "BackendConverter",  # was imported below but missing from __all__
    "Converter",
    "ToVect",
    "IdToAct",
    "ConnectivityConverter",
    "AnalogStateConverter",
]

from grid2op.Converter.BackendConverter import BackendConverter
from grid2op.Converter.Converters import Converter
from grid2op.Converter.ToVect import ToVect
from grid2op.Converter.IdToAct import IdToAct
from grid2op.Converter.AnalogStateConverter import AnalogStateConverter
from grid2op.Converter.ConnectivityConverter import ConnectivityConverter

try:
    # gym is an optional dependency: only expose the gym converters when it is installed
    from grid2op.Converter.GymConverter import GymObservationSpace, GymActionSpace

    __all__.append("GymObservationSpace")
    __all__.append("GymActionSpace")
except ImportError:
    # you must install open ai gym to benefit from this converter
    pass
| 733 | 29.583333 | 82 | py |
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import sys
from tqdm import tqdm
import re
import tarfile
from grid2op.Exceptions import Grid2OpException
try:
import urllib.request
except Exception as e:
raise RuntimeError("Impossible to find library urllib. Please install it.")
# Template of a release-asset download URL: filled with (release tag, archive name)
URL_GRID2OP_DATA = "https://github.com/Tezirg/Grid2Op/releases/download/{}/{}"
# GitHub release tag under which the v0.1.0 datasets are published
DATASET_TAG_v0_1_0 = "datasets-v0.1.0"
# Maps each downloadable environment name to the URL of its ".tar.bz2" archive
DICT_URL_GRID2OP_DL = {
    "rte_case14_realistic": URL_GRID2OP_DATA.format(
        DATASET_TAG_v0_1_0, "rte_case14_realistic.tar.bz2"
    ),
    "rte_case14_redisp": URL_GRID2OP_DATA.format(
        DATASET_TAG_v0_1_0, "rte_case14_redisp.tar.bz2"
    ),
    "l2rpn_2019": URL_GRID2OP_DATA.format(DATASET_TAG_v0_1_0, "l2rpn_2019.tar.bz2"),
}
# Sorted, quoted list of the valid environment names, used in error messages
LI_VALID_ENV = sorted(['"{}"'.format(el) for el in DICT_URL_GRID2OP_DL.keys()])
class DownloadProgressBar(tqdm):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Small ``tqdm`` specialization used as a ``urllib`` "reporthook" to display
    the progress of a dataset download.
    """

    def update_to(self, b=1, bsize=1, tsize=None):
        # b: blocks transferred so far, bsize: block size in bytes,
        # tsize: total download size when the server reports it
        if tsize is not None:
            self.total = tsize
        downloaded = b * bsize
        # tqdm.update takes an increment, hence the subtraction of self.n
        self.update(downloaded - self.n)
def download_url(url, output_path):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Download the file located at ``url`` and save it to ``output_path``,
    displaying a progress bar while the transfer is running.

    Parameters
    ----------
    url: ``str``
        The url of the file to download

    output_path: ``str``
        The path where the data will be stored.

    """
    # label the progress bar with the name of the downloaded file
    file_name = url.split("/")[-1]
    with DownloadProgressBar(
        unit="B", unit_scale=True, miniters=1, desc=file_name
    ) as pbar:
        urllib.request.urlretrieve(url, filename=output_path, reporthook=pbar.update_to)
def _aux_download(url, dataset_name, path_data, ds_name_dl=None):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Download the archive at ``url``, extract it inside ``path_data`` and, when the
    archive name ``ds_name_dl`` differs from ``dataset_name``, rename the extracted
    folder accordingly.

    Parameters
    ----------
    url: ``str``
        Url of the ".tar.bz2" archive to download.
    dataset_name: ``str``
        Name of the grid2op environment this archive contains.
    path_data: ``str``
        Directory in which the data is extracted (created if it does not exist).
    ds_name_dl: ``str``, optional
        Name of the folder inside the archive, when it differs from ``dataset_name``.

    Raises
    ------
    Grid2OpException
        If the target folder already exists, or if ``path_data`` cannot be created.
    """
    if ds_name_dl is None:
        ds_name_dl = dataset_name

    final_path = os.path.join(path_data, ds_name_dl)
    if os.path.exists(final_path):
        # refuse to overwrite data that appears to be already downloaded
        str_ = (
            'Downloading and extracting this data would create a folder "{final_path}" '
            "but this folder already exists. Either you already downloaded the data, in this case "
            "you can invoke the environment from a python script with:\n"
            '\t env = grid2op.make("{final_path}")\n'
            'Alternatively you can also delete the folder "{final_path}" from your computer and run this command '
            "again.\n"
            "Finally, you can download the data in a different folder by specifying (in a command prompt):\n"
            '\t grid2op.download --name "{env_name}" --path_save PATH\\WHERE\\YOU\\WANT\\TO\\DOWNLOAD'
            "".format(final_path=final_path, env_name=dataset_name)
        )
        print(str_)
        raise Grid2OpException(str_)

    if not os.path.exists(path_data):
        print(
            'Creating path "{}" where data for "{}" environment will be downloaded.'
            "".format(path_data, ds_name_dl)
        )
        try:
            os.mkdir(path_data)
        except Exception as exc_:
            # NOTE(review): the message below lacks a separator before "Error was" —
            # kept as-is to preserve the exact runtime text
            str_ = (
                'Impossible to create path "{}" to store the data. Please save the data in a different repository '
                'with setting the argument "--path_save"'
                "Error was:\n{}".format(path_data, exc_)
            )
            raise Grid2OpException(str_)

    output_path = os.path.abspath(
        os.path.join(path_data, "{}.tar.bz2".format(ds_name_dl))
    )

    # download the data (with progress bar)
    print("downloading the training data, this may take a while.")
    download_url(url, output_path)

    print('Extract the tar archive in "{}"'.format(os.path.abspath(path_data)))
    # the "with" statement guarantees the archive is closed even if extraction fails
    # NOTE(review): extractall on a downloaded archive is subject to path traversal
    # if the archive is malicious; consider filter="data" once python >= 3.12 is
    # the minimum supported version.
    with tarfile.open(output_path, "r:bz2") as tar:
        tar.extractall(path_data)

    # rename the extracted folder if necessary
    if ds_name_dl != dataset_name:
        try:
            os.rename(final_path, os.path.join(path_data, dataset_name))
        except FileNotFoundError:
            # for some environments, the archive name does not match the env name,
            # so the folder cannot be renamed properly: warn and keep going
            print(f"WARN: file \"{final_path}\" could not be found. You might need to manually delete it.")

    # and rm the tar bz2
    # bug in the AWS file... named ".tar.tar.bz2" ...
    os.remove(output_path)

    # check for update (if any)
    from grid2op.MakeEnv.UpdateEnv import _update_files

    _update_files(dataset_name)

    print(
        'You may now use the environment "{}" with the available data by invoking:\n'
        '\tenv = grid2op.make("{}")'
        "".format(dataset_name, dataset_name)
    )
def main_download(dataset_name, path_data):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Entry point of the dataset download: normalize the requested environment
    name, check that it is known, then download and extract its data in
    ``path_data``.
    """
    # normalize the name: lowercase, no surrounding whitespace, no double quotes
    name = re.sub('"', "", dataset_name.lower().strip())

    if name not in DICT_URL_GRID2OP_DL:
        known_envs = ",".join(LI_VALID_ENV)
        print(
            'Impossible to find environment named "{env_name}". Known environments are:\n{li_env}'
            "".format(env_name=name, li_env=known_envs)
        )
        sys.exit(1)

    _aux_download(DICT_URL_GRID2OP_DL[name], name, path_data)
| 5,955 | 33.627907 | 115 | py |
# fixed: "all" (which shadows the builtin and has no effect) renamed to the
# special attribute "__all__" so that "from grid2op.Download import *" works
__all__ = ["DownloadDataset"]
| 26 | 12.5 | 25 | py |