Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
Grid2Op | Grid2Op-master/grid2op/Rules/PreventDiscoStorageModif.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Exceptions import IllegalAction
from grid2op.Rules.BaseRules import BaseRules
class PreventDiscoStorageModif(BaseRules):
    """
    This subclass only checks that the action does not modify the storage power
    (charge / discharge) of a disconnected storage unit.

    See :func:`BaseRules.__call__` for a definition of the parameters of this function.
    """

    def __call__(self, action, env):
        """
        Legal unless the action sets a non-zero power on a storage unit that is
        disconnected and that the action does not also reconnect (neither "set"
        nor "change" its bus).

        See :func:`BaseRules.__call__` for a definition of the parameters of this function.
        """
        if env.n_storage == 0:
            # nothing to do if no storage
            return True, None
        # at first iteration, env.current_obs is None...
        # a storage unit is disconnected when its entry in the topology vector is < 0
        storage_disco = env.backend.get_topo_vect()[env.storage_pos_topo_vect] < 0
        storage_power, storage_set_bus, storage_change_bus = action.get_storage_modif()
        # power modification on a disconnected storage: finite and non-zero
        power_modif_disco = (np.isfinite(storage_power[storage_disco])) & (
            storage_power[storage_disco] != 0.0
        )
        # ... and the action does not attempt to reconnect it either
        not_set_status = storage_set_bus[storage_disco] <= 0
        not_change_status = ~storage_change_bus[storage_disco]
        # compute the offending mask only once (it was previously evaluated twice)
        tmp_ = power_modif_disco & not_set_status & not_change_status
        if np.any(tmp_):
            return False, IllegalAction(
                f"Attempt to modify the power produced / absorbed by a storage unit "
                f"without reconnecting it (check storage with id {np.where(tmp_)[0]})."
            )
        return True, None
| 2,006 | 41.702128 | 115 | py |
Grid2Op | Grid2Op-master/grid2op/Rules/PreventReconnection.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Exceptions import IllegalAction
from grid2op.Rules.BaseRules import BaseRules
class PreventReconnection(BaseRules):
    """
    A subclass is used to check that an action will not attempt to reconnect a powerlines disconnected because of
    an overflow, or to check that 2 actions acting on the same powerline are distant from the right number of timesteps
    (see :attr:`grid2op.Parameters.Parameters.NB_TIMESTEP_LINE_STATUS_REMODIF`) or if two topological modification
    of the same substation are too close in time
    (see :attr:`grid2op.Parameters.Parameters.NB_TIMESTEP_TOPOLOGY_REMODIF`)
    """

    def __call__(self, action, env):
        """
        Check that the action does not modify a powerline or a substation that is
        still in "cooldown" (for example after a disconnection due to an overflow).

        See :func:`BaseRules.__call__` for a definition of the parameters of this function.
        """
        # at first iteration, env.current_obs is None...
        # TODO this is used inside the environment (for step) inside LookParam and here
        # this could be computed only once, and fed to this instead
        line_status = env.get_current_line_status()
        aff_lines, aff_subs = action.get_topological_impact(line_status)

        line_cooldown = env._times_before_line_status_actionable
        if np.any(line_cooldown[aff_lines] > 0):
            # the action acted on a powerline too shortly after a previous action,
            # or after a shutdown due to an overflow / opponent / hazards / maintenance
            ids = np.nonzero((line_cooldown > 0) & aff_lines)[0]
            return False, IllegalAction(
                "Powerline with ids {} have been modified illegally (cooldown of {})".format(
                    ids, line_cooldown[ids]
                )
            )

        sub_cooldown = env._times_before_topology_actionable
        if np.any(sub_cooldown[aff_subs] > 0):
            # the action acted on a substation topology too shortly after a previous action
            ids = np.nonzero((sub_cooldown > 0) & aff_subs)[0]
            return False, IllegalAction(
                "Substation with ids {} have been modified illegally (cooldown of {})".format(
                    ids, sub_cooldown[ids]
                )
            )
        return True, None
| 2,855 | 46.6 | 119 | py |
Grid2Op | Grid2Op-master/grid2op/Rules/RulesChecker.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import warnings
from grid2op.Exceptions import Grid2OpException
from grid2op.Rules.BaseRules import BaseRules
from grid2op.Rules.AlwaysLegal import AlwaysLegal
class RulesChecker(object):
    """
    Class that define the rules of the game.
    """

    def __init__(self, legalActClass=AlwaysLegal):
        """
        Parameters
        ----------
        legalActClass: ``type``
            The class that will be used to tell if the actions are legal or not. The class must be given, and not
            an object of this class. It should derived from :class:`BaseRules`.
        """
        if isinstance(legalActClass, type):
            # a class was provided: validate it then instantiate it
            if not issubclass(legalActClass, BaseRules):
                raise Grid2OpException(
                    "Gamerules: legalActClass should be initialize with a class deriving "
                    "from BaseRules and not {}".format(type(legalActClass))
                )
            self.legal_action = legalActClass()
            return

        # an already-built instance was provided
        if not isinstance(legalActClass, BaseRules):
            raise Grid2OpException(
                'Parameter "legalActClass" used to build the Environment should be an instance of the '
                'grid2op.BaseRules class, type provided is "{}"'.format(
                    type(legalActClass)
                )
            )
        try:
            # keep our own private copy so the caller cannot mutate it afterwards
            self.legal_action = copy.deepcopy(legalActClass)
        except Exception as exc_:
            warnings.warn("You passed the legal action as an instance that cannot be deepcopied. It will be "
                          "used 'as is', we do not garantee anything if you modify the original object.")
            self.legal_action = legalActClass

    def initialize(self, env):
        """
        Inform the underlying rule instance about the environment specification.
        This is the place to assert the defined rules are suited for the environement.

        Parameters
        ----------
        env: :class:`grid2op.Environment.Environment`
            The environment on which the action is performed.
        """
        self.legal_action.initialize(env)

    def __call__(self, action, env):
        """
        Says if an action is legal or not, by delegating to ``self.legal_action``.

        Parameters
        ----------
        action: :class:`grid2op.Action.Action`
            The action that need to be tested

        env: :class:`grid2op.Environment.Environment`
            The current used environment.

        Returns
        -------
        is_legal: ``bool``
            ``True`` if the given action is legal, ``False`` otherwise

        reason:
            A grid2op IllegalException given the reason for which the action is illegal
        """
        return self.legal_action(action, env)
| 3,323 | 38.105882 | 113 | py |
Grid2Op | Grid2Op-master/grid2op/Rules/__init__.py | __all__ = [
"RulesChecker",
"DefaultRules",
"AlwaysLegal",
"BaseRules",
"LookParam",
"PreventReconnection",
"PreventDiscoStorageModif",
"RulesByArea",
]
from grid2op.Rules.RulesChecker import RulesChecker
from grid2op.Rules.DefaultRules import DefaultRules
from grid2op.Rules.AlwaysLegal import AlwaysLegal
from grid2op.Rules.BaseRules import BaseRules
from grid2op.Rules.LookParam import LookParam
from grid2op.Rules.PreventReconnection import PreventReconnection
from grid2op.Rules.PreventDiscoStorageModif import PreventDiscoStorageModif
from grid2op.Rules.rulesByArea import RulesByArea
import warnings
class LegalAction(BaseRules):
    """Deprecated alias of :class:`BaseRules`, kept for backward compatibility only."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # emit the deprecation notice after the parent is initialized, as before
        warnings.warn(
            'LegalAction class has been renamed "BaseRules". '
            "This class LegalAction will be removed in future versions.",
            category=PendingDeprecationWarning,
        )
class GameRules(RulesChecker):
    """Deprecated alias of :class:`RulesChecker`, kept for backward compatibility only."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # emit the deprecation notice after the parent is initialized, as before
        warnings.warn(
            'GameRules class has been renamed "RulesChecker". '
            "This class GameRules will be removed in future versions.",
            category=PendingDeprecationWarning,
        )
class PreventReconection(PreventReconnection):
    """
    Deprecated (misspelled) alias of :class:`PreventReconnection`, kept for
    backward compatibility only. It will be removed in future versions.
    """

    def __init__(self, *args, **kwargs):
        # Bug fix: this used to call ``PreventReconection.__init__`` (i.e. itself),
        # causing infinite recursion on instantiation. Delegate to the parent class.
        PreventReconnection.__init__(self, *args, **kwargs)
        warnings.warn(
            'PreventReconection class has been renamed "PreventReconnection". '
            "This class Action will be removed in future versions.",
            category=PendingDeprecationWarning,
        )
| 1,704 | 32.431373 | 79 | py |
Grid2Op | Grid2Op-master/grid2op/Rules/rulesByArea.py | # Copyright (c) 2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from itertools import chain
from grid2op.Rules.BaseRules import BaseRules
from grid2op.Rules.LookParam import LookParam
from grid2op.Rules.PreventReconnection import PreventReconnection
from grid2op.Rules.PreventDiscoStorageModif import PreventDiscoStorageModif
from grid2op.Exceptions import (
IllegalAction, Grid2OpException
)
class RulesByArea(BaseRules):
    """
    This subclass combine :class:`PreventReconnection`, :class: `PreventDiscoStorageModif` to be applied on the whole grid at once,
    while a specific method looks for the legality of simultaneous actions taken on defined areas of a grid.

    An action is declared legal if and only if:

      - It doesn't reconnect more power lines than what is stated in the actual game _parameters
        :class:`grid2op.Parameters`
      - It doesn't attempt to act on more substations and lines within each area than what is stated in the actual game
        _parameters :class:`grid2op.Parameters`
      - It doesn't attempt to modify the power produced by a turned off storage unit

    Example
    ---------
    If you want the environment to take into account the rules by area, you can achieve it with:

    .. code-block:

        import grid2op
        from grid2op.Rules.rulesByArea import RulesByArea

        # First you set up the areas within the RulesByArea class
        my_gamerules_byarea = RulesByArea([[0,1,2,3,4,5,6,7],[8,9,10,11,12,13,14]])
        # Then you create your environment with it:
        NAME_OF_THE_ENVIRONMENT = "l2rpn_case14_sandbox"
        env = grid2op.make(NAME_OF_THE_ENVIRONMENT,gamerules_class=my_gamerules_byarea)

    """

    def __init__(self, areas_list):
        """
        Initialize the rule with a list of lists of substation ids, one inner
        list per area.

        Parameters
        ----------
        areas_list : list of areas, each placeholder containing the ids of substations of each defined area
        """
        # map each area index to the (sorted) list of its substation ids
        self.substations_id_by_area = {i : sorted(k) for i,k in enumerate(areas_list)}

    def initialize(self, env):
        """
        This function is used to inform the class instance about the environment
        specification and check no substation of the grid is left outside an area.

        Parameters
        ----------
        env: :class:`grid2op.Environment.Environment`
            An environment instance properly initialized.
        """
        n_sub = env.n_sub
        # count distinct substation ids over all areas; must exactly cover the grid
        n_sub_rule = np.sum([len(set(list_ids)) for list_ids in self.substations_id_by_area.values()])
        if n_sub_rule != n_sub:
            raise Grid2OpException("The number of listed ids of substations in rule initialization does not match the number of substations of the chosen environement. Look for missing ids or doublon")
        else:
            # NOTE(review): a powerline is assigned to the area of its *origin*
            # substation only (line_or_to_subid) — confirm this is the intended convention
            self.lines_id_by_area = {key : sorted(list(chain(*[[item for item in np.where(env.line_or_to_subid == subid)[0]
                ] for subid in subid_list]))) for key,subid_list in self.substations_id_by_area.items()}

    def __call__(self, action, env):
        """
        See :func:`BaseRules.__call__` for a definition of the _parameters of this function.
        """
        # unbound-style delegation: `self` is a RulesByArea, not an instance of
        # PreventDiscoStorageModif / PreventReconnection, but their __call__ only
        # uses `action` and `env`
        is_legal, reason = PreventDiscoStorageModif.__call__(self, action, env)
        if not is_legal:
            return False, reason

        is_legal, reason = self._lookparam_byarea(action, env)
        if not is_legal:
            return False, reason

        return PreventReconnection.__call__(self, action, env)

    def can_use_simulate(self, nb_simulate_call_step, nb_simulate_call_episode, param):
        # delegate the "simulate budget" logic to LookParam (unbound call, same remark as above)
        return LookParam.can_use_simulate(
            self, nb_simulate_call_step, nb_simulate_call_episode, param
        )

    def _lookparam_byarea(self, action, env):
        """
        Area-wise version of :class:`LookParam`: checks, for each area separately,
        that the action does not affect more lines / substations than allowed.

        See :func:`BaseRules.__call__` for a definition of the parameters of this function.
        """
        # at first iteration, env.current_obs is None...
        powerline_status = env.get_current_line_status()
        aff_lines, aff_subs = action.get_topological_impact(powerline_status)
        # NOTE(review): the checks read env._parameters while the messages read
        # env.parameters — presumably the same object; confirm
        if any([np.sum(aff_lines[line_ids]) > env._parameters.MAX_LINE_STATUS_CHANGED for line_ids in self.lines_id_by_area.values()]):
            ids = [[k for k in np.where(aff_lines)[0] if k in line_ids] for line_ids in self.lines_id_by_area.values()]
            return False, IllegalAction(
                "More than {} line status affected by the action in one area: {}"
                "".format(env.parameters.MAX_LINE_STATUS_CHANGED, ids)
            )
        if any([np.sum(aff_subs[sub_ids]) > env._parameters.MAX_SUB_CHANGED for sub_ids in self.substations_id_by_area.values()]):
            ids = [[k for k in np.where(aff_subs)[0] if k in sub_ids] for sub_ids in self.substations_id_by_area.values()]
            return False, IllegalAction(
                "More than {} substation affected by the action in one area: {}"
                "".format(env.parameters.MAX_SUB_CHANGED, ids)
            )
        return True, None
| 5,563 | 46.965517 | 201 | py |
Grid2Op | Grid2Op-master/grid2op/Runner/FakePBar.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
class _FakePbar(object):
"""
Just a fake progress bar with same interface as tqdm
"""
def __init__(self, total=0, desc=""):
pass
def update(self, int):
pass
def close(self):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
| 813 | 27.068966 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Runner/__init__.py | __all__ = ["Runner"]
from grid2op.Runner.runner import Runner
| 63 | 15 | 40 | py |
Grid2Op | Grid2Op-master/grid2op/Runner/aux_fun.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import time
import numpy as np
from grid2op.Environment import Environment
from grid2op.Agent import BaseAgent
from grid2op.Episode import EpisodeData
from grid2op.Runner.FakePBar import _FakePbar
from grid2op.dtypes import dt_int, dt_float, dt_bool
from grid2op.Chronics import ChronicsHandler
def _aux_add_data(reward, env, episode,
efficient_storing, end__, beg__, act,
obs, info, time_step, opp_attack):
episode.incr_store(
efficient_storing,
time_step,
end__ - beg__,
float(reward),
env._env_modification,
act,
obs,
opp_attack,
info,
)
return reward
def _aux_one_process_parrallel(
    runner,
    episode_this_process,
    process_id,
    path_save=None,
    env_seeds=None,
    agent_seeds=None,
    max_iter=None,
    add_detailed_output=False,
    add_nb_highres_sim=False,
):
    """
    Run, inside a single worker process, the episodes whose grid2op ids are
    listed in ``episode_this_process``, and return one result tuple per episode.

    This function lives at module level (and not as a method of the runner)
    because otherwise it does not work on windows / macos (pickling for
    multiprocessing).
    """
    # each worker builds its own chronics handler: it cannot be shared across processes
    chronics_handler = ChronicsHandler(
        chronicsClass=runner.gridStateclass,
        path=runner.path_chron,
        **runner.gridStateclass_kwargs
    )
    parameters = copy.deepcopy(runner.parameters)
    nb_episode_this_process = len(episode_this_process)
    res = [(None, None, None) for _ in range(nb_episode_this_process)]
    for i, ep_id in enumerate(episode_this_process):
        # `ep_id`: grid2op id of the episode i want to play
        # `i`: my id of the episode played (0, 1, ... episode_this_process)
        env, agent = runner._new_env(
            chronics_handler=chronics_handler, parameters=parameters
        )
        try:
            # seeds are indexed by the process-local episode index `i`
            env_seed = None
            if env_seeds is not None:
                env_seed = env_seeds[i]
            agt_seed = None
            if agent_seeds is not None:
                agt_seed = agent_seeds[i]
            tmp_ = _aux_run_one_episode(
                env,
                agent,
                runner.logger,
                ep_id,
                path_save,
                env_seed=env_seed,
                max_iter=max_iter,
                agent_seed=agt_seed,
                detailed_output=add_detailed_output,
            )
            (name_chron, cum_reward, nb_time_step, max_ts, episode_data, nb_highres_sim) = tmp_
            id_chron = chronics_handler.get_id()
            # base result: (chronic id, chronic name, cumulative reward, steps played, max steps)
            res[i] = (id_chron, name_chron, float(cum_reward), nb_time_step, max_ts)
            if add_detailed_output:
                # optionally append the full EpisodeData object
                res[i] = (*res[i], episode_data)
            if add_nb_highres_sim:
                # optionally append the number of calls to the high resolution simulator
                res[i] = (*res[i], nb_highres_sim)
        finally:
            # make sure the environment is closed even if the episode crashed
            env.close()
    return res
def _aux_run_one_episode(
    env: Environment,
    agent: BaseAgent,
    logger,
    indx : int,
    path_save=None,
    pbar=False,
    env_seed=None,
    agent_seed=None,
    max_iter=None,
    detailed_output=False,
):
    """
    Run the single episode of id ``indx`` on ``env`` with ``agent``.

    Returns ``(chronic name, cumulative reward, nb steps played, max nb steps,
    EpisodeData, nb high-res simulator calls)``. If ``path_save`` is given the
    episode data is also written to disk.

    Fix: the deprecated ``np.NaN`` alias (removed in numpy 2.0) is replaced by
    ``np.nan`` throughout.
    """
    done = False
    time_step = int(0)
    time_act = 0.0
    cum_reward = dt_float(0.0)

    # set the environment to use the proper chronic
    env.set_id(indx)
    # set the seed
    if env_seed is not None:
        env.seed(env_seed)

    # handle max_iter
    if max_iter is not None:
        env.chronics_handler.set_max_iter(max_iter)

    # reset it
    obs = env.reset()
    # reset the number of calls to high resolution simulator
    env._highres_sim_counter._HighResSimCounter__nb_highres_called = 0

    # seed and reset the agent
    if agent_seed is not None:
        agent.seed(agent_seed)
    agent.reset(obs)

    # compute the size and everything if it needs to be stored
    nb_timestep_max = env.chronics_handler.max_timestep()
    efficient_storing = nb_timestep_max > 0
    nb_timestep_max = max(nb_timestep_max, 0)
    max_ts = nb_timestep_max
    if path_save is None and not detailed_output:
        # i don't store anything on drive, so i don't need to store anything on memory
        nb_timestep_max = 0

    disc_lines_templ = np.full((1, env.backend.n_line), fill_value=False, dtype=dt_bool)
    attack_templ = np.full(
        (1, env._oppSpace.action_space.size()), fill_value=0.0, dtype=dt_float
    )
    if efficient_storing:
        # the total number of steps is known in advance: pre-allocate everything
        times = np.full(nb_timestep_max, fill_value=np.nan, dtype=dt_float)
        rewards = np.full(nb_timestep_max, fill_value=np.nan, dtype=dt_float)
        actions = np.full(
            (nb_timestep_max, env.action_space.n), fill_value=np.nan, dtype=dt_float
        )
        env_actions = np.full(
            (nb_timestep_max, env._helper_action_env.n),
            fill_value=np.nan,
            dtype=dt_float,
        )
        observations = np.full(
            (nb_timestep_max + 1, env.observation_space.n),
            fill_value=np.nan,
            dtype=dt_float,
        )
        disc_lines = np.full(
            (nb_timestep_max, env.backend.n_line), fill_value=np.nan, dtype=dt_bool
        )
        attack = np.full(
            (nb_timestep_max, env._opponent_action_space.n),
            fill_value=0.0,
            dtype=dt_float,
        )
        legal = np.full(nb_timestep_max, fill_value=True, dtype=dt_bool)
        ambiguous = np.full(nb_timestep_max, fill_value=False, dtype=dt_bool)
    else:
        # unknown episode length: start empty, EpisodeData grows the storage
        times = np.full(0, fill_value=np.nan, dtype=dt_float)
        rewards = np.full(0, fill_value=np.nan, dtype=dt_float)
        actions = np.full((0, env.action_space.n), fill_value=np.nan, dtype=dt_float)
        env_actions = np.full(
            (0, env._helper_action_env.n), fill_value=np.nan, dtype=dt_float
        )
        observations = np.full(
            (0, env.observation_space.n), fill_value=np.nan, dtype=dt_float
        )
        disc_lines = np.full((0, env.backend.n_line), fill_value=np.nan, dtype=dt_bool)
        attack = np.full(
            (0, env._opponent_action_space.n), fill_value=0.0, dtype=dt_float
        )
        legal = np.full(0, fill_value=True, dtype=dt_bool)
        ambiguous = np.full(0, fill_value=False, dtype=dt_bool)

    need_store_first_act = path_save is not None or detailed_output
    if need_store_first_act:
        # store observation at timestep 0
        if efficient_storing:
            observations[time_step, :] = obs.to_vect()
        else:
            observations = np.concatenate((observations, obs.to_vect().reshape(1, -1)))
    episode = EpisodeData(
        actions=actions,
        env_actions=env_actions,
        observations=observations,
        rewards=rewards,
        disc_lines=disc_lines,
        times=times,
        observation_space=env.observation_space,
        action_space=env.action_space,
        helper_action_env=env._helper_action_env,
        path_save=path_save,
        disc_lines_templ=disc_lines_templ,
        attack_templ=attack_templ,
        attack=attack,
        attack_space=env._opponent_action_space,
        logger=logger,
        name=env.chronics_handler.get_name(),
        force_detail=detailed_output,
        other_rewards=[],
        legal=legal,
        ambiguous=ambiguous,
        has_legal_ambiguous=True,
    )
    if need_store_first_act:
        # I need to manually force in the first observation (otherwise it's not computed)
        episode.observations.objects[0] = episode.observations.helper.from_vect(
            observations[time_step, :]
        )
    episode.set_parameters(env)

    beg_ = time.perf_counter()
    reward = float(env.reward_range[0])
    done = False
    next_pbar = [False]
    with _aux_make_progress_bar(pbar, nb_timestep_max, next_pbar) as pbar_:
        while not done:
            beg__ = time.perf_counter()
            act = agent.act(obs, reward, done)
            end__ = time.perf_counter()
            time_act += end__ - beg__

            if type(env).CAN_SKIP_TS:
                # the environment can "skip" some time
                # steps I need to call the 'env.steps()' to get all
                # the steps.
                res_env_tmp = env.steps(act)
                for (obs, reward, done, info), opp_attack in zip(*res_env_tmp):
                    time_step += 1
                    cum_reward += _aux_add_data(reward, env, episode,
                                                efficient_storing,
                                                end__, beg__, act,
                                                obs, info, time_step,
                                                opp_attack)
                    pbar_.update(1)
            else:
                # regular environment
                obs, reward, done, info = env.step(act)
                time_step += 1
                opp_attack = env._oppSpace.last_attack
                cum_reward += _aux_add_data(reward, env, episode,
                                            efficient_storing,
                                            end__, beg__, act,
                                            obs, info, time_step,
                                            opp_attack)
                pbar_.update(1)
    end_ = time.perf_counter()
    episode.set_meta(env, time_step, float(cum_reward), env_seed, agent_seed)

    li_text = [
        "Env: {:.2f}s",
        "\t - apply act {:.2f}s",
        "\t - run pf: {:.2f}s",
        "\t - env update + observation: {:.2f}s",
        "Agent: {:.2f}s",
        "Total time: {:.2f}s",
        "Cumulative reward: {:1f}",
    ]
    msg_ = "\n".join(li_text)
    logger.info(
        msg_.format(
            env._time_apply_act + env._time_powerflow + env._time_extract_obs,
            env._time_apply_act,
            env._time_powerflow,
            env._time_extract_obs,
            time_act,
            end_ - beg_,
            cum_reward,
        )
    )
    episode.set_episode_times(env, time_act, beg_, end_)

    episode.to_disk()
    name_chron = env.chronics_handler.get_name()
    return (name_chron, cum_reward,
            int(time_step),
            int(max_ts),
            episode,
            env.nb_highres_called)
def _aux_make_progress_bar(pbar, total, next_pbar):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Parameters
    ----------
    pbar: ``bool`` or ``type`` or ``object``
        How to display the progress bar, understood as follow:

        - if pbar is ``None`` nothing is done.
        - if pbar is a boolean, tqdm pbar are used, if tqdm package is available and installed on the system
          [if ``true``]. If it's false it's equivalent to pbar being ``None``
        - if pbar is a ``type`` ( a class), it is used to build a progress bar at the highest level (episode) and
          and the lower levels (step during the episode). If it's a type it muyst accept the argument "total"
          and "desc" when being built, and the closing is ensured by this method.
        - if pbar is an object (an instance of a class) it is used to make a progress bar at this highest level
          (episode) but not at lower levels (step during the episode)

    total: ``int``
        Total number of steps, forwarded to the progress bar constructor.

    next_pbar: ``list``
        One-element list mutated in place; tells the caller whether (and how) to
        build a nested, per-step progress bar.
    """
    pbar_ = _FakePbar()
    next_pbar[0] = False
    if pbar is None:
        # documented behaviour: nothing is done (keep the fake progress bar).
        # Bug fix: previously ``None`` fell into the final ``isinstance(pbar, object)``
        # branch (always true) and ``None`` was returned, which crashed the
        # ``with`` statement in the caller.
        pass
    elif isinstance(pbar, bool):
        if pbar:
            try:
                from tqdm import tqdm

                pbar_ = tqdm(total=total, desc="episode")
                next_pbar[0] = True
            except (ImportError, ModuleNotFoundError):
                # tqdm not installed: silently fall back to the fake progress bar
                pass
    elif isinstance(pbar, type):
        # a class: build the episode-level bar and let the caller build step-level ones
        pbar_ = pbar(total=total, desc="episode")
        next_pbar[0] = pbar
    else:
        # any other object is used "as is" at the episode level only
        pbar_ = pbar
    return pbar_
| 12,025 | 34.370588 | 113 | py |
Grid2Op | Grid2Op-master/grid2op/Runner/basic_logger.py | # Copyright (c) 2019-2021, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
class DoNothingLog:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Logger-like object whose logging methods silently discard every message.
    """

    # verbosity levels, from most to least verbose
    INFO = 2
    WARNING = 1
    ERROR = 0

    def __init__(self, max_level=2):
        # highest level that would be emitted; unused here, honored by subclasses
        self.max_level = max_level

    def warn(self, *args, **kwargs):
        """Discard the message."""
        pass

    def info(self, *args, **kwargs):
        """Discard the message."""
        pass

    def error(self, *args, **kwargs):
        """Discard the message."""
        pass

    def warning(self, *args, **kwargs):
        """Discard the message."""
        pass
class ConsoleLog(DoNothingLog):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    A class to emulate the behaviour of a logger, but that prints on the console.
    """

    def __init__(self, max_level=2):
        DoNothingLog.__init__(self, max_level)

    def _print_if(self, level, prefix, args, kwargs):
        # shared implementation for every level (the four public methods were
        # four copies of this body): positional args are joined with ", " on one
        # line, keyword args printed on another, exactly as before
        if self.max_level >= level:
            if args:
                print('{}: "{}"'.format(prefix, ", ".join(args)))
            if kwargs:
                print("{}: {}".format(prefix, kwargs))

    def warn(self, *args, **kwargs):
        """Print the message at WARNING level (if verbose enough)."""
        self._print_if(self.WARNING, "WARNING", args, kwargs)

    def info(self, *args, **kwargs):
        """Print the message at INFO level (if verbose enough)."""
        self._print_if(self.INFO, "INFO", args, kwargs)

    def error(self, *args, **kwargs):
        """Print the message at ERROR level (if verbose enough)."""
        self._print_if(self.ERROR, "ERROR", args, kwargs)

    def warning(self, *args, **kwargs):
        """Alias of :func:`ConsoleLog.warn`."""
        self._print_if(self.WARNING, "WARNING", args, kwargs)
| 2,278 | 28.217949 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Runner/runner.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import warnings
import copy
from multiprocessing import Pool
from typing import Tuple, Optional, List
from grid2op.Environment import BaseEnv
from grid2op.Action import BaseAction, TopologyAction, DontAct
from grid2op.Exceptions import Grid2OpException, EnvError
from grid2op.Observation import CompleteObservation, BaseObservation
from grid2op.Opponent.OpponentSpace import OpponentSpace
from grid2op.Reward import FlatReward, BaseReward
from grid2op.Rules import AlwaysLegal, BaseRules
from grid2op.Environment import Environment
from grid2op.Chronics import ChronicsHandler, GridStateFromFile, GridValue
from grid2op.Backend import Backend, PandaPowerBackend
from grid2op.Parameters import Parameters
from grid2op.Agent import DoNothingAgent, BaseAgent
from grid2op.VoltageControler import ControlVoltageFromFile
from grid2op.dtypes import dt_float
from grid2op.Opponent import BaseOpponent, NeverAttackBudget
from grid2op.operator_attention import LinearAttentionBudget
from grid2op.Runner.aux_fun import (
_aux_run_one_episode,
_aux_make_progress_bar,
_aux_one_process_parrallel,
)
from grid2op.Runner.basic_logger import DoNothingLog, ConsoleLog
from grid2op.Episode import EpisodeData
# on windows if i start using sequential, i need to continue using sequential
# if i start using parallel i need to continue using parallel
# so i force the usage of the "starmap" stuff even if there is one process on windows
from grid2op._glop_platform_info import _IS_WINDOWS, _IS_LINUX, _IS_MACOS
runner_returned_type = Tuple[str, str, float, int, int, Optional[EpisodeData], Optional[int]]
# TODO have a vectorized implementation of everything in case the agent is able to act on multiple environment
# at the same time. This might require a lot of work, but would be totally worth it!
# (especially for Neural Net based agents)
# TODO add a more suitable logging strategy
# TODO use gym logger if specified by the user.
# TODO: if chronics are "loop through" multiple times, only last results are saved. :-/
class Runner(object):
"""
A runner is a utility tool that allows to run simulations more easily.
It is a more convenient way to execute the
following loops:
.. code-block:: python
import grid2op
from grid2op.Agent import RandomAgent # for example...
from grid2op.Runner import Runner
env = grid2op.make()
###############
# the gym loops
nb_episode = 5
for i in range(nb_episode):
obs = env.reset()
done = False
reward = env.reward_range[0]
while not done:
act = agent.act(obs, reward, done)
obs, reward, done, info = env.step(act)
###############
# equivalent with use of a Runner
runner = Runner(**env.get_params_for_runner(), agentClass=RandomAgent)
res = runner.run(nb_episode=nn_episode)
This specific class as for main purpose to evaluate the performance of a trained
:class:`grid2op.Agent.BaseAgent` rather than to train it.
It has also the good property to be able to save the results of a experiment in a standardized
manner described in the :class:`grid2op.Episode.EpisodeData`.
**NB** we do not recommend to create a runner from scratch by providing all the arguments. We strongly
encourage you to use the :func:`grid2op.Environment.Environment.get_params_for_runner` for
creating a runner.
Attributes
----------
envClass: ``type``
The type of the environment used for the game. The class should be given, and **not** an instance (object) of
this class. The default is the :class:`grid2op.Environment`. If modified, it should derived from this class.
other_env_kwargs: ``dict``
Other kwargs used to build the environment (None for "nothing")
actionClass: ``type``
The type of action that can be performed by the agent / bot / controler. The class should be given, and
**not** an instance of this class. This type
should derived from :class:`grid2op.BaseAction`. The default is :class:`grid2op.TopologyAction`.
observationClass: ``type``
This type represents the class that will be used to build the :class:`grid2op.BaseObservation` visible by the
:class:`grid2op.BaseAgent`. As :attr:`Runner.actionClass`, this should be a type, and **not** and instance
(object)
of this type. This type should derived from :class:`grid2op.BaseObservation`. The default is
:class:`grid2op.CompleteObservation`.
rewardClass: ``type``
Representes the type used to build the rewards that are given to the :class:`BaseAgent`. As
:attr:`Runner.actionClass`, this should be a type, and **not** and instance (object) of this type.
This type should derived from :class:`grid2op.BaseReward`. The default is :class:`grid2op.ConstantReward` that
**should not** be used to train or evaluate an agent, but rather as debugging purpose.
gridStateclass: ``type``
This types control the mechanisms to read chronics and assign data to the powergrid. Like every "\\.*Class"
attributes the type should be pass and not an intance (object) of this type. Its default is
:class:`grid2op.GridStateFromFile` and it must be a subclass of :class:`grid2op.GridValue`.
legalActClass: ``type``
This types control the mechanisms to assess if an :class:`grid2op.BaseAction` is legal.
Like every "\\.*Class" attributes the type should be pass and not an intance (object) of this type.
Its default is :class:`grid2op.AlwaysLegal` and it must be a subclass of :class:`grid2op.BaseRules`.
backendClass: ``type``
This types control the backend, *eg.* the software that computes the powerflows.
Like every "\\.*Class" attributes the type should be pass and not an intance (object) of this type.
Its default is :class:`grid2op.PandaPowerBackend` and it must be a subclass of :class:`grid2op.Backend`.
backend_kwargs: ``dict``
Optional arguments used to build the backend. These arguments will not
be copied to create the backend used by the runner. They might
required to be pickeable on some plateform when using multi processing.
agentClass: ``type``
This types control the type of BaseAgent, *eg.* the bot / controler that will take :class:`grid2op.BaseAction`
and
avoid cascading failures.
Like every "\\.*Class" attributes the type should be pass and not an intance (object) of this type.
Its default is :class:`grid2op.DoNothingAgent` and it must be a subclass of :class:`grid2op.BaseAgent`.
logger:
A object than can be used to log information, either in a text file, or by printing them to the command prompt.
init_grid_path: ``str``
This attributes store the path where the powergrid data are located. If a relative path is given, it will be
extended as an absolute path.
names_chronics_to_backend: ``dict``
See description of :func:`grid2op.ChronicsHelper.initialize` for more information about this dictionnary
parameters_path: ``str``, optional
Where to look for the :class:`grid2op.Environment` :class:`grid2op.Parameters`. It defaults to ``None`` which
corresponds to using default values.
parameters: :class:`grid2op.Parameters`
Type of _parameters used. This is an instance (object) of type :class:`grid2op.Parameters` initialized from
:attr:`Runner.parameters_path`
path_chron: ``str``
Path indicatng where to look for temporal data.
chronics_handler: :class:`grid2op.ChronicsHandler`
Initialized from :attr:`Runner.gridStateclass` and :attr:`Runner.path_chron` it represents the input data used
to generate grid state by the :attr:`Runner.env`
backend: :class:`grid2op.Backend`
Used to compute the powerflow. This object has the type given by :attr:`Runner.backendClass`
env: :class:`grid2op.Environment`
Represents the environment which the agent / bot / control must control through action. It is initialized from
the :attr:`Runner.envClass`
agent: :class:`grid2op.Agent`
Represents the agent / bot / controler that takes action performed on a environment (the powergrid) to maximize
a certain reward.
verbose: ``bool``
If ``True`` then detailed output of each steps are written.
gridStateclass_kwargs: ``dict``
Additional keyword arguments used to build the :attr:`Runner.chronics_handler`
thermal_limit_a: ``numpy.ndarray``
The thermal limit for the environment (if any).
opponent_action_class: ``type``, optional
The action class used for the opponent. The opponent will not be able to use action that are invalid with
the given action class provided. It defaults to :class:`grid2op.Action.DontAct` which forbid any type
of action possible.
opponent_class: ``type``, optional
The opponent class to use. The default class is :class:`grid2op.Opponent.BaseOpponent` which is a type
of opponents that does nothing.
opponent_init_budget: ``float``, optional
The initial budget of the opponent. It defaults to 0.0 which means the opponent cannot perform any action
if this is not modified.
opponent_budget_per_ts: ``float``, optional
The budget increase of the opponent per time step
opponent_budget_class: ``type``, optional
The class used to compute the attack cost.
grid_layout: ``dict``, optional
The layout of the grid (position of each substation) usefull if you need to plot some things for example.
TODO
_attention_budget_cls=LinearAttentionBudget,
_kwargs_attention_budget=None,
_has_attention_budget=False
Examples
--------
Different examples are showed in the description of the main method :func:`Runner.run`
Notes
-----
Runner does not necessarily behave normally when "nb_process" is not 1 on some platform (windows and some
version of macos). Please read the documentation, and especially the :ref:`runner-multi-proc-warning`
for more information and possible way to disable this feature.
"""
FORCE_SEQUENTIAL = "GRID2OP_RUNNER_FORCE_SEQUENTIAL"
def __init__(
    self,
    init_env_path: str,
    init_grid_path: str,
    path_chron,  # path where chronics of injections are stored
    name_env="unknown",
    parameters_path=None,
    names_chronics_to_backend=None,
    actionClass=TopologyAction,
    observationClass=CompleteObservation,
    rewardClass=FlatReward,
    legalActClass=AlwaysLegal,
    envClass=Environment,
    other_env_kwargs=None,
    gridStateclass=GridStateFromFile,
    # type of chronics to use. For example GridStateFromFile if forecasts are not used,
    # or GridStateFromFileWithForecasts otherwise
    backendClass=PandaPowerBackend,
    backend_kwargs=None,
    agentClass=DoNothingAgent,  # class used to build the agent
    agentInstance=None,
    verbose=False,
    gridStateclass_kwargs={},
    voltageControlerClass=ControlVoltageFromFile,
    thermal_limit_a=None,
    max_iter=-1,
    other_rewards={},
    opponent_space_type=OpponentSpace,
    opponent_action_class=DontAct,
    opponent_class=BaseOpponent,
    opponent_init_budget=0.0,
    opponent_budget_per_ts=0.0,
    opponent_budget_class=NeverAttackBudget,
    opponent_attack_duration=0,
    opponent_attack_cooldown=99999,
    opponent_kwargs={},
    grid_layout=None,
    with_forecast=True,
    attention_budget_cls=LinearAttentionBudget,
    kwargs_attention_budget=None,
    has_attention_budget=False,
    logger=None,
    kwargs_observation=None,
    observation_bk_class=None,
    observation_bk_kwargs=None,
    # experimental: whether to read from local dir or generate the classes on the fly:
    _read_from_local_dir=False,
    _is_test=False,  # TODO not implemented !!
):
    """
    Initialize the Runner.

    Most arguments mirror the corresponding attributes (see the class
    docstring). Each "\\*Class" argument is validated below: it must be a
    type (not an instance) deriving from the expected grid2op base class,
    otherwise a :class:`Grid2OpException` or ``RuntimeError`` is raised.

    Parameters
    ----------
    init_grid_path: ``str``
        Mandatory, used to initialize :attr:`Runner.init_grid_path`.

    path_chron: ``str``
        Mandatory, where to look for chronics data, used to initialize :attr:`Runner.path_chron`.

    parameters_path: ``str`` or ``dict``, optional
        Used to initialize :attr:`Runner.parameters_path`. If it's a string, this will suppose parameters are
        located at this path, if it's a dictionary, this will use the parameters converted from this dictionary.

    names_chronics_to_backend: ``dict``, optional
        Used to initialize :attr:`Runner.names_chronics_to_backend`.

    actionClass: ``type``, optional
        Used to initialize :attr:`Runner.actionClass`.

    observationClass: ``type``, optional
        Used to initialize :attr:`Runner.observationClass`.

    rewardClass: ``type``, optional
        Used to initialize :attr:`Runner.rewardClass`. Default to :class:`grid2op.ConstantReward` that
        **should not** be used to train or evaluate an agent, but rather as debugging purpose.

    legalActClass: ``type``, optional
        Used to initialize :attr:`Runner.legalActClass`.

    envClass: ``type``, optional
        Used to initialize :attr:`Runner.envClass`.

    gridStateclass: ``type``, optional
        Used to initialize :attr:`Runner.gridStateclass`.

    backendClass: ``type``, optional
        Used to initialize :attr:`Runner.backendClass`.

    agentClass: ``type``, optional
        Used to initialize :attr:`Runner.agentClass`. Exactly one of
        ``agentClass`` / ``agentInstance`` must be provided.

    agentInstance: :class:`grid2op.Agent.Agent`
        Used to initialize the agent. Note that either :attr:`agentClass` or :attr:`agentInstance` is used
        at the same time. If both of them are ``None`` or both of them are "not ``None``" it throws an error.

    verbose: ``bool``, optional
        Used to initialize :attr:`Runner.verbose`.

    thermal_limit_a: ``numpy.ndarray``
        The thermal limit for the environment (if any).

    voltagecontrolerClass: :class:`grid2op.VoltageControler.ControlVoltageFromFile`, optional
        The controler that will change the voltage setpoints of the generators.

    # TODO documentation on the opponent
    # TODO doc for the attention budget
    """
    self.with_forecast = with_forecast
    self.name_env = name_env
    # --- environment class validation -------------------------------------
    if not isinstance(envClass, type):
        raise Grid2OpException(
            'Parameter "envClass" used to build the Runner should be a type (a class) and not an object '
            '(an instance of a class). It is currently "{}"'.format(type(envClass))
        )
    if not issubclass(envClass, Environment):
        raise RuntimeError(
            "Impossible to create a runner without an evnrionment derived from grid2op.Environement"
            ' class. Please modify "envClass" parameter.'
        )
    self.envClass = envClass
    if other_env_kwargs is not None:
        self.other_env_kwargs = other_env_kwargs
    else:
        self.other_env_kwargs = {}
    # --- action class validation ------------------------------------------
    if not isinstance(actionClass, type):
        raise Grid2OpException(
            'Parameter "actionClass" used to build the Runner should be a type (a class) and not an object '
            '(an instance of a class). It is currently "{}"'.format(
                type(actionClass)
            )
        )
    if not issubclass(actionClass, BaseAction):
        raise RuntimeError(
            "Impossible to create a runner without an action class derived from grid2op.BaseAction. "
            'Please modify "actionClass" parameter.'
        )
    self.actionClass = actionClass
    # --- observation class validation -------------------------------------
    if not isinstance(observationClass, type):
        raise Grid2OpException(
            'Parameter "observationClass" used to build the Runner should be a type (a class) and not an object '
            '(an instance of a class). It is currently "{}"'.format(
                type(observationClass)
            )
        )
    if not issubclass(observationClass, BaseObservation):
        raise RuntimeError(
            "Impossible to create a runner without an observation class derived from "
            'grid2op.BaseObservation. Please modify "observationClass" parameter.'
        )
    self.observationClass = observationClass
    # --- reward validation: a type OR an already-built instance is accepted
    # NOTE(review): the error messages below say "observation class" but the
    # check is about the reward class — message looks copy-pasted; confirm.
    if isinstance(rewardClass, type):
        if not issubclass(rewardClass, BaseReward):
            raise RuntimeError(
                "Impossible to create a runner without an observation class derived from "
                'grid2op.BaseReward. Please modify "rewardClass" parameter.'
            )
    else:
        if not isinstance(rewardClass, BaseReward):
            raise RuntimeError(
                "Impossible to create a runner without an observation class derived from "
                'grid2op.BaseReward. Please modify "rewardClass" parameter.'
            )
    self.rewardClass = rewardClass
    # --- chronics (grid state) class validation ---------------------------
    if not isinstance(gridStateclass, type):
        raise Grid2OpException(
            'Parameter "gridStateclass" used to build the Runner should be a type (a class) and not an object '
            '(an instance of a class). It is currently "{}"'.format(
                type(gridStateclass)
            )
        )
    if not issubclass(gridStateclass, GridValue):
        raise RuntimeError(
            "Impossible to create a runner without an chronics class derived from "
            'grid2op.GridValue. Please modify "gridStateclass" parameter.'
        )
    self.gridStateclass = gridStateclass
    # rules validation is delegated to the environment class
    self.envClass._check_rules_correct(legalActClass)
    self.legalActClass = legalActClass
    # --- backend class validation -----------------------------------------
    # NOTE(review): the message below names "legalActClass" although the
    # check is about "backendClass" — looks like a copy-paste slip; confirm.
    if not isinstance(backendClass, type):
        raise Grid2OpException(
            'Parameter "legalActClass" used to build the Runner should be a type (a class) and not an object '
            '(an instance of a class). It is currently "{}"'.format(
                type(backendClass)
            )
        )
    if not issubclass(backendClass, Backend):
        raise RuntimeError(
            "Impossible to create a runner without a backend class derived from grid2op.GridValue. "
            'Please modify "backendClass" parameter.'
        )
    self.backendClass = backendClass
    if backend_kwargs is not None:
        self._backend_kwargs = backend_kwargs
    else:
        self._backend_kwargs = {}
    # --- agent: exactly one of agentClass / agentInstance -----------------
    self.__can_copy_agent = True
    if agentClass is not None:
        if agentInstance is not None:
            raise RuntimeError(
                "Impossible to build the Runner. Only one of agentClass or agentInstance can be "
                "used (both are set / both are not None)."
            )
        if not isinstance(agentClass, type):
            raise Grid2OpException(
                'Parameter "agentClass" used to build the Runner should be a type (a class) and not an object '
                '(an instance of a class). It is currently "{}"'.format(
                    type(agentClass)
                )
            )
        if not issubclass(agentClass, BaseAgent):
            raise RuntimeError(
                "Impossible to create a runner without an agent class derived from "
                "grid2op.BaseAgent. "
                'Please modify "agentClass" parameter.'
            )
        self.agentClass = agentClass
        self._useclass = True
        self.agent = None
    elif agentInstance is not None:
        if not isinstance(agentInstance, BaseAgent):
            raise RuntimeError(
                "Impossible to create a runner without an agent class derived from "
                "grid2op.BaseAgent. "
                'Please modify "agentInstance" parameter.'
            )
        self.agentClass = None
        self._useclass = False
        self.agent = agentInstance
        # Test if we can copy the agent for parallel runs
        try:
            copy.copy(self.agent)
        except:
            self.__can_copy_agent = False
    else:
        # NOTE(review): message says "backend" but the problem is the agent
        # (neither agentClass nor agentInstance was given) — confirm wording.
        raise RuntimeError(
            "Impossible to build the backend. Either AgentClass or agentInstance must be provided "
            "and both are None."
        )
    self.agentInstance = agentInstance
    self._read_from_local_dir = _read_from_local_dir
    self._observation_bk_class = observation_bk_class
    self._observation_bk_kwargs = observation_bk_kwargs
    # --- logger setup ------------------------------------------------------
    # NOTE(review): this ConsoleLog is immediately overwritten in both
    # branches below — dead assignment; confirm it can be removed.
    self.logger = ConsoleLog(DoNothingLog.INFO if verbose else DoNothingLog.ERROR)
    if logger is None:
        import logging

        self.logger = logging.getLogger(__name__)
        if verbose:
            # NOTE(review): the stdlib logging module only knows upper-case
            # level names; setLevel("debug") may raise ValueError — confirm.
            self.logger.setLevel("debug")
        else:
            self.logger.disabled = True
    else:
        self.logger = logger.getChild("grid2op_Runner")
    # store _parameters
    self.init_env_path = init_env_path
    self.init_grid_path = init_grid_path
    self.names_chronics_to_backend = names_chronics_to_backend
    # game _parameters: a path, a dict, or None (defaults) are accepted
    self.parameters_path = parameters_path
    if isinstance(parameters_path, str):
        self.parameters = Parameters(parameters_path)
    elif isinstance(parameters_path, dict):
        self.parameters = Parameters()
        self.parameters.init_from_dict(parameters_path)
    elif parameters_path is None:
        self.parameters = Parameters()
    else:
        raise RuntimeError(
            'Impossible to build the parameters. The argument "parameters_path" should either '
            "be a string or a dictionary."
        )
    # chronics of grid state
    self.path_chron = path_chron
    self.gridStateclass_kwargs = gridStateclass_kwargs
    self.max_iter = max_iter
    if max_iter > 0:
        # NOTE(review): mutates gridStateclass_kwargs, which defaults to a
        # shared mutable dict ({} in the signature) — confirm intended.
        self.gridStateclass_kwargs["max_iter"] = max_iter
    self.chronics_handler = ChronicsHandler(
        chronicsClass=self.gridStateclass,
        path=self.path_chron,
        **self.gridStateclass_kwargs
    )
    self.verbose = verbose
    self.thermal_limit_a = thermal_limit_a
    # controler for voltage
    if not issubclass(voltageControlerClass, ControlVoltageFromFile):
        raise Grid2OpException(
            'Parameter "voltagecontrolClass" should derive from "ControlVoltageFromFile".'
        )
    self.voltageControlerClass = voltageControlerClass
    self._other_rewards = other_rewards
    # for opponent (should be defined here) after the initialization of BaseEnv
    self._opponent_space_type = opponent_space_type
    if not issubclass(opponent_action_class, BaseAction):
        raise EnvError(
            "Impossible to make an environment with an opponent action class not "
            "derived from BaseAction"
        )
    try:
        self.opponent_init_budget = dt_float(opponent_init_budget)
    except Exception as e:
        raise EnvError(
            'Impossible to convert "opponent_init_budget" to a float with error {}'.format(
                e
            )
        )
    if self.opponent_init_budget < 0.0:
        raise EnvError(
            "If you want to deactive the opponent, please don't set its budget to a negative number."
            'Prefer the use of the DontAct action type ("opponent_action_class=DontAct" '
            "and / or set its budget to 0."
        )
    if not issubclass(opponent_class, BaseOpponent):
        raise EnvError(
            "Impossible to make an opponent with a type that does not inherit from BaseOpponent."
        )
    self.opponent_action_class = opponent_action_class
    self.opponent_class = opponent_class
    self.opponent_init_budget = opponent_init_budget
    self.opponent_budget_per_ts = opponent_budget_per_ts
    self.opponent_budget_class = opponent_budget_class
    self.opponent_attack_duration = opponent_attack_duration
    self.opponent_attack_cooldown = opponent_attack_cooldown
    self.opponent_kwargs = opponent_kwargs
    self.grid_layout = grid_layout
    # attention budget
    self._attention_budget_cls = attention_budget_cls
    self._kwargs_attention_budget = copy.deepcopy(kwargs_attention_budget)
    self._has_attention_budget = has_attention_budget
    # custom observation building
    if kwargs_observation is None:
        kwargs_observation = {}
    self._kwargs_observation = copy.deepcopy(kwargs_observation)
    # otherwise on windows / macos it sometimes fail in the runner in multi process
    # on linux like OS i prefer to generate all the proper classes accordingly
    if _IS_LINUX:
        pass
    # build (and immediately discard) one environment so the grid2op classes
    # are generated once at construction time
    # NOTE(review): bk_class is never read afterwards — confirm it is only
    # kept for its side effect of forcing backend class generation.
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        with self.init_env() as env:
            bk_class = type(env.backend)
        pass
    # not implemented !
    self._is_test = _is_test
    # flag: has this runner already been used to run episodes?
    self.__used = False
def _new_env(self, chronics_handler, parameters) -> Tuple[BaseEnv, BaseAgent]:
    """
    INTERNAL

    Build a fresh environment from the runner configuration, together with
    the agent that will act on it.

    Parameters
    ----------
    chronics_handler:
        The chronics handler passed to the environment factory.
        NOTE(review): the "MULTI_CHRONICS" check below reads
        ``self.chronics_handler`` rather than this parameter — in current
        callers they are the same object; confirm before changing.
    parameters:
        The game parameters used for this environment.

    Returns
    -------
    res: :class:`grid2op.Environment.BaseEnv`
        The freshly built environment (thermal limits and layout applied
        when configured).
    agent: :class:`grid2op.Agent.BaseAgent`
        The agent: a new instance when the runner was given a class, a copy
        of the provided instance when copyable, otherwise the instance itself.
    """
    # the same chronics_handler is used for all the environments.
    # make sure to "reset" it properly
    # (this is handled elsewhere in case of "multi chronics")
    if not self.chronics_handler.chronicsClass.MULTI_CHRONICS:
        self.chronics_handler.next_chronics()
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        # forward every piece of the runner configuration to the environment
        # factory; a brand new backend instance is created for each env
        res = self.envClass.init_obj_from_kwargs(
            other_env_kwargs=self.other_env_kwargs,
            init_env_path=self.init_env_path,
            init_grid_path=self.init_grid_path,
            chronics_handler=chronics_handler,
            backend=self.backendClass(**self._backend_kwargs),
            parameters=parameters,
            name=self.name_env,
            names_chronics_to_backend=self.names_chronics_to_backend,
            actionClass=self.actionClass,
            observationClass=self.observationClass,
            rewardClass=self.rewardClass,
            legalActClass=self.legalActClass,
            voltagecontrolerClass=self.voltageControlerClass,
            other_rewards=self._other_rewards,
            opponent_space_type=self._opponent_space_type,
            opponent_action_class=self.opponent_action_class,
            opponent_class=self.opponent_class,
            opponent_init_budget=self.opponent_init_budget,
            opponent_budget_per_ts=self.opponent_budget_per_ts,
            opponent_budget_class=self.opponent_budget_class,
            opponent_attack_duration=self.opponent_attack_duration,
            opponent_attack_cooldown=self.opponent_attack_cooldown,
            kwargs_opponent=self.opponent_kwargs,
            with_forecast=self.with_forecast,
            attention_budget_cls=self._attention_budget_cls,
            kwargs_attention_budget=self._kwargs_attention_budget,
            has_attention_budget=self._has_attention_budget,
            logger=self.logger,
            kwargs_observation=self._kwargs_observation,
            observation_bk_class=self._observation_bk_class,
            observation_bk_kwargs=self._observation_bk_kwargs,
            _raw_backend_class=self.backendClass,
            _read_from_local_dir=self._read_from_local_dir,
        )
    # post-configuration of the freshly created environment
    if self.thermal_limit_a is not None:
        res.set_thermal_limit(self.thermal_limit_a)
    if self.grid_layout is not None:
        res.attach_layout(self.grid_layout)
    # build the agent: new instance, copy, or the shared instance itself
    if self._useclass:
        agent = self.agentClass(res.action_space)
    else:
        if self.__can_copy_agent:
            agent = copy.copy(self.agent)
        else:
            agent = self.agent
    return res, agent
def init_env(self) -> BaseEnv:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Build a fresh environment (and the agent acting on it) from the runner
    configuration, store the agent on the runner and return the environment.
    It is called by :func:`Runner.reset`.
    """
    new_env, new_agent = self._new_env(self.chronics_handler, self.parameters)
    self.agent = new_agent
    return new_env
def reset(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Called at the beginning of each new episode. In the current
    implementation every episode builds a brand new environment (see
    :func:`Runner.init_env`), so there is nothing to reset here and this
    method is a deliberate no-op kept for API compatibility.
    """
def run_one_episode(
    self,
    indx=0,
    path_save=None,
    pbar=False,
    env_seed=None,
    max_iter=None,
    agent_seed=None,
    episode_id=None,
    detailed_output=False,
    add_nb_highres_sim=False,
) -> runner_returned_type:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Function used to run one episode of the :attr:`Runner.agent` and see how it performs in the :attr:`Runner.env`.

    Parameters
    ----------
    indx: ``int``
        The number of episode previously run

    path_save: ``str``, optional
        Path where to save the data. See the description of :mod:`grid2op.Runner` for the structure of the saved
        file.

    episode_id:
        NOTE(review): accepted here but never forwarded to
        ``_aux_run_one_episode`` — only ``indx`` is passed; confirm whether
        this parameter is intentionally unused.

    detailed_output:
        See descr. of :func:`Runner.run` method

    add_nb_highres_sim:
        See descr. of :func:`Runner.run` method

    Returns
    -------
    TODO DEPRECATED DOC
    cum_reward: ``np.float32``
        The cumulative reward obtained by the agent during this episode

    time_step: ``int``
        The number of timesteps that have been played before the end of the episode (because of a "game over" or
        because there were no more data)
    """
    self.reset()
    # a brand new environment is created (and closed) for every episode
    with self.init_env() as env:
        res = _aux_run_one_episode(
            env,
            self.agent,
            self.logger,
            indx,
            path_save,
            pbar=pbar,
            env_seed=env_seed,
            max_iter=max_iter,
            agent_seed=agent_seed,
            detailed_output=detailed_output,
        )
        if max_iter is not None:
            # restore the "no limit" state of the chronics before closing
            env.chronics_handler.set_max_iter(-1)
    # `res` here necessarily contains detailed_output and nb_highres_call
    # as its two trailing elements; trim from the tail what the caller did
    # not ask for.
    # NOTE(review): if add_nb_highres_sim is True while detailed_output is
    # False, the element dropped below is the *last* one (the high-res call
    # count), not the episode data — confirm this ordering is intended.
    if not add_nb_highres_sim:
        res = res[:-1]
    if not detailed_output:
        res = res[:-1]
    return res
def _run_sequential(
    self,
    nb_episode,
    path_save=None,
    pbar=False,
    env_seeds=None,
    agent_seeds=None,
    max_iter=None,
    episode_id=None,
    add_detailed_output=False,
    add_nb_highres_sim=False,
) -> List[runner_returned_type]:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    This method is called to see how well an agent performed on a sequence of episode.

    Parameters
    ----------
    nb_episode: ``int``
        Number of episode to play.

    path_save: ``str``, optional
        If not None, it specifies where to store the data. See the description of this module :mod:`Runner` for
        more information

    pbar: ``bool`` or ``type`` or ``object``
        How to display the progress bar, understood as follow:

        - if pbar is ``None`` nothing is done.
        - if pbar is a boolean, tqdm pbar are used, if tqdm package is available and installed on the system
          [if ``true``]. If it's false it's equivalent to pbar being ``None``
        - if pbar is a ``type`` (a class), it is used to build a progress bar at the highest level (episode) and
          the lower levels (step during the episode). If it's a type it must accept the argument "total"
          and "desc" when being built, and the closing is ensured by this method.
        - if pbar is an object (an instance of a class) it is used to make a progress bar at this highest level
          (episode) but not at lower levels (step during the episode)

    env_seeds: ``list``
        An iterable of the seed used for the experiments. By default ``None``, no seeds are set. If provided,
        its size should match ``nb_episode``.

    episode_id: ``list``
        For each of the nb_episode you want to compute, it specifies the id of the chronic that will be used.
        By default ``None``, no seeds are set. If provided,
        its size should match ``nb_episode``.

    add_detailed_output: see Runner.run method

    Returns
    -------
    res: ``list``
        List of tuple. Each tuple having 5 elements:

          - "id_chron" unique identifier of the episode
          - "name_chron" name of chronics
          - "cum_reward" the cumulative reward obtained by the :attr:`Runner.BaseAgent` on this episode i
          - "nb_time_step": the number of time steps played in this episode.
          - "max_ts" : the maximum number of time steps of the chronics
          - "episode_data" : The :class:`EpisodeData` corresponding to this episode run
            (only when ``add_detailed_output`` is set)
    """
    # pre-fill the result list with placeholders so results land at index i
    res = [(None, None, None, None, None, None)
           for _ in range(nb_episode)]
    # next_pbar[0] is set by _aux_make_progress_bar when a step-level
    # progress bar should also be displayed inside each episode
    next_pbar = [False]
    with _aux_make_progress_bar(pbar, nb_episode, next_pbar) as pbar_:
        for i in range(nb_episode):
            # pick the per-episode seeds (when provided)
            env_seed = None
            if env_seeds is not None:
                env_seed = env_seeds[i]
            agt_seed = None
            if agent_seeds is not None:
                agt_seed = agent_seeds[i]
            ep_id = i  # if no "episode_id" is provided i used the i th one
            if episode_id is not None:
                ep_id = episode_id[i]  # otherwise i use the provided one
            # always ask for the full result tuple; trimming happens below
            (
                name_chron,
                cum_reward,
                nb_time_step,
                max_ts,
                episode_data,
                nb_call_highres_sim,
            ) = self.run_one_episode(
                path_save=path_save,
                indx=ep_id,
                pbar=next_pbar[0],
                env_seed=env_seed,
                agent_seed=agt_seed,
                max_iter=max_iter,
                detailed_output=True,
                add_nb_highres_sim=True
            )
            id_chron = self.chronics_handler.get_id()
            res[i] = (id_chron,
                      name_chron,
                      float(cum_reward),
                      nb_time_step,
                      max_ts
                      )
            # optional trailing elements, appended in a fixed order
            if add_detailed_output:
                res[i] = (*res[i], episode_data)
            if add_nb_highres_sim:
                res[i] = (*res[i], nb_call_highres_sim)
            pbar_.update(1)
    return res
def _run_parrallel(
    self,
    nb_episode,
    nb_process=1,
    path_save=None,
    env_seeds=None,
    agent_seeds=None,
    max_iter=None,
    episode_id=None,
    add_detailed_output=False,
    add_nb_highres_sim=False,
) -> List[runner_returned_type]:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    This method will run in parallel, independently the nb_episode over nb_process.

    In case the agent cannot be cloned using `copy.copy`: nb_process is set to 1

    Note that it restarts completely the :attr:`Runner.backend` and :attr:`Runner.env` if the computation
    is actually performed with more than 1 cores (nb_process > 1)

    It uses the python multiprocess, and especially the :class:`multiprocess.Pool` to perform the computations.
    This implies that all runs are completely independent (they happen in different process) and that the
    memory consumption can be big. Tests may be recommended if the amount of RAM is low.

    It has the same return type as the :func:`Runner.run_sequential`.

    Parameters
    ----------
    nb_episode: ``int``
        Number of episode to simulate

    nb_process: ``int``, optional
        Number of process used to play the nb_episode. Default to 1.

    path_save: ``str``, optional
        If not None, it specifies where to store the data. See the description of this module :mod:`Runner` for
        more information

    env_seeds: ``list``
        An iterable of the seed used for the experiments. By default ``None``, no seeds are set. If provided,
        its size should match ``nb_episode``.

    agent_seeds: ``list``
        An iterable that contains the seed used for the environment. By default ``None`` means no seeds are set.
        If provided, its size should match the ``nb_episode``. The agent will be seeded at the beginning of each
        scenario BEFORE calling `agent.reset()`.

    add_detailed_output: see Runner.run method

    Returns
    -------
    res: ``list``
        List of tuple. Each tuple having 3 elements:

          - "i" unique identifier of the episode (compared to :func:`Runner.run_sequential`, the elements of the
            returned list are not necessarily sorted by this value)
          - "cum_reward" the cumulative reward obtained by the :attr:`Runner.BaseAgent` on this episode i
          - "nb_time_step": the number of time steps played in this episode.
          - "max_ts" : the maximum number of time steps of the chronics
          - "episode_data" : The :class:`EpisodeData` corresponding to this episode run
    """
    if nb_process <= 0:
        raise RuntimeError("Runner: you need at least 1 process to run episodes")
    # the GRID2OP_RUNNER_FORCE_SEQUENTIAL env variable can force the
    # sequential mode regardless of nb_process
    force_sequential = False
    tmp = os.getenv(Runner.FORCE_SEQUENTIAL)
    if tmp is not None:
        force_sequential = int(tmp) > 0
    if nb_process == 1 or (not self.__can_copy_agent) or force_sequential:
        # on windows if i start using sequential, i need to continue using sequential
        # if i start using parallel i need to continue using parallel
        # so i force the usage of the sequential mode
        self.logger.warn(
            "Runner.run_parrallel: number of process set to 1. Failing back into sequential mod."
        )
        return self._run_sequential(
            nb_episode,
            path_save=path_save,
            env_seeds=env_seeds,
            max_iter=max_iter,
            agent_seeds=agent_seeds,
            episode_id=episode_id,
            add_detailed_output=add_detailed_output,
            add_nb_highres_sim=add_nb_highres_sim,
        )
    else:
        self._clean_up()
        nb_process = int(nb_process)
        # round-robin split of the episode ids over the processes
        process_ids = [[] for i in range(nb_process)]
        for i in range(nb_episode):
            if episode_id is None:
                process_ids[i % nb_process].append(i)
            else:
                process_ids[i % nb_process].append(episode_id[i])
        if env_seeds is None:
            seeds_env_res = [None for _ in range(nb_process)]
        else:
            # split the seeds according to the process
            seeds_env_res = [[] for i in range(nb_process)]
            for i in range(nb_episode):
                seeds_env_res[i % nb_process].append(env_seeds[i])
        if agent_seeds is None:
            seeds_agt_res = [None for _ in range(nb_process)]
        else:
            # split the seeds according to the process
            seeds_agt_res = [[] for i in range(nb_process)]
            for i in range(nb_episode):
                seeds_agt_res[i % nb_process].append(agent_seeds[i])
        res = []
        # on linux the runner itself can be sent to the subprocess (fork);
        # elsewhere a fresh copy is rebuilt from its serialized parameters
        if _IS_LINUX:
            lists = [(self,) for _ in enumerate(process_ids)]
        else:
            lists = [(Runner(**self._get_params()),) for _ in enumerate(process_ids)]
        # complete each worker's argument tuple for _aux_one_process_parrallel
        for i, pn in enumerate(process_ids):
            lists[i] = (*lists[i],
                        pn,
                        i,
                        path_save,
                        seeds_env_res[i],
                        seeds_agt_res[i],
                        max_iter,
                        add_detailed_output,
                        add_nb_highres_sim)
        with Pool(nb_process) as p:
            tmp = p.starmap(_aux_one_process_parrallel, lists)
        # flatten the per-process result lists
        for el in tmp:
            res += el
    return res
def _get_params(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Serialize the runner configuration as the keyword arguments needed to
    build an identical :class:`Runner`. Used by :func:`Runner._run_parrallel`
    to spawn one runner copy per worker process on platforms where the
    runner itself cannot be forked.

    Mutable members (chronics kwargs, rewards dict, opponent kwargs, grid
    layout) are deep-copied so worker runners cannot mutate this one.

    Returns
    -------
    res: ``dict``
        Keyword arguments accepted by :func:`Runner.__init__`.
    """
    res = {
        "init_grid_path": self.init_grid_path,
        "init_env_path": self.init_env_path,
        "path_chron": self.path_chron,  # path where chronics of injections are stored
        "name_env": self.name_env,
        "parameters_path": self.parameters_path,
        "names_chronics_to_backend": self.names_chronics_to_backend,
        "actionClass": self.actionClass,
        "observationClass": self.observationClass,
        "rewardClass": self.rewardClass,
        "legalActClass": self.legalActClass,
        "envClass": self.envClass,
        # bugfix: "other_env_kwargs" was previously dropped here, so runners
        # rebuilt for parallel workers silently lost the extra env kwargs
        "other_env_kwargs": self.other_env_kwargs,
        "gridStateclass": self.gridStateclass,
        "backendClass": self.backendClass,
        "backend_kwargs": self._backend_kwargs,
        "agentClass": self.agentClass,
        "agentInstance": self.agentInstance,
        "verbose": self.verbose,
        "gridStateclass_kwargs": copy.deepcopy(self.gridStateclass_kwargs),
        "voltageControlerClass": self.voltageControlerClass,
        "thermal_limit_a": self.thermal_limit_a,
        "max_iter": self.max_iter,
        "other_rewards": copy.deepcopy(self._other_rewards),
        "opponent_space_type": self._opponent_space_type,
        "opponent_action_class": self.opponent_action_class,
        "opponent_class": self.opponent_class,
        "opponent_init_budget": self.opponent_init_budget,
        "opponent_budget_per_ts": self.opponent_budget_per_ts,
        "opponent_budget_class": self.opponent_budget_class,
        "opponent_attack_duration": self.opponent_attack_duration,
        "opponent_attack_cooldown": self.opponent_attack_cooldown,
        "opponent_kwargs": copy.deepcopy(self.opponent_kwargs),
        "grid_layout": copy.deepcopy(self.grid_layout),
        "with_forecast": self.with_forecast,
        "attention_budget_cls": self._attention_budget_cls,
        "kwargs_attention_budget": self._kwargs_attention_budget,
        "has_attention_budget": self._has_attention_budget,
        "logger": self.logger,
        "kwargs_observation": self._kwargs_observation,
        # bugfix: also forward the custom observation backend configuration,
        # which __init__ accepts but was previously not serialized
        "observation_bk_class": self._observation_bk_class,
        "observation_bk_kwargs": self._observation_bk_kwargs,
        "_read_from_local_dir": self._read_from_local_dir,
        "_is_test": self._is_test,
    }
    return res
def _clean_up(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Hook invoked when a call to :func:`Runner.run` terminates; meant to
    close the environment if one has been created. Nothing is kept open at
    this level, hence the no-op body.
    """
    # intentionally a no-op: no resource is owned directly by the runner here
    pass
def run(
    self,
    nb_episode,
    nb_process=1,
    path_save=None,
    max_iter=None,
    pbar=False,
    env_seeds=None,
    agent_seeds=None,
    episode_id=None,
    add_detailed_output=False,
    add_nb_highres_sim=False,
) -> List[runner_returned_type]:
    """
    Main method of the :class:`Runner` class. It will either call :func:`Runner._run_sequential` if "nb_process" is
    1 or :func:`Runner._run_parrallel` if nb_process >= 2.

    Parameters
    ----------
    nb_episode: ``int``
        Number of episode to simulate

    nb_process: ``int``, optional
        Number of process used to play the nb_episode. Default to 1. **NB** Multiprocessing is deactivated
        on windows based platform (it was not fully supported so we decided to remove it)

    path_save: ``str``, optional
        If not None, it specifies where to store the data. See the description of this module :mod:`Runner` for
        more information

    max_iter: ``int``
        Maximum number of iteration you want the runner to perform.

    pbar: ``bool`` or ``type`` or ``object``
        How to display the progress bar, understood as follow:

        - if pbar is ``None`` nothing is done.
        - if pbar is a boolean, tqdm pbar are used, if tqdm package is available and installed on the system
          [if ``true``]. If it's false it's equivalent to pbar being ``None``
        - if pbar is a ``type`` (a class), it is used to build a progress bar at the highest level (episode)
          and the lower levels (step during the episode). If it's a type it must accept the argument "total"
          and "desc" when being built, and the closing is ensured by this method.
        - if pbar is an object (an instance of a class) it is used to make a progress bar at this highest level
          (episode) but not at lower levels (step during the episode)

    env_seeds: ``list``
        An iterable of the seed used for the environment. By default ``None``, no seeds are set. If provided,
        its size should match ``nb_episode``.

    agent_seeds: ``list``
        An iterable that contains the seed used for the environment. By default ``None`` means no seeds are set.
        If provided, its size should match the ``nb_episode``. The agent will be seeded at the beginning of each
        scenario BEFORE calling `agent.reset()`.

    episode_id: ``list``
        For each of the nb_episode you want to compute, it specifies the id of the chronics that will be used.
        By default ``None``, no seeds are set. If provided,
        its size should match ``nb_episode``.

    add_detailed_output: ``bool``
        A flag to add an :class:`EpisodeData` object to the results, containing a lot of information about the run

    add_nb_highres_sim: ``bool``
        Whether to add an estimated number of "high resolution simulator" called performed by the agent (either by
        obs.simulate, or by obs.get_forecast_env or by obs.get_simulator)

    Returns
    -------
    res: ``list``
        List of tuple. Each tuple having 3[4] elements:

          - "i" unique identifier of the episode (compared to :func:`Runner.run_sequential`, the elements of the
            returned list are not necessarily sorted by this value)
          - "cum_reward" the cumulative reward obtained by the :attr:`Runner.Agent` on this episode i
          - "nb_time_step": the number of time steps played in this episode.
          - "episode_data" : [Optional] The :class:`EpisodeData` corresponding to this episode run only
            if `add_detailed_output=True`
          - "add_nb_highres_sim": [Optional] The estimated number of calls to high resolution simulator made
            by the agent

    Examples
    --------
    You can use the runner this way:

    .. code-block: python

        import grid2op
        from grid2op.Runner import Runner
        from grid2op.Agent import RandomAgent

        env = grid2op.make()
        runner = Runner(**env.get_params_for_runner(), agentClass=RandomAgent)
        res = runner.run(nb_episode=1)

    If you would rather to provide an agent instance (and not a class) you can do it this way:

    .. code-block: python

        import grid2op
        from grid2op.Runner import Runner
        from grid2op.Agent import RandomAgent

        env = grid2op.make()
        my_agent = RandomAgent(env.action_space)
        runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=my_agent)
        res = runner.run(nb_episode=1)

    Finally, in the presence of stochastic environments or stochastic agent you might want to set the seeds for
    ensuring reproducible experiments you might want to seed both the environment and your agent. You can do that
    by passing `env_seeds` and `agent_seeds` parameters (on the example below, the agent will be seeded with 42
    and the environment with 0).

    .. code-block: python

        import grid2op
        from grid2op.Runner import Runner
        from grid2op.Agent import RandomAgent

        env = grid2op.make()
        my_agent = RandomAgent(env.action_space)
        runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=my_agent)
        res = runner.run(nb_episode=1, agent_seeds=[42], env_seeds=[0])

    """
    # sanity checks on the arguments before launching anything
    if nb_episode < 0:
        raise RuntimeError("Impossible to run a negative number of scenarios.")

    # every per-episode list (seeds, ids) must match the number of episodes
    if env_seeds is not None:
        if len(env_seeds) != nb_episode:
            raise RuntimeError(
                'You want to compute "{}" run(s) but provide only "{}" different seeds '
                "(environment)."
                "".format(nb_episode, len(env_seeds))
            )
    if agent_seeds is not None:
        if len(agent_seeds) != nb_episode:
            raise RuntimeError(
                'You want to compute "{}" run(s) but provide only "{}" different seeds (agent).'
                "".format(nb_episode, len(agent_seeds))
            )
    if episode_id is not None:
        if len(episode_id) != nb_episode:
            raise RuntimeError(
                'You want to compute "{}" run(s) but provide only "{}" different ids.'
                "".format(nb_episode, len(episode_id))
            )
    if max_iter is not None:
        max_iter = int(max_iter)

    if nb_episode == 0:
        res = []
    else:
        try:
            if nb_process <= 0:
                raise RuntimeError(
                    "Impossible to run using less than 1 process."
                )

            self.__used = True
            if nb_process == 1:
                self.logger.info("Sequential runner used.")
                res = self._run_sequential(
                    nb_episode,
                    path_save=path_save,
                    pbar=pbar,
                    env_seeds=env_seeds,
                    max_iter=max_iter,
                    agent_seeds=agent_seeds,
                    episode_id=episode_id,
                    add_detailed_output=add_detailed_output,
                    add_nb_highres_sim=add_nb_highres_sim,
                )
            else:
                if add_detailed_output and (_IS_WINDOWS or _IS_MACOS):
                    # `Logger.warn` is a deprecated alias of `Logger.warning`
                    self.logger.warning(
                        "Parallel run are not fully supported on windows or macos when "
                        '"add_detailed_output" is True. So we decided '
                        "to fully deactivate them."
                    )
                    res = self._run_sequential(
                        nb_episode,
                        path_save=path_save,
                        pbar=pbar,
                        env_seeds=env_seeds,
                        max_iter=max_iter,
                        agent_seeds=agent_seeds,
                        episode_id=episode_id,
                        add_detailed_output=add_detailed_output,
                        add_nb_highres_sim=add_nb_highres_sim,
                    )
                else:
                    self.logger.info("Parallel runner used.")
                    res = self._run_parrallel(
                        nb_episode,
                        nb_process=nb_process,
                        path_save=path_save,
                        env_seeds=env_seeds,
                        max_iter=max_iter,
                        agent_seeds=agent_seeds,
                        episode_id=episode_id,
                        add_detailed_output=add_detailed_output,
                        add_nb_highres_sim=add_nb_highres_sim,
                    )
        finally:
            # always release what the run created, even on error
            self._clean_up()
    return res
| 54,407 | 41.874704 | 119 | py |
Grid2Op | Grid2Op-master/grid2op/Space/GridObjects.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
This class abstracts the main components of BaseAction, BaseObservation, ActionSpace, and ObservationSpace.
It represents a powergrid (the object in it) in a format completely agnostic to the solver used to compute
the power flows (:class:`grid2op.Backend.Backend`).
See :class:`grid2op.Converter` for a different type of Action / Observation. These can be used to transform
complex :class:`grid2op.Action.Action` or :class:`grid2op.Observation.Observation` into more convenient structures
to manipulate.
"""
import warnings
import copy
import numpy as np
import grid2op
from grid2op.dtypes import dt_int, dt_float, dt_bool
from grid2op.Exceptions import *
from grid2op.Space.space_utils import extract_from_dict, save_to_dict
# TODO tests of these methods and this class in general
class GridObjects:
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Almost every class inherit from this class, so they have its methods and attributes.
Do not attempt to use it outside of grid2op environment.
This class stores in a "Backend agnostic" way some information about the powergrid. All these attributes
are constant throughout an episode and are defined when the backend is loaded by the environment.
It stores information about numbers of objects, and which objects are where, their names, etc.
The classes :class:`grid2op.Action.BaseAction`, :class:`grid2op.Action.ActionSpace`,
:class:`grid2op.Observation.BaseObservation`, :class:`grid2op.Observation.ObservationSpace` and
:class:`grid2op.Backend.Backend` all inherit from this class. This means that each of the above has its own
representation of the powergrid.
Before diving into the technical details on the implementation, you might want to have a look at this
page of the documentation :ref:`graph-encoding-gridgraph` that details why this representation is suitable.
The modeling adopted for describing a powergrid is the following:
- only the main objects of a powergrid are represented. An "object" is either a load (consumption) a generator
(production), an end of a powerline (each powerline have exactly two extremities: "origin" (or)
and "extremity" (ext)).
- every "object" (see above) is connected to a unique substation. Each substation then counts a given (fixed)
number of objects connected to it. [in this platform we don't consider the possibility to build new "objects" as
of today]
For each object, the bus to which it is connected is given in the `*_to_subid` (for
example :attr:`GridObjects.load_to_subid` gives, for each load, the id of the substation to which it is
connected)
We suppose that, at every substation, each object (if connected) can be connected to either "busbar" 1 or
"busbar" 2. This means that, at maximum, there are 2 independent buses for each substation.
With this hypothesis, we can represent (thought experiment) each substation by a vector. This vector has as
many components than the number of objects in the substation (following the previous example, the vector
representing the first substation would have 5 components). And each component of this vector would represent
a fixed element in it. For example, if say, the load with id 1 is connected to the first element, there would be
a unique component saying if the load with id 1 is connected to busbar 1 or busbar 2. For the generators, this
id in this (fictive) vector is indicated in the :attr:`GridObjects.gen_to_sub_pos` vector. For example the first
position of :attr:`GridObjects.gen_to_sub_pos` indicates on which component of the (fictive) vector representing
the
substation 1 to look to know on which bus the first generator is connected.
We define the "topology" as the busbar to which each object is connected: each object being connected to either
busbar 1 or busbar 2, this topology can be represented by a vector of fixed size (and it actually is in
:attr:`grid2op.Observation.BaseObservation.topo_vect` or in :func:`grid2op.Backend.Backend.get_topo_vect`).
There are
multiple ways to make such a vector. We decided to concatenate all the (fictive) vectors described above. This
concatenation represents the actual topology of this powergrid at a given timestep. This class doesn't store this
information (see :class:`grid2op.Observation.BaseObservation` for such purpose).
This entails that:
- the bus to which each object on a substation will be stored in consecutive components of such a vector. For
example, if the first substation of the grid has 5 elements connected to it, then the first 5 elements of
:attr:`grid2op.Observation.BaseObservation.topo_vect` will represent these 5 elements. The number of elements
in each substation is given in :attr:`grid2op.Space.GridObjects.sub_info`.
- the substation are stored in "order": objects of the first substations are represented, then this is the objects
of the second substation etc. So in the example above, the 6th element of
:attr:`grid2op.Observation.BaseObservation.topo_vect` is an object connected to the second substation.
- to know on which position of this "topology vector" we can find the information relative a specific element
it is possible to:
- method 1 (not recommended):
i) retrieve the substation to which this object is connected (for example looking at
:attr:`GridObjects.line_or_to_subid` [l_id] to know on which substation is connected the origin of
powerline with id $l_id$.)
ii) once this substation id is known, compute which are the components of the topological vector that encodes
information about this substation. For example, if the substation id `sub_id` is 4, we a) count the number
of elements in substations with id 0, 1, 2 and 3 (say it's 42) we know, by definition that the substation
4 is encoded in ,:attr:`grid2op.Observation.BaseObservation.topo_vect` starting at component 42 and b)
this
substations has :attr:`GridObjects.sub_info` [sub_id] elements (for the sake of the example say it's 5)
then the end of the vector for substation 4 will be 42+5 = 47. Finally, we got the representation of the
"local topology" of the substation 4 by looking at
:attr:`grid2op.Observation.BaseObservation.topo_vect` [42:47].
iii) retrieve which component of this vector of dimension 5 (remember we assumed substation 4 had 5 elements)
encodes information about the origin end of the line with id `l_id`. This information is given in
:attr:`GridObjects.line_or_to_sub_pos` [l_id]. This is a number between 0 and 4, say it's 3. 3 being
the index of the object in the substation)
- method 2 (not recommended): all of the above is stored (for the same powerline) in the
:attr:`GridObjects.line_or_pos_topo_vect` [l_id]. In the example above, we will have:
:attr:`GridObjects.line_or_pos_topo_vect` [l_id] = 45 (=42+3:
42 being the index on which the substation started and 3 being the index of the object in the substation)
- method 3 (recommended): use any of the function that computes it for you:
:func:`grid2op.Observation.BaseObservation.state_of` is such an interesting method. The two previous methods
"method 1" and "method 2" were presented as a way to give detailed and "concrete" example on how the
modeling of the powergrid work.
For a given powergrid, this object should be initialized once in the :class:`grid2op.Backend.Backend` when
the first call to :func:`grid2op.Backend.Backend.load_grid` is performed. In particular the following attributes
must necessarily be defined (see above for a detailed description of some of the attributes):
- :attr:`GridObjects.name_load`
- :attr:`GridObjects.name_gen`
- :attr:`GridObjects.name_line`
- :attr:`GridObjects.name_sub`
- :attr:`GridObjects.name_storage`
- :attr:`GridObjects.n_line`
- :attr:`GridObjects.n_gen`
- :attr:`GridObjects.n_load`
- :attr:`GridObjects.n_sub`
- :attr:`GridObjects.n_storage`
- :attr:`GridObjects.sub_info`
- :attr:`GridObjects.dim_topo`
- :attr:`GridObjects.load_to_subid`
- :attr:`GridObjects.gen_to_subid`
- :attr:`GridObjects.line_or_to_subid`
- :attr:`GridObjects.line_ex_to_subid`
- :attr:`GridObjects.storage_to_subid`
Optionally, to have more control on the internal grid2op representation, you can also set:
- :attr:`GridObjects.load_to_sub_pos`
- :attr:`GridObjects.gen_to_sub_pos`
- :attr:`GridObjects.line_or_to_sub_pos`
- :attr:`GridObjects.line_ex_to_sub_pos`
- :attr:`GridObjects.storage_to_sub_pos`
A call to the function :func:`GridObjects._compute_pos_big_topo_cls` allow to compute the \*_pos_topo_vect attributes
(for example :attr:`GridObjects.line_ex_pos_topo_vect`) can be computed from the above data:
- :attr:`GridObjects.load_pos_topo_vect`
- :attr:`GridObjects.gen_pos_topo_vect`
- :attr:`GridObjects.line_or_pos_topo_vect`
- :attr:`GridObjects.line_ex_pos_topo_vect`
- :attr:`GridObjects.storage_pos_topo_vect`
Note that if you want to model an environment with unit commitment or redispatching capabilities, you also need
to provide the following attributes:
- :attr:`GridObjects.gen_type`
- :attr:`GridObjects.gen_pmin`
- :attr:`GridObjects.gen_pmax`
- :attr:`GridObjects.gen_redispatchable`
- :attr:`GridObjects.gen_max_ramp_up`
- :attr:`GridObjects.gen_max_ramp_down`
- :attr:`GridObjects.gen_min_uptime`
- :attr:`GridObjects.gen_min_downtime`
- :attr:`GridObjects.gen_cost_per_MW`
- :attr:`GridObjects.gen_startup_cost`
- :attr:`GridObjects.gen_shutdown_cost`
- :attr:`GridObjects.gen_renewable`
These information are loaded using the :func:`grid2op.Backend.Backend.load_redispacthing_data` method.
**NB** it does not store any information about the current state of the powergrid. It stores information that
cannot be modified by the BaseAgent, the Environment or any other entity.
Attributes
----------
n_line: :class:`int`
number of powerlines in the powergrid [*class attribute*]
n_gen: :class:`int`
number of generators in the powergrid [*class attribute*]
n_load: :class:`int`
number of loads in the powergrid. [*class attribute*]
n_sub: :class:`int`
number of substations in the powergrid. [*class attribute*]
n_storage: :class:`int`
number of storage units in the powergrid. [*class attribute*]
dim_topo: :class:`int`
The total number of objects in the powergrid.
This is also the dimension of the "topology vector" defined above. [*class attribute*]
sub_info: :class:`numpy.ndarray`, dtype:int
for each substation, gives the number of elements connected to it [*class attribute*]
load_to_subid: :class:`numpy.ndarray`, dtype:int
for each load, gives the id the substation to which it is connected. For example,
:attr:`GridObjects.load_to_subid` [load_id] gives the id of the substation to which the load of id
`load_id` is connected. [*class attribute*]
gen_to_subid: :class:`numpy.ndarray`, dtype:int
for each generator, gives the id the substation to which it is connected [*class attribute*]
line_or_to_subid: :class:`numpy.ndarray`, dtype:int
for each line, gives the id the substation to which its "origin" end is connected [*class attribute*]
line_ex_to_subid: :class:`numpy.ndarray`, dtype:int
for each line, gives the id the substation to which its "extremity" end is connected [*class attribute*]
storage_to_subid: :class:`numpy.ndarray`, dtype:int
for each storage unit, gives the id the substation to which it is connected [*class attribute*]
load_to_sub_pos: :class:`numpy.ndarray`, dtype:int
Suppose you represent the topology of the substation *s* with a vector (each component of this vector will
represent an object connected to this substation). This vector has, by definition the size
:attr:`GridObject.sub_info` [s]. `load_to_sub_pos` tells which component of this vector encodes the
current load. Suppose that load of id `l` is connected to the substation of id `s` (this information is
stored in :attr:`GridObjects.load_to_subid` [l]), then if you represent the topology of the substation
`s` with a vector `sub_topo_vect`, then "`sub_topo_vect` [ :attr:`GridObjects.load_to_subid` [l] ]" will encode
on which bus the load of id `l` is stored. [*class attribute*]
gen_to_sub_pos: :class:`numpy.ndarray`, dtype:int
same as :attr:`GridObjects.load_to_sub_pos` but for generators. [*class attribute*]
line_or_to_sub_pos: :class:`numpy.ndarray`, dtype:int
same as :attr:`GridObjects.load_to_sub_pos` but for "origin" end of powerlines. [*class attribute*]
line_ex_to_sub_pos: :class:`numpy.ndarray`, dtype:int
same as :attr:`GridObjects.load_to_sub_pos` but for "extremity" end of powerlines. [*class attribute*]
storage_to_sub_pos: :class:`numpy.ndarray`, dtype:int
same as :attr:`GridObjects.load_to_sub_pos` but for storage units. [*class attribute*]
load_pos_topo_vect: :class:`numpy.ndarray`, dtype:int
The topology if the entire grid is given by a vector, say *topo_vect* of size
:attr:`GridObjects.dim_topo`. For a given load of id *l*,
:attr:`GridObjects.load_to_sub_pos` [l] is the index
of the load *l* in the vector :attr:`grid2op.BaseObservation.BaseObservation.topo_vect` .
This means that, if
"`topo_vect` [ :attr:`GridObjects.load_pos_topo_vect` \[l\] ]=2"
then load of id *l* is connected to the second bus of the substation. [*class attribute*]
gen_pos_topo_vect: :class:`numpy.ndarray`, dtype:int
same as :attr:`GridObjects.load_pos_topo_vect` but for generators. [*class attribute*]
line_or_pos_topo_vect: :class:`numpy.ndarray`, dtype:int
same as :attr:`GridObjects.load_pos_topo_vect` but for "origin" end of powerlines. [*class attribute*]
line_ex_pos_topo_vect: :class:`numpy.ndarray`, dtype:int
same as :attr:`GridObjects.load_pos_topo_vect` but for "extremity" end of powerlines. [*class attribute*]
storage_pos_topo_vect: :class:`numpy.ndarray`, dtype:int
same as :attr:`GridObjects.load_pos_topo_vect` but for storage units. [*class attribute*]
name_load: :class:`numpy.ndarray`, dtype:str
ordered names of the loads in the grid. [*class attribute*]
name_gen: :class:`numpy.ndarray`, dtype:str
ordered names of the productions in the grid. [*class attribute*]
name_line: :class:`numpy.ndarray`, dtype:str
ordered names of the powerline in the grid. [*class attribute*]
name_sub: :class:`numpy.ndarray`, dtype:str
ordered names of the substation in the grid [*class attribute*]
name_storage: :class:`numpy.ndarray`, dtype:str
ordered names of the storage units in the grid [*class attribute*]
attr_list_vect: ``list``, static
List of string. It represents the attributes that will be stored to/from vector when the BaseObservation is
converted
to/from it. This parameter is also used to compute automatically :func:`GridObjects.dtype` and
:func:`GridObjects.shape` as well as :func:`GridObjects.size`. If this class is derived, then it's really
important that this vector is properly set. All the attributes with the name on this vector should have
consistently the same size and shape, otherwise, some methods will not behave as expected. [*class attribute*]
_vectorized: :class:`numpy.ndarray`, dtype:float
The representation of the GridObject as a vector. See the help of :func:`GridObjects.to_vect` and
:func:`GridObjects.from_vect` for more information. **NB** for performance reason, the conversion of the
internal
representation to a vector is not performed at any time. It is only performed when :func:`GridObjects.to_vect`
is
called the first time. Otherwise, this attribute is set to ``None``. [*class attribute*]
gen_type: :class:`numpy.ndarray`, dtype:str
Type of the generators, among: "solar", "wind", "hydro", "thermal" and "nuclear". Optional. Used
for unit commitment problems or redispacthing action. [*class attribute*]
gen_pmin: :class:`numpy.ndarray`, dtype:float
Minimum active power production needed for a generator to work properly. Optional. Used
for unit commitment problems or redispacthing action. [*class attribute*]
gen_pmax: :class:`numpy.ndarray`, dtype:float
Maximum active power production needed for a generator to work properly. Optional. Used
for unit commitment problems or redispacthing action. [*class attribute*]
gen_redispatchable: :class:`numpy.ndarray`, dtype:bool
For each generator, it says if the generator is dispatchable or not. Optional. Used
for unit commitment problems or redispacthing action. [*class attribute*]
gen_max_ramp_up: :class:`numpy.ndarray`, dtype:float
Maximum active power variation possible between two consecutive timestep for each generator:
a redispatching action
on generator `g_id` cannot be above :attr:`GridObjects.gen_ramp_up_max` [`g_id`]. Optional. Used
for unit commitment problems or redispacthing action. [*class attribute*]
gen_max_ramp_down: :class:`numpy.ndarray`, dtype:float
Minimum active power variation possible between two consecutive timesteps for each generator: a redispatching
action
on generator `g_id` cannot be below :attr:`GridObjects.gen_ramp_down_min` [`g_id`]. Optional. Used
for unit commitment problems or redispacthing action. [*class attribute*]
gen_min_uptime: :class:`numpy.ndarray`, dtype:float
The minimum time (expressed in the number of timesteps) a generator needs to be turned on: it's not possible to
turn off generator `gen_id` that has been turned on less than `gen_min_time_on` [`gen_id`] timesteps
ago. Optional. Used
for unit commitment problems or redispacthing action. [*class attribute*]
gen_min_downtime: :class:`numpy.ndarray`, dtype:float
The minimum time (expressed in the number of timesteps) a generator needs to be turned off: it's not possible to
turn on generator `gen_id` that has been turned off less than `gen_min_time_on` [`gen_id`] timesteps
ago. Optional. Used
for unit commitment problems or redispacthing action. [*class attribute*]
gen_cost_per_MW: :class:`numpy.ndarray`, dtype:float
For each generator, it gives the "operating cost", eg the cost, in terms of "used currency" for the production
of one MW with this generator, if it is already turned on. It's a positive real number. It's the marginal cost
for each MW. Optional. Used
for unit commitment problems or redispacthing action. [*class attribute*]
gen_startup_cost: :class:`numpy.ndarray`, dtype:float
The cost to start a generator. It's a positive real number. Optional. Used
for unit commitment problems or redispacthing action. [*class attribute*]
gen_shutdown_cost: :class:`numpy.ndarray`, dtype:float
The cost to shut down a generator. It's a positive real number. Optional. Used
for unit commitment problems or redispacthing action. [*class attribute*]
gen_renewable: :class:`numpy.ndarray`, dtype:bool
Whether each generator is from a renewable energy sources (=can be curtailed). Optional. Used
for unit commitment problems or redispacthing action. [*class attribute*]
redispatching_unit_commitment_availble: ``bool``
Does the current grid allow for redispatching and / or unit commit problem. If not, any attempt to use it
will raise a :class:`grid2op.Exceptions.UnitCommitorRedispachingNotAvailable` error. [*class attribute*]
For an environment to be compatible with this feature, you need to set up, when loading the backend:
- :attr:`GridObjects.gen_type`
- :attr:`GridObjects.gen_pmin`
- :attr:`GridObjects.gen_pmax`
- :attr:`GridObjects.gen_redispatchable`
- :attr:`GridObjects.gen_max_ramp_up`
- :attr:`GridObjects.gen_max_ramp_down`
- :attr:`GridObjects.gen_min_uptime`
- :attr:`GridObjects.gen_min_downtime`
- :attr:`GridObjects.gen_cost_per_MW`
- :attr:`GridObjects.gen_startup_cost`
- :attr:`GridObjects.gen_shutdown_cost`
- :attr:`GridObjects.gen_renewable`
grid_layout: ``dict`` or ``None``
The layout of the powergrid in the form of a dictionary with keys the substation name, and value a tuple of
the coordinate of this substation. If no layout are provided, it defaults to ``None`` [*class attribute*]
shunts_data_available: ``bool``
Whether or not the backend support the shunt data. [*class attribute*]
n_shunt: ``int`` or ``None``
Number of shunts on the grid. It might be ``None`` if the backend does not support shunts. [*class attribute*]
name_shunt: ``numpy.ndarray``, dtype:``str`` or ``None``
Name of each shunt on the grid, or ``None`` if the backend does not support shunts. [*class attribute*]
shunt_to_subid: :class:`numpy.ndarray`, dtype:int
for each shunt (if supported), gives the id the substation to which it is connected [*class attribute*]
storage_type:
type of each storage units, one of "battery" or "pumped storage"
storage_Emax:
maximum energy the storage unit can store, in MWh
storage_Emin:
minimum energy in the storage unit, in MWh
At any given point, the state of charge (obs.storage_charge) should be >= than `storage_Emin`. This might
not be the case if there are losses on your storage units. In this case, the charge can fall below
this (but the charge will never be < 0.)
storage_max_p_prod:
maximum power the storage unit can produce (in MW)
storage_max_p_absorb :
maximum power the storage unit can absorb (in MW)
storage_marginal_cost:
Cost of usage of the storage unit, when charged or discharged, in $/MWh produced (or absorbed)
storage_loss:
The self discharged loss of each storage unit (in MW). It is applicable for each step and each
storage unit where the state of charge is > 0.
Due to this loss, the storage state of charge can fall below its minimum allowed capacity `storage_Emin`.
storage_charging_efficiency:
The efficiency when the storage unit is charging (how much will the capacity increase when the
unit is charging) between 0. and 1.
storage_discharging_efficiency:
The efficiency when the storage unit is discharging (how much will the capacity decrease
to generate a 1MWh of energy on the grid side) between 0. and 1.
grid_objects_types: ``matrix``
Give the information about each element of the "topo_vect" vector. It is an "easy" way to retrieve at
which element (side of a power, load, generator, storage units) a given component of the "topology vector"
is referring to.
For more information, you can consult the :ref:`graph-encoding-gridgraph` of the documentation
or the getting started notebook about the observation and the action for more information.
dim_alarms = 0 # TODO
alarms_area_names = [] # name of each area # TODO
alarms_lines_area = {} # for each lines of the grid, gives on which area(s) it is # TODO
alarms_area_lines = [] # for each area in the grid, gives which powerlines it contains # TODO
# TODO specify the unit of redispatching data MWh, $/MW etc.
"""
# oldest grid2op "flavour" this class knows how to stay compatible with
BEFORE_COMPAT_VERSION = "neurips_2020_compat"
# grid2op version that generated this class (used for backward-compatibility checks)
glop_version = grid2op.__version__
_PATH_ENV = None  # especially do not modify that

# column indices of the :attr:`GridObjects.grid_objects_types` matrix
SUB_COL = 0
LOA_COL = 1
GEN_COL = 2
LOR_COL = 3
LEX_COL = 4
STORAGE_COL = 5

# attributes converted to / from vector and json representations
attr_list_vect = None
attr_list_set = {}
attr_list_json = []
attr_nan_list_set = set()

# name of the objects
env_name = "unknown"
name_load = None
name_gen = None
name_line = None
name_sub = None
name_storage = None

# number of elements of each kind (-1 means "not initialized yet")
n_gen = -1
n_load = -1
n_line = -1
n_sub = -1
n_storage = -1

# number of elements per substation and total size of the topology vector
sub_info = None
dim_topo = -1

# to which substation is connected each element
load_to_subid = None
gen_to_subid = None
line_or_to_subid = None
line_ex_to_subid = None
storage_to_subid = None

# which index has this element in the substation vector
load_to_sub_pos = None
gen_to_sub_pos = None
line_or_to_sub_pos = None
line_ex_to_sub_pos = None
storage_to_sub_pos = None

# which index has this element in the topology vector
load_pos_topo_vect = None
gen_pos_topo_vect = None
line_or_pos_topo_vect = None
line_ex_pos_topo_vect = None
storage_pos_topo_vect = None

# "convenient" way to retrieve information of the grid
grid_objects_types = None
# to which substation each element of the topovect is connected
_topo_vect_to_sub = None

# list of attribute to convert it from/to a vector (lazily built cache)
_vectorized = None

# for redispatching / unit commitment: names of the optional generator attributes
_li_attr_disp = [
    "gen_type",
    "gen_pmin",
    "gen_pmax",
    "gen_redispatchable",
    "gen_max_ramp_up",
    "gen_max_ramp_down",
    "gen_min_uptime",
    "gen_min_downtime",
    "gen_cost_per_MW",
    "gen_startup_cost",
    "gen_shutdown_cost",
    "gen_renewable",
]
# python type expected for each entry of _li_attr_disp, kept in the same order
_type_attr_disp = [
    str,
    float,
    float,
    bool,
    float,
    float,
    int,
    int,
    float,
    float,
    float,
    bool,
]

# redispatch data, not available in all environment
redispatching_unit_commitment_availble = False
gen_type = None
gen_pmin = None
gen_pmax = None
gen_redispatchable = None
gen_max_ramp_up = None
gen_max_ramp_down = None
gen_min_uptime = None
gen_min_downtime = None
gen_cost_per_MW = None  # marginal cost (in currency / (power.step) and not in $/(MW.h) it would be $ / (MW.5mins) )
gen_startup_cost = None  # start cost (in currency)
gen_shutdown_cost = None  # shutdown cost (in currency)
gen_renewable = None

# storage unit static data
storage_type = None
storage_Emax = None
storage_Emin = None
storage_max_p_prod = None
storage_max_p_absorb = None
storage_marginal_cost = None
storage_loss = None
storage_charging_efficiency = None
storage_discharging_efficiency = None

# grid layout (substation name -> coordinates), or None when not provided
grid_layout = None

# shunt data, not available in every backend
shunts_data_available = False
n_shunt = None
name_shunt = None
shunt_to_subid = None

# alarm / alert: "zonal", "by_line" or None (the two features are mutually exclusive)
assistant_warning_type = None

# alarm feature
# dimension of the alarm "space" (number of alarm that can be raised at each step)
dim_alarms = 0  # TODO
alarms_area_names = []  # name of each area  # TODO
alarms_lines_area = (
    {}
)  # for each lines of the grid, gives on which area(s) it is  # TODO
alarms_area_lines = (
    []
)  # for each area in the grid, gives which powerlines it contains  # TODO

# alert feature
# dimension of the alert "space" (number of alerts that can be raised at each step)
dim_alerts = 0  # TODO
alertable_line_names = []  # name of each line to produce an alert on  # TODO
alertable_line_ids = []
    def __init__(self):
        """The grid description lives in class attributes; instances carry no construction-time state."""
        pass
@classmethod
def tell_dim_alarm(cls, dim_alarms):
if cls.dim_alarms != 0:
# number of alarms has already been set, i issue a warning
warnings.warn(
"You will change the number of dimensions of the alarm. This might cause trouble "
"if you environment is read back. We strongly recommend NOT to do this."
)
if dim_alarms and cls.assistant_warning_type == "by_line":
raise Grid2OpException("Impossible to set both alarm and alert for the same environment.")
cls.dim_alarms = dim_alarms
if dim_alarms:
cls.assistant_warning_type = "zonal"
@classmethod
def tell_dim_alert(cls, dim_alerts):
if cls.dim_alerts != 0:
# number of alerts has already been set, i issue a warning
warnings.warn(
"You will change the number of dimensions of the alert. This might cause trouble "
"if you environment is read back. We strongly recommend NOT to do this."
)
if dim_alerts and cls.assistant_warning_type == "zonal":
raise Grid2OpException("Impossible to set both alarm and alert for the same environment.")
cls.dim_alerts = dim_alerts
if dim_alerts:
cls.assistant_warning_type = "by_line"
    @classmethod
    def _clear_class_attribute(cls):
        """
        INTERNAL

        Reset every class-level attribute of ``cls`` back to its default value.
        The assignments below mirror, one for one, the class-level declarations of
        this class; the two lists must be kept in sync when attributes are added.
        """
        cls.glop_version = grid2op.__version__
        cls._PATH_ENV = None
        # column indices in `grid_objects_types`
        cls.SUB_COL = 0
        cls.LOA_COL = 1
        cls.GEN_COL = 2
        cls.LOR_COL = 3
        cls.LEX_COL = 4
        cls.STORAGE_COL = 5
        cls.attr_list_vect = None
        cls.attr_list_set = {}
        cls.attr_list_json = []
        cls.attr_nan_list_set = set()
        # class been init
        # __is_init = False
        # name of the objects
        cls.env_name = "unknown"
        cls.name_load = None
        cls.name_gen = None
        cls.name_line = None
        cls.name_sub = None
        cls.name_storage = None
        cls.n_gen = -1
        cls.n_load = -1
        cls.n_line = -1
        cls.n_sub = -1
        cls.n_storage = -1
        cls.sub_info = None
        cls.dim_topo = -1
        # to which substation is connected each element
        cls.load_to_subid = None
        cls.gen_to_subid = None
        cls.line_or_to_subid = None
        cls.line_ex_to_subid = None
        cls.storage_to_subid = None
        # which index has this element in the substation vector
        cls.load_to_sub_pos = None
        cls.gen_to_sub_pos = None
        cls.line_or_to_sub_pos = None
        cls.line_ex_to_sub_pos = None
        cls.storage_to_sub_pos = None
        # which index has this element in the topology vector
        cls.load_pos_topo_vect = None
        cls.gen_pos_topo_vect = None
        cls.line_or_pos_topo_vect = None
        cls.line_ex_pos_topo_vect = None
        cls.storage_pos_topo_vect = None
        # "convenient" way to retrieve information of the grid
        cls.grid_objects_types = None
        # to which substation each element of the topovect is connected
        cls._topo_vect_to_sub = None
        # list of attribute to convert it from/to a vector
        cls._vectorized = None
        # for redispatching / unit commitment
        cls._li_attr_disp = [
            "gen_type",
            "gen_pmin",
            "gen_pmax",
            "gen_redispatchable",
            "gen_max_ramp_up",
            "gen_max_ramp_down",
            "gen_min_uptime",
            "gen_min_downtime",
            "gen_cost_per_MW",
            "gen_startup_cost",
            "gen_shutdown_cost",
            "gen_renewable",
        ]
        cls._type_attr_disp = [
            str,
            float,
            float,
            bool,
            float,
            float,
            int,
            int,
            float,
            float,
            float,
            bool,
        ]
        # redispatch data, not available in all environment
        cls.redispatching_unit_commitment_availble = False
        cls.gen_type = None
        cls.gen_pmin = None
        cls.gen_pmax = None
        cls.gen_redispatchable = None
        cls.gen_max_ramp_up = None
        cls.gen_max_ramp_down = None
        cls.gen_min_uptime = None
        cls.gen_min_downtime = None
        cls.gen_cost_per_MW = None  # marginal cost (in currency / (power.step) and not in $/(MW.h) it would be $ / (MW.5mins) )
        cls.gen_startup_cost = None  # start cost (in currency)
        cls.gen_shutdown_cost = None  # shutdown cost (in currency)
        cls.gen_renewable = None
        # storage unit static data
        cls.storage_type = None
        cls.storage_Emax = None
        cls.storage_Emin = None
        cls.storage_max_p_prod = None
        cls.storage_max_p_absorb = None
        cls.storage_marginal_cost = None
        cls.storage_loss = None
        cls.storage_charging_efficiency = None
        cls.storage_discharging_efficiency = None
        # grid layout
        cls.grid_layout = None
        # shunt data, not available in every backend
        cls.shunts_data_available = False
        cls.n_shunt = None
        cls.name_shunt = None
        cls.shunt_to_subid = None
        # alarm / alert
        cls.assistant_warning_type = None
        # alarms
        cls.dim_alarms = 0
        cls.alarms_area_names = []
        cls.alarms_lines_area = {}
        cls.alarms_area_lines = []
        # alerts
        cls.dim_alerts = 0
        cls.alertable_line_names = []
        cls.alertable_line_ids = []
@classmethod
def _update_value_set(cls):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Update the class attribute `attr_list_vect_set` from `attr_list_vect`
"""
cls.attr_list_set = set(cls.attr_list_vect)
def _raise_error_attr_list_none(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Raise a "NotImplementedError" if :attr:`GridObjects.attr_list_vect` is not defined.
Raises
-------
``NotImplementedError``
"""
if self.attr_list_vect is None:
raise IncorrectNumberOfElements(
"attr_list_vect attribute is not defined for class {}. "
"It is not possible to convert it from/to a vector, "
"nor to know its size, shape or dtype.".format(type(self))
)
def _get_array_from_attr_name(self, attr_name):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
This function returns the proper attribute vector that can be inspected in the
:func:`GridObject.shape`, :func:`GridObject.size`, :func:`GridObject.dtype`,
:func:`GridObject.from_vect` and :func:`GridObject.to_vect` method.
If this function is overloaded, then the _assign_attr_from_name must be too.
Parameters
----------
attr_name: ``str``
Name of the attribute to inspect or set
Returns
-------
res: ``numpy.ndarray``
The attribute corresponding the name, flatten as a 1d vector.
"""
return np.array(getattr(self, attr_name)).flatten()
def to_vect(self):
"""
Convert this instance of GridObjects to a numpy ndarray.
The size of the array is always the same and is determined by the :func:`GridObject.size` method.
**NB**: in case the class GridObjects is derived,
either :attr:`GridObjects.attr_list_vect` is properly defined for the derived class, or this function must be
redefined.
Returns
-------
res: ``numpy.ndarray``
The representation of this action as a flat numpy ndarray
Examples
--------
It is mainly used for converting Observation of Action to vector:
.. code-block:: python
import grid2op
env = grid2op.make()
# for an observation:
obs = env.reset()
obs_as_vect = obs.to_vect()
# for an action
act = env.action_space.sample()
ac_as_vect = act.to_vec()
"""
if self._vectorized is None:
self._raise_error_attr_list_none()
li_vect = [
self._get_array_from_attr_name(el).astype(dt_float)
for el in self.attr_list_vect
]
if li_vect:
self._vectorized = np.concatenate(li_vect)
else:
self._vectorized = np.array([], dtype=dt_float)
return self._vectorized
def to_json(self, convert=True):
"""
Convert this instance of GridObjects to a dictionary that can be json serialized.
convert: do you convert the numpy types to standard python list (might take lots of time)
TODO doc and example
"""
# TODO optimization for action or observation, to reduce json size, for example using the
# action._modif_inj or action._modif_set_bus etc.
# for observation this could be using the default values for obs.line_status (always true) etc.
# or even storing the things in [id, value] for these types of attributes (time_before_cooldown_line,
# time_before_cooldown_sub, time_next_maintenance, duration_next_maintenance etc.)
res = {}
for attr_nm in self.attr_list_vect + self.attr_list_json:
res[attr_nm] = self._get_array_from_attr_name(attr_nm)
if convert:
self._convert_to_json(res) # TODO !
return res
def from_json(self, dict_):
"""
This transform an gridobject (typically an action or an observation) serialized in json format
to the corresponding grid2op action / observation (subclass of grid2op.Action.BaseAction
or grid2op.Observation.BaseObservation)
Parameters
----------
dict_
Returns
-------
"""
# TODO optimization for action or observation, to reduce json size, for example using the see `to_json`
all_keys = self.attr_list_vect + self.attr_list_json
for key, array_ in dict_.items():
if key not in all_keys:
raise AmbiguousAction(f'Impossible to recognize the key "{key}"')
my_attr = getattr(self, key)
if isinstance(my_attr, np.ndarray):
# the regular instance is an array, so i just need to assign the right values to it
my_attr[:] = array_
else:
# normal values is a scalar. So i need to convert the array received as a scalar, and
# convert it to the proper type
type_ = type(my_attr)
setattr(self, key, type_(array_[0]))
def _convert_to_json(self, dict_):
for attr_nm in self.attr_list_vect + self.attr_list_json:
tmp = dict_[attr_nm]
dtype = tmp.dtype
if dtype == dt_float:
dict_[attr_nm] = [float(el) for el in tmp]
elif dtype == dt_int:
dict_[attr_nm] = [int(el) for el in tmp]
elif dtype == dt_bool:
dict_[attr_nm] = [bool(el) for el in tmp]
def shape(self):
"""
The shapes of all the components of the action, mainly used for gym compatibility is the shape of all
part of the action.
It is mainly used to know of which "sub spaces the action space and observation space are made of, but
you can also directly use it on an observation or an action.
It returns a numpy integer array.
This function must return a vector from which the sum is equal to the return value of "size()".
The shape vector must have the same number of components as the return value of the :func:`GridObjects.dtype()`
vector.
**NB**: in case the class GridObjects is derived,
either :attr:`GridObjects.attr_list_vect` is properly defined for the derived class, or this function must be
redefined.
Returns
-------
res: ``numpy.ndarray``
The shape of the :class:`GridObjects`
Examples
--------
It is mainly used to know of which "sub spaces the action space and observation space are made of.
.. code-block:: python
import grid2op
env = grid2op.make()
# for an observation:
obs_space_shapes = env.observation_space.shape()
# for an action
act_space_shapes = env.action_space.shape()
"""
self._raise_error_attr_list_none()
res = np.array(
[self._get_array_from_attr_name(el).shape[0] for el in self.attr_list_vect]
).astype(dt_int)
return res
def dtype(self):
"""
The types of the components of the GridObjects, mainly used for gym compatibility is the shape of all part
of the action.
It is mainly used to know of which types each "sub spaces" the action space and observation space are made of,
but you can also directly use it on an observation or an action.
It is a numpy array of objects.
The dtype vector must have the same number of components as the return value of the :func:`GridObjects.shape`
vector.
**NB**: in case the class GridObjects is derived,
either :attr:`GridObjects.attr_list_vect` is properly defined for the derived class, or this function must be
redefined.
Returns
-------
res: ``numpy.ndarray``
The dtype of the :class:`GridObjects`
Examples
--------
It is mainly used to know of which "sub spaces the action space and observation space are made of.
.. code-block:: python
import grid2op
env = grid2op.make()
# for an observation:
obs_space_types = env.observation_space.dtype()
# for an action
act_space_types = env.action_space.dtype()
"""
self._raise_error_attr_list_none()
res = np.array(
[self._get_array_from_attr_name(el).dtype for el in self.attr_list_vect]
)
return res
def _assign_attr_from_name(self, attr_nm, vect):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Assign the proper attributes with name 'attr_nm' with the value of the vector vect
If this function is overloaded, then the _get_array_from_attr_name must be too.
Parameters
----------
attr_nm
vect:
TODO doc : documentation and example
"""
tmp = getattr(self, attr_nm)
if isinstance(tmp, (dt_bool, dt_int, dt_float, float, int, bool)):
if isinstance(vect, np.ndarray):
setattr(self, attr_nm, vect[0])
else:
setattr(self, attr_nm, vect)
else:
tmp[:] = vect
    def check_space_legit(self):
        """Hook for subclasses to validate the space when loading from a vector; the base implementation accepts everything."""
        pass
    def from_vect(self, vect, check_legit=True):
        """
        Convert a GridObjects, represented as a vector, into an GridObjects object.

        **NB**: in case the class GridObjects is derived,
        either :attr:`GridObjects.attr_list_vect` is properly defined for the derived class, or this function must be
        redefined.

        It is recommended to use it from the action_space and the observation_space exclusively.

        Only the size is checked. If it does not match, an :class:`grid2op.Exceptions.AmbiguousAction` is thrown.
        Otherwise the component of the vector are coerced into the proper type silently.

        It may results in an non deterministic behaviour if the input vector is not a real action, or cannot be
        converted to one.

        Parameters
        ----------
        vect: ``numpy.ndarray``
            A vector representing an BaseAction.

        check_legit: ``bool``
            Whether to call :func:`GridObjects.check_space_legit` once all attributes are assigned.

        Examples
        --------
        It is mainly used for converting back vector representing action or observation into "grid2op" action
        or observation. **NB** You should use it only with the "env.action_space" and "env.observation_space"

        .. code-block:: python

            import grid2op
            env = grid2op.make()

            # get the vector representation of an observation:
            obs = env.reset()
            obs_as_vect = obs.to_vect()

            # convert it back to an observation (which will be equal to the first one)
            obs_cpy = env.observation_space.from_vect(obs_as_vect)

            # get the vector representation of an action:
            act = env.action_space.sample()
            act_as_vect = act.to_vec()

            # convert it back to an action (which will be equal to the first one)
            act_cpy = env.action_space.from_vect(act_as_vect)
        """
        # the total length must match exactly the sum of the per-attribute shapes
        if vect.shape[0] != self.size():
            raise IncorrectNumberOfElements(
                "Incorrect number of elements found while load a GridObjects "
                "from a vector. Found {} elements instead of {}"
                "".format(vect.shape[0], self.size())
            )
        # everything travels as float; each slice is cast back to its dtype below
        try:
            vect = np.array(vect).astype(dt_float)
        except Exception as exc_:
            raise EnvError(
                "Impossible to convert the input vector to a floating point numpy array "
                "with error:\n"
                '"{}".'.format(exc_)
            )
        self._raise_error_attr_list_none()
        # walk the vector, slicing one attribute at a time in the order of attr_list_vect
        prev_ = 0
        for attr_nm, sh, dt in zip(self.attr_list_vect, self.shape(), self.dtype()):
            tmp = vect[prev_ : (prev_ + sh)]

            # TODO a flag that says "default Nan" for example for when attributes are initialized with
            # nan
            # if np.any(~np.isfinite(tmp)) and default_nan:
            #     raise NonFiniteElement("None finite number in from_vect detected")

            # attributes listed in attr_nan_list_set are allowed to carry NaN / inf
            if attr_nm not in type(self).attr_nan_list_set and np.any(
                ~np.isfinite(tmp)
            ):
                raise NonFiniteElement("None finite number in from_vect detected")

            try:
                tmp = tmp.astype(dt)
            except Exception as exc_:
                raise EnvError(
                    'Impossible to convert the input vector to its type ({}) for attribute "{}" '
                    "with error:\n"
                    '"{}".'.format(dt, attr_nm, exc_)
                )
            self._assign_attr_from_name(attr_nm, tmp)
            prev_ += sh

        if check_legit:
            self.check_space_legit()

        self._post_process_from_vect()
    def _post_process_from_vect(self):
        """Hook called at the very end of :func:`GridObjects.from_vect`; subclasses may override it to post-process the decoded attributes."""
        pass
def size(self):
"""
When the action / observation is converted to a vector, this method return its size.
NB that it is a requirement that converting an GridObjects gives a vector of a fixed size throughout a training.
The size of an object if constant, but more: for a given environment the size of each action or the size
of each observations is constant. This allows us to also define the size of the "action_space" and
"observation_space": this method also applies to these spaces (see the examples bellow).
**NB**: in case the class GridObjects is derived,
either :attr:`GridObjects.attr_list_vect` is properly defined for the derived class, or this function must be
redefined.
Returns
-------
size: ``int``
The size of the GridObjects if it's converted to a flat vector.
Examples
--------
It is mainly used to know the size of the vector that would represent these objects
.. code-block:: python
import grid2op
env = grid2op.make()
# get the vector representation of an observation:
obs = env.reset()
print("The size of this observation is {}".format(obs.size()))
# get the vector representation of an action:
act = env.action_space.sample()
print("The size of this action is {}".format(act.size()))
# it can also be used with the action_space and observation_space
print("The size of the observation space is {}".format(env.observation_space.size()))
print("The size of the action space is {}".format(env.action_space.size()))
"""
res = np.sum(self.shape()).astype(dt_int)
return res
@classmethod
def _aux_pos_big_topo(cls, vect_to_subid, vect_to_sub_pos):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Return the proper "_pos_big_topo" vector given "to_subid" vector and "to_sub_pos" vectors.
This function is also called to performed sanity check after the load on the powergrid.
:param vect_to_subid: vector of int giving the id of the topology for this element
:type vect_to_subid: iterable int
:param vect_to_sub_pos: vector of int giving the id IN THE SUBSTATION for this element
:type vect_to_sub_pos: iterable int
:return:
"""
res = np.zeros(shape=vect_to_subid.shape, dtype=dt_int)
for i, (sub_id, my_pos) in enumerate(zip(vect_to_subid, vect_to_sub_pos)):
obj_before = np.sum(cls.sub_info[:sub_id])
res[i] = obj_before + my_pos
return res
def _init_class_attr(self, obj=None):
"""init the class attribute from an instance of the class
THIS IS NOT A CLASS ATTR
obj should be an object and NOT a class !
"""
if obj is None:
obj = self
cls = type(self)
cls_as_dict = {}
GridObjects._make_cls_dict_extended(obj, cls_as_dict, as_list=False)
for attr_nm, attr in cls_as_dict.items():
setattr(cls, attr_nm, attr)
def _compute_pos_big_topo(self):
# TODO move the object attribute as class attribute !
self._init_class_attr()
cls = type(self)
cls._compute_pos_big_topo_cls()
    @classmethod
    def _compute_pos_big_topo_cls(cls):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Compute the position of each element in the big topological vector.

        Topology action are represented by numpy vector of size np.sum(self.sub_info).
        The vector self.load_pos_topo_vect will give the index of each load in this big topology vector.
        For example, for load i, self.load_pos_topo_vect[i] gives the position in such a topology vector that
        affect this load.

        This position can be automatically deduced from self.sub_info, self.load_to_subid and self.load_to_sub_pos.

        This is the same for generators and both end of powerlines

        :return: ``None``
        """
        # check if we need to implement the position in substation
        if (
            cls.n_storage == -1
            and cls.storage_to_subid is None
            and cls.storage_pos_topo_vect is None
            and cls.storage_to_sub_pos is None
        ):
            # no storage on the grid, so i deactivate them
            cls.set_no_storage()
        # fill in sub_info / *_to_sub_pos automatically when the backend did not
        cls._compute_sub_elements()
        cls._compute_sub_pos()

        # positions of each kind of element in the "big topology" vector
        cls.load_pos_topo_vect = cls._aux_pos_big_topo(
            cls.load_to_subid, cls.load_to_sub_pos
        ).astype(dt_int)
        cls.gen_pos_topo_vect = cls._aux_pos_big_topo(
            cls.gen_to_subid, cls.gen_to_sub_pos
        ).astype(dt_int)
        cls.line_or_pos_topo_vect = cls._aux_pos_big_topo(
            cls.line_or_to_subid, cls.line_or_to_sub_pos
        ).astype(dt_int)
        cls.line_ex_pos_topo_vect = cls._aux_pos_big_topo(
            cls.line_ex_to_subid, cls.line_ex_to_sub_pos
        ).astype(dt_int)
        cls.storage_pos_topo_vect = cls._aux_pos_big_topo(
            cls.storage_to_subid, cls.storage_to_sub_pos
        ).astype(dt_int)

        # substation id of each component of the topology vector
        cls._topo_vect_to_sub = np.repeat(np.arange(cls.n_sub), repeats=cls.sub_info)
        # one row per topo-vect component, one column per element kind (see *_COL)
        cls.grid_objects_types = np.full(
            shape=(cls.dim_topo, 6), fill_value=-1, dtype=dt_int
        )
        prev = 0
        for sub_id, nb_el in enumerate(cls.sub_info):
            cls.grid_objects_types[prev : (prev + nb_el), :] = cls.get_obj_substations(
                substation_id=sub_id
            )
            prev += nb_el
@classmethod
def _check_sub_id(cls):
# check it can be converted to proper types
if not isinstance(cls.load_to_subid, np.ndarray):
try:
cls.load_to_subid = np.array(cls.load_to_subid)
cls.load_to_subid = cls.load_to_subid.astype(dt_int)
except Exception as exc_:
raise EnvError(
f"self.load_to_subid should be convertible to a numpy array. "
f'It fails with error "{exc_}"'
)
if not isinstance(cls.gen_to_subid, np.ndarray):
try:
cls.gen_to_subid = np.array(cls.gen_to_subid)
cls.gen_to_subid = cls.gen_to_subid.astype(dt_int)
except Exception as exc_:
raise EnvError(
f"self.gen_to_subid should be convertible to a numpy array. "
f'It fails with error "{exc_}"'
)
if not isinstance(cls.line_or_to_subid, np.ndarray):
try:
cls.line_or_to_subid = np.array(cls.line_or_to_subid)
cls.line_or_to_subid = cls.line_or_to_subid.astype(dt_int)
except Exception as exc_:
raise EnvError(
f"self.line_or_to_subid should be convertible to a numpy array. "
f'It fails with error "{exc_}"'
)
if not isinstance(cls.line_ex_to_subid, np.ndarray):
try:
cls.line_ex_to_subid = np.array(cls.line_ex_to_subid)
cls.line_ex_to_subid = cls.line_ex_to_subid.astype(dt_int)
except Exception as exc_:
raise EnvError(
"self.line_ex_to_subid should be convertible to a numpy array"
f'It fails with error "{exc_}"'
)
if not isinstance(cls.storage_to_subid, np.ndarray):
try:
cls.storage_to_subid = np.array(cls.storage_to_subid)
cls.storage_to_subid = cls.storage_to_subid.astype(dt_int)
except Exception as e:
raise EnvError(
"self.storage_to_subid should be convertible to a numpy array"
)
# now check the sizes
if len(cls.load_to_subid) != cls.n_load:
raise IncorrectNumberOfLoads()
if np.min(cls.load_to_subid) < 0:
raise EnvError("Some shunt is connected to a negative substation id.")
if np.max(cls.load_to_subid) > cls.n_sub:
raise EnvError(
"Some load is supposed to be connected to substations with id {} which"
"is greater than the number of substations of the grid, which is {}."
"".format(np.max(cls.load_to_subid), cls.n_sub)
)
if len(cls.gen_to_subid) != cls.n_gen:
raise IncorrectNumberOfGenerators()
if np.min(cls.gen_to_subid) < 0:
raise EnvError("Some shunt is connected to a negative substation id.")
if np.max(cls.gen_to_subid) > cls.n_sub:
raise EnvError(
"Some generator is supposed to be connected to substations with id {} which"
"is greater than the number of substations of the grid, which is {}."
"".format(np.max(cls.gen_to_subid), cls.n_sub)
)
if len(cls.line_or_to_subid) != cls.n_line:
raise IncorrectNumberOfLines()
if np.min(cls.line_or_to_subid) < 0:
raise EnvError("Some shunt is connected to a negative substation id.")
if np.max(cls.line_or_to_subid) > cls.n_sub:
raise EnvError(
"Some powerline (or) is supposed to be connected to substations with id {} which"
"is greater than the number of substations of the grid, which is {}."
"".format(np.max(cls.line_or_to_subid), cls.n_sub)
)
if len(cls.line_ex_to_subid) != cls.n_line:
raise IncorrectNumberOfLines()
if np.min(cls.line_ex_to_subid) < 0:
raise EnvError("Some shunt is connected to a negative substation id.")
if np.max(cls.line_ex_to_subid) > cls.n_sub:
raise EnvError(
"Some powerline (ex) is supposed to be connected to substations with id {} which"
"is greater than the number of substations of the grid, which is {}."
"".format(np.max(cls.line_or_to_subid), cls.n_sub)
)
if len(cls.storage_to_subid) != cls.n_storage:
raise IncorrectNumberOfStorages()
if cls.n_storage > 0:
if np.min(cls.storage_to_subid) < 0:
raise EnvError("Some storage is connected to a negative substation id.")
if np.max(cls.storage_to_subid) > cls.n_sub:
raise EnvError(
"Some powerline (ex) is supposed to be connected to substations with id {} which"
"is greater than the number of substations of the grid, which is {}."
"".format(np.max(cls.line_or_to_subid), cls.n_sub)
)
    @classmethod
    def _fill_names(cls):
        """
        INTERNAL

        Assign default names to powerlines, loads, generators, substations and
        storage units when the backend did not provide any (``name_* is None``).
        Each defaulted category emits a warning, because these names are used to
        match the grid against the chronics data.
        """
        if cls.name_line is None:
            # default line name: "<or_sub>_<ex_sub>_<line_id>"
            cls.name_line = [
                "{}_{}_{}".format(or_id, ex_id, l_id)
                for l_id, (or_id, ex_id) in enumerate(
                    zip(cls.line_or_to_subid, cls.line_ex_to_subid)
                )
            ]
            cls.name_line = np.array(cls.name_line)
            warnings.warn(
                "name_line is None so default line names have been assigned to your grid. "
                "(FYI: Line names are used to make the correspondence between the chronics and the backend)"
                "This might result in impossibility to load data."
                '\n\tIf "env.make" properly worked, you can safely ignore this warning.'
            )
        if cls.name_load is None:
            # default load name: "load_<sub_id>_<load_id>"
            cls.name_load = [
                "load_{}_{}".format(bus_id, load_id)
                for load_id, bus_id in enumerate(cls.load_to_subid)
            ]
            cls.name_load = np.array(cls.name_load)
            warnings.warn(
                "name_load is None so default load names have been assigned to your grid. "
                "(FYI: load names are used to make the correspondence between the chronics and the backend)"
                "This might result in impossibility to load data."
                '\n\tIf "env.make" properly worked, you can safely ignore this warning.'
            )
        if cls.name_gen is None:
            # default generator name: "gen_<sub_id>_<gen_id>"
            cls.name_gen = [
                "gen_{}_{}".format(bus_id, gen_id)
                for gen_id, bus_id in enumerate(cls.gen_to_subid)
            ]
            cls.name_gen = np.array(cls.name_gen)
            warnings.warn(
                "name_gen is None so default generator names have been assigned to your grid. "
                "(FYI: generator names are used to make the correspondence between the chronics and "
                "the backend)"
                "This might result in impossibility to load data."
                '\n\tIf "env.make" properly worked, you can safely ignore this warning.'
            )
        if cls.name_sub is None:
            # default substation name: "sub_<sub_id>"
            cls.name_sub = ["sub_{}".format(sub_id) for sub_id in range(cls.n_sub)]
            cls.name_sub = np.array(cls.name_sub)
            warnings.warn(
                "name_sub is None so default substation names have been assigned to your grid. "
                "(FYI: substation names are used to make the correspondence between the chronics and "
                "the backend)"
                "This might result in impossibility to load data."
                '\n\tIf "env.make" properly worked, you can safely ignore this warning.'
            )
        if cls.name_storage is None:
            # default storage name: "storage_<sub_id>_<storage_id>"
            cls.name_storage = [
                "storage_{}_{}".format(bus_id, sto_id)
                for sto_id, bus_id in enumerate(cls.storage_to_subid)
            ]
            cls.name_storage = np.array(cls.name_storage)
            warnings.warn(
                "name_storage is None so default storage unit names have been assigned to your grid. "
                "(FYI: storage names are used to make the correspondence between the chronics and "
                "the backend)"
                "This might result in impossibility to load data."
                '\n\tIf "env.make" properly worked, you can safely ignore this warning.'
            )
@classmethod
def _check_names(cls):
cls._fill_names()
if not isinstance(cls.name_line, np.ndarray):
try:
cls.name_line = np.array(cls.name_line)
cls.name_line = cls.name_line.astype(str)
except Exception as exc_:
raise EnvError(
f"self.name_line should be convertible to a numpy array of type str. Error was "
f"{exc_}"
)
if not isinstance(cls.name_load, np.ndarray):
try:
cls.name_load = np.array(cls.name_load)
cls.name_load = cls.name_load.astype(str)
except Exception as exc_:
raise EnvError(
"self.name_load should be convertible to a numpy array of type str. Error was "
f"{exc_}"
)
if not isinstance(cls.name_gen, np.ndarray):
try:
cls.name_gen = np.array(cls.name_gen)
cls.name_gen = cls.name_gen.astype(str)
except Exception as exc_:
raise EnvError(
"self.name_gen should be convertible to a numpy array of type str. Error was "
f"{exc_}"
)
if not isinstance(cls.name_sub, np.ndarray):
try:
cls.name_sub = np.array(cls.name_sub)
cls.name_sub = cls.name_sub.astype(str)
except Exception as exc_:
raise EnvError(
"self.name_sub should be convertible to a numpy array of type str. Error was "
f"{exc_}"
)
if not isinstance(cls.name_storage, np.ndarray):
try:
cls.name_storage = np.array(cls.name_storage)
cls.name_storage = cls.name_storage.astype(str)
except Exception as exc_:
raise EnvError(
"self.name_storage should be convertible to a numpy array of type str. Error was "
f"{exc_}"
)
attrs_nms = [
cls.name_gen,
cls.name_sub,
cls.name_line,
cls.name_load,
cls.name_storage,
]
nms = ["generators", "substations", "lines", "loads", "storage units"]
if cls.shunts_data_available:
# these are set to "None" if there is no shunts on the grid
attrs_nms.append(cls.name_shunt)
nms.append("shunts")
for arr_, nm in zip(attrs_nms, nms):
tmp = np.unique(arr_)
if tmp.shape[0] != arr_.shape[0]:
nms = "\n\t - ".join(sorted(arr_))
raise EnvError(
f'Two {nm} have the same names. Please check the "grid.json" file and make sure the '
f"name of the {nm} are all different. Right now they are \n\t - {nms}."
)
@classmethod
def _check_sub_pos(cls):
if not isinstance(cls.load_to_sub_pos, np.ndarray):
try:
cls.load_to_sub_pos = np.array(cls.load_to_sub_pos)
cls.load_to_sub_pos = cls.load_to_sub_pos.astype(dt_int)
except Exception as exc_:
raise EnvError(
"self.load_to_sub_pos should be convertible to a numpy array. Error was "
f"{exc_}"
)
if not isinstance(cls.gen_to_sub_pos, np.ndarray):
try:
cls.gen_to_sub_pos = np.array(cls.gen_to_sub_pos)
cls.gen_to_sub_pos = cls.gen_to_sub_pos.astype(dt_int)
except Exception as exc_:
raise EnvError(
"self.gen_to_sub_pos should be convertible to a numpy array. Error was "
f"{exc_}"
)
if not isinstance(cls.line_or_to_sub_pos, np.ndarray):
try:
cls.line_or_to_sub_pos = np.array(cls.line_or_to_sub_pos)
cls.line_or_to_sub_pos = cls.line_or_to_sub_pos.astype(dt_int)
except Exception as exc_:
raise EnvError(
"self.line_or_to_sub_pos should be convertible to a numpy array. Error was "
f"{exc_}"
)
if not isinstance(cls.line_ex_to_sub_pos, np.ndarray):
try:
cls.line_ex_to_sub_pos = np.array(cls.line_ex_to_sub_pos)
cls.line_ex_to_sub_pos = cls.line_ex_to_sub_pos.astype(dt_int)
except Exception as exc_:
raise EnvError(
"self.line_ex_to_sub_pos should be convertible to a numpy array. Error was "
f"{exc_}"
)
if not isinstance(cls.storage_to_sub_pos, np.ndarray):
try:
cls.storage_to_sub_pos = np.array(cls.storage_to_sub_pos)
cls.storage_to_sub_pos = cls.storage_to_sub_pos.astype(dt_int)
except Exception as exc_:
raise EnvError(
"self.line_ex_to_sub_pos should be convertible to a numpy array. Error was "
f"{exc_}"
)
@classmethod
def _check_topo_vect(cls):
if not isinstance(cls.load_pos_topo_vect, np.ndarray):
try:
cls.load_pos_topo_vect = np.array(cls.load_pos_topo_vect)
cls.load_pos_topo_vect = cls.load_pos_topo_vect.astype(dt_int)
except Exception as exc_:
raise EnvError(
"self.load_pos_topo_vect should be convertible to a numpy array. Error was "
f"{exc_}"
)
if not isinstance(cls.gen_pos_topo_vect, np.ndarray):
try:
cls.gen_pos_topo_vect = np.array(cls.gen_pos_topo_vect)
cls.gen_pos_topo_vect = cls.gen_pos_topo_vect.astype(dt_int)
except Exception as exc_:
raise EnvError(
"self.gen_pos_topo_vect should be convertible to a numpy array. Error was "
f"{exc_}"
)
if not isinstance(cls.line_or_pos_topo_vect, np.ndarray):
try:
cls.line_or_pos_topo_vect = np.array(cls.line_or_pos_topo_vect)
cls.line_or_pos_topo_vect = cls.line_or_pos_topo_vect.astype(dt_int)
except Exception as exc_:
raise EnvError(
"self.line_or_pos_topo_vect should be convertible to a numpy array. Error was "
f"{exc_}"
)
if not isinstance(cls.line_ex_pos_topo_vect, np.ndarray):
try:
cls.line_ex_pos_topo_vect = np.array(cls.line_ex_pos_topo_vect)
cls.line_ex_pos_topo_vect = cls.line_ex_pos_topo_vect.astype(dt_int)
except Exception as exc_:
raise EnvError(
"self.line_ex_pos_topo_vect should be convertible to a numpy array. Error was "
f"{exc_}"
)
if not isinstance(cls.storage_pos_topo_vect, np.ndarray):
try:
cls.storage_pos_topo_vect = np.array(cls.storage_pos_topo_vect)
cls.storage_pos_topo_vect = cls.storage_pos_topo_vect.astype(dt_int)
except Exception as exc_:
raise EnvError(
"self.storage_pos_topo_vect should be convertible to a numpy array. Error was "
f"{exc_}"
)
@classmethod
def _compute_sub_pos(cls):
    """
    INTERNAL
    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
    This is used at the initialization of the environment.
    Export to grid2op the position of each object in their substation.
    If not done by the user, we will order the objects the following way, for each substation:
    - load (if any is connected to this substation) will be labeled first
    - gen will be labeled just after
    - then origin side of powerline
    - then extremity side of powerline
    - then storage units
    You are free to chose any other ordering. It's a possible ordering we propose for the example, but it is
    definitely not mandatory.
    It supposes that the *_to_sub_id are properly set up.

    Raises
    ------
    BackendError
        If only a strict subset of the ``*_to_sub_pos`` vectors has been
        implemented (partial implementation is not supported).
    """
    # Detect whether the user implemented the *_to_sub_pos vectors.
    # A partial implementation (some set, some None) is an error.
    need_implement = False
    if cls.load_to_sub_pos is None:
        need_implement = True
    if cls.gen_to_sub_pos is None:
        if need_implement is False:
            raise BackendError(
                'You chose to implement "load_to_sub_pos" but not "gen_to_sub_pos". We cannot '
                "work with that. Please either use the automatic setting, or implement all of "
                "*_to_sub_pos vectors"
                ""
            )
        need_implement = True
    if cls.line_or_to_sub_pos is None:
        if need_implement is False:
            raise BackendError(
                'You chose to implement "line_or_to_sub_pos" but not "load_to_sub_pos"'
                'or "gen_to_sub_pos". We cannot '
                "work with that. Please either use the automatic setting, or implement all of "
                "*_to_sub_pos vectors"
                ""
            )
        need_implement = True
    if cls.line_ex_to_sub_pos is None:
        if need_implement is False:
            raise BackendError(
                'You chose to implement "line_ex_to_sub_pos" but not "load_to_sub_pos"'
                'or "gen_to_sub_pos" or "line_or_to_sub_pos". We cannot '
                "work with that. Please either use the automatic setting, or implement all of "
                "*_to_sub_pos vectors"
                ""
            )
        need_implement = True
    if cls.storage_to_sub_pos is None:
        if need_implement is False:
            raise BackendError(
                'You chose to implement "storage_to_sub_pos" but not "load_to_sub_pos"'
                'or "gen_to_sub_pos" or "line_or_to_sub_pos" or "line_ex_to_sub_pos". '
                "We cannot "
                "work with that. Please either use the automatic setting, or implement all of "
                "*_to_sub_pos vectors"
                ""
            )
        need_implement = True
    if not need_implement:
        # user provided everything: nothing to compute
        return

    # per-substation counter of the next free position
    last_order_number = np.zeros(cls.n_sub, dtype=dt_int)

    def _fill_positions(n_obj, to_subid):
        # assign positions in increasing object id, continuing the shared
        # per-substation counter (same ordering as the documented scheme)
        res = np.zeros(n_obj, dtype=dt_int)
        for obj_id, sub_id_connected in enumerate(to_subid):
            res[obj_id] = last_order_number[sub_id_connected]
            last_order_number[sub_id_connected] += 1
        return res

    # order matters: loads, then generators, then line origins, then
    # line extremities, then storage units
    cls.load_to_sub_pos = _fill_positions(cls.n_load, cls.load_to_subid)
    cls.gen_to_sub_pos = _fill_positions(cls.n_gen, cls.gen_to_subid)
    cls.line_or_to_sub_pos = _fill_positions(cls.n_line, cls.line_or_to_subid)
    cls.line_ex_to_sub_pos = _fill_positions(cls.n_line, cls.line_ex_to_subid)
    cls.storage_to_sub_pos = _fill_positions(cls.n_storage, cls.storage_to_subid)
@classmethod
def _compute_sub_elements(cls):
    """
    INTERNAL
    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
    Fill in the ``dim_topo`` and ``sub_info`` class attributes when they were
    not provided.
    ``dim_topo`` counts one slot per load, generator and storage unit, and two
    slots per powerline (origin + extremity). ``sub_info`` counts, for each
    substation, how many of these elements are connected to it.
    It supposes that *to_subid are initialized and that n_line, n_sub, n_load and n_gen are all positive
    """
    if cls.dim_topo is None or cls.dim_topo <= 0:
        cls.dim_topo = 2 * cls.n_line + cls.n_load + cls.n_gen + cls.n_storage
    if cls.sub_info is None:
        cls.sub_info = np.zeros(cls.n_sub, dtype=dt_int)
        # NB the vectorized implementation do not work
        # (repeated substation ids would be counted only once), hence
        # the explicit element-by-element increments
        all_subid_vects = (
            cls.load_to_subid,
            cls.gen_to_subid,
            cls.line_or_to_subid,
            cls.line_ex_to_subid,
            cls.storage_to_subid,
        )
        for subid_vect in all_subid_vects:
            for s_id in subid_vect:
                cls.sub_info[s_id] += 1
@classmethod
def assert_grid_correct_cls(cls):
    """
    INTERNAL
    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
    This is used at the initialization of the environment.
    Performs some checking on the loaded grid to make sure it is consistent.
    It also makes sure that the vector such as *sub_info*, *load_to_subid* or *gen_to_sub_pos* are of the
    right type eg. numpy.ndarray with dtype: dt_int
    It is called after the grid has been loaded.
    This function is by default called by the :class:`grid2op.Environment` class after the initialization of the
    environment.
    If these tests are not successful, no guarantee are given that the backend will return consistent computations.
    In order for the backend to fully understand the structure of actions, it is strongly advised NOT to override
    this method.
    :return: ``None``
    :raise: :class:`grid2op.EnvError` and possibly all of its derived class.
    """
    # TODO refactor this method with the `_check***` methods.
    # TODO refactor the `_check***` to use the same "base functions" that would be coded only once.

    # basic sanity on the element counts
    if cls.n_gen <= 0:
        raise EnvError(
            "n_gen is negative. Powergrid is invalid: there are no generator"
        )
    if cls.n_load <= 0:
        raise EnvError(
            "n_load is negative. Powergrid is invalid: there are no load"
        )
    if cls.n_line <= 0:
        raise EnvError(
            "n_line is negative. Powergrid is invalid: there are no line"
        )
    if cls.n_sub <= 0:
        raise EnvError(
            "n_sub is negative. Powergrid is invalid: there are no substation"
        )
    if (
        cls.n_storage == -1
        and cls.storage_to_subid is None
        and cls.storage_pos_topo_vect is None
        and cls.storage_to_sub_pos is None
    ):
        # no storage on the grid, so i deactivate them
        cls.set_no_storage()
    if cls.n_storage < 0:
        raise EnvError(
            "n_storage is negative. Powergrid is invalid: you specify a negative number of unit storage"
        )
    cls._compute_sub_elements()
    if not isinstance(cls.sub_info, np.ndarray):
        try:
            cls.sub_info = np.array(cls.sub_info)
            cls.sub_info = cls.sub_info.astype(dt_int)
        except Exception as exc_:
            raise EnvError(
                f"self.sub_info should be convertible to a numpy array. "
                f'It fails with error "{exc_}"'
            )
    # to which subtation they are connected
    cls._check_sub_id()
    # for names
    cls._check_names()
    # compute the position in substation if not done already
    cls._compute_sub_pos()
    # test position in substation
    cls._check_sub_pos()
    # test position in topology vector
    cls._check_topo_vect()
    # test that all numbers are finite:
    tmp = np.concatenate(
        (
            cls.sub_info.flatten(),
            cls.load_to_subid.flatten(),
            cls.gen_to_subid.flatten(),
            cls.line_or_to_subid.flatten(),
            cls.line_ex_to_subid.flatten(),
            cls.storage_to_subid.flatten(),
            cls.load_to_sub_pos.flatten(),
            cls.gen_to_sub_pos.flatten(),
            cls.line_or_to_sub_pos.flatten(),
            cls.line_ex_to_sub_pos.flatten(),
            cls.storage_to_sub_pos.flatten(),
            cls.load_pos_topo_vect.flatten(),
            cls.gen_pos_topo_vect.flatten(),
            cls.line_or_pos_topo_vect.flatten(),
            cls.line_ex_pos_topo_vect.flatten(),
            cls.storage_pos_topo_vect.flatten(),
        )
    )
    # BUGFIX: only the np.isfinite call itself is guarded. Previously the
    # EnvError raised for non-finite values was itself caught by the broad
    # `except Exception` and replaced by the unrelated "Impossible to check"
    # message, hiding the real diagnostic.
    try:
        is_finite = np.isfinite(tmp)
    except Exception as exc_:
        raise EnvError(
            f"Impossible to check whether or not vectors contains only finite elements (probably one "
            f"or more topology related vector is not valid (contains ``None``). Error was "
            f"{exc_}"
        )
    if np.any(~is_finite):
        raise EnvError(
            "The grid could not be loaded properly."
            "One of the vector is made of non finite elements, check the sub_info, *_to_subid, "
            "*_to_sub_pos and *_pos_topo_vect vectors"
        )
    # check sizes
    if len(cls.sub_info) != cls.n_sub:
        raise IncorrectNumberOfSubstation(
            "The number of substation is not consistent in "
            'self.sub_info (size "{}") '  # note: space added, message previously read ')and'
            "and self.n_sub ({})".format(len(cls.sub_info), cls.n_sub)
        )
    if (
        np.sum(cls.sub_info)
        != cls.n_load + cls.n_gen + 2 * cls.n_line + cls.n_storage
    ):
        err_msg = "The number of elements of elements is not consistent between self.sub_info where there are "
        err_msg += (
            "{} elements connected to all substations and the number of load, generators and lines in "
            "the _grid ({})."
        )
        err_msg = err_msg.format(
            np.sum(cls.sub_info),
            cls.n_load + cls.n_gen + 2 * cls.n_line + cls.n_storage,
        )
        raise IncorrectNumberOfElements(err_msg)
    if len(cls.name_load) != cls.n_load:
        raise IncorrectNumberOfLoads("len(self.name_load) != self.n_load")
    if len(cls.name_gen) != cls.n_gen:
        raise IncorrectNumberOfGenerators("len(self.name_gen) != self.n_gen")
    if len(cls.name_line) != cls.n_line:
        raise IncorrectNumberOfLines("len(self.name_line) != self.n_line")
    if len(cls.name_storage) != cls.n_storage:
        raise IncorrectNumberOfStorages("len(self.name_storage) != self.n_storage")
    if len(cls.name_sub) != cls.n_sub:
        raise IncorrectNumberOfSubstation("len(self.name_sub) != self.n_sub")
    if len(cls.load_to_sub_pos) != cls.n_load:
        raise IncorrectNumberOfLoads("len(self.load_to_sub_pos) != self.n_load")
    if len(cls.gen_to_sub_pos) != cls.n_gen:
        # message fixed: previously read "en(self.gen_to_sub_pos)"
        raise IncorrectNumberOfGenerators("len(self.gen_to_sub_pos) != self.n_gen")
    if len(cls.line_or_to_sub_pos) != cls.n_line:
        raise IncorrectNumberOfLines("len(self.line_or_to_sub_pos) != self.n_line")
    if len(cls.line_ex_to_sub_pos) != cls.n_line:
        raise IncorrectNumberOfLines("len(self.line_ex_to_sub_pos) != self.n_line")
    if len(cls.storage_to_sub_pos) != cls.n_storage:
        raise IncorrectNumberOfStorages(
            "len(self.storage_to_sub_pos) != self.n_storage"
        )
    if len(cls.load_pos_topo_vect) != cls.n_load:
        raise IncorrectNumberOfLoads("len(self.load_pos_topo_vect) != self.n_load")
    if len(cls.gen_pos_topo_vect) != cls.n_gen:
        raise IncorrectNumberOfGenerators(
            "len(self.gen_pos_topo_vect) != self.n_gen"
        )
    if len(cls.line_or_pos_topo_vect) != cls.n_line:
        raise IncorrectNumberOfLines(
            "len(self.line_or_pos_topo_vect) != self.n_line"
        )
    if len(cls.line_ex_pos_topo_vect) != cls.n_line:
        raise IncorrectNumberOfLines(
            "len(self.line_ex_pos_topo_vect) != self.n_line"
        )
    if len(cls.storage_pos_topo_vect) != cls.n_storage:
        # NOTE(review): exception type kept as IncorrectNumberOfLines for
        # backward compatibility, although IncorrectNumberOfStorages would
        # be more accurate here.
        raise IncorrectNumberOfLines(
            "len(self.storage_pos_topo_vect) != self.n_storage"
        )
    # test if object are connected to right substation
    obj_per_sub = np.zeros(shape=(cls.n_sub,), dtype=dt_int)
    for sub_id in cls.load_to_subid:
        obj_per_sub[sub_id] += 1
    for sub_id in cls.gen_to_subid:
        obj_per_sub[sub_id] += 1
    for sub_id in cls.line_or_to_subid:
        obj_per_sub[sub_id] += 1
    for sub_id in cls.line_ex_to_subid:
        obj_per_sub[sub_id] += 1
    for sub_id in cls.storage_to_subid:
        obj_per_sub[sub_id] += 1
    if not np.all(obj_per_sub == cls.sub_info):
        raise IncorrectNumberOfElements(
            f"for substation(s): {np.where(obj_per_sub != cls.sub_info)[0]}"
        )
    # test right number of element in substations
    # test that for each substation i don't have an id above the number of element of a substations
    for i, (sub_id, sub_pos) in enumerate(
        zip(cls.load_to_subid, cls.load_to_sub_pos)
    ):
        if sub_pos >= cls.sub_info[sub_id]:
            raise IncorrectPositionOfLoads("for load {}".format(i))
    for i, (sub_id, sub_pos) in enumerate(
        zip(cls.gen_to_subid, cls.gen_to_sub_pos)
    ):
        if sub_pos >= cls.sub_info[sub_id]:
            raise IncorrectPositionOfGenerators("for generator {}".format(i))
    for i, (sub_id, sub_pos) in enumerate(
        zip(cls.line_or_to_subid, cls.line_or_to_sub_pos)
    ):
        if sub_pos >= cls.sub_info[sub_id]:
            raise IncorrectPositionOfLines("for line {} at origin end".format(i))
    for i, (sub_id, sub_pos) in enumerate(
        zip(cls.line_ex_to_subid, cls.line_ex_to_sub_pos)
    ):
        if sub_pos >= cls.sub_info[sub_id]:
            raise IncorrectPositionOfLines("for line {} at extremity end".format(i))
    for i, (sub_id, sub_pos) in enumerate(
        zip(cls.storage_to_subid, cls.storage_to_sub_pos)
    ):
        if sub_pos >= cls.sub_info[sub_id]:
            raise IncorrectPositionOfStorages("for storage {}".format(i))
    # check that i don't have 2 objects with the same id in the "big topo" vector
    concat_topo = np.concatenate(
        (
            cls.load_pos_topo_vect.flatten(),
            cls.gen_pos_topo_vect.flatten(),
            cls.line_or_pos_topo_vect.flatten(),
            cls.line_ex_pos_topo_vect.flatten(),
            cls.storage_pos_topo_vect.flatten(),
        )
    )
    if len(np.unique(concat_topo)) != np.sum(cls.sub_info):
        raise EnvError(
            "2 different objects would have the same id in the topology vector, or there would be"
            "an empty component in this vector."
        )
    # check that self.load_pos_topo_vect and co are consistent
    load_pos_big_topo = cls._aux_pos_big_topo(
        cls.load_to_subid, cls.load_to_sub_pos
    )
    if not np.all(load_pos_big_topo == cls.load_pos_topo_vect):
        raise IncorrectPositionOfLoads(
            "Mismatch between load_to_subid, load_to_sub_pos and load_pos_topo_vect"
        )
    gen_pos_big_topo = cls._aux_pos_big_topo(cls.gen_to_subid, cls.gen_to_sub_pos)
    if not np.all(gen_pos_big_topo == cls.gen_pos_topo_vect):
        raise IncorrectNumberOfGenerators(
            "Mismatch between gen_to_subid, gen_to_sub_pos and gen_pos_topo_vect"
        )
    lines_or_pos_big_topo = cls._aux_pos_big_topo(
        cls.line_or_to_subid, cls.line_or_to_sub_pos
    )
    if not np.all(lines_or_pos_big_topo == cls.line_or_pos_topo_vect):
        raise IncorrectPositionOfLines(
            "Mismatch between line_or_to_subid, "
            "line_or_to_sub_pos and line_or_pos_topo_vect"
        )
    lines_ex_pos_big_topo = cls._aux_pos_big_topo(
        cls.line_ex_to_subid, cls.line_ex_to_sub_pos
    )
    if not np.all(lines_ex_pos_big_topo == cls.line_ex_pos_topo_vect):
        raise IncorrectPositionOfLines(
            "Mismatch between line_ex_to_subid, "
            "line_ex_to_sub_pos and line_ex_pos_topo_vect"
        )
    storage_pos_big_topo = cls._aux_pos_big_topo(
        cls.storage_to_subid, cls.storage_to_sub_pos
    )
    if not np.all(storage_pos_big_topo == cls.storage_pos_topo_vect):
        raise IncorrectPositionOfStorages(
            "Mismatch between storage_to_subid, "
            "storage_to_sub_pos and storage_pos_topo_vect"
        )
    # no empty bus: at least one element should be present on each bus
    if np.any(cls.sub_info < 1):
        # warn only once per process for this condition
        if not grid2op.Space.space_utils._WARNING_ISSUED_FOR_SUB_NO_ELEM:
            warnings.warn(
                f"There are {np.sum(cls.sub_info < 1)} substations where no 'controlable' elements "
                f"are connected. These substations will be used in the computation of the powerflow "
                f"(by the backend) but you will NOT be able to control anything on them."
            )
            grid2op.Space.space_utils._WARNING_ISSUED_FOR_SUB_NO_ELEM = True
    # redispatching / unit commitment
    if cls.redispatching_unit_commitment_availble:
        cls._check_validity_dispathcing_data()
    # shunt data
    if cls.shunts_data_available:
        cls._check_validity_shunt_data()
    # storage data
    cls._check_validity_storage_data()
    # alarm data
    cls._check_validity_alarm_data()
    # alert data
    cls._check_validity_alert_data()
@classmethod
def _check_validity_alarm_data(cls):
    """Check the consistency of the "alarm" feature related class attributes.

    When ``dim_alarms == 0`` the feature is disabled and all related
    attributes must be empty; otherwise ``alarms_lines_area`` and
    ``alarms_area_lines`` must describe the same line <-> area mapping and
    every powerline of the grid must belong to at least one area.

    Raises
    ------
    EnvError
        If ``dim_alarms`` is negative, a line belongs to no area, or a
        powerline is missing from ``alarms_lines_area``.
    AssertionError
        For the other inconsistencies (NOTE(review): these asserts vanish
        under ``python -O``).
    """
    if cls.dim_alarms == 0:
        # no alarm data
        assert (
            cls.alarms_area_names == []
        ), "No alarm data is provided, yet cls.alarms_area_names != []"
        assert (
            cls.alarms_lines_area == {}
        ), "No alarm data is provided, yet cls.alarms_lines_area != {}"
        assert (
            cls.alarms_area_lines == []
        ), "No alarm data is provided, yet cls.alarms_area_lines != []"
    elif cls.dim_alarms < 0:
        raise EnvError(
            f"The number of areas for the alarm feature should be >= 0. It currently is {cls.dim_alarms}"
        )
    else:
        assert cls.assistant_warning_type == "zonal"
        # the "alarm" feature is supported
        assert isinstance(
            cls.alarms_area_names, list
        ), "cls.alarms_area_names should be a list"
        assert isinstance(
            cls.alarms_lines_area, dict
        ), "cls.alarms_lines_area should be a dict"
        # message fixed: it previously (incorrectly) said "should be a dict"
        assert isinstance(
            cls.alarms_area_lines, list
        ), "cls.alarms_area_lines should be a list"
        assert (
            len(cls.alarms_area_names) == cls.dim_alarms
        ), "len(cls.alarms_area_names) != cls.dim_alarms"
        names_to_id = {nm: id_ for id_, nm in enumerate(cls.alarms_area_names)}
        # check that information in alarms_lines_area and alarms_area_lines match
        for l_nm, li_area in cls.alarms_lines_area.items():
            for area_nm in li_area:
                area_id = names_to_id[area_nm]
                all_lines_this_area = cls.alarms_area_lines[area_id]
                assert l_nm in all_lines_this_area, (
                    f'line "{l_nm}" is said to belong to area "{area_nm}" '
                    f"in cls.alarms_lines_area yet when looking for the lines in "
                    f"this "
                    f"area in cls.alarms_area_lines, this line is not in there"
                )
        for area_id, all_lines_this_area in enumerate(cls.alarms_area_lines):
            area_nm = cls.alarms_area_names[area_id]
            for l_nm in all_lines_this_area:
                assert area_nm in cls.alarms_lines_area[l_nm], (
                    f'line "{l_nm}" is said to belong to area '
                    f'"{area_nm}" '
                    f"in cls.alarms_area_lines yet when looking for "
                    f"the areas where this line belong in "
                    f"cls.alarms_lines_area it appears it does not "
                    f"belong there."
                )
        # now check that all lines are in at least one area
        for line, li_area in cls.alarms_lines_area.items():
            # check that all lines in the grid are in at least one area
            if not li_area:
                raise EnvError(
                    f"Line (on the grid) named {line} is not in any area. This is not supported at "
                    f"the moment"
                )
        # finally check that all powerlines are represented in the dictionary:
        for l_nm in cls.name_line:
            if l_nm not in cls.alarms_lines_area:
                raise EnvError(
                    f'The powerline "{l_nm}" is not in cls.alarms_lines_area'
                )
@classmethod
def _check_validity_alert_data(cls):
    """Check the consistency of the "alert" feature related class attributes.

    When ``dim_alerts == 0`` the feature is disabled and the related
    attributes must be empty; otherwise ``alertable_line_names`` must be a
    list of length ``dim_alerts`` and ``alertable_line_ids`` must be
    convertible to an integer numpy array.

    Raises
    ------
    EnvError
        If ``dim_alerts`` is negative or ``alertable_line_ids`` cannot be
        converted.
    AssertionError
        For the other inconsistencies.
        NOTE(review): ``assert`` statements are stripped under ``python -O``,
        so these validations silently disappear in optimized mode — confirm
        this is acceptable.
    """
    # TODO remove assert and raise Grid2opExcpetion instead
    if cls.dim_alerts == 0:
        # no alert data
        assert (
            cls.alertable_line_names == []
        ), "No alert data is provided, yet cls.alertable_line_names != []"
        assert (
            len(cls.alertable_line_ids) == 0
        ), "No alert data is provided, yet len(cls.alertable_line_ids) != 0"
    elif cls.dim_alerts < 0:
        raise EnvError(
            f"The number of lines for the alert feature should be >= 0. It currently is {cls.dim_alerts}"
        )
    else:
        assert cls.assistant_warning_type == "by_line"
        # the "alert" feature is supported
        assert isinstance(
            cls.alertable_line_names, list
        ), "cls.alertable_line_names should be a list"
        assert (
            len(cls.alertable_line_names) == cls.dim_alerts
        ), "len(cls.alertable_line_names) != cls.dim_alerts"
        # normalize the ids to a numpy array of dt_int
        try:
            cls.alertable_line_ids = np.array(cls.alertable_line_ids).astype(dt_int)
        except Exception as exc_:
            raise EnvError(f"Impossible to convert alertable_line_ids "
                           f"to an array of int with error {exc_}")
@classmethod
def _check_validity_storage_data(cls):
    """Check the consistency of all storage-unit related class attributes.

    Verifies that every storage attribute is set, has ``n_storage`` entries,
    contains only finite numeric values, and respects basic physical
    constraints (non-negative capacities and powers, efficiencies in the
    proper ranges, losses rechargeable within ``storage_max_p_absorb``).

    Raises
    ------
    IncorrectNumberOfStorages
        If an attribute is missing or has the wrong length.
    BackendError
        If a value is non-finite or violates a physical constraint.
    """
    # all storage attributes, in the order they were historically checked
    _storage_attrs = (
        "storage_type",
        "storage_Emax",
        "storage_Emin",
        "storage_max_p_prod",
        "storage_max_p_absorb",
        "storage_marginal_cost",
        "storage_loss",
        "storage_discharging_efficiency",
        "storage_charging_efficiency",
    )
    for attr_nm in _storage_attrs:
        if getattr(cls, attr_nm) is None:
            raise IncorrectNumberOfStorages(f"self.{attr_nm} is None")
    if cls.n_storage == 0:
        # no more check to perform is there is no storage
        return
    for attr_nm in _storage_attrs:
        if getattr(cls, attr_nm).shape[0] != cls.n_storage:
            raise IncorrectNumberOfStorages(
                f"self.{attr_nm}.shape[0] != self.n_storage"
            )
    # all numeric data must be finite (storage_type holds strings: skipped)
    for attr_nm in (
        "storage_Emax",
        "storage_Emin",
        "storage_max_p_prod",
        "storage_max_p_absorb",
        "storage_marginal_cost",
        "storage_loss",
        "storage_charging_efficiency",
        "storage_discharging_efficiency",
    ):
        if np.any(~np.isfinite(getattr(cls, attr_nm))):
            raise BackendError(f"np.any(~np.isfinite(self.{attr_nm}))")
    # physical-constraint checks; (invalid mask, message prefix) pairs,
    # evaluated in the historical order
    for bad, descr in (
        (cls.storage_Emax < cls.storage_Emin, "storage_Emax < storage_Emin"),
        (cls.storage_Emax < 0.0, "self.storage_Emax < 0."),
        (cls.storage_Emin < 0.0, "self.storage_Emin < 0."),
        (cls.storage_max_p_prod < 0.0, "self.storage_max_p_prod < 0."),
        (cls.storage_max_p_absorb < 0.0, "self.storage_max_p_absorb < 0."),
        (cls.storage_loss < 0.0, "self.storage_loss < 0."),
        (
            cls.storage_discharging_efficiency <= 0.0,
            "self.storage_discharging_efficiency <= 0.",
        ),
        (
            cls.storage_discharging_efficiency > 1.0,
            "self.storage_discharging_efficiency > 1.",
        ),
        (
            cls.storage_charging_efficiency < 0.0,
            "self.storage_charging_efficiency < 0.",
        ),
        (
            cls.storage_charging_efficiency > 1.0,
            "self.storage_charging_efficiency > 1.",
        ),
    ):
        if np.any(bad):
            tmp = np.where(bad)[0]
            raise BackendError(f"{descr} for storage units with ids: {tmp}")
    # a unit losing more than it can absorb could never be recharged
    if np.any(cls.storage_loss > cls.storage_max_p_absorb):
        tmp = np.where(cls.storage_loss > cls.storage_max_p_absorb)[0]
        raise BackendError(
            f"Some storage units are such that their loss (self.storage_loss) is higher "
            f"than the maximum power at which they can be charged (self.storage_max_p_absorb). "
            f"Such storage units are doomed to discharged (due to losses) without anything "
            f"being able to charge them back. This really un interesting behaviour is not "
            f"supported by grid2op. Please check storage data for units {tmp}"
        )
@classmethod
def _check_validity_shunt_data(cls):
    """Check the consistency of the shunt related class attributes.

    Only called when the backend declares shunt support. Verifies that
    ``n_shunt``, ``name_shunt`` and ``shunt_to_subid`` are set, converts the
    latter two to numpy arrays, and checks lengths and substation ids.

    Raises
    ------
    IncorrectNumberOfElements
        If an attribute is missing or has the wrong length.
    EnvError
        If a conversion fails or a substation id is out of range.
    """
    if cls.n_shunt is None:
        raise IncorrectNumberOfElements(
            'Backend is supposed to support shunts, but "n_shunt" is not set.'
        )
    if cls.name_shunt is None:
        raise IncorrectNumberOfElements(
            'Backend is supposed to support shunts, but "name_shunt" is not set.'
        )
    if cls.shunt_to_subid is None:
        raise IncorrectNumberOfElements(
            'Backend is supposed to support shunts, but "shunt_to_subid" is not set.'
        )
    if not isinstance(cls.name_shunt, np.ndarray):
        try:
            cls.name_shunt = np.array(cls.name_shunt)
            cls.name_shunt = cls.name_shunt.astype(np.str_)
        except Exception as exc:
            # chain the original error so the root cause is not lost
            raise EnvError(
                'name_shunt should be convertible to a numpy array with dtype "str".'
            ) from exc
    if not isinstance(cls.shunt_to_subid, np.ndarray):
        try:
            cls.shunt_to_subid = np.array(cls.shunt_to_subid)
            cls.shunt_to_subid = cls.shunt_to_subid.astype(dt_int)
        except Exception as exc:
            raise EnvError(
                'shunt_to_subid should be convertible to a numpy array with dtype "int".'
            ) from exc
    if cls.name_shunt.shape[0] != cls.n_shunt:
        raise IncorrectNumberOfElements(
            'Backend is supposed to support shunts, but "name_shunt" has not '
            '"n_shunt" elements.'
        )
    if cls.shunt_to_subid.shape[0] != cls.n_shunt:
        raise IncorrectNumberOfElements(
            'Backend is supposed to support shunts, but "shunt_to_subid" has not '
            '"n_shunt" elements.'
        )
    if cls.n_shunt > 0:
        # check the substation id only if there are shunt
        if np.min(cls.shunt_to_subid) < 0:
            raise EnvError("Some shunt is connected to a negative substation id.")
        # valid substation ids are 0 .. n_sub - 1, so >= is the right bound
        # (the original ">" let the invalid id n_sub slip through); a space
        # was also missing between "which" and "is" in the message
        if np.max(cls.shunt_to_subid) >= cls.n_sub:
            raise EnvError(
                "Some shunt is supposed to be connected to substations with id {} which "
                "is greater than the number of substations of the grid, which is {}."
                "".format(np.max(cls.shunt_to_subid), cls.n_sub)
            )
@classmethod
def _check_validity_dispathcing_data(cls):
    """Check the consistency of redispatching / unit commitment data.

    (Method name kept as-is, typo included, for backward compatibility.)
    Verifies that every ``gen_*`` attribute is set and has ``n_gen`` entries,
    that values respect basic constraints (non-negative ramps, costs, pmin /
    pmax, known generator types), converts each vector to the proper numpy
    dtype, and checks that ramps do not exceed pmax.

    Raises
    ------
    InvalidRedispatching
        For any missing, mis-sized, mis-typed or invalid-valued attribute.
    """
    # (attribute, verb used in the "missing" message, description reused in
    # both the "missing" and "wrong length" messages); gen_renewable has
    # different wordings for the two messages and is handled separately below
    _gen_vects = [
        ("gen_type", "recognize", "the type of generators (gen_type)"),
        ("gen_pmin", "recognize", "the pmin of generators (gen_pmin)"),
        ("gen_pmax", "recognize", "the pmax of generators (gen_pmax)"),
        (
            "gen_redispatchable",
            "know",
            "which generator can be dispatched (gen_redispatchable)",
        ),
        (
            "gen_max_ramp_up",
            "recognize",
            "the ramp up of generators (gen_max_ramp_up)",
        ),
        (
            # message fixed: it previously said "ramp up" for gen_max_ramp_down
            "gen_max_ramp_down",
            "recognize",
            "the ramp down of generators (gen_max_ramp_down)",
        ),
        (
            "gen_min_uptime",
            "recognize",
            "the min uptime of generators (gen_min_uptime)",
        ),
        (
            "gen_min_downtime",
            "recognize",
            "the min downtime of generators (gen_min_downtime)",
        ),
        (
            "gen_cost_per_MW",
            "recognize",
            "the marginal costs of generators (gen_cost_per_MW)",
        ),
        (
            "gen_startup_cost",
            "recognize",
            "the start up cost of generators (gen_startup_cost)",
        ),
        (
            "gen_shutdown_cost",
            "recognize",
            "the shut down cost of generators (gen_shutdown_cost)",
        ),
    ]
    _suffix = " when redispatching is supposed to be available."
    # every attribute must be provided ...
    for attr_nm, verb, descr in _gen_vects:
        if getattr(cls, attr_nm) is None:
            raise InvalidRedispatching(f"Impossible to {verb} {descr}{_suffix}")
    if cls.gen_renewable is None:
        raise InvalidRedispatching(
            "Impossible to recognize the whether generators comes from renewable energy "
            "sources "
            "(gen_renewable) when redispatching is supposed to be available."
        )
    # ... and must have one entry per generator
    for attr_nm, _, descr in _gen_vects:
        if len(getattr(cls, attr_nm)) != cls.n_gen:
            raise InvalidRedispatching(f"Invalid length for {descr}{_suffix}")
    if len(cls.gen_renewable) != cls.n_gen:
        raise InvalidRedispatching(
            "Invalid length for the renewable flag vector"
            "(gen_renewable) when redispatching is supposed to be available."
        )
    # value checks
    if np.any(cls.gen_min_uptime < 0):
        raise InvalidRedispatching(
            "Minimum uptime of generator (gen_min_uptime) cannot be negative"
        )
    if np.any(cls.gen_min_downtime < 0):
        raise InvalidRedispatching(
            "Minimum downtime of generator (gen_min_downtime) cannot be negative"
        )
    for el in cls.gen_type:
        if el not in ("solar", "wind", "hydro", "thermal", "nuclear"):
            raise InvalidRedispatching("Unknown generator type : {}".format(el))
    if np.any(cls.gen_pmin < 0.0):
        raise InvalidRedispatching("One of the Pmin (gen_pmin) is negative")
    if np.any(cls.gen_pmax < 0.0):
        raise InvalidRedispatching("One of the Pmax (gen_pmax) is negative")
    # messages fixed: "ramp up" / "ramp down" labels were swapped, and the
    # shut down cost message said "start up cost"
    if np.any(cls.gen_max_ramp_down < 0.0):
        raise InvalidRedispatching(
            "One of the ramp down (gen_max_ramp_down) is negative"
        )
    if np.any(cls.gen_max_ramp_up < 0.0):
        raise InvalidRedispatching(
            "One of the ramp up (gen_max_ramp_up) is negative"
        )
    if np.any(cls.gen_startup_cost < 0.0):
        raise InvalidRedispatching(
            "One of the start up cost (gen_startup_cost) is negative"
        )
    if np.any(cls.gen_shutdown_cost < 0.0):
        raise InvalidRedispatching(
            "One of the shut down cost (gen_shutdown_cost) is negative"
        )
    # normalize the dtype of each vector
    for el, type_ in zip(
        [
            "gen_type",
            "gen_pmin",
            "gen_pmax",
            "gen_redispatchable",
            "gen_max_ramp_up",
            "gen_max_ramp_down",
            "gen_min_uptime",
            "gen_min_downtime",
            "gen_cost_per_MW",
            "gen_startup_cost",
            "gen_shutdown_cost",
            "gen_renewable",
        ],
        [
            str,
            dt_float,
            dt_float,
            dt_bool,
            dt_float,
            dt_float,
            dt_int,
            dt_int,
            dt_float,
            dt_float,
            dt_float,
            dt_bool,
        ],
    ):
        arr = getattr(cls, el)
        if not isinstance(arr, np.ndarray):
            try:
                # BUGFIX: the original called ``.astype`` directly on the
                # (possibly list) attribute, which always failed for plain
                # lists; convert to an ndarray first.
                arr = np.array(arr).astype(type_)
                setattr(cls, el, arr)
            except Exception as exc_:
                raise InvalidRedispatching(
                    '{} should be convertible to a numpy array with error:\n "{}"'
                    "".format(el, exc_)
                )
        if not np.issubdtype(arr.dtype, np.dtype(type_).type):
            try:
                setattr(cls, el, arr.astype(type_))
            except Exception as exc_:
                raise InvalidRedispatching(
                    "{} should be convertible data should be convertible to "
                    '{} with error: \n"{}"'.format(el, type_, exc_)
                )
    # a redispatchable generator cannot ramp up above its pmax
    if np.any(
        cls.gen_max_ramp_up[cls.gen_redispatchable]
        > cls.gen_pmax[cls.gen_redispatchable]
    ):
        raise InvalidRedispatching(
            "Invalid maximum ramp for some generator (above pmax)"
        )
@classmethod
def attach_layout(cls, grid_layout):
    """
    INTERNAL
    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
    Store the (raw, unchecked) graphical layout of the grid on the class:
    a dictionary mapping each substation name to the tuple of its
    coordinates. No validation whatsoever is performed here; prefer
    :func:`grid2op.Environment.BaseEnv.attach_layout` over calling this
    method directly.
    Parameters
    ----------
    grid_layout: ``dict``
        See definition of :attr:`GridObjects.grid_layout` for more information.
    """
    cls.grid_layout = grid_layout
@classmethod
def set_env_name(cls, name):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Overwrite the environment name stored on this class. Never attempt to change
    the name of an environment once it has been loaded: doing so leads to
    undefined behaviours (in particular, but not only, with multiprocessing).
    """
    cls.env_name = name
@classmethod
def init_grid(cls, gridobj, force=False, extra_name=None, force_module=None):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    This is done at the creation of the environment. Use of this class outside of this particular
    use is really dangerous and will lead to undefined behaviours. **Do not use this function**.

    Initialize this :class:`GridObjects` subclass with a provided class.

    It does not perform any check on the validity of the `gridobj` parameters, but it guarantees that if `gridobj`
    is a valid grid, then the initialization will lead to a valid grid too.

    Parameters
    ----------
    gridobj: :class:`GridObjects`
        The representation of the powergrid

    force: ``bool``
        force the initialization of the class. By default if a class with the same name exists in `globals()`
        it does not initialize it. Setting "force=True" will bypass this check and update it accordingly.

    extra_name:
        not used by this implementation (kept for interface compatibility) — TODO confirm with callers

    force_module: ``str`` or ``None``
        if provided, the ``__module__`` attribute of the created class is overwritten with this value
    """
    # nothing to do now that the value are class member
    # the created class is named after both the calling class and the environment
    name_res = "{}_{}".format(cls.__name__, gridobj.env_name)
    if gridobj.glop_version != grid2op.__version__:
        # grid saved with an older grid2op version: keep track of that version in the class name
        name_res += f"_{gridobj.glop_version}"

    if gridobj._PATH_ENV is not None:
        # the configuration equires to initialize the classes from the local environment path
        # this might be usefull when using pickle module or multiprocessing on Windows for example
        my_class = GridObjects._build_cls_from_import(name_res, gridobj._PATH_ENV)
        if my_class is not None:
            return my_class

    if not gridobj.shunts_data_available:
        # if you import env for backend
        # with shunt and without shunt, then
        # there might be issues
        name_res += "_noshunt"

    if name_res in globals():
        if not force:
            # no need to recreate the class, it already exists
            return globals()[name_res]
        else:
            # i recreate the variable
            del globals()[name_res]

    # create the new class with all the grid description attributes of `gridobj` as class attributes
    cls_attr_as_dict = {}
    GridObjects._make_cls_dict_extended(gridobj, cls_attr_as_dict, as_list=False)
    res_cls = type(name_res, (cls,), cls_attr_as_dict)

    if hasattr(cls, "_INIT_GRID_CLS") and cls._INIT_GRID_CLS is not None:
        # original class is already from an initialized environment, i keep track of it
        res_cls._INIT_GRID_CLS = cls._INIT_GRID_CLS
    else:
        # i am the original class from grid2op
        res_cls._INIT_GRID_CLS = cls

    # recompute the derived "position in topo vect" attributes for the new class
    res_cls._compute_pos_big_topo_cls()
    res_cls.process_shunt_satic_data()
    if res_cls.glop_version != grid2op.__version__:
        # adapt class attributes for backward compatibility with older serialized grids
        res_cls.process_grid2op_compat()

    if force_module is not None:
        res_cls.__module__ = force_module  # hack because otherwise it says "abc" which is not the case
        # best would be to have a look at https://docs.python.org/3/library/types.html

    # store the type created here in the "globals" to prevent the initialization of the same class over and over
    globals()[name_res] = res_cls
    del res_cls
    return globals()[name_res]
@classmethod
def process_grid2op_compat(cls):
    """
    This function can be overloaded.

    This is called when the class is initialized, with `init_grid` to broadcast grid2op compatibility feature.

    It resets, on the class, the attributes of the features that did not exist in the
    grid2op version this grid was serialized with.
    """
    # NOTE(review): `glop_version` is compared lexicographically as a string;
    # this gives wrong results for two-digit minor versions (e.g. "1.10.0" < "1.6.0"
    # is True as strings) — TODO confirm intended behaviour for grid2op >= 1.10
    if cls.glop_version < "1.6.0":
        # this feature did not exist before.
        # (the "alarm" feature was introduced in grid2op 1.6.0)
        cls.dim_alarms = 0
        cls.assistant_warning_type = None

    if cls.glop_version < "1.9.1":
        # this feature did not exists before
        # (the "alert" feature was introduced in grid2op 1.9.1)
        cls.dim_alerts = 0
@classmethod
def get_obj_connect_to(cls, _sentinel=None, substation_id=None):
    """
    Get all the object connected to a given substation. This is particularly usefull if you want to know the
    names of the generator / load connected to a given substation, or which extremity etc.

    Parameters
    ----------
    _sentinel: ``None``
        Used to prevent positional parameters. Internal, do not use.

    substation_id: ``int``
        ID of the substation we want to inspect

    Returns
    -------
    res: ``dict``
        A dictionary with keys:

        - "loads_id": a vector giving the id of the loads connected to this substation, empty if none
        - "generators_id": a vector giving the id of the generators connected to this substation, empty if none
        - "lines_or_id": a vector giving the id of the origin side of the powerlines connected to this substation,
          empty if none
        - "lines_ex_id": a vector giving the id of the extermity side of the powerlines connected to this
          substation, empty if none.
        - "storages_id": a vector giving the id of the storage units connected at this substation.
        - "nb_elements" : number of elements connected to this substation

    Raises
    ------
    :class:`grid2op.Exceptions.Grid2OpException`
        if a positional argument is given, if `substation_id` is missing, or if it is
        out of range (including negative values).

    Examples
    --------
    .. code-block:: python

        import grid2op
        env = grid2op.make()

        sub_id = 1
        dict_ = env.get_obj_connect_to(substation_id=sub_id)
        print("There are {} elements connected to this substation (not counting shunt)".format(
            dict_["nb_elements"]))
        print("The names of the loads connected to substation {} are: {}".format(
            sub_id, env.name_load[dict_["loads_id"]]))
        print("The names of the generators connected to substation {} are: {}".format(
            sub_id, env.name_gen[dict_["generators_id"]]))
        print("The powerline whose origin end is connected to substation {} are: {}".format(
            sub_id, env.name_line[dict_["lines_or_id"]]))
        print("The powerline whose extremity end is connected to substation {} are: {}".format(
            sub_id, env.name_line[dict_["lines_ex_id"]]))
        print("The storage units connected to substation {} are: {}".format(
            sub_id, env.name_storage[dict_["storages_id"]]))
    """
    if _sentinel is not None:
        raise Grid2OpException(
            "get_obj_connect_to should be used only with key-word arguments"
        )

    if substation_id is None:
        raise Grid2OpException(
            "You ask the composition of a substation without specifying its id."
            'Please provide "substation_id"'
        )

    # negative ids are rejected too: numpy would otherwise silently interpret
    # them (e.g. cls.sub_info[-1]) and return data for the wrong substation
    if substation_id < 0 or substation_id >= len(cls.sub_info):
        raise Grid2OpException(
            'There are no substation of id "substation_id={}" in this grid.'
            "".format(substation_id)
        )

    res = {
        "loads_id": np.where(cls.load_to_subid == substation_id)[0],
        "generators_id": np.where(cls.gen_to_subid == substation_id)[0],
        "lines_or_id": np.where(cls.line_or_to_subid == substation_id)[0],
        "lines_ex_id": np.where(cls.line_ex_to_subid == substation_id)[0],
        "storages_id": np.where(cls.storage_to_subid == substation_id)[0],
        "nb_elements": cls.sub_info[substation_id],
    }
    return res
@classmethod
def get_obj_substations(cls, _sentinel=None, substation_id=None):
    """
    Return the object connected as a substation in form of a numpy array instead of a dictionary (as
    opposed to :func:`GridObjects.get_obj_connect_to`).

    This format is particularly useful for example if you want to know the number of generator connected
    to a given substation for example (see section examples).

    Parameters
    ----------
    _sentinel: ``None``
        Used to prevent positional parameters. Internal, do not use.

    substation_id: ``int``
        ID of the substation we want to inspect

    Returns
    -------
    res: ``numpy.ndarray``
        A matrix with as many rows as the number of element of the substation and 6 columns:

        1. column 0: the id of the substation
        2. column 1: -1 if this object is not a load, or `LOAD_ID` if this object is a load (see example)
        3. column 2: -1 if this object is not a generator, or `GEN_ID` if this object is a generator (see example)
        4. column 3: -1 if this object is not the origin end of a line, or `LOR_ID` if this object is the
           origin end of a powerline(see example)
        5. column 4: -1 if this object is not a extremity end, or `LEX_ID` if this object is the extremity
           end of a powerline
        6. column 5: -1 if this object is not a storage unit, or `STO_ID` if this object is one

    Raises
    ------
    :class:`grid2op.Exceptions.Grid2OpException`
        if a positional argument is given, if `substation_id` is missing, or if it is
        out of range (including negative values).

    Examples
    --------
    .. code-block:: python

        import numpy as np
        import grid2op
        env = grid2op.make()

        sub_id = 1
        mat = env.get_obj_substations(substation_id=sub_id)

        # the first element of the substation is:
        mat[0,:]
        # array([ 1, -1, -1, -1,  0, -1], dtype=int32)
        # we know it's connected to substation 1... no kidding...
        # we can also get that:
        # 1. this is not a load (-1 at position 1 - so 2nd component)
        # 2. this is not a generator (-1 at position 2 - so 3rd component)
        # 3. this is not the origin end of a powerline (-1 at position 3)
        # 4. this is the extremity end of powerline 0 (there is a 0 at position 4)
        # 5. this is not a storage unit (-1 at position 5 - so last component)

        # likewise, the second element connected at this substation is:
        mat[1,:]
        # array([ 1, -1, -1,  2, -1, -1], dtype=int32)
        # it represents the origin end of powerline 2

        # the 5th element connected at this substation is:
        mat[4,:]
        # which is equal to array([ 1, -1,  0, -1, -1, -1], dtype=int32)
        # so it's represents a generator, and this generator has the id 0

        # the 6th element connected at this substation is:
        mat[5,:]
        # which is equal to array([ 1,  0, -1, -1, -1, -1], dtype=int32)
        # so it represents a load, and this load has the id 0

        # and, last example, if you want to count the number of generator connected at this
        # substation you can
        is_gen = mat[:,env.GEN_COL] != -1  # a boolean vector saying ``True`` if the object is a generator
        nb_gen_this_substation = np.sum(is_gen)
    """
    if _sentinel is not None:
        raise Grid2OpException(
            "get_obj_substations should be used only with key-word arguments"
        )

    if substation_id is None:
        raise Grid2OpException(
            "You ask the composition of a substation without specifying its id."
            'Please provide "substation_id"'
        )

    # negative ids are rejected too: numpy indexing would silently "wrap around"
    # and return data for another substation
    if substation_id < 0 or substation_id >= len(cls.sub_info):
        raise Grid2OpException(
            'There are no substation of id "substation_id={}" in this grid.'
            "".format(substation_id)
        )

    dict_ = cls.get_obj_connect_to(substation_id=substation_id)
    # one row per element of the substation; -1 everywhere an element "is not" of a kind
    res = np.full((dict_["nb_elements"], 6), fill_value=-1, dtype=dt_int)
    # 0 -> load, 1-> gen, 2 -> lines_or, 3 -> lines_ex
    res[:, cls.SUB_COL] = substation_id
    # each element is placed at its "position within the substation" (xxx_to_sub_pos)
    res[cls.load_to_sub_pos[dict_["loads_id"]], cls.LOA_COL] = dict_["loads_id"]
    res[cls.gen_to_sub_pos[dict_["generators_id"]], cls.GEN_COL] = dict_[
        "generators_id"
    ]
    res[cls.line_or_to_sub_pos[dict_["lines_or_id"]], cls.LOR_COL] = dict_[
        "lines_or_id"
    ]
    res[cls.line_ex_to_sub_pos[dict_["lines_ex_id"]], cls.LEX_COL] = dict_[
        "lines_ex_id"
    ]
    res[cls.storage_to_sub_pos[dict_["storages_id"]], cls.STORAGE_COL] = dict_[
        "storages_id"
    ]
    return res
def get_lines_id(self, _sentinel=None, from_=None, to_=None):
    """
    Returns the list of all the powerlines id in the backend going from `from_` to `to_`

    Parameters
    ----------
    _sentinel: ``None``
        Internal, do not use: prevents positional arguments.

    from_: ``int``
        Id the substation to which the origin end of the powerline to look for should be connected to

    to_: ``int``
        Id the substation to which the extremity end of the powerline to look for should be connected to

    Returns
    -------
    res: ``list``
        Id of the powerline looked for.

    Raises
    ------
    :class:`grid2op.Exceptions.BackendError` if no match is found.

    Examples
    --------
    It can be used like:

    .. code-block:: python

        import numpy as np
        import grid2op
        env = grid2op.make()

        l_ids = env.get_lines_id(from_=0, to_=1)
        print("The powerlines connecting substation 0 to substation 1 have for ids: {}".format(l_ids))
    """
    # consistency fix: enforce keyword-only usage like the other "get_..." methods,
    # otherwise a positional call silently searches with the wrong substation ids
    if _sentinel is not None:
        raise BackendError(
            "get_lines_id should be used only with key-word arguments"
        )

    if from_ is None:
        raise BackendError(
            "ObservationSpace.get_lines_id: impossible to look for a powerline with no origin "
            'substation. Please modify "from_" parameter'
        )

    if to_ is None:
        raise BackendError(
            "ObservationSpace.get_lines_id: impossible to look for a powerline with no extremity "
            'substation. Please modify "to_" parameter'
        )

    # a powerline matches when both its origin and extremity substations match
    res = []
    for i, (ori, ext) in enumerate(
        zip(self.line_or_to_subid, self.line_ex_to_subid)
    ):
        if ori == from_ and ext == to_:
            res.append(i)

    if not res:  # res is empty here
        # bug fix: message previously referenced "get_line_id", a method that does
        # not exist; it now matches the actual method name
        raise BackendError(
            "ObservationSpace.get_lines_id: impossible to find a powerline connected "
            "with origin at {} and extremity at {}".format(from_, to_)
        )

    return res
def get_generators_id(self, sub_id):
    """
    Returns the list of all generators id in the backend connected to the substation sub_id

    Parameters
    ----------
    sub_id: ``int``
        The substation to which we look for the generator

    Returns
    -------
    res: ``list``
        Id of the generators looked for.

    Raises
    ------
    :class:`grid2op.Exceptions.BackendError` if no match is found.

    Examples
    --------
    It can be used like:

    .. code-block:: python

        import numpy as np
        import grid2op
        env = grid2op.make()

        g_ids = env.get_generators_id(sub_id=1)
        print("The generators connected to substation 1 have for ids: {}".format(g_ids))
    """
    if sub_id is None:
        raise BackendError(
            "GridObjects.get_generators_id: impossible to look for a generator not connected to any substation. "
            'Please modify "sub_id" parameter'
        )

    # keep every generator whose substation matches `sub_id`
    res = [
        gen_id
        for gen_id, gen_sub in enumerate(self.gen_to_subid)
        if gen_sub == sub_id
    ]

    if not res:
        raise BackendError(
            "GridObjects.get_generators_id: impossible to find a generator connected at "
            "substation {}".format(sub_id)
        )
    return res
def get_loads_id(self, sub_id):
    """
    Returns the list of all loads id in the backend connected to the substation sub_id

    Parameters
    ----------
    sub_id: ``int``
        The substation to which we look for the loads

    Returns
    -------
    res: ``list``
        Id of the loads looked for.

    Raises
    ------
    :class:`grid2op.Exceptions.BackendError` if no match found.

    Examples
    --------
    It can be used like:

    .. code-block:: python

        import numpy as np
        import grid2op
        env = grid2op.make()

        c_ids = env.get_loads_id(sub_id=1)
        print("The loads connected to substation 1 have for ids: {}".format(c_ids))
    """
    if sub_id is None:
        raise BackendError(
            "GridObjects.get_loads_id: impossible to look for a load not connected to any substation. "
            'Please modify "sub_id" parameter'
        )

    # keep every load whose substation matches `sub_id`
    res = [
        load_id
        for load_id, load_sub in enumerate(self.load_to_subid)
        if load_sub == sub_id
    ]

    if not res:
        raise BackendError(
            "GridObjects.get_loads_id: impossible to find a load connected at substation {}".format(
                sub_id
            )
        )
    return res
def get_storages_id(self, sub_id):
    """
    Returns the list of all storages element (battery or dam) id in the grid connected to the substation sub_id

    Parameters
    ----------
    sub_id: ``int``
        The substation to which we look for the storage unit

    Returns
    -------
    res: ``list``
        Id of the storage elements looked for.

    Raises
    ------
    :class:`grid2op.Exceptions.BackendError` if no match found.

    Examples
    --------
    It can be used like:

    .. code-block:: python

        import numpy as np
        import grid2op
        env = grid2op.make()

        sto_ids = env.get_storages_id(sub_id=1)
        print("The storage units connected to substation 1 have for ids: {}".format(sto_ids))
    """
    res = []
    if sub_id is None:
        # bug fix: the message previously talked about "a load" (copy-paste from
        # get_loads_id); it now correctly mentions storage units
        raise BackendError(
            "GridObjects.get_storages_id: impossible to look for a storage unit not connected to any substation. "
            'Please modify "sub_id" parameter'
        )

    for i, s_id_gen in enumerate(self.storage_to_subid):
        if s_id_gen == sub_id:
            res.append(i)

    if not res:  # res is empty here
        # bug fix: the message previously referenced "GridObjects.bd" (garbled name)
        raise BackendError(
            "GridObjects.get_storages_id: impossible to find a storage unit connected at substation {}".format(
                sub_id
            )
        )

    return res
@staticmethod
def _make_cls_dict(cls, res, as_list=True, copy_=True):
    """
    Serialize in `res` (filled in place, also returned) the grid description
    attributes of `cls`.

    NB: `cls` can be here a class or an object of a class...

    Parameters
    ----------
    cls:
        class (or instance) whose grid attributes are serialized
    res: ``dict``
        dictionary filled in place
    as_list: ``bool``
        if ``True`` the numpy arrays are converted to plain python lists
        (json friendly), otherwise they are kept as is
    copy_: ``bool``
        forwarded to `save_to_dict` (whether values should be copied)
    """

    def _conv(fun):
        # element-wise converter used when a json-serializable list is wanted,
        # otherwise keep the underlying container untouched
        return (lambda li: [fun(el) for el in li]) if as_list else None

    save_to_dict(res, cls, "glop_version", str, copy_)
    res["_PATH_ENV"] = cls._PATH_ENV  # i do that manually for more control

    # names of all the elements of the grid
    for nm_attr in ["name_gen", "name_load", "name_line", "name_sub", "name_storage"]:
        save_to_dict(res, cls, nm_attr, _conv(str), copy_)
    save_to_dict(res, cls, "env_name", str, copy_)

    # integer vectors describing the topology (order of insertion matters:
    # it is kept identical to the historical serialization order)
    for nm_attr in [
        "sub_info",
        "load_to_subid",
        "gen_to_subid",
        "line_or_to_subid",
        "line_ex_to_subid",
        "storage_to_subid",
        "load_to_sub_pos",
        "gen_to_sub_pos",
        "line_or_to_sub_pos",
        "line_ex_to_sub_pos",
        "storage_to_sub_pos",
        "load_pos_topo_vect",
        "gen_pos_topo_vect",
        "line_or_pos_topo_vect",
        "line_ex_pos_topo_vect",
        "storage_pos_topo_vect",
    ]:
        save_to_dict(res, cls, nm_attr, _conv(int), copy_)

    # redispatching
    if cls.redispatching_unit_commitment_availble:
        for nm_attr, type_attr in zip(cls._li_attr_disp, cls._type_attr_disp):
            save_to_dict(res, cls, nm_attr, _conv(type_attr), copy_)
    else:
        for nm_attr in cls._li_attr_disp:
            res[nm_attr] = None

    # layout (coordinates of the substations), if any
    if cls.grid_layout is not None:
        save_to_dict(
            res,
            cls,
            "grid_layout",
            (lambda gl: {str(k): [float(x), float(y)] for k, (x, y) in gl.items()})
            if as_list
            else None,
            copy_,
        )
    else:
        res["grid_layout"] = None

    # shunts
    if cls.shunts_data_available:
        save_to_dict(res, cls, "name_shunt", _conv(str), copy_)
        save_to_dict(res, cls, "shunt_to_subid", _conv(int), copy_)
    else:
        res["name_shunt"] = None
        res["shunt_to_subid"] = None

    # storage data
    save_to_dict(res, cls, "storage_type", _conv(str), copy_)
    for nm_attr in [
        "storage_Emax",
        "storage_Emin",
        "storage_max_p_prod",
        "storage_max_p_absorb",
        "storage_marginal_cost",
        "storage_loss",
        "storage_charging_efficiency",
        "storage_discharging_efficiency",
    ]:
        save_to_dict(res, cls, nm_attr, _conv(float), copy_)

    # alert or alarm
    if cls.assistant_warning_type is not None:
        res["assistant_warning_type"] = str(cls.assistant_warning_type)
    else:
        res["assistant_warning_type"] = None

    # area for the alarm feature (converters applied regardless of `as_list`,
    # as in the historical behaviour)
    res["dim_alarms"] = cls.dim_alarms
    save_to_dict(
        res, cls, "alarms_area_names", (lambda li: [str(el) for el in li]), copy_
    )
    save_to_dict(
        res,
        cls,
        "alarms_lines_area",
        (
            lambda dict_: {
                str(l_nm): [str(ar_nm) for ar_nm in areas]
                for l_nm, areas in dict_.items()
            }
        ),
        copy_,
    )
    save_to_dict(
        res,
        cls,
        "alarms_area_lines",
        (lambda lili: [[str(l_nm) for l_nm in lines] for lines in lili]),
        copy_,
    )

    # number of line alert for the alert feature
    res["dim_alerts"] = cls.dim_alerts
    # save alert line names to dict
    save_to_dict(res, cls, "alertable_line_names", _conv(str), copy_)
    save_to_dict(res, cls, "alertable_line_ids", _conv(int), copy_)
    return res
@staticmethod
def _make_cls_dict_extended(cls, res, as_list=True, copy_=True):
    """add the n_gen and all in the class created

    Same as :func:`GridObjects._make_cls_dict` but also stores the redundant
    "counting" attributes (n_gen, n_load, ...) that are otherwise recomputed.

    Parameters are the same as :func:`GridObjects._make_cls_dict`.
    `res` is filled in place and also returned.
    """
    GridObjects._make_cls_dict(cls, res, as_list=as_list, copy_=copy_)
    res["n_gen"] = cls.n_gen
    res["n_load"] = cls.n_load
    res["n_line"] = cls.n_line
    res["n_sub"] = cls.n_sub
    res["dim_topo"] = 1 * cls.dim_topo  # "1 *" detaches from a possible numpy scalar
    # shunt
    res["n_shunt"] = cls.n_shunt
    res["shunts_data_available"] = cls.shunts_data_available
    # storage
    res["n_storage"] = cls.n_storage
    # redispatching / curtailment
    res[
        "redispatching_unit_commitment_availble"
    ] = cls.redispatching_unit_commitment_availble
    # consistency fix: `_make_cls_dict` returns the dictionary, this extended
    # variant now does too (callers relying on in-place filling are unaffected)
    return res
@classmethod
def cls_to_dict(cls):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    This is used internally only to save action_space or observation_space for example. Do not
    attempt to use it in a different context.

    Returns
    -------
    res: ``dict``
        A json-serializable dictionary representation of this class.
    """
    dict_repr = {}
    GridObjects._make_cls_dict(cls, dict_repr)
    return dict_repr
@staticmethod
def from_dict(dict_):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    This is used internally only to restore action_space or observation_space if they
    have been saved by `to_dict`. Do not attempt to use it in a different context.

    Create a valid GridObject (or one of its derived class if this method is overridden)
    from a dictionnary (usually read from a json file)

    Parameters
    ----------
    dict_: ``dict``
        The representation of the GridObject as a dictionary.

    Returns
    -------
    res: :class:`GridObject`
        The object of the proper class that were initially represented as a dictionary.
    """
    # TODO refacto that with the "type(blablabla, blabla, blabal)" syntax !
    class res(GridObjects):
        pass

    cls = res
    if "glop_version" in dict_:
        cls.glop_version = dict_["glop_version"]
    else:
        # serialized with a grid2op version that did not store its version number
        cls.glop_version = cls.BEFORE_COMPAT_VERSION

    if "_PATH_ENV" in dict_:
        cls._PATH_ENV = str(dict_["_PATH_ENV"])
    else:
        cls._PATH_ENV = None

    cls.name_gen = extract_from_dict(
        dict_, "name_gen", lambda x: np.array(x).astype(str)
    )
    cls.name_load = extract_from_dict(
        dict_, "name_load", lambda x: np.array(x).astype(str)
    )
    cls.name_line = extract_from_dict(
        dict_, "name_line", lambda x: np.array(x).astype(str)
    )
    cls.name_sub = extract_from_dict(
        dict_, "name_sub", lambda x: np.array(x).astype(str)
    )

    if "env_name" in dict_:
        # new saved in version >= 1.3.0
        cls.env_name = str(dict_["env_name"])
    else:
        # environment name was not stored, this make the task to retrieve this impossible
        pass

    cls.sub_info = extract_from_dict(
        dict_, "sub_info", lambda x: np.array(x).astype(dt_int)
    )
    cls.load_to_subid = extract_from_dict(
        dict_, "load_to_subid", lambda x: np.array(x).astype(dt_int)
    )
    cls.gen_to_subid = extract_from_dict(
        dict_, "gen_to_subid", lambda x: np.array(x).astype(dt_int)
    )
    cls.line_or_to_subid = extract_from_dict(
        dict_, "line_or_to_subid", lambda x: np.array(x).astype(dt_int)
    )
    cls.line_ex_to_subid = extract_from_dict(
        dict_, "line_ex_to_subid", lambda x: np.array(x).astype(dt_int)
    )
    cls.load_to_sub_pos = extract_from_dict(
        dict_, "load_to_sub_pos", lambda x: np.array(x).astype(dt_int)
    )
    cls.gen_to_sub_pos = extract_from_dict(
        dict_, "gen_to_sub_pos", lambda x: np.array(x).astype(dt_int)
    )
    cls.line_or_to_sub_pos = extract_from_dict(
        dict_, "line_or_to_sub_pos", lambda x: np.array(x).astype(dt_int)
    )
    cls.line_ex_to_sub_pos = extract_from_dict(
        dict_, "line_ex_to_sub_pos", lambda x: np.array(x).astype(dt_int)
    )
    cls.load_pos_topo_vect = extract_from_dict(
        dict_, "load_pos_topo_vect", lambda x: np.array(x).astype(dt_int)
    )
    cls.gen_pos_topo_vect = extract_from_dict(
        dict_, "gen_pos_topo_vect", lambda x: np.array(x).astype(dt_int)
    )
    cls.line_or_pos_topo_vect = extract_from_dict(
        dict_, "line_or_pos_topo_vect", lambda x: np.array(x).astype(dt_int)
    )
    cls.line_ex_pos_topo_vect = extract_from_dict(
        dict_, "line_ex_pos_topo_vect", lambda x: np.array(x).astype(dt_int)
    )

    # redundant counters, recomputed from the data just read
    cls.n_gen = len(cls.name_gen)
    cls.n_load = len(cls.name_load)
    cls.n_line = len(cls.name_line)
    cls.n_sub = len(cls.name_sub)
    cls.dim_topo = np.sum(cls.sub_info)

    if dict_["gen_type"] is None:
        cls.redispatching_unit_commitment_availble = False
        # and no need to make anything else, because everything is already initialized at None
    else:
        cls.redispatching_unit_commitment_availble = True
        type_attr_disp = [
            str,
            dt_float,
            dt_float,
            dt_bool,
            dt_float,
            dt_float,
            dt_int,
            dt_int,
            dt_float,
            dt_float,
            dt_float,
            dt_bool,
        ]
        # small "hack" here for the "gen_renewable" attribute, used for curtailment, that
        # is coded in grid2op >= 1.5 only
        if "gen_renewable" not in dict_:
            # before grid2op 1.4 it was not possible to make the difference between a renewable generator
            # and a non dispatchable one. Though no environment have this property yet, it is
            # possible to do it.
            dict_["gen_renewable"] = [not el for el in dict_["gen_redispatchable"]]
        for nm_attr, type_attr in zip(cls._li_attr_disp, type_attr_disp):
            # bind `type_attr` as a default argument: although the lambda is called
            # eagerly here, this avoids relying on late-binding of the loop variable
            setattr(
                cls,
                nm_attr,
                extract_from_dict(
                    dict_, nm_attr, lambda x, dt=type_attr: np.array(x).astype(dt)
                ),
            )

    cls.grid_layout = extract_from_dict(dict_, "grid_layout", lambda x: x)

    cls.name_shunt = extract_from_dict(dict_, "name_shunt", lambda x: x)
    if cls.name_shunt is not None:
        cls.shunts_data_available = True
        cls.n_shunt = len(cls.name_shunt)
        cls.name_shunt = np.array(cls.name_shunt).astype(str)
        cls.shunt_to_subid = extract_from_dict(
            dict_, "shunt_to_subid", lambda x: np.array(x).astype(dt_int)
        )

    if "name_storage" in dict_:
        # this is for backward compatibility with logs coming from grid2op <= 1.5
        # where storage unit did not exist.
        cls.name_storage = extract_from_dict(
            dict_, "name_storage", lambda x: np.array(x).astype(str)
        )
        cls.storage_to_subid = extract_from_dict(
            dict_, "storage_to_subid", lambda x: np.array(x).astype(dt_int)
        )
        cls.storage_to_sub_pos = extract_from_dict(
            dict_, "storage_to_sub_pos", lambda x: np.array(x).astype(dt_int)
        )
        cls.storage_pos_topo_vect = extract_from_dict(
            dict_, "storage_pos_topo_vect", lambda x: np.array(x).astype(dt_int)
        )
        cls.n_storage = len(cls.name_storage)
        # storage static data
        # BUG FIX: the values extracted below were previously discarded (results
        # of `extract_from_dict` were never assigned), so the storage physical
        # characteristics were lost after a to_dict / from_dict round trip.
        cls.storage_type = extract_from_dict(
            dict_, "storage_type", lambda x: np.array(x).astype(str)
        )
        cls.storage_Emax = extract_from_dict(
            dict_, "storage_Emax", lambda x: np.array(x).astype(dt_float)
        )
        cls.storage_Emin = extract_from_dict(
            dict_, "storage_Emin", lambda x: np.array(x).astype(dt_float)
        )
        cls.storage_max_p_prod = extract_from_dict(
            dict_, "storage_max_p_prod", lambda x: np.array(x).astype(dt_float)
        )
        cls.storage_max_p_absorb = extract_from_dict(
            dict_, "storage_max_p_absorb", lambda x: np.array(x).astype(dt_float)
        )
        cls.storage_marginal_cost = extract_from_dict(
            dict_, "storage_marginal_cost", lambda x: np.array(x).astype(dt_float)
        )
        cls.storage_loss = extract_from_dict(
            dict_, "storage_loss", lambda x: np.array(x).astype(dt_float)
        )
        cls.storage_charging_efficiency = extract_from_dict(
            dict_,
            "storage_charging_efficiency",
            lambda x: np.array(x).astype(dt_float),
        )
        cls.storage_discharging_efficiency = extract_from_dict(
            dict_,
            "storage_discharging_efficiency",
            lambda x: np.array(x).astype(dt_float),
        )
    else:
        # backward compatibility: no storage were supported
        cls.set_no_storage()

    cls.process_shunt_satic_data()

    if cls.glop_version != grid2op.__version__:
        # change name of the environment, this is done in Environment.py for regular environment
        # see `self.backend.set_env_name(f"{self.name}_{self._compat_glop_version}")`
        # cls.set_env_name(f"{cls.env_name}_{cls.glop_version}")
        # and now post process the class attributes for that
        cls.process_grid2op_compat()

    if "assistant_warning_type" in dict_:
        cls.assistant_warning_type = dict_["assistant_warning_type"]
    else:
        cls.assistant_warning_type = None

    # alarm information
    if "dim_alarms" in dict_:
        # NB by default the constructor do as if there were no alarm so that's great !
        cls.dim_alarms = dict_["dim_alarms"]
        cls.alarms_area_names = copy.deepcopy(dict_["alarms_area_names"])
        cls.alarms_lines_area = copy.deepcopy(dict_["alarms_lines_area"])
        cls.alarms_area_lines = copy.deepcopy(dict_["alarms_area_lines"])

    # alert information
    if "dim_alerts" in dict_:
        # NB by default the constructor do as if there were no alert so that's great !
        cls.dim_alerts = dict_["dim_alerts"]
        cls.alertable_line_names = extract_from_dict(
            dict_, "alertable_line_names", lambda x: np.array(x).astype(str)
        )
        cls.alertable_line_ids = extract_from_dict(
            dict_, "alertable_line_ids", lambda x: np.array(x).astype(dt_int)
        )

    # retrieve the redundant information that are not stored (for efficiency)
    obj_ = cls()
    obj_._compute_pos_big_topo_cls()
    cls = cls.init_grid(obj_, force=True)
    return cls()
@classmethod
def process_shunt_satic_data(cls):
    """remove possible shunts data from the classes, if shunts are deactivated

    This base implementation is a no-op; subclasses are expected to override it
    when they actually carry shunt data.
    """
    # NOTE(review): "satic" looks like a typo for "static", but the name is part of
    # the public API (it is called from `init_grid` / `from_dict` among others),
    # so it is kept as is.
    pass
@classmethod
def set_no_storage(cls):
    """
    Set on this class every storage-related attribute to the values describing
    a grid without any storage unit.
    """
    GridObjects.deactivate_storage(cls)
@staticmethod
def deactivate_storage(obj):
    """
    Set, on `obj` (a class or an instance), all the storage related attributes
    to the values describing a grid without any storage unit.
    """
    obj.n_storage = 0
    # description of the (absent) storage units: names, location in the grid
    # and type (assignment order kept identical to the historical one)
    for attr_nm, attr_type in [
        ("name_storage", str),
        ("storage_to_subid", dt_int),
        ("storage_pos_topo_vect", dt_int),
        ("storage_to_sub_pos", dt_int),
        ("storage_type", str),
    ]:
        setattr(obj, attr_nm, np.array([], dtype=attr_type))
    # physical characteristics of the (absent) storage units
    for attr_nm in [
        "storage_Emax",
        "storage_Emin",
        "storage_max_p_prod",
        "storage_max_p_absorb",
        "storage_marginal_cost",
        "storage_loss",
        "storage_charging_efficiency",
        "storage_discharging_efficiency",
    ]:
        setattr(obj, attr_nm, np.array([], dtype=dt_float))
@classmethod
def same_grid_class(cls, other_cls) -> bool:
    """
    return whether the two classes have the same grid

    Notes
    ------
    Two environments can have different name, but representing the same grid. This is why this function
    is agnostic to the "env_name" class attribute.

    In order for two grid to be equal, they must have everything in common, including the presence /
    absence of shunts or storage units for example.
    """
    # fast path: two classes coming from the same environment model the same grid
    if cls.env_name == other_cls.env_name:
        return True

    # this implementation is 6 times faster than the "cls_to_dict" one below, so i kept it
    my_attrs = {}
    GridObjects._make_cls_dict_extended(cls, my_attrs, as_list=False, copy_=False)
    other_attrs = {}
    GridObjects._make_cls_dict_extended(
        other_cls, other_attrs, as_list=False, copy_=False
    )

    # both classes must expose exactly the same set of attributes
    if my_attrs.keys() != other_attrs.keys():
        return False

    for attr_nm in my_attrs:
        # the environment name is deliberately ignored (see Notes)
        if attr_nm == "env_name":
            continue
        if attr_nm.startswith("__") and attr_nm.endswith("__"):
            continue
        if not np.array_equal(getattr(cls, attr_nm), getattr(other_cls, attr_nm)):
            return False
    return True
@staticmethod
def _build_cls_from_import(name_cls, path_env):
import sys
import os
import importlib
my_class = None
if path_env is None:
return None
if not os.path.exists(path_env):
return None
if not os.path.isdir(path_env):
return None
if not os.path.exists(os.path.join(path_env, "_grid2op_classes")):
return None
sys.path.append(path_env)
try:
module = importlib.import_module("_grid2op_classes")
if hasattr(module, name_cls):
my_class = getattr(module, name_cls)
except (ModuleNotFoundError, ImportError) as exc_:
# normal behaviour i don't do anything there
# TODO explain why
pass
return my_class
    @staticmethod
    def init_grid_from_dict_for_pickle(name_res, orig_cls, cls_attr):
        """
        This function is used internally for pickle to build the classes of the
        objects instead of loading them from the module (which is impossible as
        most classes are defined on the fly in grid2op)

        It is expected to create an object of the correct type. This object will then be
        "filled" with the proper content automatically by python, because i provided the "state" of the
        object in the __reduce__ method.

        Parameters
        ----------
        name_res: ``str``
            Name of the dynamic class to rebuild.
        orig_cls: ``type``
            The "raw" grid2op base class the dynamic class derives from.
        cls_attr: ``dict``
            The class-level attributes used to rebuild the dynamic class
            (as serialized by :func:`GridObjects.__reduce__`).

        Returns
        -------
        res:
            An *uninitialized* instance (created with ``__new__``) of the
            rebuilt class; pickle fills its state afterwards.
        """
        res_cls = None
        # first try to reuse the class saved on disk next to the environment (if any)
        if "_PATH_ENV" in cls_attr and cls_attr["_PATH_ENV"] is not None:
            res_cls = GridObjects._build_cls_from_import(
                name_res, cls_attr["_PATH_ENV"]
            )
        # check if the class already exists, if so returns it
        if res_cls is not None:
            # i recreate the class from local import
            pass
        elif name_res in globals():
            # no need to recreate the class, it already exists
            res_cls = globals()[name_res]
        else:
            # define properly the class, as it is not found
            res_cls = type(name_res, (orig_cls,), cls_attr)
            res_cls._INIT_GRID_CLS = orig_cls  # don't forget to remember the base class
            # if hasattr(res_cls, "n_sub") and res_cls.n_sub > 0:
            # that's a grid2op class iniailized with an environment, I need to initialize it too
            res_cls._compute_pos_big_topo_cls()
            # align the freshly built class with the installed grid2op version
            if res_cls.glop_version != grid2op.__version__:
                res_cls.process_grid2op_compat()
            res_cls.process_shunt_satic_data()
            # add the class in the "globals" for reuse later
            globals()[name_res] = res_cls
        # now create an "empty" object (using new)
        res = res_cls.__new__(res_cls)
        return res
# used for pickle and for deep copy
def __reduce__(self):
"""
It here to avoid issue with pickle.
But the problem is that it's also used by deepcopy... So its implementation is used a lot
"""
# TODO this is not really a convenient use of that i'm sure !
# Try to see if it can be better
cls_attr_as_dict = {}
GridObjects._make_cls_dict_extended(type(self), cls_attr_as_dict, as_list=False)
if hasattr(self, "__getstate__"):
my_state = self.__getstate__()
else:
my_state = {}
for k, v in self.__dict__.items():
my_state[k] = v # copy.copy(v)
my_cls = type(self)
if hasattr(my_cls, "_INIT_GRID_CLS"):
# I am a type created when an environment is loaded
base_cls = my_cls._INIT_GRID_CLS
else:
# i am a "raw" type directly coming from grid2op
base_cls = my_cls
return (
GridObjects.init_grid_from_dict_for_pickle,
(type(self).__name__, base_cls, cls_attr_as_dict),
my_state,
)
@classmethod
def local_bus_to_global(cls, local_bus, to_sub_id):
"""This function translate "local bus" whose id are in a substation, to "global bus id" whose
id are consistent for the whole grid.
Be carefull, when using this function, you might end up with deactivated bus: *eg* if you have an element on bus
with global id 1 and another on bus with global id 42 you might not have any element on bus with
global id 41 or 40 or 39 or etc.
.. note::
Typically, "local bus" are numbered 1 or 2. They represent the id of the busbar to which the element
is connected IN its substation.
On the other hand, the "global bus" are numberd, 0, 1, 2, 3, ..., 2 * self.n_sub. They represent some kind of
"universal" labelling of the busbars of all the grid. For example, substation 0 might have busbar `0` and `self.n_sub`,
substation 1 have busbar `1` and `self.n_sub + 1` etc.
[on_bus_1]
Local and global bus id represents the same thing. The difference comes down to convention.
"""
global_bus = (1 * local_bus).astype(dt_int) # make a copy
on_bus_1 = global_bus == 1
on_bus_2 = global_bus == 2
global_bus[on_bus_1] = to_sub_id[on_bus_1]
global_bus[on_bus_2] = to_sub_id[on_bus_2] + cls.n_sub
return global_bus
@classmethod
def local_bus_to_global_int(cls, local_bus, to_sub_id):
"""This function translate "local bus" whose id are in a substation, to "global bus id" whose
id are consistent for the whole grid.
Be carefull, when using this function, you might end up with deactivated bus: *eg* if you have an element on bus
with global id 1 and another on bus with global id 42 you might not have any element on bus with
global id 41 or 40 or 39 or etc.
.. note::
Typically, "local bus" are numbered 1 or 2. They represent the id of the busbar to which the element
is connected IN its substation.
On the other hand, the "global bus" are numberd, 0, 1, 2, 3, ..., 2 * self.n_sub. They represent some kind of
"universal" labelling of the busbars of all the grid. For example, substation 0 might have busbar `0` and `self.n_sub`,
substation 1 have busbar `1` and `self.n_sub + 1` etc.
Local and global bus id represents the same thing. The difference comes down to convention.
.. note::
This is the "non vectorized" version that applies only on integers.
"""
if local_bus == 1:
return to_sub_id
elif local_bus == 2:
return to_sub_id + cls.n_sub
return -1
    @classmethod
    def global_bus_to_local(cls, global_bus, to_sub_id):
        """This function translates "global bus" ids (whose ids are consistent for the whole
        grid) back to "local bus" ids (the id, 1 or 2, of the busbar inside each
        element's substation). It is the inverse of :func:`GridObjects.local_bus_to_global`.

        Elements marked as disconnected (global id ``-1``) keep the id ``-1``.

        .. note::
            Typically, "local bus" are numbered 1 or 2. They represent the id of the busbar to which the element
            is connected IN its substation.

            On the other hand, the "global bus" are numberd, 0, 1, 2, 3, ..., 2 * self.n_sub. They represent some kind of
            "universal" labelling of the busbars of all the grid. For example, substation 0 might have busbar `0` and `self.n_sub`,
            substation 1 have busbar `1` and `self.n_sub + 1` etc.

            Local and global bus id represents the same thing. The difference comes down to convention.

        .. note::
            `to_sub_id` is not needed for this direction of the conversion; the
            parameter is kept for symmetry with :func:`GridObjects.local_bus_to_global`.
        """
        res = (1 * global_bus).astype(dt_int)  # make a copy
        # global busbars 0 .. n_sub-1 are the "first" busbars (local id 1),
        # global busbars n_sub .. 2*n_sub-1 the "second" ones (local id 2)
        res[global_bus < cls.n_sub] = 1
        res[global_bus >= cls.n_sub] = 2
        # NB: the -1 (disconnected) mask MUST come last: -1 < n_sub, so the
        # first assignment above temporarily mapped disconnected elements to 1
        res[global_bus == -1] = -1
        return res
@classmethod
def global_bus_to_local_int(cls, global_bus, to_sub_id):
"""This function translate "local bus" whose id are in a substation, to "global bus id" whose
id are consistent for the whole grid.
Be carefull, when using this function, you might end up with deactivated bus: *eg* if you have an element on bus
with global id 1 and another on bus with global id 42 you might not have any element on bus with
global id 41 or 40 or 39 or etc.
.. note::
Typically, "local bus" are numbered 1 or 2. They represent the id of the busbar to which the element
is connected IN its substation.
On the other hand, the "global bus" are numberd, 0, 1, 2, 3, ..., 2 * self.n_sub. They represent some kind of
"universal" labelling of the busbars of all the grid. For example, substation 0 might have busbar `0` and `self.n_sub`,
substation 1 have busbar `1` and `self.n_sub + 1` etc.
Local and global bus id represents the same thing. The difference comes down to convention.
"""
if global_bus == -1:
return -1
if global_bus < cls.n_sub:
return 1
if global_bus >= cls.n_sub:
return 2
return -1
@staticmethod
def _format_int_vect_to_cls_str(int_vect):
int_vect_str = "None"
if int_vect is not None:
int_vect_str = ",".join([f"{el}" for el in int_vect])
int_vect_str = f"np.array([{int_vect_str}], dtype=dt_int)"
return int_vect_str
@staticmethod
def _format_float_vect_to_cls_str(float_vect):
float_vect_str = "None"
if float_vect is not None:
float_vect_str = ",".join([f"{el}" for el in float_vect])
float_vect_str = f"np.array([{float_vect_str}], dtype=dt_float)"
return float_vect_str
@staticmethod
def _format_bool_vect_to_cls_str(bool_vect):
bool_vect_str = "None"
if bool_vect is not None:
bool_vect_str = ",".join(["True" if el else "False" for el in bool_vect])
bool_vect_str = f"np.array([{bool_vect_str}], dtype=dt_bool)"
return bool_vect_str
@classmethod
def _get_full_cls_str(cls):
_PATH_ENV_str = "None" if cls._PATH_ENV is None else f'"{cls._PATH_ENV}"'
attr_list_vect_str = None
attr_list_set_str = "{}"
if cls.attr_list_vect is not None:
attr_list_vect_str = f"{cls.attr_list_vect}"
attr_list_set_str = "set(attr_list_vect)"
attr_list_json_str = None
if cls.attr_list_json is not None:
attr_list_json_str = f"{cls.attr_list_json}"
attr_nan_list_set_str = "{}"
if cls.attr_nan_list_set is not None:
tmp_ = ",".join([f'"{el}"' for el in sorted(cls.attr_nan_list_set)])
attr_nan_list_set_str = f"set([{tmp_}])"
name_load_str = ",".join([f'"{el}"' for el in cls.name_load])
name_gen_str = ",".join([f'"{el}"' for el in cls.name_gen])
name_line_str = ",".join([f'"{el}"' for el in cls.name_line])
name_storage_str = ",".join([f'"{el}"' for el in cls.name_storage])
name_sub_str = ",".join([f'"{el}"' for el in cls.name_sub])
sub_info_str = GridObjects._format_int_vect_to_cls_str(cls.sub_info)
load_to_subid_str = GridObjects._format_int_vect_to_cls_str(cls.load_to_subid)
gen_to_subid_str = GridObjects._format_int_vect_to_cls_str(cls.gen_to_subid)
line_or_to_subid_str = GridObjects._format_int_vect_to_cls_str(
cls.line_or_to_subid
)
line_ex_to_subid_str = GridObjects._format_int_vect_to_cls_str(
cls.line_ex_to_subid
)
storage_to_subid_str = GridObjects._format_int_vect_to_cls_str(
cls.storage_to_subid
)
# which index has this element in the substation vector
load_to_sub_pos_str = GridObjects._format_int_vect_to_cls_str(
cls.load_to_sub_pos
)
gen_to_sub_pos_str = GridObjects._format_int_vect_to_cls_str(cls.gen_to_sub_pos)
line_or_to_sub_pos_str = GridObjects._format_int_vect_to_cls_str(
cls.line_or_to_sub_pos
)
line_ex_to_sub_pos_str = GridObjects._format_int_vect_to_cls_str(
cls.line_ex_to_sub_pos
)
storage_to_sub_pos_str = GridObjects._format_int_vect_to_cls_str(
cls.storage_to_sub_pos
)
# which index has this element in the topology vector
load_pos_topo_vect_str = GridObjects._format_int_vect_to_cls_str(
cls.load_pos_topo_vect
)
gen_pos_topo_vect_str = GridObjects._format_int_vect_to_cls_str(
cls.gen_pos_topo_vect
)
line_or_pos_topo_vect_str = GridObjects._format_int_vect_to_cls_str(
cls.line_or_pos_topo_vect
)
line_ex_pos_topo_vect_str = GridObjects._format_int_vect_to_cls_str(
cls.line_ex_pos_topo_vect
)
storage_pos_topo_vect_str = GridObjects._format_int_vect_to_cls_str(
cls.storage_pos_topo_vect
)
def format_el_int(values):
return ",".join([f"{el}" for el in values])
tmp_tmp_ = [format_el_int(el) for el in cls.grid_objects_types]
tmp_ = ",".join([f"[{el}]" for el in tmp_tmp_])
grid_objects_types_str = f"np.array([{tmp_}], " f"dtype=dt_int)"
_topo_vect_to_sub_str = GridObjects._format_int_vect_to_cls_str(
cls._topo_vect_to_sub
)
_vectorized_str = "None"
gen_type_str = (
",".join([f'"{el}"' for el in cls.gen_type])
if cls.redispatching_unit_commitment_availble
else "None"
)
gen_pmin_str = GridObjects._format_float_vect_to_cls_str(cls.gen_pmin)
gen_pmax_str = GridObjects._format_float_vect_to_cls_str(cls.gen_pmax)
gen_redispatchable_str = GridObjects._format_bool_vect_to_cls_str(
cls.gen_redispatchable
)
gen_max_ramp_up_str = GridObjects._format_float_vect_to_cls_str(
cls.gen_max_ramp_up
)
gen_max_ramp_down_str = GridObjects._format_float_vect_to_cls_str(
cls.gen_max_ramp_down
)
gen_min_uptime_str = GridObjects._format_int_vect_to_cls_str(cls.gen_min_uptime)
gen_min_downtime_str = GridObjects._format_int_vect_to_cls_str(
cls.gen_min_downtime
)
gen_cost_per_MW_str = GridObjects._format_float_vect_to_cls_str(
cls.gen_cost_per_MW
)
gen_startup_cost_str = GridObjects._format_float_vect_to_cls_str(
cls.gen_startup_cost
)
gen_shutdown_cost_str = GridObjects._format_float_vect_to_cls_str(
cls.gen_shutdown_cost
)
gen_renewable_str = GridObjects._format_bool_vect_to_cls_str(cls.gen_renewable)
storage_type_str = ",".join([f'"{el}"' for el in cls.storage_type])
storage_Emax_str = GridObjects._format_float_vect_to_cls_str(cls.storage_Emax)
storage_Emin_str = GridObjects._format_float_vect_to_cls_str(cls.storage_Emin)
storage_max_p_prod_str = GridObjects._format_float_vect_to_cls_str(
cls.storage_max_p_prod
)
storage_max_p_absorb_str = GridObjects._format_float_vect_to_cls_str(
cls.storage_max_p_absorb
)
storage_marginal_cost_str = GridObjects._format_float_vect_to_cls_str(
cls.storage_marginal_cost
)
storage_loss_str = GridObjects._format_float_vect_to_cls_str(cls.storage_loss)
storage_charging_efficiency_str = GridObjects._format_float_vect_to_cls_str(
cls.storage_charging_efficiency
)
storage_discharging_efficiency_str = GridObjects._format_float_vect_to_cls_str(
cls.storage_discharging_efficiency
)
def format_el(values):
return ",".join([f'"{el}"' for el in values])
tmp_tmp_ = [f'"{k}": [{format_el(v)}]' for k, v in cls.grid_layout.items()]
tmp_ = ",".join(tmp_tmp_)
grid_layout_str = f"{{{tmp_}}}"
name_shunt_str = ",".join([f'"{el}"' for el in cls.name_shunt])
shunt_to_subid_str = GridObjects._format_int_vect_to_cls_str(cls.shunt_to_subid)
assistant_warning_type_str = (None if cls.assistant_warning_type is None
else f'"{cls.assistant_warning_type}"')
alarms_area_names_str = (
"[]"
if cls.dim_alarms == 0
else ",".join([f'"{el}"' for el in cls.alarms_area_names])
)
tmp_tmp_ = ",".join(
[f'"{k}": [{format_el(v)}]' for k, v in cls.alarms_lines_area.items()]
)
tmp_ = f"{{{tmp_tmp_}}}"
alarms_lines_area_str = "{}" if cls.dim_alarms == 0 else tmp_
tmp_tmp_ = ",".join([f"[{format_el(el)}]" for el in cls.alarms_area_lines])
tmp_ = f"[{tmp_tmp_}]"
alarms_area_lines_str = "[]" if cls.dim_alarms == 0 else tmp_
tmp_tmp_ = ",".join([f"\"{el}\"" for el in cls.alertable_line_names])
tmp_ = f"[{tmp_tmp_}]"
alertable_line_names_str = '[]' if cls.dim_alerts == 0 else tmp_
tmp_tmp_ = ",".join([f"{el}" for el in cls.alertable_line_ids])
tmp_ = f"[{tmp_tmp_}]"
alertable_line_ids_str = '[]' if cls.dim_alerts == 0 else tmp_
res = f"""# Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
# THIS FILE HAS BEEN AUTOMATICALLY GENERATED BY "gridobject._get_full_cls_str()"
# WE DO NOT RECOMMEND TO ALTER IT IN ANY WAY
import numpy as np
import grid2op
from grid2op.dtypes import dt_int, dt_float, dt_bool
from {cls._INIT_GRID_CLS.__module__} import {cls._INIT_GRID_CLS.__name__}
class {cls.__name__}({cls._INIT_GRID_CLS.__name__}):
BEFORE_COMPAT_VERSION = \"{cls.BEFORE_COMPAT_VERSION}\"
glop_version = grid2op.__version__ # tells it's the installed grid2op version
_PATH_ENV = {_PATH_ENV_str}
_INIT_GRID_CLS = {cls._INIT_GRID_CLS.__name__}
SUB_COL = 0
LOA_COL = 1
GEN_COL = 2
LOR_COL = 3
LEX_COL = 4
STORAGE_COL = 5
attr_list_vect = {attr_list_vect_str}
attr_list_set = {attr_list_set_str}
attr_list_json = {attr_list_json_str}
attr_nan_list_set = {attr_nan_list_set_str}
# name of the objects
env_name = "{cls.env_name}"
name_load = np.array([{name_load_str}])
name_gen = np.array([{name_gen_str}])
name_line = np.array([{name_line_str}])
name_sub = np.array([{name_sub_str}])
name_storage = np.array([{name_storage_str}])
n_gen = {cls.n_gen}
n_load = {cls.n_load}
n_line = {cls.n_line}
n_sub = {cls.n_sub}
n_storage = {cls.n_storage}
sub_info = {sub_info_str}
dim_topo = {cls.dim_topo}
# to which substation is connected each element
load_to_subid = {load_to_subid_str}
gen_to_subid = {gen_to_subid_str}
line_or_to_subid = {line_or_to_subid_str}
line_ex_to_subid = {line_ex_to_subid_str}
storage_to_subid = {storage_to_subid_str}
# which index has this element in the substation vector
load_to_sub_pos = {load_to_sub_pos_str}
gen_to_sub_pos = {gen_to_sub_pos_str}
line_or_to_sub_pos = {line_or_to_sub_pos_str}
line_ex_to_sub_pos = {line_ex_to_sub_pos_str}
storage_to_sub_pos = {storage_to_sub_pos_str}
# which index has this element in the topology vector
load_pos_topo_vect = {load_pos_topo_vect_str}
gen_pos_topo_vect = {gen_pos_topo_vect_str}
line_or_pos_topo_vect = {line_or_pos_topo_vect_str}
line_ex_pos_topo_vect = {line_ex_pos_topo_vect_str}
storage_pos_topo_vect = {storage_pos_topo_vect_str}
# "convenient" way to retrieve information of the grid
grid_objects_types = {grid_objects_types_str}
# to which substation each element of the topovect is connected
_topo_vect_to_sub = {_topo_vect_to_sub_str}
# list of attribute to convert it from/to a vector
_vectorized = {_vectorized_str}
# for redispatching / unit commitment
_li_attr_disp = ["gen_type", "gen_pmin", "gen_pmax", "gen_redispatchable", "gen_max_ramp_up",
"gen_max_ramp_down", "gen_min_uptime", "gen_min_downtime", "gen_cost_per_MW",
"gen_startup_cost", "gen_shutdown_cost", "gen_renewable"]
_type_attr_disp = [str, float, float, bool, float, float, int, int, float, float, float, bool]
# redispatch data, not available in all environment
redispatching_unit_commitment_availble = {"True" if cls.redispatching_unit_commitment_availble else "False"}
gen_type = np.array([{gen_type_str}])
gen_pmin = {gen_pmin_str}
gen_pmax = {gen_pmax_str}
gen_redispatchable = {gen_redispatchable_str}
gen_max_ramp_up = {gen_max_ramp_up_str}
gen_max_ramp_down = {gen_max_ramp_down_str}
gen_min_uptime = {gen_min_uptime_str}
gen_min_downtime = {gen_min_downtime_str}
gen_cost_per_MW = {gen_cost_per_MW_str} # marginal cost (in currency / (power.step) and not in $/(MW.h) it would be $ / (MW.5mins) )
gen_startup_cost = {gen_startup_cost_str} # start cost (in currency)
gen_shutdown_cost = {gen_shutdown_cost_str} # shutdown cost (in currency)
gen_renewable = {gen_renewable_str}
# storage unit static data
storage_type = np.array([{storage_type_str}])
storage_Emax = {storage_Emax_str}
storage_Emin = {storage_Emin_str}
storage_max_p_prod = {storage_max_p_prod_str}
storage_max_p_absorb = {storage_max_p_absorb_str}
storage_marginal_cost = {storage_marginal_cost_str}
storage_loss = {storage_loss_str}
storage_charging_efficiency = {storage_charging_efficiency_str}
storage_discharging_efficiency = {storage_discharging_efficiency_str}
# grid layout
grid_layout = {grid_layout_str}
# shunt data, not available in every backend
shunts_data_available = {"True" if cls.redispatching_unit_commitment_availble else "False"}
n_shunt = {cls.n_shunt}
name_shunt = np.array([{name_shunt_str}])
shunt_to_subid = {shunt_to_subid_str}
# alarm / alert
assistant_warning_type = {assistant_warning_type_str}
# alarm feature
# dimension of the alarm "space" (number of alarm that can be raised at each step)
dim_alarms = {cls.dim_alarms}
alarms_area_names = {alarms_area_names_str}
alarms_lines_area = {alarms_lines_area_str}
alarms_area_lines = {alarms_area_lines_str}
# alert feature
dim_alert = {cls.dim_alerts}
alertable_line_names = {alertable_line_names_str}
alertable_line_ids = {alertable_line_ids_str}
"""
return res
| 179,077 | 40.347957 | 137 | py |
Grid2Op | Grid2Op-master/grid2op/Space/RandomObject.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import numpy as np
from typing import Optional
class RandomObject(object):
"""
Utility class to deal with randomness in some aspect of the game (chronics, action_space, observation_space for
examples.
Attributes
----------
space_prng: ``numpy.random.RandomState``
The random state of the observation (in case of non deterministic observations or BaseAction.
This should not be used at the
moment)
seed_used: ``int``
The seed used throughout the episode in case of non deterministic observations or action.
Notes
-----
In order to be reproducible, and to make proper use of the
:func:`BaseAgent.seed` capabilities, you must absolutely NOT use the `random` python module (which will not
be seeded) nor the `np.random` module and avoid any other "sources" of pseudo random numbers.
You can adapt your code the following way. Instead of using `np.random` use `self.space_prng`.
For example, if you wanted to write
`np.random.randint(1,5)` replace it by `self.space_prng.randint(1,5)`. It is the same for `np.random.normal()`
that is
replaced by `self.space_prng.normal()`.
You have an example of such usage in :func:`RandomAgent.my_act`.
If you really need other sources of randomness (for example if you use tensorflow or torch) we strongly
recommend you to overload the :func:`BaseAgent.seed` accordingly so that the neural networks are always initialized
in the same order using the same weights.
Examples
---------
If you don't use any :class:`grid2op.Runner.Runner` we recommend using this method twice:
1. to set the seed of the :class:`grid2op.Environment.Environment`
2. to set the seed of your :class:`grid2op.Agent.BaseAgent`
.. code-block:: python
import grid2op
from grid2op.Agent import RandomAgent # or any other agent of course. It might also be a custom you developed
# create the environment
env = grid2op.make()
agent = RandomAgent(env.action_space)
# and now set the seed
env_seed = 42
agent_seed = 12345
env.seed(env_seed)
agent.seed(agent_seed)
# continue your experiments
If you are using a :class:`grid2op.Runner.Runner` we recommend using the "env_seeds" and "agent_seeds" when
calling the function :func:`grid2op.Runner.Runner.run` like this:
.. code-block:: python
import grid2op
import numpy as np
from grid2op.dtypes import dt_int
from grid2op.Agent import RandomAgent # or any other agent of course. It might also be a custom you developed
from grid2op.Runner import Runner
np.random.seed(42) # or any other seed of course :-)
# create the environment
env = grid2op.make()
# NB setting a seed in this environment will have absolutely no effect on the runner
# and now set the seed
runner = Runner(**env.get_params_for_runner(), agentClass=RandomAgent)
# and now start your experiments
nb_episode = 2
maximum_int_poss = np.iinfo(dt_int).max # this will be the maximum integer your computer can represent
res = runner.run(nb_episode=nb_episode,
# generate the seeds for the agent
agent_seeds=[np.random.randint(0, maximum_int_poss) for _ in range(nb_episode)],
# generate the seeds for the environment
env_seeds=[np.random.randint(0, maximum_int_poss) for _ in range(nb_episode)]
)
# NB for fully reproducible expriment you have to have called "np.random.seed" before using this method.
"""
def __init__(self):
self.space_prng : np.random.RandomState = np.random.RandomState()
self.seed_used : Optional[int] = None
def seed(self, seed):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
We do not recommend to use this function outside of the two examples given in the description of this class.
Set the seed of the source of pseudo random number used for this RandomObject.
Parameters
----------
seed: ``int``
The seed to be set.
Returns
-------
res: ``tuple``
The associated tuple of seeds used. Tuples are returned because in some cases, multiple objects are seeded
with the same call to :func:`RandomObject.seed`
"""
self.seed_used = seed
if self.seed_used is not None:
# in this case i have specific seed set. So i force the seed to be deterministic.
self.space_prng.seed(seed=self.seed_used)
return (self.seed_used,)
def _custom_deepcopy_for_copy(self, new_obj):
# RandomObject
new_obj.space_prng = copy.deepcopy(self.space_prng)
new_obj.seed_used = copy.deepcopy(self.seed_used)
| 5,503 | 38.035461 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/Space/SerializableSpace.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import re
import json
import copy
from grid2op.Exceptions import Grid2OpException
from grid2op.Space.space_utils import extract_from_dict, save_to_dict
from grid2op.Space.GridObjects import GridObjects
from grid2op.Space.RandomObject import RandomObject
class SerializableSpace(GridObjects, RandomObject):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
This is a higher level wrapper that allows to avoid code duplicates for
the action_space and observation_space. None of the methods here should be
used outside of `env.action_space` or `env.observation_space`
This class allows to serialize / de serialize the action space or observation space.
It should not be used inside an Environment, as some functions of the action might not be compatible with
the serialization, especially the checking of whether or not an BaseAction is legal or not.
Attributes
----------
subtype: ``type``
Type use to build the template object :attr:`SerializableSpace.template_obj`. This type should derive
from :class:`grid2op.BaseAction.BaseAction` or :class:`grid2op.BaseObservation.BaseObservation`.
_template_obj: :class:`grid2op.GridObjects`
An instance of the "*subtype*" provided used to provide higher level utilities, such as the size of the
action (see :func:`grid2op.BaseAction.BaseAction.size`) or to sample a new BaseAction
(see :func:`grid2op.BaseAction.BaseAction.sample`) for example.
n: ``int``
Size of the space
shape: ``numpy.ndarray``, dtype:int
Shape of each of the component of the Object if represented in a flat vector. An instance that derives from a
GridObject (for example :class:`grid2op.BaseAction.BaseAction` or :class:`grid2op.BaseObservation.BaseObservation`) can be
thought of as being concatenation of independant spaces. This vector gives the dimension of all the basic
spaces they are made of.
dtype: ``numpy.ndarray``, dtype:int
Data type of each of the component of the Object if represented in a flat vector. An instance that derives from
a GridObject (for example :class:`grid2op.BaseAction.BaseAction` or :class:`grid2op.BaseObservation.BaseObservation`) can be
thought of as being concatenation of independant spaces. This vector gives the type of all the basic
spaces they are made of.
"""
def __init__(self, gridobj, subtype=object, _init_grid=True):
"""
subtype: ``type``
Type of action used to build :attr:`SerializableActionSpace._template_act`. This type should derive
from :class:`grid2op.BaseAction.BaseAction` or :class:`grid2op.BaseObservation.BaseObservation` .
_init_grid: ``bool``
Whether or not to call 'init_grid' in the subtype (to initialize the class). Do not modify unless you
are certain of what you want to do
"""
if not isinstance(subtype, type):
raise Grid2OpException(
'Parameter "subtype" used to build the Space should be a type (a class) and not an object '
'(an instance of a class). It is currently "{}"'.format(type(subtype))
)
GridObjects.__init__(self)
RandomObject.__init__(self)
self._init_subtype = subtype # do not use, use to save restore only !!!
if _init_grid:
self.subtype = subtype.init_grid(gridobj)
from grid2op.Action import (
BaseAction,
) # lazy loading to prevent circular reference
if issubclass(self.subtype, BaseAction):
# add the shunt data if needed by the action only
self.subtype._add_shunt_data()
# compute the class attribute "attr_list_set" from "attr_list_vect"
self.subtype._update_value_set()
else:
self.subtype = subtype
from grid2op.Action import BaseAction # lazy import to avoid circular reference
from grid2op.Observation import (
BaseObservation,
) # lazy import to avoid circular reference
if not issubclass(subtype, (BaseAction, BaseObservation)):
raise RuntimeError(
f'"subtype" should inherit either BaseAction or BaseObservation. Currently it '
f'is "{subtype}"'
)
self._template_obj = self.subtype()
self.n = self._template_obj.size()
self.global_vars = None
self.shape = self._template_obj.shape()
self.dtype = self._template_obj.dtype()
self.attr_list_vect = copy.deepcopy(self._template_obj.attr_list_vect)
self._to_extract_vect = {} # key: attr name, value: tuple: (beg_, end_, dtype)
beg_ = 0
end_ = 0
for attr, size, dtype_ in zip(self.attr_list_vect, self.shape, self.dtype):
end_ += size
self._to_extract_vect[attr] = (beg_, end_, dtype_)
beg_ += size
def _custom_deepcopy_for_copy(self, new_obj):
RandomObject._custom_deepcopy_for_copy(self, new_obj)
# SerializableSpace
new_obj._init_subtype = self._init_subtype # const too
new_obj.subtype = self.subtype
new_obj._template_obj = self._template_obj.copy()
new_obj.n = self.n
new_obj.global_vars = copy.deepcopy(self.global_vars)
new_obj.shape = copy.deepcopy(self.shape)
new_obj.dtype = copy.deepcopy(self.dtype)
new_obj.attr_list_vect = copy.deepcopy(self.attr_list_vect)
new_obj._to_extract_vect = copy.deepcopy(self._to_extract_vect)
    @staticmethod
    def from_dict(dict_):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

            This is used internally only to restore action_space or observation_space if they
            have been saved by `to_dict`. Do not
            attempt to use it in a different context.

        Allows the de-serialization of an object stored as a dictionary (for example in the case of json saving).

        Parameters
        ----------
        dict_: ``dict`` or ``str``
            Representation of an BaseObservation Space (aka :class:`grid2op.BaseObservation.ObservartionHelper`)
            or the BaseAction Space (aka :class:`grid2op.BaseAction.ActionSpace`)
            as a dictionary, or the path to a json file containing such a dictionary.

        Returns
        -------
        res: :class:`SerializableSpace`
            An instance of an SerializableSpace matching the dictionary.
        """
        if isinstance(dict_, str):
            # a path was given instead of an already-loaded dictionary:
            # read the json file at that location
            path = dict_
            if not os.path.exists(path):
                raise Grid2OpException(
                    'Unable to find the file "{}" to load the ObservationSpace'.format(
                        path
                    )
                )
            with open(path, "r", encoding="utf-8") as f:
                dict_ = json.load(fp=f)
        gridobj = GridObjects.from_dict(dict_)
        # "_init_subtype" stores the fully qualified name of the action /
        # observation class, eg "grid2op.Action.baseAction.BaseAction"
        actionClass_str = extract_from_dict(dict_, "_init_subtype", str)
        actionClass_li = actionClass_str.split(".")
        if actionClass_li[-1] in globals():
            subtype = globals()[actionClass_li[-1]]
        else:
            # the class is not known here yet: import it from its module
            try:
                exec(
                    "from {} import {}".format(
                        ".".join(actionClass_li[:-1]), actionClass_li[-1]
                    )
                )
            except (ModuleNotFoundError, ImportError) as exc_:
                # prior to grid2op 1.6.5 the Observation module was grid2op.Observation.completeObservation.CompleteObservation
                # after its grid2op.Observation.completeObservation.CompleteObservation
                # so I try here to make the python file lower case in order to import
                # the class correctly
                if len(actionClass_li) > 2:
                    test_str = actionClass_li[2]
                    actionClass_li[2] = test_str[0].lower() + test_str[1:]
                    exec(
                        "from {} import {}".format(
                            ".".join(actionClass_li[:-1]), actionClass_li[-1]
                        )
                    )
                else:
                    raise exc_
            # TODO make something better and recursive here
            # the exec above imported the class into the local namespace;
            # retrieve it by evaluating its (possibly qualified) name
            try:
                subtype = eval(actionClass_li[-1])
            except NameError:
                if len(actionClass_li) > 1:
                    try:
                        subtype = eval(".".join(actionClass_li[1:]))
                    except Exception as exc_:
                        msg_err_ = (
                            'Impossible to find the module "{}" to load back the space (ERROR 1). '
                            'Try "from {} import {}"'
                        )
                        raise Grid2OpException(
                            msg_err_.format(
                                actionClass_str,
                                ".".join(actionClass_li[:-1]),
                                actionClass_li[-1],
                            )
                        )
                else:
                    msg_err_ = (
                        'Impossible to find the module "{}" to load back the space (ERROR 2). '
                        'Try "from {} import {}"'
                    )
                    raise Grid2OpException(
                        msg_err_.format(
                            actionClass_str,
                            ".".join(actionClass_li[:-1]),
                            actionClass_li[-1],
                        )
                    )
            except AttributeError:
                try:
                    subtype = eval(actionClass_li[-1])
                except Exception as exc_:
                    if len(actionClass_li) > 1:
                        msg_err_ = (
                            'Impossible to find the class named "{}" to load back the space (ERROR 3)'
                            "(module is found but not the class in it) Please import it via "
                            '"from {} import {}".'
                        )
                        msg_err_ = msg_err_.format(
                            actionClass_str,
                            ".".join(actionClass_li[:-1]),
                            actionClass_li[-1],
                        )
                    else:
                        msg_err_ = (
                            'Impossible to import the class named "{}" to load back the space (ERROR 4) '
                            "(the module is found but not the class in it)"
                        )
                        msg_err_ = msg_err_.format(actionClass_str)
                    raise Grid2OpException(msg_err_)
        # create the proper SerializableSpace class for this environment
        CLS = SerializableSpace.init_grid(gridobj)
        res = CLS(gridobj=gridobj, subtype=subtype, _init_grid=True)
        return res
def cls_to_dict(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Serialize this space as a dictionary. This is used internally only to save
    action_space or observation_space; do not attempt to use it in a different
    context.

    Returns
    -------
    res: ``dict``
        A dictionary representing this object content. It can be loaded back with
        :func:`SerializableObservationSpace.cls_from_dict`
    """
    # TODO this is super weird that this is a regular method, but inherit from a class method !
    # start from the serialization performed by the parent class, then add our own field
    dict_repr = super().cls_to_dict()

    # strip the "<class '...'>" decoration as well as the dynamic
    # ".init_grid.<locals>.res" part, so that only the plain class path is stored
    def _clean_cls_str(x):
        return re.sub(
            "(<class ')|(\\.init_grid\\.<locals>\\.res)|('>)", "", "{}".format(x)
        )

    save_to_dict(dict_repr, self, "_init_subtype", _clean_cls_str)
    return dict_repr
def size(self):
    """
    The size of any object of this space (action or observation) once it is
    converted to a vector.

    Returns
    -------
    n: ``int``
        The size of the action space.

    Examples
    --------
    See :func:`GridObjects.size` for more information.
    """
    # the dimension is pre-computed and cached in the attribute "n"
    return self.n
def from_vect(self, obj_as_vect, check_legit=True):
    """
    Convert a vector living in this space (action space or observation space)
    back into a proper object (action or observation). It works the same way
    for both.

    Parameters
    ----------
    obj_as_vect: ``numpy.ndarray``
        A object living in a space represented as a vector (typically an
        :class:`grid2op.BaseAction.BaseAction` or an
        :class:`grid2op.BaseObservation.BaseObservation` represented as a numpy vector)

    check_legit: ``bool``
        Whether the validity of the vector is checked when loading it.

    Returns
    -------
    res: :class:`grid2op.Action.Action` or :class:`grid2op.Observation.Observation`
        The corresponding action (or observation) as an object (and not as a vector).
        The return type is given by the type of :attr:`SerializableSpace._template_obj`

    Examples
    --------
    See :func:`GridObjects.from_vect` for more information.
    """
    # work on a deep copy of the template so that the template itself is never mutated
    loaded_obj = copy.deepcopy(self._template_obj)
    loaded_obj.from_vect(obj_as_vect, check_legit=check_legit)
    return loaded_obj
def extract_from_vect(self, obj_as_vect, attr_name):
    """
    Extract, from the vector representation of an action / observation, only
    the part corresponding to a single attribute (without parsing the whole
    vector).

    Parameters
    ----------
    obj_as_vect: ``numpy.ndarray``
        the object (action or observation) represented as a vector.
    attr_name: ``str``
        the name of the attribute you want to extract from the object

    Returns
    -------
    res: ``numpy.ndarray``
        The value of the attribute with name `attr_name`

    Examples
    ---------
    We detail only the process for the observation, but it works the same way for the action too.

    .. code-block:: python

        import numpy as np
        import grid2op

        env = grid2op.make()

        # get the vector representation of an observation:
        obs = env.reset()
        obs_as_vect = obs.to_vect()

        # and now you can extract for example the load
        load_p = env.observation_space.extract_from_vect(obs_as_vect, "load_p")
        assert np.all(load_p == obs.load_p)
        # and this should assert to True

    """
    # locate where this attribute lives in the flat vector, slice it out,
    # then cast it back to its original dtype
    start, stop, attr_dtype = self.get_indx_extract(attr_name)
    return obj_as_vect[start:stop].astype(attr_dtype)
def get_indx_extract(self, attr_name):
    """
    Retrieve, for a given attribute, the beginning index, the end index and
    the dtype to use once the action / observation is represented as a flat
    vector.

    [advanced usage] This is particularly useful to avoid parsing the whole
    observation / action when you only need a subset of it, for example when
    reading back only the "rho" columns of the observations saved by a runner:

    .. code-block:: python

        beg_, end_, dtype = env.observation_space.get_indx_extract("rho")
        all_flows = all_obs[:, beg_:end_].astype(dtype)

    Parameters
    ----------
    attr_name: ``str``
        The name of the attribute you want to extract information from

    Returns
    -------
    beg_: ``int``
        The first component of the vector that concerns the attribute
    end_: ``int``
        The last component of the vector that concerns the attribute
    dtype:
        The type of the attribute
    """
    # the (begin, end, dtype) triples are pre-computed once and cached in
    # self._to_extract_vect, so this is a plain dictionary lookup
    if attr_name in self._to_extract_vect:
        return self._to_extract_vect[attr_name]
    raise Grid2OpException(
        'Attribute "{}" is not found in the object of type "{}".'
        "".format(attr_name, self.subtype)
    )
| 18,212 | 39.11674 | 132 | py |
Grid2Op | Grid2Op-master/grid2op/Space/__init__.py | __all__ = ["RandomObject", "SerializableSpace", "GridObjects"]
from grid2op.Space.RandomObject import RandomObject
from grid2op.Space.SerializableSpace import SerializableSpace
from grid2op.Space.GridObjects import GridObjects
| 228 | 37.166667 | 62 | py |
Grid2Op | Grid2Op-master/grid2op/Space/space_utils.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
from grid2op.Exceptions import Grid2OpException
# i already issued the warning for the "some substations have no controllable elements"
# NB: module-level flag so that the warning is emitted at most once per process
_WARNING_ISSUED_FOR_SUB_NO_ELEM = False
# this global variable is not const ! It is modified in GridObjects.py
def extract_from_dict(dict_, key, converter):
    """Read ``dict_[key]`` and pass it through ``converter``, wrapping any failure.

    Parameters
    ----------
    dict_: ``dict``
        the dictionary to read from
    key:
        the key to look up in ``dict_``
    converter: ``callable``
        function applied to the raw value before it is returned

    Returns
    -------
    the converted value

    Raises
    ------
    Grid2OpException
        if the key is absent from the dictionary, or if the converter fails
        on the stored value
    """
    if key not in dict_:
        raise Grid2OpException(
            'Impossible to find key "{}" while loading the dictionary.'.format(key)
        )
    raw_value = dict_[key]
    try:
        converted = converter(raw_value)
    except Exception as exc_:
        # wrap any conversion error into a grid2op exception
        raise Grid2OpException(
            'Impossible to convert "{}" into class {} with exception '
            '\n"{}"'.format(key, converter, exc_)
        )
    return converted
def save_to_dict(res_dict, me, key, converter, copy_=True):
    """
    Store the attribute ``key`` of the object ``me`` into the dictionary ``res_dict``.

    Parameters
    ----------
    res_dict: ``dict``
        output dictionary, modified in place (a new entry ``key`` is added)
    me:
        the object to serialize in a dict
    key: ``str``
        the attribute of the object we want to save
    converter: ``callable`` or ``None``
        if not ``None``, the attribute is passed through this function before being
        stored (for example if you later want to serialize the dictionary as json)
    copy_: ``bool``
        whether you copy the attribute or not (only applies if converter is None)

    Raises
    ------
    Grid2OpException
        if ``me`` has no attribute named ``key``, if the conversion fails, or
        if ``key`` is already present in ``res_dict`` (silently overriding an
        existing entry is not supported)
    """
    if not hasattr(me, key):
        # bug fix: the previous message was copy-pasted from extract_from_dict
        # and wrongly said "while loading the dictionary" although this
        # failure happens while *saving* an attribute of the object
        raise Grid2OpException(
            'Impossible to find the attribute "{}" while saving to the dictionary.'.format(
                key
            )
        )
    try:
        if converter is not None:
            res = converter(getattr(me, key))
        else:
            if copy_:
                res = copy.deepcopy(getattr(me, key))
            else:
                res = getattr(me, key)
    except Exception as exc_:
        # keep the original exception chained for easier debugging
        raise Grid2OpException(
            'Impossible to convert "{}" into class {} with exception '
            '\n"{}"'.format(key, converter, exc_)
        ) from exc_
    if key in res_dict:
        msg_err_ = (
            'Key "{}" is already present in the result dictionary. This would override it'
            " and is not supported."
        )
        raise Grid2OpException(msg_err_.format(key))
    res_dict[key] = res
| 2,528 | 31.844156 | 113 | py |
Grid2Op | Grid2Op-master/grid2op/VoltageControler/BaseVoltageController.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from abc import ABC, abstractmethod
import numpy as np
import copy
from grid2op.dtypes import dt_int
from grid2op.Action import VoltageOnlyAction, ActionSpace
from grid2op.Rules import AlwaysLegal
from grid2op.Space import RandomObject
class BaseVoltageController(RandomObject, ABC):
    """
    Base class of every voltage controler.

    A voltage controler decides, at each step, the voltage setpoint of the
    generators. The simplest behaviour is to read these setpoints from the
    chronics, and to change nothing when no voltage data is available.
    """

    def __init__(self, gridobj, controler_backend, actionSpace_cls):
        """
        Parameters
        ----------
        gridobj: :class:`grid2op.Space.Gridobject`
            Structure of the powergrid

        controler_backend: :class:`grid2op.Backend.Backend`
            An instanciated backend to perform some computation on a powergrid, before taking some actions.

        actionSpace_cls:
            Class used to build the action space of this controler.
        """
        RandomObject.__init__(self)
        # a voltage controler is always allowed to act on the voltages
        always_legal = AlwaysLegal()
        self._actionSpace_cls = actionSpace_cls
        self.action_space = actionSpace_cls(
            gridobj=gridobj, actionClass=VoltageOnlyAction, legal_action=always_legal
        )

    def _custom_deepcopy_for_copy(self, new_obj):
        # let RandomObject copy its own state (pseudo random generator, ...)
        RandomObject._custom_deepcopy_for_copy(self, new_obj)
        new_obj._actionSpace_cls = self._actionSpace_cls
        # rebuild a fresh action space rather than deep-copying the current one
        # NB the action space class itself is also passed as the "gridobj"
        # argument here (it carries the grid description as class attributes)
        new_obj.action_space = new_obj._actionSpace_cls(
            gridobj=self._actionSpace_cls,
            actionClass=VoltageOnlyAction,
            legal_action=AlwaysLegal(),
        )

    def copy(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Make a (deep) copy of this instance.
        """
        cls = type(self)
        new_controler = cls.__new__(cls)
        self._custom_deepcopy_for_copy(new_controler)
        return new_controler

    def attach_layout(self, grid_layout):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Forward the grid layout to the underlying action space.
        """
        self.action_space.attach_layout(grid_layout)

    def seed(self, seed):
        """
        Used to seed the voltage controler class

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        """
        # seed myself first, then derive (deterministically, from my own
        # pseudo random generator) a seed for the action space
        my_seed = super().seed(seed)
        upper_bound = np.iinfo(dt_int).max
        derived_seed = self.space_prng.randint(upper_bound)
        action_space_seed = self.action_space.seed(derived_seed)
        return my_seed, action_space_seed

    @abstractmethod
    def fix_voltage(self, observation, agent_action, env_action, prod_v_chronics):
        """
        Compute the voltage setpoint of the generators for the next step.

        Subclasses must implement this. To adapt the setpoints they can rely on:

        - `observation` the observation (receive by the agent) at time t
        - `agent_action` the action of the agent at time t
        - `env_action` the modification of the environment at time t, that will be
          observed by the agent at time t+1
        - `prod_v_chronics` the new setpoint of the generators present in the data
          (if any, this can be ``None``)

        Parameters
        ----------
        observation: :class:`grid2op.Observation.Observation`
            The last observation (at time t)

        agent_action: :class:`grid2op.Action.Action`
            The action that the agent took

        env_action: :class:`grid2op.Action.Action`
            The modification that the environment will take.

        prod_v_chronics: ``numpy.ndarray``
            The next voltage setpoint present in the data (if any) or ``None`` if not.

        Returns
        -------
        res: :class:`grid2op.Action.Action`
            The action setting the new voltage setpoints.
        """
        pass

    def close(self):
        """If you require some "backend" to control the voltages, then you need to implement this
        (and also some specific things for the copy) to have it working correctly
        """
        pass
| 4,999 | 34.971223 | 116 | py |
Grid2Op | Grid2Op-master/grid2op/VoltageControler/ControlVoltageFromFile.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.VoltageControler.BaseVoltageController import BaseVoltageController
class ControlVoltageFromFile(BaseVoltageController):
    """
    Most basic voltage controler: the generator voltage setpoints are read
    directly from the chronics.

    If the chronics do not provide voltage data (missing files), the voltage
    setpoints are left untouched.
    """

    def __init__(self, gridobj, controler_backend, actionSpace_cls):
        """
        Parameters
        ----------
        gridobj: :class:`grid2op.Space.GridObjects`
            Structure of the powergrid

        controler_backend: :class:`grid2op.Backend.Backend`
            An instanciated backend to perform some computation on a powergrid, before taking some actions.

        actionSpace_cls:
            Class used to build the action space of this controler.
        """
        super().__init__(
            gridobj=gridobj,
            controler_backend=controler_backend,
            actionSpace_cls=actionSpace_cls,
        )

    def fix_voltage(self, observation, agent_action, env_action, prod_v_chronics):
        """
        Set the generator voltage setpoints for the next step.

        This controler only looks at ``prod_v_chronics``: when it is ``None``
        (no voltage data in the chronics) a do-nothing action is returned,
        otherwise the setpoints found in the data are applied as-is. The other
        arguments (observation, agent and environment actions) are ignored,
        but they are part of the interface so that smarter controlers can use
        them.

        Parameters
        ----------
        observation: :class:`grid2op.Observation.Observation`
            The last observation (at time t)

        agent_action: :class:`grid2op.Action.Action`
            The action that the agent took

        env_action: :class:`grid2op.Action.Action`
            The modification that the environment will take.

        prod_v_chronics: ``numpy.ndarray`` or ``None``
            The next voltage setpoint present in the data (if any) or ``None`` if not.

        Returns
        -------
        res: :class:`grid2op.Action.Action`
            The new setpoint, in this case depending only on the prod_v_chronics.
        """
        # TODO add a "reward" and "done" for RL voltage controler
        if prod_v_chronics is None:
            # no voltage data in the chronics: do not modify anything
            return self.action_space()
        return self.action_space({"injection": {"prod_v": prod_v_chronics}})
| 3,441 | 37.674157 | 116 | py |
Grid2Op | Grid2Op-master/grid2op/VoltageControler/__init__.py | __all__ = ["BaseVoltageController", "ControlVoltageFromFile"]
from grid2op.VoltageControler.BaseVoltageController import BaseVoltageController
from grid2op.VoltageControler.ControlVoltageFromFile import ControlVoltageFromFile
| 227 | 44.6 | 82 | py |
Grid2Op | Grid2Op-master/grid2op/data/blank/config.py | from grid2op.Action import TopologyAction
from grid2op.Reward import L2RPNReward
from grid2op.Rules import AlwaysLegal
from grid2op.Chronics import ChangeNothing
from grid2op.Backend import PandaPowerBackend
# Configuration of the "blank" environment: no time series at all
# (ChangeNothing), mainly useful for tests and demos.
config = {
    "backend": PandaPowerBackend,  # power flow solver
    "action_class": TopologyAction,
    "observation_class": None,  # None => grid2op default
    "reward_class": L2RPNReward,
    "gamerules_class": AlwaysLegal,  # every action is legal in this environment
    "chronics_class": ChangeNothing,  # no injection data: the grid state never changes
    "grid_value_class": None,
    "volagecontroler_class": None,  # (sic) left to grid2op default
    "thermal_limits": None,  # not overridden here
    "names_chronics_to_grid": None,
}
| 558 | 28.421053 | 45 | py |
Grid2Op | Grid2Op-master/grid2op/data/educ_case14_redisp/config.py | from grid2op.Action import PowerlineChangeAndDispatchAction
from grid2op.Reward import L2RPNReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
# Configuration of the "educ_case14_redisp" environment
# (IEEE case14, powerline switching + redispatching actions).
config = {
    "backend": PandaPowerBackend,
    "action_class": PowerlineChangeAndDispatchAction,
    "observation_class": None,  # None => grid2op default
    "reward_class": L2RPNReward,
    "gamerules_class": DefaultRules,
    "chronics_class": Multifolder,  # one sub-folder per scenario
    "grid_value_class": GridStateFromFileWithForecasts,
    "volagecontroler_class": None,
    # thermal limit of each of the 20 powerlines (one value per line,
    # in the order of the grid file)
    "thermal_limits": [
        541.0,
        450.0,
        375.0,
        636.0,
        175.0,
        285.0,
        335.0,
        657.0,
        496.0,
        827.0,
        442.0,
        641.0,
        840.0,
        156.0,
        664.0,
        235.0,
        119.0,
        179.0,
        1986.0,
        1572.0,
    ],
    "names_chronics_to_grid": None,
}
| 983 | 23 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data/educ_case14_storage/config.py | from grid2op.Action import PowerlineChangeDispatchAndStorageAction
from grid2op.Reward import L2RPNReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
# Configuration of the "educ_case14_storage" environment
# (IEEE case14 with storage units; line switching + dispatch + storage actions).
config = {
    "backend": PandaPowerBackend,
    "action_class": PowerlineChangeDispatchAndStorageAction,
    "observation_class": None,  # None => grid2op default
    "reward_class": L2RPNReward,
    "gamerules_class": DefaultRules,
    "chronics_class": Multifolder,  # one sub-folder per scenario
    "grid_value_class": GridStateFromFileWithForecasts,
    "volagecontroler_class": None,
    # thermal limit of each of the 20 powerlines (one value per line,
    # in the order of the grid file)
    "thermal_limits": [
        541.0,
        450.0,
        375.0,
        636.0,
        175.0,
        285.0,
        335.0,
        657.0,
        496.0,
        827.0,
        442.0,
        641.0,
        840.0,
        156.0,
        664.0,
        235.0,
        119.0,
        179.0,
        1986.0,
        1572.0,
    ],
    "names_chronics_to_grid": None,
}
| 997 | 23.341463 | 66 | py |
Grid2Op | Grid2Op-master/grid2op/data/l2rpn_2019/config.py | from grid2op.Action import TopologyAction
from grid2op.Reward import L2RPNReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import ReadPypowNetData
from grid2op.Backend import PandaPowerBackend
# Configuration of the historical "l2rpn_2019" environment. The chronics were
# produced for pypownet, hence the explicit mapping between the names used in
# the data files and the names used by the grid file.
config = {
    "backend": PandaPowerBackend,
    "action_class": TopologyAction,
    "observation_class": None,  # None => grid2op default
    "reward_class": L2RPNReward,
    "gamerules_class": DefaultRules,
    "chronics_class": Multifolder,
    "grid_value_class": ReadPypowNetData,  # legacy pypownet data format
    "volagecontroler_class": None,
    "thermal_limits": None,
    # mapping "name in the chronics files" -> "name in the grid file"
    "names_chronics_to_grid": {
        "loads": {
            "2_C-10.61": "load_1_0",
            "3_C151.15": "load_2_1",
            "14_C63.6": "load_13_10",
            "4_C-9.47": "load_3_2",
            "5_C201.84": "load_4_3",
            "6_C-6.27": "load_5_4",
            "9_C130.49": "load_8_5",
            "10_C228.66": "load_9_6",
            "11_C-138.89": "load_10_7",
            "12_C-27.88": "load_11_8",
            "13_C-13.33": "load_12_9",
        },
        "lines": {
            "1_2_1": "0_1_0",
            "1_5_2": "0_4_1",
            "9_10_16": "8_9_16",
            "9_14_17": "8_13_15",
            "10_11_18": "9_10_17",
            "12_13_19": "11_12_18",
            "13_14_20": "12_13_19",
            "2_3_3": "1_2_2",
            "2_4_4": "1_3_3",
            "2_5_5": "1_4_4",
            "3_4_6": "2_3_5",
            "4_5_7": "3_4_6",
            "6_11_11": "5_10_12",
            "6_12_12": "5_11_11",
            "6_13_13": "5_12_10",
            "4_7_8": "3_6_7",
            "4_9_9": "3_8_8",
            "5_6_10": "4_5_9",
            "7_8_14": "6_7_13",
            "7_9_15": "6_8_14",
        },
        "prods": {
            "1_G137.1": "gen_0_4",
            "3_G36.31": "gen_1_0",
            "6_G63.29": "gen_2_1",
            "2_G-56.47": "gen_5_2",
            "8_G40.43": "gen_7_3",
        },
    },
}
| 1,938 | 29.777778 | 45 | py |
Grid2Op | Grid2Op-master/grid2op/data/l2rpn_case14_sandbox/config.py | from grid2op.Action import PlayableAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
# Configuration of the "l2rpn_case14_sandbox" environment
# (IEEE case14, full playable actions, redispatching reward).
config = {
    "backend": PandaPowerBackend,
    "action_class": PlayableAction,
    "observation_class": None,  # None => grid2op default
    "reward_class": RedispReward,
    "gamerules_class": DefaultRules,
    "chronics_class": Multifolder,  # one sub-folder per scenario
    "grid_value_class": GridStateFromFileWithForecasts,
    "volagecontroler_class": None,
    # thermal limit of each of the 20 powerlines (one value per line,
    # in the order of the grid file)
    "thermal_limits": [
        541.0,
        450.0,
        375.0,
        636.0,
        175.0,
        285.0,
        335.0,
        657.0,
        496.0,
        827.0,
        442.0,
        641.0,
        840.0,
        156.0,
        664.0,
        235.0,
        119.0,
        179.0,
        1986.0,
        1572.0,
    ],
    "names_chronics_to_grid": None,
}
| 949 | 22.170732 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data/l2rpn_case14_sandbox_diff_grid/change_grid_params.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import pandapower as pp
import copy
import numpy as np
# Build the "forecast" grid used by this environment: a copy of the real grid
# whose line parameters are randomly perturbed, so that simulate() runs on a
# slightly different grid than the real one.
real_case = pp.from_json("grid.json")
sim_case = copy.deepcopy(real_case)

# the perturbation is deterministic on purpose (fixed seed)
np.random.seed(42)
noise = 0.05

# change powerlines: one multiplicative log-normal factor for r, another for x
sim_case.line["r_ohm_per_km"] *= np.random.lognormal(0., noise)
sim_case.line["x_ohm_per_km"] *= np.random.lognormal(0., noise)
# TODO do I change trafo ?

# sanity check: the perturbed grid must still converge
pp.runpp(sim_case)
pp.runpp(real_case)
assert sim_case.converged
assert sim_case.res_line.shape[0] == sim_case.line.shape[0]

# report how far the perturbed power flow is from the real one
p_gap = np.mean(np.abs(sim_case.res_line["p_from_mw"] - real_case.res_line["p_from_mw"]))
q_gap = np.mean(np.abs(sim_case.res_line["q_from_mvar"] - real_case.res_line["q_from_mvar"]))
print(f"L1 error on p: {p_gap:.2f}MW")
print(f"L1 error on q: {q_gap:.2f}MVAr")

pp.to_json(sim_case, "grid_forecast.json")
| 1,208 | 36.78125 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/data/l2rpn_case14_sandbox_diff_grid/config.py | from grid2op.Action import PlayableAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
# Configuration of the "l2rpn_case14_sandbox_diff_grid" environment: same as
# the sandbox, but the forecasts are computed on a perturbed grid (see
# change_grid_params.py in this folder).
config = {
    "backend": PandaPowerBackend,
    "action_class": PlayableAction,
    "observation_class": None,  # None => grid2op default
    "reward_class": RedispReward,
    "gamerules_class": DefaultRules,
    "chronics_class": Multifolder,
    "grid_value_class": GridStateFromFileWithForecasts,
    "volagecontroler_class": None,
    # thermal limit of each of the 20 powerlines (one value per line,
    # in the order of the grid file)
    "thermal_limits": [
        541.0,
        450.0,
        375.0,
        636.0,
        175.0,
        285.0,
        335.0,
        657.0,
        496.0,
        827.0,
        442.0,
        641.0,
        840.0,
        156.0,
        664.0,
        235.0,
        119.0,
        179.0,
        1986.0,
        1572.0,
    ],
    "names_chronics_to_grid": None
}
| 948 | 22.146341 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data/l2rpn_icaps_2021/config.py | from grid2op.Action import PowerlineSetAction
from grid2op.Reward import AlarmReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
from grid2op.Opponent import GeometricOpponent, BaseActionBudget
from grid2op.operator_attention import LinearAttentionBudget
# Backward compatibility: the dedicated action / observation classes for this
# competition only exist in recent grid2op versions; fall back to the generic
# classes (with a warning) on older installations.
try:
    from grid2op.l2rpn_utils import ActionICAPS2021, ObservationICAPS2021
except ImportError:
    from grid2op.Action import PlayableAction
    from grid2op.Observation import CompleteObservation
    import warnings

    warnings.warn("The grid2op version you are trying to use is too old for this environment. Please upgrade it.")
    ActionICAPS2021 = PlayableAction
    ObservationICAPS2021 = CompleteObservation

# powerlines the opponent is allowed to attack
lines_attacked = [
    "62_58_180",
    "62_63_160",
    "48_50_136",
    "48_53_141",
    "41_48_131",
    "39_41_121",
    "43_44_125",
    "44_45_126",
    "34_35_110",
    "54_58_154",
]

# opponent budget parameters (NB 1 hour = 12 time steps of 5 minutes)
opponent_attack_cooldown = 12  # 1 hour, 1 hour being 12 time steps
opponent_attack_duration = 96  # 8 hours at maximum
opponent_budget_per_ts = (
    0.17  # opponent_attack_duration / opponent_attack_cooldown + epsilon
)
opponent_init_budget = 144.0  # no need to attack straightfully, it can attack starting at midday the first day
# Main configuration of the "l2rpn_icaps_2021" environment: grid classes,
# thermal limits, opponent settings and alarm ("attention budget") settings.
config = {
    "backend": PandaPowerBackend,
    "action_class": ActionICAPS2021,
    "observation_class": ObservationICAPS2021,
    "reward_class": AlarmReward,
    "gamerules_class": DefaultRules,
    "chronics_class": Multifolder,
    "grid_value_class": GridStateFromFileWithForecasts,
    "volagecontroler_class": None,
    "names_chronics_to_grid": None,
    # thermal limit of each powerline (one value per line, grid file order)
    "thermal_limits": [
        60.9,
        231.9,
        272.6,
        212.8,
        749.2,
        332.4,
        348.0,
        414.4,
        310.1,
        371.4,
        401.2,
        124.3,
        298.5,
        86.4,
        213.9,
        160.8,
        112.2,
        291.4,
        489.0,
        489.0,
        124.6,
        196.7,
        191.9,
        238.4,
        174.2,
        105.6,
        143.7,
        293.4,
        288.9,
        107.7,
        415.5,
        148.2,
        124.2,
        154.4,
        85.9,
        106.5,
        142.0,
        124.0,
        130.2,
        86.2,
        278.1,
        182.0,
        592.1,
        173.1,
        249.8,
        441.0,
        344.2,
        722.8,
        494.6,
        494.6,
        196.7,
        151.8,
        263.4,
        364.1,
        327.0,
        370.5,
        441.0,
        300.3,
        656.2,
    ],
    # opponent: a GeometricOpponent disconnecting some of the lines listed above
    "opponent_attack_cooldown": opponent_attack_cooldown,
    "opponent_attack_duration": opponent_attack_duration,
    "opponent_budget_per_ts": opponent_budget_per_ts,
    "opponent_init_budget": opponent_init_budget,
    "opponent_action_class": PowerlineSetAction,
    "opponent_class": GeometricOpponent,
    "opponent_budget_class": BaseActionBudget,
    "kwargs_opponent": {
        "lines_attacked": lines_attacked,
        "attack_every_xxx_hour": 24,
        "average_attack_duration_hour": 4,
        "minimum_attack_duration_hour": 1,
    },
    # alarm feature: agents have a limited "attention budget" to raise alarms
    "has_attention_budget": True,
    "attention_budget_class": LinearAttentionBudget,
    "kwargs_attention_budget": {
        "max_budget": 3.0,
        "budget_per_ts": 1.0 / (12.0 * 16),
        "alarm_cost": 1.0,
        "init_budget": 2.0,
    },
}
| 3,439 | 24.481481 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/data/l2rpn_idf_2023/config.py | from grid2op.Action import PlayableAction, PowerlineSetAction
from grid2op.Observation import CompleteObservation
from grid2op.Reward import RedispReward, AlertReward
from grid2op.Rules import RulesByArea
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecastsWithMaintenance
from grid2op.Backend import PandaPowerBackend
from grid2op.Opponent import GeometricOpponentMultiArea, BaseActionBudget
# Backward compatibility: the dedicated action / observation classes for this
# competition require grid2op >= 1.9.1; fall back to the generic classes
# (with a warning) on older installations.
try:
    from grid2op.l2rpn_utils import ActionIDF2023, ObservationIDF2023
except ImportError:
    from grid2op.Action import PlayableAction
    from grid2op.Observation import CompleteObservation
    import warnings

    warnings.warn("The grid2op version you are trying to use is too old for this environment. Please upgrade it to at least grid2op 1.9.1")
    ActionIDF2023 = PlayableAction
    ObservationIDF2023 = CompleteObservation

# powerlines the opponent can attack, grouped by area (one inner list per area,
# used by the multi-area opponent below)
lines_attacked = [["26_31_106",
                   "21_22_93",
                   "17_18_88",
                   "4_10_162",
                   "12_14_68",
                   "29_37_117",
                   ],
                  ["62_58_180",
                   "62_63_160",
                   "48_50_136",
                   "48_53_141",
                   "41_48_131",
                   "39_41_121",
                   "43_44_125",
                   "44_45_126",
                   "34_35_110",
                   "54_58_154",
                   ],
                  ["74_117_81",
                   "93_95_43",
                   "88_91_33",
                   "91_92_37",
                   "99_105_62",
                   "102_104_61",
                   ]]

# opponent timing parameters (NB 1 hour = 12 time steps of 5 minutes)
opponent_attack_duration = 96  # 8 hours at maximum
attack_every_xxx_hour = 32  # can still change
average_attack_duration_hour = 2  # can still change
# after modifications for infeasibility
# thermal limit of each of the powerlines (one value per line, grid file order)
th_lim = [ 349., 546., 1151., 581., 743., 613., 69., 801., 731., 953.,
           463., 291., 876., 649., 461., 916., 281., 204., 97., 251.,
           1901., 1356., 601., 793., 351., 509., 409., 566., 339., 899.,
           356., 673., 543., 1313., 411., 551., 633., 244., 589., 285.,
           646., 418., 479., 327., 1043., 951., 429., 871., 449., 1056.,
           939., 946., 759., 716., 629., 486., 409., 296., 893., 411.,
           99., 326., 506., 993., 646., 257., 493., 263., 323., 513.,
           629., 566., 1379., 659., 3566., 423., 306., 479., 279., 376.,
           336., 836., 759., 151., 1143., 851., 236., 846., 397., 483.,
           559., 216., 219., 130., 1533., 1733., 916., 1071., 513., 289.,
           796., 773., 849., 359., 566., 273., 252., 1119., 535., 581.,
           83., 353., 541., 316., 1033., 379., 316., 1221., 599., 313.,
           371., 301., 346., 449., 571., 169., 273., 88., 113., 549.,
           446., 589., 589., 279., 256., 157., 195., 221., 119., 256.9,
           287.5, 326., 376.6, 179.5, 927.9, 223., 90., 119., 75., 79.,
           317.9, 921., 236., 249., 118., 693., 671., 453., 318.5, 427.2,
           689., 701., 372., 721., 616., 616., 108.7, 340.2, 223., 384.,
           409., 309., 696., 1393., 1089., 1751., 341., 883., 791., 661.,
           689., 397., 1019., 2063., 2056., 1751., ]
# game rules enforced independently on each of the 3 geographical areas of the
# grid: each inner list gives the substation ids belonging to one area
# (NOTE(review): a few ids appear twice in a list, e.g. 24, 98 and 100 —
# presumably harmless, but worth confirming against RulesByArea)
this_rules = RulesByArea([[0, 1, 2, 3, 10, 11, 116, 13, 12, 14, 4, 5, 6, 15, 7, 8, 9,
                           23, 27, 28, 26, 30, 114, 113, 31, 112, 16, 29, 25, 24, 17,
                           18, 19, 20, 21, 22, 24, 71, 70, 72],
                          [32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
                           47, 48, 64, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
                           61, 62, 63, 66, 65],
                          [69, 73, 74, 117, 75, 76, 77, 78, 79, 80, 98, 97, 96, 95, 94,
                           93, 99, 98, 105, 103, 104, 106, 107, 108, 111, 109, 110, 102,
                           100, 92, 91, 101, 100, 90, 89, 88, 87, 84, 83, 82, 81, 85, 86,
                           68, 67, 115]
                          ])
# Main configuration of the "l2rpn_idf_2023" environment: grid classes,
# forecasts, per-area rules, thermal limits, multi-area opponent and the
# "alert" reward used for this competition.
config = {
    "backend": PandaPowerBackend,
    "action_class": ActionIDF2023,
    "observation_class": ObservationIDF2023,
    "reward_class": RedispReward,
    "gamerules_class": this_rules,  # rules applied per geographical area
    "chronics_class": Multifolder,
    # forecasts available every 5 minutes up to 1 hour ahead
    "data_feeding_kwargs": {"gridvalueClass": GridStateFromFileWithForecastsWithMaintenance,
                            "h_forecast": [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60]
                            },
    "volagecontroler_class": None,
    "names_chronics_to_grid": None,
    "thermal_limits": th_lim,
    # opponent: one GeometricOpponent per area (see lines_attacked above)
    "opponent_budget_per_ts": 0.17 * 3.,
    "opponent_init_budget": 1000.,
    "opponent_attack_cooldown": 0,
    "opponent_attack_duration": 96,
    "opponent_action_class": PowerlineSetAction,
    "opponent_class": GeometricOpponentMultiArea,
    "opponent_budget_class": BaseActionBudget,
    "kwargs_opponent": {
        "lines_attacked": lines_attacked,
        "attack_every_xxx_hour": attack_every_xxx_hour,
        "average_attack_duration_hour": average_attack_duration_hour,
        "minimum_attack_duration_hour": 1,
        "pmax_pmin_ratio": 4
    },
    "other_rewards": {"alert": AlertReward}
}
| 5,434 | 46.675439 | 139 | py |
Grid2Op | Grid2Op-master/grid2op/data/l2rpn_idf_2023/chronics/keep_only_beginning.py | import pandas as pd
import numpy as np
import grid2op
import os
# Truncate the time series of these two scenarios so that only the first two
# days are kept (288 steps of 5 minutes per day, 12 forecast horizons per step).
for scenario in ["2035-01-15_0", "2035-08-20_0"]:
    for quantity in ["load_p", "load_q", "prod_p"]:
        # realized values: keep 2 days of 5-minute steps
        realized_path = os.path.join(".", scenario, f"{quantity}.csv.bz2")
        realized = pd.read_csv(realized_path, sep=";").iloc[:(288*2)]
        realized.to_csv(realized_path, sep=";", header=True, index=False)
        # forecasts: 12 horizons per step, hence 12x more rows to keep
        forecast_path = os.path.join(".", scenario, f"{quantity}_forecasted.csv.bz2")
        forecast = pd.read_csv(forecast_path, sep=";").iloc[:(288*2*12)]
        forecast.to_csv(forecast_path, sep=";", header=True, index=False)
| 631 | 41.133333 | 110 | py |
Grid2Op | Grid2Op-master/grid2op/data/l2rpn_neurips_2020_track1/config.py | from grid2op.Action import PowerlineSetAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
from grid2op.Opponent import WeightedRandomOpponent, BaseActionBudget
# Backward compatibility: the dedicated action / observation classes for this
# competition only exist in recent grid2op versions; fall back to the generic
# classes (with a warning) on older installations.
try:
    from grid2op.l2rpn_utils import ActionNeurips2020, ObservationNeurips2020
except ImportError:
    from grid2op.Action import TopologyAndDispatchAction
    from grid2op.Observation import CompleteObservation
    import warnings

    warnings.warn("The grid2op version you are trying to use is too old for this environment. Please upgrade it.")
    ActionNeurips2020 = TopologyAndDispatchAction
    ObservationNeurips2020 = CompleteObservation

# powerlines the opponent is allowed to attack
lines_attacked = [
    "62_58_180",
    "62_63_160",
    "48_50_136",
    "48_53_141",
    "41_48_131",
    "39_41_121",
    "43_44_125",
    "44_45_126",
    "34_35_110",
    "54_58_154",
]
# per-line weights used by the WeightedRandomOpponent (same order as above)
rho_normalization = [0.45, 0.45, 0.6, 0.35, 0.3, 0.2, 0.55, 0.3, 0.45, 0.55]

# opponent budget parameters (NB 1 hour = 12 time steps of 5 minutes)
opponent_attack_cooldown = 12 * 24  # 24 hours, 1 hour being 12 time steps
opponent_attack_duration = 12 * 4  # 4 hours
opponent_budget_per_ts = (
    0.16667  # opponent_attack_duration / opponent_attack_cooldown + epsilon
)
opponent_init_budget = 144.0  # no need to attack straightfully, it can attack starting at midday the first day
# Main configuration of the "l2rpn_neurips_2020_track1" environment:
# grid classes, thermal limits and the weighted random opponent.
config = {
    "backend": PandaPowerBackend,
    "action_class": ActionNeurips2020,
    "observation_class": ObservationNeurips2020,
    "reward_class": RedispReward,
    "gamerules_class": DefaultRules,
    "chronics_class": Multifolder,
    "grid_value_class": GridStateFromFileWithForecasts,
    "volagecontroler_class": None,
    "names_chronics_to_grid": None,
    # thermal limit of each powerline (one value per line, grid file order)
    "thermal_limits": [
        60.9,
        231.9,
        272.6,
        212.8,
        749.2,
        332.4,
        348.0,
        414.4,
        310.1,
        371.4,
        401.2,
        124.3,
        298.5,
        86.4,
        213.9,
        160.8,
        112.2,
        291.4,
        489.0,
        489.0,
        124.6,
        196.7,
        191.9,
        238.4,
        174.2,
        105.6,
        143.7,
        293.4,
        288.9,
        107.7,
        415.5,
        148.2,
        124.2,
        154.4,
        85.9,
        106.5,
        142.0,
        124.0,
        130.2,
        86.2,
        278.1,
        182.0,
        592.1,
        173.1,
        249.8,
        441.0,
        344.2,
        722.8,
        494.6,
        494.6,
        196.7,
        151.8,
        263.4,
        364.1,
        327.0,
        370.5,
        441.0,
        300.3,
        656.2,
    ],
    # opponent: random line disconnections weighted by rho_normalization
    "opponent_attack_cooldown": opponent_attack_cooldown,
    "opponent_attack_duration": opponent_attack_duration,
    "opponent_budget_per_ts": opponent_budget_per_ts,
    "opponent_init_budget": opponent_init_budget,
    "opponent_action_class": PowerlineSetAction,
    "opponent_class": WeightedRandomOpponent,
    "opponent_budget_class": BaseActionBudget,
    "kwargs_opponent": {
        "lines_attacked": lines_attacked,
        "rho_normalization": rho_normalization,
        "attack_period": opponent_attack_cooldown,
    },
}
| 3,225 | 25.016129 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/data/l2rpn_neurips_2020_track2/x1/config.py | from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
try:
from grid2op.l2rpn_utils import ActionNeurips2020, ObservationNeurips2020
except ImportError:
from grid2op.Action import TopologyAndDispatchAction
from grid2op.Observation import CompleteObservation
import warnings
warnings.warn("The grid2op version you are trying to use is too old for this environment. Please upgrade it.")
ActionNeurips2020 = TopologyAndDispatchAction
ObservationNeurips2020 = CompleteObservation
config = {
"backend": PandaPowerBackend,
"action_class": ActionNeurips2020,
"observation_class": ObservationNeurips2020,
"reward_class": RedispReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecasts,
"volagecontroler_class": None,
"names_chronics_to_grid": None,
"thermal_limits": [
220.7,
334.2,
470.4,
422.1,
445.6,
427.0,
7.4,
546.3,
566.6,
539.3,
344.8,
285.4,
591.5,
393.8,
334.6,
645.0,
336.9,
282.0,
132.8,
182.7,
1185.1,
907.9,
400.4,
528.2,
213.0,
336.9,
264.8,
430.2,
251.0,
473.6,
242.4,
460.6,
317.4,
659.8,
206.5,
361.5,
321.5,
178.5,
261.6,
144.1,
481.2,
296.2,
525.4,
201.1,
581.7,
561.8,
346.8,
486.8,
176.0,
826.0,
546.0,
508.9,
451.5,
480.2,
294.5,
252.4,
219.8,
316.9,
908.6,
359.1,
282.3,
280.5,
390.2,
756.7,
554.8,
237.1,
474.2,
164.3,
202.5,
455.0,
449.4,
387.8,
818.3,
410.2,
259.5,
203.1,
166.3,
259.0,
145.1,
258.3,
196.7,
503.3,
446.2,
162.4,
639.1,
727.3,
115.4,
445.2,
730.1,
253.6,
345.4,
138.2,
198.4,
248.7,
891.1,
1010.8,
557.9,
746.2,
292.2,
150.9,
617.4,
445.5,
475.0,
200.0,
556.5,
190.9,
188.4,
704.9,
387.8,
393.8,
43.4,
205.3,
339.2,
204.8,
601.3,
345.4,
318.2,
678.6,
394.8,
302.0,
329.9,
274.4,
307.6,
176.9,
352.3,
132.4,
174.7,
149.5,
83.0,
579.0,
198.6,
557.2,
557.2,
103.0,
179.9,
196.9,
244.0,
164.8,
100.3,
125.7,
549.0,
277.5,
273.3,
91.9,
351.9,
307.5,
127.3,
157.5,
88.0,
108.9,
148.2,
586.2,
129.3,
135.8,
85.1,
314.1,
207.8,
602.7,
206.5,
233.4,
396.3,
516.9,
646.8,
651.6,
594.6,
594.6,
265.5,
223.0,
325.2,
342.6,
307.5,
488.6,
448.4,
881.4,
579.3,
858.6,
231.5,
423.3,
503.4,
365.4,
396.3,
270.9,
605.7,
863.4,
1152.2,
858.6,
],
}
| 3,872 | 16.847926 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/data/l2rpn_neurips_2020_track2/x2.5/config.py | from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
try:
from grid2op.l2rpn_utils import ActionNeurips2020, ObservationNeurips2020
except ImportError:
from grid2op.Action import TopologyAndDispatchAction
from grid2op.Observation import CompleteObservation
import warnings
warnings.warn("The grid2op version you are trying to use is too old for this environment. Please upgrade it.")
ActionNeurips2020 = TopologyAndDispatchAction
ObservationNeurips2020 = CompleteObservation
config = {
"backend": PandaPowerBackend,
"action_class": ActionNeurips2020,
"observation_class": ObservationNeurips2020,
"reward_class": RedispReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecasts,
"volagecontroler_class": None,
"names_chronics_to_grid": None,
"thermal_limits": [
220.7,
334.2,
470.4,
422.1,
445.6,
427.0,
7.4,
546.3,
566.6,
539.3,
344.8,
285.4,
591.5,
393.8,
334.6,
645.0,
336.9,
282.0,
132.8,
182.7,
1185.1,
907.9,
400.4,
528.2,
213.0,
336.9,
264.8,
430.2,
251.0,
473.6,
242.4,
460.6,
317.4,
659.8,
206.5,
361.5,
321.5,
178.5,
261.6,
144.1,
481.2,
296.2,
525.4,
201.1,
581.7,
561.8,
346.8,
486.8,
176.0,
826.0,
546.0,
508.9,
451.5,
480.2,
294.5,
252.4,
219.8,
316.9,
908.6,
359.1,
282.3,
280.5,
390.2,
756.7,
554.8,
237.1,
474.2,
164.3,
202.5,
455.0,
449.4,
387.8,
818.3,
410.2,
259.5,
203.1,
166.3,
259.0,
145.1,
258.3,
196.7,
503.3,
446.2,
162.4,
639.1,
727.3,
115.4,
445.2,
730.1,
253.6,
345.4,
138.2,
198.4,
248.7,
891.1,
1010.8,
557.9,
746.2,
292.2,
150.9,
617.4,
445.5,
475.0,
200.0,
556.5,
190.9,
188.4,
704.9,
387.8,
393.8,
43.4,
205.3,
339.2,
204.8,
601.3,
345.4,
318.2,
678.6,
394.8,
302.0,
329.9,
274.4,
307.6,
176.9,
352.3,
132.4,
174.7,
149.5,
83.0,
579.0,
198.6,
557.2,
557.2,
103.0,
179.9,
196.9,
244.0,
164.8,
100.3,
125.7,
549.0,
277.5,
273.3,
91.9,
351.9,
307.5,
127.3,
157.5,
88.0,
108.9,
148.2,
586.2,
129.3,
135.8,
85.1,
314.1,
207.8,
602.7,
206.5,
233.4,
396.3,
516.9,
646.8,
651.6,
594.6,
594.6,
265.5,
223.0,
325.2,
342.6,
307.5,
488.6,
448.4,
881.4,
579.3,
858.6,
231.5,
423.3,
503.4,
365.4,
396.3,
270.9,
605.7,
863.4,
1152.2,
858.6,
],
}
| 3,872 | 16.847926 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/data/l2rpn_wcci_2020/config.py | from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecastsWithMaintenance
from grid2op.Backend import PandaPowerBackend
try:
from grid2op.l2rpn_utils import ActionWCCI2020, ObservationWCCI2020
except ImportError:
from grid2op.Action import TopologyAndDispatchAction
from grid2op.Observation import CompleteObservation
import warnings
warnings.warn("The grid2op version you are trying to use is too old for this environment. Please upgrade it.")
ActionWCCI2020 = TopologyAndDispatchAction
ObservationWCCI2020 = CompleteObservation
config = {
"backend": PandaPowerBackend,
"action_class": ActionWCCI2020,
"observation_class": ObservationWCCI2020,
"reward_class": RedispReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecastsWithMaintenance,
"volagecontroler_class": None,
"names_chronics_to_grid": {},
"thermal_limits": [
43.3,
205.2,
341.2,
204.0,
601.4,
347.1,
319.6,
301.4,
330.3,
274.1,
307.4,
172.3,
354.3,
127.9,
174.9,
152.6,
81.8,
204.3,
561.5,
561.5,
98.7,
179.8,
193.4,
239.9,
164.8,
100.4,
125.7,
278.2,
274.0,
89.9,
352.1,
157.1,
124.4,
154.6,
86.1,
106.7,
148.5,
129.6,
136.1,
86.0,
313.2,
198.5,
599.1,
206.8,
233.7,
395.8,
516.7,
656.4,
583.0,
583.0,
263.1,
222.6,
322.8,
340.6,
305.2,
360.1,
395.8,
274.2,
605.5,
],
}
| 1,976 | 20.725275 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/data/l2rpn_wcci_2022_dev/config.py | from grid2op.Action import PlayableAction, PowerlineSetAction
from grid2op.Observation import CompleteObservation
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecastsWithMaintenance
from grid2op.Backend import PandaPowerBackend
from grid2op.Opponent import GeometricOpponent, BaseActionBudget
try:
from grid2op.l2rpn_utils import ActionWCCI2022, ObservationWCCI2022
except ImportError:
from grid2op.Action import PlayableAction
from grid2op.Observation import CompleteObservation
import warnings
warnings.warn("The grid2op version you are trying to use is too old for this environment. Please upgrade it.")
ActionWCCI2022 = PlayableAction
ObservationWCCI2022 = CompleteObservation
lines_attacked = ["26_31_106",
"21_22_93",
"17_18_88",
"4_10_162",
"12_14_68",
"14_32_108",
"62_58_180",
"62_63_160",
"48_50_136",
"48_53_141",
"41_48_131",
"39_41_121",
"43_44_125",
"44_45_126",
"34_35_110",
"54_58_154",
"74_117_81",
"80_79_175",
"93_95_43",
"88_91_33",
"91_92_37",
"99_105_62",
"102_104_61"]
opponent_attack_cooldown = 12 # 1 hour, 1 hour being 12 time steps
opponent_attack_duration = 96 # 8 hours at maximum
opponent_budget_per_ts = (
0.17 # opponent_attack_duration / opponent_attack_cooldown + epsilon
)
opponent_init_budget = 144.0 # no need to attack straightfully, it can attack starting at midday the first day
config = {
"backend": PandaPowerBackend,
"action_class": ActionWCCI2022,
"observation_class": ObservationWCCI2022,
"reward_class": RedispReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecastsWithMaintenance,
"volagecontroler_class": None,
"names_chronics_to_grid": None,
"thermal_limits": [ 233.4, 354.4, 792.7, 550.2, 572.2, 557.2, 8. , 480. ,
567.4, 681.8, 357.6, 336.9, 819. , 419.2, 304.2, 626.2,
256.1, 300.1, 132.7, 165.9, 841. , 1105.5, 428.2, 555.2,
224.2, 374.4, 285.6, 429.8, 253.1, 479.6, 238.3, 452.6,
312.9, 627.8, 196.1, 360.9, 317.1, 325.1, 352.6, 347.3,
565.5, 495.7, 1422.9, 479.8, 646.9, 1603.9, 364.1, 1498.4,
278. , 866.2, 1667.7, 569.6, 1350.2, 1478. , 380.8, 282.4,
246.9, 301.3, 766.9, 401.2, 306.9, 314.4, 333.4, 748.9,
513.4, 255.8, 513. , 268.5, 219. , 492. , 420.4, 417.4,
637.8, 571.9, 593.8, 273.7, 247. , 385.3, 283.4, 251.2,
210.8, 473.9, 408.5, 162.7, 602.2, 1098.6, 205. , 546. ,
435.9, 191.4, 424.1, 106.2, 149.2, 184.9, 1146.1, 1117.8,
569.6, 800.2, 380.3, 292.1, 636.5, 487.5, 490.9, 207.4,
590.6, 243.8, 466. , 698.2, 385. , 351.7, 60.9, 231.9,
340.8, 212.8, 749.2, 332.4, 348. , 798. , 398.3, 414.4,
341.1, 371.4, 401.2, 298.3, 343.3, 267.8, 213.9, 160.8,
112.2, 458.9, 349.7, 489. , 489. , 180.7, 196.7, 191.9,
238.4, 174.2, 105.6, 143.7, 393.6, 293.4, 288.9, 107.7,
623.2, 252.9, 118.3, 154.4, 111.7, 106.5, 177.5, 655.8,
161.2, 169.3, 120.7, 389.3, 291.2, 592.1, 277. , 412.2,
441. , 671.2, 609. , 867.4, 494.6, 494.6, 196.7, 167. ,
263.4, 364.1, 359.7, 803.2, 589. , 887.2, 615.2, 1096.8,
306.9, 472.6, 546.6, 370.5, 441. , 300.3, 656.2, 1346. ,
1246.5, 1196.5],
"opponent_attack_cooldown": opponent_attack_cooldown,
"opponent_attack_duration": opponent_attack_duration,
"opponent_budget_per_ts": opponent_budget_per_ts,
"opponent_init_budget": opponent_init_budget,
"opponent_action_class": PowerlineSetAction,
"opponent_class": GeometricOpponent,
"opponent_budget_class": BaseActionBudget,
"kwargs_opponent": {
"lines_attacked": lines_attacked,
"attack_every_xxx_hour": 24,
"average_attack_duration_hour": 4,
"minimum_attack_duration_hour": 1,
},
}
| 4,859 | 48.090909 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/data/rte_case118_example/config.py | from grid2op.Action import TopologyAndDispatchAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder, ChangeNothing
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
config = {
"backend": PandaPowerBackend,
"action_class": TopologyAndDispatchAction,
"observation_class": None,
"reward_class": RedispReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecasts,
"volagecontroler_class": None,
"thermal_limits": None,
"names_chronics_to_grid": None,
}
| 681 | 33.1 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data/rte_case14_opponent/config.py | from grid2op.Action import TopologyAndDispatchAction, PowerlineSetAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
from grid2op.Opponent import RandomLineOpponent, BaseActionBudget
config = {
"backend": PandaPowerBackend,
"action_class": TopologyAndDispatchAction,
"observation_class": None,
"reward_class": RedispReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecasts,
"volagecontroler_class": None,
"thermal_limits": [
384.900179,
384.900179,
380.0,
380.0,
157.0,
380.0,
380.0,
1077.7205012,
461.8802148,
769.80036,
269.4301253,
384.900179,
760.0,
380.0,
760.0,
384.900179,
230.9401074,
170.79945452,
3402.24266,
3402.24266,
],
"names_chronics_to_grid": None,
"opponent_attack_cooldown": 12 * 24,
"opponent_attack_duration": 12 * 4,
"opponent_budget_per_ts": 0.5,
"opponent_init_budget": 0.0,
"opponent_action_class": PowerlineSetAction,
"opponent_class": RandomLineOpponent,
"opponent_budget_class": BaseActionBudget,
"kwargs_opponent": {
"lines_attacked": [
"1_3_3",
"1_4_4",
"3_6_15",
"9_10_12",
"11_12_13",
"12_13_14",
]
},
}
| 1,613 | 26.355932 | 72 | py |
Grid2Op | Grid2Op-master/grid2op/data/rte_case14_realistic/config.py | from grid2op.Action import TopologyAndDispatchAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
config = {
"backend": PandaPowerBackend,
"action_class": TopologyAndDispatchAction,
"observation_class": None,
"reward_class": RedispReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecasts,
"volagecontroler_class": None,
"thermal_limits": [
384.900179,
384.900179,
380.0,
380.0,
157.0,
380.0,
380.0,
1077.7205012,
461.8802148,
769.80036,
269.4301253,
384.900179,
760.0,
380.0,
760.0,
384.900179,
230.9401074,
170.79945452,
3402.24266,
3402.24266,
],
"names_chronics_to_grid": None,
}
| 1,035 | 24.268293 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data/rte_case14_redisp/config.py | from grid2op.Action import TopologyAndDispatchAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
config = {
"backend": PandaPowerBackend,
"action_class": TopologyAndDispatchAction,
"observation_class": None,
"reward_class": RedispReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecasts,
"volagecontroler_class": None,
"thermal_limits": [
3.84900179e02,
3.84900179e02,
2.28997102e05,
2.28997102e05,
2.28997102e05,
1.52664735e04,
2.28997102e05,
3.84900179e02,
3.84900179e02,
1.83285800e02,
3.84900179e02,
3.84900179e02,
2.28997102e05,
2.28997102e05,
6.93930612e04,
3.84900179e02,
3.84900179e02,
2.40562612e02,
3.40224266e03,
3.40224266e03,
],
"names_chronics_to_grid": None,
}
| 1,129 | 26.560976 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data/rte_case14_test/config.py | from grid2op.Action import TopologyAndDispatchAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
config = {
"backend": PandaPowerBackend,
"action_class": TopologyAndDispatchAction,
"observation_class": None,
"reward_class": RedispReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecasts,
"volagecontroler_class": None,
"thermal_limits": [
352.8251645,
352.8251645,
183197.68156979,
183197.68156979,
183197.68156979,
12213.17877132,
183197.68156979,
352.8251645,
352.8251645,
352.8251645,
352.8251645,
352.8251645,
183197.68156979,
183197.68156979,
183197.68156979,
352.8251645,
352.8251645,
352.8251645,
2721.79412618,
2721.79412618,
],
"names_chronics_to_grid": None,
}
| 1,124 | 26.439024 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data/rte_case5_example/config.py | from grid2op.Action import TopologyAction
from grid2op.Reward import L2RPNReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
config = {
"backend": PandaPowerBackend,
"action_class": TopologyAction,
"observation_class": None,
"reward_class": L2RPNReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecasts,
"volagecontroler_class": None,
"thermal_limits": None,
"names_chronics_to_grid": None,
}
| 642 | 31.15 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/5bus_example_diff_name/config.py | from grid2op.Action import TopologyAction
from grid2op.Reward import L2RPNReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
config = {
"backend": PandaPowerBackend,
"action_class": TopologyAction,
"observation_class": None,
"reward_class": L2RPNReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecasts,
"volagecontroler_class": None,
"thermal_limits": None,
"names_chronics_to_grid": None,
}
| 642 | 31.15 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/5bus_example_forecasts/config.py | from grid2op.Action import PowerlineSetAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import (Multifolder,
GridStateFromFileWithForecasts)
# TODO change this !
from grid2op.Backend import PandaPowerBackend
from grid2op.Opponent import BaseActionBudget
from grid2op.l2rpn_utils import ActionWCCI2022, ObservationWCCI2022
opponent_attack_cooldown = 12 # 1 hour, 1 hour being 12 time steps
opponent_attack_duration = 96 # 8 hours at maximum
opponent_budget_per_ts = (
0.17 # opponent_attack_duration / opponent_attack_cooldown + epsilon
)
opponent_init_budget = 144.0 # no need to attack straightfully, it can attack starting at midday the first day
config = {
"backend": PandaPowerBackend,
"action_class": ActionWCCI2022,
"observation_class": ObservationWCCI2022,
"reward_class": RedispReward,
"gamerules_class": DefaultRules,
# TODO change that too
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecasts,
"data_feeding_kwargs": {"h_forecast": [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60]},
"volagecontroler_class": None,
"names_chronics_to_grid": None,
"opponent_attack_cooldown": opponent_attack_cooldown,
"opponent_attack_duration": opponent_attack_duration,
"opponent_budget_per_ts": opponent_budget_per_ts,
"opponent_init_budget": opponent_init_budget,
"opponent_action_class": PowerlineSetAction,
"opponent_budget_class": BaseActionBudget,
}
| 1,547 | 38.692308 | 111 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/5bus_example_forecasts/chronics/0/modif.py | import pandas as pd
import numpy as np
for el in ["load_p", "load_q", "prod_p"]:
dt = pd.read_csv(f"{el}.csv.bz2", sep=";")
arr = np.ones((12, dt.shape[1]))
add = np.vstack([np.cumsum(arr, 0) for _ in range(dt.shape[0])])
dt = dt.loc[dt.index.repeat(12)]
dt += add
dt.to_csv(f"{el}_forecasted.csv.bz2", sep=";", index=False, header=True) | 367 | 32.454545 | 76 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/5bus_example_forecasts/chronics/maintenance/add_maintenance.py | import pandas as pd
import numpy as np
line_name = ['0_1_0', '0_2_1', '0_3_2', '0_4_3', '1_2_4', '2_3_5', '2_3_6',
'3_4_7']
line_maint_id = 5
indx_maint_start = 6
indx_maint_stop = 10
load = pd.read_csv("load_p.csv.bz2", sep=";")
n_row = load.shape[0]
maintenance = np.zeros((n_row, len(line_name)))
maintenance[indx_maint_start:indx_maint_stop,line_maint_id] = 1.
maint = pd.DataFrame(maintenance, columns=line_name)
maint.to_csv("maintenance.csv.bz2", sep=";", index=False, header=True)
| 505 | 28.764706 | 75 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/5bus_example_some_missing/config.py | from grid2op.Action import TopologyAction
from grid2op.Reward import L2RPNReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
config = {
"backend": PandaPowerBackend,
"action_class": TopologyAction,
"observation_class": None,
"reward_class": L2RPNReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecasts,
"volagecontroler_class": None,
"thermal_limits": None,
"names_chronics_to_grid": None,
}
| 642 | 31.15 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/5bus_example_th_lim_dict/config.py | from grid2op.Action import TopologyAction
from grid2op.Reward import L2RPNReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import ChangeNothing
from grid2op.Backend import PandaPowerBackend
config = {
"backend": PandaPowerBackend,
"action_class": TopologyAction,
"observation_class": None,
"reward_class": L2RPNReward,
"gamerules_class": DefaultRules,
"chronics_class": ChangeNothing,
"volagecontroler_class": None,
"thermal_limits": {
"0_1_0": 200.0,
"0_2_1": 300.0,
"0_3_2": 500.0,
"0_4_3": 600.0,
"1_2_4": 700.0,
"2_3_5": 800.0,
"2_3_6": 900.0,
"3_4_7": 1000.0,
},
"names_chronics_to_grid": None,
}
| 726 | 25.925926 | 45 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/5bus_example_with_params/config.py | from grid2op.Action import TopologyAction
from grid2op.Reward import L2RPNReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
config = {
"backend": PandaPowerBackend,
"action_class": TopologyAction,
"observation_class": None,
"reward_class": L2RPNReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecasts,
"volagecontroler_class": None,
"thermal_limits": None,
"names_chronics_to_grid": None,
}
| 642 | 31.15 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/5bus_modif_grid/config.py | from grid2op.Action import TopologyAction
from grid2op.Reward import L2RPNReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
config = {
"backend": PandaPowerBackend,
"action_class": TopologyAction,
"observation_class": None,
"reward_class": L2RPNReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecasts,
"volagecontroler_class": None,
"thermal_limits": None,
"names_chronics_to_grid": None,
}
| 642 | 31.15 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/5bus_modif_grid/readme.md | # see issue https://github.com/rte-france/Grid2Op/issues/217
| 61 | 30 | 60 | md |
Grid2Op | Grid2Op-master/grid2op/data_test/env_14_test_maintenance/config.py | from grid2op.Action import TopologyAndDispatchAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
config = {
"backend": PandaPowerBackend,
"action_class": TopologyAndDispatchAction,
"observation_class": None,
"reward_class": RedispReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecasts,
"volagecontroler_class": None,
"thermal_limits": [
384.900179,
384.900179,
380.0,
380.0,
157.0,
380.0,
380.0,
1077.7205012,
461.8802148,
769.80036,
269.4301253,
384.900179,
760.0,
380.0,
760.0,
384.900179,
230.9401074,
170.79945452,
3402.24266,
3402.24266,
],
"names_chronics_to_grid": None,
}
| 1,035 | 24.268293 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/ieee118/create_ieee118_zones.py | import matplotlib.pyplot as plt
import pandas as pd
import pandapower as pp
import pandapower.networks as nw
import pandapower.plotting as pplt
def get_net_118_with_zones():
net = nw.case118()
pp.runpp(net)
net.bus.sort_index(inplace=True)
net.bus.loc[:32, "zone"] = 1
net.bus.loc[32:67, "zone"] = 2
net.bus.loc[67:112, "zone"] = 3
net.bus.loc[23, "zone"] = 3
net.bus.loc[[112, 113, 114, 115, 116], "zone"] = 1
net.bus.loc[[115, 117], "zone"] = 3
net.bus.loc[:, "toy_zone"] = False
net.bus.loc[99:112, "toy_zone"] = True
net.bus.loc[[100, 101], "toy_zone"] = False
return net
def get_subnets(net):
areas = dict()
for zone in net.bus.zone.unique():
zone_buses = net.bus.loc[net.bus.zone == zone].index
areas[zone] = pp.select_subnet(net, zone_buses)
return areas
def plot_zones():
net = get_net_118_with_zones()
areas = get_subnets(net)
fig, axes = plt.subplots(1, len(areas.keys()))
keys = areas.keys()
keys = sorted(keys)
for i, zone in enumerate(keys):
net = areas[zone]
collections = list()
ax = axes[i]
sizes = pplt.get_collection_sizes(net)
collections.append(pplt.create_bus_collection(net, size=sizes["bus"]))
collections.append(pplt.create_line_collection(net))
collections.append(pplt.create_trafo_collection(net, size=sizes["trafo"]))
if zone == 3:
collections.append(
pplt.create_bus_collection(
net,
net.bus.loc[net.bus.toy_zone].index,
color="g",
size=2 * sizes["bus"],
zorder=11,
)
)
pplt.draw_collections(collections, ax=ax)
plt.show()
def create_toy_zone():
net = nw.case118()
pp.runpp(net)
vm_ext_grid = net.res_bus.loc[99, "vm_pu"]
va_ext_grid = net.res_bus.loc[99, "va_degree"]
net = get_net_118_with_zones()
areas = get_subnets(net)
net = areas[3]
net = pp.select_subnet(net, buses=net.bus.loc[net.bus.toy_zone].index)
pp.create_ext_grid(net, bus=99, vm_pu=vm_ext_grid, va_degree=va_ext_grid)
pp.runpp(net)
return net
def create_zone3():
net118 = nw.case118()
pp.runpp(net118)
vm_gen = net118.res_bus.at[23, "vm_pu"]
p_mw_gen = net118.res_line.loc[
net118.line.from_bus.isin([23, 24]) & net118.line.to_bus.isin([23, 24]),
"p_from_mw",
]
p_mw_gen = abs(sum(p_mw_gen))
net = get_net_118_with_zones()
areas = get_subnets(net)
net = areas[3]
pp.create_gen(net, bus=23, vm_pu=vm_gen, p_mw=p_mw_gen)
pp.runpp(net)
return net
def create_zone1():
net118 = nw.case118()
pp.runpp(net118)
vm_ext_grid = net118.res_bus.loc[[32, 33, 37], "vm_pu"].mean()
va_ext_grid = net118.res_bus.loc[[32, 33, 37], "va_degree"].mean()
vm_gen = net118.res_bus.at[22, "vm_pu"]
p_mw_gen = net118.res_line.loc[
net118.line.from_bus.isin([22, 23]) & net118.line.to_bus.isin([22, 23]),
"p_from_mw",
]
p_mw_gen = abs(sum(p_mw_gen))
net = get_net_118_with_zones()
areas = get_subnets(net)
net = areas[1]
zone1_lines = [49, 41, 40]
b = pp.create_bus(
net,
vn_kv=net118.bus.loc[33, "vn_kv"],
name="zone2_slack",
index=118,
geodata=(net118.bus_geodata.at[33, "x"], net118.bus_geodata.at[33, "y"]),
)
net.line = pd.concat([net.line, net118.line.loc[zone1_lines]], sort=False)
net.line.loc[zone1_lines, "to_bus"] = int(b)
pp.create_ext_grid(net, bus=b, vm_pu=vm_ext_grid, va_degree=va_ext_grid)
pp.create_gen(net, bus=22, vm_pu=vm_gen, p_mw=p_mw_gen)
pp.runpp(net)
return net
if __name__ == "__main__":
plot_zones()
toy_net = create_toy_zone()
net_zone3 = create_zone3()
net_zone1 = create_zone1()
| 3,908 | 25.773973 | 82 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/ieee118/ieee_118_analysis.py | import pandapower as pp
import pandapower.topology as top
import pandapower.networks as nw
import pandapower.plotting as pplt
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import seaborn as sns
palette = "muted"
n_colors = 8
# three different subgrids
def custom_legend(fig, entries, fontsize=8, loc="upper right", marker="o"):
handles = list()
for label, color in entries.items():
handles.append(Line2D([0], [0], color=color, lw=2, label=label, marker=marker))
fig.legend(
handles=handles,
fancybox=False,
shadow=False,
fontsize=fontsize,
loc=loc,
ncol=1,
handletextpad=0.18,
)
def plot_feeder():
net = nw.case118()
fig, ax = plt.subplots(1, 1)
mg = top.create_nxgraph(net, nogobuses=set(net.trafo.lv_bus.values))
colors = sns.color_palette()
collections = list()
sizes = pplt.get_collection_sizes(net)
voltage_levels = net.bus.vn_kv.unique()
voltage_level_colors = dict(zip(voltage_levels, colors))
legend_entries = dict()
gens = set(net.gen.loc[:, "bus"].values)
for area, color in zip(top.connected_components(mg), colors):
vn_area = net.bus.loc[list(area)[0], "vn_kv"]
color = voltage_level_colors[vn_area]
legend_entries[vn_area] = color
area_gens = gens - area
other = area - gens
collections.append(
pplt.create_bus_collection(
net,
area_gens,
color=color,
size=sizes["bus"],
zorder=11,
patch_type="rect",
)
)
collections.append(
pplt.create_bus_collection(
net, other, color=color, size=sizes["bus"], zorder=11
)
)
line_ind = net.line.loc[:, "from_bus"].isin(area) | net.line.loc[
:, "to_bus"
].isin(area)
lines = net.line.loc[line_ind].index
collections.append(pplt.create_line_collection(net, lines, color=color))
eg_vn = net.bus.at[net.ext_grid.bus.values[0], "vn_kv"]
collections.append(
pplt.create_ext_grid_collection(
net, size=sizes["ext_grid"], color=voltage_level_colors[eg_vn]
)
)
collections.append(pplt.create_trafo_collection(net, size=sizes["trafo"], zorder=1))
pplt.draw_collections(collections, ax=ax)
custom_legend(fig, entries=legend_entries)
legend_entries = {"gen": "grey"}
custom_legend(fig, entries=legend_entries, loc="center right", marker="s")
print_info(net, fig)
plt.show()
def print_info(net, fig):
text = (
"Trafos: "
+ str(len(net.trafo))
+ "\nGens: "
+ str(len(net.gen))
+ "\nGen P [MW]: "
+ str(net.gen.p_mw.sum())
+ "\nLoads: "
+ str(len(net.load))
+ "\nLoad P [MW]: "
+ str(net.load.p_mw.sum())
+ "\nshunts: "
+ str(len(net.shunt))
+ "\nV levels: "
+ str(net.bus.vn_kv.unique())
)
fig.text(0.0, 0.0, text)
if __name__ == "__main__":
plot_feeder()
| 3,124 | 27.669725 | 88 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/ieee118_R2subgrid_wcci_test_maintenance/config.py | from grid2op.Action import TopologyAndDispatchAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecastsWithMaintenance
from grid2op.Backend import PandaPowerBackend
config = {
"backend": PandaPowerBackend,
"action_class": TopologyAndDispatchAction,
"observation_class": None,
"reward_class": RedispReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecastsWithMaintenance,
"volagecontroler_class": None,
"names_chronics_to_grid": None,
"thermal_limits": [
44.9,
205.2,
341.2,
205.8,
601.4,
347.1,
319.6,
302.8,
330.3,
282.7,
311.2,
184.2,
354.3,
138.9,
174.9,
162.4,
89.5,
205.7,
561.5,
561.5,
105.8,
183.0,
197.2,
244.9,
164.9,
100.4,
125.7,
278.2,
274.0,
92.8,
353.4,
168.7,
134.2,
158.8,
97.6,
109.9,
156.5,
140.7,
146.9,
91.3,
318.2,
355.2,
600.7,
208.7,
233.7,
301.5,
516.7,
656.4,
586.0,
586.0,
270.9,
230.4,
322.8,
351.4,
320.3,
841.8,
723.5,
675.4,
1415.4,
],
}
| 1,580 | 18.7625 | 74 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/ieee118_R2subgrid_wcci_test_maintenance_2/config.py | from grid2op.Action import TopologyAndDispatchAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecastsWithMaintenance
from grid2op.Backend import PandaPowerBackend
config = {
"backend": PandaPowerBackend,
"action_class": TopologyAndDispatchAction,
"observation_class": None,
"reward_class": RedispReward,
"gamerules_class": DefaultRules,
"chronics_class": Multifolder,
"grid_value_class": GridStateFromFileWithForecastsWithMaintenance,
"volagecontroler_class": None,
"names_chronics_to_grid": None,
"thermal_limits": [
44.9,
205.2,
341.2,
205.8,
601.4,
347.1,
319.6,
302.8,
330.3,
282.7,
311.2,
184.2,
354.3,
138.9,
174.9,
162.4,
89.5,
205.7,
561.5,
561.5,
105.8,
183.0,
197.2,
244.9,
164.9,
100.4,
125.7,
278.2,
274.0,
92.8,
353.4,
168.7,
134.2,
158.8,
97.6,
109.9,
156.5,
140.7,
146.9,
91.3,
318.2,
355.2,
600.7,
208.7,
233.7,
301.5,
516.7,
656.4,
586.0,
586.0,
270.9,
230.4,
322.8,
351.4,
320.3,
841.8,
723.5,
675.4,
1415.4,
],
}
| 1,580 | 18.7625 | 74 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/ieee118_R2subgrid_wcci_test_maintenance_3/config.py | from grid2op.Action import TopologyAndDispatchAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecastsWithMaintenance
from grid2op.Backend import PandaPowerBackend
# Environment configuration dictionary read by grid2op when this environment is created.
config = {
    "backend": PandaPowerBackend,
    "action_class": TopologyAndDispatchAction,
    "observation_class": None,  # None -> grid2op default observation class
    "reward_class": RedispReward,
    "gamerules_class": DefaultRules,
    "chronics_class": Multifolder,
    "grid_value_class": GridStateFromFileWithForecastsWithMaintenance,
    "volagecontroler_class": None,  # [sic] -- presumably the exact (misspelled) key grid2op looks up; do not rename
    "names_chronics_to_grid": None,
    # One value per powerline, in the grid's line order (units presumably A -- confirm with the backend).
    "thermal_limits": [
        44.9,
        205.2,
        341.2,
        205.8,
        601.4,
        347.1,
        319.6,
        302.8,
        330.3,
        282.7,
        311.2,
        184.2,
        354.3,
        138.9,
        174.9,
        162.4,
        89.5,
        205.7,
        561.5,
        561.5,
        105.8,
        183.0,
        197.2,
        244.9,
        164.9,
        100.4,
        125.7,
        278.2,
        274.0,
        92.8,
        353.4,
        168.7,
        134.2,
        158.8,
        97.6,
        109.9,
        156.5,
        140.7,
        146.9,
        91.3,
        318.2,
        355.2,
        600.7,
        208.7,
        233.7,
        301.5,
        516.7,
        656.4,
        586.0,
        586.0,
        270.9,
        230.4,
        322.8,
        351.4,
        320.3,
        841.8,
        723.5,
        675.4,
        1415.4,
    ],
}
| 1,580 | 18.7625 | 74 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/l2rpn_case14_sandbox_qp_cste/config.py | from grid2op.Action import PlayableAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
# Environment configuration dictionary read by grid2op when this environment is created.
config = {
    "backend": PandaPowerBackend,
    "action_class": PlayableAction,
    "observation_class": None,  # None -> grid2op default observation class
    "reward_class": RedispReward,
    "gamerules_class": DefaultRules,
    "chronics_class": Multifolder,
    "grid_value_class": GridStateFromFileWithForecasts,
    "volagecontroler_class": None,  # [sic] -- presumably the exact (misspelled) key grid2op looks up; do not rename
    # One value per powerline, in the grid's line order (units presumably A -- confirm with the backend).
    "thermal_limits": [
        541.0,
        450.0,
        375.0,
        636.0,
        175.0,
        285.0,
        335.0,
        657.0,
        496.0,
        827.0,
        442.0,
        641.0,
        840.0,
        156.0,
        664.0,
        235.0,
        119.0,
        179.0,
        1986.0,
        1572.0,
    ],
    "names_chronics_to_grid": None,
}
| 949 | 22.170732 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/l2rpn_icaps_2021_small_test/config.py | from grid2op.Action import PlayableAction, PowerlineSetAction
from grid2op.Reward import AlarmReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecastsWithMaintenance
from grid2op.Backend import PandaPowerBackend
from grid2op.Opponent import GeometricOpponent, BaseActionBudget
from grid2op.operator_attention import LinearAttentionBudget
# Powerlines the opponent is allowed to attack, identified by line name.
lines_attacked = [
    "62_58_180",
    "62_63_160",
    "48_50_136",
    "48_53_141",
    "41_48_131",
    "39_41_121",
    "43_44_125",
    "44_45_126",
    "34_35_110",
    "54_58_154",
]
opponent_attack_cooldown = 12  # 1 hour, 1 hour being 12 time steps
opponent_attack_duration = 96  # 8 hours at maximum
opponent_budget_per_ts = (
    0.17  # opponent_attack_duration / opponent_attack_cooldown + epsilon
)
opponent_init_budget = 144.0  # no need to attack right away: it can attack starting at midday of the first day
# Environment configuration dictionary read by grid2op when this environment is created.
config = {
    "backend": PandaPowerBackend,
    "action_class": PlayableAction,
    "observation_class": None,  # None -> grid2op default observation class
    "reward_class": AlarmReward,
    "gamerules_class": DefaultRules,
    "chronics_class": Multifolder,
    "grid_value_class": GridStateFromFileWithForecastsWithMaintenance,
    "volagecontroler_class": None,  # [sic] -- presumably the exact (misspelled) key grid2op looks up; do not rename
    "names_chronics_to_grid": None,
    # One value per powerline, in the grid's line order (units presumably A -- confirm with the backend).
    "thermal_limits": [
        60.9,
        231.9,
        272.6,
        212.8,
        749.2,
        332.4,
        348.0,
        414.4,
        310.1,
        371.4,
        401.2,
        124.3,
        298.5,
        86.4,
        213.9,
        160.8,
        112.2,
        291.4,
        489.0,
        489.0,
        124.6,
        196.7,
        191.9,
        238.4,
        174.2,
        105.6,
        143.7,
        293.4,
        288.9,
        107.7,
        415.5,
        148.2,
        124.2,
        154.4,
        85.9,
        106.5,
        142.0,
        124.0,
        130.2,
        86.2,
        278.1,
        182.0,
        592.1,
        173.1,
        249.8,
        441.0,
        344.2,
        722.8,
        494.6,
        494.6,
        196.7,
        151.8,
        263.4,
        364.1,
        327.0,
        370.5,
        441.0,
        300.3,
        656.2,
    ],
    # Opponent settings: a GeometricOpponent disconnecting lines from `lines_attacked`.
    "opponent_attack_cooldown": opponent_attack_cooldown,
    "opponent_attack_duration": opponent_attack_duration,
    "opponent_budget_per_ts": opponent_budget_per_ts,
    "opponent_init_budget": opponent_init_budget,
    "opponent_action_class": PowerlineSetAction,
    "opponent_class": GeometricOpponent,
    "opponent_budget_class": BaseActionBudget,
    "kwargs_opponent": {
        "lines_attacked": lines_attacked,
        "attack_every_xxx_hour": 24,
        "average_attack_duration_hour": 4,
        "minimum_attack_duration_hour": 1,
    },
    # Alarm ("attention budget") feature settings.
    "has_attention_budget": True,
    "attention_budget_class": LinearAttentionBudget,
    "kwargs_attention_budget": {
        "max_budget": 3.0,
        "budget_per_ts": 1.0 / (12.0 * 16),
        "alarm_cost": 1.0,
        "init_budget": 2.0,
    },
}
| 3,036 | 23.893443 | 111 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/l2rpn_idf_2023_with_alert/config.py | from grid2op.Action import PlayableAction, PowerlineSetAction
from grid2op.Reward import AlertReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecastsWithMaintenance
from grid2op.Backend import PandaPowerBackend
from grid2op.Opponent import GeometricOpponent, BaseActionBudget
# Resolve the IDF-2023 action/observation classes, degrading gracefully to the
# generic grid2op classes when the installed grid2op predates ``l2rpn_utils``
# (same pattern as the other bundled environment configs).
try:
    from grid2op.l2rpn_utils import ActionIDF2023, ObservationIDF2023
except ImportError:
    # BUGFIX: this fallback branch previously used ``warnings`` and
    # ``CompleteObservation`` without importing either (neither is imported at
    # the top of this file), so the "old grid2op" path raised NameError
    # instead of warning the user. Import both locally before using them.
    import warnings
    from grid2op.Observation import CompleteObservation

    warnings.warn("The grid2op version you are trying to use is too old for this environment. Please upgrade it.")
    ActionIDF2023 = PlayableAction  # imported at the top of this file
    ObservationIDF2023 = CompleteObservation
# Powerlines the opponent is allowed to attack, identified by line name.
lines_attacked = [
    "62_58_180",
    "62_63_160",
    "48_50_136",
    "48_53_141",
    "41_48_131",
    "39_41_121",
    "43_44_125",
    "44_45_126",
    "34_35_110",
    "54_58_154",
]
opponent_attack_cooldown = 12  # 1 hour, 1 hour being 12 time steps
opponent_attack_duration = 96  # 8 hours at maximum
opponent_budget_per_ts = (
    0.17  # opponent_attack_duration / opponent_attack_cooldown + epsilon
)
opponent_init_budget = 144.0  # no need to attack right away: it can attack starting at midday of the first day
# Environment configuration dictionary read by grid2op when this environment is created.
config = {
    "backend": PandaPowerBackend,
    "action_class": PlayableAction,
    "observation_class": None,  # None -> grid2op default observation class
    "reward_class": AlertReward,
    "gamerules_class": DefaultRules,
    "chronics_class": Multifolder,
    "grid_value_class": GridStateFromFileWithForecastsWithMaintenance,
    "volagecontroler_class": None,  # [sic] -- presumably the exact (misspelled) key grid2op looks up; do not rename
    "names_chronics_to_grid": None,
    # One value per powerline, in the grid's line order (units presumably A -- confirm with the backend).
    "thermal_limits": [
        60.9,
        231.9,
        272.6,
        212.8,
        749.2,
        332.4,
        348.0,
        414.4,
        310.1,
        371.4,
        401.2,
        124.3,
        298.5,
        86.4,
        213.9,
        160.8,
        112.2,
        291.4,
        489.0,
        489.0,
        124.6,
        196.7,
        191.9,
        238.4,
        174.2,
        105.6,
        143.7,
        293.4,
        288.9,
        107.7,
        415.5,
        148.2,
        124.2,
        154.4,
        85.9,
        106.5,
        142.0,
        124.0,
        130.2,
        86.2,
        278.1,
        182.0,
        592.1,
        173.1,
        249.8,
        441.0,
        344.2,
        722.8,
        494.6,
        494.6,
        196.7,
        151.8,
        263.4,
        364.1,
        327.0,
        370.5,
        441.0,
        300.3,
        656.2,
    ],
    # Opponent settings: a GeometricOpponent disconnecting lines from `lines_attacked`.
    "opponent_attack_cooldown": opponent_attack_cooldown,
    "opponent_attack_duration": opponent_attack_duration,
    "opponent_budget_per_ts": opponent_budget_per_ts,
    "opponent_init_budget": opponent_init_budget,
    "opponent_action_class": PowerlineSetAction,
    "opponent_class": GeometricOpponent,
    "opponent_budget_class": BaseActionBudget,
    "kwargs_opponent": {
        "lines_attacked": lines_attacked,
        "attack_every_xxx_hour": 24,
        "average_attack_duration_hour": 4,
        "minimum_attack_duration_hour": 1,
    },
    # Alert ("attention budget") feature settings.
    # NOTE(review): unlike the alarm-based configs, no "attention_budget_class"
    # key is given here and the cost key is "alert_cost" -- confirm this is
    # what the alert feature expects for this environment.
    "has_attention_budget": True,
    "kwargs_attention_budget": {
        "max_budget": 3.0,
        "budget_per_ts": 1.0 / (12.0 * 16),
        "alert_cost": 1.0,
        "init_budget": 2.0,
    },
}
| 3,213 | 24.307087 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/l2rpn_neurips_2020_track1_with_alarm/config.py | from grid2op.Action import PlayableAction, PowerlineSetAction
from grid2op.Reward import AlarmReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
from grid2op.Opponent import WeightedRandomOpponent, BaseActionBudget
from grid2op.operator_attention import LinearAttentionBudget
# Powerlines the opponent is allowed to attack, identified by line name.
lines_attacked = [
    "62_58_180",
    "62_63_160",
    "48_50_136",
    "48_53_141",
    "41_48_131",
    "39_41_121",
    "43_44_125",
    "44_45_126",
    "34_35_110",
    "54_58_154",
]
# One normalization factor per attacked line, same order as `lines_attacked`.
rho_normalization = [0.45, 0.45, 0.6, 0.35, 0.3, 0.2, 0.55, 0.3, 0.45, 0.55]
opponent_attack_cooldown = 12 * 24  # 24 hours, 1 hour being 12 time steps
opponent_attack_duration = 12 * 4  # 4 hours
opponent_budget_per_ts = (
    0.16667  # opponent_attack_duration / opponent_attack_cooldown + epsilon
)
opponent_init_budget = 144.0  # no need to attack right away: it can attack starting at midday of the first day
# Environment configuration dictionary read by grid2op when this environment is created.
config = {
    "backend": PandaPowerBackend,
    "action_class": PlayableAction,
    "observation_class": None,  # None -> grid2op default observation class
    "reward_class": AlarmReward,
    "gamerules_class": DefaultRules,
    "chronics_class": Multifolder,
    "grid_value_class": GridStateFromFileWithForecasts,
    "volagecontroler_class": None,  # [sic] -- presumably the exact (misspelled) key grid2op looks up; do not rename
    "names_chronics_to_grid": None,
    # One value per powerline, in the grid's line order (units presumably A -- confirm with the backend).
    "thermal_limits": [
        60.9,
        231.9,
        272.6,
        212.8,
        749.2,
        332.4,
        348.0,
        414.4,
        310.1,
        371.4,
        401.2,
        124.3,
        298.5,
        86.4,
        213.9,
        160.8,
        112.2,
        291.4,
        489.0,
        489.0,
        124.6,
        196.7,
        191.9,
        238.4,
        174.2,
        105.6,
        143.7,
        293.4,
        288.9,
        107.7,
        415.5,
        148.2,
        124.2,
        154.4,
        85.9,
        106.5,
        142.0,
        124.0,
        130.2,
        86.2,
        278.1,
        182.0,
        592.1,
        173.1,
        249.8,
        441.0,
        344.2,
        722.8,
        494.6,
        494.6,
        196.7,
        151.8,
        263.4,
        364.1,
        327.0,
        370.5,
        441.0,
        300.3,
        656.2,
    ],
    # Opponent settings: a WeightedRandomOpponent attacking lines from `lines_attacked`.
    "opponent_attack_cooldown": opponent_attack_cooldown,
    "opponent_attack_duration": opponent_attack_duration,
    "opponent_budget_per_ts": opponent_budget_per_ts,
    "opponent_init_budget": opponent_init_budget,
    "opponent_action_class": PowerlineSetAction,
    "opponent_class": WeightedRandomOpponent,
    "opponent_budget_class": BaseActionBudget,
    "kwargs_opponent": {
        "lines_attacked": lines_attacked,
        "rho_normalization": rho_normalization,
        "attack_period": opponent_attack_cooldown,
    },
    # Alarm ("attention budget") feature settings.
    "has_attention_budget": True,
    "attention_budget_class": LinearAttentionBudget,
    "kwargs_attention_budget": {
        "max_budget": 5.0,
        "budget_per_ts": 1.0 / (12.0 * 8),
        "alarm_cost": 1.0,
        "init_budget": 3.0,
    },
} | 3,069 | 24.583333 | 111 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/multimix/case14_001/config.py | from grid2op.Action import TopologyAndDispatchAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
# Environment configuration dictionary read by grid2op when this environment is created.
config = {
    "backend": PandaPowerBackend,
    "action_class": TopologyAndDispatchAction,
    "observation_class": None,  # None -> grid2op default observation class
    "reward_class": RedispReward,
    "gamerules_class": DefaultRules,
    "chronics_class": Multifolder,
    "grid_value_class": GridStateFromFileWithForecasts,
    "volagecontroler_class": None,  # [sic] -- presumably the exact (misspelled) key grid2op looks up; do not rename
    # One value per powerline, in the grid's line order (units presumably A -- confirm with the backend).
    "thermal_limits": [
        3.84900179e02,
        3.84900179e02,
        2.28997102e05,
        2.28997102e05,
        2.28997102e05,
        1.52664735e04,
        2.28997102e05,
        3.84900179e02,
        3.84900179e02,
        1.83285800e02,
        3.84900179e02,
        3.84900179e02,
        2.28997102e05,
        2.28997102e05,
        6.93930612e04,
        3.84900179e02,
        3.84900179e02,
        2.40562612e02,
        3.40224266e03,
        3.40224266e03,
    ],
    "names_chronics_to_grid": None,
}
| 1,129 | 26.560976 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/multimix/case14_002/config.py | from grid2op.Action import TopologyAndDispatchAction
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFileWithForecasts
from grid2op.Backend import PandaPowerBackend
# Environment configuration dictionary read by grid2op when this environment is created.
config = {
    "backend": PandaPowerBackend,
    "action_class": TopologyAndDispatchAction,
    "observation_class": None,  # None -> grid2op default observation class
    "reward_class": RedispReward,
    "gamerules_class": DefaultRules,
    "chronics_class": Multifolder,
    "grid_value_class": GridStateFromFileWithForecasts,
    "volagecontroler_class": None,  # [sic] -- presumably the exact (misspelled) key grid2op looks up; do not rename
    # One value per powerline, in the grid's line order (units presumably A -- confirm with the backend).
    "thermal_limits": [
        384.900179,
        384.900179,
        380.0,
        380.0,
        157.0,
        380.0,
        380.0,
        1077.7205012,
        461.8802148,
        769.80036,
        269.4301253,
        384.900179,
        760.0,
        380.0,
        760.0,
        384.900179,
        230.9401074,
        170.79945452,
        3402.24266,
        3402.24266,
    ],
    "names_chronics_to_grid": None,
}
| 1,035 | 24.268293 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/data_test/test_issue_367/config.py | from grid2op.Action import PlayableAction, PowerlineSetAction
from grid2op.Observation import CompleteObservation
from grid2op.Reward import RedispReward
from grid2op.Rules import DefaultRules
from grid2op.Chronics import Multifolder
from grid2op.Chronics import GridStateFromFile
from grid2op.Backend import PandaPowerBackend
from grid2op.Opponent import GeometricOpponent, BaseActionBudget
# Resolve the WCCI-2022 action/observation classes, degrading gracefully to the
# generic grid2op classes when the installed grid2op predates ``l2rpn_utils``.
try:
    from grid2op.l2rpn_utils import ActionWCCI2022, ObservationWCCI2022
except ImportError:
    # Old grid2op: fall back on the generic classes and tell the user about it.
    import warnings
    from grid2op.Action import PlayableAction
    from grid2op.Observation import CompleteObservation

    warnings.warn(
        "The grid2op version you are trying to use is too old for this environment. Please upgrade it."
    )
    ActionWCCI2022 = PlayableAction
    ObservationWCCI2022 = CompleteObservation
# Powerlines the opponent is allowed to attack, identified by line name.
lines_attacked = ["26_31_106",
                  "21_22_93",
                  "17_18_88",
                  "4_10_162",
                  "12_14_68",
                  "14_32_108",
                  "62_58_180",
                  "62_63_160",
                  "48_50_136",
                  "48_53_141",
                  "41_48_131",
                  "39_41_121",
                  "43_44_125",
                  "44_45_126",
                  "34_35_110",
                  "54_58_154",
                  "74_117_81",
                  "80_79_175",
                  "93_95_43",
                  "88_91_33",
                  "91_92_37",
                  "99_105_62",
                  "102_104_61"]
opponent_attack_cooldown = 12  # 1 hour, 1 hour being 12 time steps
opponent_attack_duration = 96  # 8 hours at maximum
opponent_budget_per_ts = (
    0.17  # opponent_attack_duration / opponent_attack_cooldown + epsilon
)
opponent_init_budget = 144.0  # no need to attack right away: it can attack starting at midday of the first day
# Environment configuration dictionary read by grid2op when this environment is created.
config = {
    "backend": PandaPowerBackend,
    "action_class": ActionWCCI2022,
    "observation_class": ObservationWCCI2022,
    "reward_class": RedispReward,
    "gamerules_class": DefaultRules,
    "chronics_class": Multifolder,
    "grid_value_class": GridStateFromFile,
    "volagecontroler_class": None,  # [sic] -- presumably the exact (misspelled) key grid2op looks up; do not rename
    "names_chronics_to_grid": None,
    # One value per powerline, in the grid's line order (units presumably A -- confirm with the backend).
    "thermal_limits": [ 233.4, 354.4, 792.7, 550.2, 572.2, 557.2, 8. , 480. ,
       567.4, 681.8, 357.6, 336.9, 819. , 419.2, 304.2, 626.2,
       256.1, 300.1, 132.7, 165.9, 841. , 1105.5, 428.2, 555.2,
       224.2, 374.4, 285.6, 429.8, 253.1, 479.6, 238.3, 452.6,
       312.9, 627.8, 196.1, 360.9, 317.1, 325.1, 352.6, 347.3,
       565.5, 495.7, 1422.9, 479.8, 646.9, 1603.9, 364.1, 1498.4,
       278. , 866.2, 1667.7, 569.6, 1350.2, 1478. , 380.8, 282.4,
       246.9, 301.3, 766.9, 401.2, 306.9, 314.4, 333.4, 748.9,
       513.4, 255.8, 513. , 268.5, 219. , 492. , 420.4, 417.4,
       637.8, 571.9, 593.8, 273.7, 247. , 385.3, 283.4, 251.2,
       210.8, 473.9, 408.5, 162.7, 602.2, 1098.6, 205. , 546. ,
       435.9, 191.4, 424.1, 106.2, 149.2, 184.9, 1146.1, 1117.8,
       569.6, 800.2, 380.3, 292.1, 636.5, 487.5, 490.9, 207.4,
       590.6, 243.8, 466. , 698.2, 385. , 351.7, 60.9, 231.9,
       340.8, 212.8, 749.2, 332.4, 348. , 798. , 398.3, 414.4,
       341.1, 371.4, 401.2, 298.3, 343.3, 267.8, 213.9, 160.8,
       112.2, 458.9, 349.7, 489. , 489. , 180.7, 196.7, 191.9,
       238.4, 174.2, 105.6, 143.7, 393.6, 293.4, 288.9, 107.7,
       623.2, 252.9, 118.3, 154.4, 111.7, 106.5, 177.5, 655.8,
       161.2, 169.3, 120.7, 389.3, 291.2, 592.1, 277. , 412.2,
       441. , 671.2, 609. , 867.4, 494.6, 494.6, 196.7, 167. ,
       263.4, 364.1, 359.7, 803.2, 589. , 887.2, 615.2, 1096.8,
       306.9, 472.6, 546.6, 370.5, 441. , 300.3, 656.2, 1346. ,
       1246.5, 1196.5],
    # Opponent settings: a GeometricOpponent disconnecting lines from `lines_attacked`.
    "opponent_attack_cooldown": opponent_attack_cooldown,
    "opponent_attack_duration": opponent_attack_duration,
    "opponent_budget_per_ts": opponent_budget_per_ts,
    "opponent_init_budget": opponent_init_budget,
    "opponent_action_class": PowerlineSetAction,
    "opponent_class": GeometricOpponent,
    "opponent_budget_class": BaseActionBudget,
    "kwargs_opponent": {
        "lines_attacked": lines_attacked,
        "attack_every_xxx_hour": 24,
        "average_attack_duration_hour": 4,
        "minimum_attack_duration_hour": 1,
    },
}
| 4,803 | 47.525253 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/gym_compat/__init__.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
# Public API of grid2op.gym_compat. Names that exist only when gym and/or
# gymnasium is installed are appended to __all__ conditionally below.
__all__ = [
    "BaseGymAttrConverter",
    "GymEnv",
    "GymObservationSpace",
    "GymActionSpace",
    "ScalerAttrConverter",
    "MultiToTupleConverter",
    "ContinuousToDiscreteConverter",
    "BoxGymObsSpace",
    "BoxGymActSpace",
    "MultiDiscreteActSpace",
    "DiscreteActSpace",
]
from grid2op.gym_compat.utils import _MAX_GYM_VERSION_RANDINT, GYM_VERSION, GYMNASIUM_AVAILABLE, GYM_AVAILABLE
# base for all gym converter
from grid2op.gym_compat.base_gym_attr_converter import BaseGymAttrConverter
if GYMNASIUM_AVAILABLE:
    from grid2op.gym_compat.base_gym_attr_converter import BaseGymnasiumAttrConverter
    __all__.append("BaseGymnasiumAttrConverter")
if GYM_AVAILABLE:
    from grid2op.gym_compat.base_gym_attr_converter import BaseLegacyGymAttrConverter
    __all__.append("BaseLegacyGymAttrConverter")
# the environment (by default with dict encoding)
if GYMNASIUM_AVAILABLE:
    from grid2op.gym_compat.gymenv import GymnasiumEnv
    __all__.append("GymnasiumEnv")
if GYM_AVAILABLE:
    from grid2op.gym_compat.gymenv import GymEnv_Legacy, GymEnv_Modern
    __all__.append("GymEnv_Legacy")
    __all__.append("GymEnv_Modern")
# define the default env to use: gymnasium wins when both packages are present,
# otherwise the legacy/modern gym variant is picked from the installed gym version
if GYMNASIUM_AVAILABLE:
    GymEnv = GymnasiumEnv
else:
    if GYM_VERSION <= _MAX_GYM_VERSION_RANDINT:
        GymEnv = GymEnv_Legacy
    else:
        GymEnv = GymEnv_Modern
# action space (as Dict)
from grid2op.gym_compat.gym_act_space import GymActionSpace
if GYMNASIUM_AVAILABLE:
    from grid2op.gym_compat.gym_act_space import GymnasiumActionSpace
    __all__.append("GymnasiumActionSpace")
if GYM_AVAILABLE:
    from grid2op.gym_compat.gym_act_space import LegacyGymActionSpace
    __all__.append("LegacyGymActionSpace")
# observation space (as Dict)
from grid2op.gym_compat.gym_obs_space import GymObservationSpace
if GYMNASIUM_AVAILABLE:
    from grid2op.gym_compat.gym_obs_space import GymnasiumObservationSpace
    __all__.append("GymnasiumObservationSpace")
if GYM_AVAILABLE:
    from grid2op.gym_compat.gym_obs_space import LegacyGymObservationSpace
    __all__.append("LegacyGymObservationSpace")
# attribute converters (scaling, tuple conversion, discretization)
from grid2op.gym_compat.scaler_attr_converter import ScalerAttrConverter
if GYMNASIUM_AVAILABLE:
    from grid2op.gym_compat.scaler_attr_converter import ScalerAttrConverterGymnasium
    __all__.append("ScalerAttrConverterGymnasium")
if GYM_AVAILABLE:
    from grid2op.gym_compat.scaler_attr_converter import ScalerAttrConverterLegacyGym
    __all__.append("ScalerAttrConverterLegacyGym")
from grid2op.gym_compat.multi_to_tuple_converter import MultiToTupleConverter
if GYMNASIUM_AVAILABLE:
    from grid2op.gym_compat.multi_to_tuple_converter import MultiToTupleConverterGymnasium
    __all__.append("MultiToTupleConverterGymnasium")
if GYM_AVAILABLE:
    from grid2op.gym_compat.multi_to_tuple_converter import MultiToTupleConverterLegacyGym
    __all__.append("MultiToTupleConverterLegacyGym")
from grid2op.gym_compat.continuous_to_discrete import ContinuousToDiscreteConverter
if GYMNASIUM_AVAILABLE:
    from grid2op.gym_compat.continuous_to_discrete import ContinuousToDiscreteConverterGymnasium
    __all__.append("ContinuousToDiscreteConverterGymnasium")
if GYM_AVAILABLE:
    from grid2op.gym_compat.continuous_to_discrete import ContinuousToDiscreteConverterLegacyGym
    __all__.append("ContinuousToDiscreteConverterLegacyGym")
# observation space (as Box)
from grid2op.gym_compat.box_gym_obsspace import BoxGymObsSpace
if GYMNASIUM_AVAILABLE:
    from grid2op.gym_compat.box_gym_obsspace import BoxGymnasiumObsSpace
    __all__.append("BoxGymnasiumObsSpace")
if GYM_AVAILABLE:
    from grid2op.gym_compat.box_gym_obsspace import BoxLegacyGymObsSpace
    __all__.append("BoxLegacyGymObsSpace")
# action space (as Box)
from grid2op.gym_compat.box_gym_actspace import BoxGymActSpace
if GYMNASIUM_AVAILABLE:
    from grid2op.gym_compat.box_gym_actspace import BoxGymnasiumActSpace
    __all__.append("BoxGymnasiumActSpace")
if GYM_AVAILABLE:
    from grid2op.gym_compat.box_gym_actspace import BoxLegacyGymActSpace
    __all__.append("BoxLegacyGymActSpace")
# action space (as MultiDiscrete)
from grid2op.gym_compat.multidiscrete_gym_actspace import MultiDiscreteActSpace
if GYMNASIUM_AVAILABLE:
    from grid2op.gym_compat.multidiscrete_gym_actspace import MultiDiscreteActSpaceGymnasium
    __all__.append("MultiDiscreteActSpaceGymnasium")
if GYM_AVAILABLE:
    from grid2op.gym_compat.multidiscrete_gym_actspace import MultiDiscreteActSpaceLegacyGym
    __all__.append("MultiDiscreteActSpaceLegacyGym")
# action space (as Discrete)
from grid2op.gym_compat.discrete_gym_actspace import DiscreteActSpace
if GYMNASIUM_AVAILABLE:
    from grid2op.gym_compat.discrete_gym_actspace import DiscreteActSpaceGymnasium
    __all__.append("DiscreteActSpaceGymnasium")
if GYM_AVAILABLE:
    from grid2op.gym_compat.discrete_gym_actspace import DiscreteActSpaceLegacyGym
    __all__.append("DiscreteActSpaceLegacyGym")
# TODO doc and test
| 5,349 | 38.62963 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/gym_compat/base_gym_attr_converter.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.gym_compat.utils import GYM_AVAILABLE, GYMNASIUM_AVAILABLE
from grid2op.gym_compat.utils import check_gym_version
class __AuxBaseGymAttrConverter(object):
    """
    Common machinery shared by the gym / gymnasium attribute converters.

    A converter pairs a gym ``Space`` (stored in ``my_space``) with two optional
    translation callables: one mapping gym objects to grid2op objects and one
    for the reverse direction.

    TODO work in progress !
    Need help if you can :-)

    .. warning::
        Depending on the presence / absence of the gymnasium and gym packages this
        class behaves differently. Grid2op maintains compatibility with both
        gymnasium (newest) and gym (legacy, no longer maintained):

        - :class:`BaseGymAttrConverter` inherits from gymnasium when it is installed
          (it is then :class:`BaseGymnasiumAttrConverter`), otherwise it inherits
          from gym (it is then exactly :class:`BaseLegacyGymAttrConverter`)
        - :class:`BaseGymnasiumAttrConverter` only ever inherits from gymnasium
        - :class:`BaseLegacyGymAttrConverter` only ever inherits from gym

        See :ref:`gymnasium_gym` for more information
    """

    def __init__(self, space=None, gym_to_g2op=None, g2op_to_gym=None):
        check_gym_version(type(self)._gymnasium)
        # is the "super" class initialized -- do not modify in child classes
        self.__is_init_super = False
        # is this instance initialized (i.e. has a space been attached)
        self._is_init_space = False
        self._my_gym_to_g2op = None
        self._my_g2op_to_gym = None
        self.my_space = None
        if space is not None:
            self.base_initialize(space, gym_to_g2op, g2op_to_gym)

    def base_initialize(self, space, gym_to_g2op, g2op_to_gym):
        # idempotent: a second call is a no-op
        if not self.__is_init_super:
            self._my_gym_to_g2op = gym_to_g2op
            self._my_g2op_to_gym = g2op_to_gym
            self.my_space = space
            self._is_init_space = True
            self.__is_init_super = True

    def is_init_space(self):
        # True once a space has been attached to this converter
        return self._is_init_space

    def initialize_space(self, space):
        # attach the space once; later calls are ignored
        if self._is_init_space:
            return
        space_cls = type(self)._SpaceType
        if not isinstance(space, space_cls):
            raise RuntimeError(
                "Impossible to scale a converter if this one is not from type space.Space"
            )
        self.my_space = space
        self._is_init_space = True

    def gym_to_g2op(self, gym_object):
        """
        Convert a gym object into its grid2op counterpart.

        Parameters
        ----------
        gym_object:
            An object (action or observation) represented as a gym "ordered dictionary"

        Returns
        -------
        The same object, represented as a grid2op.Action.BaseAction or
        grid2op.Observation.BaseObservation.
        """
        converter = self._my_gym_to_g2op
        if converter is None:
            raise NotImplementedError(
                "Unable to convert gym object to grid2op object with this converter"
            )
        return converter(gym_object)

    def g2op_to_gym(self, g2op_object):
        """
        Convert a grid2op object into its gym counterpart.

        Parameters
        ----------
        g2op_object:
            An object (action or observation) represented as a grid2op.Action.BaseAction or
            grid2op.Observation.BaseObservation

        Returns
        -------
        The same object, represented as a gym "ordered dictionary"
        """
        converter = self._my_g2op_to_gym
        if converter is None:
            raise NotImplementedError(
                "Unable to convert grid2op object to gym object with this converter"
            )
        return converter(g2op_object)
# Generate the concrete converter classes from the private base above, one per
# installed backend package. When both gym and gymnasium are present, the
# gymnasium flavour wins as the default `BaseGymAttrConverter` (assigned last).
if GYM_AVAILABLE:
    from gym.spaces import Space as LegGymSpace
    # legacy-gym flavour: isinstance checks use gym's Space, _gymnasium is False
    BaseLegacyGymAttrConverter = type("BaseLegacyGymAttrConverter",
                                      (__AuxBaseGymAttrConverter, ),
                                      {"_SpaceType": LegGymSpace,
                                       "_gymnasium": False,
                                       "__module__": __name__})
    BaseLegacyGymAttrConverter.__doc__ = __AuxBaseGymAttrConverter.__doc__
    BaseGymAttrConverter = BaseLegacyGymAttrConverter
if GYMNASIUM_AVAILABLE:
    from gymnasium.spaces import Space
    # gymnasium flavour: isinstance checks use gymnasium's Space, _gymnasium is True
    BaseGymnasiumAttrConverter = type("BaseGymnasiumAttrConverter",
                                      (__AuxBaseGymAttrConverter, ),
                                      {"_SpaceType": Space,
                                       "_gymnasium": True,
                                       "__module__": __name__})
    BaseGymnasiumAttrConverter.__doc__ = __AuxBaseGymAttrConverter.__doc__
    BaseGymAttrConverter = BaseGymnasiumAttrConverter
| 5,163 | 37.251852 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/gym_compat/box_gym_actspace.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from typing import Tuple
import copy
import warnings
import numpy as np
# from gym.spaces import Box
from grid2op.Action import BaseAction, ActionSpace
from grid2op.dtypes import dt_int, dt_bool, dt_float
from grid2op.Exceptions import Grid2OpException
# TODO test that it works normally
# TODO test the casting in dt_int or dt_float depending on the data
# TODO test the scaling
# TODO doc
# TODO test the function part
from grid2op.gym_compat.utils import (ALL_ATTR_CONT,
ATTR_DISCRETE,
check_gym_version,
GYM_AVAILABLE,
GYMNASIUM_AVAILABLE)
class __AuxBoxGymActSpace:
"""
This class allows to convert a grid2op action space into a gym "Box" which is
a regular Box in R^d.
It also allows to customize which part of the action you want to use and offer capacity to
center / reduce the data or to use more complex function from the observation.
.. note::
Though it is possible to use every type of action with this type of action space, be aware that
this is not recommended at all to use it for discrete attribute (set_bus, change_bus, set_line_status or
change_line_status) !
Basically, when doing action in gym for these attributes, this converter will involve rounding and
is definitely not the best representation. Prefer the :class:`MultiDiscreteActSpace` or
the :class:`DiscreteActSpace` classes.
.. note::
A gymnasium Box is encoded as a numpy array, see the example section for more information.
.. danger::
If you use this encoding for the "curtailment" you might end up with "weird" behaviour. Your agent
        will perform some kind of curtailment at each step (there is no real way to express "I don't want to curtail")
So the index corresponding to the "curtail" type of actions should rather be "1." (meaning
"limit the value at 100%" which is somewhat equivalent to "I don't want to curtail")
Examples
--------
If you simply want to use it you can do:
.. code-block:: python
import grid2op
env_name = "l2rpn_case14_sandbox" # or any other name
env = grid2op.make(env_name)
from grid2op.gym_compat import GymEnv, BoxGymActSpace
gym_env = GymEnv(env)
gym_env.action_space = BoxGymActSpace(env.action_space)
In this case it will extract all the features in all the action (a detailed list is given
in the documentation at :ref:`action-module`).
You can select the attribute you want to keep, for example:
.. code-block:: python
gym_env.observation_space = BoxGymActSpace(env.observation_space,
attr_to_keep=['redispatch', "curtail"])
You can also apply some basic transformation to the attribute of the action. This can be done with:
.. code-block:: python
gym_env.observation_space = BoxGymActSpace(env.observation_space,
attr_to_keep=['redispatch', "curtail"],
multiply={"redispatch": env.gen_max_ramp_up},
add={"redispatch": 0.5 * env.gen_max_ramp_up})
In the above example, the resulting "redispatch" part of the vector will be given by the following
formula: `grid2op_act = gym_act * multiply + add`
Hint: you can use: `multiply` being the standard deviation and `add` being the average of the attribute.
.. warning::
Depending on the presence absence of gymnasium and gym packages this class might behave differently.
In grid2op we tried to maintain compatibility both with gymnasium (newest) and gym (legacy,
no more maintained) RL packages. The behaviour is the following:
- :class:`BoxGymActSpace` will inherit from gymnasium if it's installed
(in this case it will be :class:`BoxGymnasiumActSpace`), otherwise it will
inherit from gym (and will be exactly :class:`BoxLegacyGymActSpace`)
- :class:`BoxGymnasiumActSpace` will inherit from gymnasium if it's available and never from
from gym
- :class:`BoxLegacyGymActSpace` will inherit from gym if it's available and never from
from gymnasium
See :ref:`gymnasium_gym` for more information
For the "l2rpn_case14_sandbox" environment, a code using :class:`BoxGymActSpace` can look something like
(if you want to build action "by hands"):
.. code-block:: python
import grid2op
from grid2op.gym_compat import GymEnv, BoxGymActSpace
import numpy as np
env_name = "l2rpn_case14_sandbox"
env = grid2op.make(env_name)
gym_env = GymEnv(env)
gym_env.action_space = BoxGymActSpace(env.action_space)
obs = gym_env.reset() # obs will be an OrderedDict (default, but you can customize it)
# you can do a "do nothing" action
act = np.zeros(gym_env.action_space.shape)
# see danger about curtailment !
start_, end_ = gym_env.action_space.get_indexes("curtail")
## real version, not in the space... (write an issue if it's a problem for you)
act[start_:end_] = -1
## version "in the space"
# act[start_:end_] = 1
print(gym_env.action_space.from_gym(act))
obs, reward, done, truncated, info = gym_env.step(act)
# you can also do a random action:
act = gym_env.action_space.sample()
print(gym_env.action_space.from_gym(act))
obs, reward, done, truncated, info = gym_env.step(act)
# you can do an action on say redispatch (for example)
act = np.zeros(gym_env.action_space.shape)
key = "redispatch" # "redispatch", "curtail", "set_storage" (but there is no storage on this env)
start_, end_ = gym_env.action_space.get_indexes(key)
act[start_:end_] = np.random.uniform(high=1, low=-1, size=env.gen_redispatchable.sum()) # the dispatch vector
print(gym_env.action_space.from_gym(act))
obs, reward, done, truncated, info = gym_env.step(act)
Notes
-------
For more customization, this code is roughly equivalent to something like:
.. code-block:: python
import grid2op
env_name = "l2rpn_case14_sandbox" # or any other name
env = grid2op.make(env_name)
from grid2op.gym_compat import GymEnv
# this of course will not work... Replace "AGymSpace" with a normal gym space, like Dict, Box, MultiDiscrete etc.
from gym.spaces import AGymSpace
gym_env = GymEnv(env)
class MyCustomActionSpace(AGymSpace):
def __init__(self, whatever, you, want):
# do as you please here
pass
# don't forget to initialize the base class
AGymSpace.__init__(self, see, gym, doc, as, to, how, to, initialize, it)
# eg. MultiDiscrete.__init__(self, nvec=...)
def from_gym(self, gym_action):
# this is this very same function that you need to implement
# it should have this exact name, take only one action (member of your gym space) as input
# and return a grid2op action
return TheGymAction_ConvertedTo_Grid2op_Action
# eg. return np.concatenate((obs.gen_p * 0.1, np.sqrt(obs.load_p))
gym_env.action_space.close()
gym_env.action_space = MyCustomActionSpace(whatever, you, wanted)
And you can implement pretty much anything in the "from_gym" function.
"""
    def __init__(
        self,
        grid2op_action_space,
        attr_to_keep=ALL_ATTR_CONT,
        add=None,
        multiply=None,
        functs=None,
    ):
        """Build the Box action space from a grid2op :class:`ActionSpace`.

        Parameters
        ----------
        grid2op_action_space:
            The grid2op action space to convert. Must be an :class:`ActionSpace`.
        attr_to_keep:
            Names of the grid2op action properties exported in the Box.
            Defaults to every continuous attribute supported by the action class.
        add:
            Optional dict ``attr name -> offset``. The gym -> grid2op conversion
            is ``glop = gym * multiply + add``.
        multiply:
            Optional dict ``attr name -> scale`` (see ``add``).
        functs:
            Optional dict ``attr name -> (callable, low, high, shape, dtype)``
            for fully custom conversions (see the class documentation).
        """
        if not isinstance(grid2op_action_space, ActionSpace):
            raise RuntimeError(
                f"Impossible to create a BoxGymActSpace without providing a "
                f"grid2op action_space. You provided {type(grid2op_action_space)}"
                f'as the "grid2op_action_space" attribute.'
            )
        check_gym_version(type(self)._gymnasium)
        if attr_to_keep == ALL_ATTR_CONT:
            # by default, i remove all the attributes that are not supported by the action type
            # i do not do that if the user specified specific attributes to keep. This is his responsibility in
            # in this case
            attr_to_keep = {
                el for el in attr_to_keep if grid2op_action_space.supports_type(el)
            }
        for el in ATTR_DISCRETE:
            if el in attr_to_keep:
                # discrete attributes can technically be encoded in a Box, but a
                # dedicated discrete space is almost always a better fit
                warnings.warn(
                    f'The class "BoxGymActSpace" should mainly be used to consider only continuous '
                    f"actions (eg. redispatch, set_storage or curtail). Though it is possible to use "
                    f'"{el}" when building it, be aware that this discrete action will be treated '
                    f'as continuous. Consider using the "MultiDiscreteActSpace" for these attributes.'
                )
        if not attr_to_keep:
            raise Grid2OpException("This would be an empty action set. ")
        self._attr_to_keep = sorted(attr_to_keep)
        act_sp = grid2op_action_space
        self._act_space = copy.deepcopy(grid2op_action_space)
        # redispatch bounds are the per-step ramps of the dispatchable generators
        low_gen = -1.0 * act_sp.gen_max_ramp_down[act_sp.gen_redispatchable]
        high_gen = 1.0 * act_sp.gen_max_ramp_up[act_sp.gen_redispatchable]
        nb_redisp = np.sum(act_sp.gen_redispatchable)
        nb_curtail = np.sum(act_sp.gen_renewable)
        curtail = np.full(shape=(nb_curtail,), fill_value=0.0, dtype=dt_float)
        curtail_mw = np.full(shape=(nb_curtail,), fill_value=0.0, dtype=dt_float)
        # (low, high, shape, dtype) for each supported grid2op action property
        self._dict_properties = {
            "set_line_status": (
                np.full(shape=(act_sp.n_line,), fill_value=-1, dtype=dt_int),
                np.full(shape=(act_sp.n_line,), fill_value=1, dtype=dt_int),
                (act_sp.n_line,),
                dt_int,
            ),
            "change_line_status": (
                np.full(shape=(act_sp.n_line,), fill_value=0, dtype=dt_int),
                np.full(shape=(act_sp.n_line,), fill_value=1, dtype=dt_int),
                (act_sp.n_line,),
                dt_int,
            ),
            "set_bus": (
                np.full(shape=(act_sp.dim_topo,), fill_value=-1, dtype=dt_int),
                np.full(shape=(act_sp.dim_topo,), fill_value=1, dtype=dt_int),
                (act_sp.dim_topo,),
                dt_int,
            ),
            "change_bus": (
                np.full(shape=(act_sp.dim_topo,), fill_value=0, dtype=dt_int),
                np.full(shape=(act_sp.dim_topo,), fill_value=1, dtype=dt_int),
                (act_sp.dim_topo,),
                dt_int,
            ),
            "redispatch": (low_gen, high_gen, (nb_redisp,), dt_float),
            "set_storage": (
                -1.0 * act_sp.storage_max_p_prod,
                1.0 * act_sp.storage_max_p_absorb,
                (act_sp.n_storage,),
                dt_float,
            ),
            "curtail": (
                curtail,
                np.full(shape=(nb_curtail,), fill_value=1.0, dtype=dt_float),
                (nb_curtail,),
                dt_float,
            ),
            "curtail_mw": (
                curtail_mw,
                1.0 * act_sp.gen_pmax[act_sp.gen_renewable],
                (nb_curtail,),
                dt_float,
            ),
            "raise_alarm": (
                np.full(shape=(act_sp.dim_alarms,), fill_value=0, dtype=dt_int),
                np.full(shape=(act_sp.dim_alarms,), fill_value=1, dtype=dt_int),
                (act_sp.dim_alarms,),
                dt_int,
            ),
            "raise_alert": (
                np.full(shape=(act_sp.dim_alerts,), fill_value=0, dtype=dt_int),
                np.full(shape=(act_sp.dim_alerts,), fill_value=1, dtype=dt_int),
                (act_sp.dim_alerts,),
                dt_int,
            ),
        }
        # grid2op dtype of each property, used when decoding the gym vector
        # (floats get rounded to ints / thresholded to bools in `from_gym`)
        self._key_dict_to_proptype = {
            "set_line_status": dt_int,
            "change_line_status": dt_bool,
            "set_bus": dt_int,
            "change_bus": dt_bool,
            "redispatch": dt_float,
            "set_storage": dt_float,
            "curtail": dt_float,
            "curtail_mw": dt_float,
            "raise_alarm": dt_bool,
            "raise_alert": dt_bool
        }
        if add is not None:
            self._add = {k: np.array(v) for k, v in add.items()}
        else:
            self._add = {}
        if multiply is not None:
            self._multiply = {k: np.array(v) for k, v in multiply.items()}
        else:
            self._multiply = {}
        # handle the "functional" part
        self.__func = {}
        self._dims = None
        self._dtypes = None
        if functs is None:
            functs = {}
        low, high, shape, dtype = self._get_info(functs)
        # initialize the base container
        type(self)._BoxType.__init__(self, low=low, high=high, shape=shape, dtype=dtype)
        # convert data in `_add` and `_multiply` to the right type
        # self._add = {k: v.astype(self.dtype) for k, v in self._add.items()}
        # self._multiply = {k: v.astype(self.dtype) for k, v in self._multiply.items()}
        self._fix_value_sub_div(self._add, functs)
        self._fix_value_sub_div(self._multiply, functs)
def _get_shape(self, el, functs):
if el in functs:
callable_, low_, high_, shape_, dtype_ = functs[el]
elif el in self._dict_properties:
# el is an attribute of an observation, for example "load_q" or "topo_vect"
low_, high_, shape_, dtype_ = self._dict_properties[el]
return shape_
def _fix_value_sub_div(self, dict_, functs):
"""dict_ is either self._add or self._multiply"""
keys = list(dict_.keys())
for k in keys:
v = dict_[k]
if isinstance(v, (list, tuple)):
v = np.array(v).astype(self.dtype)
else:
shape = self._get_shape(k, functs)
v = np.full(shape, fill_value=v, dtype=self.dtype)
dict_[k] = v
def _get_info(self, functs):
low = None
high = None
shape = None
dtype = None
self._dims = []
self._dtypes = []
for el in self._attr_to_keep:
if el in functs:
# the attribute name "el" has been put in the functs
callable_, low_, high_, shape_, dtype_ = functs[el]
if dtype_ is None:
dtype_ = dt_float
if shape_ is None:
raise RuntimeError(
f'Error: if you use the "functs" keyword for the action space, '
f"you need to provide the shape of the vector you expect. See some "
f"examples in the official documentation."
)
if low_ is None:
low_ = np.full(shape_, fill_value=-np.inf, dtype=dtype_)
elif isinstance(low_, float):
low_ = np.full(shape_, fill_value=low_, dtype=dtype_)
if high_ is None:
high_ = np.full(shape_, fill_value=np.inf, dtype=dtype_)
elif isinstance(high_, float):
high_ = np.full(shape_, fill_value=high_, dtype=dtype_)
# simulate a vector in the range low_, high_ and the right shape to test the function given
# by the user
vect_right_properties = np.random.uniform(size=shape_)
finite_both = np.isfinite(low_) & np.isfinite(high_)
fintte_low = np.isfinite(low_) & ~np.isfinite(high_)
fintte_high = ~np.isfinite(low_) & np.isfinite(high_)
vect_right_properties[finite_both] = (
vect_right_properties[finite_both]
* (high_[finite_both] - low_[finite_both])
+ low_[finite_both]
)
vect_right_properties[fintte_low] += low_[fintte_low]
try:
tmp = callable_(vect_right_properties)
except Exception as exc_:
raise RuntimeError(
f'Error for the function your provided with key "{el}". '
f"The error was :\n {exc_}"
)
if not isinstance(tmp, BaseAction):
raise RuntimeError(
f'The function you provided in the "functs" argument for key "{el}" '
f"should take a"
)
self.__func[el] = callable_
elif el in self._dict_properties:
# el is an attribute of an observation, for example "load_q" or "topo_vect"
low_, high_, shape_, dtype_ = self._dict_properties[el]
else:
li_keys = "\n\t- ".join(
sorted(list(self._dict_properties.keys()) + list(self.__func.keys()))
)
raise RuntimeError(
f'Unknown action attributes "{el}". Supported attributes are: '
f"\n\t- {li_keys}"
)
# handle the data type
if dtype is None:
dtype = dtype_
else:
if dtype_ == dt_float:
# promote whatever to float anyway
dtype = dt_float
elif dtype_ == dt_int and dtype == dt_bool:
# promote bool to int
dtype = dt_int
# handle the shape
if shape is None:
shape = shape_
else:
shape = (shape[0] + shape_[0],)
# handle low / high
# NB: the formula is: glop = gym * multiply + add
if el in self._add:
low_ = 1.0 * low_.astype(dtype)
high_ = 1.0 * high_.astype(dtype)
low_ -= self._add[el]
high_ -= self._add[el]
if el in self._multiply:
# special case if a 0 were entered
arr_ = 1.0 * self._multiply[el]
is_nzero = arr_ != 0.0
low_ = 1.0 * low_.astype(dtype)
high_ = 1.0 * high_.astype(dtype)
low_[is_nzero] /= arr_[is_nzero]
high_[is_nzero] /= arr_[is_nzero]
# "fix" the low / high : they can be inverted if self._multiply < 0. for example
tmp_l = copy.deepcopy(low_)
tmp_h = copy.deepcopy(high_)
low_ = np.minimum(tmp_h, tmp_l)
high_ = np.maximum(tmp_h, tmp_l)
if low is None:
low = low_
high = high_
else:
low = np.concatenate((low.astype(dtype), low_.astype(dtype))).astype(
dtype
)
high = np.concatenate((high.astype(dtype), high_.astype(dtype))).astype(
dtype
)
# remember where this need to be stored
self._dims.append(shape[0])
self._dtypes.append(dtype_)
return low, high, shape, dtype
def _handle_attribute(self, res, gym_act_this, attr_nm):
"""
INTERNAL
TODO
Parameters
----------
res
gym_act_this
attr_nm
Returns
-------
"""
if attr_nm in self._multiply:
gym_act_this *= self._multiply[attr_nm]
if attr_nm in self._add:
gym_act_this += self._add[attr_nm]
if attr_nm == "curtail":
gym_act_this_ = np.full(
self._act_space.n_gen, fill_value=np.NaN, dtype=dt_float
)
gym_act_this_[self._act_space.gen_renewable] = gym_act_this
gym_act_this = gym_act_this_
elif attr_nm == "curtail_mw":
gym_act_this_ = np.full(
self._act_space.n_gen, fill_value=np.NaN, dtype=dt_float
)
gym_act_this_[self._act_space.gen_renewable] = gym_act_this
gym_act_this = gym_act_this_
elif attr_nm == "redispatch":
gym_act_this_ = np.zeros(self._act_space.n_gen, dtype=dt_float)
gym_act_this_[self._act_space.gen_redispatchable] = gym_act_this
gym_act_this = gym_act_this_
setattr(res, attr_nm, gym_act_this)
return res
def get_indexes(self, key: str) -> Tuple[int, int]:
"""Allows to retrieve the indexes of the gym action that
are concerned by the attribute name `key` given in input.
Parameters
----------
key : str
the attribute name (*eg* "set_storage" or "redispatch")
Returns
-------
Tuple[int, int]
_description_
Examples
--------
You can use it like:
.. code-block:: python
gym_env = ... # an environment with a BoxActSpace
act = np.zeros(gym_env.action_space.shape)
key = "redispatch" # "redispatch", "curtail", "set_storage"
start_, end_ = gym_env.action_space.get_indexes(key)
act[start_:end_] = np.random.uniform(high=1, low=-1, size=env.gen_redispatchable.sum())
# act only modifies the redispatch with the input given (here a uniform redispatching between -1 and 1)
"""
error_msg =(f"Impossible to use the grid2op action property \"{key}\""
f"with this action space.")
if key not in self._attr_to_keep:
raise Grid2OpException(error_msg)
prev = 0
for attr_nm, where_to_put in zip(
self._attr_to_keep, self._dims
):
if attr_nm == key:
return prev, where_to_put
prev = where_to_put
raise Grid2OpException(error_msg)
def from_gym(self, gym_act):
"""
This is the function that is called to transform a gym action (in this case a numpy array!)
sent by the agent
and convert it to a grid2op action that will be sent to the underlying grid2op environment.
Parameters
----------
gym_act: ``numpy.ndarray``
the gym action
Returns
-------
grid2op_act: :class:`grid2op.Action.BaseAction`
The corresponding grid2op action.
"""
res = self._act_space()
prev = 0
for attr_nm, where_to_put, dtype in zip(
self._attr_to_keep, self._dims, self._dtypes
):
this_part = 1 * gym_act[prev:where_to_put]
if attr_nm in self.__func:
glop_act_tmp = self.__func[attr_nm](this_part)
res += glop_act_tmp
elif hasattr(res, attr_nm):
glop_dtype = self._key_dict_to_proptype[attr_nm]
if glop_dtype == dt_int:
# convert floating point actions to integer.
# NB: i round first otherwise it is cut.
this_part = np.round(this_part, 0).astype(dtype)
elif glop_dtype == dt_bool:
# convert floating point actions to bool.
# NB: it's important here the numbers are between 0 and 1
this_part = (this_part >= 0.5).astype(dt_bool)
if this_part.shape and this_part.shape[0]:
# only update the attribute if there is actually something to update
self._handle_attribute(res, this_part, attr_nm)
else:
raise RuntimeError(f'Unknown attribute "{attr_nm}".')
prev = where_to_put
return res
def close(self):
pass
def normalize_attr(self, attr_nm: str):
"""
This function normalizes the part of the space
that corresponds to the attribute `attr_nm`.
The normalization consists in having a vector between 0. and 1.
It is achieved by:
- dividing by the range (high - low)
- adding the minimum value (low).
.. note::
It only affects continuous attribute. No error / warnings are
raised if you attempt to use it on a discrete attribute.
.. warning::
This normalization relies on the `high` and `low` attribute. It cannot be done if
the attribute is not bounded (for example when its maximum limit is `np.inf`). A warning
is raised in this case.
Parameters
----------
attr_nm : str
The name of the attribute to normalize
"""
if attr_nm in self._multiply or attr_nm in self._add:
raise Grid2OpException(
f"Cannot normalize attribute \"{attr_nm}\" that you already "
"modified with either `add` or `multiply` (action space)."
)
prev = 0
for attr_tmp, where_to_put, dtype in zip(
self._attr_to_keep, self._dims, self._dtypes
):
if attr_tmp == attr_nm and dtype == dt_float:
curr_high = 1.0 * self.high[prev:where_to_put]
curr_low = 1.0 * self.low[prev:where_to_put]
finite_high = np.isfinite(curr_high)
finite_low = np.isfinite(curr_high)
both_finite = finite_high & finite_low
both_finite &= curr_high > curr_low
if np.any(~both_finite):
warnings.warn(f"The normalization of attribute \"{both_finite}\" cannot be performed entirely as "
f"there are some non finite value, or `high == `low` "
f"for some components.")
self._multiply[attr_nm] = np.ones(curr_high.shape, dtype=dtype)
self._add[attr_nm] = np.zeros(curr_high.shape, dtype=dtype)
self._multiply[attr_nm][both_finite] = (
curr_high[both_finite] - curr_low[both_finite]
)
self._add[attr_nm][both_finite] += curr_low[both_finite]
self.high[prev:where_to_put][both_finite] = 1.0
self.low[prev:where_to_put][both_finite] = 0.0
break
prev = where_to_put
# Register the concrete classes: the abstract implementation above is mixed
# with the Box class of whichever RL framework(s) are installed.
if GYM_AVAILABLE:
    # legacy "gym" flavour of the space
    from gym.spaces import Box as LegGymBox
    from grid2op.gym_compat.base_gym_attr_converter import BaseLegacyGymAttrConverter
    BoxLegacyGymActSpace = type("BoxLegacyGymActSpace",
                                (__AuxBoxGymActSpace, LegGymBox, ),
                                {"_gymnasium": False,
                                 "_BaseGymAttrConverterType": BaseLegacyGymAttrConverter,
                                 "_BoxType": LegGymBox,
                                 "__module__": __name__})
    BoxLegacyGymActSpace.__doc__ = __AuxBoxGymActSpace.__doc__
    BoxGymActSpace = BoxLegacyGymActSpace
    BoxGymActSpace.__doc__ = __AuxBoxGymActSpace.__doc__
if GYMNASIUM_AVAILABLE:
    # gymnasium flavour; when both packages are present, `BoxGymActSpace`
    # points to the gymnasium version (this branch runs last)
    from gymnasium.spaces import Box
    from grid2op.gym_compat.base_gym_attr_converter import BaseGymnasiumAttrConverter
    BoxGymnasiumActSpace = type("BoxGymnasiumActSpace",
                                (__AuxBoxGymActSpace, Box, ),
                                {"_gymnasium": True,
                                 "_BaseGymAttrConverterType": BaseGymnasiumAttrConverter,
                                 "_BoxType": Box,
                                 "__module__": __name__})
    BoxGymnasiumActSpace.__doc__ = __AuxBoxGymActSpace.__doc__
    BoxGymActSpace = BoxGymnasiumActSpace
    BoxGymActSpace.__doc__ = __AuxBoxGymActSpace.__doc__
| 28,842 | 40.145506 | 121 | py |
Grid2Op | Grid2Op-master/grid2op/gym_compat/box_gym_obsspace.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import warnings
import numpy as np
from grid2op.dtypes import dt_int, dt_bool, dt_float
from grid2op.Observation import ObservationSpace
from grid2op.Exceptions import Grid2OpException
from grid2op.gym_compat.utils import (_compute_extra_power_for_losses,
GYM_AVAILABLE,
GYMNASIUM_AVAILABLE,
check_gym_version)
# Names of every grid2op observation attribute that can be exported into the
# gym Box (bounds and dtypes are declared in `__AuxBoxGymObsSpace.__init__`).
ALL_ATTR_OBS = (
    "year",
    "month",
    "day",
    "hour_of_day",
    "minute_of_hour",
    "day_of_week",
    "gen_p",
    "gen_p_before_curtail",
    "gen_q",
    "gen_v",
    "gen_margin_up",
    "gen_margin_down",
    "load_p",
    "load_q",
    "load_v",
    "p_or",
    "q_or",
    "v_or",
    "a_or",
    "p_ex",
    "q_ex",
    "v_ex",
    "a_ex",
    "rho",
    "line_status",
    "timestep_overflow",
    "topo_vect",
    "time_before_cooldown_line",
    "time_before_cooldown_sub",
    "time_next_maintenance",
    "duration_next_maintenance",
    "target_dispatch",
    "actual_dispatch",
    "storage_charge",
    "storage_power_target",
    "storage_power",
    "curtailment",
    "curtailment_limit",
    "curtailment_limit_effective",
    "thermal_limit",
    "is_alarm_illegal",
    "time_since_last_alarm",
    "last_alarm",
    "attention_budget",
    "was_alarm_used_after_game_over",
    "max_step",
    "active_alert",
    "attack_under_alert",
    "time_since_last_alert",
    "alert_duration",
    "total_number_of_alert",
    "time_since_last_attack",
    "was_alert_used_after_attack",
    "theta_or",
    "theta_ex",
    "load_theta",
    "gen_theta",
)
# TODO add the alarm stuff
# TODO add the time step
# TODO add the is_illegal and co there
class __AuxBoxGymObsSpace:
"""
This class allows to convert a grid2op observation space into a gym "Box" which is
a regular Box in R^d.
It also allows to customize which part of the observation you want to use and offer capacity to
center / reduce the data or to use more complex function from the observation.
.. warning::
Depending on the presence absence of gymnasium and gym packages this class might behave differently.
In grid2op we tried to maintain compatibility both with gymnasium (newest) and gym (legacy,
no more maintained) RL packages. The behaviour is the following:
- :class:`BoxGymObsSpace` will inherit from gymnasium if it's installed
(in this case it will be :class:`BoxGymnasiumObsSpace`), otherwise it will
inherit from gym (and will be exactly :class:`BoxLegacyGymObsSpace`)
- :class:`BoxGymnasiumObsSpace` will inherit from gymnasium if it's available and never from
from gym
- :class:`BoxLegacyGymObsSpace` will inherit from gym if it's available and never from
from gymnasium
See :ref:`gymnasium_gym` for more information
.. note::
A gymnasium Box is encoded as a numpy array.
Examples
--------
If you simply want to use it you can do:
.. code-block:: python
import grid2op
env_name = "l2rpn_case14_sandbox" # or any other name
env = grid2op.make(env_name)
from grid2op.gym_compat import GymEnv, BoxGymObsSpace
gym_env = GymEnv(env)
gym_env.observation_space = BoxGymObsSpace(env.observation_space)
In this case it will extract all the features in all the observation (a detailed list is given
in the documentation at :ref:`observation_module`.
You can select the attribute you want to keep, for example:
.. code-block:: python
gym_env.observation_space = BoxGymObsSpace(env.observation_space,
attr_to_keep=['load_p', "gen_p", "rho])
You can also apply some basic transformation to the attribute of the observation before building
the resulting gym observation (which in this case is a vector). This can be done with:
.. code-block:: python
gym_env.observation_space = BoxGymObsSpace(env.observation_space,
attr_to_keep=['load_p', "gen_p", "rho"],
divide={"gen_p": env.gen_pmax},
substract={"gen_p": 0.5 * env.gen_pmax})
In the above example, the resulting "gen_p" part of the vector will be given by the following
formula: `gym_obs = (grid2op_obs - substract) / divide`.
    Hint: you can set `divide` to the standard deviation and `subtract` to the average of the
    attribute computed over a few episodes. This can be done with :class:`grid2op.utils.EpisodeStatistics` for example.
Finally, you can also modify more the attribute of the observation and add it to your box. This
can be done rather easily with the "functs" argument like:
.. code-block:: python
gym_env.observation_space = BoxGymObsSpace(env.observation_space,
attr_to_keep=["connectivity_matrix", "log_load"],
functs={"connectivity_matrix":
(lambda grid2opobs: grid2opobs.connectivity_matrix().flatten(),
0., 1.0, None, None),
"log_load":
(lambda grid2opobs: np.log(grid2opobs.load_p),
None, 10., None, None)
}
)
In this case, "functs" should be a dictionary, the "keys" should be string (keys should also be
present in the `attr_to_keep` list) and the values should count 5 elements
(callable, low, high, shape, dtype) with:
- `callable` a function taking as input a grid2op observation and returning a numpy array
- `low` (optional) (put None if you don't want to specify it, defaults to `-np.inf`) the lowest value
your numpy array can take. It can be a single number or an array with the same shape
as the return value of your function.
- `high` (optional) (put None if you don't want to specify it, defaults to `np.inf`) the highest value
your numpy array can take. It can be a single number or an array with the same shape
as the return value of your function.
- `shape` (optional) (put None if you don't want to specify it) the shape of the return value
of your function. It should be a tuple (and not a single number). By default it is computed
with by applying your function to an observation.
- `dtype` (optional, put None if you don't want to change it, defaults to np.float32) the type of
the numpy array as output of your function.
Notes
-----
The range of the values for "gen_p" / "prod_p" are not strictly `env.gen_pmin` and `env.gen_pmax`.
This is due to the "approximation" when some redispatching is performed (the precision of the
algorithm that computes the actual dispatch from the information it receives) and also because
sometimes the losses of the grid are really different that the one anticipated in the "chronics" (yes
env.gen_pmin and env.gen_pmax are not always ensured in grid2op)
"""
    def __init__(
        self,
        grid2op_observation_space,
        attr_to_keep=ALL_ATTR_OBS,
        subtract=None,
        divide=None,
        functs=None,
    ):
        """Build the Box observation space from a grid2op :class:`ObservationSpace`.

        Parameters
        ----------
        grid2op_observation_space:
            The grid2op observation space to convert. Must be an
            :class:`ObservationSpace`.
        attr_to_keep:
            Names of the grid2op observation attributes exported in the Box.
        subtract:
            Optional dict ``attr name -> offset`` (the grid2op -> gym conversion
            is ``gym = (grid2op - subtract) / divide``).
        divide:
            Optional dict ``attr name -> scale`` (see ``subtract``).
        functs:
            Optional dict ``attr name -> (callable, low, high, shape, dtype)``
            for fully custom extractions (see the class documentation).
        """
        check_gym_version(type(self)._gymnasium)
        if not isinstance(grid2op_observation_space, ObservationSpace):
            raise RuntimeError(
                f"Impossible to create a BoxGymObsSpace without providing a "
                f"grid2op observation. You provided {type(grid2op_observation_space)}"
                f'as the "grid2op_observation_space" attribute.'
            )
        self._attr_to_keep = sorted(attr_to_keep)
        ob_sp = grid2op_observation_space
        tol_redisp = (
            ob_sp.obs_env._tol_poly
        )  # add to gen_p otherwise ... well it can crash
        extra_for_losses = _compute_extra_power_for_losses(ob_sp)
        # (low, high, shape, dtype) for each supported grid2op observation attribute
        self._dict_properties = {
            "year": (
                np.zeros(1, dtype=dt_int),
                np.zeros(1, dtype=dt_int) + 2200,
                (1,),
                dt_int,
            ),
            "month": (
                np.zeros(1, dtype=dt_int),
                np.zeros(1, dtype=dt_int) + 12,
                (1,),
                dt_int,
            ),
            "day": (
                np.zeros(1, dtype=dt_int),
                np.zeros(1, dtype=dt_int) + 31,
                (1,),
                dt_int,
            ),
            "hour_of_day": (
                np.zeros(1, dtype=dt_int),
                np.zeros(1, dtype=dt_int) + 24,
                (1,),
                dt_int,
            ),
            "minute_of_hour": (
                np.zeros(1, dtype=dt_int),
                np.zeros(1, dtype=dt_int) + 60,
                (1,),
                dt_int,
            ),
            "day_of_week": (
                np.zeros(1, dtype=dt_int),
                np.zeros(1, dtype=dt_int) + 7,
                (1,),
                dt_int,
            ),
            "current_step": (
                np.zeros(1, dtype=dt_int),
                np.zeros(1, dtype=dt_int) + np.iinfo(dt_int).max,
                (1,),
                dt_int,
            ),
            # gen_p bounds are widened by the redispatch tolerance and an
            # estimate of the grid losses (see the class-level Notes)
            "gen_p": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float)
                - tol_redisp
                - extra_for_losses,
                ob_sp.gen_pmax + tol_redisp + extra_for_losses,
                (ob_sp.n_gen,),
                dt_float,
            ),
            "gen_q": (
                np.full(shape=(ob_sp.n_gen,), fill_value=-np.inf, dtype=dt_float),
                np.full(shape=(ob_sp.n_gen,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_gen,),
                dt_float,
            ),
            "gen_v": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_gen,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_gen,),
                dt_float,
            ),
            "gen_margin_up": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float),
                1.0 * ob_sp.gen_max_ramp_up,
                (ob_sp.n_gen,),
                dt_float,
            ),
            "gen_margin_down": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float),
                1.0 * ob_sp.gen_max_ramp_down,
                (ob_sp.n_gen,),
                dt_float,
            ),
            "gen_theta": (
                np.full(shape=(ob_sp.n_gen,), fill_value=-180., dtype=dt_float),
                np.full(shape=(ob_sp.n_gen,), fill_value=180., dtype=dt_float),
                (ob_sp.n_gen,),
                dt_float,
            ),
            "load_p": (
                np.full(shape=(ob_sp.n_load,), fill_value=-np.inf, dtype=dt_float),
                np.full(shape=(ob_sp.n_load,), fill_value=+np.inf, dtype=dt_float),
                (ob_sp.n_load,),
                dt_float,
            ),
            "load_q": (
                np.full(shape=(ob_sp.n_load,), fill_value=-np.inf, dtype=dt_float),
                np.full(shape=(ob_sp.n_load,), fill_value=+np.inf, dtype=dt_float),
                (ob_sp.n_load,),
                dt_float,
            ),
            "load_v": (
                np.full(shape=(ob_sp.n_load,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_load,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_load,),
                dt_float,
            ),
            "load_theta": (
                np.full(shape=(ob_sp.n_load,), fill_value=-180., dtype=dt_float),
                np.full(shape=(ob_sp.n_load,), fill_value=180., dtype=dt_float),
                (ob_sp.n_load,),
                dt_float,
            ),
            "p_or": (
                np.full(shape=(ob_sp.n_line,), fill_value=-np.inf, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "q_or": (
                np.full(shape=(ob_sp.n_line,), fill_value=-np.inf, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "a_or": (
                np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "v_or": (
                np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "theta_or": (
                np.full(shape=(ob_sp.n_line,), fill_value=-180., dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=180., dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "p_ex": (
                np.full(shape=(ob_sp.n_line,), fill_value=-np.inf, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "q_ex": (
                np.full(shape=(ob_sp.n_line,), fill_value=-np.inf, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "a_ex": (
                np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "v_ex": (
                np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "theta_ex": (
                np.full(shape=(ob_sp.n_line,), fill_value=-180., dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=180., dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "rho": (
                np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "line_status": (
                np.full(shape=(ob_sp.n_line,), fill_value=0, dtype=dt_int),
                np.full(shape=(ob_sp.n_line,), fill_value=1, dtype=dt_int),
                (ob_sp.n_line,),
                dt_int,
            ),
            "timestep_overflow": (
                np.full(
                    shape=(ob_sp.n_line,), fill_value=np.iinfo(dt_int).min, dtype=dt_int
                ),
                np.full(
                    shape=(ob_sp.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int
                ),
                (ob_sp.n_line,),
                dt_int,
            ),
            "topo_vect": (
                np.full(shape=(ob_sp.dim_topo,), fill_value=-1, dtype=dt_int),
                np.full(shape=(ob_sp.dim_topo,), fill_value=2, dtype=dt_int),
                (ob_sp.dim_topo,),
                dt_int,
            ),
            "time_before_cooldown_line": (
                np.full(shape=(ob_sp.n_line,), fill_value=0, dtype=dt_int),
                np.full(
                    shape=(ob_sp.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int
                ),
                (ob_sp.n_line,),
                dt_int,
            ),
            "time_before_cooldown_sub": (
                np.full(shape=(ob_sp.n_sub,), fill_value=0, dtype=dt_int),
                np.full(
                    shape=(ob_sp.n_sub,), fill_value=np.iinfo(dt_int).max, dtype=dt_int
                ),
                (ob_sp.n_sub,),
                dt_int,
            ),
            "time_next_maintenance": (
                np.full(shape=(ob_sp.n_line,), fill_value=-1, dtype=dt_int),
                np.full(
                    shape=(ob_sp.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int
                ),
                (ob_sp.n_line,),
                dt_int,
            ),
            "duration_next_maintenance": (
                np.full(shape=(ob_sp.n_line,), fill_value=0, dtype=dt_int),
                np.full(
                    shape=(ob_sp.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int
                ),
                (ob_sp.n_line,),
                dt_int,
            ),
            "target_dispatch": (
                np.minimum(ob_sp.gen_pmin, -ob_sp.gen_pmax),
                np.maximum(-ob_sp.gen_pmin, +ob_sp.gen_pmax),
                (ob_sp.n_gen,),
                dt_float,
            ),
            "actual_dispatch": (
                np.minimum(ob_sp.gen_pmin, -ob_sp.gen_pmax),
                np.maximum(-ob_sp.gen_pmin, +ob_sp.gen_pmax),
                (ob_sp.n_gen,),
                dt_float,
            ),
            "storage_charge": (
                np.full(shape=(ob_sp.n_storage,), fill_value=0, dtype=dt_float),
                1.0 * ob_sp.storage_Emax,
                (ob_sp.n_storage,),
                dt_float,
            ),
            "storage_power_target": (
                -1.0 * ob_sp.storage_max_p_prod,
                1.0 * ob_sp.storage_max_p_absorb,
                (ob_sp.n_storage,),
                dt_float,
            ),
            "storage_power": (
                -1.0 * ob_sp.storage_max_p_prod,
                1.0 * ob_sp.storage_max_p_absorb,
                (ob_sp.n_storage,),
                dt_float,
            ),
            "storage_theta": (
                np.full(shape=(ob_sp.n_storage,), fill_value=-180., dtype=dt_float),
                np.full(shape=(ob_sp.n_storage,), fill_value=180., dtype=dt_float),
                (ob_sp.n_storage,),
                dt_float,
            ),
            "curtailment": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_gen,), fill_value=1.0, dtype=dt_float),
                (ob_sp.n_gen,),
                dt_float,
            ),
            "curtailment_limit": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_gen,), fill_value=1.0, dtype=dt_float),
                (ob_sp.n_gen,),
                dt_float,
            ),
            "curtailment_mw": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float),
                1.0 * ob_sp.gen_pmax,
                (ob_sp.n_gen,),
                dt_float,
            ),
            "curtailment_limit_mw": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float),
                1.0 * ob_sp.gen_pmax,
                (ob_sp.n_gen,),
                dt_float,
            ),
            "thermal_limit": (
                np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "is_alarm_illegal": (
                np.full(shape=(1,), fill_value=False, dtype=dt_bool),
                np.full(shape=(1,), fill_value=True, dtype=dt_bool),
                (1,),
                dt_bool,
            ),
            "time_since_last_alarm": (
                np.full(shape=(1,), fill_value=-1, dtype=dt_int),
                np.full(shape=(1,), fill_value=np.iinfo(dt_int).max, dtype=dt_int),
                (1,),
                dt_int,
            ),
            "last_alarm": (
                np.full(shape=(ob_sp.dim_alarms,), fill_value=-1, dtype=dt_int),
                np.full(
                    shape=(ob_sp.dim_alarms,),
                    fill_value=np.iinfo(dt_int).max,
                    dtype=dt_int,
                ),
                (ob_sp.dim_alarms,),
                dt_int,
            ),
            "attention_budget": (
                np.full(shape=(1,), fill_value=-1, dtype=dt_float),
                np.full(shape=(1,), fill_value=np.inf, dtype=dt_float),
                (1,),
                dt_float,
            ),
            "was_alarm_used_after_game_over": (
                np.full(shape=(1,), fill_value=False, dtype=dt_bool),
                np.full(shape=(1,), fill_value=True, dtype=dt_bool),
                (1,),
                dt_bool,
            ),
            "delta_time": (
                np.full(shape=(1,), fill_value=0, dtype=dt_float),
                np.full(shape=(1,), fill_value=np.inf, dtype=dt_float),
                (1,),
                dt_float,
            ),
            # alert stuff
            "active_alert": (
                np.full(shape=(ob_sp.dim_alerts,), fill_value=False, dtype=dt_bool),
                np.full(shape=(ob_sp.dim_alerts,), fill_value=True, dtype=dt_bool),
                (ob_sp.dim_alerts,),
                dt_bool,
            ),
            "time_since_last_alert": (
                np.full(shape=(ob_sp.dim_alerts,), fill_value=-1, dtype=dt_int),
                np.full(shape=(ob_sp.dim_alerts,), fill_value=np.iinfo(dt_int).max, dtype=dt_int),
                (ob_sp.dim_alerts,),
                dt_int,
            ),
            "alert_duration": (
                np.full(shape=(ob_sp.dim_alerts,), fill_value=-1, dtype=dt_int),
                np.full(shape=(ob_sp.dim_alerts,), fill_value=np.iinfo(dt_int).max, dtype=dt_int),
                (ob_sp.dim_alerts,),
                dt_int,
            ),
            "total_number_of_alert": (
                np.full(shape=(1 if ob_sp.dim_alerts else 0,), fill_value=-1, dtype=dt_int),
                np.full(shape=(1 if ob_sp.dim_alerts else 0,), fill_value=np.iinfo(dt_int).max, dtype=dt_int),
                (1 if ob_sp.dim_alerts else 0,),
                dt_int,
            ),
            "time_since_last_attack": (
                np.full(shape=(ob_sp.dim_alerts,), fill_value=-1, dtype=dt_int),
                np.full(shape=(ob_sp.dim_alerts,), fill_value=np.iinfo(dt_int).max, dtype=dt_int),
                (ob_sp.dim_alerts,),
                dt_int,
            ),
            "was_alert_used_after_attack": (
                np.full(shape=(ob_sp.dim_alerts,), fill_value=-1, dtype=dt_int),
                np.full(shape=(ob_sp.dim_alerts,), fill_value=1, dtype=dt_int),
                (ob_sp.dim_alerts,),
                dt_int,
            ),
            "attack_under_alert": (
                np.full(shape=(ob_sp.dim_alerts,), fill_value=-1, dtype=dt_int),
                np.full(shape=(ob_sp.dim_alerts,), fill_value=1, dtype=dt_int),
                (ob_sp.dim_alerts,),
                dt_int,
            ),
        }
        # aliases for deprecated / alternate attribute names
        # NOTE(review): this overwrites the dt_float "delta_time" entry declared
        # above with the integer "current_step" bounds — looks unintentional,
        # TODO confirm against the grid2op observation semantics
        self._dict_properties["max_step"] = copy.deepcopy(self._dict_properties["current_step"])
        self._dict_properties["delta_time"] = copy.deepcopy(self._dict_properties["current_step"])
        self._dict_properties["prod_p"] = copy.deepcopy(self._dict_properties["gen_p"])
        self._dict_properties["prod_q"] = copy.deepcopy(self._dict_properties["gen_q"])
        self._dict_properties["prod_v"] = copy.deepcopy(self._dict_properties["gen_v"])
        self._dict_properties["gen_p_before_curtail"] = copy.deepcopy(self._dict_properties["gen_p"])
        self._dict_properties["curtailment_limit_effective"] = copy.deepcopy(self._dict_properties[
            "curtailment_limit"
        ])
        if functs is None:
            functs = {}
        # each key of "functs" must also be exported (be part of attr_to_keep)
        for key in functs.keys():
            if key not in self._attr_to_keep:
                raise RuntimeError(
                    f'The key {key} is present in the "functs" dictionary but not in the '
                    f'"attr_to_keep". This is not consistent: either ignore this function, '
                    f'in that case remove "{key}" from "functs" or you want to add '
                    f'something to your observation, in that case add it to "attr_to_keep"'
                )
        if subtract is None:
            subtract = {}
        self._subtract = subtract
        if divide is None:
            divide = {}
        self._divide = divide
        # handle the "functional" part
        self._template_obs = ob_sp._template_obj.copy()
        self.__func = {}
        self._dims = None
        low, high, shape, dtype = self._get_info(functs)
        # initialize the base container
        type(self)._BoxType.__init__(self, low=low, high=high, shape=shape, dtype=dtype)
        # convert data in `_add` and `_multiply` to the right type
        # self._subtract = {k: v.astype(self.dtype) for k, v in self._subtract.items()}
        # self._divide = {k: v.astype(self.dtype) for k, v in self._divide.items()}
        self._fix_value_sub_div(self._subtract, functs)
        self._fix_value_sub_div(self._divide, functs)
def _get_shape(self, el, functs):
if el in functs:
callable_, low_, high_, shape_, dtype_ = functs[el]
elif el in self._dict_properties:
# el is an attribute of an observation, for example "load_q" or "topo_vect"
low_, high_, shape_, dtype_ = self._dict_properties[el]
return shape_
def _fix_value_sub_div(self, dict_, functs):
"""dict_ is either self._subtract or self._divide"""
keys = list(dict_.keys())
for k in keys:
v = dict_[k]
if isinstance(v, (list, tuple)):
v = np.array(v).astype(self.dtype)
else:
shape = self._get_shape(k, functs)
v = np.full(shape, fill_value=v, dtype=self.dtype)
dict_[k] = v
def _get_info(self, functs):
low = None
high = None
shape = None
dtype = None
self._dims = []
for el in self._attr_to_keep:
if el in functs:
# the attribute name "el" has been put in the functs
try:
callable_, low_, high_, shape_, dtype_ = functs[el]
except Exception as exc_:
raise RuntimeError(
f'When using keyword argument "functs" you need to provide something '
f"like: (callable_, low_, high_, shape_, dtype_) for each key. "
f'There was an error with "{el}".'
f"The error was:\n {exc_}"
)
try:
tmp = callable_(self._template_obs.copy())
except Exception as exc_:
raise RuntimeError(
f'Error for the function your provided with key "{el}" (using the'
f'"functs" dictionary) '
f"The error was :\n {exc_}"
)
if not isinstance(tmp, np.ndarray):
raise RuntimeError(
f'The result of the function you provided as part of the "functs"'
f"dictionary for key {el}"
f"do not return a numpy array. This is not supported."
)
self.__func[el] = callable_
if dtype_ is None:
dtype_ = dt_float
if shape_ is None:
shape_ = tmp.shape
if not isinstance(shape_, tuple):
raise RuntimeError(
"You need to provide a tuple as a shape of the output of your data"
)
if low_ is None:
low_ = np.full(shape_, fill_value=-np.inf, dtype=dtype_)
elif isinstance(low_, float):
low_ = np.full(shape_, fill_value=low_, dtype=dtype_)
if high_ is None:
high_ = np.full(shape_, fill_value=np.inf, dtype=dtype_)
elif isinstance(high_, float):
high_ = np.full(shape_, fill_value=high_, dtype=dtype_)
if np.any((tmp < low_) | (tmp > high_)):
raise RuntimeError(
f"Wrong value for low / high in the functs argument for key {el}. Please"
f"fix the low_ / high_ in the tuple ( callable_, low_, high_, shape_, dtype_)."
)
elif el in self._dict_properties:
# el is an attribute of an observation, for example "load_q" or "topo_vect"
low_, high_, shape_, dtype_ = self._dict_properties[el]
else:
li_keys = "\n\t-".join(
sorted(list(self._dict_properties.keys()) + list(self.__func.keys()))
)
raise RuntimeError(
f'Unknown observation attributes "{el}". Supported attributes are: '
f"\n{li_keys}"
)
# handle the data type
if dtype is None:
dtype = dtype_
else:
if dtype_ == dt_float:
# promote whatever to float anyway
dtype = dt_float
elif dtype_ == dt_int and dtype == dt_bool:
# promote bool to int
dtype = dt_int
# handle the shape
if shape is None:
shape = shape_
else:
shape = (shape[0] + shape_[0],)
# handle low / high
if el in self._subtract:
low_ = 1.0 * low_.astype(dtype)
high_ = 1.0 * high_.astype(dtype)
low_ -= self._subtract[el]
high_ -= self._subtract[el]
if el in self._divide:
low_ = 1.0 * low_.astype(dtype)
high_ = 1.0 * high_.astype(dtype)
low_ /= self._divide[el]
high_ /= self._divide[el]
if low is None:
low = low_
high = high_
else:
low = np.concatenate((low.astype(dtype), low_.astype(dtype))).astype(
dtype
)
high = np.concatenate((high.astype(dtype), high_.astype(dtype))).astype(
dtype
)
# remember where this need to be stored
self._dims.append(shape[0])
return low, high, shape, dtype
def _handle_attribute(self, grid2op_observation, attr_nm):
res = getattr(grid2op_observation, attr_nm).astype(self.dtype)
if attr_nm in self._subtract:
res -= self._subtract[attr_nm]
if attr_nm in self._divide:
res /= self._divide[attr_nm]
return res
def to_gym(self, grid2op_observation):
"""
This is the function that is called to transform a grid2Op observation, sent by the grid2op environment
and convert it to a numpy array (an element of a gym Box)
Parameters
----------
grid2op_observation:
The grid2op observation (as a grid2op object)
Returns
-------
res: :class:`numpy.ndarray`
A numpy array compatible with the openAI gym Box that represents the action space.
"""
res = np.empty(shape=self.shape, dtype=self.dtype)
prev = 0
for attr_nm, where_to_put in zip(self._attr_to_keep, self._dims):
if attr_nm in self.__func:
tmp = self.__func[attr_nm](grid2op_observation)
elif hasattr(grid2op_observation, attr_nm):
tmp = self._handle_attribute(grid2op_observation, attr_nm)
else:
raise RuntimeError(f'Unknown attribute "{attr_nm}".')
res[prev:where_to_put] = tmp
prev = where_to_put
return res
def close(self):
pass
def normalize_attr(self, attr_nm: str):
"""
This function normalizes the part of the space
that corresponds to the attribute `attr_nm`.
The normalization consists in having a vector between 0. and 1.
It is achieved by:
- dividing by the range (high - low)
- adding the minimum value (low).
.. note::
It only affects continuous attribute. No error / warnings are
raised if you attempt to use it on a discrete attribute.
.. warning::
This normalization relies on the `high` and `low` attribute. It cannot be done if
the attribute is not bounded (for example when its maximum limit is `np.inf`). A warning
is raised in this case.
Parameters
----------
attr_nm : `str`
The name of the attribute to normalize
"""
if attr_nm in self._divide or attr_nm in self._subtract:
raise Grid2OpException(
f"Cannot normalize attribute \"{attr_nm}\" that you already "
f"modified with either `divide` or `subtract` (observation space)."
)
prev = 0
if self.dtype != dt_float:
raise Grid2OpException(
"Cannot normalize attribute with a observation "
"space that is not float !"
)
for attr_tmp, where_to_put in zip(self._attr_to_keep, self._dims):
if attr_tmp == attr_nm:
curr_high = 1.0 * self.high[prev:where_to_put]
curr_low = 1.0 * self.low[prev:where_to_put]
finite_high = np.isfinite(curr_high)
finite_low = np.isfinite(curr_high)
both_finite = finite_high & finite_low
both_finite &= curr_high > curr_low
if np.any(~both_finite):
warnings.warn(f"The normalization of attribute \"{both_finite}\" cannot be performed entirely as "
f"there are some non finite value, or `high == `low` "
f"for some components.")
self._divide[attr_nm] = np.ones(curr_high.shape, dtype=self.dtype)
self._subtract[attr_nm] = np.zeros(curr_high.shape, dtype=self.dtype)
self._divide[attr_nm][both_finite] = (
curr_high[both_finite] - curr_low[both_finite]
)
self._subtract[attr_nm][both_finite] += curr_low[both_finite]
self.high[prev:where_to_put][both_finite] = 1.0
self.low[prev:where_to_put][both_finite] = 0.0
break
prev = where_to_put
if GYM_AVAILABLE:
    # legacy "gym" is installed: build a concrete class inheriting from gym.spaces.Box
    from gym.spaces import Box as LegGymBox
    from grid2op.gym_compat.base_gym_attr_converter import BaseLegacyGymAttrConverter
    # the class is created dynamically so the shared implementation (__AuxBoxGymObsSpace)
    # can be combined with whichever Box type (gym or gymnasium) is available
    BoxLegacyGymObsSpace = type("BoxLegacyGymObsSpace",
                                (__AuxBoxGymObsSpace, LegGymBox, ),
                                {"_gymnasium": False,
                                 "_BaseGymAttrConverterType": BaseLegacyGymAttrConverter,
                                 "_BoxType": LegGymBox,
                                 "__module__": __name__})
    BoxLegacyGymObsSpace.__doc__ = __AuxBoxGymObsSpace.__doc__
    # default alias; overridden below if gymnasium is also available
    BoxGymObsSpace = BoxLegacyGymObsSpace
    BoxGymObsSpace.__doc__ = __AuxBoxGymObsSpace.__doc__
if GYMNASIUM_AVAILABLE:
    # gymnasium is installed: build a concrete class inheriting from gymnasium.spaces.Box
    from gymnasium.spaces import Box
    from grid2op.gym_compat.base_gym_attr_converter import BaseGymnasiumAttrConverter
    BoxGymnasiumObsSpace = type("BoxGymnasiumObsSpace",
                                (__AuxBoxGymObsSpace, Box, ),
                                {"_gymnasium": True,
                                 "_BaseGymAttrConverterType": BaseGymnasiumAttrConverter,
                                 "_BoxType": Box,
                                 "__module__": __name__})
    BoxGymnasiumObsSpace.__doc__ = __AuxBoxGymObsSpace.__doc__
    # gymnasium takes precedence over legacy gym for the public alias
    BoxGymObsSpace = BoxGymnasiumObsSpace
    BoxGymObsSpace.__doc__ = __AuxBoxGymObsSpace.__doc__
| 37,331 | 40.069307 | 125 | py |
Grid2Op | Grid2Op-master/grid2op/gym_compat/continuous_to_discrete.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import numpy as np
from grid2op.dtypes import dt_int
from grid2op.gym_compat.utils import GYM_AVAILABLE, GYMNASIUM_AVAILABLE
# from gym.spaces import Box, MultiDiscrete
# from grid2op.gym_compat.base_gym_attr_converter import BaseGymAttrConverter
class __AuxContinuousToDiscreteConverter:
    """
    TODO doc in progress
    Some RL algorithms are particularly suited for dealing with discrete action space or observation space.
    This "AttributeConverter" is responsible to convert continuous space to discrete space. The way it does
    it is by using bins. It uses `np.linspace` to compute the bins.
    We recommend using an odd number of bins (eg 3, 7 or 9 for example).
    .. warning::
        Depending on the presence / absence of gymnasium and gym packages this class might behave differently.
        In grid2op we tried to maintain compatibility both with gymnasium (newest) and gym (legacy,
        no more maintained) RL packages. The behaviour is the following:
        - :class:`ContinuousToDiscreteConverter` will inherit from gymnasium if it's installed
          (in this case it will be :class:`ContinuousToDiscreteConverterGymnasium`), otherwise it will
          inherit from gym (and will be exactly :class:`ContinuousToDiscreteConverterLegacyGym`)
        - :class:`ContinuousToDiscreteConverterGymnasium` will inherit from gymnasium if it's available and never
          from gym
        - :class:`ContinuousToDiscreteConverterLegacyGym` will inherit from gym if it's available and never
          from gymnasium
    See :ref:`gymnasium_gym` for more information
    Examples
    --------
    If `nb_bins` is 3 and the original input space is [-10, 10], then the split is the following:
    - 0 encodes all numbers in [-10, -3.33)
    - 1 encodes all numbers in [-3.33, 3.33)
    - 2 encodes all numbers in [3.33, 10.]
    And reciprocally, this action with :
    - 0 is understood as -5.0 (middle of the interval -10 / 0)
    - 1 is understood as 0.0 (middle of the interval represented by -10 / 10)
    - 2 is understood as 5.0 (middle of the interval represented by 0 / 10)
    If `nb_bins` is 5 and the original input space is [-10, 10], then the split is the following:
    - 0 encodes all numbers in [-10, -6)
    - 1 encodes all numbers in [-6, -2)
    - 2 encodes all numbers in [-2, 2)
    - 3 encodes all numbers in [2, 6)
    - 4 encodes all numbers in [6, 10]
    And reciprocally, this action with :
    - 0 is understood as -6.6666...
    - 1 is understood as -3.333...
    - 2 is understood as 0.
    - 3 is understood as 3.333...
    - 4 is understood as 6.6666...
    TODO add example of code on how to use this.
    """
    def __init__(self, nb_bins, init_space=None):
        # initialize the (gym or gymnasium) base attribute converter first;
        # the actual space is only built once `init_space` is known
        type(self)._BaseGymAttrConverterType.__init__(
            self, g2op_to_gym=None, gym_to_g2op=None, space=None
        )
        if nb_bins < 2:
            raise RuntimeError(
                "This do not work with less that 1 bin (if you want to ignored some part "
                "of the action_space or observation_space please use the "
                '"gym_space.ignore_attr" or "gym_space.keep_only_attr"'
            )
        self._nb_bins = nb_bins
        # all the attributes below are filled in by `initialize_space`
        self._ignored = None
        self._res = None
        self._values = None
        self._bins_size = None
        self._gen_idx = None
        if init_space is not None:
            self.initialize_space(init_space)
    def initialize_space(self, init_space):
        """Compute the bin centers / boundaries from the continuous Box `init_space`."""
        if not isinstance(init_space, type(self)._BoxType):
            raise RuntimeError(
                "Impossible to convert a gym space of type {} to a discrete space"
                " (it should be of "
                "type space.Box)"
                "".format(type(init_space))
            )
        min_ = init_space.low
        max_ = init_space.high
        self._ignored = min_ == max_  # which component are ignored
        self._res = min_
        # bin "centers": nb_bins points strictly inside [min_, max_]
        self._values = np.linspace(min_, max_, num=self._nb_bins + 2)
        self._values = self._values[
            1:-1, :
        ]  # the values that will be used when using #gym_to_glop
        # TODO there might a cleaner approach here
        # bin "boundaries": the nb_bins - 1 cut points between consecutive bins
        self._bins_size = np.linspace(min_, max_, num=2 * self._nb_bins + 1)
        self._bins_size = self._bins_size[2:-1:2, :]  # the values defining the "cuts"
        self._gen_idx = np.arange(self._bins_size.shape[-1])
        n_bins = np.ones(min_.shape[0], dtype=dt_int) * dt_int(self._nb_bins)
        n_bins[
            self._ignored
        ] = 1  # if min and max are equal, i don't want to have multiple variable
        space = type(self)._MultiDiscreteType(n_bins)
        self.base_initialize(space=space, g2op_to_gym=None, gym_to_g2op=None)
    def gym_to_g2op(self, gym_object):
        # map each bin index to the center value of that bin
        return copy.deepcopy(self._values[gym_object, self._gen_idx])
    def g2op_to_gym(self, g2op_object):
        # the bin index is the number of cut points strictly below the value
        mask = self._bins_size >= g2op_object
        mask = 1 - mask
        res = np.sum(mask, axis=0)
        # ignored components (min == max) always map to bin 0
        res[self._ignored] = 0
        return res
    def close(self):
        pass
if GYM_AVAILABLE:
    # legacy "gym" is installed: build the converter on top of the gym spaces
    from gym.spaces import Box as LegGymBox, MultiDiscrete as LegGymMultiDiscrete
    from grid2op.gym_compat.base_gym_attr_converter import BaseLegacyGymAttrConverter
    ContinuousToDiscreteConverterLegacyGym = type("ContinuousToDiscreteConverterLegacyGym",
                                                  (__AuxContinuousToDiscreteConverter, BaseLegacyGymAttrConverter, ),
                                                  {"_gymnasium": False,
                                                   "_BaseGymAttrConverterType": BaseLegacyGymAttrConverter,
                                                   "_MultiDiscreteType": LegGymMultiDiscrete,
                                                   "_BoxType": LegGymBox,
                                                   "__module__": __name__})
    ContinuousToDiscreteConverterLegacyGym.__doc__ = __AuxContinuousToDiscreteConverter.__doc__
    # default alias; overridden below if gymnasium is also available
    ContinuousToDiscreteConverter = ContinuousToDiscreteConverterLegacyGym
    ContinuousToDiscreteConverter.__doc__ = __AuxContinuousToDiscreteConverter.__doc__
if GYMNASIUM_AVAILABLE:
    # gymnasium is installed: build the converter on top of the gymnasium spaces
    from gymnasium.spaces import Box, MultiDiscrete
    from grid2op.gym_compat.base_gym_attr_converter import BaseGymnasiumAttrConverter
    ContinuousToDiscreteConverterGymnasium = type("ContinuousToDiscreteConverterGymnasium",
                                                  (__AuxContinuousToDiscreteConverter, BaseGymnasiumAttrConverter, ),
                                                  {"_gymnasium": True,
                                                   "_BaseGymAttrConverterType": BaseGymnasiumAttrConverter,
                                                   "_MultiDiscreteType": MultiDiscrete,
                                                   "_BoxType": Box,
                                                   "__module__": __name__})
    ContinuousToDiscreteConverterGymnasium.__doc__ = __AuxContinuousToDiscreteConverter.__doc__
    # gymnasium takes precedence over legacy gym for the public alias
    ContinuousToDiscreteConverter = ContinuousToDiscreteConverterGymnasium
    ContinuousToDiscreteConverter.__doc__ = __AuxContinuousToDiscreteConverter.__doc__
| 7,739 | 43.739884 | 118 | py |
Grid2Op | Grid2Op-master/grid2op/gym_compat/discrete_gym_actspace.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import warnings
# from gym.spaces import Discrete
from grid2op.Exceptions import Grid2OpException
from grid2op.Action import ActionSpace
from grid2op.Converter import IdToAct
from grid2op.gym_compat.utils import (ALL_ATTR_FOR_DISCRETE,
ATTR_DISCRETE,
GYM_AVAILABLE,
GYMNASIUM_AVAILABLE)
# TODO test that it works normally
# TODO test the casting in dt_int or dt_float depending on the data
# TODO test the scaling
# TODO doc
# TODO test the function part
class __AuxDiscreteActSpace:
    """
    TODO the documentation of this class is in progress.
    This class allows to convert a grid2op action space into a gym "Discrete". This means that the action are
    labeled, and instead of describing the action itself, you provide only its ID.
    Let's take an example of line disconnection. In the "standard" gym representation you need to:
    .. code-block:: python
        import grid2op
        import numpy as np
        from grid2op.gym_compat import GymEnv
        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)
        gym_env = GymEnv(env)
        # now do an action
        gym_act = {}
        gym_act["set_bus"] = np.zeros(env.n_line, dtype=np.int)
        l_id = ...  # the line you want to disconnect
        gym_act["set_bus"][l_id] = -1
        obs, reward, done, truncated, info = gym_env.step(gym_act)
    This has the advantage to be as close as possible to raw grid2op. But the main drawback is that
    most of RL framework are not able to do this kind of modification easily. For discrete actions,
    what is often do is:
    1) enumerate all possible actions (say you have n different actions)
    2) assign a unique id to all actions (say from 0 to n-1)
    3) have a "policy" output a vector of size n with each component
       representing an action (eg `vect[42]` represents the score the policy assign to action `42`)
    Instead of having everyone doing the modifications "on its own" we developed the :class:`DiscreteActSpace`
    that does exactly this, in a single line of code:
    .. code-block:: python
        import grid2op
        import numpy as np
        from grid2op.gym_compat import GymEnv, DiscreteActSpace
        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)
        gym_env = GymEnv(env)
        gym_env.action_space = DiscreteActSpace(env.action_space,
                                                attr_to_keep=["set_bus",
                                                              "set_line_status",
                                                              # or anything else
                                                              ]
                                                )
        # do action with ID 42
        gym_act = 42
        obs, reward, done, truncated, info = gym_env.step(gym_act)
        # to know what the action did, you can
        # print(gym_env.action_space.from_gym(gym_act))
    It is related to the :class:`MultiDiscreteActSpace` but compared to this other representation, it
    does not allow to do "multiple actions". Typically, if you use the snippets below:
    .. code-block:: python
        import grid2op
        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)
        from grid2op.gym_compat import GymEnv, MultiDiscreteActSpace, DiscreteActSpace
        gym_env1 = GymEnv(env)
        gym_env2 = GymEnv(env)
        gym_env1.action_space = MultiDiscreteActSpace(env.action_space,
                                                      attr_to_keep=['redispatch', "curtail", "one_sub_set"])
        gym_env2.action_space = DiscreteActSpace(env.action_space,
                                                 attr_to_keep=['redispatch', "curtail", "set_bus"])
    Then at each step, `gym_env1` will allow to perform a redispatching action (on any number of generators),
    a curtailment
    action (on any number of generators) __**AND**__ changing the topology at one substation. But at each
    step, the agent should predict lots of "numbers".
    On the other hand, at each step, the agent for `gym_env2` will have to predict a single integer (which is
    usually the case in most RL environment) but this action will affect redispatching on a single generator,
    perform curtailment on a single generator __**OR**__ changing the topology at one substation. But at each
    step, the agent should predict only one "number".
    The action set is then largely constrained compared to the :class:`MultiDiscreteActSpace`
    .. note::
        This class is really closely related to the :class:`grid2op.Converter.IdToAct`. It basically "maps"
        this "IdToAct" into a type of gym space, which, in this case, will be a `Discrete` one.
    .. note::
        By default, the "do nothing" action is encoded by the integer '0'.
    .. warning::
        Depending on the presence / absence of gymnasium and gym packages this class might behave differently.
        In grid2op we tried to maintain compatibility both with gymnasium (newest) and gym (legacy,
        no more maintained) RL packages. The behaviour is the following:
        - :class:`DiscreteActSpace` will inherit from gymnasium if it's installed
          (in this case it will be :class:`DiscreteActSpaceGymnasium`), otherwise it will
          inherit from gym (and will be exactly :class:`DiscreteActSpaceLegacyGym`)
        - :class:`DiscreteActSpaceGymnasium` will inherit from gymnasium if it's available and never
          from gym
        - :class:`DiscreteActSpaceLegacyGym` will inherit from gym if it's available and never
          from gymnasium
    See :ref:`gymnasium_gym` for more information
    Examples
    --------
    We recommend to use it like:
    .. code-block:: python
        import grid2op
        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)
        from grid2op.gym_compat import GymEnv, DiscreteActSpace
        gym_env = GymEnv(env)
        gym_env.observation_space = DiscreteActSpace(env.observation_space,
                                                     attr_to_keep=['redispatch', "curtail", "set_bus"])
    The possible attribute you can provide in the "attr_to_keep" are:
    - "set_line_status"
    - "set_line_status_simple" (grid2op >= 1.6.6) : set line status adds 5 actions per powerlines:
      1) disconnect it
      2) connect origin side to busbar 1 and extremity side to busbar 1
      3) connect origin side to busbar 1 and extremity side to busbar 2
      4) connect origin side to busbar 2 and extremity side to busbar 1
      5) connect origin side to busbar 2 and extremity side to busbar 2
      This is "over complex" for most use case where you just want to "connect it"
      or "disconnect it". If you want the simplest version, just use "set_line_status_simple".
    - "change_line_status"
    - "set_bus": corresponds to changing the topology using the "set_bus" (equivalent to the
      "one_sub_set" keyword in the "attr_to_keep" of the :class:`MultiDiscreteActSpace`)
    - "change_bus": corresponds to changing the topology using the "change_bus" (equivalent to the
      "one_sub_change" keyword in the "attr_to_keep" of the :class:`MultiDiscreteActSpace`)
    - "redispatch"
    - "set_storage"
    - "curtail"
    - "curtail_mw" (same effect as "curtail")
    If you do not want (each time) to build all the actions from the action space, but would rather
    save the actions you find the most interesting and then reload them, you can, for example:
    .. code-block:: python
        import grid2op
        from grid2op.gym_compat import GymEnv, DiscreteActSpace
        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)
        gym_env = GymEnv(env)
        action_list = ...  # a list of action, that can be processed by
        # IdToAct.init_converter (all_actions): see
        # https://grid2op.readthedocs.io/en/latest/converter.html#grid2op.Converter.IdToAct.init_converter
        gym_env.observation_space = DiscreteActSpace(env.observation_space,
                                                     action_list=action_list)
    .. note::
        This last version (providing explicitly the actions you want to keep and their ID)
        is much (much) safer and reproducible. Indeed, the
        actions usable by your agent will be the same (and in the same order)
        regardless of the grid2op version, of the person using it, of pretty
        much everything.
        It might not be consistent (between different grid2op versions)
        if the actions are built from scratch (for example, depending on the
        grid2op version other types of actions can be made, such as curtailment,
        or actions on storage units) like it's the case with the key-words
        (*eg* "set_bus") you pass as argument in the `attr_to_keep`
    """
    def __init__(
        self,
        grid2op_action_space,
        attr_to_keep=ALL_ATTR_FOR_DISCRETE,
        nb_bins=None,
        action_list=None,
    ):
        if not isinstance(grid2op_action_space, ActionSpace):
            raise Grid2OpException(
                f"Impossible to create a BoxGymActSpace without providing a "
                f"grid2op action_space. You provided {type(grid2op_action_space)}"
                f'as the "grid2op_action_space" attribute.'
            )
        if nb_bins is None:
            # default number of bins used to discretize the continuous attributes
            nb_bins = {"redispatch": 7, "set_storage": 7, "curtail": 7}
        if "raise_alert" in attr_to_keep or "raise_alarm" in attr_to_keep:
            raise Grid2OpException("This converter cannot be use to raise alarm or raise alert. "
                                   "Please use the MultiDiscreteActSpace space for this purpose.")
        act_sp = grid2op_action_space
        self.action_space = copy.deepcopy(act_sp)
        if attr_to_keep == ALL_ATTR_FOR_DISCRETE:
            # by default, i remove all the attributes that are not supported by the action type
            # i do not do that if the user specified specific attributes to keep. This is his responsibility in
            # in this case
            attr_to_keep = {
                el for el in attr_to_keep if grid2op_action_space.supports_type(el)
            }
        else:
            if action_list is not None:
                raise Grid2OpException(
                    "Impossible to specify a list of attributes "
                    "to keep (argument attr_to_keep) AND a list of "
                    "action to use (argument action_list)."
                )
        for el in attr_to_keep:
            if el not in ATTR_DISCRETE and action_list is None:
                # warn when a continuous attribute will be discretized through bins
                warnings.warn(
                    f'The class "DiscreteActSpace" should mainly be used to consider only discrete '
                    f"actions (eg. set_line_status, set_bus or change_bus). Though it is possible to use "
                    f'"{el}" when building it, be aware that this continuous action will be treated '
                    f"as discrete by splitting it into bins. "
                    f'Consider using the "BoxGymActSpace" for these attributes.'
                )
        self._attr_to_keep = sorted(attr_to_keep)
        self._nb_bins = nb_bins
        # maps each supported attribute name to the action-space method that
        # enumerates all the corresponding unitary actions
        self.dict_properties = {
            "set_line_status": act_sp.get_all_unitary_line_set,
            "change_line_status": act_sp.get_all_unitary_line_change,
            "set_bus": act_sp.get_all_unitary_topologies_set,
            "change_bus": act_sp.get_all_unitary_topologies_change,
            "redispatch": act_sp.get_all_unitary_redispatch,
            "set_storage": act_sp.get_all_unitary_storage,
            "curtail": act_sp.get_all_unitary_curtail,
            "curtail_mw": act_sp.get_all_unitary_curtail,
            # "raise_alarm": act_sp.get_all_unitary_alarm,
            # "raise_alert": act_sp.get_all_unitary_alert,
            "set_line_status_simple": act_sp.get_all_unitary_line_set_simple,
        }
        if action_list is None:
            # actions are enumerated from the kept attributes
            self.converter = None
            n_act = self._get_info()
        else:
            # the user provided the exact list of actions to use
            self.converter = IdToAct(self.action_space)
            self.converter.init_converter(all_actions=action_list)
            n_act = self.converter.n
        # initialize the base container
        type(self)._DiscreteType.__init__(self, n=n_act)
    def _get_info(self):
        """Enumerate all unitary actions for the kept attributes, build the
        IdToAct converter from them, and return the number of actions."""
        converter = IdToAct(self.action_space)
        # index 0 is always the "do nothing" action
        li_act = [self.action_space()]
        for attr_nm in self._attr_to_keep:
            if attr_nm in self.dict_properties:
                if attr_nm not in self._nb_bins:
                    li_act += self.dict_properties[attr_nm](self.action_space)
                else:
                    # continuous attribute: discretize it with the configured bins
                    if attr_nm == "curtail" or attr_nm == "curtail_mw":
                        li_act += self.dict_properties[attr_nm](
                            self.action_space, num_bin=self._nb_bins[attr_nm]
                        )
                    else:
                        li_act += self.dict_properties[attr_nm](
                            self.action_space,
                            num_down=self._nb_bins[attr_nm],
                            num_up=self._nb_bins[attr_nm],
                        )
            else:
                li_keys = "\n\t- ".join(sorted(list(self.dict_properties.keys())))
                raise RuntimeError(
                    f'Unknown action attributes "{attr_nm}". Supported attributes are: '
                    f"\n\t- {li_keys}"
                )
        converter.init_converter(li_act)
        self.converter = converter
        return self.converter.n
    def from_gym(self, gym_act):
        """
        This is the function that is called to transform a gym action (in this case a numpy array!)
        sent by the agent
        and convert it to a grid2op action that will be sent to the underlying grid2op environment.
        Parameters
        ----------
        gym_act: ``int``
            the gym action (a single integer for this action space)
        Returns
        -------
        grid2op_act: :class:`grid2op.Action.BaseAction`
            The corresponding grid2op action.
        """
        res = self.converter.all_actions[int(gym_act)]
        return res
    def close(self):
        pass
if GYM_AVAILABLE:
    # legacy "gym" is installed: build the action space on top of gym.spaces.Discrete
    from gym.spaces import Discrete as LegGymDiscrete
    from grid2op.gym_compat.box_gym_actspace import BoxLegacyGymActSpace
    from grid2op.gym_compat.continuous_to_discrete import ContinuousToDiscreteConverterLegacyGym
    DiscreteActSpaceLegacyGym = type("DiscreteActSpaceLegacyGym",
                                     (__AuxDiscreteActSpace, LegGymDiscrete, ),
                                     {"_gymnasium": False,
                                      "_DiscreteType": LegGymDiscrete,
                                      "_BoxGymActSpaceType": BoxLegacyGymActSpace,
                                      "_ContinuousToDiscreteConverterType": ContinuousToDiscreteConverterLegacyGym,
                                      "__module__": __name__})
    DiscreteActSpaceLegacyGym.__doc__ = __AuxDiscreteActSpace.__doc__
    # default alias; overridden below if gymnasium is also available
    DiscreteActSpace = DiscreteActSpaceLegacyGym
    DiscreteActSpace.__doc__ = __AuxDiscreteActSpace.__doc__
if GYMNASIUM_AVAILABLE:
    # gymnasium is installed: build the action space on top of gymnasium.spaces.Discrete
    from gymnasium.spaces import Discrete
    from grid2op.gym_compat.box_gym_actspace import BoxGymnasiumActSpace
    from grid2op.gym_compat.continuous_to_discrete import ContinuousToDiscreteConverterGymnasium
    # BUG FIX: the dynamically created class used to be named
    # "MultiDiscreteActSpaceGymnasium" (a copy/paste slip), which made
    # DiscreteActSpaceGymnasium.__name__ (and therefore its repr) inconsistent
    # with the variable it is bound to.
    DiscreteActSpaceGymnasium = type("DiscreteActSpaceGymnasium",
                                     (__AuxDiscreteActSpace, Discrete, ),
                                     {"_gymnasium": True,
                                      "_DiscreteType": Discrete,
                                      "_BoxGymActSpaceType": BoxGymnasiumActSpace,
                                      "_ContinuousToDiscreteConverterType": ContinuousToDiscreteConverterGymnasium,
                                      "__module__": __name__})
    DiscreteActSpaceGymnasium.__doc__ = __AuxDiscreteActSpace.__doc__
    # gymnasium takes precedence over legacy gym for the public alias
    DiscreteActSpace = DiscreteActSpaceGymnasium
    DiscreteActSpace.__doc__ = __AuxDiscreteActSpace.__doc__
| 17,038 | 44.316489 | 115 | py |
Grid2Op | Grid2Op-master/grid2op/gym_compat/gym_act_space.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from collections import OrderedDict
import warnings
import numpy as np
from grid2op.Environment import (
Environment,
MultiMixEnvironment,
BaseMultiProcessEnvironment,
)
from grid2op.Action import BaseAction, ActionSpace
from grid2op.dtypes import dt_int, dt_bool, dt_float
from grid2op.Converter.Converters import Converter
from grid2op.gym_compat.utils import GYM_AVAILABLE, GYMNASIUM_AVAILABLE
class __AuxGymActionSpace:
"""
This class enables the conversion of the action space into a gym "space".
Resulting action space will be a :class:`gym.spaces.Dict`.
**NB** it is NOT recommended to use the sample of the gym action space. Please use the sampling (
    if available) of the original action space instead [if not available this means there is no
implemented way to generate reliable random action]
**Note** that gym space converted with this class should be seeded independently. It is NOT seeded
when calling :func:`grid2op.Environment.Environment.seed`.
.. warning::
Depending on the presence absence of gymnasium and gym packages this class might behave differently.
In grid2op we tried to maintain compatibility both with gymnasium (newest) and gym (legacy,
no more maintained) RL packages. The behaviour is the following:
- :class:`GymActionSpace` will inherit from gymnasium if it's installed
(in this case it will be :class:`GymnasiumActionSpace`), otherwise it will
inherit from gym (and will be exactly :class:`LegacyGymActionSpace`)
- :class:`GymnasiumActionSpace` will inherit from gymnasium if it's available and never from
from gym
- :class:`LegacyGymActionSpace` will inherit from gym if it's available and never from
from gymnasium
See :ref:`gymnasium_gym` for more information
.. note::
A gymnasium Dict is encoded as a OrderedDict (`from collection import OrderedDict`)
see the example section for more information.
Examples
--------
For the "l2rpn_case14_sandbox" environment, a code using :class:`BoxGymActSpace` can look something like
(if you want to build action "by hands"):
.. code-block:: python
import grid2op
from grid2op.gym_compat import GymEnv
import numpy as np
env_name = "l2rpn_case14_sandbox"
env = grid2op.make(env_name)
gym_env = GymEnv(env)
obs = gym_env.reset() # obs will be an OrderedDict (default, but you can customize it)
# is equivalent to "do nothing"
act = {}
obs, reward, done, truncated, info = gym_env.step(act)
# you can also do a random action:
act = gym_env.action_space.sample()
print(gym_env.action_space.from_gym(act))
obs, reward, done, truncated, info = gym_env.step(act)
# you can chose the action you want to do (say "redispatch" for example)
# here a random redispatch action
act = {}
attr_nm = "redispatch"
act[attr_nm] = np.random.uniform(high=gym_env.action_space.spaces[attr_nm].low,
low=gym_env.action_space.spaces[attr_nm].high,
size=env.n_gen)
print(gym_env.action_space.from_gym(act))
obs, reward, done, truncated, info = gym_env.step(act)
"""
# deals with the action space (it depends how it's encoded...)
keys_grid2op_2_human = {
"prod_p": "prod_p",
"prod_v": "prod_v",
"load_p": "load_p",
"load_q": "load_q",
"_redispatch": "redispatch",
"_set_line_status": "set_line_status",
"_switch_line_status": "change_line_status",
"_set_topo_vect": "set_bus",
"_change_bus_vect": "change_bus",
"_hazards": "hazards",
"_maintenance": "maintenance",
"_storage_power": "set_storage",
"_curtail": "curtail",
"_raise_alarm": "raise_alarm",
"_raise_alert": "raise_alert",
"shunt_p": "_shunt_p",
"shunt_q": "_shunt_q",
"shunt_bus": "_shunt_bus",
}
keys_human_2_grid2op = {v: k for k, v in keys_grid2op_2_human.items()}
def __init__(self, env, converter=None, dict_variables=None):
    """
    Build the gym action space from a grid2op environment (or, deprecated,
    an action space), optionally through a grid2op ``Converter``.

    note: for consistency with GymObservationSpace, "action_space" here can be an environment or
    an action space or a converter
    """
    if dict_variables is None:
        dict_variables = {}
    if isinstance(
        env, (Environment, MultiMixEnvironment, BaseMultiProcessEnvironment)
    ):
        # action_space is an environment
        self.initial_act_space = env.action_space
        self._init_env = env
    elif isinstance(env, ActionSpace) and converter is None:
        # deprecated path: building directly from an action space
        warnings.warn(
            "It is now deprecated to initialize an Converter with an "
            "action space. Please use an environment instead."
        )
        self.initial_act_space = env
        self._init_env = None
    else:
        raise RuntimeError(
            "GymActionSpace must be created with an Environment of an ActionSpace (or a Converter)"
        )
    dict_ = {}
    # TODO Make sure it works well !
    if converter is not None and isinstance(converter, Converter):
        # a converter allows to ... convert the data so they have specific gym space
        # in that case the converter defines the gym Dict content itself
        self.initial_act_space = converter
        dict_ = converter.get_gym_dict(type(self))
        self.__is_converter = True
    elif converter is not None:
        raise RuntimeError(
            'Impossible to initialize a gym action space with a converter of type "{}" '
            "A converter should inherit from grid2op.Converter".format(
                type(converter)
            )
        )
    else:
        # no converter: build one gym space per vectorized action attribute
        self._fill_dict_act_space(
            dict_, self.initial_act_space, dict_variables=dict_variables
        )
        # expose the "human readable" names rather than the internal ones
        dict_ = self._fix_dict_keys(dict_)
        self.__is_converter = False
    super().__init__(dict_, dict_variables)
def reencode_space(self, key, fun):
    """
    This function is used to reencode the action space. For example, it can be used to scale
    the action into values close to 0., it can also be used to encode continuous variables into
    discrete variables or the other way around etc.

    Parameters
    ----------
    key: ``str``
        Which part of the action space you want to re-encode (human readable name,
        e.g. "redispatch", or internal grid2op name).
    fun: :class:`BaseGymAttrConverter`
        Put `None` to deactivate the feature (it will be hidden from the action space).
        It can also be a `BaseGymAttrConverter`.

    Returns
    -------
    res:
        A **new** action space instance with the updated encoding (the current
        instance is not modified in place).

    Notes
    ------
    We highly recommend to set it up at the beginning of your script
    and not to modify it afterwards.
    'fun' should be deep copiable (meaning that if `copy.deepcopy(fun)` is called, then it does not crash).
    """
    if self._init_env is None:
        # a space built (deprecated path) from a bare action space cannot be rebuilt
        raise RuntimeError(
            "Impossible to reencode a space that has been initialized with an "
            "action space as input. Please provide a valid environment instead."
        )
    if self.__is_converter:
        raise RuntimeError(
            "Impossible to reencode a space that is a converter space."
        )
    my_dict = self.get_dict_encoding()
    if fun is not None and not isinstance(fun, type(self)._BaseGymAttrConverterType):
        raise RuntimeError(
            "Impossible to initialize a converter with a function of type {}".format(
                type(fun)
            )
        )
    # translate the human readable key to the internal grid2op name if needed
    if key in self.keys_human_2_grid2op:
        key2 = self.keys_human_2_grid2op[key]
    else:
        key2 = key
    # initialize the converter on the space it is replacing (if not done yet)
    if fun is not None and not fun.is_init_space():
        if key2 in my_dict:
            fun.initialize_space(my_dict[key2])
        elif key in self.spaces:
            fun.initialize_space(self.spaces[key])
        else:
            raise RuntimeError(f"Impossible to find key {key} in your action space")
    my_dict[key2] = fun
    # build a brand new space with the updated encoding
    res = type(self)(env=self._init_env, dict_variables=my_dict)
    return res
def _fill_dict_act_space(self, dict_, action_space, dict_variables):
    """Fill ``dict_`` with one gym space per vectorized action attribute.

    Keys are the internal grid2op attribute names; ``dict_variables`` may
    provide a dedicated encoding (or ``None`` to drop the attribute).
    """
    # TODO what about dict_variables !!!
    for attr_nm, sh, dt in zip(
        action_space.attr_list_vect, action_space.shape, action_space.dtype
    ):
        if sh == 0:
            # do not add "empty" (=0 dimension) arrays to gym otherwise it crashes
            continue
        my_type = None
        shape = (sh,)
        if attr_nm in dict_variables:
            # case where the user specified a dedicated encoding
            if dict_variables[attr_nm] is None:
                # none is by default to disable this feature
                continue
            my_type = dict_variables[attr_nm].my_space
        elif dt == dt_int:
            # discrete action space
            if attr_nm == "_set_line_status":
                # -1: disconnect, 0: do nothing, 1: reconnect
                my_type = type(self)._BoxType(low=-1, high=1, shape=shape, dtype=dt)
            elif attr_nm == "_set_topo_vect":
                # -1: disconnect, 0: do nothing, 1 / 2: bus id
                my_type = type(self)._BoxType(low=-1, high=2, shape=shape, dtype=dt)
        elif dt == dt_bool:
            # boolean observation space
            my_type = self._boolean_type(sh)
            # case for all "change" action and maintenance / hazards
        else:
            # continuous observation space
            low = float("-inf")
            high = float("inf")
            shape = (sh,)
            SpaceType = type(self)._BoxType
            if attr_nm == "prod_p":
                low = action_space.gen_pmin
                high = action_space.gen_pmax
                shape = None
            elif attr_nm == "prod_v":
                # voltages can't be negative
                low = 0.0
            elif attr_nm == "_redispatch":
                # redispatch: bounded by the ramps, 0 for non dispatchable units
                low = -1.0 * action_space.gen_max_ramp_down
                high = 1.0 * action_space.gen_max_ramp_up
                low[~action_space.gen_redispatchable] = 0.0
                high[~action_space.gen_redispatchable] = 0.0
            elif attr_nm == "_curtail":
                # curtailment: ratio in [0, 1]; -1 (="no curtailment") for non renewable units
                low = np.zeros(action_space.n_gen, dtype=dt_float)
                high = np.ones(action_space.n_gen, dtype=dt_float)
                low[~action_space.gen_renewable] = -1.0
                high[~action_space.gen_renewable] = -1.0
            elif attr_nm == "_storage_power":
                # storage power
                low = -1.0 * action_space.storage_max_p_prod
                high = 1.0 * action_space.storage_max_p_absorb
            my_type = SpaceType(low=low, high=high, shape=shape, dtype=dt)
        if my_type is None:
            # if nothing has been found in the specific cases above
            my_type = self._generic_gym_space(dt, sh)
        dict_[attr_nm] = my_type
def _fix_dict_keys(self, dict_: dict) -> dict:
res = {}
for k, v in dict_.items():
res[self.keys_grid2op_2_human[k]] = v
return res
def from_gym(self, gymlike_action: OrderedDict) -> object:
    """
    Transform a gym-like action (such as the output of "sample()") into a grid2op action

    Parameters
    ----------
    gymlike_action: :class:`gym.spaces.dict.OrderedDict`
        The action, represented as a gym action (ordered dict)

    Returns
    -------
    An action that can be understood by the given action_space (either a grid2Op action if the
    original action space was used, or a Converter)
    """
    if self.__is_converter:
        # case where the action space comes from a converter, in this case the converter takes the
        # delegation to convert the action to openai gym
        res = self.initial_act_space.convert_action_from_gym(gymlike_action)
    else:
        # case where the action space is a "simple" action space
        # start from a "do nothing" action and assign each provided attribute
        res = self.initial_act_space()
        for k, v in gymlike_action.items():
            internal_k = self.keys_human_2_grid2op[k]
            if internal_k in self._keys_encoding:
                # a user-defined encoding exists for this attribute: decode it first
                tmp = self._keys_encoding[internal_k].gym_to_g2op(v)
            else:
                tmp = v
            res._assign_attr_from_name(internal_k, tmp)
    return res
def to_gym(self, action: object) -> OrderedDict:
    """
    Transform an action (non gym) into an action compatible with the gym Space.

    Parameters
    ----------
    action:
        The action (coming from grid2op or understandable by the converter)

    Returns
    -------
    gym_action:
        The same action converted as a OrderedDict (default used by gym in case of action space
        being Dict)
    """
    if self.__is_converter:
        # the converter owns the conversion logic
        gym_action = self.initial_act_space.convert_action_to_gym(action)
    else:
        # in that case action should be an instance of grid2op BaseAction
        assert isinstance(
            action, BaseAction
        ), "impossible to convert an action not coming from grid2op"
        # TODO this do not work in case of multiple converter,
        # TODO this should somehow call tmp = self._keys_encoding[internal_k].g2op_to_gym(v)
        gym_action = self._base_to_gym(
            self.spaces.keys(),
            action,
            dtypes={k: self.spaces[k].dtype for k in self.spaces},
            converter=self.keys_human_2_grid2op,
        )
    return gym_action
def close(self):
    """Drop the reference to the grid2op environment (this space does not own it)."""
    owns_ref = hasattr(self, "_init_env")
    if owns_ref:
        # only break the link; the environment is closed by its owner
        self._init_env = None
if GYM_AVAILABLE:
    # legacy "gym" flavour: mix the generic implementation with the legacy
    # space converter and bind the concrete gym space types as class attributes
    from gym.spaces import (Discrete as LegGymDiscrete,
                            Box as LegGymBox,
                            Dict as LegGymDict,
                            Space as LegGymSpace,
                            MultiBinary as LegGymMultiBinary,
                            Tuple as LegGymTuple)
    from grid2op.gym_compat.gym_space_converter import _BaseLegacyGymSpaceConverter
    from grid2op.gym_compat.base_gym_attr_converter import BaseLegacyGymAttrConverter
    LegacyGymActionSpace = type("LegacyGymActionSpace",
                                (__AuxGymActionSpace, _BaseLegacyGymSpaceConverter, ),
                                {"_DiscreteType": LegGymDiscrete,
                                 "_BoxType": LegGymBox,
                                 "_DictType": LegGymDict,
                                 "_SpaceType": LegGymSpace,
                                 "_MultiBinaryType": LegGymMultiBinary,
                                 "_TupleType": LegGymTuple,
                                 "_BaseGymAttrConverterType": BaseLegacyGymAttrConverter,
                                 "_gymnasium": False,
                                 "__module__": __name__})
    LegacyGymActionSpace.__doc__ = __AuxGymActionSpace.__doc__
    GymActionSpace = LegacyGymActionSpace
    GymActionSpace.__doc__ = __AuxGymActionSpace.__doc__
if GYMNASIUM_AVAILABLE:
    # gymnasium flavour; when both packages are installed this branch runs
    # last, so GymActionSpace points to the gymnasium variant
    from gymnasium.spaces import Discrete, Box, Dict, Space, MultiBinary, Tuple
    from grid2op.gym_compat.gym_space_converter import _BaseGymnasiumSpaceConverter
    from grid2op.gym_compat.base_gym_attr_converter import BaseGymnasiumAttrConverter
    GymnasiumActionSpace = type("GymnasiumActionSpace",
                                (__AuxGymActionSpace, _BaseGymnasiumSpaceConverter, ),
                                {"_DiscreteType": Discrete,
                                 "_BoxType": Box,
                                 "_DictType": Dict,
                                 "_SpaceType": Space,
                                 "_MultiBinaryType": MultiBinary,
                                 "_TupleType": Tuple,
                                 "_BaseGymAttrConverterType": BaseGymnasiumAttrConverter,
                                 "_gymnasium": True,
                                 "__module__": __name__})
    GymnasiumActionSpace.__doc__ = __AuxGymActionSpace.__doc__
    GymActionSpace = GymnasiumActionSpace
    GymActionSpace.__doc__ = __AuxGymActionSpace.__doc__
| 17,758 | 42.104369 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/gym_compat/gym_obs_space.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import warnings
import numpy as np
from grid2op.Environment import (
Environment,
MultiMixEnvironment,
BaseMultiProcessEnvironment,
)
from grid2op.gym_compat.utils import GYM_AVAILABLE, GYMNASIUM_AVAILABLE
if GYMNASIUM_AVAILABLE:
from gymnasium import spaces # only used for type hints
elif GYM_AVAILABLE:
from gym import spaces
from grid2op.Observation import BaseObservation
from grid2op.dtypes import dt_int, dt_bool, dt_float
from grid2op.gym_compat.utils import _compute_extra_power_for_losses
class __AuxGymObservationSpace:
"""
TODO explain gym / gymnasium
This class allows to transform the observation space into a gym space.
Gym space will be a :class:`gym.spaces.Dict` with the keys being the different attributes
of the grid2op observation. All attributes are used.
Note that gym space converted with this class should be seeded independently. It is NOT seeded
when calling :func:`grid2op.Environment.Environment.seed`.
.. warning::
Depending on the presence absence of gymnasium and gym packages this class might behave differently.
In grid2op we tried to maintain compatibility both with gymnasium (newest) and gym (legacy,
no more maintained) RL packages. The behaviour is the following:
- :class:`GymObservationSpace` will inherit from gymnasium if it's installed
(in this case it will be :class:`GymnasiumObservationSpace`), otherwise it will
inherit from gym (and will be exactly :class:`LegacyGymObservationSpace`)
- :class:`GymnasiumObservationSpace` will inherit from gymnasium if it's available and never from
from gym
- :class:`LegacyGymObservationSpace` will inherit from gym if it's available and never from
from gymnasium
See :ref:`gymnasium_gym` for more information
Examples
--------
Converting an observation space is fairly straightforward:
.. code-block:: python
import grid2op
from grid2op.Converter import GymObservationSpace
env = grid2op.make()
gym_observation_space = GymObservationSpace(env.observation_space)
# and now gym_observation_space is a `gym.spaces.Dict` representing the observation space
# you can "convert" the grid2op observation to / from this space with:
grid2op_obs = env.reset()
same_gym_obs = gym_observation_space.to_gym(grid2op_obs)
# the conversion from gym_obs to grid2op obs is feasible, but i don't imagine
# a situation where it is useful. And especially, you will not be able to
# use "obs.simulate" for the observation converted back from this gym action.
Notes
-----
The range of the values for "gen_p" / "prod_p" are not strictly `env.gen_pmin` and `env.gen_pmax`.
This is due to the "approximation" when some redispatching is performed (the precision of the
algorithm that computes the actual dispatch from the information it receives) and also because
sometimes the losses of the grid are really different that the one anticipated in the "chronics" (yes
`env.gen_pmin` and `env.gen_pmax` are not always ensured in grid2op)
"""
def __init__(self, env, dict_variables=None):
    """Build the gym observation space from a grid2op environment.

    When ``dict_variables`` is not provided, a default set of extra
    (non-vectorized) attributes is exposed: thermal limits and voltage angles.
    """
    if not isinstance(
        env, (Environment, MultiMixEnvironment, BaseMultiProcessEnvironment)
    ):
        raise RuntimeError(
            "GymActionSpace must be created with an Environment of an ActionSpace (or a Converter)"
        )
    self._init_env = env
    self.initial_obs_space = self._init_env.observation_space
    dict_ = {}  # will represent the gym.Dict space
    if dict_variables is None:
        # get the extra variables in the gym space I want to get
        dict_variables = {
            "thermal_limit":
                type(self)._BoxType(
                    low=0.,
                    high=np.inf,
                    shape=(self._init_env.n_line, ),
                    dtype=dt_float,
                ),
            "theta_or":
                type(self)._BoxType(
                    low=-180.,
                    high=180.,
                    shape=(self._init_env.n_line, ),
                    dtype=dt_float,
                ),
            "theta_ex":
                type(self)._BoxType(
                    low=-180.,
                    high=180.,
                    shape=(self._init_env.n_line, ),
                    dtype=dt_float,
                ),
            "load_theta":
                type(self)._BoxType(
                    low=-180.,
                    high=180.,
                    shape=(self._init_env.n_load, ),
                    dtype=dt_float,
                ),
            "gen_theta":
                type(self)._BoxType(
                    low=-180.,
                    high=180.,
                    shape=(self._init_env.n_gen, ),
                    dtype=dt_float,
                )
        }
        # storage angles only make sense when the grid has storage units
        if self._init_env.n_storage:
            dict_variables["storage_theta"] = type(self)._BoxType(
                low=-180.,
                high=180.,
                shape=(self._init_env.n_storage, ),
                dtype=dt_float,
            )
    self._fill_dict_obs_space(
        dict_, env.observation_space, env.parameters, env._oppSpace, dict_variables
    )
    super().__init__(dict_, dict_variables=dict_variables)  # super should point to _BaseGymSpaceConverter
def reencode_space(self, key, fun):
    """
    This function is used to reencode the observation space. For example, it can be used to scale
    the observation into values close to 0., it can also be used to encode continuous variables into
    discrete variables or the other way around etc.
    Basically, it's a tool that lets you define your own observation space (there is the same for
    the action space)

    Parameters
    ----------
    key: ``str``
        Which part of the observation space you want to study
    fun: :class:`BaseGymAttrConverter`
        Put `None` to deactivate the feature (it will be hidden from the observation space)
        It can also be a `BaseGymAttrConverter`. See the example for more information.

    Returns
    -------
    res:
        A **new** observation space instance with the updated encoding (the
        current instance is not modified in place).

    Notes
    ------
    We highly recommend to set it up at the beginning of your script
    and not to modify it afterwards
    'fun' should be deep copiable (meaning that if `copy.deepcopy(fun)` is called, then it does not crash
    If an attribute has been ignored, for example by :func`GymEnv.keep_only_obs_attr`
    or and is now present here, it will be re added in the final observation
    """
    my_dict = self.get_dict_encoding()
    if fun is not None and not isinstance(fun, type(self)._BaseGymAttrConverterType):
        raise RuntimeError(
            "Impossible to initialize a converter with a function of type {}".format(
                type(fun)
            )
        )
    # initialize the converter on the space it replaces (if not done yet)
    if fun is not None and not fun.is_init_space():
        if key in my_dict:
            fun.initialize_space(my_dict[key])
        elif key in self.spaces:
            fun.initialize_space(self.spaces[key])
        else:
            raise RuntimeError(
                f"Impossible to find key {key} in your observation space"
            )
    my_dict[key] = fun
    # rebuild a brand new space with the updated encoding
    res = type(self)(self._init_env, my_dict)
    return res
def _fill_dict_obs_space(
    self, dict_, observation_space, env_params, opponent_space, dict_variables=None
):
    """Fill ``dict_`` with one gym space per observation attribute.

    Parameters
    ----------
    dict_: ``dict``
        Output dict, mutated in place (attribute name -> gym space).
    observation_space:
        The grid2op observation space being converted.
    env_params:
        Environment :class:`Parameters` (used for cooldown bounds).
    opponent_space:
        The opponent space (used for the attack duration bound).
    dict_variables: ``dict`` or ``None``
        Optional user-specified encodings; an entry set to ``None`` disables
        the corresponding attribute. Defaults to an empty dict.
        (Previously the default was a mutable ``{}`` literal — replaced by the
        ``None`` sentinel idiom; behaviour is unchanged for all callers.)
    """
    if dict_variables is None:
        dict_variables = {}
    for attr_nm in dict_variables:
        # case where the user specified a dedicated encoding
        if dict_variables[attr_nm] is None:
            # none is by default to disable this feature
            continue
        if isinstance(dict_variables[attr_nm], type(self)._SpaceType):
            if hasattr(observation_space._template_obj, attr_nm):
                # add it only if attribute exists in the observation
                dict_[attr_nm] = dict_variables[attr_nm]
        else:
            dict_[attr_nm] = dict_variables[attr_nm].my_space
    # by default consider all attributes that are vectorized
    for attr_nm, sh, dt in zip(
        observation_space.attr_list_vect,
        observation_space.shape,
        observation_space.dtype,
    ):
        if sh == 0:
            # do not add "empty" (=0 dimension) arrays to gym otherwise it crashes
            continue
        if (attr_nm in dict_ or
                (attr_nm in dict_variables and dict_variables[attr_nm] is None)):
            # variable already treated somewhere
            continue
        my_type = None
        shape = (sh,)
        if dt == dt_int:
            # discrete observation space
            if attr_nm == "year":
                my_type = type(self)._DiscreteType(n=2100)
            elif attr_nm == "month":
                my_type = type(self)._DiscreteType(n=13)
            elif attr_nm == "day":
                my_type = type(self)._DiscreteType(n=32)
            elif attr_nm == "hour_of_day":
                my_type = type(self)._DiscreteType(n=24)
            elif attr_nm == "minute_of_hour":
                my_type = type(self)._DiscreteType(n=60)
            elif attr_nm == "day_of_week":
                my_type = type(self)._DiscreteType(n=8)
            elif attr_nm == "topo_vect":
                my_type = type(self)._BoxType(low=-1, high=2, shape=shape, dtype=dt)
            elif attr_nm == "time_before_cooldown_line":
                my_type = type(self)._BoxType(
                    low=0,
                    high=max(
                        env_params.NB_TIMESTEP_COOLDOWN_LINE,
                        env_params.NB_TIMESTEP_RECONNECTION,
                        opponent_space.attack_max_duration,
                    ),
                    shape=shape,
                    dtype=dt,
                )
            elif attr_nm == "time_before_cooldown_sub":
                my_type = type(self)._BoxType(
                    low=0,
                    high=env_params.NB_TIMESTEP_COOLDOWN_SUB,
                    shape=shape,
                    dtype=dt,
                )
            elif (
                attr_nm == "duration_next_maintenance"
                or attr_nm == "time_next_maintenance"
            ):
                # can be -1 if no maintenance, otherwise always positive
                my_type = self._generic_gym_space(dt, sh, low=-1)
            elif attr_nm == "time_since_last_alarm":
                # can be -1 if no maintenance, otherwise always positive
                my_type = self._generic_gym_space(dt, 1, low=-1)
            elif attr_nm == "last_alarm":
                # can be -1 if no maintenance, otherwise always positive
                my_type = self._generic_gym_space(dt, sh, low=-1)
            elif attr_nm == "last_alert":
                # can be -1 if no maintenance, otherwise always positive
                my_type = self._generic_gym_space(dt, sh, low=-1)
            elif attr_nm == "was_alert_used_after_attack":
                # can be -1 or >= 0
                my_type = self._generic_gym_space(dt, sh, low=-1, high=1)
            elif attr_nm == "total_number_of_alert":
                my_type = self._generic_gym_space(dt, sh, low=0)
            elif (attr_nm == "time_since_last_attack" or
                  attr_nm == "time_since_last_alert"):
                my_type = self._generic_gym_space(dt, sh, low=-1)
            elif attr_nm == "attack_under_alert":
                my_type = self._generic_gym_space(dt, sh, low=-1, high=1)
            elif attr_nm == "alert_duration":
                my_type = self._generic_gym_space(dt, sh, low=0)
        elif dt == dt_bool:
            # boolean observation space
            if sh > 1:
                my_type = self._boolean_type(sh)
            else:
                # single flags are exposed as a Discrete(2)
                my_type = type(self)._DiscreteType(n=2)
        else:
            # continuous observation space
            low = float("-inf")
            high = float("inf")
            shape = (sh,)
            SpaceType = type(self)._BoxType
            if attr_nm == "gen_p" or attr_nm == "gen_p_before_curtail":
                low = copy.deepcopy(observation_space.gen_pmin)
                high = copy.deepcopy(observation_space.gen_pmax)
                shape = None
                # for redispatching
                low -= observation_space.obs_env._tol_poly
                high += observation_space.obs_env._tol_poly
                # for "power losses" that are not properly computed in the original data
                extra_for_losses = _compute_extra_power_for_losses(
                    observation_space
                )
                low -= extra_for_losses
                high += extra_for_losses
            elif (
                attr_nm == "gen_v"
                or attr_nm == "load_v"
                or attr_nm == "v_or"
                or attr_nm == "v_ex"
            ):
                # voltages can't be negative
                low = 0.0
            elif attr_nm == "a_or" or attr_nm == "a_ex" or attr_nm == "rho":
                # amps can't be negative
                low = 0.0
            elif attr_nm == "target_dispatch" or attr_nm == "actual_dispatch":
                # TODO check that to be sure
                low = np.minimum(
                    observation_space.gen_pmin, -observation_space.gen_pmax
                )
                high = np.maximum(
                    -observation_space.gen_pmin, +observation_space.gen_pmax
                )
            elif attr_nm == "storage_power" or attr_nm == "storage_power_target":
                low = -observation_space.storage_max_p_prod
                high = observation_space.storage_max_p_absorb
            elif attr_nm == "storage_charge":
                low = np.zeros(observation_space.n_storage, dtype=dt_float)
                high = observation_space.storage_Emax
            elif (
                attr_nm == "curtailment"
                or attr_nm == "curtailment_limit"
                or attr_nm == "curtailment_limit_effective"
            ):
                low = 0.0
                high = 1.0
            elif attr_nm == "attention_budget":
                low = 0.0
                high = np.inf
            elif attr_nm == "delta_time":
                low = 0.0
                high = np.inf
            elif attr_nm == "gen_margin_up":
                low = 0.0
                high = observation_space.gen_max_ramp_up
            elif attr_nm == "gen_margin_down":
                low = 0.0
                high = observation_space.gen_max_ramp_down
            # curtailment, curtailment_limit, gen_p_before_curtail
            my_type = SpaceType(low=low, high=high, shape=shape, dtype=dt)
        if my_type is None:
            # if nothing has been found in the specific cases above
            my_type = self._generic_gym_space(dt, sh)
        dict_[attr_nm] = my_type
def from_gym(self, gymlike_observation: spaces.dict.OrderedDict) -> BaseObservation:
    """
    This function convert the gym-like representation of an observation to a grid2op observation.

    Parameters
    ----------
    gymlike_observation: :class:`gym.spaces.dict.OrderedDict`
        The observation represented as a gym ordered dict

    Returns
    -------
    grid2oplike_observation: :class:`grid2op.Observation.BaseObservation`
        The corresponding grid2op observation
    """
    res = self.initial_obs_space.get_empty_observation()
    for k, v in gymlike_observation.items():
        try:
            res._assign_attr_from_name(k, v)
        except ValueError as exc_:
            # NOTE(review): exc_ is captured but not reported in the warning;
            # unknown keys are skipped on a best-effort basis
            warnings.warn(f"Cannot set attribute \"{k}\" in grid2op. "
                          f"This key is ignored.")
    return res
def to_gym(self, grid2op_observation: BaseObservation) -> spaces.dict.OrderedDict:
"""
Convert a grid2op observation into a gym ordered dict.
Parameters
----------
grid2op_observation: :class:`grid2op.Observation.BaseObservation`
The observation represented as a grid2op observation
Returns
-------
gymlike_observation: :class:`gym.spaces.dict.OrderedDict`
The corresponding gym ordered dict
"""
return self._base_to_gym(
self.spaces.keys(),
grid2op_observation,
dtypes={k: self.spaces[k].dtype for k in self.spaces},
)
def close(self):
    """Forget the wrapped grid2op environment; it is closed by its owner."""
    has_env_ref = hasattr(self, "_init_env")
    if has_env_ref:
        # drop the reference only, never close the environment here
        self._init_env = None
if GYM_AVAILABLE:
    # legacy "gym" flavour: mix the generic implementation with the legacy
    # space converter and bind the concrete gym space types as class attributes
    from gym.spaces import (Discrete as LegGymDiscrete,
                            Box as LegGymBox,
                            Dict as LegGymDict,
                            Space as LegGymSpace,
                            MultiBinary as LegGymMultiBinary,
                            Tuple as LegGymTuple)
    from grid2op.gym_compat.gym_space_converter import _BaseLegacyGymSpaceConverter
    from grid2op.gym_compat.base_gym_attr_converter import BaseLegacyGymAttrConverter
    LegacyGymObservationSpace = type("LegacyGymObservationSpace",
                                     (__AuxGymObservationSpace, _BaseLegacyGymSpaceConverter, ),
                                     {"_DiscreteType": LegGymDiscrete,
                                      "_BoxType": LegGymBox,
                                      "_DictType": LegGymDict,
                                      "_SpaceType": LegGymSpace,
                                      "_MultiBinaryType": LegGymMultiBinary,
                                      "_TupleType": LegGymTuple,
                                      "_BaseGymAttrConverterType": BaseLegacyGymAttrConverter,
                                      "_gymnasium": False,
                                      "__module__": __name__})
    LegacyGymObservationSpace.__doc__ = __AuxGymObservationSpace.__doc__
    GymObservationSpace = LegacyGymObservationSpace
    GymObservationSpace.__doc__ = __AuxGymObservationSpace.__doc__
if GYMNASIUM_AVAILABLE:
    # gymnasium flavour; when both packages are installed this branch runs
    # last, so GymObservationSpace points to the gymnasium variant
    from gymnasium.spaces import Discrete, Box, Dict, Space, MultiBinary, Tuple
    from grid2op.gym_compat.gym_space_converter import _BaseGymnasiumSpaceConverter
    from grid2op.gym_compat.base_gym_attr_converter import BaseGymnasiumAttrConverter
    GymnasiumObservationSpace = type("GymnasiumObservationSpace",
                                     (__AuxGymObservationSpace, _BaseGymnasiumSpaceConverter, ),
                                     {"_DiscreteType": Discrete,
                                      "_BoxType": Box,
                                      "_DictType": Dict,
                                      "_SpaceType": Space,
                                      "_MultiBinaryType": MultiBinary,
                                      "_TupleType": Tuple,
                                      "_BaseGymAttrConverterType": BaseGymnasiumAttrConverter,
                                      "_gymnasium": True,
                                      "__module__": __name__})
    GymnasiumObservationSpace.__doc__ = __AuxGymObservationSpace.__doc__
    GymObservationSpace = GymnasiumObservationSpace
    GymObservationSpace.__doc__ = __AuxGymObservationSpace.__doc__
| 21,084 | 43.389474 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/gym_compat/gym_space_converter.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from collections import OrderedDict
import numpy as np
import copy
from grid2op.dtypes import dt_int, dt_bool, dt_float
from grid2op.gym_compat.utils import check_gym_version, sample_seed
from grid2op.gym_compat.utils import GYM_AVAILABLE, GYMNASIUM_AVAILABLE
class __AuxBaseGymSpaceConverter:
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Used as a base class to convert grid2op state to gym state (wrapper for some useful function
for both the action space and the observation space).
.. warning::
Depending on the presence absence of gymnasium and gym packages this class might behave differently.
In grid2op we tried to maintain compatibility both with gymnasium (newest) and gym (legacy,
no more maintained) RL packages. The behaviour is the following:
- :class:`_BaseGymSpaceConverter` will inherit from gymnasium if it's installed
(in this case it will be :class:`_BaseGymnasiumSpaceConverter`), otherwise it will
inherit from gym (and will be exactly :class:`_BaseLegacyGymSpaceConverter`)
- :class:`_BaseGymnasiumSpaceConverter` will inherit from gymnasium if it's available and never from
from gym
- :class:`_BaseLegacyGymSpaceConverter` will inherit from gym if it's available and never from
from gymnasium
See :ref:`gymnasium_gym` for more information
"""
def __init__(self, dict_gym_space, dict_variables=None):
    """Initialize the Dict space and record the per-attribute encodings.

    ``dict_gym_space`` maps attribute names to gym spaces; ``dict_variables``
    optionally maps attribute names to custom converters (or ``None``).
    """
    check_gym_version(type(self)._gymnasium)
    # explicit call on the concrete Dict type (gym or gymnasium flavour)
    type(self)._DictType.__init__(self, dict_gym_space)
    self._keys_encoding = {}
    if dict_variables is not None:
        for k, v in dict_variables.items():
            self._keys_encoding[k] = v
    # user-registered functions added through `add_key` (name-mangled attribute)
    self.__func = {}
@classmethod
def _generic_gym_space(cls, dt, sh, low=None, high=None):
    """Build a default Box space of shape ``(sh,)`` for dtype ``dt``.

    NOTE(review): for non-integer dtypes the ``low``/``high`` arguments are
    overwritten with +/- inf below — confirm this is intentional.
    """
    if dt == dt_int:
        if low is None:
            low = np.iinfo(dt).min
        if high is None:
            high = np.iinfo(dt).max
    else:
        low = -np.inf
        high = +np.inf
    shape = (sh,)
    my_type = cls._BoxType(
        low=dt.type(low), high=dt.type(high), shape=shape, dtype=dt
    )
    return my_type
@classmethod
def _boolean_type(cls, sh):
    # boolean vectors are encoded as a MultiBinary space of size `sh`
    return cls._MultiBinaryType(n=sh)
@staticmethod
def _simplifykeys_for_timestamps(key):
"""some keys are encoded to be returned as scalar, i need to transform them."""
res = (
(key == "year")
or (key == "month")
or (key == "day")
or (key == "hour_of_day")
or (key == "minute_of_hour")
or (key == "day_of_week")
)
res = (
res
or (key == "is_alarm_illegal")
or (key == "was_alarm_used_after_game_over")
or (key == "is_alert_illegal")
or (key == "was_alert_used_after_attack")
)
return res
@staticmethod
def _extract_obj_grid2op(vect, dtype, key):
    """Turn a grid2op vector into its gym representation: length-1 vectors
    for the scalar-encoded keys become JSON-friendly Python scalars.

    NOTE(review): this references the module-level alias
    ``_BaseGymSpaceConverter`` (assigned at the bottom of the file) instead of
    the class itself — it works because the alias exists at call time.
    """
    if len(vect) == 1 and _BaseGymSpaceConverter._simplifykeys_for_timestamps(key):
        res = vect[0]
        # convert the types for json serializable
        # this is not automatically done by gym...
        if dtype == dt_int or dtype == np.int64 or dtype == np.int32:
            res = int(res)
        elif dtype == dt_float or dtype == np.float64 or dtype == np.float32:
            res = float(res)
        elif dtype == dt_bool:
            res = bool(res)
    else:
        res = vect
    return res
def _base_to_gym(self, keys, obj, dtypes, converter=None):
    """convert the obj (grid2op object) into a gym observation / action space

    ``keys`` are the gym-side names; ``converter`` (optional) maps them back
    to the internal grid2op attribute names; ``dtypes`` maps gym-side names
    to the dtype of each sub-space.
    """
    res = OrderedDict()
    for k in keys:
        if k in self.__func:
            # key registered through `add_key`: the user function computes it
            obj_json_cleaned = self.__func[k](obj)
        else:
            if converter is not None:
                # for converting the names between internal names and "human readable names"
                conv_k = converter[k]
            else:
                conv_k = k
            obj_raw = obj._get_array_from_attr_name(conv_k)
            if conv_k in self._keys_encoding:
                if self._keys_encoding[conv_k] is None:
                    # keys is deactivated
                    continue
                elif isinstance(self._keys_encoding[conv_k], type(self)._SpaceType):
                    # raw gym space given by the user: expose the attribute as-is
                    obj_json_cleaned = getattr(obj, conv_k)
                else:
                    # i need to process the "function" part in the keys
                    obj_json_cleaned = self._keys_encoding[conv_k].g2op_to_gym(
                        obj_raw
                    )
            else:
                obj_json_cleaned = self._extract_obj_grid2op(obj_raw, dtypes[k], k)
        res[k] = obj_json_cleaned
    return res
def add_key(self, key_name, function, return_type):
    """
    Allows to add arbitrary function to the representation, as a gym environment of
    the action space of the observation space.
    TODO
    **NB** this key is not used when converted back to grid2Op object, as of now we don't recommend to
    use it for the action space !
    See the example for more information.

    Parameters
    ----------
    key_name:
        The name you want to get
    function:
        A function that takes as input the grid2op object and returns the
        value to expose under ``key_name``
    return_type:
        The gym space describing the values produced by ``function``

    Returns
    -------

    Examples
    ---------
    In the example below, we explain how to add the "connectivity_matrix" as part of the observation space
    (when converted to gym). The new key "connectivity matrix" will be added to the gym observation.

    .. code-block:: python

        # create a grid2op environment
        import grid2op
        env_name = "l2rpn_case14_sandbox"
        env_glop = grid2op.make(env_name)

        # convert it to gym
        import gym
        import numpy as np
        from grid2op.gym_compat import GymEnv
        env_gym = GymEnv(env_glop)

        # default gym environment, the connectivity matrix is not computed
        obs_gym = env_gym.reset()
        print(f"Is the connectivity matrix part of the observation in gym: {'connectivity_matrix' in obs_gym}")

        # add the "connectivity matrix" as part of the observation in gym
        from gym.spaces import Box
        shape_ = (env_glop.dim_topo, env_glop.dim_topo)
        env_gym.observation_space.add_key("connectivity_matrix",
                                          lambda obs: obs.connectivity_matrix(),
                                          Box(shape=shape_,
                                              low=np.zeros(shape_),
                                              high=np.ones(shape_),
                                              )
                                          )

        # we highly recommend to "reset" the environment after setting up the observation space
        obs_gym = env_gym.reset()
        print(f"Is the connectivity matrix part of the observation in gym: {'connectivity_matrix' in obs_gym}")
    """
    # register the sub-space and remember the function to compute the value
    self.spaces[key_name] = return_type
    self.__func[key_name] = function
def get_dict_encoding(self):
    """Return a deep copy of the per-attribute encoding mapping, so the
    caller can modify it without affecting this space."""
    encoding_copy = copy.deepcopy(self._keys_encoding)
    return encoding_copy
def reencode_space(self, key, func):
    """Abstract hook: re-encode one part of the space.

    Concrete subclasses (action / observation space converters) provide the
    real implementation; calling it on the base class is an error.
    """
    msg = "This should be implemented in the GymActionSpace and GymObservationSpace"
    raise NotImplementedError(msg)
def reenc(self, key, fun):
    """
    shorthand for :func:`GymObservationSpace.reencode_space` or
    :func:`GymActionSpace.reencode_space`
    """
    # simple delegation, kept for convenience / brevity in user scripts
    return self.reencode_space(key, fun)
def keep_only_attr(self, attr_names):
    """Keep only the given attribute name(s) in the space; every other
    attribute is disabled via :func:`reencode_space` with ``None``."""
    if isinstance(attr_names, str):
        attr_names = [attr_names]
    kept = set(attr_names)
    res = self
    for attr in self.spaces.keys():
        if attr not in kept:
            # each call returns a fresh space; chain on the latest one
            res = res.reencode_space(attr, None)
    return res
def ignore_attr(self, attr_names):
    """Remove the given attribute name(s) from the space via
    :func:`reencode_space` with ``None``; other attributes are untouched."""
    if isinstance(attr_names, str):
        attr_names = [attr_names]
    dropped = set(attr_names)
    res = self
    for attr in self.spaces.keys():
        if attr in dropped:
            # each call returns a fresh space; chain on the latest one
            res = res.reencode_space(attr, None)
    return res
def seed(self, seed=None):
    """Seed the PRNG of this space and, independently, of every sub-space.

    see issue https://github.com/openai/gym/issues/2166
    of openAI gym

    Parameters
    ----------
    seed: ``int`` or ``None``
        The master seed; sub-space seeds are drawn from the resulting PRNG.

    Returns
    -------
    sub_seeds: ``list``
        The seeds used, starting with the Dict space's own seed(s).
    """
    seeds = super(type(self)._DictType, self).seed(seed)
    sub_seeds = seeds
    max_ = np.iinfo(dt_int).max
    # iterate in sorted order so the same master seed always produces the
    # same per-sub-space seeds (the enumerate index was unused and removed)
    for space_key in sorted(self.spaces.keys()):
        sub_seed = sample_seed(max_, self.np_random)
        sub_seeds.append(self.spaces[space_key].seed(sub_seed))
    return sub_seeds
def close(self):
    """Nothing to release for a bare space converter."""
    return None
if GYM_AVAILABLE:
    # legacy "gym" flavour: the concrete converter base class is created
    # dynamically so it inherits both the shared logic
    # (__AuxBaseGymSpaceConverter) and gym's Dict space
    from gym.spaces import (Discrete as LegGymDiscrete,
                            Box as LegGymBox,
                            Dict as LegGymDict,
                            Space as LegGymSpace,
                            MultiBinary as LegGymMultiBinary,
                            Tuple as LegGymTuple)
    # the "_XxxType" class attributes let the shared code refer to the proper
    # gym (vs gymnasium) space classes without importing them itself
    _BaseLegacyGymSpaceConverter = type("_BaseLegacyGymSpaceConverter",
                                        (__AuxBaseGymSpaceConverter, LegGymDict, ),
                                        {"_DiscreteType": LegGymDiscrete,
                                         "_BoxType": LegGymBox,
                                         "_DictType": LegGymDict,
                                         "_SpaceType": LegGymSpace,
                                         "_MultiBinaryType": LegGymMultiBinary,
                                         "_TupleType": LegGymTuple,
                                         "_gymnasium": False,
                                         "__module__": __name__})
    _BaseLegacyGymSpaceConverter.__doc__ = __AuxBaseGymSpaceConverter.__doc__
    # default alias, overridden below when gymnasium is also available
    _BaseGymSpaceConverter = _BaseLegacyGymSpaceConverter
    # NOTE(review): redundant — same object as the line two above
    _BaseGymSpaceConverter.__doc__ = __AuxBaseGymSpaceConverter.__doc__
if GYMNASIUM_AVAILABLE:
    # gymnasium flavour; when both packages are installed this block runs
    # last, so gymnasium becomes the default _BaseGymSpaceConverter
    from gymnasium.spaces import Discrete, Box, Dict, Space, MultiBinary, Tuple
    _BaseGymnasiumSpaceConverter = type("_BaseGymnasiumSpaceConverter",
                                        (__AuxBaseGymSpaceConverter, Dict, ),
                                        {"_DiscreteType": Discrete,
                                         "_BoxType": Box,
                                         "_DictType": Dict,
                                         "_SpaceType": Space,
                                         "_MultiBinaryType": MultiBinary,
                                         "_TupleType": Tuple,
                                         "_gymnasium": True,
                                         "__module__": __name__})
    _BaseGymnasiumSpaceConverter.__doc__ = __AuxBaseGymSpaceConverter.__doc__
    _BaseGymSpaceConverter = _BaseGymnasiumSpaceConverter
    # NOTE(review): redundant — same object as the line two above
    _BaseGymSpaceConverter.__doc__ = __AuxBaseGymSpaceConverter.__doc__
| 12,155 | 37.347003 | 115 | py |
Grid2Op | Grid2Op-master/grid2op/gym_compat/gymenv.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.dtypes import dt_int
from grid2op.Chronics import Multifolder
from grid2op.gym_compat.utils import GYM_AVAILABLE, GYMNASIUM_AVAILABLE
from grid2op.gym_compat.utils import (check_gym_version, sample_seed)
def conditional_decorator(condition):
    """Build a decorator that keeps the decorated function untouched when
    *condition* is true.

    When *condition* is false, the decorated name is replaced by a
    ``NotImplementedError`` instance — deliberately not callable — so any
    attempt to use it fails loudly.
    """
    def decorator(func):
        # keep the function as-is, or swap it for a non-callable sentinel
        return func if condition else NotImplementedError()
    return decorator
class __AuxGymEnv:
    """
    fully implements the openAI gym API by using the :class:`GymActionSpace` and :class:`GymObservationSpace`
    for compliance with openAI gym.

    They can handle action_space_converter or observation_space converter to change the representation of data
    that will be fed to the agent. #TODO

    .. warning::
        The `gym` package has some breaking API change since its version 0.26. Depending on the version installed,
        we attempted, in grid2op, to maintain compatibility both with former version and later one. This makes this
        class behave differently depending on the version of gym you have installed !

        The main changes involve the functions `env.step` and `env.reset`

    If you want to use the same version of the GymEnv regardless of the gym / gymnasium version installed you can use:

    - :class:`GymnasiumEnv` if `gymnasium` is available
    - :class:`GymEnv_Legacy` for gym < 0.26
    - :class:`GymEnv_Modern` for gym >= 0.26

    .. warning::
        Depending on the presence absence of gymnasium and gym packages this class might behave differently.
        In grid2op we tried to maintain compatibility both with gymnasium (newest) and gym (legacy,
        no more maintained) RL packages. The behaviour is the following:

        - :class:`GymEnv` will inherit from gymnasium if it's installed
          (in this case it will be :class:`GymnasiumEnv`), otherwise it will
          inherit from gym (and will be exactly :class:`GymEnv_Legacy` - gym < 0.26 -
          or :class:`GymEnv_Modern` - for gym >= 0.26)
        - :class:`GymnasiumEnv` will inherit from gymnasium if it's available and never
          from gym
        - :class:`GymEnv_Legacy` and :class:`GymEnv_Modern` will inherit from gym if it's
          available and never from gymnasium

        See :ref:`gymnasium_gym` for more information

    Notes
    ------
    The environment passed as input is copied. It is not modified by this "gym environment"

    Examples
    --------
    This can be used like:

    .. code-block:: python

        import grid2op
        from grid2op.gym_compat import GymEnv

        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)

        gym_env = GymEnv(env)  # is a gym environment properly inheriting from gym.Env !

    There are a few difference between "raw" grid2op environment and gymnasium environments.

    One of the major difference is that, to our knowledge, gymnasium does not support the
    `simulate` feature (which allows an agent to test the impact of a given action
    on the grid without having to perform a `step` see :ref:`model_based_rl` for more information)
    [NB if you know or better are developping some "model based RL library" let us know !]

    Another difference is in the way to do some actions. In grid2op, actions are a dedicated class
    and can be made with an `action_space` and a dictionary, or using the properties of the action
    class.

    In gym, there are no specific representations of the action class. More precisely, for each action
    type (:class:`MultiDiscreteActSpace`, :class:`DiscreteActSpace`, :class:`BoxGymActSpace` or
    :class:`GymActionSpace`) there is a way to encode it. For example, by default (:class:`GymActionSpace`)
    an action is represented through an OrderedDict (`from collection import OrderedDict`)
    """

    def __init__(self, env_init, shuffle_chronics=True, render_mode="rgb_array"):
        # fail fast if the installed gym / gymnasium version is unsupported
        check_gym_version(type(self)._gymnasium)
        # the grid2op env is copied: this wrapper never mutates the caller's env
        self.init_env = env_init.copy()
        # _ActionSpaceType / _ObservationSpaceType are set on the concrete
        # subclasses (gym or gymnasium flavour) built at module import time
        self.action_space = type(self)._ActionSpaceType(self.init_env)
        self.observation_space = type(self)._ObservationSpaceType(self.init_env)
        self.reward_range = self.init_env.reward_range
        self.metadata = self.init_env.metadata
        self.init_env.render_mode = render_mode
        # when True and the time series come from a Multifolder, a new
        # chronic is sampled at each reset
        self._shuffle_chronics = shuffle_chronics
        super().__init__()  # super should reference either gym.Env or gymnasium.Env
        if not hasattr(self, "_np_random"):
            # for older version of gym it does not exist
            self._np_random = np.random.RandomState()

    def _aux_step(self, gym_action):
        # used for gym < 0.26: 4-tuple API (obs, reward, done, info)
        g2op_act = self.action_space.from_gym(gym_action)
        g2op_obs, reward, done, info = self.init_env.step(g2op_act)
        gym_obs = self.observation_space.to_gym(g2op_obs)
        return gym_obs, float(reward), done, info

    def _aux_step_new(self, gym_action):
        # used for gym >= 0.26: 5-tuple API
        # (obs, reward, terminated, truncated, info)
        # TODO refacto with _aux_step
        g2op_act = self.action_space.from_gym(gym_action)
        g2op_obs, reward, terminated, info = self.init_env.step(g2op_act)
        gym_obs = self.observation_space.to_gym(g2op_obs)
        truncated = False  # see https://github.com/openai/gym/pull/2752
        return gym_obs, float(reward), terminated, truncated, info

    def _aux_reset(self, seed=None, return_info=None, options=None):
        # used for gym < 0.26: returns obs only, or (obs, info) if return_info
        if self._shuffle_chronics and isinstance(
            self.init_env.chronics_handler.real_data, Multifolder
        ):
            self.init_env.chronics_handler.sample_next_chronics()
        if seed is not None:
            seed_, next_seed, underlying_env_seeds = self._aux_seed(seed)
        g2op_obs = self.init_env.reset()
        gym_obs = self.observation_space.to_gym(g2op_obs)
        if return_info:
            chron_id = self.init_env.chronics_handler.get_id()
            info = {"time serie id": chron_id}
            if seed is not None:
                info["seed"] = seed
                info["grid2op_env_seed"] = next_seed
                info["underlying_env_seeds"] = underlying_env_seeds
            return gym_obs, info
        else:
            return gym_obs

    def _aux_reset_new(self, seed=None, options=None):
        # used for gym > 0.26: always returns (obs, info)
        if self._shuffle_chronics and isinstance(
            self.init_env.chronics_handler.real_data, Multifolder
        ):
            self.init_env.chronics_handler.sample_next_chronics()
        # new-style API: the base Env.reset handles seeding of _np_random
        super().reset(seed=seed)
        if seed is not None:
            self._aux_seed_spaces()
            seed, next_seed, underlying_env_seeds = self._aux_seed_g2op(seed)
        g2op_obs = self.init_env.reset()
        gym_obs = self.observation_space.to_gym(g2op_obs)
        chron_id = self.init_env.chronics_handler.get_id()
        info = {"time serie id": chron_id}
        if seed is not None:
            info["seed"] = seed
            info["grid2op_env_seed"] = next_seed
            info["underlying_env_seeds"] = underlying_env_seeds
        return gym_obs, info

    def render(self):
        """for compatibility with open ai gym render function"""
        return self.init_env.render()

    def close(self):
        # idempotent cleanup: each member is closed once then set to None
        if hasattr(self, "init_env") and self.init_env is not None:
            self.init_env.close()
            del self.init_env
            self.init_env = None
        if hasattr(self, "action_space") and self.action_space is not None:
            self.action_space.close()
            self.action_space = None
        if hasattr(self, "observation_space") and self.observation_space is not None:
            self.observation_space.close()
            self.observation_space = None

    def _aux_seed_spaces(self):
        # derive one fresh sub-seed per space from this env's PRNG
        max_ = np.iinfo(dt_int).max
        next_seed = sample_seed(max_, self._np_random)
        self.action_space.seed(next_seed)
        next_seed = sample_seed(max_, self._np_random)
        self.observation_space.seed(next_seed)

    def _aux_seed_g2op(self, seed):
        # then seed the underlying grid2op env
        max_ = np.iinfo(dt_int).max
        next_seed = sample_seed(max_, self._np_random)
        underlying_env_seeds = self.init_env.seed(next_seed)
        return seed, next_seed, underlying_env_seeds

    def _aux_seed(self, seed=None):
        # deprecated in gym >=0.26
        if seed is not None:
            # seed the gym env
            super().seed(seed)
            self._np_random.seed(seed)
            self._aux_seed_spaces()
            return self._aux_seed_g2op(seed)
        return None, None, None

    def __del__(self):
        # delete possible dangling reference
        self.close()
if GYM_AVAILABLE:
    # legacy "gym" flavour: mix the shared logic (__AuxGymEnv) with gym.Env
    from gym import Env as LegacyGymEnv
    from grid2op.gym_compat.gym_obs_space import LegacyGymObservationSpace
    from grid2op.gym_compat.gym_act_space import LegacyGymActionSpace
    _AuxGymEnv = type("_AuxGymEnv",
                      (__AuxGymEnv, LegacyGymEnv),
                      {"_gymnasium": False,
                       "_ActionSpaceType": LegacyGymActionSpace,
                       "_ObservationSpaceType": LegacyGymObservationSpace,
                       "__module__": __name__})
    _AuxGymEnv.__doc__ = __AuxGymEnv.__doc__

    class GymEnv_Legacy(_AuxGymEnv):
        # for old version of gym
        def reset(self, *args, **kwargs):
            # old-style reset: may return obs alone, or (obs, info)
            return self._aux_reset(*args, **kwargs)

        def step(self, action):
            # old-style step: 4-tuple (obs, reward, done, info)
            return self._aux_step(action)

        def seed(self, seed):
            # defined only on some cases
            return self._aux_seed(seed)

    class GymEnv_Modern(_AuxGymEnv):
        # for new version of gym
        def reset(self,
                  *,
                  seed=None,
                  options=None,):
            # new-style reset: always returns (obs, info)
            return self._aux_reset_new(seed, options)

        def step(self, action):
            # new-style step: 5-tuple (obs, reward, terminated, truncated, info)
            return self._aux_step_new(action)

    GymEnv_Legacy.__doc__ = __AuxGymEnv.__doc__
    GymEnv_Modern.__doc__ = __AuxGymEnv.__doc__
if GYMNASIUM_AVAILABLE:
    # gymnasium flavour, the recommended one when gymnasium is installed
    from gymnasium import Env
    from grid2op.gym_compat.gym_act_space import GymnasiumActionSpace
    from grid2op.gym_compat.gym_obs_space import GymnasiumObservationSpace
    _AuxGymnasiumEnv = type("_AuxGymnasiumEnv",
                            (__AuxGymEnv, Env),
                            {"_gymnasium": True,
                             "_ActionSpaceType": GymnasiumActionSpace,
                             "_ObservationSpaceType": GymnasiumObservationSpace,
                             "__module__": __name__})
    _AuxGymnasiumEnv.__doc__ = __AuxGymEnv.__doc__

    class GymnasiumEnv(_AuxGymnasiumEnv):
        # for new version of gym
        def reset(self,
                  *,
                  seed=None,
                  options=None,):
            # gymnasium-style reset: always returns (obs, info)
            return self._aux_reset_new(seed, options)

        def step(self, action):
            # gymnasium-style step: 5-tuple (obs, reward, terminated, truncated, info)
            return self._aux_step_new(action)

    GymnasiumEnv.__doc__ = __AuxGymEnv.__doc__
Grid2Op | Grid2Op-master/grid2op/gym_compat/multi_to_tuple_converter.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
# from gym.spaces import Tuple, MultiBinary, MultiDiscrete, Discrete
from grid2op.dtypes import dt_int
from grid2op.gym_compat.utils import sample_seed
from grid2op.gym_compat.utils import GYM_AVAILABLE, GYMNASIUM_AVAILABLE
class __AuxFixedTuple:
    """I simply overload the "seed" function because the default one behaves
    really really poorly

    see issue https://github.com/openai/gym/issues/2166
    """

    def seed(self, seed=None):
        """Seed the PRNG of this space.

        see issue https://github.com/openai/gym/issues/2166
        of openAI gym
        """
        # deliberately skip Tuple.seed (see the issue above): super(_TupleType,
        # self) starts the MRO search *after* the Tuple class, so the next
        # implementation (presumably Space.seed) is used
        seeds = super(type(self)._TupleType, self).seed(seed)
        sub_seeds = seeds  # NB alias on purpose: sub-seeds extend the same list
        max_ = np.iinfo(dt_int).max
        # draw one fresh sub-seed per sub-space from this space's PRNG
        # (the previously unused enumerate index has been dropped)
        for space in self.spaces:
            sub_seed = sample_seed(max_, self.np_random)
            sub_seeds.append(space.seed(sub_seed))
        return sub_seeds
if GYM_AVAILABLE:
    # legacy gym: mix the seed-patched tuple with gym's Tuple space
    from gym.spaces import Tuple
    FixedTupleLegacyGym = type("FixedTupleLegacyGym",
                               (__AuxFixedTuple, Tuple, ),
                               {"_gymnasium": False,
                                "_TupleType": Tuple})
    # default alias, overridden below when gymnasium is also available
    FixedTuple = FixedTupleLegacyGym
if GYMNASIUM_AVAILABLE:
    from gymnasium.spaces import Tuple
    # I has been fixed in gymnasium so I reuse it
    # FixedTupleGymnasium = type("FixedTupleGymnasium",
    #                            (__AuxFixedTuple, Tuple, ),
    #                            {"_gymnasium": True,
    #                             "_TupleType": Tuple})
    # the seeding bug is fixed upstream in gymnasium, its Tuple is used as-is
    FixedTupleGymnasium = Tuple
    FixedTuple = Tuple
class __AuxMultiToTupleConverter:
    """
    Some framework, for example ray[rllib] do not support MultiBinary nor MultiDiscrete gym
    action space. Apparently this is not going to change in a near
    future (see https://github.com/ray-project/ray/issues/1519).

    We choose to encode some variable using `MultiBinary` variable in grid2op. This allows for easy
    manipulation of them if using these frameworks.

    MultiBinary are encoded with gym Tuple of gym Discrete variables.

    .. warning::
        Depending on the presence absence of gymnasium and gym packages this class might behave differently.
        In grid2op we tried to maintain compatibility both with gymnasium (newest) and gym (legacy,
        no more maintained) RL packages. The behaviour is the following:

        - :class:`MultiToTupleConverter` will inherit from gymnasium if it's installed
          (in this case it will be :class:`MultiToTupleConverterGymnasium`), otherwise it will
          inherit from gym (and will be exactly :class:`MultiToTupleConverterLegacyGym`)
        - :class:`MultiToTupleConverterGymnasium` will inherit from gymnasium if it's available and never
          from gym
        - :class:`MultiToTupleConverterLegacyGym` will inherit from gym if it's available and never
          from gymnasium

        See :ref:`gymnasium_gym` for more information

    TODO add code example
    """

    def __init__(self, init_space=None):
        # number of components of the converted vector (set in initialize_space)
        self.size = None
        # initialize the (gym or gymnasium) BaseGymAttrConverter part, no space yet
        type(self)._BaseGymAttrConverterType.__init__(self, space=None)
        if init_space is not None:
            self.initialize_space(init_space)
        # hooks run before / after the conversion; identity by default,
        # replaced in initialize_space when wrapping another converter
        self.previous_fun = self._previous_fun
        self.after_fun = self._after_fun

    def _previous_fun(self, x):
        # identity: default "pre conversion" hook
        return x

    def _after_fun(self, x):
        # identity: default "post conversion" hook
        return x

    def initialize_space(self, init_space):
        # accept either a raw MultiBinary / MultiDiscrete space, or another
        # converter whose underlying space is MultiBinary / MultiDiscrete
        if isinstance(init_space, (type(self)._MultiBinaryType, type(self)._MultiDiscreteType)):
            pass
        elif isinstance(init_space, type(self)._BaseGymAttrConverterType):
            # chain the wrapped converter's encode / decode functions
            self.previous_fun = init_space.g2op_to_gym
            self.after_fun = init_space.gym_to_g2op
            if isinstance(init_space.my_space, (type(self)._MultiBinaryType, type(self)._MultiDiscreteType)):
                init_space = init_space.my_space
            else:
                raise RuntimeError(
                    "Bad converter used. It should be of type MultiBinary or MultiDiscrete"
                )
        else:
            raise RuntimeError(
                "Impossible to convert a gym space of type {} to a Tuple (it should be of "
                "type space.MultiBinary or space.MultiDiscrete)"
                "".format(type(init_space))
            )
        if isinstance(init_space, type(self)._MultiBinaryType):
            self.size = init_space.n
        else:
            # then it's a MultiDiscrete
            self.size = init_space.nvec.shape[0]
        # one Discrete per component: 2 choices for MultiBinary, nvec[i] otherwise
        li = []
        for i in range(self.size):
            tmp_sz = 2
            if isinstance(init_space, type(self)._MultiDiscreteType):
                tmp_sz = init_space.nvec[i]
            li.append(type(self)._DiscreteType(tmp_sz))
        self.base_initialize(space=type(self)._FixedTupleType(li), g2op_to_gym=None, gym_to_g2op=None)

    def gym_to_g2op(self, gym_object):
        # tuple of ints -> int vector, then run the "post" hook
        tmp = np.array(gym_object).astype(dt_int)
        return self.after_fun(tmp)

    def g2op_to_gym(self, g2op_object):
        # run the "pre" hook then convert the int vector into a tuple
        tmp = self.previous_fun(g2op_object)  # TODO
        return tuple(tmp.astype(dt_int))

    def close(self):
        # nothing to release for this converter
        pass
if GYM_AVAILABLE:
    # legacy "gym" flavour of the converter, built dynamically at import time
    from gym.spaces import (MultiBinary as LegacyGymMultiBinary,
                            MultiDiscrete as LegacyGymMultiDiscrete,
                            Discrete as LegacyGymDiscrete)
    from grid2op.gym_compat.base_gym_attr_converter import BaseLegacyGymAttrConverter
    MultiToTupleConverterLegacyGym = type("MultiToTupleConverterLegacyGym",
                                          (__AuxMultiToTupleConverter, BaseLegacyGymAttrConverter, ),
                                          {"_gymnasium": False,
                                           "_FixedTupleType": FixedTupleLegacyGym,
                                           "_BaseGymAttrConverterType": BaseLegacyGymAttrConverter,
                                           "_MultiDiscreteType": LegacyGymMultiDiscrete,
                                           "_MultiBinaryType": LegacyGymMultiBinary,
                                           "_DiscreteType": LegacyGymDiscrete,
                                           "__module__": __name__
                                           })
    MultiToTupleConverterLegacyGym.__doc__ = __AuxMultiToTupleConverter.__doc__
    # default alias, overridden below when gymnasium is also available
    MultiToTupleConverter = MultiToTupleConverterLegacyGym
    MultiToTupleConverter.__doc__ = __AuxMultiToTupleConverter.__doc__
if GYMNASIUM_AVAILABLE:
    # gymnasium flavour; when both packages are present this one wins
    from gymnasium.spaces import MultiBinary, MultiDiscrete, Discrete, Tuple
    from grid2op.gym_compat.base_gym_attr_converter import BaseGymnasiumAttrConverter
    MultiToTupleConverterGymnasium = type("MultiToTupleConverterGymnasium",
                                          (__AuxMultiToTupleConverter, BaseGymnasiumAttrConverter, ),
                                          {"_gymnasium": True,
                                           "_FixedTupleType": Tuple,
                                           "_BaseGymAttrConverterType": BaseGymnasiumAttrConverter,
                                           "_MultiDiscreteType": MultiDiscrete,
                                           "_MultiBinaryType": MultiBinary,
                                           "_DiscreteType": Discrete,
                                           "__module__": __name__
                                           })
    MultiToTupleConverterGymnasium.__doc__ = __AuxMultiToTupleConverter.__doc__
    MultiToTupleConverter = MultiToTupleConverterGymnasium
    MultiToTupleConverter.__doc__ = __AuxMultiToTupleConverter.__doc__
Grid2Op | Grid2Op-master/grid2op/gym_compat/multidiscrete_gym_actspace.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import warnings
import numpy as np
from grid2op.Action import ActionSpace
from grid2op.dtypes import dt_int, dt_bool, dt_float
from grid2op.gym_compat.utils import (ALL_ATTR,
ATTR_DISCRETE,
check_gym_version,
GYM_AVAILABLE,
GYMNASIUM_AVAILABLE)
class __AuxMultiDiscreteActSpace:
"""
This class allows to convert a grid2op action space into a gym "MultiDiscrete". This means that the action are
labeled, and instead of describing the action itself, you provide only its ID.
.. note::
This action space is particularly suited for represented discrete actions.
It is possible to represent continuous actions with it. In that case, the continuous actions are "binarized"
thanks to the :class:`ContinuousToDiscreteConverter`. Feel free to consult its documentation for
more information.
In this case it will extract all the features in all the action with:
- "set_line_status": `n_line` dimensions, each containing 3 choices "DISCONNECT", "DONT AFFECT", "FORCE CONNECTION"
and affecting the powerline status (connected / disconnected)
- "change_line_status": `n_line` dimensions, each containing 2 elements "CHANGE", "DONT CHANGE" and
affecting the powerline status (connected / disconnected)
- "set_bus": `dim_topo` dimensions, each containing 4 choices: "DISCONNECT", "DONT AFFECT", "CONNECT TO BUSBAR 1",
or "CONNECT TO BUSBAR 2" and affecting to which busbar an object is connected
- "change_bus": `dim_topo` dimensions, each containing 2 choices: "CHANGE", "DONT CHANGE" and affect
to which busbar an element is connected
- "redispatch": `sum(env.gen_redispatchable)` dimensions, each containing a certain number of choices depending on the value
of the keyword argument `nb_bins["redispatch"]` (by default 7).
- "curtail": `sum(env.gen_renewable)` dimensions, each containing a certain number of choices depending on the value
of the keyword argument `nb_bins["curtail"]` (by default 7). This is
the "conversion to discrete action"
of the curtailment action.
- "curtail_mw": `sum(env.gen_renewable)` dimensions, completely equivalent to "curtail" for this representation.
This is the "conversion to discrete action" of the curtailment action.
- "set_storage": `n_storage` dimensions, each containing a certain number of choices depending on the value
of the keyword argument `nb_bins["set_storage"]` (by default 7). This is the "conversion to discrete action"
of the action on storage units.
- "raise_alarm": TODO
- "raise_alert": TODO
We offer some extra customization, with the keywords:
- "sub_set_bus": `n_sub` dimension. This type of representation encodes each different possible combination
of elements that are possible at each substation. The choice at each component depends on the element connected
at this substation. Only configurations that will not lead to straight game over will be generated.
- "sub_change_bus": `n_sub` dimension. Same comment as for "sub_set_bus"
- "one_sub_set": 1 single dimension. This type of representation differs from the previous one only by the fact
that each step you can perform only one single action on a single substation (so unlikely to be illegal).
- "one_sub_change": 1 single dimension. Same as above.
.. warning::
We recommend to use either "set" or "change" way to look at things (**ie** either you want to target
a given state -in that case use "sub_set_bus", "line_set_status", "one_sub_set", or "set_bus" __**OR**__ you
prefer
reasoning in terms of "i want to change this or that" in that case use "sub_change_bus",
"line_change_status", "one_sub_change" or "change_bus".
Combining a "set" and "change" on the same element will most likely lead to an "ambiguous action". Indeed,
what should grid2op do if you "tell element A to go to bus 1" and at the same time "tell the same element A to switch
to bus 2 if it was on bus 1 and to move to bus 1 if it was on bus 2"? It's not clear at all (hence the "ambiguous").
No error will be thrown if you mix this, this is your absolute right, be aware it might not
lead to the result you expect though.
.. note::
The arguments "set_bus", "sub_set_bus" and "one_sub_set" will all perform "set_bus" actions. The only
difference if "how you represent these actions":
- In "set_bus" each component represent a single element of the grid. When you sample an action
with this keyword you will possibly change all the elements of the grid at once (this is likely to
be illegal). Nothing prevents you from performing "weird" stuff, for example disconnecting a load or a generator
(which is straight game over) or having a load or a generator that will be "alone" on a busbar (which
will also lead to a straight game over). You can do anything with it, but as always "A great power
comes with a great responsibility".
- In "sub_set_bus" each component represent a substation of the grid. When you sample an action
from this, you will possibly change all the elements of the grid at once (because you can act
on all the substation at the same time). As opposed to "set_bus" however this constraint the action
space to "action that will not lead directly to a game over", in practice.
- In "one_sub_set": the single component represent the whole grid. When you sample an action
with this, you will sample a single action acting on a single substation. You will not be able to act
on multiple substation with this.
For this reason, we also do not recommend using only one of these arguments and only provide
only one of "set_bus", "sub_set_bus" and "one_sub_set". Again, no error will be thrown if you mix them
but be warned that the resulting behaviour might not be what you expect.
.. warning::
The same as above holds for "change_bus", "sub_change_bus" and "one_sub_change": Use only one of these !
.. danger::
The keys `set_bus` and `change_bus` does not have the same meaning between this representation of the
action and the DiscreteActSpace.
.. warning::
Depending on the presence absence of gymnasium and gym packages this class might behave differently.
In grid2op we tried to maintain compatibility both with gymnasium (newest) and gym (legacy,
no more maintained) RL packages. The behaviour is the following:
- :class:`MultiDiscreteActSpace` will inherit from gymnasium if it's installed
(in this case it will be :class:`MultiDiscreteActSpaceGymnasium`), otherwise it will
inherit from gym (and will be exactly :class:`MultiDiscreteActSpaceLegacyGym`)
- :class:`MultiDiscreteActSpaceGymnasium` will inherit from gymnasium if it's available and never from
from gym
- :class:`MultiDiscreteActSpaceLegacyGym` will inherit from gym if it's available and never from
from gymnasium
See :ref:`gymnasium_gym` for more information
Examples
--------
If you simply want to use it you can do:
.. code-block:: python
import grid2op
env_name = "l2rpn_case14_sandbox" # or any other name
env = grid2op.make(env_name)
from grid2op.gym_compat import GymEnv, MultiDiscreteActSpace
gym_env = GymEnv(env)
gym_env.action_space = MultiDiscreteActSpace(env.action_space)
You can select the attribute you want to keep, for example:
.. code-block:: python
gym_env.action_space = MultiDiscreteActSpace(env.observation_space,
attr_to_keep=['redispatch', "curtail", "sub_set_bus"])
You can also apply some basic transformation when you "discretize" continuous action
.. code-block:: python
gym_env.action_space = MultiDiscreteActSpace(env.observation_space,
attr_to_keep=['redispatch', "curtail", "sub_set_bus"],
nb_bins={"redispatch": 3, "curtail": 17},
)
By default it is "discretized" in 7 different "bins". The more "bins" there will be, the more "precise"
you can be in your control, but the higher the dimension of the action space.
"""
ATTR_CHANGE = 0
ATTR_SET = 1
ATTR_NEEDBUILD = 2
ATTR_NEEDBINARIZED = 3
def __init__(self, grid2op_action_space, attr_to_keep=ALL_ATTR, nb_bins=None):
    """Build the MultiDiscrete representation of a grid2op action space.

    Parameters
    ----------
    grid2op_action_space: :class:`grid2op.Action.ActionSpace`
        The grid2op action space to convert (it is deep-copied, not modified).
    attr_to_keep:
        Names of the action attributes to expose; by default all attributes
        supported by the action type.
    nb_bins: dict or None
        Number of discretization bins for each continuous attribute
        ("redispatch", "set_storage", "curtail", "curtail_mw"); 7 each by default.
    """
    check_gym_version(type(self)._gymnasium)
    if not isinstance(grid2op_action_space, ActionSpace):
        raise RuntimeError(
            f"Impossible to create a BoxGymActSpace without providing a "
            f"grid2op action_space. You provided {type(grid2op_action_space)}"
            f'as the "grid2op_action_space" attribute.'
        )
    if nb_bins is None:
        # default bin count used to discretize each continuous attribute
        nb_bins = {"redispatch": 7, "set_storage": 7, "curtail": 7, "curtail_mw": 7}
    if attr_to_keep == ALL_ATTR:
        # by default, i remove all the attributes that are not supported by the action type
        # i do not do that if the user specified specific attributes to keep. This is his responsibility in
        # in this case
        attr_to_keep = {
            el for el in attr_to_keep if grid2op_action_space.supports_type(el)
        }
    # warn about continuous attributes kept in a discrete action space
    for el in attr_to_keep:
        if el not in ATTR_DISCRETE:
            warnings.warn(
                f'The class "MultiDiscreteActSpace" should mainly be used to consider only discrete '
                f"actions (eg. set_line_status, set_bus or change_bus). Though it is possible to use "
                f'"{el}" when building it, be aware that this continuous action will be treated '
                f"as discrete by splitting it into bins. "
                f'Consider using the "BoxGymActSpace" for these attributes.'
            )
    self._attr_to_keep = sorted(attr_to_keep)
    act_sp = grid2op_action_space
    self._act_space = copy.deepcopy(grid2op_action_space)
    # redispatch bounds: ramp limits, zeroed for non-redispatchable generators
    # NOTE(review): low_gen / high_gen are computed here but not referenced
    # later in this method — confirm whether they are still needed
    low_gen = -1.0 * act_sp.gen_max_ramp_down
    high_gen = 1.0 * act_sp.gen_max_ramp_up
    low_gen[~act_sp.gen_redispatchable] = 0.0
    high_gen[~act_sp.gen_redispatchable] = 0.0
    # each entry maps an attribute name to (choices per dimension, number of
    # dimensions, handling type — one of the ATTR_* class constants)
    # nb, dim, []
    self.dict_properties = {
        "set_line_status": (
            [3 for _ in range(act_sp.n_line)],
            act_sp.n_line,
            self.ATTR_SET,
        ),
        "change_line_status": (
            [2 for _ in range(act_sp.n_line)],
            act_sp.n_line,
            self.ATTR_CHANGE,
        ),
        "set_bus": (
            [4 for _ in range(act_sp.dim_topo)],
            act_sp.dim_topo,
            self.ATTR_SET,
        ),
        "change_bus": (
            [2 for _ in range(act_sp.dim_topo)],
            act_sp.dim_topo,
            self.ATTR_CHANGE,
        ),
        "raise_alarm": (
            [2 for _ in range(act_sp.dim_alarms)],
            act_sp.dim_alarms,
            self.ATTR_CHANGE,
        ),
        "raise_alert": (
            [2 for _ in range(act_sp.dim_alerts)],
            act_sp.dim_alerts,
            self.ATTR_CHANGE,
        ),
        "sub_set_bus": (
            None,
            act_sp.n_sub,
            self.ATTR_NEEDBUILD,
        ),  # dimension will be computed on the fly, if the stuff is used
        "sub_change_bus": (
            None,
            act_sp.n_sub,
            self.ATTR_NEEDBUILD,
        ),  # dimension will be computed on the fly, if the stuff is used
        "one_sub_set": (
            None,
            1,
            self.ATTR_NEEDBUILD,
        ),  # dimension will be computed on the fly, if the stuff is used
        "one_sub_change": (
            None,
            1,
            self.ATTR_NEEDBUILD,
        ),  # dimension will be computed on the fly, if the stuff is used
    }
    self._nb_bins = nb_bins
    # continuous attributes kept by the user must come with a bin count
    for el in ["redispatch", "set_storage", "curtail", "curtail_mw"]:
        if el in attr_to_keep:
            if el not in nb_bins:
                raise RuntimeError(
                    f'The attribute you want to keep "{el}" is not present in the '
                    f'"nb_bins". This attribute is continuous, you have to specify in how '
                    f"how to convert it to a discrete space. See the documentation "
                    f"for more information."
                )
            nb_redispatch = np.sum(act_sp.gen_redispatchable)
            nb_renew = np.sum(act_sp.gen_renewable)
            if el == "redispatch":
                self.dict_properties[el] = (
                    [nb_bins[el] for _ in range(nb_redispatch)],
                    nb_redispatch,
                    self.ATTR_NEEDBINARIZED,
                )
            elif el == "curtail" or el == "curtail_mw":
                self.dict_properties[el] = (
                    [nb_bins[el] for _ in range(nb_renew)],
                    nb_renew,
                    self.ATTR_NEEDBINARIZED,
                )
            elif el == "set_storage":
                self.dict_properties[el] = (
                    [nb_bins[el] for _ in range(act_sp.n_storage)],
                    act_sp.n_storage,
                    self.ATTR_NEEDBINARIZED,
                )
            else:
                raise RuntimeError(f'Unknown attribute "{el}"')
    self._dims = None
    self._functs = None  # final functions that is applied to the gym action to map it to a grid2Op action
    self._binarizers = None  # contains all the stuff to binarize the data
    self._types = None
    nvec = self._get_info()
    # initialize the base container
    type(self)._MultiDiscreteType.__init__(self, nvec=nvec)
@staticmethod
def _funct_set(encoded):
    """Shift gym's 0-based "set" encoding down to grid2op's convention, in place.

    gym samples: 0 -> -1 (disconnect), 1 -> 0 (don't change), 2 -> 1, 3 -> 2
    (the value 3 only exists for ``set_bus``, not for ``set_line_status``).
    """
    encoded -= 1
    return encoded
@staticmethod
def _funct_change(encoded):
    """Map gym's {0, 1} "change" encoding onto booleans (0 -> False, 1 -> True)."""
    return encoded.astype(dt_bool)
def _funct_substations(self, orig_act, attr_nm, vect):
    """
    Used for "sub_set_bus" and "sub_change_bus": merge into ``orig_act``
    (in place, via ``+=``) one pre-built unitary action per substation,
    ``vect[sub_id]`` being the index of the chosen action for that substation.
    """
    per_sub_actions = self._sub_modifiers[attr_nm]
    for sub_id, choice in enumerate(vect):
        orig_act += per_sub_actions[sub_id][choice]
def _funct_one_substation(self, orig_act, attr_nm, vect):
    """
    Used for "one_sub_set" and "one_sub_change": merge into ``orig_act``
    (in place, via ``+=``) the single pre-built action selected by ``vect``
    (a one-element vector holding the action index).
    """
    chosen_action = self._sub_modifiers[attr_nm][int(vect)]
    orig_act += chosen_action
def _get_info(self):
nvec = None
self._dims = []
self._functs = []
self._binarizers = {}
self._sub_modifiers = {}
self._types = []
box_space = None
dim = 0
for el in self._attr_to_keep:
if el in self.dict_properties:
nvec_, dim_, type_ = self.dict_properties[el]
if type_ == self.ATTR_CHANGE:
# I can convert them directly into discrete attributes because it's a
# recognize "change" attribute
funct = self._funct_change
elif type_ == self.ATTR_SET:
# I can convert them directly into discrete attributes because it's a
# recognize "set" attribute
funct = self._funct_set
elif type_ == self.ATTR_NEEDBINARIZED:
# base action was continuous, i need to convert it to discrete action thanks
# to "binarization", that is done automatically here
# from grid2op.gym_compat.box_gym_actspace import BoxGymActSpace
# from grid2op.gym_compat.continuous_to_discrete import (
# ContinuousToDiscreteConverter,
# )
if box_space is None:
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
box_space = type(self)._BoxGymActSpaceType(
self._act_space,
attr_to_keep=[
"redispatch",
"set_storage",
"curtail",
"curtail_mw",
],
)
if el not in box_space._dict_properties:
raise RuntimeError(
f"Impossible to dertmine lowest and maximum value for "
f'key "{el}".'
)
low_, high_, shape_, dtype_ = box_space._dict_properties[el]
tmp_box = type(self)._BoxType(low=low_, high=high_, dtype=dtype_)
tmp_binarizer = type(self)._ContinuousToDiscreteConverterType(
init_space=tmp_box, nb_bins=self._nb_bins[el]
)
self._binarizers[el] = tmp_binarizer
funct = tmp_binarizer.gym_to_g2op
elif type_ == self.ATTR_NEEDBUILD:
# attributes comes from substation manipulation, i need to build the entire space
nvec_ = []
self._sub_modifiers[el] = []
if el == "sub_set_bus":
# one action per substations, using "set"
for sub_id in range(self._act_space.n_sub):
act_this_sub = [self._act_space()]
act_this_sub += (
self._act_space.get_all_unitary_topologies_set(
self._act_space, sub_id=sub_id
)
)
nvec_.append(len(act_this_sub))
self._sub_modifiers[el].append(act_this_sub)
funct = self._funct_substations
elif el == "sub_change_bus":
# one action per substation, using "change"
for sub_id in range(self._act_space.n_sub):
acts_this_sub = [self._act_space()]
acts_this_sub += (
self._act_space.get_all_unitary_topologies_change(
self._act_space, sub_id=sub_id
)
)
nvec_.append(len(acts_this_sub))
self._sub_modifiers[el].append(acts_this_sub)
funct = self._funct_substations
elif el == "one_sub_set":
# an action change only one substation, using "set"
self._sub_modifiers[
el
] = self._act_space.get_all_unitary_topologies_set(
self._act_space
)
funct = self._funct_one_substation
nvec_ = [len(self._sub_modifiers[el])]
elif el == "one_sub_change":
# an action change only one substation, using "change"
self._sub_modifiers[
el
] = self._act_space.get_all_unitary_topologies_change(
self._act_space
)
funct = self._funct_one_substation
nvec_ = [len(self._sub_modifiers[el])]
else:
raise RuntimeError(
f'Unsupported attribute "{el}" when dealing with '
f"action on substation"
)
else:
raise RuntimeError(f"Unknown way to build the action.")
else:
li_keys = "\n\t- ".join(sorted(list(self.dict_properties.keys())))
raise RuntimeError(
f'Unknown action attributes "{el}". Supported attributes are: '
f"\n\t- {li_keys}"
)
dim += dim_
if nvec is not None:
nvec += nvec_
else:
nvec = nvec_
self._dims.append(dim)
self._functs.append(funct)
self._types.append(type_)
return nvec
def _handle_attribute(self, res, gym_act_this, attr_nm, funct, type_):
"""
INTERNAL
TODO
Parameters
----------
res
gym_act_this
attr_nm
Returns
-------
"""
# TODO code that !
vect = 1 * gym_act_this
if type_ == self.ATTR_NEEDBUILD:
funct(res, attr_nm, vect)
else:
tmp = funct(vect)
if attr_nm == "redispatch":
gym_act_this_ = np.full(
self._act_space.n_gen, fill_value=np.NaN, dtype=dt_float
)
gym_act_this_[self._act_space.gen_redispatchable] = tmp
tmp = gym_act_this_
elif attr_nm == "curtail" or attr_nm == "curtail_mw":
gym_act_this_ = np.full(
self._act_space.n_gen, fill_value=np.NaN, dtype=dt_float
)
gym_act_this_[self._act_space.gen_renewable] = tmp
tmp = gym_act_this_
setattr(res, attr_nm, tmp)
return res
    def from_gym(self, gym_act):
        """
        This is the function that is called to transform a gym action (in this case a numpy array!)
        sent by the agent
        and convert it to a grid2op action that will be sent to the underlying grid2op environment.

        Parameters
        ----------
        gym_act: ``numpy.ndarray``
            the gym action

        Returns
        -------
        grid2op_act: :class:`grid2op.Action.BaseAction`
            The corresponding grid2op action.
        """
        res = self._act_space()  # start from the "do nothing" action
        prev = 0  # start index (in the flat gym vector) of the current attribute
        for attr_nm, where_to_put, funct, type_ in zip(
            self._attr_to_keep, self._dims, self._functs, self._types
        ):
            # NOTE(review): this emptiness check does not depend on the loop
            # variables; when `gym_act` is empty every attribute is skipped and
            # the "do nothing" action is returned — presumably intended, confirm.
            if not gym_act.shape or not gym_act.shape[0]:
                continue
            # copy of the slice of the flat gym vector encoding this attribute
            this_part = 1 * gym_act[prev:where_to_put]
            if attr_nm in self.dict_properties:
                self._handle_attribute(res, this_part, attr_nm, funct, type_)
            else:
                raise RuntimeError(f'Unknown attribute "{attr_nm}".')
            prev = where_to_put
        return res
def close(self):
pass
# Build the concrete classes: one flavour for the legacy "gym" package, one for
# "gymnasium". `MultiDiscreteActSpace` ends up pointing to the most recent
# framework available (gymnasium wins when both are installed, since its branch
# runs second).
if GYM_AVAILABLE:
    from gym.spaces import Box as LegacyGymBox, MultiDiscrete as LegacyGymMultiDiscrete
    from grid2op.gym_compat.box_gym_actspace import BoxLegacyGymActSpace
    from grid2op.gym_compat.continuous_to_discrete import ContinuousToDiscreteConverterLegacyGym
    MultiDiscreteActSpaceLegacyGym = type("MultiDiscreteActSpaceLegacyGym",
                                          (__AuxMultiDiscreteActSpace, LegacyGymMultiDiscrete, ),
                                          {"_gymnasium": False,
                                           "_BoxType": LegacyGymBox,
                                           "_MultiDiscreteType": LegacyGymMultiDiscrete,
                                           "_BoxGymActSpaceType": BoxLegacyGymActSpace,
                                           "_ContinuousToDiscreteConverterType": ContinuousToDiscreteConverterLegacyGym,
                                           "__module__": __name__})
    MultiDiscreteActSpaceLegacyGym.__doc__ = __AuxMultiDiscreteActSpace.__doc__
    MultiDiscreteActSpace = MultiDiscreteActSpaceLegacyGym
    MultiDiscreteActSpace.__doc__ = __AuxMultiDiscreteActSpace.__doc__
if GYMNASIUM_AVAILABLE:
    from gymnasium.spaces import Box, MultiDiscrete
    from grid2op.gym_compat.box_gym_actspace import BoxGymnasiumActSpace
    from grid2op.gym_compat.continuous_to_discrete import ContinuousToDiscreteConverterGymnasium
    MultiDiscreteActSpaceGymnasium = type("MultiDiscreteActSpaceGymnasium",
                                          (__AuxMultiDiscreteActSpace, MultiDiscrete, ),
                                          {"_gymnasium": True,
                                           "_BoxType": Box,
                                           "_MultiDiscreteType": MultiDiscrete,
                                           "_BoxGymActSpaceType": BoxGymnasiumActSpace,
                                           "_ContinuousToDiscreteConverterType": ContinuousToDiscreteConverterGymnasium,
                                           "__module__": __name__})
    MultiDiscreteActSpaceGymnasium.__doc__ = __AuxMultiDiscreteActSpace.__doc__
    MultiDiscreteActSpace = MultiDiscreteActSpaceGymnasium
    MultiDiscreteActSpace.__doc__ = __AuxMultiDiscreteActSpace.__doc__
| 26,421 | 45.930728 | 128 | py |
Grid2Op | Grid2Op-master/grid2op/gym_compat/scaler_attr_converter.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import numpy as np
from grid2op.dtypes import dt_float
from grid2op.gym_compat.utils import GYM_AVAILABLE, GYMNASIUM_AVAILABLE
class __AuxScalerAttrConverter:
    """
    This is a scaler that transforms an initial gym space `init_space` into its scaled version.
    It can be used to scale the observation, by subtracting the mean and dividing by the variance for
    example.
    TODO work in progress !
    Need help if you can :-)
    .. warning::
        Depending on the presence absence of gymnasium and gym packages this class might behave differently.
        In grid2op we tried to maintain compatibility both with gymnasium (newest) and gym (legacy,
        no more maintained) RL packages. The behaviour is the following:
        - :class:`ScalerAttrConverter` will inherit from gymnasium if it's installed
          (in this case it will be :class:`ScalerAttrConverterGymnasium`), otherwise it will
          inherit from gym (and will be exactly :class:`ScalerAttrConverterLegacyGym`)
        - :class:`ScalerAttrConverterGymnasium` will inherit from gymnasium if it's available and never from
          from gym
        - :class:`ScalerAttrConverterLegacyGym` will inherit from gym if it's available and never from
          from gymnasium
        See :ref:`gymnasium_gym` for more information
    """
    def __init__(self, substract, divide, dtype=None, init_space=None):
        super().__init__(
            g2op_to_gym=None, gym_to_g2op=None, space=None
        )  # super should be from type BaseGymAttrConverter
        self._substract = np.array(substract)  # offset removed by `scale`
        self._divide = np.array(divide)  # denominator used by `scale`
        self.dtype = dtype if dtype is not None else dt_float
        if init_space is not None:
            self.initialize_space(init_space)
    def initialize_space(self, init_space):
        # NOTE(review): `_is_init_space` is read before ever being written in this
        # class — presumably initialized (to False) by the base converter class;
        # confirm against BaseGymAttrConverter.
        if self._is_init_space:
            return
        if not isinstance(init_space, type(self)._BoxType):
            raise RuntimeError(
                "Impossible to scale a converter if this one is not from type space.Box"
            )
        tmp_space = copy.deepcopy(init_space)
        # properly change the low / high value
        low_tmp = self.scale(tmp_space.low)
        high_tmp = self.scale(tmp_space.high)
        # scaling with a negative `_divide` swaps the bounds: reorder them
        low_ = np.minimum(high_tmp, low_tmp)
        high_ = np.maximum(high_tmp, low_tmp)
        tmp_space.low[:] = low_
        tmp_space.high[:] = high_
        if self.dtype is not None:
            tmp_space.dtype = np.dtype(self.dtype)
            tmp_space.low = tmp_space.low.astype(self.dtype)
            tmp_space.high = tmp_space.high.astype(self.dtype)
        self.base_initialize(
            space=tmp_space, g2op_to_gym=self.scale, gym_to_g2op=self.unscale
        )
        self.dtype = self.my_space.dtype
        self._substract = self._substract.astype(self.dtype)
        self._divide = self._divide.astype(self.dtype)
        self._is_init_space = True
    def scale(self, vect):
        # grid2op -> gym: (x - substract) / divide
        tmp = vect.astype(self.dtype)
        tmp = (tmp - self._substract) / self._divide
        return tmp
    def unscale(self, vect):
        # gym -> grid2op: exact inverse of `scale`
        tmp = vect * self._divide + self._substract
        return tmp
    def close(self):
        pass
# Build the concrete classes for legacy gym and for gymnasium;
# `ScalerAttrConverter` points to the gymnasium flavour when available
# (its branch runs second), otherwise to the legacy gym one.
if GYM_AVAILABLE:
    from gym.spaces import Box as LegacyGymBox
    from grid2op.gym_compat.base_gym_attr_converter import BaseLegacyGymAttrConverter
    ScalerAttrConverterLegacyGym = type("ScalerAttrConverterLegacyGym",
                                        (__AuxScalerAttrConverter, BaseLegacyGymAttrConverter, ),
                                        {"_gymnasium": False,
                                         "_BoxType": LegacyGymBox,
                                         "__module__": __name__})
    ScalerAttrConverterLegacyGym.__doc__ = __AuxScalerAttrConverter.__doc__
    ScalerAttrConverter = ScalerAttrConverterLegacyGym
    ScalerAttrConverter.__doc__ = __AuxScalerAttrConverter.__doc__
if GYMNASIUM_AVAILABLE:
    from gymnasium.spaces import Box
    from grid2op.gym_compat.base_gym_attr_converter import BaseGymnasiumAttrConverter
    ScalerAttrConverterGymnasium = type("ScalerAttrConverterGymnasium",
                                        (__AuxScalerAttrConverter, BaseGymnasiumAttrConverter, ),
                                        {"_gymnasium": True,
                                         "_BoxType": Box,
                                         "__module__": __name__})
    ScalerAttrConverterGymnasium.__doc__ = __AuxScalerAttrConverter.__doc__
    ScalerAttrConverter = ScalerAttrConverterGymnasium
    ScalerAttrConverter.__doc__ = __AuxScalerAttrConverter.__doc__
| 5,122 | 41.338843 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/gym_compat/utils.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from packaging import version
try:
from importlib.metadata import distribution
except ModuleNotFoundError:
# not available in python 3.7
from importlib_metadata import distribution
try:
import gym
# the current gym version (we should support most recent, but also
# the very old 0.21 because it used by stable baselines3...)
GYM_VERSION = version.parse(distribution('gym').version)
GYM_AVAILABLE = True
except ImportError:
GYM_AVAILABLE = False
GYM_VERSION = version.parse("0.17.2")
try:
import gymnasium
GYMNASIUM_AVAILABLE = True
except ImportError:
GYMNASIUM_AVAILABLE = False
# oldest legacy-gym version grid2op supports
_MIN_GYM_VERSION = version.parse("0.17.2")
# this is the last gym version to use the "old" numpy prng
_MAX_GYM_VERSION_RANDINT = version.parse("0.25.99")
# every grid2op action attribute a gym / gymnasium action space may expose
ALL_ATTR = (
    "set_line_status",
    "change_line_status",
    "set_bus",
    "change_bus",
    "redispatch",
    "set_storage",
    "curtail",
    "raise_alarm",
    "raise_alert",
)
# raise alert or alarm is not supported
ALL_ATTR_FOR_DISCRETE = (
    "set_line_status",
    "change_line_status",
    "set_bus",
    "change_bus",
    "redispatch",
    "set_storage",
    "curtail"
)
# attributes that can be represented with (multi) discrete spaces
ATTR_DISCRETE = (
    "set_line_status",
    "set_line_status_simple",
    "change_line_status",
    "set_bus",
    "change_bus",
    "sub_set_bus",
    "sub_change_bus",
    "one_sub_set",
    "one_sub_change",
    # "raise_alarm"
    # "raise_alert"
)
# continuous action attributes
ALL_ATTR_CONT = (
    "redispatch",
    "set_storage",
    "curtail",
)
def check_gym_version(use_gymnasium):
    """Raise ``RuntimeError`` if the installed RL-framework package cannot be used.

    Parameters
    ----------
    use_gymnasium: ``bool``
        If ``True``, check that gymnasium is installed; otherwise check that the
        installed (legacy) gym version is recent enough (>= ``_MIN_GYM_VERSION``).

    Raises
    ------
    RuntimeError
        If the requested framework is missing or too old.
    """
    if not use_gymnasium:
        if GYM_VERSION < _MIN_GYM_VERSION:
            import gym
            raise RuntimeError(
                f"Grid2op does not work with gym < {_MIN_GYM_VERSION} and you have gym with "
                f"version {gym.__version__} installed."
            )
    else:
        if not GYMNASIUM_AVAILABLE:
            # fixed typo ("requries") and grammar in the error message
            raise RuntimeError("You are trying to use a class that requires gymnasium, yet you do "
                               "not appear to have installed it.")
def _compute_extra_power_for_losses(gridobj):
"""
to handle the "because of the power losses gen_pmin and gen_pmax can be slightly altered"
"""
import numpy as np
return 0.3 * np.sum(np.abs(gridobj.gen_pmax))
def sample_seed(max_, np_random):
    """sample a seed based on gym version (np_random has not always the same behaviour)"""
    # old gym versions exposed the legacy numpy prng (`randint`); recent ones
    # (and gymnasium) use the new Generator API (`integers`)
    use_legacy_prng = (
        GYM_VERSION <= _MAX_GYM_VERSION_RANDINT and hasattr(np_random, "randint")
    )
    if use_legacy_prng:
        return np_random.randint(max_)
    return int(np_random.integers(0, max_))
| 3,259 | 26.863248 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/l2rpn_utils/__init__.py | # Copyright (c) 2019-2022, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
# Public API: one (Action, Observation) pair per L2RPN competition.
__all__ = [
    "ActionWCCI2020", "ObservationWCCI2020",
    "ActionNeurips2020", "ObservationNeurips2020",
    "ActionICAPS2021", "ObservationICAPS2021",
    "ActionWCCI2022", "ObservationWCCI2022",
    "ActionIDF2023", "ObservationIDF2023"
]
from grid2op.l2rpn_utils.wcci_2020 import ActionWCCI2020, ObservationWCCI2020
from grid2op.l2rpn_utils.neurips_2020 import ActionNeurips2020, ObservationNeurips2020
from grid2op.l2rpn_utils.icaps_2021 import ActionICAPS2021, ObservationICAPS2021
from grid2op.l2rpn_utils.wcci_2022 import ActionWCCI2022, ObservationWCCI2022
from grid2op.l2rpn_utils.idf_2023 import ActionIDF2023, ObservationIDF2023 | 1,110 | 51.904762 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/l2rpn_utils/icaps_2021.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action import PlayableAction
from grid2op.Observation import CompleteObservation
class ActionICAPS2021(PlayableAction):
    """Action class for the L2RPN ICAPS 2021 competition.

    Restricts :class:`grid2op.Action.PlayableAction` to the action types legal
    in this competition (line status, topology, redispatching, curtailment and
    alarm raising) and freezes the attribute order used for vectorization.
    """
    authorized_keys = {
        "set_line_status",
        "change_line_status",
        "set_bus",
        "change_bus",
        "redispatch",
        "curtail",
        "raise_alarm",
    }
    attr_list_vect = ['_set_line_status',
                      '_switch_line_status',
                      '_set_topo_vect',
                      '_change_bus_vect',
                      '_redispatch',
                      '_storage_power',
                      '_curtail',
                      '_raise_alarm']
    attr_list_set = set(attr_list_vect)
class ObservationICAPS2021(CompleteObservation):
    """Observation class for the L2RPN ICAPS 2021 competition.

    Freezes which attributes of :class:`grid2op.Observation.CompleteObservation`
    go into the observation vector (``attr_list_vect``) and which are only
    serialized to json (``attr_list_json``).
    """
    attr_list_vect = ['year',
                      'month',
                      'day',
                      'hour_of_day',
                      'minute_of_hour',
                      'day_of_week',
                      'gen_p',
                      'gen_q',
                      'gen_v',
                      'load_p',
                      'load_q',
                      'load_v',
                      'p_or',
                      'q_or',
                      'v_or',
                      'a_or',
                      'p_ex',
                      'q_ex',
                      'v_ex',
                      'a_ex',
                      'rho',
                      'line_status',
                      'timestep_overflow',
                      'topo_vect',
                      'time_before_cooldown_line',
                      'time_before_cooldown_sub',
                      'time_next_maintenance',
                      'duration_next_maintenance',
                      'target_dispatch',
                      'actual_dispatch',
                      'storage_charge',
                      'storage_power_target',
                      'storage_power',
                      'gen_p_before_curtail',
                      'curtailment',
                      'curtailment_limit',
                      'is_alarm_illegal',
                      'time_since_last_alarm',
                      'last_alarm',
                      'attention_budget',
                      'was_alarm_used_after_game_over',
                      '_shunt_p',
                      '_shunt_q',
                      '_shunt_v',
                      '_shunt_bus'
                      ]
    attr_list_json = [
        "current_step",
        "max_step",
        "delta_time",
        "gen_margin_up",
        "gen_margin_down",
        "_thermal_limit",
        "support_theta",
        "theta_or",
        "theta_ex",
        "load_theta",
        "gen_theta",
        "storage_theta",
    ]
    attr_list_set = set(attr_list_vect)
| 3,244 | 31.777778 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/l2rpn_utils/idf_2023.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action import PlayableAction
from grid2op.Observation import CompleteObservation
class ActionIDF2023(PlayableAction):
    """Action class for the L2RPN IDF 2023 competition.

    Restricts :class:`grid2op.Action.PlayableAction` to the action types legal
    in this competition (line status, topology, redispatching, storage,
    curtailment and alert raising) and freezes the attribute order used for
    vectorization.
    """
    authorized_keys = {
        "set_line_status",
        "change_line_status",
        "set_bus",
        "change_bus",
        "redispatch",
        "set_storage",
        "curtail",
        "raise_alert",
    }
    attr_list_vect = [
        "_set_line_status",
        "_switch_line_status",
        "_set_topo_vect",
        "_change_bus_vect",
        "_redispatch",
        "_storage_power",
        "_curtail",
        "_raise_alert",
    ]
    attr_list_set = set(attr_list_vect)
class ObservationIDF2023(CompleteObservation):
    """Observation class for the L2RPN IDF 2023 competition.

    Freezes which attributes of :class:`grid2op.Observation.CompleteObservation`
    go into the observation vector (``attr_list_vect``) and which are only
    serialized to json (``attr_list_json``).
    """
    attr_list_vect = [
        "year",
        "month",
        "day",
        "hour_of_day",
        "minute_of_hour",
        "day_of_week",
        "gen_p",
        "gen_q",
        "gen_v",
        "load_p",
        "load_q",
        "load_v",
        "p_or",
        "q_or",
        "v_or",
        "a_or",
        "p_ex",
        "q_ex",
        "v_ex",
        "a_ex",
        "rho",
        "line_status",
        "timestep_overflow",
        "topo_vect",
        "time_before_cooldown_line",
        "time_before_cooldown_sub",
        "time_next_maintenance",
        "duration_next_maintenance",
        "target_dispatch",
        "actual_dispatch",
        "storage_charge",
        "storage_power_target",
        "storage_power",
        "gen_p_before_curtail",
        "curtailment",
        "curtailment_limit",
        "curtailment_limit_effective",  # starting grid2op version 1.6.6
        "_shunt_p",
        "_shunt_q",
        "_shunt_v",
        "_shunt_bus",  # starting from grid2op version 1.6.0
        "current_step",
        "max_step",  # starting from grid2op version 1.6.4
        "delta_time",  # starting grid2op version 1.6.5
        "gen_margin_up",
        "gen_margin_down",  # starting grid2op version 1.6.6
        # line alert (starting grid2Op 1.9.1, for compatible envs)
        "active_alert",
        "attack_under_alert",
        "time_since_last_alert",
        "alert_duration",
        "total_number_of_alert",
        "time_since_last_attack",
        "was_alert_used_after_attack",
    ]
    attr_list_json = [
        "_thermal_limit",
        "support_theta",
        "theta_or",
        "theta_ex",
        "load_theta",
        "gen_theta",
        "storage_theta"
    ]
    attr_list_set = set(attr_list_vect)
| 2,920 | 27.359223 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/l2rpn_utils/neurips_2020.py | # Copyright (c) 2019-2022, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action import PlayableAction
from grid2op.Observation import CompleteObservation
class ActionNeurips2020(PlayableAction):
    """Action class for the L2RPN NeurIPS 2020 competition.

    Restricts :class:`grid2op.Action.PlayableAction` to the action types legal
    in this competition (line status, topology and redispatching) and freezes
    the attribute order used for vectorization.
    """
    authorized_keys = {
        "set_line_status",
        "change_line_status",
        "set_bus",
        "change_bus",
        "redispatch",
    }
    attr_list_vect = ['_set_line_status',
                      '_set_topo_vect',
                      '_change_bus_vect',
                      '_switch_line_status',
                      '_redispatch']
    attr_list_set = set(attr_list_vect)
class ObservationNeurips2020(CompleteObservation):
    """Observation class for the L2RPN NeurIPS 2020 competition.

    Freezes which attributes of :class:`grid2op.Observation.CompleteObservation`
    go into the observation vector (``attr_list_vect``) and which are only
    serialized to json (``attr_list_json``).
    """
    attr_list_vect = ['year',
                      'month',
                      'day',
                      'hour_of_day',
                      'minute_of_hour',
                      'day_of_week',
                      'gen_p',
                      'gen_q',
                      'gen_v',
                      'load_p',
                      'load_q',
                      'load_v',
                      'p_or',
                      'q_or',
                      'v_or',
                      'a_or',
                      'p_ex',
                      'q_ex',
                      'v_ex',
                      'a_ex',
                      'rho',
                      'line_status',
                      'timestep_overflow',
                      'topo_vect',
                      'time_before_cooldown_line',
                      'time_before_cooldown_sub',
                      'time_next_maintenance',
                      'duration_next_maintenance',
                      'target_dispatch',
                      'actual_dispatch']
    attr_list_json = [
        "storage_charge",
        "storage_power_target",
        "storage_power",
        "gen_p_before_curtail",
        "curtailment",
        "curtailment_limit",
        "curtailment_limit_effective",
        "_shunt_p",
        "_shunt_q",
        "_shunt_v",
        "_shunt_bus",
        "current_step",
        "max_step",
        "delta_time",
        "gen_margin_up",
        "gen_margin_down",
        "_thermal_limit",
        "support_theta",
        "theta_or",
        "theta_ex",
        "load_theta",
        "gen_theta",
        "storage_theta",
    ]
    attr_list_set = set(attr_list_vect)
Grid2Op | Grid2Op-master/grid2op/l2rpn_utils/wcci_2020.py | # Copyright (c) 2019-2022, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action import PlayableAction
from grid2op.Observation import CompleteObservation
class ActionWCCI2020(PlayableAction):
    """Action class for the L2RPN WCCI 2020 competition.

    Restricts :class:`grid2op.Action.PlayableAction` to the action types legal
    in this competition (line status, topology and redispatching) and freezes
    the attribute order used for vectorization.
    """
    authorized_keys = {
        "set_line_status",
        "change_line_status",
        "set_bus",
        "change_bus",
        "redispatch",
    }
    attr_list_vect = [
        "_set_line_status",
        "_switch_line_status",
        "_set_topo_vect",
        "_change_bus_vect",
        "_redispatch"
    ]
    attr_list_set = set(attr_list_vect)
class ObservationWCCI2020(CompleteObservation):
    """Observation class for the L2RPN WCCI 2020 competition.

    Freezes which attributes of :class:`grid2op.Observation.CompleteObservation`
    go into the observation vector (``attr_list_vect``) and which are only
    serialized to json (``attr_list_json``).
    """
    attr_list_vect = [
        'year',
        'month',
        'day',
        'hour_of_day',
        'minute_of_hour',
        'day_of_week',
        "gen_p",
        "gen_q",
        "gen_v",
        'load_p',
        'load_q',
        'load_v',
        'p_or',
        'q_or',
        'v_or',
        'a_or',
        'p_ex',
        'q_ex',
        'v_ex',
        'a_ex',
        'rho',
        'line_status',
        'timestep_overflow',
        'topo_vect',
        'time_before_cooldown_line',
        'time_before_cooldown_sub',
        'time_next_maintenance',
        'duration_next_maintenance',
        'target_dispatch',
        'actual_dispatch'
    ]
    attr_list_json = [
        "storage_charge",
        "storage_power_target",
        "storage_power",
        "gen_p_before_curtail",
        "curtailment",
        "curtailment_limit",
        "curtailment_limit_effective",
        "_shunt_p",
        "_shunt_q",
        "_shunt_v",
        "_shunt_bus",
        "current_step",
        "max_step",
        "delta_time",
        "gen_margin_up",
        "gen_margin_down",
        "_thermal_limit",
        "support_theta",
        "theta_or",
        "theta_ex",
        "load_theta",
        "gen_theta",
        "storage_theta",
    ]
    attr_list_set = set(attr_list_vect)
| 2,331 | 24.626374 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/l2rpn_utils/wcci_2022.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Action import PlayableAction
from grid2op.Observation import CompleteObservation
class ActionWCCI2022(PlayableAction):
    """Action class for the L2RPN WCCI 2022 competition.

    Restricts :class:`grid2op.Action.PlayableAction` to the action types legal
    in this competition (line status, topology, redispatching, storage and
    curtailment) and freezes the attribute order used for vectorization.
    """
    authorized_keys = {
        "set_line_status",
        "change_line_status",
        "set_bus",
        "change_bus",
        "redispatch",
        "set_storage",
        "curtail"
    }
    attr_list_vect = [
        "_set_line_status",
        "_switch_line_status",
        "_set_topo_vect",
        "_change_bus_vect",
        "_redispatch",
        "_storage_power",
        "_curtail"
    ]
    attr_list_set = set(attr_list_vect)
class ObservationWCCI2022(CompleteObservation):
    """Observation class for the L2RPN WCCI 2022 competition.

    Freezes which attributes of :class:`grid2op.Observation.CompleteObservation`
    go into the observation vector (``attr_list_vect``) and which are only
    serialized to json (``attr_list_json``).
    """
    attr_list_vect = [
        "year",
        "month",
        "day",
        "hour_of_day",
        "minute_of_hour",
        "day_of_week",
        "gen_p",
        "gen_q",
        "gen_v",
        "load_p",
        "load_q",
        "load_v",
        "p_or",
        "q_or",
        "v_or",
        "a_or",
        "p_ex",
        "q_ex",
        "v_ex",
        "a_ex",
        "rho",
        "line_status",
        "timestep_overflow",
        "topo_vect",
        "time_before_cooldown_line",
        "time_before_cooldown_sub",
        "time_next_maintenance",
        "duration_next_maintenance",
        "target_dispatch",
        "actual_dispatch",
        "storage_charge",
        "storage_power_target",
        "storage_power",
        "gen_p_before_curtail",
        "curtailment",
        "curtailment_limit",
        "curtailment_limit_effective",  # starting grid2op version 1.6.6
        "_shunt_p",
        "_shunt_q",
        "_shunt_v",
        "_shunt_bus",  # starting from grid2op version 1.6.0
        "current_step",
        "max_step",  # starting from grid2op version 1.6.4
        "delta_time",  # starting grid2op version 1.6.5
        "gen_margin_up",
        "gen_margin_down",  # starting grid2op version 1.6.6
    ]
    attr_list_json = [
        "_thermal_limit",
        "support_theta",
        "theta_or",
        "theta_ex",
        "load_theta",
        "gen_theta",
        "storage_theta",
    ]
    attr_list_set = set(attr_list_vect)
Grid2Op | Grid2Op-master/grid2op/operator_attention/__init__.py | # Copyright (c) 2021, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
# Public API of the attention-budget ("alarm") feature.
__all__ = ["LinearAttentionBudget"]
from grid2op.operator_attention.attention_budget import LinearAttentionBudget
| 573 | 46.833333 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/operator_attention/attention_budget.py | # Copyright (c) 2021, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import numpy as np
from grid2op.dtypes import dt_float, dt_int, dt_bool
from grid2op.Exceptions import NotEnoughAttentionBudget
class LinearAttentionBudget:
    """Attention budget with linear dynamics.

    The budget grows by ``budget_per_ts`` (capped at ``max_budget``) at every
    step without alarm; raising an alarm costs ``alarm_cost``. If the budget is
    insufficient the alarm is ignored and a
    :class:`grid2op.Exceptions.NotEnoughAttentionBudget` instance is returned
    (not raised) by :func:`register_action`.
    """
    def __init__(self):
        self._max_budget = None  # cap on the budget
        self._budget_per_ts = None  # budget recovered at each step without alarm
        self._alarm_cost = None  # budget spent per successful alarm
        self._current_budget = None
        self._init_budget = None  # budget at the beginning of each episode
        self._time_last_alarm_raised = dt_int(-1)
        self._time_last_successful_alarm_raised = dt_int(-1)
        self._last_alarm_raised = None  # per-zone step of last alarm (any)
        self._last_successful_alarm_raised = None  # per-zone step of last successful alarm
        self._all_successful_alarms = []  # list of (step, zones) for successful alarms
    @property
    def time_last_alarm_raised(self):
        """
        Time step of the last alarm raised (-1 if no alarm has been raised yet)

        Returns
        -------
        """
        return self._time_last_alarm_raised
    @property
    def time_last_successful_alarm_raised(self):
        """time of the last successful alarm raised"""
        return self._time_last_successful_alarm_raised
    @property
    def current_budget(self):
        """current attention budget"""
        return self._current_budget
    @property
    def last_alarm_raised(self):
        """
        for each zone, says:
        - -1 if no alarm have been raised for this zone for the entire episode
        - `k` (with `k>0`) says that the last alarm raised for this zone was at step `k`
        .. note::
            This counts both successful and non successful alarms
        """
        return self._last_alarm_raised
    @property
    def last_successful_alarm_raised(self):
        """
        for each zone, says:
        - -1 if no alarm have been raised for this zone for the entire episode
        - `k` (with `k>0`) says that the last alarm raised for this zone was at step `k`
        .. note::
            This counts only successful alarms
        """
        return self._last_successful_alarm_raised
    def init(
        self, partial_env, init_budget, max_budget, budget_per_ts, alarm_cost, **kwargs
    ):
        # store the parameters (cast to grid2op dtypes) and size the per-zone
        # arrays from the environment's number of alarm zones
        self._max_budget = dt_float(max_budget)
        self._budget_per_ts = dt_float(budget_per_ts)
        self._alarm_cost = dt_float(alarm_cost)
        self._init_budget = dt_float(init_budget)
        self._last_alarm_raised = np.empty(partial_env.dim_alarms, dtype=dt_int)
        self._last_successful_alarm_raised = np.empty(
            partial_env.dim_alarms, dtype=dt_int
        )
        self.reset()
    def reset(self):
        """
        called each time the scenario is over by the environment

        Returns
        -------
        """
        self._current_budget = self._init_budget
        self._time_last_alarm_raised = dt_int(-1)
        self._time_last_successful_alarm_raised = dt_int(-1)
        self._last_alarm_raised[:] = -1
        self._last_successful_alarm_raised[:] = -1
        self._all_successful_alarms = []
    def get_state(self):
        """used to retrieve the sate in simulate"""
        res = (
            self._time_last_alarm_raised,
            self._last_alarm_raised,
            self._current_budget,
            self._time_last_successful_alarm_raised,
            self._last_successful_alarm_raised,
            self._all_successful_alarms,
        )
        return res
    def set_state(self, state):
        """used to update the internal state of the budget, for simulate"""
        (
            _time_last_alarm_raised,
            _last_alarm_raised,
            _current_budget,
            _time_last_successful_alarm_raised,
            _last_successful_alarm_raised,
            _all_successful_alarms,
        ) = state
        self._time_last_alarm_raised = _time_last_alarm_raised
        self._last_alarm_raised[:] = _last_alarm_raised
        self._current_budget = _current_budget
        self._time_last_successful_alarm_raised = _time_last_successful_alarm_raised
        self._last_successful_alarm_raised[:] = _last_successful_alarm_raised
        self._all_successful_alarms = copy.copy(_all_successful_alarms)
    def register_action(self, env, action, is_action_illegal, is_action_ambiguous):
        """
        INTERNAL

        Called at each step to update the budget according to the action played.
        Returns ``None``, or a :class:`NotEnoughAttentionBudget` instance (NOT
        raised) when the alarm could not be afforded.

        Parameters
        ----------
        env
        action
        is_action_illegal
        is_action_ambiguous

        Returns
        -------
        """
        if action.dim_alarms == 0 or is_action_illegal or is_action_ambiguous:
            # this feature is not supported (grid2op <= 1.6.0) or is not activated
            # also, if the action is illegal or ambiguous, it is replaced with "do nothing":
            # the budget should not be affected in this case
            return None
        if action.alarm_raised().size:
            # an alarm has been raised (successful or not, it is recorded)
            self._time_last_alarm_raised = env.nb_time_step
            self._last_alarm_raised[action.raise_alarm] = env.nb_time_step
            if self._current_budget >= self._alarm_cost:
                # enough budget: pay the cost and record the successful alarm
                self._current_budget -= self._alarm_cost
                self._time_last_successful_alarm_raised = env.nb_time_step
                self._last_successful_alarm_raised[
                    action.raise_alarm
                ] = env.nb_time_step
                self._all_successful_alarms.append(
                    (env.nb_time_step, copy.deepcopy(action.raise_alarm))
                )
            else:
                # not enough budget: the budget is left unchanged and the error
                # is returned (not raised) to the caller
                current_budget = self._current_budget
                # self._current_budget = 0
                return NotEnoughAttentionBudget(
                    f"You need a budget of {self._alarm_cost} to raise an alarm "
                    f"but you had only {current_budget}. Nothing is done."
                )
        else:
            # no alarm has been raised, budget increases (capped at the maximum)
            self._current_budget = min(
                self._max_budget, self._budget_per_ts + self._current_budget
            )
        return None
class _LinearAttentionBudgetByLine:
    """Currently not implemented, DO NOT USE !

    Budget for the "alert" feature. Mirrors the alarm budget: raising alerts costs
    ``alert_cost`` per alerted line, and a step without any alert credits
    ``budget_per_ts`` (capped at ``max_budget``).
    """

    def __init__(self):
        self._max_budget = None
        self._budget_per_ts = None
        self._alert_cost = None
        self._current_budget = None
        self._init_budget = None
        self._time_last_alert_raised = dt_int(-1)
        self._time_last_successful_alert_raised = dt_int(-1)
        self._is_last_alert_successful = None
        self._time_window = None
        self._alert_size = None
        self._last_alert_action_filtered_by_budget = None
        # default value so that reset() can be called before init() without
        # raising an AttributeError; overwritten for real in init()
        self._dim_alerts = 0

    @property
    def time_last_alert_raised(self):
        """
        Time step of the last alert raised (-1 if no alert has been raised yet)

        Returns
        -------

        """
        return self._time_last_alert_raised

    @property
    def time_last_successful_alert_raised(self):
        """time of the last successful alert raised"""
        return self._time_last_successful_alert_raised

    @property
    def current_budget(self):
        """current attention budget"""
        return self._current_budget

    @property
    def is_last_alert_successful(self):
        """
        boolean value telling whether the alert is successfully raised (according to compliance with the budget)
        """
        return self._is_last_alert_successful

    @property
    def last_alert_action_filtered_by_budget(self):
        """the actual alert action, after filtering to be compliant with the budget constraint"""
        return self._last_alert_action_filtered_by_budget

    @property
    def time_window(self):
        """size (in steps) of the alert time window, read from the parameters"""
        return self._time_window

    def init(
        self, partial_env, init_budget, max_budget, budget_per_ts, alert_cost, **kwargs
    ):
        """Configure the budget from the environment parameters and reset it."""
        self._max_budget = dt_float(max_budget)
        self._budget_per_ts = dt_float(budget_per_ts)
        self._alert_cost = dt_float(alert_cost)
        self._init_budget = dt_float(init_budget)
        self._time_window = partial_env.parameters.ALERT_TIME_WINDOW
        self._dim_alerts = 0
        if "dim_alerts" in kwargs:
            self._dim_alerts = kwargs["dim_alerts"]
        self.reset()

    def reset(self):
        """
        called each time the scenario is over by the environment

        Returns
        -------

        """
        self._current_budget = self._init_budget
        self._time_last_alert_raised = dt_int(-1)
        self._time_last_successful_alert_raised = dt_int(-1)
        self._is_last_alert_successful = False
        self._last_alert_action_filtered_by_budget = np.full(self._dim_alerts, False, dtype=dt_bool)

    def get_state(self):
        """used to retrieve the state in simulate"""
        res = (
            self._time_last_alert_raised,
            self._current_budget,
            self._time_last_successful_alert_raised,
            self._is_last_alert_successful,
            self._last_alert_action_filtered_by_budget
        )
        return res

    def set_state(self, state):
        """used to update the internal state of the budget, for simulate"""
        (
            _time_last_alert_raised,
            _current_budget,
            _time_last_successful_alert_raised,
            _is_last_alert_successful,
            _last_alert_action_filtered_by_budget
        ) = state
        self._time_last_alert_raised = _time_last_alert_raised
        self._current_budget = _current_budget
        self._time_last_successful_alert_raised = _time_last_successful_alert_raised
        self._is_last_alert_successful = _is_last_alert_successful
        self._last_alert_action_filtered_by_budget = _last_alert_action_filtered_by_budget

    def register_action(self, env, action, is_action_illegal, is_action_ambiguous):
        """
        INTERNAL

        Called at each step to update the budget according to the action played

        Parameters
        ----------
        env: the (partial) environment, read for its ``nb_time_step``
        action: the action just played
        is_action_illegal: whether the action was replaced by "do nothing" because illegal
        is_action_ambiguous: whether the action was replaced by "do nothing" because ambiguous

        Returns
        -------
        ``None`` when everything went smoothly, otherwise a
        :class:`NotEnoughAttentionBudget` exception instance (returned, not raised).
        """
        if action.dim_alerts == 0 or is_action_illegal or is_action_ambiguous:
            # this feature is not supported (grid2op <= 1.6.0) or is not activated
            # also, if the action is illegal or ambiguous, it is replaced with do nothing,
            # and i don't really want to affect the budget in this case
            self._is_last_alert_successful = False
            self._last_alert_action_filtered_by_budget[:] = False
            return None

        self._is_last_alert_successful = False
        self._last_alert_action_filtered_by_budget[:] = False
        if action.alert_raised().size:
            # at least one alert has been raised
            self._time_last_alert_raised = env.nb_time_step
            nb_of_alerts = sum(action._raise_alert)
            total_cost = self._alert_cost * nb_of_alerts
            if self._current_budget >= total_cost:
                # The alert is raisable: pay for it and record the success.
                # BUG FIX: the success flag / filtered action used to be set *before*
                # this budget check, so a refused alert was still reported as
                # successful and left un-filtered.
                self._current_budget -= total_cost
                self._time_last_successful_alert_raised = env.nb_time_step
                self._is_last_alert_successful = True
                # BUG FIX: copy instead of aliasing the action's own array, which the
                # "[:] = False" above would otherwise clobber on the next call
                self._last_alert_action_filtered_by_budget = action._raise_alert.copy()
            else:
                # not enough budget: the alert is filtered out entirely
                current_budget = self._current_budget
                # self._current_budget = 0
                return NotEnoughAttentionBudget(
                    f"You need a budget of {total_cost} to raise an alert "
                    f"but you had only {current_budget}. Nothing is done."
                )
        else:
            # no alert has been raised, budget increases
            self._current_budget = min(
                self._max_budget, self._budget_per_ts + self._current_budget
            )
        return None
| 12,333 | 34.039773 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/rest_server/__init__.py | 0 | 0 | 0 | py | |
Grid2Op | Grid2Op-master/grid2op/rest_server/app.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import typing as t
import warnings
from flask import Flask
from flask import make_response, jsonify
from flask import request
from collections.abc import Iterable
from grid2op.rest_server.env_cache import EnvCache
import argparse
try:
    import ujson
    from flask.json import JSONEncoder, JSONDecoder

    # define the encoder
    class CustomJSONEncoder(JSONEncoder):
        """Json encoder that tries the (much faster) ujson serializer first and
        falls back on flask's default one for objects ujson cannot handle."""

        def default(self, obj):
            try:
                # NOTE(review): returning a *string* from ``default`` means the
                # object ends up double-encoded in the final payload — TODO confirm
                # this is the intended behaviour
                return ujson.dumps(obj)
            except TypeError:
                return JSONEncoder.default(self, obj)

    # define the decoder
    class CustomJSONDecoder(JSONDecoder):
        """Json decoder that tries the (much faster) ujson first and falls back
        on flask's implementation for inputs ujson cannot handle."""

        def dump(
            self,
            obj: t.Any,
            fp: t.IO[str],
            app: t.Optional["Flask"] = None,
            **kwargs: t.Any,
        ) -> None:
            try:
                return ujson.dump(obj=obj, fp=fp)
            except TypeError:
                # BUG FIX: used to call CustomJSONDecoder.dump (i.e. itself),
                # which recursed infinitely; the fallback must be the parent class
                return JSONDecoder.dump(self, obj, fp, app, **kwargs)

        def dumps(
            self, obj: t.Any, app: t.Optional["Flask"] = None, **kwargs: t.Any
        ) -> str:
            try:
                return ujson.dumps(obj=obj)
            except TypeError:
                # BUG FIX: fall back on the parent, not on ourselves (infinite recursion)
                return JSONDecoder.dumps(self, obj, app, **kwargs)

        def loads(
            self, s: str, app: t.Optional["Flask"] = None, **kwargs: t.Any
        ) -> t.Any:
            try:
                return ujson.loads(s)
            except TypeError:
                # BUG FIX: fall back on the parent, not on ourselves (infinite recursion)
                return JSONDecoder.loads(self, s, app, **kwargs)

        def load(
            self, fp: t.IO[str], app: t.Optional["Flask"] = None, **kwargs: t.Any
        ) -> t.Any:
            try:
                return ujson.load(fp=fp)
            except TypeError:
                # BUG FIX: fall back on the parent, not on ourselves (infinite recursion)
                return JSONDecoder.load(self, fp, app, **kwargs)

    UJSON_AS_JSON = True
except ImportError as exc:
    warnings.warn("ujson not available, expect some degraded performance")
    UJSON_AS_JSON = False
# process-wide cache holding every environment created through this API
ENV_CACHE = EnvCache(UJSON_AS_JSON)
app = Flask(__name__)
if UJSON_AS_JSON:
    # plug the ujson-backed (de)serializers defined above into flask
    app.json_encoder = CustomJSONEncoder
    app.json_decoder = CustomJSONDecoder
# TODO for improved security, not sure it's needed
if False:
    # dead code, deliberately disabled: CSRF protection kept as a reminder
    from flask_wtf.csrf import CSRFProtect

    csrf = CSRFProtect()
    csrf.init_app(app)
    # set the env variable this way before starting : `set WTF_CSRF_SECRET_KEY=...`
    SECRET_KEY = os.urandom(32)
    app.config["SECRET_KEY"] = SECRET_KEY
@app.route("/")
def index():
    """Landing page: a short plain-text description of the service."""
    msg = (
        "Welcome to grid2op. This small server lets you use grid2op as an web service to use some grid2op "
        "features for example in different computer languages. See the documentation for more information."
        "(work in progress)"
        "(alpha mode at the moment)"
    )
    return msg
@app.route("/make/<env_name>")
def make_env(env_name):
    """
    Create a new environment named "env_name"; equivalent to calling
    ``grid2op.make(env_name)`` followed by ``obs = env.reset()``.

    TODO support parameters and backend and all the other kwargs of make

    Notes
    ------
    Simple `get` request.

    Returns
    -------
    A json with keys "id" (id of the created environment), "env_name" and "obs"
    (json representation of the observation obtained right after creation).
    """
    env_id, obs_json, exc_ = ENV_CACHE.insert_env(env_name)
    if exc_ is not None:
        err = {
            "error": f'Impossible to create environment "{env_name}" with error:\n{exc_}'
        }
        return make_response(jsonify(err), 400)
    return jsonify({"id": env_id, "env_name": env_name, "obs": obs_json})
@app.route("/reset/<env_name>/<env_id>")
def reset(env_name, env_id):
    """
    Reset the environment with id "env_id" and name "env_name"; equivalent to ``env.reset()``.

    Notes
    ------
    Simple `get` request.

    Returns
    -------
    A json with keys "id", "env_name" and "obs" (json representation of the
    observation obtained right after the reset).
    """
    obs_json, (error_code, error_msg) = ENV_CACHE.reset(env_name, env_id)
    if error_code is None:
        payload = {"id": env_id, "env_name": env_name, "obs": obs_json}
        return make_response(jsonify(payload))
    return make_response(jsonify({"error": error_msg, "error_code": error_code}), 400)
@app.route("/close/<env_name>/<env_id>")
def close(env_name, env_id):
    """
    Close the environment with id "env_id" and name "env_name"; equivalent to
    ``env.close()``. Any further use of this environment will raise an error.

    Notes
    ------
    Simple `get` request.

    Returns
    -------
    A json with keys "id" and "env_name".
    """
    error_code, error_msg = ENV_CACHE.close(env_name, env_id)
    if error_code is None:
        return make_response(jsonify({"id": env_id, "env_name": env_name}))
    return make_response(jsonify({"error": error_msg, "error_code": error_code}), 400)
@app.route("/get_path_env/<env_name>/<env_id>")
def get_path_env(env_name, env_id):
    """
    Return the path where the environment with id "env_id" and name "env_name"
    is located; equivalent to ``env.get_path_env()``.

    **NB** the returned path is local to the server, not the client!

    Notes
    ------
    Simple `get` request.

    Returns
    -------
    A json with keys "id", "env_name" and "path".
    """
    path, (error_code, error_msg) = ENV_CACHE.get_path_env(env_name, env_id)
    if error_code is None:
        payload = {"id": env_id, "env_name": env_name, "path": path}
        return make_response(jsonify(payload))
    return make_response(jsonify({"error": error_msg, "error_code": error_code}), 400)
@app.route("/get_thermal_limit/<env_name>/<env_id>")
def get_thermal_limit(env_name, env_id):
    """
    Return the thermal limit of each powerline of the environment with id
    "env_id" and name "env_name"; equivalent to ``env.get_thermal_limit()``.

    Notes
    ------
    Simple `get` request.

    Returns
    -------
    A json with keys "id", "env_name" and "thermal_limit" (a ``list``, one value per powerline).
    """
    th_lim, (error_code, error_msg) = ENV_CACHE.get_thermal_limit(env_name, env_id)
    if error_code is None:
        payload = {"id": env_id, "env_name": env_name, "thermal_limit": th_lim}
        return make_response(jsonify(payload))
    return make_response(jsonify({"error": error_msg, "error_code": error_code}), 400)
@app.route("/step/<env_name>/<env_id>", methods=["POST"])
def step(env_name, env_id):
    """
    This call is equivalent to do: `env.step(action)` when ``env`` is the environment with id "env_id" and name
    "env_name".

    Notes
    ------
    This is a `post` request.

    The payload (data) should be a json with key "action" (``dict``) representing a valid grid2op action

    Returns
    -------
    A json with keys:

    - "id": the id of the environment
    - "env_name": the name of the environment
    - "obs": the json representation of the observation you get after this step
    - "reward": the reward you get after this step (``float``)
    - "done": a flag indicating whether or not the environment has terminated (``bool``). If this flag is
      ``True`` then you need to call `reset` on this same environment (same name, same id)
      if you want to continue to use it.
    - "info": a list of detailed information returned by step (more information in the documentation of
      :func:`grid2op.Environment.BaseEnv.step`)
    """
    # handle the action part
    if not request.json or "action" not in request.json:
        # BUG FIX: this 400 response was previously built but never returned, so
        # execution fell through and crashed on request.json["action"] below
        return make_response(
            jsonify(
                {"error": f'You need to provide an action in order to do a "step".'}
            ),
            400,
        )
    (obs, reward, done, info), (error_code, error_msg) = ENV_CACHE.step(
        env_name, env_id, request.json["action"]
    )
    if error_code is not None:
        return make_response(
            jsonify({"error": error_msg, "error_code": error_code}), 400
        )
    resp = {
        "id": env_id,
        "env_name": env_name,
        "obs": obs,
        "reward": reward,
        "done": done,
        "info": info,
    }
    return make_response(jsonify(resp))
@app.route("/seed/<env_name>/<env_id>", methods=["POST"])
def seed(env_name, env_id):
    """
    This call is equivalent to do: `env.seed(seed)` when ``env`` is the environment with id "env_id" and name
    "env_name".

    Notes
    ------
    This is a `post` request.

    The payload (data) should be a json with key "seed" (``int``) representing the seed (an integer) you want to use.

    Returns
    -------
    A json with keys:

    - "id": the id of the environment
    - "env_name": the name of the environment
    - "seeds": the seeds used to ensure reproducibility of all the environment components
      (more information in the documentation of :func:`grid2op.Environment.BaseEnv.seed`)
    - "info": a generic text to make sure you know that you need to call reset before it has any effect.
    """
    if not request.json or "seed" not in request.json:
        # BUG FIX: the 400 response was built but never returned, and the message
        # wrongly spoke about an "action" instead of a seed
        return make_response(
            jsonify(
                {
                    "error": 'You need to provide a seed in order to "seed" the environment.'
                }
            ),
            400,
        )
    seeds, (error_code, error_msg) = ENV_CACHE.seed(
        env_name, env_id, request.json["seed"]
    )
    if error_code is not None:
        return make_response(
            jsonify({"error": error_msg, "error_code": error_code}), 400
        )
    resp = {
        "id": env_id,
        "env_name": env_name,
        "seeds": seeds,
        "info": "this has no effect until reset() is called",
    }
    return make_response(jsonify(resp))
@app.route("/set_id/<env_name>/<env_id>", methods=["POST"])
def set_id(env_name, env_id):
    """
    This call is equivalent to do: `env.set_id(id)` when ``env`` is the environment with id "env_id" and name
    "env_name".

    It has no effect unless "reset" is used.

    Notes
    ------
    This is a `post` request.

    The payload (data) should be a json with key "id" (``int``) representing the chronic id you want to go to.

    Returns
    -------
    A json with keys:

    - "id": the id of the environment
    - "env_name": the name of the environment
    - "info": a generic text to make sure you know that you need to call reset before it has any effect.
    """
    if not request.json or "id" not in request.json:
        # BUG FIX: the 400 response was previously built but never returned
        return make_response(
            jsonify(
                {
                    "error": f'You need to provide an id in order to "set_id" the environment.'
                }
            ),
            400,
        )
    error_code, error_msg = ENV_CACHE.set_id(env_name, env_id, request.json["id"])
    if error_code is not None:
        return make_response(
            jsonify({"error": error_msg, "error_code": error_code}), 400
        )
    resp = {
        "id": env_id,
        "env_name": env_name,
        "info": "this has no effect until reset() is called",
    }
    return make_response(jsonify(resp))
@app.route("/set_thermal_limit/<env_name>/<env_id>", methods=["POST"])
def set_thermal_limit(env_name, env_id):
    """
    This call is equivalent to do: `env.set_thermal_limit(thermal_limits)` when
    ``env`` is the environment with id "env_id" and name "env_name".

    Notes
    ------
    This is a `post` request.

    The payload (data) should be a json with key "thermal_limits" (``list``) representing the new thermal limit you
    want to use.

    Returns
    -------
    A json with keys:

    - "id": the id of the environment
    - "env_name": the name of the environment
    """
    if not request.json or "thermal_limits" not in request.json:
        # BUG FIX: the 400 response was previously built but never returned
        return make_response(
            jsonify(
                {
                    "error": f'You need to provide thermal limits in order to "set_thermal_limit" '
                    f"the environment."
                }
            ),
            400,
        )
    error_code, error_msg = ENV_CACHE.set_thermal_limit(
        env_name, env_id, request.json["thermal_limits"]
    )
    if error_code is not None:
        return make_response(
            jsonify({"error": error_msg, "error_code": error_code}), 400
        )
    resp = {"id": env_id, "env_name": env_name}
    return make_response(jsonify(resp))
@app.route("/fast_forward_chronics/<env_name>/<env_id>", methods=["POST"])
def fast_forward_chronics(env_name, env_id):
    """
    This call is equivalent to do: `env.fast_forward_chronics(nb_step)` when
    ``env`` is the environment with id "env_id" and name "env_name".

    Notes
    ------
    This is a `post` request.

    The payload (data) should be a json with key "nb_step" (``int``) representing the number of step you want to
    "fast forward" to.

    Returns
    -------
    A json with keys:

    - "id": the id of the environment
    - "env_name": the name of the environment
    """
    if not request.json or "nb_step" not in request.json:
        # BUG FIX: the 400 response was previously built but never returned
        return make_response(
            jsonify(
                {
                    "error": f'You need to provide a number of step in order to "fast_forward_chronics" '
                    f"the environment."
                }
            ),
            400,
        )
    error_code, error_msg = ENV_CACHE.fast_forward_chronics(
        env_name, env_id, request.json["nb_step"]
    )
    if error_code is not None:
        return make_response(
            jsonify({"error": error_msg, "error_code": error_code}), 400
        )
    resp = {"id": env_id, "env_name": env_name}
    return make_response(jsonify(resp))
@app.route("/train_val_split/<env_name>/<env_id>", methods=["POST"])
def train_val_split(env_name, env_id):
    """
    This call is equivalent to do: `env.train_val_split(chron_id_val)` when
    ``env`` is the environment with id "env_id" and name "env_name".

    Notes
    ------
    This is a `post` request.

    The payload (data) should be a json with key "chron_id_val" (``list``)
    representing the ids of the chronics that will be put aside in the validation set.

    Returns
    -------
    A json with keys:

    - "id": the id of the environment
    - "env_name": the name of the environment
    - "nm_train": name of the environment you can use as training environment, that will contain all the initial
      chronics except the one specified in `chron_id_val`. You may initialize it with `make/nm_train`
    - "nm_val": name of the environment you can use as validation environment, that will contain only the chronics
      ids specified in `chron_id_val`. You may initialize it with `make/nm_val`
    """
    if not request.json or "chron_id_val" not in request.json:
        # BUG FIX: the 400 response was previously built but never returned
        return make_response(
            jsonify(
                {
                    "error": f"You need to provide with the id of the chronics that will go to "
                    f"the validation set "
                }
            ),
            400,
        )
    chron_id_val = request.json["chron_id_val"]
    if not isinstance(chron_id_val, Iterable):
        # BUG FIX: the 400 response was previously built but never returned
        return make_response(
            jsonify(
                {
                    "error": f'"chron_id_val" should be an iterable representing the name of the '
                    f"scenarios "
                    f"you want to place in the validation set."
                }
            ),
            400,
        )
    (nm_train, nm_val), (error_code, error_msg) = ENV_CACHE.train_val_split(
        env_name, env_id, chron_id_val
    )
    if error_code is not None:
        return make_response(
            jsonify({"error": error_msg, "error_code": error_code}), 400
        )
    resp = {"id": env_id, "env_name": env_name, "nm_train": nm_train, "nm_val": nm_val}
    return make_response(jsonify(resp))
# TODO
# set_id
# set_thermal_limit
# get_thermal_limit
# fast_forward_chronics
# get_path_env
# close
# train_val_split # not tested
# TODO
# asynch here!
def str2bool(v):
    """Parse a boolean-ish command line value.

    Accepts an actual ``bool`` (returned unchanged) or a string such as
    "yes"/"no", "true"/"false", "t"/"f", "y"/"n", "1"/"0" (case insensitive);
    raises ``argparse.ArgumentTypeError`` for anything else.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
if __name__ == "__main__":
    # command line entry point: start a mono thread / mono process server
    parser = argparse.ArgumentParser(
        description="Start a mono thread / mono process grid2op environment server"
    )
    parser.add_argument(
        "--port",
        type=int,
        default=3000,
        help="On which port to start the server (default 3000)",
    )
    parser.add_argument(
        "--debug",
        type=str2bool,  # accepts yes/no, true/false, t/f, y/n, 1/0 (case insensitive)
        nargs="?",
        const=True,  # a bare "--debug" (without value) enables debug mode
        default=False,
        help="Start the flask server in debug mode (default: False).",
    )
    args = parser.parse_args()
    app.run(debug=args.debug, port=args.port)
| 18,252 | 29.523411 | 118 | py |
Grid2Op | Grid2Op-master/grid2op/rest_server/env_cache.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from collections.abc import Iterable
import numpy as np
from grid2op.MakeEnv import make
try:
from lightsim2grid import LightSimBackend
bkclass = LightSimBackend
# raise ImportError()
except ImportError as excq_:
from grid2op.Backend import PandaPowerBackend
bkclass = PandaPowerBackend
pass
class EnvCache(object):
    """
    Hold, for each environment name, the list of live grid2op environments created
    through the REST API, and expose every remote-callable operation on them.

    TODO this is not implemented yet (at least the "cache" part)

    We should have a flag that when an environment is computing, it returns an "error" to indicate that on the person
    who makes the call (for now i'm pretty sure the current implementation will not work in asynch mode).

    Every public method returns its payload together with an ``(error_code, error_msg)``
    pair; ``(None, None)`` means success.
    """

    # error codes sent back to the caller alongside a human readable message
    ENV_NOT_FOUND = 0
    ENV_ID_NOT_FOUND = 1
    INVALID_ACTION = 2
    INVALID_STEP = 3
    ERROR_ENV_SEED = 4
    ERROR_ENV_RESET = 5
    ERROR_ENV_SET_ID = 6
    ERROR_ENV_THERMAL_LIMIT = 7
    ERROR_CLOSE = 8
    ERROR_ENV_FAST_FORWARD = 9
    ERROR_ENV_PATH = 10

    def __init__(self, ujson_as_json):
        # map: env_name -> list of live environments created with that name
        self.all_env = {}
        self.ujson_as_json = (
            ujson_as_json  # do i use the faster "ujson" library to parse json
        )
        # obs.to_json only needs to pre-convert when the (slower) stdlib json is used
        self._convert_json = not self.ujson_as_json

    def insert_env(self, env_name):
        """
        Create a new environment named ``env_name``, reset it and store it in the cache.

        Returns ``(id, obs_as_json, None)`` on success, ``(None, None, exc)`` on failure.
        """
        try:
            env = make(env_name, backend=bkclass())  # TODO look at the RemoteEnv here
            # forecasts are not exposed through the API: save their computation time
            env.deactivate_forecast()
        except Exception as exc_:
            return None, None, exc_
        if env_name not in self.all_env:
            # first environment created with that name
            self.all_env[env_name] = [env]
        else:
            self.all_env[env_name].append(env)
        id_ = len(self.all_env[env_name]) - 1
        obs = env.reset()
        return id_, obs.to_json(convert=self._convert_json), None

    def step(self, env_name, env_id, action_as_json):
        """
        Perform ``env.step`` with the action described (as json) in ``action_as_json``.

        Returns ``((obs, reward, done, info), (error_code, error_msg))``; the first
        member is ``(None, None, None, None)`` on failure.
        """
        res_env = (None, None, None, None)
        env, (error_id, error_msg) = self._aux_get_env(env_name, env_id)
        if error_id is not None:
            return res_env, (error_id, error_msg)
        try:
            act = env.action_space()
            act.from_json(action_as_json)
        except Exception as exc_:
            msg_ = (
                f"impossible to convert the provided action to a valid action on this environment with error:\n"
                f"{exc_}"
            )
            return res_env, (self.INVALID_ACTION, msg_)
        try:
            obs, reward, done, info = env.step(act)
        except Exception as exc_:
            msg_ = (
                f"impossible to make a step on the give environment with error\n{exc_}"
            )
            return res_env, (self.INVALID_STEP, msg_)
        return (
            obs.to_json(convert=self._convert_json),
            float(reward),
            bool(done),
            self._aux_info_to_json(info),
        ), (None, None)

    def seed(self, env_name, env_id, seed):
        """Seed the environment; returns the seeds (as json friendly lists) used by its components."""
        env, (error_id, error_msg) = self._aux_get_env(env_name, env_id)
        if error_id is not None:
            return None, (error_id, error_msg)
        try:
            seeds = env.seed(seed)
        except Exception as exc_:
            msg_ = f"Impossible to seed the environment with error:\n{exc_}"
            return None, (self.ERROR_ENV_SEED, msg_)
        return self._aux_array_to_json(seeds), (None, None)

    def reset(self, env_name, env_id):
        """Reset the environment; returns the first observation (as json)."""
        env, (error_id, error_msg) = self._aux_get_env(env_name, env_id)
        if error_id is not None:
            return None, (error_id, error_msg)
        try:
            obs = env.reset()
        except Exception as exc_:
            msg_ = f"Impossible to reset the environment with error {exc_}"
            return None, (self.ERROR_ENV_RESET, msg_)
        return obs.to_json(convert=self._convert_json), (None, None)

    def set_id(self, env_name, env_id, chron_id):
        """Set the id of the chronics used at the next reset (``env.set_id``)."""
        env, (error_id, error_msg) = self._aux_get_env(env_name, env_id)
        if error_id is not None:
            return error_id, error_msg
        try:
            env.set_id(chron_id)
        except Exception as exc_:
            msg_ = f"Impossible to set the chronics id of the environment with error:\n {exc_}"
            return self.ERROR_ENV_SET_ID, msg_
        return None, None

    def set_thermal_limit(self, env_name, env_id, thermal_limit):
        """Overwrite the thermal limit of every powerline (``env.set_thermal_limit``)."""
        env, (error_id, error_msg) = self._aux_get_env(env_name, env_id)
        if error_id is not None:
            return error_id, error_msg
        try:
            env.set_thermal_limit(thermal_limit)
        except Exception as exc_:
            msg_ = f"Impossible to set the thermal limits of the environment with error:\n {exc_}"
            return self.ERROR_ENV_THERMAL_LIMIT, msg_
        return None, None

    def get_thermal_limit(self, env_name, env_id):
        """Return the thermal limit of every powerline as a plain ``list``."""
        env, (error_id, error_msg) = self._aux_get_env(env_name, env_id)
        if error_id is not None:
            return None, (error_id, error_msg)
        try:
            res = env.get_thermal_limit()
        except Exception as exc_:
            msg_ = f"Impossible to get the thermal limits of the environment with error:\n {exc_}"
            return None, (self.ERROR_ENV_THERMAL_LIMIT, msg_)
        # numpy array -> json friendly list
        res = res.tolist()
        return res, (None, None)

    def close(self, env_name, env_id):
        """Close the environment (any later use of it will raise on the grid2op side)."""
        env, (error_id, error_msg) = self._aux_get_env(env_name, env_id)
        if error_id is not None:
            return error_id, error_msg
        try:
            env.close()
        except Exception as exc_:
            msg_ = f"Impossible to close the environment with error:\n {exc_}"
            return self.ERROR_CLOSE, msg_
        return None, None

    def fast_forward_chronics(self, env_name, env_id, nb_step):
        """Fast forward the current chronics by ``nb_step`` steps."""
        env, (error_id, error_msg) = self._aux_get_env(env_name, env_id)
        if error_id is not None:
            return error_id, error_msg
        try:
            env.fast_forward_chronics(nb_step)
        except Exception as exc_:
            msg_ = f"Impossible to fast forward the environment with error:\n {exc_}"
            return self.ERROR_ENV_FAST_FORWARD, msg_
        return None, None

    def get_path_env(self, env_name, env_id):
        """Return the (server-local) path where the environment data is located."""
        env, (error_id, error_msg) = self._aux_get_env(env_name, env_id)
        if error_id is not None:
            return None, (error_id, error_msg)
        try:
            res = env.get_path_env()
        except Exception as exc_:
            # BUG FIX: the message used to be a copy-paste of fast_forward_chronics'
            msg_ = f"Impossible to get the path of the environment with error:\n {exc_}"
            return None, (self.ERROR_ENV_PATH, msg_)
        return res, (None, None)

    def train_val_split(self, env_name, env_id, id_chron_val):
        """
        Split the environment chronics into a "train" and a "val" environment
        (``env.train_val_split``); returns ``(nm_train, nm_val)``.
        """
        env, (error_id, error_msg) = self._aux_get_env(env_name, env_id)
        if error_id is not None:
            return (None, None), (error_id, error_msg)
        try:
            res = env.train_val_split(
                val_scen_id=id_chron_val, add_for_train="train", add_for_val="val"
            )
        except Exception as exc_:
            msg_ = f"Impossible to split the environment with error:\n {exc_}"
            # NOTE(review): reuses ERROR_ENV_PATH; a dedicated error code would be
            # clearer but existing clients may rely on this value
            return (None, None), (self.ERROR_ENV_PATH, msg_)
        return res, (None, None)

    def _aux_array_to_json(self, array):
        """Recursively convert (possibly nested) arrays into json friendly lists."""
        if isinstance(array, Iterable):
            res = None
            if isinstance(array, np.ndarray):
                if array.shape == ():
                    # a 0-d array carries no element: serialize as an empty list
                    res = []
            if res is None:
                res = [self._aux_array_to_json(el) for el in array]
            return res
        else:
            # NOTE(review): scalar leaves are always sent back as float, even for
            # integer seeds — TODO confirm clients are fine with that
            return float(array)

    def _aux_info_to_json(self, info):
        """Convert the "info" dict returned by ``env.step`` into json friendly types."""
        # TODO
        res = {}
        res["disc_lines"] = [int(el) for el in info["disc_lines"]]
        res["is_illegal"] = bool(info["is_illegal"])
        res["is_ambiguous"] = bool(info["is_ambiguous"])
        res["is_dispatching_illegal"] = bool(info["is_dispatching_illegal"])
        res["is_illegal_reco"] = bool(info["is_illegal_reco"])
        if info["opponent_attack_line"] is not None:
            res["opponent_attack_line"] = [
                bool(el) for el in info["opponent_attack_line"]
            ]
        else:
            res["opponent_attack_line"] = None
        res["exception"] = [f"{exc_}" for exc_ in info["exception"]]
        return res

    def _aux_get_env(self, env_name, env_id):
        """Fetch the cached environment; returns ``(env, (None, None))`` or ``(None, (code, msg))``."""
        if env_name not in self.all_env:
            return None, (
                self.ENV_NOT_FOUND,
                f'environment "{env_name}" does not exists',
            )
        li_env = self.all_env[env_name]
        env_id = int(env_id)
        nb_env = len(li_env)
        if env_id >= nb_env:
            msg_ = (
                f"you asked to run the environment {env_id} of {env_name}. But there are only {nb_env} "
                f"such environments"
            )
            return None, (self.ENV_ID_NOT_FOUND, msg_)
        env = li_env[env_id]
        return env, (None, None)
| 9,776 | 32.368601 | 117 | py |
Grid2Op | Grid2Op-master/grid2op/rest_server/multi_env_server.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import os
import requests
import time
import grid2op
import numpy as np
from tqdm import tqdm
import asyncio
import aiohttp
import warnings
import subprocess
import sys
try:
import ujson
requests.models.complexjson = ujson
except ImportError as exc_:
warnings.warn(
"usjon is not installed. You could potentially get huge benefit if installing it"
)
# message used when one of the sub-servers does not answer with HTTP 200
ERROR_NO_200 = "error due to not receiving 200 status"
# number of sub environments / sub servers spawned
NB_SUB_ENV = 4
# name of the grid2op environment every sub server loads
ENV_NAME = "l2rpn_neurips_2020_track2_small"
# if True use blocking "requests" calls, otherwise asyncio + aiohttp
SYNCH = True
# number of steps performed by the benchmark in the __main__ block below
NB_step = 300
PORTS = [3000 + i for i in range(NB_SUB_ENV)]  # TODO start them on the fly
class MultiEnvServer:
    """
    Spawn one REST sub-server (``app.py``) per port and drive them all like a
    grid2op "multi environment": a local environment provides the action /
    observation spaces, the actual steps are performed remotely over HTTP.

    Alpha feature, see the warning emitted at construction.
    """

    def __init__(self, ports=PORTS, env_name=ENV_NAME, address="http://127.0.0.1"):
        warnings.warn(
            "This is an alpha feature and has absolutely not interest at the moment. Do not use unless "
            "you want to improve this feature yourself (-:"
        )
        # BUG FIX: locate app.py relative to this file instead of the hard coded
        # path that only existed on the original author's machine
        app_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "app.py")
        self.my_procs = []
        for port in ports:
            # one sub server (mono thread flask app) per port
            p_ = subprocess.Popen(
                [
                    sys.executable,
                    app_path,
                    "--port",
                    f"{port}",
                ],
                env=os.environ,
                # stdout=subprocess.DEVNULL,  # TODO logger
                # stderr=subprocess.DEVNULL  # TODO logger
            )
            self.my_procs.append(p_)
        self.nb_env = len(ports)
        self.ports = ports
        self.address = address
        self.li_urls = ["{}:{}".format(address, port) for port in ports]
        self.env_name = env_name
        # local copy of the environment, only used for its action / observation spaces
        self._local_env = grid2op.make(env_name)
        if SYNCH:
            self.session = requests.session()
        else:
            self.loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.loop)
        self.action_space = self._local_env.action_space
        self.observation_space = self._local_env.observation_space
        # create one remote environment on each sub server
        if SYNCH:
            answ_json = self._make_env_synch()
        else:
            answ_json = self.loop.run_until_complete(self._make_env_asynch())
        self.env_id = [int(el["id"]) for el in answ_json]
        self.obs = [el["obs"] for el in answ_json]

    def _make_env_synch(self):
        """Create the remote environments one by one with blocking requests."""
        answs = []
        for url in self.li_urls:
            resp = self.session.get(f"{url}/make/{self.env_name}")
            answs.append(resp)
        # BUG FIX: removed a leftover "import pdb; pdb.set_trace()" debugging trap
        assert np.all(np.array([el.status_code for el in answs]) == 200), ERROR_NO_200
        answ_json = [el.json() for el in answs]
        return answ_json

    async def _make_env_asynch(self):
        """Create the remote environments using asyncio / aiohttp."""
        answ_json = []
        async with aiohttp.ClientSession() as session:
            for url in self.li_urls:
                async with session.get(f"{url}/make/{self.env_name}") as resp:
                    if resp.status != 200:
                        raise RuntimeError(ERROR_NO_200)
                    answ_json.append(await resp.json())
        return answ_json

    def _step_synch(self, acts):
        """Send one action per sub server (blocking) and return the json answers."""
        answs = []
        for url, id_env, act in zip(self.li_urls, self.env_id, acts):
            resp = self.session.post(
                f"{url}/step/{self.env_name}/{id_env}", json={"action": act.to_json()}
            )
            answs.append(resp)
        answs = [el.json() for el in answs]
        return answs

    async def _step_asynch(self, acts):
        """Send one action per sub server (asyncio) and return the json answers."""
        answs = []
        async with aiohttp.ClientSession() as session:
            for url, id_env, act in zip(self.li_urls, self.env_id, acts):
                async with session.post(
                    f"{url}/step/{self.env_name}/{id_env}",
                    json={"action": act.to_json()},
                ) as resp:
                    if resp.status != 200:
                        raise RuntimeError(ERROR_NO_200)
                    answs.append(await resp.json())
        return answs

    def step(self, acts):
        """Perform one step on every sub environment.

        Returns four lists (one element per sub environment): observations (json),
        rewards, done flags and info dicts.
        """
        if SYNCH:
            answ_json = self._step_synch(acts)
        else:
            answ_json = self.loop.run_until_complete(self._step_asynch(acts))
        obss = [el["obs"] for el in answ_json]
        rewards = [el["reward"] for el in answ_json]
        info = [el["info"] for el in answ_json]
        done = [el["done"] for el in answ_json]
        return obss, rewards, done, info

    def close(self):
        """close all the opened port"""
        for p_ in self.my_procs:
            p_.terminate()
            p_.kill()
if __name__ == "__main__":
    # small benchmark: spawn the sub servers and time NB_step "do nothing" steps
    multi_env = MultiEnvServer()
    try:
        beg = time.perf_counter()
        for _ in tqdm(range(NB_step)):
            obs, reward, done, info = multi_env.step(
                [multi_env.action_space() for _ in range(multi_env.nb_env)]
            )
        end = time.perf_counter()
    finally:
        # make sure the spawned sub processes are killed even if the benchmark fails
        multi_env.close()
    print(
        f"Using {'synchronous' if SYNCH else 'asyncio'}, it took {end-beg:.2f}s to make {NB_step} steps "
        f"on {ENV_NAME} using {len(PORTS)} sub environment(s)."
    )
| 5,518 | 32.448485 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/rest_server/test_server.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import warnings
import requests
try:
import ujson
requests.models.complexjson = ujson
except ImportError as exc_:
warnings.warn(
"usjon is not installed. You could potentially get huge benefit if installing it"
)
import time
import numpy as np
from tqdm import tqdm
from grid2op.MakeEnv import make
try:
from lightsim2grid import LightSimBackend
bkclass = LightSimBackend
# raise ImportError()
except ImportError as exc_:
from grid2op.Backend import PandaPowerBackend
bkclass = PandaPowerBackend
warnings.warn(
"lightsim2grid is not installed. You could potentially get huge benefit if installing it"
)
pass
# base URL of the grid2op REST server under test
# NOTE(review): the string starts with a space; URL parsing usually strips
# leading whitespace so this still works, but it looks unintentional — confirm
URL = " http://127.0.0.1:5000"
# successive assignments: only the LAST value of env_name is actually used.
# The earlier lines are presumably kept so the benchmarked environment can be
# switched by commenting lines out.
env_name = "l2rpn_case14_sandbox"
env_name = "l2rpn_neurips_2020_track1_small"
env_name = "l2rpn_neurips_2020_track2_small"
def assert_rec_equal(li1, li2):
    """Recursively assert that two (possibly nested) sequences are equal.

    Sequences are compared by length then element-wise; scalar leaves are
    compared with ``==``. A 0-d numpy array is treated as an empty list to
    mirror the previous behaviour of this helper.

    Raises
    ------
    AssertionError
        "wrong seed length" if two (sub) sequences differ in length,
        "wrong seed value" if two leaf values differ.
    """
    try:
        len1 = len(li1)
    except TypeError:
        # leaf: li1 has no length (plain scalar or a 0-d numpy array)
        if isinstance(li1, np.ndarray) and li1.shape == ():
            li1 = []
        assert li1 == li2, "wrong seed value"
        return
    # bug fix: the old `except Exception` also swallowed the AssertionError
    # raised just below (and any raised by the recursion), turning a length
    # mismatch into a confusing scalar comparison. Only the "no len()"
    # TypeError above is caught now.
    assert len1 == len(li2), "wrong seed length"
    for el1, el2 in zip(li1, li2):
        assert_rec_equal(el1, el2)
if __name__ == "__main__":
# create the real environment
real_env = make(env_name, backend=bkclass())
real_obs = real_env.reset()
client = requests.session()
print('Test "make"')
# test the "make" endpoint of the API
resp_make = client.get(f"{URL}/make/{env_name}")
# check that the creation is working
if resp_make.status_code != 200:
raise RuntimeError("Environment not created response not 200")
resp_make_json = resp_make.json()
if "id" not in resp_make_json:
raise RuntimeError("Environment not created (due to id not in json)")
if "obs" not in resp_make_json:
raise RuntimeError("Environment not created (due to obs not in json)")
if env_name == "l2rpn_case14_sandbox":
# the other envs are stochastic so this test cannot work right now (this is why we used the "seed" just after)
reic_obs_json = resp_make_json["obs"]
reic_obs = copy.deepcopy(real_obs)
reic_obs.set_game_over()
assert reic_obs != real_obs, "resetting the observation did not work"
reic_obs.from_json(reic_obs_json)
are_same = reic_obs == real_obs
diff_obs = reic_obs - real_obs
assert are_same, "obs received and obs computed are not the same"
# make a step with do nothing
id_env = resp_make_json["id"]
print('Test "seed"')
seed_used = 0
resp_seed = client.post(
f"{URL}/seed/{env_name}/{id_env}",
json={"seed": seed_used},
# headers={"X-CSRFToken": csrf_token}
)
if resp_seed.status_code != 200:
raise RuntimeError("Environment not seeded response not 200")
resp_seed_json = resp_seed.json()
if "seeds" not in resp_seed_json:
raise RuntimeError("Environment not seeded (due to seeds not in json)")
res_seed = real_env.seed(seed_used)
res_seed = list(res_seed)
assert_rec_equal(res_seed, resp_seed_json["seeds"])
print('Test "reset"')
resp_reset = client.get(f"{URL}/reset/{env_name}/{id_env}")
if resp_reset.status_code != 200:
raise RuntimeError("Environment not reset response not 200")
resp_reset_json = resp_reset.json()
if "obs" not in resp_reset_json:
raise RuntimeError("Environment not reset (due to obs not in json)")
real_obs = real_env.reset()
reic_obs_json = resp_reset_json["obs"]
reic_obs = copy.deepcopy(real_obs)
reic_obs.set_game_over()
assert reic_obs != real_obs, "resetting the observation did not work"
reic_obs.from_json(reic_obs_json)
are_same = reic_obs == real_obs
obs_diff, attr_diff = reic_obs.where_different(real_obs)
for el in attr_diff:
if np.max(np.abs(getattr(obs_diff, el))) > 1e-4:
tmp_ = np.max(np.abs(getattr(obs_diff, el)))
import pdb
pdb.set_trace()
raise RuntimeError(
f"ERROR: after reset, attribute {el} is not the same (max={tmp_:.6f})"
)
if not are_same:
warnings.warn(
"obs received and obs computed are not the exactly the same "
"(but equal up to some small value (1e-4))"
)
print('Test "set_id"')
resp_set_id = client.post(f"{URL}/set_id/{env_name}/{id_env}", json={"id": 0})
if resp_set_id.status_code != 200:
raise RuntimeError("set_id not working: response is not 200")
resp_set_id_json = resp_set_id.json()
if "info" not in resp_set_id_json:
raise RuntimeError("set_id not working: info is not in response")
resp_reset = client.get(f"{URL}/reset/{env_name}/{id_env}")
if resp_seed.status_code != 200:
raise RuntimeError("Environment not reset response not 200")
resp_reset_json = resp_reset.json()
if "obs" not in resp_reset_json:
raise RuntimeError("Environment not reset (due to obs not in json)")
real_env.set_id(0)
real_obs = real_env.reset()
reic_obs_json = resp_reset_json["obs"]
reic_obs = copy.deepcopy(real_obs)
reic_obs.set_game_over()
assert reic_obs != real_obs, "resetting the observation did not work"
reic_obs.from_json(reic_obs_json)
are_same = reic_obs == real_obs
assert are_same, "obs received and obs computed are not the same"
print('Test "set_thermal_limit"')
th_lim = real_env.get_thermal_limit().tolist()
resp_set_thermal_limit = client.post(
f"{URL}/set_thermal_limit/{env_name}/{id_env}", json={"thermal_limits": th_lim}
)
if resp_set_thermal_limit.status_code != 200:
raise RuntimeError("set_thermal_limit not working: response is not 200")
resp_set_thermal_limit_json = resp_set_thermal_limit.json()
if "env_name" not in resp_set_thermal_limit_json:
raise RuntimeError("set_thermal_limit not working: info is not in response")
print('Test "get_thermal_limit"')
resp_get_thermal_limit = client.get(f"{URL}/get_thermal_limit/{env_name}/{id_env}")
if resp_get_thermal_limit.status_code != 200:
raise RuntimeError("get_thermal_limit not working: response is not 200")
resp_get_thermal_limit_json = resp_get_thermal_limit.json()
if "thermal_limit" not in resp_get_thermal_limit_json:
raise RuntimeError(
"get_thermal_limit not working: thermal_limit is not in response"
)
assert (
resp_get_thermal_limit_json["thermal_limit"] == th_lim
), "get_thermal_limit not working: wrong thermal limit"
print('Test "get_path_env"')
resp_get_path_env = client.get(f"{URL}/get_path_env/{env_name}/{id_env}")
if resp_get_path_env.status_code != 200:
raise RuntimeError("get_path_env not working: response is not 200")
resp_get_path_env_json = resp_get_path_env.json()
if "path" not in resp_get_path_env_json:
raise RuntimeError("get_path_env not working: path is not in response")
assert (
resp_get_path_env_json["path"] == real_env.get_path_env()
), "get_path_env not working: wrong path"
print('Test "fast_forward_chronics"')
nb_step_forward = 10
resp_fast_forward_chronics = client.post(
f"{URL}/fast_forward_chronics/{env_name}/{id_env}",
json={"nb_step": nb_step_forward},
)
if resp_fast_forward_chronics.status_code != 200:
raise RuntimeError("set_id not working: response is not 200")
resp_fast_forward_chronics_json = resp_fast_forward_chronics.json()
if "env_name" not in resp_fast_forward_chronics_json:
raise RuntimeError(
"get_thermal_limit not working: thermal_limit is not in response"
)
act = real_env.action_space()
real_env.fast_forward_chronics(nb_step_forward)
obs, reward, done, info = real_env.step(act)
resp_step = client.post(
f"{URL}/step/{env_name}/{id_env}", json={"action": act.to_json()}
)
# check obs are equals
reic_obs_json = resp_step.json()["obs"]
reic_obs = copy.deepcopy(obs)
reic_obs.set_game_over()
assert reic_obs != real_obs, "resetting the observation did not work"
reic_obs.from_json(reic_obs_json)
are_same = reic_obs == obs
assert (
are_same
), "obs received and obs computed are not the same after fast forwarding"
print('Test "step"')
real_obs = real_env.reset()
resp_reset = client.get(f"{URL}/reset/{env_name}/{id_env}")
if resp_seed.status_code != 200:
raise RuntimeError(
"Environment not reset response not 200, fail just before assessing step"
)
nb_step = 0
obs_me = copy.deepcopy(real_obs)
while True:
act = real_env.action_space()
obs, reward, done, info = real_env.step(act)
resp_step = client.post(
f"{URL}/step/{env_name}/{id_env}", json={"action": act.to_json()}
)
if resp_step.status_code != 200:
raise RuntimeError("Step not successful not 200")
resp_step_json = resp_step.json()
if "obs" not in resp_step_json:
raise RuntimeError("Environment not created (due to obs not in json)")
assert resp_step_json["done"] == done
assert resp_step_json["reward"] == reward
if done:
break
reic_obs_json = resp_step_json["obs"]
obs_me.set_game_over()
assert (
obs_me != obs
), f"resetting the observation did not work for step {nb_step}"
obs_me.from_json(reic_obs_json)
is_correct = obs_me == obs
try:
assert (
is_correct
), f"obs received and obs computed are not the same after step for step {nb_step}"
except AssertionError as exc_:
obs_diff, attr_diff = obs_me.where_different(obs)
# import pdb
# pdb.set_trace()
nb_step += 1
print('Test "close"')
resp_close = client.get(f"{URL}/close/{env_name}/{id_env}")
if resp_close.status_code != 200:
raise RuntimeError("close not working: response is not 200")
resp_close_json = resp_close.json()
if "env_name" not in resp_close_json:
raise RuntimeError("close not working: env_name is not in response")
# TODO test all methods fails (get_thermal_limit apparently work...)
print('Test "perfs"')
print(f"Time on a local env: (using {bkclass.__name__})")
env_perf = make(env_name, backend=bkclass())
env_perf.reset()
env_perf.seed(seed_used)
obs = env_perf.reset()
time_for_step = 0
nb_step_local = 0
with tqdm(desc="local env") as pbar:
while True:
act = real_env.action_space()
beg_step = time.perf_counter()
obs, reward, done, info = env_perf.step(act)
time_for_step += time.perf_counter() - beg_step
if done:
break
nb_step_local += 1
pbar.update(1)
print("Time on the remote env:")
resp_make_perf = client.get(f"{URL}/make/{env_name}")
id_env_perf = resp_make_perf.json()["id"]
_ = client.post(f"{URL}/seed/{env_name}/{id_env_perf}", json={"seed": seed_used})
_ = client.get(f"{URL}/reset/{env_name}/{id_env_perf}")
time_for_step_api = 0.0
time_for_all_api = 0.0
time_convert = 0.0
time_get_json = 0.0
nb_step_api = 0
with tqdm(desc="remote env") as pbar:
while True:
act = real_env.action_space()
beg_step = time.perf_counter()
act_as_json = act.to_json()
resp_step = client.post(
f"{URL}/step/{env_name}/{id_env_perf}", json={"action": act_as_json}
)
after_step = time.perf_counter()
time_for_step_api += after_step - beg_step
resp_step_json = resp_step.json()
time_get_json += time.perf_counter() - after_step
reic_obs_json = resp_step_json["obs"]
beg_convert = time.perf_counter()
obs.from_json(reic_obs_json)
time_convert += time.perf_counter() - beg_convert
time_for_all_api += time.perf_counter() - beg_step
if resp_step_json["done"]:
break
pbar.update(1)
nb_step_api += 1
print(f"\tEnv name: {env_name} with {real_env.n_sub} substations")
print(f"\tNumber of step for local env {nb_step_local}")
print(f"\tNumber of step for api env {nb_step_api}")
print(f"\tTime to compute all, for the normal env: {time_for_step:.2f}s")
print(f"\tTime to compute all, for the api env: {time_for_all_api:.2f}s")
print(f"\t\tTime to do the step, for the api env: {time_for_step_api:.2f}s")
print(f"\t\tTime to get the json from the http request: {time_get_json:.2f}s")
print(f"\t\tTime to convert from json: {time_convert:.2f}s")
print(f"\tSpeed up (for normal env): {time_for_all_api/time_for_step:.2f}")
| 13,453 | 38.339181 | 118 | py |
Grid2Op | Grid2Op-master/grid2op/simulator/__init__.py | # Copyright (c) 2019-2022, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
__all__ = ["Simulator"]
from grid2op.simulator.simulator import Simulator
| 538 | 43.916667 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/simulator/simulator.py | # Copyright (c) 2019-2022, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
from typing import Optional, Tuple
import numpy as np
import os
from scipy.optimize import minimize
from scipy.optimize import LinearConstraint
from grid2op.dtypes import dt_float
from grid2op.Environment import BaseEnv
from grid2op.Action import BaseAction
from grid2op.Backend import Backend
from grid2op.Observation.baseObservation import BaseObservation
from grid2op.Observation.highresSimCounter import HighResSimCounter
from grid2op.Exceptions import SimulatorError, InvalidRedispatching
class Simulator(object):
"""This class represents a "simulator". It allows to check the impact on this or that on th powergrid, quite
like what human operators have at their disposal in control rooms.
It behaves similarly to `env.step(...)` or `obs.simulate(...)` with a few key differences:
- you can "chain" the call to simulator: `simulator.predict(...).predict(...).predict(...)`
- it does not take into account the "time": no cooldown on lines nor substation, storage
"state of charge" (energy) does not decrease when you use them
- no automatic line disconnection: lines are not disconnected when they are above their limit
- no opponent will act on the grid
Please see the documentation for usage examples.
"""
def __init__(
self, backend: Optional[Backend], env: Optional[BaseEnv] = None, tol_redisp=1e-6,
_highres_sim_counter: Optional[HighResSimCounter] =None
):
# backend should be initiliazed !
if backend is not None:
if not isinstance(backend, Backend):
raise SimulatorError(
f'The "backend" argument should be an object '
f'of type "Backend" you provided {backend}'
)
if env is not None:
raise SimulatorError(
"When building a simulator with a grid2op backend "
'make sure you set the kwarg "env=None"'
)
if backend._can_be_copied:
self.backend: Backend = backend.copy()
else:
raise SimulatorError("Impossible to make a Simulator when you "
"cannot copy the backend.")
else:
if env is None:
raise SimulatorError(
"If you want to build a simulator with a blank / None "
'backend you should provide an environment (kwargs "env")'
)
if not isinstance(env, BaseEnv):
raise SimulatorError(
f"Make sure the environment you provided is "
f"a grid2op Environment (an object of a type "
f"inheriting from BaseEnv"
)
if env.backend._can_be_copied:
self.backend = env.backend.copy()
else:
raise SimulatorError("Impossible to make a Simulator when you "
"cannot copy the backend of the environment.")
self.current_obs: BaseObservation = None
self._converged: Optional[bool] = None
self._error: Optional[Exception] = None
self._tol_redisp: float = tol_redisp
if _highres_sim_counter is not None:
self._highres_sim_counter = _highres_sim_counter
else:
self._highres_sim_counter = HighResSimCounter()
    @property
    def converged(self) -> bool:
        """
        Result of the last powerflow run (``None`` until one has been run,
        see :func:`Simulator._do_powerflow`).

        Returns
        -------
        bool
            Whether or not the powerflow has converged
        """
        return self._converged
    @converged.setter
    def converged(self, values):
        # read-only: the flag is an output of the solver (set in _do_powerflow)
        raise SimulatorError("Cannot set this property.")
def copy(self) -> "Simulator":
"""Allows to perform a (deep) copy of the simulator.
Returns
-------
Simulator
A (deep) copy of the simulator you want to copy.
Raises
------
SimulatorError
In case the simulator is not initialized.
"""
if self.current_obs is None:
raise SimulatorError(
"Impossible to copy a non initialized Simulator. "
"Have you used `simulator.set_state(obs, ...)` with a valid observation before ?"
)
res = copy.copy(self)
res.backend = res.backend.copy()
res.current_obs = res.current_obs.copy()
# do not copy this !
res._highres_sim_counter = self._highres_sim_counter
return res
def change_backend(self, backend: Backend):
"""You can use this function in case you want to change the "solver" use to perform the computation.
For example, you could use a machine learning based model to do the computation (to accelerate them), provided
that you have at your disposal such an algorithm.
.. warning::
The backend you pass as argument should be initialized with the same grid as the one currently in use.
Notes
-----
Once changed, all the "simulator" that "derived" from this simulator will use the same backend types.
Parameters
----------
backend : Backend
Another grid2op backend you can use to perform the computation.
Raises
------
SimulatorError
When you do not pass a correct backend.
"""
if not isinstance(backend, Backend):
raise SimulatorError(
"when using change_backend function, the backend should"
" be an object (an not a class) of type backend"
)
self.backend.close()
self.backend = backend.copy() # backend_class.init_grid(type(self.backend))
self.set_state(obs=self.current_obs)
def change_backend_type(self, backend_type: type, grid_path: os.PathLike, **kwargs):
"""It allows to change the type of the backend used
Parameters
----------
backend_type : type
The new backend type
grid_path : os.PathLike
The path from where to load the powergrid
kwargs:
Extra arguments used to build the backend.
Notes
-----
Once changed, all the "simulator" that "derived" from this simulator will use the same backend types.
Raises
------
SimulatorError
if something went wrong (eg you do not pass a type, your type does not inherit from Backend, the file
located at `grid_path` does not exists etc.)
"""
if not isinstance(backend_type, type):
raise SimulatorError(
"when using change_backend_type function, the backend_type should"
" be a class an not an object"
)
if not issubclass(backend_type, Backend):
raise SimulatorError(
"when using change_backend_type function, the backend_type should"
" be subtype of class Backend"
)
if not os.path.exists(grid_path):
raise SimulatorError(
f'the supposed grid path "{grid_path}" does not exists'
)
if not os.path.isfile(grid_path):
raise SimulatorError(f'the supposed grid path "{grid_path}" if not a file')
tmp_backend = backend_type(**kwargs)
tmp_backend.load_grid(grid_path)
tmp_backend.assert_grid_correct()
self.backend.close()
self.backend = tmp_backend
self.set_state(obs=self.current_obs)
def set_state(
self,
obs: Optional[BaseObservation] = None,
do_powerflow: bool = True,
new_gen_p: np.ndarray = None,
new_gen_v: np.ndarray = None,
new_load_p: np.ndarray = None,
new_load_q: np.ndarray = None,
update_thermal_limit: bool = True,
):
"""Set the state of the simulator to a given state described by an observation (and optionally some
new loads and generation)
Parameters
----------
obs : Optional[BaseObservation], optional
The observation to get the state from, by default None
do_powerflow : bool, optional
Whether to use the underlying backend to get a consistent state after
this modification or not, by default True
new_gen_p : np.ndarray, optional
new generator active setpoint, by default None
new_gen_v : np.ndarray, optional
new generator voltage setpoint, by default None
new_load_p : np.ndarray, optional
new load active consumption, by default None
new_load_q : np.ndarray, optional
new load reactive consumption, by default None
update_thermal_limit: bool, optional
Do you update the thermal limit of the backend (we recommend to leave it to `True`
otherwise some bugs can appear such as
https://github.com/rte-france/Grid2Op/issues/377)
Raises
------
SimulatorError
In case the current simulator is not initialized.
"""
if obs is not None:
self.current_obs = obs.copy()
if self.current_obs is None:
raise SimulatorError(
"The simulator is not initialized. Have you used `simulator.set_state(obs, ...)` with a valid observation before ?"
)
# you cannot use "simulate" of the observation in this class
self.current_obs._obs_env = None
self.current_obs._forecasted_inj = []
self.current_obs._forecasted_grid = []
# udpate the new state if needed
if new_load_p is not None:
self.current_obs.load_p[:] = new_load_p
if new_load_q is not None:
self.current_obs.load_q[:] = new_load_q
if new_gen_p is not None:
self.current_obs.gen_p[:] = new_gen_p
if new_gen_v is not None:
self.current_obs.gen_v[:] = new_gen_v
self._converged = None
self.error = None
self.backend.update_from_obs(self.current_obs, force_update=True)
if update_thermal_limit:
self.backend.update_thermal_limit_from_vect(self.current_obs.thermal_limit)
if do_powerflow:
self._do_powerflow()
    def _do_powerflow(self):
        # count this simulation into the shared high-resolution counter,
        # then run the solver and cache its (converged, error) outcome
        self._highres_sim_counter.add_one()
        self._converged, self._error = self.backend.runpf()
    def _update_obs(self):
        # refresh the cached observation from the backend results; if the
        # powerflow diverged, flag the observation as "game over" instead
        if self._converged:
            self.current_obs._update_attr_backend(self.backend)
        else:
            self.current_obs.set_game_over()
def _adjust_controlable_gen(
self, new_gen_p: np.ndarray, target_dispatch: np.ndarray, sum_target: float
) -> Optional[float]:
nb_dispatchable = np.sum(self.current_obs.gen_redispatchable)
# which generators needs to be "optimized" -> the one where
# the target function matter
gen_in_target = target_dispatch[self.current_obs.gen_redispatchable] != 0.0
# compute the upper / lower bounds for the generators
dispatchable = new_gen_p[self.current_obs.gen_redispatchable]
val_min = (
self.current_obs.gen_pmin[self.current_obs.gen_redispatchable]
- dispatchable
)
val_max = (
self.current_obs.gen_pmax[self.current_obs.gen_redispatchable]
- dispatchable
)
# define the target function (things that will be minimized)
target_dispatch_redisp = target_dispatch[self.current_obs.gen_redispatchable]
coeffs = 1.0 / (
self.current_obs.gen_max_ramp_up
+ self.current_obs.gen_max_ramp_down
+ self._tol_redisp
)
weights = np.ones(nb_dispatchable) * coeffs[self.current_obs.gen_redispatchable]
weights /= weights.sum()
scale_objective = max(0.5 * np.sum(np.abs(target_dispatch_redisp)) ** 2, 1.0)
scale_objective = np.round(scale_objective, decimals=4)
tmp_zeros = np.zeros((1, nb_dispatchable), dtype=float)
# wrap everything into the proper scipy form
def target(actual_dispatchable):
# define my real objective
quad_ = (
1e2
* (
actual_dispatchable[gen_in_target]
- target_dispatch_redisp[gen_in_target]
)
** 2
)
coeffs_quads = weights[gen_in_target] * quad_
coeffs_quads_const = coeffs_quads.sum()
coeffs_quads_const /= scale_objective # scaling the function
coeffs_quads_const += 1e-2 * np.sum(actual_dispatchable**2 * weights)
return coeffs_quads_const
def jac(actual_dispatchable):
res_jac = 1.0 * tmp_zeros
res_jac[0, gen_in_target] = (
1e2
* 2.0
* weights[gen_in_target]
* (
actual_dispatchable[gen_in_target]
- target_dispatch_redisp[gen_in_target]
)
)
res_jac /= scale_objective # scaling the function
res_jac += 2e-2 * actual_dispatchable * weights
return res_jac
mat_sum_ok = np.ones((1, nb_dispatchable))
equality_const = LinearConstraint(
mat_sum_ok, sum_target - self._tol_redisp, sum_target + self._tol_redisp
)
ineq_const = LinearConstraint(np.eye(nb_dispatchable), lb=val_min, ub=val_max)
# objective function
def f(init):
this_res = minimize(
target,
init,
method="SLSQP",
constraints=[equality_const, ineq_const],
options={
"eps": self._tol_redisp,
"ftol": self._tol_redisp,
"disp": False,
},
jac=jac,
)
return this_res
# choose a good initial point (close to the solution)
# the idea here is to chose a initial point that would be close to the
# desired solution (split the (sum of the) dispatch to the available generators)
x0 = 1.0 * target_dispatch_redisp
can_adjust = x0 == 0.0
if np.any(can_adjust):
init_sum = np.sum(x0)
denom_adjust = np.sum(1.0 / weights[can_adjust])
if denom_adjust <= 1e-2:
# i don't want to divide by something too cloose to 0.
denom_adjust = 1.0
x0[can_adjust] = -init_sum / (weights[can_adjust] * denom_adjust)
res = f(x0)
if res.success:
return res.x
else:
return None
    def _amount_curtailed(
        self, act: BaseAction, new_gen_p: np.ndarray
    ) -> Tuple[np.ndarray, float]:
        """Return the generation after applying the action's curtailment,
        and the total amount of production curtailed."""
        # act.curtail is a ratio of pmax; -1 encodes "no curtailment asked"
        curt_vect = 1.0 * act.curtail
        curt_vect[curt_vect == -1.0] = 1.0
        limit_curtail = curt_vect * act.gen_pmax
        # production above the curtailment limit is cut...
        curtailed = np.maximum(new_gen_p - limit_curtail, 0.0)
        # ... but only for renewable generators
        curtailed[~act.gen_renewable] = 0.0
        amount_curtail = np.sum(curtailed)
        new_gen_p_after_curtail = 1.0 * new_gen_p
        new_gen_p_after_curtail -= curtailed
        return new_gen_p_after_curtail, amount_curtail
    def _amount_storage(self, act: BaseAction) -> Tuple[float, np.ndarray, np.ndarray]:
        """Compute the effect of the action's storage setpoints.

        Returns three values: the opposite of the total storage power (the
        previous target plus the new exchange), the power actually exchanged
        per storage unit, and the updated charge per unit.
        (The return annotation was corrected to the three returned values.)
        """
        storage_act = 1.0 * act.storage_p
        res = np.sum(self.current_obs.storage_power_target)
        current_charge = 1.0 * self.current_obs.storage_charge
        storage_power = np.zeros(act.n_storage)
        if np.all(np.abs(storage_act) <= self._tol_redisp):
            # no significant storage action requested
            return -res, storage_power, current_charge
        coeff_p_to_E = (
            self.current_obs.delta_time / 60.0
        )  # obs.delta_time is in minutes
        # convert power (action to energy)
        storage_act_E = storage_act * coeff_p_to_E
        # take into account the efficiencies
        do_charge = storage_act_E < 0.0
        do_discharge = storage_act_E > 0.0
        storage_act_E[do_charge] /= act.storage_charging_efficiency[do_charge]
        storage_act_E[do_discharge] *= act.storage_discharging_efficiency[do_discharge]
        # make sure we don't go over / above Emin / Emax
        min_down_E = act.storage_Emin - current_charge
        min_up_E = act.storage_Emax - current_charge
        storage_act_E = np.minimum(storage_act_E, min_up_E)
        storage_act_E = np.maximum(storage_act_E, min_down_E)
        current_charge += storage_act_E
        # convert back to power (for the observation) the amount the grid got
        storage_power = storage_act_E / coeff_p_to_E
        storage_power[do_charge] *= act.storage_charging_efficiency[do_charge]
        storage_power[do_discharge] /= act.storage_discharging_efficiency[do_discharge]
        res += np.sum(storage_power)
        return -res, storage_power, current_charge
def _fix_redisp_curtailment_storage(
self, act: BaseAction, new_gen_p: np.ndarray
) -> Tuple[bool, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray,]:
"""This function emulates the "frequency control" of the
environment.
Its main goal is to ensure that the sum of injected power thanks to redispatching,
storage units and curtailment sum to 0.
It is a very rough simplification of what happens in the environment.
"""
new_gen_p_after_curtail, amount_curtail = self._amount_curtailed(act, new_gen_p)
amount_storage, storage_power, storage_charge = self._amount_storage(act)
sum_target = amount_curtail - amount_storage # TODO !
target_dispatch = self.current_obs.target_dispatch + act.redispatch
# if previous setpoint was say -2 and at this step I redispatch of
# say + 4 then the real setpoint should be +2 (and not +4)
new_vect_redisp = (act.redispatch != 0.0) & (
self.current_obs.target_dispatch == 0.0
)
target_dispatch[new_vect_redisp] += self.current_obs.actual_dispatch[
new_vect_redisp
]
if abs(np.sum(target_dispatch) - sum_target) >= self._tol_redisp:
adjust = self._adjust_controlable_gen(
new_gen_p_after_curtail, target_dispatch, sum_target
)
if adjust is None:
return True, None, None, None, None, None
else:
return (
True,
new_gen_p_after_curtail,
target_dispatch,
adjust,
storage_power,
storage_charge,
)
return False, None, None, None, None, None
def predict(
self,
act: BaseAction,
new_gen_p: np.ndarray = None,
new_gen_v: np.ndarray = None,
new_load_p: np.ndarray = None,
new_load_q: np.ndarray = None,
do_copy: bool = True,
) -> "Simulator":
"""Predict the state of the grid after a given action has been taken.
Parameters
----------
act : BaseAction
The action you want to take
new_gen_p : np.ndarray, optional
the new production active setpoint, by default None
new_gen_v : np.ndarray, optional
the new production voltage setpoint, by default None
new_load_p : np.ndarray, optional
the new consumption active values, by default None
new_load_q : np.ndarray, optional
the new consumption reactive values, by default None
do_copy : bool, optional
Whether to make a copy or not, by default True
Examples
---------
A possible example is:
.. code-block:: python
import grid2op
env_name = "l2rpn_case14_sandbox" # or any other name
env = grid2op.make(env_name)
obs = env.reset()
#### later in the code, for example in an Agent:
simulator = obs.get_simulator()
load_p_stressed = obs.load_p * 1.05
gen_p_stressed = obs.gen_p * 1.05
do_nothing = env.action_space()
simulator_stressed = simulator.predict(act=do_nothing,
new_gen_p=gen_p_stressed,
new_load_p=load_p_stressed)
if not simulator_stressed.converged:
# the solver fails to find a solution for this action
# you are likely to run into trouble if you use that...
... # do something
obs_stressed = simulator_stressed.current_obs
Returns
-------
Simulator
The new simulator representing the grid state after the simulation of the action.
"""
# init the result
if do_copy:
res = self.copy()
else:
res = self
this_act = act.copy()
if new_gen_p is None:
new_gen_p = 1.0 * self.current_obs.gen_p
res.set_state(
obs=None,
new_gen_p=new_gen_p,
new_gen_v=new_gen_v,
new_load_p=new_load_p,
new_load_q=new_load_q,
do_powerflow=False,
)
# "fix" the action for the redispatching / curtailment / storage part
(
has_adjusted,
new_gen_p_modif,
target_dispatch,
adjust,
storage_power,
storage_charge,
) = res._fix_redisp_curtailment_storage(this_act, new_gen_p)
if has_adjusted:
if target_dispatch is None:
res._converged = False
res.current_obs.set_game_over()
res._error = InvalidRedispatching("")
return res
redisp_modif = np.zeros(self.current_obs.n_gen)
redisp_modif[self.current_obs.gen_redispatchable] = adjust
# adjust the proper things in the observation
res.current_obs.target_dispatch = target_dispatch
this_act.redispatch = redisp_modif
res.current_obs.actual_dispatch[:] = redisp_modif
this_act._dict_inj["prod_p"] = 1.0 * new_gen_p_modif
this_act._modif_inj = True
# TODO : curtail, curtailment_limit (in observation)
res.current_obs.curtailment[:] = (
new_gen_p - new_gen_p_modif
) / act.gen_pmax
res.current_obs.curtailment_limit[:] = act.curtail
res.current_obs.curtailment_limit_effective[:] = act.curtail
res.current_obs.gen_p_before_curtail[:] = new_gen_p
res.current_obs.storage_power[:] = storage_power
res.current_obs.storage_charge[:] = storage_charge
else:
res.current_obs.storage_power[:] = 0.0
res.current_obs.actual_dispatch[:] = 0.0
# apply the action
bk_act = res.backend.my_bk_act_class()
bk_act += this_act
res.backend.apply_action(bk_act)
# run the powerflow
res._do_powerflow()
# update its observation
res._update_obs()
return res
def close(self):
"""close the underlying backend"""
if hasattr(self, "backend") and self.backend is not None:
self.backend.close()
self.backend = None
self.current_obs = None
self._converged = None
self._error = None
    def __del__(self):
        # best-effort cleanup of the backend at garbage collection time
        self.close()
| 24,214 | 37.558917 | 131 | py |
Grid2Op | Grid2Op-master/grid2op/tests/BaseBackendTest.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
# do some generic tests that can be implemented directly to test if a backend implementation can work out of the box
# with grid2op.
# see an example of test_Pandapower for how to use this suit.
import os
import numpy as np
import copy
from abc import ABC, abstractmethod
import inspect
from grid2op.Action import CompleteAction
try:
    # math.comb is only available starting python 3.8; tests also run on
    # older interpreters, hence the pure python fallback below
    from math import comb
except ImportError:
    def comb(n, k):
        """Integer fallback for :func:`math.comb`.

        Returns the binomial coefficient "n choose k" (0 when k > n), using
        exact integer arithmetic so — unlike the previous float-division
        version — the result is an ``int`` exactly like ``math.comb``.
        """
        if n < k:
            return 0
        res = 1
        for i in range(k):
            # multiply before dividing: the division is always exact here
            res = res * (n - i) // (i + 1)
        return res
import warnings
import grid2op
from grid2op.dtypes import dt_bool, dt_int, dt_float
from grid2op.Action import ActionSpace, CompleteAction
from grid2op.Parameters import Parameters
from grid2op.Chronics import ChronicsHandler
from grid2op.Environment import Environment
from grid2op.Exceptions import *
from grid2op.Rules import RulesChecker
from grid2op.MakeEnv import make
from grid2op.Rules import AlwaysLegal
from grid2op.Action._BackendAction import _BackendAction
import pdb
class MakeBackend(ABC):
    """Base mixin of the backend test suites: a concrete suite provides the
    backend under test (and optionally the grid path / case file to load)."""
    @abstractmethod
    def make_backend(self, detailed_infos_for_cascading_failures=False):
        # must return a fresh Backend instance to be tested
        pass
    def get_path(self):
        # directory containing the test grids (to be overridden per suite)
        raise NotImplementedError(
            "This function should be implemented for the test suit you are developping"
        )
    def get_casefile(self):
        # name of the grid file to load (to be overridden per suite)
        raise NotImplementedError(
            "This function should be implemented for the test suit you are developping"
        )
    def skip_if_needed(self):
        # Skip the *calling* test if its name is listed in self.tests_skipped:
        # the caller's function name is read from the parent stack frame.
        # NOTE(review): self.skipTest is assumed to be provided by
        # unittest.TestCase in the concrete subclasses — confirm
        if hasattr(self, "tests_skipped"):
            nm_ = inspect.currentframe().f_back.f_code.co_name
            if nm_ in self.tests_skipped:
                self.skipTest('the test "{}" is skipped'.format(nm_))
class BaseTestNames(MakeBackend):
    """Checks that custom element names declared in the grid file are kept."""

    def test_properNames(self):
        """Load the "5bus_example_diff_name" case and verify the load names
        it declares are exposed on both the observation class and the
        environment class."""
        self.skip_if_needed()
        bk = self.make_backend()
        env_dir = os.path.join(self.get_path(), "5bus_example_diff_name")
        expected_loads = ["tutu", "toto", "tata"]
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make(env_dir, backend=bk, _add_to_name="_BaseTestNames") as env:
                first_obs = env.reset()
                assert np.all(type(first_obs).name_load == expected_loads)
                assert np.all(type(env).name_load == expected_loads)
class BaseTestLoadingCase(MakeBackend):
    """Checks that the test case (IEEE case14-like grid: 20 lines, 5 gens,
    11 loads, 14 substations) is loaded with the expected names and sizes
    and that a first power flow is consistent.

    NOTE(review): assumes the concrete test class defines ``self.tolvect``
    (tolerance used on vector comparisons) -- it is not set here.
    """

    def test_load_file(self):
        """Load the grid, check static data (counts, element names), the
        initial topology, then run an AC power flow and check Kirchhoff
        residuals."""
        backend = self.make_backend()
        path_matpower = self.get_path()
        case_file = self.get_casefile()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            backend.load_grid(path_matpower, case_file)
        type(backend).set_env_name("BaseTestLoadingCase")
        backend.assert_grid_correct()
        # expected grid dimensions
        assert backend.n_line == 20
        assert backend.n_gen == 5
        assert backend.n_load == 11
        assert backend.n_sub == 14
        # expected element names (order-insensitive comparison below)
        name_line = [
            "0_1_0",
            "0_4_1",
            "8_9_2",
            "8_13_3",
            "9_10_4",
            "11_12_5",
            "12_13_6",
            "1_2_7",
            "1_3_8",
            "1_4_9",
            "2_3_10",
            "3_4_11",
            "5_10_12",
            "5_11_13",
            "5_12_14",
            "3_6_15",
            "3_8_16",
            "4_5_17",
            "6_7_18",
            "6_8_19",
        ]
        name_line = np.array(name_line)
        assert np.all(sorted(backend.name_line) == sorted(name_line))
        name_sub = [
            "sub_0",
            "sub_1",
            "sub_2",
            "sub_3",
            "sub_4",
            "sub_5",
            "sub_6",
            "sub_7",
            "sub_8",
            "sub_9",
            "sub_10",
            "sub_11",
            "sub_12",
            "sub_13",
        ]
        name_sub = np.array(name_sub)
        assert np.all(sorted(backend.name_sub) == sorted(name_sub))
        name_gen = ["gen_0_4", "gen_1_0", "gen_2_1", "gen_5_2", "gen_7_3"]
        name_gen = np.array(name_gen)
        assert np.all(sorted(backend.name_gen) == sorted(name_gen))
        name_load = [
            "load_1_0",
            "load_2_1",
            "load_13_2",
            "load_3_3",
            "load_4_4",
            "load_5_5",
            "load_8_6",
            "load_9_7",
            "load_10_8",
            "load_11_9",
            "load_12_10",
        ]
        name_load = np.array(name_load)
        assert np.all(sorted(backend.name_load) == sorted(name_load))
        # at loading time everything should be connected to bus 1
        assert np.all(backend.get_topo_vect() == np.ones(np.sum(backend.sub_info)))
        conv = backend.runpf()
        assert conv, "powerflow diverge it is not supposed to!"
        # Kirchhoff residuals should be (near) zero after a converged flow;
        # reactive values are only checked when shunt data are available.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            p_subs, q_subs, p_bus, q_bus, v_bus = backend.check_kirchoff()
        assert np.max(np.abs(p_subs)) <= self.tolvect
        assert np.max(np.abs(p_bus.flatten())) <= self.tolvect
        if backend.shunts_data_available:
            assert np.max(np.abs(q_subs)) <= self.tolvect
            assert np.max(np.abs(q_bus.flatten())) <= self.tolvect

    def test_assert_grid_correct(self):
        """Load the grid, run a power flow, and check the backend passes
        both grid-correctness checks (static and post-powerflow)."""
        backend = self.make_backend()
        path_matpower = self.get_path()
        case_file = self.get_casefile()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            backend.load_grid(path_matpower, case_file)
        type(backend).set_env_name("TestLoadingCase_env2_test_assert_grid_correct")
        backend.assert_grid_correct()
        conv = backend.runpf()
        assert conv, "powerflow diverge it is not supposed to!"
        backend.assert_grid_correct_after_powerflow()
class BaseTestLoadingBackendFunc(MakeBackend):
    """Exercises the main backend API on the test grid: power flows (AC and
    DC), line flows/status, copies, thermal limits, and application of
    injection / maintenance / hazard actions.

    NOTE(review): assumes the concrete test class defines ``self.tolvect``,
    ``self.tol_one`` and ``self.compare_vect`` -- they are not set here.
    The hard-coded reference arrays below are tied to the specific test
    case loaded in :func:`setUp`.
    """

    def setUp(self):
        """Build a fresh backend on the test case (storage disabled) and
        prepare the action space + backend-action class used by the tests."""
        self.backend = self.make_backend()
        self.path_matpower = self.get_path()
        self.case_file = self.get_casefile()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.backend.load_grid(self.path_matpower, self.case_file)
        type(self.backend).set_env_name("TestLoadingBackendFunc_env")
        type(self.backend).set_no_storage()
        self.backend.assert_grid_correct()
        self.game_rules = RulesChecker()
        self.action_env_class = ActionSpace.init_grid(self.backend)
        self.action_env = self.action_env_class(
            gridobj=self.backend, legal_action=self.game_rules.legal_action
        )
        self.bkact_class = _BackendAction.init_grid(self.backend)
        self.backend.runpf()
        self.backend.assert_grid_correct_after_powerflow()

    def tearDown(self):
        # nothing to clean up after each test
        pass

    def test_theta_ok(self):
        """If the backend advertises voltage-angle support, check the shapes
        and finiteness of get_theta(); otherwise it must raise
        NotImplementedError."""
        self.skip_if_needed()
        if self.backend.can_output_theta:
            (
                theta_or,
                theta_ex,
                load_theta,
                gen_theta,
                storage_theta,
            ) = self.backend.get_theta()
            assert theta_or.shape[0] == self.backend.n_line
            assert theta_ex.shape[0] == self.backend.n_line
            assert load_theta.shape[0] == self.backend.n_load
            assert gen_theta.shape[0] == self.backend.n_gen
            assert storage_theta.shape[0] == self.backend.n_storage
            assert np.all(np.isfinite(theta_or))
            assert np.all(np.isfinite(theta_ex))
            assert np.all(np.isfinite(load_theta))
            assert np.all(np.isfinite(gen_theta))
            assert np.all(np.isfinite(storage_theta))
        else:
            with self.assertRaises(NotImplementedError):
                # if the "can_output_theta" flag is set to false, then it means the backend
                # should not implement the get_theta class
                self.backend.get_theta()

    def test_runpf_dc(self):
        """DC power flow: origin active flows must match the reference."""
        self.skip_if_needed()
        conv = self.backend.runpf(is_dc=True)
        assert conv
        # reference origin-side active power flows (MW) for the DC solution
        true_values_dc = np.array(
            [
                147.83859556,
                71.16140444,
                5.7716542,
                9.64132512,
                -3.2283458,
                1.50735814,
                5.25867488,
                70.01463596,
                55.1518527,
                40.9721069,
                -24.18536404,
                -61.74649065,
                6.7283458,
                7.60735814,
                17.25131674,
                28.36115279,
                16.55182652,
                42.78702069,
                0.0,
                28.36115279,
            ]
        )
        p_or, *_ = self.backend.lines_or_info()
        assert self.compare_vect(p_or, true_values_dc)

    def test_runpf(self):
        """AC power flow: origin active flows must match the reference."""
        self.skip_if_needed()
        # reference origin-side active power flows (MW) for the AC solution
        true_values_ac = np.array(
            [
                1.56882891e02,
                7.55103818e01,
                5.22755247e00,
                9.42638103e00,
                -3.78532238e00,
                1.61425777e00,
                5.64385098e00,
                7.32375792e01,
                5.61314959e01,
                4.15162150e01,
                -2.32856901e01,
                -6.11582304e01,
                7.35327698e00,
                7.78606702e00,
                1.77479769e01,
                2.80741759e01,
                1.60797576e01,
                4.40873209e01,
                -1.11022302e-14,
                2.80741759e01,
            ]
        )
        conv = self.backend.runpf(is_dc=False)
        assert conv
        p_or, *_ = self.backend.lines_or_info()
        assert self.compare_vect(p_or, true_values_ac)

    def test_voltage_convert_powerlines(self):
        """Check P, Q, V and A are consistent on both line sides:
        A = sqrt(P^2 + Q^2) * 1e3 / (sqrt(3) * V)."""
        self.skip_if_needed()
        # i have the correct voltages in powerlines if the formula to link mw, mvar, kv and amps is correct
        conv = self.backend.runpf(is_dc=False)
        assert conv, "powerflow diverge at loading"
        p_or, q_or, v_or, a_or = self.backend.lines_or_info()
        a_th = np.sqrt(p_or**2 + q_or**2) * 1e3 / (np.sqrt(3) * v_or)
        assert self.compare_vect(a_th, a_or)
        p_ex, q_ex, v_ex, a_ex = self.backend.lines_ex_info()
        a_th = np.sqrt(p_ex**2 + q_ex**2) * 1e3 / (np.sqrt(3) * v_ex)
        assert self.compare_vect(a_th, a_ex)

    def test_voltages_correct_load_gen(self):
        """Each load / generator voltage must equal the voltage of some
        line end connected to the same substation."""
        self.skip_if_needed()
        # i have the right voltages to generators and load, if it's the same as the voltage (correct from the above test)
        # of the powerline connected to it.
        conv = self.backend.runpf(is_dc=False)
        assert conv, "powerflow diverge at loading"
        load_p, load_q, load_v = self.backend.loads_info()
        gen_p, gen__q, gen_v = self.backend.generators_info()
        p_or, q_or, v_or, a_or = self.backend.lines_or_info()
        p_ex, q_ex, v_ex, a_ex = self.backend.lines_ex_info()
        for c_id, sub_id in enumerate(self.backend.load_to_subid):
            # find a line (origin side first, then extremity side) at the
            # same substation and compare voltages
            l_ids = np.where(self.backend.line_or_to_subid == sub_id)[0]
            if len(l_ids):
                l_id = l_ids[0]
                assert (
                    np.abs(v_or[l_id] - load_v[c_id]) <= self.tol_one
                ), "problem for load {}".format(c_id)
                continue
            l_ids = np.where(self.backend.line_ex_to_subid == sub_id)[0]
            if len(l_ids):
                l_id = l_ids[0]
                assert (
                    np.abs(v_ex[l_id] - load_v[c_id]) <= self.tol_one
                ), "problem for load {}".format(c_id)
                continue
            assert False, "load {} has not been checked".format(c_id)
        for g_id, sub_id in enumerate(self.backend.gen_to_subid):
            # same check for generators
            l_ids = np.where(self.backend.line_or_to_subid == sub_id)[0]
            if len(l_ids):
                l_id = l_ids[0]
                assert (
                    np.abs(v_or[l_id] - gen_v[g_id]) <= self.tol_one
                ), "problem for generator {}".format(g_id)
                continue
            l_ids = np.where(self.backend.line_ex_to_subid == sub_id)[0]
            if len(l_ids):
                l_id = l_ids[0]
                assert (
                    np.abs(v_ex[l_id] - gen_v[g_id]) <= self.tol_one
                ), "problem for generator {}".format(g_id)
                continue
            assert False, "generator {} has not been checked".format(g_id)

    def test_copy(self):
        """A copied backend must be independent: disconnecting a line on the
        original must not affect the copy."""
        self.skip_if_needed()
        conv = self.backend.runpf(is_dc=False)
        assert conv, "powerflow diverge at loading"
        l_id = 3
        p_or_orig, *_ = self.backend.lines_or_info()
        adn_backend_cpy = self.backend.copy()
        self.backend._disconnect_line(l_id)
        conv = self.backend.runpf(is_dc=False)
        assert conv
        conv2 = adn_backend_cpy.runpf(is_dc=False)
        assert conv2
        p_or_ref, *_ = self.backend.lines_or_info()
        p_or, *_ = adn_backend_cpy.lines_or_info()
        assert self.compare_vect(
            p_or_orig, p_or
        ), "the copied object affects its original 'parent'"
        assert (
            np.abs(p_or_ref[l_id]) <= self.tol_one
        ), "powerline {} has not been disconnected".format(l_id)

    def test_copy2(self):
        """Disconnections on the copy must not leak back to the original
        (and vice versa)."""
        self.skip_if_needed()
        self.backend._disconnect_line(8)
        conv = self.backend.runpf(is_dc=False)
        # NOTE(review): `conv` is not asserted here -- confirm intentional
        p_or_orig, *_ = self.backend.lines_or_info()
        adn_backend_cpy = self.backend.copy()
        adn_backend_cpy._disconnect_line(11)
        assert not adn_backend_cpy.get_line_status()[8]
        assert not adn_backend_cpy.get_line_status()[11]
        assert not self.backend.get_line_status()[8]
        assert self.backend.get_line_status()[11]

    def test_get_private_line_status(self):
        """Line status (via the private accessor when available) must
        reflect a single disconnection."""
        self.skip_if_needed()
        if hasattr(self.backend, "_get_line_status"):
            assert np.all(self.backend._get_line_status())
        else:
            assert np.all(self.backend.get_line_status())
        self.backend._disconnect_line(3)
        if hasattr(self.backend, "_get_line_status"):
            vect_ = self.backend._get_line_status()
        else:
            vect_ = self.backend.get_line_status()
        assert np.sum(~vect_) == 1
        assert not vect_[3]

    def test_get_line_flow(self):
        """Origin reactive flows must match the references, before and after
        disconnecting line 3."""
        self.skip_if_needed()
        self.backend.runpf(is_dc=False)
        # reference origin-side reactive flows with the full grid
        true_values_ac = np.array(
            [
                -20.40429168,
                3.85499114,
                4.2191378,
                3.61000624,
                -1.61506292,
                0.75395917,
                1.74717378,
                3.56020295,
                -1.5503504,
                1.17099786,
                4.47311562,
                15.82364194,
                3.56047297,
                2.50341424,
                7.21657539,
                -9.68106571,
                -0.42761118,
                12.47067981,
                -17.16297051,
                5.77869057,
            ]
        )
        p_or_orig, q_or_orig, *_ = self.backend.lines_or_info()
        assert self.compare_vect(q_or_orig, true_values_ac)
        self.backend._disconnect_line(3)
        a = self.backend.runpf(is_dc=False)
        # reference origin-side reactive flows once line 3 is disconnected
        true_values_ac = np.array(
            [
                -20.40028207,
                3.65600775,
                3.77916284,
                0.0,
                -2.10761554,
                1.34025308,
                5.86505081,
                3.58514625,
                -2.28717836,
                0.81979017,
                3.72328838,
                17.09556423,
                3.9548798,
                3.18389804,
                11.24144925,
                -11.09660174,
                -1.70423701,
                13.14347167,
                -14.82917601,
                2.276297,
            ]
        )
        p_or_orig, q_or_orig, *_ = self.backend.lines_or_info()
        assert self.compare_vect(q_or_orig, true_values_ac)

    def test_pf_ac_dc(self):
        """A DC solve must yield zero reactive flows, and a subsequent AC
        solve must recover the AC reference values."""
        self.skip_if_needed()
        true_values_ac = np.array(
            [
                -20.40429168,
                3.85499114,
                4.2191378,
                3.61000624,
                -1.61506292,
                0.75395917,
                1.74717378,
                3.56020295,
                -1.5503504,
                1.17099786,
                4.47311562,
                15.82364194,
                3.56047297,
                2.50341424,
                7.21657539,
                -9.68106571,
                -0.42761118,
                12.47067981,
                -17.16297051,
                5.77869057,
            ]
        )
        conv = self.backend.runpf(is_dc=True)
        assert conv
        p_or_orig, q_or_orig, *_ = self.backend.lines_or_info()
        assert np.all(q_or_orig == 0.0), "in dc mode all q must be zero"
        conv = self.backend.runpf(is_dc=False)
        assert conv
        p_or_orig, q_or_orig, *_ = self.backend.lines_or_info()
        assert self.compare_vect(q_or_orig, true_values_ac)

    def test_get_thermal_limit(self):
        """Thermal limits read from the backend must match the reference."""
        self.skip_if_needed()
        res = self.backend.get_thermal_limit()
        true_values_ac = np.array(
            [
                42339.01974057,
                42339.01974057,
                27479652.23546777,
                27479652.23546777,
                27479652.23546777,
                27479652.23546777,
                27479652.23546777,
                42339.01974057,
                42339.01974057,
                42339.01974057,
                42339.01974057,
                42339.01974057,
                27479652.23546777,
                27479652.23546777,
                27479652.23546777,
                42339.01974057,
                42339.01974057,
                42339.01974057,
                408269.11892695,
                408269.11892695,
            ],
            dtype=dt_float,
        )
        assert self.compare_vect(res, true_values_ac)

    def test_disconnect_line(self):
        """Disconnecting any single line (except 18) must keep the power
        flow converging, with exactly that line reported off."""
        self.skip_if_needed()
        for i in range(self.backend.n_line):
            if i == 18:
                # powerflow diverge if line 1 is removed, unfortunately
                continue
            backend_cpy = self.backend.copy()
            backend_cpy._disconnect_line(i)
            conv = backend_cpy.runpf()
            assert (
                conv
            ), "Power flow computation does not converge if line {} is removed".format(
                i
            )
            flows = backend_cpy.get_line_status()
            assert not flows[i]
            assert np.sum(~flows) == 1

    def test_donothing_action(self):
        """Applying an empty action must leave loads, line status and flows
        unchanged."""
        self.skip_if_needed()
        conv = self.backend.runpf()
        init_flow = self.backend.get_line_flow()
        init_lp, *_ = self.backend.loads_info()
        init_gp, *_ = self.backend.generators_info()
        init_ls = self.backend.get_line_status()
        action = self.action_env({})  # update the action
        bk_action = self.bkact_class()
        bk_action += action
        self.backend.apply_action(bk_action)
        after_lp, *_ = self.backend.loads_info()
        after_gp, *_ = self.backend.generators_info()
        after_ls = self.backend.get_line_status()
        assert self.compare_vect(init_lp, after_lp)  # check i didn't modify the loads
        # assert self.compare_vect(init_gp, after_gp) # check i didn't modify the generators # TODO here !!! problem with steady state P=C+L
        assert np.all(init_ls == after_ls)  # check i didn't disconnect any powerlines
        conv = self.backend.runpf()
        assert conv, "Cannot perform a powerflow after doing nothing"
        after_flow = self.backend.get_line_flow()
        assert self.compare_vect(init_flow, after_flow)

    def test_apply_action_active_value(self):
        """With a lossless (DC) grid, scaling all injections by 0.5 must
        scale all flows by 0.5 and keep Kirchhoff residuals at zero."""
        self.skip_if_needed()
        # test that i can modify only the load / prod active values of the powergrid
        # to do that i modify the productions and load all of a factor 0.5 and compare that the DC flows are
        # also multiply by 2

        # i set up the stuff to have exactly 0 losses
        conv = self.backend.runpf(is_dc=True)
        assert conv, "powergrid diverge after loading (even in DC)"
        init_flow, *_ = self.backend.lines_or_info()
        init_lp, init_l_q, *_ = self.backend.loads_info()
        init_gp, *_ = self.backend.generators_info()
        init_ls = self.backend.get_line_status()
        ratio = 1.0
        new_cp = ratio * init_lp
        # rescale production so that total generation == total load (0 losses)
        new_pp = ratio * init_gp * np.sum(init_lp) / np.sum(init_gp)
        action = self.action_env(
            {"injection": {"load_p": new_cp, "prod_p": new_pp}}
        )  # update the action
        bk_action = self.bkact_class()
        bk_action += action
        self.backend.apply_action(bk_action)
        conv = self.backend.runpf(is_dc=True)
        # now the system has exactly 0 losses (ie sum load = sum gen)

        # i check that if i divide by 2, then everything is divided by 2
        assert conv
        init_flow, *_ = self.backend.lines_or_info()
        init_lp, init_l_q, *_ = self.backend.loads_info()
        init_gp, *_ = self.backend.generators_info()
        init_ls = self.backend.get_line_status()
        ratio = 0.5
        new_cp = ratio * init_lp
        new_pp = ratio * init_gp
        action = self.action_env(
            {"injection": {"load_p": new_cp, "prod_p": new_pp}}
        )  # update the action
        bk_action = self.bkact_class()
        bk_action += action
        self.backend.apply_action(bk_action)
        conv = self.backend.runpf(is_dc=True)
        assert conv, "Cannot perform a powerflow after doing nothing"
        after_lp, after_lq, *_ = self.backend.loads_info()
        after_gp, *_ = self.backend.generators_info()
        after_ls = self.backend.get_line_status()
        assert self.compare_vect(new_cp, after_lp)  # check i didn't modify the loads
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            p_subs, q_subs, p_bus, q_bus, v_bus = self.backend.check_kirchoff()
        # i'm in DC mode, i can't check for reactive values...
        assert (
            np.max(np.abs(p_subs)) <= self.tolvect
        ), "problem with active values, at substation"
        assert (
            np.max(np.abs(p_bus.flatten())) <= self.tolvect
        ), "problem with active values, at a bus"

        assert self.compare_vect(
            new_pp, after_gp
        )  # check i didn't modify the generators
        assert np.all(init_ls == after_ls)  # check i didn't disconnect any powerlines
        after_flow, *_ = self.backend.lines_or_info()
        assert self.compare_vect(
            ratio * init_flow, after_flow
        )  # probably an error with the DC approx

    def test_apply_action_prod_v(self):
        """Scaling generator voltage setpoints by 1.05 must be reflected in
        the post-powerflow generator voltages."""
        self.skip_if_needed()
        conv = self.backend.runpf(is_dc=False)
        assert conv, "powergrid diverge after loading"
        prod_p_init, prod_q_init, prod_v_init = self.backend.generators_info()
        ratio = 1.05
        action = self.action_env(
            {"injection": {"prod_v": ratio * prod_v_init}}
        )  # update the action
        bk_action = self.bkact_class()
        bk_action += action
        self.backend.apply_action(bk_action)
        conv = self.backend.runpf(is_dc=False)
        assert conv, "Cannot perform a powerflow after modifying the powergrid"
        prod_p_after, prod_q_after, prod_v_after = self.backend.generators_info()
        assert self.compare_vect(
            ratio * prod_v_init, prod_v_after
        )  # check i didn't modify the generators

    def test_apply_action_maintenance(self):
        """A maintenance on line 19 must disconnect exactly that line and
        leave loads untouched."""
        self.skip_if_needed()
        # retrieve some initial data to be sure only a subpart of the _grid is modified
        conv = self.backend.runpf()
        init_lp, *_ = self.backend.loads_info()
        init_gp, *_ = self.backend.generators_info()

        # check that maintenance vector is properly taken into account
        maintenance = np.full((self.backend.n_line,), fill_value=False, dtype=dt_bool)
        maintenance[19] = True
        action = self.action_env({"maintenance": maintenance})  # update the action
        bk_action = self.bkact_class()
        bk_action += action

        # apply the action here
        self.backend.apply_action(bk_action)

        # compute a load flow an performs more tests
        conv = self.backend.runpf()
        assert conv, "Power does not converge if line {} is removed".format(19)

        # performs basic check
        after_lp, *_ = self.backend.loads_info()
        after_gp, *_ = self.backend.generators_info()
        after_ls = self.backend.get_line_status()
        assert self.compare_vect(init_lp, after_lp)  # check i didn't modify the loads
        # assert self.compare_vect(init_gp, after_gp) # check i didn't modify the generators # TODO here problem with steady state P=C+L
        assert np.all(
            ~maintenance == after_ls
        )  # check i didn't disconnect any powerlines beside the correct one

        flows = self.backend.get_line_status()
        assert np.sum(~flows) == 1
        assert not flows[19]

    def test_apply_action_hazard(self):
        """A hazard on line 17 must disconnect exactly that line and leave
        loads untouched."""
        self.skip_if_needed()
        conv = self.backend.runpf()
        assert conv, "powerflow did not converge at iteration 0"
        init_lp, *_ = self.backend.loads_info()
        init_gp, *_ = self.backend.generators_info()

        # check that maintenance vector is properly taken into account
        maintenance = np.full((self.backend.n_line,), fill_value=False, dtype=dt_bool)
        maintenance[17] = True
        action = self.action_env({"hazards": maintenance})  # update the action
        bk_action = self.bkact_class()
        bk_action += action

        # apply the action here
        self.backend.apply_action(bk_action)

        # compute a load flow an performs more tests
        conv = self.backend.runpf()
        assert conv, "Power does not converge if line {} is removed".format(19)

        # performs basic check
        after_lp, *_ = self.backend.loads_info()
        after_gp, *_ = self.backend.generators_info()
        after_ls = self.backend.get_line_status()
        assert self.compare_vect(init_lp, after_lp)  # check i didn't modify the loads
        # assert self.compare_vect(init_gp, after_gp) # check i didn't modify the generators # TODO here problem with steady state P=C+L
        assert np.all(
            maintenance == ~after_ls
        )  # check i didn't disconnect any powerlines beside the correct one

    def test_apply_action_disconnection(self):
        """A simultaneous maintenance (line 19) and hazard (line 17) must
        disconnect exactly those two lines."""
        self.skip_if_needed()
        # retrieve some initial data to be sure only a subpart of the _grid is modified
        conv = self.backend.runpf()
        init_lp, *_ = self.backend.loads_info()
        init_gp, *_ = self.backend.generators_info()

        # check that maintenance vector is properly taken into account
        maintenance = np.full((self.backend.n_line,), fill_value=False, dtype=dt_bool)
        maintenance[19] = True

        disc = np.full((self.backend.n_line,), fill_value=False, dtype=dt_bool)
        disc[17] = True

        action = self.action_env(
            {"hazards": disc, "maintenance": maintenance}
        )  # update the action
        bk_action = self.bkact_class()
        bk_action += action

        # apply the action here
        self.backend.apply_action(bk_action)

        # compute a load flow an performs more tests
        conv = self.backend.runpf()
        assert (
            conv
        ), "Powerflow does not converge if lines {} and {} are removed".format(17, 19)

        # performs basic check
        after_lp, *_ = self.backend.loads_info()
        after_gp, *_ = self.backend.generators_info()
        after_ls = self.backend.get_line_status()
        assert self.compare_vect(init_lp, after_lp)  # check i didn't modify the loads
        # assert self.compare_vect(init_gp, after_gp) # check i didn't modify the generators # TODO here problem with steady state, P=C+L
        assert np.all(
            disc | maintenance == ~after_ls
        )  # check i didn't disconnect any powerlines beside the correct one

        flows = self.backend.get_line_status()
        assert np.sum(~flows) == 2
        assert not flows[19]
        assert not flows[17]
class BaseTestTopoAction(MakeBackend):
    def setUp(self):
        """Build a fresh backend on the test case (storage disabled) and
        prepare the action space + backend-action class used by the
        topology tests."""
        self.backend = self.make_backend()
        self.path_matpower = self.get_path()
        self.case_file = self.get_casefile()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.backend.load_grid(self.path_matpower, self.case_file)
        type(self.backend).set_env_name("BaseTestTopoAction")
        type(self.backend).set_no_storage()
        self.backend.assert_grid_correct()
        self.game_rules = RulesChecker()
        as_class = ActionSpace.init_grid(self.backend)
        self.helper_action = as_class(
            gridobj=self.backend, legal_action=self.game_rules.legal_action
        )
        self.bkact_class = _BackendAction.init_grid(self.backend)
def tearDown(self):
pass
def compare_vect(self, pred, true):
return np.max(np.abs(pred - true)) <= self.tolvect
    def _check_kirchoff(self):
        """Check Kirchhoff's law residuals from the backend are below
        ``self.tolvect``; reactive residuals are only checked when shunt
        data are available."""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            p_subs, q_subs, p_bus, q_bus, v_bus = self.backend.check_kirchoff()
            assert (
                np.max(np.abs(p_subs)) <= self.tolvect
            ), "problem with active values, at substation"
            assert (
                np.max(np.abs(p_bus.flatten())) <= self.tolvect
            ), "problem with active values, at a bus"
        if self.backend.shunts_data_available:
            assert (
                np.max(np.abs(q_subs)) <= self.tolvect
            ), "problem with reactive values, at substation"
            assert (
                np.max(np.abs(q_bus.flatten())) <= self.tolvect
            ), "problem with reaactive values, at a bus"
    def test_get_topo_vect_speed(self):
        """Apply a "set_bus" action on substation 1 and compare the full
        topology vector with the expected reference."""
        # retrieve some initial data to be sure only a subpart of the _grid is modified
        self.skip_if_needed()
        conv = self.backend.runpf()
        init_amps_flow = self.backend.get_line_flow()

        # check that maintenance vector is properly taken into account
        arr = np.array([1, 1, 1, 2, 2, 2], dtype=dt_int)
        id_ = 1
        action = self.helper_action({"set_bus": {"substations_id": [(id_, arr)]}})
        bk_action = self.bkact_class()
        bk_action += action

        # apply the action here
        self.backend.apply_action(bk_action)
        conv = self.backend.runpf()
        assert conv
        after_amps_flow = self.backend.get_line_flow()

        topo_vect = self.backend.get_topo_vect()
        # expected topology vector: three elements of substation 1 on bus 2
        topo_vect_old = np.array(
            [
                1,
                1,
                1,
                1,
                1,
                1,
                2,
                2,
                2,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
                1,
            ],
            dtype=dt_int,
        )
        assert self.compare_vect(topo_vect, topo_vect_old)
def test_topo_set1sub(self):
# retrieve some initial data to be sure only a subpart of the _grid is modified
self.skip_if_needed()
conv = self.backend.runpf()
init_amps_flow = self.backend.get_line_flow()
# check that maintenance vector is properly taken into account
arr = np.array([1, 1, 1, 2, 2, 2], dtype=dt_int)
id_ = 1
action = self.helper_action({"set_bus": {"substations_id": [(id_, arr)]}})
bk_action = self.bkact_class()
bk_action += action
# apply the action here
self.backend.apply_action(bk_action)
conv = self.backend.runpf()
assert conv
after_amps_flow = self.backend.get_line_flow()
topo_vect = self.backend.get_topo_vect()
assert np.min(topo_vect) == 1, "all buses have been changed"
assert np.max(topo_vect) == 2, "no buses have been changed"
# check that the objects have been properly moved
load_ids = np.where(self.backend.load_to_subid == id_)[0]
assert np.all(
topo_vect[self.backend.load_pos_topo_vect[load_ids]]
== arr[self.backend.load_to_sub_pos[load_ids]]
)
lor_ids = np.where(self.backend.line_or_to_subid == id_)[0]
assert np.all(
topo_vect[self.backend.line_or_pos_topo_vect[lor_ids]]
== arr[self.backend.line_or_to_sub_pos[lor_ids]]
)
lex_ids = np.where(self.backend.line_ex_to_subid == id_)[0]
assert np.all(
topo_vect[self.backend.line_ex_pos_topo_vect[lex_ids]]
== arr[self.backend.line_ex_to_sub_pos[lex_ids]]
)
gen_ids = np.where(self.backend.gen_to_subid == id_)[0]
assert np.all(
topo_vect[self.backend.gen_pos_topo_vect[gen_ids]]
== arr[self.backend.gen_to_sub_pos[gen_ids]]
)
after_amps_flow_th = np.array(
[
6.38865247e02,
3.81726828e02,
1.78001287e04,
2.70742428e04,
1.06755055e04,
4.71160165e03,
1.52265925e04,
3.37755751e02,
3.00535519e02,
5.01164454e-13,
7.01900962e01,
1.73874580e02,
2.08904697e04,
2.11757439e04,
4.93863382e04,
1.31935835e02,
6.99779475e01,
1.85068609e02,
7.47283039e02,
1.14125596e03,
]
)
after_amps_flow_th = np.array(
[
596.58386539,
342.31364678,
18142.87789987,
27084.37162086,
10155.86483194,
4625.93022957,
15064.92626615,
322.59381855,
273.6977149,
82.21908229,
80.91290202,
206.04740125,
20480.81970337,
21126.22533095,
49275.71520428,
128.04429617,
69.00661266,
188.44754187,
688.1371226,
1132.42521887,
]
)
assert self.compare_vect(after_amps_flow, after_amps_flow_th)
self._check_kirchoff()
    def test_topo_change1sub(self):
        # check that switching the bus of 3 object is equivalent to set them to bus 2 (as above)
        self.skip_if_needed()
        conv = self.backend.runpf()
        init_amps_flow = self.backend.get_line_flow()

        # check that maintenance vector is properly taken into account
        arr = np.array([False, False, False, True, True, True], dtype=dt_bool)
        id_ = 1
        action = self.helper_action({"change_bus": {"substations_id": [(id_, arr)]}})
        bk_action = self.bkact_class()
        bk_action += action

        # apply the action here
        self.backend.apply_action(bk_action)

        # run the powerflow
        conv = self.backend.runpf()
        assert conv
        after_amps_flow = self.backend.get_line_flow()

        topo_vect = self.backend.get_topo_vect()
        assert np.min(topo_vect) == 1, "all buses have been changed"
        assert np.max(topo_vect) == 2, "no buses have been changed"

        # check that the objects have been properly moved
        # (changed elements start on bus 1, so their new bus is 1 + arr)
        load_ids = np.where(self.backend.load_to_subid == id_)[0]
        assert np.all(
            topo_vect[self.backend.load_pos_topo_vect[load_ids]]
            == 1 + arr[self.backend.load_to_sub_pos[load_ids]]
        )
        lor_ids = np.where(self.backend.line_or_to_subid == id_)[0]
        assert np.all(
            topo_vect[self.backend.line_or_pos_topo_vect[lor_ids]]
            == 1 + arr[self.backend.line_or_to_sub_pos[lor_ids]]
        )
        lex_ids = np.where(self.backend.line_ex_to_subid == id_)[0]
        assert np.all(
            topo_vect[self.backend.line_ex_pos_topo_vect[lex_ids]]
            == 1 + arr[self.backend.line_ex_to_sub_pos[lex_ids]]
        )
        gen_ids = np.where(self.backend.gen_to_subid == id_)[0]
        assert np.all(
            topo_vect[self.backend.gen_pos_topo_vect[gen_ids]]
            == 1 + arr[self.backend.gen_to_sub_pos[gen_ids]]
        )

        # expected amps after the change (same reference as the "set_bus" test)
        after_amps_flow_th = np.array(
            [
                596.58386539,
                342.31364678,
                18142.87789987,
                27084.37162086,
                10155.86483194,
                4625.93022957,
                15064.92626615,
                322.59381855,
                273.6977149,
                82.21908229,
                80.91290202,
                206.04740125,
                20480.81970337,
                21126.22533095,
                49275.71520428,
                128.04429617,
                69.00661266,
                188.44754187,
                688.1371226,
                1132.42521887,
            ]
        )
        assert self.compare_vect(after_amps_flow, after_amps_flow_th)
        self._check_kirchoff()
    def test_topo_change_1sub_twice(self):
        # check that switching the bus of 3 object is equivalent to set them to bus 2 (as above)
        # and that setting it again is equivalent to doing nothing
        self.skip_if_needed()
        conv = self.backend.runpf()
        init_amps_flow = copy.deepcopy(self.backend.get_line_flow())

        # check that maintenance vector is properly taken into account
        arr = np.array([False, False, False, True, True, True], dtype=dt_bool)
        id_ = 1
        action = self.helper_action({"change_bus": {"substations_id": [(id_, arr)]}})
        bk_action = self.bkact_class()
        bk_action += action

        # apply the action here
        self.backend.apply_action(bk_action)
        conv = self.backend.runpf()
        bk_action.reset()
        assert conv
        after_amps_flow = self.backend.get_line_flow()

        topo_vect = self.backend.get_topo_vect()
        assert np.min(topo_vect) == 1, "all buses have been changed"
        assert np.max(topo_vect) == 2, "no buses have been changed"

        # check that the objects have been properly moved
        load_ids = np.where(self.backend.load_to_subid == id_)[0]
        assert np.all(
            topo_vect[self.backend.load_pos_topo_vect[load_ids]]
            == 1 + arr[self.backend.load_to_sub_pos[load_ids]]
        )
        lor_ids = np.where(self.backend.line_or_to_subid == id_)[0]
        assert np.all(
            topo_vect[self.backend.line_or_pos_topo_vect[lor_ids]]
            == 1 + arr[self.backend.line_or_to_sub_pos[lor_ids]]
        )
        lex_ids = np.where(self.backend.line_ex_to_subid == id_)[0]
        assert np.all(
            topo_vect[self.backend.line_ex_pos_topo_vect[lex_ids]]
            == 1 + arr[self.backend.line_ex_to_sub_pos[lex_ids]]
        )
        gen_ids = np.where(self.backend.gen_to_subid == id_)[0]
        assert np.all(
            topo_vect[self.backend.gen_pos_topo_vect[gen_ids]]
            == 1 + arr[self.backend.gen_to_sub_pos[gen_ids]]
        )

        after_amps_flow_th = np.array(
            [
                596.58386539,
                342.31364678,
                18142.87789987,
                27084.37162086,
                10155.86483194,
                4625.93022957,
                15064.92626615,
                322.59381855,
                273.6977149,
                82.21908229,
                80.91290202,
                206.04740125,
                20480.81970337,
                21126.22533095,
                49275.71520428,
                128.04429617,
                69.00661266,
                188.44754187,
                688.1371226,
                1132.42521887,
            ]
        )
        assert self.compare_vect(after_amps_flow, after_amps_flow_th)
        self._check_kirchoff()

        # applying the same "change_bus" again must restore the initial state
        action = self.helper_action({"change_bus": {"substations_id": [(id_, arr)]}})
        bk_action += action

        # apply the action here
        self.backend.apply_action(bk_action)
        conv = self.backend.runpf()
        assert conv

        after_amps_flow = self.backend.get_line_flow()
        assert self.compare_vect(after_amps_flow, init_amps_flow)
        topo_vect = self.backend.get_topo_vect()
        assert np.min(topo_vect) == 1
        assert np.max(topo_vect) == 1
        self._check_kirchoff()
    def test_topo_change_2sub(self):
        """Combine a "change_bus" on substation 1 and a "set_bus" on
        substation 12 in a single action and check both are applied."""
        # check that maintenance vector is properly taken into account
        self.skip_if_needed()
        arr1 = np.array([False, False, False, True, True, True], dtype=dt_bool)
        arr2 = np.array([1, 1, 2, 2], dtype=dt_int)
        id_1 = 1
        id_2 = 12
        action = self.helper_action(
            {
                "change_bus": {"substations_id": [(id_1, arr1)]},
                "set_bus": {"substations_id": [(id_2, arr2)]},
            }
        )
        bk_action = self.bkact_class()
        bk_action += action

        # apply the action here
        self.backend.apply_action(bk_action)
        conv = self.backend.runpf()
        assert conv, "powerflow diverge it should not"

        # check the _grid is correct
        topo_vect = self.backend.get_topo_vect()
        assert np.min(topo_vect) == 1, "all buses have been changed"
        assert np.max(topo_vect) == 2, "no buses have been changed"

        # check that the objects have been properly moved
        # substation id_1: "change_bus", elements end up on bus 1 + arr1
        load_ids = np.where(self.backend.load_to_subid == id_1)[0]
        assert np.all(
            topo_vect[self.backend.load_pos_topo_vect[load_ids]]
            == 1 + arr1[self.backend.load_to_sub_pos[load_ids]]
        )
        lor_ids = np.where(self.backend.line_or_to_subid == id_1)[0]
        assert np.all(
            topo_vect[self.backend.line_or_pos_topo_vect[lor_ids]]
            == 1 + arr1[self.backend.line_or_to_sub_pos[lor_ids]]
        )
        lex_ids = np.where(self.backend.line_ex_to_subid == id_1)[0]
        assert np.all(
            topo_vect[self.backend.line_ex_pos_topo_vect[lex_ids]]
            == 1 + arr1[self.backend.line_ex_to_sub_pos[lex_ids]]
        )
        gen_ids = np.where(self.backend.gen_to_subid == id_1)[0]
        assert np.all(
            topo_vect[self.backend.gen_pos_topo_vect[gen_ids]]
            == 1 + arr1[self.backend.gen_to_sub_pos[gen_ids]]
        )

        # substation id_2: "set_bus", elements end up on exactly arr2
        load_ids = np.where(self.backend.load_to_subid == id_2)[0]
        # TODO check the topology symmetry
        assert np.all(
            topo_vect[self.backend.load_pos_topo_vect[load_ids]]
            == arr2[self.backend.load_to_sub_pos[load_ids]]
        )
        lor_ids = np.where(self.backend.line_or_to_subid == id_2)[0]
        assert np.all(
            topo_vect[self.backend.line_or_pos_topo_vect[lor_ids]]
            == arr2[self.backend.line_or_to_sub_pos[lor_ids]]
        )
        lex_ids = np.where(self.backend.line_ex_to_subid == id_2)[0]
        assert np.all(
            topo_vect[self.backend.line_ex_pos_topo_vect[lex_ids]]
            == arr2[self.backend.line_ex_to_sub_pos[lex_ids]]
        )
        gen_ids = np.where(self.backend.gen_to_subid == id_2)[0]
        assert np.all(
            topo_vect[self.backend.gen_pos_topo_vect[gen_ids]]
            == arr2[self.backend.gen_to_sub_pos[gen_ids]]
        )

        after_amps_flow = self.backend.get_line_flow()
        after_amps_flow_th = np.array(
            [
                596.97014348,
                342.10559579,
                16615.11815357,
                31328.50690716,
                11832.77202397,
                11043.10650167,
                11043.10650167,
                322.79533908,
                273.86501458,
                82.34066647,
                80.89289074,
                208.42396413,
                22178.16766548,
                27690.51322075,
                38684.31540646,
                129.44842477,
                70.02629553,
                185.67687123,
                706.77680037,
                1155.45754617,
            ]
        )
        assert self.compare_vect(after_amps_flow, after_amps_flow_th)
        self._check_kirchoff()
def _aux_test_back_orig(self, act_set, prod_p, load_p, p_or, sh_q):
"""function used for test_get_action_to_set"""
bk_act = self.backend.my_bk_act_class()
bk_act += act_set
self.backend.apply_action(bk_act)
self._aux_aux_check_if_matches(prod_p, load_p, p_or, sh_q)
def _aux_aux_check_if_matches(self, prod_p, load_p, p_or, sh_q):
    """Run a powerflow and check the backend state matches the reference values.

    Parameters
    ----------
    prod_p:
        expected generator active production
    load_p:
        expected load active consumption
    p_or:
        expected active flow at the origin side of each powerline
    sh_q:
        expected shunt reactive values (ignored when the backend does not
        expose shunt data)
    """
    self.backend.runpf()
    prod_p3, prod_q3, prod_v3 = self.backend.generators_info()
    load_p3, load_q3, load_v3 = self.backend.loads_info()
    p_or3, *_ = self.backend.lines_or_info()
    if self.backend.shunts_data_available:
        _, sh_q3, *_ = self.backend.shunt_info()
    assert np.all(
        np.abs(prod_p3 - prod_p) <= self.tol_one
    ), "wrong generators value"
    assert np.all(np.abs(load_p3 - load_p) <= self.tol_one), "wrong load value"
    assert np.all(
        np.abs(p_or3 - p_or) <= self.tol_one
    ), "wrong value for active flow origin"
    # NOTE: the original code repeated the "p_or3 - p_or" assertion twice
    # (copy-paste); the redundant duplicate has been removed.
    if self.backend.shunts_data_available:
        assert np.all(
            np.abs(sh_q3 - sh_q) <= self.tol_one
        ), "wrong value for shunt reactive"
def test_get_action_to_set(self):
    """Test the "get_action_to_set" method.

    Scenario, repeated for each kind of disturbance (injections, line
    status, topology, shunts): record the initial backend state with
    ``get_action_to_set``, disturb the grid, check the disturbance had a
    visible effect, then check that re-applying the recorded action
    restores the initial state.
    """
    self.skip_if_needed()
    self.backend.runpf()
    self.backend.assert_grid_correct_after_powerflow()
    self.backend.runpf()
    act = self.backend.get_action_to_set()
    # reference state that must be recovered after each disturbance
    prod_p, prod_q, prod_v = self.backend.generators_info()
    load_p, load_q, load_v = self.backend.loads_info()
    p_or, *_ = self.backend.lines_or_info()
    if self.backend.shunts_data_available:
        _, sh_q, *_ = self.backend.shunt_info()
    else:
        sh_q = None
    # modify its state for injection
    act2 = copy.deepcopy(act)
    act2._dict_inj["prod_p"] *= 1.5
    act2._dict_inj["load_p"] *= 1.5
    bk_act2 = self.backend.my_bk_act_class()
    bk_act2 += act2
    self.backend.apply_action(bk_act2)
    self.backend.runpf()
    prod_p2, prod_q2, prod_v2 = self.backend.generators_info()
    load_p2, load_q2, load_v2 = self.backend.loads_info()
    p_or2, *_ = self.backend.lines_or_info()
    # the disturbance must have actually changed the state
    assert np.any(np.abs(prod_p2 - prod_p) >= self.tol_one)
    assert np.any(np.abs(load_p2 - load_p) >= self.tol_one)
    assert np.any(np.abs(p_or2 - p_or) >= self.tol_one)
    # check i can put it back to orig state
    try:
        self._aux_test_back_orig(act, prod_p, load_p, p_or, sh_q)
    except AssertionError as exc_:
        raise AssertionError("Error for injection: {}".format(exc_))
    # disconnect a powerline
    act2 = copy.deepcopy(act)
    l_id = 0
    act2._set_line_status[l_id] = -1
    act2._set_topo_vect[act2.line_or_pos_topo_vect[l_id]] = -1
    act2._set_topo_vect[act2.line_ex_pos_topo_vect[l_id]] = -1
    bk_act2 = self.backend.my_bk_act_class()
    bk_act2 += act2
    self.backend.apply_action(bk_act2)
    self.backend.runpf()
    p_or2, *_ = self.backend.lines_or_info()
    assert np.abs(p_or2[l_id]) <= self.tol_one, "line has not been disconnected"
    assert np.any(np.abs(p_or2 - p_or) >= self.tol_one)
    # check i can put it back to orig state
    try:
        self._aux_test_back_orig(act, prod_p, load_p, p_or, sh_q)
    except AssertionError as exc_:
        raise AssertionError("Error for line_status: {}".format(exc_))
    # change topology: move topo-vector elements 6..8 to bus 2
    act2 = copy.deepcopy(act)
    # NOTE: the original code assigned "_set_topo_vect[6:9] = 2" twice in a
    # row; the redundant duplicate assignment has been removed.
    act2._set_topo_vect[6:9] = 2
    bk_act2 = self.backend.my_bk_act_class()
    bk_act2 += act2
    self.backend.apply_action(bk_act2)
    self.backend.runpf()
    p_or2, *_ = self.backend.lines_or_info()
    assert np.any(np.abs(p_or2 - p_or) >= self.tol_one)
    # check i can put it back to orig state
    try:
        self._aux_test_back_orig(act, prod_p, load_p, p_or, sh_q)
    except AssertionError as exc_:
        raise AssertionError("Error for topo: {}".format(exc_))
    # change shunt (only when the backend exposes shunt data)
    if self.backend.shunts_data_available:
        act2 = copy.deepcopy(act)
        act2.shunt_q[:] = -25.0
        bk_act2 = self.backend.my_bk_act_class()
        bk_act2 += act2
        self.backend.apply_action(bk_act2)
        self.backend.runpf()
        prod_p2, prod_q2, prod_v2 = self.backend.generators_info()
        _, sh_q2, *_ = self.backend.shunt_info()
        p_or2, *_ = self.backend.lines_or_info()
        assert np.any(np.abs(prod_p2 - prod_p) >= self.tol_one)
        assert np.any(np.abs(p_or2 - p_or) >= self.tol_one)
        assert np.any(np.abs(sh_q2 - sh_q) >= self.tol_one)
        # check i can put it back to orig state
        try:
            self._aux_test_back_orig(act, prod_p, load_p, p_or, sh_q)
        except AssertionError as exc_:
            raise AssertionError("Error for shunt: {}".format(exc_))
def test_get_action_to_set_storage(self):
    """Check that "get_action_to_set" also captures the storage units' state:
    the state recorded on one environment can be replayed on a second,
    identical environment."""
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        env_ref = make(
            "educ_case14_storage",
            test=True,
            backend=self.make_backend(),
            _add_to_name="test_gats_storage",
        )
        env_copy = make(
            "educ_case14_storage",
            test=True,
            backend=self.make_backend(),
            _add_to_name="test_gats_storage",
        )
    # act on the storage units of the reference environment only
    obs, *_ = env_ref.step(env_ref.action_space({"set_storage": [-1.0, 1.0]}))
    recorded_state = env_ref.backend.get_action_to_set()
    # replay the recorded state on the second environment
    backend_action = env_copy.backend.my_bk_act_class()
    backend_action += recorded_state
    env_copy.backend.apply_action(backend_action)
    env_copy.backend.runpf()
    assert np.all(
        env_copy.backend.storages_info()[0] == env_ref.backend.storages_info()[0]
    )
def _aux_test_back_orig_2(self, obs, prod_p, load_p, p_or, sh_q):
    """Restore the backend from the observation ``obs`` then check the grid is
    back to the reference values (helper for test_update_from_obs)."""
    self.backend.update_from_obs(obs)
    self._aux_aux_check_if_matches(prod_p, load_p, p_or, sh_q)
def test_update_from_obs(self):
    """Test the "update_from_obs" method.

    Same scenario as ``test_get_action_to_set`` but the restoration is done
    through an observation: record the initial observation, disturb the grid
    (injections, line status, topology, shunts), then check that
    ``update_from_obs`` brings the backend back to the initial state.
    """
    self.skip_if_needed()
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        env = make(
            "rte_case14_realistic",
            test=True,
            backend=self.make_backend(),
            _add_to_name="test_update_from_obs",
        )
    # work directly on the environment's backend
    self.backend.close()
    self.backend = env.backend
    act = self.backend.get_action_to_set()
    obs = env.reset()
    # store the initial value that should be there when i reapply the "update_from_obs"
    prod_p, prod_q, prod_v = self.backend.generators_info()
    load_p, load_q, load_v = self.backend.loads_info()
    p_or, *_ = self.backend.lines_or_info()
    if self.backend.shunts_data_available:
        _, sh_q, *_ = self.backend.shunt_info()
    else:
        sh_q = None
    # modify its state for injection
    act2 = copy.deepcopy(act)
    act2._dict_inj["prod_p"] *= 1.5
    act2._dict_inj["load_p"] *= 1.5
    bk_act2 = self.backend.my_bk_act_class()
    bk_act2 += act2
    self.backend.apply_action(bk_act2)
    self.backend.runpf()
    prod_p2, prod_q2, prod_v2 = self.backend.generators_info()
    load_p2, load_q2, load_v2 = self.backend.loads_info()
    p_or2, *_ = self.backend.lines_or_info()
    # the disturbance must have actually changed the state
    assert np.any(np.abs(prod_p2 - prod_p) >= self.tol_one)
    assert np.any(np.abs(load_p2 - load_p) >= self.tol_one)
    assert np.any(np.abs(p_or2 - p_or) >= self.tol_one)
    # check i can put it back to orig state
    try:
        self._aux_test_back_orig_2(obs, prod_p, load_p, p_or, sh_q)
    except AssertionError as exc_:
        raise AssertionError("Error for injection: {}".format(exc_))
    # disconnect a powerline
    act2 = copy.deepcopy(act)
    l_id = 0
    act2._set_line_status[l_id] = -1
    act2._set_topo_vect[act2.line_or_pos_topo_vect[l_id]] = -1
    act2._set_topo_vect[act2.line_ex_pos_topo_vect[l_id]] = -1
    bk_act2 = self.backend.my_bk_act_class()
    bk_act2 += act2
    self.backend.apply_action(bk_act2)
    self.backend.runpf()
    p_or2, *_ = self.backend.lines_or_info()
    assert np.abs(p_or2[l_id]) <= self.tol_one, "line has not been disconnected"
    assert np.any(np.abs(p_or2 - p_or) >= self.tol_one)
    # check i can put it back to orig state
    try:
        self._aux_test_back_orig_2(obs, prod_p, load_p, p_or, sh_q)
    except AssertionError as exc_:
        raise AssertionError("Error for line_status: {}".format(exc_))
    # change topology: move topo-vector elements 6..8 to bus 2
    act2 = copy.deepcopy(act)
    # NOTE: the original code assigned "_set_topo_vect[6:9] = 2" twice in a
    # row; the redundant duplicate assignment has been removed.
    act2._set_topo_vect[6:9] = 2
    bk_act2 = self.backend.my_bk_act_class()
    bk_act2 += act2
    self.backend.apply_action(bk_act2)
    self.backend.runpf()
    p_or2, *_ = self.backend.lines_or_info()
    assert np.any(np.abs(p_or2 - p_or) >= self.tol_one)
    # check i can put it back to orig state
    try:
        self._aux_test_back_orig_2(obs, prod_p, load_p, p_or, sh_q)
    except AssertionError as exc_:
        raise AssertionError("Error for topo: {}".format(exc_))
    # change shunt (only when the backend exposes shunt data)
    if self.backend.shunts_data_available:
        act2 = copy.deepcopy(act)
        act2.shunt_q[:] = -25.0
        bk_act2 = self.backend.my_bk_act_class()
        bk_act2 += act2
        self.backend.apply_action(bk_act2)
        self.backend.runpf()
        prod_p2, prod_q2, prod_v2 = self.backend.generators_info()
        _, sh_q2, *_ = self.backend.shunt_info()
        p_or2, *_ = self.backend.lines_or_info()
        assert np.any(np.abs(prod_p2 - prod_p) >= self.tol_one)
        assert np.any(np.abs(p_or2 - p_or) >= self.tol_one)
        assert np.any(np.abs(sh_q2 - sh_q) >= self.tol_one)
        # check i can put it back to orig state
        try:
            self._aux_test_back_orig_2(obs, prod_p, load_p, p_or, sh_q)
        except AssertionError as exc_:
            raise AssertionError("Error for shunt: {}".format(exc_))
class BaseTestEnvPerformsCorrectCascadingFailures(MakeBackend):
    """
    Test the "next_grid_state" method of the back-end: hard overflows must be
    disconnected immediately, soft overflows only after the allowed number of
    timesteps, and cascading failures must be simulated (or not) accordingly.
    """

    def setUp(self):
        # detailed infos so that next_grid_state returns the intermediate
        # grid states of the cascading failure
        self.backend = self.make_backend(detailed_infos_for_cascading_failures=True)
        type(self.backend)._clear_class_attribute()
        self.path_matpower = self.get_path()
        self.case_file = self.get_casefile()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.backend.load_grid(self.path_matpower, self.case_file)
        type(self.backend).set_env_name("TestEnvPerformsCorrectCascadingFailures_env")
        type(self.backend).set_no_storage()
        self.backend.assert_grid_correct()
        self.game_rules = RulesChecker()
        self.action_env = ActionSpace(
            gridobj=self.backend, legal_action=self.game_rules.legal_action
        )
        # reference flows on the initial grid, used to craft thermal limits
        # (presumably in Amps — TODO confirm against the backend convention)
        self.lines_flows_init = np.array(
            [
                638.28966637,
                305.05042301,
                17658.9674809,
                26534.04334098,
                10869.23856329,
                4686.71726729,
                15612.65903298,
                300.07915572,
                229.8060832,
                169.97292682,
                100.40192958,
                265.47505664,
                21193.86923911,
                21216.44452327,
                49701.1565287,
                124.79684388,
                67.59759985,
                192.19424706,
                666.76961936,
                1113.52773632,
            ]
        )
        # _parameters for the environment
        self.env_params = Parameters()
        # used for init an env too
        self.chronics_handler = ChronicsHandler()
        self.id_first_line_disco = 8  # due to hard overflow
        self.id_2nd_line_disco = 11  # due to soft overflow

    def tearDown(self):
        pass

    def next_grid_state_no_overflow(self):
        # NOTE(review): the name lacks the "test_" prefix, so pytest will not
        # collect it — confirm whether this is intentional.
        # first i test that, when there is no overflow, i dont do a cascading failure
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = Environment(
                init_grid_path=os.path.join(self.path_matpower, self.case_file),
                backend=self.backend,
                init_env_path=os.path.join(self.path_matpower, self.case_file),
                chronics_handler=self.chronics_handler,
                parameters=self.env_params,
                name="test_pp_env1",
            )
        disco, infos, conv_ = self.backend.next_grid_state(env, is_dc=False)
        assert conv_ is None
        assert not infos

    def test_next_grid_state_1overflow(self):
        # second i test that, when is one line on hard overflow it is disconnected
        self.skip_if_needed()
        case_file = self.case_file
        env_params = copy.deepcopy(self.env_params)
        env_params.HARD_OVERFLOW_THRESHOLD = 1.5
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = Environment(
                init_grid_path=os.path.join(self.path_matpower, case_file),
                init_env_path=os.path.join(self.path_matpower, case_file),
                backend=self.backend,
                chronics_handler=self.chronics_handler,
                parameters=env_params,
                name="test_pp_env2",
            )
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.backend.load_grid(self.path_matpower, case_file)
        type(self.backend).set_no_storage()
        self.backend.assert_grid_correct()
        # high thermal limit everywhere except on the line that must trip
        thermal_limit = 10 * self.lines_flows_init
        thermal_limit[self.id_first_line_disco] = (
            self.lines_flows_init[self.id_first_line_disco] / 2
        )
        self.backend.set_thermal_limit(thermal_limit)
        disco, infos, conv_ = self.backend.next_grid_state(env, is_dc=False)
        assert conv_ is None
        assert len(infos) == 1  # check that i have only one overflow
        assert np.sum(disco >= 0) == 1

    def test_next_grid_state_1overflow_envNoCF(self):
        # third i test that, if a line is on hard overflow, but i'm on a "no cascading failure" mode,
        # i don't simulate a cascading failure
        self.skip_if_needed()
        self.env_params.NO_OVERFLOW_DISCONNECTION = True
        case_file = self.case_file
        env_params = copy.deepcopy(self.env_params)
        env_params.HARD_OVERFLOW_THRESHOLD = 1.5
        # NOTE(review): the Environment below receives self.env_params, not the
        # env_params copy, so the HARD_OVERFLOW_THRESHOLD set above is unused
        # — confirm this is intentional.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = Environment(
                init_grid_path=os.path.join(self.path_matpower, case_file),
                backend=self.backend,
                init_env_path=os.path.join(self.path_matpower, case_file),
                chronics_handler=self.chronics_handler,
                parameters=self.env_params,
                name="test_pp_env3",
            )
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.backend.load_grid(self.path_matpower, case_file)
        type(self.backend).set_no_storage()
        self.backend.assert_grid_correct()
        conv = self.backend.runpf()
        assert conv, "powerflow should converge at loading"
        lines_flows_init = self.backend.get_line_flow()
        thermal_limit = 10 * lines_flows_init
        thermal_limit[self.id_first_line_disco] = (
            lines_flows_init[self.id_first_line_disco] / 2
        )
        self.backend.set_thermal_limit(thermal_limit)
        disco, infos, conv_ = self.backend.next_grid_state(env, is_dc=False)
        assert conv_ is None
        assert not infos  # check that don't simulate a cascading failure
        assert np.sum(disco >= 0) == 0

    def test_set_thermal_limit(self):
        # setter / getter round trip for the thermal limits
        thermal_limit = np.arange(self.backend.n_line)
        self.backend.set_thermal_limit(thermal_limit)
        assert np.all(self.backend.thermal_limit_a == thermal_limit)

    def test_nb_timestep_overflow_disc0(self):
        # on this _grid, first line with id 5 is overheated,
        # it is disconnected
        # then powerline 16 have a relative flow of 1.5916318201096937
        # in this scenario i don't have a second line disconnection.
        self.skip_if_needed()
        case_file = self.case_file
        env_params = copy.deepcopy(self.env_params)
        env_params.HARD_OVERFLOW_THRESHOLD = 1.5
        env_params.NB_TIMESTEP_OVERFLOW_ALLOWED = 0
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = Environment(
                init_grid_path=os.path.join(self.path_matpower, case_file),
                backend=self.backend,
                init_env_path=os.path.join(self.path_matpower, case_file),
                chronics_handler=self.chronics_handler,
                parameters=env_params,
                name="test_pp_env4",
            )
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.backend.load_grid(self.path_matpower, case_file)
        type(self.backend).set_no_storage()
        self.backend.assert_grid_correct()
        conv = self.backend.runpf()
        assert conv, "powerflow should converge at loading"
        lines_flows_init = self.backend.get_line_flow()
        thermal_limit = 10 * lines_flows_init
        thermal_limit[self.id_first_line_disco] = (
            lines_flows_init[self.id_first_line_disco] / 2
        )
        thermal_limit[self.id_2nd_line_disco] = 400
        self.backend.set_thermal_limit(thermal_limit)
        disco, infos, conv_ = self.backend.next_grid_state(env, is_dc=False)
        assert conv_ is None
        assert len(infos) == 2  # check that there is a cascading failure of length 2
        assert disco[self.id_first_line_disco] >= 0
        assert disco[self.id_2nd_line_disco] >= 0
        assert np.sum(disco >= 0) == 2

    def test_nb_timestep_overflow_nodisc(self):
        # on this _grid, first line with id 18 is overheated,
        # it is disconnected
        # then powerline 16 have a relative flow of 1.5916318201096937
        # in this scenario i don't have a second line disconnection because
        # the overflow is a soft overflow and the powerline is presumably overflow since 0
        # timestep
        self.skip_if_needed()
        case_file = self.case_file
        env_params = copy.deepcopy(self.env_params)
        env_params.HARD_OVERFLOW_THRESHOLD = 1.5
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = Environment(
                init_grid_path=os.path.join(self.path_matpower, case_file),
                backend=self.backend,
                chronics_handler=self.chronics_handler,
                init_env_path=os.path.join(self.path_matpower, case_file),
                parameters=env_params,
                name="test_pp_env5",
            )
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.backend.load_grid(self.path_matpower, case_file)
        type(self.backend).set_no_storage()
        self.backend.assert_grid_correct()
        # the soft-overflowed line has been on overflow for 0 timesteps
        env._timestep_overflow[self.id_2nd_line_disco] = 0
        thermal_limit = 10 * self.lines_flows_init
        thermal_limit[self.id_first_line_disco] = (
            self.lines_flows_init[self.id_first_line_disco] / 2
        )
        thermal_limit[self.id_2nd_line_disco] = 400
        self.backend.set_thermal_limit(thermal_limit)
        disco, infos, conv_ = self.backend.next_grid_state(env, is_dc=False)
        assert conv_ is None
        assert len(infos) == 1  # check that don't simulate a cascading failure
        assert disco[self.id_first_line_disco] >= 0
        assert np.sum(disco >= 0) == 1

    def test_nb_timestep_overflow_nodisc_2(self):
        # on this _grid, first line with id 18 is overheated,
        # it is disconnected
        # then powerline 16 have a relative flow of 1.5916318201096937
        # in this scenario i don't have a second line disconnection because
        # the overflow is a soft overflow and the powerline is presumably overflow since only 1
        # timestep
        self.skip_if_needed()
        case_file = self.case_file
        env_params = copy.deepcopy(self.env_params)
        env_params.HARD_OVERFLOW_THRESHOLD = 1.5
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = Environment(
                init_grid_path=os.path.join(self.path_matpower, case_file),
                backend=self.backend,
                chronics_handler=self.chronics_handler,
                init_env_path=os.path.join(self.path_matpower, case_file),
                parameters=env_params,
                name="test_pp_env6",
            )
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.backend.load_grid(self.path_matpower, case_file)
        type(self.backend).set_no_storage()
        self.backend.assert_grid_correct()
        # the soft-overflowed line has been on overflow for only 1 timestep
        env._timestep_overflow[self.id_2nd_line_disco] = 1
        thermal_limit = 10 * self.lines_flows_init
        thermal_limit[self.id_first_line_disco] = (
            self.lines_flows_init[self.id_first_line_disco] / 2
        )
        thermal_limit[self.id_2nd_line_disco] = 400
        self.backend.set_thermal_limit(thermal_limit)
        disco, infos, conv_ = self.backend.next_grid_state(env, is_dc=False)
        assert conv_ is None
        assert len(infos) == 1  # check that don't simulate a cascading failure
        assert disco[self.id_first_line_disco] >= 0
        assert np.sum(disco >= 0) == 1

    def test_nb_timestep_overflow_disc2(self):
        # on this _grid, first line with id 18 is overheated,
        # it is disconnected
        # then powerline 16 have a relative flow of 1.5916318201096937
        # in this scenario I have a second disconnection, because the powerline is allowed to be on overflow for 2
        # timestep and is still on overflow here.
        self.skip_if_needed()
        case_file = self.case_file
        env_params = copy.deepcopy(self.env_params)
        env_params.HARD_OVERFLOW_THRESHOLD = 1.5
        env_params.NB_TIMESTEP_OVERFLOW_ALLOWED = 2
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = Environment(
                init_grid_path=os.path.join(self.path_matpower, case_file),
                backend=self.backend,
                chronics_handler=self.chronics_handler,
                init_env_path=os.path.join(self.path_matpower, case_file),
                parameters=env_params,
                name="test_pp_env7",
            )
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.backend.load_grid(self.path_matpower, case_file)
        type(self.backend).set_no_storage()
        self.backend.assert_grid_correct()
        # the soft-overflowed line has exhausted its allowed overflow budget
        env._timestep_overflow[self.id_2nd_line_disco] = 2
        thermal_limit = 10 * self.lines_flows_init
        thermal_limit[self.id_first_line_disco] = (
            self.lines_flows_init[self.id_first_line_disco] / 2
        )
        thermal_limit[self.id_2nd_line_disco] = 400
        self.backend.set_thermal_limit(thermal_limit)
        disco, infos, conv_ = self.backend.next_grid_state(env, is_dc=False)
        assert conv_ is None
        assert len(infos) == 2  # check that there is a cascading failure of length 2
        assert disco[self.id_first_line_disco] >= 0
        assert disco[self.id_2nd_line_disco] >= 0
        assert np.sum(disco >= 0) == 2
        # intermediate grid states: line 1 is out from the start, line 2 only
        # after the second step of the cascade
        for i, grid_tmp in enumerate(infos):
            assert not grid_tmp.get_line_status()[self.id_first_line_disco]
            if i == 1:
                assert not grid_tmp.get_line_status()[self.id_2nd_line_disco]
class BaseTestChangeBusAffectRightBus(MakeBackend):
    """Check that "set_bus" / "change_bus" actions move the right elements to
    the right buses, and that isolating elements leads to a game over."""

    def test_set_bus(self):
        # setting one line origin on bus 2 puts exactly one element on bus 2
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = make("rte_case14_realistic", test=True, backend=backend)
        env.reset()
        action = env.action_space({"set_bus": {"lines_or_id": [(17, 2)]}})
        obs, reward, done, info = env.step(action)
        assert np.all(np.isfinite(obs.v_or))
        assert np.sum(env.backend.get_topo_vect() == 2) == 1
        assert np.all(np.isfinite(obs.to_vect()))

    def test_change_bus(self):
        # toggling one line origin (initially on bus 1) moves it to bus 2
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = make("rte_case14_realistic", test=True, backend=backend)
        env.reset()
        action = env.action_space({"change_bus": {"lines_or_id": [17]}})
        obs, reward, done, info = env.step(action)
        assert np.all(np.isfinite(obs.v_or))
        assert np.sum(env.backend.get_topo_vect() == 2) == 1

    def test_change_bustwice(self):
        # toggling the same element twice brings it back to bus 1
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = make("rte_case14_realistic", test=True, backend=backend)
        env.reset()
        action = env.action_space({"change_bus": {"lines_or_id": [17]}})
        obs, reward, done, info = env.step(action)
        assert not done
        assert np.all(np.isfinite(obs.v_or))
        assert np.sum(env.backend.get_topo_vect() == 2) == 1
        action = env.action_space({"change_bus": {"lines_or_id": [17]}})
        obs, reward, done, info = env.step(action)
        assert not done
        assert np.all(np.isfinite(obs.v_or))
        assert np.sum(env.backend.get_topo_vect() == 2) == 0

    def test_isolate_load(self):
        # moving a load alone on bus 2 isolates it -> game over
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = make("rte_case14_realistic", test=True, backend=backend)
        act = env.action_space({"set_bus": {"loads_id": [(0, 2)]}})
        obs, reward, done, info = env.step(act)
        assert done, "an isolated load has not lead to a game over"

    def test_reco_disco_bus(self):
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env_case1 = make(
                "rte_case5_example",
                test=True,
                gamerules_class=AlwaysLegal,
                backend=backend,
            )
        obs = env_case1.reset()  # reset is good
        act = env_case1.action_space.disconnect_powerline(
            line_id=5
        )  # I disconnect a powerline
        obs, reward, done, info = env_case1.step(act)  # do the action, it's valid
        act_case1 = env_case1.action_space.reconnect_powerline(
            line_id=5, bus_or=2, bus_ex=2
        )  # reconnect powerline on bus 2 both ends
        # this should lead to a game over as the powerline is out of the grid: both
        # of its ends are on bus 2, which is connected to nothing else
        # this is a non connex grid
        obs_case1, reward_case1, done_case1, info_case1 = env_case1.step(act_case1)
        assert done_case1

    def test_reco_disco_bus2(self):
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env_case2 = make(
                "rte_case5_example",
                test=True,
                gamerules_class=AlwaysLegal,
                backend=backend,
            )
        obs = env_case2.reset()  # reset is good
        obs, reward, done, info = env_case2.step(
            env_case2.action_space()
        )  # do the action, it's valid
        act_case2 = env_case2.action_space.reconnect_powerline(
            line_id=5, bus_or=2, bus_ex=2
        )  # reconnect powerline on bus 2 both ends
        # this should lead to a game over as the powerline is out of the grid: both
        # of its ends are on bus 2, which is connected to nothing else
        # this is a non connex grid
        obs_case2, reward_case2, done_case2, info_case2 = env_case2.step(act_case2)
        # this was illegal before, but test it is still illegal
        assert done_case2

    def test_reco_disco_bus3(self):
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env_case2 = make(
                "rte_case5_example",
                test=True,
                gamerules_class=AlwaysLegal,
                backend=backend,
            )
        obs = env_case2.reset()  # reset is good
        obs, reward, done, info = env_case2.step(
            env_case2.action_space()
        )  # do the action, it's valid
        act_case2 = env_case2.action_space.reconnect_powerline(
            line_id=5, bus_or=1, bus_ex=2
        )  # reconnect powerline: origin on bus 1, extremity on bus 2
        # this should not lead to a game over this time, the grid is connex!
        obs_case2, reward_case2, done_case2, info_case2 = env_case2.step(act_case2)
        assert done_case2 is False

    def test_reco_disco_bus4(self):
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env_case2 = make(
                "rte_case5_example",
                test=True,
                gamerules_class=AlwaysLegal,
                backend=backend,
            )
        obs = env_case2.reset()  # reset is good
        obs, reward, done, info = env_case2.step(
            env_case2.action_space()
        )  # do the action, it's valid
        act_case2 = env_case2.action_space.reconnect_powerline(
            line_id=5, bus_or=2, bus_ex=1
        )  # reconnect powerline: origin on bus 2, extremity on bus 1
        # this should not lead to a game over this time, the grid is connex!
        obs_case2, reward_case2, done_case2, info_case2 = env_case2.step(act_case2)
        assert done_case2 is False

    def test_reco_disco_bus5(self):
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env_case2 = make(
                "rte_case5_example",
                test=True,
                gamerules_class=AlwaysLegal,
                backend=backend,
            )
        obs = env_case2.reset()  # reset is good
        act_case2 = env_case2.action_space(
            {"set_bus": {"lines_or_id": [(5, 2)], "lines_ex_id": [(5, 2)]}}
        )  # move both ends of powerline 5 on bus 2
        # this DOES lead to a game over: the powerline alone on bus 2 at both
        # ends disconnects it from the rest of the grid (non connex)
        obs_case2, reward_case2, done_case2, info_case2 = env_case2.step(act_case2)
        assert done_case2
class BaseTestShuntAction(MakeBackend):
    """Check that shunt actions are correctly validated and that they have the
    expected electrical effect on the grid."""

    def test_shunt_ambiguous_id_incorrect(self):
        # a shunt action on the 5-bus grid (which has no shunt) must be ambiguous
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make(
                "rte_case5_example",
                test=True,
                gamerules_class=AlwaysLegal,
                action_class=CompleteAction,
                backend=backend,
            ) as env_case2:
                with self.assertRaises(AmbiguousAction):
                    act = env_case2.action_space({"shunt": {"set_bus": [(0, 2)]}})

    def test_shunt_effect(self):
        # compare a reference environment with one where the shunt is modified:
        # changing the shunt reactive power must move the voltage at bus 10
        self.skip_if_needed()
        backend1 = self.make_backend()
        backend2 = self.make_backend()
        type(backend1)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env_ref = make(
                "rte_case14_realistic",
                test=True,
                gamerules_class=AlwaysLegal,
                action_class=CompleteAction,
                backend=backend1,
                _add_to_name="BaseTestShuntAction",
            )
            env_change_q = make(
                "rte_case14_realistic",
                test=True,
                gamerules_class=AlwaysLegal,
                action_class=CompleteAction,
                backend=backend2,
                _add_to_name="BaseTestShuntAction",
            )
            param = env_ref.parameters
            param.NO_OVERFLOW_DISCONNECTION = True
            env_ref.change_parameters(param)
            env_change_q.change_parameters(param)
            # same chronics on both environments so they stay comparable
            env_ref.set_id(0)
            env_change_q.set_id(0)
            env_ref.reset()
            env_change_q.reset()

        obs_ref, *_ = env_ref.step(env_ref.action_space())
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            act = env_change_q.action_space({"shunt": {"shunt_q": [(0, -30)]}})
        obs_change_p_down, *_ = env_change_q.step(act)
        # more reactive production by the shunt raises the local voltage
        assert obs_ref.v_or[10] < obs_change_p_down.v_or[10] - self.tol_one
        obs_change_p_up, *_ = env_change_q.step(
            env_change_q.action_space({"shunt": {"shunt_q": [(0, +30)]}})
        )
        obs_ref, *_ = env_ref.step(env_ref.action_space())
        assert obs_ref.v_or[10] > obs_change_p_up.v_or[10] + self.tol_one
        obs_disco_sh, *_ = env_change_q.step(
            env_change_q.action_space({"shunt": {"set_bus": [(0, -1)]}})
        )
        # given the shunt amount at first, this is the right test to do
        assert obs_ref.v_or[10] > obs_disco_sh.v_or[10] + self.tol_one

        # test specific rule on shunt: if alone on a bus, it's disconnected ???
        obs_co_bus2_sh_alone, *_ = env_change_q.step(
            env_change_q.action_space({"shunt": {"set_bus": [(0, 2)]}})
        )
        assert obs_co_bus2_sh_alone._shunt_bus == -1
        assert obs_co_bus2_sh_alone._shunt_v == 0.
        assert obs_co_bus2_sh_alone._shunt_p == 0
        assert obs_co_bus2_sh_alone._shunt_q == 0
        # note that above the backend can diverge (shunt is alone on its bus !)
        # on pp it does not ... but it probably should

        # restart from a clean state, then move the shunt on bus 2 together
        # with a line origin so it is no longer alone on its bus
        env_ref.set_id(0)
        env_change_q.set_id(0)
        env_ref.reset()
        env_change_q.reset()
        act = env_change_q.action_space({"set_bus": {"lines_or_id": [(10, 2)]},
                                         "shunt": {"set_bus": [(0, 2)]}
                                         })
        obs_co_bus2_sh_notalone, *_ = env_change_q.step(act)
        assert obs_co_bus2_sh_notalone.line_or_bus[10] == 2
        # NOTE(review): the expected numeric values below come from the
        # pandapower reference backend — confirm they hold for other backends
        assert np.allclose(obs_co_bus2_sh_notalone.v_or[10], 23.15359878540039)
        assert obs_co_bus2_sh_notalone._shunt_bus == 2
        assert np.allclose(obs_co_bus2_sh_notalone._shunt_v, 23.15359878540039)
        assert obs_co_bus2_sh_notalone._shunt_p == 0
        assert obs_co_bus2_sh_notalone._shunt_q == -25.464233
class BaseTestResetEqualsLoadGrid(MakeBackend):
def setUp(self):
backend1 = self.make_backend()
backend2 = self.make_backend()
type(backend1)._clear_class_attribute()
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
self.env1 = make("rte_case5_example", test=True, backend=backend1)
self.backend1 = self.env1.backend
self.env2 = make("rte_case5_example", test=True, backend=backend2)
self.backend2 = self.env2.backend
np.random.seed(69)
def tearDown(self):
self.env1.close()
self.env2.close()
def test_reset_equals_reset(self):
self.skip_if_needed()
# Reset backend1 with reset
self.env1.reset()
# Reset backend2 with reset
self.env2.reset()
self._compare_backends()
def _compare_backends(self):
# Compare
if hasattr(self.backend1, "prod_pu_to_kv") and hasattr(
self.backend2, "prod_pu_to_kv"
):
assert np.all(self.backend1.prod_pu_to_kv == self.backend2.prod_pu_to_kv)
if hasattr(self.backend1, "load_pu_to_kv") and hasattr(
self.backend2, "load_pu_to_kv"
):
assert np.all(self.backend1.load_pu_to_kv == self.backend2.load_pu_to_kv)
if hasattr(self.backend1, "lines_or_pu_to_kv") and hasattr(
self.backend2, "lines_or_pu_to_kv"
):
assert np.all(
self.backend1.lines_or_pu_to_kv == self.backend2.lines_or_pu_to_kv
)
if hasattr(self.backend1, "lines_ex_pu_to_kv") and hasattr(
self.backend2, "lines_ex_pu_to_kv"
):
assert np.all(
self.backend1.lines_ex_pu_to_kv == self.backend2.lines_ex_pu_to_kv
)
if hasattr(self.backend1, "p_or") and hasattr(self.backend2, "p_or"):
assert np.all(self.backend1.p_or == self.backend2.p_or)
if hasattr(self.backend1, "q_or") and hasattr(self.backend2, "q_or"):
assert np.all(self.backend1.q_or == self.backend2.q_or)
if hasattr(self.backend1, "v_or") and hasattr(self.backend2, "v_or"):
assert np.all(self.backend1.v_or == self.backend2.v_or)
if hasattr(self.backend1, "a_or") and hasattr(self.backend2, "a_or"):
assert np.all(self.backend1.a_or == self.backend2.a_or)
if hasattr(self.backend1, "p_ex") and hasattr(self.backend2, "p_ex"):
assert np.all(self.backend1.p_ex == self.backend2.p_ex)
if hasattr(self.backend1, "a_ex") and hasattr(self.backend2, "a_ex"):
assert np.all(self.backend1.a_ex == self.backend2.a_ex)
if hasattr(self.backend1, "v_ex") and hasattr(self.backend2, "v_ex"):
assert np.all(self.backend1.v_ex == self.backend2.v_ex)
def test_reset_equals_load_grid(self):
self.skip_if_needed()
# Reset backend1 with reset
self.env1.reset()
# Reset backend2 with load_grid
self.backend2.reset = self.backend2.load_grid
self.env2.reset()
# Compare
self._compare_backends()
def test_load_grid_equals_load_grid(self):
self.skip_if_needed()
# Reset backend1 with load_grid
self.backend1.reset = self.backend1.load_grid
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
self.env1.reset()
# Reset backend2 with load_grid
self.backend2.reset = self.backend2.load_grid
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
self.env2.reset()
# Compare
self._compare_backends()
def test_obs_from_same_chronic(self):
self.skip_if_needed()
# Store first observation
obs1 = self.env1.current_obs
obs2 = None
for i in range(3):
self.env1.step(self.env1.action_space({}))
# Reset to first chronic
self.env1.chronics_handler.tell_id(-1)
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
self.env1.reset()
# Store second observation
obs2 = self.env1.current_obs
# Compare
assert np.allclose(obs1.prod_p, obs2.prod_p)
assert np.allclose(obs1.prod_q, obs2.prod_q)
assert np.allclose(obs1.prod_v, obs2.prod_v)
assert np.allclose(obs1.load_p, obs2.load_p)
assert np.allclose(obs1.load_q, obs2.load_q)
assert np.allclose(obs1.load_v, obs2.load_v)
assert np.allclose(obs1.p_or, obs2.p_or)
assert np.allclose(obs1.q_or, obs2.q_or)
assert np.allclose(obs1.v_or, obs2.v_or)
assert np.allclose(obs1.a_or, obs2.a_or)
assert np.allclose(obs1.p_ex, obs2.p_ex)
assert np.allclose(obs1.q_ex, obs2.q_ex)
assert np.allclose(obs1.v_ex, obs2.v_ex)
assert np.allclose(obs1.a_ex, obs2.a_ex)
assert np.allclose(obs1.rho, obs2.rho)
assert np.all(obs1.line_status == obs2.line_status)
assert np.all(obs1.topo_vect == obs2.topo_vect)
assert np.all(obs1.timestep_overflow == obs2.timestep_overflow)
assert np.all(obs1.time_before_cooldown_line == obs2.time_before_cooldown_line)
assert np.all(obs1.time_before_cooldown_sub == obs2.time_before_cooldown_sub)
assert np.all(obs1.time_next_maintenance == obs2.time_next_maintenance)
assert np.all(obs1.duration_next_maintenance == obs2.duration_next_maintenance)
assert np.all(obs1.target_dispatch == obs2.target_dispatch)
assert np.all(obs1.actual_dispatch == obs2.actual_dispatch)
    def test_combined_changes(self):
        """Apply a randomly chosen combination of unitary "change bus" actions and check
        that elements not touched by the action remain on busbar 1 afterwards."""
        # Unlimited sub changes
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        params = grid2op.Parameters.Parameters()
        params.MAX_SUB_CHANGED = 999
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = grid2op.make(
                "rte_case14_realistic", test=True, backend=backend, param=params
            )
        # Find N valid iadd combination of R change actions
        acts = self.aux_random_topos_act(env, n=16, r=3)
        # Pick one at random
        act = np.random.choice(acts)
        # Reset env
        obs = env.reset()
        # At t=0 everything is on bus 1 normally
        assert np.all(obs.topo_vect == 1)
        # Step
        obs, _, done, _ = env.step(act)
        # This should use valid actions
        assert done == False
        # At t=1, unchanged elements should be on bus 1
        assert np.all(obs.topo_vect[~act._change_bus_vect] == 1)
def aux_nth_combination(self, iterable, r, index):
"Equivalent to list(combinations(iterable, r))[index]"
pool = tuple(iterable)
n = len(pool)
if r < 0 or r > n:
raise ValueError
c = 1
k = min(r, n - r)
for i in range(1, k + 1):
c = c * (n - k + i) // i
if index < 0:
index += c
if index < 0 or index >= c:
raise IndexError
result = []
while r:
c, n, r = c * r // n, n - 1, r - 1
while index >= c:
index -= c
c, n = c * (n - r) // n, n - 1
result.append(pool[-1 - n])
return tuple(result)
    def aux_random_topos_act(self, env, n=128, r=2):
        """Build ``n`` valid actions, each the in-place sum of ``r`` randomly chosen
        unitary topology-change actions; an action is kept only if stepping it does
        not end the episode."""
        actsp = env.action_space
        acts = actsp.get_all_unitary_topologies_change(actsp)
        res = []
        # number of possible r-combinations of unitary actions
        # NOTE(review): `comb` is imported outside this view — presumably
        # scipy.special.comb or math.comb; confirm it returns an int-like value
        n_comb = comb(len(acts), r)
        while len(res) < n:
            env.reset()
            rnd_idx = np.random.randint(n_comb)
            # materialize only the rnd_idx-th combination, not the full list
            a = self.aux_nth_combination(acts, r, rnd_idx)
            atest = env.action_space({})
            for atmp in a:
                atest += atmp
            _, _, done, _ = env.step(atest)
            if not done:
                res.append(copy.deepcopy(atest))
        return res
class BaseTestVoltageOWhenDisco(MakeBackend):
    """Check that a disconnected powerline reports a voltage of exactly 0. at its origin side."""

    def test_this(self):
        self.skip_if_needed()
        bkd = self.make_backend()
        type(bkd)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case14_realistic", test=True, backend=bkd) as env:
                l_id = 1
                disco = env.action_space({"set_line_status": [(l_id, -1)]})
                obs, *_ = env.step(disco)
                # the line is disconnected: its origin voltage must read 0.
                assert obs.v_or[l_id] == 0.0
class BaseTestChangeBusSlack(MakeBackend):
    """Moving the slack generator to another busbar must leave a valid, balanced grid state."""

    def test_change_slack_case14(self):
        self.skip_if_needed()
        bkd = self.make_backend()
        type(bkd)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = grid2op.make("rte_case14_realistic", test=True, backend=bkd)
        # move the last generator and the origin of line 0 onto busbar 2
        slack_move = env.action_space(
            {
                "set_bus": {
                    "generators_id": [(env.n_gen - 1, 2)],
                    "lines_or_id": [(0, 2)],
                }
            }
        )
        obs, reward, am_i_done, info = env.step(slack_move)
        assert am_i_done is False
        # production stays physical: non-negative, and at least covers the load
        assert np.all(obs.prod_p >= 0.0)
        assert np.sum(obs.prod_p) >= np.sum(obs.load_p)
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            p_subs, q_subs, p_bus, q_bus, v_bus = env.backend.check_kirchoff()
        # Kirchhoff's laws still hold within the tolerance
        assert np.all(np.abs(p_subs) <= self.tol_one)
        assert np.all(np.abs(p_bus) <= self.tol_one)
class BaseTestStorageAction(MakeBackend):
    """Tests of storage-unit handling by the backend: power set-points ("set_storage"),
    busbar re-assignment, illegal-action rejection, and grid balance (Kirchhoff) after
    each step."""
    def _aux_test_kirchoff(self):
        # Verify Kirchhoff's laws hold on the current backend state, within self.tol_one.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            p_subs, q_subs, p_bus, q_bus, diff_v_bus = self.env.backend.check_kirchoff()
        assert np.all(
            np.abs(p_subs) <= self.tol_one
        ), "error with active value at some substations"
        assert np.all(
            np.abs(q_subs) <= self.tol_one
        ), "error with reactive value at some substations"
        assert np.all(
            np.abs(p_bus) <= self.tol_one
        ), "error with active value at some bus"
        assert np.all(
            np.abs(q_bus) <= self.tol_one
        ), "error with reactive value at some bus"
        assert np.all(diff_v_bus <= self.tol_one), "error with voltage discrepency"
    def test_there_are_storage(self):
        """test the backend properly loaded the storage units"""
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("educ_case14_storage", test=True, backend=backend)
        # the educ_case14_storage test grid ships exactly 2 storage units
        assert self.env.n_storage == 2
    def test_storage_action_mw(self):
        """test the actions are properly implemented in the backend"""
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("educ_case14_storage", test=True, backend=backend)
        # charge (negative set-point): backend must apply it exactly
        array_modif = np.array([-1.5, -10.0], dtype=dt_float)
        act = self.env.action_space({"set_storage": array_modif})
        obs, reward, done, info = self.env.step(act)
        assert not info["exception"]
        storage_p, storage_q, storage_v = self.env.backend.storages_info()
        assert np.all(np.abs(storage_p - array_modif) <= self.tol_one)
        assert np.all(np.abs(storage_q - 0.0) <= self.tol_one)
        self._aux_test_kirchoff()
        # discharge (positive set-point)
        array_modif = np.array([2, 8], dtype=dt_float)
        act = self.env.action_space({"set_storage": array_modif})
        obs, reward, done, info = self.env.step(act)
        assert not info["exception"]
        storage_p, storage_q, storage_v = self.env.backend.storages_info()
        assert np.all(np.abs(storage_p - array_modif) <= self.tol_one)
        assert np.all(np.abs(storage_q - 0.0) <= self.tol_one)
        self._aux_test_kirchoff()
        # illegal action
        array_modif = np.array([2, 12], dtype=dt_float)
        act = self.env.action_space({"set_storage": array_modif})
        obs, reward, done, info = self.env.step(act)
        assert info["exception"]
        storage_p, storage_q, storage_v = self.env.backend.storages_info()
        # illegal action is replaced by "do nothing": storages end at 0 MW
        assert np.all(np.abs(storage_p - [0.0, 0.0]) <= self.tol_one)
        assert np.all(np.abs(storage_q - 0.0) <= self.tol_one)
        self._aux_test_kirchoff()
        # full discharge now
        array_modif = np.array([-1.5, -10.0], dtype=dt_float)
        for nb_ts in range(3):
            act = self.env.action_space({"set_storage": array_modif})
            obs, reward, done, info = self.env.step(act)
            assert not info["exception"]
            storage_p, storage_q, storage_v = self.env.backend.storages_info()
            assert np.all(
                np.abs(storage_p - array_modif) <= self.tol_one
            ), f"error for P for time step {nb_ts}"
            assert np.all(
                np.abs(storage_q - 0.0) <= self.tol_one
            ), f"error for Q for time step {nb_ts}"
            self._aux_test_kirchoff()
        obs, reward, done, info = self.env.step(act)
        assert not info["exception"]
        # i have emptied second battery
        storage_p, *_ = self.env.backend.storages_info()
        assert np.all(np.abs(storage_p - [-1.5, -4.4599934]) <= self.tol_one)
        assert np.all(np.abs(obs.storage_charge[1] - 0.0) <= self.tol_one)
        self._aux_test_kirchoff()
        obs, reward, done, info = self.env.step(act)
        assert not info["exception"]
        # i have emptied second battery
        storage_p, *_ = self.env.backend.storages_info()
        assert np.all(np.abs(storage_p - [-1.5, 0.0]) <= self.tol_one)
        assert np.all(np.abs(obs.storage_charge[1] - 0.0) <= self.tol_one)
        self._aux_test_kirchoff()
    def test_storage_action_topo(self):
        """test the modification of the bus of a storage unit"""
        self.skip_if_needed()
        param = Parameters()
        param.NB_TIMESTEP_COOLDOWN_SUB = 0
        param.NB_TIMESTEP_COOLDOWN_LINE = 0
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(
                "educ_case14_storage",
                test=True,
                backend=backend,
                param=param,
                action_class=CompleteAction,
            )
        # test i can do a reset
        obs = self.env.reset()
        # test i can do a step
        obs, reward, done, info = self.env.step(self.env.action_space())
        exc_ = info["exception"]
        assert (
            not done
        ), f"i should be able to do a step with some storage units error is {exc_}"
        storage_p, storage_q, storage_v = self.env.backend.storages_info()
        assert np.all(np.abs(storage_p - 0.0) <= self.tol_one)
        assert np.all(np.abs(storage_q - 0.0) <= self.tol_one)
        # first case, standard modification
        array_modif = np.array([-1.5, -10.0], dtype=dt_float)
        act = self.env.action_space(
            {
                "set_storage": array_modif,
                "set_bus": {
                    "storages_id": [(0, 2)],
                    "lines_or_id": [(8, 2)],
                    "generators_id": [(3, 2)],
                },
            }
        )
        obs, reward, done, info = self.env.step(act)
        assert not info["exception"]
        storage_p, storage_q, storage_v = self.env.backend.storages_info()
        assert np.all(np.abs(storage_p - array_modif) <= self.tol_one)
        assert np.all(np.abs(storage_q - 0.0) <= self.tol_one)
        assert obs.storage_bus[0] == 2
        assert obs.line_or_bus[8] == 2
        assert obs.gen_bus[3] == 2
        self._aux_test_kirchoff()
        # second case, still standard modification (set to orig)
        array_modif = np.array([1.5, 10.0], dtype=dt_float)
        act = self.env.action_space(
            {
                "set_storage": array_modif,
                "set_bus": {
                    "storages_id": [(0, 1)],
                    "lines_or_id": [(8, 1)],
                    "generators_id": [(3, 1)],
                },
            }
        )
        obs, reward, done, info = self.env.step(act)
        assert not info["exception"]
        storage_p, storage_q, storage_v = self.env.backend.storages_info()
        assert np.all(np.abs(storage_p - array_modif) <= self.tol_one)
        assert np.all(np.abs(storage_q - 0.0) <= self.tol_one)
        assert obs.storage_bus[0] == 1
        assert obs.line_or_bus[8] == 1
        assert obs.gen_bus[3] == 1
        self._aux_test_kirchoff()
        # fourth case: isolated storage on a busbar (so it is disconnected, but with 0. production => so thats fine)
        array_modif = np.array([0.0, 7.0], dtype=dt_float)
        act = self.env.action_space(
            {
                "set_storage": array_modif,
                "set_bus": {
                    "storages_id": [(0, 2)],
                    "lines_or_id": [(8, 1)],
                    "generators_id": [(3, 1)],
                },
            }
        )
        obs, reward, done, info = self.env.step(act)
        assert not info[
            "exception"
        ], "error when storage is disconnected with 0 production, throw an error, but should not"
        assert not done
        storage_p, storage_q, storage_v = self.env.backend.storages_info()
        assert np.all(
            np.abs(storage_p - [0.0, array_modif[1]]) <= self.tol_one
        ), "storage is not disconnected, yet alone on its busbar"
        assert obs.storage_bus[0] == -1, "storage should be disconnected"
        assert storage_v[0] == 0.0, "storage 0 should be disconnected"
        assert obs.line_or_bus[8] == 1
        assert obs.gen_bus[3] == 1
        self._aux_test_kirchoff()
        # check that if i don't touch it it's set to 0
        act = self.env.action_space()
        obs, reward, done, info = self.env.step(act)
        assert not info["exception"]
        storage_p, storage_q, storage_v = self.env.backend.storages_info()
        assert np.all(
            np.abs(storage_p - 0.0) <= self.tol_one
        ), "storage should produce 0"
        assert np.all(
            np.abs(storage_q - 0.0) <= self.tol_one
        ), "storage should produce 0"
        assert obs.storage_bus[0] == -1, "storage should be disconnected"
        assert storage_v[0] == 0.0, "storage 0 should be disconnected"
        assert obs.line_or_bus[8] == 1
        assert obs.gen_bus[3] == 1
        self._aux_test_kirchoff()
        # trying to act on a disconnected storage => illegal)
        array_modif = np.array([2.0, 7.0], dtype=dt_float)
        act = self.env.action_space({"set_storage": array_modif})
        obs, reward, done, info = self.env.step(act)
        assert info["exception"]  # action should be illegal
        assert not done  # this is fine, as it's illegal it's replaced by do nothing
        self._aux_test_kirchoff()
        # trying to reconnect a storage alone on a bus => game over, not connected bus
        array_modif = np.array([1.0, 7.0], dtype=dt_float)
        act = self.env.action_space(
            {
                "set_storage": array_modif,
                "set_bus": {
                    "storages_id": [(0, 2)],
                    "lines_or_id": [(8, 1)],
                    "generators_id": [(3, 1)],
                },
            }
        )
        obs, reward, done, info = self.env.step(act)
        assert info["exception"]  # this is a game over
        assert done
class BaseIssuesTest(MakeBackend):
    """Regression tests tied to specific GitHub issues (Grid2Op #125, #134 and a
    lightsim2grid copy-environment issue)."""
    def test_issue_125(self):
        # https://github.com/rte-france/Grid2Op/issues/125
        # disconnecting a load (or a generator) must raise an exception and end the episode
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = grid2op.make("rte_case14_realistic", test=True, backend=backend)
        action = env.action_space({"set_bus": {"loads_id": [(1, -1)]}})
        obs, reward, am_i_done, info = env.step(action)
        assert info["is_illegal"] is False
        assert info["is_ambiguous"] is False
        assert len(info["exception"])
        assert am_i_done
        env.reset()
        action = env.action_space({"set_bus": {"generators_id": [(1, -1)]}})
        obs, reward, am_i_done, info = env.step(action)
        assert info["is_illegal"] is False
        assert info["is_ambiguous"] is False
        assert len(info["exception"])
        assert am_i_done
    def test_issue_134(self):
        """Disconnect / reconnect a line via "set_bus" at either end; status and topo_vect
        must stay consistent, and the previous bus is remembered on reconnection."""
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        param = Parameters()
        param.NB_TIMESTEP_COOLDOWN_LINE = 0
        param.NB_TIMESTEP_COOLDOWN_SUB = 0
        # param.NO_OVERFLOW_DISCONNECTION = True
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = grid2op.make(
                "rte_case14_realistic", test=True, backend=backend, param=param
            )
        obs_init = env.get_obs()
        LINE_ID = 2
        # Disconnect ex
        action = env.action_space(
            {
                "set_bus": {
                    "lines_or_id": [(LINE_ID, 0)],
                    "lines_ex_id": [(LINE_ID, -1)],
                }
            }
        )
        obs, reward, done, info = env.step(action)
        assert not done
        assert obs.line_status[LINE_ID] == False
        assert obs.topo_vect[obs.line_or_pos_topo_vect[LINE_ID]] == -1
        assert obs.topo_vect[obs.line_ex_pos_topo_vect[LINE_ID]] == -1
        # Reconnect ex on bus 2
        action = env.action_space(
            {
                "set_bus": {
                    "lines_or_id": [(LINE_ID, 0)],
                    "lines_ex_id": [(LINE_ID, 2)],
                }
            }
        )
        obs, reward, done, info = env.step(action)
        assert not done
        assert obs.line_status[LINE_ID] == True
        assert obs.topo_vect[obs.line_or_pos_topo_vect[LINE_ID]] == 1
        assert obs.topo_vect[obs.line_ex_pos_topo_vect[LINE_ID]] == 2
        # Disconnect or
        action = env.action_space(
            {
                "set_bus": {
                    "lines_or_id": [(LINE_ID, -1)],
                    "lines_ex_id": [(LINE_ID, 0)],
                }
            }
        )
        obs, reward, done, info = env.step(action)
        assert not done
        assert obs.line_status[LINE_ID] == False
        assert obs.topo_vect[obs.line_or_pos_topo_vect[LINE_ID]] == -1
        assert obs.topo_vect[obs.line_ex_pos_topo_vect[LINE_ID]] == -1
        # Reconnect or on bus 1
        action = env.action_space(
            {
                "set_bus": {
                    "lines_or_id": [(LINE_ID, 1)],
                    "lines_ex_id": [(LINE_ID, 0)],
                }
            }
        )
        obs, reward, done, info = env.step(action)
        assert not done
        assert obs.line_status[LINE_ID] == True
        assert obs.topo_vect[obs.line_or_pos_topo_vect[LINE_ID]] == 1
        # ex side comes back on bus 2, where it was before the disconnection
        assert obs.topo_vect[obs.line_ex_pos_topo_vect[LINE_ID]] == 2
    def test_issue_134_check_ambiguity(self):
        """Reconnecting one end while disconnecting the other in one action is ambiguous."""
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        param = Parameters()
        param.MAX_LINE_STATUS_CHANGED = 9999
        param.MAX_SUB_CHANGED = 99999
        param.NB_TIMESTEP_COOLDOWN_LINE = 0
        param.NB_TIMESTEP_COOLDOWN_SUB = 0
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = grid2op.make(
                "rte_case14_realistic", test=True, backend=backend, param=param
            )
        LINE_ID = 2
        # Reconnect or on bus 1 disconnect on bus ex -> this should be ambiguous
        action = env.action_space(
            {
                "set_bus": {
                    "lines_or_id": [(LINE_ID, 1)],
                    "lines_ex_id": [(LINE_ID, -1)],
                }
            }
        )
        obs, reward, done, info = env.step(action)
        assert info["is_ambiguous"] == True
    def test_issue_134_withcooldown_forrules(self):
        """With cooldowns enabled, disconnecting a line counts as a line action (line
        cooldown, no sub cooldown); touching a single end of a connected line counts as
        a substation action."""
        self.skip_if_needed()
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        param = Parameters()
        param.NB_TIMESTEP_COOLDOWN_LINE = 20
        param.NB_TIMESTEP_COOLDOWN_SUB = 2
        param.NO_OVERFLOW_DISCONNECTION = True
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = grid2op.make(
                "rte_case14_realistic", test=True, backend=backend, param=param
            )
        LINE_ID = 2
        # Disconnect ex -> this is an action on the powerline
        for (or_, ex_) in [(0, -1), (-1, 0)]:
            obs = env.reset()
            action = env.action_space(
                {
                    "set_bus": {
                        "lines_or_id": [(LINE_ID, or_)],
                        "lines_ex_id": [(LINE_ID, ex_)],
                    }
                }
            )
            # i disconnect a powerline, i should not act on the substation but on the line LINE_ID
            obs, reward, done, info = env.step(action)
            assert np.all(obs.time_before_cooldown_sub == 0)
            assert (
                obs.time_before_cooldown_line[LINE_ID]
                == param.NB_TIMESTEP_COOLDOWN_LINE
            )
            assert obs.line_status[LINE_ID] == False
            assert obs.topo_vect[obs.line_or_pos_topo_vect[LINE_ID]] == -1
            assert obs.topo_vect[obs.line_ex_pos_topo_vect[LINE_ID]] == -1
            # i try to reconnect it, should not be possible whether i do it from
            # setting a bus at one extremity or playing with the status
            obs, *_ = env.step(env.action_space({"set_line_status": [(LINE_ID, 1)]}))
            assert obs.line_status[LINE_ID] == False
            assert np.all(obs.time_before_cooldown_sub == 0)
            assert (
                obs.time_before_cooldown_line[LINE_ID]
                == param.NB_TIMESTEP_COOLDOWN_LINE - 1
            )
            obs, *_ = env.step(env.action_space({"change_line_status": [LINE_ID]}))
            assert obs.line_status[LINE_ID] == False
            assert np.all(obs.time_before_cooldown_sub == 0)
            assert (
                obs.time_before_cooldown_line[LINE_ID]
                == param.NB_TIMESTEP_COOLDOWN_LINE - 2
            )
            obs, *_ = env.step(
                env.action_space(
                    {
                        "set_bus": {
                            "lines_or_id": [(LINE_ID, 0)],
                            "lines_ex_id": [(LINE_ID, 1)],
                        }
                    }
                )
            )
            assert obs.line_status[LINE_ID] == False
            assert np.all(obs.time_before_cooldown_sub == 0)
            assert (
                obs.time_before_cooldown_line[LINE_ID]
                == param.NB_TIMESTEP_COOLDOWN_LINE - 3
            )
            obs, *_ = env.step(
                env.action_space(
                    {
                        "set_bus": {
                            "lines_or_id": [(LINE_ID, 1)],
                            "lines_ex_id": [(LINE_ID, 0)],
                        }
                    }
                )
            )
            assert obs.line_status[LINE_ID] == False
            assert np.all(obs.time_before_cooldown_sub == 0)
            assert (
                obs.time_before_cooldown_line[LINE_ID]
                == param.NB_TIMESTEP_COOLDOWN_LINE - 4
            )
            # i wait enough for the cooldown to pass
            for _ in range(param.NB_TIMESTEP_COOLDOWN_LINE - 4):
                obs, *_ = env.step(env.action_space())
                assert np.all(obs.time_before_cooldown_sub == 0)
            # and now i try to reconnect, this should not affect the substation but the cooldown on the line
            obs, *_ = env.step(
                env.action_space(
                    {
                        "set_bus": {
                            "lines_or_id": [(LINE_ID, -2 * or_)],
                            "lines_ex_id": [(LINE_ID, -2 * ex_)],
                        }
                    }
                )
            )
            assert obs.line_status[LINE_ID] == True
            assert np.all(obs.time_before_cooldown_sub == 0)
            assert (
                obs.time_before_cooldown_line[LINE_ID]
                == param.NB_TIMESTEP_COOLDOWN_LINE
            )
            # and now i try to modify the buses at one end of the powerline,
            # this should affect the substation and NOT the line (so be possible)
            obs, *_ = env.step(
                env.action_space(
                    {
                        "set_bus": {
                            "lines_or_id": [(LINE_ID, -1 * or_)],
                            "lines_ex_id": [(LINE_ID, -1 * ex_)],
                        }
                    }
                )
            )
            assert obs.line_status[LINE_ID] == True
            if or_ != 0:
                assert (
                    obs.time_before_cooldown_sub[obs.line_or_to_subid[LINE_ID]]
                    == param.NB_TIMESTEP_COOLDOWN_SUB
                )
            else:
                assert (
                    obs.time_before_cooldown_sub[obs.line_ex_to_subid[LINE_ID]]
                    == param.NB_TIMESTEP_COOLDOWN_SUB
                )
            assert (
                obs.time_before_cooldown_line[LINE_ID]
                == param.NB_TIMESTEP_COOLDOWN_LINE - 1
            )
    def test_issue_copyenv(self):
        # https://github.com/BDonnot/lightsim2grid/issues/10
        # copied env must be independent: resetting env1 must not change env2's obs
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env1 = grid2op.make("rte_case14_realistic", test=True, backend=backend)
        env2 = env1.copy()
        obs1 = env1.reset()
        obs2 = env2.get_obs()
        assert np.any(obs1.prod_p != obs2.prod_p)
class BaseStatusActions(MakeBackend):
    """Matrix of line-status tests: every way of (dis)connecting a line (set status,
    change status, set bus) crossed with the line's previous state, checking both the
    resulting state and how the action's impact is counted (line vs substation)."""
    def _make_my_env(self):
        # fresh env with all cooldowns disabled so each test starts unconstrained
        backend = self.make_backend()
        type(backend)._clear_class_attribute()
        param = Parameters()
        param.NB_TIMESTEP_COOLDOWN_LINE = 0
        param.NB_TIMESTEP_COOLDOWN_SUB = 0
        param.NO_OVERFLOW_DISCONNECTION = True
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = grid2op.make(
                "rte_case14_realistic", test=True, backend=backend, param=param
            )
        return env
    def _init_disco_or_not(self, LINE_ID, env, disco_before_the_action):
        # Put the line in the requested pre-test state; return the line statuses
        # against which the tested action's impact will be measured.
        if not disco_before_the_action:
            # powerline is supposed to be connected before the action takes place
            statuses = env.get_obs().line_status
        else:
            # i disconnect it
            action = env.action_space({"set_line_status": [(LINE_ID, -1)]})
            obs, reward, done, info = env.step(action)
            statuses = obs.line_status
        return statuses
    def _line_connected(self, LINE_ID, obs, busor=1):
        # line is connected: origin on `busor`, extremity on bus 1
        assert obs.line_status[LINE_ID]
        assert obs.topo_vect[obs.line_or_pos_topo_vect[LINE_ID]] == busor
        assert obs.topo_vect[obs.line_ex_pos_topo_vect[LINE_ID]] == 1
    def _line_disconnected(self, LINE_ID, obs):
        # line is disconnected: both ends at -1 in the topology vector
        assert not obs.line_status[LINE_ID]
        assert obs.topo_vect[obs.line_or_pos_topo_vect[LINE_ID]] == -1
        assert obs.topo_vect[obs.line_ex_pos_topo_vect[LINE_ID]] == -1
    def _only_line_impacted(self, LINE_ID, action, statuses):
        # the action is counted as acting on exactly this line, no substation
        lines_impacted, subs_impacted = action.get_topological_impact(statuses)
        assert np.sum(subs_impacted) == 0
        assert np.sum(lines_impacted) == 1 and lines_impacted[LINE_ID]
    def _only_sub_impacted(self, LINE_ID, action, statuses):
        # the action is counted as acting on exactly the origin substation, no line
        lines_impacted, subs_impacted = action.get_topological_impact(statuses)
        assert (
            np.sum(subs_impacted) == 1
            and subs_impacted[action.line_or_to_subid[LINE_ID]]
        )
        assert np.sum(lines_impacted) == 0
    def test_setmin1_prevConn(self):
        """{"set_line_status": [(LINE_ID, -1)]} when connected"""
        self.skip_if_needed()
        env = self._make_my_env()
        LINE_ID = 1
        # set the grid to right configuration
        statuses = self._init_disco_or_not(LINE_ID, env, disco_before_the_action=False)
        action = env.action_space({"set_line_status": [(LINE_ID, -1)]})
        obs, reward, done, info = env.step(action)
        # right consequences
        self._line_disconnected(LINE_ID, obs)
        # right way to count it
        self._only_line_impacted(LINE_ID, action, statuses)
    def test_set1_prevConn(self):
        """{"set_line_status": [(LINE_ID, +1)]} when connected"""
        self.skip_if_needed()
        env = self._make_my_env()
        LINE_ID = 1
        # set the grid to right configuration
        statuses = self._init_disco_or_not(LINE_ID, env, disco_before_the_action=False)
        action = env.action_space({"set_line_status": [(LINE_ID, +1)]})
        obs, reward, done, info = env.step(action)
        # right consequences
        self._line_connected(LINE_ID, obs)
        # right way to count it
        self._only_line_impacted(LINE_ID, action, statuses)
    def test_setmin1_prevDisc(self):
        """{"set_line_status": [(LINE_ID, -1)]} when disconnected"""
        self.skip_if_needed()
        env = self._make_my_env()
        LINE_ID = 1
        # set the grid to right configuration
        statuses = self._init_disco_or_not(LINE_ID, env, disco_before_the_action=True)
        # and now i test the impact of the action
        action = env.action_space({"set_line_status": [(LINE_ID, -1)]})
        obs, reward, done, info = env.step(action)
        # right consequences
        self._line_disconnected(LINE_ID, obs)
        # right way to count it
        self._only_line_impacted(LINE_ID, action, statuses)
    def test_set1_prevDisc(self):
        """{"set_line_status": [(LINE_ID, +1)]} when disconnected"""
        self.skip_if_needed()
        env = self._make_my_env()
        LINE_ID = 1
        # set the grid to right configuration
        statuses = self._init_disco_or_not(LINE_ID, env, disco_before_the_action=True)
        # and now i test the impact of the action
        action = env.action_space({"set_line_status": [(LINE_ID, +1)]})
        obs, reward, done, info = env.step(action)
        # right consequences
        self._line_connected(LINE_ID, obs)
        # right way to count it
        self._only_line_impacted(LINE_ID, action, statuses)
    def test_chgt_prevConn(self):
        """{"change_line_status": [LINE_ID]} when connected"""
        self.skip_if_needed()
        env = self._make_my_env()
        LINE_ID = 1
        # set the grid to right configuration
        statuses = self._init_disco_or_not(LINE_ID, env, disco_before_the_action=False)
        # and now i test the impact of the action
        action = env.action_space({"change_line_status": [LINE_ID]})
        obs, reward, done, info = env.step(action)
        # right consequences
        self._line_disconnected(LINE_ID, obs)
        # right way to count it
        self._only_line_impacted(LINE_ID, action, statuses)
    def test_chgt_prevDisc(self):
        """{"change_line_status": [LINE_ID]} when disconnected"""
        self.skip_if_needed()
        env = self._make_my_env()
        LINE_ID = 1
        # set the grid to right configuration
        statuses = self._init_disco_or_not(LINE_ID, env, disco_before_the_action=True)
        # and now i test the impact of the action
        action = env.action_space({"change_line_status": [LINE_ID]})
        obs, reward, done, info = env.step(action)
        # right consequences
        self._line_connected(LINE_ID, obs)
        # right way to count it
        self._only_line_impacted(LINE_ID, action, statuses)
    def test_setbusmin1_prevConn(self):
        """{"set_bus": {"lines_or_id": [(LINE_ID, -1)]}} when connected"""
        self.skip_if_needed()
        env = self._make_my_env()
        LINE_ID = 1
        # set the grid to right configuration
        statuses = self._init_disco_or_not(LINE_ID, env, disco_before_the_action=False)
        # and now i test the impact of the action
        action = env.action_space({"set_bus": {"lines_or_id": [(LINE_ID, -1)]}})
        obs, reward, done, info = env.step(action)
        # right consequences
        self._line_disconnected(LINE_ID, obs)
        # right way to count it
        self._only_line_impacted(LINE_ID, action, statuses)
    def test_setbusmin1_prevDisc(self):
        """{"set_bus": {"lines_or_id": [(LINE_ID, -1)]}} when disco"""
        self.skip_if_needed()
        env = self._make_my_env()
        LINE_ID = 1
        # set the grid to right configuration
        statuses = self._init_disco_or_not(LINE_ID, env, disco_before_the_action=True)
        # and now i test the impact of the action
        action = env.action_space({"set_bus": {"lines_or_id": [(LINE_ID, -1)]}})
        obs, reward, done, info = env.step(action)
        # right consequences
        self._line_disconnected(LINE_ID, obs)
        # right way to count it
        self._only_sub_impacted(LINE_ID, action, statuses)
    def test_setbus2_prevConn(self):
        """{"set_bus": {"lines_or_id": [(LINE_ID, 2)]}} when connected"""
        self.skip_if_needed()
        env = self._make_my_env()
        LINE_ID = 1
        # set the grid to right configuration
        statuses = self._init_disco_or_not(LINE_ID, env, disco_before_the_action=False)
        # and now i test the impact of the action
        action = env.action_space({"set_bus": {"lines_or_id": [(LINE_ID, 2)]}})
        obs, reward, done, info = env.step(action)
        # right consequences
        self._line_connected(LINE_ID, obs, busor=2)
        # right way to count it
        self._only_sub_impacted(LINE_ID, action, statuses)
    def test_setbus2_prevDisc(self):
        """{"set_bus": {"lines_or_id": [(LINE_ID, 2)]}} when disconnected"""
        self.skip_if_needed()
        env = self._make_my_env()
        LINE_ID = 1
        # set the grid to right configuration
        statuses = self._init_disco_or_not(LINE_ID, env, disco_before_the_action=True)
        # and now i test the impact of the action
        action = env.action_space({"set_bus": {"lines_or_id": [(LINE_ID, 2)]}})
        obs, reward, done, info = env.step(action)
        # right consequences
        self._line_connected(LINE_ID, obs, busor=2)
        # right way to count it
        self._only_line_impacted(LINE_ID, action, statuses)
    def test_chgtbus_prevConn(self):
        """{"change_bus": {"lines_or_id": [LINE_ID]}} when connected"""
        self.skip_if_needed()
        env = self._make_my_env()
        LINE_ID = 1
        # set the grid to right configuration
        statuses = self._init_disco_or_not(LINE_ID, env, disco_before_the_action=False)
        # and now i test the impact of the action
        action = env.action_space({"change_bus": {"lines_or_id": [LINE_ID]}})
        obs, reward, done, info = env.step(action)
        # right consequences
        self._line_connected(LINE_ID, obs, busor=2)
        # right way to count it
        self._only_sub_impacted(LINE_ID, action, statuses)
    def test_chgtbus_prevDisc(self):
        """{"change_bus": {"lines_or_id": [LINE_ID]}} when discconnected"""
        self.skip_if_needed()
        env = self._make_my_env()
        LINE_ID = 1
        # set the grid to right configuration
        statuses = self._init_disco_or_not(LINE_ID, env, disco_before_the_action=True)
        # and now i test the impact of the action
        action = env.action_space({"change_bus": {"lines_or_id": [LINE_ID]}})
        obs, reward, done, info = env.step(action)
        # right consequences
        self._line_disconnected(LINE_ID, obs)
        # right way to count it
        self._only_sub_impacted(LINE_ID, action, statuses)
# File: grid2op/tests/BaseRedispTest.py
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import pdb
import warnings
from grid2op.tests.helper_path_test import *
from grid2op.Exceptions import *
from grid2op.Environment import Environment
from grid2op.Parameters import Parameters
from grid2op.Chronics import ChronicsHandler, GridStateFromFile, ChangeNothing
from grid2op.MakeEnv import make
from grid2op.Action import BaseAction
from grid2op.tests.BaseBackendTest import MakeBackend
class BaseTestRedispatch(MakeBackend):
    def setUp(self):
        """Build a case-14 environment backed by file-based chronics, with redispatching
        on switched-off generators disallowed, for the redispatch tests below."""
        # powergrid
        self.backend = self.make_backend()
        self.path_matpower = self.get_path()
        self.case_file = self.get_casefile()
        # chronics
        self.path_chron = os.path.join(PATH_CHRONICS, "chronics")
        self.chronics_handler = ChronicsHandler(
            chronicsClass=GridStateFromFile, path=self.path_chron
        )
        # mapping of chronic load columns to backend load order
        self.id_chron_to_back_load = np.array([0, 1, 10, 2, 3, 4, 5, 6, 7, 8, 9])
        # force the verbose backend
        self.backend.detailed_infos_for_cascading_failures = True
        # chronic element names -> backend element names
        self.names_chronics_to_backend = {
            "loads": {
                "2_C-10.61": "load_1_0",
                "3_C151.15": "load_2_1",
                "14_C63.6": "load_13_2",
                "4_C-9.47": "load_3_3",
                "5_C201.84": "load_4_4",
                "6_C-6.27": "load_5_5",
                "9_C130.49": "load_8_6",
                "10_C228.66": "load_9_7",
                "11_C-138.89": "load_10_8",
                "12_C-27.88": "load_11_9",
                "13_C-13.33": "load_12_10",
            },
            "lines": {
                "1_2_1": "0_1_0",
                "1_5_2": "0_4_1",
                "9_10_16": "8_9_2",
                "9_14_17": "8_13_3",
                "10_11_18": "9_10_4",
                "12_13_19": "11_12_5",
                "13_14_20": "12_13_6",
                "2_3_3": "1_2_7",
                "2_4_4": "1_3_8",
                "2_5_5": "1_4_9",
                "3_4_6": "2_3_10",
                "4_5_7": "3_4_11",
                "6_11_11": "5_10_12",
                "6_12_12": "5_11_13",
                "6_13_13": "5_12_14",
                "4_7_8": "3_6_15",
                "4_9_9": "3_8_16",
                "5_6_10": "4_5_17",
                "7_8_14": "6_7_18",
                "7_9_15": "6_8_19",
            },
            "prods": {
                "1_G137.1": "gen_0_4",
                "3_G36.31": "gen_2_1",
                "6_G63.29": "gen_5_2",
                "2_G-56.47": "gen_1_0",
                "8_G40.43": "gen_7_3",
            },
        }
        # _parameters for the environment
        self.env_params = Parameters()
        self.env_params.ALLOW_DISPATCH_GEN_SWITCH_OFF = False
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = Environment(
                init_grid_path=os.path.join(self.path_matpower, self.case_file),
                backend=self.backend,
                init_env_path=self.path_matpower,
                chronics_handler=self.chronics_handler,
                parameters=self.env_params,
                names_chronics_to_backend=self.names_chronics_to_backend,
                actionClass=BaseAction,
                name="test_redisp_env1",
            )
        # expected dispatch after the two-action scenario of test_two_redispatch_act
        self.array_double_dispatch = np.array([0.0, 10.0, 20.0, 0.0, -30.0])
        # self.array_double_dispatch = np.array([0., 11.208119, 12.846733, 0., -24.054852])
        self.tol_one = self.env._tol_poly
    def tearDown(self):
        """Release the environment created in :func:`setUp`."""
        self.env.close()
def test_negative_dispatch(self):
self.skip_if_needed()
act = self.env.action_space({"redispatch": [(1, -10)]})
obs, reward, done, info = self.env.step(act)
assert np.all(obs.prod_p - self.env.gen_pmin >= -self.tol_one)
assert np.all(obs.prod_p <= self.env.gen_pmax + self.tol_one)
assert np.abs(np.sum(obs.actual_dispatch)) <= self.tol_one
    def test_no_impact_env(self):
        # perform a valid redispatching action
        # NOTE(review): checks that redispatching does not retroactively modify a
        # previously-returned observation, and respects pmin/pmax and ramps
        self.skip_if_needed()
        obs_init = self.env.reset()  # reset the environment
        act = self.env.action_space()
        for i in range(
            1
        ):  # number cherry picked to introduce explain the behaviour in the cells bellow
            obsinit, rewardinit, doneinit, infoinit = self.env.step(
                self.env.action_space()
            )
        ref_data = copy.deepcopy(obsinit.prod_p)
        act = self.env.action_space({"redispatch": [(0, -10)]})
        # act = env.action_space({"redispatch": [(4,0)]})
        obs, reward, done, info = self.env.step(act)
        # the earlier observation is untouched by the later step
        assert self.compare_vect(obsinit.prod_p, ref_data)
        target_val = obs.prod_p + self.env._actual_dispatch
        assert self.compare_vect(
            obs.prod_p[:-1], target_val[:-1]
        )  # I remove last component which is the slack bus
        assert np.all(obs.prod_p - self.env.gen_pmin >= -self.tol_one)
        assert np.all(target_val <= self.env.gen_pmax + self.tol_one)
        # ramp constraints hold between consecutive steps
        assert np.all(obs.prod_p - obsinit.prod_p <= self.env.gen_max_ramp_up)
        assert np.all(obsinit.prod_p - obs.prod_p <= self.env.gen_max_ramp_down)
    def test_basic_redispatch_act(self):
        # test of the implementation of a simple case redispatching on one generator, bellow ramp min and ramp max
        self.skip_if_needed()
        act = self.env.action_space({"redispatch": (2, 5)})
        obs, reward, done, info = self.env.step(act)
        # total dispatch is balanced
        assert np.abs(np.sum(self.env._actual_dispatch)) <= self.tol_one
        # first value kept for reference; the second (active) one is the expected dispatch
        th_dispatch = np.array([0.0, -2.5, 5.0, 0.0, -2.5])
        th_dispatch = np.array([0.0, -1.4814819, 5.0, 0.0, -3.518518])
        assert self.compare_vect(self.env._actual_dispatch, th_dispatch)
        target_val = (
            self.chronics_handler.real_data.prod_p[1, :] + self.env._actual_dispatch
        )
        assert self.compare_vect(
            obs.prod_p[:-1], target_val[:-1]
        )  # I remove last component which is the slack bus
        assert np.all(obs.prod_p - self.env.gen_pmin >= -self.tol_one)
        assert np.all(target_val <= self.env.gen_pmax + self.tol_one)
        # check that the redispatching is apply in the right direction
        indx_ok = self.env._target_dispatch != 0.0
        assert np.all(
            np.sign(self.env._actual_dispatch[indx_ok])
            == np.sign(self.env._target_dispatch[indx_ok])
        )
def test_redispatch_act_above_pmax(self):
# in this test, the asked redispatching for generator 2 would make it above pmax, so the environment
# need to "cut" it automatically, without invalidating the action
self.skip_if_needed()
act = self.env.action_space({"redispatch": (2, 60)})
obs, reward, done, info = self.env.step(act)
assert np.abs(np.sum(self.env._actual_dispatch)) <= self.tol_one
th_dispatch = np.array([0.0, -23.2999, 50.899902, 0.0, -27.600002])
th_dispatch = np.array([0.0, -20.0, 40.0, 0.0, -20.0])
th_dispatch = np.array([0.0, -13.227808, 50.90005, 0.0, -37.67224])
assert self.compare_vect(self.env._actual_dispatch, th_dispatch)
target_val = (
self.chronics_handler.real_data.prod_p[1, :] + self.env._actual_dispatch
)
assert self.compare_vect(
obs.prod_p[:-1], target_val[:-1]
) # I remove last component which is the slack bus
assert np.all(obs.prod_p - self.env.gen_pmin >= -self.tol_one)
assert np.all(target_val <= self.env.gen_pmax + self.tol_one)
def test_two_redispatch_act(self):
self.skip_if_needed()
act = self.env.action_space({"redispatch": (2, 20)})
obs_first, reward, done, info = self.env.step(act)
act = self.env.action_space({"redispatch": (1, 10)})
obs, reward, done, info = self.env.step(act)
th_dispatch = np.array([0.0, 10, 20.0, 0.0, 0.0])
th_dispatch[1] += obs_first.actual_dispatch[1]
assert self.compare_vect(self.env._target_dispatch, th_dispatch)
# check that the redispatching is apply in the right direction
indx_ok = self.env._target_dispatch != 0.0
assert np.all(
np.sign(self.env._actual_dispatch[indx_ok])
== np.sign(self.env._target_dispatch[indx_ok])
)
th_dispatch = np.array([0.0, 10.0, 20.0, 0.0, -30.0])
th_dispatch = np.array([0.0, 4.0765514, 20.004545, 0.0, -24.081097])
th_dispatch = np.array([0., 4.0710216, 20.015802, 0., -24.086824])
assert self.compare_vect(self.env._actual_dispatch, th_dispatch)
target_val = (
self.chronics_handler.real_data.prod_p[2, :] + self.env._actual_dispatch
)
assert self.compare_vect(
obs.prod_p[:-1], target_val[:-1]
) # I remove last component which is the slack bus
assert np.abs(np.sum(self.env._actual_dispatch)) <= self.tol_one
assert np.all(target_val <= self.env.gen_pmax + self.tol_one)
assert np.all(obs.prod_p - self.env.gen_pmin >= -self.tol_one)
def test_redispacth_two_gen(self):
self.skip_if_needed()
act = self.env.action_space({"redispatch": [(2, 20), (1, 10)]})
obs, reward, done, info = self.env.step(act)
assert not done
th_dispatch = np.array([0.0, 10, 20.0, 0.0, 0.0])
assert self.compare_vect(self.env._target_dispatch, th_dispatch)
assert self.compare_vect(self.env._actual_dispatch, self.array_double_dispatch)
# check that the redispatching is apply in the right direction
indx_ok = self.env._target_dispatch != 0.0
assert np.all(
np.sign(self.env._actual_dispatch[indx_ok])
== np.sign(self.env._target_dispatch[indx_ok])
)
assert np.all(obs.prod_p <= self.env.gen_pmax + self.tol_one)
assert np.all(obs.prod_p - self.env.gen_pmin >= -self.tol_one)
def test_redispacth_all_gen(self):
# this should be exactly the same as the previous one
self.skip_if_needed()
act = self.env.action_space({"redispatch": [(2, 20.0), (1, 10.0), (4, -30.0)]})
obs, reward, done, info = self.env.step(act)
th_dispatch = np.array([0.0, 10, 20.0, 0.0, -30.0])
assert self.compare_vect(self.env._target_dispatch, th_dispatch)
assert self.compare_vect(self.env._actual_dispatch, self.array_double_dispatch)
# check that the redispatching is apply in the right direction
indx_ok = self.env._target_dispatch != 0.0
assert np.all(
np.sign(self.env._actual_dispatch[indx_ok])
== np.sign(self.env._target_dispatch[indx_ok])
)
assert np.all(obs.prod_p <= self.env.gen_pmax + self.tol_one)
assert np.all(obs.prod_p - self.env.gen_pmin >= -self.tol_one)
    def test_count_turned_on(self):
        """Check the bookkeeping of ``_gen_uptime`` / ``_gen_downtime`` over
        many do-nothing steps (the expected vectors depend on the chronics:
        generators 0 and 3 toggle on/off at specific time steps)."""
        self.skip_if_needed()
        act = self.env.action_space()
        # recoded it: it's the normal behavior to call "env.reset()" to get the first time step
        obs = self.env.reset()
        # right after reset: gens 1, 2, 4 are on (uptime 1), gens 0 and 3 are off
        assert np.all(self.env._gen_uptime == np.array([0, 1, 1, 0, 1]))
        assert np.all(self.env._gen_downtime == np.array([1, 0, 0, 1, 0]))
        assert np.all(obs.prod_p <= self.env.gen_pmax + self.tol_one)
        assert np.all(obs.prod_p - self.env.gen_pmin >= -self.tol_one)
        obs, reward, done, info = self.env.step(act)
        # one step later every counter has increased by one
        assert np.all(self.env._gen_uptime == np.array([0, 2, 2, 0, 2]))
        assert np.all(self.env._gen_downtime == np.array([2, 0, 0, 2, 0]))
        assert np.all(obs.prod_p <= self.env.gen_pmax + self.tol_one)
        assert np.all(obs.prod_p - self.env.gen_pmin >= -self.tol_one)
        # fast-forward 64 steps, only checking the physical limits
        for i in range(64):
            obs, reward, done, info = self.env.step(act)
            assert np.all(obs.prod_p <= self.env.gen_pmax + self.tol_one)
            assert np.all(obs.prod_p - self.env.gen_pmin >= -self.tol_one)
        obs, reward, done, info = self.env.step(act)
        # generator 3 turned on at this time step (uptime 1, downtime reset)
        assert np.all(self.env._gen_uptime == np.array([0, 67, 67, 1, 67]))
        assert np.all(self.env._gen_downtime == np.array([67, 0, 0, 0, 0]))
        assert np.all(obs.prod_p <= self.env.gen_pmax + self.tol_one)
        assert np.all(obs.prod_p - self.env.gen_pmin >= -self.tol_one)
        obs, reward, done, info = self.env.step(act)
        # generator 0 turned on one step later
        assert np.all(self.env._gen_uptime == np.array([1, 68, 68, 2, 68]))
        assert np.all(self.env._gen_downtime == np.array([0, 0, 0, 0, 0]))
        assert np.all(obs.prod_p <= self.env.gen_pmax + self.tol_one)
        assert np.all(obs.prod_p - self.env.gen_pmin >= -self.tol_one)
def test_redispacth_twice_same(self):
self.skip_if_needed()
# this should be exactly the same as the previous one
act = self.env.action_space({"redispatch": [(2, 5.0)]})
obs, reward, done, info = self.env.step(act)
assert np.all(obs.target_dispatch == np.array([0.0, 0.0, 5.0, 0.0, 0.0]))
assert np.abs(np.sum(obs.actual_dispatch)) <= self.tol_one
th_disp = np.array([0.0, -2.5, 5.0, 0.0, -2.5])
th_disp = np.array([0.0, -1.4814819, 5.0, 0.0, -3.518518])
assert self.compare_vect(obs.actual_dispatch, th_disp)
assert np.all(obs.prod_p <= self.env.gen_pmax + self.tol_one)
assert np.all(obs.prod_p - self.env.gen_pmin >= -self.tol_one)
act = self.env.action_space({"redispatch": [(2, 5.0)]})
obs, reward, done, info = self.env.step(act)
assert np.all(obs.target_dispatch == np.array([0.0, 0.0, 10.0, 0.0, 0.0]))
assert np.abs(np.sum(obs.actual_dispatch)) <= self.tol_one
th_disp = np.array([0.0, -5.0, 10.0, 0.0, -5.0])
th_disp = np.array([0., -2.9629638, 10., 0., -7.037036 ])
assert self.compare_vect(obs.actual_dispatch, th_disp)
assert np.all(obs.prod_p <= self.env.gen_pmax + self.tol_one)
assert np.all(obs.prod_p - self.env.gen_pmin >= -self.tol_one)
def test_redispacth_secondabovepmax(self):
self.skip_if_needed()
act = self.env.action_space({"redispatch": [(2, 20.0)]})
obs0, reward, done, info = self.env.step(act)
assert np.all(obs0.target_dispatch == np.array([0.0, 0.0, 20.0, 0.0, 0.0]))
assert np.abs(np.sum(obs0.actual_dispatch)) <= self.tol_one
th_disp = np.array([0.0, -10.0, 20.0, 0.0, -10.0])
th_disp = np.array([0.0, -5.9259276, 20.0, 0.0, -14.074072])
assert self.compare_vect(obs0.actual_dispatch, th_disp)
assert np.all(obs0.prod_p <= self.env.gen_pmax + self.tol_one)
assert np.all(obs0.prod_p >= self.env.gen_pmin - self.tol_one)
act = self.env.action_space({"redispatch": [(2, 40.0)]})
obs, reward, done, info = self.env.step(act)
assert not info["is_dispatching_illegal"]
assert np.all(obs.target_dispatch == np.array([0.0, 0.0, 60.0, 0.0, 0.0]))
th_disp = np.array([0.0, -23.5, 50.4, 0.0, -26.900002])
th_disp = np.array([0., -12.977809, 50.40005, 0., -37.42224 ])
assert self.compare_vect(obs.actual_dispatch, th_disp)
assert np.all(obs.prod_p[:-1] <= self.env.gen_pmax[:-1] + self.tol_one)
assert np.all(obs.prod_p[:-1] >= self.env.gen_pmin[:-1] - self.tol_one)
assert np.all(
obs.prod_p[:-1] - obs0.prod_p[:-1] >= -self.env.gen_max_ramp_down[:-1]
)
assert np.all(
obs.prod_p[:-1] - obs0.prod_p[:-1] <= self.env.gen_max_ramp_up[:-1]
)
def test_redispacth_non_dispatchable_generator(self):
"""Dispatch a non redispatchable generator is ambiguous"""
self.skip_if_needed()
act = self.env.action_space()
obs, reward, done, info = self.env.step(act)
# Check that generator 0 isn't redispatchable
assert self.env.gen_redispatchable[0] == False
# Check that generator 0 is off
assert self.env._gen_downtime[0] >= 1
# Try to redispatch
redispatch_act = self.env.action_space({"redispatch": [(0, 5.0)]})
obs, reward, done, info = self.env.step(redispatch_act)
assert info["is_ambiguous"]
class BaseTestRedispatchChangeNothingEnvironment(MakeBackend):
    """Redispatching tests on an environment whose chronics never change
    (``ChangeNothing``), with ``ALLOW_DISPATCH_GEN_SWITCH_OFF`` disabled.

    Fix: replaced the non-idiomatic ``== True`` comparisons (PEP 8 /
    flake8 E712) with plain truthiness checks.
    """

    def setUp(self):
        """Build the full environment by hand (backend, chronics, parameters)."""
        # powergrid
        self.backend = self.make_backend()
        self.path_matpower = self.get_path()
        self.case_file = self.get_casefile()
        # chronics
        self.path_chron = os.path.join(PATH_CHRONICS, "chronics")
        self.chronics_handler = ChronicsHandler(chronicsClass=ChangeNothing)
        self.id_chron_to_back_load = np.array([0, 1, 10, 2, 3, 4, 5, 6, 7, 8, 9])
        # force the verbose backend
        self.backend.detailed_infos_for_cascading_failures = True
        # mapping between the names used in the chronics and the backend names
        self.names_chronics_to_backend = {
            "loads": {
                "2_C-10.61": "load_1_0",
                "3_C151.15": "load_2_1",
                "14_C63.6": "load_13_2",
                "4_C-9.47": "load_3_3",
                "5_C201.84": "load_4_4",
                "6_C-6.27": "load_5_5",
                "9_C130.49": "load_8_6",
                "10_C228.66": "load_9_7",
                "11_C-138.89": "load_10_8",
                "12_C-27.88": "load_11_9",
                "13_C-13.33": "load_12_10",
            },
            "lines": {
                "1_2_1": "0_1_0",
                "1_5_2": "0_4_1",
                "9_10_16": "8_9_2",
                "9_14_17": "8_13_3",
                "10_11_18": "9_10_4",
                "12_13_19": "11_12_5",
                "13_14_20": "12_13_6",
                "2_3_3": "1_2_7",
                "2_4_4": "1_3_8",
                "2_5_5": "1_4_9",
                "3_4_6": "2_3_10",
                "4_5_7": "3_4_11",
                "6_11_11": "5_10_12",
                "6_12_12": "5_11_13",
                "6_13_13": "5_12_14",
                "4_7_8": "3_6_15",
                "4_9_9": "3_8_16",
                "5_6_10": "4_5_17",
                "7_8_14": "6_7_18",
                "7_9_15": "6_8_19",
            },
            "prods": {
                "1_G137.1": "gen_0_4",
                "3_G36.31": "gen_2_1",
                "6_G63.29": "gen_5_2",
                "2_G-56.47": "gen_1_0",
                "8_G40.43": "gen_7_3",
            },
        }
        # _parameters for the environment
        self.env_params = Parameters()
        self.env_params.ALLOW_DISPATCH_GEN_SWITCH_OFF = False
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = Environment(
                init_grid_path=os.path.join(self.path_matpower, self.case_file),
                backend=self.backend,
                init_env_path=self.path_matpower,
                chronics_handler=self.chronics_handler,
                parameters=self.env_params,
                names_chronics_to_backend=self.names_chronics_to_backend,
                actionClass=BaseAction,
                name="test_redisp_env2",
            )
        # numerical tolerance used by the assertions
        self.tol_one = self.env._tol_poly

    def tearDown(self):
        """Release the environment created in setUp."""
        self.env.close()

    def test_redispatch_generator_off(self):
        """Redispatch a turned off generator is illegal"""
        self.skip_if_needed()
        # Step into simulation once
        nothing_act = self.env.action_space()
        obs, reward, done, info = self.env.step(nothing_act)
        # Check that generator 1 is redispatchable
        assert self.env.gen_redispatchable[1]
        # Check that generator 1 is off
        assert obs.prod_p[1] == 0
        assert self.env._gen_downtime[1] >= 1
        # Try to redispatch generator 1
        redispatch_act = self.env.action_space({"redispatch": [(1, 5.0)]})
        obs, reward, done, info = self.env.step(redispatch_act)
        assert info["is_dispatching_illegal"]
class BaseTestRedispTooLowHigh(MakeBackend):
    """Non-regression tests for redispatching values outside [pmin-pmax]."""
    # test bug reported in issues https://github.com/rte-france/Grid2Op/issues/44
    def setUp(self) -> None:
        """Create the test environment and effectively disable ramp limits."""
        backend = self.make_backend()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = make("rte_case14_redisp", test=True, backend=backend)
        # i don't want to be bother by ramps in these test (note that is NOT recommended to change that)
        # NOTE: this mutates *class-level* attributes of the env / action classes
        type(self.env).gen_max_ramp_down[:] = 5000
        type(self.env).gen_max_ramp_up[:] = 5000
        act_cls = type(self.env.action_space())
        act_cls.gen_max_ramp_down[:] = 5000
        act_cls.gen_max_ramp_up[:] = 5000
        # beginning of the error message expected for a non-zero-sum redispatch
        self.msg_ = (
            'Grid2OpException AmbiguousAction InvalidRedispatching NotEnoughGenerators "Attempt to use a '
            "redispatch action that does not sum to 0., but a"
        )
        # numerical tolerance used by the assertions
        self.tol_one = self.env._tol_poly
    def tearDown(self):
        """Release the environment created in setUp."""
        self.env.close()
    def test_redisp_toohigh_toolow(self):
        """
        This test that: 1) if i do a valid redispatching, it's valid
        2) if i set up a redispatching too high (higher than pmax - pmin for a generator) it's not valid
        3) if i set up a redispatching too low (demanding to decrease more than pmax - pmin) it's not valid
        :return:
        """
        self.skip_if_needed()
        # this dispatch (though legal) broke everything
        act = self.env.action_space({"redispatch": (0, -1)})
        obs, reward, done, info = self.env.step(act)
        assert not done
        assert not info["is_dispatching_illegal"]
        assert np.all(self.env._target_dispatch == [-1.0, 0.0, 0.0, 0.0, 0.0])
        act = self.env.action_space({"redispatch": (0, 0)})
        obs, reward, done, info = self.env.step(act)
        assert not done
        assert not info["is_dispatching_illegal"]
        assert np.all(self.env._target_dispatch == [-1.0, 0.0, 0.0, 0.0, 0.0])
        # this one is not correct: too high decrease
        act = self.env.action_space(
            {"redispatch": (0, self.env.gen_pmin[0] - self.env.gen_pmax[0])}
        )
        obs, reward, done, info = self.env.step(act)
        assert not done
        assert info["is_dispatching_illegal"]
        # the illegal action leaves the target dispatch untouched
        assert np.all(self.env._target_dispatch == [-1.0, 0.0, 0.0, 0.0, 0.0])
        # this one is not correct: too high increase
        act = self.env.action_space(
            {"redispatch": (0, self.env.gen_pmax[0] - self.env.gen_pmin[0] + 2)}
        )
        obs, reward, done, info = self.env.step(act)
        assert not done
        assert info["is_dispatching_illegal"]
        assert np.all(self.env._target_dispatch == [-1.0, 0.0, 0.0, 0.0, 0.0])
    def test_error_message_notzerosum_oneshot(self):
        """Check the error message of a non-zero-sum redispatch done in one action (skipped)."""
        self.skipTest("Ok with new redispatching implementation")
        act = self.env.action_space(
            {
                "redispatch": [
                    (0, 4.9999784936326535),
                    (1, 4.78524395611872),
                    (4, -9.999591852954794),
                ]
            }
        )
        obs, reward, done, info = self.env.step(act)
        assert info["is_dispatching_illegal"]
        assert info["exception"][0].__str__()[:140] == self.msg_
    def test_error_message_notzerosum_threesteps(self):
        """Check the error message of a non-zero-sum redispatch built over three steps (skipped)."""
        self.skipTest("Ok with new redispatching implementation")
        act = self.env.action_space({"redispatch": [(0, 4.9999784936326535)]})
        obs, reward, done, info = self.env.step(act)
        assert info["is_dispatching_illegal"] is False
        act = self.env.action_space({"redispatch": [(1, 4.78524395611872)]})
        obs, reward, done, info = self.env.step(act)
        assert info["is_dispatching_illegal"] is False
        act = self.env.action_space({"redispatch": [(4, -9.999591852954794)]})
        obs, reward, done, info = self.env.step(act)
        assert info["is_dispatching_illegal"]
        assert info["exception"][0].__str__()[:140] == self.msg_
class BaseTestDispatchRampingIllegalETC(MakeBackend):
    """Tests about ramp limits, illegal redispatching and the zero-sum
    property of the actual dispatch, on the "rte_case14_test" environment."""
    def setUp(self):
        """Create the shared test environment."""
        # powergrid
        backend = self.make_backend()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = make("rte_case14_test", test=True, backend=backend)
        # numerical tolerance used by all the assertions below
        self.tol_one = self.env._tol_poly
    def tearDown(self):
        """Release the environment created in setUp."""
        self.env.close()
    def test_invalid_dispatch(self):
        """An infeasible -10MW redispatch on generator 0 raises an exception."""
        self.skip_if_needed()
        self.env.set_id(0)  # make sure to use the same environment input data.
        obs_init = self.env.reset()  # reset the environment
        act = self.env.action_space()
        for i in range(
            2
        ):  # number cherry picked to introduce explain the behaviour in the cells bellow
            obsinit, rewardinit, doneinit, infoinit = self.env.step(act)
        act = self.env.action_space({"redispatch": [(0, -10)]})
        obs, reward, done, info = self.env.step(act)
        assert len(info["exception"])
    def test_redispatch_rampminmax(self):
        """The generation variation implied by the dispatch stays within ramp limits."""
        self.skip_if_needed()
        # test that the redispatch value is always above the ramp min and below the ramp max
        self.env.set_id(0)  # make sure to use the same environment input data.
        obs_init = self.env.reset()  # reset the environment
        act = self.env.action_space()
        for i in range(
            2
        ):  # number cherry picked to introduce explain the behaviour in the cells bellow
            obsinit, rewardinit, doneinit, infoinit = self.env.step(act)
        act = self.env.action_space({"redispatch": [(0, -5)]})
        # act = env.action_space({"redispatch": [(4,0)]})
        obs, reward, done, info = self.env.step(act)
        target_p = self.env.chronics_handler.real_data.data.prod_p[3, :]
        target_p_t = self.env.chronics_handler.real_data.data.prod_p[2, :]
        assert self.compare_vect(obsinit.prod_p[:-1], target_p_t[:-1])
        # only look at dispatchable generator, remove slack bus (last generator)
        assert np.all(
            obs.prod_p[0:2] - obsinit.prod_p[0:2]
            <= obs.gen_max_ramp_up[0:2] + self.tol_one
        )
        assert np.all(
            obs.prod_p[0:2] - obsinit.prod_p[0:2]
            >= -obs.gen_max_ramp_down[0:2] - self.tol_one
        )
        assert np.all(obs.prod_p[0:2] >= obs.gen_pmin[0:2] - self.tol_one)
        assert np.all(obs.prod_p[0:2] <= obs.gen_pmax[0:2] + self.tol_one)
    def test_redispatch_noneedtocurtaildispact(self):
        """A +5MW redispatch that fits the ramps is applied without curtailment."""
        self.skip_if_needed()
        # test that the redispatch value is always above the ramp min and below the ramp max
        self.env.set_id(0)  # make sure to use the same environment input data.
        obs_init = self.env.reset()  # reset the environment
        act = self.env.action_space()
        for i in range(
            2
        ):  # number cherry picked to introduce explain the behaviour in the cells bellow
            obsinit, rewardinit, doneinit, infoinit = self.env.step(act)
            assert len(infoinit["exception"]) == 0
        act = self.env.action_space({"redispatch": [(0, +5)]})
        obs, reward, done, info = self.env.step(act)
        assert not done
        assert np.all(self.env._target_dispatch == [5.0, 0.0, 0.0, 0.0, 0.0])
        target_p = self.env.chronics_handler.real_data.data.prod_p[3, :]
        target_p_t = self.env.chronics_handler.real_data.data.prod_p[2, :]
        assert self.compare_vect(obsinit.prod_p[:-1], target_p_t[:-1])
        # only look at dispatchable generator, remove slack bus (last generator)
        assert np.all(
            obs.prod_p[0:2] - obsinit.prod_p[0:2]
            <= obs.gen_max_ramp_up[0:2] + self.tol_one
        )
        assert np.all(
            obs.prod_p[0:2] - obsinit.prod_p[0:2]
            >= -obs.gen_max_ramp_down[0:2] - self.tol_one
        )
        # the +5MW is fully applied, compensated by generator 1 and the slack
        assert np.all(
            np.abs(self.env._actual_dispatch - np.array([5.0, -2.5, 0.0, 0.0, -2.5]))
            <= self.tol_one
        )
    def test_sum0_again(self):
        """A valid redispatching keeps the actual dispatch summing to ~0."""
        # perform a valid redispatching action
        self.skip_if_needed()
        self.env.set_id(0)  # make sure to use the same environment input data.
        obs_init = self.env.reset()  # reset the environment
        act = self.env.action_space({"redispatch": [(0, +10)]})
        obs, reward, done, info = self.env.step(act)
        assert np.abs(np.sum(obs.actual_dispatch)) <= self.tol_one
        indx_ok = self.env._target_dispatch != 0.0
        # actual dispatch keeps the sign of the target dispatch
        assert np.all(
            np.sign(self.env._actual_dispatch[indx_ok])
            == np.sign(self.env._target_dispatch[indx_ok])
        )
    def test_sum0_again2(self):
        """Dispatch then cancel it: dispatch stays zero-sum and within the ramps."""
        self.skip_if_needed()
        env = self.env
        # perform a valid redispatching action
        env.set_id(0)  # make sure to use the same environment input data.
        obs_init = env.reset()  # reset the environment
        act = env.action_space()
        act = env.action_space({"redispatch": [(0, +5)]})
        obs, reward, done, info = env.step(act)
        assert np.abs(np.sum(obs.actual_dispatch)) <= self.tol_one
        indx_ok = self.env._target_dispatch != 0.0
        assert np.all(
            np.sign(self.env._actual_dispatch[indx_ok])
            == np.sign(self.env._target_dispatch[indx_ok])
        )
        # let the setpoint settle, then "cancel" the dispatch
        donothing = env.action_space()
        obsinit, reward, done, info = env.step(donothing)
        act = env.action_space({"redispatch": [(0, -5)]})
        # act = env.action_space({"redispatch": [(0,0)]})
        obs, reward, done, info = env.step(act)
        assert np.all(
            obs.prod_p[0:2] - obsinit.prod_p[0:2]
            <= obs.gen_max_ramp_up[0:2] + self.tol_one
        )
        assert np.all(
            obs.prod_p[0:2] - obsinit.prod_p[0:2]
            >= -obs.gen_max_ramp_down[0:2] - self.tol_one
        )
        assert np.abs(np.sum(obs.actual_dispatch)) <= self.tol_one
    def test_sum0_again3(self):
        """Dispatch +5, wait, cancel with -5, wait: the dispatch ends at exactly 0."""
        self.skip_if_needed()
        env = self.env
        # perform a valid redispatching action
        env.set_id(0)  # make sure to use the same environment input data.
        obs_init = env.reset()  # reset the environment
        act = env.action_space()
        # ask +5
        act = env.action_space({"redispatch": [(0, +5)]})
        obs, reward, done, info = env.step(act)
        assert np.abs(np.sum(obs.actual_dispatch)) <= self.tol_one
        indx_ok = self.env._target_dispatch != 0.0
        assert np.all(
            np.sign(self.env._actual_dispatch[indx_ok])
            == np.sign(self.env._target_dispatch[indx_ok])
        )
        assert np.all(
            obs.prod_p[0:2] - obs_init.prod_p[0:2]
            <= obs.gen_max_ramp_up[0:2] + self.tol_one
        )
        assert np.all(
            obs.prod_p[0:2] - obs_init.prod_p[0:2]
            >= -obs.gen_max_ramp_down[0:2] - self.tol_one
        )
        assert np.all(
            np.abs(obs.actual_dispatch - np.array([5.0, -2.5, 0.0, 0.0, -2.5]))
            <= self.tol_one
        )
        assert len(info["exception"]) == 0
        # wait for the setpoint to be reached
        donothing = env.action_space()
        obsinit, reward, done, info = env.step(donothing)
        assert np.all(
            np.abs(obs.actual_dispatch - np.array([5.0, -2.5, 0.0, 0.0, -2.5]))
            <= self.tol_one
        )
        assert len(info["exception"]) == 0
        # "cancel" action
        act = env.action_space({"redispatch": [(0, -5)]})
        obs, reward, done, info = env.step(act)
        assert not done
        assert np.all(
            obs.prod_p[0:2] - obsinit.prod_p[0:2]
            <= obs.gen_max_ramp_up[0:2] + self.tol_one
        )
        assert np.all(
            obs.prod_p[0:2] - obsinit.prod_p[0:2]
            >= -obs.gen_max_ramp_down[0:2] - self.tol_one
        )
        assert np.abs(np.sum(obs.actual_dispatch)) <= self.tol_one
        assert len(info["exception"]) == 0
        # wait for setpoint to be reached
        obsfinal, reward, done, info = env.step(donothing)
        assert not done
        assert np.all(
            obsfinal.prod_p[0:2] - obs.prod_p[0:2]
            <= obs.gen_max_ramp_up[0:2] + self.tol_one
        )
        assert np.all(
            obsfinal.prod_p[0:2] - obs.prod_p[0:2]
            >= -obs.gen_max_ramp_down[0:2] - self.tol_one
        )
        assert (
            np.abs(np.sum(obsfinal.actual_dispatch)) <= self.tol_one
        )  # redispatching should sum at 0.
        assert (
            np.sum(np.abs(obsfinal.actual_dispatch)) <= self.tol_one
        )  # redispatching should be canceled by now
        assert len(info["exception"]) == 0
    def test_dispatch_still_not_zero(self):
        """Push generator 0 up by its max ramp every step until the demand
        exceeds what is feasible: at that point an exception must be raised."""
        self.skip_if_needed()
        env = self.env
        max_iter = 40
        # agent = GreedyEconomic(env.action_space)
        done = False
        # reward = env.reward_range[0]
        env.set_id(0)  # reset the env to the same id
        obs_init = env.reset()
        i = 0
        act = env.action_space({"redispatch": [(0, obs_init.gen_max_ramp_up[0])]})
        while not done:
            obs, reward, done, info = env.step(act)
            # print("act._redisp {}".format(act._redispatch))
            assert not done, "game over at iteration {}".format(i)
            assert len(info["exception"]) == 0, "error at iteration {}".format(i)
            # NB: only gen 0 and 1 are included because gen 2,3 are renewables and gen 4 is slack bus
            assert np.all(
                obs.prod_p[0:2] - obs_init.prod_p[0:2]
                <= obs.gen_max_ramp_up[0:2] + self.tol_one
            ), "above max_ramp for ts {}".format(i)
            assert np.all(
                obs.prod_p[0:2] - obs_init.prod_p[0:2]
                >= -obs.gen_max_ramp_down[0:2] - self.tol_one
            ), "below min_ramp for ts {}".format(i)
            try:
                # NOTE(review): a failure of this assert is silently swallowed
                # by the bare "except" below, effectively disabling the pmax
                # check — presumably deliberate; consider narrowing it to
                # "except AssertionError" and documenting why.
                assert np.all(
                    obs.prod_p[0:2] <= obs.gen_pmax[0:2]
                ), "above pmax for ts {}".format(i)
            except:
                pass
            assert np.all(
                obs.prod_p[0:2] >= -obs.gen_pmin[0:2]
            ), "below pmin for ts {}".format(i)
            assert np.abs(np.sum(obs.actual_dispatch)) <= self.tol_one
            i += 1
            obs_init = obs
            if i >= max_iter:
                break
        # one more step: by now the cumulated demand is infeasible
        obs, reward, done, info = env.step(act)
        assert np.all(
            obs.prod_p[0:2] - obs_init.prod_p[0:2]
            <= obs.gen_max_ramp_up[0:2] + self.tol_one
        ), "above max_ramp at the end"
        assert np.all(
            obs.prod_p[0:2] - obs_init.prod_p[0:2]
            >= -obs.gen_max_ramp_down[0:2] - self.tol_one
        ), "above min_ramp at the end"
        assert np.all(
            obs.prod_p[0:2] <= obs.gen_pmax[0:2] + self.tol_one
        ), "above pmax at the end"
        assert np.all(
            obs.prod_p[0:2] >= -obs.gen_pmin[0:2] - self.tol_one
        ), "below pmin at the end"
        assert (
            np.abs(np.sum(obs.actual_dispatch)) <= self.tol_one
        ), "redisp not 0 at the end"
        # this redispatching is impossible because we ask to increase the value of the generator of 210
        # which is higher than pmax
        assert len(info["exception"]), "this redispatching should not be possible"
class BaseTestLoadingAcceptAlmostZeroSumRedisp(MakeBackend):
    """Tests (currently all skipped) about how a redispatching whose sum is
    only *almost* zero is accepted or rejected."""
    def setUp(self):
        """Create the shared test environment."""
        # powergrid
        backend = self.make_backend()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = make("rte_case14_test", test=True, backend=backend)
        # numerical tolerance used by the assertions
        self.tol_one = self.env._tol_poly
    def tearDown(self):
        """Release the environment created in setUp."""
        self.env.close()
    def test_accept_almost_zerozum_too_high(self):
        """Sum slightly above zero (+1e-7): should be accepted."""
        self.skip_if_needed()
        self.skipTest("it is possible now to accept pretty much everything")
        redisp_act = self.env.action_space(
            {"redispatch": [(0, 3), (1, -1), (-1, -2 + 1e-7)]}
        )
        obs, reward, done, info = self.env.step(redisp_act)
        assert np.all(obs.prod_p[0:2] <= obs.gen_pmax[0:2])
        assert np.all(obs.prod_p[0:2] >= -obs.gen_pmin[0:2])
        assert np.abs(np.sum(obs.actual_dispatch)) <= self.tol_one
        assert len(info["exception"]) == 0
    def test_accept_almost_zerozum_too_low(self):
        """Sum slightly below zero (-1e-7): should be accepted."""
        self.skip_if_needed()
        self.skipTest("it is possible now to accept pretty much everything")
        redisp_act = self.env.action_space(
            {"redispatch": [(0, 3), (1, -1), (-1, -2 - 1e-7)]}
        )
        obs, reward, done, info = self.env.step(redisp_act)
        assert np.all(obs.prod_p[0:2] <= obs.gen_pmax[0:2])
        assert np.all(obs.prod_p[0:2] >= -obs.gen_pmin[0:2])
        assert np.abs(np.sum(obs.actual_dispatch)) <= self.tol_one
        assert len(info["exception"]) == 0
    def test_accept_almost_zerozum_shouldnotbepossible_low(self):
        """Sum clearly below zero (-1e-1): should be rejected (no dispatch applied)."""
        self.skip_if_needed()
        self.skipTest("it is possible now to accept pretty much everything")
        redisp_act = self.env.action_space(
            {"redispatch": [(0, 3), (1, -1), (-1, -2 - 1e-1)]}
        )
        obs, reward, done, info = self.env.step(redisp_act)
        assert np.all(obs.prod_p[0:2] <= obs.gen_pmax[0:2])
        assert np.all(obs.prod_p[0:2] >= -obs.gen_pmin[0:2])
        assert np.all(obs.actual_dispatch == 0.0)
        assert len(info["exception"])
    def test_accept_almost_zerozum_shouldnotbepossible_high(self):
        """Sum clearly above zero (+1e-1): should be rejected (no dispatch applied)."""
        self.skip_if_needed()
        self.skipTest("it is possible now to accept pretty much everything")
        redisp_act = self.env.action_space(
            {"redispatch": [(0, 3), (1, -1), (-1, -2 + 1e-1)]}
        )
        obs, reward, done, info = self.env.step(redisp_act)
        assert np.all(obs.prod_p[0:2] <= obs.gen_pmax[0:2])
        assert np.all(obs.prod_p[0:2] >= -obs.gen_pmin[0:2])
        assert np.all(obs.actual_dispatch == 0.0)
        assert len(info["exception"])
| 38,221 | 42.732265 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/tests/__init__.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
__all__ = ["BaseBackendTest", "BaseIssuesTest", "BaseRedispTest"]
| 529 | 52 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/_aux_test_gym_compat.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import warnings
# TODO test the json part but... https://github.com/openai/gym-http-api/issues/62 or https://github.com/openai/gym/issues/1841
# TODO when functions are called in the converter (especially with graph)
from grid2op.tests.helper_path_test import *
import grid2op
from grid2op.dtypes import dt_float, dt_int
from grid2op.tests.helper_path_test import *
from grid2op.Action import PlayableAction
from grid2op.gym_compat import GymActionSpace, GymObservationSpace
from grid2op.gym_compat import GymEnv
from grid2op.gym_compat import ContinuousToDiscreteConverter
from grid2op.gym_compat import ScalerAttrConverter
from grid2op.gym_compat import MultiToTupleConverter
from grid2op.gym_compat import (
GYM_AVAILABLE,
GYMNASIUM_AVAILABLE,
BoxGymObsSpace,
BoxGymActSpace,
MultiDiscreteActSpace,
DiscreteActSpace,
)
from grid2op.gym_compat.utils import _compute_extra_power_for_losses, _MAX_GYM_VERSION_RANDINT, GYM_VERSION
import pdb
class AuxilliaryForTest:
    """Mixin used by the gym-compatibility tests.

    Each ``_aux_*_cls`` helper returns the class to use for a given gym
    concept. For the space classes (Box, MultiDiscrete, Discrete, Tuple,
    Dict), ``gymnasium`` takes precedence over the legacy ``gym`` package
    when both are installed (imports are done lazily, at call time).
    """
    def _aux_GymEnv_cls(self):
        return GymEnv
    def _aux_ContinuousToDiscreteConverter_cls(self):
        return ContinuousToDiscreteConverter
    def _aux_ScalerAttrConverter_cls(self):
        return ScalerAttrConverter
    def _aux_MultiToTupleConverter_cls(self):
        return MultiToTupleConverter
    def _aux_BoxGymObsSpace_cls(self):
        return BoxGymObsSpace
    def _aux_BoxGymActSpace_cls(self):
        return BoxGymActSpace
    def _aux_MultiDiscreteActSpace_cls(self):
        return MultiDiscreteActSpace
    def _aux_DiscreteActSpace_cls(self):
        return DiscreteActSpace
    def _aux_Box_cls(self):
        # gymnasium takes precedence when both packages are available
        if GYMNASIUM_AVAILABLE:
            from gymnasium.spaces import Box
            return Box
        if GYM_AVAILABLE:
            from gym.spaces import Box
            return Box
    def _aux_MultiDiscrete_cls(self):
        if GYMNASIUM_AVAILABLE:
            from gymnasium.spaces import MultiDiscrete
            return MultiDiscrete
        if GYM_AVAILABLE:
            from gym.spaces import MultiDiscrete
            return MultiDiscrete
    def _aux_Discrete_cls(self):
        if GYMNASIUM_AVAILABLE:
            from gymnasium.spaces import Discrete
            return Discrete
        if GYM_AVAILABLE:
            from gym.spaces import Discrete
            return Discrete
    def _aux_Tuple_cls(self):
        if GYMNASIUM_AVAILABLE:
            from gymnasium.spaces import Tuple
            return Tuple
        if GYM_AVAILABLE:
            from gym.spaces import Tuple
            return Tuple
    def _aux_Dict_cls(self):
        if GYMNASIUM_AVAILABLE:
            from gymnasium.spaces import Dict
            return Dict
        if GYM_AVAILABLE:
            from gym.spaces import Dict
            return Dict
    def _skip_if_no_gym(self):
        """Skip the current test when neither gym nor gymnasium is installed."""
        if not GYM_AVAILABLE and not GYMNASIUM_AVAILABLE:
            self.skipTest("Gym is not available")
class _AuxTestGymCompatModule:
    def setUp(self) -> None:
        """Create (and seed) the grid2op environment wrapped by the tests."""
        self._skip_if_no_gym()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(
                "l2rpn_case14_sandbox", test=True, _add_to_name="TestGymCompatModule"
            )
        self.env.seed(0)
        self.env.reset()  # seed part !
    def tearDown(self) -> None:
        """Release the environment created in setUp."""
        self.env.close()
def test_print_with_no_storage(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
self.env = grid2op.make(
"l2rpn_icaps_2021", test=True, _add_to_name="TestGymCompatModule"
)
self.env.seed(0)
self.env.reset() # seed part !
env_gym = self._aux_GymEnv_cls()(self.env)
str_ = env_gym.action_space.__str__() # this crashed
str_ = env_gym.observation_space.__str__()
def test_can_create(self):
env_gym = self._aux_GymEnv_cls()(self.env)
assert isinstance(env_gym, self._aux_GymEnv_cls())
assert isinstance(env_gym.action_space, self._aux_Dict_cls())
assert isinstance(env_gym.observation_space, self._aux_Dict_cls())
    def test_convert_togym(self):
        """test i can create the env"""
        env_gym = self._aux_GymEnv_cls()(self.env)
        # total number of scalar components of the gym action space
        dim_act_space = np.sum(
            [
                np.sum(env_gym.action_space[el].shape)
                for el in env_gym.action_space.spaces
            ]
        )
        assert dim_act_space == 166, f"{dim_act_space} != 166"
        # total number of scalar components of the gym observation space
        dim_obs_space = np.sum(
            [
                np.sum(env_gym.observation_space[el].shape).astype(int)
                for el in env_gym.observation_space.spaces
            ]
        )
        size_th = 536  # as of grid2Op 1.7.1 (where all obs attributes are there)
        assert (
            dim_obs_space == size_th
        ), f"Size should be {size_th} but is {dim_obs_space}"
        # test that i can do basic stuff there
        obs, info = env_gym.reset()
        # every component of the observation belongs to its declared space
        for k in env_gym.observation_space.spaces.keys():
            assert obs[k] in env_gym.observation_space[k], f"error for key: {k}"
        act = env_gym.action_space.sample()
        obs2, reward2, done2, truncated, info2 = env_gym.step(act)
        assert obs2 in env_gym.observation_space
        # test for the __str__ method
        str_ = self.env.action_space.__str__()
        str_ = self.env.observation_space.__str__()
def test_ignore(self):
"""test the ignore_attr method"""
env_gym = self._aux_GymEnv_cls()(self.env)
env_gym.action_space = env_gym.action_space.ignore_attr("set_bus").ignore_attr(
"set_line_status"
)
dim_act_space = np.sum(
[
np.sum(env_gym.action_space[el].shape)
for el in env_gym.action_space.spaces
]
)
assert dim_act_space == 89, f"{dim_act_space=} != 89"
def test_keep_only(self):
"""test the keep_only_attr method"""
env_gym = self._aux_GymEnv_cls()(self.env)
env_gym.observation_space = env_gym.observation_space.keep_only_attr(
["rho", "gen_p", "load_p", "topo_vect", "actual_dispatch"]
)
new_dim_obs_space = np.sum(
[
np.sum(env_gym.observation_space[el].shape).astype(int)
for el in env_gym.observation_space.spaces
]
)
assert new_dim_obs_space == 100
    def test_scale_attr_converter(self):
        """test a scale_attr converter"""
        env_gym = self._aux_GymEnv_cls()(self.env)
        ob_space = env_gym.observation_space
        key = "actual_dispatch"
        # before re-encoding, the bounds are the raw grid2op ones: [-gen_pmax, gen_pmax]
        low = -self.env.gen_pmax
        high = 1.0 * self.env.gen_pmax
        assert np.array_equal(
            env_gym.observation_space[key].low, low
        ), f"issue for {key}"
        assert np.array_equal(
            env_gym.observation_space[key].high, high
        ), f"issue for {key}"
        # re-encode "actual_dispatch": divide by gen_pmax, no offset
        ob_space = ob_space.reencode_space(
            "actual_dispatch",
            self._aux_ScalerAttrConverter_cls()(substract=0.0, divide=self.env.gen_pmax),
        )
        env_gym.observation_space = ob_space
        # a reset is required after modifying the observation space
        obs, info = env_gym.reset()
        assert key in env_gym.observation_space.spaces
        # after scaling, the bounds become [-1, 1]
        low = np.zeros(self.env.n_gen) - 1
        high = np.zeros(self.env.n_gen) + 1
        assert np.array_equal(
            env_gym.observation_space[key].low, low
        ), f"issue for {key}"
        assert np.array_equal(
            env_gym.observation_space[key].high, high
        ), f"issue for {key}"
        assert obs in env_gym.observation_space
def test_add_key(self):
"""test the add_key feature"""
env_gym = self._aux_GymEnv_cls()(self.env)
shape_ = (self.env.dim_topo, self.env.dim_topo)
key = "connectivity_matrix"
env_gym.observation_space.add_key(
key,
lambda obs: obs.connectivity_matrix(),
self._aux_Box_cls()(
shape=shape_,
low=np.zeros(shape_, dtype=dt_float),
high=np.ones(shape_, dtype=dt_float),
dtype=dt_float,
),
)
# we highly recommend to "reset" the environment after setting up the observation space
obs_gym, info = env_gym.reset()
assert key in env_gym.observation_space.spaces
assert obs_gym in env_gym.observation_space
    def test_chain_converter(self):
        """test i can do two converters on the same key
        this method depends on the version of gym you have installed, tests are made for gym-0.23.1
        """
        from grid2op._glop_platform_info import _IS_LINUX, _IS_WINDOWS, _IS_MACOS
        if _IS_MACOS:
            self.skipTest("Test not suited on macos")
        env_gym = self._aux_GymEnv_cls()(self.env)
        # first converter: continuous redispatch -> 11-bin discrete
        env_gym.action_space = env_gym.action_space.reencode_space(
            "redispatch", self._aux_ContinuousToDiscreteConverter_cls()(nb_bins=11)
        )
        env_gym.action_space.seed(0)
        act_gym = env_gym.action_space.sample()
        # NOTE(review): in the blocks below, earlier assignments to `res` are dead
        # code — they are kept as a history of expected values for older gym
        # versions; only the last assignment in each branch is actually used.
        if _IS_WINDOWS:
            res = (7, 9, 0, 0, 0, 9)
        else:
            # it's linux
            if GYM_VERSION <= _MAX_GYM_VERSION_RANDINT:
                res = (1, 2, 0, 0, 0, 0)
                res = (5, 3, 0, 0, 0, 1)
                res = (2, 2, 0, 0, 0, 9)
                res = (10, 3, 0, 0, 0, 7)
            else:
                res = (0, 6, 0, 0, 0, 5)
                res = (10, 3, 0, 0, 0, 7)
        assert np.all(
            act_gym["redispatch"] == res
        ), f'wrong action: {act_gym["redispatch"]}'
        act_gym = env_gym.action_space.sample()
        if _IS_WINDOWS:
            res = (2, 9, 0, 0, 0, 1)
        else:
            # it's linux
            if GYM_VERSION <= _MAX_GYM_VERSION_RANDINT:
                res = (0, 1, 0, 0, 0, 4)
                res = (5, 5, 0, 0, 0, 9)
                res = (0, 9, 0, 0, 0, 7)
                res = (7, 5, 0, 0, 0, 8)
            else:
                res = (2, 9, 0, 0, 0, 1)
                res = (7, 5, 0, 0, 0, 8)
        assert np.all(
            act_gym["redispatch"] == res
        ), f'wrong action: {act_gym["redispatch"]}'
        assert isinstance(env_gym.action_space["redispatch"], self._aux_MultiDiscrete_cls())
        # second converter, chained on the same key: MultiDiscrete -> Tuple
        env_gym.action_space = env_gym.action_space.reencode_space(
            "redispatch", self._aux_MultiToTupleConverter_cls()()
        )
        assert isinstance(env_gym.action_space["redispatch"], self._aux_Tuple_cls())
        # and now test that the redispatching is properly computed
        env_gym.action_space.seed(0)
        # TODO this doesn't work... because when you seed it appears to use the same seed on all
        # on all the "sub part" of the Tuple.. Thanks gym !
        # see https://github.com/openai/gym/issues/2166
        act_gym = env_gym.action_space.sample()
        if _IS_WINDOWS:
            res_tup = (6, 5, 0, 0, 0, 9)
            res_disp = np.array([0.833333, 0.0, 0.0, 0.0, 0.0, 10.0], dtype=dt_float)
        else:
            # it's linux
            if GYM_VERSION <= _MAX_GYM_VERSION_RANDINT:
                res_tup = (1, 4, 0, 0, 0, 8)
                res_disp = np.array(
                    [-3.3333333, -1.666667, 0.0, 0.0, 0.0, 7.5], dtype=dt_float
                )
                res_tup = (7, 4, 0, 0, 0, 0)
                res_disp = np.array(
                    [1.666667, -1.666667, 0.0, 0.0, 0.0, -12.5], dtype=dt_float
                )
                res_tup = (8, 5, 0, 0, 0, 8)
                res_tup = (8, 2, 0, 0, 0, 9)
                res_disp = np.array(
                    [2.5, 0.0, 0.0, 0.0, 0.0, 7.5], dtype=dt_float
                )
                res_disp = np.array(
                    [2.5, -5., 0., 0., 0., 10.], dtype=dt_float
                )
            else:
                res_tup = (8, 9, 0, 0, 0, 2)
                res_tup = (8, 2, 0, 0, 0, 9)
                res_disp = np.array(
                    [2.5, -5., 0., 0., 0., 10.], dtype=dt_float
                )
        assert (
            act_gym["redispatch"] == res_tup
        ), f'error. redispatch is {act_gym["redispatch"]}'
        # converting back to a grid2op action must give the de-discretized values
        act_glop = env_gym.action_space.from_gym(act_gym)
        assert np.array_equal(
            act_glop._redispatch, res_disp
        ), f"error. redispatch is {act_glop._redispatch}"
        act_gym = env_gym.action_space.sample()
        if _IS_WINDOWS:
            res_tup = (5, 8, 0, 0, 0, 10)
            res_disp = np.array([0.0, 5.0, 0.0, 0.0, 0.0, 12.5], dtype=dt_float)
        else:
            # it's linux
            if GYM_VERSION <= _MAX_GYM_VERSION_RANDINT:
                res_tup = (3, 9, 0, 0, 0, 0)
                res_disp = np.array(
                    [-1.6666665, 6.666666, 0.0, 0.0, 0.0, -12.5], dtype=dt_float
                )
                res_tup = (8, 6, 0, 0, 0, 0)
                res_disp = np.array(
                    [2.5, 1.666666, 0.0, 0.0, 0.0, -12.5], dtype=dt_float
                )
                res_tup = (7, 6, 0, 0, 0, 4)
                res_disp = np.array(
                    [1.666667, 1.666666, 0.0, 0.0, 0.0, -2.5], dtype=dt_float
                )
                res_tup = (3, 8, 0, 0, 0, 8)
                res_disp = np.array(
                    [-1.6666665, 5., 0.0, 0.0, 0.0, 7.5], dtype=dt_float
                )
            else:
                res_tup = (4, 2, 0, 0, 0, 5)
                res_tup = (3, 8, 0, 0, 0, 8)
                res_disp = np.array(
                    [-1.6666665, 5., 0.0, 0.0, 0.0, 7.5], dtype=dt_float
                )
        assert (
            act_gym["redispatch"] == res_tup
        ), f'error. redispatch is {act_gym["redispatch"]}'
        act_glop = env_gym.action_space.from_gym(act_gym)
        assert np.allclose(
            act_glop._redispatch, res_disp, atol=1e-5
        ), f"error. redispatch is {act_glop._redispatch}"
def test_all_together(self):
"""combine all test above (for the action space)"""
env_gym = self._aux_GymEnv_cls()(self.env)
env_gym.action_space = env_gym.action_space.ignore_attr("set_bus").ignore_attr(
"set_line_status"
)
env_gym.action_space = env_gym.action_space.reencode_space(
"redispatch", self._aux_ContinuousToDiscreteConverter_cls()(nb_bins=11)
)
env_gym.action_space = env_gym.action_space.reencode_space(
"change_bus", self._aux_MultiToTupleConverter_cls()()
)
env_gym.action_space = env_gym.action_space.reencode_space(
"change_line_status", self._aux_MultiToTupleConverter_cls()()
)
env_gym.action_space = env_gym.action_space.reencode_space(
"redispatch", self._aux_MultiToTupleConverter_cls()()
)
assert isinstance(env_gym.action_space["redispatch"], self._aux_Tuple_cls())
assert isinstance(env_gym.action_space["change_bus"], self._aux_Tuple_cls())
assert isinstance(env_gym.action_space["change_line_status"], self._aux_Tuple_cls())
act_gym = env_gym.action_space.sample()
act_glop = env_gym.action_space.from_gym(act_gym)
act_gym2 = env_gym.action_space.to_gym(act_glop)
act_glop2 = env_gym.action_space.from_gym(act_gym2)
assert act_gym in env_gym.action_space
assert act_gym2 in env_gym.action_space
assert isinstance(act_gym["redispatch"], tuple)
assert isinstance(act_gym["change_bus"], tuple)
assert isinstance(act_gym["change_line_status"], tuple)
# check the gym actions are the same
for k in act_gym.keys():
assert np.array_equal(act_gym[k], act_gym2[k]), f"error for {k}"
for k in act_gym2.keys():
assert np.array_equal(act_gym[k], act_gym2[k]), f"error for {k}"
# check grid2op action are the same
assert act_glop == act_glop2
def test_low_high_obs_space(self):
"""test the observation space, by default, is properly converted"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
env = grid2op.make(
"educ_case14_storage", test=True, _add_to_name="TestGymCompatModule"
)
env.seed(0)
env.reset() # seed part !
env_gym = self._aux_GymEnv_cls()(env)
assert "a_ex" in env_gym.observation_space.spaces
assert np.array_equal(
env_gym.observation_space["a_ex"].low,
np.zeros(
shape=(env.n_line,),
),
)
assert "a_or" in env_gym.observation_space.spaces
assert np.array_equal(
env_gym.observation_space["a_or"].low,
np.zeros(
shape=(env.n_line,),
),
)
key = "actual_dispatch"
assert key in env_gym.observation_space.spaces
low = np.minimum(env.gen_pmin, -env.gen_pmax)
high = np.maximum(-env.gen_pmin, +env.gen_pmax)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "curtailment"
assert key in env_gym.observation_space.spaces
low = np.zeros(shape=(env.n_gen,))
high = np.ones(shape=(env.n_gen,))
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "curtailment_limit"
assert key in env_gym.observation_space.spaces
low = np.zeros(shape=(env.n_gen,))
high = np.ones(shape=(env.n_gen,))
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
# Discrete
assert "day" in env_gym.observation_space.spaces
assert "day_of_week" in env_gym.observation_space.spaces
assert "hour_of_day" in env_gym.observation_space.spaces
assert "minute_of_hour" in env_gym.observation_space.spaces
assert "month" in env_gym.observation_space.spaces
assert "year" in env_gym.observation_space.spaces
# multi binary
assert "line_status" in env_gym.observation_space.spaces
key = "duration_next_maintenance"
assert key in env_gym.observation_space.spaces
low = np.zeros(shape=(env.n_line,), dtype=dt_int) - 1
high = np.full(shape=(env.n_line,), fill_value=2147483647, dtype=dt_int)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "gen_p"
assert key in env_gym.observation_space.spaces
low = np.zeros(shape=(env.n_gen,), dtype=dt_float)
high = 1.0 * env.gen_pmax
low -= env._tol_poly
high += env._tol_poly
# for "power losses" that are not properly computed in the original data
extra_for_losses = _compute_extra_power_for_losses(env.observation_space)
low -= extra_for_losses
high += extra_for_losses
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "gen_p_before_curtail"
low = np.zeros(shape=(env.n_gen,), dtype=dt_float)
high = 1.0 * env.gen_pmax
low -= env._tol_poly
high += env._tol_poly
# for "power losses" that are not properly computed in the original data
extra_for_losses = _compute_extra_power_for_losses(env.observation_space)
low -= extra_for_losses
high += extra_for_losses
assert key in env_gym.observation_space.spaces
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "gen_q"
assert key in env_gym.observation_space.spaces
low = np.full(shape=(env.n_gen,), fill_value=-np.inf, dtype=dt_float)
high = np.full(shape=(env.n_gen,), fill_value=np.inf, dtype=dt_float)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "gen_v"
assert key in env_gym.observation_space.spaces
low = np.zeros(shape=(env.n_gen,), dtype=dt_int)
high = np.full(shape=(env.n_gen,), fill_value=np.inf, dtype=dt_float)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "load_p"
assert key in env_gym.observation_space.spaces
low = np.full(shape=(env.n_load,), fill_value=-np.inf, dtype=dt_float)
high = np.full(shape=(env.n_load,), fill_value=np.inf, dtype=dt_float)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "load_q"
assert key in env_gym.observation_space.spaces
low = np.full(shape=(env.n_load,), fill_value=-np.inf, dtype=dt_float)
high = np.full(shape=(env.n_load,), fill_value=np.inf, dtype=dt_float)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "load_v"
assert key in env_gym.observation_space.spaces
low = np.full(shape=(env.n_load,), fill_value=0.0, dtype=dt_float)
high = np.full(shape=(env.n_load,), fill_value=np.inf, dtype=dt_float)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "p_ex"
assert key in env_gym.observation_space.spaces
low = np.full(shape=(env.n_line,), fill_value=-np.inf, dtype=dt_float)
high = np.full(shape=(env.n_line,), fill_value=np.inf, dtype=dt_float)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "p_or"
assert key in env_gym.observation_space.spaces
low = np.full(shape=(env.n_line,), fill_value=-np.inf, dtype=dt_float)
high = np.full(shape=(env.n_line,), fill_value=np.inf, dtype=dt_float)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "q_ex"
assert key in env_gym.observation_space.spaces
low = np.full(shape=(env.n_line,), fill_value=-np.inf, dtype=dt_float)
high = np.full(shape=(env.n_line,), fill_value=np.inf, dtype=dt_float)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "q_or"
assert key in env_gym.observation_space.spaces
low = np.full(shape=(env.n_line,), fill_value=-np.inf, dtype=dt_float)
high = np.full(shape=(env.n_line,), fill_value=np.inf, dtype=dt_float)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "rho"
assert key in env_gym.observation_space.spaces
low = np.full(shape=(env.n_line,), fill_value=0.0, dtype=dt_float)
high = np.full(shape=(env.n_line,), fill_value=np.inf, dtype=dt_float)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "storage_charge"
assert key in env_gym.observation_space.spaces
low = np.full(shape=(env.n_storage,), fill_value=0.0, dtype=dt_float)
high = env.storage_Emax
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "storage_power"
assert key in env_gym.observation_space.spaces
low = -env.storage_max_p_absorb
high = env.storage_max_p_prod
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "storage_power_target"
assert key in env_gym.observation_space.spaces
low = -env.storage_max_p_absorb
high = env.storage_max_p_prod
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "target_dispatch"
assert key in env_gym.observation_space.spaces
low = -env.gen_pmax
high = env.gen_pmax
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "time_before_cooldown_line"
assert key in env_gym.observation_space.spaces
low = np.zeros(env.n_line, dtype=dt_int)
high = np.zeros(env.n_line, dtype=dt_int) + max(
env.parameters.NB_TIMESTEP_RECONNECTION,
env.parameters.NB_TIMESTEP_COOLDOWN_LINE,
env._oppSpace.attack_max_duration,
)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "time_before_cooldown_sub"
assert key in env_gym.observation_space.spaces
low = np.zeros(env.n_sub, dtype=dt_int)
high = (
np.zeros(env.n_sub, dtype=dt_int) + env.parameters.NB_TIMESTEP_COOLDOWN_SUB
)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "time_next_maintenance"
assert key in env_gym.observation_space.spaces
low = np.zeros(env.n_line, dtype=dt_int) - 1
high = np.full(env.n_line, fill_value=2147483647, dtype=dt_int)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}: {env_gym.observation_space[key].low}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key} {env_gym.observation_space[key].high}"
key = "timestep_overflow"
assert key in env_gym.observation_space.spaces
low = np.full(env.n_line, fill_value=-2147483648, dtype=dt_int)
high = np.full(env.n_line, fill_value=2147483647, dtype=dt_int)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key} {env_gym.observation_space[key].low}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key} {env_gym.observation_space[key].high}"
key = "topo_vect"
assert key in env_gym.observation_space.spaces
low = np.zeros(env.dim_topo, dtype=dt_int) - 1
high = np.zeros(env.dim_topo, dtype=dt_int) + 2
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "v_or"
assert key in env_gym.observation_space.spaces
low = np.full(shape=(env.n_line,), fill_value=0.0, dtype=dt_float)
high = np.full(shape=(env.n_line,), fill_value=np.inf, dtype=dt_float)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
key = "v_ex"
assert key in env_gym.observation_space.spaces
low = np.full(shape=(env.n_line,), fill_value=0.0, dtype=dt_float)
high = np.full(shape=(env.n_line,), fill_value=np.inf, dtype=dt_float)
assert np.array_equal(
env_gym.observation_space[key].low, low
), f"issue for {key}"
assert np.array_equal(
env_gym.observation_space[key].high, high
), f"issue for {key}"
# TODO add tests for the alarm feature and curtailment and storage (if not present already)
class _AuxTestBoxGymObsSpace:
    # Mixin of tests for BoxGymObsSpace: converting a grid2op observation space
    # into a single flat gym ``Box``. Concrete subclasses (elsewhere in the file)
    # provide the `_aux_*_cls` factories and `_skip_if_no_gym`.

    def setUp(self) -> None:
        # skip everything if gym / gymnasium is not available
        self._skip_if_no_gym()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(
                "educ_case14_storage",
                test=True,
                action_class=PlayableAction,
                _add_to_name="TestBoxGymObsSpace",
            )
        self.env.seed(0)
        self.env.reset()  # seed part !
        self.obs_env = self.env.reset()
        self.env_gym = self._aux_GymEnv_cls()(self.env)

    def test_assert_raises_creation(self):
        # building a BoxGymObsSpace from the already-converted gym space
        # (instead of the grid2op observation space) must fail
        with self.assertRaises(RuntimeError):
            self.env_gym.observation_space = self._aux_BoxGymObsSpace_cls()(
                self.env_gym.observation_space
            )

    def test_can_create(self):
        """Create a Box observation space with scaling and a custom funct entry."""
        kept_attr = [
            "gen_p",
            "load_p",
            "topo_vect",
            "rho",
            "actual_dispatch",
            "connectivity_matrix",
        ]
        self.env_gym.observation_space = self._aux_BoxGymObsSpace_cls()(
            self.env.observation_space,
            attr_to_keep=kept_attr,
            divide={
                "gen_p": self.env.gen_pmax,
                "load_p": self.obs_env.load_p,
                "actual_dispatch": self.env.gen_pmax,
            },
            # functs entries are (callable, low, high, shape, dtype)
            functs={
                "connectivity_matrix": (
                    lambda grid2obs: grid2obs.connectivity_matrix().flatten(),
                    0.0,
                    1.0,
                    None,
                    None,
                )
            },
        )
        assert isinstance(self.env_gym.observation_space, self._aux_Box_cls())
        obs_gym, info = self.env_gym.reset()
        assert obs_gym in self.env_gym.observation_space
        assert self.env_gym.observation_space._attr_to_keep == sorted(kept_attr)
        # 3583 = size of the kept attributes + flattened connectivity matrix
        # (specific to educ_case14_storage)
        assert len(obs_gym) == 3583

    def test_can_create_int(self):
        """Keeping only integer attributes must yield an integer Box."""
        kept_attr = ["topo_vect", "line_status"]
        self.env_gym.observation_space = self._aux_BoxGymObsSpace_cls()(
            self.env.observation_space, attr_to_keep=kept_attr
        )
        obs_gym, info = self.env_gym.reset()
        assert obs_gym in self.env_gym.observation_space
        assert self.env_gym.observation_space._attr_to_keep == sorted(kept_attr)
        assert len(obs_gym) == 79
        assert obs_gym.dtype == dt_int

    def test_scaling(self):
        """Check the effect of `divide` and `subtract` on the converted values."""
        kept_attr = ["gen_p", "load_p"]
        # first test, with nothing
        observation_space = self._aux_BoxGymObsSpace_cls()(
            self.env.observation_space, attr_to_keep=kept_attr
        )
        self.env_gym.observation_space = observation_space
        obs_gym, info = self.env_gym.reset()
        assert obs_gym in observation_space
        assert observation_space._attr_to_keep == kept_attr
        assert len(obs_gym) == 17
        # raw MW values: large magnitudes expected
        assert np.abs(obs_gym).max() >= 80
        # second test: just scaling (divide)
        observation_space = self._aux_BoxGymObsSpace_cls()(
            self.env.observation_space,
            attr_to_keep=kept_attr,
            divide={"gen_p": self.env.gen_pmax, "load_p": self.obs_env.load_p},
        )
        self.env_gym.observation_space = observation_space
        obs_gym, info = self.env_gym.reset()
        assert obs_gym in observation_space
        assert observation_space._attr_to_keep == kept_attr
        assert len(obs_gym) == 17
        # scaled values should be around 1 in magnitude
        assert np.abs(obs_gym).max() <= 2
        assert np.abs(obs_gym).max() >= 1.0
        # third step: center and reduce too
        observation_space = self._aux_BoxGymObsSpace_cls()(
            self.env.observation_space,
            attr_to_keep=kept_attr,
            divide={"gen_p": self.env.gen_pmax, "load_p": self.obs_env.load_p},
            subtract={"gen_p": 100.0, "load_p": 100.0},
        )
        self.env_gym.observation_space = observation_space
        obs_gym, info = self.env_gym.reset()
        assert obs_gym in observation_space
        assert observation_space._attr_to_keep == kept_attr
        assert len(obs_gym) == 17
        # the substract are calibrated so that the maximum is really close to 0
        assert obs_gym.max() <= 0
        assert obs_gym.max() >= -0.5

    def test_functs(self):
        """test the functs keyword argument"""
        # test i can make something with a funct keyword
        kept_attr = [
            "gen_p",
            "load_p",
            "topo_vect",
            "rho",
            "actual_dispatch",
            "connectivity_matrix",
        ]
        self.env_gym.observation_space = self._aux_BoxGymObsSpace_cls()(
            self.env.observation_space,
            attr_to_keep=kept_attr,
            divide={
                "gen_p": self.env.gen_pmax,
                "load_p": self.obs_env.load_p,
                "actual_dispatch": self.env.gen_pmax,
            },
            functs={
                "connectivity_matrix": (
                    lambda grid2obs: grid2obs.connectivity_matrix().flatten(),
                    0.0,
                    1.0,
                    None,
                    None,
                )
            },
        )
        obs_gym, info = self.env_gym.reset()
        assert obs_gym in self.env_gym.observation_space
        assert self.env_gym.observation_space._attr_to_keep == sorted(kept_attr)
        assert len(obs_gym) == 3583
        # test the stuff crashes if not used properly
        # bad shape provided
        with self.assertRaises(RuntimeError):
            tmp = self._aux_BoxGymObsSpace_cls()(
                self.env.observation_space,
                attr_to_keep=kept_attr,
                divide={
                    "gen_p": self.env.gen_pmax,
                    "load_p": self.obs_env.load_p,
                    "actual_dispatch": self.env.gen_pmax,
                },
                functs={
                    "connectivity_matrix": (
                        lambda grid2obs: grid2obs.connectivity_matrix().flatten(),
                        None,
                        None,
                        22,
                        None,
                    )
                },
            )
        # wrong input (tuple too short)
        with self.assertRaises(RuntimeError):
            tmp = self._aux_BoxGymObsSpace_cls()(
                self.env.observation_space,
                attr_to_keep=kept_attr,
                divide={
                    "gen_p": self.env.gen_pmax,
                    "load_p": self.obs_env.load_p,
                    "actual_dispatch": self.env.gen_pmax,
                },
                functs={
                    "connectivity_matrix": (
                        lambda grid2obs: grid2obs.connectivity_matrix().flatten(),
                        None,
                        None,
                        22,
                    )
                },
            )
        # function cannot be called (first element is an array, not a callable)
        with self.assertRaises(RuntimeError):
            tmp = self._aux_BoxGymObsSpace_cls()(
                self.env.observation_space,
                attr_to_keep=kept_attr,
                divide={
                    "gen_p": self.env.gen_pmax,
                    "load_p": self.obs_env.load_p,
                    "actual_dispatch": self.env.gen_pmax,
                },
                functs={
                    "connectivity_matrix": (
                        self.obs_env.connectivity_matrix().flatten(),
                        None,
                        None,
                        None,
                        None,
                    )
                },
            )
        # low not correct
        with self.assertRaises(RuntimeError):
            tmp = self._aux_BoxGymObsSpace_cls()(
                self.env.observation_space,
                attr_to_keep=kept_attr,
                divide={
                    "gen_p": self.env.gen_pmax,
                    "load_p": self.obs_env.load_p,
                    "actual_dispatch": self.env.gen_pmax,
                },
                functs={
                    "connectivity_matrix": (
                        lambda grid2obs: grid2obs.connectivity_matrix().flatten(),
                        0.5,
                        1.0,
                        None,
                        None,
                    )
                },
            )
        # high not correct
        with self.assertRaises(RuntimeError):
            tmp = self._aux_BoxGymObsSpace_cls()(
                self.env.observation_space,
                attr_to_keep=kept_attr,
                divide={
                    "gen_p": self.env.gen_pmax,
                    "load_p": self.obs_env.load_p,
                    "actual_dispatch": self.env.gen_pmax,
                },
                functs={
                    "connectivity_matrix": (
                        lambda grid2obs: grid2obs.connectivity_matrix().flatten(),
                        0.0,
                        0.9,
                        None,
                        None,
                    )
                },
            )
        # not added in attr_to_keep
        with self.assertRaises(RuntimeError):
            tmp = self._aux_BoxGymObsSpace_cls()(
                self.env.observation_space,
                attr_to_keep=["gen_p", "load_p", "topo_vect", "rho", "actual_dispatch"],
                divide={
                    "gen_p": self.env.gen_pmax,
                    "load_p": self.obs_env.load_p,
                    "actual_dispatch": self.env.gen_pmax,
                },
                functs={
                    "connectivity_matrix": (
                        lambda grid2obs: grid2obs.connectivity_matrix().flatten(),
                        0.0,
                        1.0,
                        None,
                        None,
                    )
                },
            )
        # another normal function
        self.env_gym.observation_space = self._aux_BoxGymObsSpace_cls()(
            self.env.observation_space,
            attr_to_keep=["connectivity_matrix", "log_load"],
            functs={
                "connectivity_matrix": (
                    lambda grid2opobs: grid2opobs.connectivity_matrix().flatten(),
                    0.0,
                    1.0,
                    None,
                    None,
                ),
                "log_load": (
                    lambda grid2opobs: np.log(grid2opobs.load_p + 1.0),
                    None,
                    10.0,
                    None,
                    None,
                ),
            },
        )
class _AuxTestBoxGymActSpace:
    def setUp(self) -> None:
        # skip everything if gym / gymnasium is not available
        self._skip_if_no_gym()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(
                "educ_case14_storage",
                test=True,
                action_class=PlayableAction,
                _add_to_name="TestBoxGymActSpace",
            )
        self.env.seed(0)
        self.env.reset()  # seed part !
        self.obs_env = self.env.reset()
        self.env_gym = self._aux_GymEnv_cls()(self.env)
def test_assert_raises_creation(self):
with self.assertRaises(RuntimeError):
self.env_gym.action_space = self._aux_BoxGymActSpace_cls()(self.env_gym.action_space)
    def test_can_create(self):
        """test a simple creation"""
        kept_attr = ["set_bus", "change_bus", "redispatch"]
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env_gym.action_space = self._aux_BoxGymActSpace_cls()(
                self.env.action_space, attr_to_keep=kept_attr
            )
        assert isinstance(self.env_gym.action_space, self._aux_Box_cls())
        self.env_gym.action_space.seed(0)
        grid2op_act = self.env_gym.action_space.from_gym(
            self.env_gym.action_space.sample()
        )
        assert isinstance(grid2op_act, PlayableAction)
        assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
        # 121 = 59 (set_bus) + 59 (change_bus) + 3 (redispatch) for educ_case14_storage
        assert len(self.env_gym.action_space.sample()) == 121
        # check that all types of sub-action actually get modified by sampling
        ok_setbus = False
        ok_change_bus = False
        ok_redisp = False
        for _ in range(10):
            grid2op_act = self.env_gym.action_space.from_gym(
                self.env_gym.action_space.sample()
            )
            ok_setbus = ok_setbus or np.any(grid2op_act.set_bus != 0)
            ok_change_bus = ok_change_bus or np.any(grid2op_act.change_bus)
            ok_redisp = ok_redisp or np.any(grid2op_act.redispatch != 0.0)
        if (not ok_setbus) or (not ok_change_bus) or (not ok_redisp):
            raise RuntimeError("Some property of the actions are not modified !")
    def test_all_attr_modified(self):
        """test all the attribute of the action can be modified"""
        # expected flattened gym vector size per attribute
        # (values are specific to educ_case14_storage)
        all_attr = {
            "set_line_status": 20,
            "change_line_status": 20,
            "set_bus": 59,
            "change_bus": 59,
            "redispatch": 3,
            "set_storage": 2,
            "curtail": 3,
            "curtail_mw": 3,
        }
        # per-attribute predicate: "the sampled grid2op action modifies this attribute"
        func_check = {
            "set_line_status": lambda act: np.any(act.line_set_status != 0),
            "change_line_status": lambda act: np.any(act.line_change_status),
            "set_bus": lambda act: np.any(act.set_bus != 0.0),
            "change_bus": lambda act: np.any(act.change_bus),
            "redispatch": lambda act: np.any(act.redispatch != 0.0),
            "set_storage": lambda act: np.any(act.set_storage != 0.0),
            "curtail": lambda act: np.any(act.curtail != 1.0),
            "curtail_mw": lambda act: np.any(act.curtail != 1.0),
        }
        for attr_nm in sorted(all_attr.keys()):
            # build an action space keeping only this attribute
            kept_attr = [attr_nm]
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                self.env_gym.action_space = self._aux_BoxGymActSpace_cls()(
                    self.env.action_space, attr_to_keep=kept_attr
                )
            self.env_gym.action_space.seed(0)
            grid2op_act = self.env_gym.action_space.from_gym(
                self.env_gym.action_space.sample()
            )
            assert isinstance(grid2op_act, PlayableAction)
            assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
            assert (
                len(self.env_gym.action_space.sample()) == all_attr[attr_nm]
            ), f"wrong size for {attr_nm}"
            self.env_gym.action_space.seed(0)
            # check that all types
            ok_ = func_check[attr_nm](grid2op_act)
            if not ok_:
                raise RuntimeError(
                    f"Some property of the actions are not modified for attr {attr_nm}"
                )
    def test_all_attr_modified_when_float(self):
        """test all the attribute of the action can be modified when the action is converted to a float"""
        # "redispatch" is always kept alongside the tested attribute, hence the
        # extra `redisp_size` components in the expected sizes below
        redisp_size = 3
        all_attr = {
            "set_line_status": 20 + redisp_size,
            "change_line_status": 20 + redisp_size,
            "set_bus": 59 + redisp_size,
            "change_bus": 59 + redisp_size,
            "redispatch": redisp_size + redisp_size,
            "set_storage": 2 + redisp_size,
            "curtail": 3 + redisp_size,
            "curtail_mw": 3 + redisp_size,
        }
        # predicates check the attribute is modified somewhere but (for the
        # discrete attributes) not everywhere, i.e. float rounding is sensible
        func_check = {
            "set_line_status": lambda act: np.any(act.line_set_status != 0)
            and ~np.all(act.line_set_status != 0),
            "change_line_status": lambda act: np.any(act.line_change_status)
            and ~np.all(act.line_change_status),
            "set_bus": lambda act: np.any(act.set_bus != 0.0)
            and ~np.all(act.set_bus != 0.0),
            "change_bus": lambda act: np.any(act.change_bus)
            and ~np.all(act.change_bus),
            "redispatch": lambda act: np.any(act.redispatch != 0.0),
            "set_storage": lambda act: np.any(act.set_storage != 0.0),
            "curtail": lambda act: np.any(act.curtail != 1.0),
            "curtail_mw": lambda act: np.any(act.curtail != 1.0),
        }
        for attr_nm in sorted(all_attr.keys()):
            kept_attr = [attr_nm, "redispatch"]
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                self.env_gym.action_space = self._aux_BoxGymActSpace_cls()(
                    self.env.action_space, attr_to_keep=kept_attr
                )
            self.env_gym.action_space.seed(0)
            grid2op_act = self.env_gym.action_space.from_gym(
                self.env_gym.action_space.sample()
            )
            assert isinstance(grid2op_act, PlayableAction)
            assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
            assert (
                len(self.env_gym.action_space.sample()) == all_attr[attr_nm]
            ), f"wrong size for {attr_nm}"
            self.env_gym.action_space.seed(0)
            # check that all types
            ok_ = func_check[attr_nm](grid2op_act)
            if not ok_:
                raise RuntimeError(
                    f"Some property of the actions are not modified for attr {attr_nm}"
                )
    def test_curtailment_dispatch(self):
        """test curtail action will have no effect on non renewable, and dispatch action no effect
        on non dispatchable
        """
        kept_attr = ["curtail", "redispatch"]
        self.env_gym.action_space.close()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env_gym.action_space = self._aux_BoxGymActSpace_cls()(
                self.env.action_space, attr_to_keep=kept_attr
            )
        self.env_gym.action_space.seed(0)
        grid2op_act = self.env_gym.action_space.from_gym(
            self.env_gym.action_space.sample()
        )
        assert isinstance(grid2op_act, PlayableAction)
        assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
        # 6 = 3 (curtail) + 3 (redispatch) for educ_case14_storage
        assert len(self.env_gym.action_space.sample()) == 6, "wrong size"
        self.env_gym.action_space.seed(0)
        for _ in range(10):
            grid2op_act = self.env_gym.action_space.from_gym(
                self.env_gym.action_space.sample()
            )
            # no redispatch on non-dispatchable generators
            assert np.all(
                grid2op_act.redispatch[~grid2op_act.gen_redispatchable] == 0.0
            )
            # no curtailment on non-renewable generators
            # (NOTE(review): -1.0 presumably encodes "no curtailment" — confirm)
            assert np.all(grid2op_act.curtail[~grid2op_act.gen_renewable] == -1.0)
    def test_can_create_int(self):
        """test that if I use only discrete value, it gives me an array with discrete values"""
        kept_attr = ["change_line_status", "set_bus"]
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env_gym.action_space = self._aux_BoxGymActSpace_cls()(
                self.env.action_space, attr_to_keep=kept_attr
            )
        self.env_gym.action_space.seed(0)
        act_gym = self.env_gym.action_space.sample()
        assert self.env_gym.action_space._attr_to_keep == kept_attr
        # only discrete attributes kept -> integer Box
        assert act_gym.dtype == dt_int
        assert len(act_gym) == 79
        kept_attr = ["change_line_status", "set_bus", "redispatch"]
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env_gym.action_space = self._aux_BoxGymActSpace_cls()(
                self.env.action_space, attr_to_keep=kept_attr
            )
        self.env_gym.action_space.seed(0)
        act_gym = self.env_gym.action_space.sample()
        assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
        # adding a continuous attribute ("redispatch") promotes the Box to float
        assert act_gym.dtype == dt_float
        assert len(act_gym) == 79 + 3
    def test_scaling(self):
        """test the add and multiply stuff

        Three stages: (1) raw bounds, (2) divide by ``multiply`` so bounds
        become [-1, 1], (3) additionally subtract ``add`` which shifts the
        bounds to [-2, 0]. The resulting grid2op redispatch must be identical
        across stages (same seed), and never ambiguous.
        """
        kept_attr = ["redispatch"]
        # first test, with nothing
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env_gym.action_space = self._aux_BoxGymActSpace_cls()(
                self.env.action_space, attr_to_keep=kept_attr
            )
        self.env_gym.action_space.seed(0)
        gen_redisp = self.env.gen_redispatchable
        act_gym = self.env_gym.action_space.sample()
        # unscaled bounds are the physical ramp limits of dispatchable gens
        assert np.array_equal(
            self.env_gym.action_space.low, -self.env.gen_max_ramp_down[gen_redisp]
        )
        assert np.array_equal(
            self.env_gym.action_space.high, self.env.gen_max_ramp_up[gen_redisp]
        )
        assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
        assert len(act_gym) == 3
        assert np.any(act_gym >= 1.0)
        assert np.any(act_gym <= -1.0)
        grid2op_act = self.env_gym.action_space.from_gym(act_gym)
        assert not grid2op_act.is_ambiguous()[0]
        # second test: just scaling (divide)
        self.env_gym.action_space.close()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env_gym.action_space = self._aux_BoxGymActSpace_cls()(
                self.env.action_space,
                attr_to_keep=kept_attr,
                multiply={"redispatch": self.env.gen_max_ramp_up[gen_redisp]},
            )
        self.env_gym.action_space.seed(0)
        # after division by the ramp-up, the gym bounds are normalized
        assert np.array_equal(self.env_gym.action_space.low, -np.ones(3))
        assert np.array_equal(self.env_gym.action_space.high, np.ones(3))
        act_gym = self.env_gym.action_space.sample()
        assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
        assert len(act_gym) == 3
        assert np.all(act_gym <= 1.0)
        assert np.all(act_gym >= -1.0)
        grid2op_act2 = self.env_gym.action_space.from_gym(act_gym)
        assert not grid2op_act2.is_ambiguous()[0]
        # same seed -> same underlying grid2op action as the unscaled case
        assert np.all(np.isclose(grid2op_act.redispatch, grid2op_act2.redispatch))
        # third step: center and reduce too
        self.env_gym.action_space.close()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env_gym.action_space = self._aux_BoxGymActSpace_cls()(
                self.env.action_space,
                attr_to_keep=kept_attr,
                multiply={"redispatch": self.env.gen_max_ramp_up[gen_redisp]},
                add={"redispatch": self.env.gen_max_ramp_up[gen_redisp]},
            )
        # the extra "add" shifts the normalized bounds by -1
        assert np.array_equal(self.env_gym.action_space.low, -np.ones(3) - 1.0)
        assert np.array_equal(self.env_gym.action_space.high, np.ones(3) - 1.0)
        self.env_gym.action_space.seed(0)
        act_gym = self.env_gym.action_space.sample()
        assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
        assert len(act_gym) == 3
        assert np.all(act_gym <= 0.0)
        assert np.all(act_gym >= -2.0)
        grid2op_act3 = self.env_gym.action_space.from_gym(act_gym)
        assert np.all(grid2op_act3.redispatch[~grid2op_act3.gen_redispatchable] == 0.0)
        assert not grid2op_act3.is_ambiguous()[0]
        assert np.all(np.isclose(grid2op_act.redispatch, grid2op_act3.redispatch))
class _AuxTestMultiDiscreteGymActSpace:
    """Mixin exercising the MultiDiscrete gym action-space wrapper.

    Concrete test classes provide ``_aux_GymEnv_cls``,
    ``_aux_MultiDiscreteActSpace_cls``, ``_aux_MultiDiscrete_cls`` and
    ``_skip_if_no_gym`` (presumably one variant per gym/gymnasium flavor —
    the factories are defined outside this chunk, TODO confirm).
    """
    def setUp(self) -> None:
        self._skip_if_no_gym()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(
                "educ_case14_storage",
                test=True,
                action_class=PlayableAction,
                _add_to_name="TestMultiDiscreteGymActSpace",
            )
        self.env.seed(0)
        self.env.reset()  # seed part !
        self.obs_env = self.env.reset()
        self.env_gym = self._aux_GymEnv_cls()(self.env)
    def test_assert_raises_creation(self):
        # the wrapper must refuse to wrap an already-wrapped (gym) space
        with self.assertRaises(RuntimeError):
            self.env_gym.action_space = self._aux_MultiDiscreteActSpace_cls()(self.env_gym.action_space)
    def test_can_create(self):
        """test a simple creation"""
        kept_attr = ["set_bus", "change_bus", "redispatch"]
        del self.env_gym.action_space
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env_gym.action_space = self._aux_MultiDiscreteActSpace_cls()(
                self.env.action_space, attr_to_keep=kept_attr
            )
        assert isinstance(self.env_gym.action_space, self._aux_MultiDiscrete_cls())
        self.env_gym.action_space.seed(0)
        gym_act = self.env_gym.action_space.sample()
        grid2op_act = self.env_gym.action_space.from_gym(gym_act)
        assert isinstance(grid2op_act, PlayableAction)
        assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
        # 121 = 59 (set_bus) + 59 (change_bus) + 3 (redispatch) components
        assert len(self.env_gym.action_space.sample()) == 121
        # check that all types
        ok_setbus = False
        ok_change_bus = False
        ok_redisp = False
        for _ in range(10):
            grid2op_act = self.env_gym.action_space.from_gym(
                self.env_gym.action_space.sample()
            )
            ok_setbus = ok_setbus or np.any(grid2op_act.set_bus != 0)
            ok_change_bus = ok_change_bus or np.any(grid2op_act.change_bus)
            ok_redisp = ok_redisp or np.any(grid2op_act.redispatch != 0.0)
        if (not ok_setbus) or (not ok_change_bus) or (not ok_redisp):
            raise RuntimeError("Some property of the actions are not modified !")
    def test_use_bins(self):
        """test the binarized version work"""
        kept_attr = ["set_bus", "change_bus", "redispatch"]
        for nb_bin in [3, 6, 9, 12]:
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                self.env_gym.action_space = self._aux_MultiDiscreteActSpace_cls()(
                    self.env.action_space,
                    attr_to_keep=kept_attr,
                    nb_bins={"redispatch": nb_bin},
                )
            self.env_gym.action_space.seed(0)
            grid2op_act = self.env_gym.action_space.from_gym(
                self.env_gym.action_space.sample()
            )
            assert isinstance(grid2op_act, PlayableAction)
            assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
            assert len(self.env_gym.action_space.sample()) == 121
            # components 59:62 are the 3 redispatch slots; each gets nb_bin bins
            assert np.all(
                self.env_gym.action_space.nvec[59:62] == [nb_bin, nb_bin, nb_bin]
            )
            ok_setbus = False
            ok_change_bus = False
            ok_redisp = False
            for _ in range(10):
                grid2op_act = self.env_gym.action_space.from_gym(
                    self.env_gym.action_space.sample()
                )
                ok_setbus = ok_setbus or np.any(grid2op_act.set_bus != 0)
                ok_change_bus = ok_change_bus or np.any(grid2op_act.change_bus)
                ok_redisp = ok_redisp or np.any(grid2op_act.redispatch != 0.0)
            if (not ok_setbus) or (not ok_change_bus) or (not ok_redisp):
                raise RuntimeError("Some property of the actions are not modified !")
    def test_use_substation(self):
        """test the keyword sub_set_bus, sub_change_bus"""
        # one MultiDiscrete component per substation (14 here); the nvec
        # values below are the number of valid topologies per substation
        kept_attr = ["sub_set_bus"]
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env_gym.action_space = self._aux_MultiDiscreteActSpace_cls()(
                self.env.action_space, attr_to_keep=kept_attr
            )
        self.env_gym.action_space.seed(0)
        grid2op_act = self.env_gym.action_space.from_gym(
            self.env_gym.action_space.sample()
        )
        assert isinstance(grid2op_act, PlayableAction)
        assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
        assert len(self.env_gym.action_space.sample()) == 14
        assert np.all(
            self.env_gym.action_space.nvec
            == [4, 30, 6, 32, 16, 114, 5, 1, 16, 4, 4, 4, 8, 4]
        )
        # assert that i can "do nothing" in all substation
        for sub_id, li_act in enumerate(
            self.env_gym.action_space._sub_modifiers[kept_attr[0]]
        ):
            assert li_act[0] == self.env.action_space()
        ok_setbus = False
        for _ in range(10):
            grid2op_act = self.env_gym.action_space.from_gym(
                self.env_gym.action_space.sample()
            )
            ok_setbus = ok_setbus or np.any(grid2op_act.set_bus != 0)
        if not ok_setbus:
            raise RuntimeError("Some property of the actions are not modified !")
        kept_attr = ["sub_change_bus"]
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env_gym.action_space = self._aux_MultiDiscreteActSpace_cls()(
                self.env.action_space, attr_to_keep=kept_attr
            )
        self.env_gym.action_space.seed(0)
        grid2op_act = self.env_gym.action_space.from_gym(
            self.env_gym.action_space.sample()
        )
        assert isinstance(grid2op_act, PlayableAction)
        assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
        assert len(self.env_gym.action_space.sample()) == 14
        assert np.all(
            self.env_gym.action_space.nvec
            == [4, 32, 8, 32, 16, 128, 4, 4, 16, 4, 4, 4, 8, 4]
        )
        # assert that i can "do nothing" in all substation
        for sub_id, li_act in enumerate(
            self.env_gym.action_space._sub_modifiers[kept_attr[0]]
        ):
            assert li_act[0] == self.env.action_space()
        ok_changebus = False
        for _ in range(10):
            grid2op_act = self.env_gym.action_space.from_gym(
                self.env_gym.action_space.sample()
            )
            ok_changebus = ok_changebus or np.any(grid2op_act.change_bus)
        if not ok_changebus:
            raise RuntimeError("Some property of the actions are not modified !")
    def test_supported_keys(self):
        """test that i can modify every action with the keys"""
        # expected number of MultiDiscrete components per attribute
        dims = {
            "set_line_status": 20,
            "change_line_status": 20,
            "set_bus": 59,
            "change_bus": 59,
            "sub_set_bus": 14,
            "sub_change_bus": 14,
            "one_sub_set": 1,
            "one_sub_change": 1,
            "redispatch": 3,
            "curtail": 3,
            "curtail_mw": 3,
            "set_storage": 2,
        }
        # per-attribute predicate: the converted action modifies *something*
        # (and for bus/line attrs, not *everything*)
        func_check = {
            "set_line_status": lambda act: np.any(act.line_set_status != 0)
            and ~np.all(act.line_set_status != 0),
            "change_line_status": lambda act: np.any(act.line_change_status)
            and ~np.all(act.line_change_status),
            "set_bus": lambda act: np.any(act.set_bus != 0.0)
            and ~np.all(act.set_bus != 0.0),
            "change_bus": lambda act: np.any(act.change_bus)
            and ~np.all(act.change_bus),
            "redispatch": lambda act: np.any(act.redispatch != 0.0),
            "set_storage": lambda act: np.any(act.set_storage != 0.0),
            "curtail": lambda act: np.any(act.curtail != 1.0),
            "curtail_mw": lambda act: np.any(act.curtail != 1.0),
            "sub_change_bus": lambda act: np.any(act.change_bus)
            and ~np.all(act.change_bus),
            "sub_set_bus": lambda act: np.any(act.set_bus != 0.0)
            and ~np.all(act.set_bus != 0.0),
            "one_sub_set": lambda act: np.any(act.set_bus != 0.0)
            and ~np.all(act.set_bus != 0.0),
            "one_sub_change": lambda act: np.any(act.change_bus)
            and ~np.all(act.change_bus),
        }
        for attr_nm in sorted(dims.keys()):
            kept_attr = [attr_nm]
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                self.env_gym.action_space = self._aux_MultiDiscreteActSpace_cls()(
                    self.env.action_space, attr_to_keep=kept_attr
                )
            # NOTE(review): with a single attribute these two asserts are
            # equivalent (sorted of a 1-element list is itself)
            assert self.env_gym.action_space._attr_to_keep == kept_attr
            assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
            assert (
                len(self.env_gym.action_space.sample()) == dims[attr_nm]
            ), f"wrong size for {attr_nm}"
            self.env_gym.action_space.seed(0)
            assert (
                len(self.env_gym.action_space.sample()) == dims[attr_nm]
            ), f"wrong size for {attr_nm}"
            grid2op_act = self.env_gym.action_space.from_gym(
                self.env_gym.action_space.sample()
            )
            assert isinstance(grid2op_act, PlayableAction)
            # check that all types
            ok_ = func_check[attr_nm](grid2op_act)
            if not ok_:
                raise RuntimeError(
                    f"Some property of the actions are not modified for attr {attr_nm}"
                )
class _AuxTestDiscreteGymActSpace:
    """Mixin exercising the Discrete gym action-space wrapper.

    Concrete test classes provide ``_aux_GymEnv_cls``,
    ``_aux_DiscreteActSpace_cls``, ``_aux_Discrete_cls`` and
    ``_skip_if_no_gym`` (factories defined elsewhere in this file —
    presumably one variant per gym/gymnasium flavor, TODO confirm).

    Fix: removed a leftover ``pdb.set_trace()`` debugger breakpoint in
    :func:`test_supported_keys` that would hang any run where the check
    fails (e.g. on CI).
    """
    def setUp(self) -> None:
        self._skip_if_no_gym()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(
                "educ_case14_storage",
                test=True,
                action_class=PlayableAction,
                # NOTE(review): same suffix as the MultiDiscrete suite,
                # likely a copy-paste; kept unchanged as the generated class
                # name is shared/cached and renaming could affect other tests
                _add_to_name="TestMultiDiscreteGymActSpace",
            )
        self.env.seed(0)
        self.env.reset()  # seed part !
        self.obs_env = self.env.reset()
        self.env_gym = self._aux_GymEnv_cls()(self.env)
    def test_assert_raises_creation(self):
        # the wrapper must refuse to wrap an already-wrapped (gym) space
        with self.assertRaises(RuntimeError):
            self.env_gym.action_space = self._aux_DiscreteActSpace_cls()(self.env_gym.action_space)
    def test_can_create(self):
        """test a simple creation"""
        kept_attr = ["set_bus", "change_bus", "redispatch"]
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env_gym.action_space = self._aux_DiscreteActSpace_cls()(
                self.env.action_space, attr_to_keep=kept_attr
            )
        assert isinstance(self.env_gym.action_space, self._aux_Discrete_cls())
        self.env_gym.action_space.seed(0)
        grid2op_act = self.env_gym.action_space.from_gym(
            self.env_gym.action_space.sample()
        )
        assert isinstance(grid2op_act, PlayableAction)
        assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
        act = self.env_gym.action_space.sample()
        assert isinstance(act, (int, np.int32, np.int64, dt_int)), f"{act} not an int but {type(act)}"
        # 525 unitary actions in total for these 3 attributes on this grid
        assert self.env_gym.action_space.n == 525
        # check that over several samples every action type shows up
        ok_setbus = False
        ok_change_bus = False
        ok_redisp = False
        for _ in range(30):
            grid2op_act = self.env_gym.action_space.from_gym(
                self.env_gym.action_space.sample()
            )
            ok_setbus = ok_setbus or np.any(grid2op_act.set_bus != 0)
            ok_change_bus = ok_change_bus or np.any(grid2op_act.change_bus)
            ok_redisp = ok_redisp or np.any(grid2op_act.redispatch != 0.0)
        if (not ok_setbus) or (not ok_change_bus) or (not ok_redisp):
            raise RuntimeError("Some property of the actions are not modified !")
    def test_use_bins(self):
        """test the binarized version work"""
        kept_attr = ["set_bus", "change_bus", "redispatch"]
        for nb_bin in [3, 6, 9, 12]:
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                self.env_gym.action_space = self._aux_DiscreteActSpace_cls()(
                    self.env.action_space,
                    attr_to_keep=kept_attr,
                    nb_bins={"redispatch": nb_bin},
                )
            self.env_gym.action_space.seed(0)
            grid2op_act = self.env_gym.action_space.from_gym(
                self.env_gym.action_space.sample()
            )
            assert isinstance(grid2op_act, PlayableAction)
            assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
            # default is 7 bins per redispatchable gen (x3 gens, x2 signs);
            # deduced from the formula below
            assert self.env_gym.action_space.n == 525 - (7 - nb_bin) * 3 * 2
    def test_supported_keys(self):
        """test that i can modify every action with the keys"""
        # expected Discrete space size per attribute
        dims = {
            "set_line_status": 101,
            "change_line_status": 21,
            "set_bus": 235,
            "change_bus": 255,
            "redispatch": 37,
            "curtail": 22,
            "curtail_mw": 31,
            "set_storage": 25,
        }
        # per-attribute predicate: the converted action modifies *something*
        func_check = {
            "set_line_status": lambda act: np.any(act.line_set_status != 0)
            and ~np.all(act.line_set_status != 0),
            "change_line_status": lambda act: np.any(act.line_change_status)
            and ~np.all(act.line_change_status),
            "set_bus": lambda act: np.any(act.set_bus != 0.0)
            and ~np.all(act.set_bus != 0.0),
            "change_bus": lambda act: np.any(act.change_bus)
            and ~np.all(act.change_bus),
            "redispatch": lambda act: np.any(act.redispatch != 0.0),
            "set_storage": lambda act: np.any(act.set_storage != 0.0),
            "curtail": lambda act: np.any(act.curtail != 1.0),
            "curtail_mw": lambda act: np.any(act.curtail != 1.0),
        }
        for attr_nm in sorted(dims.keys()):
            kept_attr = [attr_nm]
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                self.env_gym.action_space = self._aux_DiscreteActSpace_cls()(
                    self.env.action_space, attr_to_keep=kept_attr
                )
            assert self.env_gym.action_space._attr_to_keep == sorted(kept_attr)
            assert (
                self.env_gym.action_space.n == dims[attr_nm]
            ), f"wrong size for {attr_nm}"
            self.env_gym.action_space.seed(0)
            grid2op_act = self.env_gym.action_space.from_gym(
                self.env_gym.action_space.sample()
            )
            assert isinstance(grid2op_act, PlayableAction)
            # check that all types
            ok_ = func_check[attr_nm](grid2op_act)
            if not ok_:
                # (removed a leftover pdb.set_trace() here)
                raise RuntimeError(
                    f"Some property of the actions are not modified for attr {attr_nm}"
                )
class _AuxTestAllGymActSpaceWithAlarm:
    """Mixin testing the gym action-space wrappers (Box / MultiDiscrete /
    Discrete) on a grid that supports the "alarm" feature and has no
    storage unit (neurips 2020 track1 with alarm).
    """
    def setUp(self) -> None:
        self._skip_if_no_gym()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(
                os.path.join(PATH_DATA_TEST, "l2rpn_neurips_2020_track1_with_alarm"),
                test=True,
                action_class=PlayableAction,
                _add_to_name="TestAllGymActSpaceWithAlarm",
            )
        self.env.seed(0)
        self.env.reset()  # seed part !
        self.obs_env = self.env.reset()
        self.env_gym = self._aux_GymEnv_cls()(self.env)
    def test_supported_keys_box(self):
        """test all the attribute of the action can be modified when the action is converted to a float"""
        # expected Box dimension per attribute (set_storage is 0: no storage
        # unit on this grid)
        all_attr = {
            "set_line_status": 59,
            "change_line_status": 59,
            "set_bus": 177,
            "change_bus": 177,
            "redispatch": np.sum(self.env.gen_redispatchable),
            "set_storage": 0,
            "curtail": np.sum(self.env.gen_renewable),
            "curtail_mw": np.sum(self.env.gen_renewable),
            "raise_alarm": 3,
        }
        # NOTE(review): here curtail is compared to 1.0 while the
        # multidiscrete variant below compares to -1.0 — one of the two is
        # probably vacuous; confirm against the curtail default value
        func_check = {
            "set_line_status": lambda act: np.any(act.line_set_status != 0)
            and ~np.all(act.line_set_status != 0),
            "change_line_status": lambda act: np.any(act.line_change_status)
            and ~np.all(act.line_change_status),
            "set_bus": lambda act: np.any(act.set_bus != 0.0)
            and ~np.all(act.set_bus != 0.0),
            "change_bus": lambda act: np.any(act.change_bus)
            and ~np.all(act.change_bus),
            "redispatch": lambda act: np.any(act.redispatch != 0.0),
            "set_storage": lambda act: np.any(act.set_storage != 0.0),
            "curtail": lambda act: np.any(act.curtail != 1.0),
            "curtail_mw": lambda act: np.any(act.curtail != 1.0),
            "raise_alarm": lambda act: np.any(act.raise_alarm)
            and ~np.all(act.raise_alarm),
        }
        for attr_nm in sorted(all_attr.keys()):
            kept_attr = [attr_nm]
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                self.env_gym.action_space = self._aux_BoxGymActSpace_cls()(
                    self.env.action_space, attr_to_keep=kept_attr
                )
            self.env_gym.action_space.seed(0)
            gym_act = self.env_gym.action_space.sample()
            grid2op_act = self.env_gym.action_space.from_gym(gym_act)
            assert isinstance(grid2op_act, PlayableAction)
            assert self.env_gym.action_space._attr_to_keep == kept_attr
            assert (
                len(self.env_gym.action_space.sample()) == all_attr[attr_nm]
            ), f"wrong size for {attr_nm}"
            # check that all types
            ok_ = func_check[attr_nm](grid2op_act)
            if not ok_ and attr_nm != "set_storage":
                # NB for "set_storage" as there are no storage unit on this grid, then this test is doomed to fail
                # this is why i don't perform it in this case
                raise RuntimeError(
                    f"Some property of the actions are not modified for attr {attr_nm}"
                )
    def test_supported_keys_multidiscrete(self):
        """test that i can modify every action with the keys"""
        dims = {
            "set_line_status": 59,
            "change_line_status": 59,
            "set_bus": 177,
            "change_bus": 177,
            "redispatch": np.sum(self.env.gen_redispatchable),
            "curtail": np.sum(self.env.gen_renewable),
            "curtail_mw": np.sum(self.env.gen_renewable),
            "set_storage": 0,
            "raise_alarm": 3,
        }
        func_check = {
            "set_line_status": lambda act: np.any(act.line_set_status != 0)
            and ~np.all(act.line_set_status != 0),
            "change_line_status": lambda act: np.any(act.line_change_status)
            and ~np.all(act.line_change_status),
            "set_bus": lambda act: np.any(act.set_bus != 0.0)
            and ~np.all(act.set_bus != 0.0),
            "change_bus": lambda act: np.any(act.change_bus)
            and ~np.all(act.change_bus),
            "redispatch": lambda act: np.any(act.redispatch != 0.0),
            "set_storage": lambda act: np.any(act.set_storage != 0.0),
            "curtail": lambda act: np.any(act.curtail != -1.0),
            "curtail_mw": lambda act: np.any(act.curtail != -1.0),
            "raise_alarm": lambda act: np.any(act.raise_alarm)
            and ~np.all(act.raise_alarm),
        }
        for attr_nm in sorted(dims.keys()):
            kept_attr = [attr_nm]
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                self.env_gym.action_space = self._aux_MultiDiscreteActSpace_cls()(
                    self.env.action_space, attr_to_keep=kept_attr
                )
            assert self.env_gym.action_space._attr_to_keep == kept_attr
            self.env_gym.action_space.seed(0)
            assert (
                len(self.env_gym.action_space.sample()) == dims[attr_nm]
            ), f"wrong size for {attr_nm}"
            grid2op_act = self.env_gym.action_space.from_gym(
                self.env_gym.action_space.sample()
            )
            assert isinstance(grid2op_act, PlayableAction)
            # check that all types
            ok_ = func_check[attr_nm](grid2op_act)
            if not ok_ and attr_nm != "set_storage":
                # NB for "set_storage" as there are no storage unit on this grid, then this test is doomed to fail
                # this is why i don't perform it in this case
                raise RuntimeError(
                    f"Some property of the actions are not modified for attr {attr_nm}"
                )
    def test_supported_keys_discrete(self):
        """test that i can modify every action with the keys"""
        dims = {
            "set_line_status": 5 * 59 + 1,
            "change_line_status": 59 + 1,
            # "set_bus": 5*177,  # already tested on the case 14 and takes a lot to compute !
            # "change_bus": 255,  # already tested on the case 14 and takes a lot to compute !
            "redispatch": 121,
            "curtail": 85,
            "curtail_mw": 121,
            "set_storage": 1,
            # "raise_alarm": 4,  # not supported in "discrete"!
        }
        func_check = {
            "set_line_status": lambda act: np.any(act.line_set_status != 0)
            and ~np.all(act.line_set_status != 0),
            "change_line_status": lambda act: np.any(act.line_change_status)
            and ~np.all(act.line_change_status),
            "set_bus": lambda act: np.any(act.set_bus != 0.0)
            and ~np.all(act.set_bus != 0.0),
            "change_bus": lambda act: np.any(act.change_bus)
            and ~np.all(act.change_bus),
            "redispatch": lambda act: np.any(act.redispatch != 0.0),
            "set_storage": lambda act: np.any(act.set_storage != 0.0),
            "curtail": lambda act: np.any(act.curtail != 1.0),
            "curtail_mw": lambda act: np.any(act.curtail != 1.0),
            # "raise_alarm": lambda act: np.any(act.raise_alarm)
            # and ~np.all(act.raise_alarm),
        }
        for attr_nm in sorted(dims.keys()):
            kept_attr = [attr_nm]
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                self.env_gym.action_space = self._aux_DiscreteActSpace_cls()(
                    self.env.action_space, attr_to_keep=kept_attr
                )
            assert self.env_gym.action_space._attr_to_keep == kept_attr
            assert self.env_gym.action_space.n == dims[attr_nm], (
                f"wrong size for {attr_nm}, should be {dims[attr_nm]} "
                f"but is {self.env_gym.action_space.n}"
            )
            self.env_gym.action_space.seed(1)  # with seed 0 it does not work
            act_gym = self.env_gym.action_space.sample()
            grid2op_act = self.env_gym.action_space.from_gym(act_gym)
            assert isinstance(grid2op_act, PlayableAction)
            # check that all types
            ok_ = func_check[attr_nm](grid2op_act)
            if not ok_ and attr_nm != "set_storage":
                # NB for "set_storage" as there are no storage unit on this grid, then this test is doomed to fail
                # this is why i don't perform it in this case
                raise RuntimeError(
                    f"Some property of the actions are not modified for attr {attr_nm}"
                )
class _AuxTestGOObsInRange:
    """Mixin checking that an observation produced in a "game over" state
    still fits inside the declared gym observation space bounds.
    """
    def setUp(self) -> None:
        self._skip_if_no_gym()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(
                "educ_case14_storage",
                test=True,
                action_class=PlayableAction,
                _add_to_name="TestMultiDiscreteGymActSpace",
            )
        self.env.seed(0)
        self.env.reset()  # seed part !
        self.obs_env = self.env.reset()
        self.env_gym = self._aux_GymEnv_cls()(self.env)
    def test_obs_in_go_state_dont_exceed_max(self):
        # disconnecting generator 0 triggers a game over on this grid
        obs, reward, done, info = self.env.step(
            self.env.action_space({"set_bus": {"generators_id": [(0, -1)]}})
        )
        assert done
        gym_obs = self.env_gym.observation_space.to_gym(obs)
        # every declared key must be present, and every value must lie
        # inside the corresponding gym sub-space
        for key in self.env_gym.observation_space.spaces.keys():
            assert key in gym_obs, f"key: {key} no in the observation"
        for key in gym_obs.keys():
            assert gym_obs[key] in self.env_gym.observation_space.spaces[key], f"error for {key}"
class _AuxObsAllAttr:
    """Mixin checking that the gym observation exposes every expected
    observation attribute (commented entries are deliberately excluded).
    """
    def test_all_attr_in_obs(self):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = grid2op.make("educ_case14_storage", test=True,
                               action_class=PlayableAction)
        gym_env = self._aux_GymEnv_cls()(env)
        obs, info = gym_env.reset()
        # exhaustive list of attributes the gym observation must contain
        all_attrs = ["year",
                     "month",
                     "day",
                     "hour_of_day",
                     "minute_of_hour",
                     "day_of_week",
                     "timestep_overflow",
                     "line_status",
                     "topo_vect",
                     "gen_p",
                     "gen_q",
                     "gen_v",
                     "gen_margin_up",
                     "gen_margin_down",
                     "load_p",
                     "load_q",
                     "load_v",
                     "p_or",
                     "q_or",
                     "v_or",
                     "a_or",
                     "p_ex",
                     "q_ex",
                     "v_ex",
                     "a_ex",
                     "rho",
                     "time_before_cooldown_line",
                     "time_before_cooldown_sub",
                     "time_next_maintenance",
                     "duration_next_maintenance",
                     "target_dispatch",
                     "actual_dispatch",
                     "storage_charge",
                     "storage_power_target",
                     "storage_power",
                     "is_alarm_illegal",
                     "time_since_last_alarm",
                     # "last_alarm",
                     # "attention_budget",
                     # "was_alarm_used_after_game_over",
                     "_shunt_p",
                     "_shunt_q",
                     "_shunt_v",
                     "_shunt_bus",
                     "thermal_limit",
                     "gen_p_before_curtail",
                     "curtailment",
                     "curtailment_limit",
                     "curtailment_limit_effective",
                     "theta_or",
                     "theta_ex",
                     "load_theta",
                     "gen_theta",
                     "storage_theta",
                     "current_step",
                     "max_step",
                     "delta_time"]
        for el in all_attrs:
            assert el in obs.keys(), f"\"{el}\" not in obs.keys()"
| 81,288 | 40.221602 | 126 | py |
Grid2Op | Grid2Op-master/grid2op/tests/_aux_test_some_gym_issues.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""put at the same place some test for gym, mainly to run them in a single command"""
import unittest
from test_issue_185 import Issue185Tester
from test_issue_196 import Issue196Tester
from test_issue_281 import Issue281Tester
from test_issue_282 import Issue282Tester
from test_issue_283 import Issue283Tester
from test_issue_379 import Issue379Tester
from test_issue_407 import Issue407Tester
from test_issue_418 import Issue418Tester
from test_gym_compat import (TestGymCompatModule,
TestBoxGymObsSpace,
TestBoxGymActSpace,
TestMultiDiscreteGymActSpace,
TestDiscreteGymActSpace,
TestAllGymActSpaceWithAlarm,
TestGOObsInRange
)
from test_gym_env_renderer import TestGymEnvRenderer
from test_GymConverter import (TestWithoutConverterWCCI,
TestIdToAct,
TestToVect,
TestDropAttr,
TestContinuousToDiscrete,
TestWithoutConverterStorage,
TestDiscreteActSpace,
)
from test_timeOutEnvironment import TestTOEnvGym
from test_pickling import TestMultiProc
# Run every test class imported above when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| 1,882 | 41.795455 | 112 | py |
#!/bin/bash
# Run the test suite under coverage.py, then emit a console report with
# missing-line details and an HTML report (htmlcov/ by default).
# Fix: the shebang was "#/bin/bash" (missing "!"), so the kernel would not
# dispatch the script to bash when executed directly.
coverage run --source=.. -m unittest discover
coverage report -m
coverage html
| 91 | 14.333333 | 45 | sh |
Grid2Op | Grid2Op-master/grid2op/tests/helper_data_test.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.dtypes import dt_float
# (x, y) drawing coordinates for the 14 substations of the IEEE case14
# grids used by the tests (purely graphical data).
_case_14_layout = [
    (-280, -81),
    (-100, -270),
    (366, -270),
    (366, -54),
    (-64, -54),
    (-64, 54),
    (450, 0),
    (550, 0),
    (326, 54),
    (222, 108),
    (79, 162),
    (-170, 270),
    (-64, 270),
    (222, 216),
]
# (x, y) drawing coordinates for the 5 substations of the case5 grid.
_case_5_layout = [(0, 0), (0, 400), (200, 400), (400, 400), (400, 0)]
# Layout and thermal limits for the "rte_case14_test" environment
# (20 powerlines; limits presumably in amps — TODO confirm).
case14_test_layout = _case_14_layout
case14_test_TH_LIM = np.array(
    [
        352.8251645,
        352.8251645,
        183197.68156979,
        183197.68156979,
        183197.68156979,
        12213.17877132,
        183197.68156979,
        352.8251645,
        352.8251645,
        352.8251645,
        352.8251645,
        352.8251645,
        183197.68156979,
        183197.68156979,
        183197.68156979,
        352.8251645,
        352.8251645,
        352.8251645,
        2721.79412618,
        2721.79412618,
    ]
).astype(dt_float)
# Layout and thermal limits for the "rte_case14_redisp" environment.
case14_redisp_layout = _case_14_layout
case14_redisp_TH_LIM = np.array(
    [
        3.84900179e02,
        3.84900179e02,
        2.28997102e05,
        2.28997102e05,
        2.28997102e05,
        1.52664735e04,
        2.28997102e05,
        3.84900179e02,
        3.84900179e02,
        1.83285800e02,
        3.84900179e02,
        3.84900179e02,
        2.28997102e05,
        2.28997102e05,
        6.93930612e04,
        3.84900179e02,
        3.84900179e02,
        2.40562612e02,
        3.40224266e03,
        3.40224266e03,
    ]
).astype(dt_float)
# Layout and thermal limits for the "rte_case14_realistic" environment.
case14_real_layout = _case_14_layout
case14_real_TH_LIM = np.array(
    [
        384.900179,
        384.900179,
        380.0,
        380.0,
        157.0,
        380.0,
        380.0,
        1077.7205012,
        461.8802148,
        769.80036,
        269.4301253,
        384.900179,
        760.0,
        380.0,
        760.0,
        384.900179,
        230.9401074,
        170.79945452,
        3402.24266,
        3402.24266,
    ]
).astype(dt_float)
# Layout for the L2RPN 2019 competition grid, plus the mapping between the
# original (matpower-style) element names and the grid2op element names.
L2RPN_2019_layout = _case_14_layout
L2RPN_2019_dict = {
    "loads": {
        "2_C-10.61": "load_1_0",
        "3_C151.15": "load_2_1",
        "14_C63.6": "load_13_10",
        "4_C-9.47": "load_3_2",
        "5_C201.84": "load_4_3",
        "6_C-6.27": "load_5_4",
        "9_C130.49": "load_8_5",
        "10_C228.66": "load_9_6",
        "11_C-138.89": "load_10_7",
        "12_C-27.88": "load_11_8",
        "13_C-13.33": "load_12_9",
    },
    "lines": {
        "1_2_1": "0_1_0",
        "1_5_2": "0_4_1",
        "9_10_16": "8_9_16",
        "9_14_17": "8_13_15",
        "10_11_18": "9_10_17",
        "12_13_19": "11_12_18",
        "13_14_20": "12_13_19",
        "2_3_3": "1_2_2",
        "2_4_4": "1_3_3",
        "2_5_5": "1_4_4",
        "3_4_6": "2_3_5",
        "4_5_7": "3_4_6",
        "6_11_11": "5_10_12",
        "6_12_12": "5_11_11",
        "6_13_13": "5_12_10",
        "4_7_8": "3_6_7",
        "4_9_9": "3_8_8",
        "5_6_10": "4_5_9",
        "7_8_14": "6_7_13",
        "7_9_15": "6_8_14",
    },
    "prods": {
        "1_G137.1": "gen_0_4",
        "3_G36.31": "gen_1_0",
        "6_G63.29": "gen_2_1",
        "2_G-56.47": "gen_5_2",
        "8_G40.43": "gen_7_3",
    },
}
| 3,633 | 22.294872 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/helper_gen_iadd.py | #!/usr/bin/env python3
import itertools
header_content = """
import unittest
import warnings
from abc import ABC, abstractmethod
import numpy as np
import grid2op
from grid2op.Action import *
from grid2op.dtypes import dt_float
"""
print(header_content)
base_content = """
class Test_iadd_Base(ABC):
@abstractmethod
def _action_setup(self):
pass
def _skipMissingKey(self, key):
if key not in self.action_t.authorized_keys:
skip_msg = "Skipped: Missing authorized_key {key}"
unittest.TestCase.skipTest(self, skip_msg)
@classmethod
def setUpClass(cls):
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
cls.action_t = cls._action_setup()
cls.env = grid2op.make("rte_case14_realistic",
test=True,
action_class=cls.action_t)
@classmethod
def tearDownClass(cls):
cls.env.close()
"""
print(base_content)
actions_names = ["dn", "set_line", "change_line", "set_bus", "change_bus", "redisp"]
actions_skip = [
""" # No skip for do nothing""",
""" self._skipMissingKey("set_line_status")""",
""" self._skipMissingKey("change_line_status")""",
""" self._skipMissingKey("set_bus")""",
""" self._skipMissingKey("change_bus")""",
""" self._skipMissingKey("redispatch")""",
]
actions = [
"""self.env.action_space({})""",
"""self.env.action_space({
"set_line_status": [(0, -1)]
})""",
"""self.env.action_space({
"change_line_status": [0]
})""",
"""self.env.action_space({
"set_bus": {
"substations_id": [
(0, [2] + [0] * (self.env.sub_info[0] - 1))
]
}
})""",
"""self.env.action_space({
"change_bus": {
"substations_id": [
(0, [True] + [False] * (self.env.sub_info[0] - 1))
]
}
})""",
"""self.env.action_space({
"redispatch": {
2: 1.42
}
})""",
]
actions_test = [
"""
assert np.all({1}._set_line_status == 0)
assert np.all({1}._switch_line_status == False)
assert np.all({1}._set_topo_vect == 0)
assert np.all({1}._change_bus_vect == 0)
assert np.all({1}._redispatch == 0.0)
""",
"""
assert {1}._set_line_status[0] == -1
assert np.all({1}._set_line_status[1:] == 0)
assert np.all({1}._switch_line_status == False)
assert np.all({1}._set_topo_vect == 0)
assert np.all({1}._change_bus_vect == 0)
assert np.all({1}._redispatch == 0.0)
""",
"""
assert np.all({1}._set_line_status == 0)
assert {1}._switch_line_status[0] == True
assert np.all({1}._switch_line_status[1:] == False)
assert np.all({1}._set_topo_vect == 0)
assert np.all({1}._change_bus_vect == 0)
assert np.all({1}._redispatch == 0.0)
""",
"""
assert np.all({1}._set_line_status == 0)
assert np.all({1}._switch_line_status == False)
assert {1}._set_topo_vect[0] == 2
assert np.all({1}._set_topo_vect[1:] == 0)
assert np.all({1}._change_bus_vect == 0)
assert np.all({1}._redispatch == 0.0)
""",
"""
assert np.all({1}._set_line_status == 0)
assert np.all({1}._switch_line_status == False)
assert np.all({1}._set_topo_vect == 0)
assert {1}._change_bus_vect[0] == True
assert np.all({1}._change_bus_vect[1:] == False)
assert np.all({1}._redispatch == 0.0)
""",
"""
assert np.all({1}._set_line_status == 0)
assert np.all({1}._switch_line_status == False)
assert np.all({1}._set_topo_vect == 0)
assert np.all({1}._change_bus_vect == 0)
assert {1}._redispatch[2] == dt_float(1.42)
assert np.all({1}._redispatch[:2] == 0.0)
assert np.all({1}._redispatch[3:] == 0.0)
""",
]
fn_name = " def test_{0}_iadd_{1}(self):"
a_create = """
# Create action me [{0}]
act_me = {1}
"""
a_test = " # Test action me [{0}]"
b_create = """ # Create action oth [{0}]
act_oth = {1}
"""
b_test = " # Test action oth [{0}]"
iadd_content = """ # Iadd actions
act_me += act_oth
"""
test_content = """ # Test combination:
assert False, "TODO {} += {} test dumdumb"
"""
import sys
sys.exit(0) # because of the above it do not WORK !!!!!!!!!!!!!!!!!!
for c in itertools.product(range(len(actions)), repeat=2):
a_idx = c[0]
b_idx = c[1]
a_skip = actions_skip[a_idx]
b_skip = actions_skip[b_idx]
a_name = actions_names[a_idx]
b_name = actions_names[b_idx]
a_act = actions[a_idx]
b_act = actions[b_idx]
a_t = actions_test[a_idx]
b_t = actions_test[b_idx]
print(fn_name.format(a_name, b_name))
if len(a_skip) > 0:
print(a_skip)
if len(b_skip) > 0:
print(b_skip)
print(a_create.format(a_name, a_act))
print((a_test + a_t).format(a_name, "act_me"))
print(b_create.format(b_name, b_act))
print((b_test + b_t).format(b_name, "act_oth"))
print(iadd_content)
print(test_content.format(a_name, b_name))
classes_names = [
"CompleteAction",
"DispatchAction",
"DontAct",
"PlayableAction",
"PowerlineChangeAction",
"PowerlineChangeAndDispatchAction",
"PowerlineSetAction",
"PowerlineSetAndDispatchAction",
"TopologyAction",
"TopologyAndDispatchAction",
"TopologyChangeAction",
"TopologyChangeAndDispatchAction",
"TopologySetAction",
"TopologySetAndDispatchAction",
]
class_suite = """
class Test_iadd_{0}(Test_iadd_Base, unittest.TestCase):
\"""
Action iadd test suite for subclass: {0}
\"""
@classmethod
def _action_setup(self):
return {0}
"""
for c_name in classes_names:
print(class_suite.format(c_name))
main_content = """
if __name__ == "__main__":
unittest.main()"""
print(main_content)
| 6,171 | 27.182648 | 84 | py |
Grid2Op | Grid2Op-master/grid2op/tests/helper_list_test.py | #!/usr/bin/env python3
import sys
import unittest
def print_suite(suite):
    """Recursively walk *suite* and print each test's fully-qualified name.

    A ``unittest.TestSuite`` is iterable, so anything iterable is descended
    into; a non-iterable leaf is an individual test case whose module, class
    and method name are printed joined with dots
    (``module.Class.test_method``).
    """
    if not hasattr(suite, "__iter__"):
        # Leaf: a single test case.
        leaf_cls = suite.__class__
        print(
            "{}.{}.{}".format(
                leaf_cls.__module__, leaf_cls.__name__, suite._testMethodName
            )
        )
        return
    for child in suite:
        print_suite(child)
# Discover every unittest below the current directory and print their
# fully-qualified names, one per line.
print_suite(unittest.defaultTestLoader.discover("."))
| 462 | 23.368421 | 72 | py |
Grid2Op | Grid2Op-master/grid2op/tests/helper_path_test.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
# making sure test can be ran from:
# root package directory
# Grid2Op subdirectory
# Grid2Op/tests subdirectory
import sys
import os
import unittest
import numpy as np
from pathlib import Path
from grid2op.dtypes import dt_float
# Resolve repository-relative directories from this file's own location:
# tests/ -> package root -> data directories.
test_dir = Path(__file__).parent.absolute()
grid2op_dir = os.fspath(test_dir.parent.absolute())
data_test_dir = os.path.abspath(os.path.join(grid2op_dir, "data_test"))
data_dir = os.path.abspath(os.path.join(grid2op_dir, "data"))
# Side effect: make the in-repo grid2op package importable ahead of any
# installed copy when tests are run from various working directories.
sys.path.insert(0, grid2op_dir)
# Public path constants consumed by the test modules.
PATH_DATA = data_dir  # shipped environments ("data")
PATH_DATA_TEST = data_test_dir  # test-only fixtures ("data_test")
PATH_CHRONICS = data_test_dir
PATH_CHRONICS_Make2 = os.path.abspath(os.path.join(grid2op_dir, "data"))
PATH_DATA_TEST_PP = os.path.abspath(os.path.join(PATH_DATA_TEST, "test_PandaPower"))
# 5-bus toy example: its chronics folder and its grid description file.
EXAMPLE_CHRONICSPATH = os.path.abspath(
    os.path.join(data_test_dir, "5bus_example", "chronics")
)
EXAMPLE_CASEFILE = os.path.abspath(
    os.path.join(data_test_dir, "5bus_example", "5bus_example.json")
)
# Fixture used by the multi-mix environment tests.
PATH_DATA_MULTIMIX = os.path.abspath(os.path.join(data_test_dir, "multimix"))
class HelperTests(unittest.TestCase):
    """Base test case providing numeric tolerances and a vector comparator.

    Subclasses inherit ``tolvect``/``tol_one`` and can compare float vectors
    with :meth:`compare_vect`.
    """

    def __init__(self, methodName="runTest"):
        # Explicit base-class call (not super()) so mixin-style subclasses
        # keep the exact original initialization order.
        unittest.TestCase.__init__(self, methodName=methodName)
        # Tolerance for element-wise vector comparisons.
        self.tolvect = dt_float(1e-2)
        # Tighter tolerance for scalar comparisons.
        self.tol_one = dt_float(1e-5)

    def compare_vect(self, pred, true):
        """Return whether *pred* matches *true* within ``self.tolvect``.

        Both the maximum and the mean absolute deviation must stay below
        the tolerance.
        """
        gap = np.abs(pred - true)
        within_max = dt_float(np.max(gap)) <= self.tolvect
        within_mean = dt_float(np.mean(gap)) <= self.tolvect
        return within_max and within_mean
| 1,917 | 35.884615 | 112 | py |