Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
Grid2Op | Grid2Op-master/grid2op/Download/download.py | #!/usr/bin/env python
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import argparse
import os
import sys
from grid2op.MakeEnv.PathUtils import DEFAULT_PATH_DATA
from grid2op.Download.DownloadDataset import main_download
from grid2op.Download.DownloadDataset import LI_VALID_ENV
def main(args=None):
    """
    DEPRECATED, use the `grid2op.make(...)` function that will automatically download an environment
    if not present on your system already.

    Parameters
    ----------
    args: ``argparse.Namespace``, optional
        Parsed CLI arguments (attributes ``name`` and ``path_save``). When
        ``None``, arguments are read from the command line via
        :func:`download_cli`.
    """
    if args is None:
        args = download_cli()
    dataset_name = args.name
    try:
        path_data = os.path.abspath(args.path_save)
    except Exception as exc_:
        # previously the caught exception was silently discarded: print the
        # cause so the user can understand why the path was rejected
        print(
            'Argument "--path_save" should be a valid path (directory) on your machine.'
        )
        print(f"Error was: {exc_}")
        sys.exit(1)
    try:
        main_download(dataset_name, path_data)
    except Exception as exc_:
        # keep the historical "Aborted" exit behaviour (message on stderr,
        # exit code 1) but log the underlying cause first
        print(f"Download failed with error: {exc_}")
        sys.exit("Aborted")
def download_cli():
    """Build the argument parser of the (deprecated) download script and parse the command line."""
    arg_parser = argparse.ArgumentParser(
        description="Download some datasets compatible with grid2op."
    )
    arg_parser.add_argument(
        "--path_save",
        default=DEFAULT_PATH_DATA,
        type=str,
        help="The path where the data will be downloaded.",
    )
    arg_parser.add_argument(
        "--name",
        default="rte_case14_redisp",
        type=str,
        help="The name of the dataset (one of {} ).".format(",".join(LI_VALID_ENV)),
    )
    return arg_parser.parse_args()
if __name__ == "__main__":
    # CLI entry point: parse the command line then run the (deprecated) downloader.
    main(download_cli())
| 1,909 | 27.507463 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Environment/BaseEnv.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from datetime import datetime
import logging
import time
import copy
import os
import json
from typing import Optional, Tuple
import warnings
import numpy as np
from scipy.optimize import minimize
from scipy.optimize import LinearConstraint
from abc import ABC, abstractmethod
from grid2op.Action.ActionSpace import ActionSpace
from grid2op.Observation.baseObservation import BaseObservation
from grid2op.Observation.observationSpace import ObservationSpace
from grid2op.Observation.highresSimCounter import HighResSimCounter
from grid2op.Backend import Backend
from grid2op.dtypes import dt_int, dt_float, dt_bool
from grid2op.Space import GridObjects, RandomObject
from grid2op.Exceptions import *
from grid2op.Parameters import Parameters
from grid2op.Reward import BaseReward
from grid2op.Reward import RewardHelper
from grid2op.Opponent import OpponentSpace, NeverAttackBudget
from grid2op.Action import DontAct, BaseAction
from grid2op.Rules import AlwaysLegal
from grid2op.Opponent import BaseOpponent
from grid2op.operator_attention import LinearAttentionBudget
from grid2op.Action._BackendAction import _BackendAction
from grid2op.Chronics import ChronicsHandler
from grid2op.Rules import AlwaysLegal, BaseRules
# TODO put in a separate class the redispatching function
# Template of the detailed, user-facing error message produced when the
# redispatching routine cannot find a feasible dispatch and causes a game over.
# Every "{...}" placeholder is filled by the caller through `str.format`
# (note the ":.2f" specs: the MW amounts are rendered with 2 decimals).
# Fix: "naturl" -> "natural" (typo in the emitted message).
DETAILED_REDISP_ERR_MSG = (
    "\nThis is an attempt to explain why the dispatch did not succeed and caused a game over.\n"
    "To compensate the {increase} of loads and / or {decrease} of "
    "renewable energy (due to natural causes but also through curtailment) and / or variation in the storage units, "
    "the generators should {increase} their total production of {sum_move:.2f}MW (in total).\n"
    "But, if you take into account the generator constraints ({pmax} and {max_ramp_up}) you "
    "can have at most {avail_up_sum:.2f}MW.\n"
    "Indeed at time t, generators are in state:\n\t{gen_setpoint}\ntheir ramp max is:"
    "\n\t{ramp_up}\n and pmax is:\n\t{gen_pmax}\n"
    "Wrapping up, each generator can {increase} at {maximum} of:\n\t{avail_up}\n"
    "NB: if you did not do any dispatch during this episode, it would have been possible to "
    "meet these constraints. This situation is caused by not having enough degree of freedom "
    'to "compensate" the variation of the load due to (most likely) an "over usage" of '
    "redispatching feature (some generators stuck at {pmax} as a consequence of your "
    "redispatching. They can't increase their productions to meet the {increase} in demand or "
    "{decrease} of renewables)"
)
# License header automatically prepended to every python file written by
# `env.generate_classes()` (see the last two lines of the header itself).
BASE_TXT_COPYRIGHT = """# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
# THIS FILE HAS BEEN AUTOMATICALLY GENERATED BY "env.generate_classes()"
# WE DO NOT RECOMMEND TO ALTER IT IN ANY WAY
"""
class BaseEnv(GridObjects, RandomObject, ABC):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
This class represent some usefull abstraction that is re used by :class:`Environment` and
:class:`grid2op.Observation._Obsenv` for example.
The documentation is showed here to document the common attributes of an "BaseEnvironment".
Attributes
----------
parameters: :class:`grid2op.Parameters.Parameters`
The parameters of the game (to expose more control on what is being simulated)
with_forecast: ``bool``
Whether the chronics allow to have some kind of "forecast". See :func:`BaseEnv.activate_forceast`
for more information
logger:
TO BE DONE: a way to log what is happening (**currently not implemented**)
time_stamp: ``datetime.datetime``
The actual time stamp of the current observation.
nb_time_step: ``int``
Number of time steps played in the current environment
current_obs: :class:`grid2op.Observation.BaseObservation`
The current observation (or None if it's not intialized)
backend: :class:`grid2op.Backend.Backend`
The backend used to compute the powerflows and cascading failures.
done: ``bool``
Whether the environment is "done". If ``True`` you need to call :func:`Environment.reset` in order
to continue.
current_reward: ``float``
The last computed reward (reward of the current step)
other_rewards: ``dict``
Dictionary with key being the name (identifier) and value being some RewardHelper. At each time step, all the
values will be computed by the :class:`Environment` and the information about it will be returned in the
"reward" key of the "info" dictionnary of the :func:`Environment.step`.
chronics_handler: :class:`grid2op.Chronics.ChronicsHandler`
The object in charge managing the "chronics", which store the information about load and generator for example.
reward_range: ``tuple``
For open ai gym compatibility. It represents the range of the rewards: reward min, reward max
_viewer:
For open ai gym compatibility.
viewer_fig:
For open ai gym compatibility.
_gen_activeprod_t:
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Should be initialized at 0. for "step" to properly recognize it's the first time step of the game
_no_overflow_disconnection: ``bool``
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Whether or not cascading failures are computed or not (TRUE = the powerlines above their thermal limits will
not be disconnected). This is initialized based on the attribute
:attr:`grid2op.Parameters.Parameters.NO_OVERFLOW_DISCONNECTION`.
_timestep_overflow: ``numpy.ndarray``, dtype: int
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Number of consecutive timesteps each powerline has been on overflow.
_nb_timestep_overflow_allowed: ``numpy.ndarray``, dtype: int
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Number of consecutive timestep each powerline can be on overflow. It is usually read from
:attr:`grid2op.Parameters.Parameters.NB_TIMESTEP_POWERFLOW_ALLOWED`.
_hard_overflow_threshold: ``float``
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Number of timestep before an :class:`grid2op.BaseAgent.BaseAgent` can reconnet a powerline that has been
disconnected
by the environment due to an overflow.
_env_dc: ``bool``
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Whether the environment computes the powerflow using the DC approximation or not. It is usually read from
:attr:`grid2op.Parameters.Parameters.ENV_DC`.
_names_chronics_to_backend: ``dict``
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Configuration file used to associated the name of the objects in the backend
(both extremities of powerlines, load or production for
example) with the same object in the data (:attr:`Environment.chronics_handler`). The idea is that, usually
data generation comes from a different software that does not take into account the powergrid infrastructure.
Hence, the same "object" can have a different name. This mapping is present to avoid the need to rename
the "object" when providing data. A more detailed description is available at
:func:`grid2op.ChronicsHandler.GridValue.initialize`.
_env_modification: :class:`grid2op.Action.Action`
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Representation of the actions of the environment for the modification of the powergrid.
_rewardClass: ``type``
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Type of reward used. Should be a subclass of :class:`grid2op.BaseReward.BaseReward`
_init_grid_path: ``str``
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
The path where the description of the powergrid is located.
_game_rules: :class:`grid2op.Rules.RulesChecker`
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
The rules of the game (define which actions are legal and which are not)
_action_space: :class:`grid2op.Action.ActionSpace`
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Helper used to manipulate more easily the actions given to / provided by the :class:`grid2op.Agent.BaseAgent`
(player)
_helper_action_env: :class:`grid2op.Action.ActionSpace`
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Helper used to manipulate more easily the actions given to / provided by the environment to the backend.
_observation_space: :class:`grid2op.Observation.ObservationSpace`
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Helper used to generate the observation that will be given to the :class:`grid2op.BaseAgent`
_reward_helper: :class:`grid2p.BaseReward.RewardHelper`
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Helper that is called to compute the reward at each time step.
kwargs_observation: ``dict``
TODO
# TODO add the units (eg MW, MWh, MW/time step,etc.) in the redispatching related attributes
"""
ALARM_FILE_NAME = "alerts_info.json"
ALARM_KEY = "fixed"
ALERT_FILE_NAME = "alerts_info.json"
ALERT_KEY = "by_line"
CAN_SKIP_TS = False # each step is exactly one time step
def __init__(
    self,
    init_env_path: os.PathLike,
    init_grid_path: os.PathLike,
    parameters: Parameters,
    voltagecontrolerClass: type,
    thermal_limit_a: Optional[np.ndarray] = None,
    epsilon_poly: float = 1e-4,  # precision of the redispatching algorithm
    tol_poly: float = 1e-2,  # i need to compute a redispatching if the actual values are "more than tol_poly" the values they should be
    other_rewards: dict = None,
    with_forecast: bool = True,
    opponent_space_type: type = OpponentSpace,
    opponent_action_class: type = DontAct,
    opponent_class: type = BaseOpponent,
    opponent_init_budget: float = 0.0,
    opponent_budget_per_ts: float = 0.0,
    opponent_budget_class: type = NeverAttackBudget,
    opponent_attack_duration: int = 0,
    opponent_attack_cooldown: int = 99999,
    kwargs_opponent: dict = None,
    has_attention_budget: bool = False,
    attention_budget_cls: type = LinearAttentionBudget,
    kwargs_attention_budget: dict = None,
    logger: Optional[logging.Logger] = None,
    kwargs_observation: Optional[dict] = None,
    observation_bk_class=None,  # type of backend for the observation space
    observation_bk_kwargs=None,  # type of backend for the observation space
    highres_sim_counter=None,
    update_obs_after_reward=False,
    _is_test: bool = False,  # TODO not implemented !!
    _init_obs: Optional[BaseObservation] =None
):
    """
    Initialize the common state shared by all grid2op environments.

    This only *declares* and defaults every attribute (most to ``None`` / 0);
    the actual spaces, backend class, opponent, etc. are built later
    (presumably by ``_init_backend`` / the concrete subclass — the
    initialization of those members is not visible in this method).

    Parameters are mostly stored as-is (``parameters``, ``kwargs_*`` and
    ``kwargs_observation`` are deep-copied) after light validation:
    ``parameters`` must be a :class:`grid2op.Parameters.Parameters` instance
    and is checked with ``check_valid()``.
    """
    GridObjects.__init__(self)
    RandomObject.__init__(self)
    # replace the mutable default arguments (None sentinels) by fresh dicts
    if other_rewards is None:
        other_rewards = {}
    if kwargs_attention_budget is None:
        kwargs_attention_budget = {}
    if kwargs_opponent is None:
        kwargs_opponent = {}
    self._is_test: bool = _is_test
    # a disabled logger is used when the caller did not provide one
    if logger is None:
        self.logger = logging.getLogger(__name__)
        self.logger.disabled = True
    else:
        self.logger: logging.Logger = logger.getChild("grid2op_BaseEnv")
    if init_grid_path is not None:
        self._init_grid_path: os.PathLike = os.path.abspath(init_grid_path)
    else:
        self._init_grid_path = None
    self._DEBUG: bool = False
    self._complete_action_cls: type = None
    self.__closed: bool = False  # by default the environment is not closed
    # specific to power system
    if not isinstance(parameters, Parameters):
        raise Grid2OpException(
            'Parameter "parameters" used to build the Environment should derived form the '
            'grid2op.Parameters class, type provided is "{}"'.format(
                type(parameters)
            )
        )
    parameters.check_valid()  # check the provided parameters are valid
    self._parameters: Parameters = copy.deepcopy(parameters)
    self.with_forecast: bool = with_forecast
    self._forecasts = None
    # some timers
    self._time_apply_act: float = dt_float(0)
    self._time_powerflow: float = dt_float(0)
    self._time_extract_obs: float = dt_float(0)
    self._time_create_bk_act: float = dt_float(0)
    self._time_opponent: float = dt_float(0)
    self._time_redisp: float = dt_float(0)
    self._time_step: float = dt_float(0)
    # data relative to interpolation
    self._epsilon_poly: float = dt_float(epsilon_poly)
    self._tol_poly: float = dt_float(tol_poly)
    # class used for the action spaces
    self._helper_action_class: ActionSpace = None
    # NOTE(review): annotation says ActionSpace but this stores the class used
    # for the *observation* helper — annotation looks wrong, confirm before fixing
    self._helper_observation_class: ActionSpace = None
    # and calendar data
    # NOTE(review): the class docstring documents time_stamp as a
    # ``datetime.datetime`` and nb_time_step as an ``int``; the two
    # annotations below (time.struct_time / datetime.timedelta) look wrong
    self.time_stamp: time.struct_time = None
    self.nb_time_step: datetime.timedelta = dt_int(0)
    self.delta_time_seconds = None  # number of seconds between two consecutive step
    # observation
    self.current_obs: Optional[BaseObservation] = None
    self._line_status: np.ndarray = None
    self._ignore_min_up_down_times: bool = self._parameters.IGNORE_MIN_UP_DOWN_TIME
    self._forbid_dispatch_off: bool = (
        not self._parameters.ALLOW_DISPATCH_GEN_SWITCH_OFF
    )
    # type of power flow to play
    # if True, then it will not disconnect lines above their thermal limits
    self._no_overflow_disconnection: bool = (
        self._parameters.NO_OVERFLOW_DISCONNECTION
    )
    self._timestep_overflow: np.ndarray = None
    self._nb_timestep_overflow_allowed: np.ndarray = None
    self._hard_overflow_threshold: float = self._parameters.HARD_OVERFLOW_THRESHOLD
    # store actions "cooldown"
    self._times_before_line_status_actionable: np.ndarray = None
    self._max_timestep_line_status_deactivated: int = (
        self._parameters.NB_TIMESTEP_COOLDOWN_LINE
    )
    self._times_before_topology_actionable: np.ndarray = None
    self._max_timestep_topology_deactivated: int = (
        self._parameters.NB_TIMESTEP_COOLDOWN_SUB
    )
    self._nb_ts_reco: int = self._parameters.NB_TIMESTEP_RECONNECTION
    # for maintenance operation
    self._time_next_maintenance: np.ndarray = None
    self._duration_next_maintenance: np.ndarray = None
    # hazard (not used outside of this class, information is given in `times_before_line_status_actionable`
    self._hazard_duration: np.ndarray = None
    self._env_dc = self._parameters.ENV_DC
    # redispatching data
    self._target_dispatch: np.ndarray = None
    self._already_modified_gen: np.ndarray = None
    self._actual_dispatch: np.ndarray = None
    self._gen_uptime: np.ndarray = None
    self._gen_downtime: np.ndarray = None
    self._gen_activeprod_t: np.ndarray = None
    self._gen_activeprod_t_redisp: np.ndarray = None
    self._thermal_limit_a: np.ndarray = thermal_limit_a
    self._disc_lines: np.ndarray = None
    # store environment modifications
    self._injection = None
    self._maintenance = None
    self._hazards = None
    self._env_modification = None
    # to use the data
    self.done = False
    self.current_reward = None
    self._helper_action_env: ActionSpace = None
    self.chronics_handler : ChronicsHandler = None
    self._game_rules = None
    self._action_space: ActionSpace = None
    self._rewardClass: type = None
    self._actionClass: type = None
    self._observationClass: type = None
    self._legalActClass: type = None
    self._observation_space: ObservationSpace = None
    self._names_chronics_to_backend: dict = None
    self._reward_helper = None
    # gym compatibility
    self.reward_range = None, None
    self._viewer = None
    self.viewer_fig = None
    # other rewards
    # validate and wrap every extra reward: values may be either a
    # BaseReward subclass or an instance; keys must be strings
    self.other_rewards = {}
    for k, v in other_rewards.items():
        if isinstance(v, type):
            if not issubclass(v, BaseReward):
                raise Grid2OpException(
                    'All values of "rewards" key word argument should be classes that inherit '
                    'from "grid2op.BaseReward"'
                )
        else:
            if not isinstance(v, BaseReward):
                raise Grid2OpException(
                    'All values of "rewards" key word argument should be classes that inherit '
                    'from "grid2op.BaseReward"'
                )
        if not isinstance(k, str):
            raise Grid2OpException(
                'All keys of "rewards" should be of string type.'
            )
        self.other_rewards[k] = RewardHelper(v, self.logger)
    # opponent
    self._opponent_action_class = (
        opponent_action_class  # class of the action of the opponent
    )
    self._opponent_space_type = opponent_space_type  # type of the opponent action space
    self._opponent_class = opponent_class  # class of the opponent
    self._opponent_init_budget = dt_float(opponent_init_budget)
    self._opponent_attack_duration = dt_int(opponent_attack_duration)
    self._opponent_attack_cooldown = dt_int(opponent_attack_cooldown)
    self._opponent_budget_per_ts = dt_float(opponent_budget_per_ts)
    self._kwargs_opponent = kwargs_opponent
    self._opponent_budget_class = opponent_budget_class
    # below initialized by _create_env, above: need to be called
    self._opponent_action_space = None
    self._compute_opp_budget = None
    self._opponent = None
    self._oppSpace = None
    # voltage
    self._voltagecontrolerClass = voltagecontrolerClass
    self._voltage_controler = None
    # backend action
    self._backend_action_class = None
    self._backend_action = None
    # specific to Basic Env, do not change
    self.backend :Backend = None
    self.__is_init = False
    self.debug_dispatch = False
    # to change the parameters
    self.__new_param = None
    self.__new_forecast_param = None
    self.__new_reward_func = None
    # storage units
    # TODO storage: what to do when self.storage_Emin >0. and self.storage_loss > 0.
    # TODO and we have self._storage_current_charge - self.storage_loss < self.storage_Emin
    self._storage_current_charge = None  # the current storage charge
    self._storage_previous_charge = None  # the previous storage charge
    self._action_storage = None  # the storage action performed
    self._amount_storage = None  # total amount of storage to be dispatched
    self._amount_storage_prev = None
    self._storage_power = None
    self._storage_power_prev = None
    # curtailment
    self._limit_curtailment = None
    self._limit_curtailment_prev = None
    self._gen_before_curtailment = None
    self._sum_curtailment_mw = None
    self._sum_curtailment_mw_prev = None
    self._limited_before = 0.0  # TODO curt
    # attention budget
    self._has_attention_budget = has_attention_budget
    self._attention_budget = None
    self._attention_budget_cls = attention_budget_cls
    self._is_alarm_illegal = False
    self._is_alarm_used_in_reward = False
    # alert infos
    self._is_alert_illegal = False
    self._is_alert_used_in_reward = False
    self._kwargs_attention_budget = copy.deepcopy(kwargs_attention_budget)
    # to ensure self.get_obs() has a reproducible behaviour
    self._last_obs = None
    # to retrieve previous result (before 1.6.5 the seed of the
    # action space or observation space was not done each reset)
    self._has_just_been_seeded = False
    if kwargs_observation is not None:
        self._kwargs_observation = copy.deepcopy(kwargs_observation)
    else:
        self._kwargs_observation = {}
    if init_env_path is not None:
        self._init_env_path = os.path.abspath(init_env_path)
    else:
        self._init_env_path = None
    # time_dependant attributes for the "forecast env"
    if _init_obs is not None:
        # detach the copied observation from its own simulation env to avoid
        # keeping a reference cycle
        self._init_obs = _init_obs.copy()
        self._init_obs._obs_env = None
    else:
        self._init_obs = None
    self._observation_bk_class = observation_bk_class
    self._observation_bk_kwargs = observation_bk_kwargs
    # the high resolution simulation counter can be shared between envs
    if highres_sim_counter is not None:
        self._highres_sim_counter = highres_sim_counter
    else:
        self._highres_sim_counter = HighResSimCounter()
    self._update_obs_after_reward = update_obs_after_reward
    # alert
    self._last_alert = None
    self._time_since_last_alert = None
    self._alert_duration= None
    self._total_number_of_alert = 0
    self._time_since_last_attack = None
    self._was_alert_used_after_attack = None
    self._attack_under_alert = None
    self._is_already_attacked = None
    # general things that can be used by the reward
    self._reward_to_obs = {}
@property
def highres_sim_counter(self):
    """Return the counter object tracking high resolution simulations (shared, see ``__init__``)."""
    return self._highres_sim_counter
@property
def nb_highres_called(self):
    """Return the number of times a high resolution simulation has been called (delegates to the counter)."""
    return self._highres_sim_counter.nb_highres_called
def _custom_deepcopy_for_copy(self, new_obj, dict_=None):
    """
    Copy the full state of ``self`` into the (freshly created, blank)
    ``new_obj`` instance.

    Attributes are copied with the cheapest safe strategy for each one:
    immutable / constant members are shared, mutable state is deep-copied,
    and objects with their own copy protocol (backend, spaces, opponent,
    voltage controler) are copied through their dedicated ``copy`` /
    ``_custom_deepcopy_for_copy`` methods.

    Parameters
    ----------
    new_obj:
        The target instance to populate (same class as ``self``).
    dict_: ``dict``, optional
        NOTE(review): defaulted to ``{}`` below but never used afterwards in
        this method — possibly a leftover; confirm before removing.

    Raises
    ------
    RuntimeError
        If this environment is closed, or if its backend cannot be copied.
    """
    if self.__closed:
        raise RuntimeError("Impossible to make a copy of a closed environment !")
    if not self.backend._can_be_copied:
        raise RuntimeError("Impossible to copy your environment: the backend "
                           "class you used cannot be copied.")
    RandomObject._custom_deepcopy_for_copy(self, new_obj)
    if dict_ is None:
        dict_ = {}
    new_obj._init_grid_path = copy.deepcopy(self._init_grid_path)
    new_obj._init_env_path = copy.deepcopy(self._init_env_path)
    new_obj._DEBUG = self._DEBUG
    new_obj._parameters = copy.deepcopy(self._parameters)
    new_obj.with_forecast = self.with_forecast
    new_obj._forecasts = copy.deepcopy(self._forecasts)
    # some timers
    new_obj._time_apply_act = self._time_apply_act
    new_obj._time_powerflow = self._time_powerflow
    new_obj._time_extract_obs = self._time_extract_obs
    new_obj._time_create_bk_act = self._time_create_bk_act
    new_obj._time_opponent = self._time_opponent
    new_obj._time_redisp = self._time_redisp
    new_obj._time_step = self._time_step
    # data relative to interpolation
    new_obj._epsilon_poly = self._epsilon_poly
    new_obj._tol_poly = self._tol_poly
    #
    new_obj._complete_action_cls = copy.deepcopy(self._complete_action_cls)
    # define logger
    new_obj.logger = copy.deepcopy(self.logger)  # TODO does that make any sense ?
    # class used for the action spaces
    new_obj._helper_action_class = self._helper_action_class  # const
    new_obj._helper_observation_class = self._helper_observation_class
    # and calendar data
    new_obj.time_stamp = self.time_stamp
    new_obj.nb_time_step = self.nb_time_step
    new_obj.delta_time_seconds = self.delta_time_seconds
    # observation
    if self.current_obs is not None:
        new_obj.current_obs = self.current_obs.copy()
    # backend
    # backend action
    new_obj._backend_action_class = self._backend_action_class
    new_obj._backend_action = copy.deepcopy(self._backend_action)
    # specific to Basic Env, do not change
    new_obj.backend = self.backend.copy()
    if self._thermal_limit_a is not None:
        new_obj.backend.set_thermal_limit(self._thermal_limit_a)
    new_obj._thermal_limit_a = copy.deepcopy(self._thermal_limit_a)
    new_obj.__is_init = self.__is_init
    new_obj.__closed = self.__closed
    new_obj.debug_dispatch = self.debug_dispatch
    new_obj._line_status = copy.deepcopy(self._line_status)
    new_obj._ignore_min_up_down_times = self._ignore_min_up_down_times
    new_obj._forbid_dispatch_off = self._forbid_dispatch_off
    # type of power flow to play
    # if True, then it will not disconnect lines above their thermal limits
    new_obj._no_overflow_disconnection = self._no_overflow_disconnection
    new_obj._timestep_overflow = copy.deepcopy(self._timestep_overflow)
    new_obj._nb_timestep_overflow_allowed = copy.deepcopy(
        self._nb_timestep_overflow_allowed
    )
    new_obj._hard_overflow_threshold = self._hard_overflow_threshold
    # store actions "cooldown"
    new_obj._times_before_line_status_actionable = copy.deepcopy(
        self._times_before_line_status_actionable
    )
    new_obj._max_timestep_line_status_deactivated = (
        self._max_timestep_line_status_deactivated
    )
    new_obj._times_before_topology_actionable = copy.deepcopy(
        self._times_before_topology_actionable
    )
    new_obj._max_timestep_topology_deactivated = (
        self._max_timestep_topology_deactivated
    )
    new_obj._nb_ts_reco = self._nb_ts_reco
    # for maintenance operation
    new_obj._time_next_maintenance = copy.deepcopy(self._time_next_maintenance)
    new_obj._duration_next_maintenance = copy.deepcopy(
        self._duration_next_maintenance
    )
    # hazard (not used outside of this class, information is given in `times_before_line_status_actionable`
    new_obj._hazard_duration = copy.deepcopy(self._hazard_duration)
    new_obj._env_dc = self._env_dc
    # redispatching data
    new_obj._target_dispatch = copy.deepcopy(self._target_dispatch)
    new_obj._already_modified_gen = copy.deepcopy(self._already_modified_gen)
    new_obj._actual_dispatch = copy.deepcopy(self._actual_dispatch)
    new_obj._gen_uptime = copy.deepcopy(self._gen_uptime)
    new_obj._gen_downtime = copy.deepcopy(self._gen_downtime)
    new_obj._gen_activeprod_t = copy.deepcopy(self._gen_activeprod_t)
    new_obj._gen_activeprod_t_redisp = copy.deepcopy(self._gen_activeprod_t_redisp)
    new_obj._disc_lines = copy.deepcopy(self._disc_lines)
    # store environment modifications
    new_obj._injection = copy.deepcopy(self._injection)
    new_obj._maintenance = copy.deepcopy(self._maintenance)
    new_obj._hazards = copy.deepcopy(self._hazards)
    new_obj._env_modification = copy.deepcopy(self._env_modification)
    # to use the data
    new_obj.done = self.done
    new_obj.current_reward = copy.deepcopy(self.current_reward)
    new_obj.chronics_handler = copy.deepcopy(self.chronics_handler)
    new_obj._game_rules = copy.deepcopy(self._game_rules)
    # the copied action spaces must point to the *copied* rules, not the
    # original ones, hence the re-assignment of legal_action below
    new_obj._helper_action_env = self._helper_action_env.copy()
    new_obj._helper_action_env.legal_action = new_obj._game_rules.legal_action
    new_obj._action_space = self._action_space.copy()
    new_obj._action_space.legal_action = new_obj._game_rules.legal_action
    new_obj._rewardClass = self._rewardClass
    new_obj._actionClass = self._actionClass
    new_obj._observationClass = self._observationClass
    new_obj._legalActClass = self._legalActClass
    new_obj._observation_space = self._observation_space.copy(copy_backend=True)
    new_obj._observation_space._legal_action = (
        new_obj._game_rules.legal_action
    )  # TODO this does not respect SOLID principles at all !
    new_obj._kwargs_observation = copy.deepcopy(self._kwargs_observation)
    new_obj._observation_space._ptr_kwargs_observation = new_obj._kwargs_observation
    new_obj._names_chronics_to_backend = self._names_chronics_to_backend
    new_obj._reward_helper = copy.deepcopy(self._reward_helper)
    # gym compatibility
    new_obj.reward_range = copy.deepcopy(self.reward_range)
    new_obj._viewer = copy.deepcopy(self._viewer)
    new_obj.viewer_fig = copy.deepcopy(self.viewer_fig)
    # other rewards
    new_obj.other_rewards = copy.deepcopy(self.other_rewards)
    # opponent
    new_obj._opponent_space_type = self._opponent_space_type
    new_obj._opponent_action_class = self._opponent_action_class  # const
    new_obj._opponent_class = self._opponent_class  # const
    new_obj._opponent_init_budget = self._opponent_init_budget
    new_obj._opponent_attack_duration = self._opponent_attack_duration
    new_obj._opponent_attack_cooldown = self._opponent_attack_cooldown
    new_obj._opponent_budget_per_ts = self._opponent_budget_per_ts
    new_obj._kwargs_opponent = copy.deepcopy(self._kwargs_opponent)
    new_obj._opponent_budget_class = copy.deepcopy(
        self._opponent_budget_class
    )  # const
    new_obj._opponent_action_space = self._opponent_action_space  # const
    new_obj._compute_opp_budget = self._opponent_budget_class(
        self._opponent_action_space
    )
    # init the opponent
    # blank instance first (bypasses __init__), then the opponent copies
    # itself into it with a pointer to the *new* environment
    new_obj._opponent = new_obj._opponent_class.__new__(new_obj._opponent_class)
    self._opponent._custom_deepcopy_for_copy(
        new_obj._opponent, {"partial_env": new_obj, **new_obj._kwargs_opponent}
    )
    new_obj._oppSpace = new_obj._opponent_space_type(
        compute_budget=new_obj._compute_opp_budget,
        init_budget=new_obj._opponent_init_budget,
        attack_duration=new_obj._opponent_attack_duration,
        attack_cooldown=new_obj._opponent_attack_cooldown,
        budget_per_timestep=new_obj._opponent_budget_per_ts,
        opponent=new_obj._opponent,
    )
    # NOTE(review): state_opp is unused here — presumably the opponent part of
    # the state was already transferred just above; confirm this is intended
    state_me, state_opp = self._oppSpace._get_state()
    new_obj._oppSpace._set_state(state_me)
    # voltage
    new_obj._voltagecontrolerClass = self._voltagecontrolerClass
    new_obj._voltage_controler = self._voltage_controler.copy()
    # to change the parameters
    new_obj.__new_param = copy.deepcopy(self.__new_param)
    new_obj.__new_forecast_param = copy.deepcopy(self.__new_forecast_param)
    new_obj.__new_reward_func = copy.deepcopy(self.__new_reward_func)
    # storage units
    new_obj._storage_current_charge = copy.deepcopy(self._storage_current_charge)
    new_obj._storage_previous_charge = copy.deepcopy(self._storage_previous_charge)
    new_obj._action_storage = copy.deepcopy(self._action_storage)
    new_obj._amount_storage = copy.deepcopy(self._amount_storage)
    new_obj._amount_storage_prev = copy.deepcopy(self._amount_storage_prev)
    new_obj._storage_power = copy.deepcopy(self._storage_power)
    new_obj._storage_power_prev = copy.deepcopy(self._storage_power_prev)
    # curtailment
    new_obj._limit_curtailment = copy.deepcopy(self._limit_curtailment)
    new_obj._limit_curtailment_prev = copy.deepcopy(self._limit_curtailment_prev)
    new_obj._gen_before_curtailment = copy.deepcopy(self._gen_before_curtailment)
    new_obj._sum_curtailment_mw = copy.deepcopy(self._sum_curtailment_mw)
    new_obj._sum_curtailment_mw_prev = copy.deepcopy(self._sum_curtailment_mw_prev)
    new_obj._limited_before = copy.deepcopy(self._limited_before)
    # attention budget
    new_obj._has_attention_budget = self._has_attention_budget
    new_obj._attention_budget = copy.deepcopy(self._attention_budget)
    new_obj._attention_budget_cls = self._attention_budget_cls  # const
    new_obj._is_alarm_illegal = copy.deepcopy(self._is_alarm_illegal)
    new_obj._is_alarm_used_in_reward = copy.deepcopy(self._is_alarm_used_in_reward)
    # alert
    new_obj._is_alert_illegal = copy.deepcopy(self._is_alert_illegal)
    new_obj._is_alert_used_in_reward = copy.deepcopy(self._is_alert_used_in_reward)
    new_obj._kwargs_attention_budget = copy.deepcopy(self._kwargs_attention_budget)
    # NOTE(review): unlike current_obs above, _last_obs is copied without a
    # None check — this would raise AttributeError if the env is copied
    # before producing any observation; confirm callers guarantee this
    new_obj._last_obs = self._last_obs.copy()
    new_obj._has_just_been_seeded = self._has_just_been_seeded
    # extra things used by the reward to pass to the obs
    new_obj._reward_to_obs = copy.deepcopy(self._reward_to_obs)
    # time_dependant attributes for the "forecast env"
    if self._init_obs is None:
        new_obj._init_obs = None
    else:
        new_obj._init_obs = self._init_obs.copy()
    new_obj._observation_bk_class = self._observation_bk_class
    new_obj._observation_bk_kwargs = self._observation_bk_kwargs
    # do not forget !
    new_obj._is_test = self._is_test
    # do not copy it.
    new_obj._highres_sim_counter = self._highres_sim_counter
    # alert
    new_obj._last_alert = copy.deepcopy(self._last_alert)
    new_obj._time_since_last_alert = copy.deepcopy(self._time_since_last_alert)
    new_obj._alert_duration = copy.deepcopy(self._alert_duration)
    new_obj._total_number_of_alert = self._total_number_of_alert
    new_obj._time_since_last_attack = copy.deepcopy(self._time_since_last_attack)
    new_obj._is_already_attacked = copy.deepcopy(self._is_already_attacked)
    new_obj._attack_under_alert = copy.deepcopy(self._attack_under_alert)
    new_obj._was_alert_used_after_attack = copy.deepcopy(self._was_alert_used_after_attack)
    new_obj._update_obs_after_reward = copy.deepcopy(self._update_obs_after_reward)
def get_path_env(self):
"""
Get the path that allows to create this environment.
It can be used for example in :func:`grid2op.utils.EpisodeStatistics`
to save the information directly inside
the environment data.
"""
if self.__closed:
raise EnvError("This environment is closed, you cannot get its path.")
res = self._init_env_path if self._init_env_path is not None else ""
return res
def _check_alarm_file_consistent(self, dict_):
if (self.ALERT_KEY not in dict_) and (self.ALARM_KEY not in dict_):
raise EnvError(
f'One of {self.ALERT_KEY} or {self.ALARM_KEY} should be present in the alarm data json, for now.'
)
def _set_no_alarm(self):
bk_cls = type(self.backend)
bk_cls.dim_alarms = 0
bk_cls.alarms_area_names = []
bk_cls.alarms_lines_area = {}
bk_cls.alarms_area_lines = []
    def load_alarm_data(self):
        """
        Internal

        .. warning::
            /!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\

        Read the alarm description file (``BaseEnv.ALARM_FILE_NAME``) located in
        the environment directory, validate its content against the grid, and
        store the alarm "areas" on the backend class. If the file does not
        exist (or describes alerts instead of alarms), alarms are disabled.

        Notes
        ------
        This is called when the environment class is not created, so i need to read the data of the grid from the
        backend.

        I cannot use "self.name_line" for example.

        This function update the backend INSTANCE. The backend class is then updated in the
        :func:`BaseEnv._init_backend`
        function with a call to `self.backend.assert_grid_correct()`

        Returns
        -------

        """
        file_alarms = os.path.join(self.get_path_env(), BaseEnv.ALARM_FILE_NAME)
        if os.path.exists(file_alarms) and os.path.isfile(file_alarms):
            with open(file_alarms, mode="r", encoding="utf-8") as f:
                dict_alarm = json.load(f)
            # the file must contain either the alarm key or the alert key
            self._check_alarm_file_consistent(dict_alarm)
            if self.ALARM_KEY not in dict_alarm:
                # not an alarm but an alert
                self._set_no_alarm()
                return # TODO update grid in this case !
            nb_areas = len(dict_alarm[self.ALARM_KEY]) # need to be remembered
            # maps each line name to the list of areas it belongs to
            line_names = {
                el: [] for el in self.backend.name_line
            } # need to be remembered
            area_names = sorted(dict_alarm[self.ALARM_KEY].keys()) # need to be remembered
            area_lines = [[] for _ in range(nb_areas)] # need to be remembered
            for area_id, area_name in enumerate(area_names):
                # check that: all lines in files are in the grid
                area = dict_alarm[self.ALARM_KEY][area_name]
                for line in area:
                    if line not in line_names:
                        raise EnvError(
                            f"You provided a description of the area of the grid for the alarms, but a "
                            f'line named "{line}" is present in your file but not in the grid. Please '
                            f"check the file {file_alarms} and make sure it contains only the line named "
                            f"{sorted(self.backend.name_line)}."
                        )
                    # update the list and dictionary that remembers everything
                    line_names[line].append(area_name)
                    area_lines[area_id].append(line)
            for line, li_area in line_names.items():
                # check that all lines in the grid are in at least one area
                if not li_area:
                    raise EnvError(
                        f"Line (on the grid) named {line} is not in any areas. This is not supported at "
                        f"the moment"
                    )
            # every check pass, i update the backend class
            bk_cls = type(self.backend)
            bk_cls.tell_dim_alarm(nb_areas)
            bk_cls.alarms_area_names = copy.deepcopy(area_names)
            bk_cls.alarms_lines_area = copy.deepcopy(line_names)
            bk_cls.alarms_area_lines = copy.deepcopy(area_lines)
        else:
            # no alarm file: the alarm feature is simply disabled
            self._set_no_alarm()
def _set_no_alert(self):
bk_cls = type(self.backend)
bk_cls.tell_dim_alert(0)
bk_cls.alertable_line_names = []
bk_cls.alertable_line_ids = np.array([], dtype=dt_int)
    def load_alert_data(self):
        """
        Internal

        Read the alert description file (``BaseEnv.ALERT_FILE_NAME``) from the
        environment directory and register the "alertable" lines (the lines the
        opponent may attack) on the backend class. If the file does not exist
        (or describes alarms instead of alerts), alerts are disabled.

        Notes
        ------
        This is called to get the alertable lines when the warning is raised "by line"

        Returns
        -------

        """
        file_alarms = os.path.join(self.get_path_env(), BaseEnv.ALERT_FILE_NAME)
        if os.path.exists(file_alarms) and os.path.isfile(file_alarms):
            with open(file_alarms, mode="r", encoding="utf-8") as f:
                dict_alert = json.load(f)
            # the file must contain either the alarm key or the alert key
            self._check_alarm_file_consistent(dict_alert)
            if self.ALERT_KEY not in dict_alert:
                # not an alert but an alarm
                self._set_no_alert()
                return
            if dict_alert[self.ALERT_KEY] != "opponent":
                raise EnvError('You can only define alert from the opponent for now.')
            # the alertable lines are the lines the opponent may attack
            if "lines_attacked" in self._kwargs_opponent:
                lines_attacked = copy.deepcopy(self._kwargs_opponent["lines_attacked"])
                if isinstance(lines_attacked[0], list):
                    # flatten a list of lists into a single list of line names
                    lines_attacked = sum(lines_attacked, start=[])
            else:
                lines_attacked = []
                warnings.warn("The kwargs \"lines_attacked\" is not present in the description of your opponent "
                              "yet you want to use alert. Know that in this case no alert will be defined...")
            alertable_line_names = copy.deepcopy(lines_attacked)
            alertable_line_ids = np.empty(len(alertable_line_names), dtype=dt_int)
            for i, el in enumerate(alertable_line_names):
                # translate each attacked line name into its line id on the grid
                indx = np.where(self.backend.name_line == el)[0]
                if not len(indx):
                    raise Grid2OpException(f"Attacked line {el} is not found in the grid.")
                alertable_line_ids[i] = indx[0]
            nb_lines = len(alertable_line_ids)
            bk_cls = type(self.backend)
            bk_cls.tell_dim_alert(nb_lines)
            bk_cls.alertable_line_names = copy.deepcopy(alertable_line_names)
            bk_cls.alertable_line_ids = np.array(alertable_line_ids).astype(dt_int)
        else:
            # no alert file: the alert feature is simply disabled
            self._set_no_alert()
    @property
    def action_space(self) -> ActionSpace:
        """This represents a read-only view on the action space of the environment."""
        return self._action_space
    @action_space.setter
    def action_space(self, other):
        # the action space is an immutable part of the environment: rebinding it
        # would break the consistency between backend, rules and actions
        raise EnvError(
            "Impossible to modify the action space of the environment. You probably want to modify "
            "the action with which the agent is interacting. You can do that with a converter, or "
            "using the GymEnv. Please consult the documentation."
        )
    @property
    def observation_space(self) -> ObservationSpace:
        """This represents a read-only view on the observation space of the environment."""
        return self._observation_space
    @observation_space.setter
    def observation_space(self, other):
        # the observation space is an immutable part of the environment:
        # rebinding it would break the consistency with the backend and actions
        raise EnvError(
            "Impossible to modify the observation space of the environment. You probably want to modify "
            "the observation with which the agent is interacting. You can do that with a converter, or "
            "using the GymEnv. Please consult the documentation."
        )
def change_parameters(self, new_parameters):
"""
Allows to change the parameters of an environment.
Notes
------
This only affects the environment AFTER `env.reset()` has been called.
This only affects the environment and NOT the forecast.
Parameters
----------
new_parameters: :class:`grid2op.Parameters.Parameters`
The new parameters you want the environment to get.
Examples
---------
You can use this function like:
.. code-block:: python
import grid2op
from grid2op.Parameters import Parameters
env_name = "l2rpn_case14_sandbox" # or any other name
env = grid2op.make(env_name)
env.parameters.NO_OVERFLOW_DISCONNECTION # -> False
new_param = Parameters()
new_param.A_MEMBER = A_VALUE # eg new_param.NO_OVERFLOW_DISCONNECTION = True
env.change_parameters(new_param)
obs = env.reset()
env.parameters.NO_OVERFLOW_DISCONNECTION # -> True
"""
if self.__closed:
raise EnvError(
"This environment is closed, you cannot change its parameters."
)
if not isinstance(new_parameters, Parameters):
raise EnvError(
'The new parameters "new_parameters" should be an instance of '
"grid2op.Parameters.Parameters. "
)
new_parameters.check_valid() # check the provided parameters are valid
self.__new_param = new_parameters
def change_forecast_parameters(self, new_parameters):
"""
Allows to change the parameters of a "forecast environment" that is for
the method :func:`grid2op.Observation.BaseObservation.simulate` and
:func:`grid2op.Observation.BaseObservation.get_forecast_env`
Notes
------
This only affects the environment AFTER `env.reset()` has been called.
This only affects the "forecast env" and NOT the env itself.
Parameters
----------
new_parameters: :class:`grid2op.Parameters.Parameters`
The new parameters you want the environment to get.
Examples
--------
This can be used like:
.. code-block:: python
import grid2op
env_name = "l2rpn_case14_sandbox" # or any other name
env = grid2op.make(env_name)
param = env.parameters
param.NO_OVERFLOW_DISCONNECTION = True # or any other properties of the environment
env.change_forecast_parameters(param)
# at this point this has no impact.
obs = env.reset()
# now, after the reset, the right parameters are used
sim_obs, sim_reward, sim_done, sim_info = obs.simulate(env.action_space())
# the new parameters `param` are used for this
# and also for
forecasted_env = obs.get_forecast_env()
"""
if self.__closed:
raise EnvError(
"This environment is closed, you cannot change its parameters (for the forecast / simulate)."
)
if not isinstance(new_parameters, Parameters):
raise EnvError(
'The new parameters "new_parameters" should be an instance of '
"grid2op.Parameters.Parameters."
)
new_parameters.check_valid() # check the provided parameters are valid
self.__new_forecast_param = new_parameters
    def _create_attention_budget(self, **kwargs):
        """
        Instantiate and initialize the attention budget used by the alarm
        feature ("zonal" assistant warning type only).

        ``kwargs`` are merged into ``self._kwargs_attention_budget`` before the
        budget is initialized. When the warning type is "by_line" (alerts), the
        attention budget is disabled instead.
        """
        if not self.__is_init:
            raise EnvError(
                "Impossible to create an attention budget with a non initialized environment!"
            )
        if self._has_attention_budget:
            if type(self).assistant_warning_type == "zonal":
                # alarm feature: instantiate the budget then initialize it
                # with the (possibly overridden) keyword arguments
                self._attention_budget = self._attention_budget_cls()
                try:
                    self._kwargs_attention_budget.update(kwargs)
                    self._attention_budget.init(
                        partial_env=self, **self._kwargs_attention_budget
                    )
                except TypeError as exc_:
                    raise EnvError(
                        "Impossible to create the attention budget with the provided argument. Please "
                        'change the content of the argument "kwargs_attention_budget".'
                    ) from exc_
            elif type(self).assistant_warning_type == "by_line":
                # alert feature: no attention budget in this mode
                self._has_attention_budget = False
def _create_opponent(self):
if not self.__is_init:
raise EnvError(
"Impossible to create an opponent with a non initialized environment!"
)
if not issubclass(self._opponent_action_class, BaseAction):
raise EnvError(
"Impossible to make an environment with an opponent action class not derived from BaseAction"
)
try:
self._opponent_init_budget = dt_float(self._opponent_init_budget)
except Exception as e:
raise EnvError(
'Impossible to convert "opponent_init_budget" to a float with error {}'.format(
e
)
)
if self._opponent_init_budget < 0.0:
raise EnvError(
"If you want to deactivate the opponent, please don't set its budget to a negative number."
'Prefer the use of the DontAct action type ("opponent_action_class=DontAct" '
"and / or set its budget to 0."
)
if not issubclass(self._opponent_class, BaseOpponent):
raise EnvError(
"Impossible to make an opponent with a type that does not inherit from BaseOpponent."
)
self._opponent_action_class._add_shunt_data()
self._opponent_action_class._update_value_set()
self._opponent_action_space = self._helper_action_class(
gridobj=type(self.backend),
legal_action=AlwaysLegal,
actionClass=self._opponent_action_class,
)
self._compute_opp_budget = self._opponent_budget_class(
self._opponent_action_space
)
self._opponent = self._opponent_class(self._opponent_action_space)
self._oppSpace = self._opponent_space_type(
compute_budget=self._compute_opp_budget,
init_budget=self._opponent_init_budget,
attack_duration=self._opponent_attack_duration,
attack_cooldown=self._opponent_attack_cooldown,
budget_per_timestep=self._opponent_budget_per_ts,
opponent=self._opponent,
)
self._oppSpace.init_opponent(partial_env=self, **self._kwargs_opponent)
self._oppSpace.reset()
    def _init_myclass(self):
        """
        Specialize (once) the environment class for the backend in use.

        Grid2op generates, for each grid, a dedicated subclass of the
        environment carrying the grid description as class attributes. This
        method performs that specialization the first time it is called and is
        a no-op afterwards.
        """
        if self._backend_action_class is not None:
            # the class has already been initialized
            return
        # remember the original grid2op class
        type(self)._INIT_GRID_CLS = type(self)
        bk_type = type(
            self.backend
        )  # be careful here: you need to initialize from the class, and not from the object
        # create the proper environment class for this specific environment
        self.__class__ = type(self).init_grid(bk_type)
    def _has_been_initialized(self):
        """
        Finalize the initialization of the environment: specialize the class,
        allocate every internal state vector to the proper shape and apply the
        current parameters.

        Relies on the grid description (``n_line``, ``n_gen``, ``n_load``,
        ``n_sub``, ``n_storage``) being known, hence must be called after the
        backend grid has been loaded.
        """
        # type of power flow to play
        # if True, then it will not disconnect lines above their thermal limits
        self._init_myclass()
        bk_type = type(self.backend)
        if np.min([self.n_line, self.n_gen, self.n_load, self.n_sub]) <= 0:
            raise EnvironmentError("Environment has not been initialized properly")
        self._backend_action_class = _BackendAction.init_grid(bk_type)
        self._backend_action = self._backend_action_class()

        # initialize maintenance / hazards
        self._time_next_maintenance = np.full(self.n_line, -1, dtype=dt_int)
        self._duration_next_maintenance = np.zeros(shape=(self.n_line,), dtype=dt_int)
        self._times_before_line_status_actionable = np.full(
            shape=(self.n_line,), fill_value=0, dtype=dt_int
        )

        # create the vector to the proper shape
        self._target_dispatch = np.zeros(self.n_gen, dtype=dt_float)
        self._already_modified_gen = np.zeros(self.n_gen, dtype=dt_bool)
        self._actual_dispatch = np.zeros(self.n_gen, dtype=dt_float)
        self._gen_uptime = np.zeros(self.n_gen, dtype=dt_int)
        self._gen_downtime = np.zeros(self.n_gen, dtype=dt_int)
        self._gen_activeprod_t = np.zeros(self.n_gen, dtype=dt_float)
        self._gen_activeprod_t_redisp = np.zeros(self.n_gen, dtype=dt_float)
        self._nb_timestep_overflow_allowed = np.ones(shape=self.n_line, dtype=dt_int)
        self._max_timestep_line_status_deactivated = (
            self._parameters.NB_TIMESTEP_COOLDOWN_LINE
        )

        # cooldowns (in time steps) before a line / substation can be acted on again
        self._times_before_line_status_actionable = np.zeros(
            shape=(self.n_line,), dtype=dt_int
        )
        self._times_before_topology_actionable = np.zeros(
            shape=(self.n_sub,), dtype=dt_int
        )
        self._nb_timestep_overflow_allowed = np.full(
            shape=(self.n_line,),
            fill_value=self._parameters.NB_TIMESTEP_OVERFLOW_ALLOWED,
            dtype=dt_int,
        )
        self._timestep_overflow = np.zeros(shape=(self.n_line,), dtype=dt_int)

        # update the parameters
        self.__new_param = self._parameters # small hack to have it working as expected
        self._update_parameters()

        self._reset_redispatching()

        # storage
        self._storage_current_charge = np.zeros(self.n_storage, dtype=dt_float)
        self._storage_previous_charge = np.zeros(self.n_storage, dtype=dt_float)
        self._action_storage = np.zeros(self.n_storage, dtype=dt_float)
        self._storage_power = np.zeros(self.n_storage, dtype=dt_float)
        self._storage_power_prev = np.zeros(self.n_storage, dtype=dt_float)
        self._amount_storage = 0.0
        self._amount_storage_prev = 0.0

        # curtailment
        self._limit_curtailment = np.ones(
            self.n_gen, dtype=dt_float
        ) # in ratio of pmax
        self._limit_curtailment_prev = np.ones(
            self.n_gen, dtype=dt_float
        ) # in ratio of pmax
        self._gen_before_curtailment = np.zeros(self.n_gen, dtype=dt_float) # in MW
        self._sum_curtailment_mw = dt_float(0.0)
        self._sum_curtailment_mw_prev = dt_float(0.0)
        self._reset_curtailment()

        # register this is properly initialized
        self.__is_init = True
    def _update_parameters(self):
        """update value for the new parameters"""
        # promote the pending parameters (set by change_parameters) to active
        self._parameters = self.__new_param
        self._ignore_min_up_down_times = self._parameters.IGNORE_MIN_UP_DOWN_TIME
        self._forbid_dispatch_off = not self._parameters.ALLOW_DISPATCH_GEN_SWITCH_OFF

        # type of power flow to play
        # if True, then it will not disconnect lines above their thermal limits
        self._no_overflow_disconnection = self._parameters.NO_OVERFLOW_DISCONNECTION
        self._hard_overflow_threshold = self._parameters.HARD_OVERFLOW_THRESHOLD

        # store actions "cooldown"
        self._max_timestep_line_status_deactivated = (
            self._parameters.NB_TIMESTEP_COOLDOWN_LINE
        )
        self._max_timestep_topology_deactivated = (
            self._parameters.NB_TIMESTEP_COOLDOWN_SUB
        )
        self._nb_ts_reco = self._parameters.NB_TIMESTEP_RECONNECTION

        self._nb_timestep_overflow_allowed[
            :
        ] = self._parameters.NB_TIMESTEP_OVERFLOW_ALLOWED

        # hard overflow part
        self._env_dc = self._parameters.ENV_DC

        # mark the pending parameters as consumed
        self.__new_param = None
    def reset(self):
        """
        Reset the base environment (set the appropriate variables to correct initialization).
        It is (and must be) overloaded in other :class:`grid2op.Environment`

        Applies any pending parameter / forecast-parameter / reward changes,
        clears the episode state (storage, curtailment, alerts) and re-seeds
        the sub-components so episodes do not depend on each other.
        """
        if self.__closed:
            raise EnvError("This environment is closed. You cannot use it anymore.")
        self.__is_init = True
        # current = None is an indicator that this is the first step of the environment
        # so don't change the setting of current_obs = None unless you are willing to change that
        self.current_obs = None
        self._line_status[:] = True
        # apply changes requested through change_parameters / change_forecast_parameters
        if self.__new_param is not None:
            self._update_parameters() # reset __new_param to None too
        if self.__new_forecast_param is not None:
            self._observation_space._change_parameters(self.__new_forecast_param)
            self.__new_forecast_param = None
        # apply a pending reward function change (requested through change_reward)
        if self.__new_reward_func is not None:
            self._reward_helper.change_reward(self.__new_reward_func)
            self._reward_helper.initialize(self)
            self.reward_range = self._reward_helper.range()
            # change also the reward used in simulate
            self._observation_space.change_reward(self._reward_helper.template_reward)
            self.__new_reward_func = None
        self._last_obs = None
        # seeds (so that next episode does not depend on what happened in previous episode)
        if self.seed_used is not None and not self._has_just_been_seeded:
            self.seed(None, _seed_me=False)
        self._reset_storage()
        self._reset_curtailment()
        self._reset_alert()
        self._reward_to_obs = {}
        self._has_just_been_seeded = False
def _reset_alert(self):
self._last_alert[:] = False
self._is_already_attacked[:] = False
self._time_since_last_alert[:] = -1
self._alert_duration[:] = 0
self._total_number_of_alert = 0
self._time_since_last_attack[:] = -1
self._was_alert_used_after_attack[:] = 0
self._attack_under_alert[:] = 0
def _reset_storage(self):
"""reset storage capacity at the beginning of new environment if needed"""
if self.n_storage > 0:
tmp = self._parameters.INIT_STORAGE_CAPACITY * self.storage_Emax
if self._parameters.ACTIVATE_STORAGE_LOSS:
tmp += self.storage_loss * self.delta_time_seconds / 3600.0
self._storage_previous_charge[
:
] = tmp # might not be needed, but it's not for the time it takes...
self._storage_current_charge[:] = tmp
self._storage_power[:] = 0.0
self._storage_power_prev[:] = 0.0
self._amount_storage = 0.0
self._amount_storage_prev = 0.0
# TODO storage: check in simulate too!
def _reset_curtailment(self):
self._limit_curtailment[self.gen_renewable] = 1.0
self._limit_curtailment_prev[self.gen_renewable] = 1.0
self._gen_before_curtailment[:] = 0.0
self._sum_curtailment_mw = dt_float(0.0)
self._sum_curtailment_mw_prev = dt_float(0.0)
self._limited_before = dt_float(0.0)
def seed(self, seed=None, _seed_me=True):
"""
Set the seed of this :class:`Environment` for a better control and to ease reproducible experiments.
Parameters
----------
seed: ``int``
The seed to set.
_seed_me: ``bool``
Whether to seed this instance or just the other things. Used internally only.
Returns
---------
seed: ``tuple``
The seed used to set the prng (pseudo random number generator) for the environment
seed_chron: ``tuple``
The seed used to set the prng for the chronics_handler (if any), otherwise ``None``
seed_obs: ``tuple``
The seed used to set the prng for the observation space (if any), otherwise ``None``
seed_action_space: ``tuple``
The seed used to set the prng for the action space (if any), otherwise ``None``
seed_env_modif: ``tuple``
The seed used to set the prng for the modification of th environment (if any otherwise ``None``)
seed_volt_cont: ``tuple``
The seed used to set the prng for voltage controler (if any otherwise ``None``)
seed_opponent: ``tuple``
The seed used to set the prng for the opponent (if any otherwise ``None``)
Examples
---------
Seeding an environment should be done with:
.. code-block:: python
import grid2op
env = grid2op.make()
env.seed(0)
obs = env.reset()
As long as the environment instance (variable `env` in the above code) is not `reset` the `env.seed` has no
real effect (but can have side effect).
For a full control on the seed mechanism it is more than advised to reset it after it has been seeded.
"""
if self.__closed:
raise EnvError("This environment is closed. You cannot use it anymore.")
seed_init = None
seed_chron = None
seed_obs = None
seed_action_space = None
seed_env_modif = None
seed_volt_cont = None
seed_opponent = None
if _seed_me:
max_int = np.iinfo(dt_int).max
if seed > max_int:
raise Grid2OpException("Seed is too big. Max value is {}, provided value is {}".format(max_int, seed))
try:
seed = np.array(seed).astype(dt_int)
except Exception as exc_:
raise Grid2OpException(
"Impossible to seed with the seed provided. Make sure it can be converted to a"
"numpy 32 bits integer."
)
# example from gym
# self.np_random, seed = seeding.np_random(seed)
# inspiration from @ https://github.com/openai/gym/tree/master/gym/utils
seed_init = seed
super().seed(seed_init)
max_seed = np.iinfo(dt_int).max # 2**32 - 1
if self.chronics_handler is not None:
seed = self.space_prng.randint(max_seed)
seed_chron = self.chronics_handler.seed(seed)
if self._observation_space is not None:
seed = self.space_prng.randint(max_seed)
seed_obs = self._observation_space.seed(seed)
if self._action_space is not None:
seed = self.space_prng.randint(max_seed)
seed_action_space = self._action_space.seed(seed)
if self._helper_action_env is not None:
seed = self.space_prng.randint(max_seed)
seed_env_modif = self._helper_action_env.seed(seed)
if self._voltage_controler is not None:
seed = self.space_prng.randint(max_seed)
seed_volt_cont = self._voltage_controler.seed(seed)
if self._opponent is not None:
seed = self.space_prng.randint(max_seed)
seed_opponent = self._opponent.seed(seed)
self._has_just_been_seeded = True
return (
seed_init,
seed_chron,
seed_obs,
seed_action_space,
seed_env_modif,
seed_volt_cont,
seed_opponent,
)
def deactivate_forecast(self):
"""
This function will have the effect to deactivate the `obs.simulate`, the forecast will not be updated
in the observation space.
This will most likely lead to some performance increase (~10-15% faster) if you don't use the
`obs.simulate` function.
Notes
------
If you really don't want to use the `obs.simulate` functionality, you should rather disable it at the creation
of the environment. For example, if you use the recommended `make` function, you can pass an argument
that will ignore the chronics even when reading it (using `GridStateFromFile` instead of
`GridStateFromFileWithForecast` for example) this would give something like:
.. code-block:: python
import grid2op
from grid2op.Chronics import GridStateFromFile
# tell grid2op not to read the "forecast"
env = grid2op.make("rte_case14_realistic", data_feeding_kwargs={"gridvalueClass": GridStateFromFile})
do_nothing_action = env.action_space()
# improve speed ups to not even try to use forecast
env.deactivate_forecast()
# this is normal behavior
obs = env.reset()
# but this will make the programm stop working
# obs.simulate(do_nothing_action) # DO NOT RUN IT RAISES AN ERROR
"""
if self.__closed:
raise EnvError("This environment is closed, you cannot use it.")
if self._observation_space is not None:
self._observation_space.with_forecast = False
self.with_forecast = False
def reactivate_forecast(self):
"""
This function will have the effect to reactivate the `obs.simulate`, the forecast will be updated
in the observation space.
This will most likely lead to some performance decrease but you will be able to use `obs.simulate` function.
.. warning::
Forecast are deactivated by default (and cannot be reactivated) if the
backend cannot be copied.
.. warning::
You need to call 'env.reset()' for this function to work properly. It is NOT recommended
to reactivate forecasts in the middle of an episode.
Notes
------
You can use this function as followed:
.. code-block:: python
import grid2op
from grid2op.Chronics import GridStateFromFile
# tell grid2op not to read the "forecast"
env = grid2op.make("rte_case14_realistic", data_feeding_kwargs={"gridvalueClass": GridStateFromFile})
do_nothing_action = env.action_space()
# improve speed ups to not even try to use forecast
env.deactivate_forecast()
# this is normal behavior
obs = env.reset()
# but this will make the programm stop working
# obs.simulate(do_nothing_action) # DO NOT RUN IT RAISES AN ERROR
env.reactivate_forecast()
obs = env.reset() # you need to reset the env for this function to have any effects
obs, reward, done, info = env.step(do_nothing_action)
# and now forecast are available again
simobs, sim_r, sim_d, sim_info = obs.simulate(do_nothing_action)
"""
if self.__closed:
raise EnvError("This environment is closed, you cannot use it.")
if not self.backend._can_be_copied:
raise EnvError("Impossible to activate the forecasts with a "
"backend that cannot be copied.")
if self._observation_space is not None:
self._observation_space.reactivate_forecast(self)
self.with_forecast = True
def _init_alert_data(self):
cls = type(self)
self._last_alert = np.full(cls.dim_alerts,
dtype=dt_bool, fill_value=False)
self._is_already_attacked = np.full(cls.dim_alerts, dtype=dt_bool,
fill_value=False)
self._time_since_last_alert = np.full(cls.dim_alerts,
dtype=dt_int, fill_value=-1)
self._alert_duration = np.full(cls.dim_alerts,
dtype=dt_int, fill_value=0)
self._total_number_of_alert = 0
self._time_since_last_attack = np.full(cls.dim_alerts,
dtype=dt_int, fill_value=-1)
self._was_alert_used_after_attack = np.full(cls.dim_alerts,
dtype=dt_int, fill_value=0)
self._attack_under_alert = np.full(cls.dim_alerts,
dtype=dt_int, fill_value=0)
    @abstractmethod
    def _init_backend(
        self,
        chronics_handler,
        backend,
        names_chronics_to_backend,
        actionClass,
        observationClass,
        rewardClass,
        legalActClass,
    ):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        This method is used for Environment specific implementation. Only use it if you know exactly what
        you are doing.

        Concrete subclasses implement here the wiring of the backend with the
        chronics handler and the action / observation / reward / rules classes.
        """
        pass
def set_thermal_limit(self, thermal_limit):
"""
Set the thermal limit effectively.
Parameters
----------
thermal_limit: ``numpy.ndarray``
The new thermal limit. It must be a numpy ndarray vector (or convertible to it). For each powerline it
gives the new thermal limit.
Alternatively, this can be a dictionary mapping the line names (keys) to its thermal limits (values). In
that case, all thermal limits for all powerlines should be specified (this is a safety measure
to reduce the odds of misuse).
Examples
---------
This function can be used like this:
.. code-block:: python
import grid2op
# I create an environment
env = grid2op.make("rte_case5_example", test=True)
# i set the thermal limit of each powerline to 20000 amps
env.set_thermal_limit([20000 for _ in range(env.n_line)])
Notes
-----
As of grid2op > 1.5.0, it is possible to set the thermal limit by using a dictionary with the keys being
the name of the powerline and the values the thermal limits.
"""
if self.__closed:
raise EnvError("This environment is closed, you cannot use it.")
if not self.__is_init:
raise Grid2OpException(
"Impossible to set the thermal limit to a non initialized Environment. "
"Have you called `env.reset()` after last game over ?"
)
if isinstance(thermal_limit, dict):
tmp = np.full(self.n_line, fill_value=np.NaN, dtype=dt_float)
for key, val in thermal_limit.items():
if key not in self.name_line:
raise Grid2OpException(
f"When setting a thermal limit with a dictionary, the keys should be line "
f"names. We found: {key} which is not a line name. The names of the "
f"powerlines are {self.name_line}"
)
ind_line = np.where(self.name_line == key)[0][0]
if np.isfinite(tmp[ind_line]):
raise Grid2OpException(
f"Humm, there is a really strange bug, some lines are set twice."
)
try:
val_fl = float(val)
except Exception as exc_:
raise Grid2OpException(
f"When setting thermal limit with a dictionary, the keys should be "
f"the values of the thermal limit (in amps) you provided something that "
f'cannot be converted to a float. Error was "{exc_}".'
)
tmp[ind_line] = val_fl
elif isinstance(thermal_limit, (np.ndarray, list)):
try:
tmp = np.array(thermal_limit).flatten().astype(dt_float)
except Exception as exc_:
raise Grid2OpException(
f"Impossible to convert the vector as input into a 1d numpy float array. "
f"Error was: \n {exc_}"
)
if tmp.shape[0] != self.n_line:
raise Grid2OpException(
"Attempt to set thermal limit on {} powerlines while there are {}"
"on the grid".format(tmp.shape[0], self.n_line)
)
if np.any(~np.isfinite(tmp)):
raise Grid2OpException(
"Impossible to use non finite value for thermal limits."
)
else:
raise Grid2OpException(
f"You can only set the thermal limits of the environment with a dictionary (in that "
f"case the keys are the line names, and the values the thermal limits) or with "
f"a numpy array that has as many components of the number of powerlines on "
f'the grid. You provided something with type "{type(thermal_limit)}" which '
f"is not supported."
)
self._thermal_limit_a[:] = tmp
self.backend.set_thermal_limit(self._thermal_limit_a)
self.observation_space.set_thermal_limit(self._thermal_limit_a)
def _reset_redispatching(self):
# redispatching
self._target_dispatch[:] = 0.0
self._already_modified_gen[:] = False
self._actual_dispatch[:] = 0.0
self._gen_uptime[:] = 0
self._gen_downtime[:] = 0
self._gen_activeprod_t[:] = 0.0
self._gen_activeprod_t_redisp[:] = 0.0
def _get_new_prod_setpoint(self, action):
"""
NB this is overidden in _ObsEnv where the data are read from the action to set this environment
instead
"""
# get the modification of generator active setpoint from the action
new_p = 1.0 * self._gen_activeprod_t
if "prod_p" in action._dict_inj:
tmp = action._dict_inj["prod_p"]
indx_ok = np.isfinite(tmp)
new_p[indx_ok] = tmp[indx_ok]
# modification of the environment always override the modification of the agents (if any)
# TODO have a flag there if this is the case.
if "prod_p" in self._env_modification._dict_inj:
# modification of the production setpoint value
tmp = self._env_modification._dict_inj["prod_p"]
indx_ok = np.isfinite(tmp)
new_p[indx_ok] = tmp[indx_ok]
return new_p
def _get_already_modified_gen(self, action):
redisp_act_orig = 1.0 * action._redispatch
self._target_dispatch[self._already_modified_gen] += redisp_act_orig[self._already_modified_gen]
first_modified = (~self._already_modified_gen) & (redisp_act_orig != 0)
self._target_dispatch[first_modified] = (
self._actual_dispatch[first_modified] + redisp_act_orig[first_modified]
)
self._already_modified_gen[redisp_act_orig != 0] = True
return self._already_modified_gen
    def _prepare_redisp(self, action, new_p, already_modified_gen):
        """
        Validate the redispatching part of ``action`` against the physical
        limits of the generators (pmin / pmax, turned-off state) before the
        actual dispatch computation.

        Note: in this method the invalid cases are signaled through ``except_``
        (being not ``None``); ``valid`` itself is left at ``True`` as visible
        below. On an invalid action, the redispatching that was just added to
        ``self._target_dispatch`` is rolled back.

        Returns
        -------
        valid, except_, info_
        """
        # trying with an optimization method
        except_ = None
        info_ = []
        valid = True

        # get the redispatching action (if any)
        redisp_act_orig = 1.0 * action._redispatch

        # nothing to do when no redispatching is requested nor pending
        if (
            np.all(redisp_act_orig == 0.0)
            and np.all(self._target_dispatch == 0.0)
            and np.all(self._actual_dispatch == 0.0)
        ):
            return valid, except_, info_

        # check that everything is consistent with pmin, pmax:
        if np.any(self._target_dispatch > self.gen_pmax - self.gen_pmin):
            # action is invalid, the target redispatching would be above pmax for at least a generator
            cond_invalid = self._target_dispatch > self.gen_pmax - self.gen_pmin
            except_ = InvalidRedispatching(
                "You cannot ask for a dispatch higher than pmax - pmin  [it would be always "
                "invalid because, even if the sepoint is pmin, this dispatch would set it "
                "to a number higher than pmax, which is impossible]. Invalid dispatch for "
                "generator(s): "
                "{}".format(np.where(cond_invalid)[0])
            )
            # roll back the contribution of this action to the target dispatch
            self._target_dispatch -= redisp_act_orig
            return valid, except_, info_

        if np.any(self._target_dispatch < self.gen_pmin - self.gen_pmax):
            # action is invalid, the target redispatching would be below pmin for at least a generator
            cond_invalid = self._target_dispatch < self.gen_pmin - self.gen_pmax
            except_ = InvalidRedispatching(
                "You cannot ask for a dispatch lower than pmin - pmax  [it would be always "
                "invalid because, even if the sepoint is pmax, this dispatch would set it "
                "to a number bellow pmin, which is impossible]. Invalid dispatch for "
                "generator(s): "
                "{}".format(np.where(cond_invalid)[0])
            )
            # roll back the contribution of this action to the target dispatch
            self._target_dispatch -= redisp_act_orig
            return valid, except_, info_

        # i can't redispatch turned off generators [turned off generators need to be turned on before redispatching]
        if np.any(redisp_act_orig[new_p == 0.0]) and self._forbid_dispatch_off:
            # action is invalid, a generator has been redispatched, but it's turned off
            except_ = InvalidRedispatching(
                "Impossible to dispatch a turned off generator"
            )
            # roll back the contribution of this action to the target dispatch
            self._target_dispatch -= redisp_act_orig
            return valid, except_, info_

        if self._forbid_dispatch_off is True:
            # zero-out the redispatch on generators that will be turned off and
            # inform the caller when this actually cut something
            redisp_act_orig_cut = 1.0 * redisp_act_orig
            redisp_act_orig_cut[new_p == 0.0] = 0.0
            if np.any(redisp_act_orig_cut != redisp_act_orig):
                info_.append(
                    {
                        "INFO: redispatching cut because generator will be turned_off": np.where(
                            redisp_act_orig_cut != redisp_act_orig
                        )[
                            0
                        ]
                    }
                )
        return valid, except_, info_
def _make_redisp(self, already_modified_gen, new_p):
"""this computes the redispaching vector, taking into account the storage units"""
except_ = None
valid = True
mismatch = self._actual_dispatch - self._target_dispatch
mismatch = np.abs(mismatch)
if (
np.abs(np.sum(self._actual_dispatch)) >= self._tol_poly
or np.max(mismatch) >= self._tol_poly
or np.abs(self._amount_storage) >= self._tol_poly
or np.abs(self._sum_curtailment_mw) >= self._tol_poly
):
except_ = self._compute_dispatch_vect(already_modified_gen, new_p)
valid = except_ is None
return valid, except_
def _compute_dispatch_vect(self, already_modified_gen, new_p):
    """Solve for the "actual" redispatching of the participating generators.

    Sets up and solves a small quadratic program (scipy SLSQP): the dispatch
    of the participating generators must sum to the storage + curtailment
    compensation, stay within pmin / pmax and ramp limits, and be as close as
    possible (weighted least squares) to the target dispatch requested by the
    agent. On success, ``self._actual_dispatch`` is updated in place.

    Parameters
    ----------
    already_modified_gen: ``numpy.ndarray`` (bool)
        Generators whose target dispatch has been modified by a previous (or
        the current) action.
    new_p: ``numpy.ndarray``
        Generator active production as read from the chronics (after
        curtailment).

    Returns
    -------
    except_: exception or ``None``
        ``None`` on success, otherwise the reason the dispatch is infeasible.
    """
    except_ = None
    # first i define the participating generators
    # these are the generators that will be adjusted for redispatching
    gen_participating = (
        (new_p > 0.0)
        | (self._actual_dispatch != 0.0)
        | (self._target_dispatch != self._actual_dispatch)
    )
    gen_participating[~self.gen_redispatchable] = False
    # variation of the production setpoint imposed by the chronics this step
    incr_in_chronics = new_p - (
        self._gen_activeprod_t_redisp - self._actual_dispatch
    )

    # check if the constraints are violated
    ## total available "juice" to go down (incl ramp and pmin / pmax)
    p_min_down = (
        self.gen_pmin[gen_participating]
        - self._gen_activeprod_t_redisp[gen_participating]
    )
    avail_down = np.maximum(p_min_down, -self.gen_max_ramp_down[gen_participating])
    ## total available "juice" to go up (incl. ramp and pmin / pmax)
    p_max_up = (
        self.gen_pmax[gen_participating]
        - self._gen_activeprod_t_redisp[gen_participating]
    )
    avail_up = np.minimum(p_max_up, self.gen_max_ramp_up[gen_participating])
    except_ = self._detect_infeasible_dispatch(
        incr_in_chronics[gen_participating], avail_down, avail_up
    )
    if except_ is not None:
        # try to force the turn on of turned off generators (if parameters allow it)
        if (
            self._parameters.IGNORE_MIN_UP_DOWN_TIME
            and self._parameters.ALLOW_DISPATCH_GEN_SWITCH_OFF
        ):
            # retry with ALL redispatchable generators participating
            gen_participating_tmp = self.gen_redispatchable
            p_min_down_tmp = (
                self.gen_pmin[gen_participating_tmp]
                - self._gen_activeprod_t_redisp[gen_participating_tmp]
            )
            avail_down_tmp = np.maximum(
                p_min_down_tmp, -self.gen_max_ramp_down[gen_participating_tmp]
            )
            p_max_up_tmp = (
                self.gen_pmax[gen_participating_tmp]
                - self._gen_activeprod_t_redisp[gen_participating_tmp]
            )
            avail_up_tmp = np.minimum(
                p_max_up_tmp, self.gen_max_ramp_up[gen_participating_tmp]
            )
            except_tmp = self._detect_infeasible_dispatch(
                incr_in_chronics[gen_participating_tmp],
                avail_down_tmp,
                avail_up_tmp,
            )
            if except_tmp is None:
                # I can "save" the situation by turning on all generators, I do it
                # TODO logger here
                gen_participating = gen_participating_tmp
                except_ = None
            else:
                return except_tmp
        else:
            return except_

    # define the objective value
    target_vals = (
        self._target_dispatch[gen_participating]
        - self._actual_dispatch[gen_participating]
    )
    already_modified_gen_me = already_modified_gen[gen_participating]
    target_vals_me = target_vals[already_modified_gen_me]
    nb_dispatchable = np.sum(gen_participating)
    tmp_zeros = np.zeros((1, nb_dispatchable), dtype=dt_float)
    # generators with tight ramps weigh more in the objective
    coeffs = 1.0 / (
        self.gen_max_ramp_up + self.gen_max_ramp_down + self._epsilon_poly
    )
    weights = np.ones(nb_dispatchable) * coeffs[gen_participating]
    weights /= weights.sum()

    if target_vals_me.shape[0] == 0:
        # no dispatch means all dispatchable, otherwise i will never get to 0
        already_modified_gen_me[:] = True
        target_vals_me = target_vals[already_modified_gen_me]

    # for numeric stability
    # to scale the input also:
    # see https://stackoverflow.com/questions/11155721/positive-directional-derivative-for-linesearch
    scale_x = max(np.max(np.abs(self._actual_dispatch)), 1.0)
    scale_x = dt_float(scale_x)
    target_vals_me_optim = 1.0 * (target_vals_me / scale_x)
    target_vals_me_optim = target_vals_me_optim.astype(dt_float)

    # see https://stackoverflow.com/questions/11155721/positive-directional-derivative-for-linesearch
    # where they advised to scale the function
    scale_objective = max(0.5 * np.sum(np.abs(target_vals_me_optim)) ** 2, 1.0)
    scale_objective = np.round(scale_objective, decimals=4)
    scale_objective = dt_float(scale_objective)

    # add the "sum to 0"
    mat_sum_0_no_turn_on = np.ones((1, nb_dispatchable), dtype=dt_float)
    # this is where the storage is taken into account
    # storages are "load convention" this means that i need to sum the amount of production to sum of storage
    # hence the "+ self._amount_storage" below
    # self._sum_curtailment_mw is "generator convention" hence the "-" there
    const_sum_0_no_turn_on = (
        np.zeros(1, dtype=dt_float)
        + self._amount_storage
        - self._sum_curtailment_mw
    )

    # gen increase in the chronics
    new_p_th = new_p[gen_participating] + self._actual_dispatch[gen_participating]

    # minimum value available for disp
    ## first limit delta because of pmin
    p_min_const = self.gen_pmin[gen_participating] - new_p_th
    ## second limit delta because of ramps
    ramp_down_const = (
        -self.gen_max_ramp_down[gen_participating]
        - incr_in_chronics[gen_participating]
    )
    ## take max of the 2
    min_disp = np.maximum(p_min_const, ramp_down_const)
    min_disp = min_disp.astype(dt_float)

    # maximum value available for disp
    ## first limit delta because of pmax
    p_max_const = self.gen_pmax[gen_participating] - new_p_th
    ## second limit delta because of ramps
    ramp_up_const = (
        self.gen_max_ramp_up[gen_participating]
        - incr_in_chronics[gen_participating]
    )
    ## take min of the 2
    max_disp = np.minimum(p_max_const, ramp_up_const)
    max_disp = max_disp.astype(dt_float)

    # add everything into a linear constraint object
    # equality
    added = 0.5 * self._epsilon_poly
    equality_const = LinearConstraint(
        mat_sum_0_no_turn_on,  # do the sum
        (const_sum_0_no_turn_on) / scale_x,  # lower bound
        (const_sum_0_no_turn_on) / scale_x,  # upper bound
    )
    mat_pmin_max_ramps = np.eye(nb_dispatchable)
    ineq_const = LinearConstraint(
        mat_pmin_max_ramps,
        (min_disp - added) / scale_x,
        (max_disp + added) / scale_x,
    )

    # choose a good initial point (close to the solution)
    # the idea here is to chose a initial point that would be close to the
    # desired solution (split the (sum of the) dispatch to the available generators)
    x0 = np.zeros(np.sum(gen_participating))
    if np.any(self._target_dispatch != 0.) or np.any(already_modified_gen):
        gen_for_x0 = self._target_dispatch[gen_participating] != 0.
        gen_for_x0 |= already_modified_gen[gen_participating]
        x0[gen_for_x0] = (
            self._target_dispatch[gen_participating][gen_for_x0]
            - self._actual_dispatch[gen_participating][gen_for_x0]
        ) / scale_x
        # at this point x0 is made of the difference between the target and the
        # actual dispatch for all generators that have a
        # target dispatch non 0.
        # in this "if" block I set the other component of x0 to
        # their "right" value
        can_adjust = (x0 == 0.0)
        if np.any(can_adjust):
            init_sum = np.sum(x0)
            denom_adjust = np.sum(1.0 / weights[can_adjust])
            if denom_adjust <= 1e-2:
                # i don't want to divide by something too close to 0.
                denom_adjust = 1.0
            x0[can_adjust] = -init_sum / (weights[can_adjust] * denom_adjust)
    else:
        # to "force" the exact reset to 0.0 for all components
        x0 -= self._actual_dispatch[gen_participating] / scale_x

    def target(actual_dispatchable):
        # define my real objective
        quad_ = (
            actual_dispatchable[already_modified_gen_me] - target_vals_me_optim
        ) ** 2
        coeffs_quads = weights[already_modified_gen_me] * quad_
        coeffs_quads_const = coeffs_quads.sum()
        coeffs_quads_const /= scale_objective  # scaling the function
        return coeffs_quads_const

    def jac(actual_dispatchable):
        # analytic gradient of ``target`` (same scaling applied)
        res_jac = 1.0 * tmp_zeros
        res_jac[0, already_modified_gen_me] = (
            2.0
            * weights[already_modified_gen_me]
            * (actual_dispatchable[already_modified_gen_me] - target_vals_me_optim)
        )
        res_jac /= scale_objective  # scaling the function
        return res_jac

    # objective function
    def f(init):
        this_res = minimize(
            target,
            init,
            method="SLSQP",
            constraints=[equality_const, ineq_const],
            options={
                "eps": max(self._epsilon_poly / scale_x, 1e-6),
                "ftol": max(self._epsilon_poly / scale_x, 1e-6),
                "disp": False,
            },
            jac=jac
            # hess=hess # not used for SLSQP
        )
        return this_res

    res = f(x0)
    if res.success:
        self._actual_dispatch[gen_participating] += res.x * scale_x
    else:
        # check if constraints are "approximately" met
        mat_const = np.concatenate((mat_sum_0_no_turn_on, mat_pmin_max_ramps))
        downs = np.concatenate(
            (const_sum_0_no_turn_on / scale_x, (min_disp - added) / scale_x)
        )
        ups = np.concatenate(
            (const_sum_0_no_turn_on / scale_x, (max_disp + added) / scale_x)
        )
        vals = np.matmul(mat_const, res.x)
        ok_down = np.all(
            vals - downs >= -self._tol_poly
        )  # i don't violate "down" constraints
        ok_up = np.all(vals - ups <= self._tol_poly)
        if ok_up and ok_down:
            # it's ok i can tolerate "small" perturbations
            self._actual_dispatch[gen_participating] += res.x * scale_x
        else:
            # TODO try with another method here, maybe
            error_dispatch = (
                "Redispatching automaton terminated with error (no more information available "
                'at this point):\n"{}"'.format(res.message)
            )
            except_ = InvalidRedispatching(error_dispatch)
    return except_
def _detect_infeasible_dispatch(self, incr_in_chronics, avail_down, avail_up):
    """This function is an attempt to give more detailed log by detecting infeasible dispatch.

    Parameters
    ----------
    incr_in_chronics: ``numpy.ndarray``
        Per-generator production variation imposed by the chronics
        (participating generators only).
    avail_down: ``numpy.ndarray``
        Per-generator headroom to decrease production (ramps and pmin included).
    avail_up: ``numpy.ndarray``
        Per-generator headroom to increase production (ramps and pmax included).

    Returns
    -------
    except_: ``InvalidRedispatching`` or ``None``
        A detailed error when the total required move exceeds the total
        available headroom, ``None`` otherwise.
    """
    except_ = None
    # total power the dispatchable generators must absorb this step:
    # chronics variation + storage (load convention) - curtailment (gen convention)
    sum_move = (
        np.sum(incr_in_chronics) + self._amount_storage - self._sum_curtailment_mw
    )
    avail_down_sum = np.sum(avail_down)
    avail_up_sum = np.sum(avail_up)
    gen_setpoint = self._gen_activeprod_t_redisp[self.gen_redispatchable]
    if sum_move > avail_up_sum:
        # infeasible because too much is asked
        msg = DETAILED_REDISP_ERR_MSG.format(
            sum_move=sum_move,
            avail_up_sum=avail_up_sum,
            gen_setpoint=np.round(gen_setpoint, decimals=2),
            ramp_up=self.gen_max_ramp_up[self.gen_redispatchable],
            gen_pmax=self.gen_pmax[self.gen_redispatchable],
            avail_up=np.round(avail_up, decimals=2),
            increase="increase",
            decrease="decrease",
            maximum="maximum",
            pmax="pmax",
            max_ramp_up="max_ramp_up",
        )
        except_ = InvalidRedispatching(msg)
    elif sum_move < avail_down_sum:
        # infeasible because not enough is asked
        # NB the template keys are named after the "up" case; they are
        # deliberately filled with the "down" quantities here.
        msg = DETAILED_REDISP_ERR_MSG.format(
            sum_move=sum_move,
            avail_up_sum=avail_down_sum,
            gen_setpoint=np.round(gen_setpoint, decimals=2),
            ramp_up=self.gen_max_ramp_down[self.gen_redispatchable],
            gen_pmax=self.gen_pmin[self.gen_redispatchable],
            # BUGFIX: report the "down" headroom vector in the down-infeasible
            # message (it previously printed ``avail_up``, the wrong vector)
            avail_up=np.round(avail_down, decimals=2),
            increase="decrease",
            decrease="increase",
            maximum="minimum",
            pmax="pmin",
            max_ramp_up="max_ramp_down",
        )
        except_ = InvalidRedispatching(msg)
    return except_
def _update_actions(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Retrieve the actions to perform the update of the underlying powergrid represented by
the :class:`grid2op.Backend`in the next time step.
A call to this function will also read the next state of :attr:`chronics_handler`, so it must be called only
once per time step.
Returns
--------
res: :class:`grid2op.Action.Action`
The action representing the modification of the powergrid induced by the Backend.
"""
(
timestamp,
tmp,
maintenance_time,
maintenance_duration,
hazard_duration,
prod_v,
) = self.chronics_handler.next_time_step()
if "injection" in tmp:
self._injection = tmp["injection"]
else:
self._injection = None
if "maintenance" in tmp:
self._maintenance = tmp["maintenance"]
else:
self._maintenance = None
if "hazards" in tmp:
self._hazards = tmp["hazards"]
else:
self._hazards = None
self.time_stamp = timestamp
self._duration_next_maintenance = maintenance_duration
self._time_next_maintenance = maintenance_time
self._hazard_duration = hazard_duration
act = self._helper_action_env(
{
"injection": self._injection,
"maintenance": self._maintenance,
"hazards": self._hazards,
}
)
return act, prod_v
def _update_time_reconnection_hazards_maintenance(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
This supposes that :attr:`Environment.times_before_line_status_actionable` is already updated
with the cascading failure, soft overflow and hard overflow.
It also supposes that :func:`Environment._update_actions` has been called, so that the vectors
:attr:`Environment.duration_next_maintenance`, :attr:`Environment._time_next_maintenance` and
:attr:`Environment._hazard_duration` are updated with the most recent values.
Finally the Environment supposes that this method is called before calling :func:`Environment.get_obs`
This function integrates the hazards and maintenance in the
:attr:`Environment.times_before_line_status_actionable` vector.
For example, if a powerline `i` has no problem
of overflow, but is affected by a hazard, :attr:`Environment.times_before_line_status_actionable`
should be updated with the duration of this hazard (stored in one of the three vector mentionned in the
above paragraph)
For this Environment, we suppose that the maximum of the 3 values are taken into account. The reality would
be more complicated.
"""
first_time_maintenance = self._time_next_maintenance == 0
self._times_before_line_status_actionable[first_time_maintenance] = np.maximum(
self._times_before_line_status_actionable[first_time_maintenance],
self._duration_next_maintenance[first_time_maintenance],
)
self._times_before_line_status_actionable[:] = np.maximum(
self._times_before_line_status_actionable, self._hazard_duration
)
def _voltage_control(self, agent_action, prod_v_chronics):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Update the environment action "action_env" given a possibly new voltage setpoint for the generators. This
function can be overide for a more complex handling of the voltages.
It must update (if needed) the voltages of the environment action :attr:`BaseEnv.env_modification`
Parameters
----------
agent_action: :class:`grid2op.Action.Action`
The action performed by the player (or do nothing is player action were not legal or ambiguous)
prod_v_chronics: ``numpy.ndarray`` or ``None``
The voltages that has been specified in the chronics
"""
res = self._helper_action_env()
if prod_v_chronics is not None:
res.update({"injection": {"prod_v": prod_v_chronics}})
return res
def _handle_updown_times(self, gen_up_before, redisp_act):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Handles the up and down tims for the generators.
"""
# get the generators that are not connected after the action
except_ = None
# computes which generator will be turned on after the action
gen_up_after = 1.0 * self._gen_activeprod_t
if "prod_p" in self._env_modification._dict_inj:
tmp = self._env_modification._dict_inj["prod_p"]
indx_ok = np.isfinite(tmp)
gen_up_after[indx_ok] = self._env_modification._dict_inj["prod_p"][indx_ok]
gen_up_after += redisp_act
gen_up_after = gen_up_after > 0.0
# update min down time, min up time etc.
gen_disconnected_this = gen_up_before & (~gen_up_after)
gen_connected_this_timestep = (~gen_up_before) & (gen_up_after)
gen_still_connected = gen_up_before & gen_up_after
gen_still_disconnected = (~gen_up_before) & (~gen_up_after)
if (
np.any(
self._gen_downtime[gen_connected_this_timestep]
< self.gen_min_downtime[gen_connected_this_timestep]
)
and not self._ignore_min_up_down_times
):
# i reconnected a generator before the minimum time allowed
id_gen = (
self._gen_downtime[gen_connected_this_timestep]
< self.gen_min_downtime[gen_connected_this_timestep]
)
id_gen = np.where(id_gen)[0]
id_gen = np.where(gen_connected_this_timestep[id_gen])[0]
except_ = GeneratorTurnedOnTooSoon(
"Some generator has been connected too early ({})".format(id_gen)
)
return except_
else:
self._gen_downtime[gen_connected_this_timestep] = -1
self._gen_uptime[gen_connected_this_timestep] = 1
if (
np.any(
self._gen_uptime[gen_disconnected_this]
< self.gen_min_uptime[gen_disconnected_this]
)
and not self._ignore_min_up_down_times
):
# i disconnected a generator before the minimum time allowed
id_gen = (
self._gen_uptime[gen_disconnected_this]
< self.gen_min_uptime[gen_disconnected_this]
)
id_gen = np.where(id_gen)[0]
id_gen = np.where(gen_connected_this_timestep[id_gen])[0]
except_ = GeneratorTurnedOffTooSoon(
"Some generator has been disconnected too early ({})".format(id_gen)
)
return except_
else:
self._gen_downtime[gen_connected_this_timestep] = 0
self._gen_uptime[gen_connected_this_timestep] = 1
self._gen_uptime[gen_still_connected] += 1
self._gen_downtime[gen_still_disconnected] += 1
return except_
def get_obs(self, _update_state=True):
    """
    Return the observation of the current environment, as seen by the
    :class:`grid2op.Agent.BaseAgent`.

    .. note::
        Called twice when the env is reset, otherwise once per step.

    Returns
    -------
    res: :class:`grid2op.Observation.BaseObservation`
        The current observation usually given to the
        :class:`grid2op.Agent.BaseAgent` / bot / controler.

    Examples
    ---------
    Usable at any moment, even when no fresh observation was just produced:

    .. code-block:: python

        import grid2op

        env = grid2op.make()
        obs = env.reset()

        # ... some big piece of code ...

        obs2 = env.get_obs()
        # obs2 and obs are identical.
    """
    if self.__closed:
        raise EnvError("This environment is closed. You cannot use it anymore.")
    if not self.__is_init:
        raise EnvError(
            "This environment is not initialized. You cannot retrieve its observation. "
            "Have you called `env.reset()` after last game over ?"
        )
    # lazily build the observation, then cache it for subsequent calls
    cached = self._last_obs
    if cached is None:
        cached = self._observation_space(env=self, _update_state=_update_state)
        self._last_obs = cached
    return cached.copy()
def get_thermal_limit(self):
    """
    Get the current thermal limit (in amps) registered for the environment.

    Examples
    ---------
    Typical usage:

    .. code-block:: python

        import grid2op

        env = grid2op.make()
        thermal_limits = env.get_thermal_limit()
    """
    if self.__closed:
        raise EnvError("This environment is closed, you cannot use it.")
    if not self.__is_init:
        raise EnvError(
            "This environment is not initialized. It has no thermal limits. "
            "Have you called `env.reset()` after last game over ?"
        )
    # return a fresh copy so the caller cannot mutate the internal vector
    return 1.0 * self._thermal_limit_a
def _withdraw_storage_losses(self):
"""
empty the energy in the storage units depending on the `storage_loss`
NB this is a loss, this is not seen grid side, so `storage_discharging_efficiency` has no impact on this
"""
# NB this should be done AFTER the computation of self._amount_storage, because this energy is dissipated
# in the storage units, thus NOT seen as power from the grid.
if self._parameters.ACTIVATE_STORAGE_LOSS:
tmp_ = self.storage_loss * self.delta_time_seconds / 3600.0
self._storage_current_charge -= tmp_
# charge cannot be negative, but it can be below Emin if there are some uncompensated losses
self._storage_current_charge[:] = np.maximum(
self._storage_current_charge, 0.0
)
def _aux_remove_power_too_high(self, delta_, indx_too_high):
"""
delta_ is given in energy (and NOT power)
handles self._storage_power in
case we need to cut the storage action because the power would be too high
"""
coeff_p_to_E = (
self.delta_time_seconds / 3600.0
) # TODO optim this is const for all time steps
tmp_ = 1.0 / coeff_p_to_E * delta_
if self._parameters.ACTIVATE_STORAGE_LOSS:
# from the storage i need to reduce of tmp_ MW (to compensate the delta_ MWh)
# but when it's "transfer" to the grid i don't have the same amount (due to inefficiencies)
# it's a "/" because i need more energy from the grid than what the actual charge will be
tmp_ /= self.storage_charging_efficiency[indx_too_high]
self._storage_power[indx_too_high] -= tmp_
def _aux_remove_power_too_low(self, delta_, indx_too_low):
"""
delta_ is given in energy (and NOT power)
handles self._storage_power in
case we need to cut the storage action because the power would be too low
"""
coeff_p_to_E = (
self.delta_time_seconds / 3600.0
) # TODO optim this is const for all time steps
tmp_ = 1.0 / coeff_p_to_E * delta_
if self._parameters.ACTIVATE_STORAGE_LOSS:
# from the storage i need to increase of tmp_ MW (to compensate the delta_ MWh)
# but when it's "transfer" to the grid i don't have the same amount (due to inefficiencies)
# it's a "*" because i have less power on the grid than what is removed from the battery
tmp_ *= self.storage_discharging_efficiency[indx_too_low]
self._storage_power[indx_too_low] -= tmp_
def _compute_storage(self, action_storage_power):
    """Apply the storage part of the action and update the state of charge.

    Updates ``self._storage_power`` (power exchanged with the grid, load
    convention), ``self._storage_current_charge`` (energy, efficiencies
    included) and ``self._amount_storage`` (net contribution the dispatch
    must compensate this step), clipping the charge to
    ``[storage_Emin, storage_Emax]`` and trimming the power accordingly.
    """
    self._storage_previous_charge[:] = self._storage_current_charge
    # units actually acted upon this step (finite, non-zero setpoint)
    storage_act = np.isfinite(action_storage_power) & (action_storage_power != 0.0)
    self._action_storage[:] = 0.0
    self._storage_power[:] = 0.0
    modif = False
    coeff_p_to_E = (
        self.delta_time_seconds / 3600.0
    )  # TODO optim this is const for all time steps
    if np.any(storage_act):
        modif = True
        this_act_stor = action_storage_power[storage_act]
        eff_ = np.ones(np.sum(storage_act))
        if self._parameters.ACTIVATE_STORAGE_LOSS:
            fill_storage = (
                this_act_stor > 0.0
            )  # index of storages that sees their charge increasing
            unfill_storage = (
                this_act_stor < 0.0
            )  # index of storages that sees their charge decreasing
            # charging: only a fraction of the grid power lands in the battery;
            # discharging: more energy leaves the battery than reaches the grid
            eff_[fill_storage] *= self.storage_charging_efficiency[storage_act][
                fill_storage
            ]
            eff_[unfill_storage] /= self.storage_discharging_efficiency[
                storage_act
            ][unfill_storage]
        self._storage_current_charge[storage_act] += (
            this_act_stor * coeff_p_to_E * eff_
        )
        self._action_storage[storage_act] += action_storage_power[storage_act]
        self._storage_power[storage_act] = this_act_stor
    if modif:
        # indx when there is too much energy on the battery
        indx_too_high = self._storage_current_charge > self.storage_Emax
        if np.any(indx_too_high):
            delta_ = (
                self._storage_current_charge[indx_too_high]
                - self.storage_Emax[indx_too_high]
            )
            self._aux_remove_power_too_high(delta_, indx_too_high)
            self._storage_current_charge[indx_too_high] = self.storage_Emax[
                indx_too_high
            ]
        # indx when there is not enough energy on the battery
        indx_too_low = self._storage_current_charge < self.storage_Emin
        if np.any(indx_too_low):
            delta_ = (
                self._storage_current_charge[indx_too_low]
                - self.storage_Emin[indx_too_low]
            )
            self._aux_remove_power_too_low(delta_, indx_too_low)
            self._storage_current_charge[indx_too_low] = self.storage_Emin[
                indx_too_low
            ]
        self._storage_current_charge[:] = np.maximum(
            self._storage_current_charge, self.storage_Emin
        )
        # storage is "load convention", dispatch is "generator convention"
        # i need the generator to have the same sign as the action on the batteries
        self._amount_storage = np.sum(self._storage_power)
    else:
        # battery effect should be removed, so i multiply it by -1.
        self._amount_storage = 0.0
    # only the VARIATION w.r.t. the previous step must be compensated
    # by the dispatch
    tmp = self._amount_storage
    self._amount_storage -= self._amount_storage_prev
    self._amount_storage_prev = tmp

    # dissipated energy, it's not seen on the grid, just lost in the storage unit.
    # this is why it should not be taken into account in self._amount_storage
    # and NOT absorbed by the generators either
    # NB loss in the storage unit can make it got below Emin in energy, but never below 0.
    self._withdraw_storage_losses()
    # end storage
def _compute_max_ramp_this_step(self, new_p):
"""
compute the total "power" i can add or remove this step that takes into account
generators ramps and Pmin / Pmax
new_p: array of the (temporary) new production in the chronics that should happen
"""
# TODO
# maximum value it can take
th_max = np.minimum(
self._gen_activeprod_t_redisp[self.gen_redispatchable]
+ self.gen_max_ramp_up[self.gen_redispatchable],
self.gen_pmax[self.gen_redispatchable],
)
# minimum value it can take
th_min = np.maximum(
self._gen_activeprod_t_redisp[self.gen_redispatchable]
- self.gen_max_ramp_down[self.gen_redispatchable],
self.gen_pmin[self.gen_redispatchable],
)
max_total_up = np.sum(th_max - new_p[self.gen_redispatchable])
max_total_down = np.sum(
th_min - new_p[self.gen_redispatchable]
) # TODO is that it ?
return max_total_down, max_total_up
def _aux_update_curtail_env_act(self, new_p):
if "prod_p" in self._env_modification._dict_inj:
self._env_modification._dict_inj["prod_p"][:] = new_p
else:
self._env_modification._dict_inj["prod_p"] = 1.0 * new_p
self._env_modification._modif_inj = True
def _aux_update_curtailment_act(self, action):
curtailment_act = 1.0 * action._curtail
ind_curtailed_in_act = (curtailment_act != -1.0) & self.gen_renewable
self._limit_curtailment_prev[:] = self._limit_curtailment
self._limit_curtailment[ind_curtailed_in_act] = curtailment_act[
ind_curtailed_in_act
]
def _aux_compute_new_p_curtailment(self, new_p, curtailment_vect):
"""modifies the new_p argument !!!!"""
gen_curtailed = (
curtailment_vect != 1.0
) # curtailed either right now, or in a previous action
max_action = self.gen_pmax[gen_curtailed] * curtailment_vect[gen_curtailed]
new_p[gen_curtailed] = np.minimum(max_action, new_p[gen_curtailed])
return gen_curtailed
def _aux_handle_curtailment_without_limit(self, action, new_p):
    """modifies the new_p argument !!!! (but not the action)

    Applies the curtailment limits (from this action and previous ones) to
    ``new_p`` and updates ``self._sum_curtailment_mw`` / ``_prev`` with the
    VARIATION of curtailed power w.r.t. the previous step.

    Returns the boolean mask of curtailed generators.
    """
    if self.redispatching_unit_commitment_availble and (
        action._modif_curtailment or np.any(self._limit_curtailment != 1.0)
    ):
        self._aux_update_curtailment_act(action)
        gen_curtailed = self._aux_compute_new_p_curtailment(
            new_p, self._limit_curtailment
        )
        # curtailed power = capped production minus what the chronics planned
        tmp_sum_curtailment_mw = dt_float(
            np.sum(new_p[gen_curtailed])
            - np.sum(self._gen_before_curtailment[gen_curtailed])
        )
        self._sum_curtailment_mw = (
            tmp_sum_curtailment_mw - self._sum_curtailment_mw_prev
        )
        self._sum_curtailment_mw_prev = tmp_sum_curtailment_mw
        self._aux_update_curtail_env_act(new_p)
    else:
        # no curtailment this step: only the previous one must be "un-done"
        self._sum_curtailment_mw = -self._sum_curtailment_mw_prev
        self._sum_curtailment_mw_prev = dt_float(0.0)
        gen_curtailed = self._limit_curtailment != 1.0
    return gen_curtailed
def _aux_readjust_curtailment_after_limiting(
    self, total_curtailment, new_p_th, new_p
):
    """Give back ``total_curtailment`` MW to the curtailed renewable generators.

    Called when the curtailment had to be limited because the controllable
    generators could not compensate it: the limited amount is redistributed
    proportionally to what each generator was curtailed, modifying ``new_p``
    in place and updating the curtailment bookkeeping.
    """
    self._sum_curtailment_mw += total_curtailment
    self._sum_curtailment_mw_prev += total_curtailment
    if total_curtailment > self._tol_poly:
        # in this case, the curtailment is too strong, I need to make it less strong
        curtailed = new_p_th - new_p
    else:
        # in this case, the curtailment is too low, this can happen, for example when there is a
        # "strong" curtailment but afterwards you ask to set everything to 1. (so no curtailment)
        # I cannot reuse the previous case (too_much > self._tol_poly) because the
        # curtailment is already computed there...
        new_p_with_previous_curtailment = 1.0 * new_p_th
        self._aux_compute_new_p_curtailment(
            new_p_with_previous_curtailment, self._limit_curtailment_prev
        )
        curtailed = new_p_th - new_p_with_previous_curtailment
    curt_sum = curtailed.sum()
    if abs(curt_sum) > self._tol_poly:
        # NOTE(review): the scaling below uses ``curt_sum`` computed BEFORE
        # zeroing the non-renewable entries -- presumably those entries are
        # already ~0 here; TODO confirm.
        curtailed[~self.gen_renewable] = 0.0
        curtailed *= total_curtailment / curt_sum
        new_p[self.gen_renewable] += curtailed[self.gen_renewable]
def _aux_readjust_storage_after_limiting(self, total_storage):
    """Remove ``total_storage`` MW from the storage setpoints after limiting.

    Called when the storage action had to be limited because the
    controllable generators could not compensate it: the cut is spread
    proportionally over the current (or, failing that, the previous)
    storage action, and the state of charge / bookkeeping are updated
    (charging / discharging efficiencies included).
    """
    new_act_storage = 1.0 * self._storage_power
    sum_this_step = new_act_storage.sum()
    if abs(total_storage) < abs(sum_this_step):
        # i can modify the current action
        modif_storage = new_act_storage * total_storage / sum_this_step
    else:
        # i need to retrieve what I did in a previous action
        # because the current action is not enough (the previous actions
        # cause a problem right now)
        new_act_storage = 1.0 * self._storage_power_prev
        sum_this_step = new_act_storage.sum()
        if abs(sum_this_step) > 1e-1:
            modif_storage = new_act_storage * total_storage / sum_this_step
        else:
            # TODO: this is not covered by any test :-(
            # it happens when you do an action too strong, then a do nothing,
            # then you decrease the limit too rapidly
            # (game over would happen after at least one do nothing)
            # In this case I reset it completely or do I ? I don't really
            # know what to do !
            modif_storage = new_act_storage  # or self._storage_power ???

    # handle self._storage_power and self._storage_current_charge
    coeff_p_to_E = (
        self.delta_time_seconds / 3600.0
    )  # TODO optim this is const for all time steps
    self._storage_power -= modif_storage

    # now compute the state of charge of the storage units (with efficiencies)
    is_discharging = self._storage_power < 0.0
    is_charging = self._storage_power > 0.0
    modif_storage[is_discharging] /= type(self).storage_discharging_efficiency[
        is_discharging
    ]
    modif_storage[is_charging] *= type(self).storage_charging_efficiency[
        is_charging
    ]
    self._storage_current_charge -= coeff_p_to_E * modif_storage

    # inform the grid that the storage is reduced
    self._amount_storage -= total_storage
    self._amount_storage_prev -= total_storage
def _aux_limit_curtail_storage_if_needed(self, new_p, new_p_th, gen_curtailed):
    """Limit curtailment / storage when the dispatch cannot compensate them.

    Checks whether the total variation (chronics + storage + curtailment)
    exceeds what the redispatchable generators can absorb given their ramps
    and pmin / pmax; if so, gives back part of the curtailment and / or the
    storage power (split proportionally to their contributions), modifying
    ``new_p`` and the environment action in place.
    """
    gen_redisp = self.gen_redispatchable
    # variation the controllable generators would have to absorb
    normal_increase = new_p - (
        self._gen_activeprod_t_redisp - self._actual_dispatch
    )
    normal_increase = normal_increase[gen_redisp]
    # headroom to go down / up (ramps and pmin / pmax included)
    p_min_down = (
        self.gen_pmin[gen_redisp] - self._gen_activeprod_t_redisp[gen_redisp]
    )
    avail_down = np.maximum(p_min_down, -self.gen_max_ramp_down[gen_redisp])
    p_max_up = self.gen_pmax[gen_redisp] - self._gen_activeprod_t_redisp[gen_redisp]
    avail_up = np.minimum(p_max_up, self.gen_max_ramp_up[gen_redisp])
    sum_move = (
        np.sum(normal_increase) + self._amount_storage - self._sum_curtailment_mw
    )
    total_storage_curtail = self._amount_storage - self._sum_curtailment_mw
    update_env_act = False
    if abs(total_storage_curtail) >= self._tol_poly:
        # if there is an impact on the curtailment / storage (otherwise I cannot fix anything)
        too_much = 0.0
        if sum_move > np.sum(avail_up):
            # I need to limit curtailment (not enough ramps up available)
            too_much = dt_float(sum_move - np.sum(avail_up) + self._tol_poly)
            self._limited_before = too_much
        elif sum_move < np.sum(avail_down):
            # I need to limit storage unit (not enough ramps down available)
            too_much = dt_float(sum_move - np.sum(avail_down) - self._tol_poly)
            self._limited_before = too_much
        elif np.abs(self._limited_before) >= self._tol_poly:
            # adjust the "mess" I did before by not curtailing enough
            # max_action = self.gen_pmax[gen_curtailed] * self._limit_curtailment[gen_curtailed]
            update_env_act = True
            too_much = min(np.sum(avail_up) - self._tol_poly, self._limited_before)
            self._limited_before -= too_much
            too_much = self._limited_before
        if abs(too_much) > self._tol_poly:
            # split "too_much" between curtailment and storage,
            # proportionally to their respective contributions
            total_curtailment = (
                -self._sum_curtailment_mw / total_storage_curtail * too_much
            )
            total_storage = (
                self._amount_storage / total_storage_curtail * too_much
            )  # TODO !!!
            update_env_act = True
            # TODO "log" the total_curtailment and total_storage somewhere (in the info part of the step function)
            if np.sign(total_curtailment) != np.sign(total_storage):
                # curtailment goes up, storage down, i only "limit" the one that
                # has the same sign as too much
                total_curtailment = (
                    too_much
                    if np.sign(total_curtailment) == np.sign(too_much)
                    else 0.0
                )
                total_storage = (
                    too_much if np.sign(total_storage) == np.sign(too_much) else 0.0
                )
                # NB i can directly assign all the "curtailment" to the maximum because in this case, too_much will
                # necessarily be > than total_curtail (or total_storage) because the other
                # one is of opposite sign
            # fix curtailment
            self._aux_readjust_curtailment_after_limiting(
                total_curtailment, new_p_th, new_p
            )
            # fix storage
            self._aux_readjust_storage_after_limiting(total_storage)
    if update_env_act:
        self._aux_update_curtail_env_act(new_p)
def _aux_handle_act_inj(self, action: BaseAction):
    """Merge the injections of the agent action into the environment action.

    For each injection key present in the agent action, its finite entries
    overwrite the corresponding entries of the environment modification
    (erasing what the chronics specified there).
    """
    env_inj = self._env_modification._dict_inj
    for inj_key in ["load_p", "prod_p", "load_q"]:
        # modification of the injections in the action, this erases the actions in the environment
        if inj_key not in action._dict_inj:
            continue
        act_modif = action._dict_inj[inj_key]
        if inj_key in env_inj:
            merged = 1.0 * env_inj[inj_key]
            finite_entries = np.isfinite(act_modif)
            merged[finite_entries] = act_modif[finite_entries]
            env_inj[inj_key][:] = merged
        else:
            env_inj[inj_key] = 1.0 * act_modif
        self._env_modification._modif_inj = True
def _aux_handle_attack(self, action: BaseAction):
    """Let the opponent (possibly) attack the grid and apply the attack.

    Queries the opponent space with the current observation / actions; when
    an attack occurs, extends the line and substation cooldowns with the
    attack duration and adds the attack to the backend action.

    Returns
    -------
    ``(lines_attacked, subs_attacked, attack_duration)``; the first two are
    ``None`` when no attack took place.
    """
    # TODO code the opponent part here and split more the timings! here "opponent time" is
    # TODO included in time_apply_act
    lines_attacked, subs_attacked = None, None
    attack, attack_duration = self._oppSpace.attack(
        observation=self.current_obs,
        agent_action=action,
        env_action=self._env_modification,
    )
    if attack is not None:
        # the opponent choose to attack
        # i update the "cooldown" on these things
        lines_attacked, subs_attacked = attack.get_topological_impact()
        self._times_before_line_status_actionable[lines_attacked] = np.maximum(
            attack_duration,
            self._times_before_line_status_actionable[lines_attacked],
        )
        self._times_before_topology_actionable[subs_attacked] = np.maximum(
            attack_duration, self._times_before_topology_actionable[subs_attacked]
        )
        self._backend_action += attack
    return lines_attacked, subs_attacked, attack_duration
    def _aux_apply_redisp(self, action, new_p, new_p_th, gen_curtailed, except_):
        """Compute and apply the redispatching implied by the action.

        Also limits infeasible curtailment / storage actions when the
        corresponding parameter is set, and handles the "game over" case when
        the redispatching routine diverges.

        Returns
        -------
        res_action:
            the action actually kept (possibly replaced by "do nothing",
            keeping only the alert part when the env supports alerts)
        is_illegal_redisp, is_illegal_reco, is_done: ``bool`` flags
        """
        is_illegal_redisp = False
        is_done = False
        is_illegal_reco = False
        # remember generator that were "up" before the action
        gen_up_before = self._gen_activeprod_t > 0.0
        # compute the redispatching and the new productions active setpoint
        already_modified_gen = self._get_already_modified_gen(action)
        valid_disp, except_tmp, info_ = self._prepare_redisp(
            action, new_p, already_modified_gen
        )
        if except_tmp is not None:
            # the redispatching part of the action is invalid: replace the
            # whole action by "do nothing", but keep the alert part (if any)
            orig_action = action
            action = self._action_space({})
            if type(self).dim_alerts:
                action.raise_alert = orig_action.raise_alert
            is_illegal_redisp = True
            except_.append(except_tmp)
            if self.n_storage > 0:
                # TODO curtailment: cancel it here too !
                # roll back the storage state changes made for the (now cancelled) action
                self._storage_current_charge[:] = self._storage_previous_charge
                self._amount_storage -= self._amount_storage_prev
                # dissipated energy, it's not seen on the grid, just lost in the storage unit.
                # this is why it should not be taken into account in self._amount_storage
                # and NOT absorbed by the generators either
                self._withdraw_storage_losses()
                # end storage
        # fix redispatching for curtailment storage
        if (
            self.redispatching_unit_commitment_availble
            and self._parameters.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION
        ):
            # limit the curtailment / storage in case of infeasible redispatching
            self._aux_limit_curtail_storage_if_needed(new_p, new_p_th, gen_curtailed)
        self._storage_power_prev[:] = self._storage_power
        # case where the action modifies load (TODO maybe make a different env for that...)
        self._aux_handle_act_inj(action)
        valid_disp, except_tmp = self._make_redisp(already_modified_gen, new_p)
        if not valid_disp or except_tmp is not None:
            # game over case (divergence of the scipy routine to compute redispatching)
            res_action = self._action_space({})
            if type(self).dim_alerts:
                res_action.raise_alert = action.raise_alert
            is_illegal_redisp = True
            except_.append(except_tmp)
            is_done = True
            except_.append(
                InvalidRedispatching(
                    "Game over due to infeasible redispatching state. "
                    'The routine used to compute the "next state" has diverged. '
                    "This means that there is no way to compute a physically valid generator state "
                    "(one that meets all pmin / pmax - ramp min / ramp max with the information "
                    "provided. As one of the physical constraints would be violated, this means that "
                    "a generator would be damaged in real life. This is a game over."
                )
            )
            return res_action, is_illegal_redisp, is_illegal_reco, is_done
        # check the validity of min downtime and max uptime
        except_tmp = self._handle_updown_times(gen_up_before, self._actual_dispatch)
        if except_tmp is not None:
            # up/down time constraint violated: action replaced by do nothing
            is_illegal_reco = True
            res_action = self._action_space({})
            if type(self).dim_alerts:
                res_action.raise_alert = action.raise_alert
            except_.append(except_tmp)
        else:
            res_action = action
        return res_action, is_illegal_redisp, is_illegal_reco, is_done
    def _aux_update_backend_action(self, action, action_storage_power, init_disp):
        """Push the agent action, the env modification and the computed dispatch
        into the backend action.

        The agent's raw redispatch / storage setpoints are temporarily
        overwritten so the backend only sees the environment-computed values;
        the action object is restored to its original content afterwards.
        """
        # make sure the dispatching action is not implemented "as is" by the backend.
        # the environment must make sure it's a zero-sum action.
        # same kind of limit for the storage
        action._redispatch[:] = 0.0
        action._storage_power[:] = self._storage_power
        self._backend_action += action
        # restore the action exactly as the agent provided it
        action._storage_power[:] = action_storage_power
        action._redispatch[:] = init_disp
        # TODO storage: check the original action, even when replaced by do nothing is not modified
        self._backend_action += self._env_modification
        self._backend_action.set_redispatch(self._actual_dispatch)
    def _update_alert_properties(self, action, lines_attacked, subs_attacked):
        """Update the alert-related state vectors (time since alert/attack,
        alert durations, attack-under-alert) from the agent action and the
        opponent attack of this step. No-op when the env has no alertable line.
        """
        # update the environment with the alert information from the
        # action (if env supports it)
        if type(self).dim_alerts == 0:
            return
        self._last_alert[:] = action.raise_alert
        # -1 acts as a sentinel ("never happened") in the time counters below
        self._time_since_last_alert[~self._last_alert & (self._time_since_last_alert != -1)] += 1
        self._time_since_last_alert[self._last_alert] = 0
        self._alert_duration[self._last_alert] += 1
        self._alert_duration[~self._last_alert] = 0
        self._total_number_of_alert += self._last_alert.sum()
        if lines_attacked is not None:
            # restrict the attack mask to the alertable lines only
            lines_attacked_al = lines_attacked[type(self).alertable_line_ids]
            mask_first_ts_attack = lines_attacked_al & (~self._is_already_attacked)
            self._time_since_last_attack[mask_first_ts_attack] = 0
            self._time_since_last_attack[~mask_first_ts_attack & (self._time_since_last_attack != -1)] += 1
            # update the time already attacked
            # NOTE(review): the next assignment is immediately overwritten by the
            # following line on the same mask; it looks like dead code (or the
            # mask was meant to be inverted) -- to be confirmed
            self._is_already_attacked[lines_attacked_al] = False
            self._is_already_attacked[lines_attacked_al] = True
        else:
            # no attack this step: counters keep increasing, nothing is "already attacked"
            self._time_since_last_attack[self._time_since_last_attack != -1] += 1
            self._is_already_attacked[:] = False
        # +1 if a line attacked this very step was under alert, -1 if it was not
        mask_new_attack = self._time_since_last_attack == 0
        self._attack_under_alert[mask_new_attack] = 2 * self._last_alert[mask_new_attack] - 1
        mask_attack_too_old = self._time_since_last_attack > self._parameters.ALERT_TIME_WINDOW
        self._attack_under_alert[mask_attack_too_old] = 0
        # TODO more complicated (will do it in update_after_reward)
        # self._was_alert_used_after_attack[:] = XXX
        # TODO after alert budget will be implemented !
        # self._is_alert_illegal
    def _aux_register_env_converged(self, disc_lines, action, init_line_status, new_p):
        """Update all time-dependent environment state once the powerflow converged.

        This updates thermal limits, overflow counters, line / substation
        cooldowns, generator active production, line status and finally builds
        the new observation (which must be done last).
        """
        beg_res = time.perf_counter()
        self.backend.update_thermal_limit(
            self
        )  # update the thermal limit, for DLR for example
        overflow_lines = self.backend.get_line_overflow()
        # save the current topology as "last" topology (for connected powerlines)
        # and update the state of the disconnected powerline due to cascading failure
        self._backend_action.update_state(disc_lines)
        # one timestep passed, i can maybe reconnect some lines
        self._times_before_line_status_actionable[
            self._times_before_line_status_actionable > 0
        ] -= 1
        # update the vector for lines that have been disconnected
        self._times_before_line_status_actionable[disc_lines >= 0] = int(
            self._nb_ts_reco
        )
        self._update_time_reconnection_hazards_maintenance()
        # for the powerline that are on overflow, increase this time step
        self._timestep_overflow[overflow_lines] += 1
        # set to 0 the number of timestep for lines that are not on overflow
        self._timestep_overflow[~overflow_lines] = 0
        # build the topological action "cooldown"
        aff_lines, aff_subs = action.get_topological_impact(init_line_status)
        if self._max_timestep_line_status_deactivated > 0:
            # i update the cooldown only when this does not impact the line disconnected for the
            # opponent or by maintenance for example
            cond = aff_lines  # powerlines i modified
            # powerlines that are not affected by any other "forced disconnection"
            cond &= (
                self._times_before_line_status_actionable
                < self._max_timestep_line_status_deactivated
            )
            self._times_before_line_status_actionable[
                cond
            ] = self._max_timestep_line_status_deactivated
        if self._max_timestep_topology_deactivated > 0:
            # decrement substation cooldowns, then (re)arm them for affected substations
            self._times_before_topology_actionable[
                self._times_before_topology_actionable > 0
            ] -= 1
            self._times_before_topology_actionable[
                aff_subs
            ] = self._max_timestep_topology_deactivated
        # extract production active value at this time step (should be independent of action class)
        self._gen_activeprod_t[:], *_ = self.backend.generators_info()
        # problem with the gen_activeprod_t above, is that the slack bus absorbs alone all the losses
        # of the system. So basically, when it's too high (higher than the ramp) it can
        # mess up the rest of the environment
        self._gen_activeprod_t_redisp[:] = new_p + self._actual_dispatch
        # set the line status
        self._line_status[:] = copy.deepcopy(self.backend.get_line_status())
        # finally, build the observation (it's a different one at each step, we cannot reuse the same one)
        # THIS SHOULD BE DONE AFTER EVERYTHING IS INITIALIZED !
        self.current_obs = self.get_obs()
        # TODO storage: get back the result of the storage ! with the illegal action when a storage unit
        # TODO is non zero and disconnected, this should be ok.
        self._time_extract_obs += time.perf_counter() - beg_res
def _aux_run_pf_after_state_properly_set(
self, action, init_line_status, new_p, except_
):
has_error = True
try:
# compute the next _grid state
beg_pf = time.perf_counter()
disc_lines, detailed_info, conv_ = self.backend.next_grid_state(
env=self, is_dc=self._env_dc
)
self._disc_lines[:] = disc_lines
self._time_powerflow += time.perf_counter() - beg_pf
if conv_ is None:
# everything went well, so i register what is needed
self._aux_register_env_converged(
disc_lines, action, init_line_status, new_p
)
has_error = False
else:
except_.append(conv_)
except Grid2OpException as exc_:
except_.append(exc_)
if self.logger is not None:
self.logger.error(
'Impossible to compute next grid state with error "{}"'.format(exc_)
)
return detailed_info, has_error
    def step(self, action: BaseAction) -> Tuple[BaseObservation, float, bool, dict]:
        """
        Run one timestep of the environment's dynamics. When end of
        episode is reached, you are responsible for calling `reset()`
        to reset this environment's state.
        Accepts an action and returns a tuple (observation, reward, done, info).
        If the :class:`grid2op.BaseAction.BaseAction` is illegal or ambiguous, the step is performed, but the action is
        replaced with a "do nothing" action.
        Parameters
        ----------
        action: :class:`grid2op.Action.Action`
            an action provided by the agent that is applied on the underlying through the backend.
        Returns
        -------
        observation: :class:`grid2op.Observation.Observation`
            agent's observation of the current environment
        reward: ``float``
            amount of reward returned after previous action
        done: ``bool``
            whether the episode has ended, in which case further step() calls will return undefined results
        info: ``dict``
            contains auxiliary diagnostic information (helpful for debugging, and sometimes learning). It is a
            dictionary with keys:
            - "disc_lines": a numpy array (or ``None``) saying, for each powerline if it has been disconnected
                due to overflow (if not disconnected it will be -1, otherwise it will be a
                positive integer: 0 meaning that is one of the cause of the cascading failure, 1 means
                that it is disconnected just after, 2 that it's disconnected just after etc.)
            - "is_illegal" (``bool``) whether the action given as input was illegal
            - "is_ambiguous" (``bool``) whether the action given as input was ambiguous.
            - "is_dispatching_illegal" (``bool``) was the action illegal due to redispatching
            - "is_illegal_reco" (``bool``) was the action illegal due to a powerline reconnection
            - "reason_alarm_illegal" (``None`` or ``Exception``) reason for which the alarm is illegal
              (it's None if no alarm are raised or if the alarm feature is not used)
            - "reason_alert_illegal" (``None`` or ``Exception``) reason for which the alert is illegal
              (it's None if no alert are raised or if the alert feature is not used)
            - "opponent_attack_line" (``np.ndarray``, ``bool``) for each powerline, say if the opponent
              attacked it (``True``) or not (``False``).
            - "opponent_attack_sub" (``np.ndarray``, ``bool``) for each substation, say if the opponent
              attacked it (``True``) or not (``False``).
            - "opponent_attack_duration" (``int``) the duration of the current attack (if any)
            - "exception" (``list`` of :class:`Exceptions.Exceptions.Grid2OpException` if an exception was
              raised or ``[]`` if everything was fine.)
            - "detailed_infos_for_cascading_failures" (optional, only if the backend has been create with
              `detailed_infos_for_cascading_failures=True`) the list of the intermediate steps computed during
              the simulation of the "cascading failures".
            - "rewards": dictionary of all "other_rewards" provided when the env was built.
        Examples
        ---------
        As any openAI gym environment, this is used like:
        .. code-block:: python
            import grid2op
            from grid2op.Agent import RandomAgent
            # I create an environment
            env = grid2op.make()
            # define an agent here, this is an example
            agent = RandomAgent(env.action_space)
            # environment need to be "reset" before usage:
            obs = env.reset()
            reward = env.reward_range[0]
            done = False
            # now run through each steps like this
            while not done:
                action = agent.act(obs, reward, done)
                obs, reward, done, info = env.step(action)
        Notes
        -----
        If the flag `done=True` is raised (*ie* this is the end of the episode) then the observation is NOT properly
        updated and should not be used at all.
        Actually, it will be in a "game over" state (see :class:`grid2op.Observation.BaseObservation.set_game_over`).
        """
        if self.__closed:
            raise EnvError("This environment is closed. You cannot use it anymore.")
        if not self.__is_init:
            raise Grid2OpException(
                "Impossible to make a step with a non initialized backend. Have you called "
                '"env.reset()" after the last game over ?'
            )
        # I did something after calling "env.seed()" which is
        # somehow "env.step()" or "env.reset()"
        self._has_just_been_seeded = False
        # reset the per-step flags / bookkeeping
        has_error = True
        is_done = False
        is_illegal = False
        is_ambiguous = False
        is_illegal_redisp = False
        is_illegal_reco = False
        reason_alarm_illegal = None
        self._is_alarm_illegal = False
        self._is_alarm_used_in_reward = False
        reason_alert_illegal = None
        self._is_alert_illegal = False
        self._is_alert_used_in_reward = False
        except_ = []
        detailed_info = []
        # keep copies of the parts of the action the environment temporarily overwrites
        init_disp = 1.0 * action._redispatch  # dispatching action
        init_alert = None
        if type(self).dim_alerts > 0:
            init_alert = copy.deepcopy(action._raise_alert)
        action_storage_power = 1.0 * action._storage_power  # battery information
        attack_duration = 0
        lines_attacked, subs_attacked = None, None
        conv_ = None
        init_line_status = copy.deepcopy(self.backend.get_line_status())
        self.nb_time_step += 1
        self._disc_lines[:] = -1
        beg_step = time.perf_counter()
        self._last_obs : Optional[BaseObservation] = None
        self._forecasts = None  # force reading the forecast from the time series
        try:
            beg_ = time.perf_counter()
            # 1) ambiguity check: an ambiguous action is replaced by "do nothing"
            ambiguous, except_tmp = action.is_ambiguous()
            if ambiguous:
                # action is replace by do nothing
                action = self._action_space({})
                init_disp = 1.0 * action._redispatch  # dispatching action
                action_storage_power = (
                    1.0 * action._storage_power
                )  # battery information
                is_ambiguous = True
                if type(self).dim_alerts > 0:
                    # keep the alert even if the rest is ambiguous (if alert is non ambiguous)
                    is_ambiguous_alert = isinstance(except_tmp, AmbiguousActionRaiseAlert)
                    if is_ambiguous_alert:
                        # reset the alert
                        init_alert = np.zeros(type(self).dim_alerts, dtype=dt_bool)
                    else:
                        action.raise_alert = init_alert
                except_.append(except_tmp)
            # 2) legality check: an illegal action is also replaced by "do nothing"
            is_legal, reason = self._game_rules(action=action, env=self)
            if not is_legal:
                # action is replace by do nothing
                action = self._action_space({})
                init_disp = 1.0 * action._redispatch  # dispatching action
                action_storage_power = (
                    1.0 * action._storage_power
                )  # battery information
                except_.append(reason)
                if type(self).dim_alerts > 0:
                    # keep the alert even if the rest is illegal
                    action.raise_alert = init_alert
                is_illegal = True
            # 3) alarm / attention budget (only for "zonal" assistant warning)
            if self._has_attention_budget:
                if type(self).assistant_warning_type == "zonal":
                    # this feature is implemented, so i do it
                    reason_alarm_illegal = self._attention_budget.register_action(
                        self, action, is_illegal, is_ambiguous
                    )
                    self._is_alarm_illegal = reason_alarm_illegal is not None
            # get the modification of generator active setpoint from the environment
            self._env_modification, prod_v_chronics = self._update_actions()
            self._env_modification._single_act = (
                False  # because it absorbs all redispatching actions
            )
            new_p = self._get_new_prod_setpoint(action)
            new_p_th = 1.0 * new_p
            # storage unit
            if self.n_storage > 0:
                # limiting the storage units is done in `_aux_apply_redisp`
                # this only ensure the Emin / Emax and all the actions
                self._compute_storage(action_storage_power)
            # curtailment (does not attempt to "limit" the curtailment to make sure
            # it is feasible)
            self._gen_before_curtailment[self.gen_renewable] = new_p[self.gen_renewable]
            gen_curtailed = self._aux_handle_curtailment_without_limit(action, new_p)
            # 4) redispatching (also limits the curtailment / storage if needed)
            beg__redisp = time.perf_counter()
            if self.redispatching_unit_commitment_availble or self.n_storage > 0.0:
                # this computes the "optimal" redispatching
                # and it is also in this function that the limiting of the curtailment / storage actions
                # is perform to make the state "feasible"
                res_disp = self._aux_apply_redisp(
                    action, new_p, new_p_th, gen_curtailed, except_
                )
                action, is_illegal_redisp, is_illegal_reco, is_done = res_disp
            self._time_redisp += time.perf_counter() - beg__redisp
            if not is_done:
                # 5) build the backend action, handle voltages and the opponent
                self._aux_update_backend_action(action, action_storage_power, init_disp)
                # now get the new generator voltage setpoint
                voltage_control_act = self._voltage_control(action, prod_v_chronics)
                self._backend_action += voltage_control_act
                # handle the opponent here
                tick = time.perf_counter()
                lines_attacked, subs_attacked, attack_duration = self._aux_handle_attack(
                    action
                )
                tock = time.perf_counter()
                self._time_opponent += tock - tick
                self._time_create_bk_act += tock - beg_
                self.backend.apply_action(self._backend_action)
                self._time_apply_act += time.perf_counter() - beg_
                # now it's time to run the powerflow properly
                # and to update the time dependant properties
                self._update_alert_properties(action, lines_attacked, subs_attacked)
                detailed_info, has_error = self._aux_run_pf_after_state_properly_set(
                    action, init_line_status, new_p, except_
                )
            else:
                has_error = True
        except StopIteration:
            # episode is over
            is_done = True
        self._backend_action.reset()
        end_step = time.perf_counter()
        self._time_step += end_step - beg_step
        if conv_ is not None:
            except_.append(conv_)
        # 6) build the "info" dict returned to the agent
        self.infos = {
            "disc_lines": self._disc_lines,
            "is_illegal": is_illegal,
            "is_ambiguous": is_ambiguous,
            "is_dispatching_illegal": is_illegal_redisp,
            "is_illegal_reco": is_illegal_reco,
            "reason_alarm_illegal": reason_alarm_illegal,
            "reason_alert_illegal": reason_alert_illegal,
            "opponent_attack_line": lines_attacked,
            "opponent_attack_sub": subs_attacked,
            "opponent_attack_duration": attack_duration,
            "exception": except_,
        }
        if self.backend.detailed_infos_for_cascading_failures:
            self.infos["detailed_infos_for_cascading_failures"] = detailed_info
        # 7) compute the reward(s) and finalize the (possibly "game over") observation
        self.done = self._is_done(has_error, is_done)
        self.current_reward, other_reward = self._get_reward(
            action,
            has_error,
            self.done,  # is_done
            is_illegal or is_illegal_redisp or is_illegal_reco,
            is_ambiguous,
        )
        self.infos["rewards"] = other_reward
        if has_error and self.current_obs is not None:
            # forward to the observation if an alarm is used or not
            if hasattr(self._reward_helper.template_reward, "has_alarm_component"):
                self._is_alarm_used_in_reward = (
                    self._reward_helper.template_reward.is_alarm_used
                )
            if hasattr(self._reward_helper.template_reward, "has_alert_component"):
                self._is_alert_used_in_reward = (
                    self._reward_helper.template_reward.is_alert_used
                )
            self.current_obs = self.get_obs(_update_state=False)
            # update the observation so when it's plotted everything is "shutdown"
            self.current_obs.set_game_over(self)
        if self._update_obs_after_reward and self.current_obs is not None:
            # transfer some information computed in the reward into the obs (if any)
            self.current_obs.update_after_reward(self)
        # TODO documentation on all the possible way to be illegal now
        if self.done:
            self.__is_init = False
        return self.current_obs, self.current_reward, self.done, self.infos
def _get_reward(self, action, has_error, is_done, is_illegal, is_ambiguous):
res = self._reward_helper(
action, self, has_error, is_done, is_illegal, is_ambiguous
)
other_rewards = {
k: v(action, self, has_error, is_done, is_illegal, is_ambiguous)
for k, v in self.other_rewards.items()
}
return res, other_rewards
    def get_reward_instance(self):
        """
        INTERNAL
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        Returns the instance of the object that is used to compute the reward.
        """
        if self.__closed:
            raise EnvError("This environment is closed, you cannot use it.")
        # the concrete reward object wrapped by the reward helper
        return self._reward_helper.template_reward
def _is_done(self, has_error, is_done):
no_more_data = self.chronics_handler.done()
return has_error or is_done or no_more_data
    def _reset_vectors_and_timings(self):
        """
        INTERNAL
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        Maintenance are not reset, otherwise the data are not read properly (skip the first time step)
        """
        # re-read all the "live" parameters from self._parameters
        self._no_overflow_disconnection = self._parameters.NO_OVERFLOW_DISCONNECTION
        self._timestep_overflow[:] = 0
        self._nb_timestep_overflow_allowed[
            :
        ] = self._parameters.NB_TIMESTEP_OVERFLOW_ALLOWED
        self.nb_time_step = 0  # to have the first step at 0
        self._hard_overflow_threshold = self._parameters.HARD_OVERFLOW_THRESHOLD
        self._env_dc = self._parameters.ENV_DC
        # clear the line / substation cooldowns
        self._times_before_line_status_actionable[:] = 0
        self._max_timestep_line_status_deactivated = (
            self._parameters.NB_TIMESTEP_COOLDOWN_LINE
        )
        self._times_before_topology_actionable[:] = 0
        self._max_timestep_topology_deactivated = (
            self._parameters.NB_TIMESTEP_COOLDOWN_SUB
        )
        # reset timings
        self._time_apply_act = dt_float(0.0)
        self._time_powerflow = dt_float(0.0)
        self._time_extract_obs = dt_float(0.0)
        self._time_opponent = dt_float(0.0)
        self._time_create_bk_act = dt_float(0.0)
        self._time_redisp = dt_float(0.0)
        self._time_step = dt_float(0.0)
        if self._has_attention_budget:
            self._attention_budget.reset()
        # reward and others
        self.current_reward = self.reward_range[0]
        self.done = False
    def _reset_maintenance(self):
        # -1 means "no maintenance scheduled" for the next-maintenance counters,
        # and a 0 duration means "no ongoing maintenance"
        self._time_next_maintenance[:] = -1
        self._duration_next_maintenance[:] = 0
def __enter__(self):
"""
Support *with-statement* for the environment.
Examples
--------
.. code-block:: python
import grid2op
import grid2op.BaseAgent
with grid2op.make() as env:
agent = grid2op.BaseAgent.DoNothingAgent(env.action_space)
act = env.action_space()
obs, r, done, info = env.step(act)
act = agent.act(obs, r, info)
obs, r, done, info = env.step(act)
"""
return self
def __exit__(self, *args):
"""
Support *with-statement* for the environment.
"""
self.close()
# propagate exception
return False
def close(self):
"""close an environment: this will attempt to free as much memory as possible.
Note that after an environment is closed, you will not be able to use anymore.
Any attempt to use a closed environment might result in non deterministic behaviour.
"""
if self.__closed:
raise EnvError(
f"This environment {id(self)} {self} is closed already, you cannot close it a second time."
)
# todo there might be some side effect
if hasattr(self, "_viewer") and self._viewer is not None:
self._viewer = None
self.viewer_fig = None
if hasattr(self, "backend") and self.backend is not None:
self.backend.close()
del self.backend
self.backend :Backend = None
if hasattr(self, "_observation_space") and self._observation_space is not None:
# do not forget to close the backend of the observation (used for simulate)
self._observation_space.close()
self._observation_space = None
if hasattr(self, "_voltage_controler") and self._voltage_controler is not None:
# in case there is a backend in the voltage controler
self._voltage_controler.close()
self._voltage_controler = None
if hasattr(self, "_oppSpace") and self._oppSpace is not None:
# in case there is a backend in the opponent space
self._oppSpace.close()
self._oppSpace = None
if hasattr(self, "_helper_action_env") and self._helper_action_env is not None:
# close the action helper
self._helper_action_env.close()
self._helper_action_env = None
if hasattr(self, "_action_space") and self._action_space is not None:
# close the action space if needed
self._action_space.close()
self._action_space = None
if hasattr(self, "_reward_helper") and self._reward_helper is not None:
# close the reward if needed
self._reward_helper.close()
self._reward_helper = None
if hasattr(self, "other_rewards") and self.other_rewards is not None:
for el, reward in self.other_rewards.items():
# close the "other rewards"
reward.close()
self.other_rewards = None
self.backend : Backend = None
self.__is_init = False
self.__closed = True
# clean all the attributes
for attr_nm in [
"logger",
"_init_grid_path",
"_DEBUG",
"_complete_action_cls",
"_parameters",
"with_forecast",
"_time_apply_act",
"_time_powerflow",
"_time_extract_obs",
"_time_create_bk_act",
"_time_opponent",
"_time_redisp",
"_time_step",
"_epsilon_poly",
"_helper_action_class",
"_helper_observation_class",
"time_stamp",
"nb_time_step",
"delta_time_seconds",
"current_obs",
"_line_status",
"_ignore_min_up_down_times",
"_forbid_dispatch_off",
"_no_overflow_disconnection",
"_timestep_overflow",
"_nb_timestep_overflow_allowed",
"_hard_overflow_threshold",
"_times_before_line_status_actionable",
"_max_timestep_line_status_deactivated",
"_times_before_topology_actionable",
"_nb_ts_reco",
"_time_next_maintenance",
"_duration_next_maintenance",
"_hazard_duration",
"_env_dc",
"_target_dispatch",
"_actual_dispatch",
"_gen_uptime",
"_gen_downtime",
"_gen_activeprod_t",
"_gen_activeprod_t_redisp",
"_thermal_limit_a",
"_disc_lines",
"_injection",
"_maintenance",
"_hazards",
"_env_modification",
"done",
"current_reward",
"_helper_action_env",
"chronics_handler",
"_game_rules",
"_action_space",
"_rewardClass",
"_actionClass",
"_observationClass",
"_legalActClass",
"_observation_space",
"_names_chronics_to_backend",
"_reward_helper",
"reward_range",
"_viewer",
"viewer_fig",
"other_rewards",
"_opponent_action_class",
"_opponent_class",
"_opponent_init_budget",
"_opponent_attack_duration",
"_opponent_attack_cooldown",
"_opponent_budget_per_ts",
"_kwargs_opponent",
"_opponent_budget_class",
"_opponent_action_space",
"_compute_opp_budget",
"_opponent",
"_oppSpace",
"_voltagecontrolerClass",
"_voltage_controler",
"_backend_action_class",
"_backend_action",
"backend",
"debug_dispatch",
# "__new_param", "__new_forecast_param", "__new_reward_func",
"_storage_current_charge",
"_storage_previous_charge",
"_action_storage",
"_amount_storage",
"_amount_storage_prev",
"_storage_power",
"_storage_power_prev",
"_limit_curtailment",
"_limit_curtailment_prev",
"_gen_before_curtailment",
"_sum_curtailment_mw",
"_sum_curtailment_mw_prev",
"_has_attention_budget",
"_attentiong_budget",
"_attention_budget_cls",
"_is_alarm_illegal",
"_is_alarm_used_in_reward",
"_is_alert_illegal",
"_is_alert_used_in_reward",
"_kwargs_attention_budget",
"_limited_before",
]:
if hasattr(self, attr_nm):
delattr(self, attr_nm)
setattr(self, attr_nm, None)
def attach_layout(self, grid_layout):
"""
Compare to the method of the base class, this one performs a check.
This method must be called after initialization.
Parameters
----------
grid_layout: ``dict``
The layout of the grid (*i.e* the coordinates (x,y) of all substations). The keys
should be the substation names, and the values a tuple (with two float) representing
the coordinate of the substation.
Examples
---------
Here is an example on how to attach a layout for an environment:
.. code-block:: python
import grid2op
# create the environment
env = grid2op.make()
# assign coordinates (0., 0.) to all substations (this is a dummy thing to do here!)
layout = {sub_name: (0., 0.) for sub_name in env.name_sub}
env.attach_layout(layout)
"""
if self.__closed:
raise EnvError("This environment is closed, you cannot use it.")
if isinstance(grid_layout, dict):
pass
elif isinstance(grid_layout, list):
grid_layout = {k: v for k, v in zip(self.name_sub, grid_layout)}
else:
raise EnvError(
"Attempt to set a layout from something different than a dictionary or a list. "
"This is for now not supported."
)
if self.__is_init:
res = {}
for el in self.name_sub:
if not el in grid_layout:
raise EnvError(
'The substation "{}" is not present in grid_layout while in the powergrid.'
"".format(el)
)
tmp = grid_layout[el]
try:
x, y = tmp
x = dt_float(x)
y = dt_float(y)
res[el] = (x, y)
except Exception as e_:
raise EnvError(
'attach_layout: impossible to convert the value of "{}" to a pair of float '
'that will be used the grid layout. The error is: "{}"'
"".format(el, e_)
)
super().attach_layout(res)
if self._action_space is not None:
self._action_space.attach_layout(res)
if self._helper_action_env is not None:
self._helper_action_env.attach_layout(res)
if self._observation_space is not None:
self._observation_space.attach_layout(res)
if self._voltage_controler is not None:
self._voltage_controler.attach_layout(res)
if self._opponent_action_space is not None:
self._opponent_action_space.attach_layout(res)
def fast_forward_chronics(self, nb_timestep):
"""
This method allows you to skip some time step at the beginning of the chronics.
This is usefull at the beginning of the training, if you want your agent to learn on more diverse scenarios.
Indeed, the data provided in the chronics usually starts always at the same date time (for example Jan 1st at
00:00). This can lead to suboptimal exploration, as during this phase, only a few time steps are managed by
the agent, so in general these few time steps will correspond to grid state around Jan 1st at 00:00.
Parameters
----------
nb_timestep: ``int``
Number of time step to "fast forward"
Examples
---------
This can be used like this:
.. code-block:: python
import grid2op
# create the environment
env = grid2op.make()
# skip the first 150 steps of the chronics
env.fast_forward_chronics(150)
done = env.is_done
if not done:
obs = env.get_obs()
# do something
else:
# there was a "game over"
# you need to reset the env (which will "cancel" the fast_forward)
pass
# do something else
Notes
-----
This method can set the state of the environment in a 'game over' state (`done=True`) for example if the
chronics last `xxx` time steps and you ask to "fast foward" more than `xxx` steps. This is why we advise to
check the state of the environment after the call to this method if you use it (see the "Examples" paragaph)
"""
if self.__closed:
raise EnvError("This environment is closed, you cannot use it.")
if not self.__is_init:
raise EnvError("This environment is not intialized. "
"Have you called `env.reset()` after last game over ?")
nb_timestep = int(nb_timestep)
# Go to the timestep requested minus one
nb_timestep = max(1, nb_timestep - 1)
self.chronics_handler.fast_forward(nb_timestep)
self.nb_time_step += nb_timestep
# Update the timing vectors
min_time_line_reco = np.zeros(self.n_line, dtype=dt_int)
min_time_topo = np.zeros(self.n_sub, dtype=dt_int)
ff_time_line_act = self._times_before_line_status_actionable - nb_timestep
ff_time_topo_act = self._times_before_topology_actionable - nb_timestep
self._times_before_line_status_actionable[:] = np.maximum(
ff_time_line_act, min_time_line_reco
)
self._times_before_topology_actionable[:] = np.maximum(
ff_time_topo_act, min_time_topo
)
# Update to the fast forward state using a do nothing action
self.step(self._action_space({}))
def get_current_line_status(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
prefer using :attr:`grid2op.Observation.BaseObservation.line_status`
This method allows to retrieve the line status.
"""
if self.current_obs is not None:
powerline_status = self._line_status
else:
# at first time step, every powerline is connected
powerline_status = np.full(self.n_line, fill_value=True, dtype=dt_bool)
# powerline_status = self._line_status
return powerline_status
@property
def parameters(self):
"""
Return a deepcopy of the parameters used by the environment
It is a deepcopy, so modifying it will have absolutely no effect on the environment.
If you want to change the parameters of an environment, please use either
:func:`grid2op.Environment.BaseEnv.change_parameters` to change the parameters of this environment or
:func:`grid2op.Environment.BaseEnv.change_forecast_parameters` to change the parameter of the environment
used by :func:`grid2op.Observation.BaseObservation.simulate` or
:func:`grid2op.Observation.BaseObservation.get_forecast_env`
.. danger::
To modify the environment parameters you need to do:
.. code-block:: python
params = env.parameters
params.WHATEVER = NEW_VALUE
env.change_parameters(params)
env.reset()
If you simply do:
.. code-block:: python
env.params.WHATEVER = NEW_VALUE
This will have absolutely no impact.
"""
if self.__closed:
raise EnvError("This environment is closed, you cannot use it.")
res = copy.deepcopy(self._parameters)
# res.read_only = True # TODO at some point !
return res
    @parameters.setter
    def parameters(self, value):
        # the parameters are read-only through this property: they can only be
        # changed via `env.change_parameters(...)` (effective after the next reset)
        raise RuntimeError(
            "Use the env.change_parameters(new_parameters) to change the parameters. "
            "NB: it will only have an effect AFTER the env is reset."
        )
def change_reward(self, new_reward_func):
"""
Change the reward function used for the environment.
TODO examples !
Parameters
----------
new_reward_func:
Either an object of class BaseReward, or a subclass of BaseReward: the new reward function to use
Notes
------
This only affects the environment AFTER `env.reset()` has been called.
"""
if self.__closed:
raise EnvError("This environment is closed, you cannot use it.")
is_ok = isinstance(new_reward_func, BaseReward) or issubclass(
new_reward_func, BaseReward
)
if not is_ok:
raise EnvError(
f"Impossible to change the reward function with type {type(new_reward_func)}. "
f"It should be an object from a class that inherit grid2op.Reward.BaseReward "
f"or a subclass of grid2op.Reward.BaseReward"
)
self.__new_reward_func = new_reward_func
def _aux_gen_classes(self, cls, sys_path):
if not isinstance(cls, type):
raise RuntimeError(f"cls should be a type and not an object !: {cls}")
if not issubclass(cls, GridObjects):
raise RuntimeError(f"cls should inherit from GridObjects: {cls}")
from pathlib import Path
path_env = cls._PATH_ENV
cls._PATH_ENV = str(Path(self.get_path_env()).as_posix())
res = cls._get_full_cls_str()
cls._PATH_ENV = path_env
output_file = os.path.join(sys_path, f"{cls.__name__}_file.py")
if not os.path.exists(output_file):
# if the file is not already saved, i save it and add it to the __init__ file
with open(output_file, "w", encoding="utf-8") as f:
f.write(res)
return f"\nfrom .{cls.__name__}_file import {cls.__name__}"
else:
# otherwise i do nothing
return ""
    def generate_classes(self, _guard=None, _is_base_env__=True, sys_path=None):
        """
        Use with care, but can be incredibly useful !

        If you get into trouble like :

        .. code-block:: none

            AttributeError: Can't get attribute 'ActionSpace_l2rpn_icaps_2021_small'
            on <module 'grid2op.Space.GridObjects' from
            /home/user/Documents/grid2op_dev/grid2op/Space/GridObjects.py'>

        You might want to call this function and that MIGHT solve your problem.

        This function will create a subdirectory ino the env directory,
        that will be accessed when loading the classes
        used for the environment.

        The default behaviour is to build the class on the fly which can cause some
        issues when using `pickle` or `multiprocessing` for example.

        Parameters
        ----------
        _guard:
            Internal, do not use: must stay ``None`` (prevents positional misuse).
        _is_base_env__: ``bool``
            Internal, do not use: ``True`` for the top-level environment,
            ``False`` when called recursively for the observation env.
        sys_path: ``str``
            Internal, do not use: target directory, only provided on the
            recursive (obs env) call.

        Examples
        --------

        Here is how to best leverage this functionality:

        First step, generated the classes once and for all.

        .. warning::
            You need to redo this step each time
            you customize the environment. This customization includes, but is not limited to:

            - change the backend type: `grid2op.make(..., backend=...)`
            - change the action class: `grid2op.make(..., action_class=...)`
            - change observation class: `grid2op.make(..., observation_class=...)`
            - change the `volagecontroler_class`
            - change the `grid_path`
            - change the `opponent_action_class`
            - etc.

        .. code-block:: python

            import grid2op
            env_name = "l2rpn_case14_sandbox"  # or any other name

            env = grid2op.make(env_name, ...)  # again: redo this step each time you customize "..."
            # for example if you change the `action_class` or the `backend` etc.

            env.generate_classes()

        Then, next time you want to use the SAME environment, you can do:

        .. code-block:: python

            import grid2op
            env_name = SAME NAME AS ABOVE
            env = grid2op.make(env_name,
                               experimental_read_from_local_dir=True,
                               SAME ENV CUSTOMIZATION AS ABOVE)

        And it should (this is experimerimental for now, and we expect feedback on the matter) solve
        the issues involving pickle.

        Again, if you customize your environment (see above for more information) you'll have to redo this step !
        """
        if self.__closed:
            # nothing to generate for a closed environment
            return
        # create the folder
        if _guard is not None:
            raise RuntimeError("use `env.generate_classes()` with no arguments !")
        if type(self)._PATH_ENV is not None:
            # classes are already read from disk: generating them again on top
            # of themselves makes no sense
            raise RuntimeError(
                "This function should only be called ONCE without specifying that the classes "
                "need to be read from disk (class attribute type(self)._PATH_ENV should be None)"
            )
        import shutil
        if sys_path is None:
            # only the top-level env may decide where the classes are written;
            # a sub env (eg the obs env) must receive sys_path from its caller
            if not _is_base_env__:
                raise RuntimeError("Cannot generate file from a \"sub env\" "
                                   "(eg no the top level env) if I don't know the path of "
                                   "the top level environment.")
            sys_path = os.path.join(self.get_path_env(), "_grid2op_classes")
        if _is_base_env__:
            # wipe any previously generated classes and start fresh
            if os.path.exists(sys_path):
                shutil.rmtree(sys_path)
            os.mkdir(sys_path)
        # initialized the "__init__" file
        _init_txt = ""
        mode = "w"
        # NOTE on the write order: the base env calls the obs env's
        # generate_classes() below BEFORE writing its own __init__.py. The obs
        # env therefore writes the file first (mode "w", with the copyright
        # header), and the base env then appends its own imports (mode "a").
        if not _is_base_env__:
            _init_txt = BASE_TXT_COPYRIGHT + _init_txt
        else:
            # i am apppending to the __init__ file in case of obs_env
            mode = "a"
        # generate the classes (each call writes one <Class>_file.py and
        # returns the import line to add to the __init__ file)
        _init_txt += self._aux_gen_classes(type(self), sys_path)
        _init_txt += self._aux_gen_classes(type(self.backend), sys_path)
        _init_txt += self._aux_gen_classes(
            self.backend._complete_action_class, sys_path
        )
        _init_txt += self._aux_gen_classes(self._backend_action_class, sys_path)
        _init_txt += self._aux_gen_classes(type(self.action_space), sys_path)
        _init_txt += self._aux_gen_classes(self._actionClass, sys_path)
        _init_txt += self._aux_gen_classes(self._complete_action_cls, sys_path)
        _init_txt += self._aux_gen_classes(type(self.observation_space), sys_path)
        _init_txt += self._aux_gen_classes(self._observationClass, sys_path)
        _init_txt += self._aux_gen_classes(
            self._opponent_action_space.subtype, sys_path
        )
        # now do the same for the obs_env
        if _is_base_env__:
            _init_txt += self._aux_gen_classes(
                self._voltage_controler.action_space.subtype, sys_path
            )
            # temporarily give the obs env the same grid path so its classes
            # are generated for the same grid, then restore it
            init_grid_tmp = self._observation_space.obs_env._init_grid_path
            self._observation_space.obs_env._init_grid_path = self._init_grid_path
            self._observation_space.obs_env.generate_classes(_is_base_env__=False, sys_path=sys_path)
            self._observation_space.obs_env._init_grid_path = init_grid_tmp
        # now write the __init__ file
        _init_txt += "\n"
        with open(os.path.join(sys_path, "__init__.py"), mode, encoding="utf-8") as f:
            f.write(_init_txt)
def __del__(self):
"""when the environment is garbage collected, free all the memory, including cross reference to itself in the observation space."""
if hasattr(self, "_BaseEnv__closed") and not self.__closed:
self.close()
    def _update_vector_with_timestep(self, horizon, is_overflow):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        update the value of the "time dependant" attributes, used mainly for the "_ObsEnv" (simulate) or
        the "Forecasted env" (obs.get_forecast_env())

        Parameters
        ----------
        horizon: ``int``
            Number of steps in the future this env is fast-forwarded to.
        is_overflow:
            Boolean mask (one entry per powerline) of lines currently in
            overflow.

        Returns
        -------
        ``(still_in_maintenance, reconnected, first_ts_maintenance)`` boolean
        masks over the powerlines at the target timestep.
        """
        cls = type(self)
        # update the cooldowns: `horizon - 1` steps will have elapsed when the
        # forecasted step is taken, floor at 0
        self._times_before_line_status_actionable[:] = np.maximum(
            self._times_before_line_status_actionable - (horizon - 1), 0
        )
        self._times_before_topology_actionable[:] = np.maximum(
            self._times_before_topology_actionable - (horizon - 1), 0
        )
        # update the maintenance
        # keep copies of the original schedule (1 * arr copies the array)
        # because the arrays are modified in place below
        tnm_orig = 1 * self._time_next_maintenance
        dnm_orig = 1 * self._duration_next_maintenance
        has_maint = self._time_next_maintenance != -1  # -1 means "no maintenance planned"
        reconnected = np.full(cls.n_line, fill_value=False)
        maint_started = np.full(cls.n_line, fill_value=False)
        maint_over = np.full(cls.n_line, fill_value=False)
        # maintenance that begins at or before the horizon
        maint_started[has_maint] = (tnm_orig[has_maint] <= horizon)
        # maintenance fully finished strictly before / at the horizon
        maint_over[has_maint] = (tnm_orig[has_maint] + dnm_orig[has_maint]
                                 <= horizon)
        # maintenance that ends exactly at the horizon (line comes back)
        reconnected[has_maint] = tnm_orig[has_maint] + dnm_orig[has_maint] == horizon
        # maintenance that starts exactly at the horizon
        first_ts_maintenance = tnm_orig == horizon
        still_in_maintenance = maint_started & (~maint_over) & (~first_ts_maintenance)
        # count down time next maintenance
        self._time_next_maintenance[:] = np.maximum(
            self._time_next_maintenance - horizon, -1
        )
        # powerline that are still in maintenance at this time step
        self._time_next_maintenance[still_in_maintenance] = 0
        self._duration_next_maintenance[still_in_maintenance] -= (horizon - tnm_orig[still_in_maintenance])
        # powerline that will be in maintenance at this time step
        self._time_next_maintenance[first_ts_maintenance] = 0
        # powerline whose maintenance is over (or just over) at this time
        # step: clear the schedule entirely
        self._time_next_maintenance[reconnected | maint_over] = -1
        self._duration_next_maintenance[reconnected | maint_over] = 0
        # soft overflow
        # this is tricky here because I have no model to predict the future...
        # As i cannot do better, I simply do "if I am in overflow now, i will be later"
        self._timestep_overflow[is_overflow] += (horizon - 1)
        return still_in_maintenance, reconnected, first_ts_maintenance
    def _reset_to_orig_state(self, obs):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        reset this "environment" to the state it should be

        update the value of the "time dependant" attributes, used mainly for the "_ObsEnv" (simulate) or
        the "Forecasted env" (obs.get_forecast_env())

        Parameters
        ----------
        obs: :class:`grid2op.Observation.BaseObservation`
            The observation whose (public attributes and
            ``_env_internal_params`` dict) state is copied into this env.
        """
        self.backend.set_thermal_limit(obs._thermal_limit)
        # opponent state (only present when an opponent is used)
        if "opp_space_state" in obs._env_internal_params:
            self._oppSpace._set_state(obs._env_internal_params["opp_space_state"],
                                      obs._env_internal_params["opp_state"])
        # storage unit
        self._storage_current_charge[:] = obs.storage_charge
        self._storage_previous_charge[:] = obs._env_internal_params["_storage_previous_charge"]
        self._action_storage[:] = obs.storage_power_target
        self._storage_power[:] = obs.storage_power
        self._amount_storage = obs._env_internal_params["_amount_storage"]
        self._amount_storage_prev = obs._env_internal_params["_amount_storage_prev"]
        # curtailment
        self._limit_curtailment[:] = obs.curtailment_limit
        self._gen_before_curtailment[:] = obs.gen_p_before_curtail
        self._sum_curtailment_mw = obs._env_internal_params["_sum_curtailment_mw"]
        self._sum_curtailment_mw_prev = obs._env_internal_params["_sum_curtailment_mw_prev"]
        # line status (stored as int in the internal params, 1 == connected)
        self._line_status[:] = obs._env_internal_params["_line_status_env"] == 1
        # attention budget
        if self._has_attention_budget:
            self._attention_budget.set_state(obs._env_internal_params["_attention_budget_state"])
        # cooldown
        self._times_before_line_status_actionable[
            :
        ] = obs.time_before_cooldown_line
        self._times_before_topology_actionable[
            :
        ] = obs.time_before_cooldown_sub
        # maintenance
        self._time_next_maintenance[:] = obs.time_next_maintenance
        self._duration_next_maintenance[:] = obs.duration_next_maintenance
        # redisp
        self._target_dispatch[:] = obs.target_dispatch
        self._actual_dispatch[:] = obs.actual_dispatch
        self._already_modified_gen[:] = obs._env_internal_params["_already_modified_gen"]
        self._gen_activeprod_t[:] = obs._env_internal_params["_gen_activeprod_t"]
        self._gen_activeprod_t_redisp[:] = obs._env_internal_params["_gen_activeprod_t_redisp"]
        # current step
        self.nb_time_step = obs.current_step
        # obs.delta_time is in minutes, the env stores seconds
        self.delta_time_seconds = 60. * obs.delta_time
        # soft overflow
        self._timestep_overflow[:] = obs.timestep_overflow
def forecasts(self):
# ensure that the "env.chronics_handler.forecasts" is called at most once per step
# this should NOT be called is self.deactive_forecast is true
if not self.with_forecast:
raise Grid2OpException("Attempt to retrieve the forecasts when they are not available.")
if self._forecasts is None:
self._forecasts = self.chronics_handler.forecasts()
return self._forecasts
@staticmethod
def _check_rules_correct(legalActClass):
if isinstance(legalActClass, type):
# raise Grid2OpException(
# 'Parameter "legalActClass" used to build the Environment should be a type '
# "(a class) and not an object (an instance of a class). "
# 'It is currently "{}"'.format(type(legalActClass))
# )
if not issubclass(legalActClass, BaseRules):
raise Grid2OpException(
'Parameter "legalActClass" used to build the Environment should derived form the '
'grid2op.BaseRules class, type provided is "{}"'.format(
type(legalActClass)
)
)
else:
if not isinstance(legalActClass, BaseRules):
raise Grid2OpException(
'Parameter "legalActClass" used to build the Environment should be an instance of the '
'grid2op.BaseRules class, type provided is "{}"'.format(
type(legalActClass)
)
) | 176,434 | 42.001462 | 140 | py |
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from multiprocessing import Process, Pipe, Array
import numpy as np
import warnings
import time
from grid2op.Exceptions import EnvError
from grid2op.dtypes import dt_int
from grid2op.Exceptions import Grid2OpException, MultiEnvException
from grid2op.Space import GridObjects
from grid2op.Environment import Environment
from grid2op.Action import BaseAction
class RemoteEnv(Process):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    This class represent the environment that is executed on a remote process.

    Note that the environment is only created in the subprocess, and is not available in the main process. Once created
    it is not possible to access anything directly from it in the main process, where the BaseAgent lives. Only the
    :class:`grid2op.Observation.BaseObservation` are forwarded to the agent.

    Communication with the parent process goes through ``self.remote`` (a
    multiprocessing ``Pipe`` end): the parent sends ``(cmd, data)`` tuples and
    this process answers on the same pipe (see :func:`RemoteEnv.run` for the
    command protocol).
    """

    def __init__(
        self,
        env_params,
        p_id,
        remote,
        parent_remote,
        seed,
        logger=None,
        name=None,
        return_info=True,
        _obs_to_vect=True,
    ):
        # env_params: kwargs used to (re)build the Environment in the subprocess
        # p_id: integer id of this worker (used for logger naming)
        # remote / parent_remote: the two ends of the Pipe to the parent
        # seed: seed of this worker's private numpy PRNG
        # return_info: whether step() answers include the "info" dict
        # _obs_to_vect: whether observations are sent back as numpy vectors
        Process.__init__(self, group=None, target=None, name=name)
        if logger is None:
            import logging

            # no logger provided: use a disabled one so logging calls are no-ops
            self.logger = logging.getLogger(__name__)
            self.logger.disabled = True
        else:
            self.logger = logger.getChild(f"grid2op_RemoteEnv_{p_id}")
        self.p_id = p_id
        # backend and env are only built inside the subprocess (see init_env)
        self.backend = None
        self.env = None
        self.env_params = env_params
        self.remote = remote
        self.parent_remote = parent_remote
        self.seed_used = seed
        self.space_prng = None
        # number of steps to (randomly) fast forward after each reset, 0 = none
        self.fast_forward = 0
        self.all_seeds = []
        # internal do not modify  # Do not work (in the sens that is it less efficient)
        self.return_info = return_info
        self._obs_to_vect = _obs_to_vect
        # cumulated wall-clock time spent in env.step (seconds)
        self._comp_time = 0.0

    def init_env(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Initialize the environment  that will perform all the computation of this process.
        Remember the environment only lives in this process. It cannot
        be transfer to / from the main process.

        This function also makes sure the chronics are read in different order accross all processes. This is done
        by calling the :func:`grid2op.Chronics.GridValue.shuffle` method. An example of how to use this function
        is provided in :func:`grid2op.Chronics.Multifolder.shuffle`.
        """
        self.space_prng = np.random.RandomState()
        self.space_prng.seed(seed=self.seed_used)
        self.backend = self.env_params["_raw_backend_class"]()
        with warnings.catch_warnings():
            # warnings have bee already sent in the main process, no need to resend them
            warnings.filterwarnings("ignore")
            if "logger" in self.env_params:
                # disable the logger of the environment, to force the use of this one
                del self.env_params["logger"]
            self.env = Environment(
                **self.env_params, backend=self.backend, logger=self.logger
            )
        # seed this worker's env from the worker PRNG so each process differs
        env_seed = self.space_prng.randint(np.iinfo(dt_int).max)
        self.all_seeds = self.env.seed(env_seed)
        # shuffle the chronics order so processes do not all replay the same scenarios
        self.env.chronics_handler.shuffle(
            shuffler=lambda x: x[
                self.space_prng.choice(len(x), size=len(x), replace=False)
            ]
        )

    def _clean_observation(self, obs):
        # strip everything that cannot (or should not) be pickled and sent
        # through the pipe: forecasts and the back-references to the env
        obs._forecasted_grid = []
        obs._forecasted_inj = []
        obs._obs_env = None
        obs.action_helper = None
        return obs

    def get_obs_ifnotconv(self):
        """Reset the env until a valid (all-finite) observation is produced.

        Returns the observation either as a numpy vector or as a (cleaned)
        observation object depending on ``self._obs_to_vect``.
        """
        # warnings.warn(f"get_obs_ifnotconv is used")
        # TODO dirty hack because of wrong chronics
        # need to check!!!
        conv = False
        obs_v = None
        obs = None
        while not conv:
            try:
                self.env.reset()
                if self.fast_forward > 0:
                    # start at a random offset within [0, fast_forward)
                    self.env.fast_forward_chronics(
                        self.space_prng.randint(0, self.fast_forward)
                    )
                obs = self.env.get_obs()
                obs_v = obs.to_vect()
                if np.all(np.isfinite(obs_v)):
                    # i make sure that everything is not Nan
                    # other i consider it's "divergence" so "game over"
                    conv = True
            except Exception as exc_:
                # deliberate best-effort: retry with another reset on any failure
                pass
        if self._obs_to_vect:
            res = obs_v
        else:
            res = obs
        return res

    def run(self):
        """Worker main loop: serve ``(cmd, data)`` requests from the parent.

        Commands: "get_spaces" (send obs/action spaces), "s" (step), "r"
        (reset), "c" (close and exit), "z" (set chunk size), "o" (get obs),
        "f" (set fast-forward), "seed", "params", "comp_time",
        "powerflow_time", "step_time", "set_filter", "set_id", "sim"
        (obs.simulate); any other command is looked up as an attribute of the
        underlying env and its value sent back.
        """
        if self.env is None:
            self.init_env()

        while True:
            cmd, data = self.remote.recv()
            if cmd == "get_spaces":
                self.remote.send((self.env.observation_space, self.env.action_space))
            elif cmd == "s":
                # perform a step
                beg_ = time.perf_counter()
                if data is None:
                    # no action provided: do nothing
                    data = self.env.action_space()
                else:
                    data = self.env.action_space.from_vect(data)
                obs, reward, done, info = self.env.step(data)
                obs_v = obs.to_vect()
                if done or np.any(~np.isfinite(obs_v)):
                    # if done do a reset
                    res_obs = self.get_obs_ifnotconv()
                elif self._obs_to_vect:
                    # NOTE(review): obs_v was already computed just above; this
                    # second to_vect() call looks redundant — left as-is
                    res_obs = obs.to_vect()
                else:
                    res_obs = self._clean_observation(obs)
                if not self.return_info:
                    info = None
                end_ = time.perf_counter()
                self._comp_time += end_ - beg_
                self.remote.send((res_obs, reward, done, info))
            elif cmd == "r":
                # perfom a reset
                obs_v = self.get_obs_ifnotconv()
                self.remote.send(obs_v)
            elif cmd == "c":
                # close everything
                self.env.close()
                self.remote.close()
                break
            elif cmd == "z":
                # adapt the chunk size
                self.env.set_chunk_size(data)
            elif cmd == "o":
                # get_obs
                tmp = self.env.get_obs()
                if self._obs_to_vect:
                    res_obs = tmp.to_vect()
                else:
                    res_obs = self._clean_observation(tmp)
                self.remote.send(res_obs)
            elif cmd == "f":
                # fast forward the chronics when restart
                self.fast_forward = int(data)
            elif cmd == "seed":
                self.remote.send((self.seed_used, self.all_seeds))
            elif cmd == "params":
                self.remote.send(self.env.parameters)
            elif cmd == "comp_time":
                self.remote.send(self._comp_time)
            elif cmd == "powerflow_time":
                self.remote.send(self.env.backend.comp_time)
            elif cmd == "step_time":
                self.remote.send(self.env._time_step)
            elif cmd == "set_filter":
                self.env.chronics_handler.set_filter(data)
                self.remote.send(None)
            elif cmd == "set_id":
                self.env.set_id(data)
                self.remote.send(None)
            elif cmd == "sim":
                # run obs.simulate on the current observation with the
                # deserialized action and send back vectorized results
                action = self.env.action_space.from_vect(data)
                obs = self.env.get_obs()
                sim_obs, sim_reward, sim_done, sim_info = obs.simulate(action)
                sim_obs_v = sim_obs.to_vect()
                self.remote.send((sim_obs_v, sim_reward, sim_done, sim_info))
            elif hasattr(self.env, cmd):
                # generic attribute query on the underlying environment
                tmp = getattr(self.env, cmd)
                self.remote.send(tmp)
            else:
                raise NotImplementedError
class BaseMultiProcessEnvironment(GridObjects):
"""
This class allows to evaluate a single agent instance on multiple environments running in parrallel.
It uses the python "multiprocessing" framework to work, and thus is suitable only on a single machine with multiple
cores (cpu / thread). We do not recommend to use this method on a cluster of different machines.
This class uses the following representation:
- an :class:`grid2op.BaseAgent.BaseAgent`: lives in a main process
- different environments lives into different processes
- a call to :func:`MultiEnv.step` will perform one step per environment, in parallel using a ``Pipe`` to transfer data
to and from the main process from each individual environment process. It is a synchronous function. It means
it will wait for every environment to finish the step before returning all the information.
There are some limitations. For example, even if forecast are available, it's not possible to use forecast of the
observations. This imply that :func:`grid2op.Observation.BaseObservation.simulate` is not available when using
:class:`MultiEnvironment`
Compare to regular Environments, :class:`MultiEnvironment` simply stack everything. You need to send not a single
:class:`grid2op.Action.BaseAction` but as many actions as there are underlying environments. You receive not one single
:class:`grid2op.Observation.BaseObservation` but as many observations as the number of underlying environments.
A broader support of regular grid2op environment capabilities as well as support for
:func:`grid2op.Observation.BaseObservation.simulate` call might be added in the future.
**NB** As opposed to :func:`Environment.step` a call to :func:`BaseMultiProcessEnvironment.step` or any of
its derived class (:class:`SingleEnvMultiProcess` or :class:`MultiEnvMultiProcess`) if a sub environment
is "done" then it is automatically reset. This means entails that you can call
:func:`BaseMultiProcessEnvironment.step` without worrying about having to reset.
Attributes
-----------
envs: `list::grid2op.Environment.Environment`
Al list of environments for which the evaluation will be made in parallel.
nb_env: ``int``
Number of parallel underlying environment that will be handled. It is also the size of the list of actions
that need to be provided in :func:`MultiEnvironment.step` and the return sizes of the list of this
same function.
obs_as_class: ``bool``
Whether to convert the observations back to :class:`grid2op.Observation` object to to leave them as
numpy array. Default (`obs_as_class=True`) to send them as observation object, but it's slower.
return_info: ``bool``
Whether to return the information dictionary or not (might speed up computation)
"""
    def __init__(self, envs, obs_as_class=True, return_info=True, logger=None):
        """Build one worker process (with its own Pipe) per environment in `envs`.

        Parameters
        ----------
        envs: ``list`` of :class:`grid2op.Environment.Environment`
            The environments to run in parallel (one subprocess each).
        obs_as_class: ``bool``
            Whether observations are converted back to observation objects
            (slower) or left as numpy vectors.
        return_info: ``bool``
            Whether the workers send back the "info" dict at each step.
        logger:
            Optional logger; each worker gets a child of it.
        """
        GridObjects.__init__(self)
        self.__closed = False
        for env in envs:
            if not isinstance(env, Environment):
                raise MultiEnvException(
                    'You provided environment of type "{}" which is not supported.'
                    "Please only provide a grid2op.Environment.Environment class."
                    "".format(type(env))
                )

        self.nb_env = len(envs)
        max_int = np.iinfo(dt_int).max
        # one bidirectional Pipe per sub environment: (parent end, worker end)
        _remotes, _work_remotes = zip(*[Pipe() for _ in range(self.nb_env)])
        # kwargs to rebuild each env in its subprocess (backend rebuilt there)
        env_params = [sub_env.get_kwargs(with_backend=False) for sub_env in envs]
        self._ps = [
            RemoteEnv(
                env_params=env_,
                p_id=i,
                remote=work_remote,
                parent_remote=remote,
                name="{}_{}".format(envs[i].name, i),
                return_info=return_info,
                seed=envs[i].space_prng.randint(max_int),
                logger=logger.getChild("BaseMultiProcessEnvironment")
                if logger is not None
                else None,
            )
            for i, (work_remote, remote, env_) in enumerate(
                zip(_work_remotes, _remotes, env_params)
            )
        ]

        # on windows, this has to be created after
        self.envs = envs
        self._remotes = _remotes
        self._work_remotes = _work_remotes

        for p in self._ps:
            p.daemon = (
                True  # if the main process crashes, we should not cause things to hang
            )
            p.start()
        # the parent does not use the workers' pipe ends: close them here
        for remote in self._work_remotes:
            remote.close()
        self.obs_as_class = obs_as_class
        # self.__return_info = return_info
        # True while a request has been sent and the answers not yet received
        self._waiting = True
def _send_act(self, actions):
for remote, action in zip(self._remotes, actions):
vect = action.to_vect()
# vect = None # TODO
remote.send(("s", vect))
self._waiting = True
def _wait_for_obs(self):
results = [remote.recv() for remote in self._remotes]
self._waiting = False
obs, rews, dones, infos = zip(*results)
if self.obs_as_class:
obs = [
self.envs[e].observation_space.from_vect(ob) for e, ob in enumerate(obs)
]
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def copy(self):
raise NotImplementedError(
"It is not possible to copy multiprocessing environments at the moment."
)
def step(self, actions):
"""
Perform a step in all the underlying environments.
If one or more of the underlying environments encounters a game over, it is automatically restarted.
The observation sent back to the user is the observation after the :func:`grid2op.Environment.Environment.reset`
has been called.
As opposed to :class:`Environment.step` a call to this function will automatically reset
any of the underlying environments in case one of them is "done". This is performed the following way.
In the case one underlying environment is over (due to game over or due to end of the chronics), then:
- the corresponding "done" is returned as ``True``
- the corresponding observation returned is not the observation of the last time step (corresponding to the
underlying environment that is game over) but is the first observation after reset.
At the next call to step, the flag done will be (if not game over arise) set to ``False`` and the
corresponding observation is the next observation of this underlying environment: every thing works
as usual in this case.
We did that because restarting the game over environment added un necessary complexity.
Parameters
----------
actions: ``list``
List of :attr:`MultiEnvironment.nb_env` :class:`grid2op.Action.BaseAction`. Each action will be executed
in the corresponding underlying environment.
Returns
-------
obs: ``list``
List all the observations returned by each underlying environment.
rews: ``list``
List all the rewards returned by each underlying environment.
dones: ``list``
List all the "done" returned by each underlying environment. If one of this value is "True" this means
the environment encounter a game over.
infos: ``list``
List of dictionaries corresponding
Examples
---------
You can use this class as followed:
.. code-block:: python
import grid2op
from grid2op.Environment import BaseMultiProcessEnv
env1 = grid2op.make() # create an environment of your choosing
env2 = grid2op.make() # create another environment of your choosing
multi_env = BaseMultiProcessEnv([env1, env2])
obss = multi_env.reset()
obs1, obs2 = obss # here i extract the observation of the first environment and of the second one
# note that you cannot do obs1.simulate().
# this is equivalent to a call to
# obs1 = env1.reset(); obs2 = env2.reset()
# then you can do regular steps
action_env1 = env1.action_space()
action_env2 = env2.action_space()
obss, rewards, dones, infos = env.step([action_env1, action_env2])
# if you define
# obs1, obs2 = obss
# r1, r2 = rewards
# done1, done2 = dones
# info1, info2 = infos
# in this case, it is equivalent to calling
# obs1, r1, done1, info1 = env1.step(action_env1)
# obs2, r2, done2, info2 = env2.step(action_env2)
Let us now focus on the "automatic" reset part.
.. code-block:: python
# see above for the creation of a multi_env and the proper imports
multi_env = BaseMultiProcessEnv([env1, env2])
action_env1 = env1.action_space()
action_env2 = env2.action_space()
obss, rewards, dones, infos = env.step([action_env1, action_env2])
# say dones[0] is ``True``
# in this case if you define
# obs1 = obss[0]
# r1=rewards[0]
# done1=done[0]
# info1=info[0]
# in that case it is equivalent to the "single processed" code
# obs1_tmp, r1_tmp, done1_tmp, info1_tmp = env1.step(action_env1)
# done1 = done1_tmp
# r1 = r1_tmp
# info1 = info1_tmp
# obs1_aux = env1.reset()
# obs1 = obs1_aux
# CAREFULLL in this case, obs1 is NOT obs1_tmp but is really
"""
if self.__closed:
raise EnvError("This environment is closed, you cannot use it.")
if len(actions) != self.nb_env:
raise MultiEnvException(
"Incorrect number of actions provided. You provided {} actions, but the "
"MultiEnvironment counts {} different environment."
"".format(len(actions), self.nb_env)
)
for act in actions:
if not isinstance(act, BaseAction):
raise MultiEnvException(
'All actions send to MultiEnvironment.step should be of type "grid2op.BaseAction"'
"and not {}".format(type(act))
)
self._send_act(actions)
obs, rews, dones, infos = self._wait_for_obs()
return obs, rews, dones, infos
def reset(self):
"""
Reset all the environments, and return all the associated observation.
**NB** Except in some specific occasion, there is no need to call this function reset. Indeed, when
a sub environment is "done" then it is automatically restarted in the
:func:BaseMultiEnvMultiProcess.step` function.
Returns
-------
res: ``list``
The list of all observations. This list counts :attr:`MultiEnvironment.nb_env` elements, each one being
an :class:`grid2op.Observation.BaseObservation`.
"""
if self.__closed:
raise EnvError("This environment is closed, you cannot use it.")
for remote in self._remotes:
remote.send(("r", None))
res = [remote.recv() for e, remote in enumerate(self._remotes)]
if self.obs_as_class:
res = [
self.envs[e].observation_space.from_vect(el) for e, el in enumerate(res)
]
return np.stack(res)
def close(self):
"""
Close all the environments and all the processes.
"""
if self.__closed:
return
for remote in self._remotes:
remote.send(("c", None))
self.__closed = True
def set_chunk_size(self, new_chunk_size):
"""
Dynamically adapt the amount of data read from the hard drive. Usefull to set it to a low integer value (eg 10
or 100) at the beginning of the learning process, when agent fails pretty quickly.
This takes effect only after a reset has been performed.
Parameters
----------
new_chunk_size: ``int``
The new chunk size (positive integer)
"""
if self.__closed:
raise EnvError("This environment is closed, you cannot use it.")
try:
new_chunk_size = int(new_chunk_size)
except Exception as e:
raise Grid2OpException(
"Impossible to set the chunk size. It should be convertible a integer, and not"
"{}".format(new_chunk_size)
)
if new_chunk_size <= 0:
raise Grid2OpException(
'Impossible to read less than 1 data at a time. Please make sure "new_chunk_size"'
"is a positive integer."
)
for remote in self._remotes:
remote.send(("z", new_chunk_size))
def set_ff(self, ff_max=7 * 24 * 60 / 5):
"""
This method is primarily used for training.
The problem this method aims at solving is the following: most of grid2op environments starts a Monday at
00:00. This method will "fast forward" an environment for a random number of timestep between 0 and ``ff_max``
"""
if self.__closed:
raise EnvError("This environment is closed, you cannot use it.")
try:
ff_max = int(ff_max)
except Exception as exc_:
raise RuntimeError(
"ff_max parameters should be convertible to an integer."
) from exc_
for remote in self._remotes:
remote.send(("f", ff_max))
def get_seeds(self):
"""
Get the seeds used to initialize each sub environments.
"""
if self.__closed:
raise EnvError("This environment is closed, you cannot use it.")
for remote in self._remotes:
remote.send(("seed", None))
res = [remote.recv() for remote in self._remotes]
return res
def get_parameters(self):
"""
Get the parameters of each sub environments
"""
if self.__closed:
raise EnvError("This environment is closed, you cannot use it.")
for remote in self._remotes:
remote.send(("params", None))
res = [remote.recv() for remote in self._remotes]
return res
def get_obs(self):
"""implement the get_obs function that is "broken" if you use the __getattr__"""
if self.__closed:
raise EnvError("This environment is closed, you cannot use it.")
for remote in self._remotes:
remote.send(("o", None))
res = [
self.envs[e].observation_space.from_vect(remote.recv())
for e, remote in enumerate(self._remotes)
]
return res
def _send_sim(self, actions):
for remote, action in zip(self._remotes, actions):
remote.send(("sim", action.to_vect()))
self._waiting = True
def simulate(self, actions):
"""
Perform the equivalent of `obs.simulate` in all the underlying environment
Parameters
----------
actions: ``list``
List of all action to simulate
Returns
---------
sim_obs:
The observation resulting from the simulation
sim_rews:
The reward resulting from the simulation
sim_dones:
For each simulation, whether or not this the simulated action lead to a game over
sim_infos:
Additional information for each simulated actions.
Examples
--------
You can use this feature like:
.. code-block::
import grid2op
from grid2op.Environment import BaseMultiProcessEnvironment
env_name = "l2rpn_case14_sandbox" # or any other name
env1 = grid2op.make(env_name)
env2 = grid2op.make(env_name)
multi_env = BaseMultiProcessEnvironment([env1, env2])
obss = multi_env.reset()
# simulate
actions = [env1.action_space(), env2.action_space()]
sim_obss, sim_rs, sim_ds, sim_is = multi_env.simulate(actions)
"""
if self.__closed:
raise EnvError("This environment is closed, you cannot use it.")
if len(actions) != self.nb_env:
raise MultiEnvException(
"Incorrect number of actions provided. You provided {} actions, but the "
"MultiEnvironment counts {} different environment."
"".format(len(actions), self.nb_env)
)
for act in actions:
if not isinstance(act, BaseAction):
raise MultiEnvException(
"All actions send to MultiEnvironment.step should be of type "
'"grid2op.BaseAction" and not {}'.format(type(act))
)
self._send_sim(actions)
sim_obs, sim_rews, sim_dones, sim_infos = self._wait_for_obs()
return sim_obs, sim_rews, sim_dones, sim_infos
def __getattr__(self, name):
    """
    Fetch the value of the attribute ``name`` from every underlying sub environment.

    Note that setting attributes or information to the sub_env this way will not work.
    This method only allows to get the value of some attributes, NOT to modify them.

    /!\\ **DANGER** /!\\ if you use this function, you are entering the danger zone.
    This might not work and make your whole python session die without any notice.
    You've been warned.

    Parameters
    ----------
    name: ``str``
        Name of the attribute you want to get the value, for each sub_env

    Returns
    -------
    res: ``list``
        The value of the given attribute for each sub env. Again, use with care.
    """
    if self.__closed:
        raise EnvError("This environment is closed, you cannot use it.")
    # check on the local copies of the sub environments, with early exit
    # (the previous implementation kept scanning every sub env even after
    # a missing attribute had been found)
    if not all(hasattr(sub_env, name) for sub_env in self.envs):
        raise RuntimeError(
            'At least one of the sub_env has not the attribute "{}". This will not be '
            "executed.".format(name)
        )
    # NOTE(review): if an attribute is looked up before `self.envs` /
    # `self._remotes` exist (e.g. during unpickling or copy), `__getattr__`
    # can recurse -- confirm whether a guard raising AttributeError for
    # internal names is needed.
    for remote in self._remotes:
        remote.send((name, None))
    res = [remote.recv() for remote in self._remotes]
    return res
def get_comp_time(self):
    """Return ``sub_env.comp_time`` (computation time of the step part only) for every sub environment."""
    if self.__closed:
        raise EnvError("This environment is closed, you cannot use it.")
    query = ("comp_time", None)
    for pipe in self._remotes:
        pipe.send(query)
    return [pipe.recv() for pipe in self._remotes]
def get_powerflow_time(self):
    """Return the powerflow computation time (``sub_env.backend.comp_time``) for every sub environment."""
    if self.__closed:
        raise EnvError("This environment is closed, you cannot use it.")
    query = ("powerflow_time", None)
    for pipe in self._remotes:
        pipe.send(query)
    return [pipe.recv() for pipe in self._remotes]
def get_step_time(self):
    """Return the full step computation time (``sub_env._time_step``) for every sub environment."""
    if self.__closed:
        raise EnvError("This environment is closed, you cannot use it.")
    query = ("step_time", None)
    for pipe in self._remotes:
        pipe.send(query)
    return [pipe.recv() for pipe in self._remotes]
def set_filter(self, filter_funs):
    """
    Set a `filter_fun` on each of the underlying sub environments.

    A single callable is broadcast to every sub environment; otherwise one
    callable per sub environment is expected.

    See :func:`grid2op.Chronics.MultiFolder.set_filter` for more information

    Examples
    --------
    TODO usage example
    """
    if self.__closed:
        raise EnvError("This environment is closed, you cannot use it.")
    if callable(filter_funs):
        # broadcast the single callable to every sub environment
        filter_funs = [filter_funs] * self.nb_env
    if len(filter_funs) != self.nb_env:
        raise RuntimeError(
            "filter_funs should be either a single function that will be applied "
            "identically to each sub_env or a list of callable functions."
        )
    for fun in filter_funs:
        if callable(fun):
            continue
        raise RuntimeError(
            "filter_funs should be composed of callable elements, such as functions "
            "that can be use with `env.chronics_handler.set_filter`"
        )
    for fun, pipe in zip(filter_funs, self._remotes):
        pipe.send(("set_filter", fun))
    return [pipe.recv() for pipe in self._remotes]
def set_id(self, id_):
    """
    Set a chronics id for each of the underlying environment to be used for each of the sub_env.

    See :func:`grid2op.Environment.Environment.set_id` for more information

    Parameters
    ----------
    id_: ``int`` or ``list``
        Either a single integer (broadcast to every sub environment) or one
        integer per sub environment. Numpy integer scalars (e.g. ``np.int32``)
        are accepted as well as plain python ``int``.

    Examples
    --------
    TODO usage example
    """
    if self.__closed:
        raise EnvError("This environment is closed, you cannot use it.")
    # generalization: numpy integer scalars are accepted in addition to
    # plain python ints (grid2op itself manipulates numpy integers)
    if isinstance(id_, (int, np.integer)):
        id_ = [int(id_) for _ in range(self.nb_env)]
    if len(id_) != self.nb_env:
        raise RuntimeError(
            "id_ should be either a single integer or an integer that represents "
            "the chronics to use."
        )
    ids = []
    for el in id_:
        if not isinstance(el, (int, np.integer)):
            raise RuntimeError("id_ should be composed of integers.")
        ids.append(int(el))  # normalize so workers always receive plain ints
    for sub_env_id, remote in enumerate(self._remotes):
        remote.send(("set_id", ids[sub_env_id]))
    res = [remote.recv() for remote in self._remotes]
    return res
def __del__(self):
    """When garbage collected, free all resources (worker processes, cross references in the observation space)."""
    if self.__closed:
        return
    self.close()
if __name__ == "__main__":
    from tqdm import tqdm
    from grid2op import make
    from grid2op.Agent import DoNothingAgent

    # Small demonstration: run a do-nothing agent through the multiprocess
    # wrapper, then through a single sequential environment, and compare the
    # accumulated rewards.
    nb_env = 8  # change that to adapt to your system
    NB_STEP = 100  # number of step for each environment

    env = make()
    env.seed(42)
    # NOTE(review): the very same environment instance is replicated nb_env
    # times; BaseMultiProcessEnvironment presumably copies each one into its
    # worker process -- confirm if independently seeded envs are wanted here.
    envs = [env for _ in range(nb_env)]

    agent = DoNothingAgent(env.action_space)
    multi_envs = BaseMultiProcessEnvironment(envs)

    obs = multi_envs.reset()
    rews = [env.reward_range[0] for i in range(nb_env)]
    dones = [False for i in range(nb_env)]
    total_reward = 0.0
    for i in tqdm(range(NB_STEP)):
        acts = [None for _ in range(nb_env)]
        for env_act_id in range(nb_env):
            acts[env_act_id] = agent.act(
                obs[env_act_id], rews[env_act_id], dones[env_act_id]
            )
        obs, rews, dones, infos = multi_envs.step(acts)
        total_reward += np.sum(rews)
    multi_envs.close()

    # same number of steps, single (sequential) environment this time
    ob = env.reset()
    rew = env.reward_range[0]
    done = False
    total_reward_single = 0
    for i in tqdm(range(NB_STEP)):
        act = agent.act(ob, rew, done)
        ob, rew, done, info = env.step(act)
        if done:
            ob = env.reset()
        total_reward_single += np.sum(rew)
    env.close()
    print("total_reward multi_env: {}".format(total_reward))
    print("total_reward single env: {}".format(total_reward_single))
# ---------------------------------------------------------------------------
# File: grid2op/Environment/Environment.py
# ---------------------------------------------------------------------------
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import copy
import warnings
import numpy as np
import re
import grid2op
from grid2op.Opponent.OpponentSpace import OpponentSpace
from grid2op.dtypes import dt_float, dt_bool, dt_int
from grid2op.Action import (
ActionSpace,
BaseAction,
TopologyAction,
DontAct,
CompleteAction,
)
from grid2op.Exceptions import *
from grid2op.Observation import CompleteObservation, ObservationSpace, BaseObservation
from grid2op.Reward import FlatReward, RewardHelper, BaseReward
from grid2op.Rules import RulesChecker, AlwaysLegal, BaseRules
from grid2op.Backend import Backend
from grid2op.Chronics import ChronicsHandler
from grid2op.VoltageControler import ControlVoltageFromFile, BaseVoltageController
from grid2op.Environment.BaseEnv import BaseEnv
from grid2op.Opponent import BaseOpponent, NeverAttackBudget
from grid2op.operator_attention import LinearAttentionBudget
class Environment(BaseEnv):
"""
This class is the grid2op implementation of the "Environment" entity in the RL framework.
Attributes
----------
name: ``str``
The name of the environment
action_space: :class:`grid2op.Action.ActionSpace`
Another name for :attr:`Environment.helper_action_player` for gym compatibility.
observation_space: :class:`grid2op.Observation.ObservationSpace`
Another name for :attr:`Environment.helper_observation` for gym compatibility.
reward_range: ``(float, float)``
The range of the reward function
metadata: ``dict``
For gym compatibility, do not use
spec: ``None``
For Gym compatibility, do not use
_viewer: ``object``
Used to display the powergrid. Currently properly supported.
"""
REGEX_SPLIT = r"^[a-zA-Z0-9]*$"
def __init__(
    self,
    init_env_path: str,
    init_grid_path: str,
    chronics_handler,
    backend,
    parameters,
    name="unknown",
    names_chronics_to_backend=None,
    actionClass=TopologyAction,
    observationClass=CompleteObservation,
    rewardClass=FlatReward,
    legalActClass=AlwaysLegal,
    voltagecontrolerClass=ControlVoltageFromFile,
    other_rewards={},
    thermal_limit_a=None,
    with_forecast=True,
    epsilon_poly=1e-4,  # precision of the redispatching algorithm we don't recommend to go above 1e-4
    tol_poly=1e-2,  # i need to compute a redispatching if the actual values are "more than tol_poly" the values they should be
    opponent_space_type=OpponentSpace,
    opponent_action_class=DontAct,
    opponent_class=BaseOpponent,
    opponent_init_budget=0.0,
    opponent_budget_per_ts=0.0,
    opponent_budget_class=NeverAttackBudget,
    opponent_attack_duration=0,
    opponent_attack_cooldown=99999,
    kwargs_opponent={},
    attention_budget_cls=LinearAttentionBudget,
    kwargs_attention_budget={},
    has_attention_budget=False,
    logger=None,
    kwargs_observation=None,
    observation_bk_class=None,
    observation_bk_kwargs=None,
    highres_sim_counter=None,
    _update_obs_after_reward=True,
    _init_obs=None,
    _raw_backend_class=None,
    _compat_glop_version=None,
    _read_from_local_dir=True,  # TODO runner and all here !
    _is_test=False,
):
    """
    Build a grid2op :class:`Environment`.

    Users are expected to go through :func:`grid2op.make` rather than calling
    this constructor directly (see the warning emitted when ``name`` is left
    to ``"unknown"``).  Most parameters are forwarded verbatim to
    :class:`grid2op.Environment.BaseEnv.BaseEnv`; the remainder configure the
    backend, the chronics and the action/observation/reward classes through
    :func:`Environment._init_backend`.
    """
    # forward the bulk of the configuration to the base class; note that the
    # logger, if provided, is wrapped into a child logger for this environment
    BaseEnv.__init__(
        self,
        init_env_path=init_env_path,
        init_grid_path=init_grid_path,
        parameters=parameters,
        thermal_limit_a=thermal_limit_a,
        epsilon_poly=epsilon_poly,
        tol_poly=tol_poly,
        other_rewards=other_rewards,
        with_forecast=with_forecast,
        voltagecontrolerClass=voltagecontrolerClass,
        opponent_space_type=opponent_space_type,
        opponent_action_class=opponent_action_class,
        opponent_class=opponent_class,
        opponent_budget_class=opponent_budget_class,
        opponent_init_budget=opponent_init_budget,
        opponent_budget_per_ts=opponent_budget_per_ts,
        opponent_attack_duration=opponent_attack_duration,
        opponent_attack_cooldown=opponent_attack_cooldown,
        kwargs_opponent=kwargs_opponent,
        has_attention_budget=has_attention_budget,
        attention_budget_cls=attention_budget_cls,
        kwargs_attention_budget=kwargs_attention_budget,
        logger=logger.getChild("grid2op_Environment")
        if logger is not None
        else None,
        kwargs_observation=kwargs_observation,
        observation_bk_class=observation_bk_class,
        observation_bk_kwargs=observation_bk_kwargs,
        highres_sim_counter=highres_sim_counter,
        update_obs_after_reward=_update_obs_after_reward,
        _init_obs=_init_obs,
        _is_test=_is_test,  # is this created with "test=True" # TODO not implemented !!
    )
    if name == "unknown":
        warnings.warn(
            'It is NOT recommended to create an environment without "make" and EVEN LESS '
            "to use an environment without a name..."
        )
    self.name = name
    self._read_from_local_dir = _read_from_local_dir

    # for gym compatibility (initialized below)
    # self.action_space = None
    # self.observation_space = None
    self.reward_range = None
    self._viewer = None
    self.metadata = None
    self.spec = None

    # remember the "raw" (unprocessed) backend class so the environment can
    # be copied / pickled and the backend rebuilt later
    if _raw_backend_class is None:
        self._raw_backend_class = type(backend)
    else:
        self._raw_backend_class = _raw_backend_class

    self._compat_glop_version = _compat_glop_version

    # for plotting
    # heavy lifting: loads the grid, builds the action / observation spaces,
    # the rules, the opponent, etc.
    self._init_backend(
        chronics_handler,
        backend,
        names_chronics_to_backend,
        actionClass,
        observationClass,
        rewardClass,
        legalActClass,
    )
    # keep the classes exactly as provided by the caller (before init_grid
    # processing) so they can be reused when copying the environment
    self._actionClass_orig = actionClass
    self._observationClass_orig = observationClass
def _init_backend(
    self,
    chronics_handler,
    backend,
    names_chronics_to_backend,
    actionClass,
    observationClass,
    rewardClass,
    legalActClass,
):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Create a proper and valid environment: validates every input class,
    loads the grid into the backend, builds the action / observation spaces,
    the rules, the voltage controler, the opponent and performs one
    "do nothing" step to fully initialize the environment.

    Fix: the error message raised when ``actionClass`` is not a type used to
    report ``type(legalActClass)`` instead of ``type(actionClass)``.
    """
    # --- reward class check: accept either a class deriving from BaseReward
    # --- or an already-built instance of it
    if isinstance(rewardClass, type):
        if not issubclass(rewardClass, BaseReward):
            raise Grid2OpException(
                'Parameter "rewardClass" used to build the Environment should derived form '
                'the grid2op.BaseReward class, type provided is "{}"'.format(
                    type(rewardClass)
                )
            )
    else:
        if not isinstance(rewardClass, BaseReward):
            raise Grid2OpException(
                'Parameter "rewardClass" used to build the Environment should derived form '
                'the grid2op.BaseReward class, type provided is "{}"'.format(
                    type(rewardClass)
                )
            )

    # backend
    if not isinstance(backend, Backend):
        raise Grid2OpException(
            'Parameter "backend" used to build the Environment should derived form the '
            'grid2op.Backend class, type provided is "{}"'.format(type(backend))
        )
    self.backend = backend
    if self.backend.is_loaded and self._init_obs is None:
        raise EnvError(
            "Impossible to use the same backend twice. Please create your environment with a "
            "new backend instance (new object)."
        )

    need_process_backend = False
    if not self.backend.is_loaded:
        # usual case: the backend is not loaded
        # NB it is loaded when the backend comes from an observation for
        # example
        if self._read_from_local_dir:
            # test to support pickle conveniently
            self.backend._PATH_ENV = self.get_path_env()
        # all the above should be done in this exact order, otherwise some weird behaviour might occur
        # this is due to the class attribute
        self.backend.set_env_name(self.name)
        self.backend.load_grid(
            self._init_grid_path
        )  # the real powergrid of the environment
        try:
            self.backend.load_redispacthing_data(self.get_path_env())
        except BackendError as exc_:
            # best effort: redispatching is optional, simply disable it
            self.backend.redispatching_unit_commitment_availble = False
            warnings.warn(f"Impossible to load redispatching data. This is not an error but you will not be able "
                          f"to use all grid2op functionalities. "
                          f"The error was: \"{exc_}\"")
        self.backend.load_storage_data(self.get_path_env())
        exc_ = self.backend.load_grid_layout(self.get_path_env())
        if exc_ is not None:
            warnings.warn(
                f"No layout have been found for you grid (or the layout provided was corrupted). You will "
                f'not be able to use the renderer, plot the grid etc. The error was "{exc_}"'
            )
        self.backend.is_loaded = True

        # alarm set up
        self.load_alarm_data()
        self.load_alert_data()
        # to force the initialization of the backend to the proper type
        self.backend.assert_grid_correct()
        need_process_backend = True

    self._handle_compat_glop_version(need_process_backend)

    self._has_been_initialized()  # really important to include this piece of code! and just here after the
    # backend has loaded everything
    self._line_status = np.ones(shape=self.n_line, dtype=dt_bool)
    self._disc_lines = np.zeros(shape=self.n_line, dtype=dt_int) - 1

    if self._thermal_limit_a is None:
        self._thermal_limit_a = self.backend.thermal_limit_a.astype(dt_float)
    else:
        self.backend.set_thermal_limit(self._thermal_limit_a.astype(dt_float))

    *_, tmp = self.backend.generators_info()

    # rules of the game
    self._check_rules_correct(legalActClass)
    self._game_rules = RulesChecker(legalActClass=legalActClass)
    self._game_rules.initialize(self)
    self._legalActClass = legalActClass

    # action helper
    if not isinstance(actionClass, type):
        raise Grid2OpException(
            'Parameter "actionClass" used to build the Environment should be a type (a class) '
            "and not an object (an instance of a class). "
            # BUGFIX: previously formatted type(legalActClass) in this message
            'It is currently "{}"'.format(type(actionClass))
        )
    if not issubclass(actionClass, BaseAction):
        raise Grid2OpException(
            'Parameter "actionClass" used to build the Environment should derived form the '
            'grid2op.BaseAction class, type provided is "{}"'.format(
                type(actionClass)
            )
        )

    if not isinstance(observationClass, type):
        raise Grid2OpException(
            f'Parameter "observationClass" used to build the Environment should be a type (a class) '
            f"and not an object (an instance of a class). "
            f'It is currently : {observationClass} (type "{type(observationClass)}")'
        )
    if not issubclass(observationClass, BaseObservation):
        raise Grid2OpException(
            f'Parameter "observationClass" used to build the Environment should derived form the '
            f'grid2op.BaseObservation class, type provided is "{type(observationClass)}"'
        )

    # action affecting the grid that will be made by the agent
    bk_type = type(
        self.backend
    )  # be careful here: you need to initialize from the class, and not from the object
    self._rewardClass = rewardClass
    self._actionClass = actionClass.init_grid(gridobj=bk_type)
    self._actionClass._add_shunt_data()
    self._actionClass._update_value_set()
    self._observationClass = observationClass.init_grid(gridobj=bk_type)
    self._complete_action_cls = CompleteAction.init_grid(gridobj=bk_type)
    self._helper_action_class = ActionSpace.init_grid(gridobj=bk_type)
    self._action_space = self._helper_action_class(
        gridobj=bk_type,
        actionClass=actionClass,
        legal_action=self._game_rules.legal_action,
    )
    # action that affect the grid made by the environment.
    self._helper_action_env = self._helper_action_class(
        gridobj=bk_type,
        actionClass=CompleteAction,
        legal_action=self._game_rules.legal_action,
    )

    # handles input data
    if not isinstance(chronics_handler, ChronicsHandler):
        raise Grid2OpException(
            'Parameter "chronics_handler" used to build the Environment should derived form the '
            'grid2op.ChronicsHandler class, type provided is "{}"'.format(
                type(chronics_handler)
            )
        )
    if names_chronics_to_backend is None and type(self.backend).IS_BK_CONVERTER:
        names_chronics_to_backend = self.backend.names_target_to_source
    self.chronics_handler = chronics_handler
    self.chronics_handler.initialize(
        self.name_load,
        self.name_gen,
        self.name_line,
        self.name_sub,
        names_chronics_to_backend=names_chronics_to_backend,
    )
    self._names_chronics_to_backend = names_chronics_to_backend
    self.delta_time_seconds = dt_float(self.chronics_handler.time_interval.seconds)

    # this needs to be done after the chronics handler: rewards might need information
    # about the chronics to work properly.
    self._helper_observation_class = ObservationSpace.init_grid(gridobj=bk_type)
    # FYI: this try to copy the backend if it fails it will modify the backend
    # and the environment to force the deactivation of the
    # forecasts
    self._observation_space = self._helper_observation_class(
        gridobj=bk_type,
        observationClass=observationClass,
        actionClass=actionClass,
        rewardClass=rewardClass,
        env=self,
        kwargs_observation=self._kwargs_observation,
        observation_bk_class=self._observation_bk_class,
        observation_bk_kwargs=self._observation_bk_kwargs
    )

    # test to make sure the backend is consistent with the chronics generator
    self.chronics_handler.check_validity(self.backend)
    self._reset_storage()  # this should be called after the self.delta_time_seconds is set

    # reward function
    self._reward_helper = RewardHelper(self._rewardClass, logger=self.logger)
    self._reward_helper.initialize(self)
    for k, v in self.other_rewards.items():
        v.initialize(self)

    # controller for voltage
    if not issubclass(self._voltagecontrolerClass, BaseVoltageController):
        raise Grid2OpException(
            'Parameter "voltagecontrolClass" should derive from "ControlVoltageFromFile".'
        )
    self._voltage_controler = self._voltagecontrolerClass(
        gridobj=bk_type,
        controler_backend=self.backend,
        actionSpace_cls=self._helper_action_class,
    )

    # create the opponent
    # At least the 3 following attributes should be set before calling _create_opponent
    self._create_opponent()

    # create the attention budget
    self._create_attention_budget()

    # init the alert relate attributes
    self._init_alert_data()

    # performs one step to load the environment properly (first action need to be taken at first time step after
    # first injections given)
    self._reset_maintenance()
    self._reset_redispatching()
    self._reward_to_obs = {}
    do_nothing = self._helper_action_env({})
    *_, fail_to_start, info = self.step(do_nothing)
    if fail_to_start:
        raise Grid2OpException(
            "Impossible to initialize the powergrid, the powerflow diverge at iteration 0. "
            "Available information are: {}".format(info)
        )

    # test the backend returns object of the proper size
    if need_process_backend:
        self.backend.assert_grid_correct_after_powerflow()

    # for gym compatibility
    self.reward_range = self._reward_helper.range()
    self._viewer = None
    self.viewer_fig = None

    self.metadata = {"render.modes": ["rgb_array"]}
    self.spec = None

    self.current_reward = self.reward_range[0]
    self.done = False

    # reset everything to be consistent
    self._reset_vectors_and_timings()
def max_episode_duration(self):
    """
    Return the maximum duration (in number of steps) of the current episode.

    Notes
    -----
    For possibly infinite episode, the duration is returned as
    `np.iinfo(np.int32).max` which corresponds to the maximum 32 bit integer
    (usually `2147483647`)
    """
    duration = dt_int(self.chronics_handler.max_episode_duration())
    # a negative value is the chronics' way of saying "no end": map it to
    # the largest representable integer
    return duration if duration >= 0 else dt_int(np.iinfo(dt_int).max)
def set_max_iter(self, max_iter):
    """
    Set the maximum number of iterations of an episode.

    Parameters
    ----------
    max_iter: ``int``
        The maximum number of iteration you can do before reaching the end of the episode. Set it to "-1" for
        possibly infinite episode duration.

    Notes
    -------
    Maximum length of the episode can depend on the chronics used. See :attr:`Environment.chronics_handler` for
    more information

    """
    # simply delegated to the chronics handler, which owns the episode length
    self.chronics_handler.set_max_iter(max_iter)
@property
def _helper_observation(self):
    # backward-compatibility alias: older code used "_helper_observation"
    # for what is now named "_observation_space"
    return self._observation_space
@property
def _helper_action_player(self):
    # backward-compatibility alias: older code used "_helper_action_player"
    # for what is now named "_action_space"
    return self._action_space
def _handle_compat_glop_version(self, need_process_backend):
    """
    INTERNAL

    When the environment is created in "compatibility" mode (i.e. it should
    behave as an older grid2op version), rewrite the backend class attributes
    accordingly.  For versions predating storage units, every trace of the
    storage units is surgically removed from the (class-level) grid
    description vectors.  The order of the operations below is critical.

    Parameters
    ----------
    need_process_backend: ``bool``
        Whether the backend was freshly loaded by this environment (and thus
        needs to be renamed / re-validated here).
    """
    if (
        self._compat_glop_version is not None
        and self._compat_glop_version != grid2op.__version__
    ):
        warnings.warn(
            'You are using a grid2op "compatibility" environment. This means that some '
            "feature will not be available. This feature is absolutely NOT recommended except to "
            "read back data (for example with EpisodeData) that were stored with previous "
            "grid2op version."
        )
        if need_process_backend:
            # suffix the environment name so the compat classes do not clash
            # with the "normal" classes of the same environment
            self.backend.set_env_name(f"{self.name}_{self._compat_glop_version}")
        cls_bk = type(self.backend)
        cls_bk.glop_version = self._compat_glop_version
        if cls_bk.glop_version == cls_bk.BEFORE_COMPAT_VERSION:
            # oldest version: no storage and no curtailment available
            # deactivate storage
            # recompute the topology vector (more or less everything need to be adjusted...
            stor_locs = [pos for pos in cls_bk.storage_pos_topo_vect]
            # shift every "position in the topology vector" that comes after
            # a storage unit (iterate in reverse so shifts do not interfere)
            for stor_loc in sorted(stor_locs, reverse=True):
                for vect in [
                    cls_bk.load_pos_topo_vect,
                    cls_bk.gen_pos_topo_vect,
                    cls_bk.line_or_pos_topo_vect,
                    cls_bk.line_ex_pos_topo_vect,
                ]:
                    vect[vect >= stor_loc] -= 1

            # deals with the "sub_pos" vector
            for sub_id in range(cls_bk.n_sub):
                if np.any(cls_bk.storage_to_subid == sub_id):
                    stor_ids = np.where(cls_bk.storage_to_subid == sub_id)[0]
                    stor_locs = cls_bk.storage_to_sub_pos[stor_ids]
                    for stor_loc in sorted(stor_locs, reverse=True):
                        # only elements of the *same* substation are shifted
                        for vect, sub_id_me in zip(
                            [
                                cls_bk.load_to_sub_pos,
                                cls_bk.gen_to_sub_pos,
                                cls_bk.line_or_to_sub_pos,
                                cls_bk.line_ex_to_sub_pos,
                            ],
                            [
                                cls_bk.load_to_subid,
                                cls_bk.gen_to_subid,
                                cls_bk.line_or_to_subid,
                                cls_bk.line_ex_to_subid,
                            ],
                        ):
                            vect[(vect >= stor_loc) & (sub_id_me == sub_id)] -= 1

            # remove storage from the number of element in the substation
            for sub_id in range(cls_bk.n_sub):
                cls_bk.sub_info[sub_id] -= np.sum(cls_bk.storage_to_subid == sub_id)
            # remove storage from the total number of element
            cls_bk.dim_topo -= cls_bk.n_storage

            # recompute this private member
            cls_bk._topo_vect_to_sub = np.repeat(
                np.arange(cls_bk.n_sub), repeats=cls_bk.sub_info
            )
            self.backend._topo_vect_to_sub = np.repeat(
                np.arange(cls_bk.n_sub), repeats=cls_bk.sub_info
            )

            # drop the rows describing storage units from the object table
            new_grid_objects_types = cls_bk.grid_objects_types
            new_grid_objects_types = new_grid_objects_types[
                new_grid_objects_types[:, cls_bk.STORAGE_COL] == -1, :
            ]
            cls_bk.grid_objects_types = 1 * new_grid_objects_types
            self.backend.grid_objects_types = 1 * new_grid_objects_types

            # erase all trace of storage units
            cls_bk.set_no_storage()
            Environment.deactivate_storage(self.backend)

            if need_process_backend:
                # the following line must be called BEFORE "self.backend.assert_grid_correct()" !
                self.backend.storage_deact_for_backward_comaptibility()

                # and recomputes everything while making sure everything is consistent
                self.backend.assert_grid_correct()
                type(self.backend)._topo_vect_to_sub = np.repeat(
                    np.arange(cls_bk.n_sub), repeats=cls_bk.sub_info
                )
                type(self.backend).grid_objects_types = new_grid_objects_types
def _voltage_control(self, agent_action, prod_v_chronics):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Ask the voltage controler for the (possibly updated) generator voltage
    setpoints.  Override this method for a more complex handling of the
    voltages.

    Parameters
    ----------
    agent_action: :class:`grid2op.Action.Action`
        The action performed by the player (or do nothing is player action were not legal or ambiguous)

    prod_v_chronics: ``numpy.ndarray`` or ``None``
        The voltages that has been specified in the chronics

    Returns
    -------
    The action produced by the voltage controler.
    """
    return self._voltage_controler.fix_voltage(
        self.current_obs, agent_action, self._env_modification, prod_v_chronics
    )
def set_chunk_size(self, new_chunk_size):
    """
    For an efficient data pipeline, it can be usefull to not read all part of the input data
    (for example for load_p, prod_p, load_q, prod_v). Grid2Op support the reading of large chronics by "chunk"
    of given size.

    Reading data in chunk can also reduce the memory footprint, useful in case of multiprocessing environment while
    large chronics.

    It is critical to set a small chunk_size in case of training machine learning algorithm (reinforcement
    learning agent) at the beginning when the agent performs poorly, the software might spend most of its time
    loading the data.

    **NB** this has no effect if the chronics does not support this feature.

    **NB** The environment need to be **reset** for this to take effect (it won't affect the chronics already
    loaded)

    Parameters
    ----------
    new_chunk_size: ``int`` or ``None``
        The new chunk size (positive integer), or ``None`` to disable chunked reading.

    Raises
    ------
    Grid2OpException
        If ``new_chunk_size`` is neither ``None`` nor convertible to a strictly
        positive integer.

    Examples
    ---------
    Here is an example on how to use this function

    .. code-block:: python

        import grid2op

        # I create an environment
        env = grid2op.make("rte_case5_example", test=True)
        env.set_chunk_size(100)
        env.reset()  # otherwise chunk size has no effect !
        # and now data will be read from the hard drive 100 time steps per 100 time steps
        # instead of the whole episode at once.

    """
    # None means "no chunking"; only validate an actual size.
    # (Previously the error message read "convertible a integer, and not{}":
    # both the grammar and the missing space are fixed here.)
    if new_chunk_size is not None:
        try:
            new_chunk_size = int(new_chunk_size)
        except Exception as exc_:
            raise Grid2OpException(
                "Impossible to set the chunk size. It should be convertible to an integer, "
                'and not {}. The error was: \n"{}"'.format(new_chunk_size, exc_)
            )
        if new_chunk_size <= 0:
            raise Grid2OpException(
                'Impossible to read less than 1 data at a time. Please make sure "new_chunk_size"'
                "is a positive integer."
            )
    # single exit point: forward the (validated) size to the chronics handler
    self.chronics_handler.set_chunk_size(new_chunk_size)
def simulate(self, action):
    """
    Another method to call `obs.simulate` to ensure compatibility between multi environment and
    regular one.

    Parameters
    ----------
    action:
        A grid2op action

    Returns
    -------
    Same return type as :func:`grid2op.Environment.BaseEnv.step` or
    :func:`grid2op.Observation.BaseObservation.simulate`

    Notes
    -----
    Prefer using `obs.simulate` if possible, it will be faster than this function.

    """
    current_obs = self.get_obs()
    return current_obs.simulate(action)
def set_id(self, id_):
    """
    Set the id that will be used at the next call to :func:`Environment.reset`.

    **NB** this has no effect if the chronics does not support this feature.

    **NB** The environment need to be **reset** for this to take effect.

    Parameters
    ----------
    id_: ``int`` or ``str``
        the id of the chronics used (a name is also accepted since grid2op 1.6.4).

    Examples
    --------
    Here an example that will loop 10 times through the same chronics (always using the same injection then):

    .. code-block:: python

        import grid2op
        from grid2op import make
        from grid2op.BaseAgent import DoNothingAgent

        env = make("rte_case14_realistic")  # create an environment
        agent = DoNothingAgent(env.action_space)  # create an BaseAgent

        for i in range(10):
            env.set_id(0)  # tell the environment to use the chronics with ID 0
            obs = env.reset()  # it is necessary to perform a reset
            reward = env.reward_range[0]
            done = False
            while not done:
                act = agent.act(obs, reward, done)
                obs, reward, done, info = env.step(act)

    And here you have an example on how you can loop through the scenarios in a given order:

    .. code-block:: python

        import grid2op
        from grid2op import make
        from grid2op.BaseAgent import DoNothingAgent

        env = make("rte_case14_realistic")  # create an environment
        agent = DoNothingAgent(env.action_space)  # create an BaseAgent
        scenario_order = [1, 2, 3, 4, 5, 10, 8, 6, 5, 7, 78, 8]

        for id_ in scenario_order:
            env.set_id(id_)  # tell the environment which chronics to use
            obs = env.reset()  # it is necessary to perform a reset
            reward = env.reward_range[0]
            done = False
            while not done:
                act = agent.act(obs, reward, done)
                obs, reward, done, info = env.step(act)

    """
    if isinstance(id_, str):
        # chronics designated by name, supported since grid2op 1.6.4
        self.chronics_handler.tell_id(id_, previous=True)
        return

    try:
        id_int = int(id_)
    except Exception as exc_:
        raise EnvError(
            'the "id_" parameters should be convertible to integer and not be of type {}'
            'with error \n"{}"'.format(type(id_), exc_)
        )
    # the handler is 0-based internally, hence the "- 1"
    self.chronics_handler.tell_id(id_int - 1)
def attach_renderer(self, graph_layout=None):
    """
    This function will attach a renderer, necessary to use for plotting capabilities.

    Parameters
    ----------
    graph_layout: ``dict``
        Here for backward compatibility. Currently not used.

        If you want to set a specific layout call :func:`BaseEnv.attach_layout`

        If ``None`` this class will use the default substations layout provided when the environment was created.
        Otherwise it will use the data provided.

    Examples
    ---------
    Here is how to use the function

    .. code-block:: python

        import grid2op

        # create the environment
        env = grid2op.make()

        if False:
            # if you want to change the default layout of the powergrid
            # assign coordinates (0., 0.) to all substations (this is a dummy thing to do here!)
            layout = {sub_name: (0., 0.) for sub_name in env.name_sub}
            env.attach_layout(layout)
            # NB again, this code will make everything look super ugly !!!! Don't change the
            # default layout unless you have a reason to.

        # and if you want to use the renderer
        env.attach_renderer()

        # and now you can "render" (plot) the state of the grid
        obs = env.reset()
        done = False
        reward = env.reward_range[0]
        while not done:
            env.render()
            action = agent.act(obs, reward, done)
            obs, reward, done, info = env.step(action)

    """
    if self._viewer is not None:
        # a viewer is already attached: nothing to do
        return

    # matplotlib is an optional dependency of grid2op
    try:
        from grid2op.PlotGrid import PlotMatplot
    except ImportError:
        raise Grid2OpException(
            "Cannot attach renderer: missing dependency\n"
            "Please install matplotlib or run pip install grid2op[optional]"
        ) from None

    self._viewer = PlotMatplot(self._observation_space)
    self.viewer_fig = None
    # Set renderer modes
    self.metadata = {"render.modes": ["silent", "rgb_array"]}  # "human",
def __str__(self):
    """Short human readable description: class name + environment name."""
    return f"<{type(self).__name__} instance named {self.name}>"
# TODO be closer to original gym implementation
def reset_grid(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        This is automatically called when using `env.reset`

    Reset the backend to a clean state by reloading the powergrid from the hard drive.
    This might takes some time.

    If the thermal has been modified, it also modify them into the new backend.

    """
    self.backend.reset(
        self._init_grid_path
    )  # the real powergrid of the environment
    self.backend.assert_grid_correct()

    # re-apply any user-defined thermal limits on the freshly reset backend
    if self._thermal_limit_a is not None:
        self.backend.set_thermal_limit(self._thermal_limit_a.astype(dt_float))

    self._backend_action = self._backend_action_class()
    self.nb_time_step = -1  # to have init obs at step 1

    # perform one "do nothing" step so the first injections are applied
    do_nothing = self._helper_action_env({})
    *_, fail_to_start, info = self.step(do_nothing)
    if fail_to_start:
        raise Grid2OpException(
            "Impossible to initialize the powergrid, the powerflow diverge at iteration 0. "
            "Available information are: {}".format(info)
        )

    # assign the right
    self._observation_space.set_real_env_kwargs(self)
def add_text_logger(self, logger=None):
    """
    Add a text logger to this  :class:`Environment`

    Logging is for now an incomplete feature, really incomplete (not used)

    Parameters
    ----------
    logger:
        The logger to use

    Returns
    -------
    The environment itself (fluent style).
    """
    self.logger = logger
    return self
def reset(self) -> BaseObservation:
    """
    Reset the environment to a clean state.
    It will reload the next chronics if any. And reset the grid to a clean state.

    This triggers a full reloading of both the chronics (if they are stored as files) and of the powergrid,
    to ensure the episode is fully over.

    This method should be called only at the end of an episode.

    Examples
    --------
    The standard "gym loop" can be done with the following code:

    .. code-block:: python

        import grid2op

        # create the environment
        env = grid2op.make()

        # and now you can "render" (plot) the state of the grid
        obs = env.reset()
        done = False
        reward = env.reward_range[0]
        while not done:
            action = agent.act(obs, reward, done)
            obs, reward, done, info = env.step(action)
    """
    super().reset()
    # move to the next chronics and re-bind it to the grid element names
    self.chronics_handler.next_chronics()
    self.chronics_handler.initialize(
        self.backend.name_load,
        self.backend.name_gen,
        self.backend.name_line,
        self.backend.name_sub,
        names_chronics_to_backend=self._names_chronics_to_backend,
    )
    self._env_modification = None
    self._reset_maintenance()
    self._reset_redispatching()
    self._reset_vectors_and_timings()  # it need to be done BEFORE to prevent cascading failure when there has been
    self.reset_grid()
    if self.viewer_fig is not None:
        del self.viewer_fig
    self.viewer_fig = None
    # if True, then it will not disconnect lines above their thermal limits
    self._reset_vectors_and_timings()  # and it needs to be done AFTER to have proper timings at tbe beginning
    # the attention budget is reset above

    # reset the opponent
    self._oppSpace.reset()
    # reset, if need, reward and other rewards
    self._reward_helper.reset(self)
    for extra_reward in self.other_rewards.values():
        extra_reward.reset(self)

    # and reset also the "simulated env" in the observation space
    self._observation_space.reset(self)
    self._observation_space.set_real_env_kwargs(self)

    # force the first observation to be generated properly
    self._last_obs = None

    # restore a user-provided initial state if one was given at construction
    if self._init_obs is not None:
        self._reset_to_orig_state(self._init_obs)
    return self.get_obs()
def render(self, mode="rgb_array"):
"""
Render the state of the environment on the screen, using matplotlib
Also returns the Matplotlib figure
Examples
--------
Rendering need first to define a "renderer" which can be done with the following code:
.. code-block:: python
import grid2op
# create the environment
env = grid2op.make()
# if you want to use the renderer
env.attach_renderer()
# and now you can "render" (plot) the state of the grid
obs = env.reset()
done = False
reward = env.reward_range[0]
while not done:
env.render() # this piece of code plot the grid
action = agent.act(obs, reward, done)
obs, reward, done, info = env.step(action)
"""
# Try to create a plotter instance
# Does nothing if viewer exists
# Raises if matplot is not installed
self.attach_renderer()
# Check mode is correct
if mode not in self.metadata["render.modes"]:
err_msg = 'Renderer mode "{}" not supported. Available modes are {}.'
raise Grid2OpException(err_msg.format(mode, self.metadata["render.modes"]))
# Render the current observation
fig = self._viewer.plot_obs(
self.current_obs, figure=self.viewer_fig, redraw=True
)
# First time show for human mode
if self.viewer_fig is None and mode == "human":
fig.show()
else: # Update the figure content
fig.canvas.draw()
# Store to re-use the figure
self.viewer_fig = fig
# Return the rgb array
rgb_array = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(self._viewer.height, self._viewer.width, 3)
return rgb_array
def _custom_deepcopy_for_copy(self, new_obj):
super()._custom_deepcopy_for_copy(new_obj)
new_obj.name = self.name
new_obj._read_from_local_dir = self._read_from_local_dir
new_obj.metadata = copy.deepcopy(self.metadata)
new_obj.spec = copy.deepcopy(self.spec)
new_obj._raw_backend_class = self._raw_backend_class
new_obj._compat_glop_version = self._compat_glop_version
new_obj._actionClass_orig = self._actionClass_orig
new_obj._observationClass_orig = self._observationClass_orig
def copy(self) -> "Environment":
"""
Performs a deep copy of the environment
Unless you have a reason to, it is not advised to make copy of an Environment.
Examples
--------
It should be used as follow:
.. code-block:: python
import grid2op
env = grid2op.make()
cpy_of_env = env.copy()
"""
# res = copy.deepcopy(self) # painfully slow...
# create an empty "me"
my_cls = type(self)
res = my_cls.__new__(my_cls)
# fill its attribute
self._custom_deepcopy_for_copy(res)
return res
def get_kwargs(self, with_backend=True, with_chronics_handler=True):
"""
This function allows to make another Environment with the same parameters as the one that have been used
to make this one.
This is useful especially in cases where Environment is not pickable (for example if some non pickable c++
code are used) but you still want to make parallel processing using "MultiProcessing" module. In that case,
you can send this dictionary to each child process, and have each child process make a copy of ``self``
**NB** This function should not be used to make a copy of an environment. Prefer using :func:`Environment.copy`
for such purpose.
Returns
-------
res: ``dict``
A dictionary that helps build an environment like ``self`` (which is NOT a copy of self) but rather
an instance of an environment with the same properties.
Examples
--------
It should be used as follow:
.. code-block:: python
import grid2op
from grid2op.Environment import Environment
env = grid2op.make() # create the environment of your choice
copy_of_env = Environment(**env.get_kwargs())
# And you can use this one as you would any other environment.
# NB this is not a "proper" copy. for example it will not be at the same step, it will be possible
# seeded with a different seed.
# use `env.copy()` to make a proper copy of an environment.
"""
res = {}
res["init_env_path"] = self._init_env_path
res["init_grid_path"] = self._init_grid_path
if with_chronics_handler:
res["chronics_handler"] = copy.deepcopy(self.chronics_handler)
if with_backend:
if not self.backend._can_be_copied:
raise RuntimeError("Impossible to get the kwargs for this "
"environment, the backend cannot be copied.")
res["backend"] = self.backend.copy()
res["backend"]._is_loaded = False # i can reload a copy of an environment
res["parameters"] = copy.deepcopy(self._parameters)
res["names_chronics_to_backend"] = copy.deepcopy(
self._names_chronics_to_backend
)
res["actionClass"] = self._actionClass_orig
res["observationClass"] = self._observationClass_orig
res["rewardClass"] = self._rewardClass
res["legalActClass"] = self._legalActClass
res["epsilon_poly"] = self._epsilon_poly
res["tol_poly"] = self._tol_poly
res["thermal_limit_a"] = self._thermal_limit_a
res["voltagecontrolerClass"] = self._voltagecontrolerClass
res["other_rewards"] = {k: v.rewardClass for k, v in self.other_rewards.items()}
res["name"] = self.name
res["_raw_backend_class"] = self._raw_backend_class
res["with_forecast"] = self.with_forecast
res["opponent_space_type"] = self._opponent_space_type
res["opponent_action_class"] = self._opponent_action_class
res["opponent_class"] = self._opponent_class
res["opponent_init_budget"] = self._opponent_init_budget
res["opponent_budget_per_ts"] = self._opponent_budget_per_ts
res["opponent_budget_class"] = self._opponent_budget_class
res["opponent_attack_duration"] = self._opponent_attack_duration
res["opponent_attack_cooldown"] = self._opponent_attack_cooldown
res["kwargs_opponent"] = self._kwargs_opponent
res["attention_budget_cls"] = self._attention_budget_cls
res["kwargs_attention_budget"] = copy.deepcopy(self._kwargs_attention_budget)
res["has_attention_budget"] = self._has_attention_budget
res["_read_from_local_dir"] = self._read_from_local_dir
res["kwargs_observation"] = copy.deepcopy(self._kwargs_observation)
res["logger"] = self.logger
res["observation_bk_class"] = self._observation_bk_class
res["observation_bk_kwargs"] = self._observation_bk_kwargs
return res
def _chronics_folder_name(self):
return "chronics"
def train_val_split(
self,
val_scen_id,
add_for_train="train",
add_for_val="val",
add_for_test=None,
test_scen_id=None,
remove_from_name=None,
deep_copy=False,
):
"""
This function is used as :func:`Environment.train_val_split_random`.
Please refer to this the help of :func:`Environment.train_val_split_random` for more information about
this function.
Parameters
----------
val_scen_id: ``list``
List of the scenario names that will be placed in the validation set
test_scen_id: ``list``
.. versionadded:: 2.6.5
List of the scenario names that will be placed in the test set (only used
if add_for_test is not None - and mandatory in this case)
add_for_train: ``str``
See :func:`Environment.train_val_split_random` for more information
add_for_val: ``str``
See :func:`Environment.train_val_split_random` for more information
add_for_test: ``str``
.. versionadded:: 2.6.5
See :func:`Environment.train_val_split_random` for more information
remove_from_name: ``str``
See :func:`Environment.train_val_split_random` for more information
deep_copy: ``bool``
.. versionadded:: 2.6.5
See :func:`Environment.train_val_split_random` for more information
Returns
-------
nm_train: ``str``
See :func:`Environment.train_val_split_random` for more information
nm_val: ``str``
See :func:`Environment.train_val_split_random` for more information
nm_test: ``str``, optionnal
.. versionadded:: 2.6.5
See :func:`Environment.train_val_split_random` for more information
Examples
--------
A full example on a training / validation / test split with explicit specification of which
chronics goes in which scenarios is:
.. code-block:: python
import grid2op
import os
env_name = "l2rpn_case14_sandbox" # or any other...
env = grid2op.make(env_name)
# retrieve the names of the chronics:
full_path_data = env.chronics_handler.subpaths
chron_names = [os.path.split(el)[-1] for el in full_path_data]
# splitting into training / test, keeping the "last" 10 chronics to the test set
nm_env_train, m_env_val, nm_env_test = env.train_val_split(test_scen_id=chron_names[-10:], # last 10 in test set
add_for_test="test",
val_scen_id=chron_names[-20:-10], # last 20 to last 10 in val test
)
env_train = grid2op.make(env_name+"_train")
env_val = grid2op.make(env_name+"_val")
env_test = grid2op.make(env_name+"_test")
For a more simple example, with less parametrization and with random assignment (recommended),
please refer to the help of :func:`Environment.train_val_split_random`
**NB** read the "Notes" of this section for possible "unexpected" behaviour of the code snippet above.
On Some windows based platform, if you don't have an admin account nor a
"developer" account (see https://docs.python.org/3/library/os.html#os.symlink)
you might need to do:
.. code-block:: python
import grid2op
import os
env_name = "l2rpn_case14_sandbox" # or any other...
env = grid2op.make(env_name)
# retrieve the names of the chronics:
full_path_data = env.chronics_handler.subpaths
chron_names = [os.path.split(el)[-1] for el in full_path_data]
# splitting into training / test, keeping the "last" 10 chronics to the test set
nm_env_train, m_env_val, nm_env_test = env.train_val_split(test_scen_id=chron_names[-10:], # last 10 in test set
add_for_test="test",
val_scen_id=chron_names[-20:-10], # last 20 to last 10 in val test
deep_copy=True)
.. warning::
The above code will use much more memory on your hard drive than the version using symbolic links.
It will also be significantly slower !
As an "historical curiosity", this is what you needed to do in grid2op version < 1.6.5:
.. code-block:: python
import grid2op
import os
env_name = "l2rpn_case14_sandbox" # or any other...
env = grid2op.make(env_name)
# retrieve the names of the chronics:
full_path_data = env.chronics_handler.subpaths
chron_names = [os.path.split(el)[-1] for el in full_path_data]
# splitting into training / test, keeping the "last" 10 chronics to the test set
nm_env_trainval, nm_env_test = env.train_val_split(val_scen_id=chron_names[-10:],
add_for_val="test",
add_for_train="trainval")
# now splitting again the training set into training and validation, keeping the last 10 chronics
# of this environment for validation
env_trainval = grid2op.make(nm_env_trainval) # create the "trainval" environment
full_path_data = env_trainval.chronics_handler.subpaths
chron_names = [os.path.split(el)[-1] for el in full_path_data]
nm_env_train, nm_env_val = env_trainval.train_val_split(val_scen_id=chron_names[-10:],
remove_from_name="_trainval$")
# and now you can use the following code to load the environments:
env_train = grid2op.make(env_name+"_train")
env_val = grid2op.make(env_name+"_val")
env_test = grid2op.make(env_name+"_test")
Notes
------
We don't recommend you to use this function. It provides a great level of control on which
scenarios goes into which dataset, which is nice, but
"*with great power comes great responsibilities*".
Keep in mind that scenarios might be "sorted" by having some "month" in their names.
For example, the first k scenarios might be called "April_XXX"
and the last k ones having names with "September_XXX".
In general, we would not consider good practice to have all validation (or test) scenarios coming
from the same months. Keep that in mind if you use the code snippet above.
"""
# define all the locations
if re.match(self.REGEX_SPLIT, add_for_train) is None:
raise EnvError(
f"The suffixes you can use for training data (add_for_train) "
f'should match the regex "{self.REGEX_SPLIT}"'
)
if re.match(self.REGEX_SPLIT, add_for_val) is None:
raise EnvError(
f"The suffixes you can use for validation data (add_for_val)"
f'should match the regex "{self.REGEX_SPLIT}"'
)
if add_for_test is not None:
if re.match(self.REGEX_SPLIT, add_for_test) is None:
raise EnvError(
f"The suffixes you can use for test data (add_for_test)"
f'should match the regex "{self.REGEX_SPLIT}"'
)
if add_for_test is None and test_scen_id is not None:
raise EnvError(f"add_for_test is None and test_scen_id is not None.")
if add_for_test is not None and test_scen_id is None:
raise EnvError(f"add_for_test is not None and test_scen_id is None.")
from grid2op.Chronics import MultifolderWithCache, Multifolder
if not isinstance(
self.chronics_handler.real_data, (MultifolderWithCache, Multifolder)
):
raise EnvError(
"It does not make sense to split a environment between training / validation "
"if the chronics are not read from directories."
)
my_path = self.get_path_env()
path_train = os.path.split(my_path)
my_name = path_train[1]
if remove_from_name is not None:
if re.match(r"^[a-zA-Z0-9\\^\\$_]*$", remove_from_name) is None:
raise EnvError(
"The suffixes you can remove from the name of the environment (remove_from_name)"
'should match the regex "^[a-zA-Z0-9^$_]*$"'
)
my_name = re.sub(remove_from_name, "", my_name)
nm_train = f"{my_name}_{add_for_train}"
path_train = os.path.join(path_train[0], nm_train)
path_val = os.path.split(my_path)
nm_val = f"{my_name}_{add_for_val}"
path_val = os.path.join(path_val[0], nm_val)
nm_test = None
path_test = None
if add_for_test is not None:
path_test = os.path.split(my_path)
nm_test = f"{my_name}_{add_for_test}"
path_test = os.path.join(path_test[0], nm_test)
chronics_dir = self._chronics_folder_name()
# create the folder
if os.path.exists(path_val):
raise RuntimeError(
f"Impossible to create the validation environment that should have the name "
f'"{nm_val}" because an environment is already named this way. If you want to '
f'continue either delete the folder "{path_val}" or name your validation environment '
f"differently "
f'using the "add_for_val" keyword argument of this function.'
)
if os.path.exists(path_train):
raise RuntimeError(
f"Impossible to create the training environment that should have the name "
f'"{nm_train}" because an environment is already named this way. If you want to '
f'continue either delete the folder "{path_train}" or name your training environment '
f" differently "
f'using the "add_for_train" keyword argument of this function.'
)
if nm_test is not None and os.path.exists(path_test):
raise RuntimeError(
f"Impossible to create the test environment that should have the name "
f'"{nm_test}" because an environment is already named this way. If you want to '
f'continue either delete the folder "{path_test}" or name your test environment '
f" differently "
f'using the "add_for_test" keyword argument of this function.'
)
os.mkdir(path_val)
os.mkdir(path_train)
if nm_test is not None:
os.mkdir(path_test)
# assign which chronics goes where
chronics_path = os.path.join(my_path, chronics_dir)
all_chron = sorted(os.listdir(chronics_path))
to_val = set(val_scen_id)
to_test = set() # see https://github.com/rte-france/Grid2Op/issues/363
if nm_test is not None:
to_test = set(test_scen_id)
if deep_copy:
import shutil
copy_file_fun = shutil.copy2
copy_dir_fun = shutil.copytree
else:
copy_file_fun = os.symlink
copy_dir_fun = os.symlink
# "copy" the files
for el in os.listdir(my_path):
tmp_path = os.path.join(my_path, el)
if os.path.isfile(tmp_path):
# this is a regular env file
copy_file_fun(tmp_path, os.path.join(path_train, el))
copy_file_fun(tmp_path, os.path.join(path_val, el))
if nm_test is not None:
copy_file_fun(tmp_path, os.path.join(path_test, el))
elif os.path.isdir(tmp_path):
if el == chronics_dir:
# this is the chronics folder
os.mkdir(os.path.join(path_train, chronics_dir))
os.mkdir(os.path.join(path_val, chronics_dir))
if nm_test is not None:
os.mkdir(os.path.join(path_test, chronics_dir))
for chron_name in all_chron:
tmp_path_chron = os.path.join(tmp_path, chron_name)
if chron_name in to_val:
copy_dir_fun(
tmp_path_chron,
os.path.join(path_val, chronics_dir, chron_name),
)
elif chron_name in to_test:
copy_dir_fun(
tmp_path_chron,
os.path.join(path_test, chronics_dir, chron_name),
)
else:
copy_dir_fun(
tmp_path_chron,
os.path.join(path_train, chronics_dir, chron_name),
)
if add_for_test is None:
res = nm_train, nm_val
else:
res = nm_train, nm_val, nm_test
return res
def train_val_split_random(
self,
pct_val=10.0,
add_for_train="train",
add_for_val="val",
add_for_test=None,
pct_test=None,
remove_from_name=None,
deep_copy=False,
):
"""
By default a grid2op environment contains multiple "scenarios" containing values for all the producers
and consumers representing multiple days. In a "game like" environment, you can think of the scenarios as
being different "game levels": different mazes in pacman, different levels in mario etc.
We recommend to train your agent on some of these "chroncis" (aka levels) and test the performance of your
agent on some others, to avoid overfitting.
This function allows to easily split an environment into different part. This is most commonly used in machine
learning where part of a dataset is used for training and another part is used for assessing the performance
of the trained model.
This function rely on "symbolic link" and will not duplicate data.
New created environments will behave like regular grid2op environment and will be accessible with "make" just
like any others (see the examples section for more information).
This function will make the split at random. If you want more control on the which scenarios to use for
training and which for validation, use the :func:`Environment.train_val_split` that allows to specify
which scenarios goes in the validation environment (and the others go in the training environment).
Parameters
----------
pct_val: ``float``
Percentage of chronics that will go to the validation set.
For 10% of the chronics, set it to 10. and NOT to 0.1.
add_for_train: ``str``
Suffix that will be added to the name of the environment for the training set. We don't recommend to
modify the default value ("train")
add_for_val: ``str``
Suffix that will be added to the name of the environment for the validation set. We don't recommend to
modify the default value ("val")
add_for_test: ``str``, (optional)
.. versionadded:: 2.6.5
Suffix that will be added to the name of the environment for the test set. By default,
it only splits into training and validation, so this is ignored. We recommend
to assign it to "test" if you want to split into training / validation and test.
If it is set, then the `pct_test` must also be set.
pct_test: ``float``, (optional)
.. versionadded:: 2.6.5
Percentage of chronics that will go to the test set.
For 10% of the chronics, set it to 10. and NOT to 0.1.
(If you set it, you need to set the `add_for_test` argument.)
remove_from_name: ``str``
If you "split" an environment multiple times, this allows you to keep "short" names (for example
you will be able to call `grid2op.make(env_name+"_train")` instead of
`grid2op.make(env_name+"_train_train")`)
deep_copy: ``bool``
.. versionadded:: 2.6.5
A function to specify to "copy" the elements of the original
environment to the created one. By default it will save as
much memory as possible using symbolic links (rather than performing
copies). By default it does use symbolic links (`deep_copy=False`).
.. note::
If set to ``True`` the new environment will take much more space
on the hard drive, and the execution of this function will
be much slower !
.. warning::
On windows based system, you will most likely run into issues
if you don't set this parameters.
Indeed, Windows does not link symbolink links
(https://docs.python.org/3/library/os.html#os.symlink).
In this case, you can use the ``deep_copy=True`` and
it will work fine (examples in the function
:func:`Environment.train_val_split`)
Returns
-------
nm_train: ``str``
Complete name of the "training" environment
nm_val: ``str``
Complete name of the "validation" environment
nm_test: ``str``, optionnal
.. versionadded:: 2.6.5
Complete name of the "test" environment. It is only returned if
`add_for_test` and `pct_test` are not `None`.
Examples
--------
This function can be used like:
.. code-block:: python
import grid2op
env_name = "l2rpn_case14_sandbox" # or any other...
env = grid2op.make(env_name)
# extract 1% of the "chronics" to be used in the validation environment. The other 99% will
# be used for test
nm_env_train, nm_env_val = env.train_val_split_random(pct_val=1.)
# and now you can use the training set only to train your agent:
print(f"The name of the training environment is \\"{nm_env_train}\\"")
print(f"The name of the validation environment is \\"{nm_env_val}\\"")
env_train = grid2op.make(nm_env_train)
And even after you close the python session, you can still use this environment for training. If you used
the exact code above that will look like:
.. code-block:: python
import grid2op
env_name_train = "l2rpn_case14_sandbox_train" # depending on the option you passed above
env_train = grid2op.make(env_name_train)
.. versionadded:: 2.6.5
Possibility to create a training, validation AND test set.
If you have grid2op version >= 1.6.5, you can also use the following:
.. code-block:: python
import grid2op
env_name = "l2rpn_case14_sandbox" # or any other...
env = grid2op.make(env_name)
# extract 1% of the "chronics" to be used in the validation environment. The other 99% will
# be used for test
nm_env_train, nm_env_val, nm_env_test = env.train_val_split_random(pct_val=1., pct_test=1.)
# and now you can use the training set only to train your agent:
print(f"The name of the training environment is \\"{nm_env_train}\\"")
print(f"The name of the validation environment is \\"{nm_env_val}\\"")
print(f"The name of the test environment is \\"{nm_env_test}\\"")
env_train = grid2op.make(nm_env_train)
.. warning::
In this case this function returns 3 elements and not 2 !
Notes
-----
This function will fail if an environment already exists with one of the name that would be given
to the training environment or the validation environment (or test environment).
"""
if re.match(self.REGEX_SPLIT, add_for_train) is None:
raise EnvError(
"The suffixes you can use for training data (add_for_train) "
'should match the regex "{self.REGEX_SPLIT}"'
)
if re.match(self.REGEX_SPLIT, add_for_val) is None:
raise EnvError(
"The suffixes you can use for validation data (add_for_val)"
'should match the regex "{self.REGEX_SPLIT}"'
)
if add_for_test is None and pct_test is not None:
raise EnvError(f"add_for_test is None and pct_test is not None.")
if add_for_test is not None and pct_test is None:
raise EnvError(f"add_for_test is not None and pct_test is None.")
my_path = self.get_path_env()
chronics_path = os.path.join(my_path, self._chronics_folder_name())
all_chron = sorted(os.listdir(chronics_path))
all_chron = [
el for el in all_chron if os.path.isdir(os.path.join(chronics_path, el))
]
nb_init = len(all_chron)
to_val = self.space_prng.choice(
all_chron, size=int(nb_init * pct_val * 0.01), replace=False
)
test_scen_id = None
if pct_test is not None:
all_chron = set(all_chron) - set(to_val)
all_chron = list(all_chron)
test_scen_id = self.space_prng.choice(
all_chron, size=int(nb_init * pct_test * 0.01), replace=False
)
return self.train_val_split(
to_val,
add_for_train=add_for_train,
add_for_val=add_for_val,
remove_from_name=remove_from_name,
add_for_test=add_for_test,
test_scen_id=test_scen_id,
deep_copy=deep_copy,
)
def get_params_for_runner(self):
"""
This method is used to initialize a proper :class:`grid2op.Runner.Runner` to use this specific environment.
Examples
--------
It should be used as followed:
.. code-block:: python
import grid2op
from grid2op.Runner import Runner
from grid2op.Agent import DoNothingAgent # for example
env = grid2op.make() # create the environment of your choice
# create the proper runner
runner = Runner(**env.get_params_for_runner(), agentClass=DoNothingAgent)
# now you can run
runner.run(nb_episode=1) # run for 1 episode
"""
res = {}
res["init_env_path"] = self._init_env_path
res["init_grid_path"] = self._init_grid_path
res["path_chron"] = self.chronics_handler.path
res["parameters_path"] = self._parameters.to_dict()
res["names_chronics_to_backend"] = self._names_chronics_to_backend
res["actionClass"] = self._actionClass_orig
res["observationClass"] = self._observationClass_orig
res["rewardClass"] = copy.deepcopy(self._rewardClass)
res["legalActClass"] = self._legalActClass
res["envClass"] = Environment # TODO !
res["gridStateclass"] = self.chronics_handler.chronicsClass
res["backendClass"] = self._raw_backend_class
if hasattr(self.backend, "_my_kwargs"):
res["backend_kwargs"] = self.backend._my_kwargs
else:
msg_ = ("You are probably using a legacy backend class that cannot "
"be copied properly. Please upgrade your backend to the latest version.")
self.logger.warn(msg_)
warnings.warn(msg_)
res["backend_kwargs"] = None
res["verbose"] = False
dict_ = copy.deepcopy(self.chronics_handler.kwargs)
if "path" in dict_:
# path is handled elsewhere
del dict_["path"]
if self.chronics_handler.max_iter is not None:
res["max_iter"] = self.chronics_handler.max_iter
res["gridStateclass_kwargs"] = dict_
res["thermal_limit_a"] = self._thermal_limit_a
res["voltageControlerClass"] = self._voltagecontrolerClass
res["other_rewards"] = {k: v.rewardClass for k, v in self.other_rewards.items()}
res["grid_layout"] = self.grid_layout
res["name_env"] = self.name
res["opponent_space_type"] = self._opponent_space_type
res["opponent_action_class"] = self._opponent_action_class
res["opponent_class"] = self._opponent_class
res["opponent_init_budget"] = self._opponent_init_budget
res["opponent_budget_per_ts"] = self._opponent_budget_per_ts
res["opponent_budget_class"] = self._opponent_budget_class
res["opponent_attack_duration"] = self._opponent_attack_duration
res["opponent_attack_cooldown"] = self._opponent_attack_cooldown
res["opponent_kwargs"] = self._kwargs_opponent
res["attention_budget_cls"] = self._attention_budget_cls
res["kwargs_attention_budget"] = copy.deepcopy(self._kwargs_attention_budget)
res["has_attention_budget"] = self._has_attention_budget
res["_read_from_local_dir"] = self._read_from_local_dir
res["logger"] = self.logger
res["kwargs_observation"] = copy.deepcopy(self._kwargs_observation)
res["observation_bk_class"] = self._observation_bk_class
res["observation_bk_kwargs"] = self._observation_bk_kwargs
res["_is_test"] = self._is_test # TODO not implemented !!
return res
@classmethod
def init_obj_from_kwargs(cls,
other_env_kwargs,
init_env_path,
init_grid_path,
chronics_handler,
backend,
parameters,
name,
names_chronics_to_backend,
actionClass,
observationClass,
rewardClass,
legalActClass,
voltagecontrolerClass,
other_rewards,
opponent_space_type,
opponent_action_class,
opponent_class,
opponent_init_budget,
opponent_budget_per_ts,
opponent_budget_class,
opponent_attack_duration,
opponent_attack_cooldown,
kwargs_opponent,
with_forecast,
attention_budget_cls,
kwargs_attention_budget,
has_attention_budget,
logger,
kwargs_observation,
observation_bk_class,
observation_bk_kwargs,
_raw_backend_class,
_read_from_local_dir):
res = Environment(init_env_path=init_env_path,
init_grid_path=init_grid_path,
chronics_handler=chronics_handler,
backend=backend,
parameters=parameters,
name=name,
names_chronics_to_backend=names_chronics_to_backend,
actionClass=actionClass,
observationClass=observationClass,
rewardClass=rewardClass,
legalActClass=legalActClass,
voltagecontrolerClass=voltagecontrolerClass,
other_rewards=other_rewards,
opponent_space_type=opponent_space_type,
opponent_action_class=opponent_action_class,
opponent_class=opponent_class,
opponent_init_budget=opponent_init_budget,
opponent_budget_per_ts=opponent_budget_per_ts,
opponent_budget_class=opponent_budget_class,
opponent_attack_duration=opponent_attack_duration,
opponent_attack_cooldown=opponent_attack_cooldown,
kwargs_opponent=kwargs_opponent,
with_forecast=with_forecast,
attention_budget_cls=attention_budget_cls,
kwargs_attention_budget=kwargs_attention_budget,
has_attention_budget=has_attention_budget,
logger=logger,
kwargs_observation=kwargs_observation,
observation_bk_class=observation_bk_class,
observation_bk_kwargs=observation_bk_kwargs,
_raw_backend_class=_raw_backend_class,
_read_from_local_dir=_read_from_local_dir)
return res
def generate_data(self, nb_year=1, nb_core=1, seed=None, **kwargs):
"""This function uses the chronix2grid package to generate more data that will then
be available locally. You need to install it independently (see https://github.com/BDonnot/ChroniX2Grid#installation
for more information)
I also requires the lightsim2grid simulator.
This is only available for some environment (only the environment used for wcci 2022 competition at
time of writing).
Generating data takes some time (around 1 - 2 minutes to generate a weekly scenario) and this why we recommend
to do it "offline" and then use the generated data for training or evaluation.
.. warning::
You should not start this function twice. Before starting a new run, make sure the previous one has terminated (otherwise you might
erase some previously generated scenario)
Examples
---------
The recommended process when you want to use this function is to first generate some more data:
.. code-block:: python
import grid2op
env = grid2op.make("l2rpn_wcci_2022")
env.generate_data(nb_year=XXX) # replace XXX by the amount of data you want. If you put 1 you will have 52 different
# scenarios
Then, later on, you can use it as you please, transparently:
.. code-block:: python
import grid2op
env = grid2op.make("l2rpn_wcci_2022")
obs = env.reset() # obs might come from the data you have generated
Parameters
----------
nb_year : int, optional
the number of "year" you want to generate. Each "year" is made of 52 weeks meaning that if you
ask to generate one year, you have 52 more scenarios, by default 1
nb_core : int, optional
number of computer cores to use, by default 1.
seed: int, optional
If the same seed is given, then the same data will be generated.
**kwargs:
key word arguments passed to `add_data` function of `chronix2grid.grid2op_utils` module
"""
try:
from chronix2grid.grid2op_utils import add_data
except ImportError as exc_:
raise ImportError(
f"Chronix2grid package is not installed. Install it with `pip install grid2op[chronix2grid]`"
f"Please visit https://github.com/bdonnot/chronix2grid#installation "
f"for further install instructions."
) from exc_
pot_file = None
if self.get_path_env() is not None:
pot_file = os.path.join(self.get_path_env(), "chronix2grid_adddata_kwargs.json")
if os.path.exists(pot_file) and os.path.isfile(pot_file):
import json
with open(pot_file, "r", encoding="utf-8") as f:
kwargs_default = json.load(f)
for el in kwargs_default:
if not el in kwargs:
kwargs[el] = kwargs_default[el]
# TODO logger here for the kwargs used (including seed=seed, nb_scenario=nb_year, nb_core=nb_core)
add_data(
env=self, seed=seed, nb_scenario=nb_year, nb_core=nb_core,
**kwargs
)
| 77,149 | 40.322978 | 143 | py |
Grid2Op | Grid2Op-master/grid2op/Environment/MultiEnvMultiProcess.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from multiprocessing import Process, Pipe
import numpy as np
from grid2op.dtypes import dt_int
from grid2op.Exceptions import Grid2OpException, MultiEnvException
from grid2op.Space import GridObjects
from grid2op.Environment.BaseMultiProcessEnv import BaseMultiProcessEnvironment
from grid2op.Action import BaseAction
class MultiEnvMultiProcess(BaseMultiProcessEnvironment):
    """
    Evaluate a single agent instance on several (possibly different)
    environments running in parallel subprocesses.

    This is a kind of :class:`BaseMultiProcessEnvironment` (see that class
    for the full interface). Each environment given in ``envs`` is
    duplicated the number of times given by the matching entry of
    ``nb_envs``; the total number of subprocesses is ``sum(nb_envs)``.

    Attributes
    -----------
    envs: ``list`` of :class:`grid2op.Environment.Environment`
        The environments evaluated in parallel.

    nb_envs: ``list`` of ``int``
        How many copies of each environment are spawned. MUST have the
        same length as ``envs``.

    Examples
    --------
    This class can be used as:

    .. code-block:: python

        import grid2op
        from grid2op.Environment import MultiEnvMultiProcess

        env0 = grid2op.make()  # create an environment
        env1 = grid2op.make()  # create a second environment, that can be similar, or not
        # it is recommended to filter or create the environment with different parameters,
        # otherwise this class is of little interest
        envs = [env0, env1]  # list of all environments created
        nb_envs = [1, 7]  # env0 is copied once, env1 seven times -> 8 subprocesses total

        multi_env = MultiEnvMultiProcess(envs=envs, nb_envs=nb_envs)
        # and now you can use it like any other grid2op environment (almost)
        observations = multi_env.reset()

    """

    def __init__(self, envs, nb_envs, obs_as_class=True, return_info=True, logger=None):
        # the per-environment copy counts must be convertible to integers
        try:
            nb_envs = np.asarray(nb_envs).astype(dt_int)
        except Exception as exc_:
            raise MultiEnvException(
                '"nb_envs" argument should be a list of integers. We could not '
                'convert it to such with error "{}"'.format(exc_)
            )
        # reject negative counts ...
        if np.any(nb_envs < 0):
            raise MultiEnvException(
                'You ask to perform "{}" copy of an environment. This is a negative '
                'integer. I cannot do that. Please make sure "nb_envs" argument '
                "is all made of strictly positive integers and not {}."
                "".format(np.min(nb_envs), nb_envs)
            )
        # ... and zero counts (a mix with no worker is not supported)
        if np.any(nb_envs == 0):
            raise MultiEnvException(
                "You ask to perform 0 copy of an environment. This is not supported at "
                'the moment. Please make sure "nb_envs" argument '
                "is all made of strictly positive integers and not {}."
                "".format(nb_envs)
            )
        # duplicate each environment the requested number of times, preserving order
        duplicated_envs = [
            envs[env_id] for env_id, count in enumerate(nb_envs) for _ in range(count)
        ]
        child_logger = (
            logger.getChild("MultiEnvMultiProcess") if logger is not None else None
        )
        super().__init__(
            duplicated_envs,
            obs_as_class=obs_as_class,
            return_info=return_info,
            logger=child_logger,
        )
if __name__ == "__main__":
    # Small benchmark/demo: compare the throughput of the multi-process
    # wrapper against a single sequential environment.
    from tqdm import tqdm
    from grid2op import make
    from grid2op.Agent import DoNothingAgent
    nb_env = [2, 2, 1, 1, 2]  # change that to adapt to your system
    NB_STEP = 100  # number of step for each environment
    env = make()
    env.seed(42)
    envs = [env, env, env, env, env]
    agent = DoNothingAgent(env.action_space)
    multi_envs = MultiEnvMultiProcess(envs, nb_env)
    obs = multi_envs.reset()
    rews = [env.reward_range[0] for i in range(multi_envs.nb_env)]
    dones = [False for i in range(multi_envs.nb_env)]
    total_reward = 0.0
    # run all sub environments in lockstep for NB_STEP iterations
    for i in tqdm(range(NB_STEP)):
        acts = [None for _ in range(multi_envs.nb_env)]
        for env_act_id in range(multi_envs.nb_env):
            acts[env_act_id] = agent.act(
                obs[env_act_id], rews[env_act_id], dones[env_act_id]
            )
        obs, rews, dones, infos = multi_envs.step(acts)
        total_reward += np.sum(rews)
    multi_envs.close()
    # same number of steps on a single environment, for comparison
    ob = env.reset()
    rew = env.reward_range[0]
    done = False
    total_reward_single = 0
    for i in tqdm(range(NB_STEP)):
        act = agent.act(ob, rew, done)
        ob, rew, done, info = env.step(act)
        if done:
            ob = env.reset()
        total_reward_single += np.sum(rew)
    env.close()
    # fixed typo in the output message ("mluti_env" -> "multi_env")
    print("total_reward multi_env: {}".format(total_reward))
    print("total_reward single env: {}".format(total_reward_single))
| 5,561 | 37.895105 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Environment/MultiMixEnv.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import warnings
import numpy as np
import copy
from grid2op.dtypes import dt_int, dt_float
from grid2op.Space import GridObjects, RandomObject
from grid2op.Exceptions import EnvError, Grid2OpException
class MultiMixEnvironment(GridObjects, RandomObject):
    """
    This class represent a single powergrid configuration,
    backed by multiple environments parameters and chronics

    It implements most of the :class:`BaseEnv` public interface:
    so it can be used as a more classic environment.

    MultiMixEnvironment environments behave like a superset of the environment: they
    are made of sub environments (called mixes) that are grid2op regular :class:`Environment`.
    You might think the MultiMixEnvironment as a dictionary of :class:`Environment` that implements
    some of the :class:`BaseEnv` interface such as :func:`BaseEnv.step` or :func:`BaseEnv.reset`.

    By default, each time you call the "step" function a different mix is used. Mixes, by default
    are looped through always in the same order. You can see the Examples section for information
    about control of these

    Examples
    --------
    In this section we present some common use of the MultiMix environment.

    **Basic Usage**

    You can think of a MultiMixEnvironment as any :class:`Environment`. So this is a perfectly
    valid way to use a MultiMix:

    .. code-block:: python

        import grid2op
        from grid2op.Agent import RandomAgent

        # we use an example of a multimix dataset attached with grid2op package
        multimix_env = grid2op.make("l2rpn_neurips_2020_track2", test=True)

        # define an agent like in any environment
        agent = RandomAgent(multimix_env.action_space)

        # and now you can do the open ai gym loop
        NB_EPISODE = 10
        for i in range(NB_EPISODE):
            obs = multimix_env.reset()
            # each time "reset" is called, another mix is used.
            reward = multimix_env.reward_range[0]
            done = False
            while not done:
                act = agent.act(obs, reward, done)
                obs, reward, done, info = multimix_env.step(act)

    **Use each mix one after the other**

    In case you want to study each mix independently, you can iterate through the MultiMix
    in a pythonic way. This makes it easy to perform, for example, 10 episode for a given mix
    before passing to the next one.

    .. code-block:: python

        import grid2op
        from grid2op.Agent import RandomAgent

        # we use an example of a multimix dataset attached with grid2op package
        multimix_env = grid2op.make("l2rpn_neurips_2020_track2", test=True)

        NB_EPISODE = 10
        for mix in multimix_env:
            # mix is a regular environment, you can do whatever you want with it
            # for example
            for i in range(NB_EPISODE):
                obs = multimix_env.reset()
                # each time "reset" is called, another mix is used.
                reward = multimix_env.reward_range[0]
                done = False
                while not done:
                    act = agent.act(obs, reward, done)
                    obs, reward, done, info = multimix_env.step(act)

    **Selecting a given Mix**

    Sometimes it might be interesting to study only a given mix.
    For that you can use the `[]` operator to select only a given mix (which is a grid2op environment)
    and use it as you would.

    This can be done with:

    .. code-block:: python

        import grid2op
        from grid2op.Agent import RandomAgent

        # we use an example of a multimix dataset attached with grid2op package
        multimix_env = grid2op.make("l2rpn_neurips_2020_track2", test=True)

        # define an agent like in any environment
        agent = RandomAgent(multimix_env.action_space)

        # list all available mixes:
        mixes_names = list(multimix_env.keys())

        # and now supposes we want to study only the first one
        mix = multimix_env[mixes_names[0]]

        # and now you can do the open ai gym loop, or anything you want with it
        NB_EPISODE = 10
        for i in range(NB_EPISODE):
            obs = mix.reset()
            # each time "reset" is called, another mix is used.
            reward = mix.reward_range[0]
            done = False
            while not done:
                act = agent.act(obs, reward, done)
                obs, reward, done, info = mix.step(act)

    **Using the Runner**

    For MultiMixEnvironment using the :class:`grid2op.Runner.Runner` cannot be done in a
    straightforward manner. Here we give an example on how to do it.

    .. code-block:: python

        import os
        import grid2op
        from grid2op.Agent import RandomAgent

        # we use an example of a multimix dataset attached with grid2op package
        multimix_env = grid2op.make("l2rpn_neurips_2020_track2", test=True)

        # you can use the runner as following
        PATH = "PATH/WHERE/YOU/WANT/TO/SAVE/THE/RESULTS"
        for mix in multimix_env:
            runner = Runner(**mix.get_params_for_runner(), agentClass=RandomAgent)
            runner.run(nb_episode=1,
                       path_save=os.path.join(PATH,mix.name))

    """

    def __init__(
        self,
        envs_dir,
        logger=None,
        experimental_read_from_local_dir=False,
        _add_to_name="",  # internal, for test only, do not use !
        _compat_glop_version=None,  # internal, for test only, do not use !
        _test=False,
        **kwargs,
    ):
        GridObjects.__init__(self)
        RandomObject.__init__(self)
        self.current_env = None
        self.env_index = None
        self.mix_envs = []
        self._env_dir = os.path.abspath(envs_dir)
        self.__closed = False
        # Special case handling for backend
        # TODO: with backend.copy() instead !
        backendClass = None
        backend_kwargs = {}
        if "backend" in kwargs:
            # remember the backend class (and its kwargs if available) so that
            # a *fresh* backend instance can be built for each mix
            backendClass = type(kwargs["backend"])
            if hasattr(kwargs["backend"], "_my_kwargs"):
                # was introduced in grid2op 1.7.1
                backend_kwargs = kwargs["backend"]._my_kwargs
            del kwargs["backend"]
        # Inline import to prevent cyclical import
        from grid2op.MakeEnv.Make import make
        # TODO reuse same observation_space and action_space in all the envs maybe ?
        try:
            # each sub-directory of `envs_dir` is expected to be a regular grid2op env
            for env_dir in sorted(os.listdir(envs_dir)):
                env_path = os.path.join(envs_dir, env_dir)
                if not os.path.isdir(env_path):
                    continue
                this_logger = (
                    logger.getChild(f"MultiMixEnvironment_{env_dir}")
                    if logger is not None
                    else None
                )
                # Special case for backend
                if backendClass is not None:
                    try:
                        # should pass with grid2op >= 1.7.1
                        bk = backendClass(**backend_kwargs)
                    except TypeError as exc_:
                        # with grid2Op version prior to 1.7.1
                        # you might have trouble with
                        # "TypeError: __init__() got an unexpected keyword argument 'can_be_copied'"
                        msg_ = ("Impossible to create a backend for each mix using the "
                                "backend key-word arguments. Falling back to creating "
                                "with no argument at all (default behaviour with grid2op <= 1.7.0).")
                        warnings.warn(msg_)
                        bk = backendClass()
                    env = make(
                        env_path,
                        backend=bk,
                        _add_to_name=_add_to_name,
                        _compat_glop_version=_compat_glop_version,
                        test=_test,
                        logger=this_logger,
                        experimental_read_from_local_dir=experimental_read_from_local_dir,
                        **kwargs,
                    )
                else:
                    env = make(
                        env_path,
                        _add_to_name=_add_to_name,
                        _compat_glop_version=_compat_glop_version,
                        test=_test,
                        logger=this_logger,
                        experimental_read_from_local_dir=experimental_read_from_local_dir,
                        **kwargs,
                    )
                self.mix_envs.append(env)
        except Exception as exc_:
            err_msg = "MultiMix environment creation failed: {}".format(exc_)
            # chain the original exception so the real cause is not lost
            raise EnvError(err_msg) from exc_
        if len(self.mix_envs) == 0:
            err_msg = "MultiMix envs_dir did not contain any valid env"
            raise EnvError(err_msg)
        self.env_index = 0
        self.current_env = self.mix_envs[self.env_index]
        # Make sure GridObject class attributes are set from first env
        # Should be fine since the grid is the same for all envs
        multi_env_name = os.path.basename(os.path.abspath(envs_dir)) + _add_to_name
        save_env_name = self.current_env.env_name
        self.current_env.env_name = multi_env_name
        self.__class__ = self.init_grid(self.current_env)
        self.current_env.env_name = save_env_name

    def get_path_env(self):
        """
        Get the path that allows to create this environment.

        It can be used for example in `grid2op.utils.underlying_statistics` to save the information directly inside
        the environment data.
        """
        if self.__closed:
            raise EnvError("This environment is closed, you cannot use it.")
        return self._env_dir

    @property
    def current_index(self):
        """Index (in :attr:`mix_envs`) of the mix currently in use."""
        return self.env_index

    def __len__(self):
        """Number of mixes in this multi-mix environment."""
        return len(self.mix_envs)

    def __iter__(self):
        """
        Operator __iter__ overload to make a ``MultiMixEnvironment`` iterable

        .. code-block:: python

            import grid2op
            from grid2op.Environment import MultiMixEnvironment
            from grid2op.Runner import Runner

            mm_env = MultiMixEnvironment("/path/to/multi/dataset/folder")

            for env in mm_env:
                run_p = env.get_params_for_runner()
                runner = Runner(**run_p)
                runner.run(nb_episode=1, max_iter=-1)
        """
        self.env_index = 0
        return self

    def __next__(self):
        """Yield the next mix, wrapping the index back to 0 on exhaustion."""
        if self.env_index < len(self.mix_envs):
            r = self.mix_envs[self.env_index]
            self.env_index = self.env_index + 1
            return r
        else:
            self.env_index = 0
            raise StopIteration

    def __getattr__(self, name):
        # delegate unknown attributes to the mix currently in use
        # TODO what if name is an integer ? make it possible to loop with integer here
        return getattr(self.current_env, name)

    def keys(self):
        """Iterate over the names of the mixes (dict-like interface)."""
        for mix in self.mix_envs:
            yield mix.name

    def values(self):
        """Iterate over the mixes themselves (dict-like interface)."""
        for mix in self.mix_envs:
            yield mix

    def items(self):
        """Iterate over ``(name, mix)`` pairs (dict-like interface)."""
        for mix in self.mix_envs:
            yield mix.name, mix

    def copy(self):
        """Return a deep copy of this multi-mix (mixes are copied via their own ``copy``)."""
        if self.__closed:
            raise EnvError("This environment is closed, you cannot use it.")
        # temporarily detach the mixes: they are not deep-copyable directly
        mix_envs = self.mix_envs
        self.mix_envs = None
        current_env = self.current_env
        self.current_env = None
        cls = self.__class__
        res = cls.__new__(cls)
        for k in self.__dict__:
            if k == "mix_envs" or k == "current_env":
                # this is handled elsewhere
                continue
            setattr(res, k, copy.deepcopy(getattr(self, k)))
        res.mix_envs = [mix.copy() for mix in mix_envs]
        res.current_env = res.mix_envs[res.env_index]
        # restore the detached members on self
        self.mix_envs = mix_envs
        self.current_env = current_env
        return res

    def __getitem__(self, key):
        """
        Operator [] overload for accessing underlying mixes by name

        .. code-block:: python

            import grid2op
            from grid2op.Environment import MultiMixEnvironment

            mm_env = MultiMixEnvironment("/path/to/multi/dataset/folder")

            mix1_env.name = mm_env["mix_1"]
            assert mix1_env == "mix_1"
            mix2_env.name = mm_env["mix_2"]
            assert mix2_env == "mix_2"
        """
        if self.__closed:
            raise EnvError("This environment is closed, you cannot use it.")
        # Search for key
        for mix in self.mix_envs:
            if mix.name == key:
                return mix
        # Not found by name: include the key in the exception for easier debugging
        raise KeyError(key)

    def reset(self, random=False):
        """Reset the next mix (or a random one if ``random=True``) and return its observation."""
        if self.__closed:
            raise EnvError("This environment is closed, you cannot use it.")
        if random:
            self.env_index = self.space_prng.randint(len(self.mix_envs))
        else:
            self.env_index = (self.env_index + 1) % len(self.mix_envs)
        self.current_env = self.mix_envs[self.env_index]
        self.current_env.reset()
        return self.get_obs()

    def seed(self, seed=None):
        """
        Set the seed of this :class:`Environment` for a better control
        and to ease reproducible experiments.

        Parameters
        ----------
        seed: ``int``
            The seed to set.

        Returns
        ---------
        seeds: ``list``
            The seed used to set the prng (pseudo random number generator)
            for all environments, and each environment ``tuple`` seeds
        """
        if self.__closed:
            raise EnvError("This environment is closed, you cannot use it.")
        try:
            seed = np.array(seed).astype(dt_int)
        except Exception as e:
            # fixed wording/spacing of the message and chained the cause
            raise Grid2OpException(
                "Cannot seed with the seed provided. "
                "Make sure it can be converted to a "
                "numpy 32 bits integer."
            ) from e
        s = super().seed(seed)
        seeds = [s]
        # derive one independent seed per mix from this object's prng
        max_dt_int = np.iinfo(dt_int).max
        for env in self.mix_envs:
            env_seed = self.space_prng.randint(max_dt_int)
            env_seeds = env.seed(env_seed)
            seeds.append(env_seeds)
        return seeds

    def set_chunk_size(self, new_chunk_size):
        """Propagate the chronics chunk size to every mix."""
        if self.__closed:
            raise EnvError("This environment is closed, you cannot use it.")
        for mix in self.mix_envs:
            mix.set_chunk_size(new_chunk_size)

    def set_id(self, id_):
        """Propagate the chronics id to every mix."""
        if self.__closed:
            raise EnvError("This environment is closed, you cannot use it.")
        for mix in self.mix_envs:
            mix.set_id(id_)

    def deactivate_forecast(self):
        """Deactivate forecasts (obs.simulate) on every mix."""
        if self.__closed:
            raise EnvError("This environment is closed, you cannot use it.")
        for mix in self.mix_envs:
            mix.deactivate_forecast()

    def reactivate_forecast(self):
        """Reactivate forecasts (obs.simulate) on every mix."""
        if self.__closed:
            raise EnvError("This environment is closed, you cannot use it.")
        for mix in self.mix_envs:
            mix.reactivate_forecast()

    def set_thermal_limit(self, thermal_limit):
        """
        Set the thermal limit effectively.

        Will propagate to all underlying mixes
        """
        if self.__closed:
            raise EnvError("This environment is closed, you cannot use it.")
        for mix in self.mix_envs:
            mix.set_thermal_limit(thermal_limit)

    def __enter__(self):
        """
        Support *with-statement* for the environment.
        """
        return self

    def __exit__(self, *args):
        """
        Support *with-statement* for the environment.
        """
        self.close()
        # propagate exception
        return False

    def close(self):
        """Close every mix; idempotent (a second call is a no-op)."""
        if self.__closed:
            return
        for mix in self.mix_envs:
            mix.close()
        self.__closed = True

    def attach_layout(self, grid_layout):
        """Attach the given grid layout to every mix."""
        if self.__closed:
            raise EnvError("This environment is closed, you cannot use it.")
        for mix in self.mix_envs:
            mix.attach_layout(grid_layout)

    def __del__(self):
        """when the environment is garbage collected, free all the memory, including cross reference to itself in the observation space."""
        if not self.__closed:
            self.close()

    def generate_classes(self):
        """Generate the grid2op classes of every mix (see ``Environment.generate_classes``)."""
        # TODO this is not really a good idea, as the multi-mix itself is not read from the
        # files !
        for mix in self.mix_envs:
            mix.generate_classes()
| 17,113 | 34.286598 | 139 | py |
Grid2Op | Grid2Op-master/grid2op/Environment/SingleEnvMultiProcess.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Environment.BaseMultiProcessEnv import BaseMultiProcessEnvironment
class SingleEnvMultiProcess(BaseMultiProcessEnvironment):
    """
    Evaluate a single agent instance on ``nb_env`` copies of the *same*
    environment, each running in its own subprocess.

    This is a kind of :class:`BaseMultiProcessEnvironment` (see that class
    for the full interface): it allows to interact at the same time with
    different copies of the (same) environment in parallel.

    Attributes
    -----------
    env: ``list`` of :class:`grid2op.Environment.Environment`
        The environments evaluated in parallel.

    nb_env: ``int``
        Number of parallel underlying environments that will be handled. It is
        also the size of the list of actions that need to be provided in
        :func:`MultiEnvironment.step` and the size of the lists returned by
        this same function.

    Examples
    --------
    An example on how you can best leverage this class is given in the
    getting_started notebooks. Another simple example is:

    .. code-block:: python

        from grid2op.BaseAgent import DoNothingAgent
        from grid2op.MakeEnv import make
        from grid2op.Environment import SingleEnvMultiProcess

        # create a simple environment
        env = make()
        # number of parallel environments
        nb_env = 2  # change that to adapt to your system
        NB_STEP = 100  # number of steps for each environment

        # create a simple agent
        agent = DoNothingAgent(env.action_space)

        # create the multi environment class
        multi_envs = SingleEnvMultiProcess(env=env, nb_env=nb_env)

        # making it usable
        obs = multi_envs.reset()
        rews = [env.reward_range[0] for i in range(nb_env)]
        dones = [False for i in range(nb_env)]

        # perform the appropriate steps
        for i in range(NB_STEP):
            acts = [None for _ in range(nb_env)]
            for env_act_id in range(nb_env):
                acts[env_act_id] = agent.act(obs[env_act_id], rews[env_act_id], dones[env_act_id])
            obs, rews, dones, infos = multi_envs.step(acts)

            # DO SOMETHING WITH THE AGENT IF YOU WANT

        # close the environments
        multi_envs.close()
        # close the initial environment
        env.close()

    """

    def __init__(self, env, nb_env, obs_as_class=True, return_info=True, logger=None):
        # replicate the same environment instance nb_env times; the parent
        # class takes care of spawning one subprocess per entry
        replicated = [env] * nb_env
        child_logger = (
            logger.getChild("SingleEnvMultiProcess") if logger is not None else None
        )
        super().__init__(
            replicated,
            obs_as_class=obs_as_class,
            return_info=return_info,
            logger=child_logger,
        )
if __name__ == "__main__":
    # Small benchmark/demo: compare the throughput of the multi-process
    # wrapper against a single sequential environment.
    from tqdm import tqdm
    from grid2op import make
    from grid2op.Agent import DoNothingAgent
    nb_env = 8  # change that to adapt to your system
    NB_STEP = 100  # number of step for each environment
    env = make()
    env.seed(42)
    agent = DoNothingAgent(env.action_space)
    multi_envs = SingleEnvMultiProcess(env, nb_env)
    obs = multi_envs.reset()
    rews = [env.reward_range[0] for i in range(nb_env)]
    dones = [False for i in range(nb_env)]
    total_reward = 0.0
    # run all sub environments in lockstep for NB_STEP iterations
    for i in tqdm(range(NB_STEP)):
        acts = [None for _ in range(nb_env)]
        for env_act_id in range(nb_env):
            acts[env_act_id] = agent.act(
                obs[env_act_id], rews[env_act_id], dones[env_act_id]
            )
        obs, rews, dones, infos = multi_envs.step(acts)
        total_reward += np.sum(rews)
    multi_envs.close()
    # same number of steps on a single environment, for comparison
    ob = env.reset()
    rew = env.reward_range[0]
    done = False
    total_reward_single = 0
    for i in tqdm(range(NB_STEP)):
        act = agent.act(ob, rew, done)
        ob, rew, done, info = env.step(act)
        if done:
            ob = env.reset()
        total_reward_single += np.sum(rew)
    env.close()
    # fixed typo in the output message ("mluti_env" -> "multi_env")
    print("total_reward multi_env: {}".format(total_reward))
    print("total_reward single env: {}".format(total_reward_single))
| 4,583 | 33.466165 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/Environment/_ObsEnv.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import numpy as np
import warnings
from grid2op.Exceptions.EnvExceptions import EnvError
from grid2op.dtypes import dt_int, dt_float, dt_bool
from grid2op.Environment.BaseEnv import BaseEnv
from grid2op.Chronics import ChangeNothing
from grid2op.Rules import RulesChecker
from grid2op.operator_attention import LinearAttentionBudget
class _ObsCH(ChangeNothing):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Chronics handler used by :class:`_ObsEnv`: behaves like
    :class:`grid2op.Chronics.ChangeNothing` but reports no forecasts at all.

    This class is reserved to internal use. Do not attempt to do anything with it.
    """

    def forecasts(self):
        # a simulated ("forecast") environment has no forecasts of its own
        return []
class _ObsEnv(BaseEnv):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
This class is an 'Emulator' of a :class:`grid2op.Environment.Environment` used to be able to 'simulate'
forecasted grid states.
It should not be used outside of an :class:`grid2op.Observation.BaseObservation` instance, or one of its derivative.
It contains only the most basic element of an Environment. See :class:`grid2op.Environment.Environment` for more
details.
This class is reserved for internal use. Do not attempt to do anything with it.
"""
    def __init__(
        self,
        init_env_path,
        init_grid_path,
        backend_instanciated,
        parameters,
        reward_helper,
        obsClass,  # not initialized :-/
        action_helper,
        thermal_limit_a,
        legalActClass,
        helper_action_class,
        helper_action_env,
        epsilon_poly,
        tol_poly,
        max_episode_duration,
        delta_time_seconds,
        other_rewards={},
        has_attention_budget=False,
        attention_budget_cls=LinearAttentionBudget,
        kwargs_attention_budget={},
        logger=None,
        highres_sim_counter=None,
        _complete_action_cls=None,
        _ptr_orig_obs_space=None,
    ):
        """Build the 'simulation' environment used by ``obs.simulate``.

        Most arguments mirror the ones of the real environment this
        object is derived from; the parameters object is deep-copied so
        simulations cannot alter the real environment's parameters.
        """
        BaseEnv.__init__(
            self,
            init_env_path,
            init_grid_path,
            copy.deepcopy(parameters),
            thermal_limit_a,
            other_rewards=other_rewards,
            epsilon_poly=epsilon_poly,
            tol_poly=tol_poly,
            has_attention_budget=has_attention_budget,
            attention_budget_cls=attention_budget_cls,
            kwargs_attention_budget=kwargs_attention_budget,
            kwargs_observation=None,
            logger=logger,
            highres_sim_counter=highres_sim_counter,
            update_obs_after_reward=False,
        )
        self.__unusable = False  # unusable if the backend cannot be copied
        self._reward_helper = reward_helper
        self._helper_action_class = helper_action_class
        # initialize the observation space
        self._obsClass = None
        # line status (inherited from BaseEnv)
        self._line_status = np.full(self.n_line, dtype=dt_bool, fill_value=True)
        # line status (for this usage)
        self._line_status_me = np.ones(
            shape=self.n_line, dtype=dt_int
        )  # this is "line status" but encode in +1 / -1
        if self._thermal_limit_a is None:
            self._thermal_limit_a = 1.0 * thermal_limit_a.astype(dt_float)
        else:
            self._thermal_limit_a[:] = thermal_limit_a
        # _ObsCH reports no forecasts: this env only "replays" the injected state
        self._init_backend(
            chronics_handler=_ObsCH(),
            backend=backend_instanciated,
            names_chronics_to_backend=None,
            actionClass=action_helper.actionClass,
            observationClass=obsClass,
            rewardClass=None,
            legalActClass=legalActClass,
        )
        self.delta_time_seconds = delta_time_seconds
        ####
        # to be able to save and import (using env.generate_classes) correctly
        self._actionClass = action_helper.subtype
        self._observationClass = _complete_action_cls  # not used anyway
        self._complete_action_cls = _complete_action_cls
        self._action_space = (
            action_helper  # obs env and env share the same action space
        )
        self._ptr_orig_obs_space = _ptr_orig_obs_space
        ####
        self.no_overflow_disconnection = parameters.NO_OVERFLOW_DISCONNECTION
        self._topo_vect = np.zeros(type(backend_instanciated).dim_topo, dtype=dt_int)
        # other stuff
        self.is_init = False
        self._helper_action_env = helper_action_env
        self.env_modification = self._helper_action_env()
        self._do_nothing_act = self._helper_action_env()
        # when unusable (no copyable backend), the backend-action objects cannot exist
        if self.__unusable:
            self._backend_action_set = None
        else:
            self._backend_action_set = self._backend_action_class()
        if self.__unusable:
            self._disc_lines = np.zeros(shape=0, dtype=dt_int) - 1
        else:
            self._disc_lines = np.zeros(shape=self.n_line, dtype=dt_int) - 1
        self._max_episode_duration = max_episode_duration
    def max_episode_duration(self):
        """Return the maximum episode duration (in steps) of the real environment."""
        return self._max_episode_duration
    def _init_myclass(self):
        """this class has already all the powergrid information: it is initialized in the obs space !"""
        pass
    def _init_backend(
        self,
        chronics_handler,
        backend,
        names_chronics_to_backend,
        actionClass,
        observationClass,  # base grid2op type
        rewardClass,
        legalActClass,
    ):
        """Finish the initialization with an instantiated backend.

        If ``backend`` is ``None`` the simulation env is flagged unusable
        and every public method will raise. ``names_chronics_to_backend``
        and ``rewardClass`` are unused here (kept for signature parity
        with the real environment).
        """
        if backend is None:
            # no copyable backend: mark this env unusable and stop here
            self.__unusable = True
            return
        self.__unusable = False
        self._env_dc = self.parameters.ENV_DC
        self.chronics_handler = chronics_handler
        self.backend = backend
        self._has_been_initialized()  # really important to include this piece of code! and just here after the
        self._check_rules_correct(legalActClass)
        self._game_rules = RulesChecker(legalActClass=legalActClass)
        self._game_rules.initialize(self)
        self._legalActClass = legalActClass
        # self._action_space = self._do_nothing
        self.backend.set_thermal_limit(self._thermal_limit_a)
        # local imports to avoid a circular dependency at module load time
        from grid2op.Observation import ObservationSpace
        from grid2op.Reward import FlatReward
        ob_sp_cls = ObservationSpace.init_grid(backend)
        # _with_obs_env=False: this env must NOT recursively build another _ObsEnv
        self._observation_space = ob_sp_cls(backend,
                                            env=self,
                                            with_forecast=False,
                                            rewardClass=FlatReward,
                                            _with_obs_env=False)
        # create the opponent
        self._create_opponent()
        # create the attention budget
        self._create_attention_budget()
        self._obsClass = observationClass.init_grid(type(self.backend))
        self._obsClass._INIT_GRID_CLS = observationClass
        self.current_obs_init = self._obsClass(obs_env=None, action_helper=None)
        self.current_obs = self.current_obs_init
        # init the alert related attributes
        self._init_alert_data()
        # backend has loaded everything
        self._hazard_duration = np.zeros(shape=self.n_line, dtype=dt_int)
    def _do_nothing(self, x):
        """
        This should only be called within ``_ObsEnv.step`` and, there, it only
        returns the "do nothing" action (``x`` is ignored).

        This is why this function is used as the "obsenv action space".
        """
        return self._do_nothing_act
    def _update_actions(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Retrieve the actions to perform the update of the underlying powergrid represented by
        the :class:`grid2op.Backend` in the next time step.

        A call to this function will also read the next state of :attr:`chronics_handler`, so it must be called only
        once per time step.

        Returns
        --------
        res: :class:`grid2op.Action.Action`
            The action representing the modification of the powergrid induced by the Backend
            (always the "do nothing" action here), and ``None`` as second element.
        """
        # TODO consider disconnecting maintenance forecasted :-)
        # This "environment" doesn't modify anything
        return self._do_nothing_act, None
    def copy(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Implement the deep copy of this instance.

        The backend and the high-resolution simulation counter are temporarily
        detached from ``self`` so that ``copy.deepcopy`` never touches them:
        the backend is duplicated through its own ``copy()`` and the counter
        is *shared* (same object) between the copies.

        Returns
        -------
        res: :class:`ObsEnv`
            A deep copy of this instance.
        """
        if self.__unusable:
            raise EnvError("Impossible to use a Observation backend with an "
                           "environment that cannot be copied.")
        # detach members that must not go through deepcopy
        backend = self.backend
        self.backend = None
        _highres_sim_counter = self._highres_sim_counter
        self._highres_sim_counter = None
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", FutureWarning)
            res = copy.deepcopy(self)
        res.backend = backend.copy()
        res._highres_sim_counter = _highres_sim_counter
        # restore the detached members on self
        self.backend = backend
        self._highres_sim_counter = _highres_sim_counter
        return res
    def _reset_to_orig_state(self, obs):
        """Restore the internal state from ``obs``, including the +1/-1 encoded line status."""
        super()._reset_to_orig_state(obs)
        self._line_status_me[:] = obs._env_internal_params["_line_status_env"]
    def init(
        self,
        new_state_action,
        time_stamp,
        obs,
        time_step=1
    ):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Initialize a "forecasted grid state" based on the new injections, possibly new topological modifications etc.

        Parameters
        ----------
        new_state_action: :class:`grid2op.Action`
            The action that is performed on the powergrid to get the forecast at the current date. This "action" is
            NOT performed by the user, it's performed internally by the BaseObservation to have a "forecasted" powergrid
            with the forecasted values present in the chronics.

        time_stamp: ``datetime.datetime``
            The time stamp of the forecast, as a datetime.datetime object. NB this is not the time stamp at which the
            forecast is produced, but the time stamp of the powergrid forecasted.

        obs: :class:`grid2op.Observation.BaseObservation`
            The observation describing the current ("real") state of the grid, used as the
            starting point of the simulation.

        time_step: ``int``
            How many steps ahead are simulated (1 by default). With ``time_step >= 1`` the
            time-dependent quantities (maintenance, cooldowns, ...) are advanced accordingly.

        Returns
        -------
        ``None``
        """
        if self.__unusable:
            raise EnvError("Impossible to use a Observation backend with an "
                           "environment that cannot be copied.")
        self.reset()  # reset the "BaseEnv"
        self._reset_to_orig_state(obs)
        self._topo_vect[:] = obs.topo_vect
        if time_step >= 1:
            is_overflow = obs.rho > 1.
            # handle the components that depends on the time
            (
                still_in_maintenance,
                reconnected,
                first_ts_maintenance,
            ) = self._update_vector_with_timestep(time_step, is_overflow)
            if np.any(first_ts_maintenance):
                # lines entering maintenance: work on copies so the stored state is untouched
                set_status = np.array(self._line_status_me, dtype=dt_int)
                set_status[first_ts_maintenance] = -1
                topo_vect = np.array(self._topo_vect, dtype=dt_int)
                topo_vect[self.line_or_pos_topo_vect[first_ts_maintenance]] = -1
                topo_vect[self.line_ex_pos_topo_vect[first_ts_maintenance]] = -1
            else:
                set_status = self._line_status_me
                topo_vect = self._topo_vect
            if np.any(still_in_maintenance):
                # NOTE(review): if no line enters maintenance this step, set_status aliases
                # self._line_status_me, so the write below mutates the stored vector in
                # place — confirm this is intended.
                set_status[still_in_maintenance] = -1
                # NOTE(review): topo_vect is re-copied from self._topo_vect here, discarding
                # the first_ts_maintenance modifications made above when both masks are
                # non-empty — verify (set_line_status=-1 may compensate downstream).
                topo_vect = np.array(self._topo_vect, dtype=dt_int)
                topo_vect[self.line_or_pos_topo_vect[still_in_maintenance]] = -1
                topo_vect[self.line_ex_pos_topo_vect[still_in_maintenance]] = -1
        else:
            set_status = self._line_status_me
            topo_vect = self._topo_vect
        # TODO set the shunts here
        # update the action that set the grid to the real value
        self._backend_action_set += self._helper_action_env(
            {
                "set_line_status": set_status,
                "set_bus": topo_vect,
                "injection": {
                    "prod_p": obs.gen_p,
                    "prod_v": obs.gen_v,
                    "load_p": obs.load_p,
                    "load_q": obs.load_q,
                },
            }
        )
        self._backend_action_set += new_state_action
        # for storage unit
        self._backend_action_set.storage_power.values[:] = 0.0
        self._backend_action_set.all_changed()
        self._backend_action = copy.deepcopy(self._backend_action_set)
        # for curtailment
        if self._env_modification is not None:
            self._env_modification._dict_inj = {}
        self.is_init = True
        self.current_obs.reset()
        self.time_stamp = time_stamp
def _get_new_prod_setpoint(self, action):
new_p = 1.0 * self._backend_action_set.prod_p.values
if "prod_p" in action._dict_inj:
tmp = action._dict_inj["prod_p"]
indx_ok = np.isfinite(tmp)
new_p[indx_ok] = tmp[indx_ok]
# modification of the environment always override the modification of the agents (if any)
# TODO have a flag there if this is the case.
if "prod_p" in self._env_modification._dict_inj:
# modification of the production setpoint value
tmp = self._env_modification._dict_inj["prod_p"]
indx_ok = np.isfinite(tmp)
new_p[indx_ok] = tmp[indx_ok]
return new_p
def reset(self):
    """Reset this simulated environment and restart from the observation it was built from."""
    if self.__unusable:
        raise EnvError("Impossible to use a Observation backend with an environment that cannot be copied.")
    super().reset()
    # go back to the observation this "forecast" environment started from
    self.current_obs = self.current_obs_init
def simulate(self, action):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        Prefer using `obs.simulate(action)`

    Core method of the :class:`ObsEnv`: simulate the effect of ``action`` on the
    "forecasted" powergrid.

    Same signature as :func:`grid2op.Environment.Environment.step`. A major
    difference is that it does not check whether the action is illegal or not
    (though an implementation could be provided for this method): there is no
    single, unique way to "forecast" how the thermal limits will behave, which
    lines will be available or not, and which actions will be done between the
    time stamp at which "simulate" is called and the simulated time stamp.

    Parameters
    ----------
    action: :class:`grid2op.Action.Action`
        The action to test

    Returns
    -------
    observation: :class:`grid2op.Observation.Observation`
        agent's observation of the current environment
    reward: ``float``
        amount of reward returned after previous action
    done: ``bool``
        whether the episode has ended, in which case further step() calls will return undefined results
    info: ``dict``
        contains auxiliary diagnostic information (helpful for debugging and sometimes learning),
        with keys "disc_lines", "is_illegal" and "is_ambiguous".
    """
    if self.__unusable:
        raise EnvError("Impossible to use a Observation backend with an environment that cannot be copied.")
    # bookkeeping on the original observation space: count this call and
    # check whether "simulate" is currently allowed
    self._ptr_orig_obs_space.simulate_called()
    possible_error = self._ptr_orig_obs_space.can_use_simulate()
    self._highres_sim_counter.add_one()
    if possible_error is not None:
        raise possible_error
    return self.step(action)
def get_obs(self, _update_state=True):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Retrieve the "forecasted grid" as a valid observation object.

    Returns
    -------
    res: :class:`grid2op.Observation.Observation`
        A copy of the currently available observation.
    """
    if self.__unusable:
        raise EnvError("Impossible to use a Observation backend with an environment that cannot be copied.")
    if _update_state:
        # refresh the observation from this simulated environment (no nested forecast)
        self.current_obs.update(self, with_forecast=False)
    return self.current_obs.copy()
def update_grid(self, env):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Update this "emulated" environment with the real powergrid.

    # TODO it should be updated from the observation only, especially if the observation is partially
    # TODO observable. This would lead to data leakage here somehow.

    Parameters
    ----------
    env: :class:`grid2op.Environment.BaseEnv`
        A reference to the environment
    """
    if self.__unusable:
        raise EnvError("Impossible to use a Observation backend with an environment that cannot be copied.")
    # flag as "not initialized": the actual sync happens lazily on next use
    self.is_init = False
def get_current_line_status(self):
    """Return, for each powerline, whether it is currently connected (status == 1)."""
    if self.__unusable:
        raise EnvError("Impossible to use a Observation backend with an environment that cannot be copied.")
    connected = self._line_status == 1
    return connected
def is_valid(self):
    """Tell whether this obs_env is valid, *eg* whether the backend of the
    environment could be copied."""
    usable = not self.__unusable
    return usable
def close(self):
    """Close this environment, once and for all."""
    super().close()
    # clean all the attributes specific to this class: drop the current value
    # (releasing what it references) and leave an explicit None behind
    for attr_nm in ("_obsClass",
                    "_line_status",
                    "_line_status_me",
                    "_max_episode_duration",
                    "_ptr_orig_obs_space"):
        if hasattr(self, attr_nm):
            delattr(self, attr_nm)
        setattr(self, attr_nm, None)
| 19,451 | 36.335893 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/Environment/__init__.py | __all__ = [
"BaseEnv",
"Environment",
"BaseMultiProcessEnvironment",
"SingleEnvMultiProcess",
"MultiEnvMultiProcess",
"MultiMixEnvironment",
"TimedOutEnvironment"
]
from grid2op.Environment.BaseEnv import BaseEnv
from grid2op.Environment.Environment import Environment
from grid2op.Environment.BaseMultiProcessEnv import BaseMultiProcessEnvironment
from grid2op.Environment.SingleEnvMultiProcess import SingleEnvMultiProcess
from grid2op.Environment.MultiEnvMultiProcess import MultiEnvMultiProcess
from grid2op.Environment.MultiMixEnv import MultiMixEnvironment
from grid2op.Environment.timedOutEnv import TimedOutEnvironment
| 656 | 35.5 | 79 | py |
Grid2Op | Grid2Op-master/grid2op/Environment/_forecast_env.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from typing import Tuple
from grid2op.Action import BaseAction
from grid2op.Observation import BaseObservation
from grid2op.Environment.Environment import Environment
class _ForecastEnv(Environment):
    """Environment flavour that increments the `highres_simulator` counter every
    time its `step` method is called.

    It is used by `obs.get_forecast_env`.
    """

    def __init__(self, *args, **kwargs):
        # unless the caller says otherwise, do not update the observation
        # after the reward has been computed
        kwargs.setdefault("_update_obs_after_reward", False)
        super().__init__(*args, **kwargs)

    def step(self, action: BaseAction) -> Tuple[BaseObservation, float, bool, dict]:
        """Count this step as one high-resolution simulation, then behave as a regular step."""
        self._highres_sim_counter += 1
        return super().step(action)
| 1,180 | 41.178571 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Environment/timedOutEnv.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import time
from math import floor
from typing import Tuple, Union, List
from grid2op.Environment.Environment import Environment
from grid2op.Action import BaseAction
from grid2op.Observation import BaseObservation
from grid2op.Exceptions import EnvError
class TimedOutEnvironment(Environment):  # TODO inheritance, or use some common base class
    """This class is the grid2op implementation of a "timed out environment" entity in the RL framework.

    It behaves like a standard environment except for its `step` method: if the
    agent takes more than `time_out_ms` milliseconds between two consecutive
    actions, one or more "do nothing" actions are automatically played first,
    to emulate the real time that passed while the agent was "thinking".
    See :func:`TimedOutEnvironment.step` for more information.

    Attributes
    ----------
    name: ``str``
        The name of the environment

    time_out_ms: ``int``
        maximum duration before performing a do_nothing action and updating to the next time_step.

    action_space: :class:`grid2op.Action.ActionSpace`
        Another name for :attr:`Environment.helper_action_player` for gym compatibility.

    observation_space: :class:`grid2op.Observation.ObservationSpace`
        Another name for :attr:`Environment.helper_observation` for gym compatibility.

    reward_range: ``(float, float)``
        The range of the reward function

    metadata: ``dict``
        For gym compatibility, do not use

    spec: ``None``
        For Gym compatibility, do not use

    _viewer: ``object``
        Used to display the powergrid. Currently properly supported.

    """
    CAN_SKIP_TS = True  # some steps can be more than one time steps

    def __init__(self,
                 grid2op_env: Union[Environment, dict],
                 time_out_ms: int = 1e3) -> None:
        # a non positive timeout makes no sense (it would imply infinitely many
        # "do nothing" actions), reject it early
        if time_out_ms <= 0.:
            raise EnvError(f"For TimedOutEnvironment you need to provide "
                           f"a time_out_ms > 0 (currently {time_out_ms})")
        self.time_out_ms = float(time_out_ms)  # in ms
        self.__last_act_send = time.perf_counter()
        self.__last_act_received = self.__last_act_send
        self._nb_dn_last = 0  # number of "do nothing" played during the last `step`
        self._is_init_dn = False  # while False no "do nothing" is inserted (used during init / reset)
        if isinstance(grid2op_env, Environment):
            super().__init__(**grid2op_env.get_kwargs())
        elif isinstance(grid2op_env, dict):
            super().__init__(**grid2op_env)
        else:
            raise EnvError(f"For TimedOutEnvironment you need to provide "
                           f"either an Environment or a dict "
                           f"for grid2op_env. You provided: {type(grid2op_env)}")
        self._is_init_dn = True
        self._res_skipped = []  # (obs, reward, done, info) of the steps "skipped" by the timeout
        self._opp_attacks = []  # opponent attacks seen during the last `step`

    def step(self, action: BaseAction) -> Tuple[BaseObservation, float, bool, dict]:
        """This function allows to pass to the next step for the action.

        Provided the action the agent wants to do, it will perform the action on the
        grid and return the typical "observation, reward, done, info" tuple.

        Compared to :func:`BaseEnvironment.step` this function will emulate the
        "time that passes", supposing that the duration between each step should
        be `time_out_ms`. Indeed, in reality, there are only 5 mins to take an
        action between two grid states separated by 5 mins.

        More precisely:

        If your agent takes less than `time_out_ms` to chose its action then this
        function behaves normally.

        If your agent takes between `time_out_ms` and `2 x time_out_ms` to provide
        an action, then a "do nothing" action is performed and then the provided
        action is performed.

        If your agent takes between `2 x time_out_ms` and `3 x time_out_ms` to
        provide an action, then 2 "do nothing" actions are performed before your
        action, etc.

        .. note::
            It is possible that the environment "fails" before the action of the
            agent is implemented on the grid.

        Parameters
        ----------
        action : `grid2op.Action.BaseAction`
            The action the agent wish to perform.

        Returns
        -------
        Tuple[BaseObservation, float, bool, dict]
            The usual "gym like" step 4-tuple. `info` is enriched with the keys
            "nb_do_nothing", "nb_do_nothing_made", "action_performed",
            "last_act_received" and "last_act_send".
        """
        self.__last_act_received = time.perf_counter()
        self._res_skipped = []
        self._opp_attacks = []

        # how many full timeouts elapsed since the last action was sent
        self._nb_dn_last = 0
        if self._is_init_dn:
            nb_dn = floor(1000. * (self.__last_act_received - self.__last_act_send) / (self.time_out_ms))
        else:
            nb_dn = 0

        # play the "do nothing" actions emulating the time the agent took to answer
        do_nothing_action = self.action_space()
        for _ in range(nb_dn):
            obs, reward, done, info = super().step(do_nothing_action)
            self._nb_dn_last += 1
            self._opp_attacks.append(self._oppSpace.last_attack)
            if done:
                # the environment "failed" before the agent action could be applied
                info["nb_do_nothing"] = nb_dn
                info["nb_do_nothing_made"] = self._nb_dn_last
                info["action_performed"] = False
                info["last_act_received"] = self.__last_act_received
                info["last_act_send"] = self.__last_act_send
                return obs, reward, done, info
            self._res_skipped.append((obs, reward, done, info))

        # now do the action
        obs, reward, done, info = super().step(action)
        self._opp_attacks.append(self._oppSpace.last_attack)
        info["nb_do_nothing"] = nb_dn
        info["nb_do_nothing_made"] = self._nb_dn_last
        info["action_performed"] = True
        info["last_act_received"] = self.__last_act_received
        info["last_act_send"] = self.__last_act_send
        self.__last_act_send = time.perf_counter()
        return obs, reward, done, info

    def steps(self, action) -> Tuple[List[Tuple[BaseObservation, float, bool, dict]],
                                     List[BaseAction]]:
        """Same as `step`, but also return the step results "skipped" because of the
        timeout, along with the opponent attacks seen during these steps."""
        tmp = self.step(action)
        res = list(self._res_skipped)
        res.append(tmp)
        return res, self._opp_attacks

    def get_kwargs(self, with_backend=True, with_chronics_handler=True):
        """Keyword arguments allowing to rebuild an equivalent environment."""
        res = {}
        res["time_out_ms"] = self.time_out_ms
        res["grid2op_env"] = super().get_kwargs(with_backend, with_chronics_handler)
        return res

    def get_params_for_runner(self):
        """Parameters to give to a :class:`grid2op.Runner.Runner` so that it
        rebuilds this type of environment."""
        res = super().get_params_for_runner()
        res["envClass"] = TimedOutEnvironment
        res["other_env_kwargs"] = {"time_out_ms": self.time_out_ms}
        return res

    @classmethod
    def init_obj_from_kwargs(cls,
                             other_env_kwargs,
                             init_env_path,
                             init_grid_path,
                             chronics_handler,
                             backend,
                             parameters,
                             name,
                             names_chronics_to_backend,
                             actionClass,
                             observationClass,
                             rewardClass,
                             legalActClass,
                             voltagecontrolerClass,
                             other_rewards,
                             opponent_space_type,
                             opponent_action_class,
                             opponent_class,
                             opponent_init_budget,
                             opponent_budget_per_ts,
                             opponent_budget_class,
                             opponent_attack_duration,
                             opponent_attack_cooldown,
                             kwargs_opponent,
                             with_forecast,
                             attention_budget_cls,
                             kwargs_attention_budget,
                             has_attention_budget,
                             logger,
                             kwargs_observation,
                             observation_bk_class,
                             observation_bk_kwargs,
                             _raw_backend_class,
                             _read_from_local_dir):
        """Alternate constructor used by the runner.

        NB: it builds a `cls` (and not a hard-coded ``TimedOutEnvironment``) so
        that subclasses are constructed with their own type.
        """
        grid2op_env = {"init_env_path": init_env_path,
                       "init_grid_path": init_grid_path,
                       "chronics_handler": chronics_handler,
                       "backend": backend,
                       "parameters": parameters,
                       "name": name,
                       "names_chronics_to_backend": names_chronics_to_backend,
                       "actionClass": actionClass,
                       "observationClass": observationClass,
                       "rewardClass": rewardClass,
                       "legalActClass": legalActClass,
                       "voltagecontrolerClass": voltagecontrolerClass,
                       "other_rewards": other_rewards,
                       "opponent_space_type": opponent_space_type,
                       "opponent_action_class": opponent_action_class,
                       "opponent_class": opponent_class,
                       "opponent_init_budget": opponent_init_budget,
                       "opponent_budget_per_ts": opponent_budget_per_ts,
                       "opponent_budget_class": opponent_budget_class,
                       "opponent_attack_duration": opponent_attack_duration,
                       "opponent_attack_cooldown": opponent_attack_cooldown,
                       "kwargs_opponent": kwargs_opponent,
                       "with_forecast": with_forecast,
                       "attention_budget_cls": attention_budget_cls,
                       "kwargs_attention_budget": kwargs_attention_budget,
                       "has_attention_budget": has_attention_budget,
                       "logger": logger,
                       "kwargs_observation": kwargs_observation,
                       "observation_bk_class": observation_bk_class,
                       "observation_bk_kwargs": observation_bk_kwargs,
                       "_raw_backend_class": _raw_backend_class,
                       "_read_from_local_dir": _read_from_local_dir}
        res = cls(grid2op_env=grid2op_env, **other_env_kwargs)
        return res

    def reset(self) -> BaseObservation:
        """Reset the environment.

        Returns
        -------
        BaseObservation
            The first observation of the new episode.
        """
        # disable the "do nothing" insertion while the reset is performed
        self.__last_act_send = time.perf_counter()
        self.__last_act_received = self.__last_act_send
        self._is_init_dn = False
        res = super().reset()
        self.__last_act_send = time.perf_counter()
        self._is_init_dn = True
        return res

    def _custom_deepcopy_for_copy(self, new_obj):
        # copy the timing related state on top of the regular environment copy
        super()._custom_deepcopy_for_copy(new_obj)
        new_obj.__last_act_send = time.perf_counter()
        new_obj.__last_act_received = new_obj.__last_act_send
        new_obj._is_init_dn = self._is_init_dn
        new_obj.time_out_ms = self.time_out_ms
| 12,323 | 44.476015 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Episode/EpisodeData.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import json
import os
import warnings
import copy
import numpy as np
import grid2op
from grid2op.Exceptions import (
Grid2OpException,
EnvError,
IncorrectNumberOfElements,
NonFiniteElement,
)
from grid2op.Action import ActionSpace
from grid2op.Observation import ObservationSpace
# TODO refacto the "save / load" logic. For now save is in the CollectionWrapper and load in the EpisodeData
class EpisodeData:
"""
.. warning:: The attributes of this class are not up to date.
TODO be consistent with the real behaviour now.
This module provides a way to serialize on disk et deserialize one run episode along with some
methods and utilities to ease its manipulation.
If enabled when usign the :class:`Runner`, the :class:`EpisodeData`
will save the information in a structured way. For each episode there will be a folder with:
- "episode_meta.json" that represents some meta information about:
- "agent_seed": the seed used to seed the agent (if any)
- "backend_type": the name of the :class:`grid2op.Backend.Backend` class used
- "chronics_max_timestep": the **maximum** number of timestep for the chronics used
- "chronics_path": the path where the time dependant data (chronics) are located
- "cumulative_reward": the cumulative reward over all the episode
- "env_seed": the seed used to seed the environment (if any)
- "env_type": the name of the :class:`grid2op.Environment` class used.
- "grid_path": the path where the powergrid has been loaded from
- "nb_timestep_played": number of time step the agent has succesfully managed
- "episode_times.json": gives some information about the total time spend in multiple part of the runner, mainly the
:class:`grid2op.Agent.BaseAgent` (and especially its method :func:`grid2op.BaseAgent.act`) and amount of time
spent in the :class:`grid2op.Environment.Environment`
- "_parameters.json": is a representation as json of a the :class:`grid2op.Parameters.Parameters` used for this episode
- "rewards.npz" is a numpy 1d array giving the rewards at each time step. We adopted the convention that the stored
reward at index `i` is the one observed by the agent at time `i` and **NOT** the reward sent by the
:class:`grid2op.Environment` after the action has been implemented.
- "exec_times.npy" is a numpy 1d array giving the execution time of each time step of the episode
- "actions.npy" gives the actions that has been taken by the :class:`grid2op.BaseAgent.BaseAgent`. At row `i` of
"actions.npy" is a
vectorized representation of the action performed by the agent at timestep `i` *ie.* **after** having observed
the observation present at row `i` of "observation.npy" and the reward showed in row `i` of "rewards.npy".
- "disc_lines.npy" gives which lines have been disconnected during the simulation of the cascading failure at each
time step. The same convention as for "rewards.npy" has been adopted. This means that the powerlines are
disconnected when the :class:`grid2op.Agent.BaseAgent` takes the :class:`grid2op.BaseAction` at time step `i`.
- "observations.npy" is a numpy 2d array representing the :class:`grid2op.BaseObservation.BaseObservation` at the
disposal of the
:class:`grid2op.Agent.BaseAgent` when he took his action.
- "env_modifications.npy" is a 2d numpy array representing the modification of the powergrid from the environment.
these modification usually concerns the hazards, maintenance, as well as modification of the generators production
setpoint or the loads consumption.
All of the above should allow to read back, and better understand the behaviour of some
:class:`grid2op.Agent.BaseAgent`, even though such utility functions have not been coded yet.
Attributes
----------
actions: ``type``
Stores the Agent actions as a collection of :class:`grid2op.BaseAction`.
The collection is stored the utility class :class:`grid2op.Episode.CollectionWrapper`.
observations: ``type``
Stores the Observations as a collection of :class:`grid2op.BaseObservation`.
The collection is stored the utility class :class:`grid2op.Episode.CollectionWrapper`.
env_actions: ``type``
Stores the Environment actions as a collection of :class:`grid2op.BaseAction`.
The collection is stored the utility class :class:`grid2op.Episode.CollectionWrapper`.
attacks: ``type``
Stores the Opponent actions as a collection of :class:`grid2op.BaseAction`.
The collection is stored the utility class :class:`grid2op.Episode.CollectionWrapper`.
Examples
--------
Here is an example on how to save the action your agent was doing by the :class:`grid2op.Runner.Runner` of grid2op.
.. code-block:: python
import grid2op
from grid2op.Runner import Runner
# I create an environment
env = grid2op.make("rte_case5_example", test=True)
# I create the runner
runner = Runner(**env.get_params_for_runner())
# I start the runner and save the results in "/I/SAVED/RESULTS/THERE"
# I start the evaluation on 2 different episode
res = runner.run(path_save="/I/SAVED/RESULTS/THERE", nb_episode=2)
And now i can reload the data easily with the EpisodeData class:
.. code-block:: python
import grid2op
from grid2op.Episode import EpisodeData
path_agent = ... # path to a directory where a runner has been saved
# I study only the first episode saved, because... why not
li_episode = EpisodeData.list_episode(path_agent)
full_path, episode_studied = li_episode[0]
this_episode = EpisodeData.from_disk(full_path, episode_studied)
# now the episode is loaded, and you can easily iterate through the observation, the actions etc.
for act in this_episode.actions:
print(act)
for i, obs in enumerate(this_episode.observations):
print("At step {} the active productions were {}".format(i, obs.prod_p))
"""
ACTION_SPACE = "dict_action_space.json"
OBS_SPACE = "dict_observation_space.json"
ENV_MODIF_SPACE = "dict_env_modification_space.json"
ATTACK_SPACE = "dict_attack_space.json" # action space of the attack (this is NOT the OpponentSpace) this is the "opponent action space"
PARAMS = "_parameters.json"
META = "episode_meta.json"
TIMES = "episode_times.json"
OTHER_REWARDS = "other_rewards.json"
AG_EXEC_TIMES = "agent_exec_times.npz"
LEGAL_AMBIGUOUS = "legal_ambiguous.npz"
ACTIONS_FILE = "actions.npz"
ENV_ACTIONS_FILE = "env_modifications.npz"
OBSERVATIONS_FILE = "observations.npz"
LINES_FAILURES = "disc_lines_cascading_failure.npz"
ATTACK = "opponent_attack.npz"
REWARDS = "rewards.npz"
GRID2OPINFO_FILE = "grid2op.info"
ATTR_EPISODE = [
PARAMS,
META,
TIMES,
OTHER_REWARDS,
AG_EXEC_TIMES,
ACTIONS_FILE,
ENV_ACTIONS_FILE,
OBSERVATIONS_FILE,
LINES_FAILURES,
ATTACK,
REWARDS,
]
def __init__(
    self,
    actions=None,
    env_actions=None,
    observations=None,
    rewards=None,
    disc_lines=None,
    times=None,
    params=None,
    meta=None,
    episode_times=None,
    observation_space=None,
    action_space=None,
    helper_action_env=None,
    attack_space=None,
    path_save=None,
    disc_lines_templ=None,
    attack_templ=None,
    attack=None,
    logger=None,
    name="EpisodeData",
    get_dataframes=None,
    force_detail=False,
    other_rewards=None,  # fixed: was a mutable default argument ([])
    legal=None,
    ambiguous=None,
    has_legal_ambiguous=False,
    _init_collections=False,
):
    """Build an episode from its raw serialized components.

    Wraps the actions / observations / env actions / attacks into
    :class:`CollectionWrapper`, computes a consistent "game over" time step
    for all the collections, and (when `path_save` is given) prepares the
    on-disk layout used to serialize the episode.
    """
    # NB: keep the mutable-default fix backward compatible: `None` behaves
    # exactly like the former `[]` default (without the shared-list pitfall)
    if other_rewards is None:
        other_rewards = []
    self.parameters = None
    self.actions = CollectionWrapper(
        actions,
        action_space,
        "actions",
        check_legit=False,
        init_me=_init_collections,
    )
    self.observations = CollectionWrapper(
        observations, observation_space, "observations", init_me=_init_collections
    )
    self.env_actions = CollectionWrapper(
        env_actions,
        helper_action_env,
        "env_actions",
        check_legit=False,
        init_me=_init_collections,
    )
    self.attacks = CollectionWrapper(
        attack, attack_space, "attacks", init_me=_init_collections
    )
    self.meta = meta
    # gives a unique game over for everyone
    # TODO this needs testing!
    action_go = self.actions._game_over
    obs_go = self.observations._game_over
    env_go = self.env_actions._game_over
    # raise RuntimeError("Add the attaks game over too !")
    real_go = action_go
    if self.meta is not None:
        # when initialized by the runner, meta is None
        if "nb_timestep_played" in self.meta:
            real_go = int(self.meta["nb_timestep_played"])
    # keep the earliest game over among the collections (None = no game over seen)
    if real_go is None:
        real_go = action_go
    else:
        if action_go is not None:
            real_go = min(action_go, real_go)
    if real_go is None:
        real_go = obs_go
    else:
        if obs_go is not None:
            real_go = min(obs_go, real_go)
    if real_go is None:
        real_go = env_go
    else:
        if env_go is not None:
            real_go = min(env_go, real_go)
    if real_go is not None:
        # there is a real game over, i assign the proper value for each collection
        # (observations have one extra element: the initial observation)
        self.actions._game_over = real_go
        self.observations._game_over = real_go + 1
        self.env_actions._game_over = real_go
    self.other_rewards = other_rewards
    self.observation_space = observation_space
    self.attack_space = attack_space
    self.rewards = rewards
    self.disc_lines = disc_lines
    self.times = times
    self.params = params
    self.episode_times = episode_times
    self.name = name
    self.disc_lines_templ = disc_lines_templ
    self.attack_templ = attack_templ
    self.logger = logger
    self.serialize = False
    self.load_names = action_space.name_load
    self.n_loads = len(self.load_names)
    self.prod_names = action_space.name_gen
    self.n_prods = len(self.prod_names)
    self.line_names = action_space.name_line
    self.n_lines = len(self.line_names)
    self.name_sub = action_space.name_sub
    self.force_detail = force_detail
    self.has_legal_ambiguous = has_legal_ambiguous
    self.legal = copy.deepcopy(legal)
    self.ambiguous = copy.deepcopy(ambiguous)

    if path_save is not None:
        # the episode will be serialized on disk: prepare the directory layout
        self.agent_path = os.path.abspath(path_save)
        self.episode_path = os.path.join(self.agent_path, name)
        self.serialize = True
        if not os.path.exists(self.agent_path):
            try:
                os.mkdir(self.agent_path)
                # fixed: guard against logger=None (used to crash here)
                if self.logger is not None:
                    self.logger.info(
                        'Creating path "{}" to save the runner'.format(self.agent_path)
                    )
            except FileExistsError:
                # concurrent creation by another process is fine
                pass
        act_space_path = os.path.join(self.agent_path, EpisodeData.ACTION_SPACE)
        obs_space_path = os.path.join(self.agent_path, EpisodeData.OBS_SPACE)
        env_modif_space_path = os.path.join(
            self.agent_path, EpisodeData.ENV_MODIF_SPACE
        )
        attack_space_path = os.path.join(self.agent_path, EpisodeData.ATTACK_SPACE)
        # serialize the different spaces once per agent directory (shared by all episodes)
        if not os.path.exists(act_space_path):
            dict_action_space = action_space.cls_to_dict()
            with open(act_space_path, "w", encoding="utf8") as f:
                json.dump(obj=dict_action_space, fp=f, indent=4, sort_keys=True)
        if not os.path.exists(obs_space_path):
            dict_observation_space = observation_space.cls_to_dict()
            with open(obs_space_path, "w", encoding="utf8") as f:
                json.dump(
                    obj=dict_observation_space, fp=f, indent=4, sort_keys=True
                )
        if not os.path.exists(env_modif_space_path):
            dict_helper_action_env = helper_action_env.cls_to_dict()
            with open(env_modif_space_path, "w", encoding="utf8") as f:
                json.dump(
                    obj=dict_helper_action_env, fp=f, indent=4, sort_keys=True
                )
        if not os.path.exists(attack_space_path):
            dict_attack_space = attack_space.cls_to_dict()
            with open(attack_space_path, "w", encoding="utf8") as f:
                json.dump(obj=dict_attack_space, fp=f, indent=4, sort_keys=True)
        if not os.path.exists(self.episode_path):
            os.mkdir(self.episode_path)
            # fixed: used to call the bare `logger` parameter (inconsistent with
            # self.logger above) and crashed when logger was None
            if self.logger is not None:
                self.logger.info(
                    'Creating path "{}" to save the episode {}'.format(
                        self.episode_path, self.name
                    )
                )
@staticmethod
def list_episode(path_agent):
    """
    From a given path where a runner is supposed to have run, extract the
    subdirectories that can store values from an episode.

    Parameters
    ----------
    path_agent: ``str``
        The path where to look for data coming from "episode"

    Returns
    -------
    res: ``list``
        A list of possible episodes. Each element of this list is a tuple:
        (full_path, episode_name)

    Examples
    --------
    .. code-block:: python

        import grid2op
        from grid2op.Runner import Runner
        from grid2op.Episode import EpisodeData

        env = grid2op.make()
        runner = Runner(**env.get_params_for_runner())
        path_save = "i_saved_the_runner_here"
        res = runner.run(nb_episode=2, path_save=path_save)

        li_episode = EpisodeData.list_episode(path_save)
        for full_episode_path, episode_name in li_episode:
            this_episode = EpisodeData.from_disk(full_episode_path, episode_name)
            # you can do something with it now
    """
    res = []
    for entry in sorted(os.listdir(path_agent)):
        # loop through the files that store the agent's logs
        candidate = os.path.join(path_agent, entry)
        if not os.path.isdir(candidate):
            # it cannot be the result of an episode if it is not a directory.
            continue
        # an episode directory must contain every expected serialized file
        complete = all(
            os.path.exists(os.path.join(candidate, fname))
            for fname in EpisodeData.ATTR_EPISODE
        )
        if complete:
            res.append((os.path.abspath(path_agent), entry))
    return res
def reboot(self):
    """
    Do as if the data just got read from the hard drive (loop again from the
    initial observation and action)
    """
    for collection in (self.actions, self.observations, self.env_actions):
        collection.reboot()
def go_to(self, index):
    """Position every collection at time step `index`.

    Observations are shifted by one: observation `index + 1` is the state
    resulting from action `index`.
    """
    self.actions.go_to(index)
    self.observations.go_to(1 + index)
    self.env_actions.go_to(index)
def get_actions(self):
    """Return the underlying container with all the agent actions."""
    return self.actions.collection
def get_observations(self):
    """Return the underlying container with all the observations."""
    return self.observations.collection
def __len__(self):
return int(self.meta["chronics_max_timestep"])
@classmethod
def from_disk(cls, agent_path, name="1"):
    """
    This function allows you to reload an episode stored using the runner.

    See the example at the definition of the class for more information on how to use it.

    Parameters
    ----------
    agent_path: ``str``
        Path pass at the "runner.run" method
    name: ``str``
        The name of the episode you want to reload.

    Returns
    -------
    res:
        The data loaded properly in memory.

    Raises
    ------
    Grid2OpException
        If `agent_path` is None, or if one of the expected serialized files
        is missing from the episode directory.
    """
    if agent_path is None:
        raise Grid2OpException(
            'A path to an episode should be provided, please call "from_disk" with '
            '"agent_path" other than None'
        )
    episode_path = os.path.abspath(os.path.join(agent_path, name))
    try:
        # json meta data first
        with open(os.path.join(episode_path, EpisodeData.PARAMS)) as f:
            _parameters = json.load(fp=f)
        with open(os.path.join(episode_path, EpisodeData.META)) as f:
            episode_meta = json.load(fp=f)
        with open(os.path.join(episode_path, EpisodeData.TIMES)) as f:
            episode_times = json.load(fp=f)
        with open(os.path.join(episode_path, EpisodeData.OTHER_REWARDS)) as f:
            other_rewards = json.load(fp=f)
        # then the numpy arrays (each .npz stores a single "data" entry)
        times = np.load(os.path.join(episode_path, EpisodeData.AG_EXEC_TIMES))[
            "data"
        ]
        actions = np.load(os.path.join(episode_path, EpisodeData.ACTIONS_FILE))["data"]
        env_actions = np.load(os.path.join(episode_path, EpisodeData.ENV_ACTIONS_FILE))[
            "data"
        ]
        observations = np.load(
            os.path.join(episode_path, EpisodeData.OBSERVATIONS_FILE)
        )["data"]
        disc_lines = np.load(
            os.path.join(episode_path, EpisodeData.LINES_FAILURES)
        )["data"]
        attack = np.load(os.path.join(episode_path, EpisodeData.ATTACK))["data"]
        rewards = np.load(os.path.join(episode_path, EpisodeData.REWARDS))["data"]
        # legal / ambiguous flags are optional (not present in older episodes)
        path_legal_ambiguous = os.path.join(episode_path, EpisodeData.LEGAL_AMBIGUOUS)
        has_legal_ambiguous = False
        if os.path.exists(path_legal_ambiguous):
            legal_ambiguous = np.load(path_legal_ambiguous)["data"]
            legal = copy.deepcopy(legal_ambiguous[:, 0])
            ambiguous = copy.deepcopy(legal_ambiguous[:, 1])
            has_legal_ambiguous = True
        else:
            legal = None
            ambiguous = None
    except FileNotFoundError as ex:
        raise Grid2OpException(f"EpisodeData file not found \n {str(ex)}")

    # rebuild the different "spaces" from their (agent level) json description
    observation_space = ObservationSpace.from_dict(
        os.path.join(agent_path, EpisodeData.OBS_SPACE)
    )
    action_space = ActionSpace.from_dict(
        os.path.join(agent_path, EpisodeData.ACTION_SPACE)
    )
    helper_action_env = ActionSpace.from_dict(
        os.path.join(agent_path, EpisodeData.ENV_MODIF_SPACE)
    )
    attack_space = ActionSpace.from_dict(
        os.path.join(agent_path, EpisodeData.ATTACK_SPACE)
    )
    if observation_space.glop_version != grid2op.__version__:
        warnings.warn(
            'You are using a "grid2op compatibility" feature (the data you saved '
            "have been saved with a previous grid2op version). When we loaded your data, we attempted "
            "to not include most recent grid2op features. This is feature is not well tested. It would "
            "be wise to regenerate the data with the latest grid2Op version."
        )
    return cls(
        actions=actions,
        env_actions=env_actions,
        observations=observations,
        rewards=rewards,
        disc_lines=disc_lines,
        times=times,
        params=_parameters,
        meta=episode_meta,
        episode_times=episode_times,
        observation_space=observation_space,
        action_space=action_space,
        helper_action_env=helper_action_env,
        path_save=None,  # No save when reading
        attack=attack,
        attack_space=attack_space,
        name=name,
        get_dataframes=True,
        other_rewards=other_rewards,
        legal=legal,
        ambiguous=ambiguous,
        has_legal_ambiguous=has_legal_ambiguous,
        _init_collections=True,
    )
def set_parameters(self, env):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Used by the Runner to serialize properly an episode
TODO
Parameters
----------
env
Returns
-------
"""
if self.force_detail or self.serialize:
self.parameters = env.parameters.to_dict()
def set_meta(self, env, time_step, cum_reward, env_seed, agent_seed):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Used by he runner to serialize properly an episode
TODO
Parameters
----------
env
time_step
cum_reward
env_seed
agent_seed
Returns
-------
"""
if self.force_detail or self.serialize:
self.meta = {}
self.meta["chronics_path"] = "{}".format(env.chronics_handler.get_id())
self.meta["chronics_max_timestep"] = "{}".format(
env.chronics_handler.max_timestep()
)
self.meta["grid_path"] = "{}".format(env._init_grid_path)
self.meta["backend_type"] = "{}".format(type(env.backend).__name__)
self.meta["env_type"] = "{}".format(type(env).__name__)
self.meta["nb_timestep_played"] = time_step
self.meta["cumulative_reward"] = cum_reward
if env_seed is None:
self.meta["env_seed"] = env_seed
else:
self.meta["env_seed"] = int(env_seed)
if agent_seed is None:
self.meta["agent_seed"] = agent_seed
else:
self.meta["agent_seed"] = int(agent_seed)
    def incr_store(
        self,
        efficient_storing,
        time_step,
        time_step_duration,
        reward,
        env_act,
        act,
        obs,
        opp_attack,
        info,
    ):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Used by the runner to serialize properly an episode: stores everything
        that happened during one call to ``env.step``.

        Parameters
        ----------
        efficient_storing: ``bool``
            If ``True`` the pre-allocated arrays are written in place (indexed
            by ``time_step``); otherwise the arrays are grown by concatenation
            (the "might not work" fallback paths below).
        time_step: ``int``
            Number of the current time step (1-based for the storage arrays).
        time_step_duration: ``float``
            Time the agent took to compute its action at this step.
        reward: ``float``
            Reward obtained at this step.
        env_act:
            Modification of the environment at this step (injections, ...).
        act:
            Action performed by the agent at this step.
        obs:
            Observation resulting from this step.
        opp_attack:
            Attack of the opponent at this step, ``None`` if there was none.
        info: ``dict``
            The "info" dictionary returned by ``env.step``.
        """
        if not (self.force_detail or self.serialize):
            return
        self.actions.update(time_step, act, efficient_storing)
        self.env_actions.update(time_step, env_act, efficient_storing)
        # deactivate the possibility to do "forecast" in this serialized instance:
        # the observation is stored without its attached simulation environment,
        # both attributes being restored right after the update
        tmp_obs_env = obs._obs_env
        tmp_inj = obs._forecasted_inj
        obs._obs_env = None
        obs._forecasted_inj = []
        # NOTE(review): observations use "time_step + 1", presumably because the
        # initial observation (before any action) occupies index 0 -- confirm
        self.observations.update(time_step + 1, obs, efficient_storing)
        obs._obs_env = tmp_obs_env
        obs._forecasted_inj = tmp_inj
        if opp_attack is not None:
            self.attacks.update(time_step, opp_attack, efficient_storing)
        else:
            if efficient_storing:
                # no attack this step: store an all-zero vector
                self.attacks.collection[time_step - 1, :] = 0.0
            else:
                # might not work ! (self.attacks is a CollectionWrapper, not a
                # numpy array -- kept as is)
                self.attacks = np.concatenate((self.attacks, self.attack_templ))
        if efficient_storing:
            # efficient way of writing: in-place assignment in the
            # pre-allocated arrays
            self.times[time_step - 1] = time_step_duration
            self.rewards[time_step - 1] = reward
            if "disc_lines" in info:
                arr = info["disc_lines"]
                if arr is not None:
                    self.disc_lines[time_step - 1, :] = arr
                else:
                    # no disconnection information: fall back on the template row
                    self.disc_lines[time_step - 1, :] = self.disc_lines_templ
        else:
            # might not work !
            # completely inefficient way of writing: grow every array by
            # concatenation at each step
            self.times = np.concatenate((self.times, (time_step_duration,)))
            self.rewards = np.concatenate((self.rewards, (reward,)))
            if "disc_lines" in info:
                arr = info["disc_lines"]
                if arr is not None:
                    self.disc_lines = np.concatenate(
                        (self.disc_lines, arr.reshape(1, -1))
                    )
                else:
                    self.disc_lines = np.concatenate(
                        (self.disc_lines, self.disc_lines_templ)
                    )
        if "rewards" in info:
            # the "other rewards" are serialized to json later on, hence the
            # best-effort conversion to float
            self.other_rewards.append(
                {k: self._convert_to_float(v) for k, v in info["rewards"].items()}
            )
        # TODO add is_illegal and is_ambiguous flags!
        if self.has_legal_ambiguous:
            # "legal" is the negation of the "is_illegal" flag returned by the env
            if efficient_storing:
                self.legal[time_step - 1] = not info["is_illegal"]
                self.ambiguous[time_step - 1] = info["is_ambiguous"]
            else:
                self.legal = np.concatenate((self.legal, (not info["is_illegal"],)))
                self.ambiguous = np.concatenate((self.ambiguous, (info["is_ambiguous"],)))
def _convert_to_float(self, el):
try:
res = float(el)
except Exception as exc_:
res = -float("inf")
return res
def set_episode_times(self, env, time_act, beg_, end_):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Used by he runner to serialize properly an episode
TODO
Parameters
----------
env
time_act
beg_
end_
Returns
-------
"""
if self.force_detail or self.serialize:
self.episode_times = {}
self.episode_times["Env"] = {}
self.episode_times["Env"]["total"] = float(
env._time_apply_act + env._time_powerflow + env._time_extract_obs
)
self.episode_times["Env"]["apply_act"] = float(env._time_apply_act)
self.episode_times["Env"]["powerflow_computation"] = float(
env._time_powerflow
)
self.episode_times["Env"]["observation_computation"] = float(
env._time_extract_obs
)
self.episode_times["Agent"] = {}
self.episode_times["Agent"]["total"] = float(time_act)
self.episode_times["total"] = float(end_ - beg_)
def to_disk(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Used by he runner to serialize properly an episode
TODO
Returns
-------
"""
if self.serialize:
parameters_path = os.path.join(self.episode_path, EpisodeData.PARAMS)
with open(parameters_path, "w", encoding="utf-8") as f:
json.dump(obj=self.parameters, fp=f, indent=4, sort_keys=True)
meta_path = os.path.join(self.episode_path, EpisodeData.META)
with open(meta_path, "w", encoding="utf-8") as f:
json.dump(obj=self.meta, fp=f, indent=4, sort_keys=True)
episode_times_path = os.path.join(self.episode_path, EpisodeData.TIMES)
with open(episode_times_path, "w", encoding="utf-8") as f:
json.dump(obj=self.episode_times, fp=f, indent=4, sort_keys=True)
episode_other_rewards_path = os.path.join(
self.episode_path, EpisodeData.OTHER_REWARDS
)
with open(episode_other_rewards_path, "w", encoding="utf-8") as f:
json.dump(obj=self.other_rewards, fp=f, indent=4, sort_keys=True)
np.savez_compressed(
os.path.join(self.episode_path, EpisodeData.AG_EXEC_TIMES),
data=self.times,
)
self.actions.save(os.path.join(self.episode_path, EpisodeData.ACTIONS_FILE))
self.env_actions.save(
os.path.join(self.episode_path, EpisodeData.ENV_ACTIONS_FILE)
)
self.observations.save(
os.path.join(self.episode_path, EpisodeData.OBSERVATIONS_FILE)
)
self.attacks.save(
os.path.join(os.path.join(self.episode_path, EpisodeData.ATTACK))
)
np.savez_compressed(
os.path.join(self.episode_path, EpisodeData.LINES_FAILURES),
data=self.disc_lines,
)
np.savez_compressed(
os.path.join(self.episode_path, EpisodeData.REWARDS), data=self.rewards
)
with open(
os.path.join(self.episode_path, self.GRID2OPINFO_FILE),
"w",
encoding="utf-8",
) as f:
dict_ = {"version": f"{grid2op.__version__}"}
json.dump(obj=dict_, fp=f, indent=4, sort_keys=True)
@staticmethod
def get_grid2op_version(path_episode):
"""
Utility function to retrieve the grid2op version used to generate this episode serialized on disk.
This is introduced in grid2op 1.5.0, with older runner version stored, this function will return "<=1.4.0"
otherwise it returns the grid2op version, as a string.
"""
version = "<=1.4.0"
if os.path.exists(os.path.join(path_episode, EpisodeData.GRID2OPINFO_FILE)):
with open(
os.path.join(path_episode, EpisodeData.GRID2OPINFO_FILE),
"r",
encoding="utf-8",
) as f:
dict_ = json.load(fp=f)
if "version" in dict_:
version = dict_["version"]
return version
class CollectionWrapper:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Utility to make the interaction with stored actions and stored observations more pythonic.

    A wrapping class to add some behaviors (iterability, item access, update, save)
    to grid2op object collections (:class:`grid2op.Action.BaseAction` and :class:`grid2op.Observation.BaseObservation`
    classes essentially).

    Attributes
    ----------
    collection: ``type``
        The collection to wrap (a 2d array of serialized vectors, one row per time step).

    helper:
        The helper object used to access elements of the collection through a
        `from_vect` method.

    collection_name: ``str``
        The name of the collection.

    elem_name: ``str``
        The name of one element of the collection (the collection name minus
        its last character, e.g. "actions" -> "action").

    i: ``int``
        Integer used for iteration.

    _game_over: ``int``
        The time step at which the game_over occurs. None if there is no game_over.

    objects:
        The collection of objects built with the `from_vect` method.

    Methods
    -------
    update(time_step, values, efficient_storage)
        update the collection with new `values` for a given `time_step`.

    save(path)
        save the collection to disk using `path` as the path to the file to write in.

    Raises
    ------
    :class:`grid2op.Exceptions.Grid2OpException`
        If the helper function has no from_vect method.
        If trying to access an element outside of the collection.
    """

    def __init__(
        self, collection, helper, collection_name, check_legit=True, init_me=True
    ):
        self.collection = collection
        if not hasattr(helper, "from_vect"):
            raise Grid2OpException(
                f"Object {helper} must implement a " f"from_vect method."
            )
        self.helper = helper
        self.collection_name = collection_name
        self.elem_name = self.collection_name[:-1]
        self.i = 0
        self._game_over = None
        self.objects = []
        if not init_me:
            # the runner just has been created, so i don't need to update this collection
            # from previous data, but we need to initialize the list holder
            self.objects = [None] * len(self.collection)
            return
        # eagerly deserialize every stored vector; the first non-finite /
        # invalid vector marks the "game over" step and stops the loading
        for i, elem in enumerate(self.collection):
            try:
                collection_obj = self.helper.from_vect(
                    self.collection[i, :], check_legit=check_legit
                )
                self.objects.append(collection_obj)
            except IncorrectNumberOfElements as exc_:
                # grid2op does not allow to load the object: there is a mismatch between what has been stored
                # and what is currently used.
                raise
            except NonFiniteElement:
                # vector full of NaN: the episode ended at this step
                self._game_over = i
                break
            except EnvError as exc_:
                # the vector could not be loaded: treated as a game over too
                self._game_over = i
                break

    def __len__(self):
        # the usable length is capped at the game over step, if any
        if self._game_over is None:
            return self.collection.shape[0]
        else:
            return self._game_over

    def __getitem__(self, i):
        # slices are passed through unchecked (python handles the bounds)
        if isinstance(i, slice) or i < len(self):
            return self.objects[i]
        else:
            raise Grid2OpException(
                f"Trying to reach {self.elem_name} {i + 1} but "
                f"there are only {len(self)} {self.collection_name}."
            )

    def __iter__(self):
        self.i = 0
        return self

    def __next__(self):
        # self.i is the 1-based count of elements already yielded
        self.i = self.i + 1
        if self.i < len(self) + 1:
            return self.objects[self.i - 1]
        else:
            raise StopIteration

    def update(self, time_step, value, efficient_storage):
        # store the serialized vector of "value" at the (1-based) time step...
        if efficient_storage:
            self.collection[time_step - 1, :] = value.to_vect()
        else:
            self.collection = np.concatenate(
                (self.collection, value.to_vect().reshape(1, -1))
            )
        # ... and keep the deserialized object itself as well
        self.objects[time_step - 1] = value

    def save(self, path):
        np.savez_compressed(
            path, data=self.collection
        )  # do not change keyword arguments

    def reboot(self):
        # restart the iteration from the beginning of the episode
        self.i = 0

    def go_to(self, index):
        # jump so that the next iteration resumes at "index"
        if index >= len(self):
            raise Grid2OpException(
                "index too long for collection {}".format(self.collection_name)
            )
        self.i = index
if __name__ == "__main__":
pass
| 35,729 | 36.102804 | 141 | py |
Grid2Op | Grid2Op-master/grid2op/Episode/EpisodeReboot.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import warnings
import copy
import json
import os
import re
import numpy as np
from datetime import timedelta
from grid2op.dtypes import dt_float, dt_int, dt_bool
from grid2op.Exceptions import Grid2OpException
from grid2op.Chronics import GridValue, ChronicsHandler
from grid2op.Opponent import BaseOpponent
from grid2op.Environment import Environment
from grid2op.Episode.EpisodeData import EpisodeData
class _GridFromLog(GridValue):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    A :class:`grid2op.Chronics.GridValue` that replays the injections stored
    in an :class:`grid2op.Episode.EpisodeData` instead of reading them from
    chronics files.
    """

    def __init__(
        self,
        episode_data,
        time_interval=timedelta(minutes=5),
        max_iter=-1,
        start_datetime=None,
        chunk_size=None,
    ):
        # TODO reload directly the loadp, loadq, prodp and prodv from the path
        # of the episode data if possible
        self.episode_data = episode_data
        # the starting date and chunk size are entirely determined by the
        # recorded episode. Fix: the original warned when these arguments were
        # *None* (ie on every default construction) and stayed silent when a
        # caller actually supplied a value that gets ignored -- the conditions
        # were inverted.
        if start_datetime is not None:
            warnings.warn(
                '"start_datetime" argument is ignored when building the _GridFromLog'
            )
        if chunk_size is not None:
            warnings.warn(
                '"chunk_size" argument is ignored when building the _GridFromLog'
            )
        GridValue.__init__(
            self,
            time_interval=time_interval,
            max_iter=max_iter,
            start_datetime=self.episode_data.observations[0].get_time_stamp(),
            chunk_size=None,
        )
        # TODO reload that
        # no maintenance / hazard information is replayed for now
        nb_line = self.episode_data.observations[0].line_status.shape[0]
        self.maintenance_time = np.zeros(nb_line, dtype=int) - 1
        self.maintenance_duration = np.zeros(nb_line, dtype=int)
        self.hazard_duration = np.zeros(nb_line, dtype=int)
        self.curr_iter = 0

    def initialize(
        self,
        order_backend_loads,
        order_backend_prods,
        order_backend_lines,
        order_backend_subs,
        names_chronics_to_backend,
    ):
        # This is required to follow the GridValue interface, but it needs to be empty here.
        pass

    def load_next(self):
        """Return the injections of the next recorded observation."""
        self.curr_iter += 1
        obs = self.episode_data.observations[self.curr_iter]
        self.current_datetime = obs.get_time_stamp()
        res = {}
        injs = {
            "prod_p": obs.prod_p.astype(dt_float),
            "load_p": obs.load_p.astype(dt_float),
            "load_q": obs.load_q.astype(dt_float),
        }
        res["injection"] = injs
        # TODO
        # if self.maintenance is not None:
        #     res["maintenance"] = self.maintenance[self.current_index, :]
        # if self.hazards is not None:
        #     res["hazards"] = self.hazards[self.current_index, :]
        prod_v = obs.prod_v
        return (
            self.current_datetime,
            res,
            self.maintenance_time,
            self.maintenance_duration,
            self.hazard_duration,
            prod_v,
        )

    def check_validity(self, backend):
        # the data come from a previously played episode: valid by construction
        return True

    def next_chronics(self):
        # restart the stored episode from its beginning
        self.episode_data.reboot()
class OpponentFromLog(BaseOpponent):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Opponent used when replaying an episode. It keeps the default
    :class:`grid2op.Opponent.BaseOpponent` behavior unchanged (no override).
    """

    pass
class EpisodeReboot:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    This is a first implementation to serve as "what can be done".

    It is a beta feature
    """

    def __init__(self):
        self.episode_data = None
        self.env = None
        self.chronics_handler = None
        self.current_time_step = None
        self.action = None  # the last action played
        warnings.warn(
            "EpisodeReboot is a beta feature, it will likely be renamed, methods will be adapted "
            "and it has probably some bugs. Use with care!"
        )

    def load(self, backend, agent_path=None, name=None, data=None, env_kwargs=None):
        """
        Load an episode (either from an :class:`EpisodeData` or from disk) and
        build the environment that will replay it.

        Parameters
        ----------
        backend:
            The backend used to build the replay environment.
        agent_path: ``str``
            Path where the runner stored the episode (used with ``name``).
        name: ``str``
            Name of the episode to load (used with ``agent_path``).
        data: :class:`EpisodeData`
            Already-loaded episode data (alternative to ``agent_path``/``name``).
        env_kwargs: ``dict``
            Extra keyword arguments forwarded to the ``Environment``
            constructor. Fix: this parameter used to default to a mutable
            ``{}`` that was also modified in place (the ``del`` below),
            corrupting the caller's dictionary and being shared between calls.
            It now defaults to ``None`` and a shallow copy is modified instead.
        """
        if env_kwargs is None:
            env_kwargs = {}
        else:
            # work on a copy so the caller's dict is never mutated
            env_kwargs = dict(env_kwargs)
        if data is None:
            if agent_path is not None and name is not None:
                self.episode_data = EpisodeData.from_disk(agent_path, name)
            else:
                raise Grid2OpException(
                    "To replay an episode you need at least to provide an EpisodeData "
                    '(using the keyword argument "data=...") or provide the path and name where '
                    "the "
                    'episode is stored (keyword arguments "agent_path" and "name").'
                )
        else:
            self.episode_data = copy.deepcopy(data)
            self.episode_data.reboot()
        if self.env is not None:
            self.env.close()
            self.env = None
        self.chronics_handler = ChronicsHandler(
            chronicsClass=_GridFromLog, episode_data=self.episode_data
        )
        # these keyword arguments are imposed by the replay: drop any
        # user-provided value so they are not passed twice to Environment
        if "chronics_handler" in env_kwargs:
            del env_kwargs["chronics_handler"]
        if "backend" in env_kwargs:
            del env_kwargs["backend"]
        if "opponent_class" in env_kwargs:
            del env_kwargs["opponent_class"]
        if "name" in env_kwargs:
            del env_kwargs["name"]
        seed = None
        # NOTE(review): this reads the meta data from disk even when "data" is
        # provided; it requires agent_path and name in all cases -- confirm
        with open(os.path.join(agent_path, name, "episode_meta.json")) as f:
            dict_ = json.load(f)
            nm = re.sub("Environment_", "", dict_["env_type"])
            if dict_["env_seed"] is not None:
                seed = int(dict_["env_seed"])
        self.env = Environment(
            **env_kwargs,
            backend=backend,
            chronics_handler=self.chronics_handler,
            opponent_class=OpponentFromLog,
            name=nm
        )
        if seed is not None:
            self.env.seed(seed)
        tmp = self.env.reset()
        # always have the two bellow synch ! otherwise it messes up the "chronics"
        # in the env, when calling "env.step"
        self.current_time_step = 0
        self.env.chronics_handler.real_data.curr_iter = 0
        # first observation of the scenario
        current_obs = self.episode_data.observations[self.current_time_step]
        self._assign_state(current_obs)
        return self.env.get_obs()

    def _assign_state(self, obs):
        """
        Force the environment internal state to match the observation ``obs``.

        works only if observation store the complete state of the grid...
        """
        if self.env.done:
            # if there has been a game over previously i reset it
            self.env.chronics_handler.real_data.curr_iter = self.current_time_step
            self.env.reset()
        self.env._gen_activeprod_t[:] = obs.prod_p.astype(dt_float)
        self.env._actual_dispatch[:] = obs.actual_dispatch.astype(dt_float)
        self.env._target_dispatch[:] = obs.target_dispatch.astype(dt_float)
        self.env._gen_activeprod_t_redisp[:] = obs.prod_p.astype(
            dt_float
        ) + obs.actual_dispatch.astype(dt_float)
        self.env.current_obs = obs
        self.env._timestep_overflow[:] = obs.timestep_overflow.astype(dt_int)
        self.env._times_before_line_status_actionable[
            :
        ] = obs.time_before_cooldown_line.astype(dt_int)
        self.env._times_before_topology_actionable[
            :
        ] = obs.time_before_cooldown_sub.astype(dt_int)
        self.env._duration_next_maintenance[:] = obs.duration_next_maintenance.astype(
            dt_int
        )
        self.env._time_next_maintenance[:] = obs.time_next_maintenance.astype(dt_int)
        # # TODO check that the "stored" "last bus for when the powerline were connected" are
        # # kept there (I might need to do a for loop)
        self.env.backend.update_from_obs(obs)
        disc_lines, detailed_info, conv_ = self.env.backend.next_grid_state(
            env=self.env
        )
        if conv_ is None:
            self.env._backend_action.update_state(disc_lines)
        self.env._backend_action.reset()

    def next(self, _sentinel=None, _update=False):
        """
        go to next time step

        if "update" then i reuse the observation stored to go to this time step, otherwise not

        do as if the environment will execute the action the stored agent did at the next time step
        (compared to the time step the environment is currently at)

        Parameters
        ----------
        _sentinel: ``None``
            Used to prevent positional parameters. Internal, do not use.

        _update: ``bool``
            Internal, you should not use it.

        # TODO split self._next (called by both self.next and self.go_to that has the `_update` kwargs
        """
        if _sentinel is not None:
            raise Grid2OpException(
                "You should not use reboot.next() with any argument."
            )
        if self.current_time_step is None:
            raise Grid2OpException(
                "Impossible to go to the next time step with an episode not loaded. "
                'Call "EpisodeReboot.load" before.'
            )
        if _update:
            # I put myself at the observation just before the next time step
            obs = self.episode_data.observations[self.current_time_step]
            self.env._backend_action = self.env._backend_action_class()
            # update the "previous topological state" to the right value
            self._update_bk_act_topo(obs)
            # assign the right state of the grid
            self._assign_state(obs)
        self.action = self.episode_data.actions[self.current_time_step]
        self.env.chronics_handler.real_data.curr_iter = self.current_time_step
        new_obs, new_reward, new_done, new_info = self.env.step(self.action)
        self.current_time_step += 1
        # the chronics handler handled the "self.env.chronics_handler.curr_iter += 1"
        return new_obs, new_reward, new_done, new_info

    def _update_bk_act_topo(self, obs):
        """update the "previous topological state" to the right value"""
        self.env._backend_action.current_topo.values[:] = obs.topo_vect
        self.env._backend_action.current_topo.changed[:] = True
        if obs.shunts_data_available:
            self.env._backend_action.shunt_bus.values[:] = obs._shunt_bus
            self.env._backend_action.shunt_bus.changed[:] = True
        # TODO previous update self.env._backend_action.last_topo_registered too !

    def go_to(self, time_step):
        """
        goes to the step number "time_step".

        So if you go_to timestep 10 then you retrieve the 10th observation and its as if the
        agent did the 9th action (just before)
        """
        if time_step > len(self.episode_data.actions):
            raise Grid2OpException(
                "The stored episode counts only {} time steps. You cannot go "
                "at time step {}"
                "".format(len(self.episode_data.actions), time_step)
            )
        if time_step <= 0:
            raise Grid2OpException(
                'You cannot go to timestep <= 0, it does not make sense (as there is not "-1th"'
                'action). If you want to load the data, please use "EpisodeReboot.load".'
            )
        self.current_time_step = time_step - 1
        return self.next(_update=True)
| 11,770 | 34.561934 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Episode/EpisodeReplay.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import warnings
import time
import imageio
import argparse
from grid2op.Exceptions import Grid2OpException
from grid2op.PlotGrid.PlotMatplot import PlotMatplot
from grid2op.Episode.EpisodeData import EpisodeData
class EpisodeReplay(object):
    """
    This class allows to see visually what an agent has done during an episode. It uses for now the "PlotMatplot" as the
    method to plot the different states of the system. It reads directly data from the runner.

    Examples
    --------
    It can be used the following manner.

    .. code-block:: python

        import grid2op
        agent_class = grid2op.Agent.DoNothingAgent  # change that for studying other agent
        env = grid2op.make()  # make the default environment
        runner = grid2op.Runner.Runner(**env.get_params_for_runner(), agentClass=agent_class)
        path_log = "agent_log"  # where the runner will output the standardized data when running the agent.
        res = runner.run(nb_episode=1, path_save=path_log)

        # and when it's done, you can visualize it this way:
        episode_replay = EpisodeReplay(agent_path=path_log)
        episode_id = res[0][1]
        episode_replay.plot_episode(episode_id, max_fps=10)

        # you can pause by clicking the "space" key
        # At any time, you can quit by pressing the "esc" key or the "exit" button of the window.

    Attributes
    ----------
    agent_path: ``str``
        The path were the log of the agent are stored. It is recommended to use a :class:`grid2op.Runner.Runner`
        to save tha log of the agent.

    episode_data: :class:`grid2op.EpisodeData.EpisodeData`, optional
        The last data of the episode inspected.
    """

    def __init__(self, agent_path):
        if not os.path.exists(agent_path):
            raise Grid2OpException(
                'Nothing is found at "{}" where an agent path should have been.'.format(
                    agent_path
                )
            )
        self.agent_path = agent_path
        self.episode_data = None

    def replay_episode(
        self,
        episode_id,
        fps=2.0,
        gif_name=None,
        display=True,
        start_step=0,
        end_step=-1,
        line_info="rho",
        load_info="p",
        gen_info="p",
        resolution=(1280, 720),
    ):
        """
        When called, this function will start the display of the episode in a "mini movie" format.

        Parameters
        ----------
        episode_id: ``str``
            ID of the episode to replay

        fps: ``float``
            Frames per second. When it's low, you will have more time to look at each frame, but the episode
            will last longer. When it's high, episode will be faster, but frames will stay less time on the screen.

        gif_name: ``str``
            If provided, a .gif file is saved in the episode folder with the name :gif_name:.
            The .gif extension is appended by this function

        start_step: ``int``
            Default to 0. The step at which to start generating the gif

        end_step: ``int``
            Default to -1. The step at which to stop generating the gif.
            Set to -1 to specify no limit

        load_info: ``str``
            Defaults to "p". What kind of values to show on loads.
            Can be oneof `["p", "v", None]`

        gen_info: ``str``
            Defaults to "p". What kind of values to show on generators.
            Can be oneof `["p", "v", None]`

        line_info: ``str``
            Defaults to "rho". What kind of values to show on lines.
            Can be oneof `["rho", "a", "p", "v", None]`

        resolution: ``tuple``
            Defaults to (1280, 720). The resolution to use for the gif.
        """
        # Check args
        path_ep = os.path.join(self.agent_path, episode_id)
        if not os.path.exists(path_ep):
            raise Grid2OpException('No episode is found at "{}".'.format(path_ep))

        # Load episode observations
        self.episode_data = EpisodeData.from_disk(
            agent_path=self.agent_path, name=episode_id
        )
        all_obs = [el for el in self.episode_data.observations]

        # Create a plotter
        width, height = resolution
        plot_runner = PlotMatplot(
            self.episode_data.observation_space,
            width=width,
            height=height,
            load_name=False,
            gen_name=False,
        )

        # Some vars for gif export if enabled
        frames = []
        gif_path = None
        if gif_name is not None:
            gif_path = os.path.join(path_ep, gif_name + ".gif")

        # Render loop
        figure = None
        time_per_frame = 1.0 / fps
        for step, obs in enumerate(all_obs):
            # Skip up to start_step
            if step < start_step:
                continue
            # Terminate if reached end_step
            if end_step > 0 and step >= end_step:
                break
            # Get a timestamp for current frame
            start_time = time.perf_counter()

            # Render the observation
            fig = plot_runner.plot_obs(
                observation=obs,
                line_info=line_info,
                gen_info=gen_info,
                load_info=load_info,
                figure=figure,
                redraw=True,
            )
            if figure is None and display:
                fig.show()
            elif display:
                fig.canvas.draw()

            # Store figure for re-use
            figure = fig
            # Save pixel array if needed
            if gif_name is not None:
                frames.append(plot_runner.convert_figure_to_numpy_HWC(figure))

            # Get the timestamp after frame is rendered
            end_time = time.perf_counter()
            delta_time = end_time - start_time

            # Cap fps for display mode
            if display:
                wait_time = time_per_frame - delta_time
                if wait_time > 0.0:
                    time.sleep(wait_time)

        # Export all frames as gif if enabled
        if gif_name is not None and len(frames) > 0:
            try:
                imageio.mimwrite(gif_path, frames, fps=fps)
                # Try to compress
                try:
                    from pygifsicle import optimize

                    optimize(gif_path, options=["-w", "--no-conserve-memory"])
                except Exception:
                    # fix: this was a bare "except:" which also swallowed
                    # KeyboardInterrupt / SystemExit; narrowed to Exception
                    # (still covers the expected ImportError / runtime errors)
                    warn_msg = (
                        "Failed to optimize .GIF size, but gif is still saved:\n"
                        "Install dependencies to reduce size by ~3 folds\n"
                        "apt-get install gifsicle && pip3 install pygifsicle"
                    )
                    warnings.warn(warn_msg)
            except Exception as e:
                warnings.warn("Impossible to save gif with error :\n{}".format(e))
def episode_replay_cli():
    """Build and parse the command line arguments of the episode replay tool."""
    parser = argparse.ArgumentParser(description="EpisodeReplay")
    # mandatory arguments: where the runner logs live and which episode to show
    parser.add_argument("--agent_path", type=str, required=True)
    parser.add_argument("--episode_id", type=str, required=True)
    # optional rendering / gif-export arguments
    parser.add_argument("--display", action="store_true", required=False, default=False)
    parser.add_argument("--fps", type=float, required=False, default=2.0)
    parser.add_argument("--gif_name", type=str, required=False, default=None)
    parser.add_argument("--gif_start", type=int, required=False, default=0)
    parser.add_argument("--gif_end", type=int, required=False, default=-1)
    return parser.parse_args()
def main(args=None):
    """Entry point: replay one episode according to the CLI arguments.

    When ``args`` is ``None`` the arguments are parsed from the command line.
    """
    cli_args = episode_replay_cli() if args is None else args
    replayer = EpisodeReplay(cli_args.agent_path)
    replayer.replay_episode(
        cli_args.episode_id,
        fps=cli_args.fps,
        gif_name=cli_args.gif_name,
        start_step=cli_args.gif_start,
        end_step=cli_args.gif_end,
        display=cli_args.display,
    )
# Dev / Test by running this file
if __name__ == "__main__":
args = episode_replay_cli()
main(args)
| 8,466 | 34.57563 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/Episode/__init__.py | __all__ = ["EpisodeData"]
from grid2op.Episode.EpisodeData import EpisodeData
# Try to import optional module
try:
from grid2op.Episode.EpisodeReplay import EpisodeReplay
__all__.append("EpisodeReplay")
except ImportError:
pass # Silent fail for optional dependencies
| 284 | 22.75 | 59 | py |
Grid2Op | Grid2Op-master/grid2op/Exceptions/AmbiguousActionExceptions.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Exceptions.Grid2OpException import Grid2OpException
# ambiguous action
class AmbiguousAction(Grid2OpException):
    """
    This exception indicates that the :class:`grid2op.BaseAction` is ambiguous.
    It could be understood differently according to the backend used.

    Such a kind of action are forbidden in this package. These kind of exception are mainly thrown by the
    :class:`grid2op.BaseAction.BaseAction` in
    the :func:`grid2op.BaseAction.update` and :func:`grid2op.BaseAction.__call__` methods.

    As opposed to a :class:`IllegalAction` an :class:`AmbiguousAction` is forbidden for all the backend,
    in all the scenarios.

    It doesn't depend on the implemented rules.
    """

    pass
class InvalidLineStatus(AmbiguousAction):
    """
    This is a more precise exception than :class:`AmbiguousAction` indicating that the
    :class:`grid2op.BaseAction.BaseAction` is ambiguous due to powerlines manipulation.
    """

    pass
class InvalidStorage(AmbiguousAction):
    """
    This is a more precise exception than :class:`AmbiguousAction` indicating that the
    :class:`grid2op.BaseAction.BaseAction` is ambiguous due to storage unit manipulation.
    """

    pass
class UnrecognizedAction(AmbiguousAction):
    """
    This is a more precise exception than :class:`AmbiguousAction` indicating that the
    :class:`grid2op.BaseAction.BaseAction` is ambiguous due to the bad formatting of the action.
    """

    pass
class InvalidNumberOfLoads(AmbiguousAction):
    """
    This is a more precise exception than :class:`AmbiguousAction` indicating that
    the :class:`grid2op.BaseAction.BaseAction` is ambiguous because an incorrect number of loads tries to be modified.
    """

    pass
class InvalidNumberOfGenerators(AmbiguousAction):
    """
    This is a more precise exception than :class:`AmbiguousAction` indicating that
    the :class:`grid2op.BaseAction.BaseAction`
    is ambiguous because an incorrect number of generators tries to be modified.
    """

    pass
class InvalidNumberOfLines(AmbiguousAction):
    """
    This is a more precise exception than :class:`AmbiguousAction` indicating that
    the :class:`grid2op.BaseAction.BaseAction`
    is ambiguous because an incorrect number of lines tries to be modified.
    """

    pass
class InvalidNumberOfObjectEnds(AmbiguousAction):
    """
    This is a more precise exception than :class:`AmbiguousAction` indicating that
    the :class:`grid2op.BaseAction.BaseAction`
    is ambiguous because an incorrect number of objects at a substation try to be modified.
    """

    pass
class InvalidBusStatus(AmbiguousAction):
    """
    This is a more precise exception than :class:`AmbiguousAction` indicating that
    the :class:`grid2op.BaseAction.BaseAction`
    tries to both "set" and "switch" some bus to which an object is connected.
    """

    pass
class InvalidRedispatching(AmbiguousAction):
    """
    This is a more precise exception than :class:`AmbiguousAction` indicating that
    the :class:`grid2op.BaseAction.BaseAction`
    tries to apply an invalid redispatching strategy.
    """

    pass
class InvalidCurtailment(AmbiguousAction):
    """
    This is a more precise exception than :class:`AmbiguousAction` indicating that
    the :class:`grid2op.BaseAction.BaseAction`
    tries to apply an invalid curtailment strategy.
    """

    pass
class GeneratorTurnedOnTooSoon(InvalidRedispatching):
    """
    This is a more precise exception than :class:`AmbiguousAction` indicating that a generator has been turned on
    before gen_min_up_time time steps.
    """

    pass
class GeneratorTurnedOffTooSoon(InvalidRedispatching):
    """
    This is a more precise exception than :class:`AmbiguousAction` indicating that a generator has been turned off
    before gen_min_down_time time steps.
    """

    pass
class NotEnoughGenerators(InvalidRedispatching):
    """
    This is a more precise exception than :class:`AmbiguousAction` indicating that there are not enough turned-on
    generators to meet the demand.
    """

    pass
# NOTE(review): this inherits InvalidRedispatching although it is about
# deserialization ("from_vect") rather than redispatching -- callers may rely
# on this hierarchy, so it is kept as is; confirm before changing the base.
class NonFiniteElement(InvalidRedispatching):
    """
    This is a more precise exception than :class:`AmbiguousAction` indicating that an action / observation
    non initialized (full of Nan)
    has been loaded by the "from_vect" method.
    """

    pass
class AmbiguousActionRaiseAlert(AmbiguousAction):
    """Raised if the type of action is ambiguous due to the 'raiseAlert' part."""

    pass
Grid2Op | Grid2Op-master/grid2op/Exceptions/AttentionBudgetExceptions.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Exceptions.Grid2OpException import Grid2OpException
class NotEnoughAttentionBudget(Grid2OpException):
    """
    This exception is raised when the player attempted to alert the "human operator" but does not
    have enough attention budget left to do so.
    """
    pass
| 734 | 37.684211 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Exceptions/BackendExceptions.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Exceptions.Grid2OpException import Grid2OpException
# Backend
class BackendError(Grid2OpException):
    """
    Base class of all errors regarding a Backend (:class:`grid2op.Backend.Backend`) that might be
    badly configured.
    """
    pass
| 686 | 35.157895 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Exceptions/ChronicsExceptions.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Exceptions.Grid2OpException import Grid2OpException
# Chronics
class ChronicsError(Grid2OpException):
    """
    Base class of all errors regarding the chronics and the gridValue (see :class:`grid2op.ChronicsHandler.GridValue`
    for more information).
    """
    pass


class ChronicsNotFoundError(ChronicsError):
    """
    This exception is raised when no chronics folder can be found at the indicated location.
    """
    pass


class InsufficientData(ChronicsError):
    """
    This exception is raised when there is not enough data compared to the size of the episode asked.
    """
    pass
| 1,088 | 29.25 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/Exceptions/EnvExceptions.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Exceptions.Grid2OpException import Grid2OpException
# Exception bad environment configured
class EnvError(Grid2OpException):
    """
    This exception indicates that the :class:`grid2op.Environment.Environment` is poorly configured.

    It is for example thrown when assessing if a backend is properly set up with
    :func:`grid2op.Backend.Backend.assert_grid_correct`.
    """
    pass


class IncorrectNumberOfLoads(EnvError):
    """
    This is a more precise exception than :class:`EnvError` indicating that there is a mismatch in the total number of
    loads of the powergrid.
    """
    pass


class IncorrectNumberOfGenerators(EnvError):
    """
    This is a more precise exception than :class:`EnvError` indicating that there is a mismatch in the total number of
    generators of the powergrid.
    """
    pass


class IncorrectNumberOfLines(EnvError):
    """
    This is a more precise exception than :class:`EnvError` indicating that there is a mismatch in the total number of
    powerlines of the powergrid.
    """
    pass


class IncorrectNumberOfSubstation(EnvError):
    """
    This is a more precise exception than :class:`EnvError` indicating that there is a mismatch in the total
    number of substations of the powergrid.
    """
    pass


class IncorrectNumberOfStorages(EnvError):
    """
    This is a more precise exception than :class:`EnvError` indicating that there is a mismatch in the total
    number of storage units of the powergrid.
    """
    pass


class IncorrectNumberOfElements(EnvError):
    """
    This is a more precise exception than :class:`EnvError` indicating that there is a mismatch in the total number
    of elements of the powergrid.
    """
    pass


class IncorrectPositionOfLoads(EnvError):
    """
    This is a more precise exception than :class:`EnvError` indicating that there is a mismatch in the number of
    loads at a substation.
    """
    pass


class IncorrectPositionOfGenerators(EnvError):
    """
    This is a more precise exception than :class:`EnvError` indicating that there is a mismatch in the number of
    generators at a substation.
    """
    pass


class IncorrectPositionOfLines(EnvError):
    """
    This is a more precise exception than :class:`EnvError` indicating that there is a mismatch in the number of
    powerlines at a substation.
    """
    pass


class IncorrectPositionOfStorages(EnvError):
    """
    This is a more precise exception than :class:`EnvError` indicating that there is a mismatch in the number of
    storage units at a substation.
    """
    pass


# Unknown environment at creation
class UnknownEnv(Grid2OpException):
    """
    This exception indicates that a bad argument has been sent to the :func:`grid2op.make` function.

    It does not recognize the name of the :class:`grid2op.Environment.Environment`.
    """
    pass


# multi environment
class MultiEnvException(Grid2OpException):
    """General exception raised by :class:`grid2Op.MultiEnv.MultiEnvironment`."""
    pass
| 3,499 | 25.923077 | 118 | py |
Grid2Op | Grid2Op-master/grid2op/Exceptions/Grid2OpException.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
In this module are defined all the exceptions that are used in the Grid2Op package.
They all inherit from :class:`Grid2OpException`, which is nothing more than a :class:`RuntimeError` with
customs :func:`Grid2OpException.__repr__` and :func:`Grid2OpException.__str__` definition to allow for easier logging.
"""
import inspect
class Grid2OpException(RuntimeError):
    """
    Base exception from which every exception raised by Grid2Op derives.

    Its :func:`__repr__` / :func:`__str__` prepend the class hierarchy (starting at
    ``Grid2OpException``) to the underlying :class:`RuntimeError` message, which makes
    logged errors self-describing.
    """

    def vect_hierarchy_cleaned(self):
        """Return the class hierarchy, from ``Grid2OpException`` down to the concrete class, as a string."""
        mro_names = [klass.__name__ for klass in reversed(inspect.getmro(type(self)))]
        # keep only the portion of the MRO starting at Grid2OpException
        # (drops object / BaseException / Exception / RuntimeError)
        start = mro_names.index("Grid2OpException")
        return " ".join(mro_names[start:]) + " "

    def __repr__(self):
        return self.vect_hierarchy_cleaned() + RuntimeError.__repr__(self)

    def __str__(self):
        return '{}"{}"'.format(self.vect_hierarchy_cleaned(), RuntimeError.__str__(self))
| 1,609 | 36.44186 | 118 | py |
Grid2Op | Grid2Op-master/grid2op/Exceptions/IllegalActionExceptions.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Exceptions.Grid2OpException import Grid2OpException
# exception bad actions
class IllegalAction(Grid2OpException):
    """
    This exception indicates that the :class:`grid2op.BaseAction` is illegal.

    It is for example thrown when an :class:`grid2op.BaseAgent` tries to perform an action against the rules.
    This is handled in :func:`grid2op.Environment.Environment.step`.

    An :class:`grid2op.BaseAction` is said to be **illegal** depending on some rules implemented in the
    :func:`grid2op.BaseAction.ActionSpace.is_legal` method.
    An action can be legal in some contexts, but illegal in others.
    """
    pass


class OnProduction(IllegalAction):
    """
    This is a more precise exception than :class:`IllegalAction` indicating that the action is illegal due to
    setting wrong values to generators.
    """
    pass


class VSetpointModified(OnProduction):
    """
    This is a more precise exception than :class:`OnProduction` indicating that the action is illegal because the
    setpoint voltage magnitude of a production has been changed.
    """
    pass


class ActiveSetPointAbovePmax(OnProduction):
    """
    This is a more precise exception than :class:`OnProduction` indicating that the action is illegal because the
    setpoint active power of a production is set to be higher than Pmax.
    """
    pass


class ActiveSetPointBelowPmin(OnProduction):
    """
    This is a more precise exception than :class:`OnProduction` indicating that the action is illegal because the
    setpoint active power of a production is set to be lower than Pmin.
    """
    pass


class OnLoad(IllegalAction):
    """
    This is a more precise exception than :class:`IllegalAction` indicating that the action is illegal due to
    setting wrong values to loads.
    """
    pass


class OnLines(IllegalAction):
    """
    This is a more precise exception than :class:`IllegalAction` indicating that the action is illegal due to setting
    wrong values to lines (reconnection impossible, disconnection impossible etc.).
    """
    pass


class InvalidReconnection(OnLines):
    """
    This is a more precise exception than :class:`OnLines` indicating that the :class:`grid2op.BaseAgent` tried to
    reconnect a powerline illegally.
    """
    pass


# attempt to use redispatching or unit commit method in an environment not set up.
class UnitCommitorRedispachingNotAvailable(IllegalAction):
    """
    Raised on an attempt to use redispatching or unit-commitment features in an environment
    that was not set up to support them.
    """
    pass
| 3,015 | 29.464646 | 117 | py |
Grid2Op | Grid2Op-master/grid2op/Exceptions/ObservationExceptions.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Exceptions.Grid2OpException import Grid2OpException
class BaseObservationError(Grid2OpException):
    """
    Generic type of exceptions raised by the observation.
    """
    pass


# BaseObservation
# Functionality not implemented by the observation
class NoForecastAvailable(Grid2OpException):
    """
    This exception is mainly raised by the :class:`grid2op.Observation.BaseObservation`. It tells the
    :class:`grid2op.Agent.BaseAgent`
    that the :class:`grid2op.Chronics.GridValue` doesn't produce any forecasts.

    In that case it is not possible to use the :func:`grid2op.Observation.BaseObservation.forecasts` method.

    NOTE(review): inherits :class:`Grid2OpException` directly (not :class:`BaseObservationError`) —
    presumably for backward compatibility; confirm before changing.
    """
    pass


class SimulateError(BaseObservationError):
    """
    This is the generic exception related to the :func:`grid2op.Observation.BaseObservation.simulate` function.
    """
    pass


class SimulateUsedTooMuch(SimulateError):
    # Common base of the "too many calls to obs.simulate(...)" exceptions below.
    pass


class SimulateUsedTooMuchThisStep(SimulateUsedTooMuch):
    """
    This exception is raised by the :class:`grid2op.Observation.BaseObservation` when using "obs.simulate(...)".

    It is raised when the total number of calls to `obs.simulate(...)` exceeds the maximum number of allowed
    calls to it, for a given step.

    You can do more "obs.simulate" if you call "env.step".
    """
    pass


class SimulateUsedTooMuchThisEpisode(SimulateUsedTooMuch):
    """
    This exception is raised by the :class:`grid2op.Observation.BaseObservation` when using "obs.simulate(...)".

    It is raised when the total number of calls to `obs.simulate(...)` exceeds the maximum number of allowed
    calls to it for this episode.

    The only way to use "obs.simulate(...)" again is to call "env.reset(...)".
    """
    pass
| 2,190 | 30.3 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Exceptions/OpponentError.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Exceptions.Grid2OpException import Grid2OpException
class OpponentError(Grid2OpException):
    """General error raised by the :class:`grid2op.Opponent` (badly configured or misbehaving opponent)."""
    pass
| 637 | 38.875 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Exceptions/PlotExceptions.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Exceptions.Grid2OpException import Grid2OpException
# plot error
class PlotError(Grid2OpException):
    """General exception raised by any class that handles plots."""
    pass


class PyGameQuit(PlotError):
    """Raised when the player quits the (pygame-based) renderer."""
    pass
| 747 | 31.521739 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Exceptions/PowerflowExceptions.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Exceptions.Grid2OpException import Grid2OpException
# powerflow exception
class DivergingPowerFlow(Grid2OpException):
    """
    This exception indicates that the :class:`grid2op.Backend.Backend` is not able to find a valid solution to the
    physical _grid it represents.

    This divergence can be due to:

    - the system is not feasible: there is no solution to Kirchhoff's law given the state
    - the powergrid is not connected (it is split into several isolated parts)
    - there is a "voltage collapse": the voltages are ill conditioned making the _grid unrealistic.
    - the method to solve the powerflow fails to find a valid solution. In this case, adopting a different
      :class:`grid2op.Backend.Backend` might solve the problem.
    """
    pass
| 1,215 | 42.428571 | 113 | py |
Grid2Op | Grid2Op-master/grid2op/Exceptions/RunnerError.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Exceptions.Grid2OpException import Grid2OpException
# Exception Runner is used twice, not possible on windows / macos due to the way multiprocessing works
class UsedRunnerError(Grid2OpException):
    """
    This exception indicates that a runner (object of :class:`grid2op.Runner.Runner`) has already been used.

    This behaviour is not supported on windows / macos given the way the multiprocessing package works (spawning
    a process where grid2op objects are made is not completely supported at the moment).

    The best solution is to recreate a runner, and then use this new one.
    """
    pass
| 1,083 | 44.166667 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Exceptions/__init__.py | __all__ = [
"Grid2OpException",
"EnvError",
"IncorrectNumberOfLoads",
"IncorrectNumberOfGenerators",
"IncorrectNumberOfLines",
"IncorrectNumberOfSubstation",
"IncorrectNumberOfStorages",
"IncorrectNumberOfElements",
"IncorrectPositionOfLoads",
"IncorrectPositionOfGenerators",
"IncorrectPositionOfLines",
"IncorrectPositionOfStorages",
"UnknownEnv",
"MultiEnvException",
"IllegalAction",
"OnProduction",
"VSetpointModified",
"ActiveSetPointAbovePmax",
"ActiveSetPointBelowPmin",
"OnLoad",
"OnLines",
"InvalidReconnection",
"UnitCommitorRedispachingNotAvailable",
"NotEnoughGenerators",
"GeneratorTurnedOffTooSoon",
"GeneratorTurnedOnTooSoon",
"InvalidRedispatching",
"InvalidBusStatus",
"InvalidNumberOfObjectEnds",
"InvalidNumberOfLines",
"InvalidNumberOfGenerators",
"InvalidNumberOfLoads",
"UnrecognizedAction",
"InvalidLineStatus",
"InvalidStorage",
"InvalidCurtailment",
"AmbiguousAction",
"NonFiniteElement",
"AmbiguousActionRaiseAlert",
"DivergingPowerFlow",
"BaseObservationError",
"NoForecastAvailable",
"SimulateError",
"SimulateUsedTooMuchThisStep",
"SimulateUsedTooMuchThisEpisode",
"ChronicsError",
"ChronicsNotFoundError",
"InsufficientData",
"BackendError",
"PlotError",
"OpponentError",
"UsedRunnerError",
"NotEnoughAttentionBudget",
"AgentError",
"SimulatorError",
"HandlerError"
]
from grid2op.Exceptions.Grid2OpException import Grid2OpException
from grid2op.Exceptions.EnvExceptions import EnvError
from grid2op.Exceptions.EnvExceptions import IncorrectNumberOfLoads
from grid2op.Exceptions.EnvExceptions import IncorrectNumberOfGenerators
from grid2op.Exceptions.EnvExceptions import IncorrectNumberOfLines
from grid2op.Exceptions.EnvExceptions import IncorrectNumberOfSubstation
from grid2op.Exceptions.EnvExceptions import IncorrectNumberOfStorages
from grid2op.Exceptions.EnvExceptions import IncorrectNumberOfElements
from grid2op.Exceptions.EnvExceptions import IncorrectPositionOfLoads
from grid2op.Exceptions.EnvExceptions import IncorrectPositionOfGenerators
from grid2op.Exceptions.EnvExceptions import IncorrectPositionOfLines
from grid2op.Exceptions.EnvExceptions import IncorrectPositionOfStorages
from grid2op.Exceptions.EnvExceptions import UnknownEnv
from grid2op.Exceptions.EnvExceptions import MultiEnvException
from grid2op.Exceptions.IllegalActionExceptions import IllegalAction
from grid2op.Exceptions.IllegalActionExceptions import OnProduction
from grid2op.Exceptions.IllegalActionExceptions import VSetpointModified
from grid2op.Exceptions.IllegalActionExceptions import ActiveSetPointAbovePmax
from grid2op.Exceptions.IllegalActionExceptions import ActiveSetPointBelowPmin
from grid2op.Exceptions.IllegalActionExceptions import OnLoad
from grid2op.Exceptions.IllegalActionExceptions import OnLines
from grid2op.Exceptions.IllegalActionExceptions import InvalidReconnection
from grid2op.Exceptions.IllegalActionExceptions import (
UnitCommitorRedispachingNotAvailable,
)
from grid2op.Exceptions.AmbiguousActionExceptions import NotEnoughGenerators
from grid2op.Exceptions.AmbiguousActionExceptions import GeneratorTurnedOffTooSoon
from grid2op.Exceptions.AmbiguousActionExceptions import GeneratorTurnedOnTooSoon
from grid2op.Exceptions.AmbiguousActionExceptions import InvalidRedispatching
from grid2op.Exceptions.AmbiguousActionExceptions import InvalidBusStatus
from grid2op.Exceptions.AmbiguousActionExceptions import InvalidNumberOfObjectEnds
from grid2op.Exceptions.AmbiguousActionExceptions import InvalidNumberOfLines
from grid2op.Exceptions.AmbiguousActionExceptions import InvalidNumberOfGenerators
from grid2op.Exceptions.AmbiguousActionExceptions import InvalidNumberOfLoads
from grid2op.Exceptions.AmbiguousActionExceptions import InvalidCurtailment
from grid2op.Exceptions.AmbiguousActionExceptions import UnrecognizedAction
from grid2op.Exceptions.AmbiguousActionExceptions import InvalidLineStatus
from grid2op.Exceptions.AmbiguousActionExceptions import InvalidStorage
from grid2op.Exceptions.AmbiguousActionExceptions import AmbiguousAction
from grid2op.Exceptions.AmbiguousActionExceptions import NonFiniteElement
from grid2op.Exceptions.AmbiguousActionExceptions import AmbiguousActionRaiseAlert
from grid2op.Exceptions.PowerflowExceptions import DivergingPowerFlow
from grid2op.Exceptions.ObservationExceptions import BaseObservationError
from grid2op.Exceptions.ObservationExceptions import NoForecastAvailable
from grid2op.Exceptions.ObservationExceptions import SimulateError
from grid2op.Exceptions.ObservationExceptions import SimulateUsedTooMuchThisStep
from grid2op.Exceptions.ObservationExceptions import SimulateUsedTooMuchThisEpisode
from grid2op.Exceptions.ChronicsExceptions import ChronicsError
from grid2op.Exceptions.ChronicsExceptions import ChronicsNotFoundError
from grid2op.Exceptions.ChronicsExceptions import InsufficientData
from grid2op.Exceptions.handlers_exceptions import HandlerError
from grid2op.Exceptions.BackendExceptions import BackendError
from grid2op.Exceptions.PlotExceptions import PlotError
from grid2op.Exceptions.OpponentError import OpponentError
from grid2op.Exceptions.RunnerError import UsedRunnerError
from grid2op.Exceptions.AttentionBudgetExceptions import NotEnoughAttentionBudget
from grid2op.Exceptions.agentError import AgentError
from grid2op.Exceptions.simulatorExceptions import SimulatorError
| 5,554 | 41.40458 | 83 | py |
Grid2Op | Grid2Op-master/grid2op/Exceptions/agentError.py | from grid2op.Exceptions.Grid2OpException import Grid2OpException
# Exception Runner is used twice, not possible on windows / macos due to the way multiprocessing works
class AgentError(Grid2OpException):
"""
This exception indicate that there is an error in the creation of an agent
"""
pass
| 311 | 27.363636 | 102 | py |
Grid2Op | Grid2Op-master/grid2op/Exceptions/handlers_exceptions.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Exceptions.ChronicsExceptions import ChronicsError
class HandlerError(ChronicsError):
    """This class indicates that the "handler" you are trying to use is not suitable."""
    pass
| 662 | 43.2 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Exceptions/simulatorExceptions.py | # Copyright (c) 2019-2022, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Exceptions.Grid2OpException import Grid2OpException
class SimulatorError(Grid2OpException):
    """
    This exception indicates that the simulator you are trying to use is not initialized.

    You might want to call `simulator.set_state(...)` before using it.
    """
    pass
| 757 | 36.9 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/MakeEnv/Make.py | # Copyright (c) 2019-2021, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import time
import requests
import os
import warnings
import pkg_resources
from grid2op.Environment import Environment
from grid2op.MakeEnv.MakeFromPath import make_from_dataset_path, ERR_MSG_KWARGS
from grid2op.Exceptions import Grid2OpException, UnknownEnv
import grid2op.MakeEnv.PathUtils
from grid2op.MakeEnv.PathUtils import _create_path_folder
from grid2op.Download.DownloadDataset import _aux_download
_VAR_FORCE_TEST = "_GRID2OP_FORCE_TEST"
DEV_DATA_FOLDER = pkg_resources.resource_filename("grid2op", "data")
DEV_DATASET = os.path.join(DEV_DATA_FOLDER, "{}")
TEST_DEV_ENVS = {
"blank": DEV_DATASET.format("blank"),
"rte_case14_realistic": DEV_DATASET.format("rte_case14_realistic"),
"rte_case14_redisp": DEV_DATASET.format("rte_case14_redisp"),
"rte_case14_test": DEV_DATASET.format("rte_case14_test"),
"rte_case5_example": DEV_DATASET.format("rte_case5_example"),
"rte_case118_example": DEV_DATASET.format("rte_case118_example"),
"rte_case14_opponent": DEV_DATASET.format("rte_case14_opponent"),
"l2rpn_wcci_2020": DEV_DATASET.format("l2rpn_wcci_2020"),
"l2rpn_neurips_2020_track2": DEV_DATASET.format("l2rpn_neurips_2020_track2"),
"l2rpn_neurips_2020_track1": DEV_DATASET.format("l2rpn_neurips_2020_track1"),
"l2rpn_case14_sandbox": DEV_DATASET.format("l2rpn_case14_sandbox"),
"l2rpn_case14_sandbox_diff_grid": DEV_DATASET.format("l2rpn_case14_sandbox_diff_grid"),
"l2rpn_icaps_2021": DEV_DATASET.format("l2rpn_icaps_2021"),
"l2rpn_wcci_2022_dev": DEV_DATASET.format("l2rpn_wcci_2022_dev"),
"l2rpn_wcci_2022": DEV_DATASET.format("l2rpn_wcci_2022_dev"),
"l2rpn_idf_2023": DEV_DATASET.format("l2rpn_idf_2023"),
# educational files
"educ_case14_redisp": DEV_DATASET.format("educ_case14_redisp"),
"educ_case14_storage": DEV_DATASET.format("educ_case14_storage"),
# keep the old names for now
"case14_realistic": DEV_DATASET.format("rte_case14_realistic"),
"case14_redisp": DEV_DATASET.format("rte_case14_redisp"),
"case14_test": DEV_DATASET.format("rte_case14_test"),
"case5_example": DEV_DATASET.format("rte_case5_example"),
"case14_fromfile": DEV_DATASET.format("rte_case14_test"),
}
_REQUEST_FAIL_EXHAUSTED_ERR = (
'Impossible to retrieve data at "{}".\n'
"If the problem persists, please contact grid2op developers by sending an issue at "
"https://github.com/rte-france/Grid2Op/issues"
)
_REQUEST_FAIL_RETRY_ERR = (
'Failure to get a response from the url "{}".\n'
"Retrying... {} attempt(s) remaining"
)
_REQUEST_EXCEPT_RETRY_ERR = (
'Exception in getting an answer from "{}".\n' "Retrying... {} attempt(s) remaining"
)
_LIST_REMOTE_URL = (
"https://api.github.com/repos/bdonnot/grid2op-datasets/contents/datasets.json"
)
_LIST_REMOTE_KEY = "download_url"
_LIST_REMOTE_INVALID_CONTENT_JSON_ERR = (
"Impossible to retrieve available datasets. "
"File could not be converted to json. "
"Parsing error:\n {}"
)
_LIST_REMOTE_CORRUPTED_CONTENT_JSON_ERR = (
"Corrupted json retrieved from github api. "
"Please wait a few minutes and try again. "
"If the error persist, contact grid2op devs by making an issue at "
"\n\thttps://github.com/rte-france/Grid2Op/issues/new/choose"
)
_LIST_REMOTE_INVALID_DATASETS_JSON_ERR = (
"Impossible to retrieve available datasets. "
"File could not be converted to json. "
'The error was \n"{}"'
)
_FETCH_ENV_UNKNOWN_ERR = (
'Impossible to find the environment named "{}".\n'
"Current available environments are:\n{}"
)
_MULTIMIX_FILE = ".multimix"
_MAKE_DEV_ENV_WARN = (
"You are using a development environment. "
"This environment is not intended for training agents. It might not be up to date "
'and its primary use if for tests (hence the "test=True" you passed as argument). '
"Use at your own risk."
)
_MAKE_DEV_ENV_DEPRECATED_WARN = (
'Dev env "{}" has been deprecated '
"and will be removed in future version.\n"
'Please update to dev envs starting by "rte" or "l2rpn"'
)
_MAKE_FIRST_TIME_WARN = (
'It is the first time you use the environment "{}".\n'
"We will attempt to download this environment from remote"
)
_MAKE_UNKNOWN_ENV = 'Impossible to load the environment named "{}".'
_EXTRACT_DS_NAME_CONVERT_ERR = (
'The "dataset_name" argument '
"should be convertible to string, "
'but "{}" was provided.'
)
_EXTRACT_DS_NAME_RECO_ERR = (
'Impossible to recognize the environment name from path "{}"'
)
def _force_test_dataset():
    """Return True when the ``_GRID2OP_FORCE_TEST`` environment variable forces ``test=True``.

    The variable is interpreted as an integer; any value >= 1 (or an unparsable value,
    with a warning) forces test mode. An unset variable means no forcing.
    """
    raw_value = os.environ.get(_VAR_FORCE_TEST)
    if raw_value is None:
        return False
    try:
        as_int = int(raw_value)
    except Exception as exc_:
        # the variable is set but not an integer: warn and still force test mode
        warnings.warn(f"The environment variable {_VAR_FORCE_TEST}, "
                      f"used to force the \"test=True\" in grid2op "
                      f"cannot be converted to an integer with error "
                      f"\"{exc_}\". As it is set nonetheless, we "
                      f"assume you want to force \"test=True\".")
        as_int = 1
    return as_int >= 1
def _send_request_retry(url, nb_retry=10, gh_session=None):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Perform an HTTP GET on ``url``, retrying up to ``nb_retry`` times (recursively,
    sleeping 1 second between attempts) on non-200 responses or network exceptions.
    ``gh_session`` (a ``requests.Session``) is created on first call and reused
    across retries. Raises :class:`Grid2OpException` once all retries are exhausted.
    """
    if nb_retry <= 0:
        # all attempts used up: give up with an explicit error
        raise Grid2OpException(_REQUEST_FAIL_EXHAUSTED_ERR.format(url))

    if gh_session is None:
        gh_session = requests.Session()

    try:
        response = gh_session.get(url=url)
        if response.status_code == 200:
            return response
        # non-200 answer: warn, wait a bit, then retry with one fewer attempt
        warnings.warn(_REQUEST_FAIL_RETRY_ERR.format(url, nb_retry - 1))
        time.sleep(1)
        return _send_request_retry(url, nb_retry=nb_retry - 1, gh_session=gh_session)
    except Grid2OpException:
        # our own "retries exhausted" error: propagate unchanged
        raise
    except KeyboardInterrupt:
        # never swallow a user interrupt
        raise
    except Exception as exc_:
        # network / requests failure: warn and retry like the non-200 case
        warnings.warn(_REQUEST_EXCEPT_RETRY_ERR.format(url, nb_retry - 1))
        time.sleep(1)
        return _send_request_retry(url, nb_retry=nb_retry - 1, gh_session=gh_session)
def _retrieve_github_content(url, is_json=True):
    """Fetch a github-API content descriptor at ``url``, then download the file it points to.

    The first request returns a json document containing a "download_url" key
    (``_LIST_REMOTE_KEY``); the second request fetches that url and returns its
    body, parsed as json when ``is_json`` is True, as raw text otherwise.
    Raises :class:`Grid2OpException` on malformed or corrupted answers.
    """
    answer = _send_request_retry(url)
    try:
        answer_json = answer.json()
    except Exception as e:
        raise Grid2OpException(_LIST_REMOTE_INVALID_CONTENT_JSON_ERR.format(e))

    if _LIST_REMOTE_KEY not in answer_json:
        raise Grid2OpException(_LIST_REMOTE_CORRUPTED_CONTENT_JSON_ERR)

    # small pause between the two requests (presumably github rate-limit courtesy)
    time.sleep(1)
    avail_datasets = _send_request_retry(answer_json[_LIST_REMOTE_KEY])
    if is_json:
        try:
            res = avail_datasets.json()
        except Exception as e:
            raise Grid2OpException(_LIST_REMOTE_INVALID_DATASETS_JSON_ERR.format(e))
    else:
        res = avail_datasets.text
    return res
def _list_available_remote_env_aux():
    """Retrieve (from the remote github repository) the json describing all downloadable grid2op datasets."""
    return _retrieve_github_content(url=_LIST_REMOTE_URL)
def _fecth_environments(dataset_name):
    """Return ``(url, ds_name_dl)`` for downloading the remote environment ``dataset_name``.

    Raises :class:`UnknownEnv` if the name is not among the remotely available datasets.

    NOTE(review): the function name contains a typo ("fecth") — kept as-is since other
    modules may call it by this name.
    """
    avail_datasets_json = _list_available_remote_env_aux()
    if not dataset_name in avail_datasets_json:
        known_ds = sorted(avail_datasets_json.keys())
        raise UnknownEnv(_FETCH_ENV_UNKNOWN_ERR.format(dataset_name, known_ds))
    dict_ = avail_datasets_json[dataset_name]
    baseurl, filename = dict_["base_url"], dict_["filename"]
    url = baseurl + filename
    # filename ends with ".tar.bz2" so two extensions need to be stripped
    ds_name_dl = os.path.splitext(os.path.splitext(filename)[0])[0]
    return url, ds_name_dl
def _extract_ds_name(dataset_path):
"""
If a path is provided, clean it to have a proper datasetname.
If a dataset name is already provided, then i just returns it.
Parameters
----------
dataset_path: ``str``
The path in the form of a
Returns
-------
dataset_name: ``str``
The name of the dataset (all lowercase, without "." etc.)
"""
try:
dataset_path = str(dataset_path)
except Exception as exc_:
raise Grid2OpException(
_EXTRACT_DS_NAME_CONVERT_ERR.format(dataset_path)
) from exc_
try:
dataset_name = os.path.split(dataset_path)[-1]
except Exception as exc_:
raise UnknownEnv(_EXTRACT_DS_NAME_RECO_ERR.format(dataset_path)) from exc_
dataset_name = dataset_name.lower().rstrip().lstrip()
dataset_name = os.path.splitext(dataset_name)[0]
return dataset_name
def _aux_is_multimix(dataset_path):
    """Tell whether ``dataset_path`` hosts a multi-mix environment (presence of the marker file)."""
    return os.path.exists(os.path.join(dataset_path, _MULTIMIX_FILE))
def _aux_make_multimix(
    dataset_path,
    test=False,
    experimental_read_from_local_dir=False,
    _add_to_name="",
    _compat_glop_version=None,
    logger=None,
    **kwargs
) -> Environment:
    """Build a :class:`grid2op.Environment.MultiMixEnvironment` from ``dataset_path``.

    Extra keyword arguments are forwarded to the ``MultiMixEnvironment`` constructor;
    ``test`` / ``_add_to_name`` / ``_compat_glop_version`` are internal knobs of
    :func:`make`.
    """
    # Local import to prevent imports loop
    from grid2op.Environment import MultiMixEnvironment

    return MultiMixEnvironment(
        dataset_path,
        experimental_read_from_local_dir=experimental_read_from_local_dir,
        _test=test,
        _add_to_name=_add_to_name,
        _compat_glop_version=_compat_glop_version,
        logger=logger,
        **kwargs
    )
def make(
    dataset=None,
    test=False,
    logger=None,
    experimental_read_from_local_dir=False,
    _add_to_name="",
    _compat_glop_version=None,
    **kwargs
) -> Environment:
    """
    This function is a shortcut to rapidly create some (pre defined) environments within the grid2op Framework.

    Other environments, with different powergrids will be made available in the future and will be easily downloadable
    using this function.

    It mimic the `gym.make` function.

    Parameters
    ----------
    dataset: ``str``
        Name of the environment you want to create

    test: ``bool``
        Whether you want to use a test environment (**NOT** recommended). Use at your own risk.

    logger:
        If you want to use a specific logger for environment and all other
        grid2op objects, you can put it here. This feature is still under development.

    experimental_read_from_local_dir: ``bool``
        Grid2op "embed" the grid description into the description of the classes
        themselves. By default this is done "on the fly" (when the environment is created)
        but for some usecase (especially ones involving multiprocessing or "pickle")
        it might not be easily usable. If you encounter issues with pickle or multi
        processing, you can set this flag to ``True``. See the doc of
        :func:`grid2op.Environment.BaseEnv.generate_classes` for more information.

    kwargs:
        Other keyword argument to give more control on the environment you are creating. See
        the Parameters information of the :func:`make_from_dataset_path`.

    _add_to_name:
        Internal, do not use (and can only be used when setting "test=True"). If
        `experimental_read_from_local_dir` is set to True, this has no effect.

    _compat_glop_version:
        Internal, do not use (and can only be used when setting "test=True")

    Returns
    -------
    env: :class:`grid2op.Environment.Environment`
        The created environment.

    Examples
    --------
    If you want to create the environment "rte_case14_realistic":

    .. code-block:: python

        import grid2op
        env_name = "rte_case14_realistic"  # or any other supported environment
        env = grid2op.make(env_name)
        # env implements the openai gym interface (env.step, env.render, env.reset etc.)

    **NB** the first time you type this command, the dataset (approximately 300 MB for this one) will be
    downloaded from the internet, sizes vary per dataset.
    """
    # An environment variable can force "test" mode (useful e.g. on CI: it
    # prevents any download of data).
    if _force_test_dataset():
        if not test:
            warnings.warn(f"The environment variable \"{_VAR_FORCE_TEST}\" is defined so grid2op will be forced in \"test\" mode. "
                          f"This is equivalent to pass \"grid2op.make(..., test=True)\" and prevents any download of data.")
        test = True

    if dataset is None:
        raise Grid2OpException("Impossible to create an environment without its name. Please call something like: \n"
                               "> env = grid2op.make('l2rpn_case14_sandbox') \nor\n"
                               "> env = grid2op.make('rte_case14_realistic')")

    # Reject unknown keyword arguments early, with an informative message.
    accepted_kwargs = ERR_MSG_KWARGS.keys() | {"dataset", "test"}
    for el in kwargs:
        if el not in accepted_kwargs:
            raise Grid2OpException(
                'The keyword argument "{}" you provided is invalid. Possible keyword '
                'arguments to create environments are "{}".'
                "".format(el, sorted(accepted_kwargs))
            )

    # Select how to create the environment:
    # Default with make from path
    make_from_path_fn = make_from_dataset_path

    # dataset arg is a valid path: load it
    if os.path.exists(dataset):
        # check if its a test environment
        if test:
            _add_to_name_tmp = _add_to_name
            _compat_glop_version_tmp = _compat_glop_version
            test_tmp = True
        else:
            # the "internal" arguments are only forwarded in "test" mode
            _add_to_name_tmp = ""
            _compat_glop_version_tmp = None
            test_tmp = False

        # Check if multimix from path
        if _aux_is_multimix(dataset) and not test_tmp:
            make_from_path_fn = _aux_make_multimix
        elif _aux_is_multimix(dataset) and test_tmp:

            def make_from_path_fn_(*args, **kwargs):
                # forward the logger and the experimental flag unless the
                # caller provided them explicitly
                if "logger" not in kwargs:
                    kwargs["logger"] = logger
                if "experimental_read_from_local_dir" not in kwargs:
                    kwargs[
                        "experimental_read_from_local_dir"
                    ] = experimental_read_from_local_dir
                return _aux_make_multimix(*args, test=True, **kwargs)

            make_from_path_fn = make_from_path_fn_

        if "logger" not in kwargs:
            kwargs["logger"] = logger
        if "experimental_read_from_local_dir" not in kwargs:
            kwargs[
                "experimental_read_from_local_dir"
            ] = experimental_read_from_local_dir
        return make_from_path_fn(
            dataset_path=dataset,
            _add_to_name=_add_to_name_tmp,
            _compat_glop_version=_compat_glop_version_tmp,
            **kwargs
        )

    # Not a path: get the dataset name and cache path
    dataset_name = _extract_ds_name(dataset)
    real_ds_path = os.path.join(
        grid2op.MakeEnv.PathUtils.DEFAULT_PATH_DATA, dataset_name
    )

    # Unknown dev env
    if test and dataset_name not in TEST_DEV_ENVS:
        raise Grid2OpException(_MAKE_UNKNOWN_ENV.format(dataset))

    # Known test env and test flag enabled
    if test:
        warnings.warn(_MAKE_DEV_ENV_WARN)
        # Warning for deprecated dev envs
        if not (
            dataset_name.startswith("rte")
            or dataset_name.startswith("l2rpn")
            or dataset_name.startswith("educ")
        ):
            warnings.warn(_MAKE_DEV_ENV_DEPRECATED_WARN.format(dataset_name))
        ds_path = TEST_DEV_ENVS[dataset_name]

        # Check if multimix from path
        if _aux_is_multimix(ds_path):

            def make_from_path_fn_(*args, **kwargs):
                if "logger" not in kwargs:
                    kwargs[
                        "logger"
                    ] = logger  # foward the logger if not present already
                return _aux_make_multimix(*args, test=True, **kwargs)

            make_from_path_fn = make_from_path_fn_

        return make_from_path_fn(
            dataset_path=ds_path,
            logger=logger,
            _add_to_name=_add_to_name,
            _compat_glop_version=_compat_glop_version,
            experimental_read_from_local_dir=experimental_read_from_local_dir,
            **kwargs
        )

    # Env directory is present in the DEFAULT_PATH_DATA
    if os.path.exists(real_ds_path):
        if _aux_is_multimix(real_ds_path):
            make_from_path_fn = _aux_make_multimix
        return make_from_path_fn(
            real_ds_path,
            logger=logger,
            experimental_read_from_local_dir=experimental_read_from_local_dir,
            **kwargs
        )

    # Env needs to be downloaded
    warnings.warn(_MAKE_FIRST_TIME_WARN.format(dataset_name))
    _create_path_folder(grid2op.MakeEnv.PathUtils.DEFAULT_PATH_DATA)
    url, ds_name_dl = _fecth_environments(dataset_name)
    _aux_download(
        url, dataset_name, grid2op.MakeEnv.PathUtils.DEFAULT_PATH_DATA, ds_name_dl
    )

    # Check if multimix from path (only knowable once the data is downloaded)
    if _aux_is_multimix(real_ds_path):
        make_from_path_fn = _aux_make_multimix
    return make_from_path_fn(
        dataset_path=real_ds_path,
        logger=logger,
        experimental_read_from_local_dir=experimental_read_from_local_dir,
        **kwargs
    )
| 17,361 | 35.706131 | 131 | py |
Grid2Op | Grid2Op-master/grid2op/MakeEnv/MakeFromPath.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import importlib.util
import numpy as np
import json
import warnings
from grid2op.Environment import Environment
from grid2op.Backend import Backend, PandaPowerBackend
from grid2op.Opponent.OpponentSpace import OpponentSpace
from grid2op.Parameters import Parameters
from grid2op.Chronics import ChronicsHandler, ChangeNothing, FromNPY, FromChronix2grid
from grid2op.Chronics import GridStateFromFile, GridValue
from grid2op.Action import BaseAction, DontAct
from grid2op.Exceptions import *
from grid2op.Observation import CompleteObservation, BaseObservation
from grid2op.Reward import BaseReward, L2RPNReward
from grid2op.Rules import BaseRules, DefaultRules
from grid2op.VoltageControler import ControlVoltageFromFile
from grid2op.Opponent import BaseOpponent, BaseActionBudget, NeverAttackBudget
from grid2op.operator_attention import LinearAttentionBudget
from grid2op.MakeEnv.get_default_aux import _get_default_aux
DIFFICULTY_NAME = "difficulty"
CHALLENGE_NAME = "competition"

# Error message associated with each keyword argument accepted when building an
# environment.  NOTE: `_check_kwargs` (and `make`) also use the *keys* of this
# dictionary as the whitelist of accepted keyword arguments, so every keyword
# actually read by `make_from_dataset_path` must appear here — including
# "data_feeding", "volagecontroler_class" (historical spelling) and
# "names_chronics_to_backend", which were previously missing and therefore
# always rejected by the validation step.
ERR_MSG_KWARGS = {
    "backend": 'The backend of the environment (keyword "backend") must be an instance of grid2op.Backend',
    "observation_class": 'The type of observation of the environment (keyword "observation_class")'
    " must be a subclass of grid2op.BaseObservation",
    "param": 'The parameters of the environment (keyword "param") must be an instance of grid2op.Parameters',
    "gamerules_class": 'The type of rules of the environment (keyword "gamerules_class")'
    " must be a subclass of grid2op.BaseRules",
    "reward_class": 'The type of reward in the environment (keyword "reward_class") must be a subclass of '
    "grid2op.BaseReward",
    "action_class": 'The type of action of the environment (keyword "action_class") must be a subclass of '
    "grid2op.BaseAction",
    "data_feeding_kwargs": "The argument to build the data generation process [chronics]"
    ' (keyword "data_feeding_kwargs") should be a dictionnary.',
    "chronics_class": 'The argument to build the data generation process [chronics] (keyword "chronics_class")'
    " should be a class that inherit grid2op.Chronics.GridValue.",
    "chronics_handler": 'The argument to build the data generation process [chronics] (keyword "data_feeding")'
    " should be a class that inherit grid2op.ChronicsHandler.ChronicsHandler.",
    # the keyword actually read by `make_from_dataset_path` is "data_feeding";
    # register it so `_check_kwargs` accepts it
    "data_feeding": 'The argument to build the data generation process [chronics] (keyword "data_feeding")'
    " should be a class that inherit grid2op.ChronicsHandler.ChronicsHandler.",
    "voltagecontroler_class": "The argument to build the online controler for chronics (keyword "
    '"volagecontroler_class")'
    " should be a class that inherit grid2op.VoltageControler.ControlVoltageFromFile.",
    # the keyword actually read by `make_from_dataset_path` is spelled
    # "volagecontroler_class" (historical typo); register it too
    "volagecontroler_class": "The argument to build the online controler for chronics (keyword "
    '"volagecontroler_class")'
    " should be a class that inherit grid2op.VoltageControler.ControlVoltageFromFile.",
    "names_chronics_to_grid": 'The converter between names (keyword "names_chronics_to_backend") '
    "should be a dictionnary.",
    # the keyword actually read by `make_from_dataset_path` is
    # "names_chronics_to_backend"; register it so `_check_kwargs` accepts it
    "names_chronics_to_backend": 'The converter between names (keyword "names_chronics_to_backend") '
    "should be a dictionnary.",
    "other_rewards": 'The argument to build the online controler for chronics (keyword "other_rewards") '
    "should be dictionary.",
    "chronics_path": 'The path where the data is located (keyword "chronics_path") should be a string.',
    "grid_path": 'The path where the grid is located (keyword "grid_path") should be a string.',
    "opponent_space_type": 'The argument used to build the opponent space (expects a type / class and not an instance of that type)',
    "opponent_action_class": 'The argument used to build the "opponent_action_class" should be a class that '
    'inherit from "BaseAction"',
    "opponent_class": 'The argument used to build the "opponent_class" should be a class that '
    'inherit from "BaseOpponent"',
    "opponent_attack_duration": "The number of time steps an attack from the opponent lasts",
    "opponent_attack_cooldown": "The number of time steps the opponent as to wait for an attack",
    "opponent_init_budget": 'The initial budget of the opponent "opponent_init_budget" should be a float',
    "opponent_budget_class": 'The opponent budget class ("opponent_budget_class") should derive from '
    '"BaseActionBudget".',
    "opponent_budget_per_ts": 'The increase of the opponent\'s budget ("opponent_budget_per_ts") should be a float.',
    "kwargs_opponent": "The extra kwargs argument used to properly initialized the opponent "
    '("kwargs_opponent") should '
    "be a dictionary.",
    "has_attention_budget": 'The "has_attention_budget" key word argument should be a flag indicating whether '
    "you want this feature or not. It should be a boolean.",
    "attention_budget_class": 'The attention budget class ("attention_budget_class") should derive from '
    '"LinearAttentionBudget".',
    "kwargs_attention_budget": "The extra kwargs argument used to properly initialized the attention budget "
    '("kwargs_attention_budget") should '
    "be a dictionary.",
    DIFFICULTY_NAME: "Unknown difficulty level {difficulty} for this environment. Authorized difficulties are "
    "{difficulties}",
    "kwargs_observation": "The extra kwargs argument used to properly initialized each observations "
    '("kwargs_observation") should '
    "be a dictionary.",
    "observation_backend_class": ("The class used to build the observation backend (used for Simulator "
                                  "obs.simulate and obs.get_forecasted_env). If provided, this should "
                                  "be a type / class and not an instance of this class. (by default it's None)"),
    "observation_backend_kwargs": ("key-word arguments to build the observation backend (used for Simulator, "
                                   " obs.simulate and obs.get_forecasted_env). This should be a dictionnary. (by default it's None)")
}

# conventional file / folder names inside an environment directory
NAME_CHRONICS_FOLDER = "chronics"
NAME_GRID_FILE = "grid.json"
NAME_GRID_LAYOUT_FILE = "grid_layout.json"
NAME_CONFIG_FILE = "config.py"
def _check_kwargs(kwargs):
for el in kwargs:
if el not in ERR_MSG_KWARGS.keys():
raise EnvError(
'Unknown keyword argument "{}" used to create an Environment. '
"No Environment will be created. "
"Accepted keyword arguments are {}".format(el, ERR_MSG_KWARGS.keys())
)
def _check_path(path, info):
if path is None or os.path.exists(path) is False:
raise EnvError("Cannot find {}. {}".format(path, info))
def make_from_dataset_path(
dataset_path="/",
logger=None,
experimental_read_from_local_dir=False,
_add_to_name="",
_compat_glop_version=None,
**kwargs,
) -> Environment:
"""
INTERNAL USE ONLY
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Prefer using the :func:`grid2op.make` function.
This function is a shortcut to rapidly create environments within the grid2op Framework. We don't
recommend using directly this function. Prefer using the :func:`make` function.
It mimic the ``gym.make`` function.
.. _Parameters-make-from-path:
Parameters
----------
dataset_path: ``str``
Path to the dataset folder
logger:
Something to pass to grid2op environment to be used as logger.
param: ``grid2op.Parameters.Parameters``, optional
Type of parameters used for the Environment. Parameters defines how the powergrid problem is cast into an
markov decision process, and some internal
backend: ``grid2op.Backend.Backend``, optional
The backend to use for the computation. If provided, it must be an instance of :class:`grid2op.Backend.Backend`.
action_class: ``type``, optional
Type of BaseAction the BaseAgent will be able to perform.
If provided, it must be a subclass of :class:`grid2op.BaseAction.BaseAction`
observation_class: ``type``, optional
Type of BaseObservation the BaseAgent will receive.
If provided, It must be a subclass of :class:`grid2op.BaseAction.BaseObservation`
reward_class: ``type``, optional
Type of reward signal the BaseAgent will receive.
If provided, It must be a subclass of :class:`grid2op.BaseReward.BaseReward`
other_rewards: ``dict``, optional
Used to additional information than the "info" returned value after a call to env.step.
gamerules_class: ``type``, optional
Type of "Rules" the BaseAgent need to comply with. Rules are here to model some operational constraints.
If provided, It must be a subclass of :class:`grid2op.RulesChecker.BaseRules`
data_feeding_kwargs: ``dict``, optional
Dictionnary that is used to build the `data_feeding` (chronics) objects.
chronics_class: ``type``, optional
The type of chronics that represents the dynamics of the Environment created. Usually they come from different
folders.
data_feeding: ``type``, optional
The type of chronics handler you want to use.
volagecontroler_class: ``type``, optional
The type of :class:`grid2op.VoltageControler.VoltageControler` to use, it defaults to
chronics_path: ``str``
Path where to look for the chronics dataset (optional)
grid_path: ``str``, optional
The path where the powergrid is located.
If provided it must be a string, and point to a valid file present on the hard drive.
difficulty: ``str``, optional
the difficulty level. If present it starts from "0" the "easiest" but least realistic mode. In the case of the
dataset being used in the l2rpn competition, the level used for the competition is "competition" ("hardest" and
most realistic mode). If multiple difficulty levels are available, the most realistic one
(the "hardest") is the default choice.
opponent_space_type: ``type``, optional
The type of opponent space to use. If provided, it must be a subclass of `OpponentSpace`.
opponent_action_class: ``type``, optional
The action class used for the opponent. The opponent will not be able to use action that are invalid with
the given action class provided. It defaults to :class:`grid2op.Action.DontAct` which forbid any type
of action possible.
opponent_class: ``type``, optional
The opponent class to use. The default class is :class:`grid2op.Opponent.BaseOpponent` which is a type
of opponents that does nothing.
opponent_init_budget: ``float``, optional
The initial budget of the opponent. It defaults to 0.0 which means the opponent cannot perform any action
if this is not modified.
opponent_attack_duration: ``int``, optional
The number of time steps an attack from the opponent lasts.
opponent_attack_cooldown: ``int``, optional
The number of time steps the opponent as to wait for an attack.
opponent_budget_per_ts: ``float``, optional
The increase of the opponent budget per time step. Each time step the opponent see its budget increase. It
defaults to 0.0.
opponent_budget_class: ``type``, optional
defaults: :class:`grid2op.Opponent.UnlimitedBudget`
kwargs_observation: ``dict``
Key words used to initialize the observation. For example, in case of NoisyObservation,
it might be the standar error for each underlying distribution. It might
be more complicated for other type of custom observations but should be
deep copiable.
Each observation will be initialized (by the observation_space) with:
.. code-block:: python
obs = observation_class(obs_env=self.obs_env,
action_helper=self.action_helper_env,
random_prng=self.space_prng,
**kwargs_observation # <- this kwargs is used here
)
observation_backend_class:
The class used to build the observation backend (used for Simulator
obs.simulate and obs.get_forecasted_env). If provided, this should
be a type / class and not an instance of this class. (by default it's None)
observation_backend_kwargs:
The key-word arguments to build the observation backend (used for Simulator,
obs.simulate and obs.get_forecasted_env). This should be a dictionnary.
(by default it's None)
_add_to_name:
Internal, used for test only. Do not attempt to modify under any circumstances.
_compat_glop_version:
Internal, used for test only. Do not attempt to modify under any circumstances.
# TODO update doc with attention budget
Returns
-------
env: :class:`grid2op.Environment.Environment`
The created environment with the given properties.
"""
# Compute and find root folder
_check_path(dataset_path, "Dataset root directory")
dataset_path_abs = os.path.abspath(dataset_path)
# Compute env name from directory name
name_env = os.path.split(dataset_path_abs)[1]
# Compute and find chronics folder
chronics_path = _get_default_aux(
"chronics_path",
kwargs,
defaultClassApp=str,
defaultinstance="",
msg_error=ERR_MSG_KWARGS["chronics_path"],
)
if chronics_path == "":
# if no "chronics_path" argument is provided, look into the "chronics" folder
chronics_path_abs = os.path.abspath(
os.path.join(dataset_path_abs, NAME_CHRONICS_FOLDER)
)
else:
# otherwise use it
chronics_path_abs = os.path.abspath(chronics_path)
exc_chronics = None
try:
_check_path(chronics_path_abs, "Dataset chronics folder")
except Exception as exc_:
exc_chronics = exc_
# Compute and find backend/grid file
grid_path = _get_default_aux(
"grid_path",
kwargs,
defaultClassApp=str,
defaultinstance="",
msg_error=ERR_MSG_KWARGS["grid_path"],
)
if grid_path == "":
grid_path_abs = os.path.abspath(os.path.join(dataset_path_abs, NAME_GRID_FILE))
else:
grid_path_abs = os.path.abspath(grid_path)
_check_path(grid_path_abs, "Dataset power flow solver configuration")
# Compute and find grid layout file
grid_layout_path_abs = os.path.abspath(
os.path.join(dataset_path_abs, NAME_GRID_LAYOUT_FILE)
)
try:
_check_path(grid_layout_path_abs, "Dataset grid layout")
except EnvError as exc_:
warnings.warn(
f'Impossible to load the coordinate of the substation with error: "{exc_}". Expect some issue '
f"if you attempt to plot the grid."
)
# Check provided config overrides are valid
_check_kwargs(kwargs)
# Compute and find config file
config_path_abs = os.path.abspath(os.path.join(dataset_path_abs, NAME_CONFIG_FILE))
_check_path(config_path_abs, "Dataset environment configuration")
# Read config file
try:
spec = importlib.util.spec_from_file_location("config.config", config_path_abs)
config_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(config_module)
config_data = config_module.config
except Exception as exc_:
print(exc_)
raise EnvError(
"Invalid dataset config file: {}".format(config_path_abs)
) from None
# Get graph layout
graph_layout = None
try:
with open(grid_layout_path_abs) as layout_fp:
graph_layout = json.load(layout_fp)
except Exception as exc_:
warnings.warn(
"Dataset {} doesn't have a valid graph layout. Expect some failures when attempting "
"to plot the grid. Error was: {}".format(config_path_abs, exc_)
)
# Get thermal limits
thermal_limits = None
if "thermal_limits" in config_data:
thermal_limits = config_data["thermal_limits"]
# Get chronics_to_backend
name_converter = None
if "names_chronics_to_grid" in config_data:
name_converter = config_data["names_chronics_to_grid"]
if name_converter is None:
name_converter = {}
is_none = True
else:
is_none = False
names_chronics_to_backend = _get_default_aux(
"names_chronics_to_backend",
kwargs,
defaultClassApp=dict,
defaultinstance=name_converter,
msg_error=ERR_MSG_KWARGS["names_chronics_to_grid"],
)
if is_none and names_chronics_to_backend == {}:
names_chronics_to_backend = None
# Get default backend class
backend_class_cfg = PandaPowerBackend
if "backend_class" in config_data and config_data["backend_class"] is not None:
backend_class_cfg = config_data["backend_class"]
## Create the backend, to compute the powerflow
backend = _get_default_aux(
"backend",
kwargs,
defaultClass=backend_class_cfg,
defaultClassApp=Backend,
msg_error=ERR_MSG_KWARGS["backend"],
)
# Get default observation class
observation_class_cfg = CompleteObservation
if (
"observation_class" in config_data
and config_data["observation_class"] is not None
):
observation_class_cfg = config_data["observation_class"]
## Setup the type of observation the agent will receive
observation_class = _get_default_aux(
"observation_class",
kwargs,
defaultClass=observation_class_cfg,
isclass=True,
defaultClassApp=BaseObservation,
msg_error=ERR_MSG_KWARGS["observation_class"],
)
## Create the parameters of the game, thermal limits threshold,
# simulate cascading failure, powerflow mode etc. (the gamification of the game)
if "param" in kwargs:
param = _get_default_aux(
"param",
kwargs,
defaultClass=Parameters,
defaultClassApp=Parameters,
msg_error=ERR_MSG_KWARGS["param"],
)
else:
# param is not in kwargs
param = Parameters()
json_path = os.path.join(dataset_path_abs, "difficulty_levels.json")
if os.path.exists(json_path):
with open(json_path, "r", encoding="utf-8") as f:
dict_ = json.load(f)
available_parameters = sorted(dict_.keys())
if DIFFICULTY_NAME in kwargs:
# player enters a difficulty levels
my_difficulty = kwargs[DIFFICULTY_NAME]
try:
my_difficulty = str(my_difficulty)
except Exception as exc_:
raise EnvError(
"Impossible to convert your difficulty into a valid string. Please make sure to "
'pass a string (eg "2") and not something else (eg. int(2)) as a difficulty.'
"Error was \n{}".format(exc_)
)
if my_difficulty in dict_:
param.init_from_dict(dict_[my_difficulty])
else:
raise EnvError(
ERR_MSG_KWARGS[DIFFICULTY_NAME].format(
difficulty=my_difficulty, difficulties=available_parameters
)
)
else:
# no difficulty name provided, i need to chose the most suited one
if CHALLENGE_NAME in dict_:
param.init_from_dict(dict_[CHALLENGE_NAME])
else:
# i chose the most difficult one
available_parameters_int = {}
for el in available_parameters:
try:
int_ = int(el)
available_parameters_int[int_] = el
except:
pass
max_ = np.max(list(available_parameters_int.keys()))
keys_ = available_parameters_int[max_]
param.init_from_dict(dict_[keys_])
else:
json_path = os.path.join(dataset_path_abs, "parameters.json")
if os.path.exists(json_path):
param.init_from_json(json_path)
# Get default rules class
rules_class_cfg = DefaultRules
if "rules_class" in config_data and config_data["rules_class"] is not None:
warnings.warn("You used the deprecated rules_class in your config. Please change its "
"name to 'gamerules_class' to mimic the grid2op.make kwargs.")
rules_class_cfg = config_data["rules_class"]
if "gamerules_class" in config_data and config_data["gamerules_class"] is not None:
rules_class_cfg = config_data["gamerules_class"]
## Create the rules of the game (mimic the operationnal constraints)
gamerules_class = _get_default_aux(
"gamerules_class",
kwargs,
defaultClass=rules_class_cfg,
defaultClassApp=BaseRules,
msg_error=ERR_MSG_KWARGS["gamerules_class"],
isclass=None,
)
# Get default reward class
reward_class_cfg = L2RPNReward
if "reward_class" in config_data and config_data["reward_class"] is not None:
reward_class_cfg = config_data["reward_class"]
## Setup the reward the agent will receive
reward_class = _get_default_aux(
"reward_class",
kwargs,
defaultClass=reward_class_cfg,
defaultClassApp=BaseReward,
msg_error=ERR_MSG_KWARGS["reward_class"],
isclass=None,
)
# Get default BaseAction class
action_class_cfg = BaseAction
if "action_class" in config_data and config_data["action_class"] is not None:
action_class_cfg = config_data["action_class"]
## Setup the type of action the BaseAgent can perform
action_class = _get_default_aux(
"action_class",
kwargs,
defaultClass=action_class_cfg,
defaultClassApp=BaseAction,
msg_error=ERR_MSG_KWARGS["action_class"],
isclass=True,
)
# Get default Voltage class
voltage_class_cfg = ControlVoltageFromFile
if "voltage_class" in config_data and config_data["voltage_class"] is not None:
voltage_class_cfg = config_data["voltage_class"]
### Create controler for voltages
volagecontroler_class = _get_default_aux(
"volagecontroler_class",
kwargs,
defaultClassApp=voltage_class_cfg,
defaultClass=ControlVoltageFromFile,
msg_error=ERR_MSG_KWARGS["voltagecontroler_class"],
isclass=True,
)
# Get default Chronics class
chronics_class_cfg = ChangeNothing
if "chronics_class" in config_data and config_data["chronics_class"] is not None:
chronics_class_cfg = config_data["chronics_class"]
# Get default Grid class
grid_value_class_cfg = GridStateFromFile
if (
"grid_value_class" in config_data
and config_data["grid_value_class"] is not None
):
grid_value_class_cfg = config_data["grid_value_class"]
## the chronics to use
### the arguments used to build the data, note that the arguments must be compatible with the chronics class
default_chronics_kwargs = {
"path": chronics_path_abs,
"chronicsClass": chronics_class_cfg,
# "gridvalueClass": grid_value_class_cfg,
}
if "data_feeding_kwargs" in config_data and config_data["data_feeding_kwargs"] is not None:
dfkwargs_cfg = config_data["data_feeding_kwargs"]
for el in dfkwargs_cfg:
default_chronics_kwargs[el] = dfkwargs_cfg[el]
data_feeding_kwargs = _get_default_aux(
"data_feeding_kwargs",
kwargs,
defaultClassApp=dict,
defaultinstance=default_chronics_kwargs,
msg_error=ERR_MSG_KWARGS["data_feeding_kwargs"],
)
for el in default_chronics_kwargs:
if el not in data_feeding_kwargs:
data_feeding_kwargs[el] = default_chronics_kwargs[el]
### the chronics generator
chronics_class_used = _get_default_aux(
"chronics_class",
kwargs,
defaultClassApp=GridValue,
defaultClass=data_feeding_kwargs["chronicsClass"],
msg_error=ERR_MSG_KWARGS["chronics_class"],
isclass=True,
)
if (
(chronics_class_used != ChangeNothing) and (chronics_class_used != FromNPY) and (chronics_class_used != FromChronix2grid)
) and exc_chronics is not None:
raise EnvError(
f"Impossible to find the chronics for your environment. Please make sure to provide "
f'a folder "{NAME_CHRONICS_FOLDER}" within your environment folder.'
)
data_feeding_kwargs["chronicsClass"] = chronics_class_used
if chronics_class_used.MULTI_CHRONICS:
# add the default "gridvalueClass" in case of multi chronics and if the
# parameters is not given in the "make" function but present in the config file
if "gridvalueClass" not in data_feeding_kwargs:
data_feeding_kwargs["gridvalueClass"] = grid_value_class_cfg
# now build the chronics handler
data_feeding = _get_default_aux(
"data_feeding",
kwargs,
defaultClassApp=ChronicsHandler,
defaultClass=ChronicsHandler,
build_kwargs=data_feeding_kwargs,
msg_error=ERR_MSG_KWARGS["chronics_handler"],
)
### other rewards
other_rewards_cfg = {}
if "other_rewards" in config_data and config_data["other_rewards"] is not None:
other_rewards_cfg = config_data["other_rewards"]
other_rewards = _get_default_aux(
"other_rewards",
kwargs,
defaultClassApp=dict,
defaultinstance={},
msg_error=ERR_MSG_KWARGS["other_rewards"],
isclass=False,
)
for k in other_rewards_cfg:
if k not in other_rewards:
other_rewards[k] = other_rewards_cfg[k]
# Opponent
opponent_space_type_cfg = OpponentSpace
if "opponent_space_type" in config_data and config_data["opponent_space_type"] is not None:
opponent_space_type_cfg = config_data["opponent_space_type"]
opponent_space_type = _get_default_aux(
"opponent_space_type",
kwargs,
defaultClassApp=OpponentSpace,
defaultClass=opponent_space_type_cfg,
msg_error=ERR_MSG_KWARGS["opponent_space_type"],
isclass=True,
)
chronics_class_cfg = DontAct
if (
"opponent_action_class" in config_data
and config_data["opponent_action_class"] is not None
):
chronics_class_cfg = config_data["opponent_action_class"]
opponent_action_class = _get_default_aux(
"opponent_action_class",
kwargs,
defaultClassApp=BaseAction,
defaultClass=chronics_class_cfg,
msg_error=ERR_MSG_KWARGS["opponent_action_class"],
isclass=True,
)
opponent_class_cfg = BaseOpponent
if "opponent_class" in config_data and config_data["opponent_class"] is not None:
opponent_class_cfg = config_data["opponent_class"]
opponent_class = _get_default_aux(
"opponent_class",
kwargs,
defaultClassApp=BaseOpponent,
defaultClass=opponent_class_cfg,
msg_error=ERR_MSG_KWARGS["opponent_class"],
isclass=True,
)
opponent_budget_class_cfg = NeverAttackBudget
if (
"opponent_budget_class" in config_data
and config_data["opponent_budget_class"] is not None
):
opponent_budget_class_cfg = config_data["opponent_budget_class"]
opponent_budget_class = _get_default_aux(
"opponent_budget_class",
kwargs,
defaultClassApp=BaseActionBudget,
defaultClass=opponent_budget_class_cfg,
msg_error=ERR_MSG_KWARGS["opponent_budget_class"],
isclass=True,
)
opponent_init_budget_cfg = 0.0
if (
"opponent_init_budget" in config_data
and config_data["opponent_init_budget"] is not None
):
opponent_init_budget_cfg = config_data["opponent_init_budget"]
opponent_init_budget = _get_default_aux(
"opponent_init_budget",
kwargs,
defaultClassApp=float,
defaultinstance=opponent_init_budget_cfg,
msg_error=ERR_MSG_KWARGS["opponent_init_budget"],
isclass=False,
)
opponent_budget_per_ts_cfg = 0.0
if (
"opponent_budget_per_ts" in config_data
and config_data["opponent_budget_per_ts"] is not None
):
opponent_budget_per_ts_cfg = config_data["opponent_budget_per_ts"]
opponent_budget_per_ts = _get_default_aux(
"opponent_budget_per_ts",
kwargs,
defaultClassApp=float,
defaultinstance=opponent_budget_per_ts_cfg,
msg_error=ERR_MSG_KWARGS["opponent_budget_per_ts"],
isclass=False,
)
opponent_attack_duration_cfg = 0
if (
"opponent_attack_duration" in config_data
and config_data["opponent_attack_duration"] is not None
):
opponent_attack_duration_cfg = config_data["opponent_attack_duration"]
opponent_attack_duration = _get_default_aux(
"opponent_attack_duration",
kwargs,
defaultClassApp=int,
defaultinstance=opponent_attack_duration_cfg,
msg_error=ERR_MSG_KWARGS["opponent_attack_duration"],
isclass=False,
)
opponent_attack_cooldown_cfg = 99999
if (
"opponent_attack_cooldown" in config_data
and config_data["opponent_attack_cooldown"] is not None
):
opponent_attack_cooldown_cfg = config_data["opponent_attack_cooldown"]
opponent_attack_cooldown = _get_default_aux(
"opponent_attack_cooldown",
kwargs,
defaultClassApp=int,
defaultinstance=opponent_attack_cooldown_cfg,
msg_error=ERR_MSG_KWARGS["opponent_attack_cooldown"],
isclass=False,
)
kwargs_opponent_cfg = {}
if "kwargs_opponent" in config_data and config_data["kwargs_opponent"] is not None:
kwargs_opponent_cfg = config_data["kwargs_opponent"]
kwargs_opponent = _get_default_aux(
"kwargs_opponent",
kwargs,
defaultClassApp=dict,
defaultinstance=kwargs_opponent_cfg,
msg_error=ERR_MSG_KWARGS["kwargs_opponent"],
isclass=False,
)
# attention budget
has_attention_budget_cfg = False
if (
"has_attention_budget" in config_data
and config_data["has_attention_budget"] is not None
):
has_attention_budget_cfg = config_data["has_attention_budget"]
has_attention_budget = _get_default_aux(
"has_attention_budget",
kwargs,
defaultClassApp=bool,
defaultinstance=has_attention_budget_cfg,
msg_error=ERR_MSG_KWARGS["has_attention_budget"],
isclass=False,
)
attention_budget_class_cfg = LinearAttentionBudget
if (
"attention_budget_class" in config_data
and config_data["attention_budget_class"] is not None
):
attention_budget_class_cfg = config_data["attention_budget_class"]
attention_budget_class = _get_default_aux(
"attention_budget_class",
kwargs,
defaultClassApp=LinearAttentionBudget,
defaultClass=attention_budget_class_cfg,
msg_error=ERR_MSG_KWARGS["attention_budget_class"],
isclass=True,
)
kwargs_attention_budget_cfg = {}
if (
"kwargs_attention_budget" in config_data
and config_data["kwargs_attention_budget"] is not None
):
kwargs_attention_budget_cfg = config_data["kwargs_attention_budget"]
kwargs_attention_budget = _get_default_aux(
"kwargs_attention_budget",
kwargs,
defaultClassApp=dict,
defaultinstance=kwargs_attention_budget_cfg,
msg_error=ERR_MSG_KWARGS["kwargs_attention_budget"],
isclass=False,
)
if experimental_read_from_local_dir:
sys_path = os.path.join(os.path.split(grid_path_abs)[0], "_grid2op_classes")
if not os.path.exists(sys_path):
raise RuntimeError(
"Attempting to load the grid classes from the env path. Yet the directory "
"where they should be placed does not exists. Did you call `env.generate_classes()` "
"BEFORE creating an environment with `experimental_read_from_local_dir=True` ?"
)
if not os.path.isdir(sys_path) or not os.path.exists(
os.path.join(sys_path, "__init__.py")
):
raise RuntimeError(
f"Impossible to load the classes from the env path. There is something that is "
f"not a directory and that is called `_grid2op_classes`. "
f'Please remove "{sys_path}" and call `env.generate_classes()` where env is an '
f"environment created with `experimental_read_from_local_dir=False` (default)"
)
# observation key word arguments
kwargs_observation = _get_default_aux(
"kwargs_observation",
kwargs,
defaultClassApp=dict,
defaultinstance={},
msg_error=ERR_MSG_KWARGS["kwargs_observation"],
isclass=False,
)
# backend for the observation
observation_backend_class_cfg = Backend
if (
"observation_backend_class" in config_data
and config_data["observation_backend_class"] is not None
):
observation_backend_class_cfg = config_data["observation_backend_class"]
observation_backend_class = _get_default_aux(
"observation_backend_class",
kwargs,
defaultClass=observation_backend_class_cfg,
defaultClassApp=Backend,
msg_error=ERR_MSG_KWARGS["observation_backend_class"],
isclass=True,
)
if observation_backend_class is Backend:
# in this case nothing is provided neither in the call to "make"
# nor in the config
observation_backend_class = None
# kwargs for observation backend
observation_backend_kwargs_cfg_ = {"null": True}
# None and {} have specific meanings, so I "hack" it
# to make the difference between "observation_backend_kwargs is not in config nor in
# the kwargs" and "observation_backend_kwargs is {} in the config or in the kwargs"
observation_backend_kwargs_cfg = observation_backend_kwargs_cfg_
if (
"observation_backend_kwargs" in config_data
and config_data["observation_backend_kwargs"] is not None
):
observation_backend_kwargs_cfg = config_data["observation_backend_kwargs"]
observation_backend_kwargs = _get_default_aux(
"observation_backend_kwargs",
kwargs,
defaultClassApp=dict,
defaultinstance=observation_backend_kwargs_cfg,
msg_error=ERR_MSG_KWARGS["kwargs_observation"],
isclass=False,
)
if observation_backend_kwargs is observation_backend_kwargs_cfg_:
observation_backend_kwargs = None
# Finally instantiate env from config & overrides
env = Environment(
init_env_path=os.path.abspath(dataset_path),
init_grid_path=grid_path_abs,
chronics_handler=data_feeding,
backend=backend,
parameters=param,
name=name_env + _add_to_name,
names_chronics_to_backend=names_chronics_to_backend,
actionClass=action_class,
observationClass=observation_class,
rewardClass=reward_class,
legalActClass=gamerules_class,
voltagecontrolerClass=volagecontroler_class,
other_rewards=other_rewards,
opponent_space_type=opponent_space_type,
opponent_action_class=opponent_action_class,
opponent_class=opponent_class,
opponent_init_budget=opponent_init_budget,
opponent_attack_duration=opponent_attack_duration,
opponent_attack_cooldown=opponent_attack_cooldown,
opponent_budget_per_ts=opponent_budget_per_ts,
opponent_budget_class=opponent_budget_class,
kwargs_opponent=kwargs_opponent,
has_attention_budget=has_attention_budget,
attention_budget_cls=attention_budget_class,
kwargs_attention_budget=kwargs_attention_budget,
logger=logger,
_compat_glop_version=_compat_glop_version,
_read_from_local_dir=experimental_read_from_local_dir,
kwargs_observation=kwargs_observation,
observation_bk_class=observation_backend_class,
observation_bk_kwargs=observation_backend_kwargs,
)
# Update the thermal limit if any
if thermal_limits is not None:
env.set_thermal_limit(thermal_limits)
# Set graph layout if not None and not an empty dict
if graph_layout is not None and graph_layout:
env.attach_layout(graph_layout)
return env
| 36,932 | 40.358343 | 133 | py |
Grid2Op | Grid2Op-master/grid2op/MakeEnv/MakeOld.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import warnings
import pkg_resources
from grid2op.Environment import Environment
from grid2op.Backend import Backend, PandaPowerBackend
from grid2op.Parameters import Parameters
from grid2op.Chronics import ChronicsHandler, Multifolder, ChangeNothing
from grid2op.Chronics import (
GridStateFromFile,
GridStateFromFileWithForecasts,
GridValue,
)
from grid2op.Action import (
BaseAction,
TopologyAction,
TopologyAndDispatchAction,
DontAct,
)
from grid2op.Exceptions import *
from grid2op.Observation import CompleteObservation, BaseObservation
from grid2op.Reward import BaseReward, L2RPNReward, RedispReward
from grid2op.Rules import BaseRules, AlwaysLegal, DefaultRules
from grid2op.VoltageControler import ControlVoltageFromFile
from grid2op.Opponent import BaseOpponent
from grid2op.Chronics.Settings_L2RPN2019 import (
L2RPN2019_CASEFILE,
L2RPN2019_DICT_NAMES,
ReadPypowNetData,
CASE_14_L2RPN2019_LAYOUT,
)
from grid2op.Chronics.Settings_5busExample import (
EXAMPLE_CHRONICSPATH,
EXAMPLE_CASEFILE,
CASE_5_GRAPH_LAYOUT,
)
from grid2op.Chronics.Settings_case14_test import (
case14_test_CASEFILE,
case14_test_CHRONICSPATH,
case14_test_TH_LIM,
)
from grid2op.Chronics.Settings_case14_redisp import (
case14_redisp_CASEFILE,
case14_redisp_CHRONICSPATH,
case14_redisp_TH_LIM,
)
from grid2op.Chronics.Settings_case14_realistic import (
case14_real_CASEFILE,
case14_real_CHRONICSPATH,
case14_real_TH_LIM,
)
from grid2op.MakeEnv.get_default_aux import _get_default_aux
# folder shipped with the grid2op package that contains the bundled example grids
data_folder = pkg_resources.resource_filename("grid2op", "data")
# path to the IEEE case 14 grid description (pandapower json format), default grid of make_old
CASE_14_FILE = os.path.abspath(
    os.path.join(data_folder, "rte_case14_redisp", "grid.json")
)
# path to one single chronics folder (episode "0") of the case 14 grid
# NOTE(review): name contains a typo ("FODLER"), kept as-is for backward compatibility
CHRONICS_FODLER = os.path.abspath(
    os.path.join(data_folder, "rte_case14_redisp", "chronics", "0")
)
# root folder containing all the chronics (multi episode) of the case 14 grid
# NOTE(review): name contains a typo ("MLUTIEPISODE"), kept as-is for backward compatibility
CHRONICS_MLUTIEPISODE = os.path.join(data_folder, "rte_case14_redisp", "chronics")
# mapping between the element names used in the chronics files and the names expected
# by the backend for the "case14" grids: one sub-dictionary per element type
# ("loads", "lines", "prods"), each mapping "name in the chronics" -> "name in the backend"
NAMES_CHRONICS_TO_BACKEND = {
    "loads": {
        "2_C-10.61": "load_1_0",
        "3_C151.15": "load_2_1",
        "14_C63.6": "load_13_2",
        "4_C-9.47": "load_3_3",
        "5_C201.84": "load_4_4",
        "6_C-6.27": "load_5_5",
        "9_C130.49": "load_8_6",
        "10_C228.66": "load_9_7",
        "11_C-138.89": "load_10_8",
        "12_C-27.88": "load_11_9",
        "13_C-13.33": "load_12_10",
    },
    "lines": {
        "1_2_1": "0_1_0",
        "1_5_2": "0_4_1",
        "9_10_16": "8_9_2",
        "9_14_17": "8_13_3",
        "10_11_18": "9_10_4",
        "12_13_19": "11_12_5",
        "13_14_20": "12_13_6",
        "2_3_3": "1_2_7",
        "2_4_4": "1_3_8",
        "2_5_5": "1_4_9",
        "3_4_6": "2_3_10",
        "4_5_7": "3_4_11",
        "6_11_11": "5_10_12",
        "6_12_12": "5_11_13",
        "6_13_13": "5_12_14",
        "4_7_8": "3_6_15",
        "4_9_9": "3_8_16",
        "5_6_10": "4_5_17",
        "7_8_14": "6_7_18",
        "7_9_15": "6_8_19",
    },
    "prods": {
        "1_G137.1": "gen_0_4",
        "3_G36.31": "gen_2_1",
        "6_G63.29": "gen_5_2",
        "2_G-56.47": "gen_1_0",
        "8_G40.43": "gen_7_3",
    },
}
# set of keyword arguments accepted by :func:`make_old`; any other keyword
# raises an :class:`grid2op.Exceptions.EnvError`
ALLOWED_KWARGS_MAKE = {
    "param",
    "backend",
    "observation_class",
    "gamerules_class",
    "chronics_path",
    "reward_class",
    "action_class",
    "grid_path",
    "names_chronics_to_backend",
    "data_feeding_kwargs",
    "chronics_class",
    # "data_feeding" is documented in make_old and read from kwargs there, but it
    # was missing from this set, so passing it always raised EnvError before use
    "data_feeding",
    "volagecontroler_class",
    "other_rewards",
    "opponent_action_class",
    "opponent_class",
    "opponent_init_budget",
}
# error messages raised when a keyword argument of make_old has an invalid type;
# keys match the keyword argument names (except "data_feeding" whose message is
# stored under "chronics_handler")
ERR_MSG_KWARGS = {
    "backend": 'The backend of the environment (keyword "backend") must be an instance of grid2op.Backend',
    "observation_class": 'The type of observation of the environment (keyword "observation_class")'
    " must be a subclass of grid2op.BaseObservation",
    "param": 'The parameters of the environment (keyword "param") must be an instance of grid2op.Parameters',
    "gamerules_class": 'The type of rules of the environment (keyword "gamerules_class")'
    " must be a subclass of grid2op.BaseRules",
    "reward_class": 'The type of reward in the environment (keyword "reward_class") must be a subclass of grid2op.BaseReward',
    "action_class": 'The type of action of the environment (keyword "action_class") must be a subclass of grid2op.BaseAction',
    "data_feeding_kwargs": "The argument to build the data generation process [chronics]"
    ' (keyword "data_feeding_kwargs") should be a dictionnary.',
    "chronics_class": 'The argument to build the data generation process [chronics] (keyword "chronics_class")'
    " should be a class that inherit grid2op.Chronics.GridValue.",
    "chronics_handler": 'The argument to build the data generation process [chronics] (keyword "data_feeding")'
    " should be a class that inherit grid2op.ChronicsHandler.ChronicsHandler.",
    "voltagecontroler_class": 'The argument to build the online controler for chronics (keyword "volagecontroler_class")'
    " should be a class that inherit grid2op.VoltageControler.ControlVoltageFromFile.",
    "names_chronics_to_grid": 'The converter between names (keyword "names_chronics_to_backend") should be a dictionnary.',
    "other_rewards": 'The argument to build the online controler for chronics (keyword "other_rewards") '
    "should be dictionnary.",
    "opponent_action_class": 'The argument used to build the "opponent_action_class" should be a class that '
    'inherit from "BaseAction"',
    "opponent_class": 'The argument used to build the "opponent_class" should be a class that '
    'inherit from "BaseOpponent"',
    "opponent_init_budget": 'The initial budget of the opponent "opponent_init_budget" should be a float',
    "chronics_path": 'The path where the data is located (keyword "chronics_path") should be a string.',
    "grid_path": 'The path where the grid is located (keyword "grid_path") should be a string.',
}
def make_old(name_env="case14_realistic", **kwargs):
    """
    INTERNAL USE ONLY

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    (DEPRECATED) This function is a shortcut to rapidly create some (pre defined) environments within the grid2op Framework.

    For now, only the environment corresponding to the IEEE "case14" powergrid, with some pre defined chronics
    is available.

    Other environments, with different powergrids will be made available in the future.

    It mimic the ``gym.make`` function.

    Parameters
    ----------
    name_env: ``str``
        Name of the environment to create.

    param: ``grid2op.Parameters.Parameters``, optional
        Type of parameters used for the Environment. Parameters defines how the powergrid problem is cast into an
        markov decision process, and some internal

    backend: ``grid2op.Backend.Backend``, optional
        The backend to use for the computation. If provided, it must be an instance of :class:`grid2op.Backend.Backend`.

    action_class: ``type``, optional
        Type of BaseAction the BaseAgent will be able to perform.
        If provided, it must be a subclass of :class:`grid2op.BaseAction.BaseAction`

    observation_class: ``type``, optional
        Type of BaseObservation the BaseAgent will receive.
        If provided, It must be a subclass of :class:`grid2op.BaseAction.BaseObservation`

    reward_class: ``type``, optional
        Type of reward signal the BaseAgent will receive.
        If provided, It must be a subclass of :class:`grid2op.BaseReward.BaseReward`

    gamerules_class: ``type``, optional
        Type of "Rules" the BaseAgent need to comply with. Rules are here to model some operational constraints.
        If provided, It must be a subclass of :class:`grid2op.RulesChecker.BaseRules`

    grid_path: ``str``, optional
        The path where the powergrid is located.
        If provided it must be a string, and point to a valid file present on the hard drive.

    data_feeding_kwargs: ``dict``, optional
        Dictionnary that is used to build the `data_feeding` (chronics) objects.

    chronics_class: ``type``, optional
        The type of chronics that represents the dynamics of the Environment created. Usually they come from different
        folders.

    data_feeding: ``type``, optional
        The type of chronics handler you want to use.

    chronics_path: ``str``
        Path where to look for the chronics dataset.

    volagecontroler_class: ``type``, optional
        The type of :class:`grid2op.VoltageControler.VoltageControler` to use, it defaults to

    other_rewards: ``dict``, optional
        Dictionnary with other rewards we might want to look at at during training. It is given as a dictionnary with
        keys the name of the reward, and the values a class representing the new variables.

    Returns
    -------
    env: :class:`grid2op.Environment.Environment`
        The created environment.
    """
    warnings.warn("make_old is deprecated. Please consider using make instead")
    # reject any keyword argument that is not explicitly supported
    for el in kwargs:
        if el not in ALLOWED_KWARGS_MAKE:
            raise EnvError(
                'Unknown keyword argument "{}" used to create an Environement. '
                "No Environment will be created. "
                "Accepted keyword arguments are {}".format(
                    el, sorted(ALLOWED_KWARGS_MAKE)
                )
            )

    # first extract parameters that do not depend on the powergrid

    ## the parameters of the game, thermal limits threshold, simulate cascading failure, powerflow mode etc. (the gamification of the game)
    msg_error = 'The parameters of the environment (keyword "param") must be an instance of grid2op.Parameters'
    param = _get_default_aux(
        "param",
        kwargs,
        defaultClass=Parameters,
        defaultClassApp=Parameters,
        msg_error=msg_error,
    )

    ## the backend used to compute the powerflow
    msg_error = 'The backend of the environment (keyword "backend") must be an instance of grid2op.Backend'
    backend = _get_default_aux(
        "backend",
        kwargs,
        defaultClass=PandaPowerBackend,
        defaultClassApp=Backend,
        msg_error=msg_error,
    )

    ## type of observation the agent will receive
    msg_error = (
        'The type of observation of the environment (keyword "observation_class")'
    )
    msg_error += " must be a subclass of grid2op.BaseObservation"
    observation_class = _get_default_aux(
        "observation_class",
        kwargs,
        defaultClass=CompleteObservation,
        defaultClassApp=BaseObservation,
        msg_error=msg_error,
        isclass=True,
    )

    ## the path where to look for the chronics (time series of loads / productions)
    msg_error = 'The path where the data is located (keyword "chronics_path") should be a string.'
    chronics_path = _get_default_aux(
        "chronics_path",
        kwargs,
        defaultClassApp=str,
        defaultinstance="",
        msg_error=msg_error,
    )

    # build the default parameters for each case file
    data_feeding_default_class = ChronicsHandler
    gamerules_class = AlwaysLegal
    defaultinstance_chronics_kwargs = {}
    if name_env.lower() == "case14_fromfile":
        default_grid_path = CASE_14_FILE
        if chronics_path == "":
            chronics_path = CHRONICS_MLUTIEPISODE

        defaultinstance_chronics_kwargs = {
            "chronicsClass": Multifolder,
            "path": chronics_path,
            "gridvalueClass": GridStateFromFileWithForecasts,
        }
        default_name_converter = {}
        default_action_class = TopologyAction
        default_reward_class = L2RPNReward
    elif name_env.lower() == "l2rpn_2019":
        warnings.warn(
            'You are using the "l2rpn_2019" environmnet, which will be remove from this package in '
            'future versions. Please use "make_new" to download the real l2rpn dataset.'
        )
        if chronics_path == "":
            msg_error = "Default chronics (provided in this package) cannot be used with the environment "
            msg_error += (
                '"l2rpn_2019". Please download the training data using either the method described in'
                "Grid2Op/l2rpn_2019/README.md (if you downloaded the github repository) or\n"
                "running the command line script (in a terminal):\n"
                # raw string: the windows-style path must keep its literal backslashes
                # (non-raw "\W", "\Y", ... are invalid escape sequences)
                r'python -m grid2op.download --name "l2rpn_2019" --path_save PATH\WHERE\YOU\WANT\TO\DOWNLOAD'
            )
            raise EnvError(msg_error)

        default_grid_path = L2RPN2019_CASEFILE
        defaultinstance_chronics_kwargs = {
            "chronicsClass": Multifolder,
            "path": chronics_path,
            "gridvalueClass": ReadPypowNetData,
        }
        default_name_converter = L2RPN2019_DICT_NAMES
        default_action_class = TopologyAction
        default_reward_class = L2RPNReward
        gamerules_class = DefaultRules
    elif name_env.lower() == "case5_example":
        if chronics_path == "":
            chronics_path = EXAMPLE_CHRONICSPATH

        default_grid_path = EXAMPLE_CASEFILE
        defaultinstance_chronics_kwargs = {
            "chronicsClass": Multifolder,
            "path": chronics_path,
            "gridvalueClass": GridStateFromFileWithForecasts,
        }
        default_name_converter = {}
        default_action_class = TopologyAction
        default_reward_class = L2RPNReward
        gamerules_class = DefaultRules
    elif name_env.lower() == "case14_test":
        if chronics_path == "":
            chronics_path = case14_test_CHRONICSPATH
            warnings.warn(
                'Your are using a case designed for testing purpose. Consider using the "case14_redisp" '
                "environment instead."
            )

        default_grid_path = case14_test_CASEFILE
        defaultinstance_chronics_kwargs = {
            "chronicsClass": Multifolder,
            "path": chronics_path,
            "gridvalueClass": GridStateFromFileWithForecasts,
        }
        default_name_converter = {}
        default_action_class = TopologyAndDispatchAction
        default_reward_class = RedispReward
        gamerules_class = DefaultRules
    elif name_env.lower() == "case14_redisp":
        if chronics_path == "":
            chronics_path = case14_redisp_CHRONICSPATH
            warnings.warn(
                "Your are using only 2 chronics for this environment. More can be download by running, "
                "from a command line:\n"
                'python -m grid2op.download --name "case14_redisp" '
                # raw string: keep the literal backslashes of the windows-style path
                r"--path_save PATH\WHERE\YOU\WANT\TO\DOWNLOAD\DATA"
            )

        default_grid_path = case14_redisp_CASEFILE
        defaultinstance_chronics_kwargs = {
            "chronicsClass": Multifolder,
            "path": chronics_path,
            "gridvalueClass": GridStateFromFileWithForecasts,
        }
        default_name_converter = {}
        default_action_class = TopologyAndDispatchAction
        default_reward_class = RedispReward
        gamerules_class = DefaultRules
    elif name_env.lower() == "case14_realistic":
        if chronics_path == "":
            chronics_path = case14_real_CHRONICSPATH
            warnings.warn(
                "Your are using only 2 chronics for this environment. More can be download by running, "
                "from a command line:\n"
                'python -m grid2op.download --name "case14_realistic" '
                # raw string: keep the literal backslashes of the windows-style path
                r"--path_save PATH\WHERE\YOU\WANT\TO\DOWNLOAD\DATA"
            )

        default_grid_path = case14_real_CASEFILE
        defaultinstance_chronics_kwargs = {
            "chronicsClass": Multifolder,
            "path": chronics_path,
            "gridvalueClass": GridStateFromFileWithForecasts,
        }
        default_name_converter = {}
        default_action_class = TopologyAndDispatchAction
        default_reward_class = RedispReward
        gamerules_class = DefaultRules
    elif name_env.lower() == "blank":
        default_name_converter = {}
        default_grid_path = ""
        default_action_class = TopologyAction
        default_reward_class = L2RPNReward
        gamerules_class = AlwaysLegal
    else:
        raise UnknownEnv(
            'Unknown Environment named "{}". Current known environments are "case14_fromfile" '
            '(default), "case5_example", "case14_redisp", "case14_realistic" '
            'and "l2rpn_2019"'.format(name_env)
        )

    if "chronicsClass" not in defaultinstance_chronics_kwargs:
        defaultinstance_chronics_kwargs["chronicsClass"] = ChangeNothing

    # extract powergrid dependant parameters

    ## type of rules of the game (mimic the operationnal constraints)
    msg_error = 'The type of rules of the environment (keyword "gamerules_class")'
    msg_error += " must be a subclass of grid2op.BaseRules"
    gamerules_class = _get_default_aux(
        "gamerules_class",
        kwargs,
        defaultClass=gamerules_class,
        defaultClassApp=BaseRules,
        msg_error=msg_error,
        isclass=True,
    )

    ## type of reward the agent will receive
    # NOTE: the message previously (wrongly) talked about the "type of observation"
    msg_error = 'The type of reward of the environment (keyword "reward_class")'
    msg_error += " must be a subclass of grid2op.BaseReward"
    reward_class = _get_default_aux(
        "reward_class",
        kwargs,
        defaultClass=default_reward_class,
        defaultClassApp=BaseReward,
        msg_error=msg_error,
        isclass=True,
    )

    ## type of action the BaseAgent can perform
    msg_error = 'The type of action of the environment (keyword "action_class") must be a subclass of grid2op.BaseAction'
    action_class = _get_default_aux(
        "action_class",
        kwargs,
        defaultClass=default_action_class,
        defaultClassApp=BaseAction,
        msg_error=msg_error,
        isclass=True,
    )

    ## the powergrid path to use
    msg_error = (
        'The path where the grid is located (keyword "grid_path") should be a string.'
    )
    grid_path = _get_default_aux(
        "grid_path",
        kwargs,
        defaultClassApp=str,
        defaultinstance=default_grid_path,
        msg_error=msg_error,
    )

    ## the converter between the names used in the chronics and the names of the backend
    msg_error = 'The converter between names (keyword "names_chronics_to_backend") should be a dictionnary.'
    names_chronics_to_backend = _get_default_aux(
        "names_chronics_to_backend",
        kwargs,
        defaultClassApp=dict,
        defaultinstance=default_name_converter,
        msg_error=msg_error,
    )

    ## the chronics to use
    ### the arguments used to build the data, note that the arguments must be compatible with the chronics class
    msg_error = 'The argument to build the data generation process [chronics] (keyword "data_feeding_kwargs")'
    msg_error += " should be a dictionnary."
    data_feeding_kwargs = _get_default_aux(
        "data_feeding_kwargs",
        kwargs,
        defaultClassApp=dict,
        defaultinstance=defaultinstance_chronics_kwargs,
        msg_error=msg_error,
    )
    # complete the user-provided kwargs with the environment defaults
    for el in defaultinstance_chronics_kwargs:
        if el not in data_feeding_kwargs:
            data_feeding_kwargs[el] = defaultinstance_chronics_kwargs[el]

    ### the chronics generator
    msg_error = 'The argument to build the data generation process [chronics] (keyword "chronics_class")'
    msg_error += " should be a class that inherit grid2op.ChronicsHandler.GridValue."
    chronics_class_used = _get_default_aux(
        "chronics_class",
        kwargs,
        defaultClassApp=GridValue,
        defaultClass=data_feeding_kwargs["chronicsClass"],
        msg_error=msg_error,
        isclass=True,
    )
    data_feeding_kwargs["chronicsClass"] = chronics_class_used

    ### the chronics handler (reads the data with the chronics class above)
    msg_error = 'The argument to build the data generation process [chronics] (keyword "data_feeding")'
    msg_error += (
        " should be a class that inherit grid2op.ChronicsHandler.ChronicsHandler."
    )
    data_feeding = _get_default_aux(
        "data_feeding",
        kwargs,
        defaultClassApp=ChronicsHandler,
        defaultClass=data_feeding_default_class,
        build_kwargs=data_feeding_kwargs,
        msg_error=msg_error,
    )

    ### controler for voltages
    msg_error = 'The argument to build the online controler for chronics (keyword "volagecontroler_class")'
    msg_error += " should be a class that inherit grid2op.VoltageControler.ControlVoltageFromFile."
    volagecontroler_class = _get_default_aux(
        "volagecontroler_class",
        kwargs,
        defaultClassApp=ControlVoltageFromFile,
        defaultClass=ControlVoltageFromFile,
        msg_error=msg_error,
        isclass=True,
    )

    ### other rewards
    msg_error = 'The argument to build the online controler for chronics (keyword "other_rewards")'
    msg_error += " should be dictionnary."
    other_rewards = _get_default_aux(
        "other_rewards",
        kwargs,
        defaultClassApp=dict,
        defaultinstance={},
        msg_error=msg_error,
        isclass=False,
    )

    # Opponent
    opponent_action_class = _get_default_aux(
        "opponent_action_class",
        kwargs,
        defaultClassApp=BaseAction,
        defaultClass=DontAct,
        msg_error=ERR_MSG_KWARGS["opponent_action_class"],
        isclass=True,
    )
    opponent_class = _get_default_aux(
        "opponent_class",
        kwargs,
        defaultClassApp=BaseOpponent,
        defaultClass=BaseOpponent,
        msg_error=ERR_MSG_KWARGS["opponent_class"],
        isclass=True,
    )
    opponent_init_budget = _get_default_aux(
        "opponent_init_budget",
        kwargs,
        defaultClassApp=float,
        defaultinstance=0.0,
        msg_error=ERR_MSG_KWARGS["opponent_init_budget"],
        isclass=False,
    )

    if not os.path.exists(grid_path):
        raise EnvError(
            'There is noting at "{}" where the powergrid should be located'.format(
                os.path.abspath(grid_path)
            )
        )

    env = Environment(
        init_grid_path=grid_path,
        chronics_handler=data_feeding,
        backend=backend,
        parameters=param,
        names_chronics_to_backend=names_chronics_to_backend,
        actionClass=action_class,
        observationClass=observation_class,
        rewardClass=reward_class,
        legalActClass=gamerules_class,
        voltagecontrolerClass=volagecontroler_class,
        other_rewards=other_rewards,
        opponent_action_class=opponent_action_class,
        opponent_class=opponent_class,
        opponent_init_budget=opponent_init_budget,
        name=name_env,
    )

    # update the thermal limit (and the plot layout) if any is known for this environment
    if name_env.lower() == "case14_test":
        env.set_thermal_limit(case14_test_TH_LIM)
        env.attach_layout(CASE_14_L2RPN2019_LAYOUT)
    elif name_env.lower() == "case14_redisp":
        env.set_thermal_limit(case14_redisp_TH_LIM)
        env.attach_layout(CASE_14_L2RPN2019_LAYOUT)
    elif name_env.lower() == "case14_realistic":
        env.set_thermal_limit(case14_real_TH_LIM)
        env.attach_layout(CASE_14_L2RPN2019_LAYOUT)
    elif name_env.lower() == "l2rpn_2019":
        env.attach_layout(CASE_14_L2RPN2019_LAYOUT)
    elif name_env.lower() == "case5_example":
        env.attach_layout(CASE_5_GRAPH_LAYOUT)
    return env
| 23,708 | 37.55122 | 139 | py |
Grid2Op | Grid2Op-master/grid2op/MakeEnv/PathUtils.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
# this files present utilitary class for handling the folder where data are stored mainly
import os
import json
# location of the (optional) user configuration file
DEFAULT_PATH_CONFIG = os.path.expanduser("~/.grid2opconfig.json")
# default folder in which the downloaded datasets are stored
DEFAULT_PATH_DATA = os.path.expanduser("~/data_grid2op")
# key, in the configuration file, that overrides DEFAULT_PATH_DATA
KEY_DATA_PATH = "data_path"
# if the user wrote a configuration file, honour the custom data path it defines
if os.path.exists(DEFAULT_PATH_CONFIG):
    with open(DEFAULT_PATH_CONFIG, "r") as f:
        dict_ = json.load(f)
        if KEY_DATA_PATH in dict_:
            DEFAULT_PATH_DATA = os.path.abspath(dict_[KEY_DATA_PATH])
def _create_path_folder(data_path):
    """Create the directory ``data_path`` if it does not exist yet.

    Parameters
    ----------
    data_path: ``str``
        Path of the directory to create. Nothing is done when it already exists.

    Raises
    ------
    RuntimeError
        If the directory cannot be created (missing write permission, missing
        parent directory, ...), with a hint pointing to the configuration file
        that can be used to relocate the grid2op data folder.
    """
    if not os.path.exists(data_path):
        try:
            os.mkdir(data_path)
        except Exception as exc_:
            # chain the original exception (`from exc_`) so the real cause
            # (PermissionError, FileNotFoundError, ...) is not silently lost
            raise RuntimeError(
                'Impossible to create a directory in "{}". Make sure you can write here. If you don\'t '
                'have writing permissions there, you can edit / create a config file in "{}"'
                'and set the "data_path" to point to a path where you can store data.'
                "".format(data_path, DEFAULT_PATH_CONFIG)
            ) from exc_
| 1,486 | 40.305556 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/MakeEnv/UpdateEnv.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import time
import os
import grid2op.MakeEnv.PathUtils
from grid2op.Exceptions import UnknownEnv
from grid2op.MakeEnv.UserUtils import list_available_local_env
from grid2op.MakeEnv.Make import _retrieve_github_content
# URL of the (github hosted) json file listing, for each environment, which files
# can be updated remotely
_LIST_REMOTE_URL = (
    "https://api.github.com/repos/bdonnot/grid2op-datasets/contents/updates.json"
)
# URL of the (github hosted) json file giving the reference hash of each environment
_LIST_REMOTE_ENV_HASH = (
    "https://api.github.com/repos/bdonnot/grid2op-datasets/contents/env_hashes.json"
)
def _write_file(path_local_env, new_config, file_name):
    """Overwrite ``file_name`` inside the directory ``path_local_env`` with the text ``new_config``."""
    target = os.path.join(path_local_env, file_name)
    with open(target, "w", encoding="utf-8") as dest_f:
        dest_f.write(new_config)
def update_env(env_name=None):
    """
    Fetch the most recent version of some of the files an environment is built
    from (for instance "config.py", "prod_charac.csv" or "difficulty_levels.json").

    Parameters
    ----------
    env_name: ``str``
        Name of the (already downloaded) environment whose files should be
        refreshed. When ``None`` (the default) every environment available on
        this machine is checked for updates.

    Examples
    --------
    A typical usage:

    .. code-block:: python

        import grid2op
        grid2op.update_env()
        # the files "config.py", "prod_charac.csv" or "difficulty_levels.json"
        # of your local environments now match the latest released version.

    """
    _update_files(env_name)
def _update_file(dict_, env_name, file_name):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Download the latest version of one file of one environment (for example
    "config.py", "prod_charac.csv" or "difficulty_levels.json") and write it
    back to the local copy of that environment.
    """
    url_ = dict_["base_url"] + dict_["filename"]
    time.sleep(1)  # be gentle with the github API
    new_config = _retrieve_github_content(url_, is_json=False)
    path_local_env = os.path.join(grid2op.MakeEnv.PathUtils.DEFAULT_PATH_DATA, env_name)

    if os.path.exists(os.path.join(path_local_env, ".multimix")):
        # multimix environment: propagate the new file to every mix that contains it
        for mix in os.listdir(path_local_env):
            mix_dir = os.path.join(path_local_env, mix)
            if os.path.exists(os.path.join(mix_dir, file_name)):
                # this is indeed a mix
                _write_file(mix_dir, new_config, file_name=file_name)
    else:
        # regular (single mix) environment
        _write_file(path_local_env, new_config, file_name=file_name)
    print(
        '\t Successfully updated file "{}" for environment "{}"'.format(
            file_name, env_name
        )
    )
def _do_env_need_update(env_name, env_hashes):
    """Return ``True`` when the local copy of ``env_name`` should be updated.

    This is the case when no reference hash is published for this environment
    (in doubt, update it) or when the digest of the local copy differs from the
    published one.
    """
    if env_name not in env_hashes:
        # no reference hash for this environment: keep the old behaviour and update
        return True
    local_env_path = os.path.join(grid2op.get_current_local_dir(), env_name)
    local_digest = _hash_env(local_env_path).hexdigest()
    # an update is needed iff local and remote digests disagree
    return env_hashes[env_name] != local_digest
def _update_files(env_name=None, answer_json=None, env_hashes=None):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Update all the "modified" files of a given environment. If ``None`` is provided as input, all local environments
    will be checked for update.

    Parameters
    ----------
    env_name: ``str``
        Name of the environment you want to update (should be locally available)

    answer_json: ``dict``, optional
        Content of the remote "updates.json" file (which files can be updated for which environment).
        Fetched from github when not provided; passing it avoids downloading it once per environment.

    env_hashes: ``dict``, optional
        Content of the remote "env_hashes.json" file (reference digest of each environment).
        Fetched from github when not provided; passing it avoids downloading it once per environment.
    """
    avail_envs = list_available_local_env()
    if answer_json is None:
        # optimization to retrieve only once this file
        answer_json = _retrieve_github_content(_LIST_REMOTE_URL)
    if env_hashes is None:
        # optimization to retrieve only once this file
        env_hashes = _retrieve_github_content(_LIST_REMOTE_ENV_HASH)
    if env_name is None:
        # i update all the files for all the environments
        for env_name in avail_envs:
            _update_files(env_name, answer_json=answer_json, env_hashes=env_hashes)
    else:
        # i update the files for only an environment
        if env_name in avail_envs:
            need_update = _do_env_need_update(env_name, env_hashes)
            if env_name in answer_json and need_update:
                # environment is outdated and remote files are available: download them all
                dict_main = answer_json[env_name]
                for k, dict_ in dict_main.items():
                    _update_file(dict_, env_name, file_name=k)
            elif need_update and env_name not in answer_json:
                # environment is outdated but nothing can be downloaded to fix it:
                # ask the user to report the issue upstream
                print(
                    f'Environment: "{env_name}" is not up to date, but we did not found any files to update. '
                    f'IF this environment is officially supported by grid2op (see full list at '
                    f'https://grid2op.readthedocs.io/en/latest/available_envs.html#description-of-some-environments) '
                    f'Please write an issue at :\n\t\t'
                    f'https://github.com/rte-france/Grid2Op/issues/new?assignees=&labels=question&title=Environment%20{env_name}%20is%20not%20up%20to%20date%20but%20I%20cannot%20update%20it.&body=%3c%21%2d%2dDescribe%20shortly%20the%20context%20%2d%2d%3e%0d'
                )
            else:
                # environment is up to date
                print('Environment "{}" is up to date'.format(env_name))
        else:
            raise UnknownEnv(
                'Impossible to locate the environment named "{}". Have you downlaoded it?'
                "".format(env_name)
            )
# TODO make that a method of the environment maybe ?
def _hash_env(
    path_local_env,
    hash_=None,
    blocksize=64,  # kept for backward compatibility of the signature; currently unused
):
    """
    Compute a hash characterizing the environment stored at ``path_local_env``.

    Configuration files are hashed with every whitespace character removed (so that
    platform dependent line endings, e.g. git converting "\\r\\n" to "\\n", do not
    change the result) and the chronics are hashed through their sorted file names
    only (for speed: their content is not read).

    Parameters
    ----------
    path_local_env: ``str``
        Path to the local environment (or multimix environment) to hash.
    hash_:
        A ``hashlib`` hash object to update; a fresh ``blake2b`` is created when ``None``.
    blocksize: ``int``
        Unused; kept so existing callers passing it keep working.

    Returns
    -------
    The updated hash object (call ``.hexdigest()`` on it to get the value).
    """
    # TODO make that a method of the environment maybe ?
    import hashlib  # lazy import
    import re  # lazy import, hoisted here (was previously re-imported inside the loop below)

    if hash_ is None:
        # we use blake2b as it is supposedly faster than md5
        # we don't really care about the "secure" part of it (though it's a nice tool to have)
        hash_ = hashlib.blake2b()

    if os.path.exists(os.path.join(path_local_env, ".multimix")):
        # this is a multi mix, so i need to run through all sub env
        for mix in sorted(os.listdir(path_local_env)):
            mix_dir = os.path.join(path_local_env, mix)
            if os.path.isdir(mix_dir):
                hash_ = _hash_env(mix_dir, hash_=hash_, blocksize=blocksize)
        return hash_

    # i am hashing a regular environment
    # first i hash the config files (only this known subset is hashed, not everything)
    for fn_ in [
        "alerts_info.json",
        "config.py",
        "difficulty_levels.json",
        "grid.json",
        "grid_layout.json",
        "prods_charac.csv",
        "storage_units_charac.csv",
        # chronix2grid files, if any
        "loads_charac.csv",
        "params.json",
        "params_load.json",
        "params_loss.json",
        "params_opf.json",
        "params_res.json",
        "scenario_params.json",
    ]:
        full_path_file = os.path.join(path_local_env, fn_)
        if os.path.exists(full_path_file):
            with open(full_path_file, "r", encoding="utf-8") as f:
                text_ = f.read()
            # strip all whitespace (raw string fixes the invalid "\s" escape):
            # this ensures compatibility between platforms, as git sometimes
            # replaces "\r\n" (windows) with "\n" (linux / macos) which would
            # otherwise mess up the hash
            text_ = re.sub(r"\s", "", text_)
            hash_.update(text_.encode("utf-8"))

    # now I hash the chronics
    # but as i don't want to read every chronics (for time purposes) i will only hash the names
    # of all the chronics
    path_chronics = os.path.join(path_local_env, "chronics")
    for chron_name in sorted(os.listdir(path_chronics)):
        hash_.update(chron_name.encode("utf-8"))
    return hash_
| 8,627 | 38.39726 | 258 | py |
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import json
from grid2op.MakeEnv.Make import _list_available_remote_env_aux
import grid2op.MakeEnv.PathUtils
from grid2op.MakeEnv.PathUtils import DEFAULT_PATH_CONFIG, KEY_DATA_PATH
from grid2op.Exceptions import Grid2OpException
def list_available_remote_env():
    """
    List the environments that can be downloaded from the remote repository.

    Every remote environment is returned, including the ones that have
    already been downloaded on this machine.

    Returns
    -------
    res: ``list``
        a sorted list of available to environments that can be downloaded.

    Examples
    ---------
    A usage example is

    .. code-block:: python

        import grid2op
        li = grid2op.list_available_remote_env()
        li_fmt = '\\n * '.join(li)
        print(f"The available environments are: \\n * {li_fmt}")

    """
    remote_envs = _list_available_remote_env_aux()
    # sorting the dict directly iterates over its keys
    return sorted(remote_envs)
def list_available_local_env():
    """
    List the environments that are available locally (i.e. already downloaded).
    The environments shipped inside the grid2op package itself are NOT listed here.

    Returns
    -------
    res: ``list``
        a sorted list of available environments locally.

    Examples
    ---------
    .. code-block:: python

        import grid2op
        li = grid2op.list_available_local_env()
        li_fmt = '\\n + '.join(li)
        print(f"The locally available environments (without downloading anything) are: \\n * {li_fmt}")

    """
    data_dir = grid2op.MakeEnv.PathUtils.DEFAULT_PATH_DATA
    if not os.path.exists(data_dir):
        return []

    available = []
    for entry in os.listdir(data_dir):
        candidate = os.path.join(data_dir, entry)
        # a "regular" environment has both a config.py and a grid.json,
        # a multimix environment is flagged by a ".multimix" file
        is_regular_env = os.path.exists(
            os.path.join(candidate, "config.py")
        ) and os.path.exists(os.path.join(candidate, "grid.json"))
        is_multimix = os.path.exists(os.path.join(candidate, ".multimix"))
        if is_regular_env or is_multimix:
            available.append(entry)
    return available
def list_available_test_env():
    """
    List the environments available through "grid2op.make(..., test=True)": the
    environments shipped with the grid2op package itself, usable without
    downloading any data. They are meant for testing / illustration purpose.

    Returns
    -------
    res: ``list``
        a sorted list of available environments for testing / illustration purpose.

    Examples
    ---------
    .. code-block:: python

        import grid2op
        li = grid2op.list_available_test_env()
        env = grid2op.make(li[0], test=True)

    """
    from grid2op.MakeEnv.Make import TEST_DEV_ENVS
    import re

    # only the "official" test environments are exposed
    env_name_re = re.compile("(^rte_.*)|(^l2rpn_.*)|(^educ_.*)")
    return sorted(
        name for name in TEST_DEV_ENVS.keys() if env_name_re.match(name) is not None
    )
def get_current_local_dir():
    """
    Return the directory in which grid2op stores (and looks for) the downloaded
    datasets. This path can be modified with the ".grid2opconfig.json" file.

    Returns
    -------
    res: ``str``
        The current path were data are downloaded in.

    Examples
    ---------
    .. code-block:: python

        import grid2op
        print(f"Data about grid2op downloaded environments are stored in: \"{grid2op.get_current_local_dir()}\"")

    """
    data_dir = grid2op.MakeEnv.PathUtils.DEFAULT_PATH_DATA
    return os.path.abspath(data_dir)
def change_local_dir(new_path):
    """
    This function will change the path were datasets are read to / from.

    The previous datasets will be left in the previous configuration folder and will not be accessible by other
    grid2op function such as "make" for example.

    Parameters
    ----------
    new_path: ``str``
        The new path in which to download the datasets.

    Raises
    ------
    Grid2OpException
        If ``new_path`` cannot be converted to ``str``, does not point to an existing
        directory, or if the configuration file cannot be read / written.

    Examples
    ---------
    To set the download path, and the path where grid2op will look for available local environment you can:

    .. code-block:: python

        import grid2op
        local_dir = ...  # should be a valid path on your machine
        grid2op.change_local_dir(local_dir)

        # check it has worked:
        print(f"Data about grid2op downloaded environments are now stored in: \"{grid2op.get_current_local_dir()}\"")

    """
    # NB: the bare ``except:`` clauses were narrowed to ``except Exception`` so that
    # KeyboardInterrupt / SystemExit are no longer swallowed, and the original error
    # is chained (``from exc``) for easier debugging.
    try:
        new_path = str(new_path)
    except Exception as exc:
        raise Grid2OpException(
            'The new path should be convertible to str. It is currently "{}"'.format(
                new_path
            )
        ) from exc

    root_dir = os.path.split(new_path)[0]
    if not os.path.exists(root_dir):
        raise Grid2OpException(
            'Data cannot be stored in "{}" as the base path of this directory ("{}") does '
            "not exists.".format(new_path, root_dir)
        )
    if not os.path.isdir(new_path):
        raise Grid2OpException(
            'Data cannot be stored in "{}" as it is a file and not a directory.'.format(
                new_path
            )
        )

    # load the existing configuration (if any) so that other settings are preserved
    newconfig = {}
    if os.path.exists(DEFAULT_PATH_CONFIG):
        try:
            with open(DEFAULT_PATH_CONFIG, "r", encoding="utf-8") as f:
                newconfig = json.load(f)
        except Exception as exc:
            raise Grid2OpException(
                'Impossible to read the grid2op configuration files "{}". Make sure it is a '
                'valid json encoded with "utf-8" encoding.'.format(DEFAULT_PATH_CONFIG)
            ) from exc

    newconfig[KEY_DATA_PATH] = new_path
    try:
        with open(DEFAULT_PATH_CONFIG, "w", encoding="utf-8") as f:
            json.dump(fp=f, obj=newconfig, sort_keys=True, indent=4)
    except Exception as exc:
        raise Grid2OpException(
            'Impossible to write the grid2op configuration files "{}". Make sure you have '
            "writing access to it.".format(DEFAULT_PATH_CONFIG)
        ) from exc

    # make the new location effective for the current process as well
    grid2op.MakeEnv.PathUtils.DEFAULT_PATH_DATA = new_path
| 6,347 | 29.085308 | 117 | py |
Grid2Op | Grid2Op-master/grid2op/MakeEnv/__init__.py | __all__ = [
"make",
"make_from_dataset_path",
"list_available_remote_env",
"list_available_local_env",
"get_current_local_dir",
"change_local_dir",
"list_available_test_env",
"update_env",
# deprecated in v 0.8.0
"make_old",
]
from grid2op.MakeEnv.MakeOld import make_old
from grid2op.MakeEnv.MakeFromPath import make_from_dataset_path
from grid2op.MakeEnv.Make import make
from grid2op.MakeEnv.UserUtils import (
list_available_remote_env,
list_available_local_env,
get_current_local_dir,
)
from grid2op.MakeEnv.UserUtils import change_local_dir, list_available_test_env
from grid2op.MakeEnv.UpdateEnv import update_env
| 674 | 27.125 | 79 | py |
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numbers
from tarfile import ENCODING
from grid2op.Exceptions import *
def _get_default_aux(
    name,
    kwargs,
    defaultClassApp,
    _sentinel=None,
    msg_error="Error when building the default parameter",
    defaultinstance=None,
    defaultClass=None,
    build_kwargs=None,
    isclass=False,
):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Helper to build default parameters forwarded to :class:`grid2op.Environment.Environment` for its creation.

    Exactly one of ``defaultinstance`` or ``defaultClass`` should be used, and set to not ``None``

    Parameters
    ----------
    name: ``str``
        Name of the argument to look for

    kwargs: ``dict``
        The key word arguments given to the :func:`make` function

    defaultClassApp; ``type``
        The default class to which the returned object should belong to. The final object should either be an instance
        of this ``defaultClassApp`` (if isclass is ``False``) or a subclass of this (if isclass is ``True``)

    _sentinel: ``None``
        Internal, do not use. Present to force key word arguments.

    msg_error: ``str`` or ``None``
        The message error to display if the object does not belong to ``defaultClassApp``

    defaultinstance: ``object`` or ``None``
        The default instance that will be returned. Note that if ``defaultinstance`` is not None, then
        ``defaultClass`` should be ``None`` and ``build_kwargs`` and empty dictionary.

    defaultClass: ``type`` or ``None``
        The class used to build the default object. Note that if ``defaultClass`` is not None, then
        ``defaultinstance`` should be.

    build_kwargs: ``dict`` or ``None``
        The keyword arguments used to build the final object (if ``isclass`` is ``True``). ``None`` (the default)
        is equivalent to an empty dictionary. Note that:

          * if ``isclass`` is ``False``, this should be empty
          * if ``defaultinstance`` is not None, then this should be empty
          * This parameter should allow to create a valid object of type ``defaultClass``: it's key must be
            proper keys accepted by the class

    isclass: ``bool``
        Whether to build an instance of a class, or just return the class.

    Returns
    -------
    res:
        The parameters, either read from kwargs, or with its default value.

    """
    if build_kwargs is None:
        # NB: ``None`` replaces the previous mutable default argument ``{}``
        # (mutable defaults are evaluated once and shared across every call)
        build_kwargs = {}
    err_msg = 'Impossible to create the parameter "{}": '
    if _sentinel is not None:
        err_msg += "Impossible to get default parameters for building the environment. Please use keywords arguments."
        # the "{}" placeholder was previously never filled in: format it with the parameter name
        raise RuntimeError(err_msg.format(name))

    res = None
    # first seek for the parameter in the kwargs, and check it's valid
    if name in kwargs:
        res = kwargs[name]
        if isclass is None:
            # I don't know whether it's an object or a class
            error_msg_here = None
            res = None
            try:
                # I try to build it as an object
                res = _get_default_aux(
                    name,
                    kwargs=kwargs,
                    defaultClassApp=defaultClassApp,
                    _sentinel=_sentinel,
                    msg_error=msg_error,
                    defaultinstance=defaultinstance,
                    defaultClass=defaultClass,
                    build_kwargs=build_kwargs,
                    isclass=False,
                )
            except EnvError as exc1_:
                # I try to build it as a class
                try:
                    res = _get_default_aux(
                        name,
                        kwargs=kwargs,
                        defaultClassApp=defaultClassApp,
                        _sentinel=_sentinel,
                        msg_error=msg_error,
                        defaultinstance=defaultinstance,
                        defaultClass=defaultClass,
                        build_kwargs=build_kwargs,
                        isclass=True,
                    )
                except EnvError as exc2_:
                    # both fails !
                    error_msg_here = f"{exc1_} AND {exc2_}"

            if error_msg_here is not None:
                raise EnvError(error_msg_here)
        elif isclass is False:
            # i must create an instance of a class. I check whether it's a instance.
            if not isinstance(res, defaultClassApp):
                if issubclass(defaultClassApp, numbers.Number):
                    try:
                        # if this is base numeric type, like float or anything, i try to convert to it (i want to
                        # accept that "int" are float for example.
                        res = defaultClassApp(res)
                    except Exception as exc_:
                        # chain the original error so the failure is easier to debug
                        raise EnvError(msg_error) from exc_
                else:
                    # if there is any error, i raise the error message
                    raise EnvError(msg_error)
        elif isclass is True:
            # so it should be a class
            if not isinstance(res, type):
                raise EnvError(
                    'Parameter "{}" should be a type and not an instance. It means that you provided an '
                    "object instead of the class to build it.".format(name)
                )
            # I must create a class, i check whether it's a subclass
            if not issubclass(res, defaultClassApp):
                raise EnvError(msg_error)
        else:
            raise EnvError(
                'Impossible to use the "_get_default_aux" function with "isclass" kwargs being different '
                "from None, True and False"
            )

    if res is None:
        # build the default parameter if not found
        if isclass is False:
            # i need building an instance
            if defaultClass is not None:
                if defaultinstance is not None:
                    err_msg += "Impossible to build an environment with both a default instance, and a default class"
                    raise EnvError(err_msg.format(name))
                try:
                    res = defaultClass(**build_kwargs)
                except Exception as e:
                    e.args = e.args + (
                        'Cannot create and instance of {} with parameters "{}"'.format(
                            defaultClass, build_kwargs
                        ),
                    )
                    raise
            elif defaultinstance is not None:
                if len(build_kwargs):
                    err_msg += "An instance is provided, yet kwargs to build it is also provided"
                    raise EnvError(err_msg.format(name))
                res = defaultinstance
            else:
                # use ``+=`` (consistent with the other branches): the previous plain
                # assignment dropped the 'Impossible to create the parameter "{}"' prefix
                err_msg += 'None of "defaultClass" and "defaultinstance" is provided.'
                raise EnvError(err_msg.format(name))
        else:
            # I returning a class
            if len(build_kwargs):
                err_msg += (
                    "A class must be returned, yet kwargs to build it is also provided"
                )
                raise EnvError(err_msg.format(name))
            if defaultinstance is not None:
                err_msg += "A class must be returned yet a default instance is provided"
                raise EnvError(err_msg.format(name))
            res = defaultClass
    return res
Grid2Op | Grid2Op-master/grid2op/Observation/__init__.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
__all__ = [
# real export
"CompleteObservation",
"NoisyObservation",
"BaseObservation",
"ObservationSpace",
]
from grid2op.Observation.completeObservation import CompleteObservation
from grid2op.Observation.noisyObservation import NoisyObservation
from grid2op.Observation.baseObservation import BaseObservation
from grid2op.Observation.observationSpace import ObservationSpace
| 862 | 40.095238 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Observation/baseObservation.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import datetime
import warnings
import networkx
from abc import abstractmethod
import numpy as np
from scipy.sparse import csr_matrix
from typing import Optional
from grid2op.dtypes import dt_int, dt_float, dt_bool
from grid2op.Exceptions import (
Grid2OpException,
NoForecastAvailable,
BaseObservationError,
)
from grid2op.Space import GridObjects
# TODO have a method that could do "forecast" by giving the _injection by the agent,
# TODO if he wants to make custom forecasts
# TODO fix "bug" when action not initalized it should return nan in to_vect
# TODO be consistent with gen_* and prod_* also in dictionaries
ERROR_ONLY_SINGLE_EL = "You can only the inspect the effect of an action on one single element"
class BaseObservation(GridObjects):
"""
Basic class representing an observation.
All observation must derive from this class and implement all its abstract methods.
Attributes
----------
action_helper: :class:`grid2op.Action.ActionSpace`
A representation of the possible action space.
year: ``int``
The current year
month: ``int``
The current month (1 = january, 12 = december)
day: ``int``
The current day of the month (1 = first day of the month)
hour_of_day: ``int``
The current hour of the day (from O to 23)
minute_of_hour: ``int``
The current minute of the current hour (from 0 to 59)
day_of_week: ``int``
The current day of the week (monday = 0 and sunday = 6)
support_theta: ``bool``
This flag indicates whether the backend supports the retrieval of the
voltage angle. If so (which is the case for most backend) then
some supplementary attributes are available, such as
:attr:`BaseObservation.gen_theta`,
:attr:`BaseObservation.load_theta`,
:attr:`BaseObservation.storage_theta`,
:attr:`BaseObservation.theta_or` or
:attr:`BaseObservation.theta_ex` .
gen_p: :class:`numpy.ndarray`, dtype:float
The active production value of each generator (expressed in MW).
(the old name "prod_p" is still usable)
gen_q: :class:`numpy.ndarray`, dtype:float
The reactive production value of each generator (expressed in MVar).
(the old name "prod_q" is still usable)
gen_v: :class:`numpy.ndarray`, dtype:float
The voltage magnitude of the bus to which each generator is connected (expressed in kV).
(the old name "prod_v" is still usable)
gen_theta: :class:`numpy.ndarray`, dtype:float
The voltage angle (in degree) of the bus to which each generator is
connected. Only availble if the backend supports the retrieval of
voltage angles (see :attr:`BaseObservation.support_theta`).
load_p: :class:`numpy.ndarray`, dtype:float
The active load value of each consumption (expressed in MW).
load_q: :class:`numpy.ndarray`, dtype:float
The reactive load value of each consumption (expressed in MVar).
load_v: :class:`numpy.ndarray`, dtype:float
The voltage magnitude of the bus to which each consumption is connected (expressed in kV).
load_theta: :class:`numpy.ndarray`, dtype:float
The voltage angle (in degree) of the bus to which each consumption
is connected. Only availble if the backend supports the retrieval of
voltage angles (see :attr:`BaseObservation.support_theta`).
p_or: :class:`numpy.ndarray`, dtype:float
The active power flow at the origin end of each powerline (expressed in MW).
q_or: :class:`numpy.ndarray`, dtype:float
The reactive power flow at the origin end of each powerline (expressed in MVar).
v_or: :class:`numpy.ndarray`, dtype:float
The voltage magnitude at the bus to which the origin end of each powerline is connected (expressed in kV).
theta_or: :class:`numpy.ndarray`, dtype:float
The voltage angle at the bus to which the origin end of each powerline
is connected (expressed in degree). Only availble if the backend supports the retrieval of
voltage angles (see :attr:`BaseObservation.support_theta`).
a_or: :class:`numpy.ndarray`, dtype:float
The current flow at the origin end of each powerline (expressed in A).
p_ex: :class:`numpy.ndarray`, dtype:float
The active power flow at the extremity end of each powerline (expressed in MW).
q_ex: :class:`numpy.ndarray`, dtype:float
The reactive power flow at the extremity end of each powerline (expressed in MVar).
v_ex: :class:`numpy.ndarray`, dtype:float
The voltage magnitude at the bus to which the extremity end of each powerline is connected (expressed in kV).
theta_ex: :class:`numpy.ndarray`, dtype:float
The voltage angle at the bus to which the extremity end of each powerline
is connected (expressed in degree). Only availble if the backend supports the retrieval of
voltage angles (see :attr:`BaseObservation.support_theta`).
a_ex: :class:`numpy.ndarray`, dtype:float
The current flow at the extremity end of each powerline (expressed in A).
rho: :class:`numpy.ndarray`, dtype:float
The capacity of each powerline. It is defined at the observed current flow divided by the thermal limit of each
powerline (no unit)
topo_vect: :class:`numpy.ndarray`, dtype:int
For each object (load, generator, ends of a powerline) it gives on which bus this object is connected
in its substation. See :func:`grid2op.Backend.Backend.get_topo_vect` for more information.
line_status: :class:`numpy.ndarray`, dtype:bool
Gives the status (connected / disconnected) for every powerline (``True`` at position `i` means the powerline
`i` is connected)
timestep_overflow: :class:`numpy.ndarray`, dtype:int
Gives the number of time steps since a powerline is in overflow.
time_before_cooldown_line: :class:`numpy.ndarray`, dtype:int
For each powerline, it gives the number of time step the powerline is unavailable due to "cooldown"
(see :attr:`grid2op.Parameters.NB_TIMESTEP_COOLDOWN_LINE` for more information). 0 means the
an action will be able to act on this same powerline, a number > 0 (eg 1) means that an action at this time step
cannot act on this powerline (in the example the agent have to wait 1 time step)
time_before_cooldown_sub: :class:`numpy.ndarray`, dtype:int
Same as :attr:`BaseObservation.time_before_cooldown_line` but for substations. For each substation, it gives the
number of timesteps to wait before acting on this substation (see
see :attr:`grid2op.Parameters.NB_TIMESTEP_COOLDOWN_SUB` for more information).
time_next_maintenance: :class:`numpy.ndarray`, dtype:int
For each powerline, it gives the time of the next planned maintenance. For example if there is:
- `1` at position `i` it means that the powerline `i` will be disconnected for maintenance operation at
the next time step.
- `0` at position `i` means that powerline `i` is disconnected from the powergrid for maintenance operation
at the current time step.
- `-1` at position `i` means that powerline `i` will not be disconnected for maintenance reason for this
episode.
- `k` > 1 at position `i` it means that the powerline `i` will be disconnected for maintenance operation at
in `k` time steps
When a powerline is "in maintenance", it cannot be reconnected by the `Agent` before the end of this
maintenance.
duration_next_maintenance: :class:`numpy.ndarray`, dtype:int
For each powerline, it gives the number of time step that the maintenance will last (if any). This means that,
if at position `i` of this vector:
- there is a `0`: the powerline is not disconnected from the grid for maintenance
- there is a `1`, `2`, ... the powerline will be disconnected for at least `1`, `2`, ... timestep (**NB**
in all case, the powerline will stay disconnected until a :class:`grid2op.BaseAgent.BaseAgent` performs the
proper :class:`grid2op.BaseAction.BaseAction` to reconnect it).
When a powerline is "in maintenance", it cannot be reconnected by the `Agent` before the end of this
maintenance.
target_dispatch: :class:`numpy.ndarray`, dtype:float
For **each** generators, it gives the target redispatching, asked by the agent. This is the sum of all
redispatching asked by the agent for during all the episode. It for each generator it is a number between:
- pmax and pmax. Note that there is information about all generators there, even the one that are not
dispatchable.
actual_dispatch: :class:`numpy.ndarray`, dtype:float
For **each** generators, it gives the redispatching currently implemented by the environment.
Indeed, the environment tries to implement at best the :attr:`BaseObservation.target_dispatch`, but sometimes,
due to physical limitation (pmin, pmax, ramp min and ramp max) it cannot. In this case, only the best possible
redispatching is implemented at the current time step, and this is what this vector stores. Note that there is
information about all generators there, even the one that are not
dispatchable.
storage_charge: :class:`numpy.ndarray`, dtype:float
The actual 'state of charge' of each storage unit, expressed in MWh.
storage_power_target: :class:`numpy.ndarray`, dtype:float
For each storage units, give the setpoint of production / consumption as given by the agent
storage_power: :class:`numpy.ndarray`, dtype:float
Give the actual storage production / loads at the given state.
storage_theta: :class:`numpy.ndarray`, dtype:float
The voltage angle (in degree) of the bus to which each storage units
is connected. Only availble if the backend supports the retrieval of
voltage angles (see :attr:`BaseObservation.support_theta`).
gen_p_before_curtail: :class:`numpy.ndarray`, dtype:float
Give the production of renewable generator there would have been
if no curtailment were applied (**NB** it returns 0.0 for non renewable
generators that cannot be curtailed)
curtailment_limit: :class:`numpy.ndarray`, dtype:float
Limit (in ratio of gen_pmax) imposed on each renewable generator as set by the agent.
It is always 1. if no curtailment actions is acting on the generator.
This is the "curtailment" given in the action by the agent.
curtailment_limit_effective: :class:`numpy.ndarray`, dtype:float
Limit (in ratio of gen_pmax) imposed on each renewable generator effectively imposed by the environment.
It matches :attr:`BaseObservation.curtailment_limit` if `param.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION`
is ``False`` (default) otherwise the environment is able to limit the curtailment actions if too much
power would be needed to compensate the "loss" of generation due to renewables.
It is always 1. if no curtailment actions is acting on the generator.
curtailment_mw: :class:`numpy.ndarray`, dtype:float
Gives the amount of power curtailed for each generator (it is 0. for all
non renewable generators)
This is NOT the "curtailment" given in the action by the agent.
curtailment: :class:`numpy.ndarray`, dtype:float
Give the power curtailed for each generator. It is expressed in
ratio of gen_pmax (so between 0. - meaning no curtailment in effect for this
generator - to 1.0 - meaning this generator should have produced pmax, but
a curtailment action limits it to 0.)
This is NOT the "curtailment" given in the action by the agent.
current_step: ``int``
Current number of step performed up until this observation (NB this is not given in the observation if
it is transformed into a vector)
max_step: ``int``
Maximum number of steps possible for this episode
delta_time: ``float``
Time (in minutes) between the last step and the current step (usually constant in an episode, even in an environment)
is_alarm_illegal: ``bool``
whether the last alarm has been illegal (due to budget constraint). It can only be ``True`` if an alarm
was raised by the agent on the previous step. Otherwise it is always ``False`` (warning: /!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\)
time_since_last_alarm: ``int``
Number of steps since the last successful alarm has been raised. It is `-1` if no alarm has been raised yet. (warning: /!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\)
last_alarm: :class:`numpy.ndarray`, dtype:int
For each zones, gives how many steps since the last alarm was raised successfully for this zone (warning: /!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\)
attention_budget: ``int``
The current attention budget
was_alarm_used_after_game_over: ``bool``
Was the last alarm used to compute anything related
to the attention budget when there was a game over. It can only be set to ``True`` if the observation
corresponds to a game over, but not necessarily. (warning: /!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\)
gen_margin_up: :class:`numpy.ndarray`, dtype:float
From how much can you increase each generators production between this
step and the next.
It is always 0. for non renewable generators. For the others it is defined as
`np.minimum(type(self).gen_pmax - self.gen_p, self.gen_max_ramp_up)`
gen_margin_down: :class:`numpy.ndarray`, dtype:float
From how much can you decrease each generators production between this
step and the next.
It is always 0. for non renewable generators. For the others it is defined as
`np.minimum(self.gen_p - type(self).gen_pmin, self.gen_max_ramp_down)`
active_alert: :class:`numpy.ndarray`, dtype:bool
.. warning:: Only available if the environment supports the "alert" feature (*eg* "l2rpn_idf_2023").
This function gives the lines "under alert" at the given observation.
It is only relevant for the "real" environment and not for `obs.simulate` nor `obs.get_forecast_env`
active_alert time_since_last_alert alert_duration total_number_of_alert time_since_last_attack was_alert_used_after_attack
time_since_last_alert: :class:`numpy.ndarray`, dtype:int
.. warning:: Only available if the environment supports the "alert" feature (*eg* "l2rpn_idf_2023").
Give the time since an alert has been raised for each powerline. If you just raise an
alert for attackable line `i` then obs.time_since_last_alert[i] = 0 (and counter
increase by 1 each step).
If attackable line `i` has never been "under alert" then obs.time_since_last_alert[i] = -1
alert_duration: :class:`numpy.ndarray`, dtype:int
.. warning:: Only available if the environment supports the "alert" feature (*eg* "l2rpn_idf_2023").
Give the time since an alert has started for all attackable line. If you just raise an
alert for attackable line `i` then obs.time_since_last_alert[i] = 1 and this counter
increase by 1 each step as long as the agent continues to "raise an alert on attackable line i"
When the attackable line `i` is not under an alert then obs.time_since_last_alert[i] = 0
total_number_of_alerts: :class:`numpy.ndarray`, dtype:int
.. warning:: Only available if the environment supports the "alert" feature (*eg* "l2rpn_idf_2023").
This function counts, since the beginning of the current episode, the total number
of alerts (here 1 alert = one alert for 1 powerline for 1 step) sent by the agent.
time_since_last_attack: :class:`numpy.ndarray`, dtype:int
.. warning:: Only available if the environment supports the "alert" feature (*eg* "l2rpn_idf_2023").
Similar to `time_since_last_alert` but for the attack.
For each attackable line `i` it counts the number of steps since the powerline has
been attacked:
- obs.time_since_last_attack[i] = -1 then attackable line `i` has never been attacked
- obs.time_since_last_attack[i] = 0 then attackable line `i` has been attacked "for the
first time" this step
- obs.time_since_last_attack[i] = 1 then attackable line `i` has been attacked "for the
first time" the previous step
- obs.time_since_last_attack[i] = 2 then attackable line `i` has been attacked "for the
first time" 2 steps ago
.. note::
An attack "for the first time" is NOT an attack "for the first time of the scenario".
Indeed, for this attribute, if a powerline is under attack for say 5 consecutive steps,
then the opponent stops its attack on this line and says 6 or 7 steps later it
start again to attack it then obs.time_since_last_attack[i] = 0 at the "first time" the
opponent attacks again this powerline.
was_alert_used_after_attack: :class:`numpy.ndarray`, dtype:int
.. warning:: Only available if the environment supports the "alert" feature (*eg* "l2rpn_idf_2023").
.. danger::
This attribute is only filled
if you use a compatible reward (*eg* :class:`grid2op.Reward.AlertReward`)
as the main reward (or a "combined" reward with this reward being part of it)
For each attackable line `i` it says:
- obs.was_alert_used_after_attack[i] = 0 => attackable line i has not been attacked
- obs.was_alert_used_after_attack[i] = -1 => attackable line i has been attacked and for the last attack
the INCORRECT alert was sent (meaning that: if the agent survives, it sends an alert
and if the agent died it fails to send an alert)
- obs.was_alert_used_after_attack[i] = +1 => attackable line i has been attacked and for the last attack
the CORRECT alert was sent (meaning that: if the agent survives, it did not send an alert
and if the agent died it properly sent an alert)
By "last attack", we mean the last attack that occured until now.
attack_under_alert: :class:`numpy.ndarray`, dtype:int
.. warning:: Only available if the environment supports the "alert" feature (*eg* "l2rpn_idf_2023").
For each attackable line `i` it says:
- obs.attack_under_alert[i] = 0 => attackable line i has not been attacked OR it
has been attacked before the relevant window (env.parameters.ALERT_TIME_WINDOW)
- obs.attack_under_alert[i] = -1 => attackable line i has been attacked and (before
the attack) no alert was sent (so your agent expects to survive at least
env.parameters.ALERT_TIME_WINDOW steps)
- obs.attack_under_alert[i] = +1 => attackable line i has been attacked and (before
the attack) an alert was sent (so your agent expects to "game over" within the next
env.parameters.ALERT_TIME_WINDOW steps)
_shunt_p: :class:`numpy.ndarray`, dtype:float
Shunt active value (only available if shunts are available) (in MW)
_shunt_q: :class:`numpy.ndarray`, dtype:float
Shunt reactive value (only available if shunts are available) (in MVAr)
_shunt_v: :class:`numpy.ndarray`, dtype:float
Shunt voltage (only available if shunts are available) (in kV)
_shunt_bus: :class:`numpy.ndarray`, dtype:float
Bus (-1 disconnected, 1 for bus 1, 2 for bus 2) at which each shunt is connected
(only available if shunts are available)
"""
_attr_eq = [
"line_status",
"topo_vect",
"timestep_overflow",
"gen_p",
"gen_q",
"gen_v",
"load_p",
"load_q",
"load_v",
"p_or",
"q_or",
"v_or",
"a_or",
"p_ex",
"q_ex",
"v_ex",
"a_ex",
"time_before_cooldown_line",
"time_before_cooldown_sub",
"time_next_maintenance",
"duration_next_maintenance",
"target_dispatch",
"actual_dispatch",
"_shunt_p",
"_shunt_q",
"_shunt_v",
"_shunt_bus",
# storage
"storage_charge",
"storage_power_target",
"storage_power",
# curtailment
"gen_p_before_curtail",
"curtailment",
"curtailment_limit",
"curtailment_limit_effective",
# attention budget
"is_alarm_illegal",
"time_since_last_alarm",
"last_alarm",
"attention_budget",
"was_alarm_used_after_game_over",
# line alert
"active_alert",
"attack_under_alert",
"time_since_last_alert",
"alert_duration",
"total_number_of_alert",
"time_since_last_attack",
"was_alert_used_after_attack",
# gen up / down
"gen_margin_up",
"gen_margin_down",
]
attr_list_vect = None
# value to assess if two observations are equal
_tol_equal = 1e-3
def __init__(self,
             obs_env=None,
             action_helper=None,
             random_prng=None,
             kwargs_env=None):
    """
    Create a blank observation.

    Most vector attributes are allocated with ``np.empty`` (sized from the grid
    description carried by the class, *eg* ``self.n_line``) and hold garbage until
    the observation is filled (typically by ``update`` / ``reset``).

    Parameters
    ----------
    obs_env:
        "Observation environment" used for simulation; kept as a pointer, not copied.
    action_helper:
        Action space associated with this observation; kept as a pointer.
    random_prng:
        Pseudo random number generator; stored as-is.
    kwargs_env:
        Pointer to the keyword arguments used to build the environment.
    """
    GridObjects.__init__(self)
    self._is_done = True
    self.random_prng = random_prng

    self.action_helper = action_helper

    # handles the forecasts here
    self._forecasted_grid_act = {}
    self._forecasted_inj = []
    self._env_internal_params = {}

    self._obs_env = obs_env
    self._ptr_kwargs_env = kwargs_env

    # calendar data
    self.year = dt_int(1970)
    self.month = dt_int(1)
    self.day = dt_int(1)
    self.hour_of_day = dt_int(0)
    self.minute_of_hour = dt_int(0)
    self.day_of_week = dt_int(0)

    # number of consecutive steps each line has been on overflow
    self.timestep_overflow = np.empty(shape=(self.n_line,), dtype=dt_int)

    # 0. (line is disconnected) / 1. (line is connected)
    self.line_status = np.empty(shape=self.n_line, dtype=dt_bool)

    # topological vector
    self.topo_vect = np.empty(shape=self.dim_topo, dtype=dt_int)

    # generators information
    self.gen_p = np.empty(shape=self.n_gen, dtype=dt_float)
    self.gen_q = np.empty(shape=self.n_gen, dtype=dt_float)
    self.gen_v = np.empty(shape=self.n_gen, dtype=dt_float)
    self.gen_margin_up = np.empty(shape=self.n_gen, dtype=dt_float)
    self.gen_margin_down = np.empty(shape=self.n_gen, dtype=dt_float)

    # loads information
    self.load_p = np.empty(shape=self.n_load, dtype=dt_float)
    self.load_q = np.empty(shape=self.n_load, dtype=dt_float)
    self.load_v = np.empty(shape=self.n_load, dtype=dt_float)
    # lines origin information
    self.p_or = np.empty(shape=self.n_line, dtype=dt_float)
    self.q_or = np.empty(shape=self.n_line, dtype=dt_float)
    self.v_or = np.empty(shape=self.n_line, dtype=dt_float)
    self.a_or = np.empty(shape=self.n_line, dtype=dt_float)
    # lines extremity information
    self.p_ex = np.empty(shape=self.n_line, dtype=dt_float)
    self.q_ex = np.empty(shape=self.n_line, dtype=dt_float)
    self.v_ex = np.empty(shape=self.n_line, dtype=dt_float)
    self.a_ex = np.empty(shape=self.n_line, dtype=dt_float)
    # lines relative flows
    self.rho = np.empty(shape=self.n_line, dtype=dt_float)

    # cool down and reconnection time after hard overflow, soft overflow or cascading failure
    self.time_before_cooldown_line = np.empty(shape=self.n_line, dtype=dt_int)
    self.time_before_cooldown_sub = np.empty(shape=self.n_sub, dtype=dt_int)
    # "1 *" makes an independent copy of the freshly allocated int array (same shape/dtype)
    self.time_next_maintenance = 1 * self.time_before_cooldown_line
    self.duration_next_maintenance = 1 * self.time_before_cooldown_line

    # redispatching
    self.target_dispatch = np.empty(shape=self.n_gen, dtype=dt_float)
    self.actual_dispatch = np.empty(shape=self.n_gen, dtype=dt_float)

    # storage unit
    self.storage_charge = np.empty(shape=self.n_storage, dtype=dt_float)  # in MWh
    self.storage_power_target = np.empty(
        shape=self.n_storage, dtype=dt_float
    )  # in MW
    self.storage_power = np.empty(shape=self.n_storage, dtype=dt_float)  # in MW

    # attention budget
    self.is_alarm_illegal = np.ones(shape=1, dtype=dt_bool)
    self.time_since_last_alarm = np.empty(shape=1, dtype=dt_int)
    self.last_alarm = np.empty(shape=self.dim_alarms, dtype=dt_int)
    self.attention_budget = np.empty(shape=1, dtype=dt_float)
    self.was_alarm_used_after_game_over = np.zeros(shape=1, dtype=dt_bool)

    # alert
    dim_alert = type(self).dim_alerts
    self.active_alert = np.empty(shape=dim_alert, dtype=dt_bool)
    self.attack_under_alert = np.empty(shape=dim_alert, dtype=dt_int)
    self.time_since_last_alert = np.empty(shape=dim_alert, dtype=dt_int)
    self.alert_duration = np.empty(shape=dim_alert, dtype=dt_int)
    # scalar counter; empty (size 0) when the alert feature is not available
    self.total_number_of_alert = np.empty(shape=1 if dim_alert else 0, dtype=dt_int)
    self.time_since_last_attack = np.empty(shape=dim_alert, dtype=dt_int)
    self.was_alert_used_after_attack = np.empty(shape=dim_alert, dtype=dt_int)

    # to save some computation time (lazily computed caches)
    self._connectivity_matrix_ = None
    self._bus_connectivity_matrix_ = None
    self._dictionnarized = None
    self._vectorized = None

    # for shunt (these are not stored!)
    if self.shunts_data_available:
        self._shunt_p = np.empty(shape=self.n_shunt, dtype=dt_float)
        self._shunt_q = np.empty(shape=self.n_shunt, dtype=dt_float)
        self._shunt_v = np.empty(shape=self.n_shunt, dtype=dt_float)
        self._shunt_bus = np.empty(shape=self.n_shunt, dtype=dt_int)

    self._thermal_limit = np.empty(shape=self.n_line, dtype=dt_float)

    # curtailment related information
    self.gen_p_before_curtail = np.empty(shape=self.n_gen, dtype=dt_float)
    self.curtailment = np.empty(shape=self.n_gen, dtype=dt_float)
    self.curtailment_limit = np.empty(shape=self.n_gen, dtype=dt_float)
    self.curtailment_limit_effective = np.empty(shape=self.n_gen, dtype=dt_float)

    # the "theta" (voltage angle, in degree)
    self.support_theta = False
    self.theta_or = np.empty(shape=self.n_line, dtype=dt_float)
    self.theta_ex = np.empty(shape=self.n_line, dtype=dt_float)
    self.load_theta = np.empty(shape=self.n_load, dtype=dt_float)
    self.gen_theta = np.empty(shape=self.n_gen, dtype=dt_float)
    self.storage_theta = np.empty(shape=self.n_storage, dtype=dt_float)

    # counter
    self.current_step = dt_int(0)
    self.max_step = dt_int(np.iinfo(dt_int).max)
    self.delta_time = dt_float(5.0)
def _aux_copy(self, other):
    """
    Copy all "regular" attributes of ``self`` into ``other``.

    Scalar attributes are rebound on ``other``; vector (numpy) attributes are copied
    in place (``[:] =``), so ``other`` must already have arrays of the right shape.
    Cached matrices, forecasts and environment pointers are NOT handled here (see
    ``__copy__`` / ``__deepcopy__``).
    """
    # plain (scalar) attributes: simple rebinding is enough
    attr_simple = [
        "max_step",
        "current_step",
        "support_theta",
        "day_of_week",
        "minute_of_hour",
        "hour_of_day",
        "day",
        "month",
        "year",
        "delta_time",
        "_is_done",
    ]

    # numpy vector attributes: copied element-wise into the existing arrays
    attr_vect = [
        "storage_theta",
        "gen_theta",
        "load_theta",
        "theta_ex",
        "theta_or",
        "curtailment_limit",
        "curtailment",
        "gen_p_before_curtail",
        "_thermal_limit",
        "is_alarm_illegal",
        "time_since_last_alarm",
        "last_alarm",
        "attention_budget",
        "was_alarm_used_after_game_over",
        # alert (new in 1.9.1)
        "active_alert",
        "attack_under_alert",
        "time_since_last_alert",
        "alert_duration",
        "total_number_of_alert",
        "time_since_last_attack",
        "was_alert_used_after_attack",
        # other
        "storage_power",
        "storage_power_target",
        "storage_charge",
        "actual_dispatch",
        "target_dispatch",
        "duration_next_maintenance",
        "time_next_maintenance",
        "time_before_cooldown_sub",
        "time_before_cooldown_line",
        "rho",
        "a_ex",
        "v_ex",
        "q_ex",
        "p_ex",
        "a_or",
        "v_or",
        "q_or",
        "p_or",
        "load_p",
        "load_q",
        "load_v",
        "gen_p",
        "gen_q",
        "gen_v",
        "topo_vect",
        "line_status",
        "timestep_overflow",
        "gen_margin_up",
        "gen_margin_down",
        "curtailment_limit_effective",
    ]

    if self.shunts_data_available:
        attr_vect += ["_shunt_bus", "_shunt_v", "_shunt_q", "_shunt_p"]

    for attr_nm in attr_simple:
        setattr(other, attr_nm, getattr(self, attr_nm))

    for attr_nm in attr_vect:
        getattr(other, attr_nm)[:] = getattr(self, attr_nm)
def __copy__(self):
    """
    Shallow-copy support: regular attributes are duplicated via ``_aux_copy``,
    cached matrices and forecasts are shallow-copied, environment pointers are shared.
    """
    clone = type(self)(
        obs_env=self._obs_env,
        action_helper=self.action_helper,
        kwargs_env=self._ptr_kwargs_env,
    )

    # scalar and vector attributes
    self._aux_copy(other=clone)

    # caches and forecasts: a shallow copy is enough here
    shallow_attrs = (
        "_connectivity_matrix_",
        "_bus_connectivity_matrix_",
        "_dictionnarized",
        "_vectorized",
        # handles the forecasts here
        "_forecasted_grid_act",
        "_forecasted_inj",
        "_env_internal_params",
    )
    for attr_nm in shallow_attrs:
        setattr(clone, attr_nm, copy.copy(getattr(self, attr_nm)))

    return clone
def __deepcopy__(self, memodict=None):
    """
    Deep-copy support: regular attributes are duplicated via ``_aux_copy``,
    cached matrices and forecasts are deep-copied, environment pointers are shared.

    Parameters
    ----------
    memodict: ``dict``, optional
        Memo dictionary used by the :mod:`copy` protocol to keep track of already
        copied objects. A fresh one is created when not provided.
    """
    # NOTE: the previous ``memodict={}`` default was a mutable default argument;
    # ``copy.deepcopy`` mutates the memo, so the dict accumulated entries across
    # unrelated calls. Create a fresh dict per call instead.
    if memodict is None:
        memodict = {}

    res = type(self)(obs_env=self._obs_env,
                     action_helper=self.action_helper,
                     kwargs_env=self._ptr_kwargs_env)

    # copy regular attributes
    self._aux_copy(other=res)

    # just deepcopy
    res._connectivity_matrix_ = copy.deepcopy(self._connectivity_matrix_, memodict)
    res._bus_connectivity_matrix_ = copy.deepcopy(
        self._bus_connectivity_matrix_, memodict
    )
    res._dictionnarized = copy.deepcopy(self._dictionnarized, memodict)
    res._vectorized = copy.deepcopy(self._vectorized, memodict)

    # handles the forecasts here
    res._forecasted_grid_act = copy.deepcopy(self._forecasted_grid_act, memodict)
    res._forecasted_inj = copy.deepcopy(self._forecasted_inj, memodict)
    res._env_internal_params = copy.deepcopy(self._env_internal_params, memodict)

    return res
def state_of(
    self,
    _sentinel=None,
    load_id=None,
    gen_id=None,
    line_id=None,
    storage_id=None,
    substation_id=None,
):
    """
    Return the state of this observation on a given unique load, generator unit, powerline or substation.
    Only one of load, gen, line or substation should be filled.

    The query of these objects can only be done by id here (ie by giving the integer of the object in
    the backend). The :class:`ActionSpace` has some utilities to access them by name too.

    Parameters
    ----------
    _sentinel: ``None``
        Used to prevent positional parameters. Internal, do not use.

    load_id: ``int``
        ID of the load we want to inspect

    gen_id: ``int``
        ID of the generator we want to inspect

    line_id: ``int``
        ID of the powerline we want to inspect

    storage_id: ``int``
        ID of the storage unit we want to inspect

    substation_id: ``int``
        ID of the substation unit we want to inspect

    Returns
    -------
    res: :class:`dict`
        A dictionary with keys and value depending on which object needs to be inspected:

        - if a load is inspected, then the keys are:

            - "p" the active value consumed by the load
            - "q" the reactive value consumed by the load
            - "v" the voltage magnitude of the bus to which the load is connected
            - "theta" (optional) the voltage angle (in degree) of the bus to which the load is connected
            - "bus" on which bus the load is connected in the substation
            - "sub_id" the id of the substation to which the load is connected

        - if a generator is inspected, then the keys are:

            - "p" the active value produced by the generator
            - "q" the reactive value consumed by the generator
            - "v" the voltage magnitude of the bus to which the generator is connected
            - "theta" (optional) the voltage angle (in degree) of the bus to which the gen. is connected
            - "bus" on which bus the generator is connected in the substation
            - "sub_id" the id of the substation to which the generator is connected
            - "actual_dispatch" the actual dispatch implemented for this generator
            - "target_dispatch" the target dispatch (cumulation of all previously asked dispatch by the agent)
              for this generator
            - "curtailment", "curtailment_limit", "curtailment_limit_effective", "p_before_curtail",
              "margin_up" and "margin_down": the matching per-generator information

        - if a powerline is inspected then the keys are "origin" and "extremity" each being dictionary with keys:

            - "p" the active flow on line side (extremity or origin)
            - "q" the reactive flow on line side (extremity or origin)
            - "v" the voltage magnitude of the bus to which the line side (extremity or origin) is connected
            - "theta" (optional) the voltage angle (in degree) of the bus to which line side
              (extremity or origin) is connected
            - "bus" on which bus the line side (extremity or origin) is connected in the substation
            - "sub_id" the id of the substation to which the line side is connected
            - "a" the current flow on the line side (extremity or origin)

            In the case of a powerline, additional information are:

            - "maintenance": information about the maintenance operation (time of the next maintenance and duration
              of this next maintenance.
            - "cooldown_time": for how many timestep i am not supposed to act on the powerline due to cooldown
              (see :attr:`grid2op.Parameters.Parameters.NB_TIMESTEP_COOLDOWN_LINE` for more information)

        - if a storage unit is inspected, information are:

            - "storage_power": the power the unit actually produced / absorbed
            - "storage_charge": the state of the charge of the storage unit
            - "storage_power_target": the power production / absorption target
            - "storage_theta": (optional) the voltage angle of the bus at which the storage unit is connected
            - "bus": the bus (1 or 2) to which the storage unit is connected
            - "sub_id" : the id of the substation to which the storage unit is connected

        - if a substation is inspected, it returns the topology to this substation in a dictionary with keys:

            - "topo_vect": the representation of which object is connected where
            - "nb_bus": number of active buses in this substations
            - "cooldown_time": for how many timestep i am not supposed to act on the substation due to cooldown
              (see :attr:`grid2op.Parameters.Parameters.NB_TIMESTEP_COOLDOWN_SUB` for more information)

    Notes
    -----
    This function can only be used to retrieve the state of the element of the grid, and not the alarm sent
    or not, to the operator.

    Raises
    ------
    Grid2OpException
        If _sentinel is modified, or if None of the arguments are set or alternatively if 2 or more of the
        parameters are being set.

    """
    if _sentinel is not None:
        raise Grid2OpException(
            "action.effect_on should only be called with named argument."
        )

    if (
        load_id is None
        and gen_id is None
        and line_id is None
        and substation_id is None
        and storage_id is None
    ):
        raise Grid2OpException(
            "You ask the state of an object in a observation without specifying the object id. "
            'Please provide "load_id", "gen_id", "line_id", "storage_id" or '
            '"substation_id"'
        )

    if load_id is not None:
        if (
            gen_id is not None
            or line_id is not None
            or substation_id is not None
            or storage_id is not None
        ):
            raise Grid2OpException(ERROR_ONLY_SINGLE_EL)
        if load_id >= len(self.load_p):
            raise Grid2OpException(
                'There are no load of id "load_id={}" in this grid.'.format(load_id)
            )
        if load_id < 0:
            raise Grid2OpException("`load_id` should be a positive integer")

        res = {
            "p": self.load_p[load_id],
            "q": self.load_q[load_id],
            "v": self.load_v[load_id],
            "bus": self.topo_vect[self.load_pos_topo_vect[load_id]],
            "sub_id": self.load_to_subid[load_id],
        }
        if self.support_theta:
            res["theta"] = self.load_theta[load_id]
    elif gen_id is not None:
        if (
            line_id is not None
            or substation_id is not None
            or storage_id is not None
        ):
            raise Grid2OpException(ERROR_ONLY_SINGLE_EL)
        if gen_id >= len(self.gen_p):
            raise Grid2OpException(
                'There are no generator of id "gen_id={}" in this grid.'.format(
                    gen_id
                )
            )
        if gen_id < 0:
            raise Grid2OpException("`gen_id` should be a positive integer")

        res = {
            "p": self.gen_p[gen_id],
            "q": self.gen_q[gen_id],
            "v": self.gen_v[gen_id],
            "bus": self.topo_vect[self.gen_pos_topo_vect[gen_id]],
            "sub_id": self.gen_to_subid[gen_id],
            "target_dispatch": self.target_dispatch[gen_id],
            # fixed: this key previously (and wrongly) returned `target_dispatch`
            "actual_dispatch": self.actual_dispatch[gen_id],
            "curtailment": self.curtailment[gen_id],
            "curtailment_limit": self.curtailment_limit[gen_id],
            "curtailment_limit_effective": self.curtailment_limit_effective[gen_id],
            "p_before_curtail": self.gen_p_before_curtail[gen_id],
            "margin_up": self.gen_margin_up[gen_id],
            "margin_down": self.gen_margin_down[gen_id],
        }
        if self.support_theta:
            res["theta"] = self.gen_theta[gen_id]
    elif line_id is not None:
        if substation_id is not None or storage_id is not None:
            raise Grid2OpException(ERROR_ONLY_SINGLE_EL)
        if line_id >= len(self.p_or):
            raise Grid2OpException(
                'There are no powerline of id "line_id={}" in this grid.'.format(
                    line_id
                )
            )
        if line_id < 0:
            raise Grid2OpException("`line_id` should be a positive integer")

        res = {}
        # origin information
        res["origin"] = {
            "p": self.p_or[line_id],
            "q": self.q_or[line_id],
            "v": self.v_or[line_id],
            "a": self.a_or[line_id],
            "bus": self.topo_vect[self.line_or_pos_topo_vect[line_id]],
            "sub_id": self.line_or_to_subid[line_id],
        }
        if self.support_theta:
            res["origin"]["theta"] = self.theta_or[line_id]
        # extremity information
        res["extremity"] = {
            "p": self.p_ex[line_id],
            "q": self.q_ex[line_id],
            "v": self.v_ex[line_id],
            "a": self.a_ex[line_id],
            "bus": self.topo_vect[self.line_ex_pos_topo_vect[line_id]],
            "sub_id": self.line_ex_to_subid[line_id],
        }
        if self.support_theta:
            # fixed: the extremity angle was previously stored under
            # res["origin"]["theta"], overwriting the origin value
            res["extremity"]["theta"] = self.theta_ex[line_id]

        # maintenance information
        res["maintenance"] = {
            "next": self.time_next_maintenance[line_id],
            "duration_next": self.duration_next_maintenance[line_id],
        }

        # cooldown
        res["cooldown_time"] = self.time_before_cooldown_line[line_id]

    elif storage_id is not None:
        if substation_id is not None:
            raise Grid2OpException(ERROR_ONLY_SINGLE_EL)
        if storage_id >= self.n_storage:
            raise Grid2OpException(
                'There are no storage unit with id "storage_id={}" in this grid.'.format(
                    storage_id
                )
            )
        if storage_id < 0:
            raise Grid2OpException("`storage_id` should be a positive integer")

        res = {}
        res["storage_power"] = self.storage_power[storage_id]
        res["storage_charge"] = self.storage_charge[storage_id]
        res["storage_power_target"] = self.storage_power_target[storage_id]
        res["bus"] = self.topo_vect[self.storage_pos_topo_vect[storage_id]]
        res["sub_id"] = self.storage_to_subid[storage_id]
        if self.support_theta:
            res["theta"] = self.storage_theta[storage_id]
    else:
        if substation_id >= len(self.sub_info):
            raise Grid2OpException(
                'There are no substation of id "substation_id={}" in this grid.'.format(
                    substation_id
                )
            )

        beg_ = int(np.sum(self.sub_info[:substation_id]))
        end_ = int(beg_ + self.sub_info[substation_id])
        topo_sub = self.topo_vect[beg_:end_]
        if np.any(topo_sub > 0):
            nb_bus = (
                np.max(topo_sub[topo_sub > 0]) - np.min(topo_sub[topo_sub > 0]) + 1
            )
        else:
            nb_bus = 0
        res = {
            "topo_vect": topo_sub,
            "nb_bus": nb_bus,
            "cooldown_time": self.time_before_cooldown_sub[substation_id],
        }

    return res
@classmethod
def process_shunt_satic_data(cls):
    """
    INTERNAL

    Remove the shunt attributes from the serialized vector when the backend does
    not expose shunt data. (NB: the method name typo "satic" is kept: it is part
    of the public interface inherited from the base class.)
    """
    if not cls.shunts_data_available:
        # this is really important, otherwise things from grid2op base types will be affected
        cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect)
        cls.attr_list_set = copy.deepcopy(cls.attr_list_set)
        # remove the shunts from the list to vector
        # (the redundant `if el in ...` membership check before the try/except
        # was removed: try/remove/except already handles the absent case, and
        # this matches the style used in `process_grid2op_compat`)
        for el in ["_shunt_p", "_shunt_q", "_shunt_v", "_shunt_bus"]:
            try:
                cls.attr_list_vect.remove(el)
            except ValueError:
                # this attribute was not there in the first place
                pass
        cls.attr_list_set = set(cls.attr_list_vect)
    return super().process_shunt_satic_data()
@classmethod
def process_grid2op_compat(cls):
    """
    INTERNAL

    Adapt the class attributes (mainly ``attr_list_vect``) when the environment was
    generated with an older grid2op version: attributes that did not exist in that
    version are removed from the serialized vector so that stored episodes can be
    read back.
    """

    def _remove_from_vect(attrs):
        # this is really important, otherwise things from grid2op base types will be affected
        cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect)
        cls.attr_list_set = copy.deepcopy(cls.attr_list_set)
        for el in attrs:
            try:
                cls.attr_list_vect.remove(el)
            except ValueError:
                # this attribute was not there in the first place
                pass
        cls.attr_list_set = set(cls.attr_list_vect)

    if cls.glop_version == cls.BEFORE_COMPAT_VERSION:
        # oldest version: no storage and no curtailment available
        cls.set_no_storage()
        _remove_from_vect(
            [
                # storage
                "storage_charge",
                "storage_power_target",
                "storage_power",
                # curtailment
                "gen_p_before_curtail",
                "curtailment",
                "curtailment_limit",
            ]
        )

    if cls.glop_version < "1.6.0" or cls.glop_version == cls.BEFORE_COMPAT_VERSION:
        # the alarm feature did not exist before grid2op 1.6.0;
        # shunt serialization was also added in 1.6.0 (mainly for the EpisodeReboot)
        cls.dim_alarms = 0
        _remove_from_vect(
            [
                "is_alarm_illegal",
                "time_since_last_alarm",
                "last_alarm",
                "attention_budget",
                "was_alarm_used_after_game_over",
                "_shunt_p",
                "_shunt_q",
                "_shunt_v",
                "_shunt_bus",
            ]
        )

    if cls.glop_version < "1.6.4" or cls.glop_version == cls.BEFORE_COMPAT_VERSION:
        # "current_step", "max_step" were added in grid2op 1.6.4
        _remove_from_vect(["max_step", "current_step"])

    if cls.glop_version < "1.6.5" or cls.glop_version == cls.BEFORE_COMPAT_VERSION:
        # "delta_time" was added in grid2op 1.6.5
        # (the original comment here wrongly mentioned "current_step" / "max_step")
        _remove_from_vect(["delta_time"])

    if cls.glop_version < "1.6.6" or cls.glop_version == cls.BEFORE_COMPAT_VERSION:
        # "gen_margin_up", "gen_margin_down" were added in grid2op 1.6.6
        _remove_from_vect(
            [
                "gen_margin_up",
                "gen_margin_down",
                "curtailment_limit_effective",
            ]
        )

    if cls.glop_version < "1.9.1" or cls.glop_version == cls.BEFORE_COMPAT_VERSION:
        # alert attributes have been added in 1.9.1
        _remove_from_vect(
            [
                "active_alert",
                "attack_under_alert",
                "time_since_last_alert",
                "alert_duration",
                "total_number_of_alert",
                "time_since_last_attack",
                "was_alert_used_after_attack",
            ]
        )
def reset(self):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Resetting a single observation is unlikely to do what you want to do.

    Reset the :class:`BaseObservation` to a blank state, where everything is set to either ``None`` or to its default
    value.
    """
    self._is_done = True

    # 0. (line is disconnected) / 1. (line is connected)
    self.line_status[:] = True

    # topological vector
    self.topo_vect[:] = 0

    # generators information
    # NB ``np.nan`` is used instead of the legacy ``np.NaN`` alias (removed in numpy 2.0)
    self.gen_p[:] = np.nan
    self.gen_q[:] = np.nan
    self.gen_v[:] = np.nan
    # these were previously not reset even though they are allocated with np.empty
    self.gen_margin_up[:] = np.nan
    self.gen_margin_down[:] = np.nan

    # loads information
    self.load_p[:] = np.nan
    self.load_q[:] = np.nan
    self.load_v[:] = np.nan
    # lines origin information
    self.p_or[:] = np.nan
    self.q_or[:] = np.nan
    self.v_or[:] = np.nan
    self.a_or[:] = np.nan
    # lines extremity information
    self.p_ex[:] = np.nan
    self.q_ex[:] = np.nan
    self.v_ex[:] = np.nan
    self.a_ex[:] = np.nan
    # lines relative flows
    self.rho[:] = np.nan

    # cool down and reconnection time after hard overflow, soft overflow or cascading failure
    self.time_before_cooldown_line[:] = -1
    self.time_before_cooldown_sub[:] = -1
    self.time_next_maintenance[:] = -1
    self.duration_next_maintenance[:] = -1
    self.timestep_overflow[:] = 0

    # calendar data
    self.year = dt_int(1970)
    self.month = dt_int(0)
    self.day = dt_int(0)
    self.hour_of_day = dt_int(0)
    self.minute_of_hour = dt_int(0)
    self.day_of_week = dt_int(0)

    # forecasts
    self._forecasted_inj = []
    self._forecasted_grid_act = {}
    self._env_internal_params = {}

    # redispatching
    self.target_dispatch[:] = np.nan
    self.actual_dispatch[:] = np.nan

    # storage units
    self.storage_charge[:] = np.nan
    self.storage_power_target[:] = np.nan
    self.storage_power[:] = np.nan

    # curtailment (previously left to stale `np.empty` values on reset)
    self.gen_p_before_curtail[:] = np.nan
    self.curtailment[:] = np.nan
    self.curtailment_limit[:] = np.nan
    self.curtailment_limit_effective[:] = np.nan

    # to save up computation time
    self._dictionnarized = None
    self._connectivity_matrix_ = None
    self._bus_connectivity_matrix_ = None

    if self.shunts_data_available:
        self._shunt_p[:] = np.nan
        self._shunt_q[:] = np.nan
        self._shunt_v[:] = np.nan
        self._shunt_bus[:] = -1

    self.support_theta = False
    self.theta_or[:] = np.nan
    self.theta_ex[:] = np.nan
    self.load_theta[:] = np.nan
    self.gen_theta[:] = np.nan
    self.storage_theta[:] = np.nan

    # alarm feature
    self.is_alarm_illegal[:] = False
    self.time_since_last_alarm[:] = -1
    self.last_alarm[:] = False
    self.attention_budget[:] = 0
    self.was_alarm_used_after_game_over[:] = False

    # alert line feature
    self.active_alert[:] = False
    self.attack_under_alert[:] = 0
    self.time_since_last_alert[:] = 0
    self.alert_duration[:] = 0
    self.total_number_of_alert[:] = 0
    self.time_since_last_attack[:] = -1
    self.was_alert_used_after_attack[:] = 0

    # counter
    self.current_step = dt_int(0)
    self.max_step = dt_int(np.iinfo(dt_int).max)
    self.delta_time = dt_float(5.0)
def set_game_over(self, env=None):
    """
    Set the observation to the "game over" state:

    - all powerlines are disconnected
    - all loads are 0.
    - all prods are 0.
    - etc.

    Parameters
    ----------
    env: optional
        The environment that reached the game over state. When provided, it is used
        to retrieve the current date, the thermal limits, the step counters and
        whether the alarm feature was used; otherwise safe defaults are used.

    Notes
    -----
    As some attributes are initialized with `np.empty` it is recommended to reset here all attributes to avoid
    non deterministic behaviour.
    """
    self._is_done = True
    self.gen_p[:] = 0.0
    self.gen_q[:] = 0.0
    self.gen_v[:] = 0.0
    self.gen_margin_up[:] = 0.0
    self.gen_margin_down[:] = 0.0

    # loads information
    self.load_p[:] = 0.0
    self.load_q[:] = 0.0
    self.load_v[:] = 0.0
    # lines origin information
    self.p_or[:] = 0.0
    self.q_or[:] = 0.0
    self.v_or[:] = 0.0
    self.a_or[:] = 0.0
    # lines extremity information
    self.p_ex[:] = 0.0
    self.q_ex[:] = 0.0
    self.v_ex[:] = 0.0
    self.a_ex[:] = 0.0
    # lines relative flows
    self.rho[:] = 0.0
    # line status
    self.line_status[:] = False
    # topological vector
    self.topo_vect[:] = -1

    # forecasts
    self._forecasted_inj = []
    self._forecasted_grid_act = {}
    self._env_internal_params = {}

    # redispatching
    self.target_dispatch[:] = 0.0
    self.actual_dispatch[:] = 0.0

    # storage
    self.storage_charge[:] = 0.0
    self.storage_power_target[:] = 0.0
    self.storage_power[:] = 0.0

    # curtailment
    self.curtailment[:] = 0.0
    self.curtailment_limit[:] = 1.0
    self.curtailment_limit_effective[:] = 1.0
    self.gen_p_before_curtail[:] = 0.0

    # cooldown
    self.time_before_cooldown_line[:] = 0
    self.time_before_cooldown_sub[:] = 0
    self.time_next_maintenance[:] = -1
    self.duration_next_maintenance[:] = 0

    # overflow
    self.timestep_overflow[:] = 0

    if self.shunts_data_available:
        self._shunt_p[:] = 0.0
        self._shunt_q[:] = 0.0
        self._shunt_v[:] = 0.0
        self._shunt_bus[:] = -1

    if env is None:
        # set an old date (as i don't know anything about the env)
        # cast to dt_int for consistency with the "else" branch (and with __init__)
        self.year = dt_int(1970)
        self.month = dt_int(1)
        self.day = dt_int(1)
        self.hour_of_day = dt_int(0)
        self.minute_of_hour = dt_int(0)
        self.day_of_week = dt_int(1)
    else:
        # retrieve the date from the environment
        self.year = dt_int(env.time_stamp.year)
        self.month = dt_int(env.time_stamp.month)
        self.day = dt_int(env.time_stamp.day)
        self.hour_of_day = dt_int(env.time_stamp.hour)
        self.minute_of_hour = dt_int(env.time_stamp.minute)
        self.day_of_week = dt_int(env.time_stamp.weekday())

    if env is not None:
        self._thermal_limit[:] = env.get_thermal_limit()
    else:
        self._thermal_limit[:] = 0.

    # by convention, I say it's 0 if the grid is in total blackout
    self.theta_or[:] = 0.0
    self.theta_ex[:] = 0.0
    self.load_theta[:] = 0.0
    self.gen_theta[:] = 0.0
    self.storage_theta[:] = 0.0

    # counter
    if env is not None:
        self.current_step = dt_int(env.nb_time_step)
        self.max_step = dt_int(env.max_episode_duration())

    # stuff related to alarm
    self.is_alarm_illegal[:] = False
    self.time_since_last_alarm[:] = -1
    self.last_alarm[:] = False
    self.attention_budget[:] = 0
    if env is not None:
        self.was_alarm_used_after_game_over[:] = env._is_alarm_used_in_reward
    else:
        self.was_alarm_used_after_game_over[:] = False

    # related to alert
    self.active_alert[:] = False
    self.time_since_last_alert[:] = 0
    self.alert_duration[:] = 0
    self.total_number_of_alert[:] = 0
    self.time_since_last_attack[:] = -1
    # was_alert_used_after_attack not updated here in this case
    # attack_under_alert not updated here in this case
def __compare_stats(self, other, name):
    """
    Compare attribute ``name`` of ``self`` and ``other``.

    Returns ``True`` when both are ``None``, or when both are arrays with the same
    shape and dtype whose values match (exactly for non-floats, within
    ``_tol_equal`` for finite floats, with identical NaN / inf positions).
    """
    mine = getattr(self, name)
    theirs = getattr(other, name)

    # exactly one of the two is None => different
    if (mine is None) != (theirs is None):
        return False
    # both None => considered equal
    if mine is None:
        return True

    if mine.shape != theirs.shape:
        return False
    if mine.dtype != theirs.dtype:
        return False

    if np.issubdtype(mine.dtype, np.dtype(dt_float).type):
        # first special case: there can be Nan there
        fin_mine = np.isfinite(mine)
        fin_theirs = np.isfinite(theirs)
        if np.any(fin_mine != fin_theirs):
            return False
        # special case of floating points, otherwise vector are never equal
        return bool(
            np.all(np.abs(mine[fin_mine] - theirs[fin_theirs]) <= self._tol_equal)
        )

    return bool(np.all(mine == theirs))
def __eq__(self, other):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Test the equality of two observations.

    Two observations are equal when they describe the same grid class, share the
    same calendar data (year, month, day, day of week, hour, minute) and when every
    attribute listed in ``_attr_eq`` matches element-wise (float attributes are
    compared with the ``_tol_equal`` tolerance, see ``__compare_stats``).

    Note that if the two observations do not come from the same kind of grid
    (different number of loads, generators, lines, or a different ordering) they
    are declared different. If two distinct backends happen to expose byte-identical
    grid descriptions, this method cannot tell them apart.

    Parameters
    ----------
    other: :class:`BaseObservation`
        The observation "self" is compared to.

    Returns
    -------
    ``True`` if the observations are equal, ``False`` otherwise.
    """
    # calendar data must match exactly (same order of checks as before)
    calendar_attrs = (
        "year",
        "month",
        "day",
        "day_of_week",
        "hour_of_day",
        "minute_of_hour",
    )
    for attr_nm in calendar_attrs:
        if getattr(self, attr_nm) != getattr(other, attr_nm):
            return False

    # check that the underlying grid is the same in both instances
    if not type(self).same_grid_class(type(other)):
        return False

    # every compared attribute must match (short-circuits on first difference)
    return all(self.__compare_stats(other, stat_nm) for stat_nm in self._attr_eq)
def __sub__(self, other):
    """
    computes the difference between two observation, and return an observation corresponding to
    this difference.

    This can be used to easily plot the difference between two observations at different step for
    example.

    Parameters
    ----------
    other: :class:`BaseObservation`
        The observation subtracted from ``self`` (must describe the same grid class).

    Returns
    -------
    res: :class:`BaseObservation`
        A deep copy of ``self`` whose ``_attr_eq`` attributes hold the element-wise
        difference (for booleans: ``True`` where both observations agree).

    Raises
    ------
    Grid2OpException
        If the two observations do not come from the same grid class.
    """
    same_grid = type(self).same_grid_class(type(other))
    if not same_grid:
        raise Grid2OpException(
            "Cannot compare to observation not coming from the same powergrid."
        )
    # temporarily detach the environment pointers: they must not be deep-copied
    tmp_obs_env = self._obs_env
    self._obs_env = None  # keep aside the backend
    _ptr_kwargs_env = self._ptr_kwargs_env
    self._ptr_kwargs_env = None  # keep aside the pointer to the env kwargs
    try:
        res = copy.deepcopy(self)
    finally:
        # restore the pointers even if the deepcopy raised, otherwise `self`
        # would be left in a corrupted state (previously there was no finally)
        self._obs_env = tmp_obs_env
        self._ptr_kwargs_env = _ptr_kwargs_env

    for stat_nm in self._attr_eq:
        me_ = getattr(self, stat_nm)
        oth_ = getattr(other, stat_nm)
        if me_ is None and oth_ is None:
            diff_ = None
        elif me_ is not None and oth_ is None:
            diff_ = me_
        elif me_ is None and oth_ is not None:
            if oth_.dtype == dt_bool:
                diff_ = np.full(oth_.shape, fill_value=False, dtype=dt_bool)
            else:
                diff_ = -oth_
        else:
            # both are not None
            if oth_.dtype == dt_bool:
                # booleans: True where the two observations agree
                diff_ = ~np.logical_xor(me_, oth_)
            else:
                diff_ = me_ - oth_
        res.__setattr__(stat_nm, diff_)
    return res
def where_different(self, other):
    """
    Returns the difference between two observation.

    Parameters
    ----------
    other:
        Other action to compare

    Returns
    -------
    diff_: :class:`grid2op.Observation.BaseObservation`
        The observation showing the difference between `self` and `other`

    attr_nm: ``list``
        List of string representing the names of the different attributes. It's [] if the two observations
        are identical.

    """
    delta = self - other
    differing = []
    for attr_nm in self._attr_eq:
        arr = getattr(delta, attr_nm)
        if arr.dtype == dt_bool:
            # boolean diffs store "agreement" flags: any False marks a difference
            if np.any(~arr):
                differing.append(attr_nm)
        elif (arr.shape[0] > 0) and np.max(np.abs(arr)):
            # numeric diffs: any non-zero magnitude marks a difference
            differing.append(attr_nm)
    return delta, differing
@abstractmethod
def update(self, env, with_forecast=True):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        This is carried out automatically by the environment in `env.step`

    Update the actual instance of BaseObservation with the new received value from the environment.

    An observation is a description of the powergrid perceived by an agent. The agent takes his decision based on
    the current observation and the past rewards.

    This method `update` receive complete detailed information about the powergrid, but that does not mean an
    agent sees everything.
    For example, it is possible to derive this class to implement some noise in the generator or load, or flows to
    mimic sensor inaccuracy.

    It is also possible to give fake information about the topology, the line status etc.

    In the Grid2Op framework it's also through the observation that the agent has access to some forecast (the way
    forecasts are handled depends on how this class is implemented). For example, forecast data (retrieved thanks
    to `chronics_handler`) are processed, but can be processed differently. One can apply load / production
    forecast to each _grid state, or make a forecast for one "reference" _grid state valid for a whole day and
    update this one only etc.
    All these different mechanisms can be implemented in Grid2Op framework by overloading the `update` observation
    method.

    This class is really what a dispatcher observes from it environment.
    It can also include some temperatures, nebulosity, wind etc. can also be included in this class.

    Notes
    -----
    We strongly recommend to call :attr:`BaseObservation.reset` when implementing this function.

    """
    pass
    def connectivity_matrix(self, as_csr_matrix=False):
        """
        Computes and return the "connectivity matrix" `con_mat`.

        Let "dim_topo := 2 * n_line + n_prod + n_conso + n_storage" (the total number of elements on the grid)

        It is a matrix of size dim_topo, dim_topo, with values 0 or 1.

        For two objects (lines extremity, generator unit, load) i,j :

        - if i and j are connected on the same substation:

            - if `conn_mat[i,j] = 0` it means the objects id'ed i and j are not connected to the same bus.
            - if `conn_mat[i,j] = 1` it means the objects id'ed i and j are connected to the same bus

        - if i and j are not connected on the same substation then`conn_mat[i,j] = 0` except if i and j are
          the two extremities of the same power line, in this case `conn_mat[i,j] = 1` (if the powerline is
          in service or 0 otherwise).

        An object that is not disconnected is connected to itself: the diagonal entry of every
        connected object is 1 (the rows / columns of disconnected objects are all 0).

        Parameters
        ----------
        as_csr_matrix: ``bool``
            Whether to return the matrix as a scipy csr sparse matrix or as a dense
            ``numpy`` array (default).

        Returns
        -------
        res: ``numpy.ndarray``, shape:dim_topo,dim_topo, dtype:float
            The connectivity matrix, as defined above

        Notes
        -------
        Matrix can be either a sparse matrix or a dense matrix depending on the argument `as_csr_matrix`

        An object, is not disconnected, is always connected to itself.

        Examples
        ---------
        If you want to know if powerline 0 is connected at its "extremity" side with the load of id 0 you can do

        .. code-block:: python

            import grid2op
            env = grid2op.make()
            obs = env.reset()

            # retrieve the id of extremity of powerline 1:
            id_lineex_0 = obs.line_ex_pos_topo_vect[0]
            id_load_1 = obs.load_pos_topo_vect[0]

            # get the connectivity matrix
            connectivity_matrix = obs.connectivity_matrix()

            # know if the objects are connected or not
            are_connected = connectivity_matrix[id_lineex_0, id_load_1]
            # as `are_connected` is 1.0 then these objects are indeed connected

        And now, supposes we do an action that changes the topology of the substation to which these
        two objects are connected, then we get (same example continues)

        .. code-block:: python

            topo_action = env.action_space({"set_bus": {"substations_id": [(1, [1,1,1,2,2,2])]}})
            print(topo_action)
            # This action will:
            #   - NOT change anything to the injections
            #   - NOT perform any redispatching action
            #   - NOT force any line status
            #   - NOT switch any line status
            #   - NOT switch anything in the topology
            #   - Set the bus of the following element:
            #     - assign bus 1 to line (extremity) 0 [on substation 1]
            #     - assign bus 1 to line (origin) 2 [on substation 1]
            #     - assign bus 1 to line (origin) 3 [on substation 1]
            #     - assign bus 2 to line (origin) 4 [on substation 1]
            #     - assign bus 2 to generator 0 [on substation 1]
            #     - assign bus 2 to load 0 [on substation 1]

            obs, reward, done, info = env.step(topo_action)
            # and now retrieve the matrix
            connectivity_matrix = obs.connectivity_matrix()

            # know if the objects are connected or not
            are_connected = connectivity_matrix[id_lineex_0, id_load_1]
            # as `are_connected` is 0.0 then these objects are not connected anymore
            # this is visible when you "print" the action (see above) in the two following lines:
            #     - assign bus 1 to line (extremity) 0 [on substation 1]
            #     - assign bus 2 to load 0 [on substation 1]
            # -> one of them is on bus 1 [line (extremity) 0] and the other on bus 2 [load 0]
        """
        # (re)compute only when nothing is cached or when the cached format
        # (dense vs csr) does not match the requested one
        if (
            self._connectivity_matrix_ is None
            or (
                isinstance(self._connectivity_matrix_, csr_matrix) and not as_csr_matrix
            )
            or (
                (not isinstance(self._connectivity_matrix_, csr_matrix))
                and as_csr_matrix
            )
        ):
            # self._connectivity_matrix_ = np.zeros(shape=(self.dim_topo, self.dim_topo), dtype=dt_float)
            # fill it by block for the objects
            beg_ = 0
            end_ = 0
            row_ind = []
            col_ind = []
            for sub_id, nb_obj in enumerate(self.sub_info):
                # it must be a vanilla python integer, otherwise it's not handled by some backend
                # especially if written in c++
                nb_obj = int(nb_obj)
                end_ += nb_obj
                # tmp = np.zeros(shape=(nb_obj, nb_obj), dtype=dt_float)
                for obj1 in range(nb_obj):
                    my_bus = self.topo_vect[beg_ + obj1]
                    if my_bus == -1:
                        # object is disconnected, nothing is done
                        continue
                    # connect an object to itself
                    row_ind.append(beg_ + obj1)
                    col_ind.append(beg_ + obj1)
                    # connect the other objects to it
                    for obj2 in range(obj1 + 1, nb_obj):
                        my_bus2 = self.topo_vect[beg_ + obj2]
                        if my_bus2 == -1:
                            # object is disconnected, nothing is done
                            continue
                        if my_bus == my_bus2:
                            # objects are on the same bus
                            # tmp[obj1, obj2] = 1
                            # tmp[obj2, obj1] = 1
                            row_ind.append(beg_ + obj2)
                            col_ind.append(beg_ + obj1)
                            row_ind.append(beg_ + obj1)
                            col_ind.append(beg_ + obj2)
                beg_ += nb_obj
            # both ends of a line are connected together (if line is connected)
            for q_id in range(self.n_line):
                if self.line_status[q_id]:
                    # if powerline is connected connect both its side
                    row_ind.append(self.line_or_pos_topo_vect[q_id])
                    col_ind.append(self.line_ex_pos_topo_vect[q_id])
                    row_ind.append(self.line_ex_pos_topo_vect[q_id])
                    col_ind.append(self.line_or_pos_topo_vect[q_id])
            row_ind = np.array(row_ind).astype(dt_int)
            col_ind = np.array(col_ind).astype(dt_int)
            if not as_csr_matrix:
                self._connectivity_matrix_ = np.zeros(
                    shape=(self.dim_topo, self.dim_topo), dtype=dt_float
                )
                # NOTE: `.T` on a 1d numpy array is a no-op, this is plain fancy indexing
                self._connectivity_matrix_[row_ind.T, col_ind] = 1.0
            else:
                data = np.ones(row_ind.shape[0], dtype=dt_float)
                self._connectivity_matrix_ = csr_matrix(
                    (data, (row_ind, col_ind)),
                    shape=(self.dim_topo, self.dim_topo),
                    dtype=dt_float,
                )
        return self._connectivity_matrix_
def _aux_fun_get_bus(self):
"""see in bus_connectivity matrix"""
bus_or = self.topo_vect[self.line_or_pos_topo_vect]
bus_ex = self.topo_vect[self.line_ex_pos_topo_vect]
connected = (bus_or > 0) & (bus_ex > 0)
bus_or = bus_or[connected]
bus_ex = bus_ex[connected]
bus_or = self.line_or_to_subid[connected] + (bus_or - 1) * self.n_sub
bus_ex = self.line_ex_to_subid[connected] + (bus_ex - 1) * self.n_sub
unique_bus = np.unique(np.concatenate((bus_or, bus_ex)))
unique_bus = np.sort(unique_bus)
nb_bus = unique_bus.shape[0]
return nb_bus, unique_bus, bus_or, bus_ex
    def bus_connectivity_matrix(self, as_csr_matrix=False, return_lines_index=False):
        """
        If we denote by `nb_bus` the total number bus of the powergrid (you can think of a "bus" being
        a "node" if you represent a powergrid as a graph [mathematical object, not a plot] with the lines
        being the "edges"].

        The `bus_connectivity_matrix` will have a size nb_bus, nb_bus and will be made of 0 and 1.

        If `bus_connectivity_matrix[i,j] = 1` then at least a power line connects bus i and bus j.
        Otherwise, nothing connects it.

        .. warning::
            The matrix returned by this function has not a fixed size. Its
            number of nodes and edges can change depending on the state of the grid.
            See :ref:`get-the-graph-gridgraph` for more information.

            Also, note that when "done=True" this matrix has size (1, 1)
            and contains only 0.

        Parameters
        ----------
        as_csr_matrix: ``bool``
            Whether to return the bus connectivity matrix as a sparse matrix (csr format) or as a
            dense matrix. By default it's ``False`` meaning a dense matrix is returned.

        return_lines_index: ``bool``
            Whether to also return the bus index associated to both side of each powerline.

        Returns
        -------
        res: ``numpy.ndarray``, shape: (nb_bus, nb_bus) dtype:float
            The bus connectivity matrix defined above.

        Notes
        ------
        By convention we say that a bus is connected to itself. So the diagonal of this matrix is 1.

        Examples
        --------
        Here is how you can use this function:

        .. code-block:: python

            bus_bus_graph, (line_or_bus, line_ex_bus) = obs.bus_connectivity_matrix(return_lines_index=True)

            # bus_bus_graph is the matrix described above.
            # line_or_bus[0] give the id of the bus to which the origin side of powerline 0 is connected
            # line_ex_bus[0] give the id of the bus to which the extremity side of powerline 0 is connected
            # (NB: if the powerline is disconnected, both are -1)
            # this means that if line 0 is connected: bus_bus_graph[line_or_bus[0], line_ex_bus[0]] = 1
            # and bus_bus_graph[line_ex_bus[0], line_or_bus[0]] = 1
            # (of course you can replace 0 with any integer `0 <= l_id < obs.n_line`
        """
        if self._is_done:
            # "game over" state: return a degenerate (1, 1) all-zero matrix
            self._bus_connectivity_matrix_ = None
            nb_bus = 1
            if as_csr_matrix:
                tmp_ = csr_matrix((1,1), dtype=dt_float)
            else:
                tmp_ = np.zeros(shape=(nb_bus, nb_bus), dtype=dt_float)
            if not return_lines_index:
                res = tmp_
            else:
                cls = type(self)
                lor_bus = np.zeros(cls.n_line, dtype=dt_int)
                lex_bus = np.zeros(cls.n_line, dtype=dt_int)
                # NOTE(review): this is a flat 3-tuple (mat, lor, lex) while the
                # "not done" branch below returns the nested shape (mat, (lor, lex)).
                # Callers relying on one shape would break on the other -- confirm intended.
                res = (tmp_, lor_bus, lex_bus)
            return res
        # recompute when nothing is cached, when the cached format (dense vs csr)
        # does not match the requested one, or when the line indexes are needed
        # (the `tmplate` mapping below only exists after a recomputation)
        if (
            self._bus_connectivity_matrix_ is None
            or (
                isinstance(self._bus_connectivity_matrix_, csr_matrix)
                and not as_csr_matrix
            )
            or (
                (not isinstance(self._bus_connectivity_matrix_, csr_matrix))
                and as_csr_matrix
            )
            or return_lines_index
        ):
            nb_bus, unique_bus, bus_or, bus_ex = self._aux_fun_get_bus()
            # convert the bus id (from 0 to 2 * n_sub) to the row / column in the matrix (number between 0 and nb_bus)
            all_indx = np.arange(nb_bus)
            tmplate = np.arange(np.max(unique_bus) + 1)
            tmplate[unique_bus] = all_indx
            bus_or_in_mat = tmplate[bus_or]
            bus_ex_in_mat = tmplate[bus_ex]
            if not as_csr_matrix:
                self._bus_connectivity_matrix_ = np.zeros(
                    shape=(nb_bus, nb_bus), dtype=dt_float
                )
                self._bus_connectivity_matrix_[bus_or_in_mat, bus_ex_in_mat] = 1.0
                self._bus_connectivity_matrix_[bus_ex_in_mat, bus_or_in_mat] = 1.0
                # a bus is connected to itself (diagonal set to 1)
                self._bus_connectivity_matrix_[all_indx, all_indx] = 1.0
            else:
                data = np.ones(
                    nb_bus + bus_or_in_mat.shape[0] + bus_ex_in_mat.shape[0],
                    dtype=dt_float,
                )
                row_ind = np.concatenate((all_indx, bus_or_in_mat, bus_ex_in_mat))
                col_ind = np.concatenate((all_indx, bus_ex_in_mat, bus_or_in_mat))
                self._bus_connectivity_matrix_ = csr_matrix(
                    (data, (row_ind, col_ind)), shape=(nb_bus, nb_bus), dtype=dt_float
                )
        if not return_lines_index:
            res = self._bus_connectivity_matrix_
        else:
            # bus or and bus ex are defined above is return_line_index is True
            lor_bus, _ = self._get_bus_id(
                self.line_or_pos_topo_vect, self.line_or_to_subid
            )
            lex_bus, _ = self._get_bus_id(
                self.line_ex_pos_topo_vect, self.line_ex_to_subid
            )
            # NOTE(review): for a disconnected powerline `lor_bus` / `lex_bus` contain
            # negative values, and `tmplate[-1]` wraps around to the *last* bus index
            # instead of the -1 advertised in the docstring example above -- TODO confirm.
            res = (self._bus_connectivity_matrix_, (tmplate[lor_bus], tmplate[lex_bus]))
        return res
def _get_bus_id(self, id_topo_vect, sub_id):
"""
get the bus id with the internal convention that:
- if object on bus 1, its bus is `sub_id`
- if object on bus 2, its bus is `sub_id` + n_sub
- if object on bus 3, its bus is `sub_id` + 2 * n_sub
- etc.
"""
bus_id = 1 * self.topo_vect[id_topo_vect]
connected = bus_id > 0
bus_id[connected] = sub_id[connected] + (bus_id[connected] - 1) * self.n_sub
return bus_id, connected
    def flow_bus_matrix(self, active_flow=True, as_csr_matrix=False):
        """
        A matrix of size "nb bus" "nb bus". Each row and columns represent a "bus" of the grid ("bus" is a power
        system word that for computer scientist means "nodes" if the powergrid is represented as a graph).
        See the note in case of a grid in "game over" mode.

        The diagonal will sum the power produced and consumed at each bus.

        The other element of each **row** of this matrix will be the flow of power from the bus represented
        by the line i to the bus represented by column j.

        .. warning::
            The matrix returned by this function has not a fixed size. Its
            number of nodes and edges can change depending on the state of the grid.
            See :ref:`get-the-graph-gridgraph` for more information.

            Also, note that when "done=True" this matrix has size (1, 1)
            and contains only 0.

        Notes
        ------
        When the observation is in a "done" state (*eg* there has been a game over) then this function returns a
        "matrix" of dimension (1,1) [yes, yes it's a scalar] with only one element that is 0.

        In this case, `load_bus`, `prod_bus`, `stor_bus`, `lor_bus` and `lex_bus` are vectors full of 0.

        Parameters
        ----------
        active_flow: ``bool``
            Whether to get the active flow (in MW) or the reactive flow (in MVAr). Defaults to active flow.

        as_csr_matrix: ``bool``
            Whether to retrieve the results as a scipy csr sparse matrix or as a dense matrix (default)

        Returns
        -------
        res: ``matrix``
            Which can either be a sparse matrix or a dense matrix depending on the value of the argument
            "as_csr_matrix".

        mappings: ``tuple``
            The mapping that makes the correspondence between each object and the bus to which it is connected.
            It is made of 4 elements: (load_bus, prod_bus, stor_bus, lor_bus, lex_bus).

            For example if `load_bus[i] = 14` it means that the load with id `i` is connected to the
            bus 14. If `load_bus[i] = -1` then the object is disconnected.

        Examples
        --------
        Here is how you can use this function:

        .. code-block:: python

            flow_mat, (load, prod, stor, ind_lor, ind_lex) = obs.flow_bus_matrix()

            # flow_mat is the matrix described above.

        Lots of information can be deduce from this matrix. For example if you want to know
        how much power goes from one bus say bus `i` to another bus (say bus `j` )
        you can look at the associated coefficient `flow_mat[i,j]` which will also be related to the
        flow on the origin (or extremity) side of the powerline connecting bus `i` to bus `j`

        You can also know how much power
        (total generation + total storage discharging - total load - total storage charging - )
        is injected at each bus `i`
        by looking at the `i` th diagonal coefficient.

        Another use would be to check if the current powergrid state (as seen by grid2op) meet
        the Kirchhoff circuit laws (conservation of energy), by doing the sum (row by row) of this
        matrix. `flow_mat.sum(axis=1)`
        """
        if self._is_done:
            # game over: degenerate (1, 1) empty matrix and all-zero mappings
            flow_mat = csr_matrix((1,1), dtype=dt_float)
            if not as_csr_matrix:
                flow_mat = flow_mat.toarray()
            cls = type(self)
            load_bus = np.zeros(cls.n_load, dtype=dt_int)
            prod_bus = np.zeros(cls.n_gen, dtype=dt_int)
            stor_bus = np.zeros(cls.n_storage, dtype=dt_int)
            lor_bus = np.zeros(cls.n_line, dtype=dt_int)
            lex_bus = np.zeros(cls.n_line, dtype=dt_int)
            return flow_mat, (load_bus, prod_bus, stor_bus, lor_bus, lex_bus)
        # global bus ids ("grid2op convention") for every element of the grid
        nb_bus, unique_bus, bus_or, bus_ex = self._aux_fun_get_bus()
        prod_bus, prod_conn = self._get_bus_id(
            self.gen_pos_topo_vect, self.gen_to_subid
        )
        load_bus, load_conn = self._get_bus_id(
            self.load_pos_topo_vect, self.load_to_subid
        )
        stor_bus, stor_conn = self._get_bus_id(
            self.storage_pos_topo_vect, self.storage_to_subid
        )
        lor_bus, lor_conn = self._get_bus_id(
            self.line_or_pos_topo_vect, self.line_or_to_subid
        )
        lex_bus, lex_conn = self._get_bus_id(
            self.line_ex_pos_topo_vect, self.line_ex_to_subid
        )
        if self.shunts_data_available:
            sh_bus = 1 * self._shunt_bus
            # NOTE(review): this expression simplifies to `shunt_to_subid * sh_bus`,
            # which does not match the `sub_id + (local_bus - 1) * n_sub` convention
            # used by `_get_bus_id` for every other element -- verify for shunts
            # connected on local bus 2.
            sh_bus[sh_bus > 0] = (
                self.shunt_to_subid[sh_bus > 0] * (sh_bus[sh_bus > 0] - 1)
                + self.shunt_to_subid[sh_bus > 0]
            )
            sh_conn = self._shunt_bus != -1
        # convert the bus to be "id of row or column in the matrix" instead of the bus id with
        # the "grid2op convention"
        all_indx = np.arange(nb_bus)
        tmplate = np.arange(np.max(unique_bus) + 1)
        tmplate[unique_bus] = all_indx
        prod_bus = tmplate[prod_bus]
        load_bus = tmplate[load_bus]
        lor_bus = tmplate[lor_bus]
        lex_bus = tmplate[lex_bus]
        stor_bus = tmplate[stor_bus]
        # select the per-element vectors (MW or MVAr) depending on `active_flow`
        if active_flow:
            prod_vect = self.gen_p
            load_vect = self.load_p
            or_vect = self.p_or
            ex_vect = self.p_ex
            stor_vect = self.storage_power
            if self.shunts_data_available:
                sh_vect = self._shunt_p
        else:
            prod_vect = self.gen_q
            load_vect = self.load_q
            or_vect = self.q_or
            ex_vect = self.q_ex
            # storage units are modeled without reactive power here
            stor_vect = np.zeros(self.n_storage, dtype=dt_float)
            if self.shunts_data_available:
                sh_vect = self._shunt_q
        nb_lor = np.sum(lor_conn)
        nb_lex = np.sum(lex_conn)
        # `data` layout: [one slot per bus (diagonal)] + [or side of lines] + [ex side of lines]
        data = np.zeros(nb_bus + nb_lor + nb_lex, dtype=dt_float)
        # if two generators / loads / storage unit are connected at the same bus
        # this is why i go with matrix product and sparse matrices
        nb_prod = np.sum(prod_conn)
        if nb_prod:
            bus_prod = np.arange(prod_bus[prod_conn].max() + 1)
            map_mat = csr_matrix(
                (np.ones(nb_prod), (prod_bus[prod_conn], np.arange(nb_prod))),
                shape=(bus_prod.shape[0], nb_prod),
                dtype=dt_float,
            )
            data[bus_prod] += map_mat.dot(prod_vect[prod_conn])
        # handle load
        nb_load = np.sum(load_conn)
        if nb_load:
            bus_load = np.arange(load_bus[load_conn].max() + 1)
            map_mat = csr_matrix(
                (np.ones(nb_load), (load_bus[load_conn], np.arange(nb_load))),
                shape=(bus_load.shape[0], nb_load),
                dtype=dt_float,
            )
            data[bus_load] -= map_mat.dot(load_vect[load_conn])
        # handle storage
        nb_stor = np.sum(stor_conn)
        if nb_stor:
            bus_stor = np.arange(stor_bus[stor_conn].max() + 1)
            map_mat = csr_matrix(
                (np.ones(nb_stor), (stor_bus[stor_conn], np.arange(nb_stor))),
                shape=(bus_stor.shape[0], nb_stor),
                dtype=dt_float,
            )
            data[bus_stor] -= map_mat.dot(stor_vect[stor_conn])
        if self.shunts_data_available:
            # handle shunts
            nb_shunt = np.sum(sh_conn)
            if nb_shunt:
                bus_shunt = np.arange(sh_bus[sh_conn].max() + 1)
                map_mat = csr_matrix(
                    (np.ones(nb_shunt), (sh_bus[sh_conn], np.arange(nb_shunt))),
                    shape=(bus_shunt.shape[0], nb_shunt),
                    dtype=dt_float,
                )
                data[bus_shunt] -= map_mat.dot(sh_vect[sh_conn])
        # powerlines
        data[np.arange(nb_lor) + nb_bus] -= or_vect[lor_conn]
        data[np.arange(nb_lex) + nb_bus + nb_lor] -= ex_vect[lex_conn]
        row_ind = np.concatenate((all_indx, lor_bus[lor_conn], lex_bus[lex_conn]))
        col_ind = np.concatenate((all_indx, lex_bus[lex_conn], lor_bus[lor_conn]))
        # duplicated (row, col) entries (e.g. parallel lines) are summed by csr_matrix
        res = csr_matrix(
            (data, (row_ind, col_ind)), shape=(nb_bus, nb_bus), dtype=dt_float
        )
        if not as_csr_matrix:
            res = res.toarray()
        return res, (load_bus, prod_bus, stor_bus, lor_bus, lex_bus)
def _add_edges_simple(self, vector, attr_nm, lor_bus, lex_bus, graph, fun_reduce=None):
"""add the edges, when the attributes are common for the all the powerline"""
dict_ = {}
for lid, val in enumerate(vector):
if not self.line_status[lid]:
# see issue https://github.com/rte-france/Grid2Op/issues/433
continue
tup_ = (lor_bus[lid], lex_bus[lid])
if not tup_ in dict_:
# data is not in the graph, I insert it
dict_[tup_] = val
else:
# data is already in the graph, so I need to either "reduce" the 2 data (if
# they are not the same) or "do nothing"
# in the case i need to "reduce" the two and I did not provide a "fun_reduce"
# I throw an error
if fun_reduce is None:
if val != dict_[tup_]:
raise BaseObservationError(f"Impossible to merge data of type '{attr_nm}'. There are "
f"some parrallel lines merged into the same edges "
f"but I don't know how to merge their data.")
else:
dict_[tup_] = fun_reduce(dict_[tup_], val)
networkx.set_edge_attributes(graph, dict_, attr_nm)
def _add_edges_multi(self, vector_or, vector_ex, attr_nm, lor_bus, lex_bus, graph):
"""
Utilities to add attributes of the edges of the graph in networkx, because edges are not necessarily
"oriented" the same way (so we need to reverse or / ex if networkx oriented it in the same way)
"""
dict_or_glop = {}
for lid, val in enumerate(vector_or):
if not self.line_status[lid]:
# see issue https://github.com/rte-france/Grid2Op/issues/433
continue
tup_ = (lor_bus[lid], lex_bus[lid])
if tup_ in dict_or_glop:
dict_or_glop[tup_] += val
else:
dict_or_glop[tup_] = val
dict_ex_glop = {}
for lid, val in enumerate(vector_ex):
if not self.line_status[lid]:
# see issue https://github.com/rte-france/Grid2Op/issues/433
continue
tup_ = (lor_bus[lid], lex_bus[lid])
if tup_ in dict_ex_glop:
dict_ex_glop[tup_] += val
else:
dict_ex_glop[tup_] = val
dict_or = {}
dict_ex = {}
for (k1, k2), val in dict_or_glop.items():
if k1 < k2:
# networkx put it in the right "direction"
dict_or[(k1, k2)] = val
else:
# networkx and grid2op do not share the same "direction"
dict_or[(k2, k1)] = dict_ex_glop[(k1, k2)]
for (k1, k2), val in dict_ex_glop.items():
if k1 < k2:
# networkx put it in the right "direction"
dict_ex[(k1, k2)] = val
else:
# networkx and grid2op do not share the same "direction"
dict_ex[(k2, k1)] = dict_or_glop[(k1, k2)]
networkx.set_edge_attributes(graph, dict_or, "{}_or".format(attr_nm))
networkx.set_edge_attributes(graph, dict_ex, "{}_ex".format(attr_nm))
    def as_networkx(self) -> networkx.Graph:
        """Old name for :func:`BaseObservation.get_energy_graph`,
        will be removed in the future.

        .. deprecated::
            Prefer :func:`BaseObservation.get_energy_graph`: this method is a
            plain alias kept only for backward compatibility.
        """
        return self.get_energy_graph()
    def get_energy_graph(self) -> networkx.Graph:
        """
        Convert this observation as a networkx graph. This graph is the graph "seen" by
        "the electron" / "the energy" of the power grid.

        Notes
        ------
        The resulting graph is "frozen" this means that you cannot add / remove attribute on nodes or edges, nor add /
        remove edges or nodes.

        This graphs has the following properties:

        - it counts as many nodes as the number of buses of the grid
        - it counts less edges than the number of lines of the grid (two lines connecting the same buses are "merged"
          into one single edge - this is the case for parallel line, that are hence "merged" into the same edge)
        - nodes (represents "buses" of the grid) have attributes:

            - `p`: the active power produced at this node (negative means the sum of power produce minus power absorbed
              is negative) in MW
            - `q`: the reactive power produced at this node in MVAr
            - `v`: the voltage magnitude at this node
            - `cooldown`: how much longer you need to wait before being able to merge / split or change this node
            - 'sub_id': the id of the substation to which it is connected (typically between `0` and `obs.n_sub - 1`)
            - (optional) `theta`: the voltage angle (in degree) at this nodes
            - `cooldown` : the time you need to wait (in number of steps) before being able to act on the
              substation to which this bus is connected.

        - edges have attributes too (in this modeling an edge might represent more than one powerline, all
          parallel powerlines are represented by the same edge):

            - `nb_connected`: number of connected powerline represented by this edge.
            - `rho`: the relative flow on this powerline (in %) (sum over all powerlines))
            - `cooldown`: the number of step you need to wait before being able to act on this powerline (max over all powerlines)
            - `thermal_limit`: maximum flow allowed on the the powerline (sum over all powerlines)
            - `timestep_overflow`: number of time steps during which the powerline is on overflow (max over all powerlines)
            - `p_or`: active power injected at this node at the "origin side" (in MW) (sum over all the powerlines).
            - `p_ex`: active power injected at this node at the "extremity side" (in MW) (sum over all the powerlines).
            - `q_or`: reactive power injected at this node at the "origin side" (in MVAr) (sum over all the powerlines).
            - `q_ex`: reactive power injected at this node at the "extremity side" (in MVAr) (sum over all the powerlines).
            - `a_or`: current flow injected at this node at the "origin side" (in A) (sum over all the powerlines) (sum over all powerlines).
            - `a_ex`: current flow injected at this node at the "extremity side" (in A) (sum over all the powerlines) (sum over all powerlines).
            - `p`: active power injected at the "or" side (equal to p_or) (in MW)
            - `v_or`: voltage magnitude at the "or" bus (in kV)
            - `v_ex`: voltage magnitude at the "ex" bus (in kV)
            - (optional) `theta_or`: voltage angle at the "or" bus (in deg)
            - (optional) `theta_ex`: voltage angle at the "ex" bus (in deg)
            - `time_next_maintenance`: see :attr:`BaseObservation.time_next_maintenance` (min over all powerline)
            - `duration_next_maintenance` see :attr:`BaseObservation.duration_next_maintenance` (max over all powerlines)
            - `sub_id_or`: id of the substation of the "or" side of the powerlines
            - `sub_id_ex`: id of the substation of the "ex" side of the powerlines
            - `node_id_or`: id of the node (in this graph) of the "or" side of the powergraph
            - `node_id_ex`: id of the node (in this graph) of the "ex" side of the powergraph
            - `bus_or`: on which bus [1 or 2] is this powerline connected to at its "or" substation
            - `bus_ex`: on which bus [1 or 2] is this powerline connected to at its "ex" substation

        .. danger::
            **IMPORTANT NOTE** edges represents "fusion" of 1 or more powerlines. This graph is intended to be
            a Graph and not a MultiGraph on purpose. This is why sometimes some attributes of the edges are not
            the same of the attributes of a given powerlines. For example, in the case of 2 parrallel powerlines
            (say powerlines 3 and 4)
            going from bus 10 to bus 12 (for example), the edges graph.edges[(10, 12)]["nb_connected"] will be `2`
            and you will get `graph.edges[(10, 12)]["p_or"] = obs.p_or[3] + obs.p_or[4]`

        .. warning::
            The graph returned by this function has not a fixed size. Its
            number of nodes and edges can change depending on the state of the grid.
            See :ref:`get-the-graph-gridgraph` for more information.

            Also, note that when "done=True" this graph has only one node and
            no edge.

        .. note::
            The graph returned by this function is "frozen" to prevent its modification. If you really want to modify
            it you can "unfroze" it.

        Returns
        -------
        graph: ``networkx graph``
            A possible representation of the observation as a networkx graph

        Examples
        --------
        The following code explains how to check that a grid meet the kirchoffs law (conservation of energy)

        .. code-block:: python

            # create an environment and get the observation
            import grid2op
            env_name = "l2rpn_case14_sandbox"  # or any other name
            env = grid2op.make(env_name)
            obs = env.reset()

            # retrieve the networkx graph
            graph = obs.get_energy_graph()

            # perform the check for every nodes
            for node_id in graph.nodes:
                # retrieve power (active and reactive) produced at this node
                p_ = graph.nodes[node_id]["p"]
                q_ = graph.nodes[node_id]["q"]

                # get the edges
                edges = graph.edges(node_id)
                p_lines = 0
                q_lines = 0
                # get the power that is "evacuated" at each nodes on all the edges connecting it to the other nodes
                # of the network
                for (k1, k2) in edges:
                    # now retrieve the active / reactive power injected at this node (looking at either *_or or *_ex
                    # depending on the direction of the powerline: remember that the "origin" is always the lowest
                    # bus id.
                    if k1 < k2:
                        # the current inspected node is the lowest, so on the "origin" side
                        p_lines += graph.edges[(k1, k2)]["p_or"]
                        q_lines += graph.edges[(k1, k2)]["q_or"]
                    else:
                        # the current node is the largest, so on the "extremity" side
                        p_lines += graph.edges[(k1, k2)]["p_ex"]
                        q_lines += graph.edges[(k1, k2)]["q_ex"]
                assert abs(p_lines - p_) <= 1e-5, "error for kirchoff's law for graph for P"
                assert abs(q_lines - q_) <= 1e-5, "error for kirchoff's law for graph for Q"
        """
        cls = type(self)
        # TODO save this graph somewhere, in a self._as_networkx attributes for example
        # bus-by-bus active and reactive flow matrices + element -> node mappings
        mat_p, (load_bus, gen_bus, stor_bus, lor_bus, lex_bus) = self.flow_bus_matrix(
            active_flow=True, as_csr_matrix=True
        )
        mat_q, *_ = self.flow_bus_matrix(active_flow=False, as_csr_matrix=True)
        # for efficiency
        mat_p = mat_p.tocoo()
        # bus voltage
        bus_v = np.zeros(mat_p.shape[0])
        # i need to put lor_bus[self.line_status] otherwise pandapower might not detect a line
        # is disconnected and output the "wrong" voltage / theta in the graph
        # see issue https://github.com/rte-france/Grid2Op/issues/389
        bus_v[lor_bus[self.line_status]] = self.v_or[self.line_status]
        bus_v[lex_bus[self.line_status]] = self.v_ex[self.line_status]
        bus_theta = np.zeros(mat_p.shape[0])
        bus_subid = np.zeros(mat_p.shape[0], dtype=dt_int)
        bus_subid[lor_bus[self.line_status]] = cls.line_or_to_subid[self.line_status]
        bus_subid[lex_bus[self.line_status]] = cls.line_ex_to_subid[self.line_status]
        if self.support_theta:
            bus_theta[lor_bus[self.line_status]] = self.theta_or[self.line_status]
            bus_theta[lex_bus[self.line_status]] = self.theta_ex[self.line_status]
        # bus active injection
        bus_p = mat_p.diagonal().copy()
        # remove the diagonal so only bus-to-bus flows remain as graph edges
        mat_p.setdiag(0.0)
        mat_p.eliminate_zeros()
        # create the networkx graph
        try:
            graph = networkx.from_scipy_sparse_array(mat_p, edge_attribute="p")
        except AttributeError:
            # oldest version of scipy did not have the `from_scipy_sparse_array` function
            graph = networkx.from_scipy_sparse_matrix(mat_p, edge_attribute="p")
        if not len(graph.edges):
            # degenerate case (e.g. "done" state): single-node graph, nothing to annotate
            return graph
        # add the nodes attributes
        networkx.set_node_attributes(
            graph, {el: val for el, val in enumerate(bus_p)}, "p"
        )
        networkx.set_node_attributes(
            graph, {el: val for el, val in enumerate(mat_q.diagonal())}, "q"
        )
        networkx.set_node_attributes(
            graph, {el: val for el, val in enumerate(bus_v)}, "v"
        )
        networkx.set_node_attributes(
            graph, {el: val for el, val in enumerate(bus_subid)}, "sub_id"
        )
        if self.support_theta:
            networkx.set_node_attributes(
                graph, {el: val for el, val in enumerate(bus_theta)}, "theta"
            )
        networkx.set_node_attributes(graph,
                                     {el: self.time_before_cooldown_sub[val] for el, val in enumerate(bus_subid)},
                                     "cooldown")
        # add the edges attributes
        self._add_edges_multi(self.p_or, self.p_ex, "p", lor_bus, lex_bus, graph)
        self._add_edges_multi(self.q_or, self.q_ex, "q", lor_bus, lex_bus, graph)
        self._add_edges_multi(self.a_or, self.a_ex, "a", lor_bus, lex_bus, graph)
        if self.support_theta:
            self._add_edges_multi(
                self.theta_or, self.theta_ex, "theta", lor_bus, lex_bus, graph
            )
        self._add_edges_simple(self.v_or, "v_or", lor_bus, lex_bus, graph)
        self._add_edges_simple(self.v_ex, "v_ex", lor_bus, lex_bus, graph)
        self._add_edges_simple(self.rho, "rho", lor_bus, lex_bus, graph,
                               fun_reduce=max)
        self._add_edges_simple(
            self.time_before_cooldown_line, "cooldown", lor_bus, lex_bus, graph,
            fun_reduce=max
        )
        self._add_edges_simple(
            self._thermal_limit, "thermal_limit", lor_bus, lex_bus, graph,
            fun_reduce=lambda x, y: x+y
        )
        self._add_edges_simple(
            self.time_next_maintenance, "time_next_maintenance", lor_bus, lex_bus,
            graph,
            fun_reduce=min)
        self._add_edges_simple(
            self.duration_next_maintenance, "duration_next_maintenance", lor_bus,
            lex_bus, graph,
            fun_reduce=max)
        # summing 1 * line_status over parallel lines counts them
        self._add_edges_simple(1 * self.line_status, "nb_connected", lor_bus, lex_bus, graph,
                               fun_reduce=lambda x, y: x + y)
        self._add_edges_simple(
            self.timestep_overflow, "timestep_overflow", lor_bus, lex_bus, graph,
            fun_reduce=max
        )
        self._add_edges_simple(
            self.line_or_to_subid,
            "sub_id_or", lor_bus, lex_bus, graph
        )
        self._add_edges_simple(
            self.line_ex_to_subid,
            "sub_id_ex", lor_bus, lex_bus, graph
        )
        self._add_edges_simple(
            lor_bus,
            "node_id_or", lor_bus, lex_bus, graph
        )
        self._add_edges_simple(
            lex_bus,
            "node_id_ex", lor_bus, lex_bus, graph
        )
        self._add_edges_simple(
            self.line_or_bus,
            "bus_or", lor_bus, lex_bus, graph
        )
        self._add_edges_simple(
            self.line_ex_bus,
            "bus_ex", lor_bus, lex_bus, graph
        )
        # extra layer of security: prevent accidental modification of this graph
        networkx.freeze(graph)
        return graph
def _aux_get_connected_buses(self):
res = np.full(2 * self.n_sub, fill_value=False)
global_bus = type(self).local_bus_to_global(self.topo_vect,
self._topo_vect_to_sub)
res[np.unique(global_bus[global_bus != -1])] = True
return res
def _aux_add_edges(self,
el_ids,
cls,
el_global_bus,
nb_el,
el_connected,
el_name,
edges_prop,
graph
):
edges_el = [(el_ids[el_id], cls.n_sub + el_global_bus[el_id]) if el_connected[el_id] else None
for el_id in range(nb_el)
]
li_el_edges = [(*edges_el[el_id],
{"id": el_id,
"type": f"{el_name}_to_bus"})
for el_id in range(nb_el)
if el_connected[el_id]]
if edges_prop is not None:
ed_num = 0 # edge number
for el_id in range(nb_el):
if not el_connected[el_id]:
continue
for prop_nm, prop_vect in edges_prop:
li_el_edges[ed_num][-1][prop_nm] = prop_vect[el_id]
ed_num += 1
graph.add_edges_from(li_el_edges)
def _aux_add_el_to_comp_graph(self,
graph,
first_id,
el_names_vect,
el_name,
nb_el,
el_bus=None,
el_to_sub_id=None,
nodes_prop=None,
edges_prop=None):
if el_bus is None and el_to_sub_id is not None:
raise Grid2OpException("el_bus is None and el_to_sub_id is not None")
if el_bus is not None and el_to_sub_id is None:
raise Grid2OpException("el_bus is not None and el_to_sub_id is None")
cls = type(self)
# add the nodes for the elements of this types
el_ids = first_id + np.arange(nb_el)
# add the properties for these nodes
li_el_node = [(el_ids[el_id],
{"id": el_id,
"type": f"{el_name}",
"name": el_names_vect[el_id]
}
)
for el_id in range(nb_el)]
if el_bus is not None:
el_global_bus = cls.local_bus_to_global(el_bus,
el_to_sub_id)
el_connected = np.array(el_global_bus) >= 0
for el_id in range(nb_el):
li_el_node[el_id][-1]["connected"] = el_connected[el_id]
if nodes_prop is not None:
for el_id in range(nb_el):
for prop_nm, prop_vect in nodes_prop:
li_el_node[el_id][-1][prop_nm] = prop_vect[el_id]
graph.add_nodes_from(li_el_node)
graph.graph[f"{el_name}_nodes_id"] = el_ids
if el_bus is None and el_to_sub_id is None:
return el_ids
# add the edges
self._aux_add_edges(el_ids,
cls,
el_global_bus,
nb_el,
el_connected,
el_name,
edges_prop,
graph)
return el_ids
def _aux_add_buses(self, graph, cls, first_id):
bus_ids = first_id + np.arange(2 * cls.n_sub)
conn_bus = self._aux_get_connected_buses()
bus_li = [
(bus_ids[bus_id],
{"id": bus_id,
"global_id": bus_id,
"local_id": type(self).global_bus_to_local_int(bus_id, None),
"type": "bus",
"connected": conn_bus[bus_id]}
)
for bus_id in range(2 * cls.n_sub)
]
graph.add_nodes_from(bus_li)
edge_bus_li = [(bus_id,
bus_id % cls.n_sub,
{"type": "bus_to_substation"})
for id_, bus_id in enumerate(bus_ids)]
graph.add_edges_from(edge_bus_li)
graph.graph["bus_nodes_id"] = bus_ids
return bus_ids
def _aux_add_loads(self, graph, cls, first_id):
edges_prop=[
("p", self.load_p),
("q", self.load_q),
("v", self.load_v)
]
if self.support_theta:
edges_prop.append(("theta", self.load_theta))
load_ids = self._aux_add_el_to_comp_graph(graph,
first_id,
cls.name_load,
"load",
cls.n_load,
self.load_bus,
cls.load_to_subid,
nodes_prop=None,
edges_prop=edges_prop)
return load_ids
def _aux_add_gens(self, graph, cls, first_id):
nodes_prop = [("target_dispatch", self.target_dispatch),
("actual_dispatch", self.actual_dispatch),
("gen_p_before_curtail", self.gen_p_before_curtail),
("curtailment_mw", self.curtailment_mw),
("curtailment", self.curtailment),
("curtailment_limit", self.curtailment_limit),
("gen_margin_up", self.gen_margin_up),
("gen_margin_down", self.gen_margin_down)
] # todo class attributes gen_max_ramp_up etc.
edges_prop=[
("p", - self.gen_p),
("q", - self.gen_q),
("v", self.gen_v)
]
if self.support_theta:
edges_prop.append(("theta", self.gen_theta))
gen_ids = self._aux_add_el_to_comp_graph(graph,
first_id,
cls.name_gen,
"gen",
cls.n_gen,
self.gen_bus,
cls.gen_to_subid,
nodes_prop=nodes_prop, # todo cls attributes
edges_prop=edges_prop)
return gen_ids
def _aux_add_storages(self, graph, cls, first_id):
nodes_prop = [("storage_charge", self.storage_charge),
("storage_power_target", self.storage_power_target)]
# TODO class attr in nodes_prop: storageEmax etc.
edges_prop=[("p", self.storage_power)]
if self.support_theta:
edges_prop.append(("theta", self.storage_theta))
sto_ids = self._aux_add_el_to_comp_graph(graph,
first_id,
cls.name_storage,
"storage",
cls.n_storage,
self.storage_bus,
cls.storage_to_subid,
nodes_prop=nodes_prop,
edges_prop=edges_prop
)
return sto_ids
def _aux_add_edge_line_side(self,
cls,
graph,
bus,
sub_id,
line_node_ids,
side,
p_vect,
q_vect,
v_vect,
a_vect,
theta_vect):
global_bus = cls.local_bus_to_global(bus, sub_id)
conn_ = np.array(global_bus) >= 0
edges_prop = [
("p", p_vect),
("q", q_vect),
("v", v_vect),
("a", a_vect),
("side", [side for _ in range(p_vect.size)])
]
if theta_vect is not None:
edges_prop.append(("theta", theta_vect))
self._aux_add_edges(line_node_ids,
cls,
global_bus,
cls.n_line,
conn_,
"line",
edges_prop,
graph)
def _aux_add_lines(self, graph, cls, first_id):
nodes_prop = [("rho", self.rho),
("connected", self.line_status),
("timestep_overflow", self.timestep_overflow),
("time_before_cooldown_line", self.time_before_cooldown_line),
("time_next_maintenance", self.time_next_maintenance),
("duration_next_maintenance", self.duration_next_maintenance),
]
# only add the nodes, not the edges right now
lin_ids = self._aux_add_el_to_comp_graph(graph,
first_id,
cls.name_line,
"line",
cls.n_line,
el_bus=None,
el_to_sub_id=None,
nodes_prop=nodes_prop,
edges_prop=None
)
# add "or" edges
self._aux_add_edge_line_side(cls,
graph,
self.line_or_bus,
cls.line_or_to_subid,
lin_ids,
"or",
self.p_or,
self.q_or,
self.v_or,
self.a_or,
self.theta_or if self.support_theta else None)
# add "ex" edges
self._aux_add_edge_line_side(cls,
graph,
self.line_ex_bus,
cls.line_ex_to_subid,
lin_ids,
"ex",
self.p_ex,
self.q_ex,
self.v_ex,
self.a_ex,
self.theta_ex if self.support_theta else None)
return lin_ids
def _aux_add_shunts(self, graph, cls, first_id):
nodes_prop = None
# TODO in grid2Op in general: have the "tap" modeling
# for shunt
edges_prop=[("p", self._shunt_p),
("q", self._shunt_q),
("v", self._shunt_v),
]
sto_ids = self._aux_add_el_to_comp_graph(graph,
first_id,
cls.name_shunt,
"shunt",
cls.n_shunt,
self._shunt_bus,
cls.shunt_to_subid,
nodes_prop=nodes_prop,
edges_prop=edges_prop
)
return sto_ids
    def get_elements_graph(self) -> networkx.DiGraph:
        """This function returns the "elements graph" as a networkx object.
        .. seealso::
            This object is extensively described in the documentation, see :ref:`elmnt-graph-gg` for more information.
        Basically, each "element" of the grid (element = a substation, a bus, a load, a generator,
        a powerline, a storate unit or a shunt) is represented by a node in this graph.
        There might be some edges between the nodes representing buses and the nodes representing
        substations, indicating "this bus is part of this substation".
        There might be some edges between the nodes representing load / generator / powerline /
        storage unit / shunt and the nodes representing buses, indicating "this load / generator /
        powerline / storage unit is connected to this bus".
        Nodes and edges of this graph have different attributes depending on the underlying element
        they represent. For a detailed description, please refer to the documentation:
        :ref:`elmnt-graph-gg`
        Examples
        ---------
        You can use, for example to "check" Kirchoff Current Law (or at least that no energy is created
        at none of the buses):
        .. code-block:: python
            import grid2op
            env_name = "l2rpn_case14_sandbox" # or any other name...
            env = grid2op.make(env_name)
            obs = env.reset()
            # retrieve the graph and do something
            elmnt_graph = obs.get_elements_graph()
            for bus_id, node_id in enumerate(elmnt_graph.graph["bus_nodes_id"]):
                sum_p = 0.
                sum_q = 0.
                for ancestor in elmnt_graph.predecessors(node_id):
                    # ancestor is the id of a node representing an element connected to this
                    # bus
                    this_edge = elmnt_graph.edges[(ancestor, node_id)]
                    if "p" in this_edge:
                        sum_p += this_edge["p"]
                    if "q" in this_edge:
                        sum_q += this_edge["q"]
                assert abs(sum_p) <= 1e-5, f"error for node {node_id} representing bus {bus_id}: {abs(sum_p)} != 0."
                assert abs(sum_q) <= 1e-5, f"error for node {node_id} representing bus {bus_id}: {abs(sum_q)} != 0."
        Returns
        -------
        networkx.DiGraph
            The "elements graph", see :ref:`elmnt-graph-gg` .
        """
        cls = type(self)
        # init the graph with "grid level" attributes
        graph = networkx.DiGraph(max_step=self.max_step,
                                 current_step=self.current_step,
                                 delta_time=self.delta_time,
                                 year=self.year,
                                 month=self.month,
                                 day=self.day,
                                 hour_of_day=self.hour_of_day,
                                 minute_of_hour=self.minute_of_hour,
                                 day_of_week=self.day_of_week,
                                 time_stamp=self.get_time_stamp()
                                 )
        # add the substations (node ids 0 .. n_sub - 1)
        sub_li = [(sub_id,
                   {"id": sub_id,
                    "type": "substation",
                    "name": cls.name_sub[sub_id],
                    "cooldown": self.time_before_cooldown_sub[sub_id]}
                   ) for sub_id in range(cls.n_sub)]
        graph.add_nodes_from(sub_li)
        graph.graph["substation_nodes_id"] = np.arange(cls.n_sub)
        # handle the buses (each element type gets ids right after the previous one)
        bus_ids = self._aux_add_buses(graph, cls, cls.n_sub)
        # handle loads
        load_ids = self._aux_add_loads(graph, cls, bus_ids[-1] + 1)
        # handle gens
        gen_ids = self._aux_add_gens(graph, cls, load_ids[-1] + 1)
        # handle lines
        line_ids = self._aux_add_lines(graph, cls, gen_ids[-1] + 1)
        # handle storages (a grid may have none, hence the size check below)
        sto_ids = self._aux_add_storages(graph, cls, line_ids[-1] + 1)
        next_id = line_ids[-1] + 1
        if sto_ids.size > 0:
            next_id = sto_ids[-1] + 1
        # handle shunts
        if cls.shunts_data_available:
            shunt_ids = self._aux_add_shunts(graph, cls, next_id)
            if shunt_ids.size > 0:
                # NOTE: next_id is not read after this point; kept for symmetry /
                # in case more element types are appended later
                next_id = shunt_ids[-1] + 1
        # and now we use the data above to put the right properties to the nodes for the buses
        bus_v_theta = {}
        for bus_id in bus_ids:
            # any element connected to the bus carries the bus voltage on its edge
            li_pred = list(graph.predecessors(n=bus_id))
            if li_pred:
                edge = (li_pred[0], bus_id)
                bus_v_theta[bus_id] = {"connected": True, "v": graph.edges[edge]["v"]}
                if "theta" in graph.edges[edge]:
                    bus_v_theta[bus_id]["theta"] = graph.edges[edge]["theta"]
            else:
                bus_v_theta[bus_id] = {"connected": False}
        networkx.set_node_attributes(graph, bus_v_theta)
        # extra layer of security: prevent accidental modification of this graph
        networkx.freeze(graph)
        return graph
def get_forecasted_inj(self, time_step=1):
"""
This function allows you to retrieve directly the "forecast" injections for the step `time_step`.
We remind that the environment, under some conditions, can produce these forecasts automatically.
This function allows to retrieve what has been forecast.
Parameters
----------
time_step: ``int``
The horizon of the forecast (given in number of time steps)
Returns
-------
gen_p_f: ``numpy.ndarray``
The forecast generators active values
gen_v_f: ``numpy.ndarray``
The forecast generators voltage setpoins
load_p_f: ``numpy.ndarray``
The forecast load active consumption
load_q_f: ``numpy.ndarray``
The forecast load reactive consumption
"""
if time_step >= len(self._forecasted_inj):
raise NoForecastAvailable(
"Forecast for {} timestep ahead is not possible with your chronics.".format(
time_step
)
)
t, a = self._forecasted_inj[time_step]
prod_p_f = np.full(self.n_gen, fill_value=np.NaN, dtype=dt_float)
prod_v_f = np.full(self.n_gen, fill_value=np.NaN, dtype=dt_float)
load_p_f = np.full(self.n_load, fill_value=np.NaN, dtype=dt_float)
load_q_f = np.full(self.n_load, fill_value=np.NaN, dtype=dt_float)
if "prod_p" in a["injection"]:
prod_p_f = a["injection"]["prod_p"]
if "prod_v" in a["injection"]:
prod_v_f = a["injection"]["prod_v"]
if "load_p" in a["injection"]:
load_p_f = a["injection"]["load_p"]
if "load_q" in a["injection"]:
load_q_f = a["injection"]["load_q"]
tmp_arg = ~np.isfinite(prod_p_f)
prod_p_f[tmp_arg] = self.gen_p[tmp_arg]
tmp_arg = ~np.isfinite(prod_v_f)
prod_v_f[tmp_arg] = self.gen_v[tmp_arg]
tmp_arg = ~np.isfinite(load_p_f)
load_p_f[tmp_arg] = self.load_p[tmp_arg]
tmp_arg = ~np.isfinite(load_q_f)
load_q_f[tmp_arg] = self.load_q[tmp_arg]
return prod_p_f, prod_v_f, load_p_f, load_q_f
def get_time_stamp(self):
"""
Get the time stamp of the current observation as a `datetime.datetime` object
"""
res = datetime.datetime(
year=self.year,
month=self.month,
day=self.day,
hour=self.hour_of_day,
minute=self.minute_of_hour,
)
return res
    def simulate(self, action, time_step=1):
        """
        This method is used to simulate the effect of an action on a forecast powergrid state. This forecast
        state is built upon the current observation.
        The forecast are pre computed by the environment.
        More concretely, if not deactivated by the environment
        (see :func:`grid2op.Environment.BaseEnv.deactivate_forecast`) and the environment has the capacity to
        generate these forecasts (which is the case in most grid2op environments) this function will simulate
        the effect of doing an action now and return the "next state" (often the state you would get at
        time `t + 5` mins) if you were to do the action at this step.
        It has the same return
        value as the :func:`grid2op.Environment.BaseEnv.step` function.
        .. seealso::
            :func:`BaseObservation.get_forecast_env` and :func:`BaseObservation.get_env_from_external_forecasts`
        .. seealso::
            :ref:`model_based_rl`
        .. versionadded:: 1.9.0
            If the data of the :class:`grid2op.Environment.Environment` you are using supports it
            (**ie** you can access multiple steps ahead forecasts), then you can
            now "chain" the simulate calls.
        Examples
        ---------
        If forecast are available, you can use this function like this:
        .. code-block:: python
            import grid2op
            env_name = "l2rpn_case14_sandbox"
            env = grid2op.make(env_name)
            obs = env.reset()
            an_action = env.action_space() # or any other action
            simobs, sim_reward, sim_done, sim_info = obs.simulate(an_action)
            # in this case, simobs will be an APPROXIMATION of the observation you will
            # get after performing `an_action`
            # obs, *_ = env.step(an_action)
        And if your environment allows to use "multiple steps ahead forecast" you can even
        chain the calls like this:
        .. code-block:: python
            import grid2op
            env_name = "l2rpn_case14_sandbox"
            env = grid2op.make(env_name)
            obs = env.reset()
            an_action = env.action_space() # or any other action
            simobs1, sim_reward1, sim_done1, sim_info1 = obs.simulate(an_action)
            another_action = env.action_space() # or any other action
            simobs2, sim_reward2, sim_done2, sim_info2 = simobs1.simulate(another_action)
            # in this case, simobs will be an APPROXIMATION of the observation you will
            # get after performing `an_action` and then `another_action`:
            # *_ = env.step(an_action)
            # obs, *_ = env.step(another_action)
        Parameters
        ----------
        action: :class:`grid2op.Action.BaseAction`
            The action to simulate
        time_step: ``int``
            The time step of the forecasted grid to perform the action on. If no forecast are available for this
            time step, a :class:`grid2op.Exceptions.NoForecastAvailable` is thrown.
        Raises
        ------
        :class:`grid2op.Exceptions.NoForecastAvailable`
            if no forecast are available for the time_step queried.
        Returns
        -------
        simulated_observation: :class:`grid2op.Observation.BaseObservation`
            agent's observation of the current environment after the application of the action "act" on the
            the current state.
        reward: ``float``
            amount of reward returned after previous action
        done: ``bool``
            whether the episode has ended, in which case further step() calls will return undefined results
        info: ``dict``
            contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
        Notes
        ------
        This is a simulation in the sense that the "next grid state" is not the real grid state you will get. As you
        don't know the future, the "injections you forecast for the next step" will not be the real injection you
        will get in the next step.
        Also, in some circumstances, the "Backend" (ie the powerflow) used to do the simulation may not be the
        same one as the one used by the environment. This is to model a real fact: as accurate your powerflow is,
        it does
        not model all the reality (*"all models are wrong"*). Having a different solver for the environment (
        "the reality") than the one used to anticipate the impact of the action (this "simulate" function)
        is a way to represent this fact.
        Examples
        --------
        To simulate what would be the effect of the action "act" if you were to take this action at this step
        you can do the following:
        .. code-block:: python
            import grid2op
            # retrieve an environment
            env_name = "l2rpn_case14_sandbox" # or any other name
            env = grid2op.make(env_name)
            # retrieve an observation, this is the same for all observations
            obs = env.reset()
            # and now you can simulate the effect of doing nothing in the next time step
            act = env.action_space() # this can be any action that grid2op understands
            simulated_obs, simulated_reward, simulated_done, simulated_info = obs.simulate(act)
            # `simulated_obs` will be the "observation" after the application of action `act` on the
            # " forecast of the grid state (it will be the "forecast state at time t+5mins usually)
            # `simulated_reward` will be the reward for the same action on the same forecast state
            # `simulated_done` will indicate whether or not the simulation ended up in a "game over"
            # `simulated_info` gives extra information on this forecast state
        You can now chain the calls to simulate (if your environment supports it)
        .. code-block:: python
            import grid2op
            env_name = "l2rpn_case14_sandbox" # or any other name
            env = grid2op.make(env_name)
            obs = env.reset()
            act_1 = ...  # a grid2op action
            # you can do that (if your environment provide forecasts more tha 1 step ahead):
            sim_obs_1, *_ = obs.simulate(act_1)
            act_2 = ...  # a grid2op action
            # but also (if your environment provide forecast more than 2 steps ahead)
            sim_obs_2, *_ = sim_obs_1.simulate(act_2)
            act_3 = ...  # a grid2op action
            # but also (if your environment provide forecast more than 3 steps ahead)
            sim_obs_3, *_ = sim_obs_2.simulate(act_3)
            # you get the idea!
        .. note::
            The code above is closely related to the :func:`BaseObservation.get_forecast_env` and a
            very similar result (up to some corner cases beyond the scope of this documentation)
            could be achieved with:
            .. code-block:: python
                import grid2op
                env_name = "l2rpn_case14_sandbox" # or any other name
                env = grid2op.make(env_name)
                obs = env.reset()
                forecast_env = obs.get_forecast_env()
                f_obs = forecast_env.reset()
                act_1 = ...  # a grid2op action
                f_obs_1, *_ = forecast_env.step(act_1)
                # f_obs_1 should be sim_obs_1
                act_2 = ...  # a grid2op action
                f_obs_2, *_ = forecast_env.step(act_2)
                # f_obs_2 should be sim_obs_2
                act_3 = ...  # a grid2op action
                f_obs_3, *_ = forecast_env.step(act_3)
                # f_obs_3 should be sim_obs_3
        Finally, another possible use of this method is to get a "glimpse" of the
        effect of an action if you delay it a maximum, you can also use the `time_step`
        parameters.
        .. code-block:: python
            import grid2op
            env_name = "l2rpn_case14_sandbox" # or any other name
            env = grid2op.make(env_name)
            obs = env.reset()
            act = ...  # a grid2op action
            sim_obs_1, *_ = obs.simulate(act, time_step=1)
            sim_obs_2, *_ = obs.simulate(act, time_step=2)
            sim_obs_3, *_ = obs.simulate(act, time_step=3)
            # in this case:
            # + sim_obs_1 give the results after 1 step (if your agent survives)
            #   of applying the action `act`
            # + sim_obs_2 give the results after 2 steps (if your agent survives)
            #   of applying the action `act`
            # + sim_obs_3 give the results after 3 steps (if your agent survives)
            #   of applying the action `act`
        This is an approximation as the "time is not simulated". Here you only make 1 simulation
        of the effect of your action regardless of the horizon you want to target. It is related
        to the :ref:`simulator_page` if used this way.
        This might be used to chose the "best" time at which you could do an action for example.
        There is no coupling between the different simulation that you perform here.
        """
        # without an action space we cannot turn the forecast injections into an action
        if self.action_helper is None:
            raise NoForecastAvailable(
                "No forecasts are available for this instance of BaseObservation "
                "(no action_space "
                "and no simulated environment are set)."
            )
        if self._obs_env is None:
            raise NoForecastAvailable(
                'This observation has no "environment used for simulation" (_obs_env) is not created. '
                "This is the case if you loaded this observation from a disk (for example using "
                "EpisodeData) "
                'or used a Runner with multi processing with the "add_detailed_output=True" '
                "flag or even if you use an environment with a non serializable backend. "
                "This is a feature of grid2op: it does not require backends to be serializable."
            )
        if time_step < 0:
            raise NoForecastAvailable("Impossible to forecast in the past.")
        if time_step >= len(self._forecasted_inj):
            raise NoForecastAvailable(
                "Forecast for {} timestep(s) ahead is not possible with your chronics."
                "".format(time_step)
            )
        # build (and cache in _forecasted_grid_act) the "injection action" that
        # moves the simulated environment to the forecast state at this horizon
        if time_step not in self._forecasted_grid_act:
            timestamp, inj_forecasted = self._forecasted_inj[time_step]
            self._forecasted_grid_act[time_step] = {
                "timestamp": timestamp,
                "inj_action": self.action_helper(inj_forecasted),
            }
        timestamp = self._forecasted_grid_act[time_step]["timestamp"]
        inj_action = self._forecasted_grid_act[time_step]["inj_action"]
        # reset the (shared) simulation environment to the forecast state
        self._obs_env.init(
            inj_action,
            time_stamp=timestamp,
            obs=self,
            time_step=time_step,
        )
        sim_obs, *rest = self._obs_env.simulate(action)
        # deep copy: _obs_env reuses its observation object between calls
        sim_obs = copy.deepcopy(sim_obs)
        if self._forecasted_inj:
            # allow "chain" to simulate
            sim_obs.action_helper = self.action_helper  # no copy !
            sim_obs._obs_env = self._obs_env  # no copy
            sim_obs._forecasted_inj = self._forecasted_inj[1:]  # remove the first one
            sim_obs._update_internal_env_params(self._obs_env)
        return (sim_obs, *rest)  # parentheses are needed for python 3.6 at least.
def copy(self):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Make a copy of the observation.
Returns
-------
res: :class:`BaseObservation`
The deep copy of the observation
Notes
--------
The "obs_env" attributes
"""
obs_env = self._obs_env
self._obs_env = None # _obs_env is a pointer, it is not held by this !
action_helper = self.action_helper
self.action_helper = None
_ptr_kwargs_env = self._ptr_kwargs_env
self._ptr_kwargs_env = None
res = copy.deepcopy(self)
self._obs_env = obs_env
res._obs_env = obs_env
self.action_helper = action_helper
res.action_helper = action_helper
self._ptr_kwargs_env = _ptr_kwargs_env
res._ptr_kwargs_env = _ptr_kwargs_env
return res
@property
def line_or_bus(self):
"""
Retrieve the busbar at which each origin end of powerline is connected.
The result follow grid2op convention:
- -1 means the powerline is disconnected
- 1 means it is connected to busbar 1
- 2 means it is connected to busbar 2
- etc.
Notes
-----
In a same substation, two objects are connected together if (and only if) they are connected
to the same busbar.
"""
res = self.topo_vect[self.line_or_pos_topo_vect]
res.flags.writeable = False
return res
@property
def line_ex_bus(self):
"""
Retrieve the busbar at which each extremity end of powerline is connected.
The result follow grid2op convention:
- -1 means the powerline is disconnected
- 1 means it is connected to busbar 1
- 2 means it is connected to busbar 2
- etc.
Notes
-----
In a same substation, two objects are connected together if (and only if) they are connected
to the same busbar.
"""
res = self.topo_vect[self.line_ex_pos_topo_vect]
res.flags.writeable = False
return res
@property
def gen_bus(self):
"""
Retrieve the busbar at which each generator is connected.
The result follow grid2op convention:
- -1 means the generator is disconnected
- 1 means it is generator to busbar 1
- 2 means it is connected to busbar 2
- etc.
Notes
-----
In a same substation, two objects are connected together if (and only if) they are connected
to the same busbar.
"""
res = self.topo_vect[self.gen_pos_topo_vect]
res.flags.writeable = False
return res
@property
def load_bus(self):
"""
Retrieve the busbar at which each load is connected.
The result follow grid2op convention:
- -1 means the load is disconnected
- 1 means it is load to busbar 1
- 2 means it is load to busbar 2
- etc.
Notes
-----
In a same substation, two objects are connected together if (and only if) they are connected
to the same busbar.
"""
res = self.topo_vect[self.load_pos_topo_vect]
res.flags.writeable = False
return res
@property
def storage_bus(self):
"""
Retrieve the busbar at which each storage unit is connected.
The result follow grid2op convention:
- -1 means the storage unit is disconnected
- 1 means it is storage unit to busbar 1
- 2 means it is connected to busbar 2
- etc.
Notes
-----
In a same substation, two objects are connected together if (and only if) they are connected
to the same busbar.
"""
res = self.topo_vect[self.storage_pos_topo_vect]
res.flags.writeable = False
return res
@property
def prod_p(self):
"""
As of grid2op version 1.5.0, for better consistency, the "prod_p" attribute has been renamed "gen_p",
see the doc of :attr:`BaseObservation.gen_p` for more information.
This property is present to maintain the backward compatibility.
Returns
-------
:attr:`BaseObservation.gen_p`
"""
return self.gen_p
@property
def prod_q(self):
"""
As of grid2op version 1.5.0, for better consistency, the "prod_q" attribute has been renamed "gen_q",
see the doc of :attr:`BaseObservation.gen_q` for more information.
This property is present to maintain the backward compatibility.
Returns
-------
:attr:`BaseObservation.gen_q`
"""
return self.gen_q
@property
def prod_v(self):
"""
As of grid2op version 1.5.0, for better consistency, the "prod_v" attribute has been renamed "gen_v",
see the doc of :attr:`BaseObservation.gen_v` for more information.
This property is present to maintain the backward compatibility.
Returns
-------
:attr:`BaseObservation.gen_v`
"""
return self.gen_v
def sub_topology(self, sub_id):
"""
Returns the topology of the given substation.
We remind the reader that for substation id `sud_id`, its topology is represented
by a vector of length `type(obs).subs_info[sub_id]` elements. And for each
elements of this vector, you now on which bus (1 or 2) it is connected or
if the corresponding element is disconnected (in this case it's -1)
Returns
-------
"""
tmp = self.topo_vect[self._topo_vect_to_sub == sub_id]
tmp.flags.writeable = False
return tmp
    def _reset_matrices(self):
        # Invalidate the cached vector representation (presumably rebuilt lazily
        # by `to_vect` — TODO confirm) so it cannot go stale after a state change.
        self._vectorized = None
    def from_vect(self, vect, check_legit=True):
        """
        INTERNAL
        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
        To reload an observation from a vector, use the "env.observation_space.from_vect()".
        Convert back an observation represented as a vector into a proper observation.
        Some conversion are done silently from float to the type of the corresponding observation attribute.
        Parameters
        ----------
        vect: ``numpy.ndarray``
            A representation of an BaseObservation in the form of a vector that is used to convert back the current
            observation to be equal to the vect.
        """
        # invalidate the cached derived data before overwriting the state
        self._reset_matrices()
        # and ensure everything is reloaded properly (parent handles the actual decoding)
        super().from_vect(vect, check_legit=check_legit)
def to_dict(self):
"""
Transform this observation as a dictionary. This dictionary allows you to inspect the state of this
observation and is simply a shortcut of the class instance.
Returns
-------
A dictionary representing the observation.
Notes
-------
The returned dictionary is not necessarily json serializable. To have a grid2op observation that you can
serialize in a json fashion, please use the :func:`grid2op.Space.GridObjects.to_json` function.
"""
if self._dictionnarized is None:
self._dictionnarized = {}
self._dictionnarized["timestep_overflow"] = self.timestep_overflow
self._dictionnarized["line_status"] = self.line_status
self._dictionnarized["topo_vect"] = self.topo_vect
self._dictionnarized["loads"] = {}
self._dictionnarized["loads"]["p"] = self.load_p
self._dictionnarized["loads"]["q"] = self.load_q
self._dictionnarized["loads"]["v"] = self.load_v
self._dictionnarized[
"prods"
] = {} # TODO will be removed in future versions
self._dictionnarized["prods"][
"p"
] = self.gen_p # TODO will be removed in future versions
self._dictionnarized["prods"][
"q"
] = self.gen_q # TODO will be removed in future versions
self._dictionnarized["prods"][
"v"
] = self.gen_v # TODO will be removed in future versions
self._dictionnarized["gens"] = {}
self._dictionnarized["gens"]["p"] = self.gen_p
self._dictionnarized["gens"]["q"] = self.gen_q
self._dictionnarized["gens"]["v"] = self.gen_v
self._dictionnarized["lines_or"] = {}
self._dictionnarized["lines_or"]["p"] = self.p_or
self._dictionnarized["lines_or"]["q"] = self.q_or
self._dictionnarized["lines_or"]["v"] = self.v_or
self._dictionnarized["lines_or"]["a"] = self.a_or
self._dictionnarized["lines_ex"] = {}
self._dictionnarized["lines_ex"]["p"] = self.p_ex
self._dictionnarized["lines_ex"]["q"] = self.q_ex
self._dictionnarized["lines_ex"]["v"] = self.v_ex
self._dictionnarized["lines_ex"]["a"] = self.a_ex
self._dictionnarized["rho"] = self.rho
self._dictionnarized["maintenance"] = {}
self._dictionnarized["maintenance"][
"time_next_maintenance"
] = self.time_next_maintenance
self._dictionnarized["maintenance"][
"duration_next_maintenance"
] = self.duration_next_maintenance
self._dictionnarized["cooldown"] = {}
self._dictionnarized["cooldown"]["line"] = self.time_before_cooldown_line
self._dictionnarized["cooldown"][
"substation"
] = self.time_before_cooldown_sub
self._dictionnarized["redispatching"] = {}
self._dictionnarized["redispatching"][
"target_redispatch"
] = self.target_dispatch
self._dictionnarized["redispatching"][
"actual_dispatch"
] = self.actual_dispatch
# storage
self._dictionnarized["storage_charge"] = 1.0 * self.storage_charge
self._dictionnarized["storage_power_target"] = (
1.0 * self.storage_power_target
)
self._dictionnarized["storage_power"] = 1.0 * self.storage_power
# curtailment
self._dictionnarized["gen_p_before_curtail"] = (
1.0 * self.gen_p_before_curtail
)
self._dictionnarized["curtailment"] = 1.0 * self.curtailment
self._dictionnarized["curtailment_limit"] = 1.0 * self.curtailment_limit
self._dictionnarized["curtailment_limit_effective"] = (
1.0 * self.curtailment_limit_effective
)
# alarm / attention budget
self._dictionnarized["is_alarm_illegal"] = self.is_alarm_illegal[0]
self._dictionnarized["time_since_last_alarm"] = self.time_since_last_alarm[
0
]
self._dictionnarized["last_alarm"] = copy.deepcopy(self.last_alarm)
self._dictionnarized["attention_budget"] = self.attention_budget[0]
self._dictionnarized[
"was_alarm_used_after_game_over"
] = self.was_alarm_used_after_game_over[0]
# alert
self._dictionnarized["active_alert"] = copy.deepcopy(self.active_alert)
self._dictionnarized["attack_under_alert"] = copy.deepcopy(self.attack_under_alert)
self._dictionnarized["time_since_last_alert"] = copy.deepcopy(self.time_since_last_alert)
self._dictionnarized["alert_duration"] = copy.deepcopy(self.alert_duration)
self._dictionnarized["time_since_last_attack"] = copy.deepcopy(self.time_since_last_attack)
self._dictionnarized["was_alert_used_after_attack"] = copy.deepcopy(self.was_alert_used_after_attack)
self._dictionnarized[
"total_number_of_alert"
] = self.total_number_of_alert[0] if type(self).dim_alerts else []
# current_step / max step
self._dictionnarized["current_step"] = self.current_step
self._dictionnarized["max_step"] = self.max_step
return self._dictionnarized
def add_act(self, act, issue_warn=True):
"""
Easier access to the impact on the observation if an action were applied.
This is for now only useful to get a topology in which the grid would be without
doing an expensive `obs.simuulate`
Notes
-----
This will not give the real topology of the grid in all cases for many reasons amongst:
1) past topologies are not known by the observation. If you reconnect a powerline in the action
without having specified on which bus, it has no way to know (but the environment does!)
on which bus it should be reconnected (which is the last known bus)
2) some "protections" are emulated in the environment. This means that the environment
can disconnect some powerline under certain conditions. This is absolutely not
taken into account here.
3) the environment is stochastic, for example there can be maintenance or attacks (hazards)
and the generators and loads change each step. This is not taken into account
in this function.
4) no checks are performed to see if the action meets the rules of the game (number of elements
you can modify in the action, cooldowns etc.) This method **supposes** that the action
is legal and non ambiguous.
5) It do not check for possible "game over", for example due to isolated elements or non-connected
grid (grid with 2 or more connex components)
If these issues are important for you, you will need to use the
:func:`grid2op.Observation.BaseObservation.simulate` method. It can be used like
`obs.simulate(act, time_step=0)` but it is much more expensive.
Parameters
----------
act: :class:`grid2op.Action.BaseAction`
The action you want to add to the observation
issue_warn: ``bool``
Issue a warning when this method might not compute the proper resulting topologies. Default to ``True``:
it issues warning when something not supported is done in the action.
Returns
-------
res: :class:`grid2op.Observation.Observation`
The resulting observation. Note that this observation is not initialized with everything.
It is only relevant when you want to study the resulting topology after you applied an
action. Lots of `res` attributes are empty.
Examples
--------
You can use it this way, for example if you want to retrieve the topology you would get (see the restriction
in the above description) after applying an action:
.. code-block:: python
import grid2op
# create the environment
env_name = "l2rpn_case14_sandbox" # or any other name
env = grid2op.make(env_name)
# generate the first observation
obs = env.reset()
# make some action
act = ... # see the dedicated page
# have a look at the impact on the action on the topology
partial_obs = obs + act
# or `partial_obs = obs.add_act(act, issue_warn=False)` if you want to silence the warnings
# and now you can inspect the topology with any method you want:
partial_obs.topo_vect
partial_obs.load_bus
bus_mat = partial_obs.bus_connectivity_matrix()
# or even
elem_mat = partial_obs.connectivity_matrix()
# but you cannot use
partial_obs.prod_p
# or
partial_obs.load_q
etc.
"""
from grid2op.Action import BaseAction
if not isinstance(act, BaseAction):
raise RuntimeError("You can only add actions to observation at the moment")
act = copy.deepcopy(act)
res = type(self)()
res.set_game_over(env=None)
res.topo_vect[:] = self.topo_vect
res.line_status[:] = self.line_status
ambiguous, except_tmp = act.is_ambiguous()
if ambiguous:
raise RuntimeError(
f"Impossible to add an ambiguous action to an observation. Your action was "
f'ambiguous because: "{except_tmp}"'
)
# if a powerline has been reconnected without specific bus, i issue a warning
if "set_line_status" in act.authorized_keys:
reco_powerline = act.line_set_status
if "set_bus" in act.authorized_keys:
line_ex_set_bus = act.line_ex_set_bus
line_or_set_bus = act.line_or_set_bus
else:
line_ex_set_bus = np.zeros(res.n_line, dtype=dt_int)
line_or_set_bus = np.zeros(res.n_line, dtype=dt_int)
error_no_bus_set = (
"You reconnected a powerline with your action but did not specify on which bus "
"to reconnect both its end. This behaviour, also perfectly fine for an environment "
"will not be accurate in the method obs + act. Consult the documentation for more "
"information. Problem arose for powerlines with id {}"
)
tmp = (
(reco_powerline == 1)
& (line_ex_set_bus <= 0)
& (res.topo_vect[self.line_ex_pos_topo_vect] == -1)
)
if np.any(tmp):
id_issue_ex = np.where(tmp)[0]
if issue_warn:
warnings.warn(error_no_bus_set.format(id_issue_ex))
if "set_bus" in act.authorized_keys:
# assign 1 in the bus in this case
act.line_ex_set_bus = [(el, 1) for el in id_issue_ex]
tmp = (
(reco_powerline == 1)
& (line_or_set_bus <= 0)
& (res.topo_vect[self.line_or_pos_topo_vect] == -1)
)
if np.any(tmp):
id_issue_or = np.where(tmp)[0]
if issue_warn:
warnings.warn(error_no_bus_set.format(id_issue_or))
if "set_bus" in act.authorized_keys:
# assign 1 in the bus in this case
act.line_or_set_bus = [(el, 1) for el in id_issue_or]
# topo vect
if "set_bus" in act.authorized_keys:
res.topo_vect[act.set_bus != 0] = act.set_bus[act.set_bus != 0]
if "change_bus" in act.authorized_keys:
do_change_bus_on = act.change_bus & (
res.topo_vect > 0
) # change bus of elements that were on
res.topo_vect[do_change_bus_on] = 3 - res.topo_vect[do_change_bus_on]
# topo vect: reco of powerline that should be
res.line_status = (res.topo_vect[self.line_or_pos_topo_vect] >= 1) & (
res.topo_vect[self.line_ex_pos_topo_vect] >= 1
)
# powerline status
if "set_line_status" in act.authorized_keys:
disco_line = (act.line_set_status == -1) & res.line_status
res.topo_vect[res.line_or_pos_topo_vect[disco_line]] = -1
res.topo_vect[res.line_ex_pos_topo_vect[disco_line]] = -1
res.line_status[disco_line] = False
reco_line = (act.line_set_status >= 1) & (~res.line_status)
# i can do that because i already "fixed" the action to have it put 1 in case it
# bus were not provided
if "set_bus" in act.authorized_keys:
# I assign previous bus (because it could have been modified)
res.topo_vect[
res.line_or_pos_topo_vect[reco_line]
] = act.line_or_set_bus[reco_line]
res.topo_vect[
res.line_ex_pos_topo_vect[reco_line]
] = act.line_ex_set_bus[reco_line]
else:
# I assign one (action do not allow me to modify the bus)
res.topo_vect[res.line_or_pos_topo_vect[reco_line]] = 1
res.topo_vect[res.line_ex_pos_topo_vect[reco_line]] = 1
res.line_status[reco_line] = True
if "change_line_status" in act.authorized_keys:
disco_line = act.line_change_status & res.line_status
reco_line = act.line_change_status & (~res.line_status)
# handle disconnected powerlines
res.topo_vect[res.line_or_pos_topo_vect[disco_line]] = -1
res.topo_vect[res.line_ex_pos_topo_vect[disco_line]] = -1
res.line_status[disco_line] = False
# handle reconnected powerlines
if np.any(reco_line):
if "set_bus" in act.authorized_keys:
line_ex_set_bus = 1 * act.line_ex_set_bus
line_or_set_bus = 1 * act.line_or_set_bus
else:
line_ex_set_bus = np.zeros(res.n_line, dtype=dt_int)
line_or_set_bus = np.zeros(res.n_line, dtype=dt_int)
if issue_warn and (
np.any(line_or_set_bus[reco_line] == 0)
or np.any(line_ex_set_bus[reco_line] == 0)
):
warnings.warn(
'A powerline has been reconnected with a "change_status" action without '
"specifying on which bus it was supposed to be reconnected. This is "
"perfectly fine in regular grid2op environment, but this behaviour "
"cannot be properly implemented with the only information in the "
"observation. Please see the documentation for more information."
)
line_or_set_bus[reco_line & (line_or_set_bus == 0)] = 1
line_ex_set_bus[reco_line & (line_ex_set_bus == 0)] = 1
res.topo_vect[res.line_or_pos_topo_vect[reco_line]] = line_or_set_bus[
reco_line
]
res.topo_vect[res.line_ex_pos_topo_vect[reco_line]] = line_ex_set_bus[
reco_line
]
res.line_status[reco_line] = True
if "redispatch" in act.authorized_keys:
redisp = act.redispatch
if np.any(redisp != 0) and issue_warn:
warnings.warn(
"You did redispatching on this action. Redispatching is heavily transformed "
"by the environment (consult the documentation about the modeling of the "
"generators for example) so we will not even try to mimic this here."
)
if "set_storage" in act.authorized_keys:
storage_p = act.storage_p
if np.any(storage_p != 0) and issue_warn:
warnings.warn(
"You did action on storage units in this action. This implies performing some "
"redispatching which is heavily transformed "
"by the environment (consult the documentation about the modeling of the "
"generators for example) so we will not even try to mimic this here."
)
return res
def __add__(self, act):
    """Shorthand for ``obs + act``: delegates to :func:`BaseObservation.add_act`
    with ``issue_warn=True``.

    Raises
    ------
    Grid2OpException
        If ``act`` is not a grid2op action.
    """
    from grid2op.Action import BaseAction  # lazy import: avoids a circular dependency

    if not isinstance(act, BaseAction):
        raise Grid2OpException(
            "Only grid2op action can be added to grid2op observation at the moment."
        )
    return self.add_act(act, issue_warn=True)
@property
def thermal_limit(self):
    """
    Return the thermal limit of the powergrid, given in Amps (A).

    The returned array is a read-only copy: modifying it does not affect
    the observation's internal data.

    Examples
    --------
    .. code-block:: python

        import grid2op

        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)

        obs = env.reset()
        thermal_limit = obs.thermal_limit
    """
    # multiply by 1.0 to get a fresh float copy of the internal array
    limits = self._thermal_limit * 1.0
    # freeze the copy so callers cannot mutate it in place
    limits.flags.writeable = False
    return limits
@property
def curtailment_mw(self):
    """
    Return the curtailment expressed in MW instead of a ratio of ``pmax``.

    Examples
    --------
    .. code-block:: python

        import grid2op

        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)

        obs = env.reset()
        curtailment_mw = obs.curtailment_mw
    """
    # curtailment is stored as a fraction of pmax; scale it back to MW
    ratio = self.curtailment
    return ratio * self.gen_pmax
@property
def curtailment_limit_mw(self):
    """
    Return the production limit of each generator in MW instead of a ratio
    of ``pmax``.

    Examples
    --------
    .. code-block:: python

        import grid2op

        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)

        obs = env.reset()
        curtailment_limit_mw = obs.curtailment_limit_mw
    """
    # the limit is stored as a fraction of pmax; scale it back to MW
    frac = self.curtailment_limit
    return frac * self.gen_pmax
def _update_attr_backend(self, backend):
    """This function updates the attribute of the observation that
    depends only on the backend.

    Called by :func:`BaseObservation._update_obs_complete`. All writes are
    done in place (``[:]``) into pre-allocated arrays.

    Parameters
    ----------
    backend :
        The backend from which to update the observation
    """
    # topology and powerline status as computed by the solver
    self.line_status[:] = backend.get_line_status()
    self.topo_vect[:] = backend.get_topo_vect()

    # get the values related to continuous values
    self.gen_p[:], self.gen_q[:], self.gen_v[:] = backend.generators_info()
    self.load_p[:], self.load_q[:], self.load_v[:] = backend.loads_info()
    self.p_or[:], self.q_or[:], self.v_or[:], self.a_or[:] = backend.lines_or_info()
    self.p_ex[:], self.q_ex[:], self.v_ex[:], self.a_ex[:] = backend.lines_ex_info()
    self.rho[:] = backend.get_relative_flow().astype(dt_float)

    # margin up and down
    if type(self).redispatching_unit_commitment_availble:
        # margin up: how much each generator can still increase its production
        # at the next step, bounded by both pmax and the ramp-up limit
        self.gen_margin_up[:] = np.minimum(
            type(self).gen_pmax - self.gen_p, self.gen_max_ramp_up
        )
        # renewable generators are not dispatchable: no margin by convention
        self.gen_margin_up[type(self).gen_renewable] = 0.0
        # margin down: symmetric bound with pmin and the ramp-down limit
        self.gen_margin_down[:] = np.minimum(
            self.gen_p - type(self).gen_pmin, self.gen_max_ramp_down
        )
        self.gen_margin_down[type(self).gen_renewable] = 0.0

        # because of the slack, sometimes it's negative...
        # see https://github.com/rte-france/Grid2Op/issues/313
        self.gen_margin_up[self.gen_margin_up < 0.] = 0.
        self.gen_margin_down[self.gen_margin_down < 0.] = 0.
    else:
        # no redispatching in this environment: margins are meaningless
        self.gen_margin_up[:] = 0.0
        self.gen_margin_down[:] = 0.0

    # handle shunts (if available)
    if self.shunts_data_available:
        sh_p, sh_q, sh_v, sh_bus = backend.shunt_info()
        self._shunt_p[:] = sh_p
        self._shunt_q[:] = sh_q
        self._shunt_v[:] = sh_v
        self._shunt_bus[:] = sh_bus

    if backend.can_output_theta:
        self.support_theta = True  # backend supports the computation of theta
        (
            self.theta_or[:],
            self.theta_ex[:],
            self.load_theta[:],
            self.gen_theta[:],
            self.storage_theta[:],
        ) = backend.get_theta()
    else:
        # theta will be always 0. by convention
        self.theta_or[:] = 0.
        self.theta_ex[:] = 0.
        self.load_theta[:] = 0.
        self.gen_theta[:] = 0.
        self.storage_theta[:] = 0.
def _update_internal_env_params(self, env):
    """Snapshot the private environment state needed to start a "forecast env".

    Stores (deep) copies of the env's internal redispatching / storage /
    curtailment accumulators in ``self._env_internal_params`` so that a
    forecast environment can resume from the exact current state.
    """
    # this is only done if the env supports forecast
    # some parameters used for the "forecast env"
    # but not directly accessible in the observation
    self._env_internal_params = {
        "_storage_previous_charge": 1.0 * env._storage_previous_charge,
        "_amount_storage": 1.0 * env._amount_storage,
        "_amount_storage_prev": 1.0 * env._amount_storage_prev,
        "_sum_curtailment_mw": 1.0 * env._sum_curtailment_mw,
        "_sum_curtailment_mw_prev": 1.0 * env._sum_curtailment_mw_prev,
        "_line_status_env": env.get_current_line_status().astype(dt_int),  # false -> 0 true -> 1
        "_gen_activeprod_t": 1.0 * env._gen_activeprod_t,
        "_gen_activeprod_t_redisp": 1.0 * env._gen_activeprod_t_redisp,
        "_already_modified_gen": copy.deepcopy(env._already_modified_gen),
    }
    # remap the boolean line status to the {-1, 1} encoding expected downstream
    self._env_internal_params["_line_status_env"] *= 2  # false -> 0 true -> 2
    self._env_internal_params["_line_status_env"] -= 1  # false -> -1; true -> 1

    if env._has_attention_budget:
        self._env_internal_params["_attention_budget_state"] = env._attention_budget.get_state()

    # # TODO this looks suspicious !
    # (self._env_internal_params["opp_space_state"],
    #  self._env_internal_params["opp_state"]) = env._oppSpace._get_state()
def _update_obs_complete(self, env, with_forecast=True):
    """
    update all the observation attributes as if it was a complete, fully
    observable and without noise observation

    Parameters
    ----------
    env : grid2op.Environment.BaseEnv
        The environment from which every attribute is read.
    with_forecast : bool
        Whether to also refresh the forecast data (see
        :func:`BaseObservation._update_forecast`).
    """
    self._is_done = False

    # counter
    self.current_step = dt_int(env.nb_time_step)
    self.max_step = dt_int(env.max_episode_duration())

    # extract the time stamps
    self.year = dt_int(env.time_stamp.year)
    self.month = dt_int(env.time_stamp.month)
    self.day = dt_int(env.time_stamp.day)
    self.hour_of_day = dt_int(env.time_stamp.hour)
    self.minute_of_hour = dt_int(env.time_stamp.minute)
    self.day_of_week = dt_int(env.time_stamp.weekday())

    # get the values related to topology
    self.timestep_overflow[:] = env._timestep_overflow

    # attribute that depends only on the backend state
    self._update_attr_backend(env.backend)

    # storage units
    self.storage_charge[:] = env._storage_current_charge
    self.storage_power_target[:] = env._action_storage
    self.storage_power[:] = env._storage_power

    # handles forecasts here
    self._update_forecast(env, with_forecast)

    # cool down and reconnection time after hard overflow, soft overflow or cascading failure
    self.time_before_cooldown_line[:] = env._times_before_line_status_actionable
    self.time_before_cooldown_sub[:] = env._times_before_topology_actionable
    self.time_next_maintenance[:] = env._time_next_maintenance
    self.duration_next_maintenance[:] = env._duration_next_maintenance

    # redispatching
    self.target_dispatch[:] = env._target_dispatch
    self.actual_dispatch[:] = env._actual_dispatch

    self._thermal_limit[:] = env.get_thermal_limit()

    if self.redispatching_unit_commitment_availble:
        self.gen_p_before_curtail[:] = env._gen_before_curtailment
        # curtailment ratio: fraction of pmax that was cut by the curtailment action
        self.curtailment[:] = (
            self.gen_p_before_curtail - self.gen_p
        ) / self.gen_pmax
        # only renewable generators can be curtailed
        self.curtailment[~self.gen_renewable] = 0.0
        self.curtailment_limit[:] = env._limit_curtailment
        self.curtailment_limit[self.curtailment_limit >= 1.0] = 1.0

        # "effective" limit: the ratio actually reached when curtailment acted,
        # the requested limit otherwise
        gen_curtailed = self.gen_renewable
        is_acted = (self.gen_p_before_curtail != self.gen_p)
        self.curtailment_limit_effective[gen_curtailed & is_acted] = (
            self.gen_p[gen_curtailed & is_acted] / self.gen_pmax[gen_curtailed & is_acted]
        )
        self.curtailment_limit_effective[gen_curtailed & ~is_acted] = (
            self.curtailment_limit[gen_curtailed & ~is_acted]
        )
        self.curtailment_limit_effective[~gen_curtailed] = 1.0
    else:
        # no curtailment feature: neutral values everywhere
        self.curtailment[:] = 0.0
        self.gen_p_before_curtail[:] = self.gen_p
        self.curtailment_limit[:] = 1.0
        self.curtailment_limit_effective[:] = 1.0

    self._update_alarm(env)

    self.delta_time = dt_float(1.0 * env.delta_time_seconds / 60.0)

    self._update_alert(env)
def _update_forecast(self, env, with_forecast):
    """Refresh ``self._forecasted_inj`` from the environment's forecasts.

    Does nothing when ``with_forecast`` is ``False``. The current injections
    are stored first (horizon 0), followed by the environment's forecasts.
    """
    if not with_forecast:
        return

    inj_action = {}
    dict_ = {}
    dict_["load_p"] = dt_float(1.0 * self.load_p)
    dict_["load_q"] = dt_float(1.0 * self.load_q)
    dict_["prod_p"] = dt_float(1.0 * self.gen_p)
    dict_["prod_v"] = dt_float(1.0 * self.gen_v)
    inj_action["injection"] = dict_
    # inj_action = self.action_helper(inj_action)
    timestamp = self.get_time_stamp()
    # the current state is the first "forecast" (horizon 0)...
    self._forecasted_inj = [(timestamp, inj_action)]
    # ... then come the environment forecasts for later horizons
    self._forecasted_inj += env.forecasts()
    # one placeholder per horizon; presumably filled lazily on use — TODO confirm
    self._forecasted_grid = [None for _ in self._forecasted_inj]
    self._env_internal_params = {}
    self._update_internal_env_params(env)
def _update_alarm(self, env):
    """Update the alarm-related attributes from the environment.

    No-op when the environment has no alarm feature (``dim_alarms == 0``)
    or no attention budget.
    """
    if not (self.dim_alarms and env._has_attention_budget):
        return

    self.is_alarm_illegal[:] = env._is_alarm_illegal
    if env._attention_budget.time_last_successful_alarm_raised > 0:
        self.time_since_last_alarm[:] = (
            self.current_step
            - env._attention_budget.time_last_successful_alarm_raised
        )
    else:
        # -1 encodes "no successful alarm raised yet"
        self.time_since_last_alarm[:] = -1

    self.last_alarm[:] = env._attention_budget.last_successful_alarm_raised
    self.attention_budget[:] = env._attention_budget.current_budget
def _update_alert(self, env):
    """Update the alert-related attributes from the environment's internal state."""
    self.active_alert[:] = env._last_alert
    self.time_since_last_alert[:] = env._time_since_last_alert
    self.alert_duration[:] = env._alert_duration
    self.total_number_of_alert[:] = env._total_number_of_alert
    self.time_since_last_attack[:] = env._time_since_last_attack
    self.attack_under_alert[:] = env._attack_under_alert
    # self.was_alert_used_after_attack  # handled in self.update_after_reward
def get_simulator(self) -> "grid2op.simulator.Simulator":
    """This function allows to retrieve a valid and properly initialized "Simulator"

    A :class:`grid2op.simulator.Simulator` can be used to simulate the impact of
    multiple consecutive actions, without taking into account any
    kind of rules.

    It can also be use with forecast of the productions / consumption to
    predict whether or not a given state is "robust" to variation of the
    injections for example.

    You can find more information about simulator on the dedicated page of the
    documentation :ref:`simulator_page`. TODO

    Basic usage are:

    .. code-block:: python

        import grid2op

        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)

        obs = env.reset()
        simulator = obs.get_simulator()

    Please consult the page :ref:`simulator_page` for more information about how to use them.

    .. seealso::
        :ref:`model_based_rl`

    Returns
    -------
    grid2op.simulator.Simulator
        A simulator initialized with the current state of the observation.

    Raises
    ------
    BaseObservationError
        When the observation has no usable "observation environment", e.g. it
        was loaded from disk or its backend cannot be copied.
    """
    # BaseObservation is only used for typing in the simulator...
    if self._obs_env is None:
        # typo fixed in the message below: "is the" -> "if the"
        raise BaseObservationError(
            "Impossible to build a simulator if the "
            "observation space does not support it. This can be the case if the "
            "observation is loaded from disk or if the backend cannot be copied "
            "for example."
        )
    if not self._obs_env.is_valid():
        # typo fixed in the message below: "liekly" -> "likely"
        raise BaseObservationError("Impossible to use a Simulator backend with an "
                                   "environment that cannot be copied (most "
                                   "likely due to the backend that cannot be "
                                   "copied).")

    from grid2op.simulator import (
        Simulator,
    )  # lazy import to prevent circular references

    # the Simulator constructor performs one simulation (env.step) internally,
    # so remember the counter and restore it afterwards
    nb_highres_called = self._obs_env.highres_sim_counter.nb_highres_called
    res = Simulator(backend=self._obs_env.backend, _highres_sim_counter=self._obs_env._highres_sim_counter)
    res.set_state(self)
    # it does one simulation when it inits it (calling env.step) so I remove 1 here
    self._obs_env.highres_sim_counter._HighResSimCounter__nb_highres_called = nb_highres_called
    return res
def _get_array_from_forecast(self, name):
if len(self._forecasted_inj) <= 1:
# self._forecasted_inj already embed the current step
raise NoForecastAvailable("It appears this environment does not support any forecast at all.")
nb_h = len(self._forecasted_inj)
nb_el = self._forecasted_inj[0][1]['injection'][name].shape[0]
prev = 1.0 * self._forecasted_inj[0][1]['injection'][name]
res = np.zeros((nb_h, nb_el))
for h in range(nb_h):
dict_tmp = self._forecasted_inj[h][1]['injection']
if name in dict_tmp:
this_row = 1.0 * dict_tmp[name]
prev = 1.0 * this_row
else:
this_row = 1.0 * prev
res[h,:] = this_row
return res
def _generate_forecasted_maintenance_for_simenv(self, nb_h: int):
    """Build the ``(nb_h, n_line)`` boolean maintenance table for a forecast env.

    Entry ``[h, l]`` is ``True`` when powerline ``l`` is forecasted to be in
    maintenance at horizon ``h``, derived from ``time_next_maintenance`` and
    ``duration_next_maintenance``.
    """
    nb_line = type(self).n_line
    maint = np.full((nb_h, nb_line), fill_value=False, dtype=dt_bool)
    for line_id in range(nb_line):
        start = self.time_next_maintenance[line_id]
        if start == -1:
            # -1 encodes "no maintenance planned" for this powerline
            continue
        duration = self.duration_next_maintenance[line_id]
        maint[start:(start + duration), line_id] = True
    return maint
def get_forecast_env(self) -> "grid2op.Environment.Environment":
    """
    .. versionadded:: 1.9.0

    Return a grid2op "environment" whose data (load, generation and
    maintenance) comes from the forecast data embedded in this observation.

    The returned "forecasted environment" behaves like any grid2op
    environment: it enforces the same "rules" as
    :func:`BaseObservation.simulate` (change them with
    :func:`grid2op.Environment.BaseEnv.change_forecast_parameters` or
    :func:`BaseObservation.change_forecast_parameters`) and steps exactly
    like ``env.step(...)``. This lets an agent look ahead, which is
    particularly useful for model based RL.

    .. seealso::
        :func:`BaseObservation.simulate` and
        :func:`BaseObservation.get_env_from_external_forecasts`

    .. seealso::
        :ref:`model_based_rl`

    Examples
    --------
    .. code-block:: python

        import grid2op

        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)

        obs = env.reset()
        forecast_env = obs.get_forecast_env()
        forecast_obs = forecast_env.reset()  # at the same "step" as obs

        done = False
        while not done:
            next_forecast_obs, reward, done, info = forecast_env.step(env.action_space())

    .. note::
        Stepping this environment with the same actions passed to successive
        :func:`BaseObservation.simulate` calls yields (up to some corner
        cases) the same observations.

    Returns
    -------
    grid2op.Environment.Environment
        The "forecasted environment": a grid2op environment backed by the
        forecasts made at the time of the observation.

    Raises
    ------
    BaseObservationError
        When no forecast are available, for example.
    """
    if not self._ptr_kwargs_env:
        raise BaseObservationError(
            "Cannot build a environment with the forecast data as this "
            "Observation does not appear to support forecast."
        )
    # one (horizon x element) array per kind of injection
    arrays = {
        nm: self._get_array_from_forecast(nm)
        for nm in ("load_p", "load_q", "prod_p", "prod_v")
    }
    # maintenance table covering the same number of horizons
    maintenance = self._generate_forecasted_maintenance_for_simenv(
        arrays["prod_v"].shape[0]
    )
    return self._make_env_from_arays(
        arrays["load_p"],
        arrays["load_q"],
        arrays["prod_p"],
        arrays["prod_v"],
        maintenance,
    )
def get_forecast_arrays(self):
    """
    Retrieve, as numpy arrays, the forecasted values of loads, generators
    and maintenance for the foreseeable future (the same forecasts used by
    :func:`BaseObservation.simulate` and
    :func:`BaseObservation.get_forecast_env`).

    .. versionadded:: 1.9.0

    Examples
    -----------
    .. code-block:: python

        import grid2op

        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)

        obs = env.reset()
        load_p, load_q, prod_p, prod_v, maintenance = obs.get_forecast_arrays()
    """
    injections = [
        self._get_array_from_forecast(nm)
        for nm in ("load_p", "load_q", "prod_p", "prod_v")
    ]
    load_p, load_q, prod_p, prod_v = injections
    maintenance = self._generate_forecasted_maintenance_for_simenv(prod_v.shape[0])
    return load_p, load_q, prod_p, prod_v, maintenance
def _aux_aux_get_nb_ts(self, res, array) -> int:
if res == 0 and array is not None:
# first non empty array
return array.shape[0]
if res > 0 and array is not None:
# an array is provided with a shape
# and there is another array
# I check both shape match
if array.shape[0] != res:
raise BaseObservationError("Shape mismatch between some of the input arrays")
return res
# now array is None, so I return res anyway (size not changed)
return res
def _aux_get_nb_ts(self,
load_p: Optional[np.ndarray] = None,
load_q: Optional[np.ndarray] = None,
gen_p: Optional[np.ndarray] = None,
gen_v: Optional[np.ndarray] = None,
) -> int:
res = 0
for arr in [load_p, load_q, gen_p, gen_v]:
res = self._aux_aux_get_nb_ts(res, arr)
return res
def get_env_from_external_forecasts(self,
                                    load_p: Optional[np.ndarray] = None,
                                    load_q: Optional[np.ndarray] = None,
                                    gen_p: Optional[np.ndarray] = None,
                                    gen_v: Optional[np.ndarray] = None,
                                    with_maintenance: bool= False,
                                    ) -> "grid2op.Environment.Environment":
    """
    .. versionadded:: 1.9.0

    This function will return a grid2op "environment" where the data (load, generation and maintenance)
    comes from the provided forecast data.

    This "forecasted environment" can be used like any grid2op environment. It checks the same "rules" as the
    :func:`BaseObservation.simulate` (if you want to change them, make sure to use
    :func:`grid2op.Environment.BaseEnv.change_forecast_parameters` or
    :func:`BaseObservation.change_forecast_parameters`), with the exact same behaviour
    as "env.step(...)".

    This can be particularly useful for model based RL for example.

    Data should be:

    - `load_p` a numpy array of float32 (or convertible to it) with n_rows and n_load columns
      representing the load active values in MW.
    - `load_q` a numpy array of float32 (or convertible to it) with n_rows and n_load columns
      representing the load reactive values in MVAr.
    - `gen_p` a numpy array of float32 (or convertible to it) with n_rows and n_gen columns
      representing the generation active values in MW.
    - `gen_v` a numpy array of float32 (or convertible to it) with n_rows and n_gen columns
      representing the voltage magnitude setpoint in kV.

    All arrays are optional. If nothing is provided for a given array then it's replaced by the value
    in the observation. For example, if you do not provided the `gen_p` value then `obs.gen_p` is used.

    All provided arrays should have the same number of rows.

    .. note::
        Maintenance will be added from the information of the observation. If you don't want to add
        maintenance, you can pass the kwarg `with_maintenance=False`

    .. seealso::
        :func:`BaseObservation.simulate` and :func:`BaseObservation.get_forecast_env`

    .. seealso::
        :ref:`model_based_rl`

    .. note::
        With this method, you can have as many "steps" in the forecasted environment as you want. You are
        not limited with the amount of data provided: if you send data with 10 rows, you have 10 steps. If
        you have 100 rows then you have 100 steps.

    .. warning::
        We remind that, if you provide some forecasts, it is expected that they all
        share the same number of rows (otherwise an error is raised when the shapes
        are checked).

    Examples
    --------
    A typical use might look like

    .. code-block:: python

        import grid2op

        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)

        obs = env.reset()

        # make some "forecast" with the method of your choice
        load_p_forecasted = ...
        load_q_forecasted = ...
        gen_p_forecasted = ...
        gen_v_forecasted = ...

        # and now retrieve the associated "forecasted_env"
        forcast_env = obs.get_env_from_external_forecasts(load_p_forecasted,
                                                          load_q_forecasted,
                                                          gen_p_forecasted,
                                                          gen_v_forecasted)

        # when reset this should be at the same "step" as the action
        forecast_obs = forcast_env.reset()
        # forecast_obs == obs  # should be True

        done = False
        while not done:
            next_forecast_obs, reward, done, info = forcast_env.step(env.action_space())

    Returns
    -------
    grid2op.Environment.Environment
        The "forecasted environment" that is a grid2op environment with the data corresponding to the
        forecasts provided as input.
    """
    # "+ 1" because the current observation is prepended as the first step
    nb_ts = self._aux_get_nb_ts(load_p, load_q, gen_p, gen_v) + 1
    # for each injection: either prepend the current value to the provided
    # forecast, or repeat the current value for every step when none is given
    if load_p is not None:
        load_p_this = np.concatenate((self.load_p.reshape(1, -1), load_p.astype(dt_float)))
    else:
        load_p_this = np.tile(self.load_p, nb_ts).reshape(nb_ts, -1)

    if load_q is not None:
        load_q_this = np.concatenate((self.load_q.reshape(1, -1), load_q.astype(dt_float)))
    else:
        load_q_this = np.tile(self.load_q, nb_ts).reshape(nb_ts, -1)

    if gen_p is not None:
        gen_p_this = np.concatenate((self.gen_p.reshape(1, -1), gen_p.astype(dt_float)))
    else:
        gen_p_this = np.tile(self.gen_p, nb_ts).reshape(nb_ts, -1)

    if gen_v is not None:
        gen_v_this = np.concatenate((self.gen_v.reshape(1, -1), gen_v.astype(dt_float)))
    else:
        gen_v_this = np.tile(self.gen_v, nb_ts).reshape(nb_ts, -1)

    if with_maintenance:
        # maintenance schedule derived from the observation itself
        maintenance = self._generate_forecasted_maintenance_for_simenv(nb_ts)
    else:
        maintenance = None
    return self._make_env_from_arays(load_p_this, load_q_this, gen_p_this, gen_v_this, maintenance)
def _make_env_from_arays(self,
                         load_p: np.ndarray,
                         load_q: np.ndarray,
                         prod_p: np.ndarray,
                         prod_v: Optional[np.ndarray] = None,
                         maintenance: Optional[np.ndarray] = None):
    """Build a ``_ForecastEnv`` that replays the given injection arrays.

    Each array is expected to have one row per step — TODO confirm row 0 is
    the current step for every caller. NB: the method name ("arays") is kept
    as-is for backward compatibility with existing callers.
    """
    from grid2op.Chronics import FromNPY, ChronicsHandler
    from grid2op.Environment._forecast_env import _ForecastEnv

    # chronics that simply replay the provided numpy arrays
    ch = ChronicsHandler(FromNPY,
                         load_p=load_p,
                         load_q=load_q,
                         prod_p=prod_p,
                         prod_v=prod_v,
                         maintenance=maintenance)
    # work on a copy of the backend so the observation env is left untouched
    backend = self._obs_env.backend.copy()
    backend._is_loaded = True
    # the _ForecastEnv constructor performs one simulation (env.step) internally,
    # so remember the counter and restore it after construction
    nb_highres_called = self._obs_env.highres_sim_counter.nb_highres_called
    res = _ForecastEnv(**self._ptr_kwargs_env,
                       backend=backend,
                       chronics_handler=ch,
                       parameters=self._obs_env.parameters,
                       _init_obs=self,
                       highres_sim_counter=self._obs_env.highres_sim_counter
                       )
    # it does one simulation when it inits it (calling env.step) so I remove 1 here
    res.highres_sim_counter._HighResSimCounter__nb_highres_called = nb_highres_called
    return res
def change_forecast_parameters(self, params):
    """This function allows to change the parameters (see :class:`grid2op.Parameters.Parameters`
    for more information) that are used for the `obs.simulate()` and `obs.get_forecast_env()` method.

    .. danger::
        This function has a global impact. It changes the parameters for all successive calls to
        :func:`BaseObservation.simulate` and :func:`BaseObservation.get_forecast_env` !

    .. seealso::
        :func:`grid2op.Environment.BaseEnv.change_parameters` to change the parameters of the environment
        of :func:`grid2op.Environment.BaseEnv.change_forecast_parameters` to change the parameters used
        for the `obs.simulate` and `obs.get_forecast_env` functions.

        The main advantages of this function is that you do not require to have access to an environment
        to change them.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    params : grid2op.Parameters.Parameters
        The new parameters to use for `obs.simulate` and `obs.get_forecast_env`.

    Examples
    -----------
    .. code-block:: python

        import grid2op

        env_name = "l2rpn_case14_sandbox"  # or any other name
        env = grid2op.make(env_name)

        obs = env.reset()

        new_params = env.parameters
        new_params.NO_OVERFLOW_DISCONNECTION = True
        obs.change_forecast_parameters(new_params)

        obs.simulate(...)  # uses the parameters `new_params`
        f_env = obs.get_forecast_env()  # uses also the parameters `new_params`
    """
    # register the new parameters on the observation env
    self._obs_env.change_parameters(params)
    # also set the private attribute directly — NOTE(review): presumably so the
    # change applies without waiting for a reset; confirm against ObsEnv internals
    self._obs_env._parameters = params
def update_after_reward(self, env):
    """Only called for the regular environment (so not available for
    :func:`BaseObservation.get_forecast_env` or
    :func:`BaseObservation.simulate`)

    .. warning::
        You probably don't have to use except if you develop a specific
        observation class !

    .. note::
        If you want to develop a new type of observation with a new type of reward, you can use the
        `env._reward_to_obs` attribute (dictionary) in the reward to pass information to the
        observation (in this function).

        Basically, update `env._reward_to_obs` in the reward, and use the values in `env._reward_to_obs`
        in this function.

    .. versionadded:: 1.9.1

    Parameters
    ----------
    env : grid2op.Environment.BaseEnv
        The environment with which to update the observation
    """
    # nothing to do when the environment has no alert feature
    if type(self).dim_alerts == 0:
        return

    # update the was_alert_used_after_attack !
    self.was_alert_used_after_attack[:] = env._was_alert_used_after_attack
Grid2Op | Grid2Op-master/grid2op/Observation/completeObservation.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Observation.baseObservation import BaseObservation
class CompleteObservation(BaseObservation):
    """
    This class represent a complete observation, where everything on the powergrid can be observed without
    any noise.

    This is the only :class:`BaseObservation` implemented (and used) in Grid2Op. Other type of observation, for other
    usage can of course be implemented following this example.

    It has the same attributes as the :class:`BaseObservation` class. Only one is added here.

    For a :class:`CompleteObservation` the unique representation as a vector is:

        1. :attr:`BaseObservation.year` the year [1 element]
        2. :attr:`BaseObservation.month` the month [1 element]
        3. :attr:`BaseObservation.day` the day [1 element]
        4. :attr:`BaseObservation.hour_of_day` the hour of the day [1 element]
        5. :attr:`BaseObservation.minute_of_hour` minute of the hour [1 element]
        6. :attr:`BaseObservation.day_of_week` the day of the week. Monday = 0, Sunday = 6 [1 element]
        7. :attr:`BaseObservation.gen_p` the active value of the productions
           [:attr:`grid2op.Space.GridObjects.n_gen` elements]
        8. :attr:`BaseObservation.gen_q` the reactive value of the productions
           [:attr:`grid2op.Space.GridObjects.n_gen` elements]
        9. :attr:`BaseObservation.gen_v` the voltage setpoint of the productions
           [:attr:`grid2op.Space.GridObjects.n_gen` elements]
        10. :attr:`BaseObservation.load_p` the active value of the loads
            [:attr:`grid2op.Space.GridObjects.n_load` elements]
        11. :attr:`BaseObservation.load_q` the reactive value of the loads
            [:attr:`grid2op.Space.GridObjects.n_load` elements]
        12. :attr:`BaseObservation.load_v` the voltage setpoint of the loads
            [:attr:`grid2op.Space.GridObjects.n_load` elements]
        13. :attr:`BaseObservation.p_or` active flow at origin of powerlines
            [:attr:`grid2op.Space.GridObjects.n_line` elements]
        14. :attr:`BaseObservation.q_or` reactive flow at origin of powerlines
            [:attr:`grid2op.Space.GridObjects.n_line` elements]
        15. :attr:`BaseObservation.v_or` voltage at origin of powerlines
            [:attr:`grid2op.Space.GridObjects.n_line` elements]
        16. :attr:`BaseObservation.a_or` current flow at origin of powerlines
            [:attr:`grid2op.Space.GridObjects.n_line` elements]
        17. :attr:`BaseObservation.p_ex` active flow at extremity of powerlines
            [:attr:`grid2op.Space.GridObjects.n_line` elements]
        18. :attr:`BaseObservation.q_ex` reactive flow at extremity of powerlines
            [:attr:`grid2op.Space.GridObjects.n_line` elements]
        19. :attr:`BaseObservation.v_ex` voltage at extremity of powerlines
            [:attr:`grid2op.Space.GridObjects.n_line` elements]
        20. :attr:`BaseObservation.a_ex` current flow at extremity of powerlines
            [:attr:`grid2op.Space.GridObjects.n_line` elements]
        21. :attr:`BaseObservation.rho` line capacity used (current flow / thermal limit)
            [:attr:`grid2op.Space.GridObjects.n_line` elements]
        22. :attr:`BaseObservation.line_status` line status [:attr:`grid2op.Space.GridObjects.n_line` elements]
        23. :attr:`BaseObservation.timestep_overflow` number of timestep since the powerline was on overflow
            (0 if the line is not on overflow)[:attr:`grid2op.Space.GridObjects.n_line` elements]
        24. :attr:`BaseObservation.topo_vect` representation as a vector of the topology [for each element
            it gives its bus]. See :func:`grid2op.Backend.Backend.get_topo_vect` for more information.
        25. :attr:`BaseObservation.time_before_cooldown_line` representation of the cooldown time on the powerlines
            [:attr:`grid2op.Space.GridObjects.n_line` elements]
        26. :attr:`BaseObservation.time_before_cooldown_sub` representation of the cooldown time on the substations
            [:attr:`grid2op.Space.GridObjects.n_sub` elements]
        27. :attr:`BaseObservation.time_next_maintenance` number of timestep before the next maintenance (-1 means
            no maintenance are planned, 0 a maintenance is in operation) [:attr:`BaseObservation.n_line` elements]
        28. :attr:`BaseObservation.duration_next_maintenance` duration of the next maintenance. If a maintenance
            is taking place, this is the number of timestep before it ends. [:attr:`BaseObservation.n_line` elements]
        29. :attr:`BaseObservation.target_dispatch` the target dispatch for each generator
            [:attr:`grid2op.Space.GridObjects.n_gen` elements]
        30. :attr:`BaseObservation.actual_dispatch` the actual dispatch for each generator
            [:attr:`grid2op.Space.GridObjects.n_gen` elements]
        31. :attr:`BaseObservation.storage_charge` the actual state of charge of each storage unit
            [:attr:`grid2op.Space.GridObjects.n_storage` elements]
        32. :attr:`BaseObservation.storage_power_target` the production / consumption of setpoint of each storage unit
            [:attr:`grid2op.Space.GridObjects.n_storage` elements]
        33. :attr:`BaseObservation.storage_power` the realized production / consumption of each storage unit
            [:attr:`grid2op.Space.GridObjects.n_storage` elements]
        34. :attr:`BaseObservation.gen_p_before_curtail` : the theoretical generation that would have happened
            if no generator from renewable energy sources have been performed (in MW)
            [:attr:`grid2op.Space.GridObjects.n_gen` elements]
        35. :attr:`BaseObservation.curtailment` : the current curtailment applied
            [:attr:`grid2op.Space.GridObjects.n_gen` elements]
        36. :attr:`BaseObservation.is_alarm_illegal` whether the last alarm has been illegal (due to budget
            constraint) [``bool``],

            .. warning: /!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\
        37. :attr:`BaseObservation.curtailment_limit` : the current curtailment limit (if any)
            [:attr:`grid2op.Space.GridObjects.n_gen` elements]
        38. :attr:`BaseObservation.time_since_last_alarm` number of step since the last alarm has been raised
            successfully [``int``]

            .. warning: /!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\
        39. :attr:`BaseObservation.last_alarm` : for each alarm zone, gives the last step at which an alarm has
            been successfully raised at this zone

            .. warning: /!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\

            [:attr:`grid2op.Space.GridObjects.dim_alarms` elements]
        40. :attr:`BaseObservation.attention_budget` : the current attention budget
            [``int``]
        41. :attr:`BaseObservation.was_alarm_used_after_game_over` : was the last alarm used to compute anything related
            to the attention budget when there was a game over (can only be set to ``True`` if the observation
            corresponds to a game over), warning: /!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\
            [``bool``]
        42. :attr:`BaseObservation.is_alarm_illegal` whether the last alert has been illegal (due to budget
            constraint) [``bool``]
        43. :attr:`BaseObservation.curtailment_limit` : the current curtailment limit (if any)
            [:attr:`grid2op.Space.GridObjects.n_gen` elements]
        44. :attr:`BaseObservation.curtailment_limit_effective` TODO
        45. :attr:`BaseObservation.current_step` TODO
        46. :attr:`BaseObservation.max_step` TODO
        47. :attr:`BaseObservation.delta_time` TODO
        48. :attr:`BaseObservation.gen_margin_up` TODO
        49. :attr:`BaseObservation.gen_margin_down` TODO
        50. :attr:`BaseObservation.last_alert` TODO
        51. :attr:`BaseObservation.time_since_last_alert` TODO
        52. :attr:`BaseObservation.alert_duration` TODO
        53. :attr:`BaseObservation.total_number_of_alert` TODO
        54. :attr:`BaseObservation.time_since_last_attack` TODO
        55. :attr:`BaseObservation.was_alert_used_after_attack` TODO

    """

    # NB: the order of this list defines the layout of the observation when
    # converted to / from a vector; do not reorder existing entries, only
    # append new ones (grouped by the grid2op version that introduced them).
    attr_list_vect = [
        "year",
        "month",
        "day",
        "hour_of_day",
        "minute_of_hour",
        "day_of_week",
        "gen_p",
        "gen_q",
        "gen_v",
        "load_p",
        "load_q",
        "load_v",
        "p_or",
        "q_or",
        "v_or",
        "a_or",
        "p_ex",
        "q_ex",
        "v_ex",
        "a_ex",
        "rho",
        "line_status",
        "timestep_overflow",
        "topo_vect",
        "time_before_cooldown_line",
        "time_before_cooldown_sub",
        "time_next_maintenance",
        "duration_next_maintenance",
        "target_dispatch",
        "actual_dispatch",
        "storage_charge",
        "storage_power_target",
        "storage_power",
        "gen_p_before_curtail",
        "curtailment",
        "curtailment_limit",
        "curtailment_limit_effective",  # starting grid2op version 1.6.6
        "is_alarm_illegal",
        "time_since_last_alarm",
        "last_alarm",
        "attention_budget",
        "was_alarm_used_after_game_over",
        "_shunt_p",
        "_shunt_q",
        "_shunt_v",
        "_shunt_bus",  # starting from grid2op version 1.6.0
        "current_step",
        "max_step",  # starting from grid2op version 1.6.4
        "delta_time",  # starting grid2op version 1.6.5
        "gen_margin_up",
        "gen_margin_down",  # starting grid2op version 1.6.6
        # line alert (starting grid2Op 1.9.1, for compatible envs)
        "active_alert",
        "attack_under_alert",
        "time_since_last_alert",
        "alert_duration",
        "total_number_of_alert",
        "time_since_last_attack",
        "was_alert_used_after_attack",
    ]

    # attributes serialized only in the json representation (not in the vector)
    attr_list_json = [
        "_thermal_limit",
        "support_theta",
        "theta_or",
        "theta_ex",
        "load_theta",
        "gen_theta",
        "storage_theta",
    ]
    attr_list_set = set(attr_list_vect)

    def __init__(self,
                 obs_env=None,
                 action_helper=None,
                 random_prng=None,
                 kwargs_env=None):
        """Build an (empty) complete observation; see :class:`BaseObservation` for the parameters."""
        BaseObservation.__init__(
            self,
            obs_env=obs_env,
            action_helper=action_helper,
            random_prng=random_prng,
            kwargs_env=kwargs_env
        )
        # cache for the dictionary representation (lazily built elsewhere)
        self._dictionnarized = None

    def update(self, env, with_forecast=True):
        """Refresh every attribute of this observation from the current state of `env`."""
        # reset the matrices
        self._reset_matrices()
        self.reset()
        self._update_obs_complete(env, with_forecast=with_forecast)
| 11,177 | 50.75 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/Observation/highresSimCounter.py | # Copyright (c) 2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
class HighResSimCounter:
    """This classes helps to count the total number of call to "high fidelity simulator"
    the agent made.

    The counter only ever increases: use :func:`HighResSimCounter.add_one` (or
    ``counter += n``) to record calls and read the total back through the
    :attr:`HighResSimCounter.nb_highres_called` property.
    """
    def __init__(self) -> None:
        # total number of "high fidelity simulator" calls recorded so far
        self.__nb_highres_called = 0

    def __iadd__(self, other):
        """Record ``int(other)`` additional simulator calls (supports ``counter += n``)."""
        self.__nb_highres_called += int(other)
        return self

    def add_one(self):
        """Record exactly one additional simulator call."""
        self.__nb_highres_called += 1

    @property
    def nb_highres_called(self):
        """Total number of simulator calls recorded so far (read only)."""
        return self.__nb_highres_called

    @property
    def total_simulate_simulator_calls(self):
        """Alias of :attr:`HighResSimCounter.nb_highres_called`.

        Added because ``ObservationSpace.total_simulate_simulator_calls``
        (grid2op/Observation/observationSpace.py) reads this attribute on the
        counter; without the alias that property would raise ``AttributeError``.
        """
        return self.__nb_highres_called
| 943 | 33.962963 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Observation/noisyObservation.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.dtypes import dt_int, dt_float
from grid2op.Observation.baseObservation import BaseObservation
from grid2op.Observation.completeObservation import CompleteObservation
class NoisyObservation(BaseObservation):
    """
    This class represent a complete observation (in the sens that all attributes
    of an :attr:`CompleteObservation` are accessible) but some of them are
    "noisy".

    That is, the observation that the agent has access to is not
    exactly the same as the environment internal values.

    The affected attributes are :

    - load_p: \*= lognormal (to keep the sign)
    - load_q: \*= lognormal (to keep the sign)
    - gen_p: \*= lognormal (to keep the sign)
    - gen_q: \*= lognormal (to keep the sign)
    - p_or += normal
    - p_ex += normal
    - q_or += normal
    - q_ex += normal
    - a_or: \*= lognormal (to keep the sign)
    - a_ex: \*= lognormal (to keep the sign)
    - rho: same noise as a_or (because rho is not "physical" it's the result of a computation)
    - storage_power += normal

    It can be used to emuate the acquisition of data coming from noisy sensors for
    example.

    Examples
    --------

    It can be used as follow:

    .. code-block:: python

        import grid2op

        env_name = "l2rpn_case14_sandbox"  # or any other name

        kwargs_observation = {"sigma_load_p": 0.1, "sigma_gen_p": 1.0}  # noise of the observation
        env = grid2op.make(env_name,
                           observation_class=NoisyObservation,
                           kwargs_observation=kwargs_observation)

        # do whatever you want with env !

    """

    # same vector layout as the complete (noise-free) observation
    attr_list_vect = CompleteObservation.attr_list_vect
    attr_list_json = CompleteObservation.attr_list_json
    attr_list_set = CompleteObservation.attr_list_set

    def __init__(
        self,
        obs_env=None,
        action_helper=None,
        random_prng=None,
        kwargs_env=None,
        sigma_load_p=0.01,  # multiplicative (log normal)
        sigma_load_q=0.01,  # multiplicative (log normal)
        sigma_gen_p=0.01,  # multiplicative (log normal)
        sigma_gen_q=0.01,  # multiplicative (log normal)
        sigma_a=0.01,  # multiplicative (log normal) same for a_or and a_ex
        sigma_p=0.1,  # additive (normal) same for p_or and p_ex
        sigma_q=0.1,  # additive (normal) same for q_or and q_ex
        sigma_storage=0.1,  # additive (normal)
    ):
        """Build the observation; the ``sigma_*`` parameters control the noise amplitude per attribute."""
        BaseObservation.__init__(
            self,
            obs_env=obs_env,
            action_helper=action_helper,
            random_prng=random_prng,
            kwargs_env=kwargs_env
        )
        self._dictionnarized = None

        self._sigma_load_p = sigma_load_p  # multiplicative (log normal)
        self._sigma_load_q = sigma_load_q  # multiplicative (log normal)
        self._sigma_gen_p = sigma_gen_p  # multiplicative (log normal)
        self._sigma_gen_q = sigma_gen_q  # multiplicative (log normal)
        self._sigma_a = sigma_a  # multiplicative (log normal) same for a_or and a_ex
        self._sigma_p = sigma_p  # additive (normal) same for p_or and p_ex
        self._sigma_q = sigma_q  # additive (normal) same for q_or and q_ex
        self._sigma_storage = sigma_storage  # additive (normal)

    def update(self, env, with_forecast=True):
        """Refresh the observation from `env`, then corrupt the measured values with random noise.

        The order of the random draws below is part of the behaviour: changing it
        would change the values produced for a given seed.

        .. note::
            Each ``size=`` argument now references the array the noise is applied
            to (the previous code reused e.g. ``load_p.shape`` for the ``load_q``
            noise, which only worked because the shapes coincide).
        """
        # reset the matrices
        self._reset_matrices()
        self.reset()

        # update as if the data were complete
        self._update_obs_complete(env, with_forecast=with_forecast)

        # multiplicative noise (lognormal keeps the sign of the measurement)
        mult_load_p = self.random_prng.lognormal(
            mean=0.0, sigma=self._sigma_load_p, size=self.load_p.shape
        )
        self.load_p[:] *= mult_load_p
        mult_load_q = self.random_prng.lognormal(
            mean=0.0, sigma=self._sigma_load_q, size=self.load_q.shape
        )
        self.load_q[:] *= mult_load_q
        mult_gen_p = self.random_prng.lognormal(
            mean=0.0, sigma=self._sigma_gen_p, size=self.gen_p.shape
        )
        self.gen_p[:] *= mult_gen_p
        mult_gen_q = self.random_prng.lognormal(
            mean=0.0, sigma=self._sigma_gen_q, size=self.gen_q.shape
        )
        self.gen_q[:] *= mult_gen_q
        mult_aor = self.random_prng.lognormal(
            mean=0.0, sigma=self._sigma_a, size=self.a_or.shape
        )
        self.a_or[:] *= mult_aor
        # rho is derived from a_or, so it receives the same relative noise
        self.rho[:] *= mult_aor
        mult_a_ex = self.random_prng.lognormal(
            mean=0.0, sigma=self._sigma_a, size=self.a_ex.shape
        )
        self.a_ex[:] *= mult_a_ex

        # additive noise (flows can legitimately change sign)
        add_por = self.random_prng.normal(
            loc=0.0,
            scale=self._sigma_p,  # 0.01 * np.abs(self.p_or),
            size=self.p_or.shape,
        )
        self.p_or[:] += add_por
        add_pex = self.random_prng.normal(
            loc=0.0,
            scale=self._sigma_p,  # 0.01 * np.abs(self.p_ex),
            size=self.p_ex.shape,
        )
        self.p_ex[:] += add_pex
        add_qor = self.random_prng.normal(
            loc=0.0,
            scale=self._sigma_q,  # 0.01 * np.abs(self.q_or),
            size=self.q_or.shape,
        )
        self.q_or[:] += add_qor
        add_qex = self.random_prng.normal(
            loc=0.0,
            scale=self._sigma_q,  # 0.01 * np.abs(self.q_ex),
            size=self.q_ex.shape,
        )
        self.q_ex[:] += add_qex
        add_storp = self.random_prng.normal(
            loc=0.0,
            scale=self._sigma_storage,  # 0.01 * np.abs(self.storage_power),
            size=self.storage_power.shape,
        )
        self.storage_power[:] += add_storp
| 6,100 | 34.888235 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Observation/observationSpace.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import sys
import copy
import logging
import os
from grid2op.Exceptions.EnvExceptions import EnvError
from grid2op.Observation.serializableObservationSpace import (
SerializableObservationSpace,
)
from grid2op.Reward import RewardHelper
from grid2op.Observation.completeObservation import CompleteObservation
class ObservationSpace(SerializableObservationSpace):
    """
    Helper that provides useful functions to manipulate :class:`BaseObservation`.

    BaseObservation should only be built using this Helper. It is absolutely not recommended to make an observation
    directly form its constructor.

    This class represents the same concept as the "BaseObservation Space" in the OpenAI gym framework.

    Attributes
    ----------
    with_forecast: ``bool``
        If ``True`` the :func:`BaseObservation.simulate` will be available. If ``False`` it will deactivate this
        possibility. If `simulate` function is not used, setting it to ``False`` can lead to non neglectible speed-ups.

    observationClass: ``type``
        Class used to build the observations. It defaults to :class:`CompleteObservation`

    _simulate_parameters: :class:`grid2op.Parameters.Parameters`
        Type of Parameters used to compute powerflow for the forecast.

    rewardClass: ``type``
        Class used by the :class:`grid2op.Environment.Environment` to send information about its state to the
        :class:`grid2op.BaseAgent.BaseAgent`. You can change this class to differentiate between the reward of output of
        :func:`BaseObservation.simulate`  and the reward used to train the BaseAgent.

    action_helper_env: :class:`grid2op.Action.ActionSpace`
        BaseAction space used to create action during the :func:`BaseObservation.simulate`

    reward_helper: :class:`grid2op.Reward.HelperReward`
        BaseReward function used by the the :func:`BaseObservation.simulate` function.

    obs_env: :class:`_ObsEnv`
        Instance of the environment used by the BaseObservation Helper to provide forcecast of the grid state.

    _empty_obs: :class:`BaseObservation`
        An instance of the observation with appropriate dimensions. It is updated and will be sent to he BaseAgent.

    """

    def __init__(
        self,
        gridobj,
        env,
        rewardClass=None,
        observationClass=CompleteObservation,
        actionClass=None,
        with_forecast=True,
        kwargs_observation=None,
        observation_bk_class=None,
        observation_bk_kwargs=None,
        logger=None,
        _with_obs_env=True,  # pass
    ):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Env: requires :attr:`grid2op.Environment.parameters` and :attr:`grid2op.Environment.backend` to be valid
        """
        # lazy import to prevent circular references (Env -> Observation -> Obs Space -> _ObsEnv -> Env)
        from grid2op.Environment._ObsEnv import _ObsEnv

        if actionClass is None:
            from grid2op.Action import CompleteAction
            actionClass = CompleteAction

        if logger is None:
            # no logger provided: create a disabled one so calls are no-ops
            self.logger = logging.getLogger(__name__)
            self.logger.disabled = True
        else:
            self.logger: logging.Logger = logger.getChild("grid2op_ObsSpace")

        SerializableObservationSpace.__init__(
            self, gridobj, observationClass=observationClass
        )
        self.with_forecast = with_forecast
        self._simulate_parameters = copy.deepcopy(env.parameters)
        self._legal_action = env._game_rules.legal_action
        self._env_param = copy.deepcopy(env.parameters)

        if rewardClass is None:
            # fall back on the reward the environment itself uses
            self._reward_func = env._reward_helper.template_reward
        else:
            self._reward_func = rewardClass

        # helpers
        self.action_helper_env = env._helper_action_env
        self.reward_helper = RewardHelper(reward_func=self._reward_func, logger=self.logger)

        # set to True (forever) as soon as we know "simulate" can never work
        # (e.g. the backend cannot be copied)
        self.__can_never_use_simulate = False
        # TODO here: have another backend class maybe
        _with_obs_env = _with_obs_env and self._create_backend_obs(env, observation_bk_class, observation_bk_kwargs)

        # build the concrete _ObsEnv class for this backend type
        self._ObsEnv_class = _ObsEnv.init_grid(
            type(env.backend), force_module=_ObsEnv.__module__
        )
        self._ObsEnv_class._INIT_GRID_CLS = _ObsEnv  # otherwise it's lost
        # register the generated class in its module so pickling / deepcopy can find it
        setattr(sys.modules[_ObsEnv.__module__], self._ObsEnv_class.__name__, self._ObsEnv_class)
        if _with_obs_env:
            self._create_obs_env(env)
            self.reward_helper.initialize(self.obs_env)
        else:
            # no forecast environment available: "simulate" is disabled for good
            self.with_forecast = False
            self.obs_env = None
            self._backend_obs = None
            self.__can_never_use_simulate = True

        self._empty_obs = self._template_obj
        self._update_env_time = 0.0
        self.__nb_simulate_called_this_step = 0
        self.__nb_simulate_called_this_episode = 0
        # shared (never copied) counter of "high fidelity simulator" calls
        self._highres_sim_counter = env.highres_sim_counter

        # extra argument to build the observation
        if kwargs_observation is None:
            kwargs_observation = {}
        self._ptr_kwargs_observation = kwargs_observation

        self._real_env_kwargs = {}
        self._observation_bk_class = observation_bk_class
        self._observation_bk_kwargs = observation_bk_kwargs

    def set_real_env_kwargs(self, env):
        """Store the kwargs needed to rebuild a "forecast" copy of `env` (used by obs.get_forecast_env)."""
        if not self.with_forecast:
            return
        # I don't need the backend nor the chronics_handler
        from grid2op.Environment.Environment import Environment
        self._real_env_kwargs = Environment.get_kwargs(env, False, False)

        # remove the parameters anyways (the 'forecast parameters will be used
        # when building the forecasted_env)
        del self._real_env_kwargs["parameters"]

        # i also "remove" the opponent
        from grid2op.Action import DontAct
        from grid2op.Opponent import BaseOpponent, NeverAttackBudget
        self._real_env_kwargs["opponent_action_class"] = DontAct
        self._real_env_kwargs["opponent_class"] = BaseOpponent
        self._real_env_kwargs["opponent_init_budget"] = 0.
        self._real_env_kwargs["opponent_budget_per_ts"] = 0.
        self._real_env_kwargs["opponent_budget_class"] = NeverAttackBudget
        self._real_env_kwargs["opponent_attack_duration"] = 0
        self._real_env_kwargs["opponent_attack_cooldown"] = 999999

        # and finally I remove the extra bk_class and bk_kwargs
        if "observation_bk_class" in self._real_env_kwargs:
            del self._real_env_kwargs["observation_bk_class"]
        if "observation_bk_kwargs" in self._real_env_kwargs:
            del self._real_env_kwargs["observation_bk_kwargs"]

    def _create_obs_env(self, env):
        """Instantiate the `_ObsEnv` (the lightweight environment backing obs.simulate) from `env`."""
        other_rewards = {k: v.rewardClass for k, v in env.other_rewards.items()}
        self.obs_env = self._ObsEnv_class(
            init_env_path=None,  # don't leak the path of the real grid to the observation space
            init_grid_path=None,  # don't leak the path of the real grid to the observation space
            backend_instanciated=self._backend_obs,
            obsClass=CompleteObservation,  # do not put self.observationClass otherwise it's initialized twice
            parameters=self._simulate_parameters,
            reward_helper=self.reward_helper,
            action_helper=self.action_helper_env,
            thermal_limit_a=env.get_thermal_limit(),
            legalActClass=copy.deepcopy(env._legalActClass),
            other_rewards=other_rewards,
            helper_action_class=env._helper_action_class,
            helper_action_env=env._helper_action_env,
            epsilon_poly=env._epsilon_poly,
            tol_poly=env._tol_poly,
            has_attention_budget=env._has_attention_budget,
            attention_budget_cls=env._attention_budget_cls,
            kwargs_attention_budget=env._kwargs_attention_budget,
            max_episode_duration=env.max_episode_duration(),
            delta_time_seconds=env.delta_time_seconds,
            logger=self.logger,
            highres_sim_counter=env.highres_sim_counter,
            _complete_action_cls=env._complete_action_cls,
            _ptr_orig_obs_space=self,
        )
        for k, v in self.obs_env.other_rewards.items():
            v.initialize(self.obs_env)

    def _aux_create_backend(self, env, observation_bk_class, observation_bk_kwargs, path_grid_for):
        """Build and fully initialize the backend used by `simulate` from the grid file `path_grid_for`."""
        if observation_bk_kwargs is None:
            # reuse the constructor arguments of the real backend
            observation_bk_kwargs = env.backend._my_kwargs
        observation_bk_class_used = observation_bk_class.init_grid(env.backend)
        self._backend_obs = observation_bk_class_used(**observation_bk_kwargs)
        self._backend_obs.set_env_name(env.name)
        self._backend_obs.load_grid(path_grid_for)
        self._backend_obs.assert_grid_correct()
        self._backend_obs.runpf()
        self._backend_obs.assert_grid_correct_after_powerflow()
        self._backend_obs.set_thermal_limit(env.get_thermal_limit())

    def _create_backend_obs(self, env, observation_bk_class, observation_bk_kwargs):
        """Create the backend used for `simulate`.

        Returns ``True`` when a usable backend could be built (or copied), ``False``
        when the "simulate" feature must be disabled.
        """
        _with_obs_env = True
        path_sim_bk = os.path.join(env.get_path_env(), "grid_forecast.json")
        if observation_bk_class is not None or observation_bk_kwargs is not None:
            # backend used for simulate is of a different class (or build with different arguments)
            if observation_bk_class is not None:
                self.logger.warn("Using a backend for the 'forecast' of a different class. Make sure the "
                                 "elements of the grid are in the same order and have the same name ! "
                                 "Do not hesitate to use a 'BackendConverter' if that is not the case.")
            else:
                observation_bk_class = env._raw_backend_class
            if os.path.exists(path_sim_bk) and os.path.isfile(path_sim_bk):
                path_grid_for = path_sim_bk
            else:
                path_grid_for = os.path.join(env.get_path_env(), "grid.json")
            self._aux_create_backend(env, observation_bk_class, observation_bk_kwargs, path_grid_for)
        elif os.path.exists(path_sim_bk) and os.path.isfile(path_sim_bk):
            # backend used for simulate will use the same class with same args as the env
            # backend, but with a different grid
            observation_bk_class = env._raw_backend_class
            self._aux_create_backend(env, observation_bk_class, observation_bk_kwargs, path_sim_bk)
        elif env.backend._can_be_copied:
            # case where I can copy the backend for the 'simulate' and I don't need to build
            # it (uses same class and same grid)
            try:
                self._backend_obs = env.backend.copy()
            except Exception as exc_:
                self._backend_obs = None
                self.logger.warn(f"Backend cannot be copied, simulate feature will "
                                 f"be unsusable. Error was: {exc_}")
                self._deactivate_simulate(env)
                _with_obs_env = False
                self.__can_never_use_simulate = True
        else:
            # no 'simulate' can be made unfortunately
            self._backend_obs = None
            self._deactivate_simulate(env)
            _with_obs_env = False
            self.__can_never_use_simulate = True
        return _with_obs_env

    def _deactivate_simulate(self, env):
        """Turn off the "simulate" feature (here and on `env`) and release the simulate backend."""
        if self._backend_obs is not None:
            self._backend_obs.close()
            self._backend_obs = None
        self.with_forecast = False
        env.deactivate_forecast()
        env.backend._can_be_copied = False
        self.logger.warn("Forecasts have been deactivated because "
                         "the backend cannot be copied.")

    def reactivate_forecast(self, env):
        """Re-enable the "simulate" feature, rebuilding the simulate backend / obs env if needed.

        Raises :class:`grid2op.Exceptions.EnvError` when simulate can never work for
        this environment.
        """
        if self.__can_never_use_simulate:
            raise EnvError("You cannot use `simulate` for this environment, either because the "
                           "backend you used cannot be copied, or because this observation space "
                           "does not support this feature.")

        if self.obs_env is None or self._backend_obs is None:
            # force create of everything in this case
            if self._backend_obs is not None:
                self._backend_obs.close()
                self._backend_obs = None
            self._create_backend_obs(env, self._observation_bk_class, self._observation_bk_kwargs)
            if self.obs_env is not None:
                self.obs_env.close()
                self.obs_env = None
            self._create_obs_env(env)

        self.set_real_env_kwargs(env)
        self.with_forecast = True

    def simulate_called(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Tells this class that the "obs.simulate" function has been called.
        """
        self.__nb_simulate_called_this_step += 1
        self.__nb_simulate_called_this_episode += 1

    @property
    def nb_simulate_called_this_episode(self):
        # number of calls to obs.simulate since the last env.reset()
        return self.__nb_simulate_called_this_episode

    @property
    def nb_simulate_called_this_step(self):
        # number of calls to obs.simulate since the last env.step()
        return self.__nb_simulate_called_this_step

    @property
    def total_simulate_simulator_calls(self):
        # NOTE(review): `HighResSimCounter` (grid2op/Observation/highresSimCounter.py)
        # as visible in this package only defines `nb_highres_called`, so accessing
        # `total_simulate_simulator_calls` on it looks like it would raise
        # AttributeError -- verify against the counter class.
        return self._highres_sim_counter.total_simulate_simulator_calls

    def can_use_simulate(self) -> bool:
        """
        This checks on the rules if the agent has not made too many calls to "obs.simulate" this step
        """
        return self._legal_action.can_use_simulate(
            self.__nb_simulate_called_this_step,
            self.__nb_simulate_called_this_episode,
            self._env_param,
        )

    def _change_parameters(self, new_param):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        change the parameter of the "simulate" environment
        """
        self.obs_env.change_parameters(new_param)
        self._simulate_parameters = new_param

    def change_other_rewards(self, dict_reward):
        """
        this function is used to change the "other rewards" used when you perform simulate.

        This can be used, for example, when you want to do faster call to "simulate". In this case you can remove all
        the "other_rewards" that will be used by the simulate function.

        Parameters
        ----------
        dict_reward: ``dict``
            see description of :attr:`grid2op.Environment.BaseEnv.other_rewards`

        Examples
        ---------
        If you want to deactivate the reward in the simulate function, you can do as following:

        .. code-block:: python

            import grid2op
            from grid2op.Reward import CloseToOverflowReward, L2RPNReward, RedispReward
            env_name = "l2rpn_case14_sandbox"
            other_rewards = {"close_overflow": CloseToOverflowReward,
                             "l2rpn": L2RPNReward,
                             "redisp": RedispReward}
            env = grid2op.make(env_name, other_rewards=other_rewards)

            env.observation_space.change_other_rewards({})

        """
        from grid2op.Reward import BaseReward
        from grid2op.Exceptions import Grid2OpException

        if self.obs_env is not None:
            # replace (after validation) the rewards of the simulate environment
            self.obs_env.other_rewards = {}
            for k, v in dict_reward.items():
                if not issubclass(v, BaseReward):
                    raise Grid2OpException(
                        'All values of "rewards" key word argument should be classes that inherit '
                        'from "grid2op.BaseReward"'
                    )
                if not isinstance(k, str):
                    raise Grid2OpException(
                        'All keys of "rewards" should be of string type.'
                    )
                self.obs_env.other_rewards[k] = RewardHelper(v)

            for k, v in self.obs_env.other_rewards.items():
                v.initialize(self.obs_env)

    def change_reward(self, reward_func):
        """Change the reward used by the "simulate" environment (raises EnvError if simulate is unavailable)."""
        if self.obs_env is not None:
            if self.obs_env.is_valid():
                self.obs_env._reward_helper.change_reward(reward_func)
            else:
                raise EnvError("Impossible to change the reward of the simulate "
                               "function when you cannot simulate (because the "
                               "backend could not be copied)")

    def set_thermal_limit(self, thermal_limit_a):
        """Propagate the thermal limits to the simulate environment and its backend."""
        if self.obs_env is not None:
            self.obs_env.set_thermal_limit(thermal_limit_a)
        if self._backend_obs is not None:
            self._backend_obs.set_thermal_limit(thermal_limit_a)

    def reset_space(self):
        """Reset the underlying spaces (simulate env and action class)."""
        if self.with_forecast:
            if self.obs_env.is_valid():
                self.obs_env.reset_space()
            else:
                raise EnvError("Impossible to reset_space "
                               "function when you cannot simulate (because the "
                               "backend could not be copied)")
        self.action_helper_env.actionClass.reset_space()

    def __call__(self, env, _update_state=True):
        """Build (and, unless `_update_state` is False, fill) a new observation from `env`."""
        obs_env_obs = None
        if self.with_forecast:
            self.obs_env.update_grid(env)
            obs_env_obs = self.obs_env if self.obs_env.is_valid() else None

        res = self.observationClass(
            obs_env=obs_env_obs,
            action_helper=self.action_helper_env,
            random_prng=self.space_prng,
            kwargs_env=self._real_env_kwargs,
            **self._ptr_kwargs_observation
        )
        # a fresh observation starts a fresh "step" for the simulate budget
        self.__nb_simulate_called_this_step = 0
        if _update_state:
            # TODO how to make sure that whatever the number of time i call "simulate" i still get the same observations
            # TODO use self.obs_prng when updating actions
            res.update(env=env, with_forecast=self.with_forecast)
        return res

    def size_obs(self):
        """
        Size if the observation vector would be flatten
        :return:
        """
        return self.n

    def get_empty_observation(self):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        return an empty observation, for internal use only.
        """
        return copy.deepcopy(self._empty_obs)

    def reset(self, real_env):
        """reset the observation space with the new values of the environment"""
        self.__nb_simulate_called_this_step = 0
        self.__nb_simulate_called_this_episode = 0
        if self.with_forecast:
            self.obs_env._reward_helper.reset(real_env)
            for k, v in self.obs_env.other_rewards.items():
                v.reset(real_env)
            self.obs_env.reset()
        self._env_param = copy.deepcopy(real_env.parameters)

    def _custom_deepcopy_for_copy(self, new_obj):
        """implements a faster "res = copy.deepcopy(self)" to use
        in "self.copy"

        Do not use it anywhere else...
        """
        # TODO clean that after it is working... (ie make this method per class...)
        # fill the super classes
        super()._custom_deepcopy_for_copy(new_obj)

        # now fill my class
        new_obj.with_forecast = self.with_forecast
        new_obj._simulate_parameters = copy.deepcopy(self._simulate_parameters)
        new_obj._reward_func = copy.deepcopy(self._reward_func)
        new_obj.action_helper_env = self.action_helper_env  # const
        new_obj.reward_helper = copy.deepcopy(self.reward_helper)
        new_obj._backend_obs = self._backend_obs  # ptr to a backend for simulate
        new_obj.obs_env = self.obs_env  # it is None anyway !
        new_obj._update_env_time = self._update_env_time
        new_obj.__can_never_use_simulate = self.__can_never_use_simulate
        new_obj.__nb_simulate_called_this_step = self.__nb_simulate_called_this_step
        new_obj.__nb_simulate_called_this_episode = (
            self.__nb_simulate_called_this_episode
        )

        # never copied (keep track of it)
        new_obj._highres_sim_counter = (
            self._highres_sim_counter
        )
        new_obj._env_param = copy.deepcopy(self._env_param)

        # as it's a "pointer" it's init from the env when needed here
        # this is why i don't deep copy it here !
        new_obj._ptr_kwargs_observation = self._ptr_kwargs_observation

        # real env kwargs, these is a "pointer" anyway
        new_obj._real_env_kwargs = self._real_env_kwargs
        new_obj._observation_bk_class = self._observation_bk_class
        new_obj._observation_bk_kwargs = self._observation_bk_kwargs

        new_obj._ObsEnv_class = self._ObsEnv_class

    def copy(self, copy_backend=False):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Perform a deep copy of the Observation space.

        """
        # temporarily detach the members that must not be deep-copied
        backend = self._backend_obs
        self._backend_obs = None
        obs_ = self._empty_obs
        self._empty_obs = None
        obs_env = self.obs_env
        self.obs_env = None

        # performs the copy
        # res = copy.deepcopy(self) # painfully slow...
        # create an empty "me"
        my_cls = type(self)
        res = my_cls.__new__(my_cls)
        self._custom_deepcopy_for_copy(res)

        if not copy_backend:
            # share the backend / obs env with the copy
            res._backend_obs = backend
            res._empty_obs = obs_.copy()
            res.obs_env = obs_env
        else:
            # give the copy its own obs env (and thus its own backend)
            res.obs_env = obs_env.copy()
            res.obs_env._ptr_orig_obs_space = res
            res._backend_obs = res.obs_env.backend
            res._empty_obs = obs_.copy()
            res._empty_obs._obs_env = res.obs_env

        # assign back the results
        self._backend_obs = backend
        self._empty_obs = obs_
        self.obs_env = obs_env

        return res

    def close(self):
        """Close the simulate environment (idempotent); the attribute is kept, set to None."""
        if self.obs_env is not None:
            self.obs_env.close()

        del self.obs_env
        # re-create the attribute so later accesses see None instead of AttributeError
        self.obs_env = None
| 22,838 | 41.138376 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/Observation/serializableObservationSpace.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Space import SerializableSpace
from grid2op.Observation.completeObservation import CompleteObservation
class SerializableObservationSpace(SerializableSpace):
    """
    This class allows to serialize / de-serialize the observation space.

    It should not be used inside an Environment, as some functions of the
    action might not be compatible with the serialization, especially the
    checking of whether or not an BaseObservation is legal or not.

    Attributes
    ----------
    observationClass: ``type``
        Type used to build the :attr:`SerializableActionSpace._template_act`

    _empty_obs: :class:`BaseObservation`
        An instance of the "*observationClass*" provided used to provide higher level utilities
    """

    def __init__(self, gridobj, observationClass=CompleteObservation, _init_grid=True):
        """
        Parameters
        ----------
        gridobj: :class:`grid2op.Space.GridObjects`
            Representation of the objects in the powergrid.

        observationClass: ``type``
            Type of action used to build :attr:`Space.SerializableSpace._template_obj`
        """
        SerializableSpace.__init__(
            self, gridobj=gridobj, subtype=observationClass, _init_grid=_init_grid
        )
        self.observationClass = self.subtype
        self._empty_obs = self._template_obj

    def _custom_deepcopy_for_copy(self, new_obj):
        super()._custom_deepcopy_for_copy(new_obj)
        # SerializableObservationSpace: both attributes are constants / shared
        new_obj.observationClass = self.observationClass  # const
        new_obj._empty_obs = self._template_obj  # const

    @staticmethod
    def from_dict(dict_):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

            This is used internally by EpisodeData to restore the state of the powergrid

        Allows the de-serialization of an object stored as a dictionary (for example in the case of json saving).

        Parameters
        ----------
        dict_: ``dict``
            Representation of an BaseObservation Space (aka SerializableObservationSpace) as a dictionary.

        Returns
        -------
        res: :class:``SerializableObservationSpace``
            An instance of an action space matching the dictionary.
        """
        deserialized = SerializableSpace.from_dict(dict_)
        cls_res = SerializableObservationSpace.init_grid(deserialized)
        return cls_res(
            gridobj=deserialized, observationClass=deserialized.subtype, _init_grid=False
        )

    def get_indx_extract(self, attr_name):
        # translate the legacy attribute names (kept for backward compatibility)
        # to their current equivalent before delegating to the parent class
        legacy_names = {"prod_p": "gen_p", "prod_q": "gen_q", "prod_v": "gen_v"}
        return super().get_indx_extract(legacy_names.get(attr_name, attr_name))
| 3,364 | 36.808989 | 113 | py |
Grid2Op | Grid2Op-master/grid2op/Opponent/BaseActionBudget.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Exceptions import OpponentError
class BaseActionBudget:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Base class representing the action budget of the opponent.

    It checks that the opponent uses the correct type of "action" and
    computes the budget an attack consumes.
    """

    def __init__(self, action_space):
        self.action_space = action_space

    def __call__(self, attack):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Compute the cost of the given attack.

        **NB** The cost of a "None" attack is necessarily 0 !

        Parameters
        ----------
        attack: :class:`grid2op.BaseAction.BaseAction`
            The attack performed by the opponent

        Returns
        -------
        cost: the cost of the action performed by the opponent.
        """
        if attack is None:
            # doing nothing is always free
            return 0
        expected_cls = self.action_space.actionClass
        if not isinstance(attack, expected_cls):
            raise OpponentError(
                'Attempt to use an attack of type "{}" which is not a instance of "{}", '
                "the type of action the opponent was supposed to use."
                "".format(type(attack), expected_cls)
            )
        # cost = number of affected powerlines + number of affected substations
        aff_lines, aff_subs = attack.get_topological_impact()
        return np.sum(aff_lines) + np.sum(aff_subs)
| 2,012 | 33.118644 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Opponent/BaseOpponent.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Space import RandomObject
class BaseOpponent(RandomObject):
    """Base class of every opponent: this default implementation never attacks."""

    def __init__(self, action_space):
        RandomObject.__init__(self)
        self.action_space = action_space
        self._do_nothing = self.action_space()

    def init(self, partial_env, **kwargs):
        """
        Generic initialization hook for derived classes. For example, an
        opponent that reads from a file would receive the file location
        through this method.
        """
        pass

    def reset(self, initial_budget):
        """
        Called when an episode is over, to reset the opponent and prepare it
        for a new episode.

        Parameters
        ----------
        initial_budget: ``float``
            The initial budget the opponent has
        """
        pass

    def attack(self, observation, agent_action, env_action, budget, previous_fails):
        """
        Equivalent of "act" for a regular agent.

        An opponent can have more information than a regular agent (in
        particular it can view time step t+1) and it has access to its
        current budget.

        Parameters
        ----------
        observation: :class:`grid2op.Observation.Observation`
            The last observation (at time t)

        agent_action: :class:`grid2op.Action.Action`
            The action that the agent took

        env_action: :class:`grid2op.Action.Action`
            The modification that the environment will take.

        budget: ``float``
            The current remaining budget (if an action is above this budget, it will be replaced by a do nothing.

        previous_fails: ``bool``
            Whether the previous attack failed (due to budget or ambiguous action)

        Returns
        -------
        attack: :class:`grid2op.Action.Action`
            The attack performed by the opponent. Here ``None`` (no attack), all the time.

        duration: ``int``
            The duration of the attack. Here ``None``, all the time.
        """
        # TODO maybe have a class "GymOpponent" where the observation would include the budget and all other
        # TODO information, and forward something to the "act" method.
        return None, None

    def tell_attack_continues(self, observation, agent_action, env_action, budget):
        """
        Inform the opponent that its attack is being continued, together with
        the current state of the grid.

        At every time step, exactly one of "attack" or "tell_attack_continues"
        is called.

        Parameters
        ----------
        observation: :class:`grid2op.Observation.Observation`
            The last observation (at time t)

        agent_action: :class:`grid2op.Action.Action`
            The action that the agent took

        env_action: :class:`grid2op.Action.Action`
            The modification that the environment will take.

        budget: ``float``
            The current remaining budget (if an action is above this budget, it will be replaced by a do nothing.
        """
        pass

    def get_state(self):
        """
        Return the internal state of the Opponent, such that after
        `opponent.set_state(opponent.get_state())` the opponent behaves
        exactly as if these calls had not been made.
        """
        return None

    def set_state(self, my_state):
        """
        Set the internal state of the Opponent (see :func:`BaseOpponent.get_state`).

        Parameters
        ----------
        my_state
        """
        pass

    def _custom_deepcopy_for_copy(self, new_obj, dict_=None):
        super()._custom_deepcopy_for_copy(new_obj)
        dict_ = {} if dict_ is None else dict_
        # the action space is shared (const), only the do-nothing action is rebuilt
        new_obj.action_space = self.action_space
        new_obj._do_nothing = new_obj.action_space()
        new_obj.set_state(self.get_state())
| 4,697 | 34.059701 | 115 | py |
Grid2Op | Grid2Op-master/grid2op/Opponent/GeometricOpponent.py | # Copyright (c) 2019-2021, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import warnings
import copy
import numpy as np
from grid2op.dtypes import dt_int
from grid2op.Opponent import BaseOpponent
from grid2op.Exceptions import OpponentError
class GeometricOpponent(BaseOpponent):
    """
    This opponent will disconnect lines randomly among the attackable lines `lines_attacked`.
    The sampling is done according to the lines load factor (ratio <current going through the line> to <thermal limit
    of the line>)
    (see init for more details).
    The time of the attack is sampled according to a geometric distribution
    """
    def __init__(self, action_space):
        BaseOpponent.__init__(self, action_space)
        # everything is set to None here: the real setup happens in `init`,
        # once the environment (and thus the grid description) is available
        self._do_nothing = None
        self._attacks = None
        self._lines_ids = None
        self._next_attack_time = None
        self._attack_hazard_rate = None
        self._recovery_minimum_duration = None
        self._recovery_rate = None
        self._pmax_pmin_ratio = None
        self._attack_times = None
        self._attack_waiting_times = None
        self._attack_durations = None
        self._attack_counter = None
        self._number_of_attacks = None
        self._episode_max_time = None
        self._env = None  # I need to keep a pointer to the environment for computing the maximum length of the episode
    # this is the constructor:
    # it should have the exact same signature as here
    def init(
        self,
        partial_env,
        lines_attacked=(),
        attack_every_xxx_hour=24,
        average_attack_duration_hour=4,
        minimum_attack_duration_hour=2,
        pmax_pmin_ratio=4,
        **kwargs,
    ):
        """
        Generic function used to initialize the derived classes. For example, if an opponent reads from a file, the
        path where is the file is located should be pass with this method.

        Parameters
        ----------
        partial_env: grid2op Environment
            A pointer to the environment that initializes the opponent

        lines_attacked: ``list``
            The list of lines that the XPOpponent should be able to disconnect

        attack_every_xxx_hour: ``float``
            Provide the average duration between two attacks. Note that this should be greater
            than `average_attack_duration_hour` as, for now, an agent can only do one consecutive attack.
            You should provide it in "number of hours" and not in "number of steps"
            It is used to compute the `attack_hazard_rate`.
            Attacks time are sampled with a duration distribution. For this opponent, we use the simplest of these
            distributions : The geometric distribution
            https://en.wikipedia.org/wiki/Geometric_distribution (the discrete time counterpart of the exponential
            distribution).
            The attack_hazard_rate is the main parameter of this distribution. It can be seen as the (constant)
            probability of having an attack
            in the next step. It is also the inverse of the expectation of the time to an attack.

        average_attack_duration_hour: ``float``
            Give, in number of hours, the average attack duration. This should be greater than
            `recovery_minimum_duration_hour`
            Used to compute the `recovery_rate`:
            Recovery times are random or at least should have a random part.
            In our case, we will say that the recovery time is equal to a fixed time (safety procedure time) plus a
            random time (investigations
            and repair operations) sampled according to a geometric distribution

        minimum_attack_duration_hour: ``int``
            Minimum duration of an attack (give it in hour)

        pmax_pmin_ratio: ``float``
            Ratio between the probability of the most likely line to be disconnected and the least likely one.
        """
        self._env = partial_env
        if len(lines_attacked) == 0:
            warnings.warn(
                f"The opponent is deactivated as there is no information as to which line to attack. "
                f'You can set the argument "kwargs_opponent" to the list of the line names you want '
                f' the opponent to attack in the "make" function.'
            )
        # Store attackable lines IDs (names are translated to grid line ids)
        self._lines_ids = []
        for l_name in lines_attacked:
            l_id = np.where(self.action_space.name_line == l_name)
            if len(l_id) and len(l_id[0]):
                self._lines_ids.append(l_id[0][0])
            else:
                raise OpponentError(
                    'Unable to find the powerline named "{}" on the grid. For '
                    "information, powerlines on the grid are : {}"
                    "".format(l_name, sorted(self.action_space.name_line))
                )
        # Pre-build attacks actions (one "disconnect line l_id" action per attackable line)
        self._do_nothing = self.action_space({})
        self._attacks = []
        for l_id in self._lines_ids:
            a = self.action_space({"set_line_status": [(l_id, -1)]})
            self._attacks.append(a)
        self._attacks = np.array(self._attacks)
        # Opponent's attack and recovery rates and minimum duration
        # number of steps per hour
        ts_per_hour = 3600.0 / partial_env.delta_time_seconds
        self._recovery_minimum_duration = int(
            minimum_attack_duration_hour * ts_per_hour
        )
        if average_attack_duration_hour < minimum_attack_duration_hour:
            raise OpponentError(
                "The average duration of an attack cannot be lower than the minimum time of an attack"
            )
        elif average_attack_duration_hour == minimum_attack_duration_hour:
            raise OpponentError(
                "Case average_attack_duration_hour == minimum_attack_duration_hour is not supported "
                "at the moment"
            )
        # geometric parameter of the (random part of the) attack duration, in steps
        self._recovery_rate = 1.0 / (
            ts_per_hour * (average_attack_duration_hour - minimum_attack_duration_hour)
        )
        if attack_every_xxx_hour <= average_attack_duration_hour:
            raise OpponentError(
                "attack_every_xxx_hour <= average_attack_duration_hour is not supported at the moment."
            )
        # geometric parameter of the waiting time between attacks, in steps
        self._attack_hazard_rate = 1.0 / (
            ts_per_hour * (attack_every_xxx_hour - average_attack_duration_hour)
        )
        # Opponent's pmax pmin ratio
        self._pmax_pmin_ratio = pmax_pmin_ratio
        # Episode max time
        self._episode_max_time = self._get_episode_duration()
        # Sample attack times and durations for the whole episode
        self.sample_attack_times_and_durations()
        # Set the attack counter to 0
        self._attack_counter = 0
    def _get_episode_duration(self):
        # the whole attack schedule is drawn up-front, hence the episode length
        # must be known and finite
        tmp = self._env.max_episode_duration()
        if (not np.isfinite(tmp)) or (tmp == np.iinfo(tmp).max):
            raise OpponentError(
                "Geometric opponent only works (for now) with a known finite episode duration."
            )
        return tmp
    def reset(self, initial_budget):
        # Sample attack times and durations for the whole episode
        self.sample_attack_times_and_durations()
        # Reset the attack counter to 0
        self._attack_counter = 0
        self._next_attack_time = None
        # Episode max time
        self._episode_max_time = self._get_episode_duration()
    def sample_attack_times_and_durations(self):
        """Pre-draw, for the whole episode, when each attack starts and how long it lasts."""
        self._attack_times = []
        self._attack_waiting_times = []
        self._attack_durations = []
        self._number_of_attacks = 0
        t = 0  # t=0 at the beginning of the episode
        while t < self._episode_max_time:
            # Sampling the next time to attack
            t_to_attack = self.space_prng.geometric(p=self._attack_hazard_rate)
            t_of_attack = t + t_to_attack
            t = t_of_attack
            if t < self._episode_max_time:
                self._attack_waiting_times.append(t_to_attack)
                self._attack_times.append(t_of_attack)
                self._number_of_attacks += 1
                # Sampling the attack duration (fixed minimum + random geometric part)
                attack_duration = (
                    self._recovery_minimum_duration
                    + self.space_prng.geometric(p=self._recovery_rate)
                )
                self._attack_durations.append(attack_duration)
                t = t + attack_duration
        # TODO : Log these times and durations in a log.file
        self._attack_times = np.array(self._attack_times).astype(dt_int)
        self._attack_waiting_times = np.array(self._attack_waiting_times).astype(dt_int)
        self._attack_durations = np.array(self._attack_durations).astype(dt_int)
    def tell_attack_continues(self, observation, agent_action, env_action, budget):
        # the countdown will be re-initialized at the next call to `attack`
        self._next_attack_time = None
    def attack(self, observation, agent_action, env_action, budget, previous_fails):
        """
        This method is the equivalent of "attack" for a regular agent.

        Opponent, in this framework can have more information than a regular agent (in particular it can
        view time step t+1), it has access to its current budget etc.

        Parameters
        ----------
        observation: :class:`grid2op.Observation.Observation`
            The last observation (at time t)

        opp_reward: ``float``
            THe opponent "reward" (equivalent to the agent reward, but for the opponent) TODO do i add it back ???

        done: ``bool``
            Whether the game ended or not TODO do i add it back ???

        agent_action: :class:`grid2op.Action.Action`
            The action that the agent took

        env_action: :class:`grid2op.Action.Action`
            The modification that the environment will take.

        budget: ``float``
            The current remaining budget (if an action is above this budget, it will be replaced by a do nothing.

        previous_fails: ``bool``
            Whether the previous attack failed (due to budget or ambiguous action)

        Returns
        -------
        attack: :class:`grid2op.Action.Action`
            The attack performed by the opponent. In this case, a do nothing, all the time.

        duration: ``int``
            The duration of the attack (if ``None`` then the attack will be made for the longest allowed time)
        """
        # During creation of the environment, do not attack
        if observation is None:
            return None, None
        # If there are no more attacks to come, do not attack
        if self._attack_counter >= self._number_of_attacks:
            return None, None
        if previous_fails:
            # i cannot do the attack, it failed (so self._attack_counter >= 1)
            # reschedule it after the (pre-drawn) waiting time following the
            # previous attack's duration
            self._next_attack_time = (
                self._attack_waiting_times[self._attack_counter]
                + self._attack_durations[self._attack_counter - 1]
            )
        # Set the time of the next attack
        if self._next_attack_time is None:
            self._next_attack_time = (
                1 + self._attack_waiting_times[self._attack_counter]
            )
        attack_duration = self._attack_durations[self._attack_counter]
        self._next_attack_time -= 1
        # If the attack time has not come yet, do not attack
        if self._next_attack_time > 0:
            return None, None
        else:
            # Attack is launched
            self._attack_counter += 1
        # If all attackable lines are disconnected, abort attack
        status = observation.line_status[self._lines_ids]
        if np.all(status == False):
            return None, None
        available_attacks = self._attacks[status]
        # If we have a unique attackable line we just attack it
        if len(available_attacks) == 1:
            return available_attacks[0], attack_duration
        # We have several lines, so we need to choose one
        # This will be according to their load factor (rho)
        rho = observation.rho[self._lines_ids][status]
        # The rho_rank vector is the ranking of the lines according
        # to their rho (load factor)
        # 0 : for the line with the lowest load factor
        # (n_attackable_lines - 1) : for the line with the highest load factor
        temp = rho.argsort()
        rho_ranks = np.empty_like(temp)
        rho_ranks[temp] = np.arange(len(rho))
        # We choose the attacked line using a Boltzmann distribution
        # on rho ranks, with a beta parameter (temperature) set to ensure
        # that the probability ratio between the most and the least preferred
        # lines is equal to the pmax_pmin_ratio parameter
        n_attackable_line = len(available_attacks)
        b_beta = np.log(self._pmax_pmin_ratio) / (n_attackable_line - 1)
        raw_probabilities = np.exp(b_beta * rho_ranks)
        b_probabilities = raw_probabilities / raw_probabilities.sum()
        attack = self.space_prng.choice(available_attacks, p=b_probabilities)
        return attack, attack_duration
    def get_state(self):
        # the pre-drawn schedule fully defines the opponent state
        return (
            self._attack_times,
            self._attack_waiting_times,
            self._attack_durations,
            self._number_of_attacks,
        )
    def set_state(self, my_state):
        (
            _attack_times,
            _attack_waiting_times,
            _attack_durations,
            _number_of_attacks,
        ) = my_state
        # the multiplication by 1 forces a copy of the (numpy) arrays
        self._attack_times = 1 * _attack_times
        self._attack_waiting_times = 1 * _attack_waiting_times
        self._attack_durations = 1 * _attack_durations
        self._number_of_attacks = 1 * _number_of_attacks
    def _custom_deepcopy_for_copy(self, new_obj, dict_=None):
        super()._custom_deepcopy_for_copy(new_obj, dict_)
        if dict_ is None:
            raise OpponentError("Impossible to deep copy an Opponent without a pointer "
                                "to the original env, named `partial_env`.")
        new_obj._attacks = copy.deepcopy(self._attacks)
        new_obj._lines_ids = copy.deepcopy(self._lines_ids)
        new_obj._next_attack_time = copy.deepcopy(self._next_attack_time)
        new_obj._attack_hazard_rate = copy.deepcopy(self._attack_hazard_rate)
        new_obj._recovery_minimum_duration = copy.deepcopy(
            self._recovery_minimum_duration
        )
        new_obj._recovery_rate = copy.deepcopy(self._recovery_rate)
        new_obj._pmax_pmin_ratio = copy.deepcopy(self._pmax_pmin_ratio)
        new_obj._attack_counter = copy.deepcopy(self._attack_counter)
        new_obj._episode_max_time = copy.deepcopy(self._episode_max_time)
        new_obj._env = dict_[
            "partial_env"
        ]  # I need to keep a pointer to the environment for computing the maximum length of the episode
| 15,241 | 42.056497 | 119 | py |
Grid2Op | Grid2Op-master/grid2op/Opponent/NeverAttackBudget.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Opponent.BaseActionBudget import BaseActionBudget
class NeverAttackBudget(BaseActionBudget):
    """
    A budget that makes any real attack infinitely costly, so the opponent can
    never afford one (doing nothing remains free).

    It SHOULD NOT be used if the opponent is allowed to take any actions!
    """

    def __init__(self, action_space):
        BaseActionBudget.__init__(self, action_space)

    def __call__(self, attack):
        # "no attack" costs nothing; everything else can never be afforded
        return 0.0 if attack is None else np.inf
| 942 | 35.269231 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Opponent/OpponentSpace.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import numpy as np
from grid2op.Exceptions import OpponentError
class OpponentSpace(object):
    """
    Is similar to the action space, but for the opponent.

    This class is used to express some "constraints" on the opponent attack. The opponent is free to attack whatever
    it wants, for how long it wants and when it wants. This class ensures that the opponent does not break any
    rules.

    Attributes
    ----------
    action_space: :class:`grid2op.Action.ActionSpace`
        The action space defining which action the Opponent are allowed to take

    init_budget: ``float``
        The initial budget of the opponent

    compute_budget: :class:`grid2op.Opponent.ActionBudget`
        The tool used to compute the budget

    opponent: :class:`grid2op.Opponent.BaseOpponent`
        The agent that will take malicious actions.

    previous_fails: ``bool``
        Whether the last attack of the opponent failed or not

    budget_per_timestep: ``float``
        The increase of the opponent budget per time step (if any)
    """
    def __init__(
        self,
        compute_budget,
        init_budget,
        opponent,
        attack_duration,  # maximum duration of an attack
        attack_cooldown,  # minimum duration between two consecutive attack
        budget_per_timestep=0.0,
        action_space=None,
    ):
        if action_space is not None:
            if not isinstance(action_space, compute_budget.action_space):
                raise OpponentError(
                    "BaseAction space provided to build the agent is not a subclass from the"
                    "action space to compute the cost of each action."
                )
            self.action_space = action_space
        else:
            self.action_space = compute_budget.action_space
        self.init_budget = init_budget
        self.budget = init_budget
        self.compute_budget = compute_budget
        self.opponent = opponent
        self._do_nothing = self.action_space()
        self.previous_fails = False
        self.budget_per_timestep = budget_per_timestep
        self.attack_max_duration = attack_duration
        self.attack_cooldown = attack_cooldown
        # countdowns updated at each call to `attack`
        self.current_attack_duration = 0
        self.current_attack_cooldown = attack_cooldown
        self.last_attack = None
        if init_budget < 0.0:
            raise OpponentError(
                "An opponent should at least have a positive (or null) budget. If you "
                "want to deactivate the opponent set its budget to 0 and use the"
                'DontAct class as the "opponent_class"'
            )
        # TODO do i add it back
        # if not isinstance(opponent_reward_class, BaseReward):
        #     raise OpponentError("Impossible to build an opponent reward with a reward of type {}".format(opponent_reward_class))
        # self.opp_reward_helper = RewardHelper(opponent_reward_class)
    def init_opponent(self, partial_env, **kwargs):
        """
        Generic function used to initialize the opponent. For example, if an opponent reads from a file, the
        path where is the file is located should be pass with this method.
        """
        self.opponent.init(partial_env=partial_env, **kwargs)
    def reset(self):
        """
        Reset the state of the Opponent to its original state, in particular re assign the proper budget to it.
        """
        self.budget = self.init_budget
        self.previous_fails = False
        self.current_attack_duration = 0
        self.current_attack_cooldown = self.attack_cooldown
        self.last_attack = None
        self.opponent.reset(self.budget)
        self.previous_fails = False
    def _get_state(self):
        # used for simulate
        state_me = (
            self.budget,
            self.previous_fails,
            self.current_attack_duration,
            self.current_attack_cooldown,
            self.last_attack,
        )
        state_opp = self.opponent.get_state()
        return state_me, state_opp
    def _set_state(self, my_state, opp_state=None):
        # used for simulate (and for deep copy)
        if opp_state is not None:
            self.opponent.set_state(opp_state)
        (
            budget,
            previous_fails,
            current_attack_duration,
            current_attack_cooldown,
            last_attack,
        ) = my_state
        self.budget = budget
        self.previous_fails = previous_fails
        self.current_attack_duration = current_attack_duration
        self.current_attack_cooldown = current_attack_cooldown
        self.last_attack = last_attack
    def has_failed(self):
        """
        This signal is sent by the environment and indicated the opponent attack could not be implmented on the
        powergrid, most likely due to the attack to be ambiguous.
        """
        self.previous_fails = True
    def attack(self, observation, agent_action, env_action):
        """
        This function calls the attack from the opponent.

        It check whether the budget is consistent with the attack (budget should be more that the cosst
        associated with the attack). If the attack cost too much, then it is replaced by a "do nothing"
        action. Otherwise, the attack will be implemented by the environment.

        Note that if the attack is "ambiguous" it will fails (the environment will replace it by a
        "do nothing" action), but the budget will still be consumed.

        **NB** it is expected that this function update the :attr:`OpponentSpace.last_attack`  attribute
        with ``None`` if the opponent choose not to attack, or with the attack of the opponent otherwise.

        Parameters
        ----------
        observation: :class:`grid2op.Observation.Observation`
            The last observation (at time t)

        agent_action: :class:`grid2op.Action.Action`
            The action that the agent took

        env_action: :class:`grid2op.Action.Action`
            The modification that the environment will take.

        Returns
        -------
        res: :class:`grid2op.Action.Action` : The attack the opponent wants to perform
                                              (or "do nothing" if the attack was too costly)
             or class:`NoneType` : Returns None if no action is taken

        """
        if observation is None:
            # this is the first time step, which is not a "real" one
            # just here to load the data properly, so opponent do not attack there
            return None, 0
        # Update variables
        self.budget += self.budget_per_timestep
        self.current_attack_duration = max(0, self.current_attack_duration - 1)
        self.current_attack_cooldown = max(0, self.current_attack_cooldown - 1)
        attack_called = False
        if self.current_attack_duration > 0:
            # previous attack is not over
            attack = self.last_attack
        elif self.current_attack_cooldown > self.attack_cooldown:
            # minimum time between two consecutive attack not met
            attack = None
        # If the opponent can attack
        else:
            attack_called = True
            attack, duration = self.opponent.attack(
                observation, agent_action, env_action, self.budget, self.previous_fails
            )
            if duration is None:
                # duration not chosen by the opponent: attack for as long as allowed
                if np.isfinite(self.attack_max_duration):
                    duration = self.attack_max_duration
                else:
                    duration = 1
            self.previous_fails = False
            if duration > self.attack_max_duration:
                # duration chosen by the opponent would exceed the maximum duration allowed
                attack = None
                self.previous_fails = True
            # If the cost is too high
            final_budget = (
                self.budget
            )  # TODO add the: + self.budget_per_timestep * (self.attack_duration - 1)
            # i did not do it in case an attack is ok at the beginning, ok at the end, but at some point in the attack
            # process it is not (but i'm not sure this can happen, and don't have time to think about it right now)
            if duration * self.compute_budget(attack) > final_budget:
                attack = None
                self.previous_fails = True
            # If we can afford the attack
            if attack is not None:
                # even if it's "do nothing", it's sill an attack. Too bad if the opponent chose to do nothing.
                self.current_attack_duration = duration
                self.current_attack_cooldown += self.attack_cooldown
        if not attack_called:
            # the opponent was not asked for a new attack this step: tell it the
            # previous attack simply continues
            self.opponent.tell_attack_continues(
                observation, agent_action, env_action, self.budget
            )
            self.previous_fails = False
        self.budget -= self.compute_budget(attack)
        self.last_attack = attack
        attack_duration = self.current_attack_duration
        if attack is None:
            attack_duration = 0
        return attack, attack_duration
    def close(self):
        """if this has a reference to a backend, you need to close it for grid2op to work properly. Do not forget to do it."""
        pass
| 9,752 | 38.326613 | 129 | py |
Grid2Op | Grid2Op-master/grid2op/Opponent/RandomLineOpponent.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import warnings
import numpy as np
import copy
from grid2op.Opponent import BaseOpponent
from grid2op.Exceptions import OpponentError
class RandomLineOpponent(BaseOpponent):
    """
    Opponent that, each time it attacks, disconnects one powerline drawn
    uniformly at random among the still-connected lines of a list supplied
    at initialization time.
    """

    def __init__(self, action_space):
        BaseOpponent.__init__(self, action_space)
        # all three are filled in ``init``
        self._do_nothing = None
        self._attacks = None
        self._lines_ids = None

    def init(self, partial_env, lines_attacked=[], **kwargs):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        Called once when the opponent is created: resolves the names in
        ``lines_attacked`` into line ids on the grid and pre-builds one
        "disconnect this line" action per attackable line.

        Parameters
        ----------
        partial_env
        lines_attacked
        kwargs

        Returns
        -------

        """
        # generic signature shared by all opponents, see BaseOpponent.init
        if len(lines_attacked) == 0:
            warnings.warn(
                f"The opponent is deactivated as there is no information as to which line to attack. "
                f'You can set the argument "kwargs_opponent" to the list of the line names you want '
                f' the opponent to attack in the "make" function.'
            )

        # translate each attackable line name into its id on the grid
        self._lines_ids = []
        grid_line_names = self.action_space.name_line
        for l_name in lines_attacked:
            where_res = np.where(grid_line_names == l_name)
            if not (len(where_res) and len(where_res[0])):
                raise OpponentError(
                    'Unable to find the powerline named "{}" on the grid. For '
                    "information, powerlines on the grid are : {}"
                    "".format(l_name, sorted(grid_line_names))
                )
            self._lines_ids.append(where_res[0][0])

        # one pre-built "disconnect line l_id" action per attackable line
        self._attacks = np.array(
            [
                self.action_space({"set_line_status": [(l_id, -1)]})
                for l_id in self._lines_ids
            ]
        )

    def attack(self, observation, agent_action, env_action, budget, previous_fails):
        """
        Pick this step's attack: a powerline disconnection drawn uniformly at
        random among the attackable lines that are still connected.

        The opponent can see more than a regular agent (in particular time
        step t+1) and knows its remaining budget.

        Parameters
        ----------
        observation: :class:`grid2op.Observation.Observation`
            The last observation (at time t); ``None`` while the environment
            is being created.

        agent_action: :class:`grid2op.Action.Action`
            The action that the agent took

        env_action: :class:`grid2op.Action.Action`
            The modification that the environment will take.

        budget: ``float``
            The current remaining budget (if an action is above this budget, it will be replaced by a do nothing.

        previous_fails: ``bool``
            Whether the previous attack failed (due to budget or ambiguous action)

        Returns
        -------
        attack: :class:`grid2op.Action.Action`
            The attack performed by the opponent, or ``None`` for no attack.

        duration: ``int``
            The duration of the attack (``None`` means "as long as allowed").
        """
        if observation is None:
            # environment still being created: choose not to attack
            return None, 0

        # which attackable lines are currently in service?
        still_connected = observation.line_status[self._lines_ids]
        if not still_connected.any():
            # every attackable line is already disconnected: nothing to attack
            return None, 0

        # uniform draw among the attacks whose line is still connected
        return self.space_prng.choice(self._attacks[still_connected]), None

    def _custom_deepcopy_for_copy(self, new_obj, dict_=None):
        # let the base class copy the generic attributes first
        super()._custom_deepcopy_for_copy(new_obj, dict_)
        dict_ = {} if dict_ is None else dict_
        # then deep-copy the opponent-specific state
        for attr_nm in ("_attacks", "_lines_ids"):
            setattr(new_obj, attr_nm, copy.deepcopy(getattr(self, attr_nm)))
| 5,166 | 36.172662 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/Opponent/UnlimitedBudget.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.dtypes import dt_float
from grid2op.Opponent.BaseActionBudget import BaseActionBudget
class UnlimitedBudget(BaseActionBudget):
    """
    Budget implementation under which every attack is free: whatever the
    action, its cost is always 0.

    Because nothing is ever charged, it SHOULD NOT be used if the opponent
    is allowed to take any actions!
    """

    def __init__(self, action_space):
        BaseActionBudget.__init__(self, action_space)
        # cached zero cost, stored once with the grid2op float dtype
        self._zero = dt_float(0.0)

    def __call__(self, attack):
        # an attack never consumes any budget
        return self._zero
| 943 | 35.307692 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Opponent/WeightedRandomOpponent.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import warnings
import numpy as np
import copy
from grid2op.Opponent import BaseOpponent
from grid2op.Exceptions import OpponentError
class WeightedRandomOpponent(BaseOpponent):
    """
    This opponent will disconnect lines randomly among the attackable lines `lines_attacked`.
    The sampling is weighted by the lines current usage rate divided by some factor `rho_normalization`
    (see init for more details).

    When an attack becomes possible, the time of the attack will be sampled uniformly
    in the next `attack_period` steps (see init).
    """

    def __init__(self, action_space):
        BaseOpponent.__init__(self, action_space)
        self._do_nothing = None  # pre-built "do nothing" action
        self._attacks = None  # pre-built disconnection actions (one per attackable line)
        self._lines_ids = None  # ids of the attackable lines on the grid
        self._next_attack_time = None  # steps remaining before the next attack fires
        self._attack_period = None  # window (in steps) in which the attack time is sampled
        self._rho_normalization = None  # per-line normalization of the usage rate

    # this is the constructor:
    # it should have the exact same signature as here
    def init(
        self,
        partial_env,
        lines_attacked=[],
        rho_normalization=[],
        attack_period=12 * 24,
        **kwargs,
    ):
        """
        Generic function used to initialize the derived classes. For example, if an opponent reads from a file, the
        path where is the file is located should be pass with this method.

        Parameters
        ----------
        lines_attacked: ``list``
            The list of the names of the lines that the WeightedRandomOpponent
            should be able to disconnect

        rho_normalization: ``list``
            The list of mean usage rates for the attackable lines. Should have
            the same length as lines_attacked. If no value is given, no normalization will be performed.
            The weights for sampling the attacked line are rho / rho_normalization.

        attack_period: ``int``
            The number of steps among which the attack may happen.
            If attack_period=10, then whenever an attack can be made, it will happen in the 10
            next steps.

        Raises
        ------
        OpponentError
            If ``attack_period`` is not strictly positive, or if one of the
            names in ``lines_attacked`` does not exist on the grid.
        """
        if len(lines_attacked) == 0:
            warnings.warn(
                f"The opponent is deactivated as there is no information as to which line to attack. "
                f'You can set the argument "kwargs_opponent" to the list of the line names you want '
                f' the opponent to attack in the "make" function.'
            )

        # Store attackable lines IDs
        self._lines_ids = []
        for l_name in lines_attacked:
            l_id = np.where(self.action_space.name_line == l_name)
            if len(l_id) and len(l_id[0]):
                self._lines_ids.append(l_id[0][0])
            else:
                raise OpponentError(
                    'Unable to find the powerline named "{}" on the grid. For '
                    "information, powerlines on the grid are : {}"
                    "".format(l_name, sorted(self.action_space.name_line))
                )

        # Pre-build attacks actions
        self._do_nothing = self.action_space({})
        self._attacks = []
        for l_id in self._lines_ids:
            a = self.action_space({"set_line_status": [(l_id, -1)]})
            self._attacks.append(a)
        self._attacks = np.array(self._attacks)

        # Usage rates normalization.
        # NB: ``lines_attacked`` contains line *names* (strings), so
        # ``np.ones_like(lines_attacked)`` would build an array of "1" strings
        # and break the division in :func:`WeightedRandomOpponent.attack`;
        # a float array of ones is built instead.
        self._rho_normalization = np.ones(len(lines_attacked))
        if len(rho_normalization) == 0:
            warnings.warn(
                "The usage rate normalization is not specified. No normalization will be performed."
            )
        elif len(rho_normalization) != len(lines_attacked):
            # a length mismatch is not fatal: warn (raising a bare ``Warning``
            # would contradict the message below) and keep the default of 1.
            warnings.warn(
                f"The usage rate normalization must have the same length as the number "
                f"of attacked lines. No normalization will be performed."
            )
        else:
            self._rho_normalization = np.array(rho_normalization)

        # Opponent's attack period
        self._attack_period = attack_period
        if self._attack_period <= 0:
            raise OpponentError("Opponent attack cooldown need to be > 0")

    def reset(self, initial_budget):
        # forget any previously-scheduled attack time
        self._next_attack_time = None

    def tell_attack_continues(self, observation, agent_action, env_action, budget):
        # the ongoing attack continues: the next attack time will be
        # re-sampled lazily on the next call to ``attack``
        self._next_attack_time = None

    def attack(self, observation, agent_action, env_action, budget, previous_fails):
        """
        This method is the equivalent of "attack" for a regular agent.

        Opponent, in this framework can have more information than a regular agent (in particular it can
        view time step t+1), it has access to its current budget etc.

        Parameters
        ----------
        observation: :class:`grid2op.Observation.Observation`
            The last observation (at time t)

        agent_action: :class:`grid2op.Action.Action`
            The action that the agent took

        env_action: :class:`grid2op.Action.Action`
            The modification that the environment will take.

        budget: ``float``
            The current remaining budget (if an action is above this budget, it will be replaced by a do nothing.

        previous_fails: ``bool``
            Whether the previous attack failed (due to budget or ambiguous action)

        Returns
        -------
        attack: :class:`grid2op.Action.Action`
            The attack performed by the opponent, or ``None`` for no attack.

        duration: ``int``
            The duration of the attack (``None`` means "as long as allowed").
        """
        # During creation of the environment, do not attack
        if observation is None:
            return None, 0

        # Decide the time of the next attack: uniform in the next
        # ``_attack_period`` steps
        if self._next_attack_time is None:
            self._next_attack_time = 1 + self.space_prng.randint(self._attack_period)
        self._next_attack_time -= 1

        # If the attack time has not come yet, do not attack
        if self._next_attack_time > 0:
            return None, 0

        # If all attackable lines are disconnected, do not attack
        status = observation.line_status[self._lines_ids]
        if not np.sum(status):
            return None, 0

        available_attacks = self._attacks[status]
        # sampling weights: usage rate of each connected attackable line,
        # divided by its normalization factor
        rho = observation.rho[self._lines_ids][status] / self._rho_normalization[status]
        rho_sum = rho.sum()
        if rho_sum <= 0.0:
            # this case can happen if a powerline has a flow of 0.0 but is connected, and it's the only one
            # that can be attacked... Pretty rare hey !
            return None, 0
        attack = self.space_prng.choice(available_attacks, p=rho / rho_sum)
        return attack, None

    def _custom_deepcopy_for_copy(self, new_obj, dict_=None):
        # base class copies the generic attributes, then the opponent-specific
        # state is deep-copied below
        super()._custom_deepcopy_for_copy(new_obj, dict_)
        if dict_ is None:
            dict_ = {}
        new_obj._attacks = copy.deepcopy(self._attacks)
        new_obj._lines_ids = copy.deepcopy(self._lines_ids)
        new_obj._next_attack_time = copy.deepcopy(self._next_attack_time)
        new_obj._attack_period = copy.deepcopy(self._attack_period)
        new_obj._rho_normalization = copy.deepcopy(self._rho_normalization)
| 7,974 | 39.277778 | 115 | py |
Grid2Op | Grid2Op-master/grid2op/Opponent/__init__.py | __all__ = [
"OpponentSpace",
"BaseActionBudget",
"BaseOpponent",
"UnlimitedBudget",
"RandomLineOpponent",
"WeightedRandomOpponent",
"NeverAttackBudget",
"GeometricOpponent",
"GeometricOpponentMultiArea"
]
from grid2op.Opponent.OpponentSpace import OpponentSpace
from grid2op.Opponent.BaseActionBudget import BaseActionBudget
from grid2op.Opponent.BaseOpponent import BaseOpponent
from grid2op.Opponent.UnlimitedBudget import UnlimitedBudget
from grid2op.Opponent.RandomLineOpponent import RandomLineOpponent
from grid2op.Opponent.WeightedRandomOpponent import WeightedRandomOpponent
from grid2op.Opponent.NeverAttackBudget import NeverAttackBudget
from grid2op.Opponent.GeometricOpponent import GeometricOpponent
from grid2op.Opponent.geometricOpponentMultiArea import GeometricOpponentMultiArea
| 833 | 36.909091 | 82 | py |
Grid2Op | Grid2Op-master/grid2op/Opponent/geometricOpponentMultiArea.py | #Copyright (c) 2019-2021, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from typing import Optional, List
import numpy as np
from grid2op.dtypes import dt_int
from grid2op.Opponent import BaseOpponent
from grid2op.Opponent import GeometricOpponent
from grid2op.Exceptions import OpponentError
class GeometricOpponentMultiArea(BaseOpponent):
    """
    This opponent is a combination of several similar opponents (of kind GeometricOpponent at this stage) attacking
    on different areas. The difference between the unitary opponents is mainly the attackable lines (which belong
    to different pre-identified areas).
    """

    def __init__(self, action_space):
        BaseOpponent.__init__(self, action_space)
        # one GeometricOpponent per area; ``None`` means "deactivated" (no area provided at init)
        self.list_opponents : Optional[List[GeometricOpponent]] = None
        # per-area countdown until that area's current attack ends (-1: no ongoing attack)
        self._new_attack_time_counters : Optional[np.ndarray] = None
        # per-area last attack action (None when the area is not attacking)
        self._previous_attacks = None

    def init(
        self,
        partial_env,
        lines_attacked=None,
        attack_every_xxx_hour=24,
        average_attack_duration_hour=4,
        minimum_attack_duration_hour=2,
        pmax_pmin_ratio=4,
        **kwargs,
    ):
        """
        Generic function used to initialize the derived classes. For example, if an opponent reads from a file, the
        path where is the file is located should be pass with this method.
        This is based on init from GeometricOpponent, only parameter lines_attacked becomes a list of list

        Parameters
        ----------
        partial_env: grid2op Environment
            see the GeometricOpponent::init documentation

        lines_attacked: ``list(list)``
            The lists of lines attacked by each unitary opponent (this is a list of list: the size
            of the outer list is the number of underlying opponent / number of areas and for each inner
            list it gives the name of the lines to attack.)

        attack_every_xxx_hour: ``float``
            see the GeometricOpponent::init documentation

        average_attack_duration_hour: ``float``
            see the GeometricOpponent::init documentation

        minimum_attack_duration_hour: ``int``
            see the GeometricOpponent::init documentation

        pmax_pmin_ratio: ``float``
            see the GeometricOpponent::init documentation

        """
        if lines_attacked is None:
            partial_env.logger.warning("GeometricOpponentMultiArea: no area provided, the opponent will be deactivated.")
            return

        self.list_opponents = [GeometricOpponent(action_space=self.action_space) for _ in lines_attacked]
        self._previous_attacks = [None for _ in lines_attacked]
        # NB do not reuse the name ``lines_attacked`` as the loop variable: it
        # would shadow the argument and make the counters built below have the
        # size of the *last area* instead of the number of areas.
        for area_lines, opp in zip(lines_attacked, self.list_opponents):
            opp.init(
                partial_env=partial_env,
                lines_attacked=area_lines,
                attack_every_xxx_hour=attack_every_xxx_hour,
                average_attack_duration_hour=average_attack_duration_hour,
                minimum_attack_duration_hour=minimum_attack_duration_hour,
                pmax_pmin_ratio=pmax_pmin_ratio,
                **kwargs,
            )
        # one counter per area, -1 meaning "no ongoing attack in this area"
        # (or rather 0, as in GeometricOpponent? -- kept at -1, see ``attack``)
        self._new_attack_time_counters = np.array([-1 for _ in lines_attacked])

    def reset(self, initial_budget):
        if self.list_opponents is None:
            # opponent was deactivated at init time (no area provided)
            return
        self._new_attack_time_counters = np.array([-1 for _ in self.list_opponents])
        for opp in self.list_opponents:  # maybe loop in different orders each time
            opp.reset(initial_budget)

    def attack(self, observation, agent_action, env_action, budget, previous_fails):
        """
        This method is the equivalent of "attack" for a regular agent.

        Opponent, in this framework can have more information than a regular agent (in particular it can
        view time step t+1), it has access to its current budget etc.

        Here we take the combination of unitary opponent attacks if they happen at the same time.
        We choose the attack duration as the minimum duration of several simultaneous attacks if that happen.

        Parameters
        ----------
        observation: :class:`grid2op.Observation.Observation`
            see the GeometricOpponent::attack documentation

        agent_action: :class:`grid2op.Action.Action`
            see the GeometricOpponent::attack documentation

        env_action: :class:`grid2op.Action.Action`
            see the GeometricOpponent::attack documentation

        budget: ``float``
            see the GeometricOpponent::attack documentation

        previous_fails: ``bool``
            see the GeometricOpponent::attack documentation

        Returns
        -------
        attack: :class:`grid2op.Action.Action`
            see the GeometricOpponent::attack documentation

        duration: ``int``
            see the GeometricOpponent::attack documentation

        """
        if self.list_opponents is None:
            # deactivated opponent: it never attacks
            return None, 0

        # go through opponents and check if attack or not. As soon as one attack, stop there
        self._new_attack_time_counters -= 1
        self._new_attack_time_counters[self._new_attack_time_counters < -1] = -1

        attack_combined = None
        for opp_id, opp in enumerate(self.list_opponents):
            if self._new_attack_time_counters[opp_id] == -1:
                # no ongoing attack in this area: poll its opponent
                attack_opp, attack_duration_opp = opp.attack(observation, agent_action, env_action, budget, previous_fails)
                if attack_opp is not None:
                    self._new_attack_time_counters[opp_id] = attack_duration_opp
                    self._previous_attacks[opp_id] = attack_opp
                    if attack_combined is None:
                        attack_combined = attack_opp.copy()
                    else:
                        attack_combined += attack_opp
                else:
                    self._previous_attacks[opp_id] = None
            else:
                # the attack in this area is still going on
                opp.tell_attack_continues(observation, agent_action, env_action, budget)
                if attack_combined is None:
                    attack_combined = self._previous_attacks[opp_id].copy()
                else:
                    attack_combined += self._previous_attacks[opp_id]
        # duration of 1: this method is called again at every step so the
        # combined attack is rebuilt each time
        return attack_combined, 1

    def tell_attack_continues(self, observation, agent_action, env_action, budget):
        # per-area opponents are notified individually inside ``attack``;
        # reaching this method would be a logic error
        raise RuntimeError("I should not get there !")

    def get_state(self):
        # full internal state: counters, last per-area attacks, and the state
        # of each underlying opponent
        return (self._new_attack_time_counters,
                self._previous_attacks,
                [opp.get_state() for opp in self.list_opponents])

    def set_state(self, my_state):
        self._new_attack_time_counters = np.array(my_state[0])
        self._previous_attacks = [el.copy() if el is not None else None for el in my_state[1]]
        for el, opp in zip(my_state[2], self.list_opponents):
            opp.set_state(el)

    def _custom_deepcopy_for_copy(self, new_obj, dict_=None):
        if self.list_opponents is None:
            # deactivated opponent: nothing area-specific to copy
            new_obj._new_attack_time_counters = None
            new_obj._previous_attacks = None
            new_obj.list_opponents = None
        else:
            new_obj._new_attack_time_counters = 1 * self._new_attack_time_counters
            new_obj._previous_attacks = [el.copy() if el is not None else None
                                         for el in self._previous_attacks]
            new_obj.list_opponents = []
            for opp in self.list_opponents:
                new_opp = type(opp).__new__(type(opp))
                opp._custom_deepcopy_for_copy(new_opp, dict_)
                new_obj.list_opponents.append(new_opp)
        # forward ``dict_`` to the base class, consistently with the other opponents
        super()._custom_deepcopy_for_copy(new_obj, dict_)
        return new_obj

    def seed(self, seed):
        """
        INTERNAL

         .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

            We do not recommend to use this function outside of the two examples given in the description of this class.

        Set the seeds of the source of pseudo random number used for these several unitary opponents.

        Parameters
        ----------
        seed: ``int``
            The root seed to be set for the random number generator.

        Returns
        -------
        seeds: ``list``
            The associated list of seeds used.

        """
        seeds = []
        super().seed(seed)
        if self.list_opponents is None:
            # deactivated opponent: no sub-opponent to seed
            return (seed, seeds)
        max_seed = np.iinfo(dt_int).max  # 2**32 - 1
        # derive one seed per sub-opponent from the root seed
        for opp in self.list_opponents:
            this_seed = self.space_prng.randint(max_seed)
            seeds.append(opp.seed(this_seed))
        return (seed, seeds)
| 8,850 | 41.966019 | 136 | py |
Grid2Op | Grid2Op-master/grid2op/Plot/BasePlot.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
This module is the base module for all graphical representation of the :class:`grid2op.BaseObservation.BaseObservation`.
It allows, from the layout of the graph of the powergrid (*eg* the coordinates of each substation) the position of
each objects (powerline ends, loads and generators) and the position of the buses in case of "node splitting" (when
a substations is split into independent electrical buses).
"""
import cmath
import math
import numpy as np
import warnings
import copy
from grid2op.Space import GridObjects
from grid2op.Exceptions import PlotError
class BasePlot(GridObjects):
"""
INTERNAL
.. warning:: /!\\\\ This module is deprecated /!\\\\
Prefer using the module `grid2op.PlotGrid
Utility class that allows to compute the position of the objects of the powergrid.
Deriving from this class allows to perform the display of the powergrid.
**NB** this class only performs the computation of the position, but does not display anything.
Attributes
-----------
observation_space: :class:`grid2op.Observation.HelperObservation`
The observation space used.
"""
    def __init__(
        self,
        observation_space,
        substation_layout=None,
        radius_sub=20.0,
        load_prod_dist=70.0,
        bus_radius=6.0,
    ):
        """
        Build the plot helper from an observation space (deprecated class).

        Parameters
        ----------
        observation_space: :class:`grid2op.Observation.HelperObservation`
            The observation space used; the grid description (names,
            substations, connectivity) is read from it.

        substation_layout: ``list``, optional
            Coordinates of each substation. If ``None``, the layout stored in
            ``observation_space.grid_layout`` is used instead (and a
            ``PlotError`` is raised if that one is missing too).

        radius_sub: ``float``
            Radius of the circle representing a substation (layout units).

        load_prod_dist: ``float``
            Distance between a load / generator and the center of its substation.

        bus_radius: ``float``
            Radius of the dots representing the buses inside a substation.

        Raises
        ------
        PlotError
            If no layout can be found, or if the provided layout does not have
            exactly one entry per substation.
        """
        warnings.warn(
            "This whole class has been deprecated. Use `grid2op.PlotGrid module instead`",
            category=DeprecationWarning,
        )
        if substation_layout is None:
            if observation_space.grid_layout is None:
                # if no layout is provided, and observation_space has no layout, then it fails
                raise PlotError(
                    "Impossible to use plotting abilities without specifying a layout (coordinates) "
                    "of the substations."
                )
            # if no layout is provided, use the one in the observation_space
            substation_layout = []
            for el in observation_space.name_sub:
                substation_layout.append(observation_space.grid_layout[el])
        if len(substation_layout) != observation_space.n_sub:
            raise PlotError(
                "You provided a layout with {} elements while there are {} substations on the powergrid. "
                "Your layout is invalid".format(
                    len(substation_layout), observation_space.n_sub
                )
            )
        # initialize the GridObjects part (grid description) from the observation space
        GridObjects.__init__(self)
        self.init_grid(observation_space)
        self.observation_space = observation_space
        # "substations" maps each substation id to its (x, y) coordinates
        self._layout = {}
        self._layout["substations"] = self._get_sub_layout(substation_layout)
        self.radius_sub = radius_sub
        self.load_prod_dist = load_prod_dist  # distance between load and generator to the center of the substation
        self.bus_radius = bus_radius
        # subs_elements[sub_id] maps each object name (load / gen / line end)
        # connected to substation ``sub_id`` to its type and position in the substation
        self.subs_elements = [None for _ in self.observation_space.sub_info]
        # get the element in each substation
        for sub_id in range(self.observation_space.sub_info.shape[0]):
            this_sub = {}
            objs = self.observation_space.get_obj_connect_to(substation_id=sub_id)
            # loads connected to this substation
            for c_id in objs["loads_id"]:
                c_nm = self._get_load_name(sub_id, c_id)
                this_load = {}
                this_load["type"] = "load"
                this_load["sub_pos"] = self.observation_space.load_to_sub_pos[c_id]
                this_sub[c_nm] = this_load
            # generators connected to this substation
            for g_id in objs["generators_id"]:
                g_nm = self._get_gen_name(sub_id, g_id)
                this_gen = {}
                this_gen["type"] = "gen"
                this_gen["sub_pos"] = self.observation_space.gen_to_sub_pos[g_id]
                this_sub[g_nm] = this_gen
            # "origin" ends of the powerlines leaving this substation
            for lor_id in objs["lines_or_id"]:
                ext_id = self.observation_space.line_ex_to_subid[lor_id]
                l_nm = self._get_line_name(sub_id, ext_id, lor_id)
                this_line = {}
                this_line["type"] = "line"
                this_line["sub_pos"] = self.observation_space.line_or_to_sub_pos[lor_id]
                this_sub[l_nm] = this_line
            # "extremity" ends of the powerlines arriving at this substation
            for lex_id in objs["lines_ex_id"]:
                or_id = self.observation_space.line_or_to_subid[lex_id]
                l_nm = self._get_line_name(or_id, sub_id, lex_id)
                this_line = {}
                this_line["type"] = "line"
                this_line["sub_pos"] = self.observation_space.line_ex_to_sub_pos[lex_id]
                this_sub[l_nm] = this_line
            self.subs_elements[sub_id] = this_sub
        self._compute_layout()
        # colors used for drawing; None means "use the renderer's defaults"
        self.col_line = None
        self.col_sub = None
        self.col_load = None
        self.col_gen = None
        self.default_color = None
def plot_layout(self, fig=None, reward=None, done=None, timestamp=None):
"""
.. warning:: /!\\\\ This module is deprecated /!\\\\
Prefer using the module `grid2op.PlotGrid
This function plot the layout of the grid, as well as the object. You will see the name of each elements and
their id.
"""
fig = self.init_fig(fig, reward, done, timestamp)
# draw powerline
lines = self._draw_powerlines(fig)
# draw substation
subs = self._draw_subs(fig)
# draw loads
loads = self._draw_loads(fig)
# draw gens
gens = self._draw_gens(fig)
self._post_process_obs(
fig,
reward=None,
done=None,
timestamp=None,
subs=subs,
lines=lines,
loads=loads,
gens=gens,
topos=[],
)
return fig
def plot_info(
self,
fig=None,
line_info=None,
load_info=None,
gen_info=None,
sub_info=None,
colormap=None,
unit=None,
):
"""
.. warning:: /!\\\\ This module is deprecated /!\\\\
Prefer using the module `grid2op.PlotGrid
Plot some information on the powergrid. For now, only numeric data are supported.
Parameters
----------
line_info: ``list``
information to be displayed in the powerlines, in place of their name and id (for example their
thermal limit) [must have the same size as the number of powerlines and convertible to float]
load_info: ``list``
information to display in the generators, in place of their name and id
[must have the same size as the number of loads and convertible to float]
gen_info: ``list``
information to display in the generators, in place of their name and id (for example their pmax)
[must have the same size as the number of generators and convertible to float]
sub_info: ``list``
information to display in the substation, in place of their name and id (for example the number of
different topologies possible at this substation) [must have the same size as the number of substations,
and convertible to float]
colormap: ``str``
If not None, one of "line", "load", "gen" or "sub". If None, default colors will be used for each
elements (default color is the coloring of
If not None, all elements will be black, and the selected element will be highlighted.
fig: ``matplotlib figure``
The figure on which to draw. It is created by the method if ``None``.
unit: ``str``, optional
The unit in which the data are provided. For example, if you provide in `line_info` some data in mega-watt
(MW) you can add `unit="MW"` to have the unit display on the screen.
"""
fig = self.init_fig(fig, reward=None, done=None, timestamp=None)
# draw powerline
unit_line = None
if line_info is not None:
unit_line = unit
if len(line_info) != self.n_line:
raise PlotError(
"Impossible to display these information on the powerlines: there are {} elements"
"provided while {} powerlines on this grid".format(
len(line_info), self.n_line
)
)
line_info = np.array(line_info).astype(np.float)
line_info = [line_info, line_info, line_info]
lines = self._draw_powerlines(
fig, vals=line_info, colormap=colormap, unit=unit_line
)
# draw substation
unit_sub = None
if sub_info is not None:
unit_sub = unit
if len(sub_info) != self.n_sub:
raise PlotError(
"Impossible to display these information on the substations: there are {} elements"
"provided while {} substations on this grid".format(
len(sub_info), self.n_sub
)
)
sub_info = np.array(sub_info).astype(np.float)
subs = self._draw_subs(fig, vals=sub_info, colormap=colormap, unit=unit_sub)
# draw loads
unit_load = None
if load_info is not None:
unit_load = unit
if len(load_info) != self.n_load:
raise PlotError(
"Impossible to display these information on the loads: there are {} elements"
"provided while {} loads on this grid".format(
len(load_info), self.n_load
)
)
load_info = np.array(load_info).astype(np.float)
loads = self._draw_loads(fig, vals=load_info, colormap=colormap, unit=unit_load)
# draw gens
unit_gen = None
if gen_info is not None:
unit_gen = unit
if len(gen_info) != self.n_gen:
raise PlotError(
"Impossible to display these information on the generators: there are {} elements"
"provided while {} generators on this grid".format(
len(gen_info), self.n_gen
)
)
gen_info = np.array(gen_info).astype(np.float)
gens = self._draw_gens(fig, vals=gen_info, colormap=colormap, unit=unit_gen)
self._post_process_obs(
fig,
reward=None,
done=None,
timestamp=None,
subs=subs,
lines=lines,
loads=loads,
gens=gens,
topos=[],
)
return fig
def plot_obs(
self,
observation,
fig=None,
reward=None,
done=None,
timestamp=None,
line_info="rho",
load_info="p",
gen_info="p",
colormap="line",
):
"""
.. warning:: /!\\\\ This module is deprecated /!\\\\
Prefer using the module `grid2op.PlotGrid
Plot the given observation in the given figure.
For now it represents information about load and generator active values.
It also display dashed powerlines when they are disconnected and the color of each powerlines depends on
its relative flow (its flow in amperes divided by its maximum capacity).
If a substation counts only 1 bus, nothing specific is display. If it counts more, then buses are materialized
by colored dot and lines will connect every object to its appropriate bus (with the proper color).
Names of substation and objects are NOT displayed on this figure to lower the amount of information.
Parameters
----------
observation: :class:`grid2op.Observation.Observation`
The observation to plot
fig: :class:`plotly.graph_objects.Figure`
The figure on which to plot the observation. Possibly ``None``, in this case a new figure is made.
line_info: ``str``
One of "rho", "a", or "p" or "v" the information that will be plotted on the powerline By default "rho".
All flow are taken "origin" side.
load_info: ``str``
One of "p" or "v" the information displayed on the load (défault to "p").
gen_info: ``str``
One of "p" or "v" the information displayed on the generators (default to "p").
Returns
-------
res: :class:`plotly.graph_objects.Figure`
The figure updated with the data from the new observation.
"""
fig = self.init_fig(fig, reward, done, timestamp)
# draw substation
subs = self._draw_subs(fig=fig, vals=[None for el in range(self.n_sub)])
# draw powerlines
if line_info == "rho":
line_vals = [observation.rho]
line_units = "%"
elif line_info == "a":
line_vals = [observation.a_or]
line_units = "A"
elif line_info == "p":
line_vals = [observation.p_or]
line_units = "MW"
elif line_info == "v":
line_vals = [observation.v_or]
line_units = "kV"
else:
raise PlotError(
'Impossible to plot value "{}" for line. Possible values are "rho", "p", "v" and "a".'
)
line_vals.append(observation.line_status)
line_vals.append(observation.p_or)
lines = self._draw_powerlines(
fig, vals=line_vals, unit=line_units, colormap=colormap
)
# draw the loads
if load_info == "p":
loads_vals = -observation.load_p
load_units = "MW"
elif load_info == "v":
loads_vals = observation.load_v
load_units = "kV"
else:
raise PlotError(
'Impossible to plot value "{}" for load. Possible values are "p" and "v".'
)
loads = self._draw_loads(
fig, vals=loads_vals, unit=load_units, colormap=colormap
)
# draw the generators
if gen_info == "p":
gen_vals = observation.prod_p
gen_units = "MW"
elif gen_info == "v":
gen_vals = observation.prod_v
gen_units = "kV"
else:
raise PlotError(
'Impossible to plot value "{}" for generators. Possible values are "p" and "v".'
)
gens = self._draw_gens(fig, vals=gen_vals, unit=gen_units, colormap=colormap)
# draw the topologies
topos = self._draw_topos(fig=fig, observation=observation)
self._post_process_obs(
fig, reward, done, timestamp, subs, lines, loads, gens, topos
)
return fig
    def _get_sub_layout(self, init_layout):
        # identity hook: subclasses may override it to transform the raw
        # substation layout (e.g. change coordinate system) before it is stored
        return init_layout
def _get_line_name(self, subor_id, sub_ex_id, line_id):
l_nm = "l_{}_{}_{}".format(subor_id, sub_ex_id, line_id)
return l_nm
def _get_load_name(self, sub_id, c_id):
c_nm = "load_{}_{}".format(sub_id, c_id)
return c_nm
def _get_gen_name(self, sub_id, g_id):
p_nm = "gen_{}_{}".format(sub_id, g_id)
return p_nm
    def _compute_layout(self):
        """
        .. warning:: /!\\\\ This module is deprecated /!\\\\
            Prefer using the module `grid2op.PlotGrid

        Compute the drawing position of every grid object from the substation
        layout: powerline extremities are anchored on the border circle of
        their substations, then loads and generators are spread over the
        remaining free positions of that circle.

        Results are stored in ``self._layout`` (``"line"``, ``"load"``,
        ``"gen"`` entries) and in ``self.subs_elements`` (per-element ``"pos"``
        and ``"z"``); nothing is returned.
        """
        self._layout["line"] = {}
        # assign powerline coordinates
        for line_id in range(self.n_line):
            if line_id not in self._layout["line"]:
                # state = observation.state_of(line_id=line_id)
                sub_or_id = self.line_or_to_subid[line_id]  # state["origin"]["sub_id"]
                sub_ex_id = self.line_ex_to_subid[
                    line_id
                ]  # state["extremity"]["sub_id"]
                pos_or = self._layout["substations"][sub_or_id]
                pos_ex = self._layout["substations"][sub_ex_id]
                # make sure the powerline are connected to the circle of the substation and not to the center of it
                # (positions are handled as complex numbers: x + i*y)
                z_or_tmp = pos_or[0] + 1j * pos_or[1]
                z_ex_tmp = pos_ex[0] + 1j * pos_ex[1]
                # angle of the line as seen from each of its two ends
                module_or = cmath.phase(z_ex_tmp - z_or_tmp)
                module_ex = cmath.phase(-(z_ex_tmp - z_or_tmp))
                # check parrallel lines:
                # for now it works only if there are 2 parrallel lines. The idea is to add / withdraw
                # a fixed angle for each module in this case so the lines do not overlap.
                # TODO draw line but not straight line in this case, this looks ugly for now :-/
                deg_parrallel = 25
                tmp_parrallel = self.observation_space.get_lines_id(
                    from_=sub_or_id, to_=sub_ex_id
                )
                if len(tmp_parrallel) > 1:
                    if line_id == tmp_parrallel[0]:
                        module_or += deg_parrallel / 360 * 2 * cmath.pi
                        module_ex -= deg_parrallel / 360 * 2 * cmath.pi
                    else:
                        module_or -= deg_parrallel / 360 * 2 * cmath.pi
                        module_ex += deg_parrallel / 360 * 2 * cmath.pi
                # project the anchor points onto the substation circles
                z_or = z_or_tmp + self.radius_sub * cmath.exp(module_or * 1j)
                z_ex = z_ex_tmp + self.radius_sub * cmath.exp(module_ex * 1j)
                pos_or = z_or.real, z_or.imag
                pos_ex = z_ex.real, z_ex.imag
                self._layout["line"][line_id] = sub_or_id, sub_ex_id
                # TODO here get proper name
                l_nm = self._get_line_name(sub_or_id, sub_ex_id, line_id)
                self.subs_elements[sub_or_id][l_nm]["pos"] = pos_or
                self.subs_elements[sub_or_id][l_nm]["z"] = z_or
                self.subs_elements[sub_ex_id][l_nm]["pos"] = pos_ex
                self.subs_elements[sub_ex_id][l_nm]["z"] = z_ex
        # assign loads and generators coordinates
        # this is done by first computing the "optimal" placement if there were only substation (so splitting equally
        # the objects around the circle) and then remove the closest position that are taken by the powerlines.
        for sub_id, elements in enumerate(self.subs_elements):
            nb_el = len(elements)
            # equally split
            pos_sub = self._layout["substations"][sub_id]
            z_sub = pos_sub[0] + 1j * pos_sub[1]
            pos_possible = [
                self.radius_sub * cmath.exp(1j * 2 * cmath.pi * i / nb_el) + z_sub
                for i in range(nb_el)
            ]
            # remove powerlines (already assigned): drop the candidate position
            # closest to each already-placed line anchor
            for el_nm, dict_el in elements.items():
                if dict_el["type"] == "line":
                    z = dict_el["z"]
                    closest = np.argmin([abs(pos - z) ** 2 for pos in pos_possible])
                    pos_possible = [
                        el for i, el in enumerate(pos_possible) if i != closest
                    ]
            i = 0
            # now assign load and generator
            for el_nm, dict_el in elements.items():
                if dict_el["type"] != "line":
                    dict_el["pos"] = (pos_possible[i].real, pos_possible[i].imag)
                    dict_el["z"] = pos_possible[i]
                    i += 1
        # map each load / generator to the id of its substation
        self._layout["load"] = {}
        for c_id in range(self.n_load):
            # state = observation.state_of(load_id=c_id)
            # sub_id = state["sub_id"]
            sub_id = self.load_to_subid[c_id]
            self._layout["load"][c_id] = sub_id
        self._layout["gen"] = {}
        for g_id in range(self.n_gen):
            # state = observation.state_of(gen_id=g_id)
            # sub_id = state["sub_id"]
            sub_id = self.gen_to_subid[g_id]
            self._layout["gen"][g_id] = sub_id
def _get_line_coord(self, line_id):
sub_or_id, sub_ex_id = self._layout["line"][line_id]
l_nm = self._get_line_name(sub_or_id, sub_ex_id, line_id)
pos_or = self.subs_elements[sub_or_id][l_nm]["pos"]
pos_ex = self.subs_elements[sub_ex_id][l_nm]["pos"]
return pos_or, pos_ex
    def _get_load_coord(self, load_id):
        """Return drawing info for load ``load_id``: the end point of the
        connector line, the anchor on the substation circle, the load position
        (complex number) and the text-anchor keyword.

        The geometry is computed once, then cached in the load's
        ``"elements_display"`` entry of ``self.subs_elements``.
        """
        sub_id = self._layout["load"][load_id]
        c_nm = self._get_load_name(sub_id, load_id)
        if not "elements_display" in self.subs_elements[sub_id][c_nm]:
            pos_load_sub = self.subs_elements[sub_id][c_nm]["pos"]
            pos_center_sub = self._layout["substations"][sub_id]
            z_sub = pos_center_sub[0] + 1j * pos_center_sub[1]
            # angle from the substation center to the load's anchor point
            theta = cmath.phase((self.subs_elements[sub_id][c_nm]["z"] - z_sub))
            # place the load "load_prod_dist" away from the center, on that angle
            pos_load = z_sub + cmath.exp(1j * theta) * self.load_prod_dist
            # position of the end of the line connecting the object to the substation
            pos_end_line = pos_load - cmath.exp(1j * theta) * 20
            how_center = self._get_position(theta)
            tmp_dict = {
                "pos_end_line": pos_end_line,
                "pos_load_sub": pos_load_sub,
                "pos_load": pos_load,
                "how_center": how_center,
            }
            # cache so subsequent calls are a plain lookup
            self.subs_elements[sub_id][c_nm]["elements_display"] = tmp_dict
        else:
            dict_element = self.subs_elements[sub_id][c_nm]["elements_display"]
            pos_end_line = dict_element["pos_end_line"]
            pos_load_sub = dict_element["pos_load_sub"]
            pos_load = dict_element["pos_load"]
            how_center = dict_element["how_center"]
        return pos_end_line, pos_load_sub, pos_load, how_center
    def _get_gen_coord(self, gen_id):
        """Return drawing info for generator ``gen_id``: the end point of the
        connector line, the anchor on the substation circle, the generator
        position (complex number) and the text-anchor keyword.

        Same caching scheme as :func:`_get_load_coord`.
        """
        sub_id = self._layout["gen"][gen_id]
        c_nm = self._get_gen_name(sub_id, gen_id)
        if not "elements_display" in self.subs_elements[sub_id][c_nm]:
            pos_gen_sub = self.subs_elements[sub_id][c_nm]["pos"]
            pos_center_sub = self._layout["substations"][sub_id]
            z_sub = pos_center_sub[0] + 1j * pos_center_sub[1]
            # angle from the substation center to the generator's anchor point
            theta = cmath.phase((self.subs_elements[sub_id][c_nm]["z"] - z_sub))
            pos_gen = z_sub + cmath.exp(1j * theta) * self.load_prod_dist
            # position of the end of the line connecting the object to the substation
            pos_end_line = pos_gen - cmath.exp(1j * theta) * 20
            how_center = self._get_position(theta)
            tmp_dict = {
                "pos_end_line": pos_end_line,
                "pos_gen_sub": pos_gen_sub,
                "pos_gen": pos_gen,
                "how_center": how_center,
            }
            # cache so subsequent calls are a plain lookup
            self.subs_elements[sub_id][c_nm]["elements_display"] = tmp_dict
        else:
            dict_element = self.subs_elements[sub_id][c_nm]["elements_display"]
            pos_end_line = dict_element["pos_end_line"]
            pos_gen_sub = dict_element["pos_gen_sub"]
            pos_gen = dict_element["pos_gen"]
            how_center = dict_element["how_center"]
        return pos_end_line, pos_gen_sub, pos_gen, how_center
    def _get_topo_coord(self, sub_id, observation, elements):
        """Compute, for substation ``sub_id``, the position of its (at most 2)
        buses and the bus assignment vector of its elements.

        Returns ``([], [])`` when everything is on a single bus, so callers
        can skip drawing the substation internals.
        """
        pos_center_sub = self._layout["substations"][sub_id]
        z_sub = pos_center_sub[0] + 1j * pos_center_sub[1]
        tmp = observation.state_of(substation_id=sub_id)
        if tmp["nb_bus"] == 1:
            # not to overload the plot, if everything is at the same bus, i don't plot it
            return [], []
        # I have at least 2 buses
        # I compute the position of each elements
        bus_vect = tmp["topo_vect"]
        # i am not supposed to have more than 2 buses
        buses_z = [None, None]  # center of the different buses
        nb_co = [0, 0]  # number of elements connected to each bus
        # the position of a bus is for now the average of all the elements in there
        for el_nm, dict_el in elements.items():
            this_el_bus = bus_vect[dict_el["sub_pos"]] - 1
            if this_el_bus >= 0:
                nb_co[this_el_bus] += 1
                if buses_z[this_el_bus] is None:
                    buses_z[this_el_bus] = dict_el["z"]
                else:
                    buses_z[this_el_bus] += dict_el["z"]
        # average of the positions of the connected elements
        buses_z = [el / nb for el, nb in zip(buses_z, nb_co)]
        theta_z = [cmath.phase((el - z_sub)) for el in buses_z]
        # try to have nodes "in opposition" to one another
        NN = np.array(nb_co) / np.sum(nb_co)
        diff_theta = theta_z[0] - theta_z[1]
        # alpha = cmath.pi + diff_theta
        alpha = -cmath.pi + diff_theta
        alpha = math.fmod(alpha, 2 * cmath.pi)
        # spread the correction proportionally to how many elements each bus holds
        theta_z = [theta_z[0] - alpha * NN[1], theta_z[1] + alpha * NN[0]]
        # buses_z = [z_sub + (self.radius_sub - self.bus_radius) * 0.75 * cmath.exp(1j * theta) for theta in theta_z]
        buses_z = [
            z_sub + (self.radius_sub - self.bus_radius) * 0.6 * cmath.exp(1j * theta)
            for theta in theta_z
        ]
        return buses_z, bus_vect
@staticmethod
def _get_position(theta):
quarter_pi = cmath.pi / 4
half_pi = cmath.pi / 2.0
if theta >= -quarter_pi and theta < quarter_pi:
res = "center|left"
elif theta >= quarter_pi and theta < quarter_pi + half_pi:
res = "up|center"
elif theta >= quarter_pi + half_pi and theta < quarter_pi + 2.0 * half_pi:
res = "center|right"
else:
res = "down|center"
return res
def _get_text_unit(self, number, unit):
if number is not None:
if isinstance(number, float) or isinstance(number, np.float):
if np.isfinite(number):
if unit == "%":
number *= 100.0
number = "{:.1f}".format(number)
else:
return None
if unit is not None:
txt_ = "{}{}".format(number, unit)
else:
txt_ = number
else:
return None
return txt_
    def _draw_subs(self, fig=None, vals=None, colormap=None, unit=None):
        """Draw every substation on ``fig``.

        ``vals`` (one value per substation, or ``None``) is turned into the
        displayed text via :func:`_get_text_unit`; when ``colormap == "sub"``
        those values also drive the substation color.  Returns the list of
        backend-specific objects produced by :func:`_draw_subs_one_sub`.
        """
        subs = []
        # default (colormap-less) substation color
        colormap_ = lambda x: self.col_sub
        texts = None
        if vals is not None:
            texts = [self._get_text_unit(val, unit) for val in vals]
        if colormap is not None:
            if colormap == "sub":
                # normalize value for the color map
                vals = self._get_vals(vals)
                if texts is not None:
                    vals = [float(text if text is not None else 0.0) for text in texts]
        for sub_id, center in enumerate(self._layout["substations"]):
            if texts is None:
                # no value provided: display the substation name and its id
                txt_ = "{}\nid: {}".format(self.name_sub[sub_id], sub_id)
                this_col = colormap_("")
            else:
                txt_ = texts[sub_id]
                if colormap == "sub":
                    this_col = self._get_sub_color_map(vals[sub_id])
                else:
                    this_col = self.default_color
            subs.append(self._draw_subs_one_sub(fig, sub_id, center, this_col, txt_))
        return subs
def get_sub_color_map(self):
return None
def _draw_subs_one_sub(self, fig, sub_id, center, this_col, text):
return None
    def _draw_powerlines(self, fig=None, vals=None, colormap=None, unit=None):
        """Draw every powerline on ``fig``.

        ``vals`` is either ``None`` or a 3-element sequence:
        ``vals[0]`` the per-line value to display, ``vals[1]`` the line status
        (disconnected lines get the default color), ``vals[2]`` the origin
        active flow (its sign gives the drawing direction).  Returns the list
        of backend-specific objects from :func:`_draw_powerlines_one_powerline`.
        """
        lines = []
        # default (colormap-less) line color
        colormap_ = lambda x: self.col_line
        texts = None
        if vals is not None:
            vals_0 = vals[0]
            texts = [self._get_text_unit(val, unit) for val in vals[0]]
        if colormap is not None:
            if colormap == "line" and unit != "%":
                # normalize the value for the color map ("%" is already in [0, 1])
                vals_0 = self._get_vals(vals[0])
        for line_id in range(self.n_line):
            pos_or, pos_ex, *_ = self._get_line_coord(line_id)
            if texts is None:
                # no value provided: display the line name and its id
                txt_ = "{}\nid: {}".format(self.name_line[line_id], line_id)
                this_col = colormap_("")
            else:
                txt_ = texts[line_id]
                if colormap == "line":
                    this_col = self._get_line_color_map(vals_0[line_id])
                else:
                    this_col = self.default_color
            if vals is not None:
                value = vals_0[line_id]
                status = vals[1][line_id]
                por = vals[2][line_id]
            else:
                value = 0.0
                status = True
                por = 1.0
            # missing data falls back to "connected, flowing or -> ex"
            if por is None:
                por = 1.0
            if status is None:
                status = True
            if not status:
                this_col = self.default_color
            lines.append(
                self._draw_powerlines_one_powerline(
                    fig,
                    line_id,
                    pos_or,
                    pos_ex,
                    status,
                    value,
                    txt_,
                    por >= 0.0,
                    this_col,
                )
            )
        return lines
def _draw_powerlines_one_powerline(
self, fig, l_id, pos_or, pos_ex, status, value, txt_, or_to_ex, this_col
):
return None
    def _draw_loads(self, fig=None, vals=None, colormap=None, unit=None):
        """Draw every load on ``fig``.

        ``vals`` (one value per load, or ``None``) gives the displayed text;
        when ``colormap == "load"`` it also drives the load color.  Returns the
        list of backend-specific objects from :func:`_draw_loads_one_load`.
        """
        loads = []
        # default (colormap-less) load color
        colormap_ = lambda x: self.col_load
        texts = None
        if vals is not None:
            texts = [self._get_text_unit(val, unit) for val in vals]
        if colormap is not None:
            if colormap == "load":
                # normalized the value for the color map
                vals = self._get_vals(vals)
        for c_id in range(self.n_load):
            pos_end_line, pos_load_sub, pos_load, how_center = self._get_load_coord(
                c_id
            )
            if texts is None:
                # no value provided: display the load name and its id
                txt_ = "{}\nid: {}".format(self.name_load[c_id], c_id)
                this_col = colormap_("")
            else:
                txt_ = texts[c_id]
                if colormap == "load":
                    this_col = self._get_load_color_map(vals[c_id])
                else:
                    this_col = self.default_color
            loads.append(
                self._draw_loads_one_load(
                    fig,
                    c_id,
                    pos_load,
                    txt_,
                    pos_end_line,
                    pos_load_sub,
                    how_center,
                    this_col,
                )
            )
        return loads
def _draw_loads_one_load(
self,
fig,
l_id,
pos_load,
txt_,
pos_end_line,
pos_load_sub,
how_center,
this_col,
):
return None
def _get_sub_color_map(self, normalized_val):
return self._get_default_cmap(normalized_val)
def _get_load_color_map(self, normalized_val):
return self._get_default_cmap(normalized_val)
def _get_gen_color_map(self, normalized_val):
return self._get_default_cmap(normalized_val)
def _get_line_color_map(self, normalized_val):
return self._get_default_cmap(normalized_val)
def _get_default_cmap(self, normalized_val):
return self.default_color
def _get_vals(self, vals):
vals = copy.deepcopy(vals)
min_ = np.min(vals)
max_ = np.max(vals)
vals -= min_
vals /= max_ - min_ + 1e-5
# now vals is between 0 and 1, i push it toward 1 a bit to better see it
vals += 0.5
vals /= 1.5
return vals
    def _draw_gens(self, fig=None, vals=None, colormap=None, unit=None):
        """Draw every generator on ``fig``.

        ``vals`` (one value per generator, or ``None``) gives the displayed
        text; when ``colormap == "gen"`` it also drives the generator color.
        Returns the list of backend-specific objects from
        :func:`_draw_gens_one_gen`.
        """
        gens = []
        # default (colormap-less) generator color
        colormap_ = lambda x: self.col_gen
        texts = None
        if vals is not None:
            texts = [self._get_text_unit(val, unit) for val in vals]
        if colormap is not None:
            if colormap == "gen":
                # normalized the value for plot
                vals = self._get_vals(vals)
        for g_id in range(self.n_gen):
            pos_end_line, pos_gen_sub, pos_gen, how_center = self._get_gen_coord(g_id)
            if texts is None:
                # no value provided: display the generator name and its id
                txt_ = "{}\nid: {}".format(self.name_gen[g_id], g_id)
                this_col = colormap_("")
            else:
                txt_ = texts[g_id]
                if colormap == "gen":
                    this_col = self._get_gen_color_map(vals[g_id])
                else:
                    this_col = self.default_color
            gens.append(
                self._draw_gens_one_gen(
                    fig,
                    g_id,
                    pos_gen,
                    txt_,
                    pos_end_line,
                    pos_gen_sub,
                    how_center,
                    this_col,
                )
            )
        return gens
def _draw_gens_one_gen(
self, fig, g_id, pos_gen, txt_, pos_end_line, pos_gen_sub, how_center, this_col
):
return None
def _draw_topos(self, observation, fig):
res_topo = []
for sub_id, elements in enumerate(self.subs_elements):
buses_z, bus_vect = self._get_topo_coord(sub_id, observation, elements)
if not buses_z:
# I don't plot details of substations with 1 bus for better quality
continue
res_topo += self._draw_topos_one_sub(
fig, sub_id, buses_z, elements, bus_vect
)
return res_topo
def _draw_topos_one_sub(self, fig, sub_id, buses_z, elements, bus_vect):
return [None]
def _post_process_obs(
self, fig, reward, done, timestamp, subs, lines, loads, gens, topos
):
pass
def init_fig(self, fig, reward, done, timestamp):
pass
## DEPRECATED FUNCTIONS
def plot_observation(
self, observation, fig=None, line_info="rho", load_info="p", gen_info="p"
):
"""
.. warning:: /!\\\\ This module is deprecated /!\\\\
Prefer using the module `grid2op.PlotGrid
Parameters
----------
observation: :class:`grid2op.Observation.Observation`
The observation to plot
fig: ``plotly figure``
will be created if ``None``
line_info: ``str``
One of "rho", "a", or "p" the information that will be plotted on the powerline By default "rho".
load_info: ``str``
One of "p" or "v" the information displayed on the load (défault to "p").
gen_info: ``str``
One of "p" or "v" the information displayed on the generators (default to "p").
Returns
-------
res: ``plotly figure``
The resulting figure.
"""
warnings.warn(
'"plot_observation" method will be deprecated in future version. '
'Please use "plot_obs" instead.',
category=PendingDeprecationWarning,
)
res = self.plot_obs(
observation,
fig=fig,
line_info=line_info,
load_info=load_info,
gen_info=gen_info,
)
return res
def get_plot_observation(
self, observation, fig=None, line_info="rho", load_info="p", gen_info="p"
):
"""
.. warning:: /!\\\\ This module is deprecated /!\\\\
Prefer using the module `grid2op.PlotGrid
Parameters
----------
observation: :class:`grid2op.Observation.Observation`
The observation to plot
fig: ``plotly figure``
will be created if ``None``
line_info: ``str``
One of "rho", "a", or "p" the information that will be plotted on the powerline By default "rho".
load_info: ``str``
One of "p" or "v" the information displayed on the load (défault to "p").
gen_info: ``str``
One of "p" or "v" the information displayed on the generators (default to "p").
Returns
-------
res: ``plotly figure``
The resulting figure.
"""
warnings.warn(
'"get_plot_observation" method will be deprecated in future version. '
'Please use "plot_obs" instead.',
category=PendingDeprecationWarning,
)
res = self.plot_obs(
observation,
fig=fig,
line_info=line_info,
load_info=load_info,
gen_info=gen_info,
)
return res
| 37,103 | 35.519685 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/Plot/EpisodeReplay.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import sys
import numpy as np
from datetime import datetime
import warnings
from grid2op.Episode import EpisodeData
from grid2op.Exceptions import Grid2OpException
from grid2op.Plot.PlotPyGame import PlotPyGame
from grid2op.Exceptions.PlotExceptions import PyGameQuit
try:
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"
import pygame
can_plot = True
except Exception as e:
can_plot = False
pass
try:
# from array2gif import write_gif
import imageio
import imageio_ffmpeg
can_save_gif = True
except:
can_save_gif = False
class EpisodeReplay(object):
    """
    INTERNAL

    .. warning:: /!\\\\ This class is deprecated /!\\\\
        Prefer using the class `grid2op.Episode.EpisodeReplay`

    This class allows to see visually what an agent has done during an episode. It uses for now the "PlotPygame" as the
    method to plot the different states of the system. It reads directly data from the runner.

    It can be used the following manner.

    .. code-block:: python

        import grid2op
        agent_class = grid2op.Agent.DoNothingAgent  # change that for studying other agent
        env = grid2op.make()  # make the default environment
        runner = grid2op.Runner.Runner(**env.get_params_for_runner(), agentClass=agent_class)
        path_log = "agent_log"  # where the runner will output the standardized data when running the agent.
        res = runner.run(nb_episode=1, path_save=path_log)

        # and when it's done, you can visualize it this way:
        episode_replay = EpisodeReplay(agent_path=path_log)
        episode_id = res[0][1]
        episode_replay.replay_episode(episode_id, max_fps=10)

        # you can pause by clicking the "space" key
        # At any time, you can quit by pressing the "esc" key or the "exit" button of the window.

    Attributes
    ----------
    agent_path: ``str``
        The path where the logs of the agent are stored. It is recommended to use a
        :class:`grid2op.Runner.Runner` to save the logs of the agent.

    episode_data: :class:`grid2op.EpisodeData.EpisodeData`, optional
        The last data of the episode inspected.
    """

    def __init__(self, agent_path):
        """Store the path to the agent's logs, checking it exists.

        Raises
        ------
        Grid2OpException
            If ``agent_path`` does not exist on disk.
        """
        warnings.warn(
            "This whole class has been deprecated. Use `grid2op.PlotGrid module instead`",
            category=DeprecationWarning,
        )
        if not os.path.exists(agent_path):
            raise Grid2OpException(
                'Nothing is found at "{}" where an agent path should have been.'.format(
                    agent_path
                )
            )
        self.agent_path = agent_path
        self.episode_data = None
        if not can_save_gif:
            # only a warning here: saving a video is optional
            warnings.warn(
                'The final video will not be saved as "imageio" and "imageio_ffmpeg" packages cannot be '
                'imported. Please try "{} -m pip install imageio imageio-ffmpeg"'.format(
                    sys.executable
                )
            )

    def replay_episode(self, episode_id, max_fps=10, video_name=None, display=True):
        """
        .. warning:: /!\\\\ This class is deprecated /!\\\\
            Prefer using the class `grid2op.Episode.EpisodeReplay`

        When called, this function will start the display of the episode in a "mini movie" format.

        Parameters
        ----------
        episode_id: ``str``
            ID of the episode to replay

        max_fps: ``int``
            Maximum "frame per second". When it's low, you will have more time to look at each frame, but the episode
            will last longer. When it's high, episode will be faster, but frames will stay less time on the screen.

        video_name: ``str``
            In beta mode for now. This allows to save the "video" of the episode in a gif or a mp4 for example.

        display: ``bool``
            Whether the frames are rendered on screen while replaying.

        Raises
        ------
        Grid2OpException
            If the episode cannot be found, or if a video was requested but
            the "imageio" / "imageio_ffmpeg" packages are not available.
        """
        path_ep = os.path.join(self.agent_path, episode_id)
        if not os.path.exists(path_ep):
            raise Grid2OpException(
                'No episode is found at "{}" where the episode should have been.'.format(
                    path_ep
                )
            )
        # fail early when a video is requested but cannot be written.
        # NB: the previous test was inverted ("if video_name is None"): it
        # raised when NO video was requested, and crashed later in
        # imageio.mimwrite when one actually was.
        if video_name is not None:
            if not can_save_gif:
                raise Grid2OpException(
                    'The final video cannot be saved as "imageio" and "imageio_ffmpeg" '
                    "packages cannot be imported. Please try "
                    '"{} -m pip install imageio imageio-ffmpeg"'.format(sys.executable)
                )
        self.episode_data = EpisodeData.from_disk(
            agent_path=self.agent_path, name=episode_id
        )
        plot_runner = PlotPyGame(
            self.episode_data.observation_space, timestep_duration_seconds=1.0 / max_fps
        )
        nb_timestep_played = int(self.episode_data.meta["nb_timestep_played"])
        all_obs = [el for el in self.episode_data.observations]
        all_reward = [el for el in self.episode_data.rewards]
        if video_name is not None:
            # frame buffer in which each rendered pygame surface is accumulated
            total_array = np.zeros(
                (
                    nb_timestep_played + 1,
                    plot_runner.video_width,
                    plot_runner.video_height,
                    3,
                ),
                dtype=np.uint8,
            )
        if display is False:
            plot_runner.deactivate_display()
        for i, (obs, reward) in enumerate(zip(all_obs, all_reward)):
            timestamp = datetime(
                year=obs.year,
                month=obs.month,
                day=obs.day,
                hour=obs.hour_of_day,
                minute=obs.minute_of_hour,
            )
            try:
                plot_runner.plot_obs(
                    observation=obs,
                    reward=reward,
                    timestamp=timestamp,
                    done=i == nb_timestep_played - 1,
                )
                array_ = pygame.surfarray.array3d(plot_runner.screen)
                if video_name is not None:
                    total_array[i, :, :, :] = array_.astype(np.uint8)
            except PyGameQuit:
                # user pressed "esc" or closed the window: stop the replay
                break
        if video_name is not None:
            # pygame surfaces are (width, height, 3); swap to (height, width, 3)
            imageio.mimwrite(video_name, np.swapaxes(total_array, 1, 2), fps=max_fps)
        plot_runner.close()
| 6,746 | 35.080214 | 119 | py |
Grid2Op | Grid2Op-master/grid2op/Plot/PlotMatplotlib.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
TODO
.. code-block:: python
# make the relevant import
from grid2op.MakeEnv import make
from grid2op.PlotPlotly import PlotObs
# create a simple toy environment
environment = make("case5_example")
# set up the plot utility
graph_layout = [(0,0), (0,400), (200,400), (400, 400), (400, 0)]
plot_helper = PlotObs(substation_layout=graph_layout,
observation_space=environment.observation_space)
# perform a step from this environment:
do_nothing = environment.action_space({})
environment.step(act)
# do the actual plot
fig = plot_helper.get_plot_observation(environment.get_obs())
fig.show()
"""
import warnings
from grid2op.Exceptions import PlotError
from grid2op.Plot.BasePlot import BasePlot
try:
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
can_plot = True
except Exception as e:
can_plot = False
pass
# TODO add tests there
class PlotMatplotlib(BasePlot):
"""
INTERNAL
.. warning:: /!\\\\ This class is deprecated /!\\\\
Prefer using the class `grid2op.PlotGrid.PlotMatplot`
This class aims at simplifying the representation of the grid using matplotlib graphical libraries.
It can be used to inspect position of elements, or to project some static data on this plot. It can be useful
to have a look at the thermal limit or the maximum value produced by generators etc.
"""
    def __init__(
        self,
        observation_space,
        substation_layout=None,
        radius_sub=25.0,
        load_prod_dist=70.0,
        bus_radius=4.0,
        alpha_obj=0.3,
        figsize=(15, 15),
    ):
        """Build a matplotlib-based plot helper.

        Parameters up to ``bus_radius`` are forwarded to :class:`BasePlot`;
        ``alpha_obj`` is the transparency used for drawn objects and
        ``figsize`` the size (in inches) of figures created by this class.

        Raises
        ------
        RuntimeError
            If matplotlib could not be imported.
        """
        BasePlot.__init__(
            self,
            substation_layout=substation_layout,
            observation_space=observation_space,
            radius_sub=radius_sub,
            load_prod_dist=load_prod_dist,
            bus_radius=bus_radius,
        )
        warnings.warn(
            "This whole class has been deprecated. Use `grid2op.PlotGrid.PlotMatplot` instead`",
            category=DeprecationWarning,
        )
        if not can_plot:
            raise RuntimeError(
                'Impossible to plot as matplotlib cannot be imported. Please install "matplotlib" '
                ' with "pip install --update matplotlib"'
            )
        self.alpha_obj = alpha_obj
        # default color of each object type (matplotlib single-letter codes)
        self.col_line = "b"
        self.col_sub = "r"
        self.col_load = "k"
        self.col_gen = "g"
        self.figsize = figsize
        self.default_color = "k"
        self.my_cmap = plt.get_cmap("Reds")
        # accepted type(s) for the "fig" argument of the plotting methods
        self.accepted_figure_class = matplotlib.figure.Figure
        self.accepted_figure_class_tuple = (
            matplotlib.figure.Figure,
            matplotlib.axes.Axes,
        )
    def init_fig(self, fig, reward, done, timestamp):
        """Normalize the ``fig`` argument into a ``(figure, axes)`` pair.

        ``fig`` may be ``None`` (a new figure is created), a matplotlib
        ``Figure`` (its current axes is used) or a ``(Figure, Axes)`` tuple as
        returned by ``plt.subplots(1, 1)``.  ``reward``, ``done`` and
        ``timestamp`` are unused here.

        Raises
        ------
        PlotError
            If ``fig`` is not one of the accepted forms.
        """
        if fig is None:
            fig, ax = plt.subplots(1, 1, figsize=self.figsize)
        elif isinstance(fig, tuple):
            if len(fig) != 2:
                raise PlotError(
                    'PlotMatplotlib "fig" argument should be, if a tuple, a tuple containing a figure '
                    "and an axe, for example the results of `plt.subplots(1, 1)`. You provided "
                    "a tuple of length {}".format(len(fig))
                )
            fig, ax = fig
            if not isinstance(fig, self.accepted_figure_class):
                raise PlotError(
                    'PlotMatplotlib "fig" argument should be an object of type "{}" and not "{}".'
                    "".format(self.accepted_figure_class, type(fig))
                )
            if not isinstance(ax, self.accepted_figure_class_tuple[1]):
                raise PlotError(
                    'PlotMatplotlib "fig" argument should be an object of type "{}" and not "{}".'
                    "".format(self.accepted_figure_class, type(ax))
                )
        elif isinstance(fig, self.accepted_figure_class):
            ax = fig.gca()
        else:
            raise PlotError(
                'PlotMatplotlib "fig" argument should be an object of type "{}" and not "{}".'
                "".format(self.accepted_figure_class, type(fig))
            )
        return (fig, ax)
    def post_process_layout(self, fig, subs, lines, loads, gens, topos):
        """Add a color legend (powerline / substation / load / generator) to the axes.

        ``fig`` is the ``(figure, axes)`` pair from :func:`init_fig`; the other
        arguments are the drawn objects and are unused here.
        """
        # dummy line handles, only used as colored legend keys
        legend_help = [
            Line2D([0], [0], color=self.col_line, lw=4),
            Line2D([0], [0], color=self.col_sub, lw=4),
            Line2D([0], [0], color=self.col_load, lw=4),
            Line2D([0], [0], color=self.col_gen, lw=4),
        ]
        fig, ax = fig
        ax.legend(legend_help, ["powerline", "substation", "load", "generator"])
def _getverticalalignment(self, how_center):
verticalalignment = "center"
if how_center.split("|")[0] == "up":
verticalalignment = "bottom"
elif how_center.split("|")[0] == "down":
verticalalignment = "top"
return verticalalignment
    def _draw_loads_one_load(
        self,
        fig,
        l_id,
        pos_load,
        txt_,
        pos_end_line,
        pos_load_sub,
        how_center,
        this_col,
    ):
        """Draw one load: a line from its substation anchor to the load
        position, plus its label (``txt_``) when one is provided.

        ``fig`` is the ``(figure, axes)`` pair; ``pos_load`` is a complex
        number, ``pos_load_sub`` an (x, y) tuple.  ``l_id`` and
        ``pos_end_line`` are unused by this backend.
        """
        fig, ax = fig
        ax.plot(
            [pos_load_sub[0], pos_load.real],
            [pos_load_sub[1], pos_load.imag],
            color=this_col,
            alpha=self.alpha_obj,
        )
        if txt_ is not None:
            verticalalignment = self._getverticalalignment(how_center)
            ax.text(
                pos_load.real,
                pos_load.imag,
                txt_,
                color=this_col,
                horizontalalignment=how_center.split("|")[1],
                verticalalignment=verticalalignment,
            )
    def _draw_gens_one_gen(
        self, fig, g_id, pos_gen, txt_, pos_end_line, pos_gen_sub, how_center, this_col
    ):
        """Draw one generator: a line from its substation anchor to the
        generator position, plus its label (``txt_``) when one is provided.

        NOTE(review): the passed ``pos_gen`` / ``pos_gen_sub`` / ``how_center``
        arguments are ignored and recomputed from ``self._get_gen_coord`` —
        presumably equivalent since the coordinates are cached; confirm intended.
        """
        fig, ax = fig
        pos_end_line_, pos_gen_sub_, pos_gen_, how_center_ = self._get_gen_coord(g_id)
        ax.plot(
            [pos_gen_sub_[0], pos_gen_.real],
            [pos_gen_sub_[1], pos_gen_.imag],
            color=this_col,
            alpha=self.alpha_obj,
        )
        if txt_ is not None:
            verticalalignment = self._getverticalalignment(how_center_)
            ax.text(
                pos_gen_.real,
                pos_gen_.imag,
                txt_,
                color=this_col,
                horizontalalignment=how_center_.split("|")[1],
                verticalalignment=verticalalignment,
            )
    def _draw_powerlines_one_powerline(
        self, fig, l_id, pos_or, pos_ex, status, value, txt_, or_to_ex, this_col
    ):
        """Draw one powerline between its two ends; disconnected lines are
        dashed, and the label (``txt_``) is placed at the line's midpoint.

        ``fig`` is the ``(figure, axes)`` pair.  ``l_id``, ``value`` and
        ``or_to_ex`` are unused by this backend.
        """
        fig, ax = fig
        ax.plot(
            [pos_or[0], pos_ex[0]],
            [pos_or[1], pos_ex[1]],
            color=this_col,
            alpha=self.alpha_obj,
            # dashed rendering marks a disconnected powerline
            linestyle="solid" if status else "dashed",
        )
        if txt_ is not None:
            ax.text(
                (pos_or[0] + pos_ex[0]) * 0.5,
                (pos_or[1] + pos_ex[1]) * 0.5,
                txt_,
                color=this_col,
                horizontalalignment="center",
                verticalalignment="center",
            )
    def _draw_subs_one_sub(self, fig, sub_id, center, this_col, text):
        """Draw one substation as an unfilled circle with its label at the center.

        ``fig`` is the ``(figure, axes)`` pair; ``sub_id`` is unused by this
        backend.
        """
        fig, ax = fig
        sub_circ = plt.Circle(
            center, self.radius_sub, color=this_col, fill=False
        )  # , alpha=self.alpha_obj)
        ax.add_artist(sub_circ)
        if text is not None:
            ax.text(
                center[0],
                center[1],
                text,
                color=this_col,
                horizontalalignment="center",
                verticalalignment="center",
            )
def _get_default_cmap(self, normalized_val):
return self.my_cmap(normalized_val)
    def _draw_topos_one_sub(self, fig, sub_id, buses_z, elements, bus_vect):
        """Draw the buses of one substation (filled circles, one color per
        bus) and connect each element to its bus.

        ``fig`` is the ``(figure, axes)`` pair; ``sub_id`` is unused by this
        backend.  Always returns an empty list (nothing to post-process).
        """
        fig, ax = fig
        res_sub = []  # NOTE(review): never filled nor returned — confirm intended
        # I plot the buses
        for bus_id, z_bus in enumerate(buses_z):
            # bus 1 is orange, bus 2 is blue (matplotlib default palette colors)
            bus_color = "#ff7f0e" if bus_id == 0 else "#1f77b4"
            bus_circ = plt.Circle(
                (z_bus.real, z_bus.imag), self.bus_radius, color=bus_color, fill=True
            )
            ax.add_artist(bus_circ)
        # i connect every element to the proper bus with the proper color
        for el_nm, dict_el in elements.items():
            this_el_bus = bus_vect[dict_el["sub_pos"]] - 1
            if this_el_bus >= 0:
                color = "#ff7f0e" if this_el_bus == 0 else "#1f77b4"
                ax.plot(
                    [buses_z[this_el_bus].real, dict_el["z"].real],
                    [buses_z[this_el_bus].imag, dict_el["z"].imag],
                    color=color,
                    alpha=self.alpha_obj,
                )
        return []
def _draw_powerlines____________(self, ax, texts=None, colormap=None):
colormap_ = lambda x: self.col_line
vals = [0.0 for _ in range(self.n_line)]
if texts is not None:
vals = [float(text if text is not None else 0.0) for text in texts]
if colormap is not None:
colormap_ = lambda x: "k"
if colormap == "line":
colormap_ = plt.get_cmap("Reds")
vals = self._get_vals(vals)
for line_id in range(self.n_line):
if texts is None:
text = "{}\nid: {}".format(self.name_line[line_id], line_id)
this_col = colormap_("")
else:
text = texts[line_id]
this_col = colormap_(vals[line_id])
pos_or, pos_ex, *_ = self._get_line_coord(line_id)
| 10,122 | 32.856187 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/Plot/PlotPlotly.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
This module provide a simple class to represent an :class:`grid2op.BaseObservation.BaseObservation` as a plotly graph.
We are aware that the graph can be largely improved. This tool is an example on what can be done with the Grid2Op
framework.
It requires a valid installation of plotly and seaborn. These dependencies can be installed with:
.. code-block:: bash
pip3 install grid2op[plots]
To use this plotting utilities, for example in a jupyter notebook please refer to the
``getting_started/4_StudyYourAgent`` notebook for more details information. The basic usage of this function is:
.. code-block:: python
# make the relevant import
from grid2op.MakeEnv import make
from grid2op.PlotPlotly import PlotObs
# create a simple toy environment
environment = make("case5_example")
# set up the plot utility
graph_layout = [(0,0), (0,400), (200,400), (400, 400), (400, 0)]
plot_helper = PlotObs(substation_layout=graph_layout,
observation_space=environment.observation_space)
# perform a step from this environment:
do_nothing = environment.action_space({})
environment.step(act)
# do the actual plot
fig = plot_helper.get_plot_observation(environment.get_obs())
fig.show()
"""
import numpy as np
from grid2op.Plot.BasePlot import BasePlot
from grid2op.Exceptions import PlotError
try:
import plotly.graph_objects as go
import seaborn as sns
can_plot = True
except Exception as e:
can_plot = False
pass
# TODO add tests there
# Some utilities to plot substation, lines or get the color id for the colormap.
def draw_sub(pos, radius=50, line_color="LightSeaGreen"):
    """
    INTERNAL

    .. warning:: /!\\\\ This class is deprecated /!\\\\

    Build the plotly shape (a circle) representing one substation.

    Parameters
    ----------
    pos: ``tuple``
        (x, y) position of the center of the substation.

    radius: ``float``
        Positive floating point representing the "radius" of the substation.

    line_color: ``str``
        Color of the circle outline.

    Returns
    -------
    res: :class:`plotly.graph_objects.layout.Shape`
        A representation, as a plotly object, of the substation.
    """
    center_x, center_y = pos
    # circle given by its bounding box, drawn below the data traces
    return go.layout.Shape(
        type="circle",
        xref="x",
        yref="y",
        x0=center_x - radius,
        y0=center_y - radius,
        x1=center_x + radius,
        y1=center_y + radius,
        line_color=line_color,
        layer="below",
    )
def get_col(rho):
    """
    INTERNAL

    .. warning:: /!\\\\ This class is deprecated /!\\\\

    Return the color-palette index matching a powerline capacity usage.

    Parameters
    ----------
    rho: ``float``
        The capacity usage of a given powerline.

    Returns
    -------
    res: ``int``
        The integer (between 0 and 6, index 4 unused) of this line capacity
        usage in terms of color.
    """
    # (upper threshold, palette index) pairs, in increasing order
    for threshold, palette_idx in ((0.3, 0), (0.5, 1), (0.75, 2), (0.9, 3), (0.95, 5)):
        if rho < threshold:
            return palette_idx
    return 6
def draw_line(pos_sub_or, pos_sub_ex, rho, color_palette, status, line_color="gray"):
    """
    INTERNAL

    .. warning:: /!\\\\ This class is deprecated /!\\\\

    Draw a powerline between two substations as a plotly shape.

    NOTE(review): despite their presence in the signature, ``rho`` and
    ``color_palette`` are NOT used by this function: the color always comes
    from ``line_color`` and only ``status`` influences the rendering (dashed
    line when the powerline is disconnected).

    Parameters
    ----------
    pos_sub_or: ``tuple``
        Position (x,y) of the origin end of the powerline

    pos_sub_ex: ``tuple``
        Position (x,y) of the extremity end of the powerline

    rho: ``float``
        Line capacity usage (currently unused, kept for interface compatibility)

    color_palette: ``object``
        The color palette (currently unused, kept for interface compatibility)

    status: ``bool``
        Powerline status (connected / disconnected). Disconnected powerlines are dashed.

    line_color: ``str``
        Color used to draw the line.

    Returns
    -------
    res: :class:`plotly.graph_objects.layout.Shape`
        A representation, as a plotly object of the powerline
    """
    x_0, y_0 = pos_sub_or
    x_1, y_1 = pos_sub_ex
    res = go.layout.Shape(
        type="line",
        xref="x",
        yref="y",
        x0=x_0,
        y0=y_0,
        x1=x_1,
        y1=y_1,
        layer="below",
        # disconnected lines are rendered dashed
        line=dict(color=line_color, dash=None if status else "dash"),
    )
    return res
class PlotPlotly(BasePlot):
    """
    INTERNAL

    .. warning:: /!\\\\ This class is deprecated /!\\\\

        Prefer using the class `grid2op.PlotGrid.PlotPlotly`

    This class aims at simplifying the representation of an observation as a plotly object given a layout of a given
    powergrid substation.
    It "automatically" handles the positioning of the powerlines, loads and generators based on that.

    This class is just here as an inspection tool. The results can be of course improved, especially the label of the
    powerlines, or the positioning of the loads and generators.

    Attributes
    ----------
    _layout: ``dict``
        Initial layout of the powergrid.

    subs_elements: ``list``
        For each substation, it gives a representation of all the object connected to it. So, for each substation, it
        has a dictionary with:

            - key: the name of the objects
            - value: a dictionary representing this object containing:

                - "type" : its type, among "load", "gen" and "line"
                - "sub_pos" (``int``) and index representing which element of the substation represents this object
                - "pos" : its position as a tuple
                - "z": its position as a complex number

    cols: ``object``
        A color palette, this should not be changed for now.

    radius_sub: ``float``
        The radius of each substation. The bigger this number, the better the topology will be visible, but the more
        space taken on the overall plot

    load_prod_dist: ``float``
        The distance between a load and a generator from the center of the substation. This must be higher than
        :attr:`PlotObs.radius_sub`

    bus_radius: ``float``
        The radius of the bus. When multiple buses are present in a substation, they are materialized by a filled
        circle. This number represents the size of these circles.
    """

    def __init__(
        self,
        observation_space,
        substation_layout=None,
        radius_sub=25.0,
        load_prod_dist=70.0,
        bus_radius=4.0,
    ):
        """
        Parameters
        ----------
        substation_layout: ``list``
            List of tuples giving the position of each of the substations of the powergrid.

        observation_space: :class:`grid2op.Observation.ObservationSpace`
            BaseObservation space

        radius_sub: ``float``
            Radius (in pixels) of the substations representation.

        load_prod_dist: ``float``
            Distance (in pixels) between a substation and its loads / generators.

        bus_radius: ``float``
            Radius (in pixels) of the circles representing the buses.

        Raises
        ------
        PlotError
            If plotly or seaborn could not be imported.
        """
        BasePlot.__init__(
            self,
            substation_layout=substation_layout,
            observation_space=observation_space,
            radius_sub=radius_sub,
            load_prod_dist=load_prod_dist,
            bus_radius=bus_radius,
        )
        if not can_plot:
            raise PlotError(
                'Impossible to plot as plotly cannot be imported. Please install "plotly" and '
                '"seaborn" with "pip install --update plotly seaborn"'
            )

        # define a color palette, whatever...
        sns.set()
        pal = sns.light_palette("darkred", 8)
        self.cols = pal.as_hex()[1:]

        self.col_line = "royalblue"
        self.col_sub = "red"
        self.col_load = "black"
        self.col_gen = "darkgreen"
        self.default_color = "black"
        self.type_fig_allowed = go.Figure

    def init_fig(self, fig, reward, done, timestamp):
        """Return ``fig`` if it is a valid plotly figure, or create a fresh one when ``fig`` is ``None``."""
        if fig is None:
            fig = go.Figure()
        elif not isinstance(fig, self.type_fig_allowed):
            raise PlotError(
                "PlotPlotly cannot plot on figure of type {}. The accepted type is {}. You provided an "
                'invalid argument for "fig"'.format(type(fig), self.type_fig_allowed)
            )
        return fig

    def _post_process_obs(
        self, fig, reward, done, timestamp, subs, lines, loads, gens, topos
    ):
        """Gather all (shape, trace) pairs produced by the ``_draw_*`` helpers into the figure."""
        # update the figure with all these information
        traces = []
        subs_el = []
        lines_el = []
        loads_el = []
        gens_el = []
        topos_el = []
        for el, trace_ in subs:
            subs_el.append(el)
            traces.append(trace_)
        for el, trace_ in lines:
            lines_el.append(el)
            traces.append(trace_)
        for el, trace_ in loads:
            loads_el.append(el)
            traces.append(trace_)
        for el, trace_ in gens:
            gens_el.append(el)
            traces.append(trace_)
        for el, _ in topos:
            # BUG FIX: each topology shape used to be appended twice
            # (copy/paste error), duplicating every bus / connection shape
            # in the final layout. Topology elements carry no text trace.
            topos_el.append(el)

        fig.update_layout(shapes=subs_el + lines_el + loads_el + gens_el + topos_el)
        for trace_ in traces:
            fig.add_trace(trace_)

        # update legend, background color, size of the plot etc.
        fig.update_xaxes(
            range=[
                np.min([el for el, _ in self._layout["substations"]])
                - 1.5 * (self.radius_sub + self.load_prod_dist),
                np.max([el for el, _ in self._layout["substations"]])
                + 1.5 * (self.radius_sub + self.load_prod_dist),
            ],
            zeroline=False,
        )
        fig.update_yaxes(
            range=[
                np.min([el for _, el in self._layout["substations"]])
                - 1.5 * (self.radius_sub + self.load_prod_dist),
                np.max([el for _, el in self._layout["substations"]])
                + 1.5 * (self.radius_sub + self.load_prod_dist),
            ]
        )
        fig.update_layout(
            margin=dict(l=20, r=20, b=100),
            height=600,
            width=800,
            plot_bgcolor="white",
            yaxis={"showgrid": False, "showline": False, "zeroline": False},
            xaxis={"showgrid": False, "showline": False, "zeroline": False},
        )
        return fig

    def _draw_subs_one_sub(self, fig, sub_id, center, this_col, txt_):
        """Return the (shape, text trace) pair drawing one substation."""
        trace = go.Scatter(
            x=[center[0]],
            y=[center[1]],
            text=[txt_],
            mode="text",
            showlegend=False,
            textfont=dict(color=this_col),
        )
        res = draw_sub(center, radius=self.radius_sub, line_color=this_col)
        return res, trace

    def _draw_powerlines_one_powerline(
        self, fig, l_id, pos_or, pos_ex, status, value, txt_, or_to_ex, this_col
    ):
        """
        INTERNAL

        .. warning:: /!\\\\ This class is deprecated /!\\\\

            Prefer using the class `grid2op.PlotGrid.PlotPlotly`

        Draw one powerline between its two substations: the line shape itself and
        a text trace (``txt_``) at the middle of the segment.

        Returns
        -------
        ``tuple``
            The (shape, trace) pair for this powerline.
        """
        tmp = draw_line(
            pos_or,
            pos_ex,
            rho=value,
            color_palette=self.cols,
            status=status,
            line_color=this_col,
        )
        trace = go.Scatter(
            x=[(pos_or[0] + pos_ex[0]) / 2],
            y=[(pos_or[1] + pos_ex[1]) / 2],
            text=[txt_],
            mode="text",
            showlegend=False,
            textfont=dict(color=this_col),
        )
        return tmp, trace

    def _draw_loads_one_load(
        self,
        fig,
        l_id,
        pos_load,
        txt_,
        pos_end_line,
        pos_load_sub,
        how_center,
        this_col,
    ):
        """Return the (shape, trace) pair drawing one load and its link to the substation."""
        # add the MW load
        trace = go.Scatter(
            x=[pos_load.real],
            y=[pos_load.imag],
            text=[txt_],
            mode="text",
            showlegend=False,
            textfont=dict(color=this_col),
        )
        # add the line between the MW display and the substation
        # TODO later one, add something that looks like a load, a house for example
        res = go.layout.Shape(
            type="line",
            xref="x",
            yref="y",
            x0=pos_end_line.real,
            y0=pos_end_line.imag,
            x1=pos_load_sub[0],
            y1=pos_load_sub[1],
            layer="below",
            line=dict(color=this_col),
        )
        return res, trace

    def _draw_gens_one_gen(
        self, fig, g_id, pos_gen, txt_, pos_end_line, pos_gen_sub, how_center, this_col
    ):
        """Return the (shape, trace) pair drawing one generator and its link to the substation."""
        # add the MW load
        trace = go.Scatter(
            x=[pos_gen.real],
            y=[pos_gen.imag],
            text=[txt_],
            mode="text",
            showlegend=False,
            textfont=dict(color=this_col),
        )
        # add the line between the MW display and the substation
        # TODO later one, add something that looks like a generator, and could depend on the type of it!
        res = go.layout.Shape(
            type="line",
            xref="x",
            yref="y",
            x0=pos_end_line.real,
            y0=pos_end_line.imag,
            x1=pos_gen_sub[0],
            y1=pos_gen_sub[1],
            layer="below",
            line=dict(color=this_col),
        )
        return res, trace

    def _draw_topos_one_sub(self, fig, sub_id, buses_z, elements, bus_vect):
        """Return the shapes materializing the buses of one substation and the element-bus connections."""
        res_sub = []
        # I plot the buses
        for bus_id, z_bus in enumerate(buses_z):
            bus_color = "#ff7f0e" if bus_id == 0 else "#1f77b4"
            res = go.layout.Shape(
                type="circle",
                xref="x",
                yref="y",
                x0=z_bus.real - self.bus_radius,
                y0=z_bus.imag - self.bus_radius,
                x1=z_bus.real + self.bus_radius,
                y1=z_bus.imag + self.bus_radius,
                fillcolor=bus_color,
                line_color=bus_color,
            )
            res_sub.append((res, None))
        # i connect every element to the proper bus with the proper color
        for el_nm, dict_el in elements.items():
            # bus ids in bus_vect are 1-based; -1 means "disconnected"
            this_el_bus = bus_vect[dict_el["sub_pos"]] - 1
            if this_el_bus >= 0:
                res = go.layout.Shape(
                    type="line",
                    xref="x",
                    yref="y",
                    x0=dict_el["z"].real,
                    y0=dict_el["z"].imag,
                    x1=buses_z[this_el_bus].real,
                    y1=buses_z[this_el_bus].imag,
                    line=dict(color="#ff7f0e" if this_el_bus == 0 else "#1f77b4"),
                )
                res_sub.append((res, None))
        return res_sub

    def _get_default_cmap(self, normalized_value):
        """Map a normalized line loading to a color of the seaborn palette."""
        return self.cols[get_col(normalized_value)]
# ---- file: grid2op/Plot/PlotPyGame.py ----
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
"""
This module defines the :class:`Renderer` that is able to display the state (:class:`grid2op.BaseObservation.BaseObservation`)
of the powergrid on a dedicated window.
It is also able to output a 3d representation of this representation to be further used by other libraries to
output gifs for example.
"""
import numpy as np
import cmath
import math
import os
import time
from grid2op.Plot.BasePlot import BasePlot
from grid2op.Exceptions.PlotExceptions import PyGameQuit, PlotError
try:
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"
import pygame
can_plot = True
except Exception as e:
can_plot = False
pass
class Point:
    # https://codereview.stackexchange.com/questions/70143/drawing-a-dashed-line-with-pygame
    """Lightweight 2D point used by the pygame renderer.

    Wraps an ``(x, y)`` pair as float coordinates and supports the arithmetic
    needed to walk along a segment: ``+``, ``-``, scaling and division by a
    scalar.  ``len(point)`` is the Euclidean norm truncated to an ``int``
    (``__len__`` must return an integer).
    """

    def __init__(self, point_t=(0, 0)):
        # accept any 2-sequence, store float coordinates
        self.x = float(point_t[0])
        self.y = float(point_t[1])

    # --- arithmetic operators -------------------------------------------
    def __add__(self, other):
        return Point((self.x + other.x, self.y + other.y))

    def __sub__(self, other):
        return Point((self.x - other.x, self.y - other.y))

    def __mul__(self, scalar):
        return Point((self.x * scalar, self.y * scalar))

    # NOTE: the three division variants all perform *true* division
    # (historical behavior, kept as-is); ``__div__`` is a Python 2 leftover
    # that the ``/`` operator never calls under Python 3.
    def __div__(self, scalar):
        return Point((self.x / scalar, self.y / scalar))

    def __floordiv__(self, scalar):
        return Point((self.x / scalar, self.y / scalar))

    def __truediv__(self, scalar):
        return Point((self.x / scalar, self.y / scalar))

    def __len__(self):
        # Euclidean norm, truncated to an integer number of pixels
        norm = math.sqrt(self.x**2 + self.y**2)
        return int(norm)

    # --- conversions ----------------------------------------------------
    def get(self):
        """Return the coordinates as a plain ``(x, y)`` tuple."""
        return (self.x, self.y)

    def to_cplx(self):
        """Return the point as the complex number ``x + 1j * y``."""
        return self.x + 1j * self.y

    @staticmethod
    def from_cplx(cplx):
        """Build a :class:`Point` from a complex number."""
        return Point((cplx.real, cplx.imag))
def _draw_dashed_line(surf, color, start_pos, end_pos, width=1, dash_length=10):
    # https://codereview.stackexchange.com/questions/70143/drawing-a-dashed-line-with-pygame
    # Draw a dashed line on surface ``surf`` by painting every other
    # ``dash_length``-long segment between ``start_pos`` and ``end_pos``.
    origin = Point(start_pos)
    target = Point(end_pos)
    displacement = target - origin
    length = len(displacement)  # NOTE: Point.__len__ truncates the norm to an int
    slope = displacement / length  # unit direction (ZeroDivisionError if the two ends coincide)
    for index in range(0, int(length / dash_length), 2):
        start = origin + (slope * index * dash_length)
        end = origin + (slope * (index + 1) * dash_length)
        pygame.draw.line(surf, color, start.get(), end.get(), width)
def _draw_arrow(
    surf,
    color,
    start_pos,
    end_pos,
    positive_flow,
    width=1,
    num_arrows=10,
    length_arrow=10,
    angle_arrow=30,
):
    # Draw ``num_arrows`` evenly spaced arrow heads along the segment joining
    # ``start_pos`` and ``end_pos``, oriented according to ``positive_flow``.
    if positive_flow:
        origin = Point(start_pos)
        target = Point(end_pos)
    else:
        # negative flow: arrows must point the other way, so swap the ends
        target = Point(start_pos)
        origin = Point(end_pos)

    displacement = target - origin
    length = len(displacement)
    slope = displacement / length
    # phi = cmath.phase(slope.to_cplx()) * 360 / 2*cmath.pi
    phi = cmath.phase(displacement.to_cplx()) * 360 / (2 * cmath.pi)  # segment angle, in degrees
    cste_ = 2 * cmath.pi / 360 * 1j  # degrees -> radians as a pure-imaginary factor for cmath.exp
    # unit rotations giving the two branches of an arrow head (+/- angle_arrow
    # around the direction opposite to the flow)
    rotatedown = cmath.exp(cste_ * (180 + phi + angle_arrow))
    rotateup = cmath.exp(cste_ * (180 + phi - angle_arrow))
    first_arrow_part = length_arrow * rotateup
    second_arrow_part = length_arrow * rotatedown

    per_displ = displacement / (num_arrows + 1)
    for index in range(0, int(num_arrows)):
        mid = origin + (per_displ * (index + 1))
        start_arrow = Point.from_cplx(mid.to_cplx() + first_arrow_part)
        end_arrow = Point.from_cplx(mid.to_cplx() + second_arrow_part)
        # , end_arrow.get()
        pygame.draw.lines(
            surf, color, False, [start_arrow.get(), mid.get(), end_arrow.get()], width
        )
class PlotPyGame(BasePlot):
    """
    INTERNAL

    .. warning:: /!\\\\ This class is deprecated /!\\\\

    This renderer should be used only for "online" representation of a powergrid.
    """

    def __init__(
        self,
        observation_space,
        substation_layout=None,
        radius_sub=20.0,
        load_prod_dist=70.0,
        bus_radius=5.0,
        timestep_duration_seconds=1.0,
        fontsize=20,
    ):
        """
        Parameters
        ----------
        substation_layout: ``list``
            List of tupe given the position of each of the substation of the powergrid.

        observation_space: :class:`grid2op.Observation.ObservationSpace`
            BaseObservation space used for the display

        radius_sub: ``int``
            radius (in pixel) of the substations representation.

        load_prod_dist: ``int``
            distance (in pixels) between the substation and the load or the generator.

        bus_radius: ``int``
            The buses are represented by small circles. This is the radius (in pixel) for the pixels representing
            the buses.

        timestep_duration_seconds: ``float``
            Minimum time during which a time step will stay on the screen, in second.

        fontsize: ``int``
            size of the font used to display the texts.

        Raises
        ------
        PlotError
            If pygame could not be imported.
        """
        if not can_plot:
            raise PlotError("Impossible to plot as pygame cannot be imported.")

        # drawing area reserved for the grid itself (the rest is the side panel)
        self.window_grid = (1000, 700)
        self.lag_x = 150
        self.lag_y = 100

        BasePlot.__init__(
            self,
            substation_layout=substation_layout,
            observation_space=observation_space,
            radius_sub=radius_sub,
            load_prod_dist=load_prod_dist,
            bus_radius=bus_radius,
        )

        # pygame
        self.__is_init = False
        self.video_width, self.video_height = 1300, 700
        self.timestep_duration_seconds = timestep_duration_seconds
        self.time_last = None
        self.fontsize = fontsize
        self.background_color = [70, 70, 73]

        # init pygame
        self.display_called = None
        self.screen = None
        self.font = None
        self.init_pygame()

        # pause button
        self.font_pause = pygame.font.Font(None, 30)
        self.color_text = pygame.Color(255, 255, 255)
        self.text_paused = self.font_pause.render("Game Paused", True, self.color_text)

        # maximum overflow possible
        self.rho_max = 2.0

        # utilities
        self.cum_reward = 0.0
        self.nb_timestep = 0

        # colors
        self.col_line = pygame.Color(0, 0, 255)
        self.col_sub = pygame.Color(255, 0, 0)
        self.col_load = pygame.Color(0, 0, 0)
        self.col_gen = pygame.Color(0, 255, 0)
        self.default_color = pygame.Color(0, 0, 0)

        # deactivate the display on the screen
        self._deactivate_display = False

    def change_duration_timestep_display(self, new_timestep_duration_seconds):
        """
        INTERNAL

        .. warning:: /!\\\\ This class is deprecated /!\\\\

        Change the duration on which the screen is displayed.
        """
        self.timestep_duration_seconds = new_timestep_duration_seconds

    def init_pygame(self):
        # Lazily initialize the pygame window and font; safe to call repeatedly.
        if self.__is_init is False:
            pygame.init()
            self.display_called = False
            self.screen = pygame.display.set_mode(
                (self.video_width, self.video_height), pygame.RESIZABLE
            )
            self.font = pygame.font.Font(None, self.fontsize)
            self.__is_init = True

    def reset(self, env):
        """
        INTERNAL

        .. warning:: /!\\\\ This class is deprecated /!\\\\

        Reset the reward / timestep counters at the beginning of an episode.

        Parameters
        ----------
        env: :class:`grid2op.Environment.Environment`
            The used environment.

        Returns
        -------

        """
        self.cum_reward = 0.0
        self.nb_timestep = 0
        self.rho_max = env.parameters.HARD_OVERFLOW_THRESHOLD

    def _get_sub_layout(self, init_layout):
        # flip y coordinates (presumably because pygame's y axis points
        # downward — TODO confirm), ...
        tmp = [(el1, -el2) for el1, el2 in init_layout]

        # then scale the grid to be on the window, with the proper margin (careful, margin are applied both left and r
        # and right, so count twice
        tmp_arr = np.array(tmp)
        min_ = tmp_arr.min(axis=0)
        max_ = tmp_arr.max(axis=0)
        b = min_
        a = max_ - min_

        res = [
            (
                int((el1 - b[0]) / a[0] * (self.window_grid[0] - 2 * self.lag_x))
                + self.lag_x,
                int((el2 - b[1]) / a[1] * (self.window_grid[1] - 2 * self.lag_y))
                + self.lag_y,
            )
            for el1, el2 in tmp
        ]
        return res

    def _event_looper(self, force=False):
        # Poll pygame events once and return ``(paused, has_quit)``:
        # window-close / ESC quit the renderer, SPACE toggles the pause state.
        has_quit = False
        if self._deactivate_display:
            return force, has_quit

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                has_quit = True
                return force, has_quit
                # pygame.quit()
                # exit()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    has_quit = True
                    return force, has_quit
                if event.key == pygame.K_SPACE:
                    self._get_plot_pause()
                    # pause_surface = self.draw_plot_pause()
                    # self.screen.blit(pause_surface, (320 + self.left_menu_shape[0], 320))
                    return not force, has_quit
        return force, has_quit

    def _press_key_to_quit(self):
        """
        INTERNAL

        .. warning:: /!\\\\ This class is deprecated /!\\\\

        This utility function waits for the player to press a key to exit the renderer (called when the episode is done)

        Returns
        -------
        res: ``bool``, ``bool``
            ``True`` if the human player closed the window, in this case it will stop the computation: no other episode
            will be computed. ``False`` otherwise.
        """
        if self._deactivate_display:
            return
        has_quit = False
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                has_quit = True
                return True, has_quit
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    has_quit = True
                    return True, has_quit
                if event.key == pygame.K_SPACE:
                    return True, has_quit
        return False, has_quit

    def close(self):
        """
        This method is called when the renderer should be close.
        """
        self.display_called = False
        try:
            self._quit_and_close()
        except PyGameQuit:
            # _quit_and_close always raises PyGameQuit; swallow it here as
            # "close" is the expected outcome.
            pass

    def _get_plot_pause(self):
        # Draw the "Game Paused" label and a pause glyph (two vertical bars)
        # on the side panel, then refresh the display.
        position = 300
        start_pause = position + self.text_paused.get_height()
        end_pause = start_pause + 50
        y_text_left = self.window_grid[0] + 100
        self.screen.blit(self.text_paused, (y_text_left, 300))

        pygame.draw.line(
            self.screen,
            self.color_text,
            (y_text_left + self.text_paused.get_width() // 2 - 10, start_pause),
            (y_text_left + self.text_paused.get_width() // 2 - 10, end_pause),
            10,
        )
        pygame.draw.line(
            self.screen,
            self.color_text,
            (y_text_left + self.text_paused.get_width() // 2 + 10, start_pause),
            (y_text_left + self.text_paused.get_width() // 2 + 10, end_pause),
            10,
        )
        pygame.display.flip()

    def _draw_final_information(self, reward, done, timestamp):
        # At the end of an episode, print the "game over" summary on the side panel.
        if done is not None:
            if done:
                text_label = "GAME OVER, press any key to continue to next episode."
                text_graphic = self.font.render(text_label, True, self.color_text)
                self.screen.blit(text_graphic, (self.window_grid[0] + 100, 100))
                text_label = "Total cumulated reward: {:.1f}".format(self.cum_reward)
                text_graphic = self.font.render(text_label, True, self.color_text)
                self.screen.blit(text_graphic, (self.window_grid[0] + 100, 130))
                text_label = "Total number timesteps: {:.1f}".format(self.nb_timestep)
                text_graphic = self.font.render(text_label, True, self.color_text)
                self.screen.blit(text_graphic, (self.window_grid[0] + 100, 160))

    def _quit_and_close(self):
        # Tear down the pygame display and signal the caller via PyGameQuit.
        # pygame.reset_vars()
        # pygame.gameLoop()
        pygame.display.quit()
        # pygame.quit()
        self.display_called = None
        self.screen = None
        self.font = None
        self.__is_init = False
        raise PyGameQuit()

    def deactivate_display(self):
        # Disable on-screen rendering and the inter-frame waits (e.g. when only
        # the rgb arrays are needed).
        self._deactivate_display = True

    def get_rgb(self, obs, reward=None, done=None, timestamp=None):
        """
        INTERNAL

        .. warning:: /!\\\\ This class is deprecated /!\\\\

        Computes and returns the rgb 3d array from an observation, and potentially other informations.

        Parameters
        ----------
        obs: :class:`grid2op.Observation.Observation`
            The observation to converte into a 3d array

        reward: ``float``
            The current reward

        done: ``bool``
            Whether this is the last frame of the episode.

        timestamp: ``datetime.datetime``
            The curent datetime corresponding to the observation

        Returns
        -------
        res: ``numpy.ndarray``
            The 3d representation of the observation that can then be converted to a gif, or an image using appropriate
            softwares.
        """
        self.plot_obs(obs, reward, done, timestamp)
        return pygame.surfarray.array3d(self.screen)

    def init_fig(self, fig, reward, done, timestamp):
        # Prepare the surface for a new frame: process pause/quit events,
        # enforce the minimum frame duration, then draw the side-panel text.
        self.init_pygame()

        if not self.display_called:
            self.display_called = True
            self.screen.fill(self.background_color)
            pygame.display.set_caption("Grid2Op Renderer")  # Window title

        force, has_quit = self._event_looper(force=False)
        while force:
            # stay in this loop while the game is paused
            force, has_quit = self._event_looper(force=force)
            pygame.time.wait(250)  # it's in ms

        if has_quit:
            self._quit_and_close()

        if reward is not None:
            self.cum_reward += reward
            self.nb_timestep += 1

        # The game is not paused anymore (or never has been), I can render the next surface
        if self.time_last is not None and self._deactivate_display is False:
            tmp = time.perf_counter()  # in second
            if tmp - self.time_last < self.timestep_duration_seconds:
                nb_sec_wait = int(
                    1000 * (self.timestep_duration_seconds - (tmp - self.time_last))
                )
                pygame.time.wait(nb_sec_wait)  # it's in ms
            self.time_last = time.perf_counter()
        else:
            self.time_last = time.perf_counter()

        self.screen.fill(self.background_color)

        if done is not None:
            if not done:
                # draw the generic information on the right part
                self._draw_generic_info(reward, done, timestamp)
            else:
                # inform user that it's over
                self._draw_final_information(reward, done, timestamp)

    def _post_process_obs(
        self, fig, reward, done, timestamp, subs, lines, loads, gens, topos
    ):
        """
        INTERNAL

        .. warning:: /!\\\\ This class is deprecated /!\\\\

        Flip the rendered frame to the screen; when the episode is done, wait
        for a key press then close the window (raising PyGameQuit).

        Parameters
        ----------
        fig
        subs
        lines
        loads
        gens
        topos

        Returns
        -------

        """
        self._draw_final_information(reward, done, timestamp)
        pygame.display.flip()
        if self._deactivate_display is False:
            if done:
                key_pressed = False
                while not key_pressed:
                    key_pressed, has_quit = self._press_key_to_quit()
                    # TODO that with fps !!!
                    pygame.time.wait(250)  # it's in ms
                self._quit_and_close()

    def _draw_generic_info(self, reward=None, done=None, timestamp=None):
        # Print the running episode information (rewards, step count, date)
        # on the side panel.
        if reward is not None:
            text_label = "Instantaneous reward: {:.1f}".format(reward)
            text_graphic = self.font.render(text_label, True, self.color_text)
            self.screen.blit(text_graphic, (self.window_grid[0] + 100, 100))
            text_label = "Cumulated reward: {:.1f}".format(self.cum_reward)
            text_graphic = self.font.render(text_label, True, self.color_text)
            self.screen.blit(text_graphic, (self.window_grid[0] + 100, 130))
            text_label = "Number timesteps: {:.1f}".format(self.nb_timestep)
            text_graphic = self.font.render(text_label, True, self.color_text)
            self.screen.blit(text_graphic, (self.window_grid[0] + 100, 160))

        if timestamp is not None:
            text_label = "Date : {:%Y-%m-%d %H:%M}".format(timestamp)
            text_graphic = self.font.render(text_label, True, self.color_text)
            self.screen.blit(text_graphic, (self.window_grid[0] + 100, 200))

    def _draw_subs_one_sub(self, fig, sub_id, center, this_col, text):
        # A substation is a circle outline with its label centered inside.
        pygame.draw.circle(
            self.screen,
            self.color_text,
            [int(el) for el in center],
            int(self.radius_sub),
            2,
        )
        text_graphic = self.font.render(text, True, this_col)
        self._aligned_text(center, text_graphic, center)

    def _get_default_cmap(self, normalized_val):
        # Map a normalized line loading in [0, 1] to a green-to-red pygame color.
        # step 0: compute thickness and color
        max_val = 1.0
        ratio_ok = 0.7
        start_red = 0.5
        amount_green = 235 - int(235.0 * (normalized_val / (max_val * ratio_ok)) ** 4)
        amount_red = int(255 * (normalized_val / (max_val * ratio_ok)) ** 4)

        # if normalized_val < ratio_ok * max_val:
        #     amount_green = 100 - int(100. * normalized_val / (max_val * ratio_ok))
        # if normalized_val > ratio_ok:
        #     tmp = (1.0 * (normalized_val - ratio_ok) / (max_val - ratio_ok))**2
        #     amount_red += int(235. * tmp)
        # print("normalized_val {}, amount_red {}".format(normalized_val, amount_red))

        # fix to prevent pygame bug
        if amount_red < 0:
            amount_red = int(0)
        elif amount_red > 255:
            amount_red = int(255)
        if amount_green < 0:
            amount_green = int(0)
        elif amount_green > 255:
            amount_green = int(255)
        amount_red = int(amount_red)
        amount_green = int(amount_green)
        color = pygame.Color(amount_red, amount_green, 20)
        return color

    def _draw_powerlines_one_powerline(
        self, fig, l_id, pos_or, pos_ex, status, value, txt_, or_to_ex, this_col
    ):
        # Draw one powerline: its label at the middle of the segment, then the
        # line itself (dashed when disconnected) with flow-direction arrows.
        text_graphic = self.font.render(txt_, True, this_col)
        pos_txt = [
            int((pos_or[0] + pos_ex[0]) * 0.5),
            int((pos_or[1] + pos_ex[1]) * 0.5),
        ]
        how_center = "center|center"
        self._aligned_text(how_center, text_graphic, pos_txt)
        if not status:
            # line is disconnected
            _draw_dashed_line(self.screen, this_col, pos_or, pos_ex)
        else:
            # line is connected
            # thickness grows with the line loading ``value``
            width = 1
            if value > self.rho_max:
                width = 4
            elif value > 1.0:
                width = 3
            elif value > 0.9:
                width = 2
            width += 3

            # step 1: draw the powerline with right color and thickness
            pygame.draw.line(self.screen, this_col, pos_or, pos_ex, width)

            # step 2: draw arrows indicating current flows
            _draw_arrow(
                self.screen,
                this_col,
                pos_or,
                pos_ex,
                or_to_ex,
                num_arrows=width,
                width=width,
            )

    def _aligned_text(self, pos, text_graphic, pos_text):
        # Blit ``text_graphic`` near ``pos_text``; ``pos`` is an alignment spec
        # of the form "<vertical>|<horizontal>" (e.g. "center|left").
        if isinstance(pos_text, complex):
            pos_x = pos_text.real
            pos_y = pos_text.imag
        else:
            pos_x, pos_y = pos_text
        width = text_graphic.get_width()
        height = text_graphic.get_height()

        if pos == "center|left":
            pos_y -= height // 2
        elif pos == "up|center":
            pos_x -= width // 2
            pos_y -= height
        elif pos == "center|right":
            pos_x -= width
            pos_y -= height // 2
        elif pos == "down|center":
            pos_x -= width // 2
        elif pos == "center|center":
            pos_x -= width // 2
            pos_y -= height // 2
        self.screen.blit(text_graphic, (pos_x, pos_y))

    def _draw_loads_one_load(
        self,
        fig,
        l_id,
        pos_load,
        txt_,
        pos_end_line,
        pos_load_sub,
        how_center,
        this_col,
    ):
        # A load is a line from the substation plus its MW label.
        width = 2
        pygame.draw.line(
            self.screen,
            this_col,
            pos_load_sub,
            (pos_end_line.real, pos_end_line.imag),
            width,
        )
        text_graphic = self.font.render(txt_, True, this_col)
        self._aligned_text(how_center, text_graphic, pos_load)

    def _draw_gens_one_gen(
        self, fig, g_id, pos_gen, txt_, pos_end_line, pos_gen_sub, how_center, this_col
    ):
        # A generator is a line from the substation plus its MW label.
        width = 2
        pygame.draw.line(
            self.screen,
            this_col,
            pos_gen_sub,
            (pos_end_line.real, pos_end_line.imag),
            width,
        )
        text_graphic = self.font.render(txt_, True, this_col)
        self._aligned_text(how_center, text_graphic, pos_gen)
        return None

    def _draw_topos_one_sub(self, fig, sub_id, buses_z, elements, bus_vect):
        # Draw the buses of one substation (filled circles) and connect each
        # element of the substation to the bus it belongs to.
        colors = [pygame.Color(255, 127, 14), pygame.Color(31, 119, 180)]

        # I plot the buses
        for bus_id, z_bus in enumerate(buses_z):
            pygame.draw.circle(
                self.screen,
                colors[bus_id],
                [int(z_bus.real), int(z_bus.imag)],
                int(self.bus_radius),
                0,
            )

        # i connect every element to the proper bus with the proper color
        for el_nm, dict_el in elements.items():
            # bus ids in bus_vect are 1-based; -1 means "disconnected"
            this_el_bus = bus_vect[dict_el["sub_pos"]] - 1
            if this_el_bus >= 0:
                pygame.draw.line(
                    self.screen,
                    colors[this_el_bus],
                    [int(dict_el["z"].real), int(dict_el["z"].imag)],
                    [int(buses_z[this_el_bus].real), int(buses_z[this_el_bus].imag)],
                    2,
                )
        return []
# ---- file: grid2op/Plot/Plotting.py ----
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Exceptions import PlotError
from grid2op.Plot.PlotPlotly import PlotPlotly
from grid2op.Plot.PlotMatplotlib import PlotMatplotlib
from grid2op.Plot.PlotPyGame import PlotPyGame
from grid2op.Exceptions.PlotExceptions import PyGameQuit
class Plotting:
    """
    INTERNAL

    .. warning:: /!\\\\ This class is deprecated /!\\\\

    Facade over the deprecated plotting backends: it instantiates the backend
    matching ``display_mod`` ("pygame", "plotly" or "matplotlib") and forwards
    every ``plot_*`` call to it, optionally displaying the resulting figure.
    """

    # maps each display mode name to the backend class implementing it
    allwed_display_mod = {
        "pygame": PlotPyGame,
        "plotly": PlotPlotly,
        "matplotlib": PlotMatplotlib,
    }

    def __init__(
        self,
        observation_space,
        display_mod="plotly",
        substation_layout=None,
        radius_sub=20.0,
        load_prod_dist=70.0,
        bus_radius=6.0,
    ):
        """
        Parameters
        ----------
        observation_space: :class:`grid2op.Observation.ObservationSpace`
            The observation space of the environment to plot.

        display_mod: ``str``
            One of "pygame", "plotly" or "matplotlib".

        Raises
        ------
        PlotError
            If ``display_mod`` is not a supported backend name.
        """
        if display_mod not in self.allwed_display_mod:
            # BUG FIX: typo "avaible" corrected in the user-facing message
            raise PlotError(
                'Only available plot mod are "{}". You specified "{}" which is not supported.'
                "".format(self.allwed_display_mod, display_mod)
            )

        cls_ = self.allwed_display_mod[display_mod]
        self.displ_backend = cls_(
            observation_space,
            substation_layout=substation_layout,
            radius_sub=radius_sub,
            load_prod_dist=load_prod_dist,
            bus_radius=bus_radius,
        )
        self.display_mod = display_mod

    def _display_fig(self, fig, display):
        # pygame renders in its own window; only plotly / matplotlib figures
        # need an explicit "show" call (matplotlib returns a (fig, ax) pair).
        if display:
            if self.display_mod == "plotly":
                fig.show()
            elif self.display_mod == "matplotlib":
                fig, ax = fig
                fig.show()

    def plot_layout(
        self, fig=None, reward=None, done=None, timestamp=None, display=True
    ):
        """Plot the bare grid layout (no observation data) and return the figure."""
        try:
            fig = self.displ_backend.plot_layout(
                fig=fig, reward=reward, done=done, timestamp=timestamp
            )
            self._display_fig(fig, display=display)
        except PyGameQuit:
            # the user closed the pygame window: nothing more to draw
            pass
        return fig

    def plot_info(
        self,
        fig=None,
        line_info=None,
        load_info=None,
        gen_info=None,
        sub_info=None,
        colormap=None,
        display=True,
    ):
        """Plot arbitrary per-element values on the grid and return the figure."""
        try:
            fig = self.displ_backend.plot_info(
                fig=fig,
                line_info=line_info,
                load_info=load_info,
                gen_info=gen_info,
                sub_info=sub_info,
                colormap=colormap,
            )
            self._display_fig(fig, display=display)
        except PyGameQuit:
            pass
        return fig

    def plot_obs(
        self,
        observation,
        fig=None,
        reward=None,
        done=None,
        timestamp=None,
        line_info="rho",
        load_info="p",
        gen_info="p",
        colormap="line",
        display=True,
    ):
        """Plot a full observation and return the figure."""
        try:
            fig = self.displ_backend.plot_obs(
                observation,
                fig=fig,
                reward=reward,
                done=done,
                timestamp=timestamp,
                line_info=line_info,
                load_info=load_info,
                gen_info=gen_info,
                colormap=colormap,
            )
            self._display_fig(fig, display=display)
        except PyGameQuit:
            pass
        return fig
# ---- file: grid2op/Plot/__init__.py ----
__all__ = [
"BasePlot",
"PlotMatplotlib",
"PlotPlotly",
"PlotPyGame",
"Plotting",
"EpisodeReplay",
]
from grid2op.Plot.BasePlot import BasePlot
from grid2op.Plot.PlotMatplotlib import PlotMatplotlib
from grid2op.Plot.PlotPlotly import PlotPlotly
from grid2op.Plot.PlotPyGame import PlotPyGame
from grid2op.Plot.Plotting import Plotting
from grid2op.Plot.EpisodeReplay import EpisodeReplay
import warnings
class PlotGraph(BasePlot):
    """Deprecated alias of :class:`BasePlot`, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # BUG FIX: the previous code called ``super().__init__(self, *args, **kwargs)``,
        # passing ``self`` twice (once implicitly through the bound super object,
        # once explicitly) and thereby shifting every positional argument by one.
        super().__init__(*args, **kwargs)
        warnings.warn(
            "PlotGraph has been renamed to BasePlot"
            " -- The old name will be removed in future versions",
            category=PendingDeprecationWarning,
        )
# ---- file: grid2op/PlotGrid/BasePlot.py ----
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import warnings
import numpy as np
from abc import ABC, abstractmethod
from grid2op.Observation import BaseObservation
from grid2op.Exceptions import PlotError
from grid2op.PlotGrid.LayoutUtil import layout_obs_sub_load_and_gen
from grid2op.PlotGrid.PlotUtil import PlotUtil as pltu
from grid2op.dtypes import dt_float, dt_int
class BasePlot(ABC):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Abstract interface to plot the state of the powergrid
Implement the interface with a plotting library to generate drawings
Attributes
-----------
observation_space: ``grid2op.Observation.ObservationSpace``
The observation space used.
width: ``int`` Width of the drawing
height: ``int`` Height of the drawing
grid_layout: ``dict`` A grid layout dict to use
"""
def __init__(
self,
observation_space,
width=800,
height=600,
scale=2000.0,
grid_layout=None,
parallel_spacing=3.0,
):
self.observation_space = observation_space
self.width = width
self.height = height
self.scale = scale
self._parallel_spacing = parallel_spacing
self._info_to_units = {"rho": "%", "a": "A", "p": "MW", "v": "kV"}
self._lines_info = ["rho", "a", "p", "v", None]
self._loads_info = ["p", "v", None]
self._gens_info = ["p", "v", None]
self._grid_layout = self.compute_grid_layout(observation_space, grid_layout)
# Augment observation_space with dummy observation data
# so we can use it as an observation for plotting just the layout or custom infos
self.observation_space.topo_vect = np.ones(
self.observation_space.dim_topo, dtype=dt_int
)
self.observation_space.line_status = np.full(
self.observation_space.n_line, True
)
self.observation_space.rho = np.full(self.observation_space.n_line, 0.0)
self.observation_space.p_or = np.ones(self.observation_space.n_line)
# TODO storage: display the storage units too
# TODO storage doc (yes also the documentation)
@abstractmethod
def create_figure(self):
"""
Creates a new figure to draw into.
Depending on the library can also be called Plot, canvas, screen ..
"""
pass
@abstractmethod
def clear_figure(self, figure):
"""
Clears a figure
Depending on the library can also be called Plot, canvas, screen ..
"""
pass
@abstractmethod
def convert_figure_to_numpy_HWC(self, figure):
"""
Given a figure as returned by `BasePlot.create_figure`
Convert it to a numpy array of dtype uint8
and data layed out in the HWC format
"""
pass
def compute_grid_layout(self, observation_space, grid_layout=None):
"""
Compute the grid layout from the observation space
This should return a native python ``dict``
in the same format as observation_space.grid_layout :
.. code-block:: python
{
"substation1_name": [x_coord, y_coord],
"substation2_name": [x_coord, y_coord],
[...],
"load1_name": [x_coord, y_coord],
[...],
"gen1_name": [x_coord, y_coord],
[...]
}
Note that is must contain at least the positions for the substations.
The loads and generators will be skipped if missing.
By default, if `grid_layout` is provided this is returned,
otherwise returns observation_space.grid_layout
Parameters
----------
observation_space: ``grid2op.Observation.ObservationSpace``
The observation space of the environment
grid_layout: ``dict`` or ``None``
A dictionary containing the coordinates for each substation.
"""
# We need an intial layout to work with
use_grid_layout = None
if grid_layout is not None:
use_grid_layout = grid_layout
elif observation_space.grid_layout is not None:
use_grid_layout = observation_space.grid_layout
else:
raise PlotError("No grid layout provided for plotting")
# Compute loads and gens positions using a default implementation
observation_space.grid_layout = use_grid_layout
return layout_obs_sub_load_and_gen(
observation_space, scale=self.scale, use_initial=True
)
@abstractmethod
def draw_substation(self, figure, observation, sub_id, sub_name, pos_x, pos_y):
"""
Draws a substation into the figure
Parameters
----------
figure: :object: Figure to draw to.
This is the object returned by create_figure
observation: :grid2op.Observation.BaseObservation:
Current state of the grid being drawn
sub_id: :int: Id of the substation, Index in the observation
sub_name: :str: Name of the substation
pos_x: :int: x position from the layout
pos_y: :int: y position from the layout
"""
pass
def update_substation(self, figure, observation, sub_id, sub_name, pos_x, pos_y):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Update a substation into the figure
"""
pass
@abstractmethod
def draw_load(
self,
figure,
observation,
load_name,
load_id,
load_bus,
load_value,
load_unit,
pos_x,
pos_y,
sub_x,
sub_y,
):
"""
Draws a load into the figure
Parameters
----------
figure: :object: Figure to draw to.
This is the object returned by create_figure
observation: :grid2op.Observation.BaseObservation:
Current state of the grid being drawn
load_name: ``str`` Name of the load
load_id: ``int`` Id of the load, Index in the observation
load_bus: ``int`` Id of bus the load is connected to.
load_value: ``float`` An informative value of the load current state
load_unit: ``str`` The unit of the `load_value` argument as a string
pos_x: ``int`` x position from the layout
pos_y: ``int`` y position from the layout
sub_x: ``int`` x position of the connected substation from the layout
sub_y: ``int`` y position of the connected substation from the layout
"""
pass
def update_load(
self,
figure,
observation,
load_name,
load_id,
load_bus,
load_value,
load_unit,
pos_x,
pos_y,
sub_x,
sub_y,
):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Update a load into the figure
"""
pass
@abstractmethod
def draw_gen(
self,
figure,
observation,
gen_name,
gen_id,
gen_bus,
gen_value,
gen_unit,
pos_x,
pos_y,
sub_x,
sub_y,
):
"""
Draws a generator into the figure
Parameters
----------
figure: :object: Figure to draw to.
This is the object returned by create_figure
observation: `grid2op.Observation.BaseObservation`
Current state of the grid being drawn
gen_name: ``str`` Name of the load
gen_id: ``int`` Id of the generator, Index in the observation
gen_bus: ``int`` Bus id the generator is connected to
gen_value: ``float``
An informative value of the generator current state
gen_unit: ``str`` The unit of the `gen_value` argument as a string
pos_x: ``int`` x position from the layout
pos_y: ``int`` y position from the layout
sub_x: ``int`` x position of the connected substation from the layout
sub_y: ``int`` y position of the connected substation from the layout
"""
pass
def update_gen(
self,
figure,
observation,
gen_name,
gen_id,
gen_bus,
gen_value,
gen_unit,
pos_x,
pos_y,
sub_x,
sub_y,
):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Updates a generator into the figure
"""
pass
@abstractmethod
def draw_powerline(
self,
figure,
observation,
line_id,
line_name,
connected,
line_value,
line_unit,
or_bus,
pos_or_x,
pos_or_y,
ex_bus,
pos_ex_x,
pos_ex_y,
):
"""
Draws a powerline into the figure
Parameters
----------
figure: ``object`` Figure to draw to.
This is the object returned by `create_figure`
observation: ``grid2op.Observation.BaseObservation``
Current state of the grid being drawn
line_id: ``int`` Id of the powerline, index in the observation
line_name: ``str`` Name of the powerline
connected: ``bool`` Is the line connected ?
line_value: ``float`` An informative value of the line current state
line_unit: ``str`` The unit of the `line_value` argument as a string
or_bus: ``int`` Bus the powerline origin is connected to
pos_or_x: ``int`` Powerline origin x position from the layout
pos_or_y: ``int`` Powerline origin y position from the layout
ex_bus: ``int`` Bus the powerline extremity is connected to
pos_ex_x: ``int`` Powerline extremity x position from the layout
pos_ex_y: ``int`` Powerline extremity y position from the layout
"""
pass
def update_powerline(
self,
figure,
observation,
line_id,
line_name,
connected,
line_value,
line_unit,
or_bus,
pos_or_x,
pos_or_y,
ex_bus,
pos_ex_x,
pos_ex_y,
):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Draws a powerline into the figure
"""
pass
@abstractmethod
def draw_storage(
self,
figure,
observation,
storage_name,
storage_id,
storage_bus,
storage_value,
storage_unit,
pos_x,
pos_y,
sub_x,
sub_y,
):
"""
Draws a storage unit into the figure
Parameters
----------
figure: :object: Figure to draw to.
This is the object returned by create_figure
observation: :grid2op.Observation.BaseObservation:
Current state of the grid being drawn
storage_name: ``str`` Name of the load
storage_id: ``int`` Id of the load, Index in the observation
storage_bus: ``int`` Id of bus the load is connected to.
storage_value: ``float`` An informative value of the load current state
storage_unit: ``str`` The unit of the `load_value` argument as a string
pos_x: ``int`` x position from the layout
pos_y: ``int`` y position from the layout
sub_x: ``int`` x position of the connected substation from the layout
sub_y: ``int`` y position of the connected substation from the layout
"""
pass
def update_storage(
self,
figure,
observation,
storage_name,
storage_id,
storage_bus,
storage_value,
storage_unit,
pos_x,
pos_y,
sub_x,
sub_y,
):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Update a storage unit into the figure
"""
pass
@abstractmethod
def draw_legend(self, figure, observation):
"""
Setup the legend for the given figure.
Parameters
----------
figure: ``object`` Figure to draw to.
This is the object returned by `create_figure`
observation: ``grid2op.Observation.BaseObservation``
Current state of the grid being drawn
"""
pass
def update_legend(self, figure, observation):
"""
INTERNAL
.. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\
Updates the legend for the given figure.
"""
pass
def plot_postprocess(self, figure, observation, is_update):
"""
Some implementations may need post-processing.
This is called at the end of plot.
"""
pass
def _plot_subs(self, figure, observation, redraw):
draw_fn = self.draw_substation
if not redraw:
draw_fn = self.update_substation
for sub_idx in range(observation.n_sub):
sub_name = observation.name_sub[sub_idx]
sub_x = self._grid_layout[sub_name][0]
sub_y = self._grid_layout[sub_name][1]
draw_fn(figure, observation, sub_idx, sub_name, sub_x, sub_y)
def _aux_draw_elements(
self,
figure,
observation,
load_values,
load_unit,
draw_fn,
el_names,
el_to_subid,
el_pos_topo_vect,
):
"""
generic method to loop through all elements of a given type and call the draw function
on them
"""
topo = observation.topo_vect
topo_pos = el_pos_topo_vect
for stor_idx, stor_name in enumerate(el_names):
if stor_name not in self._grid_layout:
continue
load_value = None
if load_values is not None:
if load_values[stor_idx] is not None:
load_value = np.round(float(load_values[stor_idx]), 2)
else:
load_value = None
sto_x = self._grid_layout[stor_name][0]
sto_y = self._grid_layout[stor_name][1]
sto_subid = el_to_subid[stor_idx]
subname = observation.name_sub[sto_subid]
sto_bus = topo[topo_pos[stor_idx]]
sto_bus = sto_bus if sto_bus > 0 else 0
sub_x = self._grid_layout[subname][0]
sub_y = self._grid_layout[subname][1]
draw_fn(
figure,
observation,
stor_idx,
stor_name,
sto_bus,
load_value,
load_unit,
sto_x,
sto_y,
sub_x,
sub_y,
)
def _plot_loads(self, figure, observation, load_values, load_unit, redraw):
draw_fn = self.draw_load
if not redraw:
draw_fn = self.update_load
self._aux_draw_elements(
figure,
observation,
load_values,
load_unit,
draw_fn,
observation.name_load,
observation.load_to_subid,
observation.load_pos_topo_vect,
)
def _plot_storages(self, figure, observation, storage_values, storage_unit, redraw):
if observation.n_storage == 0:
return
draw_fn = self.draw_storage
if not redraw:
draw_fn = self.update_storage
self._aux_draw_elements(
figure,
observation,
storage_values,
storage_unit,
draw_fn,
observation.name_storage,
observation.storage_to_subid,
observation.storage_pos_topo_vect,
)
def _plot_gens(self, figure, observation, gen_values, gen_unit, redraw):
draw_fn = self.draw_gen
if not redraw:
draw_fn = self.update_gen
self._aux_draw_elements(
figure,
observation,
gen_values,
gen_unit,
draw_fn,
observation.name_gen,
observation.gen_to_subid,
observation.gen_pos_topo_vect,
)
def _plot_lines(self, figure, observation, line_values, line_unit, redraw):
draw_fn = self.draw_powerline
if not redraw:
draw_fn = self.update_powerline
topo = observation.topo_vect
line_or_pos = observation.line_or_pos_topo_vect
line_ex_pos = observation.line_ex_pos_topo_vect
for line_idx in range(observation.n_line):
line_or_sub = observation.line_or_to_subid[line_idx]
line_or_sub_name = observation.name_sub[line_or_sub]
line_ex_sub = observation.line_ex_to_subid[line_idx]
line_ex_sub_name = observation.name_sub[line_ex_sub]
line_name = observation.name_line[line_idx]
line_status = True
line_status = observation.line_status[line_idx]
line_value = None
if line_values is not None:
lv = line_values[line_idx]
if isinstance(lv, (float, dt_float)):
line_value = np.round(float(lv), 2)
elif isinstance(lv, (int, dt_int)):
line_value = int(lv)
else:
line_value = lv
line_or_bus = topo[line_or_pos[line_idx]]
line_or_bus = line_or_bus if line_or_bus > 0 else 0
line_or_x = self._grid_layout[line_or_sub_name][0]
line_or_y = self._grid_layout[line_or_sub_name][1]
line_ex_bus = topo[line_ex_pos[line_idx]]
line_ex_bus = line_ex_bus if line_ex_bus > 0 else 0
line_ex_x = self._grid_layout[line_ex_sub_name][0]
line_ex_y = self._grid_layout[line_ex_sub_name][1]
# Special case for parralel lines
tmp = self.observation_space.get_lines_id(
from_=line_or_sub, to_=line_ex_sub
)
if len(tmp) > 1:
ox, oy = pltu.orth_norm_from_points(
line_or_x, line_or_y, line_ex_x, line_ex_y
)
if line_idx == tmp[0]:
line_or_x += ox * self._parallel_spacing
line_or_y += oy * self._parallel_spacing
line_ex_x += ox * self._parallel_spacing
line_ex_y += oy * self._parallel_spacing
else:
line_or_x -= ox * self._parallel_spacing
line_or_y -= oy * self._parallel_spacing
line_ex_x -= ox * self._parallel_spacing
line_ex_y -= oy * self._parallel_spacing
draw_fn(
figure,
observation,
line_idx,
line_name,
line_status,
line_value,
line_unit,
line_or_bus,
line_or_x,
line_or_y,
line_ex_bus,
line_ex_x,
line_ex_y,
)
def _plot_legend(self, fig, observation, redraw):
draw_fn = self.draw_legend
if not redraw:
draw_fn = self.update_legend
draw_fn(fig, observation)
def plot_layout(self):
"""
This function plot the layout of the grid, as well as the object. You will see the name of each elements and
their id.
"""
return self.plot_info(
observation=self.observation_space, figure=None, redraw=True
)
    def plot_obs(
        self,
        observation,
        figure=None,
        redraw=True,
        line_info="rho",
        load_info="p",
        gen_info="p",
        storage_info="p",
    ):
        """
        Plot an observation.

        Extracts the requested quantities from ``observation`` and delegates
        the actual drawing to :func:`BasePlot.plot_info`.

        Parameters
        ----------
        observation: :class:`grid2op.Observation.BaseObservation`
            The observation to plot
        figure: ``object``
            The figure on which to plot the observation.
            If figure is ``None``, a new figure is created.
        line_info: ``str``
            One of "rho", "a", or "p" or "v"
            The information that will be plotted on the powerline.
            By default "rho".
            All flow are taken "origin" side.
        load_info: ``str``
            One of "p" or "v" the information displayed on the load.
            (default to "p").
        gen_info: ``str``
            One of "p" or "v" the information displayed on the generators
            (default to "p").
        storage_info: ``str``
            One of "p" or None the information displayed on the storage units
            (default to "p").

        Raises
        ------
        PlotError
            If ``observation`` is not a :class:`BaseObservation`, or if one of
            the ``*_info`` arguments is not among the allowed values.

        Returns
        -------
        res: ``object``
            The figure updated with the data from the new observation.
        """
        # TODO add gen_info=dispatch and gen_info=target_dispatch
        # TODO add line_info=cooldown
        # Start by checking arguments are valid
        if not isinstance(observation, BaseObservation):
            err_msg = (
                "Observation is not a derived type of "
                "grid2op.Observation.BaseObservation"
            )
            raise PlotError(err_msg)
        if line_info not in self._lines_info:
            err_msg = (
                'Impossible to plot line info "{}" for line.' " Possible values are {}"
            )
            raise PlotError(err_msg.format(line_info, str(self._lines_info)))
        if load_info not in self._loads_info:
            err_msg = (
                'Impossible to plot load info "{}" for line.' " Possible values are {}"
            )
            raise PlotError(err_msg.format(load_info, str(self._loads_info)))
        if gen_info not in self._gens_info:
            err_msg = (
                'Impossible to plot gen info "{}" for line.' " Possible values are {}"
            )
            raise PlotError(err_msg.format(gen_info, str(self._gens_info)))
        # NOTE(review): storage_info is not validated against a list of
        # allowed values like the other *_info arguments; invalid values
        # are only caught in the storage branch below.
        # Powerline values: "rho" is scaled to percent, flows are origin side.
        # (The inner "is None" / else branches are unreachable after the
        # validation above; kept as defensive code.)
        line_values = None
        line_unit = ""
        if line_info is not None:
            line_unit = self._info_to_units[line_info]
            if line_info == "rho":
                line_values = observation.rho * 100.0
            elif line_info == "p":
                line_values = observation.p_or
            elif line_info == "a":
                line_values = observation.a_or
            elif line_info == "v":
                line_values = observation.v_or
            elif line_info is None:
                pass
            else:
                raise PlotError(
                    f'Impossible to understand the keyword argument "{line_info}" '
                    f'provided as "line_info"'
                )
        # Load values: active power is negated so consumption shows negative
        load_values = None
        load_unit = ""
        if load_info is not None:
            load_unit = self._info_to_units[load_info]
            if load_info == "p":
                load_values = copy.copy(observation.load_p) * -1.0
            elif load_info == "v":
                load_values = observation.load_v
            elif load_info is None:
                pass
            else:
                raise PlotError(
                    f'Impossible to understand the keyword argument "{load_info}" '
                    f'provided as "load_info"'
                )
        gen_values = None
        gen_unit = ""
        if gen_info is not None:
            gen_unit = self._info_to_units[gen_info]
            if gen_info == "p":
                gen_values = observation.prod_p
            elif gen_info == "v":
                gen_values = observation.prod_v
            elif gen_info is None:
                pass
            else:
                raise PlotError(
                    f'Impossible to understand the keyword argument "{gen_info}" '
                    f'provided as "gen_info"'
                )
        # Storage values: sign flipped so charging shows negative
        storage_values = None
        storage_unit = ""
        if storage_info is not None:
            storage_unit = self._info_to_units[storage_info]
            if storage_info == "p":
                storage_values = -1.0 * observation.storage_power
            elif storage_info is None:
                pass
            else:
                raise PlotError(
                    f'Impossible to understand the keyword argument "{storage_info}" '
                    f'provided as "storage_info"'
                )
        return self.plot_info(
            observation=observation,
            figure=figure,
            redraw=redraw,
            line_values=line_values,
            line_unit=line_unit,
            load_values=load_values,
            load_unit=load_unit,
            gen_values=gen_values,
            gen_unit=gen_unit,
            storage_values=storage_values,
            storage_unit=storage_unit,
        )
def plot_info(
self,
figure=None,
redraw=True,
line_values=None,
line_unit="",
load_values=None,
load_unit="",
storage_values=None,
storage_unit="",
gen_values=None,
gen_unit="",
observation=None,
coloring=None,
):
"""
Plot an observation with custom values
Parameters
----------
figure: ``object``
The figure on which to plot the observation.
If figure is ``None`` a new figure is created.
line_values: ``list``
information to be displayed for the powerlines
[must have the same size as observation.n_line and convertible to float]
line_unit: ``str``
Unit string for the :line_values: argument, displayed after the line value
load_values: ``list``
information to display for the loads
[must have the same size as observation.n_load and convertible to float]
load_unit: ``str``
Unit string for the :load_values: argument, displayed after the load value
storage_values: ``list``
information to display for the storage units
[must have the same size as observation.n_storage and convertible to float]
storage_unit: ``str``
Unit string for the :storage_values: argument, displayed after the storage value
gen_values: ``list``
information to display in the generators
[must have the same size as observation.n_gen and convertible to float]
gen_unit: ``str``
Unit string for the :gen_values: argument, displayed after the generator value
observation: :class:`grid2op.Observation.BaseObservation`
An observation to plot, can be None if no values are drawn from the observation
coloring:
``None`` for no special coloring, or "line" to color the powerline based on the value
("gen" and "load" coming soon)
Examples
--------
More examples on how to use this function is given in the "8_PlottingCapabilities.ipynb" notebook.
The basic concept is:
.. code-block:: python
import grid2op
from grid2op.PlotGrid import PlotMatplot
env = grid2op.make()
plot_helper = PlotMatplot(env.observation_space)
# plot the layout (position of each elements) of the powergrid
plot_helper.plot_layout()
# project some data on the grid
line_values = env.get_thermal_limit()
plot_helper.plot_info(line_values=line_values)
# to plot an observation
obs = env.reset()
plot_helper.plot_obs(obs)
Returns
-------
res: ``object``
The figure updated with the data from the new observation.
"""
# Check values are in the correct format
if (
line_values is not None
and len(line_values) != self.observation_space.n_line
):
raise PlotError(
"Impossible to display these values on the powerlines: there are {} values"
"provided for {} powerlines in the grid".format(
len(line_values), self.observation_space.n_line
)
)
if (
load_values is not None
and len(load_values) != self.observation_space.n_load
):
raise PlotError(
"Impossible to display these values on the loads: there are {} values"
"provided for {} loads in the grid".format(
len(load_values), self.observation_space.n_load
)
)
if gen_values is not None and len(gen_values) != self.observation_space.n_gen:
raise PlotError(
"Impossible to display these values on the generators: there are {} values"
"provided for {} generators in the grid".format(
len(gen_values), self.observation_space.n_gen
)
)
if (
storage_values is not None
and len(storage_values) != self.observation_space.n_storage
):
raise PlotError(
"Impossible to display these values on the storage units: there are {} values"
"provided for {} generators in the grid".format(
len(storage_values), self.observation_space.n_storage
)
)
# Get a valid figure to draw into
if figure is None:
fig = self.create_figure()
redraw = True
elif redraw:
self.clear_figure(figure)
fig = figure
else:
fig = figure
# Get a valid Observation
if observation is None:
# See dummy data added in the constructor
observation = self.observation_space
if coloring is not None:
observation = copy.deepcopy(observation)
if coloring == "line":
if line_values is None:
raise PlotError(
"Impossible to color the grid based on the line information (key word argument "
'"line_values") if this argument is None.'
)
observation.rho = copy.deepcopy(line_values)
try:
observation.rho = np.array(observation.rho).astype(dt_float)
except:
raise PlotError(
"Impossible to convert the input values (line_values) to floating point"
)
# rescaling to have range 0 - 1.0
tmp = observation.rho[np.isfinite(observation.rho)]
observation.rho -= (
np.min(tmp) - 1e-1
) # so the min is 1e-1 otherwise 0.0 is plotted as black
tmp = observation.rho[np.isfinite(observation.rho)]
observation.rho /= np.max(tmp)
elif coloring == "load":
# TODO
warnings.warn("coloring = loads is not available at the moment")
elif coloring == "gen":
if gen_values is None:
raise PlotError(
"Impossible to color the grid based on the gen information (key word argument "
'"gen_values") if this argument is None.'
)
observation.prod_p = copy.deepcopy(gen_values)
try:
observation.prod_p = np.array(observation.prod_p).astype(
dt_float
)
except:
raise PlotError(
"Impossible to convert the input values (gen_values) to floating point"
)
# rescaling to have range 0 - 1.0
tmp = observation.prod_p[np.isfinite(observation.prod_p)]
if np.any(np.isfinite(observation.prod_p)):
observation.prod_p -= (
np.min(tmp) - 1e-1
) # so the min is 1e-1 otherwise 0.0 is plotted as black
tmp = observation.prod_p[np.isfinite(observation.prod_p)]
observation.prod_p /= np.max(tmp)
else:
raise PlotError('coloring must be one of "line", "load" or "gen"')
# Trigger draw calls
self._plot_lines(fig, observation, line_values, line_unit, redraw)
self._plot_loads(fig, observation, load_values, load_unit, redraw)
self._plot_storages(fig, observation, storage_values, storage_unit, redraw)
self._plot_gens(fig, observation, gen_values, gen_unit, redraw)
self._plot_subs(fig, observation, redraw)
self._plot_legend(fig, observation, redraw)
# Some implementations may need postprocessing
self.plot_postprocess(fig, observation, not redraw)
# Return updated figure
return fig
| 33,775 | 30.566355 | 116 | py |
Grid2Op | Grid2Op-master/grid2op/PlotGrid/LayoutUtil.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import networkx as nx
import numpy as np
import copy
import math
from grid2op.PlotGrid.PlotUtil import PlotUtil as pltu
def layout_obs_sub_only(obs, scale=1000.0):
    """Compute a substation-only layout with the Kamada-Kawai algorithm.

    Builds the graph of substations connected by powerlines, lets networkx
    place the vertices, then converts the result back to the grid2op layout
    format ``{sub_name: [x, y]}`` with integer coordinates.

    Fix: removed the ``initial_layout`` dict that was built from
    ``obs.grid_layout`` but never used (dead code), along with unused locals.

    Parameters
    ----------
    obs:
        A grid2op observation (or observation space) describing the grid.
    scale: ``float``
        Scale factor forwarded to ``nx.kamada_kawai_layout``.

    Returns
    -------
    improved_layout: ``dict``
        Mapping from substation name to ``[x, y]`` integer coordinates.
    """
    # Create a graph of substations vertices, one edge per powerline
    # (parallel lines collapse into a single edge).
    # NOTE(review): a substation with no powerline never enters G and thus
    # gets no position in the result — TODO confirm this is intended.
    G = nx.Graph()
    for line_idx in range(obs.n_line):
        G.add_edge(obs.line_or_to_subid[line_idx], obs.line_ex_to_subid[line_idx])
    # Use kamada_kawai algorithm
    kkl = nx.kamada_kawai_layout(G, scale=scale)
    # Convert back to our layout format
    improved_layout = {}
    for sub_idx, v in kkl.items():
        sub_key = obs.name_sub[sub_idx]
        improved_layout[sub_key] = [int(np.round(v[0])), int(np.round(v[1]))]
    return improved_layout
def layout_obs_sub_load_and_gen(
    obs, scale=1000.0, use_initial=False, parallel_spacing=3.0
):
    """Compute a layout for substations AND their loads / gens / storages.

    One graph vertex is created per substation, load, generator and storage
    unit; powerlines connect substations, and each load / gen / storage is
    attached to its substation with a short edge. With ``use_initial=True``
    the substations are pinned to their coordinates from ``obs.grid_layout``
    and only the attached elements are placed (Fruchterman-Reingold with a
    fixed seed, hence deterministic); otherwise everything is placed from
    scratch with Kamada-Kawai.

    Fix: the storage seed positions now use ``stor_w`` instead of ``gen_w``
    (copy-paste slip; both are 25 today so the output is unchanged).

    NOTE(review): ``parallel_spacing`` is accepted but unused — kept for
    backward compatibility, TODO confirm whether it can be dropped.

    Returns
    -------
    improved_layout: ``dict``
        Mapping from element name (substation, load, gen, storage) to
        ``[x, y]`` coordinates.
    """
    # Create a graph of substations vertices
    G = nx.Graph()
    # edge weights act as target distances between the connected vertices
    sub_w = 0 if use_initial else 100
    load_w = 25
    gen_w = 25
    stor_w = 25
    # add the substation nodes
    for sub_id in range(obs.n_sub):
        G.add_node(sub_id)
    # Set lines edges (substation <-> substation)
    for line_idx in range(obs.n_line):
        lor_sub = obs.line_or_to_subid[line_idx]
        lex_sub = obs.line_ex_to_subid[line_idx]
        G.add_edge(lor_sub, lex_sub, weight=sub_w)
    # Vertex indices for the non-substation elements are offset so they do
    # not collide with the substation ids.
    # Set edges for loads
    load_offset = obs.n_sub
    for load_idx in range(obs.n_load):
        G.add_edge(obs.load_to_subid[load_idx], load_offset + load_idx, weight=load_w)
    # Set edges for gens
    gen_offset = obs.n_sub + obs.n_load
    for gen_idx in range(obs.n_gen):
        G.add_edge(obs.gen_to_subid[gen_idx], gen_offset + gen_idx, weight=gen_w)
    # Set edges for storages
    stor_offset = obs.n_sub + obs.n_load + obs.n_gen
    for stor_idx in range(obs.n_storage):
        G.add_edge(
            obs.storage_to_subid[stor_idx], stor_offset + stor_idx, weight=stor_w
        )
    layout_keys = list(obs.name_sub)
    if use_initial:
        # Seed every vertex with a starting position: substations keep their
        # layout coordinates, attached elements start on a small circle
        # around their substation (angle derived from their position inside
        # the substation).
        initial_layout = {}
        for sub_idx, sub_name in enumerate(layout_keys):
            initial_layout[sub_idx] = copy.deepcopy(obs.grid_layout[sub_name])
        for load_idx, load_subid in enumerate(obs.load_to_subid):
            sub_name = layout_keys[load_subid]
            angle = obs.load_to_sub_pos[load_idx] / obs.sub_info[load_subid]
            angle *= 2.0 * math.pi
            load_pos = list(copy.deepcopy(obs.grid_layout[sub_name]))
            load_pos[0] += math.cos(angle) * load_w
            load_pos[1] += math.sin(angle) * load_w
            initial_layout[load_offset + load_idx] = load_pos
        for gen_idx, gen_subid in enumerate(obs.gen_to_subid):
            sub_name = layout_keys[gen_subid]
            angle = obs.gen_to_sub_pos[gen_idx] / obs.sub_info[gen_subid]
            angle *= 2.0 * math.pi
            gen_pos = list(copy.deepcopy(obs.grid_layout[sub_name]))
            gen_pos[0] += math.cos(angle) * gen_w
            gen_pos[1] += math.sin(angle) * gen_w
            initial_layout[gen_offset + gen_idx] = gen_pos
        for stor_idx, stor_subid in enumerate(obs.storage_to_subid):
            sub_name = layout_keys[stor_subid]
            angle = obs.storage_to_sub_pos[stor_idx] / obs.sub_info[stor_subid]
            angle *= 2.0 * math.pi
            stor_pos = list(copy.deepcopy(obs.grid_layout[sub_name]))
            # fixed: was gen_w (same value, but semantically stor_w)
            stor_pos[0] += math.cos(angle) * stor_w
            stor_pos[1] += math.sin(angle) * stor_w
            initial_layout[stor_offset + stor_idx] = stor_pos
    else:
        initial_layout = None
    if use_initial:
        # Pin the substations, let the attached elements settle around them;
        # fixed seed keeps the result reproducible.
        fix = list(range(obs.n_sub))
        seed = np.random.RandomState(0)
        kkl = nx.spring_layout(
            G, scale=scale, fixed=fix, pos=initial_layout, seed=seed, iterations=1000
        )
    else:
        # Use kamada_kawai algorithm
        kkl = nx.kamada_kawai_layout(G, scale=scale)
    # Convert back to our layout format {name: [x, y]}
    improved_layout = {}
    for sub_idx, sub_name in enumerate(layout_keys):
        v = kkl[sub_idx]
        improved_layout[sub_name] = [np.round(v[0]), np.round(v[1])]
    for load_idx in range(obs.n_load):
        v = kkl[load_offset + load_idx]
        improved_layout[obs.name_load[load_idx]] = [np.round(v[0]), np.round(v[1])]
    for gen_idx in range(obs.n_gen):
        v = kkl[gen_offset + gen_idx]
        improved_layout[obs.name_gen[gen_idx]] = [np.round(v[0]), np.round(v[1])]
    for stor_idx in range(obs.n_storage):
        v = kkl[stor_offset + stor_idx]
        improved_layout[obs.name_storage[stor_idx]] = [np.round(v[0]), np.round(v[1])]
    return improved_layout
| 6,465 | 32.853403 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/PlotGrid/PlotMatplot.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import io
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from grid2op.PlotGrid.BasePlot import BasePlot
from grid2op.PlotGrid.PlotUtil import PlotUtil as pltu
import matplotlib.patches as patches
from matplotlib.lines import Line2D
from grid2op.PlotGrid.config import * # all colors
class GenDraw(patches.CirclePolygon):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Circle-polygon patch (5 edges by default) used to draw generators and to
    identify them in the legend.
    """

    def __init__(self, *args, resolution=5, **kwargs):
        super().__init__(*args, resolution=resolution, **kwargs)
class LoadDraw(patches.CirclePolygon):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Circle-polygon patch (3 edges by default) used to draw loads and to
    identify them in the legend.
    """

    def __init__(self, *args, resolution=3, **kwargs):
        super().__init__(*args, resolution=resolution, **kwargs)
class StorageDraw(patches.CirclePolygon):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Circle-polygon patch (4 edges by default) used to draw storage units and
    to identify them in the legend.
    """

    def __init__(self, *args, resolution=4, **kwargs):
        super().__init__(*args, resolution=resolution, **kwargs)
# TODO refactor this class to make possible some calls like
# plotmatplot.plot_info(...).plot_gentype(...) is possible
# TODO add some transparency when coloring=... is used in plot_info
# TODO code the load part in the plot_info
class PlotMatplot(BasePlot):
"""
This class uses the python library "matplotlib" to draw the powergrid.
Attributes
----------
width: ``int``
Width of the figure in pixels
height: ``int``
Height of the figure in pixel
dpi: ``int``
Dots per inch, to convert pixels dimensions into inches
_scale: ``float``
Scale of the drawing in arbitrary units
_sub_radius: ``int``
Substation circle size
_sub_face_color: ``str``
Substation circle fill color
_sub_edge_color: ``str``
Substation circle edge color
_sub_txt_color: ``str``
Substation info text color
_load_radius: ``int``
Load circle size
_load_name: ``bool``
Show load names (default True)
_load_face_color: ``str``
Load circle fill color
_load_edge_color: ``str``
Load circle edge color
_load_txt_color: ``str``
Load info text color
_load_line_color: ``str``
Color of the line from load to substation
_load_line_width: ``int``
Width of the line from load to substation
_gen_radius: ``int``
Generators circle size
_gen_name: ``bool``
Show generators names (default True)
_gen_face_color: ``str``
Generators circle fill color
_gen_edge_color: ``str``
Generators circle edge color
_gen_txt_color: ``str``
Generators info txt color
_gen_line_color: ``str``
Color of the line form generator to substation
_gen_line_width: ``str``
Width of the line from generator to substation
_line_color_scheme: ``list``
List of color strings to color powerlines based on rho values
_line_color_width: ``int``
Width of the powerlines lines
_line_bus_radius: ``int``
Size of the bus display circle
_line_bus_face_colors: ``list``
List of 3 colors strings, each corresponding to the fill color of the bus circle
_line_arrow_len: ``int``
Length of the arrow on the powerlines
_line_arrow_width: ``int``
Width of the arrow on the powerlines
Examples
--------
You can use it this way:
.. code-block:: python
import grid2op
from grid2op.PlotGrid import PlotMatplot
env = grid2op.make()
plot_helper = PlotMatplot(env.observation_space)
# and now plot an observation (for example)
obs = env.reset()
fig = plot_helper.plot_obs(obs)
fig.show()
# more information about it on the `getting_started/8_PlottingCapabilities.ipynb` notebook of grid2op
"""
    def __init__(
        self,
        observation_space,
        width=1280,
        height=720,
        grid_layout=None,
        dpi=96,
        scale=2000.0,
        bus_radius=6,
        sub_radius=15,
        load_radius=8,
        load_name=False,
        load_id=False,
        load_resolution=3,  # number of edges of the polygon representing the load
        gen_radius=8,
        gen_name=False,
        gen_id=False,
        gen_resolution=5,  # number of edges of the polygon representing the generator
        storage_resolution=4,  # number of edges of the polygon representing the storage unit
        line_name=False,
        line_id=False,
    ):
        """Build the matplotlib plot helper and initialize every display setting."""
        self.dpi = dpi
        super().__init__(observation_space, width, height, scale, grid_layout)
        # substation appearance
        self._sub_radius = sub_radius
        self._sub_face_color = "w"
        self._sub_edge_color = "blue"
        self._sub_txt_color = "black"
        self._display_sub_name = True
        # load appearance
        self._load_radius = load_radius
        self._load_name = load_name
        self._load_id = load_id
        self._load_face_color = "w"
        self._load_edge_color = "orange"
        self._load_resolution = load_resolution
        self._load_patch = self._load_patch_default
        self._load_txt_color = "black"
        self._load_line_color = "black"
        self._load_line_width = 1
        self._display_load_name = True
        # generator appearance; the "*_orig" attributes hold the defaults that
        # restore_gen_palette() copies into the active (non-"_orig") ones
        self._gen_radius_orig = gen_radius
        self._gen_radius = None  # init in self.restore_gen_palette()
        self._gen_resolution = gen_resolution
        self._gen_patch = self._gen_patch_default
        self._gen_name = gen_name
        self._gen_id = gen_id
        self._gen_face_color = "w"
        self._gen_edge_color_orig = "green"
        self._gen_edge_color = None
        self._gen_txt_color = "black"
        self._gen_line_color = "black"
        self._gen_line_width_orig = 1
        self._gen_line_width = None
        self._display_gen_value = True
        self._display_gen_name = True
        self.restore_gen_palette()
        # storage appearance (reuses the load sizing / naming options)
        self._storage_radius = load_radius
        self._storage_name = load_name
        self._storage_id = load_id  # bool : do i plot the id
        self._storage_face_color = "w"
        self._storage_edge_color = "purple"
        self._storage_resolution = storage_resolution
        self._storage_patch = self._storage_patch_default
        self._storage_txt_color = "black"
        self._storage_line_color = "black"
        self._storage_line_width = 1
        self._display_storage_name = True
        # powerline appearance; the color scheme is set by restore_line_palette()
        self._line_name = line_name
        self._line_id = line_id
        self._line_color_scheme_orig = ["blue", "orange", "red"]
        self._line_color_scheme = None
        self.restore_line_palette()
        self._line_color_width = 1
        self._line_bus_radius = bus_radius
        self._line_bus_face_colors = ["black", "red", "lime"]
        self._line_arrow_len = 10
        self._line_arrow_width = 10.0
        # plotting bounds (grown while drawing) and padding around them
        self.xlim = [0, 0]
        self.xpad = 5
        self.ylim = [0, 0]
        self.ypad = 5
        # for easier manipulation afterwards
        self.legend = None
        self.figure = None
def _gen_patch_default(self, xy, radius, edgecolor, facecolor):
"""default patch used to draw generator"""
# TODO maybe make a better version of this
patch = GenDraw(
xy,
radius=radius,
edgecolor=edgecolor,
facecolor=facecolor,
resolution=self._gen_resolution,
linewidth=self._gen_line_width,
)
return patch
def _load_patch_default(self, xy, radius, edgecolor, facecolor):
"""default patch used to draw generator"""
# TODO maybe make a better version of this
patch = LoadDraw(
xy,
radius=radius,
edgecolor=edgecolor,
facecolor=facecolor,
resolution=self._load_resolution,
)
return patch
def _storage_patch_default(self, xy, radius, edgecolor, facecolor):
"""default patch used to draw generator"""
# TODO maybe make a better version of this
patch = StorageDraw(
xy,
radius=radius,
edgecolor=edgecolor,
facecolor=facecolor,
resolution=self._storage_resolution,
)
return patch
def _v_textpos_from_dir(self, dirx, diry):
if diry > 0:
return "bottom"
else:
return "top"
def _h_textpos_from_dir(self, dirx, diry):
if dirx == 0:
return "center"
elif dirx > 0:
return "left"
else:
return "right"
def create_figure(self):
# lazy loading of graphics library (reduce loading time)
# and mainly because matplolib has weird impact on argparse
import matplotlib.pyplot as plt
w_inch = self.width / self.dpi
h_inch = self.height / self.dpi
f = plt.figure(figsize=(w_inch, h_inch), dpi=self.dpi)
self.ax = f.subplots()
f.canvas.draw()
return f
def clear_figure(self, figure):
self.xlim = [0, 0]
self.ylim = [0, 0]
figure.clear()
self.ax = figure.subplots()
def convert_figure_to_numpy_HWC(self, figure):
w, h = figure.get_size_inches() * figure.dpi
w = int(w)
h = int(h)
buf = io.BytesIO()
figure.canvas.print_raw(buf)
buf.seek(0)
img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
buf.close()
img_arr = np.reshape(img_arr, (h, w, 4))
return img_arr
def _draw_substation_txt(self, pos_x, pos_y, text):
self.ax.text(
pos_x,
pos_y,
text,
color=self._sub_txt_color,
horizontalalignment="center",
verticalalignment="center",
)
def _draw_substation_circle(self, pos_x, pos_y):
patch = patches.Circle(
(pos_x, pos_y),
radius=self._sub_radius,
facecolor=self._sub_face_color,
edgecolor=self._sub_edge_color,
)
self.ax.add_patch(patch)
def draw_substation(self, figure, observation, sub_id, sub_name, pos_x, pos_y):
self.xlim[0] = min(self.xlim[0], pos_x - self._sub_radius)
self.xlim[1] = max(self.xlim[1], pos_x + self._sub_radius)
self.ylim[0] = min(self.ylim[0], pos_y - self._sub_radius)
self.ylim[1] = max(self.ylim[1], pos_y + self._sub_radius)
self._draw_substation_circle(pos_x, pos_y)
if self._display_sub_name:
self._draw_substation_txt(pos_x, pos_y, str(sub_id))
def _draw_load_txt(self, pos_x, pos_y, sub_x, sub_y, text):
dir_x, dir_y = pltu.vec_from_points(sub_x, sub_y, pos_x, pos_y)
off_x, off_y = pltu.norm_from_points(sub_x, sub_y, pos_x, pos_y)
txt_x = pos_x + off_x * self._gen_radius
txt_y = pos_y + off_y * self._gen_radius
ha = self._h_textpos_from_dir(dir_x, dir_y)
va = self._v_textpos_from_dir(dir_x, dir_y)
self.ax.text(
txt_x,
txt_y,
text,
color=self._load_txt_color,
horizontalalignment=ha,
fontsize="small",
verticalalignment=va,
)
def _draw_load_name(self, pos_x, pos_y, txt):
self.ax.text(
pos_x,
pos_y,
txt,
color=self._load_txt_color,
va="center",
ha="center",
fontsize="x-small",
)
def _draw_load_circle(self, pos_x, pos_y):
patch = self._load_patch(
(pos_x, pos_y),
radius=self._load_radius,
facecolor=self._load_face_color,
edgecolor=self._load_edge_color,
)
self.ax.add_patch(patch)
def _draw_load_line(self, pos_x, pos_y, sub_x, sub_y):
codes = [Path.MOVETO, Path.LINETO]
verts = [(pos_x, pos_y), (sub_x, sub_y)]
path = Path(verts, codes)
patch = patches.PathPatch(
path, color=self._load_line_color, lw=self._load_line_width
)
self.ax.add_patch(patch)
def _draw_load_bus(self, pos_x, pos_y, norm_dir_x, norm_dir_y, bus_id):
center_x = pos_x + norm_dir_x * self._sub_radius
center_y = pos_y + norm_dir_y * self._sub_radius
face_color = self._line_bus_face_colors[bus_id]
patch = patches.Circle(
(center_x, center_y), radius=self._line_bus_radius, facecolor=face_color
)
self.ax.add_patch(patch)
    def draw_load(
        self,
        figure,
        observation,
        load_id,
        load_name,
        load_bus,
        load_value,
        load_unit,
        pos_x,
        pos_y,
        sub_x,
        sub_y,
    ):
        """Draw one load: connector line, symbol, optional text and bus dot.

        Also grows the plotting bounds (xlim/ylim) so the symbol stays visible.
        """
        self.xlim[0] = min(self.xlim[0], pos_x - self._load_radius)
        self.xlim[1] = max(self.xlim[1], pos_x + self._load_radius)
        self.ylim[0] = min(self.ylim[0], pos_y - self._load_radius)
        self.ylim[1] = max(self.ylim[1], pos_y + self._load_radius)
        self._draw_load_line(pos_x, pos_y, sub_x, sub_y)
        self._draw_load_circle(pos_x, pos_y)
        # assemble the optional info text displayed next to the symbol
        load_txt = ""
        if self._load_name:
            load_txt += '"{}":\n'.format(load_name)
        if self._load_id:
            load_txt += "id: {}\n".format(load_id)
        if load_value is not None:
            load_txt += pltu.format_value_unit(load_value, load_unit)
        if load_txt:
            self._draw_load_txt(pos_x, pos_y, sub_x, sub_y, load_txt)
        if self._display_load_name:
            self._draw_load_name(pos_x, pos_y, str(load_id))
        # dot on the substation edge showing the bus the load is connected to
        load_dir_x, load_dir_y = pltu.norm_from_points(sub_x, sub_y, pos_x, pos_y)
        self._draw_load_bus(sub_x, sub_y, load_dir_x, load_dir_y, load_bus)
    def update_load(
        self,
        figure,
        observation,
        load_id,
        load_name,
        load_bus,
        load_value,
        load_unit,
        pos_x,
        pos_y,
        sub_x,
        sub_y,
    ):
        """No-op: the matplotlib backend redraws from scratch instead of updating."""
        pass
    def draw_storage(
        self,
        figure,
        observation,
        load_id,
        load_name,
        load_bus,
        load_value,
        load_unit,
        pos_x,
        pos_y,
        sub_x,
        sub_y,
    ):
        """Draw one storage unit: connector, symbol, optional text and bus dot.

        Parameters are named ``load_*`` to mirror :func:`draw_load`.
        NOTE(review): the bounds, text and name display reuse the *load*
        radius / display flags (which share the same constructor defaults as
        the storage ones) — confirm before tuning load settings independently
        of storage ones.
        """
        self.xlim[0] = min(self.xlim[0], pos_x - self._load_radius)
        self.xlim[1] = max(self.xlim[1], pos_x + self._load_radius)
        self.ylim[0] = min(self.ylim[0], pos_y - self._load_radius)
        self.ylim[1] = max(self.ylim[1], pos_y + self._load_radius)
        self._draw_storage_line(
            pos_x, pos_y, sub_x, sub_y
        )  # line from the storage to the substation
        self._draw_storage_circle(pos_x, pos_y)  # storage element
        # assemble the optional info text displayed next to the symbol
        load_txt = ""
        if self._storage_name:
            load_txt += '"{}":\n'.format(load_name)
        if self._storage_id:
            load_txt += "id: {}\n".format(load_id)
        if load_value is not None:
            load_txt += pltu.format_value_unit(load_value, load_unit)
        if load_txt:
            self._draw_load_txt(pos_x, pos_y, sub_x, sub_y, load_txt)
        if self._display_load_name:
            self._draw_load_name(pos_x, pos_y, str(load_id))
        # dot on the substation edge showing the connected bus
        load_dir_x, load_dir_y = pltu.norm_from_points(sub_x, sub_y, pos_x, pos_y)
        self._draw_storage_bus(sub_x, sub_y, load_dir_x, load_dir_y, load_bus)
def _draw_storage_circle(self, pos_x, pos_y):
patch = self._storage_patch(
(pos_x, pos_y),
radius=self._storage_radius,
facecolor=self._storage_face_color,
edgecolor=self._storage_edge_color,
)
self.ax.add_patch(patch)
def _draw_storage_line(self, pos_x, pos_y, sub_x, sub_y):
codes = [Path.MOVETO, Path.LINETO]
verts = [(pos_x, pos_y), (sub_x, sub_y)]
path = Path(verts, codes)
patch = patches.PathPatch(
path, color=self._storage_line_color, lw=self._storage_line_width
)
self.ax.add_patch(patch)
def _draw_storage_bus(self, pos_x, pos_y, norm_dir_x, norm_dir_y, bus_id):
center_x = pos_x + norm_dir_x * self._sub_radius
center_y = pos_y + norm_dir_y * self._sub_radius
face_color = self._line_bus_face_colors[bus_id]
patch = patches.Circle(
(center_x, center_y), radius=self._line_bus_radius, facecolor=face_color
)
self.ax.add_patch(patch)
    def update_storage(
        self,
        figure,
        observation,
        storage_name,
        storage_id,
        storage_bus,
        storage_value,
        storage_unit,
        pos_x,
        pos_y,
        sub_x,
        sub_y,
    ):
        """No-op: the matplotlib backend redraws from scratch instead of updating."""
        pass
def _draw_gen_txt(self, pos_x, pos_y, sub_x, sub_y, text):
dir_x, dir_y = pltu.vec_from_points(sub_x, sub_y, pos_x, pos_y)
off_x, off_y = pltu.norm_from_points(sub_x, sub_y, pos_x, pos_y)
txt_x = pos_x + off_x * self._gen_radius
txt_y = pos_y + off_y * self._gen_radius
ha = self._h_textpos_from_dir(dir_x, dir_y)
va = self._v_textpos_from_dir(dir_x, dir_y)
self.ax.text(
txt_x,
txt_y,
text,
color=self._gen_txt_color,
wrap=True,
fontsize="small",
horizontalalignment=ha,
verticalalignment=va,
)
def _draw_gen_circle(self, pos_x, pos_y, gen_edgecolor):
patch = self._gen_patch(
(pos_x, pos_y),
radius=self._gen_radius,
edgecolor=gen_edgecolor,
facecolor=self._gen_face_color,
)
self.ax.add_patch(patch)
def _draw_gen_line(self, pos_x, pos_y, sub_x, sub_y):
codes = [Path.MOVETO, Path.LINETO]
verts = [(pos_x, pos_y), (sub_x, sub_y)]
path = Path(verts, codes)
patch = patches.PathPatch(
path, color=self._gen_line_color, lw=self._load_line_width
)
self.ax.add_patch(patch)
def _draw_gen_name(self, pos_x, pos_y, txt):
self.ax.text(
pos_x,
pos_y,
txt,
color=self._gen_txt_color,
va="center",
ha="center",
fontsize="x-small",
)
def _draw_gen_bus(self, pos_x, pos_y, norm_dir_x, norm_dir_y, bus_id):
center_x = pos_x + norm_dir_x * self._sub_radius
center_y = pos_y + norm_dir_y * self._sub_radius
face_color = self._line_bus_face_colors[bus_id]
patch = patches.Circle(
(center_x, center_y), radius=self._line_bus_radius, facecolor=face_color
)
self.ax.add_patch(patch)
    def draw_gen(
        self,
        figure,
        observation,
        gen_id,
        gen_name,
        gen_bus,
        gen_value,
        gen_unit,
        pos_x,
        pos_y,
        sub_x,
        sub_y,
    ):
        """Draw one generator: connector, symbol (possibly palette-colored),
        optional text and bus dot. Also grows the plotting bounds."""
        self.xlim[0] = min(self.xlim[0], pos_x - self._gen_radius)
        self.xlim[1] = max(self.xlim[1], pos_x + self._gen_radius)
        self.ylim[0] = min(self.ylim[0], pos_y - self._gen_radius)
        self.ylim[1] = max(self.ylim[1], pos_y + self._gen_radius)
        hide = False
        if isinstance(self._gen_edge_color, str):
            # case where the color of the generator is a string (same color for all generators)
            gen_color = self._gen_edge_color
        else:
            # palette mode (after assign_gen_palette): index into the color list
            my_val = observation.prod_p[gen_id]
            n_colors = len(self._gen_edge_color) - 1
            if np.isfinite(my_val):
                # NOTE(review): my_val is used as if it were a fraction in
                # [0, 1]; any value >= 1 clamps to the last color — confirm
                # the caller normalizes prod_p before relying on the gradient
                color_idx = max(0, min(n_colors, int(my_val * n_colors)))
            else:
                # non-finite production value: do not draw this generator
                color_idx = 0
                hide = True
            gen_color = self._gen_edge_color[color_idx]
        if not hide:
            self._draw_gen_line(pos_x, pos_y, sub_x, sub_y)
            self._draw_gen_circle(pos_x, pos_y, gen_color)
            # assemble the optional info text displayed next to the symbol
            gen_txt = ""
            if self._gen_name:
                gen_txt += '"{}":\n'.format(gen_name)
            if self._gen_id:
                gen_txt += "id: {}\n".format(gen_id)
            if gen_value is not None and self._display_gen_value:
                gen_txt += pltu.format_value_unit(gen_value, gen_unit)
            if gen_txt:
                self._draw_gen_txt(pos_x, pos_y, sub_x, sub_y, gen_txt)
            if self._display_gen_name:
                self._draw_gen_name(pos_x, pos_y, str(gen_id))
            # dot on the substation edge showing the connected bus
            gen_dir_x, gen_dir_y = pltu.norm_from_points(sub_x, sub_y, pos_x, pos_y)
            self._draw_gen_bus(sub_x, sub_y, gen_dir_x, gen_dir_y, gen_bus)
    def update_gen(
        self,
        figure,
        observation,
        gen_id,
        gen_name,
        gen_bus,
        gen_value,
        gen_unit,
        pos_x,
        pos_y,
        sub_x,
        sub_y,
    ):
        """No-op: the matplotlib backend redraws from scratch instead of updating."""
        pass
    def _draw_powerline_txt(self, pos_or_x, pos_or_y, pos_ex_x, pos_ex_y, text):
        """Write the powerline info text at the middle of the line, slightly offset.

        NOTE(review): this reuses ``_gen_txt_color`` and ``_load_radius``
        because no line-specific text settings exist — confirm before changing
        either of those attributes for generators / loads only.
        """
        pos_x, pos_y = pltu.middle_from_points(pos_or_x, pos_or_y, pos_ex_x, pos_ex_y)
        # offset orthogonally to the line so the text does not sit on it
        off_x, off_y = pltu.orth_norm_from_points(
            pos_or_x, pos_or_y, pos_ex_x, pos_ex_y
        )
        txt_x = pos_x + off_x * (self._load_radius / 2)
        txt_y = pos_y + off_y * (self._load_radius / 2)
        ha = self._h_textpos_from_dir(off_x, off_y)
        va = self._v_textpos_from_dir(off_x, off_y)
        self.ax.text(
            txt_x,
            txt_y,
            text,
            color=self._gen_txt_color,
            fontsize="small",
            horizontalalignment=ha,
            verticalalignment=va,
        )
def _draw_powerline_line(
self, pos_or_x, pos_or_y, pos_ex_x, pos_ex_y, color, line_style
):
codes = [Path.MOVETO, Path.LINETO]
verts = [(pos_or_x, pos_or_y), (pos_ex_x, pos_ex_y)]
path = Path(verts, codes)
patch = patches.PathPatch(
path, color=color, lw=self._line_color_width, ls=line_style
)
self.ax.add_patch(patch)
def _draw_powerline_bus(self, pos_x, pos_y, norm_dir_x, norm_dir_y, bus_id):
center_x = pos_x + norm_dir_x * self._sub_radius
center_y = pos_y + norm_dir_y * self._sub_radius
face_color = self._line_bus_face_colors[bus_id]
patch = patches.Circle(
(center_x, center_y), radius=self._line_bus_radius, facecolor=face_color
)
self.ax.add_patch(patch)
    def _draw_powerline_arrow(
        self, pos_or_x, pos_or_y, pos_ex_x, pos_ex_y, color, watt_value
    ):
        """Draw the flow-direction arrow on a powerline.

        The arrow points from origin to extremity when ``watt_value`` is
        positive; for a negative flow it is flipped and placed one extra
        arrow-length along the line so it still sits past the substation.
        """
        sign = 1.0 if watt_value > 0.0 else -1.0
        off = 1.0 if watt_value > 0.0 else 2.0
        dx, dy = pltu.norm_from_points(pos_or_x, pos_or_y, pos_ex_x, pos_ex_y)
        lx = dx * self._line_arrow_len
        ly = dy * self._line_arrow_len
        # start just outside the origin substation circle
        arr_x = pos_or_x + dx * self._sub_radius + off * lx
        arr_y = pos_or_y + dy * self._sub_radius + off * ly
        patch = patches.FancyArrow(
            arr_x,
            arr_y,
            sign * lx,
            sign * ly,
            length_includes_head=True,
            head_length=self._line_arrow_len,
            head_width=self._line_arrow_width,
            edgecolor=color,
            facecolor=color,
        )
        self.ax.add_patch(patch)
    def assign_line_palette(
        self, palette_name="YlOrRd", nb_color=10, line_color_scheme=None
    ):
        """
        Assign a new color palette when you want to plot information on the powerline.

        Parameters
        ----------
        palette_name: ``str``
            Name of the matplotlib.pyplot palette to use (name forwarded to `plt.get_cmap(palette_name)`)

        nb_color: ``int``
            Number of color to use

        line_color_scheme: ``list``
            If not ``None``, this list of colors is used directly (deep copied)
            and `palette_name` / `nb_color` are ignored.

        Examples
        -------
        .. code-block:: python

            # color a grid based on the value of the thermal limit
            plot_helper.assign_line_palette(nb_color=100)

            # plot this grid
            _ = plot_helper.plot_info(line_values=env.get_thermal_limit(), line_unit="A", coloring="line")

            # restore the default coloring (["blue", "orange", "red"])
            plot_helper.restore_line_palette()

        Notes
        -----
        Some palette are available there `colormaps <https://matplotlib.org/tutorials/colors/colormaps.html>`_

        """
        if line_color_scheme is None:
            # sample nb_color colors evenly from the requested colormap
            palette = plt.get_cmap(palette_name)
            cols = []
            for i in range(1, nb_color + 1):
                cols.append(palette(i / nb_color))
            self._line_color_scheme = cols
        else:
            # deep copy so later caller-side mutation cannot affect the plot
            self._line_color_scheme = copy.deepcopy(line_color_scheme)
    def restore_line_palette(self):
        """Restore the default powerline color scheme (["blue", "orange", "red"])."""
        self._line_color_scheme = self._line_color_scheme_orig
    def assign_gen_palette(
        self,
        palette_name="YlOrRd",
        nb_color=10,
        increase_gen_size=None,
        gen_line_width=None,
    ):
        """
        Assign a new color palette when you want to plot information on the generator.

        Parameters
        ----------
        palette_name: ``str``
            Name of the matplotlib.pyplot palette to use (name forwarded to `plt.get_cmap(palette_name)`).
            Set it to ``None`` (or ``nb_color`` to 0) to keep the current colors.

        nb_color: ``int``
            Number of color to use

        increase_gen_size: ``float``
            Whether or not to increase the generator sizes (``None`` to disable this feature, 1 has no effect)

        gen_line_width: ``float``
            Increase the width of the generator (if not ``None``)

        Examples
        -------
        .. code-block:: python

            # color a grid based on the value of the thermal limit
            plot_helper.assign_gen_palette(nb_color=100)

            # plot this grid
            _ = plot_helper.plot_info(gen_values=env.gen_pmax, coloring="gen")

            # restore the default coloring (all green)
            plot_helper.restore_gen_palette()

        Notes
        -----
        Some palette are available there `colormaps <https://matplotlib.org/tutorials/colors/colormaps.html>`_

        """
        if palette_name is not None and nb_color > 0:
            # the user changed the palette
            palette = plt.get_cmap(palette_name)
            cols = []
            for i in range(1, nb_color + 1):
                cols.append(palette(i / nb_color))
            self._gen_edge_color = cols
        if increase_gen_size is not None:
            # the user changed the generator sizes
            self._gen_radius = float(increase_gen_size) * self._gen_radius_orig
        if gen_line_width is not None:
            # the user changed the generator line width
            self._gen_line_width = float(gen_line_width)
    def restore_gen_palette(self):
        """Restore the generator edge color, radius and line width to their defaults."""
        self._gen_edge_color = self._gen_edge_color_orig
        self._gen_radius = self._gen_radius_orig
        self._gen_line_width = self._gen_line_width_orig
    def draw_powerline(
        self,
        figure,
        observation,
        line_id,
        line_name,
        connected,
        line_value,
        line_unit,
        or_bus,
        pos_or_x,
        pos_or_y,
        ex_bus,
        pos_ex_x,
        pos_ex_y,
    ):
        """Draw one powerline: colored segment, optional text, bus dots, flow arrow."""
        rho = observation.rho[line_id]
        n_colors = len(self._line_color_scheme) - 1
        hide = False
        if np.isfinite(rho):
            # map rho (capacity usage, expected in [0, 1]) onto the color scheme
            color_idx = max(0, min(n_colors, int(rho * n_colors)))
        else:
            # non-finite rho: keep the default black color
            color_idx = 0
            hide = True
        color = "black"
        if not hide:
            if connected and rho > 0.0:
                color = self._line_color_scheme[color_idx]
        # dashed style when the powerline is disconnected
        line_style = "-" if connected else "--"
        self._draw_powerline_line(
            pos_or_x, pos_or_y, pos_ex_x, pos_ex_y, color, line_style
        )
        # Deal with line text configurations
        txt = ""
        if self._line_name:
            txt += '"{}"\n'.format(line_name)
        if self._line_id:
            txt += "id: {}\n".format(str(line_id))
        if line_value is not None:
            txt += pltu.format_value_unit(line_value, line_unit)
        if txt:
            self._draw_powerline_txt(pos_or_x, pos_or_y, pos_ex_x, pos_ex_y, txt)
        # bus dots at both substation ends of the line
        or_dir_x, or_dir_y = pltu.norm_from_points(
            pos_or_x, pos_or_y, pos_ex_x, pos_ex_y
        )
        self._draw_powerline_bus(pos_or_x, pos_or_y, or_dir_x, or_dir_y, or_bus)
        ex_dir_x, ex_dir_y = pltu.norm_from_points(
            pos_ex_x, pos_ex_y, pos_or_x, pos_or_y
        )
        self._draw_powerline_bus(pos_ex_x, pos_ex_y, ex_dir_x, ex_dir_y, ex_bus)
        # arrow showing the direction of the active power flow (origin side)
        watt_value = observation.p_or[line_id]
        if rho > 0.0 and watt_value != 0.0:
            self._draw_powerline_arrow(
                pos_or_x, pos_or_y, pos_ex_x, pos_ex_y, color, watt_value
            )
    def update_powerline(
        self,
        figure,
        observation,
        line_id,
        line_name,
        connected,
        line_value,
        line_unit,
        or_bus,
        pos_or_x,
        pos_or_y,
        ex_bus,
        pos_ex_x,
        pos_ex_y,
    ):
        """No-op: the matplotlib backend redraws from scratch instead of updating."""
        pass
    def _get_gen_legend(self):
        """Return the legend proxy artist and the custom handler for generators."""
        if isinstance(self._gen_edge_color, str):
            gen_legend_col = self._gen_edge_color
        else:
            # palette mode: use the middle color as a representative sample
            gen_legend_col = self._gen_edge_color[int(len(self._gen_edge_color) / 2)]
        my_res = self._gen_resolution

        class GenObjectHandler:
            # matplotlib legend handler: draws a small GenDraw polygon
            # centered inside the legend box
            def legend_artist(self, legend, orig_handle, fontsize, handlebox):
                xdescent, ydescent = handlebox.xdescent, handlebox.ydescent
                width, height = handlebox.width, handlebox.height
                center = 0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent
                pp_ = GenDraw(
                    xy=center,
                    radius=min(width, height),
                    facecolor="w",
                    edgecolor=gen_legend_col,
                    transform=handlebox.get_transform(),
                    resolution=my_res,
                )
                handlebox.add_artist(pp_)
                return pp_

        gen_legend = self._gen_patch(
            (0, 0),
            facecolor=self._gen_face_color,
            edgecolor=gen_legend_col,
            radius=self._gen_radius,
        )
        return gen_legend, GenObjectHandler()
    def _get_load_legend(self):
        """Return the legend proxy artist and the custom handler for loads."""
        if isinstance(self._load_edge_color, str):
            load_legend_col = self._load_edge_color
        else:
            # palette mode: use the middle color as a representative sample
            load_legend_col = self._load_edge_color[int(len(self._load_edge_color) / 2)]
        my_res = self._load_resolution

        class LoadObjectHandler:
            # matplotlib legend handler: draws a small LoadDraw polygon
            # centered inside the legend box
            def legend_artist(self, legend, orig_handle, fontsize, handlebox):
                xdescent, ydescent = handlebox.xdescent, handlebox.ydescent
                width, height = handlebox.width, handlebox.height
                center = 0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent
                pp_ = LoadDraw(
                    xy=center,
                    radius=min(width, height),
                    facecolor="w",
                    edgecolor=load_legend_col,
                    transform=handlebox.get_transform(),
                    resolution=my_res,
                )
                handlebox.add_artist(pp_)
                return pp_

        load_legend = self._load_patch(
            (0, 0),
            facecolor=self._load_face_color,
            edgecolor=load_legend_col,
            radius=self._load_radius,
        )
        return load_legend, LoadObjectHandler()
    def _get_storage_legend(self):
        """Return the legend proxy artist and the custom handler for storage units."""
        if isinstance(self._storage_edge_color, str):
            storage_legend_col = self._storage_edge_color
        else:
            # palette mode: use the middle color as a representative sample
            storage_legend_col = self._storage_edge_color[
                int(len(self._storage_edge_color) / 2)
            ]
        my_res = self._storage_resolution

        class StorageObjectHandler:
            # matplotlib legend handler: draws a small StorageDraw polygon
            # centered inside the legend box
            def legend_artist(self, legend, orig_handle, fontsize, handlebox):
                xdescent, ydescent = handlebox.xdescent, handlebox.ydescent
                width, height = handlebox.width, handlebox.height
                center = 0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent
                pp_ = StorageDraw(
                    xy=center,
                    radius=min(width, height),
                    facecolor="w",
                    edgecolor=storage_legend_col,
                    transform=handlebox.get_transform(),
                    resolution=my_res,
                )
                handlebox.add_artist(pp_)
                return pp_

        storage_legend = self._storage_patch(
            (0, 0),
            facecolor=self._storage_face_color,
            edgecolor=storage_legend_col,
            radius=self._storage_radius,
        )
        return storage_legend, StorageObjectHandler()
def draw_legend(self, figure, observation):
title_str = observation.env_name
if hasattr(observation, "month"):
title_str = "{:02d}/{:02d} {:02d}:{:02d}".format(
observation.day,
observation.month,
observation.hour_of_day,
observation.minute_of_hour,
)
# generate the right legend for generator
gen_legend, gen_handler = self._get_gen_legend()
# generate the correct legend for load
load_legend, load_handler = self._get_load_legend()
# generate the correct legend for storage
storage_legend, storage_handler = self._get_storage_legend()
legend_help = [
Line2D([0], [0], color="black", lw=1),
Line2D([0], [0], color=self._sub_edge_color, lw=3),
load_legend,
gen_legend,
storage_legend,
Line2D([0], [0], marker="o", color=self._line_bus_face_colors[0]),
Line2D([0], [0], marker="o", color=self._line_bus_face_colors[1]),
Line2D([0], [0], marker="o", color=self._line_bus_face_colors[2]),
]
self.legend = self.ax.legend(
legend_help,
[
"powerline",
"substation",
"load",
"generator",
"storage",
"no bus",
"bus 1",
"bus 2",
],
title=title_str,
handler_map={
GenDraw: gen_handler,
LoadDraw: load_handler,
StorageDraw: storage_handler,
},
)
# Hide axis
self.ax.get_xaxis().set_visible(False)
self.ax.get_yaxis().set_visible(False)
# Hide frame
self.ax.set(frame_on=False)
# save the figure
self.figure = figure
def plot_postprocess(self, figure, observation, update):
if not update:
xmin = self.xlim[0] - self.xpad
xmax = self.xlim[1] + self.xpad
self.ax.set_xlim(xmin, xmax)
ymin = self.ylim[0] - self.ypad
ymax = self.ylim[1] + self.ypad
self.ax.set_ylim(ymin, ymax)
figure.tight_layout()
def _save_plot_charact(self):
_gen_edge_color_orig = self._gen_edge_color
_gen_radius_orig = self._gen_radius
_gen_line_width_orig = self._gen_line_width
_display_gen_value = self._display_gen_value
_display_gen_name = self._display_gen_name
_display_sub_name = self._display_sub_name
_display_load_name = self._display_load_name
return (
_gen_edge_color_orig,
_gen_radius_orig,
_gen_line_width_orig,
_display_gen_value,
_display_gen_name,
_display_sub_name,
_display_load_name,
)
def _restore_plot_charact(self, data):
(
_gen_edge_color_orig,
_gen_radius_orig,
_gen_line_width_orig,
_display_gen_value,
_display_gen_name,
_display_sub_name,
_display_load_name,
) = data
self._gen_edge_color = _gen_edge_color_orig
self._gen_radius = _gen_radius_orig
self._gen_line_width = _gen_line_width_orig
self._display_gen_value = _display_gen_value
self._display_gen_name = _display_gen_name
self._display_sub_name = _display_sub_name
self._display_load_name = _display_load_name
    def plot_gen_type(self, increase_gen_size=1.5, gen_line_width=3):
        """Plot the grid with each generator colored by its type.

        Names / values are temporarily hidden and generators enlarged; all
        display settings are restored before returning the figure.
        """
        # save the state of the generators config
        data = self._save_plot_charact()

        # do the plot
        self._display_gen_value = False
        self._display_gen_name = False
        self._display_sub_name = False
        self._display_load_name = False
        self.assign_gen_palette(
            nb_color=0,
            increase_gen_size=increase_gen_size,
            gen_line_width=gen_line_width,
        )
        # one fixed color per generator type (module-level COLOR_GEN / TYPE_GEN)
        self._gen_edge_color = [COLOR_GEN[i] for i in range(len(TYPE_GEN))]
        gen_values = [TYPE_GEN[el] for el in self.observation_space.gen_type]
        self.figure = self.plot_info(gen_values=gen_values, coloring="gen")
        self.add_legend_gentype()

        # restore the state to its initial configuration
        self._restore_plot_charact(data)
        return self.figure
    def plot_current_dispatch(
        self,
        obs,
        do_plot_actual_dispatch=True,
        increase_gen_size=1.5,
        gen_line_width=3,
        palette_name="coolwarm",
    ):
        """Plot the grid with generators colored by their redispatch value.

        Uses ``obs.actual_dispatch`` (default) or ``obs.target_dispatch``.
        Display settings are temporarily overridden and restored afterwards.
        """
        # save the state of the generators config
        data = self._save_plot_charact()

        # do the plot
        self._display_sub_name = False
        self._display_load_name = False
        self.assign_gen_palette(
            nb_color=5,
            palette_name=palette_name,
            increase_gen_size=increase_gen_size,
            gen_line_width=gen_line_width,
        )
        if do_plot_actual_dispatch:
            gen_values = obs.actual_dispatch
        else:
            gen_values = obs.target_dispatch
        self.figure = self.plot_info(
            gen_values=gen_values, coloring="gen", gen_unit="MW"
        )

        # restore the state to its initial configuration
        self._restore_plot_charact(data)
        return self.figure
    def add_legend_gentype(self, loc="lower right"):
        """Add a second legend mapping each generator type to its color.

        The main legend (saved in ``self.legend``) is re-added afterwards
        because calling ``ax.legend`` a second time replaces it.
        """
        keys = sorted(TYPE_GEN.keys())
        ax_ = self.figure.axes[0]
        legend_help = [
            Line2D([0], [0], color=COLOR_GEN[TYPE_GEN[k]], label=k) for k in keys
        ]
        _ = ax_.legend(legend_help, keys, title="generator types", loc=loc)
        ax_.add_artist(self.legend)
| 39,665 | 32.873612 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/PlotGrid/PlotPlotly.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
import imageio
import warnings
# lazy loading of plotting utilities, to save loading time
import plotly.graph_objects as go
import plotly.colors as pc
from grid2op.PlotGrid.BasePlot import BasePlot
from grid2op.PlotGrid.PlotUtil import PlotUtil as pltu
class PlotPlotly(BasePlot):
"""
This class uses the python library "plotly" to draw the powergrid. Plotly has the ability to generate
interactive graphs.
Examples
--------
You can use it this way:
.. code-block:: python
import grid2op
from grid2op.PlotGrid import PlotPlotly
env = grid2op.make()
plot_helper = PlotPlotly(env.observation_space)
# and now plot an observation (for example)
obs = env.reset()
fig = plot_helper.plot_obs(obs)
fig.show()
# more information about it on the `getting_started/8_PlottingCapabilities.ipynb` notebook of grid2op
"""
    def __init__(
        self,
        observation_space,
        width=1280,
        height=720,
        grid_layout=None,
        responsive=False,
        scale=2000.0,
        sub_radius=25,
        load_radius=12,
        gen_radius=12,
        show_gen_txt=False,
        show_load_txt=False,
    ):
        """Build the plotly plot helper and initialize every display setting."""
        super().__init__(observation_space, width, height, scale, grid_layout)
        self.show_gen_txt = show_gen_txt
        self.show_load_txt = show_load_txt
        # when responsive, the figure adapts to its container instead of
        # being pinned to the fixed width/height
        self._responsive = responsive
        # substation appearance; "*_prefix" strings namespace the trace names
        # so update_* methods can select them later
        self._sub_radius = sub_radius
        self._sub_fill_color = "PaleTurquoise"
        self._sub_line_color = "black"
        self._sub_line_width = 1
        self._sub_prefix = "z"
        # load appearance
        self._load_radius = load_radius
        self._load_fill_color = "DarkOrange"
        self._load_line_color = "black"
        self._load_line_width = 1
        self._load_prefix = "c"
        # generator appearance
        self._gen_radius = gen_radius
        self._gen_fill_color = "LightGreen"
        self._gen_line_color = "black"
        self._gen_line_width = 1
        self._gen_prefix = "b"
        # powerline appearance: blues (low rho) -> oranges -> reds (high rho)
        self._line_prefix = "a"
        self.line_color_scheme = (
            pc.sequential.Blues_r[:4]
            + pc.sequential.Oranges[4:6]
            + pc.sequential.Reds[-3:-1]
        )
        self._line_bus_radius = 10
        self._line_bus_colors = ["black", "red", "lime"]
        self._bus_prefix = "_bus_"
        self._or_prefix = "_or_"
        self._ex_prefix = "_ex_"
        self._line_arrow_radius = 10
        self._line_arrow_len = 5
        self._arrow_prefix = "_->_"
def _textpos_from_dir(self, dirx, diry):
typos = "bottom" if diry < 0 else "top"
txpos = "left" if dirx < 0 else "right"
return "{} {}".format(typos, txpos)
def _set_layout(self, f):
if not self._responsive:
f.update_layout(
width=self.width,
height=self.height,
xaxis=dict(visible=False),
yaxis=dict(visible=False),
plot_bgcolor="rgba(0,0,0,0)",
margin=dict(l=0, r=0, b=0, t=0, pad=0),
)
else:
f.update_layout(
xaxis=dict(visible=False),
yaxis=dict(visible=False),
plot_bgcolor="rgba(0,0,0,0)",
margin=dict(l=0, r=0, b=0, t=0, pad=0),
)
def create_figure(self):
f = go.Figure()
self._set_layout(f)
return f
def clear_figure(self, figure):
figure.layout = {}
figure.data = []
f = figure
self._set_layout(f)
def convert_figure_to_numpy_HWC(self, figure):
try:
img_bytes = figure.to_image(
format="png", width=self.width, height=self.height, scale=1
)
return imageio.imread(img_bytes, format="png")
except:
warnings.warn("Plotly need additional dependencies for offline rendering")
return np.full((self.height, self.width, 3), 255, dtype=np.unit8)
def _draw_substation_txt(self, name, pos_x, pos_y, text):
return go.Scatter(
x=[pos_x],
y=[pos_y],
text=[text],
mode="text",
name=name,
textposition="middle center",
hoverinfo="skip",
showlegend=False,
)
def _draw_substation_circle(self, name, pos_x, pos_y):
marker_dict = dict(
size=self._sub_radius,
color=self._sub_fill_color,
showscale=False,
line=dict(width=self._sub_line_width, color=self._sub_line_color),
)
return go.Scatter(
x=[pos_x],
y=[pos_y],
mode="markers",
text=[name],
name=self._sub_prefix + name,
marker=marker_dict,
showlegend=False,
)
def draw_substation(self, figure, observation, sub_id, sub_name, pos_x, pos_y):
circle_trace = self._draw_substation_circle(sub_name, pos_x, pos_y)
figure.add_trace(circle_trace)
txt_trace = self._draw_substation_txt(sub_name, pos_x, pos_y, str(sub_id))
figure.add_trace(txt_trace)
def _draw_load_txt(self, name, pos_x, pos_y, text, textpos):
return go.Scatter(
x=[pos_x],
y=[pos_y],
text=[text],
mode="text",
name=name,
hoverinfo="skip",
textposition=textpos,
showlegend=False,
)
def _draw_load_circle(self, pos_x, pos_y, name, text):
marker_dict = dict(
size=self._load_radius,
color=self._load_fill_color,
showscale=False,
line=dict(width=self._load_line_width, color=self._load_line_color),
)
return go.Scatter(
x=[pos_x],
y=[pos_y],
mode="markers",
text=[text],
name=self._load_prefix + name,
marker=marker_dict,
showlegend=False,
)
def _draw_load_line(self, pos_x, pos_y, sub_x, sub_y):
style_line = dict(color="black", width=self._load_line_width)
line_trace = go.Scatter(
x=[pos_x, sub_x],
y=[pos_y, sub_y],
hoverinfo="skip",
line=style_line,
showlegend=False,
)
return line_trace
def _draw_load_bus(self, pos_x, pos_y, dir_x, dir_y, bus, load_name):
bus = bus if bus > 0 else 0
marker_dict = dict(
size=self._line_bus_radius,
color=self._line_bus_colors[bus],
showscale=False,
)
center_x = pos_x + dir_x * (self._sub_radius - self._line_bus_radius)
center_y = pos_y + dir_y * (self._sub_radius - self._line_bus_radius)
trace_name = self._load_prefix + self._bus_prefix + load_name
return go.Scatter(
x=[center_x],
y=[center_y],
marker=marker_dict,
name=trace_name,
hoverinfo="skip",
showlegend=False,
)
    def draw_load(
        self,
        figure,
        observation,
        load_id,
        load_name,
        load_bus,
        load_value,
        load_unit,
        pos_x,
        pos_y,
        sub_x,
        sub_y,
    ):
        """Create and add all the plotly traces of one load to *figure*.

        A load is rendered as: an optional text caption (if ``load_value`` is
        given and ``show_load_txt`` is enabled), a segment toward its
        substation, a circle at the load position, and a bus marker placed on
        the substation rim.
        """
        # unit vector pointing from the substation toward the load
        dir_x, dir_y = pltu.norm_from_points(sub_x, sub_y, pos_x, pos_y)
        # NOTE(review): identical to (dir_x, dir_y) -- the duplicate call looks redundant
        nd_x, nd_y = pltu.norm_from_points(sub_x, sub_y, pos_x, pos_y)
        load_text = ""
        if load_value is not None:
            # caption slightly offset away from the substation
            txt_x = pos_x + nd_x * (self._load_radius / 2)
            txt_y = pos_y + nd_y * (self._load_radius / 2)
            text_pos = self._textpos_from_dir(dir_x, dir_y)
            load_text = load_name + "<br>"
            load_text += pltu.format_value_unit(load_value, load_unit)
            if self.show_load_txt:
                trace1 = self._draw_load_txt(
                    load_name, txt_x, txt_y, load_text, text_pos
                )
                figure.add_trace(trace1)
        trace2 = self._draw_load_line(pos_x, pos_y, sub_x, sub_y)
        figure.add_trace(trace2)
        trace3 = self._draw_load_circle(pos_x, pos_y, load_name, load_text)
        figure.add_trace(trace3)
        # bus marker sits at the substation end of the segment
        trace4 = self._draw_load_bus(sub_x, sub_y, dir_x, dir_y, load_bus, load_name)
        figure.add_trace(trace4)
def update_load(
self,
figure,
observation,
load_id,
load_name,
load_bus,
load_value,
load_unit,
pos_x,
pos_y,
sub_x,
sub_y,
):
load_text = ""
if load_value is not None:
load_text = load_name + "<br>"
load_text += pltu.format_value_unit(load_value, load_unit)
if self.show_load_txt:
figure.update_traces(text=load_text, selector=dict(name=load_name))
circle_name = self._load_prefix + load_name
figure.update_traces(text=load_text, selector=dict(name=circle_name))
load_marker = dict(color=self._line_bus_colors[load_bus])
load_select_name = self._load_prefix + self._bus_prefix + load_name
figure.update_traces(marker=load_marker, selector=dict(name=load_select_name))
def _draw_gen_txt(self, name, pos_x, pos_y, text, text_pos):
return go.Scatter(
x=[pos_x],
y=[pos_y],
text=[text],
name=name,
mode="text",
hoverinfo="skip",
textposition=text_pos,
showlegend=False,
)
def _draw_gen_circle(self, pos_x, pos_y, name, text):
marker_dict = dict(
size=self._gen_radius,
color=self._gen_fill_color,
showscale=False,
line=dict(width=self._gen_line_width, color=self._gen_line_color),
)
return go.Scatter(
x=[pos_x],
y=[pos_y],
mode="markers",
text=[text],
name=self._gen_prefix + name,
marker=marker_dict,
showlegend=False,
)
def _draw_gen_line(self, pos_x, pos_y, sub_x, sub_y):
style_line = dict(color="black", width=self._gen_line_width)
line_trace = go.Scatter(
x=[pos_x, sub_x],
y=[pos_y, sub_y],
hoverinfo="skip",
line=style_line,
showlegend=False,
)
return line_trace
def _draw_gen_bus(self, pos_x, pos_y, dir_x, dir_y, bus, gen_name):
bus = bus if bus > 0 else 0
marker_dict = dict(
size=self._line_bus_radius,
color=self._line_bus_colors[bus],
showscale=False,
)
center_x = pos_x + dir_x * (self._sub_radius - self._line_bus_radius)
center_y = pos_y + dir_y * (self._sub_radius - self._line_bus_radius)
trace_name = self._gen_prefix + self._bus_prefix + gen_name
return go.Scatter(
x=[center_x],
y=[center_y],
marker=marker_dict,
name=trace_name,
hoverinfo="skip",
showlegend=False,
)
    def draw_gen(
        self,
        figure,
        observation,
        gen_id,
        gen_name,
        gen_bus,
        gen_value,
        gen_unit,
        pos_x,
        pos_y,
        sub_x,
        sub_y,
    ):
        """Create and add all the plotly traces of one generator to *figure*.

        A generator is rendered as: an optional text caption (if ``gen_value``
        is given and ``show_gen_txt`` is enabled), a segment toward its
        substation, a circle at the generator position, and a bus marker
        placed on the substation rim.
        """
        # unit vector pointing from the substation toward the generator
        dir_x, dir_y = pltu.norm_from_points(sub_x, sub_y, pos_x, pos_y)
        # NOTE(review): identical to (dir_x, dir_y) -- the duplicate call looks redundant
        nd_x, nd_y = pltu.norm_from_points(sub_x, sub_y, pos_x, pos_y)
        gen_text = ""
        if gen_value is not None:
            # caption slightly offset away from the substation
            txt_x = pos_x + nd_x * (self._gen_radius / 2)
            txt_y = pos_y + nd_y * (self._gen_radius / 2)
            text_pos = self._textpos_from_dir(dir_x, dir_y)
            gen_text = gen_name + "<br>"
            gen_text += pltu.format_value_unit(gen_value, gen_unit)
            if self.show_gen_txt:
                trace1 = self._draw_gen_txt(gen_name, txt_x, txt_y, gen_text, text_pos)
                figure.add_trace(trace1)
        trace2 = self._draw_gen_line(pos_x, pos_y, sub_x, sub_y)
        figure.add_trace(trace2)
        trace3 = self._draw_gen_circle(pos_x, pos_y, gen_name, gen_text)
        figure.add_trace(trace3)
        # bus marker sits at the substation end of the segment
        trace4 = self._draw_gen_bus(sub_x, sub_y, dir_x, dir_y, gen_bus, gen_name)
        figure.add_trace(trace4)
def update_gen(
self,
figure,
observation,
gen_name,
gen_id,
gen_bus,
gen_value,
gen_unit,
pos_x,
pos_y,
sub_x,
sub_y,
):
gen_text = ""
if gen_value is not None:
gen_text = gen_name + "<br>"
gen_text += pltu.format_value_unit(gen_value, gen_unit)
if self.show_gen_txt:
figure.update_traces(text=gen_text, selector=dict(name=gen_name))
circle_name = self._gen_prefix + gen_name
figure.update_traces(text=gen_text, selector=dict(name=circle_name))
gen_marker = dict(color=self._line_bus_colors[gen_bus])
gen_select_name = self._gen_prefix + self._bus_prefix + gen_name
figure.update_traces(marker=gen_marker, selector=dict(name=gen_select_name))
    def _draw_powerline_txt(self, name, pos_or_x, pos_or_y, pos_ex_x, pos_ex_y, text):
        """Build the text trace carrying the caption of one powerline.

        The caption is anchored at the middle of the line, offset by 2 units
        along the direction orthogonal to the line.

        NOTE(review): if both ends coincide, ``orth_norm`` is 0 and the
        divisions below raise/produce NaN -- presumably never happens for a
        real grid layout, confirm.
        """
        # middle of the segment
        mid_x = (pos_or_x + pos_ex_x) / 2
        mid_y = (pos_or_y + pos_ex_y) / 2
        dir_x = pos_ex_x - pos_or_x
        dir_y = pos_ex_y - pos_or_y
        # orthogonal direction (90 degrees counter-clockwise)
        orth_x = -dir_y
        orth_y = dir_x
        orth_norm = np.linalg.norm([orth_x, orth_y])
        # offset the text 2 units away from the line
        txt_x = mid_x + (orth_x / orth_norm) * 2
        txt_y = mid_y + (orth_y / orth_norm) * 2
        text_pos = self._textpos_from_dir(orth_x, orth_y)
        txt_trace = go.Scatter(
            x=[txt_x],
            y=[txt_y],
            text=[text],
            name=name,
            mode="text",
            textposition=text_pos,
            showlegend=False,
        )
        return txt_trace
def _draw_powerline_line(self, name, pos_or_x, pos_or_y, pos_ex_x, pos_ex_y, style):
line_trace = go.Scatter(
x=[pos_or_x, pos_ex_x],
y=[pos_or_y, pos_ex_y],
name=self._line_prefix + name,
line=style,
hoverinfo="skip",
showlegend=False,
)
return line_trace
def _draw_powerline_bus(
self, pos_x, pos_y, dir_x, dir_y, bus, line_name, side_prefix
):
marker_dict = dict(
size=self._line_bus_radius,
color=self._line_bus_colors[bus],
showscale=False,
)
center_x = pos_x + dir_x * (self._sub_radius - self._line_bus_radius)
center_y = pos_y + dir_y * (self._sub_radius - self._line_bus_radius)
trace_name = self._line_prefix + self._bus_prefix + side_prefix + line_name
return go.Scatter(
x=[center_x],
y=[center_y],
marker=marker_dict,
name=trace_name,
hoverinfo="skip",
showlegend=False,
)
def _plotly_tri_from_line_dir_and_sign(self, dx, dy, sign):
# One dimension dirs
if dx >= -0.25 and dx <= 0.25: # Vertical
if (dy < 0.0 and sign > 0.0) or (dy > 0.0 and sign < 0.0):
return "triangle-down"
else:
return "triangle-up"
if dy >= -0.25 and dy <= 0.25: # Horizontal
if (dx < 0.0 and sign > 0.0) or (dx > 0.0 and sign < 0.0):
return "triangle-left"
else:
return "triangle-right"
# Two dimensions dirs
if dx >= 0.0 and dy >= 0.0 and sign >= 0.0: # NE
return "triangle-ne"
if dx >= 0.0 and dy >= 0.0 and sign < 0.0: # NE * -1 = SW
return "triangle-sw"
if dx >= 0.0 and dy < 0.0 and sign >= 0.0: # SE
return "triangle-se"
if dx >= 0.0 and dy < 0.0 and sign < 0.0: # SE *-1 = NW
return "triangle-nw"
if dx < 0.0 and dy >= 0.0 and sign >= 0.0: # NW
return "triangle-nw"
if dx < 0.0 and dy >= 0.0 and sign < 0.0: # NW * -1 = SE
return "triangle-se"
if dx < 0.0 and dy < 0.0 and sign >= 0.0: # SW
return "triangle-sw"
if dx < 0.0 and dy < 0.0 and sign < 0.0: # SW*-1 = NE
return "triangle-ne"
return "triangle-up-dot" # Should not be reached
def _draw_powerline_arrow(
self, pos_or_x, pos_or_y, pos_ex_x, pos_ex_y, watts_value, line_name, line_color
):
cx, cy = pltu.middle_from_points(pos_or_x, pos_or_y, pos_ex_x, pos_ex_y)
dx, dy = pltu.norm_from_points(pos_or_x, pos_or_y, pos_ex_x, pos_ex_y)
sym = self._plotly_tri_from_line_dir_and_sign(dx, dy, watts_value)
marker_dict = dict(
size=self._line_arrow_radius, color=line_color, showscale=False, symbol=sym
)
sub_offx = dx * self._sub_radius
sub_offy = dy * self._sub_radius
or_offx = dx * self._line_arrow_len
or_offy = dy * self._line_arrow_len
arrx_or = pos_or_x + sub_offx + or_offx
arrx_ex = pos_or_x + sub_offx
arry_or = pos_or_y + sub_offy + or_offy
arry_ex = pos_or_y + sub_offy
trace_name = self._line_prefix + self._arrow_prefix + line_name
return go.Scatter(
x=[arrx_or, arrx_ex],
y=[arry_or, arry_ex],
hoverinfo="skip",
showlegend=False,
marker=marker_dict,
name=trace_name,
)
    def draw_powerline(
        self,
        figure,
        observation,
        line_id,
        line_name,
        connected,
        line_value,
        line_unit,
        or_bus,
        pos_or_x,
        pos_or_y,
        ex_bus,
        pos_ex_x,
        pos_ex_y,
    ):
        """Create and add all the plotly traces of one powerline to *figure*:
        caption, colored segment, one bus marker per side, and a flow arrow.
        """
        color_scheme = self.line_color_scheme
        # segment color picked from the scheme according to the line loading (rho)
        capacity = observation.rho[line_id]
        capacity = np.clip(capacity, 0.0, 1.0)
        color = color_scheme[int(capacity * float(len(color_scheme) - 1))]
        if capacity == 0.0:
            # an unloaded (or disconnected) line is drawn in black
            color = "black"
        # dashed segment when the line is disconnected
        line_style = dict(dash=None if connected else "dash", color=color)
        line_text = ""
        if line_value is not None:
            line_text = pltu.format_value_unit(line_value, line_unit)
        trace1 = self._draw_powerline_txt(
            line_name, pos_or_x, pos_or_y, pos_ex_x, pos_ex_y, line_text
        )
        figure.add_trace(trace1)
        trace2 = self._draw_powerline_line(
            line_name, pos_or_x, pos_or_y, pos_ex_x, pos_ex_y, line_style
        )
        figure.add_trace(trace2)
        # unit vector origin -> extremity, used to place both bus markers
        dir_x, dir_y = pltu.norm_from_points(pos_or_x, pos_or_y, pos_ex_x, pos_ex_y)
        trace3 = self._draw_powerline_bus(
            pos_or_x, pos_or_y, dir_x, dir_y, or_bus, line_name, self._or_prefix
        )
        trace4 = self._draw_powerline_bus(
            pos_ex_x, pos_ex_y, -dir_x, -dir_y, ex_bus, line_name, self._ex_prefix
        )
        figure.add_trace(trace3)
        figure.add_trace(trace4)
        # the sign of p_or orients the flow arrow
        watt_sign = observation.p_or[line_id]
        trace5 = self._draw_powerline_arrow(
            pos_or_x, pos_or_y, pos_ex_x, pos_ex_y, watt_sign, line_name, color
        )
        figure.add_trace(trace5)
def update_powerline(
self,
figure,
observation,
line_id,
line_name,
connected,
line_value,
line_unit,
or_bus,
pos_or_x,
pos_or_y,
ex_bus,
pos_ex_x,
pos_ex_y,
):
color_scheme = self.line_color_scheme
capacity = min(observation.rho[line_id], 1.0)
color_idx = int(capacity * (len(color_scheme) - 1))
color = color_scheme[color_idx]
if capacity == 0.0:
color = "black"
if line_value is not None:
line_text = pltu.format_value_unit(line_value, line_unit)
figure.update_traces(text=line_text, selector=dict(name=line_name))
line_style = dict(dash=None if connected else "dash", color=color)
figure.update_traces(
line=line_style, selector=dict(name=self._line_prefix + line_name)
)
or_bus = or_bus if or_bus > 0 else 0
ex_bus = ex_bus if ex_bus > 0 else 0
or_marker = dict(color=self._line_bus_colors[or_bus])
ex_marker = dict(color=self._line_bus_colors[ex_bus])
or_select_name = (
self._line_prefix + self._bus_prefix + self._or_prefix + line_name
)
ex_select_name = (
self._line_prefix + self._bus_prefix + self._ex_prefix + line_name
)
figure.update_traces(marker=or_marker, selector=dict(name=or_select_name))
figure.update_traces(marker=ex_marker, selector=dict(name=ex_select_name))
arrow_select_name = self._line_prefix + self._arrow_prefix + line_name
watt_value = observation.p_or[line_id]
dx, dy = pltu.norm_from_points(pos_or_x, pos_or_y, pos_ex_x, pos_ex_y)
arrow_sym = self._plotly_tri_from_line_dir_and_sign(dx, dy, watt_value)
arrow_display = True if capacity > 0.0 else False
arrow_marker = dict(color=color, symbol=arrow_sym)
figure.update_traces(
marker=arrow_marker,
visible=arrow_display,
selector=dict(name=arrow_select_name),
)
    def draw_legend(self, figure, observation):
        """The plotly renderer does not use a legend: keep it hidden."""
        figure.update_layout(showlegend=False)
    def draw_storage(
        self,
        figure,
        observation,
        storage_name,
        storage_id,
        storage_bus,
        storage_value,
        storage_unit,
        pos_x,
        pos_y,
        sub_x,
        sub_y,
    ):
        """Draw one storage unit on *figure*.

        Not implemented for the plotly backend yet: this is a no-op, storage
        units are simply not displayed.
        """
        # TODO storage doc
        # TODO storage plot
        # TODO update the plotly with storage units
        pass
| 22,193 | 32.424699 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/PlotGrid/PlotUtil.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
class PlotUtil:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

    Static 2D-geometry and value-formatting helpers shared by the plot classes.
    """

    @staticmethod
    def format_value_unit(value, unit):
        """Render *value* followed by *unit* (floats with 2 decimals)."""
        if isinstance(value, float):
            template = "{:.2f} {}"
        elif isinstance(value, int):
            template = "{:d} {}"
        else:
            template = "{} {}"
        return template.format(value, unit)

    @staticmethod
    def middle_from_points(x1, y1, x2, y2):
        """Midpoint of the segment joining (x1, y1) and (x2, y2)."""
        return (x1 + x2) * 0.5, (y1 + y2) * 0.5

    @staticmethod
    def vec_from_points(x1, y1, x2, y2):
        """Vector going from (x1, y1) to (x2, y2)."""
        return x2 - x1, y2 - y1

    @staticmethod
    def mag_from_points(x1, y1, x2, y2):
        """Euclidean distance between the two points."""
        dx, dy = PlotUtil.vec_from_points(x1, y1, x2, y2)
        return np.linalg.norm([dx, dy])

    @staticmethod
    def norm_from_points(x1, y1, x2, y2):
        """Unit vector pointing from (x1, y1) toward (x2, y2)."""
        dx, dy = PlotUtil.vec_from_points(x1, y1, x2, y2)
        return PlotUtil.norm_from_vec(dx, dy)

    @staticmethod
    def norm_from_vec(x, y):
        """Normalize (x, y) to unit length (undefined for the null vector)."""
        magnitude = np.linalg.norm([x, y])
        return x / magnitude, y / magnitude

    @staticmethod
    def orth_from_points(x1, y1, x2, y2):
        """Vector orthogonal (90 deg counter-clockwise) to the points' vector."""
        dx, dy = PlotUtil.vec_from_points(x1, y1, x2, y2)
        return -dy, dx

    @staticmethod
    def orth_norm_from_points(x1, y1, x2, y2):
        """Unit vector orthogonal (90 deg counter-clockwise) to the points' vector."""
        dx, dy = PlotUtil.vec_from_points(x1, y1, x2, y2)
        ux, uy = PlotUtil.norm_from_vec(dx, dy)
        return -uy, ux
| 2,003 | 29.363636 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/PlotGrid/__init__.py | __all__ = [
"NUKE_COLOR",
"THERMAL_COLOR",
"WIND_COLOR",
"SOLAR_COLOR",
"HYDRO_COLOR",
"NUKE_ID",
"THERMAL_ID",
"WIND_ID",
"SOLAR_ID",
"HYDRO_ID",
"TYPE_GEN",
"COLOR_GEN",
"BasePlot",
]
from grid2op.PlotGrid.config import *
from grid2op.PlotGrid.BasePlot import BasePlot
# Conditional exports for optional dependencies
try:
    # matplotlib-based renderer: only exported when matplotlib is installed
    from grid2op.PlotGrid.PlotMatplot import PlotMatplot
    __all__.append("PlotMatplot")
except ImportError:
    pass  # Silent fail because it is optional
try:
    # plotly-based renderer: only exported when plotly is installed
    from grid2op.PlotGrid.PlotPlotly import PlotPlotly
    __all__.append("PlotPlotly")
except ImportError:
    pass  # Silent fail because it is optional
Grid2Op | Grid2Op-master/grid2op/PlotGrid/config.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
# Fill color of the generator markers, per production type (hex RGB)
NUKE_COLOR = "#e5cd00"
THERMAL_COLOR = "#7e52a0"
WIND_COLOR = "#71cdb8"
SOLAR_COLOR = "#d66b0d"
HYDRO_COLOR = "#1f73b5"
# Integer identifier of each production type
NUKE_ID = 0
THERMAL_ID = 1
WIND_ID = 2
SOLAR_ID = 3
HYDRO_ID = 4
# Maps the production type name to its integer identifier
TYPE_GEN = {
    "nuclear": NUKE_ID,
    "thermal": THERMAL_ID,
    "wind": WIND_ID,
    "solar": SOLAR_ID,
    "hydro": HYDRO_ID,
}
# Maps the production type identifier to its drawing color
COLOR_GEN = {
    NUKE_ID: NUKE_COLOR,
    THERMAL_ID: THERMAL_COLOR,
    WIND_ID: WIND_COLOR,
    SOLAR_ID: SOLAR_COLOR,
    HYDRO_ID: HYDRO_COLOR,
}
| 932 | 27.272727 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/__init__.py | __all__ = [
"ConstantReward",
"EconomicReward",
"FlatReward",
"IncreasingFlatReward",
"L2RPNReward",
"RedispReward",
"BridgeReward",
"CloseToOverflowReward",
"DistanceReward",
"GameplayReward",
"LinesReconnectedReward",
"LinesCapacityReward",
"CombinedReward",
"CombinedScaledReward",
"RewardHelper",
"BaseReward",
"EpisodeDurationReward",
"AlarmReward",
"N1Reward",
# TODO it would be better to have a specific package for this, but in the mean time i put it here
"L2RPNSandBoxScore",
"L2RPNWCCI2022ScoreFun",
"AlertReward",
"_AlarmScore",
"_NewRenewableSourcesUsageScore",
"_AssistantConfidenceScore",
"_AssistantCostScore"
]
from grid2op.Reward.constantReward import ConstantReward
from grid2op.Reward.economicReward import EconomicReward
from grid2op.Reward.flatReward import FlatReward
from grid2op.Reward.increasingFlatReward import IncreasingFlatReward
from grid2op.Reward.l2RPNReward import L2RPNReward
from grid2op.Reward.redispReward import RedispReward
from grid2op.Reward.bridgeReward import BridgeReward
from grid2op.Reward.closeToOverflowReward import CloseToOverflowReward
from grid2op.Reward.distanceReward import DistanceReward
from grid2op.Reward.gameplayReward import GameplayReward
from grid2op.Reward.linesReconnectedReward import LinesReconnectedReward
from grid2op.Reward.linesCapacityReward import LinesCapacityReward
from grid2op.Reward.combinedReward import CombinedReward
from grid2op.Reward.combinedScaledReward import CombinedScaledReward
from grid2op.Reward.rewardHelper import RewardHelper
from grid2op.Reward.baseReward import BaseReward
from grid2op.Reward.l2RPNSandBoxScore import L2RPNSandBoxScore
from grid2op.Reward.episodeDurationReward import EpisodeDurationReward
from grid2op.Reward.alarmReward import AlarmReward
from grid2op.Reward._alarmScore import _AlarmScore
from grid2op.Reward.n1Reward import N1Reward
from grid2op.Reward.l2rpn_wcci2022_scorefun import L2RPNWCCI2022ScoreFun
from grid2op.Reward.alertReward import AlertReward
from grid2op.Reward._newRenewableSourcesUsageScore import _NewRenewableSourcesUsageScore
from grid2op.Reward._assistantScore import _AssistantConfidenceScore, _AssistantCostScore
import warnings
class Reward(BaseReward):
    """
    Deprecated alias of :class:`BaseReward`.

    Kept only for backward compatibility: instantiating it emits a
    ``PendingDeprecationWarning``. Use :class:`BaseReward` instead.
    """

    def __init__(self, *args, **kwargs):
        BaseReward.__init__(self, *args, **kwargs)
        warnings.warn(
            'Reward class has been renamed "BaseReward". '
            # bug fix: the message used to say "This class Action" (copy-paste
            # from the Action module); this is the Reward class.
            "This class Reward will be removed in future versions.",
            category=PendingDeprecationWarning,
        )
| 2,610 | 36.84058 | 101 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/_alarmScore.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
import copy
from grid2op.Exceptions import Grid2OpException
from grid2op.Reward import AlarmReward
from grid2op.dtypes import dt_float
class _AlarmScore(AlarmReward):
    """
    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        It **must not** serve as a reward. This score needs to be minimized,
        and a reward needs to be maximized! Also, this "reward" is not scaled or anything. Use it at your
        own risk.

    Implemented as a reward to make it easier to use in the context of the L2RPN competitions, this "reward"
    computes the alarm score. It should not be used to train an agent.

    The "reward" the closest to this score is given by the :class:`AlarmReward` class.

    This reward is based on the "alarm feature" where the agent is asked to send information about potential
    issues on the grid.

    When the environment is in a "game over" state (eg it's the end) then the reward is computed
    the following way:

    - if the environment has been successfully managed until the end of the chronics, then 1.0 is returned
    - if no alarm has been raised, then -2.0 is returned
    - points for pointing to the right zones are computed based on the lines disconnected either in a short window
      before game over or otherwise at the time of game over

    Examples
    ---------
    You can use this reward in any environment with:

    .. code-block:

        import grid2op
        from grid2op.Reward import AlarmReward

        # then you create your environment with it:
        NAME_OF_THE_ENVIRONMENT = "rte_case14_realistic"
        env = grid2op.make(NAME_OF_THE_ENVIRONMENT,reward_class=AlarmScore)
        # and do a step with a "do nothing" action
        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())
        # the reward is computed with the AlarmScore class

    """

    def __init__(self, logger=None):
        AlarmReward.__init__(self, logger=logger)
        # required if you want to design a custom reward taking into account the
        # alarm feature
        self.reward_min = dt_float(-2.0)  # stricter minimum than AlarmReward's -1.0
        # we keep other parameters values from AlarmReward as is
        self.mult_for_right_zone = 1.5
        self.window_disconnection = 4
        # history of env._disc_lines vectors, one entry per elapsed time step
        self.disc_lines_all_before_cascade = []
        self.n_line = None
        # This class remembers the past state of the grid, this does not make sense for the "simulate" env
        # so i deactivate it in this case.
        from grid2op.Environment._ObsEnv import (
            _ObsEnv,
        )  # to avoid circular dependencies

        self._deactivate_reward_cls = (_ObsEnv,)

    def initialize(self, env):
        """Check the alarm feature is available on *env*, then reset the state.

        Raises a Grid2OpException when the environment has no attention budget.
        """
        if not env._has_attention_budget:
            raise Grid2OpException(
                'Impossible to use the "_AlarmScore" with an environment for which this feature '
                'is disabled. Please make sure "env._has_attention_budget" is set to ``True`` or '
                "change the reward class with `grid2op.make(..., reward_class=AnyOtherReward)`"
            )
        self.n_line = env.n_line
        self.reset(env)

    def reset(self, env):
        """Reset the per-episode state (window size and disconnection history)."""
        super().reset(env)
        self.window_disconnection = max(self.best_time - self.window_size, 4)
        self.disc_lines_all_before_cascade = []

    def _lines_disconnected_first(self, disc_lines_at_cascading_time):
        """
        here we detect the disconnected lines that we will consider to compute the `mult_for_zone` multiplying factor.
        Either the lines that were disconnected in a short period before final failure. Otherwise the first lines
        disconnected at the time of failure

        :param disc_lines_at_cascading_time: lines that are disconnected first at time of failure
        :return: vector with a 0 for each line considered disconnected, 1 otherwise
        """
        disc_lines_to_consider_for_score = np.zeros(self.n_line, dtype=bool)
        nb_obs = len(self.disc_lines_all_before_cascade)
        # mark every line that tripped in the last `window_disconnection` steps
        for step in range(nb_obs - self.window_disconnection, nb_obs):
            disc_lines_to_consider_for_score[
                self.disc_lines_all_before_cascade[step] >= 0
            ] = True
        if np.sum(disc_lines_to_consider_for_score) == 0:
            # nothing tripped in the window: fall back to the lines that
            # tripped first at the exact time of the cascading failure
            disc_lines_to_consider_for_score = disc_lines_at_cascading_time == 0
        # if we are there, it is because we have identified before that the failure is due to disconnected powerlines
        assert np.any(disc_lines_to_consider_for_score)
        # we transform the vector so that disconnected lines have a zero, to be coherent with env._disc_lines
        return 1 - disc_lines_to_consider_for_score

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        """Score of the step: bookkeeping while running, actual score at game over."""
        if self.is_simulated_env(env):
            return self.reward_no_game_over
        disc_lines_now = env._disc_lines
        if is_done:
            if not has_error:
                # agent went until the end
                return self.reward_max
            if np.all(env._disc_lines == -1):
                # game over is not caused by the tripping of a powerline
                return self.reward_min
            if len(env._attention_budget._all_successful_alarms) == 0:
                # no alarm have been sent, so it's the minimum
                return self.reward_min
            successfull_alarms = env._attention_budget._all_successful_alarms
            step_game_over = env.nb_time_step
            disc_lines_to_consider_for_score = self._lines_disconnected_first(
                disc_lines_now
            )
            # so now i can consider the alarms: keep the best-scoring one
            best_score, is_alarm_used = self.reward_min, False
            for alarm in successfull_alarms:
                tmp_sc, tmp_is = self._points_for_alarm(
                    *alarm,
                    step_game_over=step_game_over,
                    disc_lines=disc_lines_to_consider_for_score,
                    env=env
                )
                if tmp_sc > best_score:
                    best_score = tmp_sc
                    is_alarm_used = tmp_is
            self.is_alarm_used = is_alarm_used
            return best_score
        else:
            # make sure to deepcopy, otherwise it gets updated with the last timestep value for every previous timesteps
            # we log the line disconnected over time
            # TODO have a cache there and store only the last few states, most of what is stored here is not used
            self.disc_lines_all_before_cascade.append(copy.deepcopy(disc_lines_now))
            res = self.reward_no_game_over
        return res
| 7,115 | 41.86747 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/_assistantScore.py | # Copyright (c) 2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Reward.baseReward import BaseReward
class _AssistantConfidenceScore(BaseReward):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        It **must not** serve as a reward. This score needs to be **MINIMIZED**,
        and a reward needs to be maximized! Also, this "reward" is not scaled or anything. Use it at your
        own risk.

    Implemented as a reward to make it easier to use in the context of the L2RPN competitions, this "reward"
    computes the assistant "confidence score", which evaluates how confident an agent was in its actions for
    handling unforeseen line disconnection events prior to them occurring.

    It should not be used to train an agent.
    """
    # NOTE(review): this class is an unimplemented stub:
    # - `__initialize__` is not a dunder Python ever calls, nor the
    #   `initialize(self, env)` hook the other reward classes of this package
    #   override -- confirm the intended name.
    # - it calls `self.reset(env)` whereas `reset` below takes no argument: it
    #   would raise a TypeError if ever invoked.
    # - `__call__(self, env, obs, is_done)` does not match the
    #   `(action, env, has_error, is_done, is_illegal, is_ambiguous)` signature
    #   used by the other rewards here -- verify before wiring into an env.
    def __init__(self, logger=None):
        BaseReward.__init__(self, logger=logger)
    def __initialize__(self, env):
        self.reset(env)
    def reset(self):
        pass
    def __call__(self, env, obs, is_done):
        pass
class _AssistantCostScore(BaseReward):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        It **must not** serve as a reward. This score needs to be **MINIMIZED**,
        and a reward needs to be maximized! Also, this "reward" is not scaled or anything. Use it at your
        own risk.

    Implemented as a reward to make it easier to use in the context of the L2RPN competitions, this "reward"
    computes the assistant "cost score", which penalizes the number of alarms the assistant has produced.

    It should not be used to train an agent.
    """
    # NOTE(review): unimplemented stub. `__initialize__` is neither a real
    # dunder nor the `initialize(self, env)` hook used by the other reward
    # classes of this package, and it calls `self.reset(env)` while `reset`
    # accepts no argument (would raise TypeError if invoked) -- confirm intent.
    def __init__(self, logger=None):
        BaseReward.__init__(self, logger=logger)
    def __initialize__(self, env):
        self.reset(env)
    def reset(self):
        pass
def __call__(self, env, obs, is_done):
pass | 2,421 | 36.261538 | 117 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/_newRenewableSourcesUsageScore.py | # Copyright (c) 2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class _NewRenewableSourcesUsageScore(BaseReward):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        It **must not** serve as a reward. This score needs to be **MAXIMIZED**,
        and a reward needs to be maximized! Also, this "reward" is not scaled or anything. Use it at your
        own risk.

    Implemented as a reward to make it easier to use in the context of the L2RPN competitions, this "reward"
    computes the "low carbon score", meaning here how much of the new renewable energy sources capacities have
    been called. It should not be used to train an agent.

    It has been designed to map the usage ratio domain [50, 100] (in percent) to output values in [-1, 1].
    """

    def __init__(self, logger=None):
        BaseReward.__init__(self, logger=logger)
        self.reward_min = dt_float(-1.0)
        self.reward_max = dt_float(1.0)
        # per-time-step records, filled during the episode, read at game over
        self.gen_res_p_curtailed_list = None
        self.gen_res_p_before_curtail_list = None
        self._is_simul_env = False

    def initialize(self, env):
        self.reset(env)

    def reset(self, env):
        """Allocate the per-step records for a new episode (no-op when simulated)."""
        self._is_simul_env = is_simulated_env(env)
        if self._is_simul_env:
            return
        # index 0 is never written (steps are 1-based via env.nb_time_step)
        self.gen_res_p_curtailed_list = np.zeros(env.chronics_handler.max_timestep() + 1)
        self.gen_res_p_before_curtail_list = np.zeros(env.chronics_handler.max_timestep() + 1)

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        """Record renewable usage each step; return the final score at episode end."""
        if self._is_simul_env:
            return dt_float(0.)
        if not is_done:
            gen_nres_p_effective, gen_nres_p_before_curtail = self._get_total_nres_usage(env)
            self.gen_res_p_curtailed_list[env.nb_time_step] = gen_nres_p_effective
            self.gen_res_p_before_curtail_list[env.nb_time_step] = gen_nres_p_before_curtail
            return dt_float(0.)
        else:
            # percentage of the available renewable energy actually produced
            ratio_nres_usage = 100 * np.sum(self.gen_res_p_curtailed_list[1:]) / np.sum(self.gen_res_p_before_curtail_list[1:])
            return self._surlinear_func_curtailment(ratio_nres_usage)

    @staticmethod
    def _get_total_nres_usage(env):
        """Total renewable production: effective and before curtailment (MW)."""
        nres_mask = env.gen_renewable
        gen_p, *_ = env.backend.generators_info()
        gen_nres_p_before_curtail = np.sum(env._gen_before_curtailment[nres_mask])
        gen_nres_p_effective = np.sum(gen_p[nres_mask])
        return gen_nres_p_effective, gen_nres_p_before_curtail

    @staticmethod
    def _surlinear_func_curtailment(x, center=80, eps=1e-6):
        """Map a usage percentage *x* to a score: 0 at *center* (80%),
        1 at 100% and -1 at 50%, growing super-linearly (x*log(x))."""
        x = np.fmax(x, eps)  # to avoid log(0)...
        f_surlinear = lambda x: x * np.log(x)
        f_centralized = lambda x : f_surlinear(x) - f_surlinear(center)
        # normalize by the value at 100 (above center) or at 50 (below center)
        f_standardizer= lambda x : np.ones_like(x) * f_centralized(100) * (x >= center) - np.ones_like(x) * f_centralized(50) * (x < center)
        return f_centralized(x) / f_standardizer(x)
#to wait before PR Laure
def is_simulated_env(env):
    """Tell whether *env* is one of the internal "simulation" environments."""
    # local imports to prevent a cyclic import at module load time
    from grid2op.Environment._ObsEnv import _ObsEnv
    from grid2op.Environment._forecast_env import _ForecastEnv

    # rewards relying on the episode history are meaningless in obs.simulate
    simulated_types = (_ObsEnv, _ForecastEnv)
    return isinstance(env, simulated_types)
| 3,877 | 41.615385 | 140 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/alarmReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Exceptions import Grid2OpException
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class AlarmReward(BaseReward):
"""
This reward is based on the "alarm feature" where the agent is asked to send information about potential issue
on the grid.
On this case, when the environment is in a "game over" state (eg it's the end) then the reward is computed
the following way:
- if the environment has been successfully manage until the end of the chronics, then 1.0 is returned
- if no alarm has been raised, then -1.0 is return
Examples
---------
You can use this reward in any environment with:
.. code-block:
import grid2op
from grid2op.Reward import AlarmReward
# then you create your environment with it:
NAME_OF_THE_ENVIRONMENT = "rte_case14_realistic"
env = grid2op.make(NAME_OF_THE_ENVIRONMENT,reward_class=AlarmReward)
# and do a step with a "do nothing" action
obs = env.reset()
obs, reward, done, info = env.step(env.action_space())
# the reward is computed with the AlarmReward class
"""
    def __init__(self, logger=None):
        """Set the score bounds and the alarm-related bookkeeping attributes."""
        BaseReward.__init__(self, logger=logger)
        # required if you want to design a custom reward taking into account the
        # alarm feature
        self.has_alarm_component = True
        self.is_alarm_used = False  # required to update it in __call__ !!
        self.total_time_steps = dt_float(0.0)
        # score bounds: -1.0 (no/bad alarm) up to +1.0 (episode fully completed)
        self.reward_min = dt_float(-1.0)
        self.reward_max = dt_float(1.0)
        self.reward_no_game_over = dt_float(0.0)
        # temporal window parameters, filled from env.parameters in reset()
        self.window_size = None
        self.best_time = None
        # bonus multiplier when the alarm points at the right zone
        self.mult_for_right_zone = 2
    def initialize(self, env):
        """Check that *env* supports the alarm feature, then set up the reward.

        Raises a Grid2OpException when the environment was built without an
        attention budget (``env._has_attention_budget`` is ``False``).
        """
        if not env._has_attention_budget:
            raise Grid2OpException(
                'Impossible to use the "AlarmReward" with an environment for which this feature '
                'is disabled. Please make sure "env._has_attention_budget" is set to ``True`` or '
                "change the reward class with `grid2op.make(..., reward_class=AnyOtherReward)`"
            )
        self.reset(env)
def reset(self, env):
self.total_time_steps = env.max_episode_duration()
self.best_time = env.parameters.ALARM_BEST_TIME
self.window_size = env.parameters.ALARM_WINDOW_SIZE
def _tmp_score_time(self, step_alarm, step_game_over):
"""
compute the "temporal" score.
Should give a number between 0 and 1
"""
if step_game_over - step_alarm > self.best_time + self.window_size:
# alarm too soon
res = 0
elif step_game_over - step_alarm < self.best_time - self.window_size:
# alarm too late
res = 0
else:
# square function such that: it gives 1 if step_game_over - step_alarm equals self.best_time
# and 0 if step_game_over - step_alarm = self.best_time + self.window_size or
# if step_game_over - step_alarm self.best_time - self.window_size
dist_to_game_over = step_game_over - step_alarm
dist_to_best = dist_to_game_over - self.best_time
# set it to 0 for the extreme case
polynom = (dist_to_best - self.window_size) * (
dist_to_best + self.window_size
)
# scale it such that it is 1 for dist_to_best == 0 (ie step_game_over - step_alarm == self.best_time)
res = -polynom / self.window_size**2
return res
def _mult_for_zone(self, alarm, disc_lines, env):
"""compute the multiplicative factor that increases the score if the right zone is predicted"""
res = 1.0
# extract the lines that have been disconnected due to cascading failures
lines_disconnected_first = np.where(disc_lines == 0)[0]
if (
np.sum(alarm) > 1
): # if we have more than one zone in the alarm, we cannot discrtiminate, no bonus points
return res
# extract the zones they belong too
zones_these_lines = set()
zone_for_each_lines = env.alarms_lines_area
for line_id in lines_disconnected_first:
line_name = env.name_line[line_id]
for zone_name in zone_for_each_lines[line_name]:
zones_these_lines.add(zone_name)
# now retrieve the id of the zones in which a powerline has been disconnected
list_zone_names = list(zones_these_lines)
list_zone_ids = np.where(np.isin(env.alarms_area_names, list_zone_names))[0]
# and finally, award some extra points if one of the zone, containing one of the powerline disconnected
# by protection is in the alarm
if np.any(alarm[list_zone_ids]):
res *= self.mult_for_right_zone
return res
def _points_for_alarm(self, step_alarm, alarm, step_game_over, disc_lines, env):
"""how much points are given for this specific alarm"""
is_alarm_used = False
score = self.reward_min
score_for_time = self._tmp_score_time(step_alarm, step_game_over)
if score_for_time != 0:
is_alarm_used = True # alarm is in the right time window
score = score_for_time
score *= (
self._mult_for_zone(alarm, disc_lines, env) / self.mult_for_right_zone
)
return score, is_alarm_used
    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        """
        Compute the alarm reward at the end of the episode.

        Returns ``reward_max`` when the scenario is completed, the best score
        over all successful alarms when a powerline-induced game over occurs,
        and ``reward_no_game_over`` (0.) on every intermediate step.
        Also updates :attr:`is_alarm_used` as required by the alarm feature.
        """
        if is_done:
            if not has_error:
                # agent went until the end
                return self.reward_max
            if np.all(env._disc_lines == -1):
                # game over is not caused by the tripping of a powerline
                return self.reward_min
            if len(env._attention_budget._all_successful_alarms) == 0:
                # no alarm have been sent, so it's the minimum
                return self.reward_min

            successfull_alarms = env._attention_budget._all_successful_alarms
            step_game_over = env.nb_time_step
            disc_lines = env._disc_lines

            # so now i can consider the alarms.
            # each successful alarm is scored independently; keep the best one
            best_score, is_alarm_used = self.reward_min, False
            for alarm in successfull_alarms:
                # `alarm` unpacks into (step at which it was raised, alarmed zones)
                tmp_sc, tmp_is = self._points_for_alarm(
                    *alarm,
                    step_game_over=step_game_over,
                    disc_lines=disc_lines,
                    env=env
                )
                if tmp_sc > best_score:
                    best_score = tmp_sc
                    is_alarm_used = tmp_is
            self.is_alarm_used = is_alarm_used
            return best_score
        else:
            # episode not over: neutral reward
            res = self.reward_no_game_over
            return res
| 7,328 | 39.04918 | 114 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/alertReward.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float, dt_bool, dt_int
class AlertReward(BaseReward):
    """
    .. note::
        DOC IN PROGRESS !

    This reward is based on the "alert feature" where the agent is asked to send information about potential line overload issue
    on the grid after unpredictable powerline disconnection (attack of the opponent). The alerts are assessed once per attack.

    This rewards is computed as followed:

    - if an attack occurs and the agent survives `env.parameters.ALERT_TIME_WINDOW` steps then:

      - if the agent sent an alert BEFORE the attack, reward returns `reward_min_no_blackout` (-1 by default)
      - if the agent did not sent an alert BEFORE the attack, reward returns `reward_max_no_blackout` (1 by default)

    - if an attack occurs and the agent "games over" withing `env.parameters.ALERT_TIME_WINDOW` steps then:

      - if the agent sent an alert BEFORE the attack, reward returns `reward_max_blackout` (2 by default)
      - if the agent did not sent an alert BEFORE the attack, reward returns `reward_min_blackout` (-10 by default)

    - whatever the attacks / no attacks / alert / no alert, if the scenario is completed until the end,
      then agent receive `reward_end_episode_bonus` (1 by default)

    In all other cases, including but not limited to:

    - agent games over but there has been no attack within the previous `env.parameters.ALERT_TIME_WINDOW` (12) steps
    - there is no attack

    The reward outputs 0.

    This is then a "delayed reward": you receive the reward (in general) `env.parameters.ALERT_TIME_WINDOW` after
    having sent the alert.

    This is also a "sparse reward": in the vast majority of cases it's 0. It is only non zero in case of blackout (at
    most once per episode) and each time an attack occurs (and in general there is relatively few attacks)

    TODO explain a bit more in the "multi lines attacked"

    .. seealso:: :ref:`grid2op-alert-module` section of the doc for more information

    Examples
    ---------
    You can use this reward in any environment with:

    .. code-block:

        import grid2op
        from grid2op.Reward import AlertReward

        # then you create your environment with it:
        # at time of writing, the only env supporting it is "l2rpn_idf_2023"
        NAME_OF_THE_ENVIRONMENT = "l2rpn_idf_2023"
        env = grid2op.make(NAME_OF_THE_ENVIRONMENT, reward_class=AlertReward)

        # and do a step with a "do nothing" action
        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())
        # the reward is computed with the AlertReward class

    """

    def __init__(self,
                 logger=None,
                 reward_min_no_blackout=-1.0,
                 reward_min_blackout=-10.0,
                 reward_max_no_blackout=1.0,
                 reward_max_blackout=2.0,
                 reward_end_episode_bonus=1.0):
        BaseReward.__init__(self, logger=logger)
        # scores awarded in the different (attack, alert, blackout) configurations
        self.reward_min_no_blackout = dt_float(reward_min_no_blackout)
        self.reward_min_blackout = dt_float(reward_min_blackout)
        self.reward_max_no_blackout = dt_float(reward_max_no_blackout)
        self.reward_max_blackout = dt_float(reward_max_blackout)
        self.reward_end_episode_bonus = dt_float(reward_end_episode_bonus)
        self.reward_no_game_over = dt_float(0.0)
        # width of the score interval used in the blackout case
        self._reward_range_blackout = (self.reward_max_blackout - self.reward_min_blackout)

        self.total_time_steps = dt_int(0.0)  # episode length, set in `initialize`
        self.time_window = None  # env.parameters.ALERT_TIME_WINDOW, set in `initialize`

        # circular buffers (one row per "recent" step, one column per alertable line):
        # `_ts_attack` flags the steps at which each line came under attack,
        # `_alert_launched` flags the alerts raised by the agent at each step
        self._ts_attack : np.ndarray = None
        self._current_id : int = 0  # row of the circular buffers for the current step
        self._lines_currently_attacked : np.ndarray = None  # lines under attack right now
        self._alert_launched : np.ndarray = None
        self._nrows_array : int = None  # number of rows of the buffers (time_window + 2)
        self._i_am_simulate : bool = False  # True when attached to a "simulated" env

    def initialize(self, env: "grid2op.Environment.BaseEnv"):
        """Size the internal circular buffers from the environment parameters."""
        self.total_time_steps = env.max_episode_duration()
        self.time_window = env.parameters.ALERT_TIME_WINDOW
        self._nrows_array = self.time_window + 2

        # TODO simulate env stuff !
        # TODO vectors proper size
        self._ts_attack = np.full((self._nrows_array, type(env).dim_alerts), False, dtype=dt_bool)
        self._alert_launched = np.full((self._nrows_array, type(env).dim_alerts), False, dtype=dt_bool)
        self._current_id = 0
        self._lines_currently_attacked = np.full(type(env).dim_alerts, False, dtype=dt_bool)

        self._i_am_simulate = self.is_simulated_env(env)
        return super().initialize(env)

    def _update_attack(self, env):
        """Record in the current buffer row the lines *newly* attacked at this step."""
        if env.infos["opponent_attack_line"] is None:
            # no attack at this step
            self._lines_currently_attacked[:] = False
            self._ts_attack[self._current_id, :] = False
        else:
            # an attack at this step
            lines_attacked = env.infos["opponent_attack_line"][type(env).alertable_line_ids]
            # compute the list of lines that are "newly" attacked
            new_lines_attacked = lines_attacked & (~self._lines_currently_attacked)
            # remember the steps where these lines are attacked
            self._ts_attack[self._current_id, new_lines_attacked] = True
            # and now update the state of lines under attack
            self._lines_currently_attacked[:] = False
            self._lines_currently_attacked[lines_attacked] = True

    def _update_alert(self, action):
        """Record in the current buffer row the alerts raised by `action`."""
        self._alert_launched[self._current_id, :] = 1 * action.raise_alert

    def _update_state(self, env, action):
        """Advance the circular buffers by one step and record attack / alert data."""
        self._current_id += 1
        self._current_id %= self._nrows_array

        # update attack
        self._update_attack(env)

        # update alerts
        self._update_alert(action)

        # update internal state of the environment
        # (this is updated in case the reward returns non 0)
        env._was_alert_used_after_attack[:] = 0

    def _compute_score_attack_blackout(self, env, ts_attack_in_order, indexes_to_look):
        """Score when a blackout occurs with attack(s) inside the time window."""
        # retrieve the lines that have been attacked in the time window
        ts_ind, line_ind = np.where(ts_attack_in_order)
        # keep, for each attacked line, only its *first* attack in the window
        line_first_attack, first_ind_line_attacked = np.unique(line_ind, return_index=True)
        ts_first_line_attacked = ts_ind[first_ind_line_attacked]
        # now retrieve the array starting at the correct place
        ts_first_line_attacked_orig = indexes_to_look[ts_first_line_attacked]
        # and now look at the previous step if alerts were send
        # prev_ts = (ts_first_line_attacked_orig - 1) % self._nrows_array
        prev_ts = ts_first_line_attacked_orig
        # update the state of the environment
        env._was_alert_used_after_attack[line_first_attack] = self._alert_launched[prev_ts, line_first_attack] * 2 - 1
        # mean over attacked lines, mapped linearly onto [reward_min_blackout, reward_max_blackout]
        return np.mean(self._alert_launched[prev_ts, line_first_attack]) * self._reward_range_blackout + self.reward_min_blackout

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        """Compute the (delayed, sparse) alert reward for this step."""
        # retrieve the alert made by the agent
        res = 0.

        if self._i_am_simulate:
            # does not make sense for simulate
            return res

        if is_done & (not has_error):
            # end of episode, no blackout => reward specific for this case
            return self.reward_end_episode_bonus

        self._update_state(env, action)

        if self.is_in_blackout(has_error, is_done):
            # I am in blackout, I need to check for attack in the time window
            # if there is no attack, I do nothing
            indexes_to_look = (np.arange(-self.time_window, 1) + self._current_id) % self._nrows_array  # include current step (hence the np.arange(..., **1**))
            ts_attack_in_order = self._ts_attack[indexes_to_look, :]
            has_attack = np.any(ts_attack_in_order)
            if has_attack:
                # I need to check the alarm for the attacked lines
                res = self._compute_score_attack_blackout(env, ts_attack_in_order, indexes_to_look)
        else:
            # no blackout: i check the first step in the window before me to see if there is an attack,
            index_window = (self._current_id - self.time_window) % self._nrows_array
            lines_attack = self._ts_attack[index_window, :]
            if np.any(lines_attack):
                # prev_ind = (index_window - 1) % self._nrows_array
                # I don't need the "-1" because the action is already BEFORE the observation in the reward.
                prev_ind = index_window
                alert_send = self._alert_launched[prev_ind, lines_attack]
                # update the state of the environment
                env._was_alert_used_after_attack[lines_attack] = 1 - alert_send * 2
                res = (self.reward_min_no_blackout - self.reward_max_no_blackout) * np.mean(alert_send) + self.reward_max_no_blackout
                self._ts_attack[index_window, :] = False  # attack has been taken into account we "cancel" it
        return res
| 9,721 | 47.128713 | 160 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/baseReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import logging
from abc import ABC, abstractmethod
from grid2op.dtypes import dt_float
class BaseReward(ABC):
    """
    Base class from which all rewards used in the Grid2Op framework should derive.

    In reinforcement learning, a reward is the signal sent by the
    :class:`grid2op.Environment.Environment` to the :class:`grid2op.BaseAgent`
    indicating how well the agent performs; one of the goals of reinforcement
    learning is to maximize the (discounted) sum of (expected) rewards over time.

    To create a custom reward, subclass this class, set
    :attr:`BaseReward.reward_min` / :attr:`BaseReward.reward_max` (typically in
    :func:`BaseReward.initialize`) and implement :func:`BaseReward.__call__`,
    then pass the class to `grid2op.make(..., reward_class=YourReward)`.

    Attributes
    ----------
    reward_min: ``float``
        The minimum reward an :class:`grid2op.BaseAgent` can get performing the worst possible
        :class:`grid2op.Action.BaseAction` in the worst possible scenario.

    reward_max: ``float``
        The maximum reward an :class:`grid2op.Agent.BaseAgent` can get performing the best possible
        :class:`grid2op.Action.BaseAction` in the best possible scenario.

    Notes
    -----
    If the flag `has_error` given to :func:`BaseReward.__call__` is ``True``,
    an error happened during "env.step". In that case, do not rely on methods
    of the environment such as `env.get_obs()`: their behaviour is undefined
    (see https://github.com/rte-france/Grid2Op/issues/146 for the rationale
    behind this convention).
    """

    def __init__(self, logger: logging.Logger=None):
        """Initialize :attr:`BaseReward.reward_min` and :attr:`BaseReward.reward_max` (both 0.)."""
        self.reward_min = dt_float(0.0)
        self.reward_max = dt_float(0.0)
        if logger is not None:
            # named child of the provided logger, one per concrete reward class
            self.logger: logging.Logger = logger.getChild(f"{type(self).__name__}")
        else:
            # no logger given: keep a disabled one so `self.logger` is always safe to call
            self.logger = logging.getLogger(__name__)
            self.logger.disabled = True

    def is_simulated_env(self, env):
        """Return ``True`` when `env` is one of the "simulated" environments (obs.simulate / forecast env)."""
        # local imports to prevent a cyclical import at module load time
        from grid2op.Environment._ObsEnv import _ObsEnv
        from grid2op.Environment._forecast_env import _ForecastEnv

        return isinstance(env, (_ObsEnv, _ForecastEnv))

    def initialize(self, env):
        """
        Initialize the reward from a properly built environment.

        Override this when :attr:`BaseReward.reward_min`, :attr:`BaseReward.reward_max`
        or other custom attributes require a valid
        :class:`grid2op.Environment.Environment` to be computed.

        **NB** `reward_min` and `reward_max` are used by the environment to build the
        "reward_range" exposed through the openAI gym public interface; leaving them
        undefined can break some pieces of code.

        Parameters
        ----------
        env: :class:`grid2op.Environment.Environment`
            An environment instance properly initialized.
        """
        pass

    def reset(self, env):
        """
        Called each time `env` is reset.

        Useful, for example, when the reward depends on the length of the current
        chronics. Does nothing by default.

        Parameters
        ----------
        env: :class:`grid2op.Environment.Environment`
            The current environment

        .. danger::
            Do not modify self.reward_min nor self.reward_max here!
            It might make learning really hard for the agent.
        """
        pass

    @abstractmethod
    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        """
        Compute the reward of the step that just happened.

        Parameters
        ----------
        action: :class:`grid2op.Action.Action`
            BaseAction that has been submitted by the :class:`grid2op.BaseAgent`

        env: :class:`grid2op.Environment.Environment`
            An environment instance properly initialized.

        has_error: ``bool``
            Has there been an error, for example a :class:`grid2op.DivergingPowerFlow`,
            when the action has been implemented in the environment.

        is_done: ``bool``
            Is the episode over (either because the agent has reached the end, or
            because there has been a game over)

        is_illegal: ``bool``
            Has the action submitted by the BaseAgent raised an
            :class:`grid2op.Exceptions.IllegalAction` exception (in which case it was
            replaced by "do nothing" by the environment). **NB** an illegal action is
            NOT an ambiguous action, see :ref:`Illegal-vs-Ambiguous`.

        is_ambiguous: ``bool``
            Has the action submitted by the BaseAgent raised an
            :class:`grid2op.Exceptions.AmbiguousAction` exception (in which case it was
            replaced by "do nothing" by the environment). See :ref:`Illegal-vs-Ambiguous`.

        Returns
        -------
        res: ``float``
            The reward associated to the input parameters.

        Notes
        -----
        `has_error` ``True`` implies a game over, hence ``is_done`` ``True``.
        ``is_done`` ``True`` with `has_error` ``False`` means the agent successfully
        managed the whole scenario until the end of the episode.
        """
        return self.reward_min

    def get_range(self):
        """
        Return both the minimum and maximum possible rewards in one command.

        It is not recommended to override this function.

        Returns
        -------
        reward_min: ``float``
            See :attr:`BaseReward.reward_min`
        reward_max: ``float``
            See :attr:`BaseReward.reward_max`
        """
        return self.reward_min, self.reward_max

    def set_range(self, reward_min, reward_max):
        """
        Set :attr:`BaseReward.reward_min` and :attr:`BaseReward.reward_max`.

        It is not recommended to override this function.

        Parameters
        ----------
        reward_min: ``float``
            See :attr:`BaseReward.reward_min`
        reward_max: ``float``
            See :attr:`BaseReward.reward_max`
        """
        self.reward_min = reward_min
        self.reward_max = reward_max

    def __iter__(self):
        """
        Python iterable giving a dict summary via `summary = dict(reward_instance)`.

        Can be overloaded by subclasses; the default gives name, reward_min, reward_max.
        """
        yield "name", type(self).__name__
        yield "reward_min", float(self.reward_min)
        yield "reward_max", float(self.reward_max)

    def close(self):
        """Override this for rewards that need a specific clean-up behaviour."""
        pass

    def is_in_blackout(self, has_error, is_done):
        """Return ``True`` when the episode ended because of a game over (blackout)."""
        return is_done and has_error
| 10,409 | 38.581749 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/bridgeReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
import networkx as nx
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class BridgeReward(BaseReward):
    """
    This reward computes a penalty based on how many bridges are present in the grid network.
    In graph theory, a bridge is an edge that if removed will cause the graph to be disconnected.

    Examples
    ---------
    You can use this reward in any environment with:

    .. code-block:

        import grid2op
        from grid2op.Reward import BridgeReward

        # then you create your environment with it:
        NAME_OF_THE_ENVIRONMENT = "rte_case14_realistic"
        env = grid2op.make(NAME_OF_THE_ENVIRONMENT,reward_class=BridgeReward)
        # and do a step with a "do nothing" action
        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())
        # the reward is computed with this class (computing the penalty based on the number of "bridges" in the grid)

    """

    def __init__(self, min_pen_lte=0.0, max_pen_gte=1.0, logger=None):
        """
        Parameters
        ----------
        min_pen_lte: ``float``
            Number of bridges at (or below) which the maximum reward is given.
        max_pen_gte: ``float``
            Number of bridges at (or above) which the minimum reward is given.
        """
        BaseReward.__init__(self, logger=logger)
        self.reward_min = dt_float(0.0)
        self.reward_max = dt_float(1.0)
        self.min_pen_lte = dt_float(min_pen_lte)
        self.max_pen_gte = dt_float(max_pen_gte)

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        if has_error or is_illegal or is_ambiguous:
            return self.reward_min

        # Get info from env
        obs = env.current_obs
        n_sub = obs.n_sub
        n_line = obs.n_line
        topo = obs.topo_vect
        or_topo = obs.line_or_pos_topo_vect
        ex_topo = obs.line_ex_pos_topo_vect
        or_sub = obs.line_or_to_subid
        ex_sub = obs.line_ex_to_subid

        # Create a graph of vertices
        # Use one vertex per substation per bus
        G = nx.Graph()

        # Set lines edges for current bus
        for line_idx in range(n_line):
            # Skip if line is disconnected.
            # BUGFIX: was `obs.line_status[line_idx] is False`, which compares
            # *identity* with the Python singleton `False` and is never true for
            # a numpy bool_; disconnected lines were only filtered by the bus
            # check below. Use truthiness instead.
            if not obs.line_status[line_idx]:
                continue
            # Get substation index for current line
            lor_sub = or_sub[line_idx]
            lex_sub = ex_sub[line_idx]
            # Get the buses for current line
            lor_bus = topo[or_topo[line_idx]]
            lex_bus = topo[ex_topo[line_idx]]

            if lor_bus <= 0 or lex_bus <= 0:
                # at least one extremity is not connected to a valid bus
                continue

            # Compute edge vertices indices for current graph
            left_v = lor_sub + (lor_bus - 1) * n_sub
            right_v = lex_sub + (lex_bus - 1) * n_sub

            # Register edge in graph
            G.add_edge(left_v, right_v)

        # Count the bridges, clip the count into [min_pen_lte, max_pen_gte] and map
        # it linearly onto [reward_max, reward_min]: more bridges => lower reward
        n_bridges = dt_float(len(list(nx.bridges(G))))

        # Clip to min penalty
        n_bridges = max(n_bridges, self.min_pen_lte)
        # Clip to max penalty
        n_bridges = min(n_bridges, self.max_pen_gte)
        r = np.interp(
            n_bridges,
            [self.min_pen_lte, self.max_pen_gte],
            [self.reward_max, self.reward_min],
        )
        return dt_float(r)
| 3,574 | 34.04902 | 117 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/closeToOverflowReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class CloseToOverflowReward(BaseReward):
    """
    This reward penalizes the agent for powerlines that are close to an overflow.

    It equals :attr:`reward_max` when no line is close to its thermal limit and
    decreases linearly with the number of "close to overflow" lines, down to
    :attr:`reward_min` once ``max_lines`` (or more) lines are in that state.

    Examples
    ---------
    You can use this reward in any environment with:

    .. code-block:

        import grid2op
        from grid2op.Reward import CloseToOverflowReward

        # then you create your environment with it:
        NAME_OF_THE_ENVIRONMENT = "rte_case14_realistic"
        env = grid2op.make(NAME_OF_THE_ENVIRONMENT,reward_class=CloseToOverflowReward)
        # and do a step with a "do nothing" action
        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())
        # the reward is computed with this class (computing the penalty based on the number of overflow)

    """

    def __init__(self, max_lines=5, logger=None):
        BaseReward.__init__(self, logger=logger)
        self.reward_min = dt_float(0.0)
        self.reward_max = dt_float(1.0)
        # number of "close to overflow" lines at which the reward saturates at its minimum
        self.max_overflowed = dt_float(max_lines)

    def initialize(self, env):
        pass

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        if has_error or is_illegal or is_ambiguous:
            return self.reward_min

        thermal_limits = env.backend.get_thermal_limit()
        usage_ratios = env.current_obs.rho

        # "small" lines (thermal limit below 400) count as close to overflow from
        # 95% of their capacity, the others only from 97.5%
        nb_close = dt_float(0.0)
        for usage, limit in zip(usage_ratios, thermal_limits):
            if usage >= 0.975 or (usage >= 0.95 and limit < 400.00):
                nb_close += dt_float(1.0)

        nb_close = np.clip(nb_close, dt_float(0.0), self.max_overflowed)
        # linear mapping: 0 close line -> reward_max, max_overflowed -> reward_min
        return np.interp(
            nb_close,
            [dt_float(0.0), self.max_overflowed],
            [self.reward_max, self.reward_min],
        )
| 2,612 | 36.869565 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/combinedReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class CombinedReward(BaseReward):
    """
    This class allows to combine multiple pre defined rewards: the value it
    computes is the weighted sum of all the sub rewards it is made of, each sub
    reward being registered under a (unique) key.

    It is used a bit differently than the other rewards, see the example below.

    Examples
    --------
    .. code-block:: python

        import grid2op
        from grid2op.Reward import GameplayReward, FlatReward, CombinedReward

        env = grid2op.make(..., reward_class=CombinedReward)
        cr = self.env.get_reward_instance()
        cr.addReward("Gameplay", GameplayReward(), 1.0)
        cr.addReward("Flat", FlatReward(), 1.0)
        cr.initialize(self.env)

        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())

        # reward here is computed by summing the results of what would have
        # given `GameplayReward` and the one from `FlatReward`

    """

    def __init__(self, logger=None):
        BaseReward.__init__(self, logger=logger)
        self.reward_min = dt_float(0.0)
        self.reward_max = dt_float(0.0)
        # mapping: name -> {"instance": sub reward, "weight": its weight in the sum}
        self.rewards = {}

    def addReward(self, reward_name, reward_instance, reward_weight=1.0):
        """Register (or replace) a sub reward under `reward_name`. Always returns ``True``."""
        self.rewards[reward_name] = {
            "instance": reward_instance,
            "weight": dt_float(reward_weight),
        }
        return True

    def removeReward(self, reward_name):
        """Unregister the sub reward `reward_name`; ``True`` if it was present."""
        if reward_name not in self.rewards:
            return False
        del self.rewards[reward_name]
        return True

    def updateRewardWeight(self, reward_name, reward_weight):
        """Change the weight of the sub reward `reward_name`; ``True`` if it was present."""
        if reward_name not in self.rewards:
            return False
        self.rewards[reward_name]["weight"] = reward_weight
        return True

    def __iter__(self):
        # summary of the combination itself, then one entry per sub reward
        yield from super().__iter__()
        for sub_name, sub in self.rewards.items():
            sub_summary = dict(sub["instance"])
            sub_summary["weight"] = float(sub["weight"])
            yield (sub_name, sub_summary)

    def initialize(self, env):
        """Initialize every sub reward and aggregate their (weighted) ranges."""
        self.reward_min = dt_float(0.0)
        self.reward_max = dt_float(0.0)
        for sub in self.rewards.values():
            sub_instance = sub["instance"]
            sub_weight = sub["weight"]
            sub_instance.initialize(env)
            self.reward_max += dt_float(sub_instance.reward_max * sub_weight)
            self.reward_min += dt_float(sub_instance.reward_min * sub_weight)
        env.reward_range = self.get_range()

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        total = dt_float(0.0)
        # weighted sum of every registered sub reward
        for sub in self.rewards.values():
            partial = sub["instance"](
                action, env, has_error, is_done, is_illegal, is_ambiguous
            )
            total += dt_float(partial) * dt_float(sub["weight"])
        return total

    def close(self):
        for sub in self.rewards.values():
            sub["instance"].close()
| 3,715 | 34.390476 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/combinedScaledReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Reward.combinedReward import CombinedReward
from grid2op.dtypes import dt_float
class CombinedScaledReward(CombinedReward):
    """
    This class combines multiple rewards and returns a *scaled* version of their
    weighted sum: the sum is linearly interpolated from [min_sum; max_sum]
    (computed from the weights and ranges of the registered rewards) to
    [reward_min; reward_max].

    See :class:`Reward.BaseReward` for setting the output range.

    Examples
    --------
    .. code-block:: python

        import grid2op
        from grid2op.Reward import GameplayReward, FlatReward, CombinedScaledReward

        env = grid2op.make(..., reward_class=CombinedScaledReward)
        cr = self.env.get_reward_instance()
        cr.addReward("Gameplay", GameplayReward(), 1.0)
        cr.addReward("Flat", FlatReward(), 1.0)
        cr.initialize(self.env)

        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())

        # reward here is computed by summing the results of what would have
        # given `GameplayReward` and the one from `FlatReward`

    """

    def __init__(self, logger=None):
        super().__init__(logger=logger)
        self.reward_min = dt_float(-0.5)
        self.reward_max = dt_float(0.5)
        # bounds of the (unscaled) weighted sum, computed in `initialize`
        self._sum_max = dt_float(0.0)
        self._sum_min = dt_float(0.0)
        self.rewards = {}

    def initialize(self, env):
        """
        Overloaded initialize from `Reward.CombinedReward`: it needs to store
        the ranges of the weighted sum internally.
        """
        self._sum_min = dt_float(0.0)
        self._sum_max = dt_float(0.0)
        for sub in self.rewards.values():
            sub_instance = sub["instance"]
            sub_instance.initialize(env)
            sub_weight = dt_float(sub["weight"])
            self._sum_min += dt_float(sub_instance.reward_min * sub_weight)
            self._sum_max += dt_float(sub_instance.reward_max * sub_weight)

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        # weighted sum computed by the parent class
        weighted_sum = super().__call__(
            action, env, has_error, is_done, is_illegal, is_ambiguous
        )
        # linearly rescale it into [reward_min, reward_max]
        scaled = np.interp(
            weighted_sum,
            [self._sum_min, self._sum_max],
            [self.reward_min, self.reward_max],
        )
        return dt_float(scaled)

    def close(self):
        super().close()
| 2,990 | 35.925926 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/constantReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class ConstantReward(BaseReward):
    """
    Most basic implementation of reward: it is constant and always equals 0.0.

    Note that this :class:`BaseReward` subtype is not useful at all, whether to
    train an :attr:`BaseAgent` nor to assess its performance of course.

    Examples
    ---------
    You can use this reward in any environment with:

    .. code-block:

        import grid2op
        from grid2op.Reward import ConstantReward

        # then you create your environment with it:
        NAME_OF_THE_ENVIRONMENT = "rte_case14_realistic"
        env = grid2op.make(NAME_OF_THE_ENVIRONMENT,reward_class=ConstantReward)
        # and do a step with a "do nothing" action
        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())
        # the reward is 0., always... Not really useful

    """

    def __init__(self, logger=None):
        BaseReward.__init__(self, logger=logger)

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        # constant signal: every situation is worth exactly 0.
        return dt_float(0.0)
| 1,593 | 34.422222 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/distanceReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class DistanceReward(BaseReward):
    """
    Reward penalizing how far the current topology is from the reference topology
    of time 0, in which every element of the grid is connected to bus 1.

    Examples
    ---------
    You can use this reward in any environment with:

    .. code-block:

        import grid2op
        from grid2op.Reward import DistanceReward

        # then you create your environment with it:
        NAME_OF_THE_ENVIRONMENT = "rte_case14_realistic"
        env = grid2op.make(NAME_OF_THE_ENVIRONMENT,reward_class=DistanceReward)

        # and do a step with a "do nothing" action
        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())
        # the reward is computed with the DistanceReward class

    """

    def __init__(self, logger=None):
        BaseReward.__init__(self, logger=logger)
        self.reward_min = dt_float(0.0)
        self.reward_max = dt_float(1.0)

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        if has_error or is_illegal or is_ambiguous:
            return self.reward_min

        obs = env.get_obs()
        topo = obs.topo_vect
        # Only the part of the topology vector covered by the substations is
        # scored (in practice this is the whole vector).
        nb_scored = int(np.sum(obs.sub_info))
        # In the reference state every element sits on bus 1, so the "distance"
        # is the number of elements that are NOT on bus 1.
        nb_moved = dt_float(np.count_nonzero(topo[:nb_scored] != 1))
        # linear interpolation: no change -> reward_max, everything moved -> reward_min
        return np.interp(
            nb_moved,
            [dt_float(0.0), len(topo) * dt_float(1.0)],
            [self.reward_max, self.reward_min],
        )
| 2,489 | 33.583333 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/economicReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Exceptions import Grid2OpException
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class EconomicReward(BaseReward):
    """
    This reward computes the marginal cost of the powergrid. As RL is about maximising
    a reward while we want to minimize the cost, this class also ensures that:

      - the reward is positive if there is no game over, no error etc.
      - the reward is inversely proportional to the cost of the grid (the higher the
        reward, the lower the economic cost).

    Examples
    ---------
    You can use this reward in any environment with:

    .. code-block:

        import grid2op
        from grid2op.Reward import EconomicReward

        # then you create your environment with it:
        NAME_OF_THE_ENVIRONMENT = "rte_case14_realistic"
        env = grid2op.make(NAME_OF_THE_ENVIRONMENT,reward_class=EconomicReward)

        # and do a step with a "do nothing" action
        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())
        # the reward is computed with the EconomicReward class

    """

    def __init__(self, logger=None):
        BaseReward.__init__(self, logger=logger)
        self.reward_min = dt_float(0.0)
        self.reward_max = dt_float(1.0)
        # set by `initialize`: most expensive possible generation schedule
        self.worst_cost = None

    def initialize(self, env):
        if not env.redispatching_unit_commitment_availble:
            raise Grid2OpException(
                "Impossible to use the EconomicReward reward with an environment without generators"
                "cost. Please make sure env.redispatching_unit_commitment_availble is available."
            )
        # worst case: every generator produces at pmax for the whole step duration
        self.worst_cost = dt_float(
            np.sum(env.gen_cost_per_MW * env.gen_pmax) * env.delta_time_seconds / 3600.0
        )

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        if has_error or is_illegal or is_ambiguous:
            return dt_float(self.reward_min)
        # operating cost of the grid over this step
        step_cost = dt_float(
            np.sum(env.get_obs().prod_p * env.gen_cost_per_MW)
            * env.delta_time_seconds
            / 3600.0
        )
        # cheaper grid -> higher reward: flip the sign and shift by the worst
        # possible cost so the value stays non negative
        shifted = self.worst_cost - step_cost
        res = np.interp(
            shifted, [dt_float(0.0), self.worst_cost], [self.reward_min, self.reward_max]
        )
        return dt_float(res)
| 2,971 | 40.277778 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/episodeDurationReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class EpisodeDurationReward(BaseReward):
    """
    This reward will always be 0., unless at the end of an episode where it will return the number
    of steps made by the agent divided by the total number of steps possible in the episode.

    Examples
    ---------
    You can use this reward in any environment with:

    .. code-block:

        import grid2op
        from grid2op.Reward import EpisodeDurationReward

        # then you create your environment with it:
        NAME_OF_THE_ENVIRONMENT = "rte_case14_realistic"
        env = grid2op.make(NAME_OF_THE_ENVIRONMENT,reward_class=EpisodeDurationReward)

        # and do a step with a "do nothing" action
        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())
        # the reward is computed with the EpisodeDurationReward class

    Notes
    -----
    In case of an environment being "fast forward" (see :func:`grid2op.Environment.BaseEnv.fast_forward_chronics`)
    the time "during" the fast forward are counted "as if" they were successful.

    This means that if you "fast forward" up until the end of an episode, you are likely to receive a reward of 1.0

    """

    def __init__(self, per_timestep=1, logger=None):
        BaseReward.__init__(self, logger=logger)
        # scaling factor applied to the episode length (see `reset`)
        self.per_timestep = dt_float(per_timestep)
        self.total_time_steps = dt_float(0.0)
        self.reward_min = dt_float(0.0)
        self.reward_max = dt_float(1.0)

    def initialize(self, env):
        self.reset(env)

    def reset(self, env):
        # The episode length can change from one scenario to the next, so it is
        # recomputed at every reset.
        if env.chronics_handler.max_timestep() > 0:
            self.total_time_steps = env.max_episode_duration() * self.per_timestep
            # BUGFIX: restore the finite upper bound; a previously-played scenario
            # with unbounded duration would otherwise leave it stuck at +inf
            self.reward_max = dt_float(1.0)
        else:
            # episode duration is unknown / unbounded
            self.total_time_steps = np.inf
            self.reward_max = np.inf

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        """Return 0. while the episode runs; at the end, the fraction of the episode survived
        (or the raw number of steps when the episode length is unbounded)."""
        if not is_done:
            return self.reward_min
        res = env.nb_time_step
        if np.isfinite(self.total_time_steps):
            res /= self.total_time_steps
        return res
| 2,637 | 36.15493 | 115 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/flatReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class FlatReward(BaseReward):
    """
    This reward hands back a fixed value for every step without error, and 0 otherwise.

    Examples
    ---------
    You can use this reward in any environment with:

    .. code-block:

        import grid2op
        from grid2op.Reward import FlatReward

        # then you create your environment with it:
        NAME_OF_THE_ENVIRONMENT = "rte_case14_realistic"
        env = grid2op.make(NAME_OF_THE_ENVIRONMENT,reward_class=FlatReward)

        # and do a step with a "do nothing" action
        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())
        # the reward is computed with the FlatReward class

    """

    def __init__(self, per_timestep=1, logger=None):
        BaseReward.__init__(self, logger=logger)
        self.per_timestep = dt_float(per_timestep)
        self.reward_min = dt_float(0.0)
        self.reward_max = dt_float(per_timestep)

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        # constant payoff for every valid step, the minimum otherwise
        return self.reward_min if has_error else self.per_timestep
| 1,699 | 34.416667 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/gameplayReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class GameplayReward(BaseReward):
    """
    This reward is strictly computed from the game status:

    - a game over yields the minimum (negative) reward;
    - an illegal or ambiguous action yields half the minimum reward;
    - any other outcome yields the maximum (positive) reward.

    Examples
    ---------
    You can use this reward in any environment with:

    .. code-block:

        import grid2op
        from grid2op.Reward import GameplayReward

        # then you create your environment with it:
        NAME_OF_THE_ENVIRONMENT = "rte_case14_realistic"
        env = grid2op.make(NAME_OF_THE_ENVIRONMENT,reward_class=GameplayReward)

        # and do a step with a "do nothing" action
        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())
        # the reward is computed with the GameplayReward class

    """

    def __init__(self, logger=None):
        BaseReward.__init__(self, logger=logger)
        self.reward_min = dt_float(-1.0)
        self.reward_max = dt_float(1.0)

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        if has_error:
            # game over
            return self.reward_min
        if is_illegal or is_ambiguous:
            # the action did not respect the rules of the game
            return self.reward_min / dt_float(2.0)
        # still playing, or the episode finished properly
        return self.reward_max
| 1,912 | 35.09434 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/increasingFlatReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class IncreasingFlatReward(BaseReward):
    """
    This reward simply counts the number of time steps the agent has successfully managed:
    each surviving step adds a constant amount to the reward.

    Examples
    ---------
    You can use this reward in any environment with:

    .. code-block:

        import grid2op
        from grid2op.Reward import IncreasingFlatReward

        # then you create your environment with it:
        NAME_OF_THE_ENVIRONMENT = "rte_case14_realistic"
        env = grid2op.make(NAME_OF_THE_ENVIRONMENT,reward_class=IncreasingFlatReward)

        # and do a step with a "do nothing" action
        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())
        # the reward is computed with the IncreasingFlatReward class

    """

    def __init__(self, per_timestep=1, logger=None):
        BaseReward.__init__(self, logger=logger)
        self.per_timestep = dt_float(per_timestep)
        self.reward_min = dt_float(0.0)

    def initialize(self, env):
        # When the scenario length is known, the best achievable reward is bounded.
        horizon = env.chronics_handler.max_timestep()
        if horizon > 0:
            self.reward_max = horizon * self.per_timestep
        else:
            self.reward_max = np.inf

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        if has_error:
            return self.reward_min
        # reward grows linearly with the number of steps survived so far
        return dt_float(env.nb_time_step * self.per_timestep)
| 2,038 | 35.410714 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/l2RPNReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class L2RPNReward(BaseReward):
    """
    This is the historical :class:`BaseReward` used for the Learning To Run a Power Network
    competition on WCCI 2019. See `L2RPN <https://l2rpn.chalearn.org/>`_ for more information.

    It sums, over every powerline, the "squared margin" of that line:

    `margin of a powerline = (thermal limit - flow in amps) / thermal limit`
    (if flow in amps <= thermal limit) else `margin of a powerline = 0.`

    and the reward is `sum (margin of this powerline) ^ 2`, for each powerline.

    Examples
    ---------
    You can use this reward in any environment with:

    .. code-block:

        import grid2op
        from grid2op.Reward import L2RPNReward

        # then you create your environment with it:
        NAME_OF_THE_ENVIRONMENT = "rte_case14_realistic"
        env = grid2op.make(NAME_OF_THE_ENVIRONMENT,reward_class=L2RPNReward)

        # and do a step with a "do nothing" action
        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())
        # the reward is computed with the L2RPNReward class

    """

    def __init__(self, logger=None):
        BaseReward.__init__(self, logger=logger)

    def initialize(self, env):
        self.reward_min = dt_float(0.0)
        # perfect score: every line has a full margin of 1.
        self.reward_max = dt_float(env.backend.n_line)

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        if is_done or has_error:
            # no powerflow was run (or it diverged): nothing to score
            return self.reward_min
        return np.sum(self.__get_lines_capacity_usage(env))

    @staticmethod
    def __get_lines_capacity_usage(env):
        """Return, per powerline, the score `max(0, 1 - (flow / limit)^2)`."""
        flows = np.abs(env.backend.get_line_flow(), dtype=dt_float)
        limits = np.abs(env.get_thermal_limit(), dtype=dt_float)
        limits += 1e-1  # for numerical stability
        usage = np.divide(flows, limits, dtype=dt_float)
        # overloaded lines (usage > 1) score exactly 0.
        capped = np.minimum(usage, dt_float(1.0))
        return np.maximum(
            dt_float(1.0) - capped**2, np.zeros(capped.shape, dtype=dt_float)
        )
| 2,967 | 37.545455 | 116 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/l2RPNSandBoxScore.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class L2RPNSandBoxScore(BaseReward):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        It **must not** serve as a reward. This scored needs to be **MINIMIZED**,
        and a reward needs to be maximized! Also, this "reward" is not scaled or anything. Use it as your
        own risk.

    Implemented as a reward to make it easier to use in the context of the L2RPN competitions, this "reward"
    computed the "grid operation cost". It should not be used to train an agent.

    The "reward" the closest to this score is given by the :class:`RedispReward` class.

    The total cost is a weighted sum of four components (losses, redispatching,
    storage, curtailment), each priced at the marginal cost of the grid.
    NOTE: subclasses (e.g. L2RPNWCCI2022ScoreFun) override individual ``_get_*``
    helpers, so the method decomposition below is part of the contract.
    """

    def __init__(self,
                 alpha_redisp=1.0,
                 alpha_loss=1.0,
                 alpha_storage=1.0,
                 alpha_curtailment=1.0,
                 reward_max=1000.,
                 logger=None):
        BaseReward.__init__(self, logger=logger)
        # NB: this "score" is a cost to be minimized, hence min < max is intentional
        self.reward_min = dt_float(1.0)  # careful here between min and max...
        self.reward_max = dt_float(reward_max)
        # weights of the four cost components
        self.alpha_redisp = dt_float(alpha_redisp)
        self.alpha_loss = dt_float(alpha_loss)
        self.alpha_storage = dt_float(alpha_storage)
        self.alpha_curtailment = dt_float(alpha_curtailment)

    def initialize(self, env):
        # TODO compute reward max!
        return super().initialize(env)

    def _get_load_p(self, env):
        # active power (MW) currently consumed by all loads
        load_p, *_ = env.backend.loads_info()
        return load_p

    def _get_gen_p(self, env):
        # active power (MW) currently produced by all generators
        gen_p, *_ = env.backend.generators_info()
        return gen_p

    def _get_losses(self, env, gen_p, load_p):
        # grid losses for this step, in MWh: production minus consumption,
        # scaled by the step duration
        return (np.sum(gen_p, dtype=dt_float) - np.sum(load_p, dtype=dt_float)) * env.delta_time_seconds / 3600.0

    def _get_marginal_cost(self, env):
        # marginal price (per MWh): cost of the most expensive unit currently producing
        gen_activeprod_t = env._gen_activeprod_t
        p_t = np.max(env.gen_cost_per_MW[gen_activeprod_t > 0.0]).astype(dt_float)
        # price is per MWh be sure to convert the MW (of losses and generation) to MWh before multiplying by the cost
        return p_t

    def _get_redisp_cost(self, env, p_t):
        # cost of the redispatching actually performed, priced at the marginal cost
        actual_dispatch = env._actual_dispatch
        c_redispatching = (
            np.sum(np.abs(actual_dispatch)) * p_t * env.delta_time_seconds / 3600.0
        )
        return c_redispatching

    def _get_curtail_cost(self, env, p_t):
        # cost of curtailed energy, priced at the marginal cost
        curtailment_mw = -env._sum_curtailment_mw  # curtailment is always negative in the env
        c_curtailment = (
            curtailment_mw * p_t * env.delta_time_seconds / 3600.0
        )
        return c_curtailment

    def _get_loss_cost(self, env, p_t):
        # cost of the grid losses, priced at the marginal cost
        gen_p = self._get_gen_p(env)
        load_p = self._get_load_p(env)
        losses = self._get_losses(env, gen_p, load_p)
        c_loss = losses * p_t
        return c_loss

    def _get_storage_cost(self, env, p_t):
        # cost of energy exchanged with storage units, priced at the marginal cost
        # (overridden in L2RPNWCCI2022ScoreFun to use a flat price)
        c_storage = np.sum(np.abs(env._storage_power)) * p_t * env.delta_time_seconds / 3600.0
        return c_storage

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        if has_error:
            # DO SOMETHING IN THIS CASE
            return self.reward_min

        # compute the marginal cost
        p_t = self._get_marginal_cost(env)

        # redispatching amount
        c_redispatching = self._get_redisp_cost(env, p_t)

        # curtailment amount
        c_curtailment = self._get_curtail_cost(env, p_t)

        # cost of losses
        c_loss = self._get_loss_cost(env, p_t)

        # storage units
        c_storage = self._get_storage_cost(env, p_t)

        # total "operationnal cost"
        c_operations = dt_float(self.alpha_loss * c_loss +
                                self.alpha_redisp * c_redispatching +
                                self.alpha_storage * c_storage +
                                self.alpha_curtailment * c_curtailment)
        return c_operations
| 4,531 | 37.40678 | 118 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/l2rpn_wcci2022_scorefun.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Reward.l2RPNSandBoxScore import L2RPNSandBoxScore
from grid2op.dtypes import dt_float
class L2RPNWCCI2022ScoreFun(L2RPNSandBoxScore):
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        It **must not** serve as a reward. This scored needs to be **MINIMIZED**,
        and a reward needs to be maximized! Also, this "reward" is not scaled or anything. Use it as your
        own risk.

    Variant of :class:`L2RPNSandBoxScore` used for the WCCI 2022 competition: energy
    exchanged with storage units is priced at a flat ``storage_cost`` (per MWh)
    instead of the grid marginal cost.
    """

    def __init__(self,
                 storage_cost=10.,  # € / MWh
                 alpha_redisp=1.0,
                 alpha_loss=1.0,
                 alpha_storage=1.0,
                 alpha_curtailment=1.0,
                 reward_max=1000.,
                 logger=None):
        super().__init__(
            alpha_redisp=alpha_redisp,
            alpha_loss=alpha_loss,
            alpha_storage=alpha_storage,
            alpha_curtailment=alpha_curtailment,
            reward_max=reward_max,
            logger=logger,
        )
        self.storage_cost = dt_float(storage_cost)

    def _get_storage_cost(self, env, p_t):
        """storage cost is a flat 10 € / MWh instead of depending on the marginal cost"""
        abs_storage_power = np.abs(env._storage_power)
        c_storage = np.sum(abs_storage_power) * self.storage_cost * env.delta_time_seconds / 3600.0
        return c_storage
| 1,962 | 43.613636 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/linesCapacityReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class LinesCapacityReward(BaseReward):
    """
    Reward based on lines capacity usage
    Returns max reward if no current is flowing in the lines
    Returns min reward if all lines are used at max capacity

    Compared to `:class:L2RPNReward`:
    This reward is linear (instead of quadratic) and only
    considers connected lines capacities

    Examples
    ---------
    You can use this reward in any environment with:

    .. code-block:

        import grid2op
        from grid2op.Reward import LinesCapacityReward

        # then you create your environment with it:
        NAME_OF_THE_ENVIRONMENT = "rte_case14_realistic"
        env = grid2op.make(NAME_OF_THE_ENVIRONMENT,reward_class=LinesCapacityReward)

        # and do a step with a "do nothing" action
        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())
        # the reward is computed with the LinesCapacityReward class

    """

    def __init__(self, logger=None):
        BaseReward.__init__(self, logger=logger)
        self.reward_min = dt_float(0.0)
        self.reward_max = dt_float(1.0)

    def initialize(self, env):
        pass

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        if has_error or is_illegal or is_ambiguous:
            return self.reward_min

        obs = env.get_obs()
        # number of powerlines currently in service
        # (use the boolean mask directly instead of `== True` / float casts)
        n_connected = np.count_nonzero(obs.line_status)
        # summed capacity usage of the connected lines, bounded to [0, n_connected]
        usage = np.sum(obs.rho[obs.line_status])
        usage = np.clip(usage, 0.0, float(n_connected))
        # full margin everywhere -> reward_max; everything at capacity -> reward_min
        reward = np.interp(
            n_connected - usage,
            [dt_float(0.0), float(n_connected)],
            [self.reward_min, self.reward_max],
        )
        return reward
| 2,287 | 34.2 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/linesReconnectedReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class LinesReconnectedReward(BaseReward):
    """
    This reward computes a penalty
    based on the number of powerlines that could have been reconnected (cooldown at 0.) but
    are still disconnected.

    Examples
    ---------
    You can use this reward in any environment with:

    .. code-block:

        import grid2op
        from grid2op.Reward import LinesReconnectedReward

        # then you create your environment with it:
        NAME_OF_THE_ENVIRONMENT = "rte_case14_realistic"
        env = grid2op.make(NAME_OF_THE_ENVIRONMENT,reward_class=LinesReconnectedReward)

        # and do a step with a "do nothing" action
        obs = env.reset()
        obs, reward, done, info = env.step(env.action_space())
        # the reward is computed with the LinesReconnectedReward class

    """

    def __init__(self, logger=None):
        BaseReward.__init__(self, logger=logger)
        self.reward_min = dt_float(0.0)
        self.reward_max = dt_float(1.0)
        # number of "reconnectable but disconnected" lines at which the penalty saturates
        self.penalty_max_at_n_lines = dt_float(2.0)

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        if has_error or is_illegal or is_ambiguous:
            return self.reward_min

        # Get obs from env
        obs = env.get_obs()
        # lines that are out of cooldown (the agent could act on them) ...
        out_of_cooldown = obs.time_before_cooldown_line == 0
        # ... but that are nonetheless still disconnected
        # (vectorized; replaces the former python loop with `== False` tests)
        n_penalties = dt_float(np.count_nonzero(out_of_cooldown & ~obs.line_status))

        max_p = self.penalty_max_at_n_lines
        n_penalties = np.clip(n_penalties, dt_float(0.0), max_p)
        # no penalty -> reward_max, saturated penalty -> reward_min
        r = np.interp(
            n_penalties, [dt_float(0.0), max_p], [self.reward_max, self.reward_min]
        )
        return dt_float(r)
| 2,420 | 34.086957 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/n1Reward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
from grid2op.Reward import BaseReward
from grid2op.Action._BackendAction import _BackendAction
class N1Reward(BaseReward):
    """
    This class implements the "n-1" reward, which returns the maximum flows (after the
    powerflow is recomputed) after the disconnection of one given powerline.

    Examples
    --------

    This can be used as:

    .. code-block:: python

        import grid2op
        from grid2op.Reward import N1Reward
        L_ID = 0
        env = grid2op.make("l2rpn_case14_sandbox",
                           reward_class=N1Reward(l_id=L_ID)
                          )
        obs = env.reset()
        obs, reward, *_ = env.step(env.action_space())
        print(f"reward: {reward:.3f}")
        print("We can check that it is exactly like 'simulate' on the current step the disconnection of the same powerline")
        obs_n1, *_ = obs.simulate(env.action_space({"set_line_status": [(L_ID, -1)]}), time_step=0)
        print(f"\tmax flow after disconnection of line {L_ID}: {obs_n1.rho.max():.3f}")

    Notes
    -----
    It is also possible to use the `other_rewards` argument to simulate multiple powerline disconnections, for example:

    .. code-block:: python

        import grid2op
        from grid2op.Reward import N1Reward
        L_ID = 0
        env = grid2op.make("l2rpn_case14_sandbox",
                           other_rewards={f"line_{l_id}": N1Reward(l_id=l_id)  for l_id in [0, 1]}
                           )
        obs = env.reset()
        obs, reward, *_ = env.step(env.action_space())
        print(f"reward: {reward:.3f}")
        print("We can check that it is exactly like 'simulate' on the current step the disconnection of the same powerline")
        obs_n1, *_ = obs.simulate(env.action_space({"set_line_status": [(L_ID, -1)]}), time_step=0)
        print(f"\tmax flow after disconnection of line {L_ID}: {obs_n1.rho.max():.3f}")

    """

    def __init__(self, l_id=0, logger=None):
        BaseReward.__init__(self, logger=logger)
        # private copy of the env backend, used to run the "n-1" powerflow
        # without disturbing the real environment (set in `initialize`)
        self._backend = None
        self._backend_action = None
        # id of the powerline whose disconnection is simulated
        self.l_id = l_id
    def initialize(self, env):
        self._backend = env.backend.copy()
        bk_act_cls = _BackendAction.init_grid(env.backend)
        self._backend_action = bk_act_cls()
    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        if is_done:
            return self.reward_min
        self._backend_action.reset()
        # replicate the current grid state on the private backend
        act = env.backend.get_action_to_set()
        th_lim = env.get_thermal_limit()
        th_lim[th_lim <= 1] = 1  # assign 1 for the thermal limit
        this_n1 = copy.deepcopy(act)
        self._backend_action += this_n1
        self._backend.apply_action(self._backend_action)
        # ... then disconnect the monitored line and re-run the powerflow
        self._backend._disconnect_line(self.l_id)
        div_exc_ = None
        try:
            # TODO there is a bug in lightsimbackend that make it crash instead of diverging
            conv, div_exc_ = self._backend.runpf()
        except Exception as exc_:
            conv = False
            div_exc_ = exc_
        if conv:
            # reward is the maximum relative flow ("rho") in the n-1 state
            flow = self._backend.get_line_flow()
            res = (flow / th_lim).max()
        else:
            # powerflow diverged: sentinel value of -1
            self.logger.info(f"Divergence of the backend at step {env.nb_time_step} for N1Reward with error `{div_exc_}`")
            res = -1
        return res
    def close(self):
        # release the private backend (it may hold native resources)
        self._backend.close()
        del self._backend
        self._backend = None
| 3,850 | 36.754902 | 124 | py |
Grid2Op | Grid2Op-master/grid2op/Reward/redispReward.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import sys
import numpy as np
import re
from grid2op._glop_platform_info import _IS_WINDOWS, _IS_LINUX, _IS_MACOS
from grid2op.Exceptions import Grid2OpException
from grid2op.Reward.baseReward import BaseReward
from grid2op.dtypes import dt_float
class RedispReward(BaseReward):
"""
This reward can be used for environments where redispatching is available. It assigns a cost to redispatching action
and penalizes with the losses.
This is the closest reward to the score used for the l2RPN competitions.
Examples
---------
You can use this reward in any environment with:
.. code-block:
import grid2op
from grid2op.Reward import RedispReward
# then you create your environment with it:
NAME_OF_THE_ENVIRONMENT = "rte_case14_realistic"
env = grid2op.make(NAME_OF_THE_ENVIRONMENT,reward_class=RedispReward)
# and do a step with a "do nothing" action
obs = env.reset()
obs, reward, done, info = env.step(env.action_space())
# the reward is computed with the RedispReward class
# NB this is the default reward of many environments in the grid2op framework
This class depends on some "meta parameters". These meta parameters can be changed when the class is created
in the following way:
.. code-block:: python
import grid2op
from grid2op.Reward import RedispReward
reward_cls = RedispReward.generate_class_custom_params(alpha_redisph=5,
min_load_ratio=0.1,
worst_losses_ratio=0.05,
min_reward=-10.,
reward_illegal_ambiguous=0.,
least_losses_ratio=0.015)
env_name = "l2rpn_case14_sandbox" # or any other name
env = grid2op.make(env_name,reward_class=reward_cls)
These meta parameters means:
- alpha_redisp: extra cost paid when performing redispatching. For 1MW of redispatching done, you pay
"alpha_redisph"
- min_load_ratio: how to compute the minimum load on the grid, based on the total generation (sum of gen_pmax)
- worst_losses_ratio: worst loss possible on the grid (5% is an upper bound for normal grid)
- min_reward: what is the minimum reward of this class (can be parametrized, and is only used when there is
a game over
- reward_illegal_ambiguous: reward given when the action is illegal or ambiguous
- least_losses_ratio: the minimum loss you can have (1.5% of the total demand should be a lower bound for real grid)
Notes
------
On windows and MacOs, due to a compatibility issue with multi-processing, it is not possible to have different
"RedisReward" with different meta parameters (see the "Examples" section).
"""
_alpha_redisp = dt_float(5.0)
_min_load_ratio = dt_float(0.1) # min load = min_load_ratio * max_load
_worst_losses_ratio = dt_float(
0.05
) # worst_losses = worst_losses_ratio * worst_load
_min_reward = dt_float(-10.0) # reward when game over
_reward_illegal_ambiguous = dt_float(
0.0
) # reward when action is illegal or ambiguous
_least_losses_ratio = dt_float(
0.015
) # least_losses = least_losses_ratio * least_loads
def __init__(self, logger=None):
    """Create the reward; the actual bounds are only known after :func:`initialize`."""
    BaseReward.__init__(self, logger=logger)
    # the bounds and the illegal / ambiguous reward depend on the environment,
    # hence the placeholders here: they are filled in by `initialize(env)`
    self.reward_illegal_ambiguous = None
    self.reward_max = None
    self.reward_min = None
    self.max_regret = dt_float(0.0)
@classmethod
def generate_class_custom_params(
    cls,
    alpha_redisph=5.0,
    min_load_ratio=0.1,  # min load = min_load_ratio * max_load
    worst_losses_ratio=0.05,  # worst_losses = worst_losses_ratio * worst_load
    min_reward=-10.0,
    least_losses_ratio=0.015,  # least_losses = least_losses_ratio * least_loads
    reward_illegal_ambiguous=0.0,
):
    """
    Build a RedispReward class with custom "meta parameters".

    On linux a brand new class (named after the parameter values) is created,
    registered in this module and returned, so several RedispReward variants
    with different meta parameters can coexist. On other platforms (see the
    "Notes" section of the class documentation) the class attributes of
    ``cls`` itself are overwritten instead and ``cls`` is returned: only one
    set of meta parameters can be in use at a time there.

    NOTE(review): the parameter is spelled ``alpha_redisph`` (sic); it is kept
    as-is for backward compatibility with existing callers.
    """
    if _IS_LINUX:
        # on linux it's fine, i can create new classes for each meta parameters
        nm_res = f"RedispReward_{alpha_redisph:.2f}_{min_load_ratio:.2f}_{worst_losses_ratio:.2f}"
        nm_res += f"_{min_reward:.2f}_{least_losses_ratio:.2f}_{reward_illegal_ambiguous:.2f}"
        # "." is not a valid character in a python identifier, replace it
        nm_res = re.sub("\\.", "@", nm_res)
        cls_attr_as_dict = {
            "_alpha_redisp": dt_float(alpha_redisph),
            "_min_load_ratio": dt_float(min_load_ratio),
            "_worst_losses_ratio": dt_float(worst_losses_ratio),
            "_min_reward": dt_float(min_reward),
            "_least_losses_ratio": dt_float(least_losses_ratio),
            "_reward_illegal_ambiguous": dt_float(reward_illegal_ambiguous),
        }
        res_cls = type(nm_res, (cls,), cls_attr_as_dict)
        res_cls.__module__ = cls.__module__
        # register the new class in its module and in globals() so that it can
        # be found again by name (presumably needed when the environment is
        # copied / pickled — TODO confirm)
        setattr(sys.modules[cls.__module__], nm_res, res_cls)
        globals()[nm_res] = res_cls
    else:
        # i mess with the default parameters in the base class, i know i know it's not pretty, but hey...
        # TODO make that prettier and clean the way to make the reward in the env (for example allow to pass
        # objects and not just class)
        cls._alpha_redisp = dt_float(alpha_redisph)
        cls._min_load_ratio = dt_float(min_load_ratio)
        cls._worst_losses_ratio = dt_float(worst_losses_ratio)
        cls._min_reward = dt_float(min_reward)
        cls._least_losses_ratio = dt_float(least_losses_ratio)
        cls._reward_illegal_ambiguous = dt_float(reward_illegal_ambiguous)
        res_cls = cls
    return res_cls
def initialize(self, env):
    """
    Set up the reward range from the environment characteristics.

    An upper bound on the "regret" (cost of the losses plus cost of the
    redispatching, paid at the highest marginal cost) is derived from the
    generator data; the reward bounds ``reward_min`` / ``reward_max``
    follow from it.
    """
    if not env.redispatching_unit_commitment_availble:
        raise Grid2OpException(
            "Impossible to use the RedispReward reward with an environment without generators "
            "cost. Please make sure env.redispatching_unit_commitment_availble is available."
        )
    my_cls = type(self)
    self.reward_illegal_ambiguous = my_cls._reward_illegal_ambiguous
    self.reward_min = dt_float(my_cls._min_reward)

    # --- upper bound on the regret ---
    highest_marginal_cost = np.max(env.gen_cost_per_MW)
    max_total_load = dt_float(np.sum(env.gen_pmax))
    # it's not the worst, but definitely an upper bound
    losses_upper_bound = dt_float(my_cls._worst_losses_ratio) * max_total_load
    # not realistic, but an upper bound
    redisp_upper_bound = my_cls._alpha_redisp * np.sum(env.gen_pmax)
    self.max_regret = (
        (losses_upper_bound + redisp_upper_bound)
        * highest_marginal_cost
        * env.delta_time_seconds
        / 3600.0
    )

    # --- lower bound on the regret: smallest losses, no redispatching,
    # cheapest (non null) marginal cost ---
    smallest_loads = dt_float(max_total_load * my_cls._min_load_ratio)
    smallest_losses = dt_float(
        my_cls._least_losses_ratio * smallest_loads * env.delta_time_seconds / 3600.0
    )
    no_redisp = dt_float(0.0)  # lower bound is 0
    cheapest_marginal_cost = np.min(env.gen_cost_per_MW[env.gen_cost_per_MW > 0.0])
    smallest_regret = (smallest_losses + no_redisp) * cheapest_marginal_cost
    self.reward_max = dt_float((self.max_regret - smallest_regret) / smallest_loads)
def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
    """
    Compute the reward as ``(max_regret - regret) / total_load``, where the
    "regret" is the cost of the losses, of the redispatching and of the
    storage usage, all paid at the current marginal cost.
    """
    # NOTE(review): the indentation below was reconstructed from the logic —
    # `elif` is assumed to sit at the same level as `if is_done` (confirm).
    res = None
    if is_done:
        # if the episode is over and it's my fault (i did a blackout) i strongly
        # penalize it with the minimum reward
        if has_error or is_illegal or is_ambiguous:
            res = self.reward_min
    elif is_illegal or is_ambiguous:
        res = self._reward_illegal_ambiguous
    if res is None:
        # regular step (or game over not caused by an error): compute the
        # "regret" based reward
        # compute the losses
        gen_p, *_ = env.backend.generators_info()
        load_p, *_ = env.backend.loads_info()
        # don't forget to convert MW to MWh !
        losses = (np.sum(gen_p) - np.sum(load_p)) * env.delta_time_seconds / 3600.0
        # compute the marginal cost (highest cost among producing generators)
        gen_activeprod_t = env._gen_activeprod_t
        marginal_cost = np.max(env.gen_cost_per_MW[gen_activeprod_t > 0.0])
        # redispatching amount
        actual_dispatch = env._actual_dispatch
        redisp_cost = (
            self._alpha_redisp * np.sum(np.abs(actual_dispatch)) * marginal_cost * env.delta_time_seconds / 3600.0
        )
        # cost of losses
        losses_cost = losses * marginal_cost
        # cost of storage
        c_storage = np.sum(np.abs(env._storage_power)) * marginal_cost * env.delta_time_seconds / 3600.0
        # total "regret"
        regret = losses_cost + redisp_cost + c_storage
        # compute reward
        reward = self.max_regret - regret
        # divide it by load, to be less sensitive to load variation
        res = dt_float(reward / np.sum(load_p))
    return res
# === file: grid2op/Reward/rewardHelper.py (Grid2Op) ===
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
from grid2op.Reward.baseReward import BaseReward
from grid2op.Reward.constantReward import ConstantReward
from grid2op.Exceptions import Grid2OpException
class RewardHelper:
    """
    INTERNAL

    .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

        It is a class internal to the :class:`grid2op.Environment.Environment` do not use outside
        of its purpose and do not attempt to modify it.

    This class aims at making the creation of rewards class more automatic by the :class:`grid2op.Environment`.

    It is not recommended to derive or modify this class. If a different reward needs to be used, it is recommended
    to build another object of this class, and change the :attr:`RewardHelper.rewardClass` attribute.

    Attributes
    ----------
    rewardClass: ``type``
        Type of reward that will be used by this helper. Note that the type (and not an instance / object of that
        type) must be given here. It defaults to :class:`ConstantReward`

    template_reward: :class:`BaseReward`
        An object of class :attr:`RewardHelper.rewardClass` used to compute the rewards.

    """

    def __init__(self, reward_func=ConstantReward, logger=None):
        self.rewardClass = None  # filled by `change_reward`
        self.template_reward = None  # filled by `change_reward`
        self.logger = logger  # optional logger, may be None
        self.change_reward(reward_func)

    def initialize(self, env):
        """
        This function initializes the template_reward with the environment. It is used especially for using
        :func:`RewardHelper.range`.

        Parameters
        ----------
        env: :class:`grid2op.Environment.Environment`
            The current used environment.
        """
        self.template_reward.initialize(env)

    def range(self):
        """
        Provides the range of the rewards.

        Returns
        -------
        res: ``(float, float)``
            The minimum reward per time step (possibly infinity) and the maximum reward per timestep
            (possibly infinity)
        """
        return self.template_reward.get_range()

    def reset(self, env):
        """called each time `env.reset` is invoked"""
        self.template_reward.reset(env)

    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        """
        Gives the reward that follows the execution of the :class:`grid2op.BaseAction.BaseAction` action in the
        :class:`grid2op.Environment.Environment` env;

        Parameters
        ----------
        action: :class:`grid2op.Action.Action`
            The action performed by the BaseAgent.

        env: :class:`grid2op.Environment.Environment`
            The current environment.

        has_error: ``bool``
            Does the action caused an error, such a diverging powerflow for example= (``True``: the action caused
            an error)

        is_done: ``bool``
            Is the game over (``True`` = the game is over)

        is_illegal: ``bool``
            Is the action legal or not (``True`` = the action was illegal). See
            :class:`grid2op.Exceptions.IllegalAction` for more information.

        is_ambiguous: ``bool``
            Is the action ambiguous or not (``True`` = the action was ambiguous). See
            :class:`grid2op.Exceptions.AmbiguousAction` for more information.

        Returns
        -------
        res: ``float``
            The computed reward
        """
        res = self.template_reward(
            action, env, has_error, is_done, is_illegal, is_ambiguous
        )
        return res

    def change_reward(self, reward_func):
        """
        INTERNAL

        .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\

            Use `env.change_reward` instead (:func:`grid2op.Environment.BaseEnv.change_reward`)
        """
        if isinstance(reward_func, BaseReward):
            # reward object given directly: remember its type, work on a copy
            self.rewardClass = type(reward_func)
            self.template_reward = copy.deepcopy(reward_func)
        elif issubclass(reward_func, BaseReward):
            # reward is provided as a class
            self.rewardClass = reward_func
            try:
                self.template_reward = reward_func(logger=self.logger)
            except TypeError as exc_:
                # old (<= 1.7.0) behaviour: reward classes did not accept a logger.
                # BUGFIX: `self.logger` defaults to None, so it must be guarded
                # here (previously this raised AttributeError instead of falling
                # back). Also use `warning` (`warn` is deprecated in `logging`).
                if self.logger is not None:
                    self.logger.warning(
                        f"Reward \"{reward_func.__name__}\" does not support the logger feature. Error was : {exc_}"
                    )
                self.template_reward = reward_func()
        else:
            raise Grid2OpException(
                f"Impossible to build a reward with input reward_func={reward_func}. "
                f"NB `reward_func` should be either an object of type `BaseReward` (or "
                f"one of its derivative) "
                f"or a class that inherit from `BaseReward`"
            )

    def close(self):
        """close the reward helper (in case there are specific behaviour for certain rewards)"""
        self.template_reward.close()
# === file: grid2op/Rules/AlwaysLegal.py (Grid2Op) ===
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Rules.BaseRules import BaseRules
class AlwaysLegal(BaseRules):
    """
    A rule set under which every action is legal: no legality check is
    performed at all.
    """

    def __call__(self, action, env):
        """
        Always declare the action legal, whatever it is.

        See :func:`BaseRules.__call__` for the meaning of the parameters.
        """
        is_legal, reason = True, None
        return is_legal, reason
# === file: grid2op/Rules/BaseRules.py (Grid2Op) ===
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from abc import ABC, abstractmethod
class BaseRules(ABC):
    """
    Abstract base class encoding the "game rules": it decides whether or not
    an action is legal in a certain environment.

    Subclasses implement the :func:`BaseRules.__call__` operator, which
    returns ``True`` or ``False`` (together with an optional reason)
    depending on the legality of the action.

    In :class:`grid2op.Environment`, only the actions of the users are
    checked for legality.
    """

    def initialize(self, env):
        """
        Inform this instance about the environment specification.

        This is a good place to assert that the defined rules are suited for
        the environment at hand.

        Parameters
        ----------
        env: :class:`grid2op.Environment.Environment`
            The environment on which the action is performed. The environment
            instance is not fully initialized itself.
        """
        pass

    @abstractmethod
    def __call__(self, action, env):
        """
        Tell whether ``action`` is legal on ``env``.

        As opposed to "ambiguous actions", "illegal actions" are not illegal
        per se: their legality depends on the environment. For example,
        disconnecting a powerline that has been cut off for maintenance is
        illegal; whereas telling an action to both disconnect a powerline and
        assign it to bus 2 on its origin end is ambiguous, and never tolerated
        in Grid2Op.

        Parameters
        ----------
        action: :class:`grid2op.Action.Action`
            The action of which the legality is tested.
        env: :class:`grid2op.Environment.Environment`
            The environment on which the action is performed.

        Returns
        -------
        is_legal: ``bool``
            Whether the action is legal or not
        reason:
            The cause of the illegal part of the action (should be a grid2op
            exception)
        """
        pass

    def can_use_simulate(self, nb_simulate_call_step, nb_simulate_call_episode, param):
        """
        Optional hook restricting the number of calls to ``obs.simulate``.

        Expected to return either SimulateUsedTooMuchThisStep or
        SimulateUsedTooMuchThisEpisode when the number of calls to
        `obs.simulate` is too high in total or for the given step;
        by default nothing is restricted and ``None`` is returned.
        """
        return None
# === file: grid2op/Rules/DefaultRules.py (Grid2Op) ===
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.Rules.LookParam import LookParam
from grid2op.Rules.PreventReconnection import PreventReconnection
from grid2op.Rules.PreventDiscoStorageModif import PreventDiscoStorageModif
class DefaultRules(LookParam, PreventDiscoStorageModif, PreventReconnection):
    """
    Combination of :class:`LookParam`, :class:`PreventDiscoStorageModif` and
    :class:`PreventReconnection`.

    An action is declared legal if and only if:

      - It doesn't disconnect / reconnect more power lines than what stated in the actual game _parameters
        :class:`grid2op.Parameters`
      - It doesn't attempt to act on more substations that what is stated in the actual game _parameters
        :class:`grid2op.Parameters`
      - It doesn't attempt to modify the power produce by a turned off storage unit
    """

    def __call__(self, action, env):
        """
        See :func:`BaseRules.__call__` for a definition of the _parameters of this function.
        """
        # check the elementary rules one after the other, stopping at the
        # first violated one; the last check's result is returned as-is
        for elementary_rule in (LookParam, PreventDiscoStorageModif):
            is_legal, reason = elementary_rule.__call__(self, action, env)
            if not is_legal:
                return False, reason
        return PreventReconnection.__call__(self, action, env)

    def can_use_simulate(self, nb_simulate_call_step, nb_simulate_call_episode, param):
        """Delegate the `obs.simulate` budget check to :class:`LookParam`."""
        return LookParam.can_use_simulate(
            self, nb_simulate_call_step, nb_simulate_call_episode, param
        )
# === file: grid2op/Rules/LookParam.py (Grid2Op) ===
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
from grid2op.Exceptions import (
IllegalAction,
SimulateUsedTooMuchThisStep,
SimulateUsedTooMuchThisEpisode,
)
from grid2op.Rules.BaseRules import BaseRules
class LookParam(BaseRules):
    """
    This rule set only checks the number of powerlines reconnected / disconnected
    and the number of substations modified by the agent against the game rules
    implemented in :class:`grid2op.Parameters`.

    No other environment information is required: the "env" argument is only
    used to look up the parameters and the current line status.
    """

    def __call__(self, action, env):
        """
        See :func:`BaseRules.__call__` for a definition of the parameters of this function.
        """
        # NB: at the very first iteration, env.current_obs is None...
        line_status = env.get_current_line_status()
        impacted_lines, impacted_subs = action.get_topological_impact(line_status)

        if np.sum(impacted_lines) > env._parameters.MAX_LINE_STATUS_CHANGED:
            ids = np.where(impacted_lines)[0]
            return False, IllegalAction(
                "More than {} line status affected by the action: {}"
                "".format(env.parameters.MAX_LINE_STATUS_CHANGED, ids)
            )

        if np.sum(impacted_subs) > env._parameters.MAX_SUB_CHANGED:
            ids = np.where(impacted_subs)[0]
            return False, IllegalAction(
                "More than {} substation affected by the action: {}"
                "".format(env.parameters.MAX_SUB_CHANGED, ids)
            )
        return True, None

    def can_use_simulate(self, nb_simulate_call_step, nb_simulate_call_episode, param):
        """
        Return (without raising) an exception describing the violated
        `obs.simulate` budget, or ``None`` when the budget is respected.
        A negative budget means "unlimited".
        """
        if 0 <= param.MAX_SIMULATE_PER_STEP < nb_simulate_call_step:
            return SimulateUsedTooMuchThisStep(
                f"attempt to use {nb_simulate_call_step} times `obs.simulate(...)` while the maximum allowed for this step is {param.MAX_SIMULATE_PER_STEP}"
            )
        if 0 <= param.MAX_SIMULATE_PER_EPISODE < nb_simulate_call_episode:
            return SimulateUsedTooMuchThisEpisode(
                f"attempt to use {nb_simulate_call_episode} times `obs.simulate(...)` while the maximum allowed for this episode is {param.MAX_SIMULATE_PER_EPISODE}"
            )
        return None
# === end of file: grid2op/Rules/LookParam.py ===