Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
Grid2Op | Grid2Op-master/grid2op/tests/test_issue_379.py | # Copyright (c) 2019-2022, RTE (https://www.rte-france.com)
# See AUTHORS.txt and https://github.com/rte-france/Grid2Op/pull/319
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
import unittest
import warnings
from grid2op.gym_compat import (GymEnv, GYM_AVAILABLE, GYMNASIUM_AVAILABLE)
import grid2op
CAN_TEST_ALL = True
if GYMNASIUM_AVAILABLE:
from gymnasium.utils.env_checker import check_env
from gymnasium.utils.env_checker import check_reset_return_type, check_reset_options, check_reset_seed
elif GYM_AVAILABLE:
from gym.utils.env_checker import check_env
from gym.utils.env_checker import check_reset_return_type, check_reset_options, check_reset_seed
else:
CAN_TEST_ALL = False
class Issue379Tester(unittest.TestCase):
    """Non-regression test for issue 379: the grid2op gym wrapper must pass
    the official gym / gymnasium environment checkers."""

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("l2rpn_case14_sandbox", test=True)
            self.gym_env = GymEnv(self.env)

    def tearDown(self) -> None:
        self.env.close()
        self.gym_env.close()
        return super().tearDown()

    def test_check_env(self):
        # nothing to check when neither gym nor gymnasium is installed
        if not CAN_TEST_ALL:
            return
        # run the dedicated "reset" checkers first, then the full env checker
        for checker in (check_reset_return_type,
                        check_reset_seed,
                        check_reset_options,
                        check_env):
            checker(self.gym_env)


if __name__ == "__main__":
    unittest.main()
| 1,742 | 33.86 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_issue_380.py | # Copyright (c) 2019-2022, RTE (https://www.rte-france.com)
# See AUTHORS.txt and https://github.com/rte-france/Grid2Op/pull/319
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
import unittest
import warnings
import numpy as np
import pdb
class Issue380Tester(unittest.TestCase):
    """Non-regression test for issue 380: ``act.limit_curtail_storage``
    must cap the curtailment so that the remaining generators can
    compensate, given a user-provided margin."""

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("l2rpn_case14_sandbox", test=True)
        self.env.seed(0)
        self.env.set_id(0)

    def tearDown(self) -> None:
        self.env.close()
        return super().tearDown()

    def _aux_check_margin(self, obs, act, margin, expected_mw, **kwargs):
        # apply the limitation and check: no storage modification, and the
        # total curtailed power (in MW) matches the expected value
        res, res_add_curtailed, res_add_storage = act.limit_curtail_storage(
            obs, margin, do_copy=True, **kwargs
        )
        assert np.sum(np.abs(res_add_storage)) == 0.
        assert abs(np.sum(res_add_curtailed * obs.gen_pmax) - expected_mw) <= 1e-4

    def test_limit_cs_margin(self):
        obs = self.env.reset()
        act = self.env.action_space({"curtail": [(2, 0.), (3, 0.), (4, 0.)]})
        # this action "cuts" 36.4 MW in total, and the grid has 30 MW of margin
        # margin is enough: only 6.4 MW needs to be limited
        self._aux_check_margin(obs, act, 0, 6.4)
        # margin is relatively low: 5 MW left, so 31.4 MW are limited
        self._aux_check_margin(obs, act, 25, 31.4)
        # margin is extremely low: almost everything is limited
        self._aux_check_margin(obs, act, 29.8, 36.2)
        self._aux_check_margin(obs, act, 29.89, 36.29, _tol_equal=0.1)
        # below the precision _tol_equal: everything is limited
        self._aux_check_margin(obs, act, 29.91, 36.4, _tol_equal=0.1)
        # margin is null: everything is limited
        self._aux_check_margin(obs, act, 30., 36.4, _tol_equal=0.1)
        # same checks with a tighter _tol_equal
        self._aux_check_margin(obs, act, 29.989, 36.389, _tol_equal=0.01)
        self._aux_check_margin(obs, act, 29.991, 36.4, _tol_equal=0.01)


if __name__ == "__main__":
    unittest.main()
| 3,716 | 51.352113 | 119 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_issue_389.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt and https://github.com/rte-france/Grid2Op/pull/319
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
import unittest
import warnings
import numpy as np
import pdb
import grid2op
import numpy as np
class Issue389Tester(unittest.TestCase):
    """Non-regression test for issue 389: voltage angles ("theta") must stay
    defined in the energy graph after disconnecting a line on a substation
    that was previously split on two busbars."""

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            # this needs to be tested with pandapower backend
            self.env = grid2op.make("rte_case5_example", test=True)
        self.env.seed(0)
        self.env.set_id(0)

    def test_issue(self):
        # first split substation 4 on two busbars
        split_sub = self.env.action_space({"set_bus": {"substations_id": [(4, (2, 1, 2))]}})
        obs, reward, done, info = self.env.step(split_sub)
        # then disconnect the origin side of line 7
        disco_line = self.env.action_space({"set_bus": {"lines_or_id": [(7, -1)]}})
        obs, reward, done, info = self.env.step(disco_line)
        assert not done
        assert not np.isnan(obs.theta_ex[-1])
        graph = obs.get_energy_graph()
        assert not np.isnan(graph.nodes[4]["theta"])
        # the edge attributes must agree with the values stored on its end nodes
        edge_04 = graph.edges[(0, 4)]
        assert edge_04["theta_or"] == graph.nodes[0]["theta"]
        assert edge_04["theta_ex"] == graph.nodes[4]["theta"]
        assert edge_04["v_or"] == graph.nodes[0]["v"]
        assert edge_04["v_ex"] == graph.nodes[4]["v"]


if __name__ == "__main__":
    unittest.main()
| 1,719 | 36.391304 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_issue_396.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt and https://github.com/rte-france/Grid2Op/pull/319
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
import unittest
import warnings
import os
import pdb
import tempfile
from grid2op.Runner import Runner
from grid2op.Episode import EpisodeReplay, EpisodeData
class Issue396Tester(unittest.TestCase):
    """Non-regression test for issue 396: replaying a stored episode and
    rendering it as a gif should not crash."""

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            # this needs to be tested with pandapower backend
            self.env = grid2op.make("rte_case5_example", test=True)
        self.env.seed(0)
        self.env.set_id(0)

    def test_gif(self):
        with tempfile.TemporaryDirectory() as path:
            # run one short episode and store it on disk
            runner = Runner(**self.env.get_params_for_runner())
            _ = runner.run(path_save=path,
                           nb_episode=1,
                           nb_process=1,
                           max_iter=10,
                           )
            # reload the first stored episode and replay it (gif generation)
            first_ep_path, first_ep_id = EpisodeData.list_episode(path)[0]
            replayer = EpisodeReplay(first_ep_path)
            replayer.replay_episode(episode_id=first_ep_id,
                                    gif_name=first_ep_id,
                                    display=False)


if __name__ == "__main__":
    unittest.main()
| 1,691 | 35.782609 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_issue_403.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
from grid2op.gym_compat import BoxGymActSpace, BoxGymObsSpace
import unittest
import warnings
import numpy as np
import pdb
class Issue403Tester(unittest.TestCase):
    """Non-regression test for issue 403: the "add"/"multiply" (actions) and
    "subtract"/"divide" (observations) normalization arguments of the box
    gym spaces must be taken into account."""

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            # this needs to be tested with pandapower backend
            self.env = grid2op.make("l2rpn_case14_sandbox", test=True)
        self.env.seed(0)
        self.env.set_id(0)

    def test_box_action_space(self):
        # only redispatching actions are considered here
        act_space = BoxGymActSpace(self.env.action_space,
                                   attr_to_keep=["redispatch"],
                                   add={"redispatch": [0.0, 0.0, 0.0]},  # Normalization part
                                   multiply={"redispatch": [5.0, 10.0, 15.0]}  # Normalization part
                                   )
        # after normalization, the exposed space is the [-1, 1] hypercube
        assert np.all(act_space.low == [-1, -1, -1])
        assert np.all(act_space.high == [1, 1, 1])
        # the underlying (un-normalized) bounds must be scaled accordingly
        red_low, red_high = act_space._dict_properties["redispatch"][:2]
        assert np.all(red_low == [-5., -10., -15.])
        assert np.all(red_high == [5, 10, 15])

    def test_box_obs_space(self):
        # only the target dispatch is considered here
        obs_space = BoxGymObsSpace(self.env.observation_space,
                                   attr_to_keep=["target_dispatch"],
                                   subtract={"target_dispatch": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]},  # Normalization part
                                   divide={"target_dispatch": [140., 120., 70., 70., 40., 100.]}  # Normalization part
                                   )
        # after normalization, the exposed space is the [-1, 1] hypercube
        assert np.all(obs_space.low == [-1, -1, -1, -1, -1, -1])
        assert np.all(obs_space.high == [1, 1, 1, 1, 1, 1])
        # the underlying (un-normalized) bounds must be scaled accordingly
        disp_low, disp_high = obs_space._dict_properties["target_dispatch"][:2]
        assert np.all(disp_low == [-140., -120., -70., -70., -40., -100.])
        assert np.all(disp_high == [140., 120., 70., 70., 40., 100.])


if __name__ == "__main__":
    unittest.main()
| 2,696 | 47.160714 | 123 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_issue_407.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
from grid2op.gym_compat import GymEnv
import unittest
import warnings
class CustomGym(GymEnv):
    """GymEnv subclass that counts how many times ``reset`` is called.

    Used by :class:`Issue407Tester` to check that user overrides of
    ``reset`` are properly invoked.
    """

    def __init__(self, env_init, shuffle_chronics=True, render_mode="rgb_array"):
        super().__init__(env_init, shuffle_chronics, render_mode)
        # number of times reset() has been invoked on this wrapper
        self._reset_called = 0

    def reset(self, *args, **kwargs):
        self._reset_called += 1
        # bug fix: propagate the parent's return value (the observation, or
        # the (observation, info) pair depending on the gym API version).
        # The original implementation dropped it and returned None.
        return super().reset(*args, **kwargs)
class Issue407Tester(unittest.TestCase):
    """Non-regression test for issue 407: a ``reset`` override in a GymEnv
    subclass must be called (exactly once) when the wrapper is reset."""

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            # this needs to be tested with pandapower backend
            self.env = grid2op.make("l2rpn_case14_sandbox", test=True)
        self.env.seed(0)
        self.env.set_id(0)

    def test_reset(self):
        wrapped = CustomGym(self.env)
        _ = wrapped.reset()
        # the overridden reset must have been invoked exactly once
        assert wrapped._reset_called == 1


if __name__ == "__main__":
    unittest.main()
| 1,413 | 32.666667 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_issue_418.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
from grid2op.gym_compat import GymEnv, GYMNASIUM_AVAILABLE
import unittest
import warnings
import numpy as np
from grid2op.gym_compat.utils import (check_gym_version, sample_seed,
_MAX_GYM_VERSION_RANDINT, GYM_VERSION)
class Issue418Tester(unittest.TestCase):
    """Non-regression test for issue 418: seeding the gym wrapper (either via
    the legacy ``seed(...)`` call or via ``reset(seed=...)``) must also seed
    the converted action space and observation space."""

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            # this needs to be tested with pandapower backend
            self.env = grid2op.make("l2rpn_case14_sandbox", test=True)
        self.env.seed(0)
        self.env.set_id(0)

    def test_seed(self):
        gymenv = GymEnv(self.env)
        # the expected sampled values depend on which gym flavour / version
        # is installed, hence the two branches below
        if GYM_VERSION <= _MAX_GYM_VERSION_RANDINT and not GYMNASIUM_AVAILABLE:
            # legacy gym, with old gym gym version
            gymenv.seed(42)
            obs = gymenv.reset()
            curt = np.array([1,1.,0.35566905,0.23095788,0.6338101,1])
            year = 1249
            day = 28
        else:
            # most recent gym API
            obs = gymenv.reset(seed=42)
            curt = np.array([1,1.,0.18852758,0.5537014,0.43770432,1])
            # NOTE(review): the assignment below overrides the one just above,
            # making it dead code — presumably the expected values were updated
            # after a library change; confirm and remove the stale line.
            curt = np.array([-1,-1.,0.18852758,0.5537014,0.43770432,-1])
            year = 571
            day = 9
            # year = 1887
            # day = 9
        # test that the seeding worked also in action space and observation space
        sampled_act = gymenv.action_space.sample()
        assert np.allclose(sampled_act['curtail'], curt), f"{sampled_act['curtail']}"
        sampled_obs = gymenv.observation_space.sample()
        assert sampled_obs["year"] == year, f'{sampled_obs["year"]}'
        assert sampled_obs["day"] == day, f'{sampled_obs["day"]}'


if __name__ == "__main__":
    unittest.main()
| 2,239 | 39 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_issue_433.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
import unittest
import warnings
import pdb
class Issue433Tester(unittest.TestCase):
    """Non-regression test for issue 433: building the energy graph after two
    consecutive line disconnections used to crash."""

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            # this needs to be tested with pandapower backend
            self.env = grid2op.make("l2rpn_case14_sandbox", test=True)
        self.env.seed(0)
        self.env.set_id(0)

    def tearDown(self) -> None:
        self.env.close()
        return super().tearDown()

    def test_disco_2_lines(self):
        disco_line_4 = self.env.action_space({"set_line_status": [(4, -1)]})
        disco_line_13 = self.env.action_space({"set_line_status": [(13, -1)]})
        obs = self.env.reset()
        # first disconnection: the graph can still be built
        obs, *_ = self.env.step(disco_line_4)
        graph = obs.get_energy_graph()
        # second disconnection: this call used to crash before the fix
        obs, *_ = self.env.step(disco_line_13)
        obs.get_energy_graph()


if __name__ == '__main__':
    unittest.main()
| 1,374 | 34.25641 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_l2rpn_idf_2023.py | # Copyright (c) 2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
from grid2op.gym_compat import GymEnv, BoxGymActSpace, BoxGymObsSpace, DiscreteActSpace, MultiDiscreteActSpace
from grid2op.l2rpn_utils import ActionIDF2023, ObservationIDF2023
from grid2op.Opponent import GeometricOpponentMultiArea
from grid2op.Reward import AlertReward
import unittest
import warnings
import numpy as np
import pdb
class L2RPNIDF2023Tester(unittest.TestCase):
    """End-to-end checks of the "l2rpn_idf_2023" environment: area-based
    legality rules, alert features, gym conversion sizes, opponent and
    maintenance configuration."""

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            # this needs to be tested with pandapower backend
            self.env = grid2op.make("l2rpn_idf_2023", test=True)
        self.env.seed(0)
        self.env.set_id(0)

    def tearDown(self) -> None:
        self.env.close()
        return super().tearDown()

    def legal_action_2subs(self):
        # NOTE(review): this method lacks the "test_" prefix, so unittest never
        # collects it — confirm whether it was disabled on purpose (compare with
        # "do_not_run_oom_error_test_act_space_alert" below) or should be
        # renamed "test_legal_action_2subs".
        # acting on two substations belonging to two *different* areas is legal
        act12 = self.env.action_space({"set_bus": {"substations_id": [(3, (1, 2, 1, 2, 1)), (33, (1, 2, 1, 2, 1, 2))]}})
        act23 = self.env.action_space({"set_bus": {"substations_id": [(33, (1, 2, 1, 2, 1, 2)), (67, (1, 2, 1, 2))]}})
        act13 = self.env.action_space({"set_bus": {"substations_id": [(3, (1, 2, 1, 2, 1)), (67, (1, 2, 1, 2))]}})
        obs, reward, done, info = self.env.step(act12)
        assert not info["is_illegal"]
        self.env.reset()
        obs, reward, done, info = self.env.step(act13)
        assert not info["is_illegal"]
        self.env.reset()
        obs, reward, done, info = self.env.step(act23)
        assert not info["is_illegal"]
        self.env.reset()

    def test_illegal_action_2subs(self):
        # illegal actions
        # acting on two substations of the *same* area in one step is illegal
        act11 = self.env.action_space({"set_bus": {"substations_id": [(3, (1, 2, 1, 2, 1)), (4, (1, 2, 1, 2, 1))]}})
        act22 = self.env.action_space({"set_bus": {"substations_id": [(33, (1, 2, 1, 2, 1, 2)), (36, (1, 2, 1, 2, 1, 2))]}})
        act33 = self.env.action_space({"set_bus": {"substations_id": [(67, (1, 2, 1, 2)), (68, (1, 2, 1, 2, 1, 2, 1)) ]}})
        obs, reward, done, info = self.env.step(act11)
        assert info["is_illegal"]
        self.env.reset()
        obs, reward, done, info = self.env.step(act22)
        assert info["is_illegal"]
        self.env.reset()
        obs, reward, done, info = self.env.step(act33)
        assert info["is_illegal"]
        self.env.reset()

    def test_legal_action_2lines(self):
        # legal actions
        # acting on two lines belonging to two different areas is legal
        act12 = self.env.action_space({"set_line_status": [(0, -1), (110, -1)]})
        act23 = self.env.action_space({"set_line_status": [(110, -1), (3, -1)]})
        act13 = self.env.action_space({"set_line_status": [(0, -1), (3, -1)]})
        obs, reward, done, info = self.env.step(act12)
        assert not info["is_illegal"]
        self.env.reset()
        obs, reward, done, info = self.env.step(act13)
        assert not info["is_illegal"]
        self.env.reset()
        obs, reward, done, info = self.env.step(act23)
        assert not info["is_illegal"]
        self.env.reset()

    def test_other_rewards(self):
        # the environment must expose an AlertReward under "other_rewards"
        assert "alert" in self.env.other_rewards
        assert isinstance(self.env.other_rewards["alert"].template_reward, AlertReward)

    def test_illegal_action_2lines(self):
        # illegal actions
        # acting on two lines of the same area in one step is illegal
        act11 = self.env.action_space({"set_line_status": [(0, -1), (1, -1)]})
        act22 = self.env.action_space({"set_line_status": [(110, -1), (111, -1)]})
        act33 = self.env.action_space({"set_line_status": [(3, -1), (7, -1)]})
        obs, reward, done, info = self.env.step(act11)
        assert info["is_illegal"]
        self.env.reset()
        obs, reward, done, info = self.env.step(act22)
        assert info["is_illegal"]
        self.env.reset()
        obs, reward, done, info = self.env.step(act33)
        assert info["is_illegal"]
        self.env.reset()

    def test_to_gym(self):
        """Check the gym conversion: alert keys present, and space sizes."""
        env_gym = GymEnv(self.env)
        # every alert-related attribute must be exported in the obs space
        for k in ["active_alert",
                  "attack_under_alert",
                  "time_since_last_alert",
                  "alert_duration",
                  "total_number_of_alert",
                  "time_since_last_attack",
                  "was_alert_used_after_attack"]:
            assert k in env_gym.observation_space.spaces, f"missing key {k} in obs space"
        assert "raise_alert" in env_gym.action_space.spaces, f"missing key raise_alert in act space"
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            # expected sizes below are environment-specific magic numbers,
            # checked against the grid description of "l2rpn_idf_2023"
            box_act = BoxGymActSpace(self.env.action_space,
                                     attr_to_keep=(
                                         "set_line_status",
                                         "change_line_status",
                                         "set_bus",
                                         "change_bus",
                                         "redispatch",
                                         "set_storage",
                                         "curtail",
                                         "raise_alert",
                                     ))
            assert box_act.shape[0] == 1543, f'{box_act.shape[0]} vs 1543'
            box_act2 = BoxGymActSpace(self.env.action_space)
            assert box_act2.shape[0] == 69, f'{box_act2.shape[0]} vs 69'
            box_obs = BoxGymObsSpace(self.env.observation_space)
            assert box_obs.shape[0] == 5125, f'{box_obs.shape[0]} vs 5125'
            disc_act = DiscreteActSpace(self.env.action_space)
            assert disc_act.n == 147878, f'{disc_act.n} vs 147878'
            multidisc_0 = MultiDiscreteActSpace(self.env.action_space)
            assert multidisc_0.shape[0] == 1543, f'{multidisc_0.shape[0]} vs 1543'
            multidisc_1 = MultiDiscreteActSpace(self.env.action_space, attr_to_keep=["raise_alert"])
            assert multidisc_1.shape[0] == 22, f'{multidisc_1.shape[0]} vs 22'
            multidisc_2 = MultiDiscreteActSpace(self.env.action_space, attr_to_keep=["sub_set_bus"])
            assert multidisc_2.shape[0] == 118, f'{multidisc_2.shape[0]} vs 118'
            # one entry per substation: number of valid topologies of that sub
            assert np.array_equal(multidisc_2.nvec, [ 4, 4, 8, 10, 17, 4, 4, 14, 3,
                                                      1, 58, 254, 4, 4, 242, 4, 64, 6,
                                                      30, 4, 4, 4, 30, 8, 8, 4, 58,
                                                      4, 4, 9, 8, 32, 4, 30, 4, 4,
                                                      33, 5, 4, 30, 4, 114, 4, 4, 14,
                                                      14, 8, 4, 65506, 4, 8, 4, 4, 126,
                                                      14, 498, 4, 4, 506, 14, 16, 58, 3,
                                                      5, 16, 62, 4, 9, 64, 122, 5, 4,
                                                      1, 4, 32, 6, 1010, 4, 4, 1018, 3,
                                                      8, 14, 4, 62, 4, 1, 4, 64, 26,
                                                      4, 254, 4, 32, 4, 62, 4, 4, 4,
                                                      2034, 4, 4, 16, 14, 62, 8, 6, 4,
                                                      4, 16, 1, 1, 10, 4, 4, 1, 1,
                                                      4])

    def test_forecast_env(self):
        # the forecast env covers the forecast horizon plus the current step
        obs = self.env.reset()
        for_env = obs.get_forecast_env()
        assert for_env.max_episode_duration() == 13  # 12 + 1

    def test_correct_action_observation(self):
        """test the observation and action class"""
        obs = self.env.reset()
        act = self.env.action_space()
        assert isinstance(obs, ObservationIDF2023)
        assert isinstance(act, ActionIDF2023)
        assert obs.dim_alerts == 22
        assert act.dim_alerts == 22
        assert np.all(act.alertable_line_ids == [106, 93, 88, 162, 68, 117, 180, 160, 136, 141, 131, 121, 125,
                                                 126, 110, 154, 81, 43, 33, 37, 62, 61])

    def test_maintenance_attack(self):
        # test the attacks
        # the opponent is split per area, attacking only the alertable lines
        assert isinstance(self.env._oppSpace.opponent, GeometricOpponentMultiArea)
        opp = self.env._oppSpace.opponent
        assert len(opp.list_opponents) == 3
        line_attacked = []
        for sub_opp in opp.list_opponents:
            line_attacked += sub_opp._lines_ids
        assert np.all(line_attacked == [106, 93, 88, 162, 68, 117, 180, 160, 136, 141, 131, 121, 125,
                                        126, 110, 154, 81, 43, 33, 37, 62, 61])
        # test the maintenance
        time_series = self.env.chronics_handler.real_data.data
        time_series.line_to_maintenance
        assert time_series.line_to_maintenance == {'21_22_93', '93_95_43', '80_79_175', '88_91_33', '41_48_131', '62_58_180',
                                                   '26_31_106', '62_63_160', '44_45_126', '48_53_141', '34_35_110',
                                                   '74_117_81', '12_14_68', '39_41_121', '54_58_154', '17_18_88',
                                                   '91_92_37', '4_10_162', '43_44_125', '48_50_136', '29_37_117'}

    def test_was_alert_used_after_attack(self):
        # advance to just before the (seeded, hence deterministic) attack
        self.env.seed(0)
        obs = self.env.reset()
        for i in range(13):
            obs, reward, done, info = self.env.step(self.env.action_space())
        act = self.env.action_space()
        obs, reward, done, info = self.env.step(act)  # an attack at this step
        assert info["opponent_attack_line"] is not None
        # count 12 steps
        for i in range(12):
            obs, reward, done, info = self.env.step(self.env.action_space())
        # surviving the whole alert window counts as "alert used"
        assert obs.was_alert_used_after_attack[0] == 1

    def test_alertreward_counted_only_once_per_attack(self):
        self.env.seed(0)
        obs = self.env.reset()
        for i in range(13):
            obs, reward, done, info = self.env.step(self.env.action_space())
        act = self.env.action_space()
        obs, reward, done, info = self.env.step(act)  # an attack at this step
        assert info["opponent_attack_line"] is not None
        # during the alert window, the alert reward must stay at 0
        for i in range(11):
            obs, reward, done, info = self.env.step(self.env.action_space())
            assert info["rewards"]["alert"] == 0, f"error for step {i}"
            assert obs.was_alert_used_after_attack[0] == 0
        obs, reward, done, info = self.env.step(self.env.action_space())  # end of the time window
        # the reward is granted exactly once, at the end of the window...
        assert obs.was_alert_used_after_attack[0] == 1
        assert info["rewards"]["alert"] != 0
        # ... and never again afterwards
        for i in range(15):
            obs, reward, done, info = self.env.step(self.env.action_space())
            assert info["rewards"]["alert"] == 0, f"error for step {i}"
            assert obs.was_alert_used_after_attack[0] == 0, f"error for step {i}"

    def do_not_run_oom_error_test_act_space_alert(self):
        # deliberately not named "test_..." so unittest does not collect it:
        # enumerating all unitary alert actions no longer crashes but
        # exhausts memory
        # this crashed
        all_act = self.env.action_space.get_all_unitary_alert(self.env.action_space)
        # bug is fixed but OOM error !


if __name__ == '__main__':
    unittest.main()
| 12,053 | 48.809917 | 125 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_legacygym_compat.py | # Copyright (c) 2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from _aux_test_gym_compat import (GYM_AVAILABLE,
_AuxTestGymCompatModule,
_AuxTestBoxGymObsSpace,
_AuxTestBoxGymActSpace,
_AuxTestMultiDiscreteGymActSpace,
_AuxTestDiscreteGymActSpace,
_AuxTestAllGymActSpaceWithAlarm,
_AuxTestGOObsInRange)
import unittest
class AuxilliaryForTestLegacyGym:
    """Mixin providing the legacy-gym flavour of every grid2op gym-compat
    class to the generic test suites defined in ``_aux_test_gym_compat``.

    Each ``_aux_*_cls`` method returns the class to use; imports are done
    lazily inside the methods so that merely importing this module does not
    require gym to be installed.
    """

    def _aux_GymEnv_cls(self):
        from grid2op.gym_compat import GymEnv_Modern
        return GymEnv_Modern

    def _aux_ContinuousToDiscreteConverter_cls(self):
        from grid2op.gym_compat import ContinuousToDiscreteConverterLegacyGym
        return ContinuousToDiscreteConverterLegacyGym

    def _aux_ScalerAttrConverter_cls(self):
        from grid2op.gym_compat import ScalerAttrConverterLegacyGym
        return ScalerAttrConverterLegacyGym

    def _aux_MultiToTupleConverter_cls(self):
        from grid2op.gym_compat import MultiToTupleConverterLegacyGym
        return MultiToTupleConverterLegacyGym

    def _aux_BoxGymObsSpace_cls(self):
        from grid2op.gym_compat import BoxLegacyGymObsSpace
        return BoxLegacyGymObsSpace

    def _aux_BoxGymActSpace_cls(self):
        from grid2op.gym_compat import BoxLegacyGymActSpace
        return BoxLegacyGymActSpace

    def _aux_MultiDiscreteActSpace_cls(self):
        from grid2op.gym_compat import MultiDiscreteActSpaceLegacyGym
        return MultiDiscreteActSpaceLegacyGym

    def _aux_DiscreteActSpace_cls(self):
        from grid2op.gym_compat import DiscreteActSpaceLegacyGym
        return DiscreteActSpaceLegacyGym

    # the methods below return the raw gym space classes; they implicitly
    # return None when gym is not installed (tests are skipped in that case,
    # see _skip_if_no_gym)
    def _aux_Box_cls(self):
        if GYM_AVAILABLE:
            from gym.spaces import Box
            return Box

    def _aux_MultiDiscrete_cls(self):
        if GYM_AVAILABLE:
            from gym.spaces import MultiDiscrete
            return MultiDiscrete

    def _aux_Discrete_cls(self):
        if GYM_AVAILABLE:
            from gym.spaces import Discrete
            return Discrete

    def _aux_Tuple_cls(self):
        if GYM_AVAILABLE:
            from gym.spaces import Tuple
            return Tuple

    def _aux_Dict_cls(self):
        if GYM_AVAILABLE:
            from gym.spaces import Dict
            return Dict

    def _skip_if_no_gym(self):
        if not GYM_AVAILABLE:
            self.skipTest("Gym is not available")
# Concrete test classes: each one runs a generic gym-compat test suite
# against the legacy-gym implementations provided by the mixin above.
class TestLegacyGymCompatModule(_AuxTestGymCompatModule, AuxilliaryForTestLegacyGym, unittest.TestCase):
    """Generic gym-compat module tests, legacy gym flavour."""
    pass

class TestBoxLegacyGymObsSpace(_AuxTestBoxGymObsSpace, AuxilliaryForTestLegacyGym, unittest.TestCase):
    """Box observation space tests, legacy gym flavour."""
    pass

class TestBoxLegacyGymActSpace(_AuxTestBoxGymActSpace, AuxilliaryForTestLegacyGym, unittest.TestCase):
    """Box action space tests, legacy gym flavour."""
    pass

class TestMultiDiscreteLegacyGymActSpace(_AuxTestMultiDiscreteGymActSpace, AuxilliaryForTestLegacyGym, unittest.TestCase):
    """MultiDiscrete action space tests, legacy gym flavour."""
    pass

class TestDiscreteLegacyGymActSpace(_AuxTestDiscreteGymActSpace, AuxilliaryForTestLegacyGym, unittest.TestCase):
    """Discrete action space tests, legacy gym flavour."""
    pass

class TestAllLegacyGymActSpaceWithAlarm(_AuxTestAllGymActSpaceWithAlarm, AuxilliaryForTestLegacyGym, unittest.TestCase):
    """Alarm-related action space tests, legacy gym flavour."""
    pass

class TestGOObsInRangeLegacyGym(_AuxTestGOObsInRange, AuxilliaryForTestLegacyGym, unittest.TestCase):
    """Observation in-range tests, legacy gym flavour."""
    pass

if __name__ == "__main__":
    unittest.main()
| 3,912 | 35.570093 | 122 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_limit_curtail.py | # Copyright (c) 2019-2022, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import warnings
import grid2op
import numpy as np
from lightsim2grid import LightSimBackend
import pdb
import unittest
class TestLimitAction(unittest.TestCase):
def _aux_reset_env(self):
self.env.seed(self.seed_)
self.env.set_id(self.scen_nm)
return self.env.reset()
def setUp(self) -> None:
self.seed_ = 0
self.scen_nm = "2050-02-14_0"
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
self.env = grid2op.make("l2rpn_wcci_2022_dev",
test=True,
backend=LightSimBackend())
self.act = self.env.action_space()
tmp_ = np.zeros(self.env.n_gen, dtype=float) -1
tmp_[self.env.gen_renewable] = 0.
self.act.curtail = tmp_
self.act_stor = self.env.action_space()
self.act_stor.storage_p = self.act_stor.storage_max_p_absorb
tmp_ = np.zeros(self.env.n_gen, dtype=float) -1
tmp_[self.env.gen_renewable] = 0.17
self.act_stor.curtail = tmp_
def tearDown(self) -> None:
self.env.close()
def test_curtailment_limitup(self):
"""test the action is indeed "capped" when there is too much curtailment,
eg when the available generators could not increase their power too much
to compensate the fall of renewable energy.
"""
# for curtailment:
self._aux_reset_env()
obs, reward, done, info = self.env.step(self.act)
assert done
assert info["exception"]
obs = self._aux_reset_env()
act2, *_ = self.act.limit_curtail_storage(obs, margin=0., do_copy=True) # not enough "margin"
obs, reward, done, info = self.env.step(act2)
assert done
assert info["exception"]
obs = self._aux_reset_env()
act3, *_ = self.act.limit_curtail_storage(obs, margin=15., do_copy=True) # not enough "margin"
obs, reward, done, info = self.env.step(act3)
assert not done
assert not info["exception"]
def test_storage_limitup(self):
"""test when the storage consumption is too much for the generator to compensate"""
# for storage (I need to add curtailment otherwise i don't have enough "juice")
obs = self._aux_reset_env()
obs, reward, done, info = self.env.step(self.act_stor)
assert done
assert info["exception"]
obs = self._aux_reset_env()
act4, *_ = self.act_stor.limit_curtail_storage(obs, margin=20., do_copy=True) # not enough "margin"
obs, reward, done, info = self.env.step(act4)
assert done
assert info["exception"]
obs = self._aux_reset_env()
act5, *_ = self.act_stor.limit_curtail_storage(obs, margin=25., do_copy=True)
obs, reward, done, info = self.env.step(act5)
assert not done
assert not info["exception"]
def aux_test_margin_increase_cut(self, action):
obs = self._aux_reset_env()
act5, add_curtailed_5, add_storage_5 = action.limit_curtail_storage(obs, margin=5., do_copy=True)
act10, add_curtailed_10, add_storage_10 = action.limit_curtail_storage(obs, margin=10., do_copy=True)
act15, add_curtailed_15, add_storage_15 = action.limit_curtail_storage(obs, margin=15., do_copy=True)
act20, add_curtailed_20, add_storage_20 = action.limit_curtail_storage(obs, margin=20., do_copy=True)
act25, add_curtailed_25, add_storage_25 = action.limit_curtail_storage(obs, margin=25., do_copy=True)
act30, add_curtailed_30, add_storage_30 = action.limit_curtail_storage(obs, margin=30., do_copy=True)
assert np.all(add_curtailed_30 >= add_curtailed_25)
assert np.any(add_curtailed_30 > add_curtailed_25)
assert np.all(add_curtailed_25 >= add_curtailed_20)
assert np.any(add_curtailed_25 > add_curtailed_20)
assert np.all(add_curtailed_20 >= add_curtailed_15)
assert np.any(add_curtailed_20 > add_curtailed_15)
assert np.all(add_curtailed_15 >= add_curtailed_10)
assert np.any(add_curtailed_15 > add_curtailed_10)
assert np.all(add_curtailed_10 >= add_curtailed_5)
assert np.any(add_curtailed_10 > add_curtailed_5)
if np.any(action._storage_power != 0.):
assert np.all(-add_storage_30 >= -add_storage_25)
assert np.any(-add_storage_30 > -add_storage_25)
assert np.all(-add_storage_25 >= -add_storage_20)
assert np.any(-add_storage_25 > -add_storage_20)
assert np.all(-add_storage_20 >= -add_storage_15)
assert np.any(-add_storage_20 > -add_storage_15)
assert np.all(-add_storage_15 >= -add_storage_10)
assert np.any(-add_storage_15 > -add_storage_10)
assert np.all(-add_storage_10 >= -add_storage_5)
assert np.any(-add_storage_10 > -add_storage_5)
def test_margin_increase_cut(self):
    """test that if I increase the "margin=..." it does increase the amount of MW removed"""
    # run the check both for the curtailment-only action and the one with storage
    for action in (self.act, self.act_stor):
        self.aux_test_margin_increase_cut(action)
def _aux_prep_env_for_tests_down(self):
    """Drive the env through three increasingly strong curtailment steps.

    Applies curtailment ratios 0.15, 0.09 then 0.04 on every renewable
    generator (one step each), asserting each step succeeds, and returns
    the observation after the last step.
    """
    self._aux_reset_env()
    obs = None
    for ratio in (0.15, 0.09, 0.04):
        # -1 means "do not change" for non-renewable generators
        limit = np.zeros(self.env.n_gen, dtype=float) - 1
        limit[self.env.gen_renewable] = ratio
        act = self.env.action_space()
        act.curtail = limit
        obs, reward, done, info = self.env.step(act)
        assert not done
        assert not info["exception"]
    return obs
def test_curtailment_limitdown(self):
    """test the action is indeed "capped" when there is not enough curtailment,
    eg when the available generators could not decrease their power too much
    to compensate the increase of renewable energy.
    """
    # ratio 1.0 on every renewable releases all curtailment at once,
    # presumably too big a jump for the other generators to absorb — see docstring
    act_too_much = self.env.action_space()
    tmp_ = np.zeros(self.env.n_gen, dtype=float) -1
    tmp_[self.env.gen_renewable] = 1.
    act_too_much.curtail = tmp_
    # for curtailment:
    self._aux_prep_env_for_tests_down()
    obs, reward, done, info0 = self.env.step(act_too_much)  # If i do this it crashes
    assert done
    assert info0["exception"]
    obs = self._aux_prep_env_for_tests_down()
    act5, *_ = act_too_much.limit_curtail_storage(obs, margin=15., do_copy=True)  # not enough "margin" => it crashes
    obs, reward, done, info = self.env.step(act5)
    assert done
    assert info["exception"]
    obs = self._aux_prep_env_for_tests_down()
    act6, *_ = act_too_much.limit_curtail_storage(obs, margin=20., do_copy=True)  # "just enough" "margin" => it passes
    obs, reward, done, info = self.env.step(act6)
    assert not done
    assert not info["exception"]
def test_storage_limitdown(self):
    """test the action is indeed "capped" when there is not enough storage
    eg when the available generators could not decrease their power too much
    to compensate the increase of storage power (and curtailment because storage unit is too weak on its own).
    """
    # ask maximum storage production plus a curtailment change at the same time
    act_too_much = self.env.action_space()
    tmp_ = np.zeros(self.env.n_gen, dtype=float) -1
    tmp_[self.env.gen_renewable] = 0.06
    act_too_much.storage_p = - act_too_much.storage_max_p_prod
    act_too_much.curtail = tmp_
    # for storage:
    self._aux_prep_env_for_tests_down()
    obs, reward, done, info0 = self.env.step(act_too_much)  # If i do this it crashes
    assert done
    assert info0["exception"]
    obs = self._aux_prep_env_for_tests_down()
    act7, *_ = act_too_much.limit_curtail_storage(obs, margin=5., do_copy=True)  # not enough "margin" => it crashes
    obs, reward, done, info = self.env.step(act7)
    assert done
    assert info["exception"]
    obs = self._aux_prep_env_for_tests_down()
    act8, *_ = act_too_much.limit_curtail_storage(obs, margin=10., do_copy=True)  # "just enough" "margin" => it passes
    obs, reward, done, info = self.env.step(act8)
    assert not done
    assert not info["exception"]
# allow running this test file directly
if __name__ == "__main__":
    unittest.main()
| 9,379 | 42.425926 | 123 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_multi_steps_env.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
import unittest
import warnings
import numpy as np
import copy
import pdb
import os
from grid2op.tests.helper_path_test import *
import grid2op
import numpy as np
class ForecastEnvTester(unittest.TestCase):
    """Tests for ``obs.get_forecast_env()``: its observations must match chained
    ``obs.simulate`` results, and it must inherit (and correctly evolve) soft
    overflows, cooldowns, maintenance and parameters from the real env.
    """

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            # this needs to be tested with pandapower backend
            self.env = grid2op.make(os.path.join(PATH_DATA_TEST, "5bus_example_forecasts"), test=True)
        self.env.seed(0)
        self.env.set_id(0)
        # "do nothing" action, reused by most tests
        self.dn = self.env.action_space()

    def _check_ok(self, f_obs, obs, h):
        # the forecast-env observation at horizon h must equal obs.simulate at the same horizon
        sim_obs, *_ = obs.simulate(self.dn, h)
        self._check_obs(sim_obs, f_obs, h)

    def _check_obs(self, sim_obs, f_obs, h):
        # element-wise equality of the main physical quantities
        assert np.all(sim_obs.load_p == f_obs.load_p), f"error for h={h}"
        assert np.all(sim_obs.load_q == f_obs.load_q), f"error for h={h}"
        assert np.all(sim_obs.gen_p == f_obs.gen_p), f"error for h={h}"
        assert np.all(sim_obs.gen_q == f_obs.gen_q), f"error for h={h}"
        assert np.all(sim_obs.rho == f_obs.rho), f"error for h={h}"

    def test_when_do_nothing(self):
        """Forecast env matches simulate at horizons 0..12 with do-nothing actions."""
        obs = self.env.reset()
        forecast_env = obs.get_forecast_env()
        f_obs = forecast_env.reset()
        self._check_ok(f_obs, obs, 0)
        for h in range(12):
            f_obs, *_ = forecast_env.step(self.dn)
            self._check_ok(f_obs, obs, h + 1)

    def test_soft_overflow(self):
        # the forecasted env should start with the same "values" for cooldowns, soft overflows etc.
        # get ready for the soft overflow
        a_or_first = np.array([442.308, 198.55365, 116.50534, 93.63006,
                               442.2703 , 110.96754, 110.96754, 92.05039])
        th_lim = a_or_first * 2.
        th_lim[5] /= 2.5  # put line 5 just above its thermal limit
        self.env.set_thermal_limit(th_lim)
        param = self.env.parameters
        param.NO_OVERFLOW_DISCONNECTION = False
        param.NB_TIMESTEP_RECONNECTION = 4
        self.env.change_parameters(param)
        self.env.change_forecast_parameters(param)
        obs = self.env.reset()
        obs, *_ = self.env.step(self.dn)
        assert obs.timestep_overflow[5] == 1
        forecast_env = obs.get_forecast_env()
        f_obs = forecast_env.reset()
        # the overflow counter carries over into the forecast env
        assert f_obs.timestep_overflow[5] == 1
        f_obs2, *_ = forecast_env.step(self.dn)
        assert f_obs2.timestep_overflow[5] == 2
        f_obs3, *_ = forecast_env.step(self.dn)
        # third overflowed step: the soft-overflow protection disconnects the line
        assert f_obs3.timestep_overflow[5] == 0
        assert not f_obs3.line_status[5]
        assert f_obs3.time_before_cooldown_line[5] == 4
        f_obs4, *_ = forecast_env.step(self.dn)
        assert not f_obs4.line_status[5]
        assert f_obs4.time_before_cooldown_line[5] == 3

    def test_cooldown(self):
        """Line cooldowns set in the real env are inherited and decremented in the forecast env."""
        act = self.env.action_space({"set_line_status": [(5, -1)]})
        un_act = self.env.action_space({"set_line_status": [(5, +1)]})
        obs = self.env.reset()
        obs, *_ = self.env.step(act)
        assert obs.time_before_cooldown_line[5] == 3
        forecast_env = obs.get_forecast_env()
        f_obs = forecast_env.reset()
        assert f_obs.time_before_cooldown_line[5] == 3
        f_obs2, *_ = forecast_env.step(self.dn)
        assert f_obs2.time_before_cooldown_line[5] == 2
        f_obs3, *_ = forecast_env.step(self.dn)
        assert f_obs3.time_before_cooldown_line[5] == 1
        f_obs4, r, done, info = forecast_env.step(un_act)
        assert f_obs4.time_before_cooldown_line[5] == 0
        assert info["is_illegal"]  # because cooldown is 1 when i took the action
        # now I can reco
        f_obs5, r, done, info = forecast_env.step(un_act)
        assert f_obs5.time_before_cooldown_line[5] == 3
        assert not info["is_illegal"]  # cooldown reached 0, so the reconnection is now legal
        assert f_obs5.line_status[5]

    def test_maintenance(self):
        """Scheduled maintenance info is propagated to the forecast env and evolves with its steps."""
        reco = self.env.action_space({"set_line_status": [(5, +1)]})
        obs = self.env.reset()  # no maintenance
        obs = self.env.reset()  # maintenance
        assert obs.time_next_maintenance[5] == 6
        assert obs.duration_next_maintenance[5] == 4
        forecast_env = obs.get_forecast_env()
        f_obs = forecast_env.reset()
        assert f_obs.time_next_maintenance[5] == 6
        assert f_obs.duration_next_maintenance[5] == 4
        f_obs1, *_ = forecast_env.step(self.dn)
        f_obs2, *_ = forecast_env.step(self.dn)
        f_obs3, *_ = forecast_env.step(self.dn)
        f_obs4, *_ = forecast_env.step(self.dn)
        f_obs5, *_ = forecast_env.step(self.dn)
        f_obs6, *_ = forecast_env.step(self.dn)
        # maintenance starts: line disconnected, cooldown set for the whole duration
        assert f_obs6.time_next_maintenance[5] == 0
        assert f_obs6.duration_next_maintenance[5] == 4
        assert f_obs6.time_before_cooldown_line[5] == 4
        assert not f_obs6.line_status[5]
        f_obs7, *_ = forecast_env.step(self.dn)
        assert f_obs7.time_next_maintenance[5] == 0
        assert f_obs7.duration_next_maintenance[5] == 3
        assert f_obs7.time_before_cooldown_line[5] == 3
        assert not f_obs7.line_status[5]
        f_obs8, *_ = forecast_env.step(self.dn)
        f_obs9, *_ = forecast_env.step(self.dn)
        # I cannot reco yet
        f_obs10, r, d, info = forecast_env.step(reco)
        assert f_obs10.time_next_maintenance[5] == -1
        assert f_obs10.duration_next_maintenance[5] == 0
        assert f_obs10.time_before_cooldown_line[5] == 0
        assert not f_obs10.line_status[5]
        assert info["is_illegal"]
        f_obs11, r, d, info = forecast_env.step(reco)
        assert f_obs11.time_next_maintenance[5] == -1
        assert f_obs11.duration_next_maintenance[5] == 0
        assert f_obs11.time_before_cooldown_line[5] == 3  # because i could act
        assert f_obs11.line_status[5]
        assert not info["is_illegal"]

    def test_with_actions(self):
        """Forecast env and chained obs.simulate agree when real (non do-nothing) actions are taken."""
        disco = self.env.action_space({"set_line_status": [(5, -1)]})
        reco = self.env.action_space({"set_line_status": [(5, 1)]})
        change_bus = self.env.action_space({"set_bus": {"substations_id": [(0, [1, 2, 1, 2, 1, 2])]}})
        obs = self.env.reset()  # no maintenance
        forecast_env = obs.get_forecast_env()
        f_obs1, *_ = forecast_env.step(disco)
        sim_obs1, *_ = obs.simulate(disco)
        self._check_obs(f_obs1, sim_obs1, 1)
        assert np.all(f_obs1.time_before_cooldown_line == sim_obs1.time_before_cooldown_line)
        f_obs2, *_ = forecast_env.step(self.dn)
        sim_obs2, *_ = sim_obs1.simulate(self.dn)
        self._check_obs(f_obs2, sim_obs2, 2)
        f_obs3, f_r, f_d, f_info = forecast_env.step(change_bus)
        sim_obs3, s_r, s_d, s_info = sim_obs2.simulate(change_bus)
        assert not f_d
        assert not s_d
        self._check_obs(f_obs3, sim_obs3, 3)
        assert np.all(f_obs3.time_before_cooldown_line == sim_obs3.time_before_cooldown_line)
        assert np.all(f_obs3.time_before_cooldown_sub == sim_obs3.time_before_cooldown_sub)
        f_obs4, *_ = forecast_env.step(self.dn)
        sim_obs4, *_ = sim_obs3.simulate(self.dn)
        self._check_obs(f_obs4, sim_obs4, 4)
        assert np.all(f_obs4.time_before_cooldown_line == sim_obs4.time_before_cooldown_line)
        assert np.all(f_obs4.time_before_cooldown_sub == sim_obs4.time_before_cooldown_sub)
        f_obs5, *_ = forecast_env.step(reco)
        sim_obs5, *_ = sim_obs4.simulate(reco)
        self._check_obs(f_obs5, sim_obs5, 1)
        assert np.all(f_obs5.time_before_cooldown_line == sim_obs5.time_before_cooldown_line)
        assert np.all(f_obs5.time_before_cooldown_sub == sim_obs5.time_before_cooldown_sub)

    def _aux_equal_tuple(self, ref, other):
        # element-wise comparison of two tuples of arrays (as returned by backend *_info())
        assert len(ref) == len(other)
        for i, (el_ref, el_other) in enumerate(zip(ref, other)):
            assert np.all(el_ref == el_other), f"error for arrays {i}"

    def test_simulate_does_not_impact_reality(self):
        """Stepping a forecast env must leave the real backend (and the other copies) untouched."""
        disco = self.env.action_space({"set_line_status": [(5, -1)]})
        obs = self.env.reset()  # no maintenance
        forecast_env = obs.get_forecast_env()
        forecast_env_cpy = forecast_env.copy()
        forecast_env2 = obs.get_forecast_env()
        # snapshot the real backend state before any forecast step
        loads_init = copy.deepcopy(self.env.backend.loads_info())
        gens_init = copy.deepcopy(self.env.backend.generators_info())
        lines_or_init = copy.deepcopy(self.env.backend.lines_or_info())
        lines_ex_init = copy.deepcopy(self.env.backend.lines_ex_info())
        # backend is properly copied
        assert self.env.backend is not forecast_env.backend
        assert self.env.backend is not forecast_env2.backend
        assert forecast_env.backend is not forecast_env2.backend
        assert forecast_env.backend is not forecast_env_cpy.backend
        # I do an action in one of the simulate, it has only an impact there
        f_obs, *_ = forecast_env.step(self.dn)
        self._aux_equal_tuple(loads_init, self.env.backend.loads_info())
        self._aux_equal_tuple(gens_init, self.env.backend.generators_info())
        self._aux_equal_tuple(lines_or_init, self.env.backend.lines_or_info())
        self._aux_equal_tuple(lines_ex_init, self.env.backend.lines_ex_info())
        assert np.all(f_obs.load_p != forecast_env_cpy.get_obs().load_p)
        assert np.all(f_obs.load_p != forecast_env2.get_obs().load_p)
        # now try to do a real action
        f_obs2, *_ = forecast_env.step(disco)
        self._aux_equal_tuple(loads_init, self.env.backend.loads_info())
        self._aux_equal_tuple(gens_init, self.env.backend.generators_info())
        self._aux_equal_tuple(lines_or_init, self.env.backend.lines_or_info())
        self._aux_equal_tuple(lines_ex_init, self.env.backend.lines_ex_info())
        assert np.all(f_obs2.load_p != forecast_env_cpy.get_obs().load_p)
        assert np.all(f_obs2.load_p != forecast_env2.get_obs().load_p)
        # now try to do a real action
        f2_obs1, *_ = forecast_env2.step(disco)
        self._aux_equal_tuple(loads_init, self.env.backend.loads_info())
        self._aux_equal_tuple(gens_init, self.env.backend.generators_info())
        self._aux_equal_tuple(lines_or_init, self.env.backend.lines_or_info())
        self._aux_equal_tuple(lines_ex_init, self.env.backend.lines_ex_info())
        assert np.all(f2_obs1.load_p != forecast_env_cpy.get_obs().load_p)
        assert np.all(f2_obs1.load_p != forecast_env.get_obs().load_p)

    def test_right_parameters(self):
        # the forecasted env should start with the same "values" for cooldowns, soft overflows etc.
        assert self.env.parameters.NB_TIMESTEP_RECONNECTION == 10
        param = self.env.parameters
        param.NO_OVERFLOW_DISCONNECTION = False
        param.NB_TIMESTEP_RECONNECTION = 4
        self.env.change_parameters(param)
        obs = self.env.reset()
        assert self.env.parameters.NB_TIMESTEP_RECONNECTION == 4
        forecast_env = obs.get_forecast_env()
        f_obs = forecast_env.reset()
        # change_parameters affects only the real env: the forecast env keeps the defaults
        assert forecast_env.parameters.NB_TIMESTEP_RECONNECTION == 10
        self.env.change_forecast_parameters(param)
        obs2 = self.env.reset()
        forecast_env2 = obs2.get_forecast_env()
        assert forecast_env2.parameters.NB_TIMESTEP_RECONNECTION == 4
        param.NB_TIMESTEP_RECONNECTION = 17
        forecast_env2.change_parameters(param)
        obs3 = forecast_env2.reset()
        assert forecast_env2.parameters.NB_TIMESTEP_RECONNECTION == 17
# allow running this test file directly
if __name__ == "__main__":
    unittest.main()
| 12,288 | 43.687273 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_multi_steps_forecasts.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
import unittest
import warnings
import numpy as np
import pdb
import os
from grid2op.tests.helper_path_test import *
from grid2op.Exceptions import NoForecastAvailable
from grid2op.Chronics import MultifolderWithCache
import grid2op
import numpy as np
class MultiStepsForcaTester(unittest.TestCase):
    """Tests for multi-horizon forecasts: ``obs.simulate(..., time_step=h)`` with
    h up to 12, including chunked chronics, max_iter, caching, cooldowns and
    maintenance handling at each horizon.
    """

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(os.path.join(PATH_DATA_TEST, "5bus_example_forecasts"), test=True)
        self.env.seed(0)
        self.env.set_id(0)

    def aux_test_for_consistent(self, obs):
        # in this test env, the h-steps-ahead forecast is load_p + h (by construction)
        tmp_o_1, *_ = obs.simulate(self.env.action_space(),
                                   time_step=1)
        assert (obs.load_p + 1. == tmp_o_1.load_p).all()  # that's how I generated the forecast for this "env"
        tmp_o_2, *_ = obs.simulate(self.env.action_space(),
                                   time_step=2)
        assert (obs.load_p + 2. == tmp_o_2.load_p).all()  # that's how I generated the forecast for this "env"
        tmp_o_3, *_ = obs.simulate(self.env.action_space(),
                                   time_step=3)
        assert (obs.load_p + 3. == tmp_o_3.load_p).all()
        tmp_o_12, *_ = obs.simulate(self.env.action_space(),
                                    time_step=12)
        assert (obs.load_p + 12. == tmp_o_12.load_p).all()

    def test_can_do(self):
        """Forecasts are available (and consistent) for horizons up to 12, not beyond."""
        obs = self.env.reset()
        self.aux_test_for_consistent(obs)
        # should raise because there is no "13 steps ahead forecasts"
        with self.assertRaises(NoForecastAvailable):
            obs.simulate(self.env.action_space(),
                         time_step=13)
        # check it's still consistent
        obs, *_ = self.env.step(self.env.action_space())
        self.aux_test_for_consistent(obs)
        # check it's still consistent
        obs, *_ = self.env.step(self.env.action_space())
        self.aux_test_for_consistent(obs)

    def test_chunk_size(self):
        """Forecasts stay consistent when the chronics are read chunk by chunk."""
        self.env.set_chunk_size(1)
        obs = self.env.reset()
        self.aux_test_for_consistent(obs)
        # check it's still consistent
        obs, *_ = self.env.step(self.env.action_space())
        self.aux_test_for_consistent(obs)
        # check it's still consistent
        obs, *_ = self.env.step(self.env.action_space())
        self.aux_test_for_consistent(obs)
        # check it's still consistent
        obs, *_ = self.env.step(self.env.action_space())
        self.aux_test_for_consistent(obs)

    def test_max_iter(self):
        """Forecasts stay consistent up to (and including) the last step when max_iter is set."""
        max_iter = 4
        self.env.chronics_handler.set_max_iter(max_iter)
        obs = self.env.reset()
        self.aux_test_for_consistent(obs)
        # check it's still consistent
        obs, reward, done, info = self.env.step(self.env.action_space())
        self.aux_test_for_consistent(obs)
        obs, reward, done, info = self.env.step(self.env.action_space())
        self.aux_test_for_consistent(obs)
        obs, reward, done, info = self.env.step(self.env.action_space())
        self.aux_test_for_consistent(obs)
        obs, reward, done, info = self.env.step(self.env.action_space())
        self.aux_test_for_consistent(obs)
        assert done

    def test_cache(self):
        """Forecasts stay consistent with MultifolderWithCache chronics."""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = grid2op.make(os.path.join(PATH_DATA_TEST, "5bus_example_forecasts"),
                               test=True,
                               chronics_class=MultifolderWithCache)
        env.seed(0)
        env.set_id(0)
        env.chronics_handler.reset()
        # NOTE(review): the cached ``env`` above is built but the assertions below
        # still use ``self.env`` — possibly intended to exercise ``env``; confirm.
        obs = self.env.reset()
        self.aux_test_for_consistent(obs)
        # check it's still consistent
        obs, reward, done, info = self.env.step(self.env.action_space())
        self.aux_test_for_consistent(obs)
        obs, reward, done, info = self.env.step(self.env.action_space())
        self.aux_test_for_consistent(obs)
        obs, reward, done, info = self.env.step(self.env.action_space())
        self.aux_test_for_consistent(obs)
        obs, reward, done, info = self.env.step(self.env.action_space())
        self.aux_test_for_consistent(obs)

    def test_cooldowns(self):
        """Substation cooldowns are applied / decremented correctly at each simulated horizon."""
        obs = self.env.reset()
        dn = self.env.action_space()
        act = self.env.action_space({"set_bus": {"substations_id": [(2, (2, 1, 2, 1))]}})
        # check it properly applies in "simulate"
        tmp_o_1, *_ = obs.simulate(act, time_step=1)
        assert tmp_o_1.time_before_cooldown_sub[2] == 3
        tmp_o_2, *_ = obs.simulate(act, time_step=2)
        assert tmp_o_2.time_before_cooldown_sub[2] == 3
        tmp_o_3, *_ = obs.simulate(act, time_step=3)
        assert tmp_o_3.time_before_cooldown_sub[2] == 3
        tmp_o_12, *_ = obs.simulate(act, time_step=12)
        assert tmp_o_12.time_before_cooldown_sub[2] == 3
        # check if a cooldown exists it is properly changed in simulate
        obs2, reward, done, info = self.env.step(act)
        tmp_o_1_2, *_ = obs2.simulate(dn, time_step=1)
        assert tmp_o_1_2.time_before_cooldown_sub[2] == 2
        tmp_o_2_2, *_ = obs2.simulate(dn, time_step=2)
        assert tmp_o_2_2.time_before_cooldown_sub[2] == 1
        tmp_o_3_2, *_ = obs2.simulate(dn, time_step=3)
        assert tmp_o_3_2.time_before_cooldown_sub[2] == 0
        tmp_o_12_2, *_ = obs2.simulate(dn, time_step=12)
        assert tmp_o_12_2.time_before_cooldown_sub[2] == 0
        # check if a cooldown exists it is properly changed in simulate
        obs3, reward, done, info = self.env.step(dn)
        tmp_o_1_3, *_ = obs3.simulate(dn, time_step=1)
        assert tmp_o_1_3.time_before_cooldown_sub[2] == 1
        tmp_o_2_3, *_ = obs3.simulate(dn, time_step=2)
        assert tmp_o_2_3.time_before_cooldown_sub[2] == 0
        tmp_o_3_3, *_ = obs3.simulate(dn, time_step=3)
        assert tmp_o_3_3.time_before_cooldown_sub[2] == 0
        tmp_o_12_3, *_ = obs3.simulate(dn, time_step=12)
        assert tmp_o_12_3.time_before_cooldown_sub[2] == 0

    def test_maintenance(self):
        """Maintenance on line 5 is correctly "seen" by simulate at every horizon."""
        obs = self.env.reset()  # no maintenance
        obs = self.env.reset()  # maintenance
        dn = self.env.action_space()
        assert obs.time_next_maintenance[5] == 6
        assert obs.duration_next_maintenance[5] == 4
        # check it properly applies in "simulate"
        obs_1, *_ = self.env.step(dn)
        assert obs_1.time_next_maintenance[5] == 5
        assert obs.time_next_maintenance[5] == 6
        tmp_o_1, reward, done, info = obs.simulate(dn, time_step=1)
        assert not done
        assert tmp_o_1.time_next_maintenance[5] == 5
        obs_2, *_ = self.env.step(dn)
        assert obs_2.time_next_maintenance[5] == 4
        tmp_o_2, *_ = obs.simulate(dn, time_step=2)
        assert tmp_o_2.time_next_maintenance[5] == 4
        obs_3, *_ = self.env.step(dn)
        assert obs_3.time_next_maintenance[5] == 3
        tmp_o_3, *_ = obs.simulate(dn, time_step=3)
        assert tmp_o_3.time_next_maintenance[5] == 3
        obs_4, *_ = self.env.step(dn)
        assert obs_4.time_next_maintenance[5] == 2
        tmp_o_4, *_ = obs.simulate(dn, time_step=4)
        assert tmp_o_4.time_next_maintenance[5] == 2
        obs_5, *_ = self.env.step(dn)
        assert obs_5.time_next_maintenance[5] == 1
        tmp_o_5, *_ = obs.simulate(dn, time_step=5)
        assert tmp_o_5.time_next_maintenance[5] == 1
        # first corner case: line should be disconnected (first step)
        obs_6, *_ = self.env.step(dn)
        assert obs_6.time_next_maintenance[5] == 0
        assert obs_6.duration_next_maintenance[5] == 4
        tmp_o_6, *_ = obs.simulate(dn, time_step=6)
        assert tmp_o_6.time_next_maintenance[5] == 0
        assert tmp_o_6.duration_next_maintenance[5] == 4
        assert not tmp_o_6.line_status[5]
        # now the "duration next maintenance" should decrease of 1
        tmp_o_7, *_ = obs.simulate(dn, time_step=7)
        assert tmp_o_7.time_next_maintenance[5] == 0
        assert tmp_o_7.duration_next_maintenance[5] == 3
        assert not tmp_o_7.line_status[5]
        tmp_o_8, *_ = obs.simulate(dn, time_step=8)
        assert tmp_o_8.time_next_maintenance[5] == 0
        assert tmp_o_8.duration_next_maintenance[5] == 2
        assert not tmp_o_8.line_status[5]
        tmp_o_9, *_ = obs.simulate(dn, time_step=9)
        assert tmp_o_9.time_next_maintenance[5] == 0
        assert tmp_o_9.duration_next_maintenance[5] == 1
        assert not tmp_o_9.line_status[5]
        # second corner case: line should not be modified
        # maintenance is totally 'skiped' : forecast horizon is after maintenance
        # occured
        tmp_o_10, *_ = obs.simulate(dn, time_step=10)
        assert tmp_o_10.time_next_maintenance[5] == -1
        assert tmp_o_10.duration_next_maintenance[5] == 0
        assert tmp_o_10.line_status[5]
        tmp_o_12, *_ = obs.simulate(dn, time_step=12)
        assert tmp_o_12.time_next_maintenance[5] == -1
        assert tmp_o_12.duration_next_maintenance[5] == 0
        assert tmp_o_12.line_status[5]
class ChainSimulateTester(unittest.TestCase):
    """Tests for chained ``simulate`` calls: simulating from an observation that
    is itself the result of a previous ``simulate`` (independence of branches,
    cooldowns and maintenance across the chain).
    """

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(os.path.join(PATH_DATA_TEST, "5bus_example_forecasts"), test=True)
        self.env.seed(0)
        self.env.set_id(0)

    def aux_test_for_consistent_independant(self, obs, tmp_o, h):
        # in this test env, the h-steps-ahead forecast is load_p + h (by construction)
        assert (obs.load_p + 1. * h == tmp_o.load_p).all()
        tmp_o_1, *_ = obs.simulate(self.env.action_space(),
                                   time_step=h)
        assert (obs.load_p + 1. * h == tmp_o_1.load_p).all()

    def test_can_chain_independant(self):
        """Chained simulate with do-nothing equals direct simulate at the same total horizon;
        the remaining forecast horizon shrinks as the chain gets deeper."""
        obs = self.env.reset()
        tmp_o_1, *_ = obs.simulate(self.env.action_space(),
                                   time_step=1)
        self.aux_test_for_consistent_independant(obs, tmp_o_1, 1)
        tmp_o_2, *_ = tmp_o_1.simulate(self.env.action_space(),
                                       time_step=1)
        self.aux_test_for_consistent_independant(obs, tmp_o_2, 2)
        _ = tmp_o_1.simulate(self.env.action_space(),
                             time_step=11)
        with self.assertRaises(NoForecastAvailable):
            # not available
            tmp_o_2, *_ = tmp_o_1.simulate(self.env.action_space(),
                                           time_step=12)
        tmp_o_3, *_ = tmp_o_2.simulate(self.env.action_space(),
                                       time_step=1)
        self.aux_test_for_consistent_independant(obs, tmp_o_3, 3)
        _ = tmp_o_2.simulate(self.env.action_space(),
                             time_step=10)
        with self.assertRaises(NoForecastAvailable):
            # not available
            tmp_o_2, *_ = tmp_o_2.simulate(self.env.action_space(),
                                           time_step=11)

    def test_can_chain_dependant(self):
        """An action taken in a chained simulate persists along that chain but does not
        leak into fresh simulate calls from the original observation."""
        obs = self.env.reset()
        dn = self.env.action_space()
        # if I do nothing it's like it's independant
        tmp_o_1, *_ = obs.simulate(dn, time_step=1)
        self.aux_test_for_consistent_independant(obs, tmp_o_1, 1)
        # check that it's not independant
        act = self.env.action_space({"set_bus": {"substations_id": [(2, (2, 1, 2, 1))]}})
        tmp_o_1_1, *_ = obs.simulate(act, time_step=1)
        assert (tmp_o_1_1.topo_vect[[9, 11]] == [2, 2]).all()
        tmp_o_2_1, *_ = tmp_o_1_1.simulate(dn, time_step=1)
        assert (tmp_o_2_1.topo_vect[[9, 11]] == [2, 2]).all()
        # check that the original simulate is not "broken"
        tmp_o_1_base, *_ = obs.simulate(dn, time_step=1)
        assert (tmp_o_1_base.topo_vect[[9, 11]] == [1, 1]).all()
        # and to be sure, check that it's independant if i put the flag
        # it's surprising that it works TODO !
        act = self.env.action_space({"set_bus": {"substations_id": [(2, (2, 1, 2, 1))]}})
        tmp_o_1_2, *_ = obs.simulate(act, time_step=1)
        assert (tmp_o_1_2.topo_vect[[9, 11]] == [2, 2]).all()
        # check that the original simulate is not "broken"
        tmp_o_1_base, *_ = obs.simulate(dn, time_step=1)
        assert (tmp_o_1_base.topo_vect[[9, 11]] == [1, 1]).all()
        # check 2nd simulate is indpendant of first one
        tmp_o_2_2, *_ = tmp_o_1_2.simulate(dn, time_step=1)
        assert (tmp_o_2_2.topo_vect[[9, 11]] == [2, 2]).all()
        # check that the original simulate is not "broken"
        tmp_o_1_base, *_ = obs.simulate(dn, time_step=1)
        assert (tmp_o_1_base.topo_vect[[9, 11]] == [1, 1]).all()

    def test_cooldown_when_chained(self):
        """Substation cooldowns decrement along a chain of simulate calls and make
        early re-actions illegal until they reach zero."""
        obs = self.env.reset()
        dn = self.env.action_space()
        act = self.env.action_space({"set_bus": {"substations_id": [(2, (2, 1, 2, 1))]}})
        tmp_o_1_1, *_ = obs.simulate(act, time_step=1)
        assert (tmp_o_1_1.topo_vect[[9, 11]] == [2, 2]).all()
        assert tmp_o_1_1.time_before_cooldown_sub[2] == 3
        tmp_o_2, *_ = tmp_o_1_1.simulate(dn, time_step=1)
        assert tmp_o_2.time_before_cooldown_sub[2] == 2
        tmp_o_2_2, reward, done, info = tmp_o_1_1.simulate(act, time_step=1)
        assert info["is_illegal"]
        assert tmp_o_2_2.time_before_cooldown_sub[2] == 2
        tmp_o_3, r, done, info = tmp_o_2.simulate(dn, time_step=1)
        assert not done
        assert tmp_o_3.time_before_cooldown_sub[2] == 1
        tmp_o_4, r, done, info = tmp_o_3.simulate(dn, time_step=1)
        assert not done
        assert tmp_o_4.time_before_cooldown_sub[2] == 0
        tmp_o_4_2, r, done, info = tmp_o_3.simulate(act, time_step=1)
        assert not done
        assert info["is_illegal"]  # because cooldown > 0 for tmp_o_3
        assert tmp_o_4_2.time_before_cooldown_sub[2] == 0
        tmp_o_5, reward, done, info = tmp_o_4.simulate(act, time_step=1)
        assert not done
        assert not info["is_illegal"]
        assert tmp_o_5.time_before_cooldown_sub[2] == 3

    def test_maintenance(self):
        """Maintenance on line 5 is correctly tracked along a chain of simulate calls;
        the line is NOT automatically reconnected once the maintenance window is past."""
        obs = self.env.reset()  # no maintenance
        obs = self.env.reset()  # maintenance
        dn = self.env.action_space()
        assert obs.time_next_maintenance[5] == 6
        assert obs.duration_next_maintenance[5] == 4
        # check it properly applies in "simulate"
        obs_1, *_ = self.env.step(dn)
        assert obs_1.time_next_maintenance[5] == 5
        assert obs.time_next_maintenance[5] == 6
        tmp_o_1, reward, done, info = obs.simulate(dn, time_step=1)
        assert not done
        assert tmp_o_1.time_next_maintenance[5] == 5
        obs_2, *_ = self.env.step(dn)
        assert obs_2.time_next_maintenance[5] == 4
        tmp_o_2, *_ = tmp_o_1.simulate(dn, time_step=1)
        assert tmp_o_2.time_next_maintenance[5] == 4
        obs_3, *_ = self.env.step(dn)
        assert obs_3.time_next_maintenance[5] == 3
        tmp_o_3, *_ = tmp_o_2.simulate(dn, time_step=1)
        assert tmp_o_3.time_next_maintenance[5] == 3
        obs_4, *_ = self.env.step(dn)
        assert obs_4.time_next_maintenance[5] == 2
        tmp_o_4, *_ = tmp_o_3.simulate(dn, time_step=1)
        assert tmp_o_4.time_next_maintenance[5] == 2
        obs_5, *_ = self.env.step(dn)
        assert obs_5.time_next_maintenance[5] == 1
        tmp_o_5, *_ = tmp_o_4.simulate(dn, time_step=1)
        assert tmp_o_5.time_next_maintenance[5] == 1
        # first corner case: line should be disconnected (first step)
        obs_6, *_ = self.env.step(dn)
        assert obs_6.time_next_maintenance[5] == 0
        assert obs_6.duration_next_maintenance[5] == 4
        tmp_o_6, *_ = tmp_o_5.simulate(dn, time_step=1)
        assert tmp_o_6.time_next_maintenance[5] == 0
        assert tmp_o_6.duration_next_maintenance[5] == 4
        assert not tmp_o_6.line_status[5]
        # now the "duration next maintenance" should decrease of 1
        tmp_o_7, *_ = tmp_o_6.simulate(dn, time_step=1)
        assert tmp_o_7.time_next_maintenance[5] == 0
        assert tmp_o_7.duration_next_maintenance[5] == 3
        assert not tmp_o_7.line_status[5]
        tmp_o_8, *_ = tmp_o_7.simulate(dn, time_step=1)
        assert tmp_o_8.time_next_maintenance[5] == 0
        assert tmp_o_8.duration_next_maintenance[5] == 2
        assert not tmp_o_8.line_status[5]
        tmp_o_9, *_ = tmp_o_8.simulate(dn, time_step=1)
        assert tmp_o_9.time_next_maintenance[5] == 0
        assert tmp_o_9.duration_next_maintenance[5] == 1
        assert not tmp_o_9.line_status[5]
        # second corner case: line should not be modified
        # maintenance is totally 'skiped' : forecast horizon is after maintenance
        # occured
        tmp_o_10, *_ = tmp_o_9.simulate(dn, time_step=1)
        assert tmp_o_10.time_next_maintenance[5] == -1
        assert tmp_o_10.duration_next_maintenance[5] == 0
        # no reason to reconnect the line automatically
        assert not tmp_o_10.line_status[5]
        tmp_o_12, *_ = tmp_o_10.simulate(dn, time_step=2)
        assert tmp_o_12.time_next_maintenance[5] == -1
        assert tmp_o_12.duration_next_maintenance[5] == 0
        # no reason to reconnect the line automatically
        assert not tmp_o_12.line_status[5]
class SoftOverflowTester(unittest.TestCase):
    """Tests that the soft-overflow protection (disconnection after
    NB_TIMESTEP_OVERFLOW_ALLOWED overflowed steps) behaves the same in the real
    env, in multi-horizon ``simulate`` and in chained ``simulate`` calls.
    """

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(os.path.join(PATH_DATA_TEST, "5bus_example_forecasts"), test=True)
        self.env.seed(0)
        self.env.set_id(0)
        # set the thermal limits so that only line 5 is overloaded from the start
        a_or_first = np.array([442.308, 198.55365, 116.50534, 93.63006,
                               442.2703 , 110.96754, 110.96754, 92.05039])
        th_lim = a_or_first * 2.
        th_lim[5] /= 2.5
        self.env.set_thermal_limit(th_lim)
        param = self.env.parameters
        param.NO_OVERFLOW_DISCONNECTION = False
        param.NB_TIMESTEP_RECONNECTION = 3
        self.env.change_parameters(param)
        self.env.change_forecast_parameters(param)

    def test_my_env_is_correct(self):
        """Sanity check: line 5 is disconnected by the real env after 2 overflowed steps."""
        obs = self.env.reset()
        assert obs.timestep_overflow[5] == 0
        obs, *_ = self.env.step(self.env.action_space())
        assert obs.timestep_overflow[5] == 1
        assert obs.line_status[5]
        obs, *_ = self.env.step(self.env.action_space())
        assert obs.timestep_overflow[5] == 2
        assert obs.line_status[5]
        # now it should be disconnected
        obs, *_ = self.env.step(self.env.action_space())
        assert not obs.line_status[5]
        assert obs.timestep_overflow[5] == 0

    def test_simulate_multi_h(self):
        """The soft overflow is reproduced by simulate at every horizon (direct calls)."""
        # NOTE(review): ``reco`` is built but never used in this test — confirm intent
        reco = self.env.action_space({"set_line_status": [(5, +1)]})
        obs = self.env.reset()
        assert obs.timestep_overflow[5] == 0
        sim_o1, *_ = obs.simulate(self.env.action_space(), 1)
        assert sim_o1.line_status[5]
        assert sim_o1.timestep_overflow[5] == 1
        sim_o2, *_ = obs.simulate(self.env.action_space(), 2)
        assert sim_o2.line_status[5]
        assert sim_o2.timestep_overflow[5] == 2
        # now it should be disconnected
        sim_o3, *_ = obs.simulate(self.env.action_space(), 3)
        assert not sim_o3.line_status[5]
        assert sim_o3.timestep_overflow[5] == 0
        assert sim_o3.time_before_cooldown_line[5] == 3  # because it's like that in the parameters
        # still disconnected
        sim_o4, *_ = obs.simulate(self.env.action_space(), 4)
        assert not sim_o4.line_status[5]
        assert sim_o4.timestep_overflow[5] == 0
        # assert sim_o4.time_before_cooldown_line[5] == 2
        # still disconnected
        sim_o5, *_ = obs.simulate(self.env.action_space(), 5)
        assert not sim_o5.line_status[5]
        assert sim_o5.timestep_overflow[5] == 0
        # assert sim_o5.time_before_cooldown_line[5] == 1
        # still disconnected
        sim_o6, *_ = obs.simulate(self.env.action_space(), 6)
        assert not sim_o6.line_status[5]
        assert sim_o6.timestep_overflow[5] == 0
        # assert sim_o6.time_before_cooldown_line[5] == 0
        # still disconnected
        sim_o7, *_ = obs.simulate(self.env.action_space(), 7)
        assert not sim_o7.line_status[5]
        assert sim_o7.timestep_overflow[5] == 0
        # assert sim_o7.time_before_cooldown_line[5] == 0
        # still disconnected
        sim_o12, *_ = obs.simulate(self.env.action_space(), 12)
        assert not sim_o12.line_status[5]
        # assert sim_o12.time_before_cooldown_line[5] == 0

    def test_simulate_chained(self):
        """The soft overflow is reproduced by chained simulate calls (1 step at a time)."""
        # NOTE(review): ``reco`` is built but never used in this test — confirm intent
        reco = self.env.action_space({"set_line_status": [(5, +1)]})
        obs = self.env.reset()
        assert obs.timestep_overflow[5] == 0
        sim_o1, *_ = obs.simulate(self.env.action_space(), 1)
        assert sim_o1.line_status[5]
        assert sim_o1.timestep_overflow[5] == 1
        sim_o2, *_ = sim_o1.simulate(self.env.action_space(), 1)
        assert sim_o2.line_status[5]
        assert sim_o2.timestep_overflow[5] == 2
        # now it should be disconnected
        sim_o3, *_ = sim_o2.simulate(self.env.action_space(), 1)
        assert not sim_o3.line_status[5]
        assert sim_o3.timestep_overflow[5] == 0
        assert sim_o3.time_before_cooldown_line[5] == 3  # because it's like that in the parameters
        # still disconnected
        sim_o4, *_ = sim_o3.simulate(self.env.action_space(), 1)
        assert not sim_o4.line_status[5]
        assert sim_o4.timestep_overflow[5] == 0
        # assert sim_o4.time_before_cooldown_line[5] == 2
        # still disconnected
        sim_o5, *_ = sim_o4.simulate(self.env.action_space(), 1)
        assert not sim_o5.line_status[5]
        assert sim_o5.timestep_overflow[5] == 0
        # assert sim_o5.time_before_cooldown_line[5] == 1
        # still disconnected
        sim_o6, *_ = sim_o5.simulate(self.env.action_space(), 1)
        assert not sim_o6.line_status[5]
        assert sim_o6.timestep_overflow[5] == 0
        # assert sim_o6.time_before_cooldown_line[5] == 0
        # still disconnected, but I can reconnect it if I want
        sim_o7, *_ = sim_o6.simulate(self.env.action_space(), 1)
        assert not sim_o7.line_status[5]
        assert sim_o7.timestep_overflow[5] == 0
        # assert sim_o7.time_before_cooldown_line[5] == 0
# allow running this test file directly
if __name__ == "__main__":
    unittest.main()
| 23,664 | 42.027273 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_nb_simulate_called.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
import unittest
import warnings
import copy
from grid2op.Parameters import Parameters
from grid2op.Exceptions import (
SimulateUsedTooMuchThisStep,
SimulateUsedTooMuchThisEpisode,
)
class TestSimulateCount(unittest.TestCase):
    """
    This class tests the possibility in grid2op to limit the number of call to "obs.simulate"

    The limits are driven by the parameters ``MAX_SIMULATE_PER_STEP`` and
    ``MAX_SIMULATE_PER_EPISODE``; exceeding them raises
    :class:`SimulateUsedTooMuchThisStep` / :class:`SimulateUsedTooMuchThisEpisode`.
    """

    def _aux_make_env(self, param=None):
        """Create the sandbox test environment, optionally with custom parameters."""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            if param is not None:
                env = grid2op.make("l2rpn_case14_sandbox", test=True, param=param)
            else:
                env = grid2op.make("l2rpn_case14_sandbox", test=True)
        return env

    def test_simple_cases(self):
        """The per-step simulate counter increases with each call and resets on env.reset()."""
        env = self._aux_make_env()
        obs = env.reset()
        # basic test
        obs.simulate(env.action_space())
        assert env.observation_space.nb_simulate_called_this_step == 1
        obs.simulate(env.action_space())
        obs.simulate(env.action_space())
        assert env.observation_space.nb_simulate_called_this_step == 3
        obs = env.reset()
        assert env.observation_space.nb_simulate_called_this_step == 0

    def test_with_copies(self):
        """A copied environment keeps its own simulate counter, independent of the original."""
        env = self._aux_make_env()
        # test with copies
        env_cpy = env.copy()
        obs_cpy = env_cpy.reset()
        assert env_cpy.observation_space.nb_simulate_called_this_step == 0
        obs = env.reset()
        obs.simulate(env.action_space())
        assert env.observation_space.nb_simulate_called_this_step == 1
        assert env_cpy.observation_space.nb_simulate_called_this_step == 0
        obs_cpy.simulate(env.action_space())
        assert env.observation_space.nb_simulate_called_this_step == 1
        assert env_cpy.observation_space.nb_simulate_called_this_step == 1
        obs_cpy.simulate(env.action_space())
        assert env.observation_space.nb_simulate_called_this_step == 1
        assert env_cpy.observation_space.nb_simulate_called_this_step == 2
        obs_cpy = env_cpy.reset()
        # resetting the copy does not touch the original's counter
        assert env.observation_space.nb_simulate_called_this_step == 1
        assert env_cpy.observation_space.nb_simulate_called_this_step == 0

    def test_max_step(self):
        """Exceeding MAX_SIMULATE_PER_STEP raises; env.step() re-arms the budget."""
        MAX_SIMULATE_PER_STEP = 10
        param = Parameters()
        param.MAX_SIMULATE_PER_STEP = MAX_SIMULATE_PER_STEP
        env = self._aux_make_env(param)
        obs = env.reset()
        for i in range(MAX_SIMULATE_PER_STEP):
            obs.simulate(env.action_space())
        with self.assertRaises(SimulateUsedTooMuchThisStep):
            obs.simulate(env.action_space())  # raises a SimulateUsedTooMuchThisStep
        # should be OK now
        obs, *_ = env.step(env.action_space())
        obs.simulate(env.action_space())

    def test_max_episode(self):
        """Exceeding MAX_SIMULATE_PER_EPISODE raises; only env.reset() re-arms the budget."""
        MAX_SIMULATE_PER_EPISODE = 10
        param = Parameters()
        param.MAX_SIMULATE_PER_EPISODE = MAX_SIMULATE_PER_EPISODE
        env = self._aux_make_env(param)
        obs = env.reset()
        for i in range(MAX_SIMULATE_PER_EPISODE):
            obs.simulate(env.action_space())
        # stepping does NOT refill the per-episode budget
        obs, *_ = env.step(env.action_space())
        with self.assertRaises(SimulateUsedTooMuchThisEpisode):
            obs.simulate(env.action_space())  # raises a SimulateUsedTooMuchThisEpisode
        obs = env.reset()
        for i in range(MAX_SIMULATE_PER_EPISODE):
            obs.simulate(env.action_space())  # should work now (reset called)
        obs, *_ = env.step(env.action_space())
        with self.assertRaises(SimulateUsedTooMuchThisEpisode):
            obs.simulate(env.action_space())  # raises a SimulateUsedTooMuchThisEpisode
        obs = env.reset()
        obs.simulate(env.action_space())

    def test_max_step_with_copy(self):
        """Original and copied env can each carry a different MAX_SIMULATE_PER_STEP limit."""
        MAX_SIMULATE_PER_STEP = 10
        MAX_SIMULATE_PER_STEP_CPY = 5
        param = Parameters()
        param.MAX_SIMULATE_PER_STEP = MAX_SIMULATE_PER_STEP
        env = self._aux_make_env(param)
        param = copy.deepcopy(param)
        param.MAX_SIMULATE_PER_STEP = MAX_SIMULATE_PER_STEP_CPY
        env_cpy = env.copy()
        env_cpy.change_parameters(param)
        obs = env.reset()
        obs_cpy = env_cpy.reset()
        for i in range(MAX_SIMULATE_PER_STEP):
            obs.simulate(env.action_space())
        with self.assertRaises(SimulateUsedTooMuchThisStep):
            obs.simulate(env.action_space())  # raises a SimulateUsedTooMuchThisStep
        for i in range(MAX_SIMULATE_PER_STEP_CPY):
            obs_cpy.simulate(env.action_space())  # should work
        with self.assertRaises(SimulateUsedTooMuchThisStep):
            obs_cpy.simulate(env.action_space())  # raises a SimulateUsedTooMuchThisStep
        # should be OK now
        obs, *_ = env.step(env.action_space())
        obs.simulate(env.action_space())  # I can simulate on the original env correctly
        # the copy was not stepped, so its per-step budget is still exhausted
        with self.assertRaises(SimulateUsedTooMuchThisStep):
            obs_cpy.simulate(env.action_space())  # raises a SimulateUsedTooMuchThisStep

    def test_max_episode_with_copy(self):
        """Per-episode budgets of the original and the copy are tracked independently."""
        MAX_SIMULATE_PER_EPISODE = 10
        MAX_SIMULATE_PER_EPISODE_CPY = 10
        param = Parameters()
        param.MAX_SIMULATE_PER_EPISODE = MAX_SIMULATE_PER_EPISODE
        env = self._aux_make_env(param)
        param = copy.deepcopy(param)
        param.MAX_SIMULATE_PER_EPISODE = MAX_SIMULATE_PER_EPISODE_CPY
        env_cpy = env.copy()
        env_cpy.change_parameters(param)
        obs = env.reset()
        obs_cpy = env_cpy.reset()
        for i in range(MAX_SIMULATE_PER_EPISODE):
            obs.simulate(env.action_space())
        obs, *_ = env.step(env.action_space())
        with self.assertRaises(SimulateUsedTooMuchThisEpisode):
            obs.simulate(env.action_space())  # raises a SimulateUsedTooMuchThisEpisode
        for i in range(MAX_SIMULATE_PER_EPISODE_CPY):
            obs_cpy.simulate(env.action_space())  # should not raise
        obs_cpy, *_ = env_cpy.step(env.action_space())
        with self.assertRaises(SimulateUsedTooMuchThisEpisode):
            obs_cpy.simulate(
                env.action_space()
            )  # raises a SimulateUsedTooMuchThisEpisode
        obs = env.reset()
        for i in range(MAX_SIMULATE_PER_EPISODE):
            obs.simulate(env.action_space())  # should work now (reset called)
        with self.assertRaises(SimulateUsedTooMuchThisEpisode):
            obs_cpy.simulate(
                env.action_space()
            )  # raises a SimulateUsedTooMuchThisEpisode (copy not reset)

    def test_no_limit(self):
        """Without a limit, simulate can be called freely; the limit can be added later."""
        MAX_SIMULATE_PER_EPISODE = 7
        env = self._aux_make_env()
        obs = env.reset()
        for _ in range(MAX_SIMULATE_PER_EPISODE + 1):
            obs.simulate(env.action_space())
        # change parameters and see if the limit works
        param = Parameters()
        param.MAX_SIMULATE_PER_EPISODE = MAX_SIMULATE_PER_EPISODE
        env.change_parameters(param)
        obs = env.reset()
        for _ in range(MAX_SIMULATE_PER_EPISODE):
            obs.simulate(env.action_space())
        with self.assertRaises(SimulateUsedTooMuchThisEpisode):
            obs.simulate(
                env.action_space()
            )  # raises a SimulateUsedTooMuchThisEpisode
# allow running this test file directly
if __name__ == "__main__":
    unittest.main()
| 7,797 | 38.18593 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_no_backend_copy.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
import unittest
import warnings
import copy
from grid2op.Backend import PandaPowerBackend
from grid2op.Exceptions import NoForecastAvailable
from grid2op.Exceptions.EnvExceptions import EnvError
from grid2op.Exceptions.ObservationExceptions import BaseObservationError
from grid2op.Exceptions.simulatorExceptions import SimulatorError
from grid2op.simulator import Simulator
class PPNoCpy(PandaPowerBackend):
    """Backend whose ``copy`` always fails: emulates a backend that cannot be copied."""

    def copy(self):
        raise NotImplementedError("Not used for this class")
class PPNoCpyInCtor(PandaPowerBackend):
    """Backend declaring at construction time that it cannot be copied (``can_be_copied=False``)."""

    def __init__(self,
                 detailed_infos_for_cascading_failures=False,
                 ligthsim2grid=False,
                 dist_slack=False,
                 max_iter=10):
        # NOTE(review): "ligthsim2grid" looks like a typo of "lightsim2grid", but the
        # value is forwarded positionally so the local spelling is harmless; confirm
        # against the parent signature before renaming.
        super().__init__(detailed_infos_for_cascading_failures,
                         ligthsim2grid,
                         dist_slack,
                         max_iter,
                         can_be_copied=False)
class NoCopyTester(unittest.TestCase):
    """test grid2op works when the backend cannot be copied."""

    def setUp(self) -> None:
        env_name = "l2rpn_case14_sandbox"
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(env_name, test=True, backend=PPNoCpy())

    def tearDown(self) -> None:
        self.env.close()

    def test_env_correct_flags(self):
        """The env must detect the non-copyable backend and disable forecasts."""
        assert not self.env.with_forecast
        assert self.env.get_obs()._obs_env is None
        assert not self.env.observation_space.with_forecast
        assert not self.env.backend._can_be_copied

    def test_no_backend_needs_copy(self):
        """A regular reset/step cycle works without ever copying the backend."""
        obs = self.env.reset()
        obs, reward, done, info = self.env.step(self.env.action_space())

    def test_cannot_reactivate_forecasts(self):
        """Forecasts cannot be re-enabled when the backend cannot be copied."""
        with self.assertRaises(EnvError):
            self.env.reactivate_forecast()

    def test_cannot_use_simulate(self):
        """obs.simulate requires a backend copy, hence must fail here."""
        obs = self.env.reset()
        with self.assertRaises(NoForecastAvailable):
            res = obs.simulate(self.env.action_space())

    def test_simulator_from_obs(self):
        """obs.get_simulator requires a backend copy, hence must fail here."""
        obs = self.env.reset()
        with self.assertRaises(BaseObservationError):
            res = obs.get_simulator()

    def test_cannot_use_simulator(self):
        """Building a Simulator directly from a non-copyable backend must fail."""
        with self.assertRaises(SimulatorError):
            Simulator(backend=self.env.backend)
        # NOTE(review): passing a backend as `env` looks intentional here — it is
        # a misuse that must also raise SimulatorError; confirm if ever refactoring.
        with self.assertRaises(SimulatorError):
            Simulator(backend=None, env=self.env.backend)
class NoCopy2Tester(NoCopyTester):
    """test grid2op works when the backend cannot be copied.

    Same checks as :class:`NoCopyTester`, but the backend advertises
    ``can_be_copied=False`` in its constructor instead of raising in ``copy``.
    """

    def setUp(self) -> None:
        env_name = "l2rpn_case14_sandbox"
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(env_name, test=True, backend=PPNoCpyInCtor())
# allow running this test file directly
if __name__ == "__main__":
    unittest.main()
| 3,425 | 36.23913 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_noisy_obs.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import numpy as np
import unittest
import warnings
import pdb
import grid2op
from grid2op.Runner import Runner
from grid2op.Observation import CompleteObservation
from grid2op.Observation import NoisyObservation
class TestNoisy(unittest.TestCase):
    """Tests for :class:`NoisyObservation`: noisy but reproducible (seeded) observations."""

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(
                "educ_case14_storage", test=True, observation_class=NoisyObservation
            )
        self.env.seed(0)
        self.env.set_id(0)
        self.obs = self.env.reset()

    def tearDown(self) -> None:
        self.env.close()

    def test_create_ok(self):
        # simply test the creation
        pass

    def _obs_equals(self, obs1, obs2):
        """Assert the main continuous attributes of two observations are identical.

        Note: ``gen_p``/``prod_p`` and ``gen_q``/``prod_q`` are used interchangeably
        (they alias the same quantity in grid2op observations).
        """
        assert np.all(obs1.load_p == obs2.load_p)
        assert np.all(obs1.load_q == obs2.load_q)
        assert np.all(obs1.gen_p == obs2.prod_p)
        assert np.all(obs1.gen_q == obs2.prod_q)
        assert np.all(obs1.a_or == obs2.a_or)
        assert np.all(obs1.a_ex == obs2.a_ex)
        assert np.all(obs1.p_or == obs2.p_or)
        assert np.all(obs1.p_ex == obs2.p_ex)
        assert np.all(obs1.q_or == obs2.q_or)
        assert np.all(obs1.q_ex == obs2.q_ex)
        assert np.all(obs1.storage_power == obs2.storage_power)

    def test_getobs_sameres(self):
        # two successive get_obs() must return the same (noisy) values, and those
        # values must differ from the exact ones stored in the backend
        obs0 = self.env.get_obs()
        obs1 = self.env.get_obs()
        assert np.all(obs0.load_p == obs1.load_p)
        assert np.any(obs0.load_p != self.env.backend.load_p)
        assert np.all(obs0.load_q == obs1.load_q)
        assert np.any(obs0.load_q != self.env.backend.load_q)
        assert np.all(obs0.gen_p == obs1.prod_p)
        assert np.any(obs0.gen_p != self.env.backend.prod_p)
        assert np.all(obs0.gen_q == obs1.prod_q)
        assert np.any(obs0.gen_q != self.env.backend.prod_q)
        assert np.all(obs0.a_or == obs1.a_or)
        assert np.any(obs0.a_or != self.env.backend.a_or)
        assert np.all(obs0.a_ex == obs1.a_ex)
        assert np.any(obs0.a_ex != self.env.backend.a_ex)
        assert np.all(obs0.p_ex == obs1.p_ex)
        assert np.any(obs0.p_ex != self.env.backend.p_ex)
        assert np.all(obs0.q_ex == obs1.q_ex)
        assert np.any(obs0.q_ex != self.env.backend.q_ex)
        assert np.all(obs0.p_or == obs1.p_or)
        assert np.any(obs0.p_or != self.env.backend.p_or)
        assert np.all(obs0.q_or == obs1.q_or)
        assert np.any(obs0.q_or != self.env.backend.q_or)
        assert np.all(obs0.storage_power == obs1.storage_power)
        assert np.any(obs0.storage_power != self.env._storage_power)

    def test_seed_works(self):
        """Same seed + same scenario id => same noisy observation."""
        self.env.seed(0)
        self.env.set_id(0)
        obs = self.env.reset()
        self._obs_equals(obs, self.obs)

    def test_seed_independant_previous(self):
        """test that the seed of a given episode is independant on what happened in the previous"""
        obs, *_ = self.env.step(self.env.action_space())
        obs, *_ = self.env.step(self.env.action_space())
        obs, *_ = self.env.step(self.env.action_space())
        obs = self.env.reset()
        self.env.seed(0)
        self.env.set_id(0)
        as_ref = self.env.reset()  # should match self.obs
        self._obs_equals(as_ref, self.obs)
        # don't do anything (instead of 3 steps)
        as_obs = self.env.reset()
        self._obs_equals(obs, as_obs)  # should match the case where I did 3 steps

    def test_with_copy(self):
        """A copied env produces exactly the same noisy observations as the original."""
        env_cpy = self.env.copy()
        obs, *_ = self.env.step(self.env.action_space())
        obs, *_ = self.env.step(self.env.action_space())
        obs, *_ = self.env.step(self.env.action_space())
        obs = self.env.reset()
        obs_cpy = env_cpy.reset()
        self._obs_equals(obs_cpy, obs)
        obs, *_ = self.env.step(self.env.action_space())
        obs_cpy, *_ = env_cpy.step(self.env.action_space())
        self._obs_equals(obs_cpy, obs)

    def test_simulate(self):
        """simulate() returns a regular (non-noisy) CompleteObservation and is reproducible."""
        sim_o, *_ = self.obs.simulate(self.env.action_space())
        assert type(sim_o).env_name == "educ_case14_storage"
        assert isinstance(sim_o, CompleteObservation)
        # test that it is reproducible
        self.env.seed(0)
        self.env.set_id(0)
        as_ref = self.env.reset()  # should match self.obs
        sim_o2, *_ = as_ref.simulate(self.env.action_space())
        self._obs_equals(sim_o2, sim_o)
        # test that it is the same as non stochastic observation
        # (simulate is based on forecast, not on actual environment state)
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = grid2op.make(
                "educ_case14_storage", test=True, observation_class=CompleteObservation
            )
        env.seed(0)
        env.set_id(0)
        obs = env.reset()
        sim_o3, *_ = obs.simulate(self.env.action_space())
        self._obs_equals(sim_o3, sim_o)

    def test_runner(self):
        """Runner episodes are reproducible with the same seed, different with another seed."""
        runner = Runner(**self.env.get_params_for_runner())
        # check it's the same when seed is the same
        res = runner.run(
            nb_episode=1,
            max_iter=10,
            episode_id=[0],
            env_seeds=[0],
            add_detailed_output=True,
        )
        res2 = runner.run(
            nb_episode=1,
            max_iter=10,
            episode_id=[0],
            env_seeds=[0],
            add_detailed_output=True,
        )
        self._obs_equals(res[0][-1].observations[0], self.obs)
        for el in range(10):
            obs1 = res[0][-1].observations[el]
            obs2 = res2[0][-1].observations[el]
            self._obs_equals(obs1, obs2)
        # check it's different when seed is different
        res3 = runner.run(
            nb_episode=1,
            max_iter=10,
            episode_id=[0],
            env_seeds=[1],
            add_detailed_output=True,
        )
        for el in range(10):
            obs1 = res[0][-1].observations[el]
            obs3 = res3[0][-1].observations[el]
            with self.assertRaises(AssertionError):
                self._obs_equals(obs1, obs3)
class TestNoisyDiffParams(TestNoisy):
    """Same tests as :class:`TestNoisy`, but with custom noise levels (kwargs_observation)."""

    def setUp(self) -> None:
        kwargs_observation = {"sigma_load_p": 1.0, "sigma_gen_p": 0.1}
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(
                "educ_case14_storage",
                test=True,
                observation_class=NoisyObservation,
                kwargs_observation=kwargs_observation,
            )
        self.env.seed(0)
        self.env.set_id(0)
        self.obs = self.env.reset()

    def test_param_working(self):
        # change the kwargs to make sure it has an impact: with the same seed but
        # different sigmas, the noisy observation must differ from self.obs
        kwargs_observation = {"sigma_load_p": 0.1, "sigma_gen_p": 1.0}
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = grid2op.make(
                "educ_case14_storage",
                test=True,
                observation_class=NoisyObservation,
                kwargs_observation=kwargs_observation,
            )
        env.seed(0)
        env.set_id(0)
        obs = env.reset()
        with self.assertRaises(AssertionError):
            self._obs_equals(obs, self.obs)
# TODO next: have a powerflow there to compute the outcome of the state
# after the modification
# allow running this test file directly
if __name__ == "__main__":
    unittest.main()
| 7,922 | 35.680556 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_opp_with_area.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import unittest
import numpy as np
import warnings
from grid2op.Opponent import (
GeometricOpponentMultiArea,
GeometricOpponent
)
from grid2op.Action import TopologyAction
from grid2op.MakeEnv import make
from grid2op.Opponent.BaseActionBudget import BaseActionBudget
from grid2op.dtypes import dt_int
from grid2op.Parameters import Parameters
from grid2op.Runner import Runner
import pdb
LINES_ATTACKED = ["1_3_3", "1_4_4", "3_6_15", "9_10_12", "11_12_13", "12_13_14"]
class TestMultiAreaOpponentBasic(unittest.TestCase):
    """Basic construction / seeding tests for :class:`GeometricOpponentMultiArea`."""

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = make("l2rpn_case14_sandbox", test=True)
        # one sub-opponent per "area": lines are split into two groups
        self.opponent = GeometricOpponentMultiArea(self.env.action_space)
        self.opponent.init(self.env,
                           lines_attacked=[LINES_ATTACKED[:3],LINES_ATTACKED[3:]],
                           attack_every_xxx_hour=24,
                           average_attack_duration_hour=4,
                           minimum_attack_duration_hour=2,
                           pmax_pmin_ratio=4)

    def tearDown(self) -> None:
        self.env.close()
        return super().tearDown()

    def test_seed(self):
        """Seeding must produce reproducible attack schedules for each sub-opponent."""
        self.opponent.seed(0)
        obs = self.env.reset()
        initial_budget = 250
        self.opponent.reset(initial_budget)
        # expected schedules for seed 0 on this test environment
        assert np.all(self.opponent.list_opponents[0]._attack_times == [160])
        assert np.all(self.opponent.list_opponents[1]._attack_times == [182, 467])
class TestMultiAreaOpponent(unittest.TestCase):
    """End-to-end tests of the multi-area opponent inside a full environment.

    Uses a seeded environment so the per-area attack schedules (times / durations)
    are deterministic; individual tests then tweak those schedules directly.
    """

    def setUp(self):
        # make an environment and check it works
        params = Parameters()
        params.NO_OVERFLOW_DISCONNECTION = True
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = make("l2rpn_case14_sandbox",
                            test=True,
                            _add_to_name="multiarea",
                            opponent_budget_per_ts=0.17*2,  # 0.17 per area
                            opponent_init_budget=1000,  # I don't really care much right now
                            opponent_attack_cooldown=0,  # otherwise it will not work
                            opponent_attack_duration=96,
                            opponent_budget_class=BaseActionBudget,
                            opponent_class=GeometricOpponentMultiArea,
                            opponent_action_class=TopologyAction,
                            param=params,
                            kwargs_opponent=dict(lines_attacked=[LINES_ATTACKED[:3],LINES_ATTACKED[3:]],
                                                 attack_every_xxx_hour=24,
                                                 average_attack_duration_hour=4,
                                                 minimum_attack_duration_hour=2,
                                                 pmax_pmin_ratio=4)
                            )
        self.env.seed(0)
        self.env.reset()

    def tearDown(self) -> None:
        self.env.close()
        return super().tearDown()

    def test_when_env_copied(self):
        """The opponent class must survive env.copy(), get_kwargs() and the Runner."""
        # check it's properly propagated when copied
        env_cpy = self.env.copy()
        assert isinstance(env_cpy._opponent, GeometricOpponentMultiArea)
        # check it's properly propagated in the kwargs
        env_params = self.env.get_kwargs()
        assert env_params["opponent_class"] == GeometricOpponentMultiArea
        # check it's properly propagated in the runner
        runner_params = self.env.get_params_for_runner()
        assert runner_params["opponent_class"] == GeometricOpponentMultiArea
        runner = Runner(**runner_params)
        assert runner.opponent_class == GeometricOpponentMultiArea
        # check the runner can make an env with the right opponent space type
        env_runner = runner.init_env()
        assert isinstance(env_runner._opponent, GeometricOpponentMultiArea)

    def test_creation_ok(self):
        """Check sub-opponent types and the seeded attack schedules (seed 0)."""
        assert isinstance(self.env._opponent, GeometricOpponentMultiArea)
        assert isinstance(self.env._opponent.list_opponents[0], GeometricOpponent)
        assert isinstance(self.env._opponent.list_opponents[1], GeometricOpponent)
        assert np.all(self.env._opponent.list_opponents[0]._attack_times == [9, 370, 472])
        assert np.all(self.env._opponent.list_opponents[0]._attack_durations == [28, 53, 25])
        assert np.all(self.env._opponent.list_opponents[1]._attack_times == [345])
        assert np.all(self.env._opponent.list_opponents[1]._attack_durations == [55])

    def test_does_one_attack(self):
        """test a single opponent can attack at a given step (most basic)"""
        # shorten the first attack (area 0, scheduled at step 9) to 3 steps
        self.env._opponent.list_opponents[0]._attack_durations[0] = 3
        for ts in range(9):
            obs, reward, done, info = self.env.step(self.env.action_space())
            assert np.all(obs.line_status), f"error for {ts}"
        # attack starts: line 4 is attacked and disconnected
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is not None
        assert info["opponent_attack_line"][4]
        assert not obs.line_status[4]
        # attack continues
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is not None
        assert info["opponent_attack_line"][4]
        assert not obs.line_status[4]
        # attack continues
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is not None
        assert info["opponent_attack_line"][4]
        assert not obs.line_status[4]
        # attack continues
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is not None
        assert info["opponent_attack_line"][4]
        assert not obs.line_status[4]
        # attack stops
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is None

    def test_does_two_simulatneous_attacks(self):
        """test both opponent can attack at a given step"""
        # schedule both areas to attack at step 9 (area 0 on line 4, area 1 on line 14)
        self.env._opponent.list_opponents[0]._attack_durations[0] = 3
        self.env._opponent.list_opponents[1]._attack_times[0] = 9
        self.env._opponent.list_opponents[1]._attack_waiting_times[0] = 9
        self.env._opponent.list_opponents[1]._attack_durations[0] = 2
        for ts in range(9):
            obs, reward, done, info = self.env.step(self.env.action_space())
            assert np.all(obs.line_status), f"error for {ts}"
        # attack starts
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is not None
        assert info["opponent_attack_line"].sum() == 2
        assert info["opponent_attack_line"][4]
        assert info["opponent_attack_line"][14]
        assert not obs.line_status[4]
        assert not obs.line_status[14]
        # both attacks continue
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is not None
        assert info["opponent_attack_line"].sum() == 2
        assert info["opponent_attack_line"][4]
        assert info["opponent_attack_line"][14]
        assert not obs.line_status[4]
        assert not obs.line_status[14]
        # both attacks continue
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is not None
        assert info["opponent_attack_line"].sum() == 2
        assert info["opponent_attack_line"][4]
        assert info["opponent_attack_line"][14]
        assert not obs.line_status[4]
        assert not obs.line_status[14]
        # second attack stops
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is not None
        assert info["opponent_attack_line"][4]
        assert info["opponent_attack_line"].sum() == 1
        assert not obs.line_status[4]
        # all attack have stoped
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is None

    def test_one_after_the_other(self):
        """test one opponent can attack after the other"""
        # area 0 attacks at step 9 (line 4), area 1 one step later (line 14)
        self.env._opponent.list_opponents[0]._attack_durations[0] = 3
        self.env._opponent.list_opponents[1]._attack_times[0] = 10
        self.env._opponent.list_opponents[1]._attack_waiting_times[0] = 10
        self.env._opponent.list_opponents[1]._attack_durations[0] = 3
        for ts in range(9):
            obs, reward, done, info = self.env.step(self.env.action_space())
            assert np.all(obs.line_status), f"error for {ts}"
        # first attack starts
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is not None
        assert info["opponent_attack_line"].sum() == 1
        assert info["opponent_attack_line"][4]
        assert not obs.line_status[4]
        # second attack starts
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is not None
        assert info["opponent_attack_line"].sum() == 2
        assert info["opponent_attack_line"][4]
        assert info["opponent_attack_line"][14]
        assert not obs.line_status[4]
        assert not obs.line_status[14]
        # both attacks continue
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is not None
        assert info["opponent_attack_line"].sum() == 2
        assert info["opponent_attack_line"][4]
        assert info["opponent_attack_line"][14]
        assert not obs.line_status[4]
        assert not obs.line_status[14]
        # both attacks continue
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is not None
        assert info["opponent_attack_line"].sum() == 2
        assert info["opponent_attack_line"][4]
        assert info["opponent_attack_line"][14]
        assert not obs.line_status[4]
        assert not obs.line_status[14]
        # first attack stops
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is not None
        assert info["opponent_attack_line"][14]
        assert info["opponent_attack_line"].sum() == 1
        # all attack have stoped
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert info["opponent_attack_line"] is None
# allow running this test file directly
if __name__ == "__main__":
    unittest.main()
| 11,281 | 46.403361 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_pickling.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import unittest
import warnings
import copy
import multiprocessing as mp
import grid2op
from grid2op.gym_compat import (
ContinuousToDiscreteConverter,
GymEnv,
MultiToTupleConverter,
ScalerAttrConverter,
)
with warnings.catch_warnings():
    # this needs to be imported in the main module for multiprocessing to work "approximately":
    # the "spawn" start method re-imports the main module in the children, so the
    # dynamically generated grid2op classes must exist at module import time
    warnings.filterwarnings("ignore")
    _ = grid2op.make("l2rpn_case14_sandbox", test=True, _add_to_name="for_mp_test")
class TestMultiProc(unittest.TestCase):
    """Check that a (customized) GymEnv can be deep-copied and used in subprocesses."""

    @staticmethod
    def f(env_gym):
        # worker: sample an action (exercises pickling of the action space)
        return env_gym.action_space.sample()

    @staticmethod
    def g(env_gym):
        # worker: sample an action and step (exercises pickling of the whole env)
        act = env_gym.action_space.sample()
        return env_gym.step(act)[0]

    def test_basic(self):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env = grid2op.make(
                "l2rpn_case14_sandbox", test=True, _add_to_name="for_mp_test"
            )
        env_gym = GymEnv(env)
        obs_gym, *_ = env_gym.reset()
        # 3. (optional) customize it (see section above for more information)
        ## customize action space
        env_gym.action_space = env_gym.action_space.ignore_attr("set_bus").ignore_attr(
            "set_line_status"
        )
        env_gym.action_space = env_gym.action_space.reencode_space(
            "redispatch", ContinuousToDiscreteConverter(nb_bins=11)
        )
        env_gym.action_space = env_gym.action_space.reencode_space(
            "change_bus", MultiToTupleConverter()
        )
        env_gym.action_space = env_gym.action_space.reencode_space(
            "change_line_status", MultiToTupleConverter()
        )
        env_gym.action_space = env_gym.action_space.reencode_space(
            "redispatch", MultiToTupleConverter()
        )
        ## customize observation space
        ob_space = env_gym.observation_space
        ob_space = ob_space.keep_only_attr(
            ["rho", "gen_p", "load_p", "topo_vect", "actual_dispatch"]
        )
        ob_space = ob_space.reencode_space(
            "actual_dispatch", ScalerAttrConverter(substract=0.0, divide=env.gen_pmax)
        )
        ob_space = ob_space.reencode_space(
            "gen_p", ScalerAttrConverter(substract=0.0, divide=env.gen_pmax)
        )
        ob_space = ob_space.reencode_space(
            "load_p",
            ScalerAttrConverter(
                substract=obs_gym["load_p"], divide=0.5 * obs_gym["load_p"]
            ),
        )
        env_gym.observation_space = ob_space
        # "spawn" is the strictest start method: everything must be picklable
        ctx = mp.get_context("spawn")
        env_gym1 = copy.deepcopy(env_gym)
        env_gym2 = copy.deepcopy(env_gym)
        with ctx.Pool(2) as p:
            p.map(TestMultiProc.f, [env_gym1, env_gym2])
        with ctx.Pool(2) as p:
            p.map(TestMultiProc.g, [env_gym1, env_gym2])
# allow running this test file directly
if __name__ == "__main__":
    unittest.main()
| 3,343 | 32.777778 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_recopowerlineperarea.py | # Copyright (c) 2019-2022, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import warnings
import os
import numpy as np
import grid2op
from grid2op.Agent import RecoPowerlinePerArea
import unittest
import pdb
"""snippet for the "debug" stuff
if hasattr(self, "_debug") and self._debug:
import pdb
pdb.set_trace()
"""
class TestRecoPowerlinePerArea(unittest.TestCase):
    """Tests for :class:`RecoPowerlinePerArea`: reconnect at most one line per area."""

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("l2rpn_idf_2023", test=True)
        # disable overflow disconnection so lines only go down when we force them
        param = self.env.parameters
        param.NO_OVERFLOW_DISCONNECTION = True
        self.env.change_parameters(param)
        self.env.seed(0)
        self.env.set_id(0)
        self.agent = RecoPowerlinePerArea(self.env.action_space,
                                          self.env._game_rules.legal_action.substations_id_by_area)

    def tearDown(self) -> None:
        self.env.close()
        return super().tearDown()

    def test_can_act_dn(self):
        """With every line connected, the agent does nothing."""
        obs = self.env.reset()
        act = self.agent.act(obs, None, None)
        assert not act.can_affect_something()

    def test_can_act_reco1(self):
        """test it can reconnect one line if one is disconnected"""
        obs = self.env.reset()
        obs, *_ = self.env.step(self.env.action_space({"set_line_status": [(0, -1)]}))
        act = self.agent.act(obs, None, None)
        assert not act.can_affect_something()  # cooldown
        obs, *_ = self.env.step(self.env.action_space())
        act = self.agent.act(obs, None, None)
        assert not act.can_affect_something()  # cooldown
        obs, *_ = self.env.step(self.env.action_space())
        act = self.agent.act(obs, None, None)
        assert not act.can_affect_something()  # cooldown
        obs, *_ = self.env.step(self.env.action_space())
        act = self.agent.act(obs, None, None)
        assert act.can_affect_something()
        assert act.get_topological_impact()[0][0]

    def test_can_act_reco2(self):
        """test it can reconnect two lines if two are disconnected, not on the same area"""
        obs = self.env.reset()
        obs, *_ = self.env.step(self.env.action_space({"set_line_status": [(0, -1), (3, -1)]}))
        act = self.agent.act(obs, None, None)
        assert not act.can_affect_something()  # cooldown
        obs, *_ = self.env.step(self.env.action_space())
        act = self.agent.act(obs, None, None)
        assert not act.can_affect_something()  # cooldown
        obs, *_ = self.env.step(self.env.action_space())
        act = self.agent.act(obs, None, None)
        assert not act.can_affect_something()  # cooldown
        obs, *_ = self.env.step(self.env.action_space())
        act = self.agent.act(obs, None, None)
        assert act.can_affect_something()
        assert act.get_topological_impact()[0][0]
        assert act.get_topological_impact()[0][3]

    def test_can_act_reco_only1(self):
        """test it does not attempt to reconnect two lines on the same area"""
        obs = self.env.reset()
        obs, *_ = self.env.step(self.env.action_space({"set_line_status": [(0, -1)]}))
        obs, *_ = self.env.step(self.env.action_space({"set_line_status": [(2, -1)]}))
        obs, *_ = self.env.step(self.env.action_space())
        obs, *_ = self.env.step(self.env.action_space())
        obs, *_ = self.env.step(self.env.action_space())
        assert np.all(obs.time_before_cooldown_line == 0)
        act = self.agent.act(obs, None, None)
        # only line 0 is reconnected: line 2 is in the same area
        assert act.get_topological_impact()[0][0]
        assert not act.get_topological_impact()[0][2]

    def test_do_not_attempt_reco_cooldown(self):
        """A line still in cooldown must not be targeted for reconnection."""
        obs = self.env.reset()
        obs, *_ = self.env.step(self.env.action_space({"set_line_status": [(0, -1)]}))
        obs, *_ = self.env.step(self.env.action_space({"set_line_status": [(3, -1)]}))
        obs, *_ = self.env.step(self.env.action_space())
        obs, *_ = self.env.step(self.env.action_space())
        act = self.agent.act(obs, None, None)  # line 3 still in cooldown
        assert act.get_topological_impact()[0][0]
        assert not act.get_topological_impact()[0][3]
        obs, *_ = self.env.step(act)
        act = self.agent.act(obs, None, None)
        assert act.get_topological_impact()[0][3]
# allow running this test file directly
if __name__ == "__main__":
    unittest.main()
| 4,794 | 41.8125 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_redisp_extreme.py | # Copyright (c) 2019-2022, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import warnings
import os
import numpy as np
import grid2op
from grid2op.Action.PlayableAction import PlayableAction
from grid2op.tests.helper_path_test import *
import unittest
import pdb
"""snippet for the "debug" stuff
if hasattr(self, "_debug") and self._debug:
import pdb
pdb.set_trace()
"""
class TestExtremeCurtail(unittest.TestCase):
    """Tests of the environment behaviour when curtailment actions are too strong to be
    compensated by the redispatchable generators (ramps / pmin / pmax constraints),
    with and without ``Parameters.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION``.

    NOTE(review): several assertions below have the form
    ``assert (np.all(np.abs(x)) <= 1)``; ``np.all`` returns a bool, so ``<= 1`` is
    always true and the check is vacuous. They were probably meant to be
    ``assert np.all(np.abs(x) <= 1)`` — to be confirmed and fixed.
    """

    def setUp(self) -> None:
        """Build the test env and record reference (no-curtailment) trajectories and actions."""
        self.env_name = os.path.join(PATH_DATA_TEST, "l2rpn_icaps_2021_small_test")
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(
                self.env_name,
                test=True
            )
        # retrieve the reference values, without curtailment
        self.env.seed(0)
        self.env.set_id(0)
        self.obs_ref = self.env.reset()
        self.obs1_ref, *_ = self.env.step(self.env.action_space())
        self.obs2_ref, *_ = self.env.step(self.env.action_space())
        self.obs3_ref, *_ = self.env.step(self.env.action_space())
        self.obs4_ref, *_ = self.env.step(self.env.action_space())
        self.obs5_ref, *_ = self.env.step(self.env.action_space())
        self.obs6_ref, *_ = self.env.step(self.env.action_space())
        # curtailment actions of decreasing feasibility (ratio of gen_pmax)
        self.curtail_ok = self.env.action_space(
            {"curtail": [(el, 0.64) for el in np.where(self.env.gen_renewable)[0]]}
        )
        # feasible only if every dispatchable generator is turned on
        self.curtail_ok_if_all_on = self.env.action_space(
            {"curtail": [(el, 0.32) for el in np.where(self.env.gen_renewable)[0]]}
        )
        # infeasible in a single step (would break the ramps)
        self.curtail_ko = self.env.action_space(
            {"curtail": [(el, 0.16) for el in np.where(self.env.gen_renewable)[0]]}
        )
        self.all_zero = self.env.action_space(
            {"curtail": [(el, 0.0) for el in np.where(self.env.gen_renewable)[0]]}
        )
        # cancels all curtailment
        self.all_one = self.env.action_space(
            {"curtail": [(el, 1.0) for el in np.where(self.env.gen_renewable)[0]]}
        )

    @staticmethod
    def _aux_test_gen(obsbefore, obsafter, tol=1e-4, min_loss_slack=0.2):
        """Check pmin/pmax on both observations and ramp limits between them for all
        generators except the slack; the slack is allowed an extra tolerance derived
        from the change in losses (``min_loss_slack`` is its floor, in MW)."""
        assert np.all(obsbefore.gen_p <= obsbefore.gen_pmax + tol)
        assert np.all(obsbefore.gen_p >= obsbefore.gen_pmin - tol)
        assert np.all(obsafter.gen_p <= obsafter.gen_pmax + tol)
        assert np.all(obsafter.gen_p >= obsafter.gen_pmin - tol)
        dispatchable = obsbefore.gen_redispatchable
        dispatchable[-1] = False  # we remove the slack... !
        assert np.all(
            (obsafter.gen_p[dispatchable] - obsbefore.gen_p[dispatchable])
            <= obsbefore.gen_max_ramp_up[dispatchable] + tol
        )
        assert np.all(
            (obsafter.gen_p[dispatchable] - obsbefore.gen_p[dispatchable])
            >= -obsbefore.gen_max_ramp_down[dispatchable] - tol
        )
        # check the slack does not violate too much the constraints (this would indicate an error in the
        # amount of power that needs to be redispatched)
        slack = -1
        slack_variation = obsafter.gen_p[slack] - obsbefore.gen_p[slack]
        loss_after = TestExtremeCurtail.aux_obs_loss(obsafter)
        loss_before = TestExtremeCurtail.aux_obs_loss(obsbefore)
        slack_tol = max(2.0 * abs(loss_after - loss_before), min_loss_slack)
        assert (
            slack_variation <= obsbefore.gen_max_ramp_up[slack] + slack_tol
        ), f"{slack_variation = :.2f}MW, way above the ramp up: {obsbefore.gen_max_ramp_up[slack]:.2f}"
        assert (
            slack_variation >= -obsbefore.gen_max_ramp_down[slack] - slack_tol
        ), f"{slack_variation = :.2f}MW, way below the ramp down: {-obsbefore.gen_max_ramp_down[slack]:.2f}"

    @staticmethod
    def _aux_compare_with_ref(env, obs, obs_ref, tol=1e-4, min_loss_slack=0.2):
        """Check that ``obs`` matches the reference (no-curtailment) observation
        ``obs_ref``: each non-slack generator produces its reference power once
        curtailment and redispatch are added back, and the slack only deviates by
        (roughly) the difference in losses."""
        slack_id = -1
        # slack does not absorb too much
        assert np.all(
            np.abs(env._gen_activeprod_t_redisp[:slack_id] - obs.gen_p[:slack_id])
            <= tol
        )
        # power for each generator is the same (when curtailment taken into account)
        assert np.all(
            np.abs(
                obs.gen_p[:slack_id]
                + obs.curtailment_mw[:slack_id]
                - obs.actual_dispatch[:slack_id]
                - obs_ref.gen_p[:slack_id]
            )
            <= tol
        )
        # check the slack
        loss = TestExtremeCurtail.aux_obs_loss(obs)
        loss_ref = TestExtremeCurtail.aux_obs_loss(obs_ref)
        slack_tol = max(2.0 * abs(loss_ref - loss), min_loss_slack)
        assert (
            abs(
                obs.gen_p[slack_id]
                - obs.actual_dispatch[slack_id]
                - obs_ref.gen_p[slack_id]
            )
            <= slack_tol
        )

    @staticmethod
    def aux_obs_loss(obs):
        """Total grid losses seen by this observation: production - storage - load (MW)."""
        loss = np.sum(obs.gen_p) - np.sum(obs.storage_power) - np.sum(obs.load_p)
        return loss

    def test_curtail_ok(self):
        """test that the env can automatically turn on all generators to prevent issues if curtailment is too strong
        new in grid2Op version 1.6.6"""
        self.env.seed(0)
        self.env.set_id(0)
        obs = self.env.reset()
        act = self.curtail_ok
        obs1, reward, done, info = self.env.step(act)
        assert not done
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert (
            np.all(
                np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            )
            <= 1
        )
        # this curtailment is feasible without turning on every dispatchable generator
        assert np.any(obs1.gen_p[obs.gen_redispatchable] == 0.0)
        self._aux_test_gen(obs, obs1)

    def test_fix_curtail(self):
        """test that the env can automatically turn on all generators to prevent issues if curtailment is too strong
        new in grid2Op version 1.6.6"""
        self.env.seed(0)
        self.env.set_id(0)
        obs = self.env.reset()
        act = self.curtail_ok_if_all_on
        obs1, reward, done, info = self.env.step(act)
        assert not done
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert (
            np.all(
                np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            )
            <= 1
        )
        # here the env had to turn every dispatchable generator on
        assert np.all(obs1.gen_p[obs.gen_redispatchable] > 0.0)
        self._aux_test_gen(obs, obs1)
        self._aux_compare_with_ref(self.env, obs1, self.obs1_ref)

    def test_curtail_fail(self):
        """test that the env fails if the parameters is set to LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = False"
        default behaviour and only possible behaviour is grid2op <= 1.6.5"""
        self.env.seed(0)
        self.env.set_id(0)
        obs = self.env.reset()
        assert not self.env.parameters.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION
        act = self.curtail_ko
        obs, reward, done, info = self.env.step(act)
        assert done

    def test_curtail_dont_fail(self):
        """when setting the parameters to LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = True,
        the env does not fail anymore (as opposed to test_curtail_fail)"""
        param = self.env.parameters
        param.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = True
        self.env.change_parameters(param)
        self.env.seed(0)
        self.env.set_id(0)
        obs = self.env.reset()
        act = self.curtail_ko
        obs1, reward, done, info = self.env.step(act)
        assert not done
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert (
            np.all(
                np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            )
            <= 1
        )
        assert np.all(obs1.gen_p[obs1.gen_redispatchable] > 0.0)
        # the curtailment should be limited (so higher that originally)
        gen_part = self.env.gen_renewable & (obs1.gen_p > 0.0)
        assert np.all(
            obs1.gen_p[gen_part] / obs1.gen_pmax[gen_part] > act.curtail[gen_part]
        )
        self._aux_test_gen(obs, obs1)
        self._aux_compare_with_ref(self.env, obs1, self.obs1_ref)

    def test_set_back_to_normal(self):
        """test that the curtailment setpoint, once enough time has passed is achieved"""
        param = self.env.parameters
        param.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = True
        self.env.change_parameters(param)
        self.env.seed(0)
        self.env.set_id(0)
        obs = self.env.reset()
        act = self.curtail_ko
        # first action would break the grid, it is limited
        obs0, reward, done, info = self.env.step(act)
        assert not done
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert (
            np.all(
                np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            )
            <= 1
        )
        gen_part = self.env.gen_renewable & (obs0.gen_p > 0.0)
        assert np.all(
            obs0.gen_p[gen_part] / obs0.gen_pmax[gen_part] > act.curtail[gen_part]
        )
        assert np.all(obs0.gen_p >= -self.env._tol_poly)
        assert np.all(
            obs0.gen_p_before_curtail[self.env.gen_renewable]
            == self.obs1_ref.gen_p[self.env.gen_renewable]
        )
        self._aux_test_gen(obs, obs0)
        self._aux_compare_with_ref(self.env, obs0, self.obs1_ref)
        # next step = the action can be completely made, it does it
        obs1, reward, done, info = self.env.step(self.env.action_space())
        assert not done
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert (
            np.all(
                np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            )
            <= 1
        )
        gen_part = self.env.gen_renewable & (obs1.gen_p > 0.0)
        assert np.all(obs1.gen_p >= -self.env._tol_poly)
        assert np.all(
            obs1.curtailment_limit[gen_part]
            == obs1.gen_p[gen_part] / obs1.gen_pmax[gen_part]
        )
        assert np.all(
            obs1.gen_p_before_curtail[self.env.gen_renewable]
            == self.obs2_ref.gen_p[self.env.gen_renewable]
        )
        self._aux_test_gen(obs0, obs1)
        self._aux_compare_with_ref(self.env, obs1, self.obs2_ref)
        # make sure it stays at the sepoint
        obs2, reward, done, info = self.env.step(self.env.action_space())
        assert not done
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert (
            np.all(
                np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            )
            <= 1
        )
        gen_part = self.env.gen_renewable & (obs2.gen_p > 0.0)
        assert np.all(obs2.gen_p >= -self.env._tol_poly)
        assert np.all(
            obs2.curtailment_limit[gen_part]
            == obs2.gen_p[gen_part] / obs2.gen_pmax[gen_part]
        )
        assert np.all(
            obs2.gen_p_before_curtail[self.env.gen_renewable]
            == self.obs3_ref.gen_p[self.env.gen_renewable]
        )
        self._aux_test_gen(obs1, obs2)
        self._aux_compare_with_ref(self.env, obs2, self.obs3_ref)

    def test_set_back_to_normal_2(self):
        """test that the curtailment setpoint, once enough time has passed is achieved
        enough time should be 3 steps here"""
        param = self.env.parameters
        param.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = True
        self.env.change_parameters(param)
        self.env.seed(0)
        self.env.set_id(0)
        obs = self.env.reset()
        act = self.all_zero
        # first action would break the grid, it is limited
        obs0, reward, done, info = self.env.step(act)
        assert not done, "env should not have diverge at first acction"
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert (
            np.all(
                np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            )
            <= 1
        )
        gen_part = self.env.gen_renewable & (obs0.gen_p > 0.0)
        assert np.all(
            obs0.gen_p[gen_part] / obs0.gen_pmax[gen_part] > act.curtail[gen_part]
        )
        assert np.all(
            obs0.gen_p_before_curtail[self.env.gen_renewable]
            == self.obs1_ref.gen_p[self.env.gen_renewable]
        )
        self._aux_test_gen(obs, obs0)
        self._aux_compare_with_ref(self.env, obs0, self.obs1_ref)
        # next step = we got close to the setpoint, but still not there yet
        obs1, reward, done, info = self.env.step(self.env.action_space())
        assert not done, "env should not have diverge after first do nothing"
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert (
            np.all(
                np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            )
            <= 1
        )
        # I got close to the setpoint
        assert np.all(
            obs1.gen_p[gen_part] / obs1.gen_pmax[gen_part]
            < obs.gen_p[gen_part] / obs.gen_pmax[gen_part]
        )
        # I am still not at the setpoint
        gen_part = self.env.gen_renewable & (obs1.gen_p > 0.0)
        assert np.all(
            obs1.gen_p[gen_part] / obs1.gen_pmax[gen_part] > act.curtail[gen_part]
        )
        assert np.all(
            obs1.gen_p_before_curtail[self.env.gen_renewable]
            == self.obs2_ref.gen_p[self.env.gen_renewable]
        )
        self._aux_test_gen(obs0, obs1)
        self._aux_compare_with_ref(self.env, obs1, self.obs2_ref)
        # next step = the action can be completely made, it does it
        obs2, reward, done, info = self.env.step(self.env.action_space())
        assert not done, "env should not have diverge after second do nothing"
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert (
            np.all(
                np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            )
            <= 1
        )
        gen_part = self.env.gen_renewable & (obs2.gen_p > 0.0)
        assert np.all(obs2.gen_p >= -self.env._tol_poly)
        assert np.all(
            obs2.gen_p[gen_part] / obs2.gen_pmax[gen_part]
            < obs1.gen_p[gen_part] / obs1.gen_pmax[gen_part]
        )
        assert np.all(
            obs2.gen_p[gen_part] / obs2.gen_pmax[gen_part] == act.curtail[gen_part]
        )
        assert np.all(
            obs2.gen_p_before_curtail[self.env.gen_renewable]
            == self.obs3_ref.gen_p[self.env.gen_renewable]
        )
        self._aux_test_gen(obs1, obs2)
        self._aux_compare_with_ref(self.env, obs2, self.obs3_ref)
        # make sure it stays at the sepoint
        obs3, reward, done, info = self.env.step(self.env.action_space())
        assert not done, "env should not have diverge after third do nothing"
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert (
            np.all(
                np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            )
            <= 1
        )
        gen_part = self.env.gen_renewable & (obs3.gen_p > 0.0)
        assert np.all(obs3.gen_p >= -self.env._tol_poly)
        assert np.all(
            obs3.curtailment_limit[gen_part]
            == obs3.gen_p[gen_part] / obs3.gen_pmax[gen_part]
        )
        assert np.all(
            obs3.gen_p_before_curtail[self.env.gen_renewable]
            == self.obs4_ref.gen_p[self.env.gen_renewable]
        )
        self._aux_test_gen(obs2, obs3)
        self._aux_compare_with_ref(self.env, obs3, self.obs4_ref)

    def test_down_then_up(self):
        """test that i can curtail down to the setpoint, then up again until the curtailment is canceled"""
        param = self.env.parameters
        param.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = True
        self.env.change_parameters(param)
        self.env.seed(0)
        self.env.set_id(0)
        obs = self.env.reset()
        act = self.curtail_ko
        # we first do as in test_set_back_to_normal
        obs0, reward, done, info = self.env.step(act)
        assert not done
        obs1, reward, done, info = self.env.step(self.env.action_space())
        assert not done
        # now the setpoint is reached, let's increase "at once" (it is possible without violating anything)
        obs2, reward, done, info = self.env.step(self.all_one)
        assert not done
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert (
            np.all(
                np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            )
            <= 1
        )
        assert np.all(obs2.gen_p >= 0.0)
        assert np.all(
            obs2.gen_p[self.env.gen_renewable] >= obs1.gen_p[self.env.gen_renewable]
        )
        self._aux_test_gen(obs1, obs2)
        self._aux_compare_with_ref(self.env, obs2, self.obs3_ref)
        # re increase to check that the setpoint is correct
        obs3, reward, done, info = self.env.step(self.env.action_space())
        assert not done
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert (
            np.all(
                np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            )
            <= 1
        )
        assert np.all(obs3.gen_p >= -self.env._tol_poly)
        assert np.all(
            obs3.gen_p[self.env.gen_renewable] >= obs2.gen_p[self.env.gen_renewable]
        )
        self._aux_test_gen(obs2, obs3)
        self._aux_compare_with_ref(self.env, obs3, self.obs4_ref)
        gen_part = self.env.gen_renewable & (obs3.gen_p > self.env._tol_poly)
        # generator produce less than pmax
        assert np.all(obs3.curtailment_limit[gen_part] <= obs3.gen_pmax[gen_part])
        # no more curtailment, so productions increase
        assert np.all(
            obs3.gen_p[self.env.gen_renewable] >= obs2.gen_p[self.env.gen_renewable]
        )
        # information of generation without curtailment is correct
        assert np.all(
            obs3.gen_p_before_curtail[self.env.gen_renewable]
            == self.obs4_ref.gen_p[self.env.gen_renewable]
        )
        # setpoint is matched
        assert np.all(
            obs3.gen_p_before_curtail[self.env.gen_renewable]
            == obs3.gen_p[self.env.gen_renewable]
        )

    def test_down_then_up_2(self):
        """test that i can curtail down to the setpoint, then up again until the curtailment is canceled
        but for a more complex case
        """
        param = self.env.parameters
        param.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = True
        self.env.change_parameters(param)
        self.env.seed(0)
        self.env.set_id(0)
        obs = self.env.reset()
        act = self.all_zero
        # we first do as in test_set_back_to_normal_2
        obs0, reward, done, info = self.env.step(act)
        assert not done
        obs1, reward, done, info = self.env.step(self.env.action_space())
        assert not done
        obs2, reward, done, info = self.env.step(self.env.action_space())
        assert not done
        # now the setpoint is reached, let's increase "at once" (it is possible without violating anything)
        obs3, reward, done, info = self.env.step(self.all_one)
        assert not done
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert (
            np.all(
                np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            )
            <= 1
        )
        assert np.all(obs3.gen_p >= -self.env._tol_poly)
        assert np.all(
            obs3.gen_p[self.env.gen_renewable] >= obs2.gen_p[self.env.gen_renewable]
        )
        self._aux_test_gen(obs2, obs3)
        self._aux_compare_with_ref(self.env, obs3, self.obs4_ref)
        # another do nothing (setpoint still not reached)
        obs4, reward, done, info = self.env.step(self.env.action_space())
        assert not done
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert (
            np.all(
                np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            )
            <= 1
        )
        assert np.all(obs4.gen_p >= -self.env._tol_poly)
        assert np.all(
            obs4.gen_p[self.env.gen_renewable] >= obs3.gen_p[self.env.gen_renewable]
        )
        self._aux_test_gen(obs3, obs4)
        self._aux_compare_with_ref(self.env, obs4, self.obs5_ref)
        # setpoint should be correct now
        obs5, reward, done, info = self.env.step(self.env.action_space())
        assert not done
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert (
            np.all(
                np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            )
            <= 1
        )
        assert np.all(obs5.gen_p >= -self.env._tol_poly)
        assert np.all(
            obs5.gen_p[self.env.gen_renewable] >= obs1.gen_p[self.env.gen_renewable]
        )
        self._aux_test_gen(obs4, obs5)
        self._aux_compare_with_ref(self.env, obs5, self.obs6_ref)
        gen_part = self.env.gen_renewable & (obs3.gen_p > 0.0)
        # generator produce less than pmax
        assert np.all(obs5.curtailment_limit[gen_part] <= obs5.gen_pmax[gen_part])
        # no more curtailment, so productions increase
        assert np.all(
            obs5.gen_p[self.env.gen_renewable] >= obs4.gen_p[self.env.gen_renewable]
        )
        # information of generation without curtailment is correct
        assert np.all(
            obs5.gen_p_before_curtail[self.env.gen_renewable]
            == self.obs6_ref.gen_p[self.env.gen_renewable]
        )
        # setpoint is matched
        assert np.all(
            obs5.gen_p_before_curtail[self.env.gen_renewable]
            == obs5.gen_p[self.env.gen_renewable]
        )
class TestExtremeStorage(unittest.TestCase):
    """Tests of the environment behaviour when storage actions (possibly combined with
    curtailment) are too strong to be compensated by the redispatchable generators,
    with and without ``Parameters.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION``.

    Fixes applied in review:
    - ``_aux_test_storage`` computed the "before" masks from ``obsafter`` (then
      recomputed the very same masks for the "after" checks); the before-checks now
      use masks built from ``obsbefore``.
    - ``test_tests_down`` and ``test_storage_limit_gen_up`` called
      ``_aux_test_gen(obs3, obs3, ...)``, comparing an observation with itself and
      making the ramp checks vacuous; they now compare the consecutive pair
      ``(obs2, obs3)`` as done everywhere else.
    - assertions of the form ``assert (np.all(np.abs(x)) <= 1)`` were vacuous
      (``np.all`` returns a bool); they now assert ``np.all(np.abs(x) <= 1)``,
      i.e. a 1 MW tolerance per generator as intended.
    """

    def setUp(self) -> None:
        """Build the storage-enabled test env (with enlarged storage power limits) and
        record reference (do-nothing) trajectories and the storage actions under test."""
        self.env_name = "educ_case14_storage"
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(
                self.env_name,
                test=True,
                data_feeding_kwargs={"max_iter": 10},
                _add_to_name="TestExtremeStorage",
                action_class=PlayableAction,
            )
        # increase the storage capacity (on the class attributes, so on every
        # grid2op object type tied to this environment)
        increase_storage = np.array([15.0, 30.0])
        type(self.env).storage_max_p_absorb[:] = increase_storage
        type(self.env).storage_max_p_prod[:] = increase_storage
        type(self.env.action_space).storage_max_p_absorb[:] = increase_storage
        type(self.env.action_space).storage_max_p_prod[:] = increase_storage
        self.env.action_space.actionClass.storage_max_p_absorb[:] = increase_storage
        self.env.action_space.actionClass.storage_max_p_prod[:] = increase_storage
        self.env.observation_space.observationClass.storage_max_p_absorb[
            :
        ] = increase_storage
        self.env.observation_space.observationClass.storage_max_p_prod[
            :
        ] = increase_storage
        # retrieve the reference values, without any storage / curtailment action
        self.env.seed(0)
        self.env.set_id(0)
        self.obs_ref = self.env.reset()
        self.obs1_ref, *_ = self.env.step(self.env.action_space())
        self.obs2_ref, *_ = self.env.step(self.env.action_space())
        self.obs3_ref, *_ = self.env.step(self.env.action_space())
        self.obs4_ref, *_ = self.env.step(self.env.action_space())
        self.obs5_ref, *_ = self.env.step(self.env.action_space())
        self.obs6_ref, *_ = self.env.step(self.env.action_space())
        # full-power production: infeasible in one step (breaks the gen ramps)
        self.storage_ko_down = self.env.action_space(
            {"set_storage": -self.env.storage_max_p_absorb}
        )
        # full-power absorption: infeasible in one step as well
        self.storage_ko_up = self.env.action_space(
            {"set_storage": +self.env.storage_max_p_absorb}
        )
        # half-power production: feasible
        self.storage_ok_down = self.env.action_space(
            {"set_storage": -0.5 * self.env.storage_max_p_absorb}
        )
        # strong absorption combined with full curtailment: infeasible in one step
        self.storage_curtail = self.env.action_space(
            {
                "set_storage": 0.8 * self.env.storage_max_p_absorb,
                "curtail": [(el, 0.0) for el in np.where(self.env.gen_renewable)[0]],
            }
        )

    @staticmethod
    def _aux_test_storage(obsbefore, obsafter, tol=1.1e-2):
        """Check that both observations respect the storage power and energy limits,
        and that the power / energy conversion between the two steps is consistent
        (accounting for charging / discharging efficiencies and losses)."""
        # power limits on the "before" observation (masks built from obsbefore)
        prod_ = obsbefore.storage_power < 0.0
        consume_ = obsbefore.storage_power > 0.0
        assert np.all(
            obsbefore.storage_power[prod_] >= -obsbefore.storage_max_p_prod[prod_]
        )
        assert np.all(
            obsbefore.storage_power[consume_]
            <= obsbefore.storage_max_p_absorb[consume_]
        )
        # power limits on the "after" observation
        prod_ = obsafter.storage_power < 0.0
        consume_ = obsafter.storage_power > 0.0
        assert np.all(
            obsafter.storage_power[prod_] >= -type(obsafter).storage_max_p_prod[prod_]
        )
        assert np.all(
            obsafter.storage_power[consume_]
            <= type(obsafter).storage_max_p_absorb[consume_]
        )
        # energy limits on both observations
        assert np.all(obsbefore.storage_charge <= type(obsbefore).storage_Emax)
        assert np.all(obsbefore.storage_charge >= type(obsbefore).storage_Emin)
        assert np.all(obsafter.storage_charge <= type(obsafter).storage_Emax)
        assert np.all(obsafter.storage_charge >= type(obsafter).storage_Emin)
        # check links between storage and energy
        delta_t = obsafter.delta_time * 60.0  # delta_time is in minutes
        energy_to_power = 3600.0 / delta_t
        storage_power = 1.0 * obsafter.storage_power
        delta_energy = obsafter.storage_charge - obsbefore.storage_charge
        # undo the efficiencies to recover the power actually exchanged with the grid
        delta_energy[delta_energy < 0.0] *= obsbefore.storage_discharging_efficiency[
            delta_energy < 0.0
        ]
        delta_energy[delta_energy > 0.0] /= obsbefore.storage_charging_efficiency[
            delta_energy > 0.0
        ]
        assert np.all(
            np.abs(
                delta_energy * energy_to_power + obsbefore.storage_loss - storage_power
            )
            <= tol
        )

    def test_do_break(self):
        """without LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION, the infeasible storage
        (and storage + curtailment) actions make the environment diverge."""
        self.env.seed(0)
        self.env.set_id(0)
        obs0 = self.env.reset()
        obs1, reward, done, info = self.env.step(self.storage_ko_down)
        # there is not enough ramp down to "absorb" what the storage units produces
        assert done
        self.env.seed(0)
        self.env.set_id(0)
        obs0 = self.env.reset()
        obs1, reward, done, info = self.env.step(self.storage_ko_up)
        # there is not enough ramp up to "produce" what the storage units absorbs
        assert done
        self.env.seed(0)
        self.env.set_id(0)
        obs0 = self.env.reset()
        obs1, reward, done, info = self.env.step(self.storage_curtail)
        # there is not enough ramp up to "produce" what the storage units absorbs
        assert done

    def test_storage_limit_gen_down(self):
        """
        test that the storage action that would lead to a game over (see test_do_break)
        do not when the parameters is properly set
        """
        param = self.env.parameters
        param.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = True
        self.env.change_parameters(param)
        self.env.seed(0)
        self.env.set_id(0)
        obs = self.env.reset()
        obs1, reward, done, info = self.env.step(self.storage_ko_down)
        assert not done
        amount_storage_first_step = 1.0 * self.env._amount_storage
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert np.all(
            np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            <= 1
        )
        # test the storage is "limited"
        assert np.all(obs1.storage_power > self.storage_ko_down.storage_p)
        # test the energy / power is properly converted
        self._aux_test_storage(obs, obs1)
        # test the generators are ok
        TestExtremeCurtail._aux_test_gen(
            obs, obs1, min_loss_slack=4
        )  # I generate ~40 MW on this grid with storage, losses changes a lot !
        TestExtremeCurtail._aux_compare_with_ref(
            self.env, obs1, self.obs1_ref, min_loss_slack=4
        )
        obs2, reward, done, info = self.env.step(self.env.action_space())
        assert not done
        # assert np.all(obs2.storage_power == 0.) # this is no more true because i did not get enough "ramp"
        obs2_power_storage = np.sum(obs2.storage_power)
        assert (
            self.env._amount_storage == -amount_storage_first_step + obs2_power_storage
        )
        self._aux_test_storage(obs1, obs2)
        TestExtremeCurtail._aux_test_gen(
            obs1, obs2, min_loss_slack=4
        )  # I generate ~40 MW on this grid with storage, losses changes a lot !
        TestExtremeCurtail._aux_compare_with_ref(
            self.env, obs2, self.obs2_ref, min_loss_slack=4
        )
        obs3, reward, done, info = self.env.step(self.env.action_space())
        assert np.all(obs3.storage_power == 0.0)
        assert abs(self.env._amount_storage - (-obs2_power_storage)) <= 1e-4
        self._aux_test_storage(obs2, obs3)
        TestExtremeCurtail._aux_test_gen(
            obs2, obs3, min_loss_slack=4
        )  # I generate ~40 MW on this grid with storage, losses changes a lot !
        TestExtremeCurtail._aux_compare_with_ref(
            self.env, obs3, self.obs3_ref, min_loss_slack=4
        )
        obs4, reward, done, info = self.env.step(self.env.action_space())
        assert np.all(obs4.storage_power == 0.0)
        assert self.env._amount_storage == 0.0
        self._aux_test_storage(obs3, obs4)
        TestExtremeCurtail._aux_test_gen(
            obs3, obs4, min_loss_slack=4
        )  # I generate ~40 MW on this grid with storage, losses changes a lot !
        TestExtremeCurtail._aux_compare_with_ref(
            self.env, obs4, self.obs4_ref, min_loss_slack=4
        )

    def test_tests_down(self):
        """in this test i do not test the new feature, i test that the tests performed are working
        in a standard grid2op fashion
        """
        self.env.seed(0)
        self.env.set_id(0)
        obs = self.env.reset()
        obs1, reward, done, info = self.env.step(self.storage_ok_down)
        assert not done
        amount_storage_first_step = 1.0 * self.env._amount_storage
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert np.all(
            np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            <= 1
        )
        # the feasible (half-power) setpoint exceeds the extreme one, trivially so here
        assert np.all(obs1.storage_power > self.storage_ko_down.storage_p)
        # test the energy / power is properly converted
        self._aux_test_storage(obs, obs1)
        # test the generators are ok
        TestExtremeCurtail._aux_test_gen(
            obs, obs1, min_loss_slack=4
        )  # I generate ~40 MW on this grid with storage, losses changes a lot !
        TestExtremeCurtail._aux_compare_with_ref(
            self.env, obs1, self.obs1_ref, min_loss_slack=4
        )
        obs2, reward, done, info = self.env.step(self.env.action_space())
        assert np.all(obs2.storage_power == 0.0)
        assert self.env._amount_storage == -amount_storage_first_step
        self._aux_test_storage(obs1, obs2)
        TestExtremeCurtail._aux_test_gen(
            obs1, obs2, min_loss_slack=4
        )  # I generate ~40 MW on this grid with storage, losses changes a lot !
        TestExtremeCurtail._aux_compare_with_ref(
            self.env, obs2, self.obs2_ref, min_loss_slack=4
        )
        obs3, reward, done, info = self.env.step(self.env.action_space())
        assert np.all(obs3.storage_power == 0.0)
        assert self.env._amount_storage == 0.0
        self._aux_test_storage(obs2, obs3)
        # fixed: was (obs3, obs3), a vacuous self-comparison
        TestExtremeCurtail._aux_test_gen(
            obs2, obs3, min_loss_slack=4
        )  # I generate ~40 MW on this grid with storage, losses changes a lot !
        TestExtremeCurtail._aux_compare_with_ref(
            self.env, obs3, self.obs3_ref, min_loss_slack=4
        )

    def test_storage_limit_gen_up(self):
        """
        test that the storage action that would lead to a game over (see test_do_break)
        do not when the parameters is properly set
        """
        param = self.env.parameters
        param.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = True
        self.env.change_parameters(param)
        self.env.seed(0)
        self.env.set_id(0)
        obs = self.env.reset()
        obs1, reward, done, info = self.env.step(self.storage_ko_up)
        assert not done
        amount_storage_first_step = 1.0 * self.env._amount_storage
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert np.all(
            np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            <= 1
        )
        # test the storage is "limited"
        assert np.all(obs1.storage_power < self.storage_ko_up.storage_p)
        # test the energy / power is properly converted
        self._aux_test_storage(obs, obs1)
        # test the generators are ok
        TestExtremeCurtail._aux_test_gen(
            obs, obs1, min_loss_slack=4
        )  # I generate ~40 MW on this grid with storage, losses changes a lot !
        TestExtremeCurtail._aux_compare_with_ref(
            self.env, obs1, self.obs1_ref, min_loss_slack=4
        )
        obs2, reward, done, info = self.env.step(self.env.action_space())
        assert np.all(obs2.storage_power == 0.0)
        assert self.env._amount_storage == -amount_storage_first_step
        self._aux_test_storage(obs1, obs2)
        TestExtremeCurtail._aux_test_gen(
            obs1, obs2, min_loss_slack=4
        )  # I generate ~40 MW on this grid with storage, losses changes a lot !
        TestExtremeCurtail._aux_compare_with_ref(
            self.env, obs2, self.obs2_ref, min_loss_slack=4
        )
        obs3, reward, done, info = self.env.step(self.env.action_space())
        assert np.all(obs3.storage_power == 0.0)
        assert self.env._amount_storage == 0.0
        self._aux_test_storage(obs2, obs3)
        # fixed: was (obs3, obs3), a vacuous self-comparison
        TestExtremeCurtail._aux_test_gen(
            obs2, obs3, min_loss_slack=4
        )  # I generate ~40 MW on this grid with storage, losses changes a lot !
        TestExtremeCurtail._aux_compare_with_ref(
            self.env, obs3, self.obs3_ref, min_loss_slack=4
        )

    def test_storage_curtail(self):
        """combined storage + curtailment infeasible action: both components are limited
        instead of breaking the environment."""
        param = self.env.parameters
        param.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = True
        self.env.change_parameters(param)
        self.env.seed(0)
        self.env.set_id(0)
        obs = self.env.reset()
        obs1, reward, done, info = self.env.step(self.storage_curtail)
        assert not done
        # not too much losses (which would indicate errors in the computation of the total amount to dispatch)
        assert np.all(
            np.abs(self.env._gen_activeprod_t_redisp - self.env._gen_activeprod_t)
            <= 1
        )
        # test the storage is "limited"
        assert np.all(obs1.storage_power < self.storage_curtail.storage_p)
        gen_curt = obs1.gen_renewable & (obs1.gen_p > 0.0)
        assert np.all(
            obs1.gen_p[gen_curt] / obs1.gen_pmax[gen_curt]
            > self.storage_curtail.curtail[gen_curt]
        )
        # test the energy / power is properly converted
        self._aux_test_storage(obs, obs1)
        # test the generators are ok
        TestExtremeCurtail._aux_test_gen(
            obs, obs1, min_loss_slack=4
        )  # I generate ~40 MW on this grid with storage, losses changes a lot !
        TestExtremeCurtail._aux_compare_with_ref(
            self.env, obs1, self.obs1_ref, min_loss_slack=4
        )
        obs2, reward, done, info = self.env.step(self.env.action_space())
        assert np.all(
            obs2.gen_p[obs2.gen_renewable] >= 0.0
        ), "some curtailment make for a negative production !"
        assert np.all(
            obs2.gen_p[obs2.gen_renewable] == 0.0
        )  # everything is set to 0. now !
        self._aux_test_storage(obs1, obs2)
        # test the generators are ok
        TestExtremeCurtail._aux_test_gen(
            obs1, obs2, min_loss_slack=4
        )  # I generate ~40 MW on this grid with storage, losses changes a lot !
        TestExtremeCurtail._aux_compare_with_ref(
            self.env, obs2, self.obs2_ref, min_loss_slack=4
        )
        obs3, reward, done, info = self.env.step(self.env.action_space())
        assert np.all(
            obs3.gen_p[obs2.gen_renewable] >= 0.0
        ), "some curtailment make for a negative production !"
        assert np.all(
            obs3.gen_p[obs2.gen_renewable] == 0.0
        )  # everything is set to 0. now !
        self._aux_test_storage(obs2, obs3)
        # test the generators are ok
        TestExtremeCurtail._aux_test_gen(
            obs2, obs3, min_loss_slack=4
        )  # I generate ~40 MW on this grid with storage, losses changes a lot !
        TestExtremeCurtail._aux_compare_with_ref(
            self.env, obs3, self.obs3_ref, min_loss_slack=4
        )

    # TODO test with simulate !!!!
# Allow running this test file directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 38,665 | 41.028261 | 116 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_remove_line_status_from_topo.py | # Copyright (c) 2019-2022, RTE (https://www.rte-france.com)
# See AUTHORS.txt and https://github.com/rte-france/Grid2Op/pull/319
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
from grid2op.Exceptions import AmbiguousAction
import unittest
import warnings
import pdb
class RemoveLineStatusFromTopoTester(unittest.TestCase):
def setUp(self) -> None:
    """Create the sandbox env with 3-step line/sub cooldowns and overflow disconnection off."""
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        self.env = grid2op.make("l2rpn_case14_sandbox", test=True)
    param = self.env.parameters
    for attr_name, attr_value in (
        ("NB_TIMESTEP_COOLDOWN_SUB", 3),
        ("NB_TIMESTEP_COOLDOWN_LINE", 3),
        ("NO_OVERFLOW_DISCONNECTION", True),
    ):
        setattr(param, attr_name, attr_value)
    self.env.change_parameters(param)
    _ = self.env.reset()
def test_limit_reco(self):
"""test that it limit the action when it reconnects"""
act = self.env.action_space({"set_bus": {"substations_id": [(1, [1, 2, 2, -1, 1, 1])]}})
obs, reward, done, info = self.env.step(act)
# limit reco when set
act_sub4_clean = self.env.action_space({"set_bus": {"substations_id": [(4, [2, 2, 2, 1, 1])]}})
assert act_sub4_clean._set_topo_vect[20] == 2
act_sub4_clean.remove_line_status_from_topo(obs)
assert act_sub4_clean._set_topo_vect[20] == 0
# limit reco when change
act_sub4_clean = self.env.action_space({"change_bus": {"substations_id": [(4, [True, True, True, False, False])]}})
assert act_sub4_clean._change_bus_vect[20]
act_sub4_clean.remove_line_status_from_topo(obs)
assert not act_sub4_clean._change_bus_vect[20]
def test_limit_disco(self):
"""test that it limit the action when it disconnects"""
dn = self.env.action_space()
act = self.env.action_space({"set_bus": {"substations_id": [(1, [1, 2, 2, -1, 1, 1])]}})
obs, reward, done, info = self.env.step(act)
assert obs.time_before_cooldown_line[4] == 3
obs, reward, done, info = self.env.step(dn)
obs, reward, done, info = self.env.step(dn)
obs, reward, done, info = self.env.step(dn)
assert obs.time_before_cooldown_line[4] == 0
# reconnect it
act_reco = self.env.action_space({"set_bus": {"substations_id": [(4, [2, 2, 2, 1, 1])]}})
obs, reward, done, info = self.env.step(act_reco)
assert obs.time_before_cooldown_line[4] == 3
# limit disco when set
act_deco = self.env.action_space({"set_bus": {"substations_id": [(1, [1, 2, 2, -1, 1, 1])]}})
assert act_deco._set_topo_vect[6] == -1
act_deco.remove_line_status_from_topo(obs)
assert act_deco._set_topo_vect[6] == 0
def test_nothing_when_cooldown(self):
"""test it does nothing when there is no cooldown"""
dn = self.env.action_space()
act = self.env.action_space({"set_bus": {"substations_id": [(1, [1, 2, 2, -1, 1, 1])]}})
obs, reward, done, info = self.env.step(act)
assert obs.time_before_cooldown_line[4] == 3
obs, reward, done, info = self.env.step(dn)
obs, reward, done, info = self.env.step(dn)
obs, reward, done, info = self.env.step(dn)
assert obs.time_before_cooldown_line[4] == 0
# action should not be modified because there is a cooldown
act_sub4_clean = self.env.action_space({"set_bus": {"substations_id": [(4, [2, 2, 2, 1, 1])]}})
assert act_sub4_clean._set_topo_vect[20] == 2
act_sub4_clean.remove_line_status_from_topo(obs)
assert act_sub4_clean._set_topo_vect[20] == 2
def test_something_when_nocooldown_butcheck_cooldown(self):
"""test that something is done when no cooldown but the check_cooldown is set"""
dn = self.env.action_space()
act = self.env.action_space({"set_bus": {"substations_id": [(1, [1, 2, 2, -1, 1, 1])]}})
obs, reward, done, info = self.env.step(act)
assert obs.time_before_cooldown_line[4] == 3
obs, reward, done, info = self.env.step(dn)
obs, reward, done, info = self.env.step(dn)
obs, reward, done, info = self.env.step(dn)
assert obs.time_before_cooldown_line[4] == 0
# action should not be modified because there is a cooldown
act_sub4_clean = self.env.action_space({"set_bus": {"substations_id": [(4, [2, 2, 2, 1, 1])]}})
assert act_sub4_clean._set_topo_vect[20] == 2
act_sub4_clean.remove_line_status_from_topo(obs, check_cooldown=False)
assert act_sub4_clean._set_topo_vect[20] == 0
def test_limit_withoutobs(self):
"""test that it limit the action correctly when no obs is provided"""
disco = self.env.action_space({"set_line_status": [(4, -1)]})
reco = self.env.action_space({"set_line_status": [(4, +1)]})
# limit reco when set
act_sub4_clean = self.env.action_space({"set_bus": {"substations_id": [(4, [2, 2, 2, 1, 1])]}})
act_sub4_clean += disco
assert act_sub4_clean._set_topo_vect[20] == 2
assert act_sub4_clean._set_line_status[4] == -1
with self.assertRaises(AmbiguousAction):
act_sub4_clean._check_for_ambiguity()
act_sub4_clean.remove_line_status_from_topo(check_cooldown=False)
assert act_sub4_clean._set_topo_vect[20] == 0
assert act_sub4_clean._set_line_status[4] == -1
act_sub4_clean._check_for_ambiguity() # does not raise
# limit reco when change
act_sub4_clean = self.env.action_space({"change_bus": {"substations_id": [(4, [True, True, True, False, False])]}})
act_sub4_clean += disco
assert act_sub4_clean._change_bus_vect[20]
assert act_sub4_clean._set_line_status[4] == -1
with self.assertRaises(AmbiguousAction):
act_sub4_clean._check_for_ambiguity()
act_sub4_clean.remove_line_status_from_topo(check_cooldown=False)
assert not act_sub4_clean._change_bus_vect[20]
assert act_sub4_clean._set_line_status[4] == -1
act_sub4_clean._check_for_ambiguity() # does not raise
# limit disco when set
act_sub4_clean = self.env.action_space({"set_bus": {"substations_id": [(4, [2, -1, 2, 1, 1])]}})
act_sub4_clean += reco
assert act_sub4_clean._set_topo_vect[20] == -1
assert act_sub4_clean._set_line_status[4] == 1
with self.assertRaises(AmbiguousAction):
act_sub4_clean._check_for_ambiguity()
act_sub4_clean.remove_line_status_from_topo(check_cooldown=False)
assert act_sub4_clean._set_topo_vect[20] == 0
assert act_sub4_clean._set_line_status[4] == 1
act_sub4_clean._check_for_ambiguity() # does not raise
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 7,262 | 48.074324 | 123 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_render.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
import warnings
import unittest
class BaseTestPlot(unittest.TestCase):
    """Smoke test of ``Environment.render`` on the sandbox environment."""

    def setUp(self):
        # grid2op.make warns about test environments: silence the noise
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("l2rpn_case14_sandbox", test=True)

    def tearDown(self):
        self.env.close()

    def test_render(self):
        """The rendered frame has the expected size and 8-bit value range."""
        _ = self.env.reset()
        frame = self.env.render()
        assert frame.shape == (720, 1280, 3)
        assert frame.min() == 0
        assert frame.max() == 255
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
Grid2Op | Grid2Op-master/grid2op/tests/test_reward_to_obs.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import grid2op
import warnings
import unittest
from grid2op.Observation import CompleteObservation
from grid2op.Reward import BaseReward
from grid2op.Runner import Runner
class CustomTestReward(BaseReward):
    """Reward that counts its own calls and stashes the count in
    ``env._reward_to_obs`` so the observation can read it back afterwards."""
    # class-level counter, shared by every instance (reset manually in tests)
    nb_call = 0
    def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
        # record the current call count keyed by the env time step, so that
        # CustomTestObservation.update_after_reward can retrieve it
        env._reward_to_obs[env.nb_time_step] = type(self).nb_call
        type(self).nb_call += 1
        return super().__call__(action, env, has_error, is_done, is_illegal, is_ambiguous)
class CustomTestObservation(CompleteObservation):
    """Observation that copies the value stored by CustomTestReward (for the
    current time step) into an ad-hoc ``stuff`` attribute."""
    def update_after_reward(self, env):
        # read back what CustomTestReward wrote for this step
        self.stuff = env._reward_to_obs[env.nb_time_step]
        return super().update_after_reward(env)
class BaseTestPlot(unittest.TestCase):
    """Check the reward -> observation communication channel: the reward writes
    into ``env._reward_to_obs`` and the observation exposes it as ``obs.stuff``
    (only for "real" steps, not for simulate / forecast environments)."""

    def setUp(self):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("l2rpn_case14_sandbox", test=True,
                                    observation_class=CustomTestObservation,
                                    reward_class=CustomTestReward)
        # reset the class-level counter between tests
        CustomTestReward.nb_call = 0

    def tearDown(self):
        self.env.close()

    def test_info(self):
        """After a step the observation carries the reward call count."""
        obs = self.env.reset()
        assert not hasattr(obs, "stuff")
        assert CustomTestReward.nb_call == 1  # ideally it should be 0 but..
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert hasattr(obs, "stuff")
        assert obs.stuff == 1
        assert CustomTestReward.nb_call == 2

    def test_copy(self):
        """Copied environments share the class-level counter."""
        obs = self.env.reset()
        assert not hasattr(obs, "stuff")
        assert CustomTestReward.nb_call == 1  # ideally it should be 0 but..
        env_cpy = self.env.copy()
        obs, reward, done, info = env_cpy.step(self.env.action_space())
        assert hasattr(obs, "stuff")
        assert obs.stuff == 1
        assert CustomTestReward.nb_call == 2, f"{CustomTestReward.nb_call} vs 2"
        # attr is not copied as it is a class attribute !
        obs, reward, done, info = self.env.step(self.env.action_space())
        assert hasattr(obs, "stuff")
        assert obs.stuff == 2
        assert CustomTestReward.nb_call == 3, f"{CustomTestReward.nb_call} vs 3"

    def test_runner(self):
        """The runner also triggers the reward (10 steps -> 12 calls here)."""
        runner = Runner(**self.env.get_params_for_runner())
        CustomTestReward.nb_call = 0
        runner.run(nb_episode=1, max_iter=10)
        assert CustomTestReward.nb_call == 12, f"{CustomTestReward.nb_call} vs 12"

    def test_simulate(self):
        """Simulated observations do NOT receive the ``stuff`` attribute."""
        obs = self.env.reset()
        assert not hasattr(obs, "stuff")
        assert CustomTestReward.nb_call == 1  # ideally it should be 0 but..
        sim_obs, sim_r, sim_d, sim_i = obs.simulate(self.env.action_space())
        assert not hasattr(sim_obs, "stuff")
        assert CustomTestReward.nb_call == 2  # reward is still called in simulate

    def test_forecast_env(self):
        """Forecast-env observations do NOT receive the ``stuff`` attribute."""
        obs = self.env.reset()
        assert not hasattr(obs, "stuff")
        assert CustomTestReward.nb_call == 1  # ideally it should be 0 but..
        for_env = obs.get_forecast_env()
        f_done = False
        while not f_done:
            f_obs, f_reward, f_done, f_info = for_env.step(self.env.action_space())
            assert not hasattr(f_obs, "stuff")
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 3,864 | 38.040404 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_runner_kwargs_backend.py | # Copyright (c) 2019-2022, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import warnings
import grid2op
import unittest
import pdb
from grid2op.Backend import PandaPowerBackend
from grid2op.Runner import Runner
class InstanceCount:
    """Toy class keeping a best-effort tally of live instances.

    NOTE: relying on ``__del__`` makes the count approximate — finalizers are
    not guaranteed to run promptly (hence the original "does not really work").
    """
    nb_instance = 0
    def __init__(self) -> None:
        # count on the concrete class so subclasses keep their own tally
        cls = type(self)
        cls.nb_instance += 1
    def __del__(self):
        cls = type(self)
        cls.nb_instance -= 1
class PPExtraArgs(PandaPowerBackend):
    """PandaPower backend accepting one extra constructor argument (``stuff``)
    that is recorded in ``_my_kwargs`` so the runner re-creates the backend
    with the same value."""
    def __init__(self,
                 stuff="",
                 detailed_infos_for_cascading_failures=False,
                 lightsim2grid=False,
                 dist_slack=False,
                 max_iter=10,
                 can_be_copied=True,
                 with_numba=False):
        super().__init__(detailed_infos_for_cascading_failures,
                         lightsim2grid,
                         dist_slack,
                         max_iter,
                         can_be_copied=can_be_copied,
                         with_numba=with_numba)
        # register the extra argument so it is forwarded when the runner
        # rebuilds this backend from its kwargs
        self._my_kwargs["stuff"] = stuff
class BackendProperlyInit(unittest.TestCase):
    """test grid2op works when the backend cannot be copied."""

    def setUp(self) -> None:
        self.env_name = "l2rpn_case14_sandbox"
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(self.env_name, test=True, backend=PPExtraArgs())

    def tearDown(self) -> None:
        self.env.close()

    def test_default_args(self):
        """basic tests, with default arguments"""
        kwargs = self.env.get_params_for_runner()
        assert "backend_kwargs" in kwargs
        runner = Runner(**kwargs)
        assert runner._backend_kwargs == kwargs["backend_kwargs"]
        env = runner.init_env()
        # the runner-built backend received the extra kwarg with its default
        assert "stuff" in env.backend._my_kwargs
        assert env.backend._my_kwargs["stuff"] == ""
        env.close()

    def test_non_default_args(self):
        """test with non default args: they are used properly in the runner"""
        self.env.close()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(self.env_name,
                                    test=True,
                                    backend=PPExtraArgs(stuff="toto"))
        runner = Runner(**self.env.get_params_for_runner())
        env = runner.init_env()
        # the custom value travelled through the runner kwargs
        assert env.backend._my_kwargs["stuff"] == "toto"
        env.close()

    def test_make_no_copy(self):
        """test that it does not make any copy of the default arguments"""
        self.env.close()
        counter = InstanceCount()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(self.env_name,
                                    test=True,
                                    backend=PPExtraArgs(stuff=counter))
        runner = Runner(**self.env.get_params_for_runner())
        env = runner.init_env()
        assert isinstance(env.backend._my_kwargs["stuff"], InstanceCount)
        # identity check: the exact same object, not a copy
        assert env.backend._my_kwargs["stuff"] is counter
        assert type(env.backend._my_kwargs["stuff"]).nb_instance == 1
        env.close()
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 3,730 | 36.686869 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_score_idf_2023.py | # Copyright (c) 2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import warnings
import numpy as np
import unittest
import grid2op
from grid2op.Action import ActionSpace, BaseAction
from grid2op.utils import ScoreL2RPN2023
from grid2op.Observation import BaseObservation
from grid2op.Agent.doNothing import DoNothingAgent, BaseAgent
from grid2op.Chronics import FromHandlers
from grid2op.Chronics.handlers import CSVHandler, PerfectForecastHandler
from grid2op.Reward import _NewRenewableSourcesUsageScore
class CurtailTrackerAgent(BaseAgent):
    """Agent curtailing every renewable generator to a fixed fraction
    (``curtail_level``) of its currently observed production."""

    def __init__(self, action_space, gen_renewable, gen_pmax, curtail_level=1.):
        super().__init__(action_space)
        self.gen_renewable = gen_renewable
        # keep only the pmax of the renewable generators
        self.gen_pmax = gen_pmax[gen_renewable]
        self.curtail_level = curtail_level

    def act(self, obs: BaseObservation, reward, done):
        """Build a curtailment action tracking the current renewable output."""
        renewable_ids = np.flatnonzero(self.gen_renewable)
        # target ratio (fraction of pmax) for each renewable generator
        ratios = self.curtail_level * obs.gen_p[self.gen_renewable] / self.gen_pmax
        return self.action_space({"curtail": list(zip(renewable_ids, ratios))})
class CurtailAgent(BaseAgent):
    """Agent curtailing renewables to a fixed fraction (``curtail_level``) of
    their forecast, pre-curtailment production for the next step."""

    def __init__(self, action_space: ActionSpace, curtail_level=1.):
        self.curtail_level = curtail_level
        super().__init__(action_space)

    def act(self, observation: BaseObservation, reward: float, done: bool = False) -> BaseAction:
        """Return a curtailment action based on the simulated next-step output."""
        # forecast of what each generator would produce without curtailment
        forecast_obs, *_ = observation.simulate(self.action_space())
        ratios = self.curtail_level * forecast_obs.gen_p_before_curtail / observation.gen_pmax
        # -1 means "leave unchanged" for the non-renewable generators
        ratios[~observation.gen_renewable] = -1
        return self.action_space({"curtail": ratios})
class TestScoreL2RPN2023(unittest.TestCase):
    """Tests of the ScoreL2RPN2023 helper, focused on the "new renewable
    energy sources" (nres) component of the episode score."""

    def setUp(self) -> None:
        """Create a small sandbox env with perfect forecast handlers."""
        env_name = "l2rpn_case14_sandbox"
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(env_name,
                                    test=True,
                                    data_feeding_kwargs={"gridvalueClass": FromHandlers,
                                                         "gen_p_handler": CSVHandler("prod_p"),
                                                         "load_p_handler": CSVHandler("load_p"),
                                                         "gen_v_handler": CSVHandler("prod_v"),
                                                         "load_q_handler": CSVHandler("load_q"),
                                                         "h_forecast": (5,),
                                                         "gen_p_for_handler": PerfectForecastHandler("prod_p_forecasted", quiet_warnings=True),
                                                         "gen_v_for_handler": PerfectForecastHandler("prod_v_forecasted", quiet_warnings=True),
                                                         "load_p_for_handler": PerfectForecastHandler("load_p_forecasted", quiet_warnings=True),
                                                         "load_q_for_handler": PerfectForecastHandler("load_q_forecasted", quiet_warnings=True),
                                                         },)
        self.env.set_max_iter(20)
        # NOTE(review): ``params`` is mutated but ``change_parameters`` is never
        # called — confirm the mutation actually reaches the environment.
        params = self.env.parameters
        params.NO_OVERFLOW_DISCONNECTION = True
        params.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION = True
        self.seed = 0
        self.scen_id = 0
        self.nb_scenario = 2
        self.max_iter = 10

    def tearDown(self) -> None:
        self.env.close()
        return super().tearDown()

    def test_score_helper(self):
        """basic tests for ScoreL2RPN2023 class: do-nothing gets 100 nres
        points, and curtailing more makes the score drop faster at first."""
        self.env.reset()
        try:
            my_score = ScoreL2RPN2023(
                self.env,
                nb_scenario=self.nb_scenario,
                env_seeds=[0 for _ in range(self.nb_scenario)],
                agent_seeds=[0 for _ in range(self.nb_scenario)],
                max_step=self.max_iter,
                weight_op_score=0.8,
                weight_assistant_score=0,
                weight_nres_score=0.2,
                scale_nres_score=100,
                scale_assistant_score=100,
                min_nres_score=-300.)
            # test do nothing indeed gets 100.
            res_dn = my_score.get(DoNothingAgent(self.env.action_space))
            for scen_id, (ep_score, op_score, nres_score, assistant_confidence_score, assistant_cost_score) in enumerate(res_dn[0]):
                assert nres_score == 100.
                assert ep_score == 0.8 * op_score + 0.2 * nres_score
            # now test that the score decrease fast "at beginning" and slower "at the end"
            # ie from 1. to 0.95 bigger difference than from 0.8 to 0.7
            res_agent0 = my_score.get(CurtailTrackerAgent(self.env.action_space,
                                                          gen_renewable = self.env.gen_renewable,
                                                          gen_pmax=self.env.gen_pmax,
                                                          curtail_level = 0.95))
            # assert np.allclose(res_agent0[0][0][2], 81.83611011377577)
            # assert np.allclose(res_agent0[0][1][2], 68.10026022372575)
            assert np.allclose(res_agent0[0][0][0], 0.8 * res_agent0[0][0][1] + 0.2 * res_agent0[0][0][2])
            assert np.allclose(res_agent0[0][0][2], 16.73128726588182)
            assert np.allclose(res_agent0[0][1][2], -26.02070223995034)
            res_agent1 = my_score.get(CurtailTrackerAgent(self.env.action_space,
                                                          gen_renewable = self.env.gen_renewable,
                                                          gen_pmax=self.env.gen_pmax,
                                                          curtail_level = 0.9))
            # assert np.allclose(res_agent1[0][0][2], 56.256863965501466)
            # assert np.allclose(res_agent1[0][1][2], 43.370607328810415)
            assert np.allclose(res_agent1[0][0][2], -49.61104170080321)
            assert np.allclose(res_agent1[0][1][2], -78.00216266500183)
            # decrease
            assert 100. - res_agent0[0][0][2] >= res_agent0[0][0][2] - res_agent1[0][0][2]
            assert 100. - res_agent0[0][1][2] >= res_agent0[0][1][2] - res_agent1[0][1][2]
            res_agent2 = my_score.get(CurtailTrackerAgent(self.env.action_space,
                                                          gen_renewable = self.env.gen_renewable,
                                                          gen_pmax=self.env.gen_pmax,
                                                          curtail_level = 0.8))
            assert np.allclose(res_agent2[0][0][2], -127.62213025108333)
            assert np.allclose(res_agent2[0][1][2], -143.83405253996978)
            # decrease
            assert 100. - res_agent1[0][0][2] >= res_agent1[0][0][2] - res_agent2[0][0][2]
            assert 100. - res_agent1[0][1][2] >= res_agent1[0][1][2] - res_agent2[0][1][2]
            res_agent3 = my_score.get(CurtailTrackerAgent(self.env.action_space,
                                                          gen_renewable = self.env.gen_renewable,
                                                          gen_pmax=self.env.gen_pmax,
                                                          curtail_level = 0.7))
            assert np.allclose(res_agent3[0][0][2], -169.9519401162611)
            assert np.allclose(res_agent3[0][1][2], -179.45065441917586)
            # FIX: the original compared res_agent2 with itself (RHS was always 0,
            # a tautology). The intent, per the "decrease" checks above, is that
            # the drop keeps shrinking: (agent1 - agent2) >= (agent2 - agent3).
            assert res_agent1[0][0][2] - res_agent2[0][0][2] >= res_agent2[0][0][2] - res_agent3[0][0][2]
            assert res_agent1[0][1][2] - res_agent2[0][1][2] >= res_agent2[0][1][2] - res_agent3[0][1][2]
        finally:
            my_score.clear_all()

    def test_min_score(self):
        """test the score does not go bellow the minimum in input"""
        try:
            self.env.reset()
            my_score = ScoreL2RPN2023(
                self.env,
                nb_scenario=self.nb_scenario,
                env_seeds=[0 for _ in range(self.nb_scenario)],
                agent_seeds=[0 for _ in range(self.nb_scenario)],
                max_step=self.max_iter,
                weight_op_score=0.8,
                weight_assistant_score=0,
                weight_nres_score=0.2,
                scale_nres_score=100,
                scale_assistant_score=100,
                min_nres_score=-100.)
            res_agent3 = my_score.get(CurtailTrackerAgent(self.env.action_space,
                                                          gen_renewable = self.env.gen_renewable,
                                                          gen_pmax=self.env.gen_pmax,
                                                          curtail_level = 0.7))
            # unclipped values would be ~ -169.95 and -179.45 (see test_score_helper)
            # assert np.allclose(res_agent3[0][0][2], -169.9519401162611)
            # assert np.allclose(res_agent3[0][1][2], -179.45065441917586)
            assert np.allclose(res_agent3[0][0][2], -100.)
            assert np.allclose(res_agent3[0][1][2], -100.)
        finally:
            my_score.clear_all()

    def test_spec(self):
        """ spec are: 100pts for 0 curtailment, 0 pts for 80% renewable (20% curtailment) and -100 pts for 50% renewable"""
        # test function without actual data
        assert _NewRenewableSourcesUsageScore._surlinear_func_curtailment(100.) == 1.
        assert _NewRenewableSourcesUsageScore._surlinear_func_curtailment(80.) == 0.
        assert _NewRenewableSourcesUsageScore._surlinear_func_curtailment(50.) == -1.
        assert _NewRenewableSourcesUsageScore._surlinear_func_curtailment(0.) < _NewRenewableSourcesUsageScore._surlinear_func_curtailment(50.)
        try:
            # now test with "real" data
            my_score = ScoreL2RPN2023(
                self.env,
                nb_scenario=self.nb_scenario,
                env_seeds=[0 for _ in range(self.nb_scenario)],
                agent_seeds=[0 for _ in range(self.nb_scenario)],
                max_step=self.max_iter,
                weight_op_score=0.8,
                weight_assistant_score=0,
                weight_nres_score=0.2)
            tol = 3e-5
            # test do nothing indeed gets 100.
            res_dn = my_score.get(DoNothingAgent(self.env.action_space))
            for scen_id, (ep_score, op_score, nres_score, assistant_confidence_score, assistant_cost_score) in enumerate(res_dn[0]):
                assert abs(nres_score - 100.) <= tol
            # test 80% gets indeed close to 0
            res_80 = my_score.get(CurtailAgent(self.env.action_space, 0.8))
            for scen_id, (ep_score, op_score, nres_score, assistant_confidence_score, assistant_cost_score) in enumerate(res_80[0]):
                assert abs(nres_score) <= tol
            # test 50% gets indeed close to -100
            res_50 = my_score.get(CurtailAgent(self.env.action_space, 0.5))
            for scen_id, (ep_score, op_score, nres_score, assistant_confidence_score, assistant_cost_score) in enumerate(res_50[0]):
                assert abs(nres_score + 100.) <= tol
            # test bellow 50% still gets close to -100
            res_30 = my_score.get(CurtailAgent(self.env.action_space, 0.3))
            for scen_id, (ep_score, op_score, nres_score, assistant_confidence_score, assistant_cost_score) in enumerate(res_30[0]):
                assert abs(nres_score + 100.) <= tol
        finally:
            my_score.clear_all()
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
Grid2Op | Grid2Op-master/grid2op/tests/test_score_wcci_2022.py | # Copyright (c) 2019-2022, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import unittest
import warnings
import numpy as np
import grid2op
from grid2op.Agent.baseAgent import BaseAgent
from grid2op.Agent.doNothing import DoNothingAgent
from grid2op.Reward import L2RPNWCCI2022ScoreFun
from grid2op.utils import ScoreL2RPN2022
class AgentTester(BaseAgent):
    """Agent that charges storage 0 and discharges storage 1 (1 MW each) on
    steps 1 through 12, and does nothing on step 0 and from step 13 onwards."""

    def act(self, observation, reward, done):
        step = observation.current_step
        if step == 0 or step >= 13:
            # outside the active window: do nothing
            return self.action_space()
        return self.action_space({"set_storage": [(0, 1.), (1, -1.)]})
class WCCI2022Tester(unittest.TestCase):
    """tests are focused on the storage units for this class"""

    def setUp(self) -> None:
        self.seed = 0
        self.scen_id = 0
        self.nb_scenario = 2
        self.max_iter = 13
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("educ_case14_storage", test=True, reward_class=L2RPNWCCI2022ScoreFun)

    def tearDown(self) -> None:
        self.env.close()
        return super().tearDown()

    def _aux_reset_env(self):
        # deterministic reset: fixed seed and scenario id
        self.env.seed(self.seed)
        self.env.set_id(self.scen_id)
        obs = self.env.reset()
        return obs

    def test_storage_cost(self):
        """basic tests for L2RPNWCCI2022ScoreFun"""
        score_fun = L2RPNWCCI2022ScoreFun()
        score_fun.initialize(self.env)
        # expected storage cost: 10 MWh moved * 10 €/MWh * 1/12 h per step
        th_val = 10. * 10. / 12.
        obs = self._aux_reset_env()
        act = self.env.action_space({"set_storage": [(0, -5.), (1, 5.)]})
        obs, reward, done, info = self.env.step(act)
        rew = score_fun(act, self.env, False, False, False, False)
        margin_cost = score_fun._get_marginal_cost(self.env)
        assert margin_cost == 70.
        storage_cost = score_fun._get_storage_cost(self.env, margin_cost)
        assert abs(storage_cost - th_val) <= 1e-5  # (10 MWh )* (10 € / MW )* (1/12. step / h)
        gen_p = 1.0 * obs.gen_p
        # compare with a do-nothing baseline on the same scenario
        _ = self._aux_reset_env()
        obs, reward_dn, done, info = self.env.step(self.env.action_space())
        gen_p_dn = 1.0 * obs.gen_p
        assert reward >= reward_dn
        # score = baseline + storage cost + extra losses priced at marginal cost
        assert abs(reward - (reward_dn + storage_cost + (gen_p.sum() - gen_p_dn.sum()) * margin_cost / 12. )) <= 1e-6

    def test_storage_cost_2(self):
        """basic tests for L2RPNWCCI2022ScoreFun, when changin storage cost"""
        storage_cost = 100.
        self.env.close()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("educ_case14_storage", test=True,
                                    reward_class=L2RPNWCCI2022ScoreFun(storage_cost=storage_cost))
        score_fun = L2RPNWCCI2022ScoreFun(storage_cost=storage_cost)
        score_fun.initialize(self.env)
        th_val = storage_cost * 10. / 12.
        obs = self._aux_reset_env()
        act = self.env.action_space({"set_storage": [(0, -5.), (1, 5.)]})
        obs, reward, done, info = self.env.step(act)
        rew = score_fun(act, self.env, False, False, False, False)
        margin_cost = score_fun._get_marginal_cost(self.env)
        assert margin_cost == 70.
        storage_cost = score_fun._get_storage_cost(self.env, margin_cost)
        assert abs(storage_cost - th_val) <= 1e-5  # (10 MWh )* (storage_cost € / MW )* (1/12. step / h)
        gen_p = 1.0 * obs.gen_p
        _ = self._aux_reset_env()
        obs, reward_dn, done, info = self.env.step(self.env.action_space())
        gen_p_dn = 1.0 * obs.gen_p
        assert reward >= reward_dn
        assert abs(reward - (reward_dn + storage_cost + (gen_p.sum() - gen_p_dn.sum()) * margin_cost / 12. )) <= 1e-6

    def test_score_helper(self):
        """basic tests for ScoreL2RPN2022 class"""
        my_score = ScoreL2RPN2022(self.env,
                                  nb_scenario=self.nb_scenario,
                                  env_seeds=[0 for _ in range(self.nb_scenario)],
                                  agent_seeds=[0 for _ in range(self.nb_scenario)],
                                  max_step=self.max_iter,
                                  )
        try:
            res_dn = my_score.get(DoNothingAgent(self.env.action_space))
            res_agent = my_score.get(AgentTester(self.env.action_space))
            # using the storages should always cost something: the agent scores lower
            for scen_id, (score_dn, score_agent) in enumerate(zip(res_dn[0], res_agent[0])):
                assert score_agent < score_dn, f"error for scenario id {scen_id}"
            assert np.all(np.abs(np.array(res_agent[0]) - np.array([-0.007520790059641119, -0.00823946207038134])) <= 1e-6)
        finally:
            my_score.clear_all()

    def test_score_helper_2(self):
        """basic tests for ScoreL2RPN2022 class when changing storage cost"""
        storage_cost = 100.
        my_score = ScoreL2RPN2022(self.env,
                                  nb_scenario=self.nb_scenario,
                                  env_seeds=[0 for _ in range(self.nb_scenario)],
                                  agent_seeds=[0 for _ in range(self.nb_scenario)],
                                  max_step=self.max_iter,
                                  scores_func=L2RPNWCCI2022ScoreFun(storage_cost=storage_cost)
                                  )
        try:
            res_dn = my_score.get(DoNothingAgent(self.env.action_space))
            res_agent = my_score.get(AgentTester(self.env.action_space))
            for scen_id, (score_dn, score_agent) in enumerate(zip(res_dn[0], res_agent[0])):
                assert score_agent < score_dn, f"error for scenario id {scen_id}"
            # higher storage cost -> larger (more negative) score impact than above
            assert np.all(np.abs(np.array(res_agent[0]) - np.array([-0.07931602, -0.08532347])) <= 1e-6)
        finally:
            my_score.clear_all()
class TestL2RPNWCCI2022ScoreFun(unittest.TestCase):
    """test curtailment and redispatching scores to make sure they match the description in the html of the competition.

    (storage are tested in the class just above, so i don't retest them here)
    """

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("l2rpn_wcci_2022", test=True)
        self.env.seed(0)
        self.env.reset()
        self.score_fun = L2RPNWCCI2022ScoreFun()
        self.score_fun.initialize(self.env)
        # run the env without actions
        self.env.set_id(0)
        obs = self.env.reset()
        dn_ = self.env.action_space()
        self.obs_ref, reward, done, info = self.env.step(dn_)
        obs = self.obs_ref
        self.score_ref = self.score_fun(env=self.env, action=dn_, is_done=False, has_error=False, is_illegal=False, is_ambiguous=False)
        # reference losses (MW) and marginal price (cost of the most expensive running gen)
        self.losses_ref = np.sum(obs.gen_p) - np.sum(obs.load_p)
        self.pt_ref = obs.gen_cost_per_MW[obs.gen_p > 0].max()
        # test that the score, in this case, is the losses
        assert np.abs(self.score_ref - self.losses_ref * self.pt_ref / 12.) <= 1e-4
        return super().setUp()

    def tearDown(self) -> None:
        self.env.close()
        return super().tearDown()

    def test_unary_curtailment(self):
        """Curtailing E MWh should cost 2 * E * marginal_price on top of losses."""
        gen_id = 0
        # now apply a curtailment action and compute the score
        for E_curt in [0.5, 1., 2., 3.]:
            self.env.set_id(0)
            obs = self.env.reset()
            # curtail a certain amount of MWh and check the formula is correct
            act = self.env.action_space({"curtail": [(gen_id,(self.obs_ref.gen_p[gen_id] - 12. * E_curt) / obs.gen_pmax[gen_id])]})
            obs, reward, done, info = self.env.step(act)
            assert not info['exception']
            score = self.score_fun(env=self.env, action=act, is_done=False, has_error=False, is_illegal=False, is_ambiguous=False)
            losses = np.sum(obs.gen_p) - np.sum(obs.load_p)
            pt = obs.gen_cost_per_MW[obs.gen_p > 0].max()
            assert np.abs(pt - self.pt_ref) <= 1e-5, f"wrong marginal price for {E_curt:.2f}"
            assert np.abs(score - (losses * pt / 12. + 2 * E_curt * pt)) <= 1e-4, f"error for {E_curt:.2f}"

    def test_unary_redisp(self):
        """Redispatching |E| MWh should cost 2 * |E| * marginal_price on top of losses."""
        gen_id = 8  # ramps is 11.2
        # now apply a redispatching action and compute the score
        for E_redisp in [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9]:
            self.env.set_id(0)
            obs = self.env.reset()
            # curtail 12MW for 5 mins => 1 MWh (next step gen 0 should produce 41.6 MW)
            act = self.env.action_space({"redispatch": [(gen_id, 12. * E_redisp)]})
            obs, reward, done, info = self.env.step(act)
            assert not info['exception'], f'one error occured for {E_redisp:.2f}'
            score = self.score_fun(env=self.env, action=act, is_done=False, has_error=False, is_illegal=False, is_ambiguous=False)
            losses = np.sum(obs.gen_p) - np.sum(obs.load_p)
            pt = obs.gen_cost_per_MW[obs.gen_p > 0].max()
            assert np.abs(pt - self.pt_ref) <= 1e-5, f"wrong marginal price for {E_redisp:.2f}"
            assert np.abs(score - (losses * pt / 12. + 2 * np.abs(E_redisp) * pt)) <= 2e-4, f"error for {E_redisp:.2f}"
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 9,831 | 46.269231 | 135 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_simulator.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import unittest
import warnings
import grid2op
import numpy as np
from grid2op.Action import PlayableAction
from grid2op.simulator import Simulator
from grid2op.Exceptions import SimulatorError, BaseObservationError
import pdb
class TestSimulator(unittest.TestCase):
    def setUp(self) -> None:
        """Build a seeded sandbox env and keep its first observation."""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("l2rpn_case14_sandbox", test=True)
        self.env.seed(0)
        self.obs = self.env.reset()
    def tearDown(self) -> None:
        """Release the environment created in setUp."""
        self.env.close()
    def test_create(self):
        """test i can create them (from a backend or an env) and that invalid
        argument combinations raise SimulatorError"""
        simulator = Simulator(backend=self.env.backend)
        # the simulator must work on its own copy of the backend
        assert simulator.backend is not self.env.backend
        simulator = Simulator(backend=None, env=self.env)
        assert simulator.backend is not self.env.backend
        with self.assertRaises(SimulatorError):
            # backend should be a backend
            simulator = Simulator(backend=self.env)
        with self.assertRaises(SimulatorError):
            # backend is not None
            simulator = Simulator(backend=self.env.backend, env=self.env)
        with self.assertRaises(SimulatorError):
            # env is not a BaseEnv
            simulator = Simulator(backend=self.env.backend, env=self.env.backend)
    def test_change_backend(self):
        """change_backend requires an initialized simulator and a real backend."""
        simulator = Simulator(backend=self.env.backend)
        with self.assertRaises(SimulatorError):
            # not initialized
            simulator.change_backend(self.env.backend.copy())
        # after set_state the change is accepted
        simulator.set_state(self.obs)
        simulator.change_backend(self.env.backend.copy())
        with self.assertRaises(SimulatorError):
            # env is not a BaseEnv
            simulator.change_backend(self.env)
    def test_change_backend_type(self):
        """change_backend_type requires an initialized simulator and a backend
        *class* (not an instance, not an arbitrary type)."""
        simulator = Simulator(backend=self.env.backend)
        with self.assertRaises(SimulatorError):
            # not initialized
            simulator.change_backend_type(
                self.env.backend.copy(), grid_path=self.env._init_grid_path
            )
        # after set_state, passing the raw backend class works
        simulator.set_state(self.obs)
        simulator.change_backend_type(
            self.env._raw_backend_class, grid_path=self.env._init_grid_path
        )
        with self.assertRaises(SimulatorError):
            # self.env.backend is not a type
            simulator.change_backend_type(
                self.env.backend, grid_path=self.env._init_grid_path
            )
        with self.assertRaises(SimulatorError):
            # wrong type
            simulator.change_backend_type(
                type(self.env), grid_path=self.env._init_grid_path
            )
def test_predict(self):
env = self.env
simulator = Simulator(backend=self.env.backend)
act1 = env.action_space({"set_line_status": [(1, -1)]})
act2 = env.action_space(
{"set_bus": {"substations_id": [(5, (2, 1, 2, 1, 2, 1, 2))]}}
)
with self.assertRaises(SimulatorError):
# not initialized
sim1 = simulator.predict(act1)
simulator.set_state(self.obs)
sim1 = simulator.predict(act1)
assert sim1 is not simulator
assert sim1.current_obs.rho[1] == 0.0
sim2 = simulator.predict(act2)
assert sim2 is not simulator
assert abs(sim2.current_obs.rho[1] - 0.35845447) <= 1e-6
sim3 = simulator.predict(act1).predict(act2, do_copy=False)
assert abs(sim3.current_obs.rho[1]) <= 1e-6
assert np.any(sim3.current_obs.rho != sim1.current_obs.rho)
assert np.any(sim3.current_obs.rho != sim2.current_obs.rho)
assert np.any(sim3.current_obs.rho != simulator.current_obs.rho)
sim4 = simulator.predict(
act1,
new_gen_p=env.chronics_handler.real_data.data.prod_p[1],
new_gen_v=env.chronics_handler.real_data.data.prod_v[1],
new_load_p=env.chronics_handler.real_data.data.load_p[1],
new_load_q=env.chronics_handler.real_data.data.load_q[1],
)
assert sim4 is not simulator
assert sim4.current_obs.rho[1] == 0.0
assert np.any(sim4.current_obs.rho != sim1.current_obs.rho)
sim5 = sim1.predict(act2)
assert abs(sim5.current_obs.rho[1]) <= 1e-6
assert np.max(np.abs(sim5.current_obs.rho - sim3.current_obs.rho)) <= 1e-6
sim6 = simulator.predict(act1, do_copy=False)
assert sim6 is simulator
assert abs(sim6.current_obs.rho[1]) <= 1e-6
assert np.max(np.abs(sim6.current_obs.rho - sim1.current_obs.rho)) <= 1e-6
def test_copy(self):
simulator = Simulator(backend=self.env.backend)
with self.assertRaises(SimulatorError):
# not initialized
sim1 = simulator.copy()
simulator.set_state(self.obs)
sim1 = simulator.copy()
assert sim1 is not simulator
assert np.max(np.abs(sim1.current_obs.rho - simulator.current_obs.rho)) <= 1e-6
def test_obs(self):
simulator = self.obs.get_simulator()
assert np.max(np.abs(simulator.current_obs.rho - self.obs.rho)) <= 1e-6
with self.assertRaises(BaseObservationError):
sim2 = simulator.current_obs.get_simulator()
class TestComplexActions(unittest.TestCase):
    """Check that Simulator.predict reproduces the real environment for
    "complex" actions: redispatching, storage units and curtailment.
    Tolerances are loose (up to a few 1e-1) because the simulator and the
    env do not share the exact same solver state."""
    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make(
                "educ_case14_storage", test=True, action_class=PlayableAction
            )
        self.env.seed(0)
        self.obs = self.env.reset()
        self.simulator = Simulator(backend=self.env.backend)
        self.simulator.set_state(self.obs)
    def tearDown(self) -> None:
        self.env.close()
        self.simulator.close()
    def test_redisp_action(self):
        """Simulated dispatch (target / actual / gen_p) must track the env's."""
        act = self.env.action_space({"redispatch": [(0, 5.0)]})
        obs, *_ = self.env.step(act)
        # feed the simulator the raw injections (env gen_p minus its own dispatch)
        res = self.simulator.predict(
            act,
            new_gen_p=obs.gen_p - obs.actual_dispatch,
            new_load_p=obs.load_p,
            new_load_q=obs.load_q,
        )
        assert (
            np.max(np.abs(res.current_obs.target_dispatch - obs.target_dispatch))
            <= 1e-5
        )
        assert (
            np.max(np.abs(res.current_obs.actual_dispatch - obs.actual_dispatch))
            <= 1e-2
        )
        assert np.max(np.abs(res.current_obs.gen_p - obs.gen_p)) <= 1e-2
        act2 = self.env.action_space({"redispatch": [(0, 5.0), (1, 4.0)]})
        obs2, *_ = self.env.step(act2)
        res2 = res.predict(
            act2,
            new_gen_p=obs2.gen_p - obs.actual_dispatch,
            new_load_p=obs2.load_p,
            new_load_q=obs2.load_q,
        )
        assert (
            np.max(np.abs(res2.current_obs.target_dispatch - obs2.target_dispatch))
            <= 1e-2
        )
        # ultimately the redispatch should match (but not necessarily at this step)
        for _ in range(2):
            obsn, *_ = self.env.step(self.env.action_space())
        assert (
            np.max(np.abs(res2.current_obs.actual_dispatch - obsn.actual_dispatch))
            <= 2e-1
        )
        act3 = self.env.action_space({"redispatch": [(5, 3.0)]})
        obs3, *_ = self.env.step(act3)
        res3 = res2.predict(
            act3,
            new_gen_p=obs3.gen_p - obs3.actual_dispatch,
            new_load_p=obs3.load_p,
            new_load_q=obs3.load_q,
        )
        assert (
            np.max(np.abs(res3.current_obs.target_dispatch - obs3.target_dispatch))
            <= 2e-1
        )
        assert (
            np.max(np.abs(res3.current_obs.actual_dispatch - obs3.actual_dispatch))
            <= 4e-1
        )
        assert np.max(np.abs(res3.current_obs.gen_p - obs3.gen_p)) <= 4e-1
    def test_storage(self):
        """Storage set-points, charge levels and Emin/Emax limits are respected."""
        act = self.env.action_space({"set_storage": [(0, -5.0)]})
        obs, *_ = self.env.step(act)
        res = self.simulator.predict(
            act,
            new_gen_p=obs.gen_p - obs.actual_dispatch,
            new_load_p=obs.load_p,
            new_load_q=obs.load_q,
        )
        assert (
            np.max(np.abs(res.current_obs.actual_dispatch - obs.actual_dispatch)) <= 0.1
        )
        assert np.max(np.abs(res.current_obs.gen_p - obs.gen_p)) <= 0.1
        assert np.max(np.abs(res.current_obs.storage_power - obs.storage_power)) <= 0.1
        assert (
            np.max(np.abs(res.current_obs.storage_charge - obs.storage_charge)) <= 0.1
        )
        # check Emin / Emax are met
        for it_num in range(16):
            res.predict(act, do_copy=False)
            assert res.converged, f"error at iteration {it_num}"
            assert np.all(res.current_obs.storage_power == [-5.0, 0.0])
        # storage 0 is almost empty: it can only deliver ~0.5 MW on this step
        res.predict(act, do_copy=False)
        assert res.converged
        assert np.all(res.current_obs.storage_charge == [0.0, 3.5])
        assert np.all(np.abs(res.current_obs.storage_power - [-0.499, 0.0]) <= 0.01)
        # fully empty: no power can be extracted anymore
        res.predict(act, do_copy=False)
        assert res.converged
        assert np.all(res.current_obs.storage_charge == [0.0, 3.5])
        assert np.all(np.abs(res.current_obs.storage_power) <= 0.01)
        act2 = self.env.action_space({"set_storage": [(0, 5.0), (1, -10.0)]})
        res.predict(act2, do_copy=False)
        assert res.converged
        assert np.all(np.abs(res.current_obs.storage_charge - [0.417, 2.667]) <= 0.01)
        assert np.all(np.abs(res.current_obs.storage_power - [5.0, -10.0]) <= 0.01)
    def test_curtailment(self):
        """Curtailment limits (tighten, relax, remove) match the env behaviour."""
        gen_id = 2
        # should curtail 3.4 MW
        act = self.env.action_space()
        act.curtail_mw = [(gen_id, 5.0)]
        obs, *_ = self.env.step(act)
        # rebuild the "uncurtailed" injections for the simulator
        new_gen_p = obs.gen_p - obs.actual_dispatch
        new_gen_p[gen_id] = obs.gen_p_before_curtail[gen_id]
        res = self.simulator.predict(
            act, new_gen_p=new_gen_p, new_load_p=obs.load_p, new_load_q=obs.load_q
        )
        assert (
            np.max(np.abs(res.current_obs.target_dispatch - obs.target_dispatch))
            <= 1e-5
        )
        assert (
            np.max(np.abs(res.current_obs.actual_dispatch - obs.actual_dispatch)) <= 0.1
        )
        assert np.max(np.abs(res.current_obs.gen_p - obs.gen_p)) <= 0.1
        # should curtail another 3 MW
        act2 = self.env.action_space()
        act2.curtail_mw = [(gen_id, 2.0)]
        obs1, *_ = self.env.step(act2)
        new_gen_p2 = obs1.gen_p - obs1.actual_dispatch
        new_gen_p2[gen_id] = obs1.gen_p_before_curtail[gen_id]
        res2 = self.simulator.predict(
            act2, new_gen_p=new_gen_p2, new_load_p=obs1.load_p, new_load_q=obs1.load_q
        )
        assert (
            np.max(np.abs(res2.current_obs.target_dispatch - obs1.target_dispatch))
            <= 1e-5
        )
        assert (
            np.max(np.abs(res2.current_obs.actual_dispatch - obs1.actual_dispatch))
            <= 0.01
        )
        assert np.max(np.abs(res2.current_obs.gen_p - obs1.gen_p)) <= 0.01
        # should curtail less (-4 MW)
        act3 = self.env.action_space()
        act3.curtail_mw = [(gen_id, 6.0)]
        obs2, *_ = self.env.step(act3)
        new_gen_p3 = obs2.gen_p - obs2.actual_dispatch
        new_gen_p3[gen_id] = obs2.gen_p_before_curtail[gen_id]
        res3 = self.simulator.predict(
            act3, new_gen_p=new_gen_p3, new_load_p=obs2.load_p, new_load_q=obs2.load_q
        )
        assert (
            np.max(np.abs(res3.current_obs.target_dispatch - obs2.target_dispatch))
            <= 1e-5
        )
        assert (
            np.max(np.abs(res3.current_obs.actual_dispatch - obs2.actual_dispatch))
            <= 0.2
        )
        assert np.max(np.abs(res3.current_obs.gen_p - obs2.gen_p)) <= 0.2
        # remove all curtailment
        act4 = self.env.action_space()
        act4.curtail_mw = [(gen_id, 9.0)]
        obs3, *_ = self.env.step(act4)
        new_gen_p4 = obs3.gen_p - obs3.actual_dispatch
        new_gen_p4[gen_id] = obs3.gen_p_before_curtail[gen_id]
        res4 = self.simulator.predict(
            act4, new_gen_p=new_gen_p4, new_load_p=obs3.load_p, new_load_q=obs3.load_q
        )
        assert np.max(np.abs(res4.current_obs.actual_dispatch)) <= 1e-5
        assert (
            np.max(np.abs(res4.current_obs.target_dispatch - obs3.target_dispatch))
            <= 1e-5
        )
        assert (
            np.max(np.abs(res4.current_obs.actual_dispatch - obs3.actual_dispatch))
            <= 0.2
        )
        assert np.max(np.abs(res4.current_obs.gen_p - obs3.gen_p)) <= 0.2
        # now test when I start from a previous step with curtailment already
        res5 = res3.predict(
            act4, new_gen_p=new_gen_p4, new_load_p=obs3.load_p, new_load_q=obs3.load_q
        )
        assert np.max(np.abs(res5.current_obs.actual_dispatch)) <= 1e-5
        assert (
            np.max(
                np.abs(
                    res5.current_obs.target_dispatch - res4.current_obs.target_dispatch
                )
            )
            <= 0.01
        )
        assert (
            np.max(
                np.abs(
                    res5.current_obs.actual_dispatch - res4.current_obs.actual_dispatch
                )
            )
            <= 0.01
        )
        assert np.max(np.abs(res5.current_obs.gen_p - res4.current_obs.gen_p)) <= 0.01
        # now another test where i still apply some curtailment
        res6 = res2.predict(
            act3, new_gen_p=new_gen_p3, new_load_p=obs2.load_p, new_load_q=obs2.load_q
        )
        assert (
            np.max(
                np.abs(
                    res6.current_obs.target_dispatch - res3.current_obs.target_dispatch
                )
            )
            <= 0.01
        )
        assert (
            np.max(
                np.abs(
                    res6.current_obs.actual_dispatch - res3.current_obs.actual_dispatch
                )
            )
            <= 0.01
        )
        assert np.max(np.abs(res6.current_obs.gen_p - res3.current_obs.gen_p)) <= 0.01
        # TODO test observation attributes:
        # res.current_obs.curtailment[:] = (new_gen_p - new_gen_p_modif) / act.gen_pmax
        # res.current_obs.curtailment_limit[:] = act.curtail
        # res.current_obs.curtailment_limit_effective[:] = act.curtail
        # res.current_obs.gen_p_before_curtail[:] = new_gen_p
if __name__ == "__main__":
    # allow running this test module directly with the unittest CLI runner
    unittest.main()
| 14,952 | 36.3825 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_timeOutEnvironment.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import time
import warnings
import unittest
import grid2op
from grid2op.Environment import TimedOutEnvironment
from grid2op.Agent import BaseAgent
from grid2op.Runner import Runner
from grid2op.gym_compat import (GymEnv,
BoxGymActSpace,
BoxGymObsSpace,
DiscreteActSpace,
MultiDiscreteActSpace)
class WaitAgent(BaseAgent):
    """Agent that sleeps ``self.time_out_ms`` milliseconds (attribute set by
    subclasses) before acting, so tests can trigger — or avoid — the
    do-nothing fallback of a :class:`TimedOutEnvironment`."""
    def __init__(self, action_space):
        super().__init__(action_space)
    def get_timeout(self, env):
        # the timeout lives on the TimedOutEnvironment itself; when a gym
        # wrapper is given, it is found on the wrapped grid2op environment
        if isinstance(env, TimedOutEnvironment):
            return env.time_out_ms
        return env.init_env.time_out_ms
    def act(self, obs, reward, done):
        # burn the configured delay, then return the "do nothing" action
        time.sleep(1e-3 * self.time_out_ms)
        return self.action_space()
    def act_gym(self, obs, reward, done):
        # gym flavour: sleep 220 ms less (overhead margin) and sample an action
        time.sleep(1e-3 * (self.time_out_ms - 220.))
        return self.action_space.sample()
class AgentOK(WaitAgent):
    """Waits 90% of the env timeout: should never trigger a do-nothing step."""
    def __init__(self, env):
        super().__init__(env.action_space)
        self.time_out_ms = self.get_timeout(env) * 0.9
class AgentKO(WaitAgent):
    """Waits 110% of the env timeout: should trigger one do-nothing step."""
    def __init__(self, env):
        super().__init__(env.action_space)
        self.time_out_ms = self.get_timeout(env) * 1.1
class AgentKO1(WaitAgent):
    """Waits 190% of the env timeout: still a single do-nothing step."""
    def __init__(self, env):
        super().__init__(env.action_space)
        self.time_out_ms = self.get_timeout(env) * 1.9
class AgentKO2(WaitAgent):
    """Waits 210% of the env timeout: should trigger two do-nothing steps."""
    def __init__(self, env):
        super().__init__(env.action_space)
        self.time_out_ms = self.get_timeout(env) * 2.1
class TestTimedOutEnvironment100(unittest.TestCase):
    """Check that TimedOutEnvironment inserts the expected number of
    do-nothing actions depending on how long the agent takes to act
    (timeout: 250 ms)."""
    def get_timeout_ms(self):
        # timeout (in ms) used by this test class; overridden by subclasses
        return 250
    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env1 = TimedOutEnvironment(grid2op.make("l2rpn_case14_sandbox", test=True),
                                            time_out_ms=self.get_timeout_ms())
        params = self.env1.parameters
        params.NO_OVERFLOW_DISCONNECTION = True
        self.env1.change_parameters(params)
    def tearDown(self) -> None:
        self.env1.close()
        return super().tearDown()
    def test_no_dn(self):
        """Agent faster than the timeout: no extra do-nothing step."""
        agentok = AgentOK(self.env1)
        obs = self.env1.reset()
        assert self.env1._nb_dn_last == 0
        for i in range(10):
            act = agentok.act(None, None, None)
            obs, reward, done, info = self.env1.step(act)
            assert info["nb_do_nothing"] == 0
            assert info["nb_do_nothing_made"] == 0
            assert self.env1._nb_dn_last == 0
    def test_one_dn(self):
        """Agent slightly slower than the timeout: one do-nothing step."""
        agentko = AgentKO(self.env1)
        obs = self.env1.reset()
        assert self.env1._nb_dn_last == 0
        for i in range(10):
            act = agentko.act(None, None, None)
            obs, reward, done, info = self.env1.step(act)
            assert info["nb_do_nothing"] == 1
            assert info["nb_do_nothing_made"] == 1
            assert self.env1._nb_dn_last == 1
    def test_one_dn2(self):
        """Agent taking up to ~2x the timeout: still one do-nothing step."""
        agentko = AgentKO1(self.env1)
        obs = self.env1.reset()
        assert self.env1._nb_dn_last == 0
        for i in range(10):
            act = agentko.act(None, None, None)
            obs, reward, done, info = self.env1.step(act)
            assert info["nb_do_nothing"] == 1
            assert info["nb_do_nothing_made"] == 1
            assert self.env1._nb_dn_last == 1
    def test_two_dn(self):
        """Agent taking more than 2x the timeout: two do-nothing steps."""
        agentko2 = AgentKO2(self.env1)
        obs = self.env1.reset()
        assert self.env1._nb_dn_last == 0
        for i in range(10):
            act = agentko2.act(None, None, None)
            obs, reward, done, info = self.env1.step(act)
            assert info["nb_do_nothing"] == 2
            assert info["nb_do_nothing_made"] == 2
            assert self.env1._nb_dn_last == 2
    def test_diff_dn(self):
        """Alternating fast/slow agents: the do-nothing count follows the agent."""
        agentok = AgentOK(self.env1)
        agentko = AgentKO(self.env1)
        agentko2 = AgentKO2(self.env1)
        obs = self.env1.reset()
        assert self.env1._nb_dn_last == 0
        for i, agent in enumerate([agentok, agentko, agentko2] * 2):
            act = agent.act(None, None, None)
            obs, reward, done, info = self.env1.step(act)
            # agents cycle through 0, 1 and 2 expected do-nothing steps
            assert info["nb_do_nothing"] == i % 3
            assert info["nb_do_nothing_made"] == i % 3
            assert self.env1._nb_dn_last == i % 3
class TestTimedOutEnvironment50(TestTimedOutEnvironment100):
    """Same tests with a 300 ms timeout.
    NOTE(review): the class name suggests 50 ms but the value is 300 — confirm
    whether the name is just historical."""
    def get_timeout_ms(self):
        return 300
class TestTimedOutEnvironmentCpy(TestTimedOutEnvironment100):
    """Same tests run on a copy of the TimedOutEnvironment."""
    def setUp(self) -> None:
        super().setUp()
        # keep the original env and run all inherited tests on its copy
        self.env0 = self.env1
        self.env1 = self.env0.copy()
    def tearDown(self) -> None:
        # close both; deliberately does not call super() (env1 closed here)
        self.env1.close()
        self.env0.close()
class TestTOEnvRunner(unittest.TestCase):
    """Check that the Runner recreates a TimedOutEnvironment and that the
    episode reward is unchanged whatever the agent's slowness (timeout 200 ms)."""
    def get_timeout_ms(self):
        return 200
    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env1 = TimedOutEnvironment(grid2op.make("l2rpn_case14_sandbox", test=True),
                                            time_out_ms=self.get_timeout_ms())
        params = self.env1.parameters
        params.NO_OVERFLOW_DISCONNECTION = True
        self.env1.change_parameters(params)
        # expected cumulative reward over self.max_iter do-nothing steps
        self.cum_reward = 645.70208
        self.max_iter = 10
    def tearDown(self) -> None:
        self.env1.close()
        return super().tearDown()
    def test_runner_can_make(self):
        """The Runner rebuilds a TimedOutEnvironment with the same timeout."""
        runner = Runner(**self.env1.get_params_for_runner())
        env2 = runner.init_env()
        assert isinstance(env2, TimedOutEnvironment)
        assert env2.time_out_ms == self.get_timeout_ms()
    def test_runner_noskip(self):
        """Fast agent: reward unchanged."""
        agent = AgentOK(self.env1)
        runner = Runner(**self.env1.get_params_for_runner(),
                        agentClass=None,
                        agentInstance=agent)
        res = runner.run(nb_episode=1,
                         max_iter=self.max_iter)
        _, _, cum_reward, timestep, max_ts = res[0]
        assert abs(cum_reward - self.cum_reward) <= 1e-5
    def test_runner_skip1(self):
        """Agent triggering one do-nothing per step: reward unchanged."""
        agent = AgentKO(self.env1)
        runner = Runner(**self.env1.get_params_for_runner(),
                        agentClass=None,
                        agentInstance=agent)
        res = runner.run(nb_episode=1,
                         max_iter=self.max_iter)
        _, _, cum_reward, timestep, max_ts = res[0]
        assert abs(cum_reward - self.cum_reward) <= 1e-5
    def test_runner_skip2(self):
        """Agent triggering two do-nothings per step: reward unchanged."""
        agent = AgentKO2(self.env1)
        runner = Runner(**self.env1.get_params_for_runner(),
                        agentClass=None,
                        agentInstance=agent)
        res = runner.run(nb_episode=1,
                         max_iter=self.max_iter)
        _, _, cum_reward, timestep, max_ts = res[0]
        assert abs(cum_reward - self.cum_reward) <= 1e-5
    def test_runner_skip2_2ep(self):
        """Same as above over two episodes (second episode has its own reward)."""
        agent = AgentKO2(self.env1)
        runner = Runner(**self.env1.get_params_for_runner(),
                        agentClass=None,
                        agentInstance=agent)
        res = runner.run(nb_episode=2,
                         max_iter=self.max_iter)
        _, _, cum_reward, timestep, max_ts = res[0]
        assert abs(cum_reward - self.cum_reward) <= 1e-5
        _, _, cum_reward, timestep, max_ts = res[1]
        assert abs(cum_reward - 648.90795) <= 1e-5
class TestTOEnvGym(unittest.TestCase):
    """Check that a TimedOutEnvironment can be wrapped in the gym interface
    with the various grid2op action/observation space converters."""
    def get_timeout_ms(self):
        return 400.
    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env1 = TimedOutEnvironment(grid2op.make("l2rpn_case14_sandbox", test=True),
                                            time_out_ms=self.get_timeout_ms())
    def tearDown(self) -> None:
        self.env1.close()
        return super().tearDown()
    def test_gym_with_step(self):
        """test the step function also makes the 'do nothing'"""
        self.skipTest("On docker execution time is too unstable")
        env_gym = GymEnv(self.env1)
        env_gym.reset()
        # fast agent: no do-nothing inserted
        agentok = AgentOK(env_gym)
        for i in range(10):
            act = agentok.act_gym(None, None, None)
            for k in act:
                act[k][:] = 0
            *_, info = env_gym.step(act)
            assert info["nb_do_nothing"] == 0
            assert info["nb_do_nothing_made"] == 0
            assert env_gym.init_env._nb_dn_last == 0
        # slow agent: one do-nothing per step
        env_gym.reset()
        agentko = AgentKO1(env_gym)
        for i in range(10):
            act = agentko.act_gym(None, None, None)
            for k in act:
                act[k][:] = 0
            *_, info = env_gym.step(act)
            assert info["nb_do_nothing"] == 1
            assert info["nb_do_nothing_made"] == 1
            assert env_gym.init_env._nb_dn_last == 1
    def test_gym_normal(self):
        """test I can create the gym env"""
        env_gym = GymEnv(self.env1)
        env_gym.reset()
    def test_gym_box(self):
        """test I can create the gym env with box ob space and act space"""
        env_gym = GymEnv(self.env1)
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env_gym.action_space = BoxGymActSpace(self.env1.action_space)
            env_gym.observation_space = BoxGymObsSpace(self.env1.observation_space)
        env_gym.reset()
    def test_gym_discrete(self):
        """test I can create the gym env with discrete act space"""
        env_gym = GymEnv(self.env1)
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env_gym.action_space = DiscreteActSpace(self.env1.action_space)
        env_gym.reset()
    def test_gym_multidiscrete(self):
        """test I can create the gym env with multi discrete act space"""
        env_gym = GymEnv(self.env1)
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            env_gym.action_space = MultiDiscreteActSpace(self.env1.action_space)
        env_gym.reset()
if __name__ == "__main__":
    # allow running this test module directly with the unittest CLI runner
    unittest.main()
| 10,818 | 35.550676 | 112 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_ts_handlers.py | # Copyright (c) 2019-2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import pdb
import time
import warnings
from grid2op.tests.helper_path_test import *
import grid2op
from grid2op.Exceptions import NoForecastAvailable
from grid2op.Chronics import GridStateFromFileWithForecasts, GridStateFromFile, GridStateFromFileWithForecastsWithoutMaintenance
from grid2op.Chronics.time_series_from_handlers import FromHandlers
from grid2op.Chronics.handlers import (CSVHandler,
DoNothingHandler,
CSVForecastHandler,
CSVMaintenanceHandler,
JSONMaintenanceHandler,
PersistenceForecastHandler,
PerfectForecastHandler,
NoisyForecastHandler,
LoadQFromPHandler
)
from grid2op.Runner import Runner
from grid2op.Exceptions import HandlerError
from grid2op.Parameters import Parameters
import warnings
# TODO check when there is also redispatching
def _load_next_chunk_in_memory_hack(self):
self._nb_call += 1
# i load the next chunk as dataframes
array = self._get_next_chunk() # array: load_p
# i put these dataframes in the right order (columns)
self._init_attrs(array) # TODO
# i don't forget to reset the reading index to 0
self.current_index = 0
class TestCSVHandlerEnv(HelperTests):
    """Compare a regular env (GridStateFromFile chronics) with an env whose
    chronics come from per-quantity CSV handlers (FromHandlers): both must
    produce exactly the same episodes."""
    def _aux_assert_right_type_chronics(self):
        # env1 reads chronics the classic way, env2 through handlers
        assert isinstance(self.env1.chronics_handler.real_data.data, GridStateFromFile)
        assert isinstance(self.env2.chronics_handler.real_data.data, FromHandlers)
    def _aux_reproducibility(self):
        # put both envs on the same scenario with the same seed
        for env in [self.env1, self.env2]:
            env.set_id(0)
            env.seed(0)
            env.reset()
        self._aux_assert_right_type_chronics()
    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env1 = grid2op.make("l2rpn_case14_sandbox", test=True)  # regular env
            self.env2 = grid2op.make("l2rpn_case14_sandbox",
                                     data_feeding_kwargs={"gridvalueClass": FromHandlers,
                                                          "gen_p_handler": CSVHandler("prod_p"),
                                                          "load_p_handler": CSVHandler("load_p"),
                                                          "gen_v_handler": CSVHandler("prod_v"),
                                                          "load_q_handler": CSVHandler("load_q"),
                                                          },
                                     _add_to_name="_TestCSVHandlerEnv",
                                     test=True)  # env with handler-based chronics
        self._aux_reproducibility()
        return super().setUp()
    def tearDown(self) -> None:
        self.env1.close()
        self.env2.close()
        return super().tearDown()
    def _aux_compare_one(self, it_nm, obs1, obs2, descr=""):
        # the two observations must match on every chronics-driven attribute
        for attr_nm in ["load_p", "load_q", "gen_v", "rho", "gen_p", "line_status", "time_next_maintenance"]:
            # assert np.all(getattr(obs1, attr_nm) == getattr(obs2, attr_nm)), f"error for {attr_nm}{descr} at iteration {it_nm}: {getattr(obs1, attr_nm)} vs {getattr(obs2, attr_nm)}"
            assert np.allclose(getattr(obs1, attr_nm), getattr(obs2, attr_nm)), f"error for {attr_nm}{descr} at iteration {it_nm}: {getattr(obs1, attr_nm)} vs {getattr(obs2, attr_nm)}"
    def _run_then_compare(self, nb_iter=10, env1=None, env2=None):
        # step both envs with do-nothing actions and compare each step
        if env1 is None:
            env1 = self.env1
        if env2 is None:
            env2 = self.env2
        for k in range(nb_iter):
            obs1, reward1, done1, info1 = env1.step(env1.action_space())
            obs2, reward2, done2, info2 = env2.step(env2.action_space())
            assert done2 == done1, f"error at iteration {k} for done"
            assert reward1 == reward2, f"error at iteration {k} for reward"
            if done1:
                break
            self._aux_compare_one(k, obs1, obs2)
    def test_step_equal(self):
        """Three consecutive episodes produce identical steps."""
        self._run_then_compare()
        self.env1.reset()
        self.env2.reset()
        self._run_then_compare()
        self.env1.reset()
        self.env2.reset()
        self._run_then_compare()
    def test_max_iter(self):
        """set_max_iter terminates both envs at the same step."""
        self.env1.set_max_iter(5)
        self.env2.set_max_iter(5)
        self.env1.reset()
        self.env2.reset()
        self._run_then_compare(nb_iter=4)
        obs1, reward1, done1, info1 = self.env1.step(self.env1.action_space())
        obs2, reward2, done2, info2 = self.env2.step(self.env2.action_space())
        assert done1
        assert done2
    def test_chunk(self):
        """With chunk size 1 the handler reloads a chunk at every step."""
        self.env_ref = self.env1.copy()
        self.env1.set_chunk_size(1)
        self.env2.set_chunk_size(1)
        # hugly copy paste from above otherwise the hack does not work...
        # because of the reset
        self.env1.set_max_iter(5)
        self.env2.set_max_iter(5)
        self.env_ref.set_max_iter(5)
        obs1 = self.env1.reset()
        obs2 = self.env2.reset()
        self.env_ref.reset()
        self._aux_compare_one(0, obs1, obs2)
        ###### hack to count the number this is called
        if hasattr(self.env2.chronics_handler.real_data, "data"):
            self.env2.chronics_handler.data.gen_p_handler._nb_call = 0
            self.env2.chronics_handler.data.gen_p_handler._load_next_chunk_in_memory = lambda : _load_next_chunk_in_memory_hack(self.env2.chronics_handler.data.gen_p_handler)
        else:
            self.env2.chronics_handler.gen_p_handler._nb_call = 0
            self.env2.chronics_handler.gen_p_handler._load_next_chunk_in_memory = lambda : _load_next_chunk_in_memory_hack(self.env2.chronics_handler.gen_p_handler)
        ######
        self._run_then_compare(nb_iter=4)
        obs1, reward1, done1, info1 = self.env1.step(self.env1.action_space())
        obs2, reward2, done2, info2 = self.env2.step(self.env2.action_space())
        assert done1
        assert done2
        # now check the "load_next_chunk has been called the right number of time"
        if hasattr(self.env2.chronics_handler.real_data, "data"):
            assert self.env2.chronics_handler.data.gen_p_handler._nb_call == 5
        else:
            assert self.env2.chronics_handler.gen_p_handler._nb_call == 5
    def test_copy(self):
        """A copied handler-based env behaves like the original over resets."""
        env2 = self.env2.copy()
        self._run_then_compare(env2=env2)
        self.env1.reset()
        env2.reset()
        self._run_then_compare(env2=env2)
        self.env1.reset()
        env2.reset()
        self._run_then_compare(env2=env2)
    def test_runner(self):
        """The Runner yields identical results for both envs."""
        runner1 = Runner(**self.env1.get_params_for_runner())
        runner2 = Runner(**self.env2.get_params_for_runner())
        res1 = runner1.run(nb_episode=2, max_iter=5, env_seeds=[0, 1], episode_id=[0, 1])
        res2 = runner2.run(nb_episode=2, max_iter=5, env_seeds=[0, 1], episode_id=[0, 1])
        assert res1 == res2
    def test_if_file_absent(self):
        """A handler pointing to a missing CSV raises HandlerError at make."""
        # do it only once
        if type(self) != TestCSVHandlerEnv:
            self.skipTest("This test should be done only in the TestCSVHandlerEnv class (no need to do it 10x times)")
        with self.assertRaises(HandlerError):
            grid2op.make(os.path.join(PATH_DATA_TEST, "5bus_example_some_missing"),
                         data_feeding_kwargs={"gridvalueClass": FromHandlers,
                                              "gen_p_handler": CSVHandler("prod_p"),
                                              "load_p_handler": CSVHandler("load_p"),
                                              "gen_v_handler": DoNothingHandler(),
                                              "load_q_handler": CSVHandler("load_q"),  # crash because this file does not exist
                                              },
                         _add_to_name="_TestCSVHandlerEnv")  # regular env
    def test_max_episode_duration(self):
        """Both envs report the same episode duration in every configuration."""
        assert self.env2.max_episode_duration() == self.env1.max_episode_duration()
        self.env1.reset()
        self.env2.reset()
        assert self.env2.max_episode_duration() == self.env1.max_episode_duration()
        self.env1.set_max_iter(5)
        self.env2.set_max_iter(5)
        self.env1.reset()
        self.env2.reset()
        assert self.env2.max_episode_duration() == self.env1.max_episode_duration()
        self.env1.set_chunk_size(1)
        self.env2.set_chunk_size(1)
        self.env1.reset()
        self.env2.reset()
        assert self.env2.max_episode_duration() == self.env1.max_episode_duration()
    def test_fast_forward_chronics(self):
        """fast_forward_chronics keeps the two envs in lockstep."""
        self.env1.fast_forward_chronics(5)
        self.env2.fast_forward_chronics(5)
        self._run_then_compare()
        self.env1.fast_forward_chronics(7)
        self.env2.fast_forward_chronics(7)
        self._run_then_compare()
        self.env1.reset()
        self.env2.reset()
        self._run_then_compare()
# TODO:
# test when "names_chronics_to_backend"
# test sample_next_chronics
# test I can "finish" an environment completely (without max_iter, when data are over)
# test multifolderwithCache
# test without "multi folder" X
# test runner X
# test env copy X
# test when max_iter `env.set_max_iter` X
# test when "set_chunk" X
# test with forecasts
# test next_chronics
# test tell_id
# test set_id
# test with maintenance
# test fast_forward_chronics
class TestSomeFileMissingEnv(TestCSVHandlerEnv):
    """Same comparison but on an env where some chronics files are missing:
    the absent quantities use DoNothingHandler."""
    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env1 = grid2op.make(os.path.join(PATH_DATA_TEST, "5bus_example_some_missing"))  # regular env
            self.env2 = grid2op.make(os.path.join(PATH_DATA_TEST, "5bus_example_some_missing"),
                                     data_feeding_kwargs={"gridvalueClass": FromHandlers,
                                                          "gen_p_handler": CSVHandler("prod_p"),
                                                          "load_p_handler": CSVHandler("load_p"),
                                                          "gen_v_handler": DoNothingHandler(),
                                                          "load_q_handler": DoNothingHandler(),
                                                          },
                                     _add_to_name="_TestDNHandlerEnv")
        self._aux_reproducibility()
class TestWithoutMultiFolderEnv(TestCSVHandlerEnv):
    """Same comparison but pointing directly at one scenario folder
    (no Multifolder wrapper around the chronics)."""
    def setUp(self) -> None:
        chronics_path = os.path.join(PATH_DATA,
                                     "l2rpn_case14_sandbox",
                                     "chronics",
                                     "0000")
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env1 = grid2op.make("l2rpn_case14_sandbox", test=True,
                                     chronics_class=GridStateFromFileWithForecasts,
                                     chronics_path=chronics_path)  # regular env
            self.env2 = grid2op.make("l2rpn_case14_sandbox",
                                     chronics_class=FromHandlers,
                                     data_feeding_kwargs={
                                                          "gen_p_handler": CSVHandler("prod_p"),
                                                          "load_p_handler": CSVHandler("load_p"),
                                                          "gen_v_handler": CSVHandler("prod_v"),
                                                          "load_q_handler": CSVHandler("load_q"),
                                                          },
                                     chronics_path=chronics_path,
                                     _add_to_name="TestWithoutMultiFolderEnv",
                                     test=True)
        self._aux_reproducibility()
    def _aux_assert_right_type_chronics(self):
        # without the Multifolder wrapper, real_data is the chronics object itself
        assert isinstance(self.env1.chronics_handler.real_data, GridStateFromFile)
        assert isinstance(self.env2.chronics_handler.real_data, FromHandlers)
class TestForecastHandlerNoMultiFolder(TestWithoutMultiFolderEnv):
    """Same single-folder comparison, with CSV forecast handlers added;
    simulated (forecast) observations are compared too."""
    def setUp(self) -> None:
        chronics_path = os.path.join(PATH_DATA,
                                     "l2rpn_case14_sandbox",
                                     "chronics",
                                     "0000")
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env1 = grid2op.make("l2rpn_case14_sandbox", test=True,
                                     chronics_class=GridStateFromFileWithForecasts,
                                     chronics_path=chronics_path)  # regular env
            self.env2 = grid2op.make("l2rpn_case14_sandbox",
                                     chronics_class=FromHandlers,
                                     data_feeding_kwargs={"gen_p_handler": CSVHandler("prod_p"),
                                                          "load_p_handler": CSVHandler("load_p"),
                                                          "gen_v_handler": CSVHandler("prod_v"),
                                                          "load_q_handler": CSVHandler("load_q"),
                                                          "gen_p_for_handler": CSVForecastHandler("prod_p_forecasted"),
                                                          "load_p_for_handler": CSVForecastHandler("load_p_forecasted"),
                                                          "gen_v_for_handler": CSVForecastHandler("prod_v_forecasted"),
                                                          "load_q_for_handler": CSVForecastHandler("load_q_forecasted"),
                                                          },
                                     chronics_path=chronics_path,
                                     _add_to_name="TestForecastHandlerNoMulti14",
                                     test=True)
        self._aux_reproducibility()
        # sanity check: both envs loaded the same forecast data
        assert np.all(self.env1.chronics_handler.real_data.load_p_forecast ==
                      self.env2.chronics_handler.real_data.load_p_for_handler.array)
    def _aux_compare_one(self, it_nm, obs1, obs2):
        # compare the real observations, then the one-step-ahead forecasts
        super()._aux_compare_one(it_nm, obs1, obs2)
        sim_obs1, *_ = obs1.simulate(self.env1.action_space())
        sim_obs2, *_ = obs2.simulate(self.env1.action_space())
        super()._aux_compare_one(it_nm, sim_obs1, sim_obs2, " forecast")
class TestForecastHandler14(TestCSVHandlerEnv):
    """Multifolder variant with CSV forecast handlers; simulated (forecast)
    observations are compared too."""
    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env1 = grid2op.make("l2rpn_case14_sandbox", test=True)  # regular env
            self.env2 = grid2op.make("l2rpn_case14_sandbox",
                                     data_feeding_kwargs={"gridvalueClass": FromHandlers,
                                                          "gen_p_handler": CSVHandler("prod_p"),
                                                          "load_p_handler": CSVHandler("load_p"),
                                                          "gen_v_handler": CSVHandler("prod_v"),
                                                          "load_q_handler": CSVHandler("load_q"),
                                                          "gen_p_for_handler": CSVForecastHandler("prod_p_forecasted"),
                                                          "load_p_for_handler": CSVForecastHandler("load_p_forecasted"),
                                                          "gen_v_for_handler": CSVForecastHandler("prod_v_forecasted"),
                                                          "load_q_for_handler": CSVForecastHandler("load_q_forecasted"),
                                                          },
                                     _add_to_name="TestForecastHandlerEnv",
                                     test=True)
        self._aux_reproducibility()
        # sanity check: both envs loaded the same forecast data
        assert np.all(self.env1.chronics_handler.real_data.data.load_p_forecast ==
                      self.env2.chronics_handler.real_data.data.load_p_for_handler.array)
    def _aux_compare_one(self, it_nm, obs1, obs2):
        # compare the real observations, then the one-step-ahead forecasts
        super()._aux_compare_one(it_nm, obs1, obs2)
        sim_obs1, *_ = obs1.simulate(self.env1.action_space())
        sim_obs2, *_ = obs2.simulate(self.env1.action_space())
        super()._aux_compare_one(it_nm, sim_obs1, sim_obs2, " forecast")
class TestForecastHandler5MultiSteps(TestCSVHandlerEnv):
    """Env with multi-horizon forecasts: chained `simulate` calls up to three
    steps ahead must match between the two chronics implementations."""
    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env1 = grid2op.make(os.path.join(PATH_DATA_TEST, "5bus_example_forecasts"), test=True,
                                     data_feeding_kwargs={"gridvalueClass": GridStateFromFileWithForecastsWithoutMaintenance},
                                     )  # regular env
            self.env2 = grid2op.make(os.path.join(PATH_DATA_TEST, "5bus_example_forecasts"),
                                     data_feeding_kwargs={"gridvalueClass": FromHandlers,
                                                          "gen_p_handler": CSVHandler("prod_p"),
                                                          "load_p_handler": CSVHandler("load_p"),
                                                          "load_q_handler": CSVHandler("load_q"),
                                                          "gen_v_handler": DoNothingHandler(),
                                                          "gen_p_for_handler": CSVForecastHandler("prod_p_forecasted"),
                                                          "load_p_for_handler": CSVForecastHandler("load_p_forecasted"),
                                                          "load_q_for_handler": CSVForecastHandler("load_q_forecasted"),
                                                          },
                                     _add_to_name="TestForecastHandler5MultiSteps",
                                     test=True)
        self._aux_reproducibility()
        # sanity check: both envs loaded the same real and forecast data
        assert np.all(self.env1.chronics_handler.real_data.data.load_p ==
                      self.env2.chronics_handler.real_data.data.load_p_handler.array)
        assert np.all(self.env1.chronics_handler.real_data.data.load_p_forecast ==
                      self.env2.chronics_handler.real_data.data.load_p_for_handler.array)
        assert np.all(self.env1.chronics_handler.real_data.data.prod_p ==
                      self.env2.chronics_handler.real_data.data.gen_p_handler.array)
        assert np.all(self.env1.chronics_handler.real_data.data.prod_p_forecast ==
                      self.env2.chronics_handler.real_data.data.gen_p_for_handler.array)
    def _aux_compare_one(self, it_nm, obs1, obs2):
        # compare real observations, then forecasts 1, 2 and 3 steps ahead
        super()._aux_compare_one(it_nm, obs1, obs2)
        sim_obs1_1, *_ = obs1.simulate(self.env1.action_space())
        sim_obs2_1, *_ = obs2.simulate(self.env1.action_space())
        super()._aux_compare_one(it_nm, sim_obs1_1, sim_obs2_1, " forecast 1")
        sim_obs1_2, *_ = sim_obs1_1.simulate(self.env1.action_space())
        sim_obs2_2, *_ = sim_obs2_1.simulate(self.env1.action_space())
        super()._aux_compare_one(it_nm, sim_obs1_2, sim_obs2_2, " forecast 2")
        sim_obs1_3, *_ = sim_obs1_2.simulate(self.env1.action_space())
        sim_obs2_3, *_ = sim_obs2_2.simulate(self.env1.action_space())
        super()._aux_compare_one(it_nm, sim_obs1_3, sim_obs2_3, " forecast 3")
class TestMaintenanceCSV(TestForecastHandler14):
    """Forecast comparison on an env with maintenance read from
    `maintenance.csv` (CSVMaintenanceHandler)."""
    def setUp(self) -> None:
        param = Parameters()
        param.NO_OVERFLOW_DISCONNECTION = True
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env1 = grid2op.make(os.path.join(PATH_DATA_TEST, "env_14_test_maintenance"),
                                     test=True,
                                     param=param
                                     )  # regular env
            self.env2 = grid2op.make(os.path.join(PATH_DATA_TEST, "env_14_test_maintenance"),
                                     data_feeding_kwargs={"gridvalueClass": FromHandlers,
                                                          "gen_p_handler": CSVHandler("prod_p"),
                                                          "load_p_handler": CSVHandler("load_p"),
                                                          "load_q_handler": CSVHandler("load_q"),
                                                          "gen_v_handler": CSVHandler("prod_v"),
                                                          "maintenance_handler": CSVMaintenanceHandler(),
                                                          "gen_p_for_handler": CSVForecastHandler("prod_p_forecasted"),
                                                          "gen_v_for_handler": CSVForecastHandler("prod_v_forecasted"),
                                                          "load_p_for_handler": CSVForecastHandler("load_p_forecasted"),
                                                          "load_q_for_handler": CSVForecastHandler("load_q_forecasted"),
                                                          },
                                     _add_to_name="TestMaintenanceCSV",
                                     test=True,
                                     param=param)
        self._aux_reproducibility()
        # sanity check: both envs loaded the same maintenance schedule
        assert np.all(self.env1.chronics_handler.real_data.data.maintenance ==
                      self.env2.chronics_handler.real_data.data.maintenance_handler.array)
        assert np.all(self.env1.chronics_handler.real_data.data.maintenance_time ==
                      self.env2.chronics_handler.real_data.data.maintenance_handler.maintenance_time)
        assert np.all(self.env1.chronics_handler.real_data.data.maintenance_duration ==
                      self.env2.chronics_handler.real_data.data.maintenance_handler.maintenance_duration)
class TestMaintenanceJson(unittest.TestCase):
    """Check the ``JSONMaintenanceHandler``: maintenance are *sampled* here, so
    the test focuses on seeding / reproducibility rather than exact equality
    with another reader."""

    def setUp(self) -> None:
        param = Parameters()
        param.NO_OVERFLOW_DISCONNECTION = True
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env2 = grid2op.make(os.path.join(PATH_DATA_TEST, "ieee118_R2subgrid_wcci_test_maintenance"),
                                     data_feeding_kwargs={"gridvalueClass": FromHandlers,
                                                          "gen_p_handler": CSVHandler("prod_p"),
                                                          "load_p_handler": CSVHandler("load_p"),
                                                          "load_q_handler": CSVHandler("load_q"),
                                                          "gen_v_handler": CSVHandler("prod_v"),
                                                          "maintenance_handler": JSONMaintenanceHandler(),
                                                          "gen_p_for_handler": CSVForecastHandler("prod_p_forecasted"),
                                                          "load_p_for_handler": CSVForecastHandler("load_p_forecasted"),
                                                          "load_q_for_handler": CSVForecastHandler("load_q_forecasted"),
                                                          },
                                     _add_to_name="TestMaintenanceCSV",
                                     test=True,
                                     param=param)
        # careful here! the "seed" mechanism does not work the same way between the two alternatives:
        # with handlers, each "handler" gets its own prng with a different seed. This is
        # why you cannot compare directly the generated maintenance between the two envs and why
        # this does not inherit from TestCSVHandlerEnv

    def tearDown(self) -> None:
        self.env2.close()
        return super().tearDown()

    def test_seed(self):
        # with seed 0 the sampled maintenance are fully reproducible: the
        # expected totals / line indices below are hard-coded regression values
        self.env2.seed(0)
        self.env2.reset()
        all_ln_nm = np.zeros(type(self.env2).n_line, dtype=bool)
        assert self.env2.chronics_handler.real_data.data.maintenance_handler.maintenance.sum() == 960
        tmp_ = np.where(self.env2.chronics_handler.real_data.data.maintenance_handler.maintenance.any(axis=0))[0]
        assert np.all(tmp_ == [ 0,  9, 14, 27, 45, 56])
        all_ln_nm[tmp_] = True
        self.env2.reset()
        assert self.env2.chronics_handler.real_data.data.maintenance_handler.maintenance.sum() == 1248
        tmp_ = np.where(self.env2.chronics_handler.real_data.data.maintenance_handler.maintenance.any(axis=0))[0]
        assert np.all(tmp_ == [ 0, 13, 14, 18, 23, 27, 39, 45, 56])
        all_ln_nm[tmp_] = True
        self.env2.reset()
        assert self.env2.chronics_handler.real_data.data.maintenance_handler.maintenance.sum() == 960
        tmp_ = np.where(self.env2.chronics_handler.real_data.data.maintenance_handler.maintenance.any(axis=0))[0]
        assert np.all(tmp_ == [13, 14, 23, 27, 45])
        all_ln_nm[tmp_] = True
        self.env2.reset()
        assert self.env2.chronics_handler.real_data.data.maintenance_handler.maintenance.sum() == 672
        tmp_ = np.where(self.env2.chronics_handler.real_data.data.maintenance_handler.maintenance.any(axis=0))[0]
        assert np.all(tmp_ == [ 0,  9, 13, 18, 23, 56])
        all_ln_nm[tmp_] = True
        # every line that was put in maintenance must belong to the configured
        # "line_to_maintenance" set (and only lines of that set were sampled)
        line_to_maintenance = self.env2.chronics_handler.real_data.data.maintenance_handler.dict_meta_data["line_to_maintenance"]
        assert np.all(np.isin(type(self.env2).name_line[all_ln_nm], line_to_maintenance))
        assert np.all(np.isin(line_to_maintenance, type(self.env2).name_line[all_ln_nm]))
class TestPersistenceHandler(unittest.TestCase):
    """Tests for ``PersistenceForecastHandler``: every forecast horizon should
    simply repeat the last known ("real time") injections."""

    def setUp(self) -> None:
        # forecast horizons: 5, 10, ..., 60 minutes (12 horizons)
        hs_ = [5*(i+1) for i in range(12)]
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("l2rpn_case14_sandbox",
                                    data_feeding_kwargs={"gridvalueClass": FromHandlers,
                                                         "gen_p_handler": CSVHandler("prod_p"),
                                                         "load_p_handler": CSVHandler("load_p"),
                                                         "gen_v_handler": CSVHandler("prod_v"),
                                                         "load_q_handler": CSVHandler("load_q"),
                                                         "h_forecast": hs_,
                                                         "gen_p_for_handler": PersistenceForecastHandler("prod_p_forecasted"),
                                                         "load_p_for_handler": PersistenceForecastHandler("load_p_forecasted"),
                                                         "load_q_for_handler": PersistenceForecastHandler("load_q_forecasted"),
                                                         },
                                    _add_to_name="TestPersistenceHandler",
                                    test=True)

    def tearDown(self) -> None:
        self.env.close()
        return super().tearDown()

    def _aux_test_obs(self, obs, max_it=12):
        """Check that every forecasted injection stored in ``obs`` equals the
        "real time" one (index 0 of ``_forecasted_inj``).

        NOTE: ``max_it`` is not used by this implementation; it is kept in the
        signature because subclasses override this helper and do use it.
        """
        assert len(obs._forecasted_inj) == 13  # 12 + 1
        init_obj = obs._forecasted_inj[0]
        for el in obs._forecasted_inj:
            for k_ in ["load_p", "load_q"]:
                assert np.all(el[1]["injection"][k_] == init_obj[1]["injection"][k_])
            k_ = "prod_p"  # because of slack...
            assert np.all(el[1]["injection"][k_][:-1] == init_obj[1]["injection"][k_][:-1])
        obs.simulate(self.env.action_space(), 12)
        # asking beyond the last available horizon must raise
        with self.assertRaises(NoForecastAvailable):
            obs.simulate(self.env.action_space(), 13)

    def test_step(self):
        # forecasts stay consistent across reset() / step() cycles
        obs = self.env.reset()
        self._aux_test_obs(obs)
        obs, *_ = self.env.step(self.env.action_space())
        self._aux_test_obs(obs)
        obs = self.env.reset()
        self._aux_test_obs(obs)
        obs, *_ = self.env.step(self.env.action_space())
        self._aux_test_obs(obs)

    def test_fast_forward_chronics(self):
        # forecasts stay consistent after jumping forward in the chronics
        obs = self.env.reset()
        self.env.fast_forward_chronics(5)
        obs, *_ = self.env.step(self.env.action_space())
        self._aux_test_obs(obs)
        self.env.fast_forward_chronics(7)
        obs, *_ = self.env.step(self.env.action_space())
        self._aux_test_obs(obs)

    def test_copy(self):
        # a copied environment must produce the exact same forecasts
        env_cpy = self.env.copy()
        obs_cpy = env_cpy.reset()
        obs = self.env.reset()
        self._aux_test_obs(obs_cpy)
        for el, el_cpy in zip(obs._forecasted_inj, obs_cpy._forecasted_inj):
            for k_ in ["load_p", "load_q", "prod_p"]:
                assert np.all(el[1]["injection"][k_] == el_cpy[1]["injection"][k_])

    def test_runner(self):
        # the forecasts must also be correct when the env is driven by a Runner
        from grid2op.Agent import BaseAgent

        class TestAgent(BaseAgent):
            # agent that re-checks the forecasts at every step
            def __init__(self, action_space, tester):
                super().__init__(action_space)
                self.tester = tester

            def act(self, obs, reward, done=False):
                self.tester._aux_test_obs(obs, max_it=5 - obs.current_step)
                _ = self.tester.env.step(self.action_space())  # for TestPerfectForecastHandler: self.tester.env should be synch with the runner env...
                return self.action_space()

            def reset(self, obs):
                self.tester.env.reset()  # for TestPerfectForecastHandler
                return super().reset(obs)

        testagent = TestAgent(self.env.action_space, self)
        self.env.set_id(0)  # for TestPerfectForecastHandler
        runner = Runner(**self.env.get_params_for_runner(), agentClass=None, agentInstance=testagent)
        res = runner.run(nb_episode=2, episode_id=[0, 1], env_seeds=[2, 3], max_iter=5)
class TestPerfectForecastHandler(TestPersistenceHandler):
    """Same scenarios as :class:`TestPersistenceHandler` but with
    ``PerfectForecastHandler``: the forecasted injections must match exactly
    the injections the environment will actually produce at the next steps."""

    def setUp(self) -> None:
        # forecast horizons: 5, 10, ..., 60 minutes (12 horizons)
        hs_ = [5*(i+1) for i in range(12)]
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("l2rpn_case14_sandbox",
                                    data_feeding_kwargs={"gridvalueClass": FromHandlers,
                                                         "gen_p_handler": CSVHandler("prod_p"),
                                                         "load_p_handler": CSVHandler("load_p"),
                                                         "gen_v_handler": CSVHandler("prod_v"),
                                                         "load_q_handler": CSVHandler("load_q"),
                                                         "h_forecast": hs_,
                                                         "gen_p_for_handler": PerfectForecastHandler("prod_p_forecasted"),
                                                         "gen_v_for_handler": PerfectForecastHandler("prod_v_forecasted"),
                                                         "load_p_for_handler": PerfectForecastHandler("load_p_forecasted"),
                                                         "load_q_for_handler": PerfectForecastHandler("load_q_forecasted"),
                                                         },
                                    _add_to_name="TestPerfectForecastHandler",
                                    test=True)

    def tearDown(self) -> None:
        self.env.close()

    def _aux_test_obs(self, obs, max_it=13):
        """Check the forecasts stored in ``obs`` against the real next steps of
        a copy of the environment, for up to ``max_it`` forecast steps."""
        assert len(obs._forecasted_inj) == 13  # 12 + 1
        env_cpy = self.env.copy()
        for it_num, el in enumerate(obs._forecasted_inj[1:]):
            # advance the copied env one real step and compare with the forecast
            next_obs, *_ = env_cpy.step(self.env.action_space())
            for k_ in ["load_p", "load_q", "prod_v"]:
                assert np.allclose(el[1]["injection"][k_], getattr(next_obs, k_)), f"error for {k_} at iteration {it_num}"
            k_ = "prod_p"  # because of slack...
            assert np.all(el[1]["injection"][k_][:-1] == getattr(next_obs, k_)[:-1])
            # BUG FIX: the condition used to be `if max_it > it_num: break`,
            # which broke out right after the FIRST forecast step for any
            # positive `max_it` (max_it > 0 always holds at it_num == 0), so
            # only one forecast was ever validated. Stop only once `max_it`
            # forecast steps have actually been checked.
            if it_num + 1 >= max_it:
                break
        obs.simulate(self.env.action_space(), 12)
        with self.assertRaises(NoForecastAvailable):
            obs.simulate(self.env.action_space(), 13)
class TestNoisyForecastHandler(unittest.TestCase):
    """Tests for ``NoisyForecastHandler`` (seeding and the different ways to
    provide the noise standard deviation ``sigma``).

    BUG FIX: this class used to be named ``TestPerfectForecastHandler``, which
    shadowed the class of the same name defined above in this module — those
    tests were then silently never collected by unittest discovery. It has
    been renamed so both test classes actually run.
    """

    def setUp(self) -> None:
        # forecast horizons: 5, 10, ..., 60 minutes (12 horizons)
        hs_ = [5*(i+1) for i in range(12)]
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.env = grid2op.make("l2rpn_case14_sandbox",
                                    data_feeding_kwargs={"gridvalueClass": FromHandlers,
                                                         "gen_p_handler": CSVHandler("prod_p"),
                                                         "load_p_handler": CSVHandler("load_p"),
                                                         "gen_v_handler": CSVHandler("prod_v"),
                                                         "load_q_handler": CSVHandler("load_q"),
                                                         "h_forecast": hs_,
                                                         "gen_p_for_handler": NoisyForecastHandler("prod_p_forecasted"),
                                                         "load_p_for_handler": NoisyForecastHandler("load_p_forecasted"),
                                                         "load_q_for_handler": NoisyForecastHandler("load_q_forecasted"),
                                                         },
                                    # string kept as-is for backward compatibility of the generated env class name
                                    _add_to_name="TestPerfectForecastHandler",
                                    test=True)

    def tearDown(self) -> None:
        self.env.close()

    def _aux_obs_equal(self, obs1, obs2):
        # two observations are considered equal when injections and flows match exactly
        assert np.all(obs1.load_p == obs2.load_p)
        assert np.all(obs1.load_q == obs2.load_q)
        assert np.all(obs1.gen_p == obs2.gen_p)
        assert np.all(obs1.rho == obs2.rho)

    def test_seed(self):
        """noisy forecasts must be reproducible for a given seed, different otherwise"""
        self.env.set_id(0)
        self.env.seed(0)
        obs = self.env.reset()
        sim_obs, *_ = obs.simulate(self.env.action_space(), 12)
        # same results when called multiple times on the same observation
        sim_obs_, *_ = obs.simulate(self.env.action_space(), 12)
        self._aux_obs_equal(sim_obs, sim_obs_)
        # same results when the env is re-seeded identically
        self.env.set_id(0)
        self.env.seed(0)
        obs2 = self.env.reset()
        sim_obs2, *_ = obs2.simulate(self.env.action_space(), 12)
        self._aux_obs_equal(sim_obs, sim_obs2)
        # different results when not re-seeded
        obs3 = self.env.reset()
        sim_obs3, *_ = obs3.simulate(self.env.action_space(), 12)
        assert np.all(sim_obs3.load_p != sim_obs.load_p)

    def test_get_list(self):
        """check the different accepted formats for the noise amplitude ``sigma``"""
        handler : NoisyForecastHandler = self.env.chronics_handler.real_data.data.gen_p_for_handler
        # default behaviour: sigma is None and the noise is derived from the horizons
        assert handler.sigma is None
        assert np.allclose(handler._my_noise, [0.022360679774997897, 0.0316227766016838, 0.03872983346207417,
                                               0.044721359549995794, 0.05, 0.05477225575051661, 0.05916079783099616,
                                               0.0632455532033676, 0.0670820393249937, 0.07071067811865475,
                                               0.07416198487095663, 0.07745966692414834])
        # with a callable: sigma is evaluated at each horizon
        handler.sigma = lambda x: x
        handler.set_h_forecast(handler._h_forecast)
        assert np.all(handler._my_noise == [5 * (i+1) for i in range(12)])
        # with a complete list (exact size)
        # NB: this check used to be duplicated verbatim; the redundant copy was removed
        handler.sigma = [6 * (i+1) for i in range(12)]
        handler.set_h_forecast(handler._h_forecast)
        assert np.all(handler._my_noise == handler.sigma)
        # with a list that is too long
        handler.sigma = [6 * (i+1) for i in range(15)]
        handler.set_h_forecast(handler._h_forecast)
        assert np.all(handler._my_noise == handler.sigma)
        # with a list that is too short: completed with the last value (60.)
        handler.sigma = [6 * (i+1) for i in range(10)]
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            handler.set_h_forecast(handler._h_forecast)
        assert np.all(handler._my_noise == (handler.sigma + [60., 60.]))
        # a too-short list must also issue a warning
        handler.sigma = [6 * (i+1) for i in range(10)]
        with self.assertRaises(UserWarning):
            with warnings.catch_warnings():
                warnings.filterwarnings("error")
                handler.set_h_forecast(handler._h_forecast)
class TestLoadQPHandler14(TestCSVHandlerEnv):
    """Check ``LoadQFromPHandler``: reactive load (q) derived from active load (p)
    must match an environment whose chronics store q explicitly with a constant q/p ratio."""

    def setUp(self) -> None:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            # reference environment: q is read directly from the chronics files
            self.env1 = grid2op.make(os.path.join(PATH_DATA_TEST, "l2rpn_case14_sandbox_qp_cste"),
                                     test=True)  # regular env
            # environment under test: q is recomputed from p by the handler
            self.env2 = grid2op.make(os.path.join(PATH_DATA_TEST, "l2rpn_case14_sandbox_qp_cste"),
                                     data_feeding_kwargs={"gridvalueClass": FromHandlers,
                                                          "gen_p_handler": CSVHandler("prod_p"),
                                                          "load_p_handler": CSVHandler("load_p"),
                                                          "gen_v_handler": CSVHandler("prod_v"),
                                                          "load_q_handler": LoadQFromPHandler("load_q"),
                                                          "gen_p_for_handler": CSVForecastHandler("prod_p_forecasted"),
                                                          "gen_v_for_handler": CSVForecastHandler("prod_v_forecasted"),
                                                          "load_p_for_handler": CSVForecastHandler("load_p_forecasted"),
                                                          "load_q_for_handler": LoadQFromPHandler("load_q_forecasted"),
                                                          },
                                     _add_to_name="TestForecastHandlerEnv",
                                     test=True)
        self._aux_reproducibility()
# allow running this test module directly with `python <this_file>.py`
if __name__ == "__main__":
    unittest.main()
| 39,818 | 53.546575 | 184 | py |
Grid2Op | Grid2Op-master/grid2op/tests/test_utils.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.tests.helper_path_test import *
PATH_ADN_CHRONICS_FOLDER = os.path.abspath(
os.path.join(PATH_CHRONICS, "test_multi_chronics")
)
PATH_PREVIOUS_RUNNER = os.path.join(data_test_dir, "runner_data")
from grid2op.Reward import L2RPNSandBoxScore
from grid2op.MakeEnv import make
from grid2op.dtypes import dt_float
from grid2op.Agent import DoNothingAgent, RecoPowerlineAgent
from grid2op.utils import EpisodeStatistics, ScoreL2RPN2020, ScoreICAPS2021
from grid2op.Parameters import Parameters
import warnings
warnings.simplefilter("error")
class TestEpisodeStatistics(HelperTests):
    """test the grid2op.utils.EpisodeStatistics class"""

    def test_read(self):
        """test that i can read the data stored"""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env:
                obs = env.reset()
                stats = EpisodeStatistics(env)
                # read a quantity already computed and shipped with the test env
                aor_, ids_ = stats.get("a_or")
                assert aor_.shape == (7930, 8)
                assert np.max(ids_) == 19
                assert ids_.shape == (7930, 1)
                # hard-coded regression values for the mean flow per line
                assert self.compare_vect(
                    np.mean(aor_, axis=0),
                    np.array(
                        [
                            351.6208,
                            153.674,
                            91.057,
                            80.47367,
                            351.93213,
                            89.18627,
                            89.18627,
                            74.77638,
                        ],
                        dtype=dt_float,
                    ),
                )

    def test_compute_erase(self):
        """test that i can compute and erase the results afterwards"""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env:
                stats = EpisodeStatistics(env, "test")
                stats.compute(nb_scenario=1, max_step=10, pbar=False)
                # the file have been created
                assert os.path.exists(
                    os.path.join(env.get_path_env(), stats.get_name_dir("test"))
                )
                # i can access it
                aor_, ids_ = stats.get("a_or")
                assert aor_.shape == (11, 8)
                # i can clear the data of individual episode
                stats.clear_episode_data()
                assert not os.path.exists(
                    os.path.join(env.get_path_env(), stats.get_name_dir("test"), "00")
                )
                # i can clear everything
                stats.clear_all()
                assert not os.path.exists(
                    os.path.join(env.get_path_env(), stats.get_name_dir("test"))
                )

    def test_compute_with_score(self):
        """test that computing with a scores_func also stores the scores"""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env:
                stats = EpisodeStatistics(env, "test")
                stats.compute(
                    nb_scenario=2,
                    max_step=10,
                    pbar=False,
                    scores_func=L2RPNSandBoxScore,
                )
                # i can access it (both by constant and by its string alias)
                scores, ids_ = stats.get(EpisodeStatistics.SCORES)
                assert scores.shape == (20,), "error on the score shape"
                assert ids_.shape == (20, 1), "error on the ids shape"
                scores, ids_ = stats.get("scores")
                assert scores.shape == (20,), "error on the score shape"
                assert ids_.shape == (20, 1), "error on the ids shape"
                # i can clear everything
                stats.clear_all()
                assert not os.path.exists(
                    os.path.join(env.get_path_env(), stats.get_name_dir("test"))
                )

    def test_compute_without_score(self):
        """test that without a scores_func, reading the scores raises"""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env:
                stats = EpisodeStatistics(env, "test")
                stats.compute(nb_scenario=2, max_step=10, pbar=False)
                # i can access regular quantities
                prods, ids_ = stats.get("prod_p")
                assert prods.shape == (22, 2), "error on the prods shape"
                assert ids_.shape == (22, 1), "error on the ids shape"
                # but scores were not computed: asking for them must raise
                with self.assertRaises(RuntimeError):
                    scores, ids_ = stats.get("scores")
                # i can clear everything
                stats.clear_all()
                assert not os.path.exists(
                    os.path.join(env.get_path_env(), stats.get_name_dir("test"))
                )
class TestL2RPNSCORE(HelperTests):
    """test the grid2op.utils.ScoreL2RPN2020 class.

    The existence / removal checks of the two statistics directories were
    copy-pasted verbatim in every test; they are factored into the private
    helpers below (same paths, same assertions, identical behavior).
    """

    @staticmethod
    def _stats_paths(env):
        """Paths of the two statistics directories ScoreL2RPN2020 uses for this env."""
        return [
            os.path.join(
                env.get_path_env(),
                EpisodeStatistics.get_name_dir(ScoreL2RPN2020.NAME_DN),
            ),
            os.path.join(
                env.get_path_env(),
                EpisodeStatistics.get_name_dir(ScoreL2RPN2020.NAME_RP_NO_OVERFLOW),
            ),
        ]

    def _assert_stats_computed(self, env):
        """Both statistics directories must exist on disk."""
        for path in self._stats_paths(env):
            assert os.path.exists(path)

    def _assert_stats_cleared(self, env):
        """Both statistics directories must have been removed from disk."""
        for path in self._stats_paths(env):
            assert not os.path.exists(path)

    def test_can_compute(self):
        """test that i can initialize the score and then delete the statistics"""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env:
                scores = ScoreL2RPN2020(env, nb_scenario=4, verbose=0, max_step=50)
                # the statistics have been properly computed
                self._assert_stats_computed(env)
                # delete them
                scores.clear_all()
                self._assert_stats_cleared(env)

    def test_donothing_0(self):
        """test that do nothing has a score of 0.00"""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env:
                scores = ScoreL2RPN2020(env, nb_scenario=4, verbose=0, max_step=20)
                # the statistics have been properly computed
                self._assert_stats_computed(env)
                # do nothing is the reference agent: its score is 0 by construction
                my_agent = DoNothingAgent(env.action_space)
                my_scores, *_ = scores.get(my_agent)
                assert np.max(np.abs(my_scores)) <= self.tol_one
                # delete them
                scores.clear_all()
                self._assert_stats_cleared(env)

    def test_modif_max_step_decrease(self):
        """
        test that i can modify the max step by decreasing it (and in that case it does not trigger a recomputation
        of the statistics)
        """
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env:
                scores = ScoreL2RPN2020(env, nb_scenario=2, verbose=0, max_step=15)
                self._assert_stats_computed(env)
                my_agent = DoNothingAgent(env.action_space)
                my_scores, *_ = scores.get(my_agent)
                assert (
                    np.max(np.abs(my_scores)) <= self.tol_one
                ), "error for the first do nothing"

                # decreasing max_step must reuse the stored statistics
                scores2 = ScoreL2RPN2020(env, nb_scenario=2, verbose=0, max_step=10)
                assert not scores2._recomputed_dn
                assert not scores2._recomputed_no_ov_rp
                my_agent = DoNothingAgent(env.action_space)
                my_scores2, *_ = scores2.get(my_agent)
                assert (
                    np.max(np.abs(my_scores2)) <= self.tol_one
                ), "error for the second do nothing"

                # delete them
                scores.clear_all()
                self._assert_stats_cleared(env)

    def test_modif_max_step_increase(self):
        """test that i can modify the max step (and that if I increase it it does trigger a recomputation)"""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env:
                scores = ScoreL2RPN2020(env, nb_scenario=2, verbose=0, max_step=5)
                self._assert_stats_computed(env)
                my_agent = DoNothingAgent(env.action_space)
                my_scores, *_ = scores.get(my_agent)
                assert (
                    np.max(np.abs(my_scores)) <= self.tol_one
                ), "error for the first do nothing"

                # increasing max_step must trigger a recomputation
                scores2 = ScoreL2RPN2020(env, nb_scenario=2, verbose=0, max_step=10)
                assert scores2._recomputed_dn
                assert scores2._recomputed_no_ov_rp

                # delete them
                scores.clear_all()
                self._assert_stats_cleared(env)

    def test_modif_nb_scenario(self):
        """
        test that i can modify the nb_scenario and it properly recomputes it when it increased and not
        when it decreases
        """
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env:
                scores = ScoreL2RPN2020(env, nb_scenario=2, verbose=0, max_step=5)
                self._assert_stats_computed(env)
                my_agent = DoNothingAgent(env.action_space)
                my_scores, *_ = scores.get(my_agent)
                assert (
                    np.max(np.abs(my_scores)) <= self.tol_one
                ), "error for the first do nothing"

                # more scenarios than computed: recomputation needed
                scores2 = ScoreL2RPN2020(env, nb_scenario=4, verbose=0, max_step=5)
                assert scores2._recomputed_dn
                assert scores2._recomputed_no_ov_rp

                # fewer scenarios than computed: statistics are reused
                scores2 = ScoreL2RPN2020(env, nb_scenario=3, verbose=0, max_step=5)
                assert not scores2._recomputed_dn
                assert not scores2._recomputed_no_ov_rp

                # delete them
                scores.clear_all()
                self._assert_stats_cleared(env)

    def test_reco_noov_80(self):
        """test that do nothing has a score of 80.0 if it is run with "no overflow disconnection" """
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make("rte_case5_example", test=True) as env:
                # I cannot decrease the max step: it must be above the number of steps the do nothing does
                scores = ScoreL2RPN2020(env, nb_scenario=2, verbose=0, max_step=130)
                assert scores._recomputed_dn
                assert scores._recomputed_no_ov_rp
                # the statistics have been properly computed
                self._assert_stats_computed(env)
                my_agent = DoNothingAgent(env.action_space)
                my_scores, *_ = scores.get(my_agent)
                assert (
                    np.max(np.abs(my_scores)) <= self.tol_one
                ), "error for the first do nothing"

            # now with "no overflow disconnection": reconnecting powerlines
            # reaches the upper reference, hence a score of 80
            param = Parameters()
            param.NO_OVERFLOW_DISCONNECTION = True
            with make("rte_case5_example", test=True, param=param) as env:
                scores2 = ScoreL2RPN2020(env, nb_scenario=2, verbose=0, max_step=130)
                assert not scores2._recomputed_dn
                assert not scores2._recomputed_no_ov_rp
                my_agent = RecoPowerlineAgent(env.action_space)
                my_scores, *_ = scores2.get(my_agent)
                assert np.max(np.abs(np.array(my_scores) - 80.0)) <= self.tol_one

                # delete them
                scores.clear_all()
                self._assert_stats_cleared(env)
class TestICAPSSCORE(HelperTests):
    """test the grid2op.utils.ScoreICAPS2021 class"""

    def test_can_compute(self):
        """test that i can initialize the score and then delete the statistics"""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with make(
                os.path.join(PATH_DATA_TEST, "l2rpn_neurips_2020_track1_with_alarm"),
                test=True,
            ) as env:
                scores = ScoreICAPS2021(
                    env,
                    nb_scenario=2,
                    verbose=0,
                    max_step=50,
                    env_seeds=[1, 2],  # with these seeds do nothing goes till the end
                    agent_seeds=[3, 4],
                )
                my_agent = DoNothingAgent(env.action_space)
                scores_this, n_played, total_ts = scores.get(my_agent)
                # each episode score is a (total, operational, alarm) triple;
                # the expected values below follow from the ICAPS weighting of
                # a do-nothing agent that survives the whole episode
                for (ep_score, op_score, alarm_score) in scores_this:
                    assert (
                        np.abs(ep_score - 30.0) <= self.tol_one
                    ), f"wrong score for the episode: {ep_score} vs 30."
                    assert np.abs(op_score - 0.0) <= self.tol_one, (
                        f"wrong score for the operationnal cost: " f"{op_score} vs 0."
                    )
                    assert np.abs(alarm_score - 100.0) <= self.tol_one, (
                        f"wrong score for the alarm: " f"{alarm_score} vs 100."
                    )

                # the statistics have been properly computed
                assert os.path.exists(
                    os.path.join(
                        env.get_path_env(),
                        EpisodeStatistics.get_name_dir(ScoreICAPS2021.NAME_DN),
                    )
                )
                assert os.path.exists(
                    os.path.join(
                        env.get_path_env(),
                        EpisodeStatistics.get_name_dir(
                            ScoreICAPS2021.NAME_RP_NO_OVERFLOW
                        ),
                    )
                )
                # delete them
                scores.clear_all()
                assert not os.path.exists(
                    os.path.join(
                        env.get_path_env(),
                        EpisodeStatistics.get_name_dir(ScoreL2RPN2020.NAME_DN),
                    )
                )
                # assert not os.path.exists(os.path.join(env.get_path_env(),
                #                                        EpisodeStatistics.get_name_dir(ScoreL2RPN2020.NAME_DN_NO_OVERWLOW)))
                assert not os.path.exists(
                    os.path.join(
                        env.get_path_env(),
                        EpisodeStatistics.get_name_dir(
                            ScoreL2RPN2020.NAME_RP_NO_OVERFLOW
                        ),
                    )
                )
# allow running this test module directly with `python test_utils.py`
if __name__ == "__main__":
    unittest.main()
| 22,214 | 41.314286 | 125 | py |
Grid2Op | Grid2Op-master/grid2op/utils/__init__.py | __all__ = ["EpisodeStatistics", "ScoreL2RPN2020", "ScoreICAPS2021", "ScoreL2RPN2022", "ScoreL2RPN2023"]
from grid2op.utils.underlying_statistics import EpisodeStatistics
from grid2op.utils.l2rpn_2020_scores import ScoreL2RPN2020
from grid2op.utils.icaps_2021_scores import ScoreICAPS2021
from grid2op.utils.l2rpn_wcci_2022_scores import ScoreL2RPN2022
from grid2op.utils.l2rpn_idf_2023_scores import ScoreL2RPN2023
| 416 | 51.125 | 103 | py |
Grid2Op | Grid2Op-master/grid2op/utils/icaps_2021_scores.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import numpy as np
import json
import copy
import tempfile
from grid2op.dtypes import dt_float
from grid2op.Reward import L2RPNSandBoxScore, _AlarmScore
from grid2op.utils.underlying_statistics import EpisodeStatistics
from grid2op.utils.l2rpn_2020_scores import ScoreL2RPN2020
from grid2op.Episode import EpisodeData
class ScoreICAPS2021(ScoreL2RPN2020):
    """
    This class allows to compute the same score as the one computed for the ICAPS 2021 competitions.

    It uses some "EpisodeStatistics" of the environment to compute these scores. These statistics, if not available
    are computed at the initialization.

    When using it a second time these information are reused.

    This scores is the combination of the `ScoreL2RPN2020` score and some extra scores based on the alarm feature.

    Examples
    ---------
    This class can be used as follow:

    .. code-block:: python

        import grid2op
        from grid2op.utils import ScoreICAPS2021
        from grid2op.Agent import DoNothingAgent

        env = grid2op.make("l2rpn_case14_sandbox")
        nb_scenario = 2
        my_score = ScoreICAPS2021(env,
                                  nb_scenario=nb_scenario,
                                  env_seeds=[0 for _ in range(nb_scenario)],
                                  agent_seeds=[0 for _ in range(nb_scenario)]
                                  )

        my_agent = DoNothingAgent(env.action_space)
        print(my_score.get(my_agent))

    Notes
    -------
    To prevent overfitting, we strongly recommend you to use the :func:`grid2op.Environment.Environment.train_val_split`
    and use this function on the built validation set only.

    Also note than computing the statistics, and evaluating an agent on a whole dataset of multiple GB can take a
    really long time and a lot of memory. This fact, again, plea in favor of using this function only on
    a validation set.

    We also strongly recommend to set the seeds of your agent (agent_seeds)
    and of the environment (env_seeds) if you want to use this feature. Reproducibility is really important if you
    want to make progress.

    .. warning::

        The triggering (or not) of the recomputation of the statistics is not perfect for now.
        We recommend you to use always
        the same seeds (`env_seeds` and `agent_seeds` key word argument of this functions)
        and the same parameters (`env.parameters`) when using a given environments.

        You might need to clean it manually if you change
        one of theses things by calling :func:`ScoreL2RPN2020.clear_all()` function .

    """

    NAME_DN = "icaps2021_dn"
    # NAME_DN_NO_OVERFLOW = "icaps2021_no_overflow"
    # BUG FIX: this attribute used to be spelled "NAME_RP_NO_OVERWLOW", which
    # did NOT override the correctly-spelled attribute inherited from
    # ScoreL2RPN2020 (referenced elsewhere as `NAME_RP_NO_OVERFLOW`): the
    # icaps-specific directory name was therefore never used through that
    # attribute. The correct spelling is defined, and the historical misspelled
    # name is kept as an alias for backward compatibility.
    NAME_RP_NO_OVERFLOW = "icaps2021_no_overflow_reco"
    NAME_RP_NO_OVERWLOW = NAME_RP_NO_OVERFLOW

    def __init__(
        self,
        env,
        env_seeds=None,
        agent_seeds=None,
        nb_scenario=16,
        min_losses_ratio=0.8,
        verbose=0,
        max_step=-1,
        nb_process_stats=1,
        scale_alarm_score=100.0,
        weight_op_score=0.7,
        weight_alarm_score=0.3,
    ):
        # delegate the statistics computation to the parent class, registering
        # the two reward functions whose values the final score combines
        ScoreL2RPN2020.__init__(
            self,
            env=env,
            env_seeds=env_seeds,
            agent_seeds=agent_seeds,
            nb_scenario=nb_scenario,
            min_losses_ratio=min_losses_ratio,
            verbose=verbose,
            max_step=max_step,
            nb_process_stats=nb_process_stats,
            scores_func={
                "grid_operational_cost": L2RPNSandBoxScore,
                "alarm_cost": _AlarmScore,
            },
            score_names=["grid_operational_cost_scores", "alarm_cost_scores"],
        )
        # parameters of the weighted combination: total = w_op * op + w_alarm * (scale * alarm)
        self.scale_alarm_score = scale_alarm_score
        self.weight_op_score = weight_op_score
        self.weight_alarm_score = weight_alarm_score

    def _compute_episode_score(
        self,
        ep_id,  # the ID here, which is an integer and is not the ID from chronics balblabla
        meta,
        other_rewards,
        dn_metadata,
        no_ov_metadata,
        score_file_to_use=None,
    ):
        """
        Performs the rescaling of the score given the information stored in the "statistics" of this
        environment.

        This computes the score for a single episode. The loop to compute the score for all the
        episodes is the same as for l2rpn_2020_scores and is then reused.

        Returns a ``((ep_score, op_score, alarm_score), n_played, total_ts)`` tuple.
        """
        # compute the operational score with the parent's rescaling logic
        op_score, n_played, total_ts = super()._compute_episode_score(
            ep_id,
            meta,
            other_rewards,
            dn_metadata,
            no_ov_metadata,
            # score_file_to_use should match the
            # L2RPNSandBoxScore key in
            # self.scores_func
            score_file_to_use="grid_operational_cost_scores",
        )

        # should match underlying_statistics.run_env `dict_kwg["other_rewards"][XXX] = ...`
        # XXX is right now f"{EpisodeStatistics.KEY_SCORE}_{nm}" [this should match the XXX]
        alarm_score_nm = "alarm_cost_scores"
        real_nm = EpisodeStatistics._nm_score_from_attr_name(alarm_score_nm)
        key_score_file = f"{EpisodeStatistics.KEY_SCORE}_{real_nm}"
        # the alarm score of the last step of the episode, rescaled
        alarm_score = float(other_rewards[-1][key_score_file])
        alarm_score = self.scale_alarm_score * alarm_score
        # final score: weighted combination of operational and alarm scores
        ep_score = (
            self.weight_op_score * op_score + self.weight_alarm_score * alarm_score
        )
        return (ep_score, op_score, alarm_score), n_played, total_ts
if __name__ == "__main__":
    # Small manual check / demo: score a do-nothing agent on 2 seeded scenarios.
    # BUG FIX: a `RandomAgent` used to be instantiated and then immediately
    # overwritten by the `DoNothingAgent` assignment; that dead code (and its
    # now-unused import) has been removed.
    import grid2op
    from lightsim2grid import LightSimBackend
    from grid2op.Agent import DoNothingAgent

    env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend())
    nb_scenario = 2
    my_score = ScoreICAPS2021(
        env,
        nb_scenario=nb_scenario,
        env_seeds=[0 for _ in range(nb_scenario)],
        agent_seeds=[0 for _ in range(nb_scenario)],
    )
    my_agent = DoNothingAgent(env.action_space)
    print(my_score.get(my_agent))
| 6,515 | 35 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/utils/l2rpn_2020_scores.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import os
import numpy as np
import json
import copy
import tempfile
from grid2op.dtypes import dt_float
from grid2op.Reward import L2RPNSandBoxScore
from grid2op.Agent import RecoPowerlineAgent
from grid2op.utils.underlying_statistics import EpisodeStatistics
from grid2op.Episode import EpisodeData
import re
class ScoreL2RPN2020(object):
"""
This class allows to compute the same score as the one computed for the L2RPN 2020 competitions.
It uses some "EpisodeStatistics" of the environment to compute these scores. These statistics, if not available
are computed at the initialization.
When using it a second time these information are reused.
Examples
---------
This class can be used as follow:
.. code-block:: python
import grid2op
from grid2op.utils import ScoreL2RPN2020
from grid2op.Agent import DoNothingAgent
env = grid2op.make("l2rpn_case14_sandbox")
nb_scenario = 2
my_score = ScoreL2RPN2020(env,
nb_scenario=nb_scenario,
env_seeds=[0 for _ in range(nb_scenario)],
agent_seeds=[0 for _ in range(nb_scenario)]
)
my_agent = DoNothingAgent(env.action_space)
print(my_score.get(my_agent))
Notes
-------
To prevent overfitting, we strongly recommend you to use the :func:`grid2op.Environment.Environment.train_val_split`
and use this function on the built validation set only.
    Also note that computing the statistics, and evaluating an agent on a whole dataset of multiple GB can take a
    really long time and a lot of memory. This fact, again, pleads in favor of using this function only on
    a validation set.
We also strongly recommend to set the seeds of your agent (agent_seeds)
and of the environment (env_seeds) if you want to use this feature. Reproducibility is really important if you
want to make progress.
.. warning::
The triggering (or not) of the recomputation of the statistics is not perfect for now.
We recommend you to use always
the same seeds (`env_seeds` and `agent_seeds` key word argument of this functions)
and the same parameters (`env.parameters`) when using a given environments.
You might need to clean it manually if you change
one of theses things by calling :func:`ScoreL2RPN2020.clear_all()` function .
"""
NAME_DN = "l2rpn_dn"
NAME_RP_NO_OVERFLOW = "l2rpn_no_overflow_reco"
def __init__(
self,
env,
env_seeds=None,
agent_seeds=None,
nb_scenario=16,
min_losses_ratio=0.8,
verbose=0,
max_step=-1,
nb_process_stats=1,
scores_func=L2RPNSandBoxScore,
score_names=None,
):
self.env = env
self.nb_scenario = nb_scenario
self.env_seeds = env_seeds
self.agent_seeds = agent_seeds
self.min_losses_ratio = min_losses_ratio
self.verbose = verbose
self.max_step = max_step
computed_scenarios = [el[1] for el in EpisodeStatistics.list_stats(self.env)]
self.scores_func = scores_func
# check if i need to compute stat for do nothing
self.stat_dn = EpisodeStatistics(self.env, self.NAME_DN)
self._recomputed_dn = self._init_stat(
self.stat_dn,
self.NAME_DN,
computed_scenarios,
nb_process_stats=nb_process_stats,
score_names=score_names,
)
# check if i need to compute that for do nothing without overflow disconnection
param_no_overflow = copy.deepcopy(env.parameters)
param_no_overflow.NO_OVERFLOW_DISCONNECTION = True
# check if i need to compute that for reco powerline without overflow disconnection
self.stat_no_overflow_rp = EpisodeStatistics(self.env, self.NAME_RP_NO_OVERFLOW)
agent_reco = RecoPowerlineAgent(self.env.action_space)
self._recomputed_no_ov_rp = self._init_stat(
self.stat_no_overflow_rp,
self.NAME_RP_NO_OVERFLOW,
computed_scenarios,
parameters=param_no_overflow,
nb_process_stats=nb_process_stats,
agent=agent_reco,
score_names=score_names,
)
self.__cleared = False
    def _init_stat(
        self,
        stat,
        stat_name,
        computed_scenarios,
        parameters=None,
        nb_process_stats=1,
        agent=None,
        score_names=None,
    ):
        """Check whether the statistics `stat` need to be (re)computed, and compute them if so.

        Returns ``True`` when a recomputation was performed, ``False`` when the
        statistics on disk were considered usable as-is.
        """
        need_recompute = True

        if score_names is None:
            score_names = [EpisodeStatistics.SCORES]

        if EpisodeStatistics.get_name_dir(stat_name) in computed_scenarios:
            # statistics already exist on disk: check whether they cover
            # enough scenarios / enough steps for the current request
            scores, ids_ = stat.get(score_names[0])
            metadata = stat.get_metadata()
            max_id = np.max(ids_)

            # recompute if fewer scenarios were computed than requested
            # (scenario ids are 0-based, hence the -1)
            need_recompute = max_id < self.nb_scenario - 1
            # then check the number of steps that were computed
            computed_step = int(metadata["max_step"])
            if computed_step > 0:
                # the stored statistics were computed with a step limit
                if self.max_step == -1:
                    # the whole dataset is now requested: must recompute
                    need_recompute = True

                # also recompute when more steps are requested than were stored
                need_recompute = need_recompute or self.max_step > metadata["max_step"]

            # TODO check for the seeds here too
            # TODO and check for the class of the scores too
            # TODO check for the parameters too...

        if need_recompute:
            # statistics missing or insufficient: compute them now
            if self.verbose >= 1:
                print(
                    "I need to recompute the statistics for this environment. This will take a while"
                )  # TODO logger
            stat.compute(
                nb_scenario=self.nb_scenario,
                pbar=self.verbose >= 2,
                env_seeds=self.env_seeds,
                agent_seeds=self.agent_seeds,
                scores_func=self.scores_func,
                max_step=self.max_step,
                parameters=parameters,
                nb_process=nb_process_stats,
            )
            # only the aggregated statistics are kept, the raw episode data
            # (which can be huge) is removed
            stat.clear_episode_data()
        return need_recompute
def _compute_episode_score(
self,
ep_id, # the ID here, which is an integer and is not the ID from chronics balblabla
meta,
other_rewards,
dn_metadata,
no_ov_metadata,
score_file_to_use=None,
):
"""
Performs the rescaling of the score given the information stored in the "statistics" of this
environment.
"""
# load_p, ids = self.stat_no_overflow.get("load_p")
# prod_p, _ = self.stat_no_overflow.get("prod_p")
load_p_rp, ids_rp = self.stat_no_overflow_rp.get("load_p")
prod_p_rp, _ = self.stat_no_overflow_rp.get("load_p")
if score_file_to_use is None:
score_file_to_use = EpisodeStatistics.SCORES
key_score_file = EpisodeStatistics.KEY_SCORE
else:
# should match underlying_statistics.run_env `dict_kwg["other_rewards"][XXX] = ...`
# XXX is right now f"{EpisodeStatistics.KEY_SCORE}_{nm}" [this should match the XXX]
real_nm = EpisodeStatistics._nm_score_from_attr_name(score_file_to_use)
key_score_file = f"{EpisodeStatistics.KEY_SCORE}_{real_nm}"
scores_dn, ids_dn_sc = self.stat_dn.get(score_file_to_use)
# scores_no_ov, ids_noov_sc = self.stat_no_overflow.get(score_file_to_use)
scores_no_ov_rp, ids_noov_sc_rp = self.stat_no_overflow_rp.get(
score_file_to_use
)
# reshape to have 1 dim array
ids = ids_rp.reshape(-1)
ids_dn_sc = ids_dn_sc.reshape(-1)
ids_noov_sc_rp = ids_noov_sc_rp.reshape(-1)
# there is a hugly "1" at the end of each scores due to the "game over" (or end of game), so i remove it
scores_dn = scores_dn[ids_dn_sc == ep_id][:-1]
scores_no_ov_rp = scores_no_ov_rp[ids_noov_sc_rp == ep_id][:-1]
dn_this = dn_metadata[f"{ep_id}"]
no_ov_this = no_ov_metadata[f"{ep_id}"]
n_played = int(meta["nb_timestep_played"])
dn_step_played = dn_this["nb_step"] - 1
total_ts = no_ov_this["nb_step"] - 1
ep_marginal_cost = np.max(self.env.gen_cost_per_MW).astype(dt_float)
min_losses_ratio = self.min_losses_ratio
# remember that first observation do not count (it's generated by the environment)
ep_loads = np.sum(load_p_rp[ids == ep_id, :], axis=1)[1:]
ep_losses = np.sum(prod_p_rp[ids == ep_id, :], axis=1)[1:] - ep_loads
if self.max_step > 0:
scores_dn = scores_dn[: self.max_step]
# scores_no_ov = scores_no_ov[:self.max_step]
scores_no_ov_rp = scores_no_ov_rp[: self.max_step]
ep_loads = ep_loads[: self.max_step]
ep_losses = ep_losses[: self.max_step]
# do nothing operationnal cost
ep_do_nothing_operat_cost = np.sum(scores_dn)
ep_do_nothing_operat_cost += (
np.sum(ep_loads[dn_step_played:]) * ep_marginal_cost
)
# no overflow disconnection cost
ep_do_nothing_nodisc_cost = np.sum(scores_no_ov_rp)
# this agent cumulated operationnal cost
# same as above: i remove the last element which correspond to the last state, so irrelevant
ep_cost = np.array([el[key_score_file] for el in other_rewards]).astype(
dt_float
)
if dn_metadata["max_step"] == self.max_step:
ep_cost = ep_cost[:-1]
ep_cost = np.sum(ep_cost)
ep_cost += np.sum(ep_loads[n_played:]) * ep_marginal_cost
# Compute ranges
worst_operat_cost = (
np.sum(ep_loads) * ep_marginal_cost
) # operational cost corresponding to the min score
zero_operat_score = ep_do_nothing_operat_cost
nodisc_oeprat_cost = ep_do_nothing_nodisc_cost
best_score = (
np.sum(ep_losses) * min_losses_ratio
) # operational cost corresponding to the max score
# Linear interp episode reward to codalab score
if zero_operat_score != nodisc_oeprat_cost:
# DoNothing agent doesnt complete the scenario
reward_range = [
best_score,
nodisc_oeprat_cost,
zero_operat_score,
worst_operat_cost,
]
score_range = [100.0, 80.0, 0.0, -100.0]
else:
# DoNothing agent can complete the scenario
reward_range = [best_score, zero_operat_score, worst_operat_cost]
score_range = [100.0, 0.0, -100.0]
ep_score = np.interp(ep_cost, reward_range, score_range)
return ep_score, n_played, total_ts
def clear_all(self):
"""
Has side effects
.. warning:: /!\\\\ Be careful /!\\\\
Clear the whole statistics directory for the 3 different computed statistics used for the score. It will
remove the previously computed statistics.
Once done, this cannot be undone.
"""
# self.stat_no_overflow.clear_all()
self.stat_no_overflow_rp.clear_all()
self.stat_dn.clear_all()
self.__cleared = True
    def get(self, agent, path_save=None, nb_process=1):
        """
        Get the score of the agent depending on what has been computed.

        TODO The plots will be done later.

        Parameters
        ----------
        agent: :class:`grid2op.Agent.BaseAgent`
            The agent you want to score

        path_save: ``str``
            the path where you want to store the logs of your agent
            (a temporary directory is used, and deleted afterward, if not provided)

        nb_process: ``int``
            Number of processes to use for the evaluation

        Returns
        -------
        all_scores: ``list``
            List of the scores of your agent, one per scenario

        ts_survived: ``list``
            List of the number of steps your agent successfully managed for each scenario

        total_ts: ``list``
            Total number of steps for each scenario
        """
        # refuse to work if the reference statistics have been deleted
        if self.__cleared:
            raise RuntimeError(EpisodeStatistics.ERROR_MSG_CLEANED)
        if path_save is not None:
            need_delete = False  # TODO this is soooo dirty
            path_save = os.path.abspath(path_save)
        else:
            # no path given: use a throwaway directory, cleaned at the end
            need_delete = True
            dir_tmp = tempfile.TemporaryDirectory()
            path_save = dir_tmp.name
            if self.verbose >= 1:
                print("Using a temp directory to store the intermediate data.")  # TODO logger

        # run the agent on every scenario, saving its logs under `path_save`
        if self.verbose >= 1:
            print("Starts the evaluation of the agent")  # TODO logger
        EpisodeStatistics.run_env(
            self.env,
            env_seeds=self.env_seeds,
            agent_seeds=self.agent_seeds,
            path_save=path_save,
            parameters=self.env.parameters,
            scores_func=self.scores_func,
            agent=agent,
            max_step=self.max_step,
            nb_scenario=self.nb_scenario,
            pbar=self.verbose >= 2,
            nb_process=nb_process,
        )

        if self.verbose >= 1:
            print("Start the evaluation of the scores")  # TODO logger
        meta_data_dn = self.stat_dn.get_metadata()
        no_ov_metadata = self.stat_no_overflow_rp.get_metadata()

        all_scores = []
        ts_survived = []
        total_ts = []
        for ep_id in range(self.nb_scenario):
            # reload the metadata and extra rewards saved for this scenario
            this_ep_nm = meta_data_dn[f"{ep_id}"]["scenario_name"]
            with open(
                os.path.join(path_save, this_ep_nm, EpisodeData.META),
                "r",
                encoding="utf-8",
            ) as f:
                this_epi_meta = json.load(f)
            with open(
                os.path.join(path_save, this_ep_nm, EpisodeData.OTHER_REWARDS),
                "r",
                encoding="utf-8",
            ) as f:
                this_epi_scores = json.load(f)
            # rescale the raw episode cost into the final [-100, 100] score
            score_this_ep, nb_ts_survived, total_ts_tmp = self._compute_episode_score(
                ep_id,
                meta=this_epi_meta,
                other_rewards=this_epi_scores,
                dn_metadata=meta_data_dn,
                no_ov_metadata=no_ov_metadata,
            )
            all_scores.append(score_this_ep)
            ts_survived.append(nb_ts_survived)
            total_ts.append(total_ts_tmp)
        if need_delete:
            dir_tmp.cleanup()
        return all_scores, ts_survived, total_ts
if __name__ == "__main__":
    # Manual smoke test: compute the L2RPN 2020 score of a simple agent on
    # the sandbox environment (requires the third-party lightsim2grid backend).
    import grid2op
    from lightsim2grid import LightSimBackend
    from grid2op.Agent import RandomAgent, DoNothingAgent

    env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend())
    nb_scenario = 16
    my_score = ScoreL2RPN2020(
        env,
        nb_scenario=nb_scenario,
        # same seeds as the pre-computed statistics, for reproducibility
        env_seeds=[0 for _ in range(nb_scenario)],
        agent_seeds=[0 for _ in range(nb_scenario)],
    )
    my_agent = RandomAgent(env.action_space)
    # NOTE(review): this second assignment shadows the RandomAgent above,
    # so only the DoNothingAgent is actually evaluated.
    my_agent = DoNothingAgent(env.action_space)
    print(my_score.get(my_agent))
| 15,987 | 36.268065 | 120 | py |
Grid2Op | Grid2Op-master/grid2op/utils/l2rpn_idf_2023_scores.py | # Copyright (c) 2023, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.utils.l2rpn_2020_scores import ScoreL2RPN2020
from grid2op.Reward import L2RPNSandBoxScore, _NewRenewableSourcesUsageScore, _AssistantConfidenceScore, _AssistantCostScore
from grid2op.utils.underlying_statistics import EpisodeStatistics
class ScoreL2RPN2023(ScoreL2RPN2020):
"""
This class allows to compute the same score as the one computed for the L2RPN 2023 competitions.
It uses some "EpisodeStatistics" of the environment to compute these scores. These statistics, if not available
are computed at the initialization.
When using it a second time these information are reused.
This scores is the combination of the `ScoreL2RPN2020` score and some extra scores based on the assistant feature
(alert) and the use of new renewable energy sources.
Examples
---------
This class can be used as follow:
.. code-block:: python
import grid2op
from grid2op.utils import ScoreL2RPN2023
from grid2op.Agent import DoNothingAgent
env = grid2op.make("l2rpn_case14_sandbox")
nb_scenario = 2
my_score = ScoreL2RPN2023(env,
nb_scenario=nb_scenario,
env_seeds=[0 for _ in range(nb_scenario)],
agent_seeds=[0 for _ in range(nb_scenario)]
)
my_agent = DoNothingAgent(env.action_space)
print(my_score.get(my_agent))
Notes
-------
To prevent overfitting, we strongly recommend you to use the :func:`grid2op.Environment.Environment.train_val_split`
and use this function on the built validation set only.
    Also note that computing the statistics, and evaluating an agent on a whole dataset of multiple GB can take a
    really long time and a lot of memory. This fact, again, pleads in favor of using this function only on
    a validation set.
We also strongly recommend to set the seeds of your agent (agent_seeds)
and of the environment (env_seeds) if you want to use this feature. Reproducibility is really important if you
want to make progress.
.. warning::
The triggering (or not) of the recomputation of the statistics is not perfect for now.
We recommend you to use always
the same seeds (`env_seeds` and `agent_seeds` key word argument of this functions)
and the same parameters (`env.parameters`) when using a given environments.
You might need to clean it manually if you change
one of theses things by calling :func:`ScoreL2RPN2020.clear_all()` function .
"""
def __init__(
self,
env,
env_seeds=None,
agent_seeds=None,
nb_scenario=16,
min_losses_ratio=0.8,
verbose=0,
max_step=-1,
nb_process_stats=1,
scale_assistant_score=100.0,
scale_nres_score=100.,
weight_op_score=0.6,
weight_assistant_score=0.25,
weight_nres_score=0.15,
weight_confidence_assistant_score=0.7,
min_nres_score=-100,
):
ScoreL2RPN2020.__init__(
self,
env=env,
env_seeds=env_seeds,
agent_seeds=agent_seeds,
nb_scenario=nb_scenario,
min_losses_ratio=min_losses_ratio,
verbose=verbose,
max_step=max_step,
nb_process_stats=nb_process_stats,
scores_func={
"grid_operational_cost": L2RPNSandBoxScore,
#"assistance_confidence": _AssistantConfidenceScore,
#"assistant_cost": _AssistantCostScore,
"new_renewable_sources_usage": _NewRenewableSourcesUsageScore,
},
score_names=["grid_operational_cost_scores",
#"assistant_confidence_scores",
#"assistant_cost_scores",
"new_renewable_sources_usage_scores"],
)
assert(weight_op_score + weight_assistant_score + weight_nres_score==1.)
assert(all([weight_confidence_assistant_score>=0., weight_confidence_assistant_score<=1.]))
self.scale_assistant_score = scale_assistant_score
self.scale_nres_score = scale_nres_score
self.weight_op_score = weight_op_score
self.weight_assistant_score = weight_assistant_score
self.weight_nres_score = weight_nres_score
self.weight_confidence_assistant_score = weight_confidence_assistant_score
self.min_nres_score = min_nres_score
def _compute_episode_score(
self,
ep_id, # the ID here, which is an integer and is not the ID from chronics balblabla
meta,
other_rewards,
dn_metadata,
no_ov_metadata,
score_file_to_use="grid_operational_cost_scores",
):
"""
Performs the rescaling of the score given the information stored in the "statistics" of this
environment.
This computes the score for a single episode. The loop to compute the score for all the
episodes is the same as for l2rpn_2020_scores and is then reused.
"""
# compute the operational score
op_score, n_played, total_ts = super()._compute_episode_score(
ep_id,
meta,
other_rewards,
dn_metadata,
no_ov_metadata,
# score_file_to_use should match the
# L2RPNSandBoxScore key in
# self.scores_func
score_file_to_use=score_file_to_use,
)
# should match underlying_statistics.run_env `dict_kwg["other_rewards"][XXX] = ...`
# XXX is right now f"{EpisodeStatistics.KEY_SCORE}_{nm}" [this should match the XXX]
#retrieve nres_score
new_renewable_sources_usage_score_nm = "new_renewable_sources_usage_scores"
real_nm = EpisodeStatistics._nm_score_from_attr_name(new_renewable_sources_usage_score_nm)
key_score_file = f"{EpisodeStatistics.KEY_SCORE}_{real_nm}"
nres_score = float(other_rewards[-1][key_score_file])
nres_score = max(nres_score, self.min_nres_score / self.scale_nres_score)
nres_score = self.scale_nres_score * nres_score
#assistant_confidence_score
# new_renewable_sources_usage_score_nm = "assistant_confidence_scores"
# real_nm = EpisodeStatistics._nm_score_from_attr_name(new_renewable_sources_usage_score_nm)
# key_score_file = f"{EpisodeStatistics.KEY_SCORE}_{real_nm}"
# assistant_confidence_score = float(other_rewards[-1][key_score_file])
assistant_confidence_score = 0 #self.scale_assistant_score * assistant_confidence_score
#assistant_cost_score
# new_renewable_sources_usage_score_nm = "assistant_cost_scores"
# real_nm = EpisodeStatistics._nm_score_from_attr_name(new_renewable_sources_usage_score_nm)
# key_score_file = f"{EpisodeStatistics.KEY_SCORE}_{real_nm}"
# assistant_cost_score = float(other_rewards[-1][key_score_file])
assistant_cost_score = 0 #self.scale_assistant_score * assistant_cost_score
assistant_score = self.weight_confidence_assistant_score * assistant_confidence_score +\
(1. - self.weight_confidence_assistant_score) * assistant_cost_score
ep_score = (
self.weight_op_score * op_score + self.weight_nres_score * nres_score + self.weight_assistant_score * assistant_score
)
return (ep_score, op_score, nres_score, assistant_confidence_score, assistant_cost_score), n_played, total_ts
| 8,080 | 42.213904 | 129 | py |
Grid2Op | Grid2Op-master/grid2op/utils/l2rpn_wcci_2022_scores.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
from grid2op.utils.l2rpn_2020_scores import ScoreL2RPN2020
from grid2op.Reward import L2RPNWCCI2022ScoreFun
class ScoreL2RPN2022(ScoreL2RPN2020):
    """Score used for the L2RPN 2022 competition (WCCI 2022 edition).

    It is exactly the :class:`ScoreL2RPN2020` score, except that the
    operational cost is computed with :class:`L2RPNWCCI2022ScoreFun`
    by default instead of the 2020 scoring function.
    """

    def __init__(
        self,
        env,
        env_seeds=None,
        agent_seeds=None,
        nb_scenario=16,
        min_losses_ratio=0.8,
        verbose=0,
        max_step=-1,
        nb_process_stats=1,
        scores_func=L2RPNWCCI2022ScoreFun,
        score_names=None,
    ):
        # everything is delegated to the parent class; only the default
        # scoring function differs
        super().__init__(
            env,
            env_seeds=env_seeds,
            agent_seeds=agent_seeds,
            nb_scenario=nb_scenario,
            min_losses_ratio=min_losses_ratio,
            verbose=verbose,
            max_step=max_step,
            nb_process_stats=nb_process_stats,
            scores_func=scores_func,
            score_names=score_names,
        )
| 1,257 | 43.928571 | 147 | py |
Grid2Op | Grid2Op-master/grid2op/utils/underlying_statistics.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
import copy
import os
import json
import shutil
import re
import numpy as np
from grid2op.dtypes import dt_float
from grid2op.Agent import BaseAgent, DoNothingAgent
from grid2op.Parameters import Parameters
from grid2op.Runner import Runner
from grid2op.Environment import MultiMixEnvironment
from grid2op.Episode import EpisodeData
from grid2op.Reward import BaseReward
from grid2op.Exceptions import Grid2OpException
class EpisodeStatistics(object):
"""
This class allows to serialize / de serialize some information about the data of a given environment.
Its use happens in two steps:
    - :func:`EpisodeStatistics.compute` where you run some experiments to generate some data. Be careful, some data
(for example obs.a_or, obs.rho etc.) depends on the agent you use! This needs to be performed at least once.
- :func:`EpisodeStatistics.get` retrieve the stored information and get back a numpy array with each rows
representing a step.
Note that it does not check what agent do you use. If you want statistics on more than 1 agent, please use
the `name_stats` key word attribute when you create the `EpisodeStatistics` object.
Examples
---------
A basic use of this class is the following:
.. code-block:: python
import grid2op
from grid2op.utils import EpisodeStatistics
env = grid2op.make()
stats = EpisodeStatistics(env)
#################################
# This need to be done only once
stats.compute(nb_scenario=100) # this will take a while to compute in most cases
################################
rhos_, scenario_ids = stats.get("rho")
load_p_, scenario_ids = stats.get("load_p")
# do something with them
If you want some statistics with different agent you might also consider giving some names to the way they are
saved as follow:
.. code-block:: python
import grid2op
from grid2op.utils import EpisodeStatistics
from grid2op.Parameters import Parameters
env = grid2op.make()
nb_scenario = 8
# for a example a simple do nothing agent
stats_dn = EpisodeStatistics(env, name_stats="do_nothing")
stats_dn.compute(nb_scenario=nb_scenario) # this will take a while to compute in most cases
# you can also change the parameters
param = Parameters()
param.NO_OVERFLOW_DISCONNECTION = True
stats_no_overflow = EpisodeStatistics(env, name_stats="no_overflow")
stats_no_overflow.compute(nb_scenario=nb_scenario, parameters=param) # this will take a while to compute in most cases
# or use a different agent
my_agent = ... # use any grid2op agent you want here
stats_custom_agent = EpisodeStatistics(env, name_stats="custom_agent")
stats_custom_agent.compute(nb_scenario=nb_scenario, agent=my_agent) # this will take a while to compute in most cases
# and then you can retrieve the statistics
rho_dn, ids = stats_dn.get("rho")
rho_dn_all, ids = stats_no_overflow.get("rho")
rho_custom_agent, ids = stats_custom_agent.get("rho")
Notes
-------
The observations computed highly depends on the agent and the stochastic part of the environment, such as the
maintenance or the opponent etc. We highly recommend you to use the env_seeds and agent_seeds keyword arguments
when using the :func:`EpisodeStatistics.compute` function.
"""
# TODO NB: name for generator are saved as "prod_p.npz", "prod_v.npz" and "prod_q.npz" and not
# TODO NB: "gen_p.npz" for backward compatibility.
SCENARIO_IDS = "scenario_ids.npz"
SCORES = "scores.npz"
SCORES_CLEAN = re.sub("\\.npz", "", SCORES)
KEY_SCORE = "__scores"
SCORE_FOOTPRINT = ".has_score"
STATISTICS_FOLDER = "_statistics"
STATISTICS_FOOTPRINT = ".statistics"
METADATA = "metadata.json"
ERROR_MSG_CLEANED = ("This statistics has been removed from the hard drive through a call to "
"`stat.clear_all()`. You cannot use it anymore.")
def __init__(self, env, name_stats=None):
if isinstance(env, MultiMixEnvironment):
raise RuntimeError("MultiMixEnvironment are not supported at the moment")
self.env = env
self.path_env = self.env.get_path_env()
nm_ = self.get_name_dir(name_stats)
self.path_save_stats = os.path.join(self.path_env, nm_)
self.li_attributes = self.env.observation_space.attr_list_vect
self.__cleared = False
@staticmethod
def get_name_dir(name_stats):
"""return the name of the folder in which the statistics will be computed"""
if name_stats is not None:
nm_ = f"{EpisodeStatistics.STATISTICS_FOLDER}_{name_stats}"
else:
nm_ = EpisodeStatistics.STATISTICS_FOLDER
return nm_
def get_name_file(self, observation_attribute):
"""get the name of the file that is used to save a given attribute names"""
if observation_attribute not in self.li_attributes:
raise RuntimeWarning(
f'Unknown observation attribute: "{observation_attribute}"'
)
# backward compatibility
if observation_attribute == "gen_p":
observation_attribute = "prod_p"
elif observation_attribute == "gen_q":
observation_attribute = "prod_q"
elif observation_attribute == "gen_v":
observation_attribute = "prod_v"
return f"obs_{observation_attribute}.npz"
def _delete_if_exists(self, path_tmp, episode_name, saved_stuff):
full_path = os.path.join(path_tmp, episode_name, saved_stuff)
if os.path.exists(full_path) and os.path.isfile(full_path):
os.remove(full_path)
@staticmethod
def _save_numpy(path, array):
np.savez_compressed(path, data=array)
@staticmethod
def _load(path):
return np.load(path)["data"]
    def _clean_observations(self, path_tmp, episode_name):
        """Split the bulk observation file of one episode into one compressed
        file per observation attribute, then delete the bulk file.

        Does nothing when the bulk observation file is absent.
        """
        full_path = os.path.join(path_tmp, episode_name, EpisodeData.OBSERVATIONS_FILE)
        if not os.path.exists(full_path) or not os.path.isfile(full_path):
            # this is not a proper path for the observation
            return
        # todo the way to load back the saved data need to be done in episode data instead
        all_obs = np.load(full_path)["data"]

        # keep only the steps actually played (+1: the initial observation
        # produced by the environment is also stored)
        with open(
            os.path.join(path_tmp, episode_name, EpisodeData.META),
            "r",
            encoding="utf-8",
        ) as f:
            metadata_ep = json.load(f)
        nb_ts = int(metadata_ep["nb_timestep_played"]) + 1
        all_obs = all_obs[:nb_ts, :]
        # slice the flat observation vector attribute by attribute and save
        # each one in its own file
        for obs_nm in self.env.observation_space.attr_list_vect:
            beg_, end_, dtype = self.env.observation_space.get_indx_extract(obs_nm)
            all_attr = all_obs[:, beg_:end_].astype(dtype)
            self._save_numpy(
                os.path.join(path_tmp, episode_name, self.get_name_file(obs_nm)),
                all_attr,
            )
        # the bulk file is redundant now, reclaim the disk space
        self._delete_if_exists(path_tmp, episode_name, EpisodeData.OBSERVATIONS_FILE)
    def _gather_all(self, li_episodes, dict_metadata, score_names):
        """Gather all the data from all the episodes into large arrays (for easier access later on).

        For every observation attribute, the per-episode files are
        concatenated into one big array saved at the root of the statistics
        folder. The scenario-id vector, the per-episode scores and the
        metadata json are saved once (during the first attribute iteration).
        """
        if len(li_episodes) == 0:
            return

        ids_ = np.zeros(shape=(0, 1))
        scores = None
        if score_names:
            scores = {el: None for el in score_names}
        # ids / scores / metadata only need to be gathered once, during the
        # first pass over the episodes
        first_attr = True
        for obs_nm in self.li_attributes:
            res = None
            for i, (path_tmp, episode_name) in enumerate(li_episodes):
                # retrieve the content of the attribute for this episode
                tmp_arr = self._load(
                    os.path.join(path_tmp, episode_name, self.get_name_file(obs_nm))
                )
                if res is None:
                    res = tmp_arr
                else:
                    res = np.concatenate((res, tmp_arr))

                if first_attr:
                    dict_metadata[f"{i}"] = {
                        "path": path_tmp,
                        "scenario_name": episode_name,
                        "nb_step": int(tmp_arr.shape[0]),
                    }

                # save the ids corresponding to each scenario (but only once)
                if first_attr:
                    scen_sz = tmp_arr.shape[0]
                    tmp_ids = np.ones(scen_sz, dtype=int).reshape((-1, 1))
                    tmp_ids *= i
                    tmp_ids = tmp_ids.astype(int)
                    ids_ = np.concatenate((ids_, tmp_ids))

                    # handles the scores (same, only once)
                    if score_names:
                        for el in score_names:
                            tmp_scor = self._load(
                                os.path.join(path_tmp, episode_name, el)
                            )
                            # NOTE(review): inside this loop `score_names` is
                            # necessarily non-empty, so this condition can
                            # never hold and the plain "scores" key is never
                            # written — presumably `== 1` was intended;
                            # confirm against `_retrieve_scores` before changing.
                            if len(score_names) == 0:
                                dict_metadata[f"{i}"]["scores"] = float(
                                    np.sum(tmp_scor)
                                )
                            else:
                                dict_metadata[f"{i}"][f"scores_{el}"] = float(
                                    np.sum(tmp_scor)
                                )
                            if scores[el] is None:
                                scores[el] = tmp_scor
                            else:
                                scores[el] = np.concatenate((scores[el], tmp_scor))

            # save the gathered content of this attribute at the root of the
            # statistics folder (first element of li_episodes)
            path_total = li_episodes[0][0]
            self._save_numpy(
                os.path.join(path_total, self.get_name_file(obs_nm)), array=res
            )
            # save the ids, the metadata and the scores, but only once
            if first_attr:
                self._save_numpy(
                    os.path.join(path_total, self.SCENARIO_IDS), array=ids_
                )
                if score_names:
                    for el in scores:
                        self._save_numpy(os.path.join(path_total, el), array=scores[el])
                    del scores
                del ids_
                with open(
                    os.path.join(path_total, EpisodeStatistics.METADATA),
                    "w",
                    encoding="utf-8",
                ) as f:
                    json.dump(obj=dict_metadata, fp=f)
            first_attr = False
@staticmethod
def list_stats(env):
"""this is a function listing all the stats that have been computed for this environment"""
res = []
path_env = env.get_path_env()
for el in os.listdir(path_env):
if os.path.exists(
os.path.join(path_env, el, EpisodeStatistics.STATISTICS_FOOTPRINT)
):
res.append((path_env, el))
return sorted(res)
@staticmethod
def _nm_score_from_attr_name(attribute_name):
return re.sub(
f"(_{{0,1}}{EpisodeStatistics.SCORES_CLEAN})|(\\.npz)|(\\.npy)",
"",
attribute_name,
)
@staticmethod
def _is_score_attribute(attribute_name):
nm = None
# test if it a single stat or not
has_stat = (
attribute_name == EpisodeStatistics.SCORES
or attribute_name == EpisodeStatistics.SCORES_CLEAN
)
if has_stat:
nm = EpisodeStatistics.SCORES
else:
# i test if it's a statistics with multiple scores
if (
re.match(f".*_{EpisodeStatistics.SCORES_CLEAN}", attribute_name)
is not None
):
# it's a match: multiple score were computed for this name
# i need to compute the name with which the files are stored
nm_stat = EpisodeStatistics._nm_score_from_attr_name(attribute_name)
has_stat = True
nm = f"{nm_stat}_{EpisodeStatistics.SCORES}" # should be the same as in "_retrieve_scores" function
return has_stat, nm
    def get(self, attribute_name):
        """
        This function supposes that you previously ran the :func:`EpisodeStatistics.compute` to have lots of
        observations.
        It allows the retrieval of the information about the observation that were previously stored on drive.
        Parameters
        ----------
        attribute_name: ``str``
            The name of the attribute of an observation on which you want some information (or the name of
            a score, e.g. "scores" or "<name>_scores").
        Returns
        -------
        values: ``numpy.ndarray``
            All the values for the "attribute_name" of all the observations that were obtained when running the
            :func:`EpisodeStatistics.compute`. It has the shape (nb step, dim_attribute).
        ids: ``numpy.ndarray``
            The scenario ids to which belong the "values" value. It has the same number of rows than "values" but
            only one column. This unique column contains an integer. If two rows have the same id then they come from
            the same scenario.
        """
        # refuse to read anything after clear_all() wiped the directory
        if self.__cleared:
            raise RuntimeError(EpisodeStatistics.ERROR_MSG_CLEANED)
        # backward compatibility: "prod_*" attributes were renamed "gen_*"
        if attribute_name == "prod_p":
            attribute_name = "gen_p"
        elif attribute_name == "prod_q":
            attribute_name = "gen_q"
        elif attribute_name == "prod_v":
            attribute_name = "gen_v"
        if not os.path.exists(self.path_save_stats) or not os.path.isdir(
            self.path_save_stats
        ):
            raise RuntimeError(
                "No statistics were computed for this environment. "
                'Please use "self.compute()" to compute them. '
                "And most importantly have a look at the documentation for precisions about this "
                "feature."
            )
        # one scenario id per stored time step
        ids = self._load(
            os.path.join(self.path_save_stats, EpisodeStatistics.SCENARIO_IDS)
        ).astype(int)
        is_score, score_name = EpisodeStatistics._is_score_attribute(attribute_name)
        if is_score:
            if not self._get_has_score():
                # no score has been saved (SCORE_FOOTPRINT marker is missing)
                raise RuntimeError(
                    'No score have been computed for this statistics. Please re run "stats.compute" '
                    'by setting the "scores_func" argument.'
                )
            # TODO here for multiple score
            path_th = os.path.join(self.path_save_stats, score_name)
            # NOTE(review): the diff trick below appears to drop each scenario's
            # last time step, since scores have one entry fewer than observations
            # per scenario — confirm against _gather_all / _retrieve_scores
            ids_ = np.concatenate((ids[:, 0], (-1,)))
            diff_ = np.diff(ids_)
            ids = ids[diff_ == 0, :]
        else:
            path_th = os.path.join(
                self.path_save_stats, self.get_name_file(attribute_name)
            )
        if not os.path.exists(path_th) or not os.path.isfile(path_th):
            raise RuntimeError(
                f'Impossible to read the statistics for attribute "{attribute_name}"'
            )
        array_ = self._load(path_th)
        return array_, ids
def clear_episode_data(self):
"""
Has side effects
.. warning:: /!\\\\ Be careful /!\\\\
To save space, it clears the data for each episode.
This is permanent. If you want this data to be available again, you will need to run an expensive
:func:`EpisodeStatistics.compute` again.
Notes
-----
It clears all directory into the "statistics" directory
"""
if not os.path.exists(self.path_save_stats) or not os.path.isdir(
self.path_save_stats
):
raise RuntimeError(
"No statistics have been saved for this environment. Please use "
'"stat.compute" to save some (this might take a while, '
"see the documentation)"
)
for episode_name in sorted(os.listdir(self.path_save_stats)):
path_tmp = os.path.join(self.path_save_stats, episode_name)
if os.path.isdir(path_tmp):
shutil.rmtree(path_tmp)
def clear_all(self):
"""
Has side effects
.. warning:: /!\\\\ Be careful /!\\\\
Clear the whole statistics directory.
This is permanent. If you want this data to be available again, you will need to run an expensive
:func:`EpisodeStatistics.compute` again.
Once done, this cannot be undone.
"""
if os.path.exists(self.path_save_stats) and os.path.isdir(self.path_save_stats):
shutil.rmtree(self.path_save_stats, ignore_errors=True)
self.__cleared = True
@staticmethod
def clean_all_stats(env):
"""
Has possibly huge side effects
.. warning:: /!\\\\ Be extremely careful /!\\\\
This function cleans all the statistics that have been computed for this environment.
This cannot be undone is permanent and is equivalent to calling :func:`EpisodeStatistics.clear_all` on all
statistics ever computed on this episode.
"""
li_stats = EpisodeStatistics.list_stats(env)
for path, el in li_stats:
shutil.rmtree(os.path.join(path, el))
def _tell_is_stats(self):
"""put the footprint to inform grid2op this is a stat directory"""
path_tmp = os.path.join(
self.path_save_stats, EpisodeStatistics.STATISTICS_FOOTPRINT
)
with open(path_tmp, "w", encoding="utf-8") as f:
f.write(
"This files is internal to grid2op. Expect some inconsistent behaviour if you attempt to modify "
"it, remove it, alter it in any ways, copy it in another directory etc.\n"
)
def _tell_has_score(self):
"""put the footprint to inform grid2op this is a stat directory"""
path_tmp = os.path.join(self.path_save_stats, EpisodeStatistics.SCORE_FOOTPRINT)
with open(path_tmp, "w", encoding="utf-8") as f:
f.write(
"This files is internal to grid2op. Expect some inconsistent behaviour if you attempt to modify "
"it, remove it, alter it in any ways, copy it in another directory etc.\n"
)
def _get_has_score(self):
"""say if a score has been computed or not"""
res = os.path.exists(
os.path.join(self.path_save_stats, EpisodeStatistics.SCORE_FOOTPRINT)
)
if res:
res = os.path.isfile(
os.path.join(self.path_save_stats, EpisodeStatistics.SCORE_FOOTPRINT)
)
return res
def _fill_metadata(self, agent, parameters, max_step, agent_seeds, env_seeds):
dict_metadata = {}
dict_metadata["agent_type"] = f"{type(agent)}"
if agent_seeds is None:
dict_metadata["agent_seeds"] = None
else:
dict_metadata["agent_seeds"] = [int(el) for el in agent_seeds]
if env_seeds is None:
dict_metadata["env_seeds"] = None
else:
dict_metadata["env_seeds"] = [int(el) for el in env_seeds]
dict_metadata["max_step"] = int(max_step)
dict_metadata["parameters"] = parameters.to_dict()
return dict_metadata
    def _retrieve_scores(self, path_tmp, episode_name):
        """Extract the score(s) from the episode "other_rewards" json and save them as numpy files.

        A single score is saved under ``self.SCORES``; named scores (keys shaped
        like ``"<KEY_SCORE>_<name>"``) are each saved under ``"<name>_<SCORES>"``,
        the naming that :func:`EpisodeStatistics._is_score_attribute` expects.
        """
        my_path = os.path.join(path_tmp, episode_name, EpisodeData.OTHER_REWARDS)
        with open(my_path, "r", encoding="utf-8") as f:
            dict_rewards = json.load(f)
        if not len(dict_rewards):
            # nothing to do if the dictionary is empty
            return
        # check if the score is unique or if there are multiple scores
        # (the first entry is assumed representative of all time steps)
        tmp = dict_rewards[0]
        if self.KEY_SCORE in tmp:
            # only one score was used
            arr_ = np.array([dt_float(el[self.KEY_SCORE]) for el in dict_rewards])
            self._save_numpy(os.path.join(path_tmp, episode_name, self.SCORES), arr_)
        else:
            for possible_key in tmp:
                # the trailing space in "{self.KEY_SCORE }" is inside the f-string
                # expression braces, so it is stripped at parse time and harmless
                if re.match(f"{self.KEY_SCORE }_.*", possible_key) is None:
                    # this key does not represent a score
                    continue
                nm_score = re.sub(f"{self.KEY_SCORE }_", "", possible_key)
                arr_ = np.array([dt_float(el[possible_key]) for el in dict_rewards])
                self._save_numpy(
                    os.path.join(path_tmp, episode_name, f"{nm_score}_{self.SCORES}"),
                    arr_,
                )
@staticmethod
def _check_if_base_reward(stuff):
if isinstance(stuff, type):
return issubclass(stuff, BaseReward)
else:
return isinstance(stuff, BaseReward)
    @staticmethod
    def run_env(
        env,
        path_save,
        parameters,
        scores_func,
        agent,
        nb_scenario,
        max_step,
        env_seeds,
        agent_seeds,
        pbar,
        nb_process,
    ):
        """Run ``agent`` on ``env`` through a Runner and save everything under ``path_save``.

        ``scores_func`` may be ``None`` (no score), a BaseReward (single score
        stored under ``KEY_SCORE``) or a dict of BaseReward (one score per key,
        stored under ``"<KEY_SCORE>_<name>"``); it is registered as "other_rewards"
        so each score is evaluated alongside the main reward.
        """
        if scores_func is not None:
            if not (
                EpisodeStatistics._check_if_base_reward(scores_func)
                or isinstance(scores_func, dict)
            ):
                raise Grid2OpException(
                    "score_func should be either a dictionary or an instance of BaseReward"
                )
        # build the Runner kwargs from the environment, overriding the parameters
        dict_kwg = env.get_params_for_runner()
        dict_kwg["parameters_path"] = parameters.to_dict()
        if "other_rewards" not in dict_kwg:
            dict_kwg["other_rewards"] = {}
        if scores_func is not None:
            if EpisodeStatistics._check_if_base_reward(scores_func):
                dict_kwg["other_rewards"][EpisodeStatistics.KEY_SCORE] = scores_func
            elif isinstance(scores_func, dict):
                for nm, score_fun in scores_func.items():
                    dict_kwg["other_rewards"][
                        f"{EpisodeStatistics.KEY_SCORE}_{nm}"
                    ] = score_fun
            else:
                # unreachable given the check above, kept as a safety net
                raise RuntimeError(
                    '"scores_func" should inherit from "grid2op.Reward.BaseReward" or '
                    "be a dictionary"
                )
        runner = Runner(**dict_kwg, agentClass=None, agentInstance=agent)
        runner.run(
            path_save=path_save,
            nb_episode=nb_scenario,
            max_iter=max_step,
            env_seeds=env_seeds,
            agent_seeds=agent_seeds,
            pbar=pbar,
            nb_process=nb_process,
        )
def get_metadata(self):
"""return the metadata as a dictionary"""
if self.__cleared:
raise RuntimeError(EpisodeStatistics.ERROR_MSG_CLEANED)
with open(
os.path.join(self.path_save_stats, self.METADATA), "r", encoding="utf-8"
) as f:
res = json.load(f)
return res
    def compute(
        self,
        agent=None,
        parameters=None,
        nb_scenario=1,
        scores_func=None,
        max_step=-1,
        env_seeds=None,
        agent_seeds=None,
        nb_process=1,
        pbar=False,
    ):
        """
        This function will save (to be later used with :func:`EpisodeStatistics.get_statistics`) all the observation
        at all time steps, for a given number of scenario (see attributes nb_scenario).
        This is useful when you want to store at a given place some information to use later on on your agent.
        Notes
        -----
        Depending on its parameters (mainly the environment, the agent and the number of scenarios computed)
        this function might take a really long time to compute.
        However you only need to compute it once (unless you delete its results with
        :func:`EpisodeStatistics.clear_all` or :func:`EpisodeStatistics.clear_episode_data`
        Results might also take a lot of space on the hard drive (possibly few GB as all information of all
        observations encountered are stored)
        Parameters
        ----------
        agent: :class:`grid2op.Agent.BaseAgent`
            The agent you want to use to generate the statistics. Note that the statistics are highly dependant on
            the agent. For now only one set of statistics are computed. If you want to run a different agent previous
            results will be erased.
        parameters: :class:`grid2op.Parameters.Parameters`
            The parameters you want to use when computing this statistics
        nb_scenario: ``int``
            On how many scenarios you want the statistics to be computed
        scores_func: :class:`grid2op.Reward.BaseReward`
            A reward used to compute the score of an Agent (it can now be a dictionary of BaseReward)
        max_step: ``int``
            Maximum number of steps you want to compute (see :func:`grid2op.Runner.Runner.run`)
        env_seeds: ``list``
            List of seeds used for the environment (for reproducible results) (see :func:`grid2op.Runner.Runner.run`)
        agent_seeds: ``list``
            List of seeds used for the agent (for reproducible results) (see :func:`grid2op.Runner.Runner.run`).
        nb_process: ``int``
            Number of process to use (see :func:`grid2op.Runner.Runner.run`)
        pbar: ``bool``
            Whether a progress bar is displayed (see :func:`grid2op.Runner.Runner.run`)
        """
        # default agent: do nothing; default parameters: a copy of the env's
        if agent is None:
            agent = DoNothingAgent(self.env.action_space)
        if parameters is None:
            parameters = copy.deepcopy(self.env.parameters)
        if not isinstance(agent, BaseAgent):
            raise RuntimeError(
                '"agent" should be either "None" to use DoNothingAgent or an agent that inherits '
                "grid2op.Agent.BaseAgent"
            )
        if not isinstance(parameters, Parameters):
            raise RuntimeError(
                '"parameters" should be either "None" to use the default parameters passed in the '
                "environment or inherits grid2op.Parameters.Parameters"
            )
        # score_names collects the file names under which each score is stored
        score_names = None
        dict_metadata = self._fill_metadata(
            agent, parameters, max_step, agent_seeds, env_seeds
        )
        if scores_func is not None:
            if EpisodeStatistics._check_if_base_reward(scores_func):
                dict_metadata["score_class"] = f"{scores_func}"
                score_names = [self.SCORES]
            elif isinstance(scores_func, dict):
                score_names = []
                for nm, score_fun in scores_func.items():
                    if not EpisodeStatistics._check_if_base_reward(score_fun):
                        raise Grid2OpException(
                            'if using "score_fun" as a dictionary, each value need to be a '
                            "BaseReward"
                        )
                    dict_metadata[f"score_class_{nm}"] = f"{score_fun}"
                    score_names.append(f"{nm}_{self.SCORES}")
            else:
                raise Grid2OpException(
                    "score_func should be either a dictionary or an instance of BaseReward"
                )
        # the expensive part: run the agent and save everything on disk
        self.run_env(
            env=self.env,
            path_save=self.path_save_stats,
            parameters=parameters,
            scores_func=scores_func,
            agent=agent,
            max_step=max_step,
            env_seeds=env_seeds,
            agent_seeds=agent_seeds,
            pbar=pbar,
            nb_process=nb_process,
            nb_scenario=nb_scenario,
        )
        # inform grid2op this is a statistics directory
        self._tell_is_stats()
        if scores_func is not None:
            self._tell_has_score()
        # now clean a bit the output directory: the serialized spaces saved by
        # the runner are not kept with the statistics
        os.remove(os.path.join(self.path_save_stats, EpisodeData.ACTION_SPACE))
        os.remove(os.path.join(self.path_save_stats, EpisodeData.ATTACK_SPACE))
        os.remove(os.path.join(self.path_save_stats, EpisodeData.ENV_MODIF_SPACE))
        os.remove(os.path.join(self.path_save_stats, EpisodeData.OBS_SPACE))
        li_episodes = EpisodeData.list_episode(self.path_save_stats)
        for path_tmp, episode_name in li_episodes:
            # remove the useless information (saved but not used)
            self._delete_if_exists(path_tmp, episode_name, EpisodeData.ACTIONS_FILE)
            self._delete_if_exists(path_tmp, episode_name, EpisodeData.AG_EXEC_TIMES)
            self._delete_if_exists(path_tmp, episode_name, EpisodeData.LINES_FAILURES)
            self._delete_if_exists(path_tmp, episode_name, EpisodeData.ENV_ACTIONS_FILE)
            self._delete_if_exists(path_tmp, episode_name, EpisodeData.ATTACK)
            if scores_func is not None:
                # turn the "other_rewards" json into numpy score files
                self._retrieve_scores(path_tmp, episode_name)
            else:
                self._delete_if_exists(
                    path_tmp, episode_name, EpisodeData.OTHER_REWARDS
                )
            self._delete_if_exists(path_tmp, episode_name, EpisodeData.REWARDS)
            # reformat the observation into a proper "human readable" format
            self._clean_observations(path_tmp, episode_name)
        # and now gather the information for the top level
        self._gather_all(li_episodes, dict_metadata, score_names=score_names)
if __name__ == "__main__":
    # Manual-test / example driver: computes "do nothing" statistics with two
    # named scores. Requires grid2op environments and lightsim2grid installed.
    import grid2op
    from lightsim2grid import LightSimBackend
    from grid2op.Agent import RandomAgent
    from grid2op.Reward import L2RPNSandBoxScore, AlarmReward
    # env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend())
    nb_scenario = 2
    # # for a example a simple do nothing agent
    # stats_dn = EpisodeStatistics(env, name_stats="do_nothing")
    # stats_dn.compute(nb_scenario=nb_scenario,
    #                  pbar=True,
    #                  scores_func=L2RPNSandBoxScore)  # this will take a while to compute in most cases
    # stats_dn.clear_episode_data()
    #
    # # you can also change the parameters
    # param = Parameters()
    # param.NO_OVERFLOW_DISCONNECTION = True
    # stats_no_overflow = EpisodeStatistics(env, name_stats="no_overflow")
    # stats_no_overflow.compute(nb_scenario=nb_scenario,
    #                           parameters=param,
    #                           pbar=True,
    #                           scores_func=L2RPNSandBoxScore)  # this will take a while to compute in most cases
    # stats_no_overflow.clear_episode_data()
    #
    # # or use a different agent
    # my_agent = RandomAgent(env.action_space)  # use any grid2op agent you want here
    # stats_custom_agent = EpisodeStatistics(env, name_stats="custom_agent")
    # stats_custom_agent.compute(nb_scenario=nb_scenario,
    #                            agent=my_agent,
    #                            pbar=True,
    #                            scores_func=L2RPNSandBoxScore)  # this will take a while to compute in most cases
    # stats_custom_agent.clear_episode_data()
    #
    # # and then you can retrieve the statistics
    # rho_dn, ids = stats_dn.get("rho")
    # rho_dn_all, ids = stats_no_overflow.get("rho")
    # rho_custom_agent, ids = stats_custom_agent.get("rho")
    # with multiple "scores"
    env = grid2op.make(
        "l2rpn_neurips_2020_track1_with_alarm",
        backend=LightSimBackend(),
    )
    stats_dn = EpisodeStatistics(env, name_stats="do_nothing")
    stats_dn.compute(
        nb_scenario=nb_scenario,
        pbar=True,
        scores_func={
            "grid_operational_cost": L2RPNSandBoxScore,
            "operator_attention": AlarmReward,
        },
    )
    # rho_dn, ids = stats_dn.get("rho")
    score_op_cost, ids = stats_dn.get("grid_operational_cost_scores")
    score_att_cost, ids = stats_dn.get("operator_attention_scores")
    # removed the leftover "import pdb; pdb.set_trace()" that stopped the
    # script at an interactive debugger before the sanity check below
    assert score_att_cost.shape[0] == ids.shape[0]
| 32,419 | 39.323383 | 128 | py |
Grid2Op | Grid2Op-master/utils/edit_layout.py | #!/usr/bin/env python3
import sys
import os
import json
import argparse
import grid2op
from grid2op.PlotGrid import PlotMatplot
def edit_layout(ds_name, test=False):
    """Interactively move substations of a grid2op layout and print the result as JSON."""
    env = grid2op.make(ds_name, test=test)
    plotter = PlotMatplot(env.observation_space)
    fig = plotter.plot_layout()
    fig.show()
    while True:
        # substation selection prompt; any answer containing "exit" stops editing
        choice = input("exit or sub id: ")
        if "exit" in choice:
            break
        sub_name = env.name_sub[int(choice)]
        cur_x = plotter._grid_layout[sub_name][0]
        cur_y = plotter._grid_layout[sub_name][1]
        print("{} [{};{}]".format(sub_name, cur_x, cur_y))
        # an empty answer keeps the current coordinate
        answer_x = input("{} new x: ".format(sub_name))
        new_x = cur_x if len(answer_x) == 0 else float(answer_x)
        answer_y = input("{} new y: ".format(sub_name))
        new_y = cur_y if len(answer_y) == 0 else float(answer_y)
        # apply the new coordinates and refresh the figure
        plotter._grid_layout[sub_name][0] = new_x
        plotter._grid_layout[sub_name][1] = new_y
        plotter.plot_info(figure=fig)
        fig.canvas.draw()
    # keep only the substation entries of the layout and dump them
    subs_layout = {k: v for k, v in plotter._grid_layout.items() if k in env.name_sub}
    print(json.dumps(subs_layout, indent=2))
if __name__ == "__main__":
    # command-line entry point
    arg_parser = argparse.ArgumentParser(description='Grid layout editor')
    arg_parser.add_argument('--dataset', required=True, type=str,
                            help='Path to dataset directory')
    arg_parser.add_argument('--test', default=False, action="store_true",
                            help='Pass test=True to grid2op.make')
    cli_args = arg_parser.parse_args()
    edit_layout(cli_args.dataset, test=cli_args.test)
| 2,049 | 27.082192 | 72 | py |
Grid2Op | Grid2Op-master/utils/make_release.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
# This script will update automatically create a new release
# - setup.py
# - grid2op/__init__.py
# - docs/conf.py
# - Dockerfile
import sys
import os
import argparse
import re
import subprocess
import time
def start_subprocess_print(li, sleepbefore=2, cwd=None):
    """Announce a shell command, wait a bit, then run it.

    Parameters
    ----------
    li: ``list``
        The command and its arguments, as passed to ``subprocess.run``.
    sleepbefore: ``int``
        Seconds to wait before executing (gives the user a chance to abort).
    cwd: ``str`` or ``None``
        Working directory for the command.

    Returns
    -------
    ``subprocess.CompletedProcess``
        The result of the command, so callers can inspect the return code
        (it was previously discarded).
    """
    print("Will execute command after {}s: \n\t{}".format(sleepbefore, " ".join(li)))
    time.sleep(sleepbefore)
    return subprocess.run(li, cwd=cwd)
def modify_and_push_docker(version,  # grid2op version
                           path,
                           templateDockerFile_to_use="templateDockerFile",
                           docker_versions=None,
                           docker_tags=None):
    """Render the Dockerfile template for ``version`` and build/push the docker images.

    Parameters
    ----------
    version: ``str``
        The grid2op version (e.g. "1.2.3"); replaces the ``__VERSION__``
        placeholder of the template.
    path: ``str``
        Root of the grid2op repository (must contain ``utils/<template>``).
    templateDockerFile_to_use: ``str``
        Name of the template file, inside ``path/utils``.
    docker_versions: ``list`` or ``None``
        Docker tags to build and push; nothing is built when empty / ``None``.
    docker_tags: ``list`` or ``None``
        Extra arguments forwarded to ``docker build`` (e.g. ``["--no-cache"]``).
    """
    # sentinel defaults instead of the previous mutable [] default arguments
    if docker_versions is None:
        docker_versions = []
    if docker_tags is None:
        docker_tags = []
    # Dockerfile: render the template with the actual version number
    template_dockerfile = os.path.join(path, "utils", templateDockerFile_to_use)
    dockerfile = os.path.join(path, "Dockerfile")
    with open(template_dockerfile, "r") as f:
        new_setup = f.read()
    new_setup = re.sub("__VERSION__",
                       "v{}".format(version),
                       new_setup)
    with open(dockerfile, "w") as f:
        f.write(new_setup)
    # Create new docker containers
    # NOTE(review): "dockeruser" is a module-level global set in __main__ —
    # confirm this function is only ever called from there
    for vers_ in docker_versions:
        start_subprocess_print(
            ["docker", "build"] + docker_tags + ["-t", "{}/grid2op:{}".format(dockeruser, vers_), "."], cwd=path)
        start_subprocess_print(["docker", "push", "{}/grid2op:{}".format(dockeruser, vers_)], cwd=path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Update the version of grid2op in the python files.')
    parser.add_argument('--version', default=None,
                        help='The new version to update.')
    parser.add_argument('--dockeruser', default='bdonnot',
                        help='The docker hub username.')
    parser.add_argument('--path', default=os.path.abspath("."),
                        help='The path of the root directory of Grid2op (default {}'.format(os.path.abspath(".")))
    args = parser.parse_args()
    path = args.path
    dockeruser = args.dockeruser
    version = args.version
    if args.version is None:
        raise RuntimeError("script \"update_version\" should be called with a version number.")
    try:
        maj_, min_, minmin_, *post = version.split(".")
    except Exception as exc_:
        # chain the original error so the real cause stays visible
        raise RuntimeError(
            "script \"update_version\": version should be formated as XX.YY.ZZ (eg 0.3.1). "
            "Please modify \"--version\" argument") from exc_
    # raw strings so "\." is a real regex escape (avoids invalid-escape warnings)
    regex_version = r"[0-9]+\.[0-9]+\.[0-9]+(.post[0-9]+){0,1}(.rc[0-9]+){0,1}(.pre[0-9]+){0,1}(.dev[0-9]+){0,1}"
    # TODO use the official regex !
    # see https://semver.org/ and https://regex101.com/r/Ly7O1x/3/
    # regex_version = r"^(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$"
    regex_version_with_str = f"['\"]{regex_version}['\"]"
    if re.match("^{}$".format(regex_version), version) is None:
        raise RuntimeError(
            "script \"update_version\": version should be formated as XX.YY.ZZ (eg 0.3.1) and not {}. "
            "Please modify \"--version\" argument".format(
                version))
    # TODO re.search(reg_, "0.0.4-rc1").group("prerelease") -> rc1 (if regex_version is the official one)
    if re.search(r".*\.(rc|pre|dev)[0-9]+$", version) is not None:
        is_prerelease = True
        print("This is a pre release, docker will NOT be pushed, github tag will NOT be made")
        time.sleep(2)
    else:
        is_prerelease = False
        # fixed typo in the user-facing message ("sandard")
        print("This is standard release, docker will be pushed, github tag will be added")
        time.sleep(2)
    if True:
        # setup.py
        setup_path = os.path.join(path, "setup.py")
        grid2op_init = os.path.join(path, "grid2op", "__init__.py")
        with open(grid2op_init, "r") as f:
            old_init = f.read()
        if not os.path.exists(setup_path):
            raise RuntimeError(
                "script \"update_version\" cannot find the root path of Grid2op. "
                "Please provide a valid \"--path\" argument.")
        with open(setup_path, "r") as f:
            new_setup = f.read()
        try:
            old_version = re.search("__version__ = {}".format(regex_version_with_str), old_init).group(0)
        except Exception as e:
            raise RuntimeError("Impossible to find the old version number. Stopping here") from e
        old_version = re.sub("__version__ = ", "", old_version)
        old_version = re.sub("'", "", old_version)
        old_version = re.sub('"', "", old_version)
        old_version = re.sub("\\.rc[0-9]+", "", old_version)
        old_version = re.sub("\\.post[0-9]+", "", old_version)
        old_version = re.sub("\\.pre[0-9]+", "", old_version)
        old_version = re.sub("\\.dev[0-9]+", "", old_version)
        # compare versions numerically: the previous string comparison wrongly
        # considered e.g. "1.10.0" older than "1.9.0"
        new_base = re.sub(r"(\.rc[0-9]+)|(\.post[0-9]+)|(\.pre[0-9]+)|(\.dev[0-9]+)", "", version)
        if tuple(int(el) for el in new_base.split(".")) < tuple(int(el) for el in old_version.split(".")):
            raise RuntimeError("You provided the \"new\" version \"{}\" which is older (or equal) to the current version "
                               "found: \"{}\".".format(version, old_version))
        new_setup = re.sub("version={}".format(regex_version_with_str),
                           "version='{}'".format(version),
                           new_setup)
        with open(setup_path, "w") as f:
            f.write(new_setup)
        # Stage in git
        start_subprocess_print(["git", "add", setup_path])
        # grid2op/__init__.py
        with open(grid2op_init, "r") as f:
            new_setup = f.read()
        new_setup = re.sub("__version__ = {}".format(regex_version_with_str),
                           "__version__ = '{}'".format(version),
                           new_setup)
        with open(grid2op_init, "w") as f:
            f.write(new_setup)
        # Stage in git
        start_subprocess_print(["git", "add", grid2op_init])
        # docs/conf.py
        docs_conf = os.path.join(path, "docs", "conf.py")
        with open(docs_conf, "r") as f:
            new_setup = f.read()
        new_setup = re.sub("release = {}".format(regex_version_with_str),
                           "release = '{}'".format(version),
                           new_setup)
        new_setup = re.sub(r"version = '[0-9]+\.[0-9]+'",
                           "version = '{}.{}'".format(maj_, min_),
                           new_setup)
        with open(docs_conf, "w") as f:
            f.write(new_setup)
        # Stage in git
        start_subprocess_print(["git", "add", docs_conf])
        # Dockerfile
        template_dockerfile = os.path.join(path, "utils", "templateDockerFile")
        dockerfile = os.path.join(path, "Dockerfile")
        with open(template_dockerfile, "r") as f:
            new_setup = f.read()
        new_setup = re.sub("__VERSION__",
                           "v{}".format(version),
                           new_setup)
        with open(dockerfile, "w") as f:
            f.write(new_setup)
        if not is_prerelease:
            # Stage in git
            start_subprocess_print(["git", "add", dockerfile])
        # generate some logs, for backward compatibility
        # NB this generation is part of the test run, so it's safe to re generate the log when each version is released
        # in the sense that the tests pass ;-)
        import grid2op
        from grid2op.Agent import RandomAgent
        from grid2op.Runner import Runner
        import warnings
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            PATH_PREVIOUS_RUNNER = os.path.join(path, "grid2op", "data_test", "runner_data")
            # set the right grid2op version (instead of reloading the stuff, ugly, but working)
            grid2op.__version__ = version
            env = grid2op.make("rte_case5_example", test=True)
            runner = Runner(**env.get_params_for_runner(), agentClass=RandomAgent)
            runner.run(nb_episode=2,
                       path_save=os.path.join(PATH_PREVIOUS_RUNNER, f"res_agent_{version}"),
                       pbar=True,
                       max_iter=100)
        # Stage in git
        start_subprocess_print(["git", "add", f'{os.path.join(PATH_PREVIOUS_RUNNER, f"res_agent_{version}")}/*'])
        # Commit
        start_subprocess_print(["git", "commit", "-m", "Release v{}".format(version)])
        if not is_prerelease:
            # Create a new git tag
            start_subprocess_print(["git", "tag", "-a", "v{}".format(version), "-m", "Release v{}".format(version)])
    if is_prerelease:
        print("Please push changes: 'git push'")
        sys.exit(0)
    # Wait for user to push changes
    pushed = input("Please push changes: 'git push && git push --tags' - then press any key")
    # TODO refacto these, no need to have 3 times almost the same "templatedockerfile"
    # update docker for test version
    # TODO remove the "-e" in this docker file, and copy paste the data in data_test in the appropriate folder
    # that you can get with a python call
    modify_and_push_docker(version, path=path,
                           templateDockerFile_to_use="templateDockerFile_test",
                           docker_versions=["test"],
                           docker_tags=["--no-cache"])
    # update docker for "light"
    modify_and_push_docker(version, path=path,
                           templateDockerFile_to_use="templateDockerFile_light",
                           docker_versions=[f"{version}-light"],
                           docker_tags=["--no-cache"])
    # update version for competition and regular version
    modify_and_push_docker(version,
                           path=path,
                           docker_versions=[version, "latest"],
                           docker_tags=["--no-cache"])
| 10,440 | 44.199134 | 262 | py |
Grid2Op | Grid2Op-master/utils/push_docker.sh | #/bin/bash
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
# NOTE(review): the shebang of this file is "#/bin/bash" (missing "!") — it should be "#!/bin/bash".
# Build and push the grid2op docker images for a given version.
# Usage: push_docker.sh <version>
# Abort on the first failing command so a failed build is never pushed.
set -e
if [ $# -eq 0 ]
then
    echo "No arguments supplied, please specify the grid2op version to push to docker"
    exit 1
fi
version=$1
# fixed typos in the user-facing message ("grid2ip verion")
echo "Pushing grid2op version "$version
docker build -t bdonnot/grid2op:$version .
docker push bdonnot/grid2op:$version
docker build -t bdonnot/grid2op:latest .
docker push bdonnot/grid2op:latest
Grid2Op | Grid2Op-master/utils/rounder.py | import pandas as pd
# chronics files to round in place (semicolon-separated, bz2-compressed csv)
files = (
    "load_p.csv.bz2",
    "load_p_forecasted.csv.bz2",
    "load_q.csv.bz2",
    "load_q_forecasted.csv.bz2",
    "prices.csv.bz2",
    "prod_p.csv.bz2",
    "prod_p_forecasted.csv.bz2",
    "prod_v.csv.bz2",
)
for file_name in files:
    # round every column to one decimal and rewrite the file in place
    rounded = pd.read_csv(file_name, sep=";").round(decimals=1)
    print(rounded)
    rounded.to_csv(file_name, sep=";", index=False)
| 375 | 18.789474 | 38 | py |
Grid2Op | Grid2Op-master/utils/trigger_readthedocs.io.py | # Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
# This files allows to automatically update the documentation of grid2op on the
# readthedocs.io website. It should not be used for other purpose.
import argparse
import json
import os
import re
import time
# fail with an actionable message when the http client is missing; the previous
# version caught *everything* with a bare "except:" and (wrongly) blamed "urllib"
try:
    import requests as rq
except ImportError as exc:
    raise RuntimeError("Impossible to find library requests. Please install it.") from exc
import pdb
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Update the version of grid2op in the python files.')
    parser.add_argument('--version', default=None,
                        help='The new version to update.')
    parser.add_argument('--path', default=os.path.abspath("."),
                        help='The path of the root directory of Grid2op (default {}'.format(os.path.abspath(".")))
    args = parser.parse_args()
    path = args.path
    version = args.version
    if args.version is None:
        raise RuntimeError("script \"update_version\" should be called with a version number.")
    try:
        maj_, min_, minmin_ = version.split(".")
    except ValueError as exc:
        # only a wrong number of "." separated fields can fail here; the previous
        # bare "except:" also swallowed KeyboardInterrupt and friends
        raise RuntimeError("script \"update_version\": version should be formated as XX.YY.ZZ (eg 0.3.1). Please modify \"--version\" argument") from exc
    if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+$', version) is None:
        raise RuntimeError("script \"update_version\": version should be formated as XX.YY.ZZ (eg 0.3.1) and not {}. Please modify \"--version\" argument".format(version))
    if not os.path.exists(".readthedocstoken.json"):
        raise RuntimeError("Impossible to find credential for buildthedocs. Stopping there. Make sure to put them on \".readthedocstoken.json\"")
    with open(".readthedocstoken.json", "r") as f:
        dict_credentials = json.load(f)
    token = dict_credentials["token"]
    hdr = {"Authorization": "Token {}".format(token)}
    # curl \
    # -X POST \
    # -H "Authorization: Token <token>" https://readthedocs.org/api/v3/projects/pip/versions/latest/builds/
    # list existing version on read the doc:
    url_existing_version = "https://readthedocs.org/api/v3/projects/grid2op/versions/"
    req = rq.get(url_existing_version, headers=hdr)
    resp = req.json()
    li_existing_version = set()
    for el in resp["results"]:
        li_existing_version.add(el['slug'])
    # update new versions: trigger a build for the new tag and the two aliases
    url_version = "https://readthedocs.org/api/v3/projects/grid2op/versions/{version_slug}/builds/"
    for vers_ in ["v{}".format(version), "stable", "latest"]:
        if vers_ in li_existing_version:
            req = rq.post(url_version.format(version_slug=vers_), headers=hdr)
            print("Version {} properly updated".format(vers_))
            time.sleep(5)
        else:
            raise RuntimeError("Version \"{}\" is not part of the read the doc version,"
                               "please create it before updating it.".format(vers_))
| 3,307 | 40.873418 | 171 | py |
null | Vid-ODE-main/README.md | # Vid-ODE - Official PyTorch Implementation
<p align="left"><img width="95%" src="assets/teaser.jpg" /></p>
This repository provides the official PyTorch implementation of the following paper:
> **Vid-ODE: Continuous-Time Video Generation with Neural Ordinary Differential Equation**<br>
> [Sunghyun Park*](https://psh01087.github.io/)<sup>1</sup>, [Kangyeol Kim*](https://www.notion.so/kangyeolk/Kangyeol-Kim-86d81c125e404a98a9527713bd8a355c)<sup>1</sup>, [Junsoo Lee](https://ssuhan.github.io/)<sup>1</sup>, [Jaegul Choo](https://sites.google.com/site/jaegulchoo/)<sup>1</sup>, [Joonseok Lee](http://www.joonseok.net/)<sup>2</sup>, [Sookyung Kim](http://sookyung.net/)<sup>3</sup>, [Edward Choi](https://mp2893.com/)<sup>1</sup><br>
> <sup>1</sup>KAIST, <sup>2</sup>Google Research, <sup>3</sup>Lawrence Livermore Nat'l Lab.<br>
> In AAAI 2021. (* indicates equal contribution)<br>
> Paper : https://arxiv.org/abs/2010.08188<br>
> Project : https://psh01087.github.io/Vid-ODE/<br>
> **Abstract**: *Video generation models often operate under the assumption of fixed frame rates, which leads to suboptimal performance when it comes to handling flexible frame rates (e.g., increasing the frame rate of more dynamic portion of the video as well as handling missing video frames). To resolve the restricted nature of existing video generation models' ability to handle arbitrary timesteps, we propose continuous-time video generation by combining neural ODE (Vid-ODE) with pixel-level video processing techniques. Using ODE-ConvGRU as an encoder, a convolutional version of the recently proposed neural ODE, which enables us to learn continuous-time dynamics, Vid-ODE can learn the spatio-temporal dynamics of input videos of flexible frame rates. The decoder integrates the learned dynamics function to synthesize video frames at any given timesteps, where the pixel-level composition technique is used to maintain the sharpness of individual frames. With extensive experiments on four real-world video datasets, we verify that the proposed Vid-ODE outperforms state-of-the-art approaches under various video generation settings, both within the trained time range (interpolation) and beyond the range (extrapolation). To the best of our knowledge, Vid-ODE is the first work successfully performing continuous-time video generation using real-world videos.*
## Installation
Clone this repository:
```bash
git clone https://github.com/psh01087/Vid-ODE.git
cd Vid-ODE/
```
We support `python3`. Install the dependencies:
```bash
pip install -r requirements.txt
```
## Downloading Datasets
1. [KTH Action](https://www.dropbox.com/s/dtk54q8woufqimh/kth_action.tar.gz?dl=0)
2. [Moving GIF](https://www.dropbox.com/s/ltbatvndujit0wi/moving_gif.tar.gz?dl=0)
3. [Penn Action](https://www.dropbox.com/s/m7n62qm12xepsjl/penn_action.tar.gz?dl=0)
4. [Hurricane](https://www.dropbox.com/s/cn1njdonfwl1nta/hurricane.tar.gz?dl=0)
After downloading the dataset and unzipping files, place them under the ```dataset``` folder in current directory.
We preprocess all datasets for training our models.
<!-- ## Downloading pre-trained networks -->
## Training
To train a model on specific dataset, run:
```bash
CUDA_VISIBLE_DEVICES=0 python main.py --phase train --dataset kth
```
All arguments used for this project are described in the function "get_opt()" in ```main.py```. There are a lot of options to train our network on a wide range of datasets and also to evaluate various architectures for writing the paper. However, just for the purpose of executing the proposed method, the number of arguments that you need to change would be very limited.
The following options will be what you need to concern:
```--dataset``` : Specify the dataset to train, select among [kth, penn, mgif, hurricane].<br>
```--extrap``` : If you toggle this option, you can train the extrapolation model.<br>
```--irregular``` : If you toggle this option, you can train the model with irregularly sampled frames.<br>
## Evaluation
We evaluate our model using Structural Similarity (SSIM), Peak Signal-to-Noise Ratio (PSNR), and Learned Perceptual Image Patch Similarity (LPIPS). To evaluate a model on specific dataset, run:
```bash
CUDA_VISIBLE_DEVICES=0 python main.py --phase test_met --dataset kth --test_dir CHECKPOINT_DIR
```
## Citation
If you find this work useful for your research, please cite our [paper](https://arxiv.org/abs/2010.08188):
```
@article{park2020vid,
title={Vid-ODE: Continuous-Time Video Generation with Neural Ordinary Differential Equation},
author={Park, Sunghyun and Kim, Kangyeol and Lee, Junsoo and Choo, Jaegul and Lee, Joonseok and Kim, Sookyung and Choi, Edward},
journal={arXiv preprint arXiv:2010.08188},
booktitle={The Thirty-Fifth {AAAI} Conference on Artificial Intelligence, {AAAI} 2021},
pages={online},
publisher={{AAAI} Press},
year={2021},
}
```
| 4,896 | 55.94186 | 1,373 | md |
null | Vid-ODE-main/dataloader.py | import numpy as np
import os
import random
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms as T
import video_transforms as vtransforms
import utils
class Dataset_base(Dataset):
    """Shared frame-sampling logic for all video datasets.

    Subclasses provide the actual frame loading; the four ``sample_*``
    helpers here select frames (and the matching observation mask) for
    every combination of regular/irregular timing and
    interpolation/extrapolation.
    """

    def __init__(self, opt, train=True):
        # Get options
        self.opt = opt
        self.window_size = opt.window_size
        self.sample_size = opt.sample_size
        self.irregular = opt.irregular
        self.train = train
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        # Print out Dataset setting
        regularity = "irregular" if self.opt.irregular else "regular"
        task = "extrapolation" if self.opt.extrap else "interpolation"
        print(f"[Info] Dataset:{self.opt.dataset} / regularity:{regularity} / task:{task}")

    def sample_regular_interp(self, images):
        """Regular interpolation: train on every other frame of a random
        window; at test time return the full window with every other
        frame flagged as observed in the mask."""
        seq_len = images.shape[0]
        assert self.sample_size <= seq_len, "[Error] sample_size > seq_len"

        win_start = np.random.randint(0, seq_len - self.sample_size + 1) if self.train else 0
        if self.opt.phase == 'train':
            input_images = images[np.arange(win_start, win_start + self.sample_size, 2), ...]
            mask = torch.ones((self.sample_size // 2, 1))
        else:
            input_images = images[win_start: win_start + self.sample_size]
            mask = torch.zeros((self.sample_size, 1))
            mask[np.arange(0, self.sample_size, 2), :] = 1
        mask = mask.type(torch.FloatTensor).to(self.device)

        return input_images, mask

    def sample_regular_extrap(self, images):
        """Regular extrapolation: a contiguous window, fully observed."""
        seq_len = images.shape[0]
        assert self.sample_size <= seq_len, "[Error] sample_size > seq_len"

        # win_start = random.randint(0, seq_len - self.sample_size - 1) if self.train else 0
        win_start = random.randint(0, seq_len - self.sample_size) if self.train else 0
        input_images = images[win_start: win_start + self.sample_size]
        mask = torch.ones((self.sample_size, 1))
        mask = mask.type(torch.FloatTensor).to(self.device)

        return input_images, mask

    def sample_irregular_interp(self, images):
        """Irregular interpolation: return a full window of frames plus a
        mask that marks ``sample_size`` randomly chosen time points
        (always including the window's first and last frame)."""
        seq_len = images.shape[0]

        if seq_len <= self.window_size:
            assert self.sample_size <= seq_len, "[Error] sample_size > seq_len"
            win_start = 0
            rand_idx = sorted(np.random.choice(list(range(win_start + 1, seq_len - 1)), size=self.sample_size - 2, replace=False))
            rand_idx = [win_start] + rand_idx + [seq_len - 1]
        elif seq_len > self.window_size:
            win_start = random.randint(0, seq_len - self.window_size - 1) if self.train else 0
            rand_idx = sorted(np.random.choice(list(range(win_start + 1, win_start + self.window_size - 1)), size=self.sample_size - 2, replace=False))
            rand_idx = [win_start] + rand_idx + [win_start + self.window_size - 1]

        # [Caution]: Irregular setting return window-sized images and it is filtered out
        # Sample images
        input_idx = list(range(win_start, win_start + self.window_size))
        input_images = images[input_idx]
        # Sample masks
        mask = torch.zeros((self.window_size, 1))
        mask_idx = [r - win_start for r in rand_idx]
        mask[mask_idx, :] = 1
        mask = mask.type(torch.FloatTensor).to(self.device)

        return input_images, mask

    def sample_irregular_extrap(self, images):
        """Irregular extrapolation: random time points in each half of the
        window (half observed, half predicted), endpoints always kept."""
        seq_len = images.shape[0]
        assert self.window_size % 2 == 0, "[Error] window_size should be even number"
        assert self.sample_size % 2 == 0, "[Error] sample_size should be even number"

        half_window_size = self.window_size // 2
        half_sample_size = self.sample_size // 2

        if seq_len <= self.window_size:
            assert self.sample_size <= seq_len, "[Error] sample_size > seq_len"
            win_start = 0  # + 1
            half_window_size = seq_len // 2
            rand_idx_in = sorted(np.random.choice(list(range(win_start + 1, win_start + half_window_size)), size=half_sample_size - 1, replace=False))
            rand_idx_out = sorted(np.random.choice(list(range(win_start + half_window_size, win_start + seq_len - 1)), size=half_sample_size - 1, replace=False))
            rand_idx = [win_start] + rand_idx_in + rand_idx_out + [win_start + seq_len - 1]
        elif seq_len > self.window_size:
            win_start = random.randint(0, seq_len - self.window_size - 1) if self.train else 0  # + 1
            rand_idx_in = sorted(np.random.choice(list(range(win_start + 1, win_start + half_window_size)), size=half_sample_size - 1, replace=False))
            rand_idx_out = sorted(np.random.choice(list(range(win_start + half_window_size, win_start + self.window_size - 1)), size=half_sample_size - 1, replace=False))
            rand_idx = [win_start] + rand_idx_in + rand_idx_out + [win_start + self.window_size - 1]

        # [Caution]: Irregular setting return window-sized images and it is filtered out
        # Sample images
        input_idx = list(range(win_start, win_start + self.window_size))
        input_images = images[input_idx]
        # Sample mask
        mask = torch.zeros((self.window_size, 1))
        mask_idx = [r - win_start for r in rand_idx]
        mask[mask_idx, :] = 1
        # Fix: cast and move the mask exactly like the other three samplers,
        # so downstream code always receives a FloatTensor on self.device.
        mask = mask.type(torch.FloatTensor).to(self.device)

        return input_images, mask

    def sampling(self, images):
        """Dispatch to the sampler matching the (irregular, extrap) flags."""
        # Sampling
        if not self.irregular and not self.opt.extrap:
            input_images, mask = self.sample_regular_interp(images=images)
        elif not self.irregular and self.opt.extrap:
            input_images, mask = self.sample_regular_extrap(images=images)
        elif self.irregular and not self.opt.extrap:
            input_images, mask = self.sample_irregular_interp(images=images)
        else:
            input_images, mask = self.sample_irregular_extrap(images=images)

        return input_images, mask
def remove_files_under_sample_size(image_path, threshold):
    """Return the files in ``image_path`` whose sequences contain at least
    ``threshold`` frames.

    Each ``.npy`` file is loaded to inspect its first (time) dimension;
    sequences too short to sample from are dropped and counted.
    """
    candidates = [x for x in os.listdir(image_path)]
    image_list = []
    remove_count = 0
    for file in candidates:
        _image = np.load(os.path.join(image_path, file))
        if _image.shape[0] >= threshold:
            image_list.append(file)
        else:
            remove_count += 1
    if remove_count > 0:
        # Fixed duplicated word in the original message ("than than").
        print(f"Remove {remove_count:03d} files shorter than sample_size...")
    return image_list
class HurricaneVideoDataset(Dataset_base):
    """Hurricane dataset: each .npy file holds one sequence of frames."""

    def __init__(self, opt, train=True):
        super(HurricaneVideoDataset, self).__init__(opt, train=train)

        # The plain "hurricane" variant keeps 3 channels; others carry 6.
        self.nc = 3 if self.opt.dataset == "hurricane" else 6

        split = 'train' if self.train else 'test'
        self.image_path = os.path.join('./dataset/Hurricane/', split)

        # Drop sequences too short to sample from, then fix the ordering.
        threshold = self.window_size if opt.irregular else self.sample_size
        self.image_list = sorted(
            remove_files_under_sample_size(image_path=self.image_path, threshold=threshold))

        transform_list = [vtransforms.Pad(padding=(1, 0), fill=0)]
        transform_list.append(vtransforms.ToTensor(scale=False))
        if opt.input_norm:
            transform_list.append(vtransforms.Normalize(0.5, 0.5))
        self.vtrans = T.Compose(transform_list)

    def __getitem__(self, index):
        assert self.sample_size <= self.window_size, "[Error] sample_size > window_size"
        frames = np.load(os.path.join(self.image_path, self.image_list[index]))
        frames = frames[..., :self.nc]

        sampled, mask = self.sampling(images=frames)

        # Transforms take (t, h, w, c) arrays and yield (t, c, h, w) tensors.
        return self.vtrans(sampled), mask

    def __len__(self):
        return len(self.image_list)
class VideoDataset(Dataset_base):
    """Frame-sequence dataset for mgif / kth / penn (.npy file per clip)."""

    def __init__(self, opt, train=True):
        super(VideoDataset, self).__init__(opt, train=train)

        # Dataroot & Transform
        if opt.dataset == 'mgif':
            data_root = './dataset/moving-gif'
            vtrans = [vtransforms.Scale(size=128)]
        elif opt.dataset == 'kth':
            data_root = './dataset/kth_action/'
            vtrans = [vtransforms.CenterCrop(size=120), vtransforms.Scale(size=128)]
        elif opt.dataset == 'penn':
            data_root = './dataset/penn_action/'
            vtrans = [vtransforms.Scale(size=128)]
        else:
            # Fail fast with a clear message instead of the NameError the
            # unbound data_root/vtrans would otherwise trigger below.
            raise NotImplementedError(f"There is no dataset named {opt.dataset}")

        if self.train:
            vtrans += [vtransforms.RandomHorizontalFlip()]
            vtrans += [vtransforms.RandomRotation()]
        vtrans += [vtransforms.ToTensor(scale=True)]
        vtrans += [vtransforms.Normalize(0.5, 0.5)] if opt.input_norm else []
        self.vtrans = T.Compose(vtrans)

        if self.train:
            self.image_path = os.path.join(data_root, 'train')
        else:
            self.image_path = os.path.join(data_root, 'test')

        threshold = self.window_size if opt.irregular else self.sample_size
        if opt.dataset in ['kth', 'sintel', 'ucf101', 'penn']:
            self.image_list = os.listdir(self.image_path)
        elif opt.dataset in ['mgif', 'stickman']:
            self.image_list = remove_files_under_sample_size(image_path=self.image_path, threshold=threshold)
        self.image_list = sorted(self.image_list)

    def __getitem__(self, index):
        assert self.sample_size <= self.window_size, "[Error] sample_size > window_size"
        images = np.load(os.path.join(self.image_path, self.image_list[index]))

        # Sampling
        input_images, mask = self.sampling(images=images)

        # Transform: (t, h, w, c) array -> (t, c, h, w) tensor
        input_images = self.vtrans(input_images)

        return input_images, mask

    def __len__(self):
        return len(self.image_list)
def parse_datasets(opt, device):
    """Build train/test dataloaders for the selected dataset.

    Returns a dict with infinite generators over both loaders plus the
    number of batches in each ("n_train_batches" / "n_test_batches").
    """
    def video_collate_fn(batch, time_steps, opt=opt, data_type="train"):
        # Stack per-sample (frames, mask) pairs into batch tensors, then
        # split them into observed / to-predict parts for the current task.
        # NOTE: opt is bound as a default argument to freeze its value.
        images = torch.stack([b[0] for b in batch])
        mask = torch.stack([b[1] for b in batch])

        data_dict = {"data": images, "time_steps": time_steps, "mask": mask}

        data_dict = utils.split_and_subsample_batch(data_dict, opt, data_type=data_type)
        data_dict['mode'] = data_type
        return data_dict

    # Normalized time stamps in [0, 1); length depends on the task because
    # regular interpolation trains on every other frame only.
    if opt.irregular:
        time_steps = np.arange(0, opt.window_size) / opt.window_size
    else:
        if opt.extrap:
            time_steps = np.arange(0, opt.sample_size) / opt.sample_size
        else:
            time_steps = np.arange(0, opt.sample_size // 2) / (opt.sample_size // 2)
            # time_steps = np.arange(0, opt.sample_size) / opt.sample_size
    time_steps = torch.from_numpy(time_steps).type(torch.FloatTensor).to(device)

    if opt.dataset in ['hurricane']:
        train_dataloader = DataLoader(HurricaneVideoDataset(opt, train=True),
                                      batch_size=opt.batch_size,
                                      shuffle=True,
                                      collate_fn=lambda batch: video_collate_fn(batch, time_steps, data_type="train"))
        test_dataloader = DataLoader(HurricaneVideoDataset(opt, train=False),
                                     batch_size=opt.batch_size,
                                     shuffle=False,
                                     collate_fn=lambda batch: video_collate_fn(batch, time_steps, data_type="test"))
    elif opt.dataset in ['mgif', 'kth', 'penn']:
        train_dataloader = DataLoader(VideoDataset(opt, train=True),
                                      batch_size=opt.batch_size,
                                      shuffle=True,
                                      collate_fn=lambda batch: video_collate_fn(batch, time_steps, data_type="train"))
        test_dataloader = DataLoader(VideoDataset(opt, train=False),
                                     batch_size=opt.batch_size,
                                     shuffle=False,
                                     collate_fn=lambda batch: video_collate_fn(batch, time_steps, data_type="test"))
    else:
        raise NotImplementedError(f"There is no dataset named {opt.dataset}")

    # Wrap loaders so training can iterate indefinitely without epochs
    # raising StopIteration.
    data_objects = {"train_dataloader": utils.inf_generator(train_dataloader),
                    "test_dataloader": utils.inf_generator(test_dataloader),
                    "n_train_batches": len(train_dataloader),
                    "n_test_batches": len(test_dataloader)}
    return data_objects
if __name__ == "__main__":
    # No standalone behavior; this module is meant to be imported.
    pass
| 13,123 | 40.27044 | 170 | py |
null | Vid-ODE-main/evaluate.py | import argparse
import os
import numpy as np
from PIL import Image
from skimage.metrics import structural_similarity as ssim
from math import log10
import torch
import torch.nn.functional as F
import torchvision.transforms as Transforms
import eval_models as models
def get_opt():
    """Parse command-line options for standalone metric evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--result_image_dir', default='./')
    return parser.parse_args()
def Evaluation(opt, pred_list, gt_list):
    """Compare predicted frames against ground truth and report the
    average SSIM, MSE, LPIPS and PSNR over all file pairs.

    Args:
        opt: namespace exposing ``result_image_dir`` containing 'pred' and
            'gt' subdirectories of image files.
        pred_list / gt_list: filenames in those subdirectories; both lists
            are sorted in place so the i-th entries are compared pairwise.

    Returns:
        (avg_ssim, avg_mse, avg_distance); PSNR is printed but not returned.

    NOTE(review): requires CUDA — tensors are moved with .cuda() and the
    LPIPS model is built with use_gpu=True.
    """
    pred_list.sort()
    gt_list.sort()

    T1 = Transforms.ToTensor()
    T2 = Transforms.Compose([Transforms.Resize((128, 128)),
                             Transforms.ToTensor(),
                             Transforms.Normalize(mean=(0.5, 0.5, 0.5),
                                                  std=(0.5, 0.5, 0.5))])

    # LPIPS perceptual metric (project-local eval_models module).
    model = models.PerceptualLoss(model='net-lin',net='alex',use_gpu=True)
    model.eval()

    avg_ssim, avg_mse, avg_distance = 0.0, 0.0, 0.0
    with torch.no_grad():
        print("Calculate SSIM, MSE, LPIPS...")
        for i, (pred_img, gt_img) in enumerate(zip(pred_list, gt_list)):
            # Calculate SSIM (on grayscale conversions, 8-bit range)
            gt_img = Image.open(os.path.join(opt.result_image_dir, 'gt', gt_img))
            gt_np = np.asarray(gt_img.convert('L'))
            pred_img = Image.open(os.path.join(opt.result_image_dir, 'pred', pred_img))
            pred_np = np.asarray(pred_img.convert('L'))
            avg_ssim += ssim(gt_np, pred_np, data_range=255, gaussian_weights=True, use_sample_covariance=False)

            # Calculate LPIPS (inputs resized + normalized to [-1, 1])
            gt_img_LPIPS = T2(gt_img).unsqueeze(0).cuda()
            pred_img_LPIPS = T2(pred_img).unsqueeze(0).cuda()
            avg_distance += model.forward(gt_img_LPIPS, pred_img_LPIPS)

            # Calculate MSE (inputs in [0, 1])
            gt_img_MSE = T1(gt_img).unsqueeze(0).cuda()
            pred_img_MSE = T1(pred_img).unsqueeze(0).cuda()
            avg_mse += F.mse_loss(gt_img_MSE, pred_img_MSE)

            if (i + 1) % 10 == 0:
                print("step: %8d evaluation..." % (i+1))

    avg_ssim /= len(gt_list)
    avg_mse = avg_mse / len(gt_list)
    # PSNR is derived from the mean MSE of [0, 1]-scaled images.
    avg_psnr = 10 * log10(1 / avg_mse)
    avg_distance = avg_distance / len(gt_list)

    print("SSIM : %f / MSE : %f / LPIPS : %f / PSNR : %f" % (avg_ssim, avg_mse, avg_distance, avg_psnr))
    return avg_ssim, avg_mse, avg_distance
def main():
    """Collect prediction / ground-truth file lists and run the metrics."""
    opt = get_opt()

    # Predicted frames and their matching ground-truth frames.
    predictions = os.listdir(os.path.join(opt.result_image_dir, 'pred'))
    targets = os.listdir(os.path.join(opt.result_image_dir, 'gt'))

    Evaluation(opt, predictions, targets)
    print("Finish evaluate.py...")


if __name__ == '__main__':
    main()
null | Vid-ODE-main/main.py | import torch
import torch.optim as optim
import argparse
import os
import time
import datetime
import json
from pathlib import Path
import numpy as np
from dataloader import parse_datasets
from models.conv_odegru import *
from models.gan import *
from tester import Tester
import utils
import visualize
def get_opt():
    """Parse command-line options; in the train phase also create the
    storage/log/checkpoint directory tree and dump options.json."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--name", default="vid_ode", help='Specify experiment')
    parser.add_argument('-j', '--workers', type=int, default=4)
    parser.add_argument('-b', '--batch_size', type=int, default=8)
    parser.add_argument('--epoch', type=int, default=500, help='epoch')
    parser.add_argument('--phase', default="train", choices=["train", "test_met"])

    # Hyper-parameters
    parser.add_argument('--lr', type=float, default=1e-3, help="Starting learning rate.")
    parser.add_argument('--window_size', type=int, default=20, help="Window size to sample")
    parser.add_argument('--sample_size', type=int, default=10, help="Number of time points to sub-sample")

    # Hyper-parameters
    parser.add_argument('--lamb_adv', type=float, default=0.003, help="Adversarial Loss lambda")

    # Network variants for experiment..
    parser.add_argument('--input_size', type=int, default=128)
    parser.add_argument('--dec_diff', type=str, default='dopri5', choices=['dopri5', 'euler', 'adams', 'rk4'])
    parser.add_argument('--n_layers', type=int, default=2, help='A number of layer of ODE func')
    parser.add_argument('--n_downs', type=int, default=2)
    parser.add_argument('--init_dim', type=int, default=32)
    parser.add_argument('--input_norm', action='store_true', default=False)

    parser.add_argument('--run_backwards', action='store_true', default=True)
    parser.add_argument('--irregular', action='store_true', default=False, help="Train with irregular time-steps")

    # Need to be tested...
    parser.add_argument('--extrap', action='store_true', default=False, help="Set extrapolation mode. If this flag is not set, run interpolation mode.")

    # Test argument:
    parser.add_argument('--split_time', default=10, type=int, help='Split time for extrapolation or interpolation ')

    # Log
    parser.add_argument("--ckpt_save_freq", type=int, default=5000)
    parser.add_argument("--log_print_freq", type=int, default=10)
    parser.add_argument("--image_print_freq", type=int, default=1000)

    # Path (Data & Checkpoint & Tensorboard)
    parser.add_argument('--dataset', type=str, default='kth', choices=["mgif", "hurricane", "kth", "penn"])
    parser.add_argument('--log_dir', type=str, default='./logs', help='save tensorboard infos')
    parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints', help='save checkpoint infos')
    parser.add_argument('--test_dir', type=str, help='load saved model')

    opt = parser.parse_args()
    # presumably the number of input image channels (RGB) — confirm in model
    opt.input_dim = 3

    if opt.phase == 'train':
        # Make Directory: ./storage/{logs,checkpoints}/<MMDD>/<run name>
        STORAGE_PATH = utils.create_folder_ifnotexist("./storage")
        STORAGE_PATH = STORAGE_PATH.resolve()
        LOG_PATH = utils.create_folder_ifnotexist(STORAGE_PATH / "logs")
        CKPT_PATH = utils.create_folder_ifnotexist(STORAGE_PATH / "checkpoints")

        # Modify Desc: encode the main run settings into the run name
        now = datetime.datetime.now()
        month_day = f"{now.month:02d}{now.day:02d}"
        opt.name = f"dataset{opt.dataset}_extrap{opt.extrap}_irregular{opt.irregular}_runBack{opt.run_backwards}_{opt.name}"
        opt.log_dir = utils.create_folder_ifnotexist(LOG_PATH / month_day / opt.name)
        opt.checkpoint_dir = utils.create_folder_ifnotexist(CKPT_PATH / month_day / opt.name)

        # Write opt information; paths are stringified for JSON and
        # restored to Path objects right after the dump.
        with open(str(opt.log_dir / 'options.json'), 'w') as fp:
            opt.log_dir = str(opt.log_dir)
            opt.checkpoint_dir = str(opt.checkpoint_dir)
            json.dump(opt.__dict__, fp=fp)
            print("option.json dumped!")
        opt.log_dir = Path(opt.log_dir)
        opt.checkpoint_dir = Path(opt.checkpoint_dir)

        opt.train_image_path = utils.create_folder_ifnotexist(opt.log_dir / "train_images")
        opt.test_image_path = utils.create_folder_ifnotexist(opt.log_dir / "test_images")
    else:
        print("[Info] In test phase, skip dumping options.json..!")

    return opt
def main():
    """Entry point: build options, data loaders and model, then dispatch
    to training or test-time metric evaluation."""
    opt = get_opt()
    print(opt)

    tester = None
    if opt.phase != 'train':
        tester = Tester()
        opt = tester._load_json(opt)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"device:{device}")

    # Dataloader
    loader_objs = parse_datasets(opt, device)

    # Model
    model = VidODE(opt, device)

    # Test-time setup: restore weights and wire everything into the tester.
    if tester is not None:
        tester._load_model(opt, model)
        tester._set_properties(opt, model, loader_objs, device)

    if opt.phase == 'train':
        train(opt, model, loader_objs, device)
    if opt.phase == 'test_met':
        tester.infer_and_metrics()
def train(opt, netG, loader_objs, device):
    """Adversarial training loop for the VidODE generator.

    Alternates discriminator (image + sequence) and generator updates,
    with periodic logging, checkpointing and image dumps. ``netG`` is the
    generator model; discriminators are created locally.
    """
    # Optimizer
    optimizer_netG = optim.Adamax(netG.parameters(), lr=opt.lr)

    # Discriminator (per-frame netD_img and per-sequence netD_seq)
    netD_img, netD_seq, optimizer_netD = create_netD(opt, device)

    train_dataloader = loader_objs['train_dataloader']
    test_dataloader = loader_objs['test_dataloader']
    n_train_batches = loader_objs['n_train_batches']
    n_test_batches = loader_objs['n_test_batches']
    total_step = 0
    start_time = time.time()

    for epoch in range(opt.epoch):

        # Exponential LR decay per epoch, floored at lr/10.
        utils.update_learning_rate(optimizer_netG, decay_rate=0.99, lowest=opt.lr / 10)
        utils.update_learning_rate(optimizer_netD, decay_rate=0.99, lowest=opt.lr / 10)

        for it in range(n_train_batches):

            data_dict = utils.get_data_dict(train_dataloader)
            batch_dict = utils.get_next_batch(data_dict)

            # Generator reconstruction loss (and predictions).
            res = netG.compute_all_losses(batch_dict)
            loss_netG = res["loss"]

            # Compute Adversarial Loss
            real = batch_dict["data_to_predict"]
            fake = res["pred_y"]
            input_real = batch_dict["observed_data"]

            # Filter out mask: in the irregular setting only the frames
            # selected by the masks are fed to the discriminators.
            # NOTE(review): .byte() mask indexing is deprecated in newer
            # torch — .bool() is the modern equivalent.
            if opt.irregular:
                b, _, c, h, w = real.size()
                observed_mask = batch_dict["observed_mask"]
                mask_predicted_data = batch_dict["mask_predicted_data"]

                selected_timesteps = int(observed_mask[0].sum())
                input_real = input_real[observed_mask.squeeze(-1).byte(), ...].view(b, selected_timesteps, c, h, w)
                real = real[mask_predicted_data.squeeze(-1).byte(), ...].view(b, selected_timesteps, c, h, w)

            loss_netD = opt.lamb_adv * netD_seq.netD_adv_loss(real, fake, input_real)
            loss_netD += opt.lamb_adv * netD_img.netD_adv_loss(real, fake, None)

            loss_adv_netG = opt.lamb_adv * netD_seq.netG_adv_loss(fake, input_real)
            loss_adv_netG += opt.lamb_adv * netD_img.netG_adv_loss(fake, None)
            loss_netG += loss_adv_netG

            # Train D
            optimizer_netD.zero_grad()
            loss_netD.backward()
            optimizer_netD.step()

            # Train G
            optimizer_netG.zero_grad()
            loss_netG.backward()
            optimizer_netG.step()

            if (total_step + 1) % opt.log_print_freq == 0 or total_step == 0:
                et = time.time() - start_time
                et = str(datetime.timedelta(seconds=et))[:-7]
                log = f"Elapsed [{et}] Epoch [{epoch:03d}/{opt.epoch:03d}]\t"\
                      f"Iterations [{(total_step + 1):6d}] \t"\
                      f"Mse [{res['loss'].item():.4f}]\t"\
                      f"Adv_G [{loss_adv_netG.item():.4f}]\t"\
                      f"Adv_D [{loss_netD.item():.4f}]"
                print(log)

            # Checkpoint periodically, at the very first step, and at the
            # final iteration of the final epoch.
            if (total_step + 1) % opt.ckpt_save_freq == 0 or (epoch + 1 == opt.epoch and it + 1 == n_train_batches) or total_step == 0:
                utils.save_checkpoint(netG, os.path.join(opt.checkpoint_dir, f"ckpt_{(total_step + 1):08d}.pth"))

            if (total_step + 1) % opt.image_print_freq == 0 or total_step == 0:
                gt, pred, time_steps = visualize.make_save_sequence(opt, batch_dict, res)

                if opt.extrap:
                    visualize.save_extrap_images(opt=opt, gt=gt, pred=pred, path=opt.train_image_path, total_step=total_step)
                else:
                    visualize.save_interp_images(opt=opt, gt=gt, pred=pred, path=opt.train_image_path, total_step=total_step)

            total_step += 1

        # Test
        if (epoch + 1) % 100 == 0:
            test(netG, epoch, test_dataloader, opt, n_test_batches)
def test(netG, epoch, test_dataloader, opt, n_test_batches):
    """Run one evaluation pass over the test set and save sample images
    for one fixed batch and one randomly chosen batch."""
    saved_random = np.random.randint(0, n_test_batches, size=1)
    saved_fixed = 2

    total_loss = 0.0
    with torch.no_grad():
        for step in range(n_test_batches):
            batch = utils.get_next_batch(utils.get_data_dict(test_dataloader))
            out = netG.compute_all_losses(batch)
            total_loss += out["loss"].detach()

            if step == saved_fixed or step == saved_random:
                gt, pred, _ = visualize.make_save_sequence(opt, batch, out)
                saver = visualize.save_extrap_images if opt.extrap else visualize.save_interp_images
                saver(opt=opt, gt=gt, pred=pred, path=opt.test_image_path,
                      total_step=100 * (epoch + 1) + step)

    total_loss /= n_test_batches
    print(f"[Test] Epoch [{epoch:03d}/{opt.epoch:03d}]\tLoss {total_loss:.4f}\t")
if __name__ == '__main__':
    # Script entry point.
    main()
| 10,063 | 39.580645 | 152 | py |
null | Vid-ODE-main/tester.py | from pathlib import Path
import os
import json
import utils
import torch
import visualize
import evaluate
from dataloader import remove_files_under_sample_size
class Tester:
    """Test-time driver: restores training options and weights, then runs
    inference and metric evaluation over the test set."""

    def __init__(self):
        return

    def _load_json(self, opt):
        """Overwrite ``opt`` with the options dumped at training time,
        preserving only the test-time flags in ``keep_opt_list``."""
        keep_opt_list = ['phase', 'split_time']
        with open(os.path.join(opt.test_dir, "options.json"), 'r') as option:
            opt_dict = json.load(option)
            for k, v in opt_dict.items():
                if k not in keep_opt_list:
                    setattr(opt, k, v)

        # Metric directory
        opt.result_image_dir = utils.create_folder_ifnotexist(Path(opt.log_dir) / "result_images")
        return opt

    def _load_model(self, opt, model):
        """Load the latest checkpoint into ``model``.

        Fix: os.listdir order is filesystem-dependent, so sort the
        (zero-padded) checkpoint names before taking the last one;
        previously an arbitrary checkpoint could be picked.
        """
        checkpoints = sorted(os.listdir(opt.checkpoint_dir))
        print(f"Possible loading models:{checkpoints}")
        checkpoint_file = checkpoints[-1]
        print(f"Load checkpoint file... {os.path.join(opt.checkpoint_dir, checkpoint_file)}")
        utils.load_checkpoint(model, os.path.join(opt.checkpoint_dir, checkpoint_file))

    def _set_properties(self, opt, model, loader_objs, device):
        # Cache everything infer_and_metrics needs.
        self.opt = opt
        self.model = model.to(device)
        self.test_dataloader = loader_objs['test_dataloader']
        self.train_dataloader = loader_objs['train_dataloader']
        self.n_test_batches = loader_objs['n_test_batches']
        self.n_train_batches = loader_objs['n_train_batches']
        self.device = device

    @torch.no_grad()
    def infer_and_metrics(self):
        """Predict every test batch, dump pred/gt images to disk, then
        compute SSIM / MSE / LPIPS over the saved files."""
        test_interp = True if not self.opt.extrap else False
        for it in range(self.n_test_batches):
            data_dict = utils.get_data_dict(self.test_dataloader)
            batch_dict = utils.get_next_batch(data_dict, test_interp=test_interp)
            preds, extra_info = self.model.get_reconstruction(time_steps_to_predict=batch_dict["tp_to_predict"],
                                                              truth=batch_dict["observed_data"],
                                                              truth_time_steps=batch_dict["observed_tp"],
                                                              mask=batch_dict["observed_mask"],
                                                              out_mask=batch_dict["mask_predicted_data"])

            # Keep only the frames selected by the prediction mask.
            b, _, c, h, w = batch_dict["data_to_predict"].size()
            selected_time_len = int(batch_dict["mask_predicted_data"][0].sum())
            batch_dict["data_to_predict"] = batch_dict["data_to_predict"][batch_dict["mask_predicted_data"].squeeze(-1).byte()].view(b, selected_time_len, c, h, w)

            visualize.save_test_images(opt=self.opt, preds=preds, batch_dict=batch_dict, path=self.opt.result_image_dir, index=it * self.opt.batch_size)
            if (it + 1) % 10 == 0:
                print(f"step: {it + 1:8d} testing...")

        pred_list = os.listdir(os.path.join(self.opt.result_image_dir, 'pred'))
        gt_list = os.listdir(os.path.join(self.opt.result_image_dir, 'gt'))
        evaluate.Evaluation(self.opt, pred_list, gt_list)
null | Vid-ODE-main/utils.py | import os
import numpy as np
from pathlib import Path
import torch
def create_folder_ifnotexist(folder_path):
    """Create ``folder_path`` (and any missing parents) when it does not
    exist yet; return it as a ``pathlib.Path``."""
    path = Path(folder_path)
    if not path.exists():
        path.mkdir(parents=True, exist_ok=False)
    return path
class Tracker(object):
    """Minimal key/value store for collecting scalars during training."""

    def __init__(self):
        self.infos = {}

    def write_info(self, key, value):
        # A later write to the same key overwrites the earlier value.
        self.infos[key] = value

    def export_info(self):
        # Returns the live dict (not a copy), matching the original API.
        return self.infos

    def clean_info(self):
        self.infos = {}
def save_checkpoint(model, path):
    """Serialize ``model``'s state dict to ``path``, creating missing
    parent directories first.

    Fix: ``os.path.dirname`` returns '' for a bare filename, and
    ``os.makedirs('')`` raises FileNotFoundError — guard against that.
    """
    parent = os.path.dirname(path)
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
    torch.save(model.state_dict(), path)
def load_checkpoint(model, path):
    """Restore a state dict previously written by ``save_checkpoint``."""
    state = torch.load(path)
    model.load_state_dict(state)
def denorm(x):
    """Map a tensor from [-1, 1] back to [0, 1], clamping outliers."""
    return ((x + 1) / 2).clamp_(0, 1)
def inf_generator(iterable):
    """Cycle over ``iterable`` forever, restarting a fresh iterator each
    time the previous one is exhausted:
        for i, (x, y) in enumerate(inf_generator(train_loader)):
    """
    it = iter(iterable)
    while True:
        try:
            yield next(it)
        except StopIteration:
            it = iter(iterable)
def flatten(x, dim):
    """Collapse every dimension of ``x`` from ``dim`` onward into one."""
    leading = x.size()[:dim]
    return x.reshape(*leading, -1)
def get_device(tensor):
    """Return where the tensor lives: the CUDA device index when on GPU,
    otherwise ``torch.device("cpu")``."""
    if tensor.is_cuda:
        return tensor.get_device()
    return torch.device("cpu")
def get_data_dict(dataloader):
    """Pull the next batch dict from an (infinite) dataloader iterator."""
    return next(dataloader)
def get_next_batch(data_dict, test_interp=False):
    """Copy a collated batch into the template dict and apply the masks.

    With ``test_interp`` unset, skipped frames are simply zeroed by
    multiplying with the mask; with it set (interpolation testing), the
    observed frames are physically gathered and the prediction targets /
    time stamps are rebuilt over the full sequence.

    NOTE(review): the test_interp branches call .cuda() unconditionally,
    so this path requires a GPU; .byte() mask indexing is deprecated in
    newer torch (.bool() is the modern spelling).
    """
    device = get_device(data_dict["observed_data"])
    batch_dict = get_dict_template()

    # preserving values:
    batch_dict["mode"] = data_dict["mode"]
    batch_dict["observed_data"] = data_dict["observed_data"]
    batch_dict["observed_tp"] = data_dict["observed_tp"]
    batch_dict["data_to_predict"] = data_dict["data_to_predict"]
    batch_dict["tp_to_predict"] = data_dict["tp_to_predict"]

    # Input: Mask out skipped data
    if ("observed_mask" in data_dict) and (data_dict["observed_mask"] is not None):
        batch_dict["observed_mask"] = data_dict["observed_mask"]
        # (b, t, 1) -> (b, t, 1, 1, 1) so it broadcasts over (b, t, c, h, w)
        filter_mask = batch_dict["observed_mask"].unsqueeze(-1).unsqueeze(-1).to(device)
        if not test_interp:
            batch_dict["observed_data"] = filter_mask * batch_dict["observed_data"]
        else:
            # Gather only the observed frames (assumes exactly half are
            # observed — hence the t // 2 reshape) and reset the mask.
            selected_mask = batch_dict["observed_mask"].squeeze(-1).byte()
            b, t, c, h, w = batch_dict["observed_data"].size()
            batch_dict["observed_data"] = batch_dict["observed_data"][selected_mask, ...].view(b, t // 2, c, h, w)
            batch_dict["observed_mask"] = torch.ones(b, t // 2, 1).cuda()

    # Pred: Mask out skipped data
    if ("mask_predicted_data" in data_dict) and (data_dict["mask_predicted_data"] is not None):
        batch_dict["mask_predicted_data"] = data_dict["mask_predicted_data"]
        filter_mask = batch_dict["mask_predicted_data"].unsqueeze(-1).unsqueeze(-1).to(device)
        if not test_interp:
            # Key name keeps the original's spelling ("orignal...") because
            # other modules look it up by this exact string.
            batch_dict["orignal_data_to_predict"] = batch_dict["data_to_predict"].clone()
            batch_dict["data_to_predict"] = filter_mask * batch_dict["data_to_predict"]
        else:
            b, t, c, h, w = batch_dict["data_to_predict"].size()
            # specify times: normalized stamps over the full sequence
            batch_dict["tp_to_predict"] = torch.from_numpy(np.arange(0, t) / t).type(torch.FloatTensor).cuda()
            # mask out: predict the complement of the observed frames,
            # excluding the last frame
            selected_mask = torch.ones_like(batch_dict["mask_predicted_data"]) - batch_dict["mask_predicted_data"]
            selected_mask[:, -1, :] = 0.  # exclude last frame
            selected_mask = selected_mask.squeeze(-1).byte()
            batch_dict["mask_predicted_data"] = selected_mask

    return batch_dict
def update_learning_rate(optimizer, decay_rate=0.999, lowest=1e-3):
    """Multiply every param group's learning rate by ``decay_rate``,
    never letting it drop below ``lowest``."""
    for group in optimizer.param_groups:
        group['lr'] = max(group['lr'] * decay_rate, lowest)
def reverse_time_order(tensor):
    """Return a copy of ``tensor`` with its time axis (dim 1) reversed."""
    return tensor.flip(1)
def get_dict_template():
    """Empty skeleton for a batch dict; every field starts as None."""
    keys = ("observed_data", "observed_tp", "data_to_predict",
            "tp_to_predict", "observed_mask", "mask_predicted_data")
    return {k: None for k in keys}
def split_data_extrap(data_dict, opt):
    """Extrapolation split: first half of the time axis is observed, the
    second half is the prediction target. ``opt`` is unused here."""
    n_obs = data_dict["data"].size(1) // 2
    split_dict = {
        "observed_data": data_dict["data"][:, :n_obs, :].clone(),
        "observed_tp": data_dict["time_steps"][:n_obs].clone(),
        "data_to_predict": data_dict["data"][:, n_obs:, :].clone(),
        "tp_to_predict": data_dict["time_steps"][n_obs:].clone(),
        "observed_mask": None,
        "mask_predicted_data": None,
    }

    mask = data_dict.get("mask")
    if mask is not None:
        split_dict["observed_mask"] = mask[:, :n_obs].clone()
        split_dict["mask_predicted_data"] = mask[:, n_obs:].clone()

    split_dict["mode"] = "extrap"
    return split_dict
def split_data_interp(data_dict, opt):
    """Interpolation split: the whole sequence is both the observation
    and the prediction target. ``opt`` is unused here."""
    split_dict = {
        "observed_data": data_dict["data"].clone(),
        "observed_tp": data_dict["time_steps"].clone(),
        "data_to_predict": data_dict["data"].clone(),
        "tp_to_predict": data_dict["time_steps"].clone(),
        "observed_mask": None,
        "mask_predicted_data": None,
    }

    mask = data_dict.get("mask")
    if mask is not None:
        split_dict["observed_mask"] = mask.clone()
        split_dict["mask_predicted_data"] = mask.clone()

    split_dict["mode"] = "interp"
    return split_dict
def add_mask(data_dict):
    """Ensure ``observed_mask`` exists; default to an all-ones mask on
    the same device as the observed data."""
    data = data_dict["observed_data"]
    if data_dict["observed_mask"] is None:
        data_dict["observed_mask"] = torch.ones_like(data).to(get_device(data))
    return data_dict
def split_and_subsample_batch(data_dict, opt, data_type="train"):
    """Turn a raw collated batch into the observed/predicted split the
    model consumes.

    The split depends only on the task (extrapolation vs interpolation);
    train and test batches were handled identically in both branches of
    the original, so the redundant per-``data_type`` branching is
    collapsed here.

    Args:
        data_dict: dict with "data", "time_steps" and optional "mask".
        opt: options namespace; only ``opt.extrap`` is read.
        data_type: kept for interface compatibility; not used.
    """
    if opt.extrap:
        processed_dict = split_data_extrap(data_dict, opt)
    else:
        processed_dict = split_data_interp(data_dict, opt)

    # Guarantee an observation mask is present.
    return add_mask(processed_dict)
null | Vid-ODE-main/video_transforms.py | import collections
import collections.abc
import math
import numbers
import random

import cv2
import numpy as np
import PIL
from PIL import Image
import skimage
import torch
import torchvision.transforms.functional as F
def resize(video, size, interpolation):
    """Resize every frame of ``video`` to ``size`` = (width, height).

    Accepts arrays shaped (..., H, W, C): leading dimensions are flattened,
    each frame is resized with OpenCV, then the leading shape is restored.
    ``interpolation`` must be 'bilinear' or 'nearest'.
    """
    if interpolation == 'bilinear':
        inter = cv2.INTER_LINEAR
    elif interpolation == 'nearest':
        inter = cv2.INTER_NEAREST
    else:
        raise NotImplementedError
    shape = video.shape[:-3]
    video = video.reshape((-1, *video.shape[-3:]))
    resized_video = np.zeros((video.shape[0], size[1], size[0], video.shape[-1]))
    for i in range(video.shape[0]):
        img = cv2.resize(video[i], size, inter)
        if len(img.shape) == 2:
            # cv2.resize drops a singleton channel axis; restore it.
            img = img[:, :, np.newaxis]
        resized_video[i] = img
    return resized_video.reshape((*shape, size[1], size[0], video.shape[-1]))
class ToTensor(object):
    """Convert a numpy.ndarray (... x H x W x C) in the range [0, 255]
    to a float tensor of shape (... x C x H x W); when ``scale`` is set
    the values are mapped to [0.0, 1.0].
    """

    def __init__(self, scale=True):
        self.scale = scale

    def __call__(self, arr):
        if not isinstance(arr, np.ndarray):
            raise NotImplementedError
        # Move the channel axis in front of the two spatial axes.
        tensor = torch.from_numpy(np.rollaxis(arr, axis=-1, start=-3)).float()
        return tensor.div(255) if self.scale else tensor
class Normalize(object):
    """Normalize each channel in place: channel = (channel - mean) / std.
    ``mean`` and ``std`` may be scalars or per-channel lists."""

    def __init__(self, mean, std):
        mean = mean if isinstance(mean, list) else [mean]
        std = std if isinstance(std, list) else [std]
        # Shape (C, 1, 1) so the statistics broadcast over H and W.
        self.mean = torch.FloatTensor(mean).unsqueeze(1).unsqueeze(2)
        self.std = torch.FloatTensor(std).unsqueeze(1).unsqueeze(2)

    def __call__(self, tensor):
        return tensor.sub_(self.mean).div_(self.std)
class Scale(object):
    """Rescale the input numpy.ndarray to the given size.
    Args:
        size (sequence or int): Desired output size. If size is a sequence like
            (w, h), output size will be matched to this. If size is an int,
            smaller edge of the image will be matched to this number.
            i.e, if height > width, then image will be rescaled to
            (size * height / width, size)
        interpolation (int, optional): Desired interpolation. Default is
            ``bilinear``
    """

    def __init__(self, size, interpolation='bilinear'):
        # Fix: collections.Iterable was removed in Python 3.10; the ABCs
        # live in collections.abc.
        assert isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and len(size) == 2)
        self.size = size
        self.interpolation = interpolation

    def __call__(self, video):
        """
        Args:
            video (numpy.ndarray): Video to be scaled.
        Returns:
            numpy.ndarray: Rescaled video.
        """
        if isinstance(self.size, int):
            # Match the smaller spatial edge to self.size, keeping aspect ratio.
            w, h = video.shape[-2], video.shape[-3]
            if (w <= h and w == self.size) or (h <= w and h == self.size):
                return video
            if w < h:
                ow = self.size
                oh = int(self.size * h / w)
                return resize(video, (ow, oh), self.interpolation)
            else:
                oh = self.size
                ow = int(self.size * w / h)
                return resize(video, (ow, oh), self.interpolation)
        else:
            return resize(video, self.size, self.interpolation)
class CenterCrop(object):
    """Crops the given numpy.ndarray at the center to have a region of
    the given size. size can be a tuple (target_height, target_width)
    or an integer, in which case the target will be of a square shape (size, size)
    """

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            size = (int(size), int(size))
        self.size = size

    def __call__(self, video):
        h, w = video.shape[-3:-1]
        th, tw = self.size
        # Round-half-up placement of the crop window at the image center.
        top = int(round((h - th) / 2.))
        left = int(round((w - tw) / 2.))
        return video[..., top:top + th, left:left + tw, :]
class Cutout(object):
    """Cutout the given np.ndarray with rectangle mask (in place).
    Args:
        mask_size (int or tuple): size of mask (x, y when a tuple).
        mask_color: color written into the masked region.
        centered (bool): if True, cutout occurs at the center of the image;
            otherwise at a uniformly random valid position.
    """

    def __init__(self, mask_size, mask_color=(0, 0, 0), centered=True):
        if isinstance(mask_size, tuple):
            self.half_mask_size_x = mask_size[0] // 2
            self.half_mask_size_y = mask_size[1] // 2
            self.offset_x = 1 if mask_size[0] % 2 == 0 else 0
            self.offset_y = 1 if mask_size[1] % 2 == 0 else 0
        else:
            self.half_mask_size_x = self.half_mask_size_y = mask_size // 2
            even = mask_size % 2 == 0
            self.offset_x = self.offset_y = 1 if even else 0
        self.mask_color = mask_color
        self.centered = centered

    def __call__(self, video):
        h, w = video.shape[-3:-1]
        if self.centered:
            cx, cy = w // 2, h // 2
        else:
            cx = np.random.randint(self.half_mask_size_x,
                                   w + self.offset_x - self.half_mask_size_x)
            cy = np.random.randint(self.half_mask_size_y,
                                   h + self.offset_y - self.half_mask_size_y)
        # Clamp the mask rectangle to the image bounds.
        xmin = max(0, cx - self.half_mask_size_x)
        ymin = max(0, cy - self.half_mask_size_y)
        xmax = min(w, cx + self.half_mask_size_x)
        ymax = min(h, cy + self.half_mask_size_y)
        # Paint the mask color directly into the caller's array.
        video[..., ymin:ymax, xmin:xmax, :] = self.mask_color
        return video
class Pad(object):
    """Pad the two spatial dims of a 4-D (T, H, W, C) video array with a
    constant value.
    Args:
        padding (int or tuple): symmetric pad; an int pads H and W equally,
            a (pad_h, pad_w) tuple pads them independently.
        fill: constant fill value. Default is 0.
    """

    def __init__(self, padding, fill=0):
        assert isinstance(padding, numbers.Number) or isinstance(padding, tuple)
        assert isinstance(fill, numbers.Number) or isinstance(fill, str) or isinstance(fill, tuple)
        self.padding = padding
        self.fill = fill

    def __call__(self, video):
        """
        Args:
            video (np.ndarray): Video to be padded, shape (T, H, W, C).
        Returns:
            np.ndarray: Padded video.
        """
        if isinstance(self.padding, tuple):
            pad_h, pad_w = self.padding
        else:
            pad_h = pad_w = self.padding
        # Only the two middle (spatial) axes get padding.
        widths = ((0, 0), (pad_h, pad_h), (pad_w, pad_w), (0, 0))
        return np.pad(video, pad_width=widths, mode='constant', constant_values=self.fill)
class RandomCrop(object):
    """Crop the given numpy.ndarray at a random spatial location.
    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
        padding (int or sequence, optional): Optional symmetric zero padding
            applied before cropping. Default is 0, i.e. no padding.
    """

    def __init__(self, size, padding=0):
        if isinstance(size, numbers.Number):
            size = (int(size), int(size))
        self.size = size
        self.padding = padding

    def __call__(self, video):
        """
        Args:
            video (np.ndarray): Video to be cropped, shape (..., H, W, C).
        Returns:
            np.ndarray: Cropped video.
        """
        if self.padding > 0:
            video = Pad(self.padding, 0)(video)
        h, w = video.shape[-3], video.shape[-2]
        th, tw = self.size
        if h == th and w == tw:
            return video
        # random.randint is inclusive on both ends.
        left = random.randint(0, w - tw)
        top = random.randint(0, h - th)
        return video[..., top:top + th, left:left + tw, :]
class RandomHorizontalFlip(object):
    """Randomly horizontally flips the given numpy.ndarray with a probability of 0.5"""

    def __call__(self, video):
        # One coin flip per clip; every frame flips (or not) together.
        if random.random() >= 0.5:
            return video
        return video[..., ::-1, :].copy()
class RandomSizedCrop(object):
    """Crop the given np.ndarray to random size and aspect ratio.
    A crop of random size of (0.08 to 1.0) of the original size and a random
    aspect ratio of 3/4 to 4/3 of the original aspect ratio is made. This crop
    is finally resized to given size.
    This is popularly used to train the Inception networks.
    Args:
        size: size of the smaller edge
        interpolation: Default: 'bilinear'
    """

    def __init__(self, size, interpolation='bilinear'):
        self.size = size
        self.interpolation = interpolation

    def __call__(self, video):
        full_h, full_w = video.shape[-3], video.shape[-2]
        for _ in range(10):
            target_area = random.uniform(0.08, 1.0) * (full_h * full_w)
            aspect_ratio = random.uniform(3. / 4, 4. / 3)
            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))
            # Swap orientation half of the time.
            if random.random() < 0.5:
                w, h = h, w
            if w <= full_w and h <= full_h:
                x1 = random.randint(0, full_w - w)
                y1 = random.randint(0, full_h - h)
                video = video[..., y1:y1 + h, x1:x1 + w, :]
                return resize(video, (self.size, self.size), self.interpolation)
        # Fallback after 10 failed draws: deterministic scale + center crop.
        scale = Scale(self.size, interpolation=self.interpolation)
        return CenterCrop(self.size)(scale(video))
class RandomRotation(object):
    """Rotate every frame of a clip by one shared random angle within the
    given bounds.
    Args:
        degrees (sequence or int): Range of degrees to select from.
            If degrees is a number instead of a sequence like (min, max),
            the range of degrees will be (-degrees, +degrees).
    """

    def __init__(self, degrees=10):
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError('If degrees is a single number,'
                                 'must be positive')
            self.degrees = (-degrees, degrees)
        else:
            if len(degrees) != 2:
                raise ValueError('If degrees is a sequence,'
                                 'it must be of len 2.')
            self.degrees = degrees

    def __call__(self, clip):
        """
        Args:
            clip: iterable of (h, w, c) numpy.ndarray frames.
        Returns:
            numpy.ndarray: stacked rotated frames.
        """
        angle = random.uniform(self.degrees[0], self.degrees[1])
        if not isinstance(clip[0], np.ndarray):
            raise TypeError('Expected numpy.ndarray or PIL.Image' +
                            'but got list of {0}'.format(type(clip[0])))
        frames = [skimage.transform.rotate(img, angle, preserve_range=True)
                  for img in clip]
        return np.array(frames)
class ColorJitter(object):
    """Randomly change the brightness, contrast and saturation of an image.
    Args:
        brightness (float): How much to jitter brightness. brightness_factor
            is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
        contrast (float): How much to jitter contrast. contrast_factor
            is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
        saturation (float): How much to jitter saturation. saturation_factor
            is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
        hue(float): How much to jitter hue. hue_factor is chosen uniformly from
            [-hue, hue]. Should be >=0 and <= 0.5.
    """

    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue

    @staticmethod
    def get_params(brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.
        Arguments are same as that of __init__.
        Returns:
            Transform which randomly adjusts brightness, contrast and
            saturation in a random order.
        """
        transforms = []
        # Each factor is drawn once here and captured by its lambda, so the
        # same jitter is applied to every frame of the clip.
        if brightness > 0:
            brightness_factor = random.uniform(max(0, 1 - brightness), 1 + brightness)
            transforms.append(lambda img: F.adjust_brightness(img, brightness_factor))
        if contrast > 0:
            contrast_factor = random.uniform(max(0, 1 - contrast), 1 + contrast)
            transforms.append(lambda img: F.adjust_contrast(img, contrast_factor))
        if saturation > 0:
            saturation_factor = random.uniform(max(0, 1 - saturation), 1 + saturation)
            transforms.append(lambda img: F.adjust_saturation(img, saturation_factor))
        if hue > 0:
            hue_factor = random.uniform(-hue, hue)
            transforms.append(lambda img: F.adjust_hue(img, hue_factor))
        # Apply the selected adjustments in a random order.
        random.shuffle(transforms)
        return transforms

    def __call__(self, video):
        """
        Args:
            img (numpy array): Input image, shape (... x H x W x C), dtype uint8.
        Returns:
            PIL Image: Color jittered image.
        """
        transforms = self.get_params(self.brightness, self.contrast, self.saturation, self.hue)
        # Collapse leading dims; writes below go through this reshaped view,
        # so the jitter lands back in `video` when reshape returns a view.
        reshaped_video = video.reshape((-1, *video.shape[-3:]))
        n_channels = video.shape[-1]
        for i in range(reshaped_video.shape[0]):
            img = reshaped_video[i]
            if n_channels == 1:
                # PIL needs a 2-D array for single-channel images.
                img = img.squeeze(axis=2)
            img = Image.fromarray(img)
            for t in transforms:
                img = t(img)
            img = np.array(img)
            if n_channels == 1:
                img = img[..., np.newaxis]
            reshaped_video[i] = img
        video = reshaped_video.reshape(video.shape)
        return video
| 15,050 | 35.355072 | 114 | py |
null | Vid-ODE-main/visualize.py | import matplotlib
matplotlib.use('Agg')
import torch
from torchvision.utils import save_image
import os
import utils
def save_test_images(opt, preds, batch_dict, path, index):
    """Write predicted and ground-truth frames as individual PNGs under
    `path`/pred and `path`/gt, named by batch item offset and timestep."""
    preds = preds.cpu().detach()
    # NOTE(review): 'orignal_data_to_predict' is the key actually present in
    # the hurricane batch dicts (typo preserved on purpose).
    gt_key = 'orignal_data_to_predict' if opt.dataset == 'hurricane' else 'data_to_predict'
    gt = batch_dict[gt_key].cpu().detach()
    b, t, c, h, w = gt.shape

    if opt.input_norm:
        preds = utils.denorm(preds)
        gt = utils.denorm(gt)

    pred_dir = os.path.join(path, 'pred')
    gt_dir = os.path.join(path, 'gt')
    os.makedirs(pred_dir, exist_ok=True)
    os.makedirs(gt_dir, exist_ok=True)

    for i in range(b):
        for j in range(t):
            save_image(preds[i, j, ...], os.path.join(pred_dir, f"pred_{index + i:03d}_{j:03d}.png"))
            save_image(gt[i, j, ...], os.path.join(gt_dir, f"gt_{index + i:03d}_{j:03d}.png"))
def make_save_sequence(opt, batch_dict, res):
    """ 4 cases: (interp, extrap) | (regular, irregular)

    Builds (gt, pred, time_steps) tensors for visualization from a batch and
    the model output `res['pred_y']`.  NOTE: mutates `batch_dict` in place in
    the irregular case.  `time_steps` is always None here.
    """
    b, t, c, h, w = batch_dict['observed_data'].size()

    # Filter out / Select by mask
    if opt.irregular:
        observed_mask = batch_dict["observed_mask"]
        mask_predicted_data = batch_dict["mask_predicted_data"]
        # Number of observed timesteps for the first item; the .view below
        # assumes every item in the batch keeps the same count.
        selected_timesteps = int(observed_mask[0].sum())

        if opt.dataset in ['hurricane']:
            # Keep only masked frames and restack them densely.
            # NOTE(review): Tensor.byte() as a mask index is deprecated in
            # newer PyTorch; .bool() would be the modern spelling — confirm
            # before changing, as it alters the dtype passed to indexing.
            batch_dict['observed_data'] = batch_dict['observed_data'][observed_mask.squeeze(-1).byte(), ...].view(b, selected_timesteps, c, h, w)
            batch_dict['data_to_predict'] = batch_dict['data_to_predict'][mask_predicted_data.squeeze(-1).byte(), ...].view(b, selected_timesteps, c, h, w)
        else:
            # Zero out unobserved frames instead of dropping them.
            batch_dict['observed_data'] = batch_dict['observed_data'] * observed_mask.unsqueeze(-1).unsqueeze(-1)
            batch_dict['data_to_predict'] = batch_dict['data_to_predict'] * mask_predicted_data.unsqueeze(-1).unsqueeze(-1)

    # Make sequence to save
    pred = res['pred_y'].cpu().detach()

    if opt.extrap:
        # Extrapolation: ground truth is the observed half followed by the
        # half to be predicted.
        inputs = batch_dict['observed_data'].cpu().detach()
        gt_to_predict = batch_dict['data_to_predict'].cpu().detach()
        gt = torch.cat([inputs, gt_to_predict], dim=1)
    else:
        gt = batch_dict['data_to_predict'].cpu().detach()

    time_steps = None

    if opt.input_norm:
        gt = utils.denorm(gt)
        pred = utils.denorm(pred)

    return gt, pred, time_steps
def save_extrap_images(opt, gt, pred, path, total_step):
    """Save one grid image comparing ground truth against extrapolated
    predictions for up to 4 batch items (pred rows are zero-padded on the
    left half, which the model did not predict)."""
    pred = pred.cpu().detach()
    gt = gt.cpu().detach()
    b, t, c, h, w = gt.shape

    # Align prediction rows with ground truth by padding the observed half.
    pred = torch.cat([torch.zeros((b, t // 2, c, h, w)), pred], dim=1)

    rows = []
    for i in range(min([b, 4])):  # save only 4 items
        row = torch.cat([gt[i], pred[i]], dim=0)
        if opt.input_norm:
            row = utils.denorm(row)
        if row.size(1) == 1:
            # Grayscale -> 3 channels so save_image renders consistently.
            row = row.repeat(1, 3, 1, 1)
        rows.append(row)

    grid = torch.cat(rows, dim=0)
    save_image(grid, os.path.join(path, f"image_{(total_step + 1):08d}.png"), nrow=t)
def save_interp_images(opt, gt, pred, path, total_step):
    """Save one grid image comparing ground truth against interpolated
    predictions for up to 4 batch items."""
    pred = pred.cpu().detach()
    truth = gt.cpu().detach()
    b, t, c, h, w = truth.shape

    rows = []
    for i in range(min([b, 4])):  # save only 4 items
        row = torch.cat([truth[i], pred[i]], dim=0)
        if opt.input_norm:
            row = utils.denorm(row)
        if row.size(1) == 1:
            # Grayscale -> 3 channels so save_image renders consistently.
            row = row.repeat(1, 3, 1, 1)
        rows.append(row)

    grid = torch.cat(rows, dim=0)
    save_image(grid, os.path.join(path, f"image_{(total_step + 1):08d}.png"), nrow=t)
if __name__ == '__main__':
    pass  # module is import-only; no standalone CLI behaviour
| 3,680 | 31.575221 | 155 | py |
null | Vid-ODE-main/eval_models/__init__.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from skimage.measure import compare_ssim
import torch
from torch.autograd import Variable
from eval_models import dist_model
class PerceptualLoss(torch.nn.Module):
    """Perceptual (LPIPS-style) distance between two image batches.

    Wraps eval_models.dist_model.DistModel; forward(pred, target) forwards
    (target, pred) to the underlying model and returns its distance output.
    """

    def __init__(self, model='net-lin', net='alex', colorspace='rgb', spatial=False, use_gpu=True, gpu_ids=None): # VGG using our perceptually-learned weights (LPIPS metric)
    # def __init__(self, model='net', net='vgg', use_gpu=True): # "default" way of using VGG as a perceptual loss
        super(PerceptualLoss, self).__init__()
        # FIX: avoid a shared mutable default argument; [0] stays the default.
        if gpu_ids is None:
            gpu_ids = [0]
        print('Setting up Perceptual loss...')
        self.use_gpu = use_gpu
        self.spatial = spatial
        self.gpu_ids = gpu_ids
        self.model = dist_model.DistModel()
        self.model.initialize(model=model, net=net, use_gpu=use_gpu, colorspace=colorspace, spatial=self.spatial, gpu_ids=gpu_ids)
        print('...[%s] initialized'%self.model.name())
        print('...Done')

    def forward(self, pred, target, normalize=False):
        """
        Pred and target are Variables.
        If normalize is True, assumes the images are between [0,1] and then scales them between [-1,+1]
        If normalize is False, assumes the images are already between [-1,+1]

        Inputs pred and target are Nx3xHxW
        Output pytorch Variable N long
        """
        if normalize:
            target = 2 * target - 1
            pred = 2 * pred - 1

        return self.model.forward(target, pred)
def normalize_tensor(in_feat, eps=1e-10):
    """L2-normalize `in_feat` along the channel dimension (dim 1)."""
    channel_norm = torch.sqrt((in_feat ** 2).sum(dim=1, keepdim=True))
    return in_feat / (channel_norm + eps)
def l2(p0, p1, range=255.):
    """Half the mean squared error between p0 and p1, both scaled by `range`.

    NOTE: the parameter name `range` shadows the builtin; kept for
    backward compatibility with keyword callers.
    """
    diff = p0 / range - p1 / range
    return .5 * np.mean(diff ** 2)
def psnr(p0, p1, peak=255.):
    """Peak signal-to-noise ratio (dB) between p0 and p1."""
    mse = np.mean((1. * p0 - 1. * p1) ** 2)
    return 10 * np.log10(peak ** 2 / mse)
def dssim(p0, p1, range=255.):
    """Structural dissimilarity: (1 - SSIM) / 2, mapped into [0, 1]."""
    ssim = compare_ssim(p0, p1, data_range=range, multichannel=True)
    return (1 - ssim) / 2.
def rgb2lab(in_img, mean_cent=False):
    """Convert an RGB image (H x W x 3) to CIE Lab; optionally center L at 0.

    NOTE(review): shadowed by the second `rgb2lab` defined later in this
    module, so this definition is effectively dead at import time.
    """
    from skimage import color
    img_lab = color.rgb2lab(in_img)
    if mean_cent:
        img_lab[:, :, 0] = img_lab[:, :, 0] - 50
    return img_lab
def tensor2np(tensor_obj):
    """First item of a (N, C, H, W) tensor as an (H, W, C) numpy array."""
    first = tensor_obj[0].cpu().float().numpy()
    return np.transpose(first, (1, 2, 0))
def np2tensor(np_obj):
    """(H, W, C) numpy array -> (1, C, H, W) float tensor."""
    expanded = np_obj[:, :, :, np.newaxis]
    return torch.Tensor(expanded.transpose((3, 2, 0, 1)))
def tensor2tensorlab(image_tensor,to_norm=True,mc_only=False):
    # image tensor to lab tensor
    """Convert a (1, 3, H, W) RGB image tensor in [-1, 1] to a Lab tensor.

    mc_only: only mean-center the L channel (subtract 50).
    to_norm: mean-center L and divide all channels by 100.
    """
    from skimage import color

    img = tensor2im(image_tensor)
    img_lab = color.rgb2lab(img)
    if(mc_only):
        img_lab[:,:,0] = img_lab[:,:,0]-50
    if(to_norm and not mc_only):
        img_lab[:,:,0] = img_lab[:,:,0]-50
        img_lab = img_lab/100.

    return np2tensor(img_lab)
def tensorlab2tensor(lab_tensor,return_inbnd=False):
    """Convert a normalized Lab tensor back to an RGB image tensor.

    return_inbnd: additionally return a mask marking pixels whose Lab value
    survives the round-trip to RGB (i.e. was inside the RGB gamut).
    """
    from skimage import color
    import warnings
    # Silence the out-of-gamut conversion warnings from skimage.
    warnings.filterwarnings("ignore")

    # Undo the /100 and L-50 normalization applied by tensor2tensorlab.
    lab = tensor2np(lab_tensor)*100.
    lab[:,:,0] = lab[:,:,0]+50

    rgb_back = 255.*np.clip(color.lab2rgb(lab.astype('float')),0,1)
    if(return_inbnd):
        # convert back to lab, see if we match
        lab_back = color.rgb2lab(rgb_back.astype('uint8'))
        mask = 1.*np.isclose(lab_back,lab,atol=2.)
        mask = np2tensor(np.prod(mask,axis=2)[:,:,np.newaxis])
        return (im2tensor(rgb_back),mask)
    else:
        return im2tensor(rgb_back)
def rgb2lab(input):
    """Convert an RGB image with values in [0, 255] to CIE Lab.

    NOTE(review): this overrides the earlier `rgb2lab(in_img, mean_cent)`
    defined above in the same module.
    """
    from skimage import color
    scaled = input / 255.
    return color.rgb2lab(scaled)
def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.):
    """First item of a (N, C, H, W) tensor in [-1, 1] -> (H, W, C) uint8 image.

    NOTE(review): re-defined (identically up to comments) later in this
    module; the later definition wins at import time.
    """
    arr = image_tensor[0].cpu().float().numpy()
    arr = (np.transpose(arr, (1, 2, 0)) + cent) * factor
    return arr.astype(imtype)
def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):
    """(H, W, C) image in [0, 255] -> (1, C, H, W) tensor in [-1, 1].

    NOTE(review): re-defined (identically up to comments) later in this
    module; the later definition wins at import time.
    """
    scaled = image / factor - cent
    return torch.Tensor(scaled[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
def tensor2vec(vector_tensor):
    """(N, C, 1, 1) tensor -> (N, C) numpy array (drops the spatial dims)."""
    flat = vector_tensor.data.cpu().numpy()
    return flat[:, :, 0, 0]
def voc_ap(rec, prec, use_07_metric=False):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11 point method (default:False).
    """
    if use_07_metric:
        # VOC2007: average the interpolated precision at 11 recall levels.
        ap = 0.
        for thresh in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= thresh) == 0:
                interp_p = 0
            else:
                interp_p = np.max(prec[rec >= thresh])
            ap += interp_p / 11.
        return ap

    # Exact area under the PR curve: add sentinels at both ends.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # Make precision monotonically non-increasing (the envelope).
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

    # Sum precision over the recall intervals where recall changes.
    changed = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.):
    """First item of a (N, C, H, W) tensor in [-1, 1] -> (H, W, C) uint8 image.

    NOTE(review): duplicate of the `tensor2im` defined earlier in this module;
    being the later definition, this is the one callers actually get.
    """
    arr = image_tensor[0].cpu().float().numpy()
    arr = (np.transpose(arr, (1, 2, 0)) + cent) * factor
    return arr.astype(imtype)
def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):
    """(H, W, C) image in [0, 255] -> (1, C, H, W) tensor in [-1, 1].

    NOTE(review): duplicate of the `im2tensor` defined earlier in this module;
    being the later definition, this is the one callers actually get.
    """
    scaled = image / factor - cent
    return torch.Tensor(scaled[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
| 5,726 | 34.571429 | 172 | py |
null | Vid-ODE-main/eval_models/base_model.py | import os
import torch
import numpy as np
class BaseModel():
    """Minimal model base class: lifecycle hooks plus checkpoint helpers.

    Subclasses are expected to set `self.save_dir` (used by load_network and
    save_done), `self.input` (get_current_visuals) and `self.image_paths`
    (get_image_paths) before those methods are called.
    """

    def __init__(self):
        pass

    def name(self):
        return 'BaseModel'

    def initialize(self, use_gpu=True, gpu_ids=[0]):
        self.use_gpu = use_gpu
        self.gpu_ids = gpu_ids

    def forward(self):
        pass

    def optimize_parameters(self):
        pass

    def get_current_visuals(self):
        return self.input

    def get_current_errors(self):
        return {}

    def save(self, label):
        pass

    # helper saving function that can be used by subclasses
    def save_network(self, network, path, network_label, epoch_label):
        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
        save_path = os.path.join(path, save_filename)
        torch.save(network.state_dict(), save_path)

    # helper loading function that can be used by subclasses
    def load_network(self, network, network_label, epoch_label):
        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
        save_path = os.path.join(self.save_dir, save_filename)
        print('Loading network from %s'%save_path)
        network.load_state_dict(torch.load(save_path))

    def update_learning_rate(self):
        # BUG FIX: the original signature was `def update_learning_rate():`
        # (no self), so any instance call raised TypeError.
        pass

    def get_image_paths(self):
        # The original defined get_image_paths twice; the earlier no-op
        # version was dead code and has been dropped.
        return self.image_paths

    def save_done(self, flag=False):
        np.save(os.path.join(self.save_dir, 'done_flag'), flag)
        np.savetxt(os.path.join(self.save_dir, 'done_flag'), [flag, ], fmt='%i')
| 1,525 | 25.77193 | 77 | py |
null | Vid-ODE-main/eval_models/dist_model.py |
from __future__ import absolute_import
import sys
import numpy as np
import torch
from torch import nn
import os
from collections import OrderedDict
from torch.autograd import Variable
import itertools
from .base_model import BaseModel
from scipy.ndimage import zoom
import fractions
import functools
import skimage.transform
from tqdm import tqdm
from IPython import embed
from . import networks_basic as networks
import eval_models as util
class DistModel(BaseModel):
    """LPIPS distance model: wraps a (possibly linearly-calibrated) feature
    network and, in training mode, a ranking loss that maps distance pairs to
    predicted human judgments."""

    def name(self):
        return self.model_name

    def initialize(self, model='net-lin', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False, model_path=None,
                   use_gpu=True, printNet=False, spatial=False,
                   is_train=False, lr=.0001, beta1=0.5, version='0.1', gpu_ids=[0]):
        '''
        INPUTS
            model - ['net-lin'] for linearly calibrated network
                    ['net'] for off-the-shelf network
                    ['L2'] for L2 distance in Lab colorspace
                    ['SSIM'] for ssim in RGB colorspace
            net - ['squeeze','alex','vgg']
            model_path - if None, will look in weights/[NET_NAME].pth
            colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM
            use_gpu - bool - whether or not to use a GPU
            printNet - bool - whether or not to print network architecture out
            spatial - bool - whether to output an array containing varying distances across spatial dimensions
            spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below).
            spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images.
            spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear).
            is_train - bool - [True] for training mode
            lr - float - initial learning rate
            beta1 - float - initial momentum term for adam
            version - 0.1 for latest, 0.0 was original (with a bug)
            gpu_ids - int array - [0] by default, gpus to use
        '''
        BaseModel.initialize(self, use_gpu=use_gpu, gpu_ids=gpu_ids)

        self.model = model
        self.net = net
        self.is_train = is_train
        self.spatial = spatial
        self.gpu_ids = gpu_ids
        self.model_name = '%s [%s]'%(model,net)

        if(self.model == 'net-lin'): # pretrained net + linear layer
            self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,
                use_dropout=True, spatial=spatial, version=version, lpips=True)
            kw = {}
            if not use_gpu:
                kw['map_location'] = 'cpu'
            if(model_path is None):
                import inspect
                # Default weights live next to this module: weights/v<version>/<net>.pth
                model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', 'weights/v%s/%s.pth'%(version,net)))

            if(not is_train):
                print('Loading model from: %s'%model_path)
                self.net.load_state_dict(torch.load(model_path, **kw), strict=False)

        elif(self.model=='net'): # pretrained network
            self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False)
        elif(self.model in ['L2','l2']):
            self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing
            self.model_name = 'L2'
        elif(self.model in ['DSSIM','dssim','SSIM','ssim']):
            self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace)
            self.model_name = 'SSIM'
        else:
            raise ValueError("Model [%s] not recognized." % self.model)

        self.parameters = list(self.net.parameters())

        if self.is_train: # training mode
            # extra network on top to go from distances (d0,d1) => predicted human judgment (h*)
            self.rankLoss = networks.BCERankingLoss()
            self.parameters += list(self.rankLoss.net.parameters())
            self.lr = lr
            self.old_lr = lr
            self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))
        else: # test mode
            self.net.eval()

        if(use_gpu):
            self.net.to(gpu_ids[0])
            self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids)
            if(self.is_train):
                self.rankLoss = self.rankLoss.to(device=gpu_ids[0]) # just put this on GPU0

        if(printNet):
            print('---------- Networks initialized -------------')
            networks.print_network(self.net)
            print('-----------------------------------------------')

    def forward(self, in0, in1, retPerLayer=False):
        ''' Function computes the distance between image patches in0 and in1
        INPUTS
            in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]
        OUTPUT
            computed distances between in0 and in1
        '''
        return self.net.forward(in0, in1, retPerLayer=retPerLayer)

    # ***** TRAINING FUNCTIONS *****
    def optimize_parameters(self):
        # One training step: forward, backprop, optimizer step, then keep
        # the calibration weights non-negative.
        self.forward_train()
        self.optimizer_net.zero_grad()
        self.backward_train()
        self.optimizer_net.step()
        self.clamp_weights()

    def clamp_weights(self):
        # Clamp the 1x1-conv (linear calibration) weights to be >= 0.
        for module in self.net.modules():
            if(hasattr(module, 'weight') and module.kernel_size==(1,1)):
                module.weight.data = torch.clamp(module.weight.data,min=0)

    def set_input(self, data):
        # Stash the (ref, p0, p1, judge) batch and wrap the images in
        # Variables with gradients enabled.
        self.input_ref = data['ref']
        self.input_p0 = data['p0']
        self.input_p1 = data['p1']
        self.input_judge = data['judge']

        if(self.use_gpu):
            self.input_ref = self.input_ref.to(device=self.gpu_ids[0])
            self.input_p0 = self.input_p0.to(device=self.gpu_ids[0])
            self.input_p1 = self.input_p1.to(device=self.gpu_ids[0])
            self.input_judge = self.input_judge.to(device=self.gpu_ids[0])

        self.var_ref = Variable(self.input_ref,requires_grad=True)
        self.var_p0 = Variable(self.input_p0,requires_grad=True)
        self.var_p1 = Variable(self.input_p1,requires_grad=True)

    def forward_train(self): # run forward pass
        # print(self.net.module.scaling_layer.shift)
        # print(torch.norm(self.net.module.net.slice1[0].weight).item(), torch.norm(self.net.module.lin0.model[1].weight).item())

        self.d0 = self.forward(self.var_ref, self.var_p0)
        self.d1 = self.forward(self.var_ref, self.var_p1)
        self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge)

        self.var_judge = Variable(1.*self.input_judge).view(self.d0.size())

        # judge in [0,1] is mapped to [-1,1] for the ranking loss.
        self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.)

        return self.loss_total

    def backward_train(self):
        torch.mean(self.loss_total).backward()

    def compute_accuracy(self,d0,d1,judge):
        ''' d0, d1 are Variables, judge is a Tensor '''
        # Fraction of (soft) human votes that agree with the model's ranking.
        d1_lt_d0 = (d1<d0).cpu().data.numpy().flatten()
        judge_per = judge.cpu().numpy().flatten()
        return d1_lt_d0*judge_per + (1-d1_lt_d0)*(1-judge_per)

    def get_current_errors(self):
        retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
                            ('acc_r', self.acc_r)])

        for key in retDict.keys():
            retDict[key] = np.mean(retDict[key])

        return retDict

    def get_current_visuals(self):
        # Nearest-neighbour zoom of ref/p0/p1 to 256px height for display.
        zoom_factor = 256/self.var_ref.data.size()[2]

        ref_img = util.tensor2im(self.var_ref.data)
        p0_img = util.tensor2im(self.var_p0.data)
        p1_img = util.tensor2im(self.var_p1.data)

        ref_img_vis = zoom(ref_img,[zoom_factor, zoom_factor, 1],order=0)
        p0_img_vis = zoom(p0_img,[zoom_factor, zoom_factor, 1],order=0)
        p1_img_vis = zoom(p1_img,[zoom_factor, zoom_factor, 1],order=0)

        return OrderedDict([('ref', ref_img_vis),
                            ('p0', p0_img_vis),
                            ('p1', p1_img_vis)])

    def save(self, path, label):
        # DataParallel wraps the real module in .module when on GPU.
        if(self.use_gpu):
            self.save_network(self.net.module, path, '', label)
        else:
            self.save_network(self.net, path, '', label)
        self.save_network(self.rankLoss.net, path, 'rank', label)

    def update_learning_rate(self,nepoch_decay):
        # Linear decay: subtract lr/nepoch_decay per call.
        lrd = self.lr / nepoch_decay
        lr = self.old_lr - lrd

        for param_group in self.optimizer_net.param_groups:
            param_group['lr'] = lr

        # NOTE(review): `type` here is the builtin, so this prints
        # "<class 'type'>" rather than a model name — looks like a latent bug.
        print('update lr [%s] decay: %f -> %f' % (type,self.old_lr, lr))
        self.old_lr = lr
def score_2afc_dataset(data_loader, func, name=''):
    ''' Compute the Two Alternative Forced Choice (2AFC) score of distance
    function `func` over dataset `data_loader`.

    Each triplet yields distances d0=func(ref,p0), d1=func(ref,p1) and a human
    judgement gt in [0,1] (fraction of evaluators preferring p1).  The score
    is the mean agreement between the function's ranking and the human one;
    exact ties count 0.5.

    Returns:
        (score, dict(d0s=..., d1s=..., gts=..., scores=...)) with per-triplet
        distances, judgements and agreement fractions as numpy arrays.
    '''
    d0s, d1s, gts = [], [], []

    for data in tqdm(data_loader.load_data(), desc=name):
        d0s += func(data['ref'], data['p0']).data.cpu().numpy().flatten().tolist()
        d1s += func(data['ref'], data['p1']).data.cpu().numpy().flatten().tolist()
        gts += data['judge'].cpu().numpy().flatten().tolist()

    d0s = np.array(d0s)
    d1s = np.array(d1s)
    gts = np.array(gts)
    scores = (d0s < d1s) * (1. - gts) + (d1s < d0s) * gts + (d1s == d0s) * .5

    return (np.mean(scores), dict(d0s=d0s, d1s=d1s, gts=gts, scores=scores))
def score_jnd_dataset(data_loader, func, name=''):
    ''' Compute the JND (just-noticeable-difference) score of distance
    function `func` over dataset `data_loader`.

    Pairs are sorted by predicted distance and a threshold is swept over
    them; the score is the mAP (area under the precision-recall curve)
    of predicting "same" for small distances.

    Returns:
        (score, dict(ds=..., sames=...)) with per-pair distances and the
        fraction of evaluators who judged each pair identical.
    '''
    ds = []
    gts = []

    for data in tqdm(data_loader.load_data(), desc=name):
        ds += func(data['p0'], data['p1']).data.cpu().numpy().tolist()
        gts += data['same'].cpu().numpy().flatten().tolist()

    sames = np.array(gts)
    ds = np.array(ds)

    # Sweep a distance threshold in sorted order, accumulating TP/FP/FN.
    order = np.argsort(ds)
    sames_sorted = sames[order]

    TPs = np.cumsum(sames_sorted)
    FPs = np.cumsum(1 - sames_sorted)
    FNs = np.sum(sames_sorted) - TPs

    precs = TPs / (TPs + FPs)
    recs = TPs / (TPs + FNs)
    score = util.voc_ap(recs, precs)

    return (score, dict(ds=ds, sames=sames))
| 11,777 | 40.326316 | 177 | py |
null | Vid-ODE-main/eval_models/networks_basic.py |
from __future__ import absolute_import
import sys
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
import numpy as np
from skimage import color
from IPython import embed
from . import pretrained_networks as pn
import eval_models as util
def spatial_average(in_tens, keepdim=True):
    """Mean over the spatial dims (H, W) of an NCHW tensor."""
    return in_tens.mean(dim=[2, 3], keepdim=keepdim)
def upsample(in_tens, out_H=64):  # assumes scale factor is same for H and W
    """Bilinearly upsample an NCHW tensor so its height becomes out_H."""
    factor = 1. * out_H / in_tens.shape[2]
    resizer = nn.Upsample(scale_factor=factor, mode='bilinear', align_corners=False)
    return resizer(in_tens)
# Learned perceptual metric
class PNetLin(nn.Module):
    """Learned perceptual metric network.

    Runs both inputs through a pretrained backbone (vgg16 / alexnet /
    squeezenet), L2-normalizes each layer's features, squares the
    differences and aggregates them — through learned 1x1 'lin' layers when
    lpips=True, otherwise by a plain channel sum — into one distance per
    image (or a spatial distance map when spatial=True)."""

    def __init__(self, pnet_type='vgg', pnet_rand=False, pnet_tune=False, use_dropout=True, spatial=False, version='0.1', lpips=True):
        super(PNetLin, self).__init__()

        self.pnet_type = pnet_type
        self.pnet_tune = pnet_tune
        self.pnet_rand = pnet_rand
        self.spatial = spatial
        self.lpips = lpips
        self.version = version
        self.scaling_layer = ScalingLayer()

        # Per-backbone channel counts of the tapped feature layers.
        if(self.pnet_type in ['vgg','vgg16']):
            net_type = pn.vgg16
            self.chns = [64,128,256,512,512]
        elif(self.pnet_type=='alex'):
            net_type = pn.alexnet
            self.chns = [64,192,384,256,256]
        elif(self.pnet_type=='squeeze'):
            net_type = pn.squeezenet
            self.chns = [64,128,256,384,384,512,512]
        self.L = len(self.chns)

        self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune)

        if(lpips):
            # One learned 1x1-conv calibration layer per tapped feature layer.
            self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
            self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
            self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
            self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
            self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
            self.lins = [self.lin0,self.lin1,self.lin2,self.lin3,self.lin4]
            if(self.pnet_type=='squeeze'): # 7 layers for squeezenet
                self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout)
                self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout)
                self.lins+=[self.lin5,self.lin6]

    def forward(self, in0, in1, retPerLayer=False):
        # v0.0 - original release had a bug, where input was not scaled
        in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version=='0.1' else (in0, in1)
        outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input)
        feats0, feats1, diffs = {}, {}, {}

        # Squared difference of unit-normalized features at every layer.
        for kk in range(self.L):
            feats0[kk], feats1[kk] = util.normalize_tensor(outs0[kk]), util.normalize_tensor(outs1[kk])
            diffs[kk] = (feats0[kk]-feats1[kk])**2

        # Reduce each layer's diff map to either a spatial map (upsampled to
        # the input resolution) or a scalar per image.
        if(self.lpips):
            if(self.spatial):
                res = [upsample(self.lins[kk].model(diffs[kk]), out_H=in0.shape[2]) for kk in range(self.L)]
            else:
                res = [spatial_average(self.lins[kk].model(diffs[kk]), keepdim=True) for kk in range(self.L)]
        else:
            if(self.spatial):
                res = [upsample(diffs[kk].sum(dim=1,keepdim=True), out_H=in0.shape[2]) for kk in range(self.L)]
            else:
                res = [spatial_average(diffs[kk].sum(dim=1,keepdim=True), keepdim=True) for kk in range(self.L)]

        # Total distance is the sum over layers.
        val = res[0]
        for l in range(1,self.L):
            val += res[l]

        if(retPerLayer):
            return (val, res)
        else:
            return val
class ScalingLayer(nn.Module):
    """Channel-wise affine normalization with the fixed shift/scale constants
    used by the v0.1 perceptual metric: ``(x - shift) / scale``."""

    def __init__(self):
        super(ScalingLayer, self).__init__()
        shift = torch.Tensor([-.030, -.088, -.188])[None, :, None, None]
        scale = torch.Tensor([.458, .448, .450])[None, :, None, None]
        # Buffers move with the module across devices but are not trained.
        self.register_buffer('shift', shift)
        self.register_buffer('scale', scale)

    def forward(self, inp):
        return (inp - self.shift) / self.scale
class NetLinLayer(nn.Module):
    ''' A single linear layer which does a 1x1 conv '''

    def __init__(self, chn_in, chn_out=1, use_dropout=False):
        super(NetLinLayer, self).__init__()
        modules = []
        if use_dropout:
            modules.append(nn.Dropout())
        modules.append(nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False))
        self.model = nn.Sequential(*modules)
class Dist2LogitLayer(nn.Module):
    ''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) '''

    def __init__(self, chn_mid=32, use_sigmoid=True):
        super(Dist2LogitLayer, self).__init__()
        stack = [
            nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True),
        ]
        if use_sigmoid:
            stack.append(nn.Sigmoid())
        self.model = nn.Sequential(*stack)

    def forward(self, d0, d1, eps=0.1):
        # Five hand-crafted features of the two distances, stacked channel-wise.
        feats = torch.cat((d0, d1, d0 - d1, d0 / (d1 + eps), d1 / (d0 + eps)), dim=1)
        return self.model.forward(feats)
class BCERankingLoss(nn.Module):
    """Binary-cross-entropy ranking loss: maps a pair of distances (d0, d1)
    to a probability via Dist2LogitLayer and compares it to the judgement."""

    def __init__(self, chn_mid=32):
        super(BCERankingLoss, self).__init__()
        self.net = Dist2LogitLayer(chn_mid=chn_mid)
        # self.parameters = list(self.net.parameters())
        self.loss = torch.nn.BCELoss()

    def forward(self, d0, d1, judge):
        # Map judgements from [-1, 1] into the [0, 1] range expected by BCE.
        per = (judge + 1.) / 2.
        self.logit = self.net.forward(d0, d1)
        return self.loss(self.logit, per)
# L2, DSSIM metrics
class FakeNet(nn.Module):
def __init__(self, use_gpu=True, colorspace='Lab'):
super(FakeNet, self).__init__()
self.use_gpu = use_gpu
self.colorspace=colorspace
class L2(FakeNet):
    def forward(self, in0, in1, retPerLayer=None):
        """Per-image mean squared error between in0 and in1 (batch size 1 only)."""
        assert(in0.size()[0]==1) # currently only supports batchSize 1

        if self.colorspace == 'RGB':
            N, C, X, Y = in0.size()
            sq_err = (in0 - in1) ** 2
            # Successively average over channel, height, then width.
            value = torch.mean(torch.mean(torch.mean(sq_err, dim=1).view(N, 1, X, Y), dim=2).view(N, 1, 1, Y), dim=3).view(N)
            return value
        elif self.colorspace == 'Lab':
            lab0 = util.tensor2np(util.tensor2tensorlab(in0.data, to_norm=False))
            lab1 = util.tensor2np(util.tensor2tensorlab(in1.data, to_norm=False))
            value = util.l2(lab0, lab1, range=100.).astype('float')
            ret_var = Variable(torch.Tensor((value,)))
            if self.use_gpu:
                ret_var = ret_var.cuda()
            return ret_var
class DSSIM(FakeNet):
    def forward(self, in0, in1, retPerLayer=None):
        """Structural-dissimilarity score between in0 and in1 (batch size 1 only)."""
        assert(in0.size()[0]==1) # currently only supports batchSize 1

        if self.colorspace == 'RGB':
            value = util.dssim(1. * util.tensor2im(in0.data), 1. * util.tensor2im(in1.data), range=255.).astype('float')
        elif self.colorspace == 'Lab':
            lab0 = util.tensor2np(util.tensor2tensorlab(in0.data, to_norm=False))
            lab1 = util.tensor2np(util.tensor2tensorlab(in1.data, to_norm=False))
            value = util.dssim(lab0, lab1, range=100.).astype('float')
        ret_var = Variable(torch.Tensor((value,)))
        if self.use_gpu:
            ret_var = ret_var.cuda()
        return ret_var
def print_network(net):
    """Print a network's structure followed by its total parameter count."""
    num_params = sum(param.numel() for param in net.parameters())
    print('Network', net)
    print('Total number of parameters: %d' % num_params)
| 7,447 | 38.828877 | 134 | py |
null | Vid-ODE-main/eval_models/pretrained_networks.py | from collections import namedtuple
import torch
from torchvision import models as tv
from IPython import embed
class squeezenet(torch.nn.Module):
    """SqueezeNet-1.1 feature extractor split into 7 sequential slices;
    forward returns the activation after each slice as a namedtuple."""

    def __init__(self, requires_grad=False, pretrained=True):
        super(squeezenet, self).__init__()
        pretrained_features = tv.squeezenet1_1(pretrained=pretrained).features
        self.N_slices = 7
        # Layer-index boundaries of the seven slices within `features`.
        boundaries = [0, 2, 5, 8, 10, 11, 12, 13]
        slices = []
        for lo, hi in zip(boundaries[:-1], boundaries[1:]):
            block = torch.nn.Sequential()
            for x in range(lo, hi):
                block.add_module(str(x), pretrained_features[x])
            slices.append(block)
        (self.slice1, self.slice2, self.slice3, self.slice4,
         self.slice5, self.slice6, self.slice7) = slices
        if not requires_grad:
            # Freeze the backbone so it acts as a fixed feature extractor.
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        activations = []
        h = X
        for block in (self.slice1, self.slice2, self.slice3, self.slice4,
                      self.slice5, self.slice6, self.slice7):
            h = block(h)
            activations.append(h)
        vgg_outputs = namedtuple("SqueezeOutputs", ['relu1','relu2','relu3','relu4','relu5','relu6','relu7'])
        return vgg_outputs(*activations)
class alexnet(torch.nn.Module):
    """AlexNet feature extractor split into 5 sequential slices; forward
    returns the activation after each slice as a namedtuple."""

    def __init__(self, requires_grad=False, pretrained=True):
        super(alexnet, self).__init__()
        alexnet_pretrained_features = tv.alexnet(pretrained=pretrained).features
        self.N_slices = 5
        # Layer-index boundaries of the five slices within `features`.
        boundaries = [0, 2, 5, 8, 10, 12]
        slices = []
        for lo, hi in zip(boundaries[:-1], boundaries[1:]):
            block = torch.nn.Sequential()
            for x in range(lo, hi):
                block.add_module(str(x), alexnet_pretrained_features[x])
            slices.append(block)
        (self.slice1, self.slice2, self.slice3,
         self.slice4, self.slice5) = slices
        if not requires_grad:
            # Freeze the backbone so it acts as a fixed feature extractor.
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        activations = []
        h = X
        for block in (self.slice1, self.slice2, self.slice3,
                      self.slice4, self.slice5):
            h = block(h)
            activations.append(h)
        alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5'])
        return alexnet_outputs(*activations)
class vgg16(torch.nn.Module):
    """VGG-16 feature extractor split into 5 sequential slices; forward
    returns the activation after each slice as a namedtuple."""

    def __init__(self, requires_grad=False, pretrained=True):
        super(vgg16, self).__init__()
        vgg_pretrained_features = tv.vgg16(pretrained=pretrained).features
        self.N_slices = 5
        # Layer-index boundaries of the five slices within `features`.
        boundaries = [0, 4, 9, 16, 23, 30]
        slices = []
        for lo, hi in zip(boundaries[:-1], boundaries[1:]):
            block = torch.nn.Sequential()
            for x in range(lo, hi):
                block.add_module(str(x), vgg_pretrained_features[x])
            slices.append(block)
        (self.slice1, self.slice2, self.slice3,
         self.slice4, self.slice5) = slices
        if not requires_grad:
            # Freeze the backbone so it acts as a fixed feature extractor.
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        activations = []
        h = X
        for block in (self.slice1, self.slice2, self.slice3,
                      self.slice4, self.slice5):
            h = block(h)
            activations.append(h)
        vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
        return vgg_outputs(*activations)
class resnet(torch.nn.Module):
    """ResNet feature extractor; forward returns the stem activation and the
    output of each of the four residual stages as a namedtuple.

    NOTE(review): unlike the other extractors here, this one never freezes its
    parameters when ``requires_grad=False`` — behavior kept as-is.
    """

    def __init__(self, requires_grad=False, pretrained=True, num=18):
        super(resnet, self).__init__()
        # Select tv.resnet18 / resnet34 / resnet50 / resnet101 / resnet152.
        self.net = getattr(tv, 'resnet%d' % num)(pretrained=pretrained)
        self.N_slices = 5
        for attr in ('conv1', 'bn1', 'relu', 'maxpool',
                     'layer1', 'layer2', 'layer3', 'layer4'):
            setattr(self, attr, getattr(self.net, attr))

    def forward(self, X):
        h = self.relu(self.bn1(self.conv1(X)))
        h_relu1 = h
        h = self.layer1(self.maxpool(h))
        h_conv2 = h
        h = self.layer2(h)
        h_conv3 = h
        h = self.layer3(h)
        h_conv4 = h
        h = self.layer4(h)
        h_conv5 = h
        outputs = namedtuple("Outputs", ['relu1','conv2','conv3','conv4','conv5'])
        return outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5)
| 6,533 | 34.901099 | 109 | py |
null | Vid-ODE-main/models/__init__.py | 0 | 0 | 0 | py | |
null | Vid-ODE-main/models/base_conv_gru.py | import torch
import torch.nn as nn
import sys
sys.path.append('../')
sys.path.append('./')
import utils
class ConvGRUCell(nn.Module):
    """A single convolutional GRU cell operating on (b, c, h, w) feature maps."""

    def __init__(self, input_size, input_dim, hidden_dim, kernel_size, bias, dtype):
        """
        :param input_size: (int, int) / Height and width of input tensor as (height, width).
        :param input_dim: int / Number of channels of input tensor.
        :param hidden_dim: int / Number of channels of hidden state.
        :param kernel_size: (int, int) / Size of the convolutional kernel.
        :param bias: bool / Whether or not to add the bias.
        :param dtype: torch.cuda.FloatTensor or torch.FloatTensor / Whether or not to use cuda.
        """
        super(ConvGRUCell, self).__init__()
        self.height, self.width = input_size
        # 'Same' padding so the spatial size is preserved by the convolutions.
        self.padding = kernel_size[0] // 2, kernel_size[1] // 2
        self.hidden_dim = hidden_dim
        self.bias = bias
        self.dtype = dtype

        # One convolution computes both gates (reset, update) at once.
        self.conv_gates = nn.Conv2d(in_channels=input_dim + hidden_dim,
                                    out_channels=2 * self.hidden_dim,  # for update_gate, reset_gate respectively
                                    kernel_size=kernel_size,
                                    padding=self.padding,
                                    bias=self.bias)

        # Candidate hidden state.
        self.conv_can = nn.Conv2d(in_channels=input_dim + hidden_dim,
                                  out_channels=self.hidden_dim,  # for candidate neural memory
                                  kernel_size=kernel_size,
                                  padding=self.padding,
                                  bias=self.bias)

    def init_hidden(self, batch_size):
        """Return an all-zero hidden state of shape (batch, hidden_dim, H, W)."""
        return (torch.zeros(batch_size, self.hidden_dim, self.height, self.width)).type(self.dtype)

    def forward(self, input_tensor, h_cur, mask=None):
        """
        :param input_tensor: (b, c, h, w) / input is actually the target_model
        :param h_cur: (b, c_hidden, h, w) / current hidden and cell states respectively
        :param mask: (b,) per-sample 0/1 observation mask; None means every
            sample is observed and the hidden state is always updated.
        :return: h_next, next hidden state
        """
        combined = torch.cat([input_tensor, h_cur], dim=1)
        combined_conv = self.conv_gates(combined)

        gamma, beta = torch.split(combined_conv, self.hidden_dim, dim=1)
        reset_gate = torch.sigmoid(gamma)
        update_gate = torch.sigmoid(beta)

        combined = torch.cat([input_tensor, reset_gate * h_cur], dim=1)
        cc_cnm = self.conv_can(combined)
        cnm = torch.tanh(cc_cnm)

        h_next = (1 - update_gate) * h_cur + update_gate * cnm

        # Bug fix: the original dereferenced `mask` unconditionally, so the
        # documented default of mask=None raised AttributeError. With no mask
        # every sample counts as observed and h_next is returned unchanged.
        if mask is None:
            return h_next
        # Unobserved samples (mask == 0) keep their previous hidden state.
        mask = mask.view(-1, 1, 1, 1).expand_as(h_cur)
        h_next = mask * h_next + (1 - mask) * h_cur

        return h_next
class Encoder_z0_ODE_ConvGRU(nn.Module):
    """ODE-ConvGRU encoder: interleaves one Euler ODE step (via
    ``z0_diffeq_solver.ode_func``) with a ConvGRU update per observation and
    maps the final hidden state to the mean/std of the initial latent z0."""

    def __init__(self, input_size, input_dim, hidden_dim, kernel_size, num_layers, dtype, batch_first=False,
                 bias=True, return_all_layers=False, z0_diffeq_solver=None, run_backwards=None):
        super(Encoder_z0_ODE_ConvGRU, self).__init__()

        # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
        if not len(kernel_size) == len(hidden_dim) == num_layers:
            raise ValueError('Inconsistent list length.')

        self.height, self.width = input_size
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        self.dtype = dtype
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bias = bias
        self.return_all_layers = return_all_layers
        self.z0_diffeq_solver = z0_diffeq_solver
        self.run_backwards = run_backwards

        ##### By product for visualization
        self.by_product = {}

        cell_list = []
        for i in range(0, self.num_layers):
            # First layer consumes the input; deeper layers consume the
            # previous layer's hidden state.
            cur_input_dim = input_dim if i == 0 else hidden_dim[i - 1]
            cell_list.append(ConvGRUCell(input_size=(self.height, self.width),
                                         input_dim=cur_input_dim,
                                         hidden_dim=self.hidden_dim[i],
                                         kernel_size=self.kernel_size[i],
                                         bias=self.bias,
                                         dtype=self.dtype))

        # convert python list to pytorch module
        self.cell_list = nn.ModuleList(cell_list)

        # last conv layer for generating mu, sigma
        self.z0_dim = hidden_dim[0]
        z = hidden_dim[0]
        self.transform_z0 = nn.Sequential(
            nn.Conv2d(z, z, 1, 1, 0),
            nn.ReLU(),
            nn.Conv2d(z, z * 2, 1, 1, 0), )

    def forward(self, input_tensor, time_steps, mask=None, tracker=None):
        """Encode an observed sequence into (mean_z0, std_z0), each of shape
        (b, hidden_dim, H, W)."""
        if not self.batch_first:
            # (t, b, c, h, w) -> (b, t, c, h, w)
            input_tensor = input_tensor.permute(1, 0, 2, 3, 4)

        assert (input_tensor.size(1) == len(time_steps)), "Sequence length should be same as time_steps"

        last_yi, latent_ys = self.run_ode_conv_gru(
            input_tensor=input_tensor,
            mask=mask,
            time_steps=time_steps,
            run_backwards=self.run_backwards,
            tracker=tracker)

        # 1x1 conv head doubles the channels, then split into mean and std.
        trans_last_yi = self.transform_z0(last_yi)

        mean_z0, std_z0 = torch.split(trans_last_yi, self.z0_dim, dim=1)
        # std must be non-negative.
        std_z0 = std_z0.abs()

        return mean_z0, std_z0

    def run_ode_conv_gru(self, input_tensor, mask, time_steps, run_backwards=True, tracker=None):
        """Alternate a single explicit-Euler ODE step with a ConvGRU update at
        each observed time point; returns the final hidden state and the stack
        of all intermediate hidden states."""

        b, t, c, h, w = input_tensor.size()
        device = utils.get_device(input_tensor)

        # Set initial inputs
        prev_input_tensor = torch.zeros((b, c, h, w)).to(device)

        # Time configuration
        # Run ODE backwards and combine the y(t) estimates using gating
        prev_t, t_i = time_steps[-1] + 0.01, time_steps[-1]

        latent_ys = []
        time_points_iter = range(0, time_steps.size(-1))
        if run_backwards:
            time_points_iter = reversed(time_points_iter)

        for idx, i in enumerate(time_points_iter):
            # One Euler step: y += f(t, y) * dt  (dt = t_i - prev_t).
            inc = self.z0_diffeq_solver.ode_func(prev_t, prev_input_tensor) * (t_i - prev_t)
            assert (not torch.isnan(inc).any())
            tracker.write_info(key=f"inc{idx}", value=inc.clone().cpu())

            ode_sol = prev_input_tensor + inc
            tracker.write_info(key=f"prev_input_tensor{idx}", value=prev_input_tensor.clone().cpu())
            tracker.write_info(key=f"ode_sol{idx}", value=ode_sol.clone().cpu())
            ode_sol = torch.stack((prev_input_tensor, ode_sol), dim=1)  # [1, b, 2, c, h, w] => [b, 2, c, h, w]
            assert (not torch.isnan(ode_sol).any())

            # Sanity check: the first point of the solution must equal the
            # state the step started from.
            if torch.mean(ode_sol[:, 0, :] - prev_input_tensor) >= 0.001:
                print("Error: first point of the ODE is not equal to initial value")
                print(torch.mean(ode_sol[:, :, 0, :] - prev_input_tensor))
                exit()
            yi_ode = ode_sol[:, -1, :]
            xi = input_tensor[:, i, :]

            # only 1 now
            yi = self.cell_list[0](input_tensor=xi,
                                   h_cur=yi_ode,
                                   mask=mask[:, i])

            # return to iteration
            prev_input_tensor = yi
            prev_t, t_i = time_steps[i], time_steps[i - 1]
            latent_ys.append(yi)

        latent_ys = torch.stack(latent_ys, 1)

        return yi, latent_ys

    def _init_hidden(self, batch_size):
        # One zero-initialized hidden state per ConvGRU layer.
        init_states = []
        for i in range(self.num_layers):
            init_states.append(self.cell_list[i].init_hidden(batch_size))
        return init_states

    @staticmethod
    def _check_kernel_size_consistency(kernel_size):
        if not (isinstance(kernel_size, tuple) or
                (isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
            raise ValueError('`kernel_size` must be tuple or list of tuples')

    @staticmethod
    def _extend_for_multilayer(param, num_layers):
        # Broadcast a scalar setting to one entry per layer.
        if not isinstance(param, list):
            param = [param] * num_layers
        return param
def get_norm_layer(ch):
    """Return the normalization layer used throughout this module (BatchNorm2d)."""
    return nn.BatchNorm2d(ch)
class Encoder(nn.Module):
    """Conv encoder: a 3x3 stem followed by ``n_downs`` stride-2 downsampling
    stages, doubling the channel count at every stage."""

    def __init__(self, input_dim=3, ch=64, n_downs=2):
        super(Encoder, self).__init__()
        # get_norm_layer is BatchNorm2d; inlined here.
        layers = [nn.Conv2d(input_dim, ch, 3, 1, 1), nn.BatchNorm2d(ch), nn.ReLU()]
        width = ch
        for _ in range(n_downs):
            layers += [nn.Conv2d(width, width * 2, 4, 2, 1),
                       nn.BatchNorm2d(width * 2),
                       nn.ReLU()]
            width *= 2
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)
class Decoder(nn.Module):
    """Conv decoder: ``n_ups`` bilinear-upsample+conv stages halving the
    channel count, followed by a final 3x3 projection to ``output_dim``."""

    def __init__(self, input_dim=256, output_dim=3, n_ups=2):
        super(Decoder, self).__init__()
        layers = []
        width = input_dim
        for _ in range(n_ups):
            layers += [nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
                       nn.Conv2d(width, width // 2, 3, 1, 1),
                       nn.BatchNorm2d(width // 2),  # get_norm_layer is BatchNorm2d; inlined
                       nn.ReLU()]
            width //= 2
        layers.append(nn.Conv2d(width, output_dim, 3, 1, 1))
        # The original kept a Tanh output commented out; preserved as disabled.
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)
null | Vid-ODE-main/models/conv_odegru.py | import torch
import torch.nn as nn
from models.base_conv_gru import *
from models.ode_func import ODEFunc, DiffeqSolver
from models.layers import create_convnet
class VidODE(nn.Module):
    """Video ODE model: conv-encodes frames, encodes them into an initial
    latent with an ODE-ConvGRU, decodes a latent trajectory with an ODE
    solver, and composes output frames from predicted optical flow, an
    intermediate image, and a blending mask."""

    def __init__(self, opt, device):
        super(VidODE, self).__init__()

        self.opt = opt
        self.device = device

        # initial function
        self.build_model()

        # tracker
        self.tracker = utils.Tracker()

    def build_model(self):
        """Construct the conv encoder/decoder and the two ODE branches
        (encoder-side and decoder-side) as submodules."""

        # channels for encoder, ODE, init decoder
        init_dim = self.opt.init_dim
        resize = 2 ** self.opt.n_downs
        base_dim = init_dim * resize
        input_size = (self.opt.input_size // resize, self.opt.input_size // resize)
        ode_dim = base_dim

        print(f"Building models... base_dim:{base_dim}")

        ##### Conv Encoder
        self.encoder = Encoder(input_dim=self.opt.input_dim,
                               ch=init_dim,
                               n_downs=self.opt.n_downs).to(self.device)

        ##### ODE Encoder
        ode_func_netE = create_convnet(n_inputs=ode_dim,
                                       n_outputs=base_dim,
                                       n_layers=self.opt.n_layers,
                                       n_units=base_dim // 2).to(self.device)

        rec_ode_func = ODEFunc(opt=self.opt,
                               input_dim=ode_dim,
                               latent_dim=base_dim,  # channels after encoder, & latent dimension
                               ode_func_net=ode_func_netE,
                               device=self.device).to(self.device)

        # Encoder-side solver always uses fixed-step Euler.
        z0_diffeq_solver = DiffeqSolver(base_dim,
                                        ode_func=rec_ode_func,
                                        method="euler",
                                        latents=base_dim,
                                        odeint_rtol=1e-3,
                                        odeint_atol=1e-4,
                                        device=self.device)

        self.encoder_z0 = Encoder_z0_ODE_ConvGRU(input_size=input_size,
                                                 input_dim=base_dim,
                                                 hidden_dim=base_dim,
                                                 kernel_size=(3, 3),
                                                 num_layers=1,
                                                 dtype=torch.cuda.FloatTensor if self.device == 'cuda' else torch.FloatTensor,
                                                 batch_first=True,
                                                 bias=True,
                                                 return_all_layers=True,
                                                 z0_diffeq_solver=z0_diffeq_solver,
                                                 run_backwards=self.opt.run_backwards).to(self.device)

        ##### ODE Decoder
        ode_func_netD = create_convnet(n_inputs=ode_dim,
                                       n_outputs=base_dim,
                                       n_layers=self.opt.n_layers,
                                       n_units=base_dim // 2).to(self.device)

        gen_ode_func = ODEFunc(opt=self.opt,
                               input_dim=ode_dim,
                               latent_dim=base_dim,
                               ode_func_net=ode_func_netD,
                               device=self.device).to(self.device)

        # Decoder-side solver method is configurable (opt.dec_diff).
        self.diffeq_solver = DiffeqSolver(base_dim,
                                          gen_ode_func,
                                          self.opt.dec_diff, base_dim,
                                          odeint_rtol=1e-3,
                                          odeint_atol=1e-4,
                                          device=self.device)

        ##### Conv Decoder
        # Input is [current latent, previous latent] concatenated; output is
        # 2 flow channels + input_dim image channels + 1 mask channel.
        self.decoder = Decoder(input_dim=base_dim * 2, output_dim=self.opt.input_dim + 3, n_ups=self.opt.n_downs).to(self.device)

    def get_reconstruction(self, time_steps_to_predict, truth, truth_time_steps, mask=None, out_mask=None):
        """Predict frames at ``time_steps_to_predict`` from the observed
        sequence ``truth``; returns (pred_x, extra_info) where extra_info
        holds the flow/intermediate/mask by-products."""

        truth = truth.to(self.device)
        truth_time_steps = truth_time_steps.to(self.device)
        mask = mask.to(self.device)
        out_mask = out_mask.to(self.device)
        time_steps_to_predict = time_steps_to_predict.to(self.device)

        resize = 2 ** self.opt.n_downs
        b, t, c, h, w = truth.shape
        pred_t_len = len(time_steps_to_predict)

        ##### Skip connection forwarding
        # Extrapolation warps forward from the last observed frame,
        # interpolation from the first.
        skip_image = truth[:, -1, ...] if self.opt.extrap else truth[:, 0, ...]
        skip_conn_embed = self.encoder(skip_image).view(b, -1, h // resize, w // resize)

        ##### Conv encoding
        e_truth = self.encoder(truth.view(b * t, c, h, w)).view(b, t, -1, h // resize, w // resize)

        ##### ODE encoding
        first_point_mu, first_point_std = self.encoder_z0(input_tensor=e_truth, time_steps=truth_time_steps, mask=mask, tracker=self.tracker)

        # Sampling latent features
        # NOTE(review): no noise is added here — the "sample" is the mean.
        first_point_enc = first_point_mu.unsqueeze(0).repeat(1, 1, 1, 1, 1)

        # ==================================================================================== #

        ##### ODE decoding
        first_point_enc = first_point_enc.squeeze(0)
        sol_y = self.diffeq_solver(first_point_enc, time_steps_to_predict)
        self.tracker.write_info(key="sol_y", value=sol_y.clone().cpu())

        ##### Conv decoding
        sol_y = sol_y.contiguous().view(b, pred_t_len, -1, h // resize, w // resize)

        # regular b, t, 6, h, w / irregular b, t * ratio, 6, h, w
        pred_outputs = self.get_flowmaps(sol_out=sol_y, first_prev_embed=skip_conn_embed, mask=out_mask)  # b, t, 6, h, w
        pred_outputs = torch.cat(pred_outputs, dim=1)

        # Split decoder channels: 2 flow, input_dim intermediate image,
        # remaining channels -> sigmoid blending mask.
        pred_flows, pred_intermediates, pred_masks = \
            pred_outputs[:, :, :2, ...], pred_outputs[:, :, 2:2+self.opt.input_dim, ...], torch.sigmoid(pred_outputs[:, :, 2+self.opt.input_dim:, ...])

        ### Warping first frame by using optical flow
        # Declare grid for warping
        grid_x = torch.linspace(-1.0, 1.0, w).view(1, 1, w, 1).expand(b, h, -1, -1)
        grid_y = torch.linspace(-1.0, 1.0, h).view(1, h, 1, 1).expand(b, -1, w, -1)
        grid = torch.cat([grid_x, grid_y], 3).float().to(self.device)  # [b, h, w, 2]

        # Warping
        last_frame = truth[:, -1, ...] if self.opt.extrap else truth[:, 0, ...]
        warped_pred_x = self.get_warped_images(pred_flows=pred_flows, start_image=last_frame, grid=grid)
        warped_pred_x = torch.cat(warped_pred_x, dim=1)  # regular b, t, 6, h, w / irregular b, t * ratio, 6, h, w

        # Per-pixel blend of warped frame and synthesized intermediate image.
        pred_x = pred_masks * warped_pred_x + (1 - pred_masks) * pred_intermediates
        pred_x = pred_x.view(b, -1, c, h, w)

        ### extra information
        extra_info = {}
        extra_info["optical_flow"] = pred_flows
        extra_info["warped_pred_x"] = warped_pred_x
        extra_info["pred_intermediates"] = pred_intermediates
        extra_info["pred_masks"] = pred_masks

        return pred_x, extra_info

    def get_mse(self, truth, pred_x, mask=None):
        """Mean absolute error between pred_x and the (optionally masked)
        ground truth, normalized by the number of selected elements.
        NOTE(review): despite the name, this is an L1, not squared, error."""

        b, _, c, h, w = truth.size()

        if mask is None:
            selected_time_len = truth.size(1)
            selected_truth = truth
        else:
            # Keep only the time steps marked as predicted in the mask.
            selected_time_len = int(mask[0].sum())
            selected_truth = truth[mask.squeeze(-1).byte()].view(b, selected_time_len, c, h, w)
        loss = torch.sum(torch.abs(pred_x - selected_truth)) / (b * selected_time_len * c * h * w)
        return loss

    def get_diff(self, data, mask=None):
        """Temporal difference of consecutive frames, restricted to the time
        steps selected by ``mask``."""
        data_diff = data[:, 1:, ...] - data[:, :-1, ...]
        b, _, c, h, w = data_diff.size()
        selected_time_len = int(mask[0].sum())
        masked_data_diff = data_diff[mask.squeeze(-1).byte()].view(b, selected_time_len, c, h, w)

        return masked_data_diff

    def export_infos(self):
        """Return the tracker's accumulated debug info and reset it."""
        infos = self.tracker.export_info()
        self.tracker.clean_info()
        return infos

    def get_flowmaps(self, sol_out, first_prev_embed, mask):
        """ Get flowmaps recursively
        Input:
            sol_out - Latents from ODE decoder solver (b, time_steps_to_predict, c, h, w)
            first_prev_embed - Latents of last frame (b, c, h, w)

        Output:
            pred_flows - List of predicted flowmaps (b, time_steps_to_predict, c, h, w)
        """
        b, _, c, h, w = sol_out.size()
        pred_time_steps = int(mask[0].sum())
        pred_flows = list()

        prev = first_prev_embed.clone()
        time_iter = range(pred_time_steps)

        if mask.size(1) == sol_out.size(1):
            sol_out = sol_out[mask.squeeze(-1).byte()].view(b, pred_time_steps, c, h, w)

        for t in time_iter:
            # Decoder sees the current latent and the previous one.
            cur_and_prev = torch.cat([sol_out[:, t, ...], prev], dim=1)
            pred_flow = self.decoder(cur_and_prev).unsqueeze(1)
            pred_flows += [pred_flow]
            prev = sol_out[:, t, ...].clone()

        return pred_flows

    def get_warped_images(self, pred_flows, start_image, grid):
        """ Get warped images recursively
        Input:
            pred_flows - Predicted flowmaps to use (b, time_steps_to_predict, c, h, w)
            start_image- Start image to warp
            grid - pre-defined grid

        Output:
            pred_x - List of warped (b, time_steps_to_predict, c, h, w)
        """
        warped_time_steps = pred_flows.size(1)
        pred_x = list()
        last_frame = start_image
        b, _, c, h, w = pred_flows.shape

        for t in range(warped_time_steps):
            pred_flow = pred_flows[:, t, ...]  # b, 2, h, w
            # Normalize pixel displacements into grid_sample's [-1, 1] range.
            pred_flow = torch.cat([pred_flow[:, 0:1, :, :] / ((w - 1.0) / 2.0), pred_flow[:, 1:2, :, :] / ((h - 1.0) / 2.0)], dim=1)
            pred_flow = pred_flow.permute(0, 2, 3, 1)  # b, h, w, 2
            flow_grid = grid.clone() + pred_flow.clone()  # b, h, w, 2
            warped_x = nn.functional.grid_sample(last_frame, flow_grid, padding_mode="border")
            pred_x += [warped_x.unsqueeze(1)]  # b, 1, 3, h, w
            # Warp recursively: the next step starts from the warped frame.
            last_frame = warped_x.clone()

        return pred_x

    def compute_all_losses(self, batch_dict):
        """Run a full reconstruction and return a dict with the total loss
        (frame reconstruction + frame-difference term) and the predictions."""

        batch_dict["tp_to_predict"] = batch_dict["tp_to_predict"].to(self.device)
        batch_dict["observed_data"] = batch_dict["observed_data"].to(self.device)
        batch_dict["observed_tp"] = batch_dict["observed_tp"].to(self.device)
        batch_dict["observed_mask"] = batch_dict["observed_mask"].to(self.device)
        batch_dict["data_to_predict"] = batch_dict["data_to_predict"].to(self.device)
        batch_dict["mask_predicted_data"] = batch_dict["mask_predicted_data"].to(self.device)

        pred_x, extra_info = self.get_reconstruction(
            time_steps_to_predict=batch_dict["tp_to_predict"],
            truth=batch_dict["observed_data"],
            truth_time_steps=batch_dict["observed_tp"],
            mask=batch_dict["observed_mask"],
            out_mask=batch_dict["mask_predicted_data"])

        # batch-wise mean
        loss = torch.mean(self.get_mse(truth=batch_dict["data_to_predict"],
                                       pred_x=pred_x,
                                       mask=batch_dict["mask_predicted_data"]))

        # The difference term anchors on the first (interp) or last (extrap)
        # observed frame, matching the skip frame used in get_reconstruction.
        if not self.opt.extrap:
            init_image = batch_dict["observed_data"][:, 0, ...]
        else:
            init_image = batch_dict["observed_data"][:, -1, ...]

        data = torch.cat([init_image.unsqueeze(1), batch_dict["data_to_predict"]], dim=1)
        data_diff = self.get_diff(data=data, mask=batch_dict["mask_predicted_data"])
        # Supervise the synthesized intermediate images with frame differences.
        loss = loss + torch.mean(self.get_mse(truth=data_diff, pred_x=extra_info["pred_intermediates"], mask=None))

        results = {}
        results["loss"] = torch.mean(loss)
        results["pred_y"] = pred_x

        return results
| 12,161 | 43.065217 | 151 | py |
null | Vid-ODE-main/models/gan.py | import torch
import torch.nn as nn
import torch.optim as optim
class ConvNormAct(nn.Module):
    """Conv2d -> InstanceNorm2d -> activation ('relu', 'lrelu', or none)."""

    def __init__(self, in_ch, out_ch, kernel_size, stride, padding, act_type='relu'):
        super(ConvNormAct, self).__init__()
        blocks = [
            nn.Conv2d(in_ch, out_ch, kernel_size, stride, padding),
            nn.InstanceNorm2d(out_ch),
        ]
        if act_type == 'relu':
            blocks.append(nn.ReLU(inplace=True))
        elif act_type == 'lrelu':
            blocks.append(nn.LeakyReLU(0.2))
        self.main = nn.Sequential(*blocks)

    def forward(self, x):
        return self.main(x)
class Discriminator(nn.Module):
    """PatchGAN-style discriminator.

    Used in two flavours: per-frame (``seq=False``, rates each frame
    independently) and sequence-level (``seq=True``, rates channel-stacked
    frame sequences assembled by the rearrange helpers). Losses are
    least-squares GAN losses.
    """

    def __init__(self, in_ch, device, seq=False, is_extrap=True):
        super(Discriminator, self).__init__()
        self.device = device
        self.seq = seq
        self.is_extrap = is_extrap

        self.layer_1 = nn.Sequential(
            nn.Conv2d(in_ch, 64, kernel_size=4, stride=2, padding=1, bias=False),
            nn.LeakyReLU(0.2))
        self.layer_2 = ConvNormAct(64, 128, kernel_size=4, stride=2, padding=1, act_type='lrelu')
        self.layer_3 = ConvNormAct(128, 256, kernel_size=4, stride=2, padding=1, act_type='lrelu')
        self.layer_4 = ConvNormAct(256, 512, kernel_size=4, stride=1, padding=2, act_type='lrelu')
        self.last_conv = nn.Conv2d(512, 64, kernel_size=4, stride=1, padding=2, bias=False)

    def forward(self, x):
        """Return a patch-wise realness map for input images (b, in_ch, h, w)."""
        h = self.layer_1(x)
        h = self.layer_2(h)
        h = self.layer_3(h)
        h = self.layer_4(h)
        return self.last_conv(h)

    def netD_adv_loss(self, real, fake, input_real):
        """Least-squares adversarial loss for the discriminator.

        ``real``/``fake`` are (b, t, c, h, w); ``fake`` is detached so
        generator gradients do not flow through this loss.
        """
        if self.seq:
            if self.is_extrap:
                real, fake = self.rearrange_seq(real, fake, input_real, only_fake=False)
            else:
                real, fake = self.rearrange_seq_interp(real, fake, input_real, only_fake=False)
        elif not self.seq:
            # Per-frame mode: flatten time into the batch dimension.
            b, t, c, h, w = fake.size()
            real = real.contiguous().view(-1, c, h, w)
            fake = fake.contiguous().view(-1, c, h, w)

        pred_fake = self.forward(fake.detach())
        pred_real = self.forward(real)

        # GAN loss type
        real_label = torch.ones_like(pred_real).to(self.device)
        loss_fake = torch.mean((pred_fake) ** 2)
        loss_real = torch.mean((pred_real - real_label) ** 2)
        loss_D = (loss_real + loss_fake) * 0.5

        return loss_D

    def netG_adv_loss(self, fake, input_real):
        """Least-squares adversarial loss for the generator (fake pushed
        toward the real label)."""
        b, t, c, h, w = fake.size()
        if self.seq:
            if self.is_extrap:
                fake = self.rearrange_seq(None, fake, input_real, only_fake=True)
            else:
                fake = self.rearrange_seq_interp(None, fake, input_real, only_fake=True)
        elif not self.seq:
            fake = fake.contiguous().view(-1, c, h, w)

        pred_fake = self.forward(fake)

        # GAN loss type
        real_label = torch.ones_like(pred_fake).to(self.device)
        loss_real = torch.mean((pred_fake - real_label) ** 2)

        return loss_real

    def rearrange_seq(self, real, fake, input_real, only_fake=True):
        """Extrapolation mode: build sliding windows that mix the observed
        frames with a growing prefix of generated (or real) frames, then
        stack each window along the channel axis."""
        b, t, c, h, w = fake.size()

        fake_seqs = []
        for i in range(t):
            fake_seq = torch.cat([input_real[:, i:, ...], fake[:, :i+1, ...]], dim=1)
            fake_seqs += [fake_seq]
        fake_seqs = torch.cat(fake_seqs, dim=0).view(b * t, -1, h, w)

        if only_fake:
            return fake_seqs

        real_seqs = []
        for i in range(t):
            real_seq = torch.cat([input_real[:, i:, ...], real[:, :i+1, ...]], dim=1)
            real_seqs += [real_seq]
        real_seqs = torch.cat(real_seqs, dim=0).view(b * t, -1, h, w)

        return real_seqs, fake_seqs

    def rearrange_seq_interp(self, real, fake, input_real, only_fake=True):
        """Interpolation mode: for each time step, substitute exactly one
        generated (or real) frame into the observed sequence."""
        b, t, c, h, w = fake.size()

        # Bug fix: was torch.eye(t).float().cuda(), which crashed on CPU-only
        # runs and ignored the device the module was constructed for.
        mask = torch.eye(t).float().to(self.device)

        fake_seqs = []
        for i in range(t):
            reshaped_mask = mask[i].view(1, -1, 1, 1, 1)
            fake_seq = (1 - reshaped_mask) * input_real + reshaped_mask * fake
            fake_seqs += [fake_seq]
        fake_seqs = torch.cat(fake_seqs, dim=0).view(b * t, -1, h, w)

        if only_fake:
            return fake_seqs

        real_seqs = []
        for i in range(t):
            reshaped_mask = mask[i].view(1, -1, 1, 1, 1)
            real_seq = (1 - reshaped_mask) * input_real + reshaped_mask * real
            real_seqs += [real_seq]
        real_seqs = torch.cat(real_seqs, dim=0).view(b * t, -1, h, w)

        return real_seqs, fake_seqs
def create_netD(opt, device):
    """Build the per-frame and per-sequence discriminators plus a shared
    Adamax optimizer over both of them."""
    # Model
    # Length of the frame sequence the sequence discriminator will see.
    seq_len = opt.sample_size // 2
    if opt.irregular and not opt.extrap:
        seq_len = opt.sample_size
    if opt.extrap:
        seq_len += 1

    netD_img = Discriminator(in_ch=3, device=device, seq=False, is_extrap=opt.extrap).to(device)
    netD_seq = Discriminator(in_ch=3 * seq_len, device=device, seq=True, is_extrap=opt.extrap).to(device)

    # Optimizer
    all_params = list(netD_img.parameters()) + list(netD_seq.parameters())
    optimizer_netD = optim.Adamax(all_params, lr=opt.lr)

    return netD_img, netD_seq, optimizer_netD
| 5,314 | 33.512987 | 107 | py |
null | Vid-ODE-main/models/layers.py | import torch.nn as nn
def create_net(n_inputs, n_outputs, n_layers=1,
               n_units=100, nonlinear=nn.Tanh):
    """Build an MLP: an input Linear layer, ``n_layers`` hidden Linear layers
    of ``n_units`` units with ``nonlinear`` activations, and a Linear output."""
    modules = [nn.Linear(n_inputs, n_units)]
    for _ in range(n_layers):
        modules.extend([nonlinear(), nn.Linear(n_units, n_units)])
    modules.extend([nonlinear(), nn.Linear(n_units, n_outputs)])
    return nn.Sequential(*modules)
def create_convnet(n_inputs, n_outputs, n_layers=1, n_units=128, nonlinear='tanh'):
    """Build a conv net: a 3x3 input conv, ``n_layers`` hidden 3x3 convs with
    ``nonlinear`` activations in between, and a 3x3 output conv.

    Only ``nonlinear='tanh'`` is supported.
    """
    if nonlinear != 'tanh':
        raise NotImplementedError('There is no named')
    act = nn.Tanh()

    modules = [nn.Conv2d(n_inputs, n_units, 3, 1, 1, dilation=1)]
    for _ in range(n_layers):
        modules.extend([act, nn.Conv2d(n_units, n_units, 3, 1, 1, dilation=1)])
    modules.extend([act, nn.Conv2d(n_units, n_outputs, 3, 1, 1, dilation=1)])
    return nn.Sequential(*modules)
null | Vid-ODE-main/models/ode_func.py | import torch
import torch.nn as nn
# git clone https://github.com/rtqichen/torchdiffeq.git
from torchdiffeq import odeint as odeint
class DiffeqSolver(nn.Module):
    """Thin wrapper around ``torchdiffeq.odeint`` that integrates
    ``ode_func`` from an initial state over requested time points."""

    def __init__(self, input_dim, ode_func, method, latents,
                 odeint_rtol=1e-4, odeint_atol=1e-5, device=torch.device("cpu")):
        super(DiffeqSolver, self).__init__()
        self.ode_method = method
        self.latents = latents
        self.device = device
        self.ode_func = ode_func
        self.odeint_rtol = odeint_rtol
        self.odeint_atol = odeint_atol

    def forward(self, first_point, time_steps_to_predict, backwards=False):
        """
        # Decode the trajectory through ODE Solver
        """
        solution = odeint(self.ode_func, first_point, time_steps_to_predict,
                          rtol=self.odeint_rtol, atol=self.odeint_atol,
                          method=self.ode_method)
        # odeint returns time-major output; move batch first: [t, b, ...] -> [b, t, ...]
        return solution.permute(1, 0, 2, 3, 4)

    def sample_traj_from_prior(self, starting_point_enc, time_steps_to_predict, n_traj_samples=1):
        """
        # Decode the trajectory through ODE Solver using samples from the prior
        time_steps_to_predict: time steps at which we want to sample the new trajectory
        """
        func = self.ode_func.sample_next_point_from_prior
        pred_y = odeint(func, starting_point_enc, time_steps_to_predict,
                        rtol=self.odeint_rtol, atol=self.odeint_atol,
                        method=self.ode_method)
        return pred_y.permute(1, 2, 0, 3)
#####################################################################################################
class ODEFunc(nn.Module):
    """Wraps a network that defines the ODE dynamics dy/dt = f(y)."""

    def __init__(self, opt, input_dim, latent_dim, ode_func_net, device=torch.device("cpu")):
        """Store the gradient network and bookkeeping attributes.

        Args:
            opt: experiment options object (stored, not used here).
            input_dim: dimensionality of the input.
            latent_dim: dimensionality used for the ODE latent state
                (accepted for interface compatibility; not stored).
            ode_func_net: module computing dy/dt from the current state.
            device: target device (stored, not used here).
        """
        super(ODEFunc, self).__init__()
        self.input_dim = input_dim
        self.device = device
        self.opt = opt
        self.gradient_net = ode_func_net

    def forward(self, t_local, y, backwards=False):
        """Return dy/dt at time ``t_local`` for state ``y``.

        When ``backwards`` is True the gradient is negated, which
        corresponds to integrating the ODE in reverse time.
        """
        gradient = self.get_ode_gradient_nn(t_local, y)
        return -gradient if backwards else gradient

    def get_ode_gradient_nn(self, t_local, y):
        """Evaluate the gradient network on ``y`` (``t_local`` is ignored:
        the dynamics are autonomous)."""
        return self.gradient_net(y)

    def sample_next_point_from_prior(self, t_local, y):
        """Dynamics used when decoding a trajectory sampled from the prior;
        identical to the regular gradient here."""
        return self.get_ode_gradient_nn(t_local, y)
| 2,907 | 33.211765 | 135 | py |
null | EMSAFormer-main/.gitlab-ci.yml | stages:
- stylecheck
- test
- deploy
.conda_env: &conda_env
before_script:
# update conda
- conda config --set always_yes yes
- conda update -q conda
# create and activate environment
- conda create -q -n testenv_${CI_JOB_ID}_py${PYTHON_VERSION_TO_USE//./} python=${PYTHON_VERSION_TO_USE} pip
- source activate testenv_${CI_JOB_ID}_py${PYTHON_VERSION_TO_USE//./}
after_script:
# remove environment
- conda env remove --name testenv_${CI_JOB_ID}_py${PYTHON_VERSION_TO_USE//./}
.test_template: &test_template
<<: *conda_env
stage: test
rules:
- if: $CI_MERGE_REQUEST_TITLE =~ /^(Draft:|WIP:|\[Draft\]|\[WIP\])/
when: manual
- if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_TITLE !~ /^(Draft:|WIP:|\[Draft\]|\[WIP\])/
- if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
- if: $CI_PIPELINE_SOURCE == "schedule"
# parallel: 1
script:
# install packages (use conda to avoid time-consuming installations)
- conda install -q pytest pytest-cov
- python -m pip install -q pytest-html
- python -m pip install pytest-xdist # multiple workers for pytest (-n 2 below)
# install dependencies
- conda install 'protobuf<=3.19.1' # for onnx
- |
if [ "${PYTHON_VERSION_TO_USE}" == "3.6" ]; then
conda install pytorch=1.10.1 torchvision=0.11.2 cudatoolkit=11.3 -c pytorch
else
conda install pytorch=1.13.0 torchvision=0.14.0 torchaudio pytorch-cuda=11.7 -c pytorch -c nvidia
fi
- python -m pip install 'opencv-python>=4.2.0.34'
- python -m pip install wandb==0.13.6
- python -m pip install onnx==1.12.0
- python -m pip install tui_imagenet==0.1.0
- python -m pip install nicr-cluster-utils==1.0.0
- python -m pip install git+https://github.com/cocodataset/panopticapi.git
- python -m pip install pycocotools==2.0.2
# install packages (and all missing dependencies)
- python -m pip install --editable lib/nicr-scene-analysis-datasets[test]
- python -m pip install --editable lib/nicr-multitask-scene-analysis[test]
# check conda installation
- conda info
- conda list
- python -m pip list
# run test
- py.test ./emsaformer/tests -vv -rx -s --maxfail=4 --ff --html=report_py${PYTHON_VERSION_TO_USE//./}.html --self-contained-html
style_check:
<<: *conda_env
stage: stylecheck
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
- if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
- if: '$CI_PIPELINE_SOURCE == "schedule"'
variables:
PYTHON_VERSION_TO_USE: "3.8"
script:
# install packages
- conda install -q pycodestyle pylint
# check style using pep8
- find ./ -name "*.py" -not -path "*/lib/*" -not -path "*/stuff/*" | xargs pycodestyle --show-source --show-pep8
# check style using pylint (without taking the result into account; failures are ignored)
- pylint ./ --rcfile=${CI_PROJECT_DIR}/.pylintrc || true
tests_py36:
<<: *test_template
variables:
PYTHON_VERSION_TO_USE: "3.6"
GIT_SUBMODULE_STRATEGY: recursive
tests_py38:
<<: *test_template
variables:
PYTHON_VERSION_TO_USE: "3.8"
GIT_SUBMODULE_STRATEGY: recursive
tests_py310:
<<: *test_template
variables:
PYTHON_VERSION_TO_USE: "3.10"
GIT_SUBMODULE_STRATEGY: recursive
| 3,569 | 34.7 | 136 | yml |
null | EMSAFormer-main/README.md | # EMSAFormer: Efficient Multi-Task Scene Analysis with RGB-D Transformers
This repository contains the code to our paper
"EMSAFormer: Efficient Multi-Task Scene Analysis with RGB-D Transformers"
([arXiv](https://arxiv.org/pdf/2306.05242.pdf))
EMSAFormer builds on top of our previous work,
[EMSANet](https://github.com/TUI-NICR/EMSANet), to efficiently perform tasks
such as semantic and instance segmentation (panoptic segmentation), instance
orientation estimation, and scene classification. In EMSAFormer, we replaced
the dual CNN-based encoder of EMSANet with a single Swin Transformer.

The repository includes code for training, evaluating, and applying our
network. We also provide code for exporting the model to the ONNX format.
Additionally, we have implemented a custom TensorRT extension, based on
NVIDIA's [FasterTransformer](https://github.com/NVIDIA/FasterTransformer)
extension, for accelerating inference.
## License and Citations
The source code is published under Apache 2.0 license, see
[license file](LICENSE) for details.
If you use the source code or the network weights, please cite the following
paper ([arXiv](https://arxiv.org/pdf/2306.05242.pdf)):
> Fischedick, S., Seichter, D., Schmidt, R., Rabes, L., Gross, H.-M.
*Efficient Multi-Task Scene Analysis with RGB-D Transformers*,
to appear in IEEE International Joint Conference on Neural Networks (IJCNN), 2023.
```bibtex
@article{emsaformer2023,
title={Efficient {Multi-Task} Scene Analysis with {RGB-D} Transformers},
author={S{\"o}hnke B. Fischedick and Daniel Seichter and Robin Schmidt and Leonard Rabes and Horst-Michael Gross},
journal={arXiv preprint arXiv:2306.05242},
year={2023}
}
```
Note that the preprint was accepted to be published in IEEE International Joint
Conference on Neural Networks (IJCNN) 2023.
## Content
There are subsection for different things to do:
- [Installation](#installation): Set up the environment.
- [Results & Weights](#results-weights): Overview about major results and pretrained network weights.
- [Evaluation](#evaluation): Reproduce results reported in our paper.
- [Inference](#inference): Apply trained models.
- [Dataset Inference](#dataset-inference): Apply trained model to samples from dataset.
- [Sample Inference](#sample-inference): Apply trained model to samples in ./samples.
- [Time Inference](#time-inference): Time inference on NVIDIA Jetson AGX Orin using TensorRT.
- [Training](#training): Train new EMSAFormer model.
- [Changelog](#changelog): List of changes and updates made to the project.
## Installation
1. Clone repository:
```bash
# do not forget the '--recursive'
git clone --recursive https://github.com/TUI-NICR/EMSAFormer
# navigate to the cloned directory (required for installing some dependencies and to run the scripts later)
cd EMSAFormer
```
2. Create conda environment and install all dependencies:
```bash
# option 1: create conda environment from provided YAML file with PyTorch 2.0 (original publication)
conda env create -f emsaformer_environment_pytorch_2_0.yml
conda activate emsaformer
```
```bash
# option 2: create new conda environment manually
conda create -n emsaformer python=3.8 anaconda
conda activate emsaformer
# remaining conda dependencies
conda install pytorch torchvision torchaudio pytorch-cuda=11.7 -c pytorch -c nvidia
# remaining pip dependencies
python -m pip install 'opencv-python==4.2.0.34' # newer versions may work as well
python -m pip install torchmetrics==0.10.2
python -m pip install wandb==0.14.2
```
3. Install submodule packages:
```bash
# dataset package
python -m pip install -e ./lib/nicr-scene-analysis-datasets[withpreparation]
# multitask scene analysis package
python -m pip install -e ./lib/nicr-multitask-scene-analysis
```
4. Prepare datasets:
We trained our networks on
[NYUv2](https://cs.nyu.edu/~silberman/datasets/nyu_depth_v2.html),
[SUNRGB-D](https://rgbd.cs.princeton.edu/), and
[ScanNet](http://www.scan-net.org/).
Please follow the instructions given in `./lib/nicr-scene-analysis-datasets` or [HERE](https://github.com/TUI-NICR/nicr-scene-analysis-datasets/tree/v0.5.4) to prepare the datasets.
In the following, we assume that they are stored at `./datasets`
## Results & Weights
We provide the weights for our selected EMSAFormer-SwinV2-T-128-Multi-Aug
(with a modified SwinV2-T backbone) on NYUv2 and SUNRGB-D and ScanNet:
| Dataset | Model | mIoU | mIoU* | PQ | RQ | SQ | MAAE | bAcc | FPS (50W/30W)** | URL |
|-------------------------|-----------------------------------------|:-----:|:------:|:-----:|:-----:|:-----:|:-----:|:-----:|:------------:|------|
| NYUv2 (test) | SwinV2-T-128-Multi-Aug | 51.06 | 51.76 | 43.28 | 52.48 | 81.43 | 18.26 | 78.80 | 36.5 / 25.6 | [Download](https://drive.google.com/uc?id=1qj7FL2kSA-gu_XdDNtsNaVfWZScrOXnu) |
| | SwinV2-T-128-Multi-Aug (Sem(SegFormer)) | 50.23 | 51.34 | 43.41 | 52.23 | 81.75 | 18.94 | 77.70 | 39.1 / 27.3 | [Download](https://drive.google.com/uc?id=1NeL_4KFFKqQxwMyB1oHewfsUb-4SkAoS) |
| SUNRGB-D (test) | SwinV2-T-128-Multi-Aug | 48.52 | 45.12 | 50.08 | 59.08 | 84.68 | 15.32 | 62.01 | 36.5 / 25.6 | [Download](https://drive.google.com/uc?id=1FHH817pAVIAjIWxDggrCtdszM8PN9KCB) |
| | SwinV2-T-128-Multi-Aug (Sem(SegFormer)) | 48.61 | 45.79 | 51.70 | 60.12 | 84.65 | 14.00 | 61.97 | 39.1 / 27.3 | [Download](https://drive.google.com/uc?id=1furt5IF_MOA6AeVD4sSm8ZaUASh4quaT) |
| ScanNet (test) | SwinV2-T-128-Multi-Aug | 63.78 | 61.93 | 49.70 | 59.15 | 83.31 | *** | 48.82 | 36.5 / 25.6 | [Download](https://drive.google.com/uc?id=11mMFdI6mPh_SyQ5y8jxRtc3Sd9p4Q48L) |
| | SwinV2-T-128-Multi-Aug (Sem(SegFormer)) | 64.75 | 62.66 | 51.18 | 61.01 | 83.20 | *** | 49.69 | 39.1 / 27.3 | [Download](https://drive.google.com/uc?id=1vbL5OCkiiyRDmZIf9lHJ48xnC5NptYwd) |
\* This mIoU is after merging the semantic and instance segmentation to the
panoptic segmentation. Since merging is focused on instances, the mIoU might
change slightly compared to the one obtained from semantic decoder.
\*\* We report the FPS for an NVIDIA Jetson AGX Orin (Jetpack 5.1.1,
TensorRT 8.5.2, Float16) without postprocessing (as it is not optimized so far).
Note that we only report the inference time for NYUv2 in our paper as it has
the same or more classes than the other datasets. Thus, the FPS for the other
datasets can be slightly higher. The performance is reported with a measured
power consumption of 50W and 30W.
\*\*\* Orientations are not available for ScanNet
The checkpoints denoted by "(Sem(SegFormer))" use a smaller MLP-based decoder
for performing semantic segmentation, instead of the EMSANet decoder.
Download and extract the models to `./trained_models`.
## Evaluation
To reproduce results for the full multi-task approach, use `main.py` together
with `--validation-only`.
> Note that building the model correctly depends on the respective dataset and
the tasks the model was trained on.
### NYUv2
To evaluate on NYUv2 with EMSANet decoder (for semantic segmentation), run:
```bash
python main.py \
--dataset nyuv2 \
--dataset-path ./datasets/nyuv2 \
--tasks semantic scene instance orientation \
--enable-panoptic \
--input-modalities rgbd \
--rgbd-encoder-backbone swin-multi-t-v2-128 \
--encoder-normalization layernorm \
--no-pretrained-backbone \
--semantic-decoder emsanet \
--semantic-encoder-decoder-fusion swin-ln-add \
--semantic-decoder-n-channels 512 256 128 \
--semantic-decoder-upsampling learned-3x3-zeropad \
--weights-filepath ./trained_models/nyuv2/nyuv2_swin_multi_t_v2_128_emsanet_decoder.pth \
--checkpointing-metrics valid_semantic_miou bacc mae_gt_deg panoptic_deeplab_semantic_miou panoptic_all_deeplab_pq \
--validation-batch-size 4 \
--validation-only \
--skip-sanity-check \
--wandb-mode disabled
```
```text
Validation results:
{
...
'valid_instance_all_with_gt_deeplab_pq': tensor(0.5906, dtype=torch.float64),
...
'valid_orientation_mae_gt_deg': tensor(20.0162, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_pq': tensor(0.4341, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_rq': tensor(0.5253, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_sq': tensor(0.8175, dtype=torch.float64),
...
'valid_panoptic_deeplab_semantic_miou': tensor(0.5176),
...
'valid_panoptic_mae_deeplab_deg': tensor(18.2569, dtype=torch.float64),
...
'valid_scene_bacc': tensor(0.7880),
...
'valid_semantic_miou': tensor(0.5106),
...
}
```
To evaluate on NYUv2 with MLP-based decoder (for semantic segmentation), run:
```bash
python main.py \
--dataset nyuv2 \
--dataset-path ./datasets/nyuv2 \
--tasks semantic scene instance orientation \
--enable-panoptic \
--input-modalities rgbd \
--rgbd-encoder-backbone swin-multi-t-v2-128 \
--encoder-normalization layernorm \
--no-pretrained-backbone \
--semantic-decoder segformermlp \
--semantic-encoder-decoder-fusion swin-ln-select \
--semantic-decoder-n-channels 256 128 64 64 \
--semantic-decoder-upsampling bilinear \
--weights-filepath ./trained_models/nyuv2/nyuv2_swin_multi_t_v2_128_segformermlp_decoder.pth \
--checkpointing-metrics valid_semantic_miou bacc mae_gt_deg panoptic_deeplab_semantic_miou panoptic_all_deeplab_pq \
--validation-batch-size 4 \
--validation-only \
--skip-sanity-check \
--wandb-mode disabled
```
```text
Validation results:
{
...
'valid_instance_all_with_gt_deeplab_pq': tensor(0.5875, dtype=torch.float64),
...
'valid_orientation_mae_gt_deg': tensor(20.9530, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_pq': tensor(0.4341, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_rq': tensor(0.5253, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_sq': tensor(0.8175, dtype=torch.float64),
...
'valid_panoptic_deeplab_semantic_miou': tensor(0.5134),
...
'valid_panoptic_mae_deeplab_deg': tensor(18.9417, dtype=torch.float64),
...
'valid_scene_bacc': tensor(0.7770),
...
'valid_semantic_miou': tensor(0.5023),
...
}
```
### SUNRGB-D
To evaluate on SUNRGB-D with EMSANet decoder (for semantic segmentation), run:
```bash
python main.py \
--dataset sunrgbd \
--dataset-path ./datasets/sunrgbd \
--sunrgbd-depth-do-not-force-mm \
--tasks semantic scene instance orientation \
--enable-panoptic \
--input-modalities rgbd \
--rgbd-encoder-backbone swin-multi-t-v2-128 \
--encoder-normalization layernorm \
--no-pretrained-backbone \
--semantic-decoder emsanet \
--semantic-encoder-decoder-fusion swin-ln-add \
--semantic-decoder-n-channels 512 256 128 \
--semantic-decoder-upsampling learned-3x3-zeropad \
--weights-filepath ./trained_models/sunrgbd/sunrgbd_swin_multi_t_v2_128_emsanet_decoder.pth \
--checkpointing-metrics valid_semantic_miou bacc mae_gt_deg panoptic_deeplab_semantic_miou panoptic_all_deeplab_pq \
--validation-batch-size 4 \
--validation-only \
--skip-sanity-check \
--wandb-mode disabled
```
```text
Validation results:
{
...
'valid_instance_all_with_gt_deeplab_pq': tensor(0.6114, dtype=torch.float64),
...
'valid_orientation_mae_gt_deg': tensor(16.9858, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_pq': tensor(0.5082, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_rq': tensor(0.5908, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_sq': tensor(0.8469, dtype=torch.float64),
...
'valid_panoptic_deeplab_semantic_miou': tensor(0.4512),
...
'valid_panoptic_mae_deeplab_deg': tensor(15.3224, dtype=torch.float64),
...
'valid_scene_bacc': tensor(0.6201),
...
'valid_semantic_miou': tensor(0.4852),
...
}
```
To evaluate on SUNRGB-D with MLP-based decoder (for semantic segmentation), run:
```bash
python main.py \
--dataset sunrgbd \
--dataset-path ./datasets/sunrgbd \
--sunrgbd-depth-do-not-force-mm \
--tasks semantic scene instance orientation \
--enable-panoptic \
--input-modalities rgbd \
--rgbd-encoder-backbone swin-multi-t-v2-128 \
--encoder-normalization layernorm \
--no-pretrained-backbone \
--semantic-decoder segformermlp \
--semantic-encoder-decoder-fusion swin-ln-select \
--semantic-decoder-n-channels 256 128 64 64 \
--semantic-decoder-upsampling bilinear \
--weights-filepath ./trained_models/sunrgbd/sunrgbd_swin_multi_t_v2_128_segformermlp_decoder.pth \
--checkpointing-metrics valid_semantic_miou bacc mae_gt_deg panoptic_deeplab_semantic_miou panoptic_all_deeplab_pq \
--validation-batch-size 4 \
--validation-only \
--skip-sanity-check \
--wandb-mode disabled
```
```text
Validation results:
{
...
'valid_instance_all_with_gt_deeplab_pq': tensor(0.6120, dtype=torch.float64),
...
'valid_orientation_mae_gt_deg': tensor(15.9133, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_pq': tensor(0.5170, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_rq': tensor(0.6012, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_sq': tensor(0.8465, dtype=torch.float64),
...
'valid_panoptic_deeplab_semantic_miou': tensor(0.4579),
...
'valid_panoptic_mae_deeplab_deg': tensor(13.9994, dtype=torch.float64),
...
'valid_scene_bacc': tensor(0.6197),
...
'valid_semantic_miou': tensor(0.4861),
...
}
```
### ScanNet
To evaluate on ScanNet with EMSANet decoder (for semantic segmentation), run:
```bash
python main.py \
--dataset scannet \
--dataset-path ./datasets/scannet \
--scannet-semantic-n-classes 20 \
--tasks semantic scene instance \
--enable-panoptic \
--input-modalities rgbd \
--rgbd-encoder-backbone swin-multi-t-v2-128 \
--encoder-normalization layernorm \
--no-pretrained-backbone \
--semantic-decoder emsanet \
--semantic-encoder-decoder-fusion swin-ln-add \
--semantic-decoder-n-channels 512 256 128 \
--semantic-decoder-upsampling learned-3x3-zeropad \
--weights-filepath ./trained_models/scannet/scannet_swin_multi_t_v2_128_emsanet_decoder.pth \
--checkpointing-metrics valid_semantic_miou bacc mae_gt_deg panoptic_deeplab_semantic_miou panoptic_all_deeplab_pq \
--validation-batch-size 4 \
--validation-only \
--skip-sanity-check \
--wandb-mode disabled
```
```text
Validation results:
{
...
'valid_instance_all_with_gt_deeplab_pq': tensor(0.6669, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_pq': tensor(0.4970, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_rq': tensor(0.5915, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_sq': tensor(0.8331, dtype=torch.float64),
...
'valid_panoptic_deeplab_semantic_miou': tensor(0.6193),
...
'valid_scene_bacc': tensor(0.4882),
...
'valid_semantic_miou': tensor(0.6378),
...
}
```
To evaluate on ScanNet with MLP-based decoder (for semantic segmentation), run:
```bash
python main.py \
--dataset scannet \
--dataset-path ./datasets/scannet \
--scannet-semantic-n-classes 20 \
--tasks semantic scene instance \
--enable-panoptic \
--input-modalities rgbd \
--rgbd-encoder-backbone swin-multi-t-v2-128 \
--encoder-normalization layernorm \
--no-pretrained-backbone \
--semantic-decoder segformermlp \
--semantic-encoder-decoder-fusion swin-ln-select \
--semantic-decoder-n-channels 256 128 64 64 \
--semantic-decoder-upsampling bilinear \
--weights-filepath ./trained_models/scannet/scannet_swin_multi_t_v2_128_segformermlp_decoder.pth \
--checkpointing-metrics valid_semantic_miou bacc mae_gt_deg panoptic_deeplab_semantic_miou panoptic_all_deeplab_pq \
--validation-batch-size 4 \
--validation-only \
--skip-sanity-check \
--wandb-mode disabled
```
```text
Validation results:
{
...
'valid_instance_all_with_gt_deeplab_pq': tensor(0.6771, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_pq': tensor(0.5118, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_rq': tensor(0.6101, dtype=torch.float64),
...
'valid_panoptic_all_with_gt_deeplab_sq': tensor(0.8320, dtype=torch.float64),
...
'valid_panoptic_deeplab_semantic_miou': tensor(0.6266),
...
'valid_scene_bacc': tensor(0.4969),
...
'valid_semantic_miou': tensor(0.6475),
...
}
```
## Inference
We provide scripts for inference on both samples drawn from one of our used
datasets (`main.py` with additional arguments) and samples located in
`./samples` (`inference_samples.py`).
> Note that building the model correctly depends on the respective dataset the
model was trained on.
### Dataset Inference
To run inference on a dataset with the full multi-task approach, use `main.py`
together with `--validation-only` and `--visualize-validation`.
By default the visualized outputs are written to a newly created directory next
to the weights. However, you can also specify the output path with
`--visualization-output-path`.
Example: To apply EMSAFormer trained on NYUv2 to samples from NYUv2, run:
```bash
python main.py \
--dataset nyuv2 \
--dataset-path ./datasets/nyuv2 \
--tasks semantic scene instance orientation \
--enable-panoptic \
--input-modalities rgbd \
--rgbd-encoder-backbone swin-multi-t-v2-128 \
--encoder-normalization layernorm \
--no-pretrained-backbone \
--semantic-decoder segformermlp \
--semantic-encoder-decoder-fusion swin-ln-select \
--semantic-decoder-n-channels 256 128 64 64 \
--semantic-decoder-upsampling bilinear \
--weights-filepath ./trained_models/nyuv2/nyuv2_swin_multi_t_v2_128_segformermlp_decoder.pth \
--checkpointing-metrics valid_semantic_miou bacc mae_gt_deg panoptic_deeplab_semantic_miou panoptic_all_deeplab_pq \
--validation-batch-size 4 \
--validation-only \
--visualize-validation \
--visualization-output-path ./visualized_outputs/nyuv2 \
--skip-sanity-check \
--wandb-mode disabled
```
Similarly, the same can be applied to SUNRGB-D and ScanNet
(see parameters in [evaluation](#evaluation) section).
> Note that the `inference_dataset.py` script can be used to predict on the
test data of ScanNet and write the results in the format which is required
for the official evaluation servers.
### Sample Inference
Use `inference_samples.py` to apply a trained model to the sample from a
Kinect v2 given in `./samples`.
> Note that the dataset argument is required to determine the correct dataset
configuration (classes, colors, ...) and to build the model correctly.
However, you do not need to prepare the respective dataset.
Furthermore, depending on the given depth images and the
used dataset for training, an additional depth scaling might be necessary.
The provided example depth image is in millimeters (1m equals to a depth
value of 1000).
```bash
python inference_samples.py \
--dataset sunrgbd \
--sunrgbd-depth-do-not-force-mm \
--tasks semantic scene instance orientation \
--enable-panoptic \
--raw-depth \
--depth-max 8000 \
--depth-scale 8 \
--instance-offset-distance-threshold 40 \
--weights-filepath ./trained_models/sunrgbd/sunrgbd_swin_multi_t_v2_128_segformermlp_decoder.pth \
--show-results
```

> Note that the model was not trained on that kind of incomplete depth images.
> Note that the `--instance-offset-distance-threshold` argument is used to
assign an instance ID of 0 to pixels if they have a distance greater than
40 pixels from the nearest center. During panoptic merging, these pixels are
assigned to the void class.
### Time Inference
To reproduce the timings on an NVIDIA Jetson AGX Orin 32GB, a custom TensorRT
extension, based on NVIDIA's [FasterTransformer](https://github.com/NVIDIA/FasterTransformer),
is required. Additionally, some modifications for the ONNX export are required,
which enable the use of ONNX for inference with TensorRT.
Please note that the custom TensorRT extension is not available yet, as we are
actively working on its release. Updates regarding its availability will be
provided in the near future.
## Training
Use `main.py` to train EMSAformer on NYUv2, SUNRGB-D, ScanNet, or any other
dataset that you implemented following the implementation of the provided
datasets.
> Note that training our EMSAFormer with our selected SwinV2-T-128-Multi-Aug as
encoder requires pretrained weights. You can download our pretrained weights on
ImageNet from [Link](https://drive.google.com/uc?id=10hUuPmO49yNIKVPoo6LlWVZS1txccn57).
> Note that we trained all models on NVIDIA A100-SXM4-40GB GPUs with batch
size of 8. However, training the full multi-task approach requires ~25GB
of VRAM, so a smaller GPU may not work. We did not observe any great boost
from larger batch sizes.
Example: Train our full multi-task EMSAFormer with MLP-based decoder
(for semantic segmentation) on NYUv2:
```bash
python main.py \
--results-basepath ./results \
--dataset nyuv2 \
--dataset-path ./datasets/nyuv2 \
--input-modalities rgbd \
--tasks semantic scene instance orientation \
--enable-panoptic \
--tasks-weighting 1.0 0.25 2.0 0.5 \
--instance-weighting 2 1 \
--rgbd-encoder-backbone swin-multi-t-v2-128 \
--encoder-normalization layernorm \
--rgbd-encoder-backbone-pretrained-weights-filepath ./trained_models/imagenet/swin_multi_t_v2_128.pth \
--validation-batch-size 16 \
--validation-skip 0.0 \
--checkpointing-skip 0.8 \
--checkpointing-best-only \
--checkpointing-metrics valid_semantic_miou bacc mae_gt_deg panoptic_deeplab_semantic_miou panoptic_all_with_gt_deeplab_pq \
--batch-size 8 \
--learning-rate 0.03 \
--wandb-mode disabled
```
For more options, we refer to `./emsaformer/args.py` or simply run:
```bash
python main.py --help
```
## Changelog
**June 02, 2023**
- initial code release for original publication
| 22,285 | 38.72549 | 220 | md |
null | EMSAFormer-main/emsaformer_environment_pytorch_2_0.yml | name: emsaformer
channels:
- pytorch
- nvidia
- defaults
dependencies:
- _anaconda_depends=2023.03=py38_0
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- alabaster=0.7.12=pyhd3eb1b0_0
- anaconda=custom=py38_1
- anyio=3.5.0=py38h06a4308_0
- appdirs=1.4.4=pyhd3eb1b0_0
- argon2-cffi=21.3.0=pyhd3eb1b0_0
- argon2-cffi-bindings=21.2.0=py38h7f8727e_0
- arrow=1.2.3=py38h06a4308_1
- astroid=2.14.2=py38h06a4308_0
- astropy=5.1=py38h7deecbd_0
- asttokens=2.0.5=pyhd3eb1b0_0
- atomicwrites=1.4.0=py_0
- attrs=22.1.0=py38h06a4308_0
- automat=20.2.0=py_0
- autopep8=1.6.0=pyhd3eb1b0_1
- babel=2.11.0=py38h06a4308_0
- backcall=0.2.0=pyhd3eb1b0_0
- bcrypt=3.2.0=py38h5eee18b_1
- beautifulsoup4=4.11.1=py38h06a4308_0
- binaryornot=0.4.4=pyhd3eb1b0_1
- black=22.6.0=py38h06a4308_0
- blas=1.0=mkl
- bleach=4.1.0=pyhd3eb1b0_0
- blosc=1.21.3=h6a678d5_0
- bokeh=2.4.3=py38h06a4308_0
- bottleneck=1.3.5=py38h7deecbd_0
- brotli=1.0.9=h5eee18b_7
- brotli-bin=1.0.9=h5eee18b_7
- brotlipy=0.7.0=py38h27cfd23_1003
- brunsli=0.1=h2531618_0
- bzip2=1.0.8=h7b6447c_0
- c-ares=1.18.1=h7f8727e_0
- ca-certificates=2023.01.10=h06a4308_0
- certifi=2022.12.7=py38h06a4308_0
- cffi=1.15.1=py38h5eee18b_3
- cfitsio=3.470=h5893167_7
- chardet=4.0.0=py38h06a4308_1003
- charls=2.2.0=h2531618_0
- charset-normalizer=2.0.4=pyhd3eb1b0_0
- click=8.0.4=py38h06a4308_0
- cloudpickle=2.0.0=pyhd3eb1b0_0
- colorama=0.4.6=py38h06a4308_0
- colorcet=3.0.1=py38h06a4308_0
- comm=0.1.2=py38h06a4308_0
- constantly=15.1.0=pyh2b92418_0
- contourpy=1.0.5=py38hdb19cb5_0
- cookiecutter=1.7.3=pyhd3eb1b0_0
- cryptography=39.0.1=py38h9ce1e76_0
- cssselect=1.1.0=pyhd3eb1b0_0
- cuda-cudart=11.7.99=0
- cuda-cupti=11.7.101=0
- cuda-libraries=11.7.1=0
- cuda-nvrtc=11.7.99=0
- cuda-nvtx=11.7.91=0
- cuda-runtime=11.7.1=0
- curl=7.87.0=h5eee18b_0
- cycler=0.11.0=pyhd3eb1b0_0
- cytoolz=0.12.0=py38h5eee18b_0
- daal4py=2023.0.2=py38h79cecc1_0
- dal=2023.0.1=hdb19cb5_26647
- dask=2022.7.0=py38h06a4308_0
- dask-core=2022.7.0=py38h06a4308_0
- datashader=0.14.4=py38h06a4308_0
- datashape=0.5.4=py38h06a4308_1
- dbus=1.13.18=hb2f20db_0
- debugpy=1.5.1=py38h295c915_0
- decorator=5.1.1=pyhd3eb1b0_0
- defusedxml=0.7.1=pyhd3eb1b0_0
- diff-match-patch=20200713=pyhd3eb1b0_0
- dill=0.3.6=py38h06a4308_0
- distributed=2022.7.0=py38h06a4308_0
- docstring-to-markdown=0.11=py38h06a4308_0
- docutils=0.18.1=py38h06a4308_3
- entrypoints=0.4=py38h06a4308_0
- et_xmlfile=1.1.0=py38h06a4308_0
- executing=0.8.3=pyhd3eb1b0_0
- expat=2.4.9=h6a678d5_0
- ffmpeg=4.3=hf484d3e_0
- filelock=3.9.0=py38h06a4308_0
- flake8=6.0.0=py38h06a4308_0
- flask=2.2.2=py38h06a4308_0
- flit-core=3.6.0=pyhd3eb1b0_0
- fontconfig=2.14.1=h52c9d5c_1
- fonttools=4.25.0=pyhd3eb1b0_0
- freetype=2.12.1=h4a9f257_0
- fsspec=2022.11.0=py38h06a4308_0
- future=0.18.3=py38h06a4308_0
- gensim=4.3.0=py38h6a678d5_0
- giflib=5.2.1=h5eee18b_3
- glib=2.69.1=he621ea3_2
- gmp=6.2.1=h295c915_3
- gmpy2=2.1.2=py38heeb90bb_0
- gnutls=3.6.15=he1e5248_0
- greenlet=2.0.1=py38h6a678d5_0
- gst-plugins-base=1.14.1=h6a678d5_1
- gstreamer=1.14.1=h5eee18b_1
- h5py=3.7.0=py38h737f45e_0
- hdf5=1.10.6=h3ffc7dd_1
- heapdict=1.0.1=pyhd3eb1b0_0
- holoviews=1.15.4=py38h06a4308_0
- huggingface_hub=0.10.1=py38h06a4308_0
- hvplot=0.8.2=py38h06a4308_0
- hyperlink=21.0.0=pyhd3eb1b0_0
- icu=58.2=he6710b0_3
- idna=3.4=py38h06a4308_0
- imagecodecs=2021.8.26=py38hfcb8610_2
- imageio=2.26.0=py38h06a4308_0
- imagesize=1.4.1=py38h06a4308_0
- imbalanced-learn=0.10.1=py38h06a4308_0
- importlib-metadata=4.11.3=py38h06a4308_0
- importlib_metadata=4.11.3=hd3eb1b0_0
- importlib_resources=5.2.0=pyhd3eb1b0_1
- incremental=21.3.0=pyhd3eb1b0_0
- inflection=0.5.1=py38h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- intake=0.6.7=py38h06a4308_0
- intel-openmp=2021.4.0=h06a4308_3561
- intervaltree=3.1.0=pyhd3eb1b0_0
- ipykernel=6.19.2=py38hb070fc8_0
- ipython=8.10.0=py38h06a4308_0
- ipython_genutils=0.2.0=pyhd3eb1b0_1
- ipywidgets=7.6.5=pyhd3eb1b0_1
- isort=5.9.3=pyhd3eb1b0_0
- itemadapter=0.3.0=pyhd3eb1b0_0
- itemloaders=1.0.4=pyhd3eb1b0_1
- itsdangerous=2.0.1=pyhd3eb1b0_0
- jedi=0.18.1=py38h06a4308_1
- jeepney=0.7.1=pyhd3eb1b0_0
- jellyfish=0.9.0=py38h7f8727e_0
- jinja2=3.1.2=py38h06a4308_0
- jinja2-time=0.2.0=pyhd3eb1b0_3
- jmespath=0.10.0=pyhd3eb1b0_0
- joblib=1.1.1=py38h06a4308_0
- jpeg=9e=h5eee18b_1
- jq=1.6=h27cfd23_1000
- json5=0.9.6=pyhd3eb1b0_0
- jsonschema=4.17.3=py38h06a4308_0
- jupyter=1.0.0=py38h06a4308_8
- jupyter_client=7.3.4=py38h06a4308_0
- jupyter_console=6.6.2=py38h06a4308_0
- jupyter_core=5.2.0=py38h06a4308_0
- jupyter_server=1.23.4=py38h06a4308_0
- jupyterlab=3.5.3=py38h06a4308_0
- jupyterlab_pygments=0.1.2=py_0
- jupyterlab_server=2.19.0=py38h06a4308_0
- jupyterlab_widgets=1.0.0=pyhd3eb1b0_1
- jxrlib=1.1=h7b6447c_2
- keyring=23.4.0=py38h06a4308_0
- kiwisolver=1.4.4=py38h6a678d5_0
- krb5=1.19.4=h568e23c_0
- lame=3.100=h7b6447c_0
- lazy-object-proxy=1.6.0=py38h27cfd23_0
- lcms2=2.12=h3be6417_0
- ld_impl_linux-64=2.38=h1181459_1
- lerc=3.0=h295c915_0
- libaec=1.0.4=he6710b0_1
- libbrotlicommon=1.0.9=h5eee18b_7
- libbrotlidec=1.0.9=h5eee18b_7
- libbrotlienc=1.0.9=h5eee18b_7
- libclang=10.0.1=default_hb85057a_2
- libcublas=11.10.3.66=0
- libcufft=10.7.2.124=h4fbf590_0
- libcufile=1.6.0.25=0
- libcurand=10.3.2.56=0
- libcurl=7.87.0=h91b91d3_0
- libcusolver=11.4.0.1=0
- libcusparse=11.7.4.91=0
- libdeflate=1.17=h5eee18b_0
- libedit=3.1.20221030=h5eee18b_0
- libev=4.33=h7f8727e_1
- libevent=2.1.12=h8f2d780_0
- libffi=3.4.2=h6a678d5_6
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=11.2.0=h00389a5_1
- libgfortran5=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libiconv=1.16=h7f8727e_2
- libidn2=2.3.2=h7f8727e_0
- libllvm10=10.0.1=hbcb73fb_5
- libllvm11=11.1.0=h9e868ea_6
- libnghttp2=1.46.0=hce63b2e_0
- libnpp=11.7.4.75=0
- libnvjpeg=11.8.0.2=0
- libpng=1.6.39=h5eee18b_0
- libpq=12.9=h16c4e8d_3
- libprotobuf=3.20.3=he621ea3_0
- libsodium=1.0.18=h7b6447c_0
- libspatialindex=1.9.3=h2531618_0
- libssh2=1.10.0=h8f2d780_0
- libstdcxx-ng=11.2.0=h1234567_1
- libtasn1=4.19.0=h5eee18b_0
- libtiff=4.5.0=h6a678d5_2
- libunistring=0.9.10=h27cfd23_0
- libuuid=1.41.5=h5eee18b_0
- libwebp=1.2.4=h11a3e52_1
- libwebp-base=1.2.4=h5eee18b_1
- libxcb=1.15=h7f8727e_0
- libxkbcommon=1.0.1=hfa300c1_0
- libxml2=2.9.14=h74e7548_0
- libxslt=1.1.35=h4e12654_0
- libzopfli=1.0.3=he6710b0_0
- llvmlite=0.39.1=py38he621ea3_0
- locket=1.0.0=py38h06a4308_0
- lxml=4.9.1=py38h1edc446_0
- lz4=3.1.3=py38h27cfd23_0
- lz4-c=1.9.4=h6a678d5_0
- lzo=2.10=h7b6447c_2
- markdown=3.4.1=py38h06a4308_0
- markupsafe=2.1.1=py38h7f8727e_0
- matplotlib=3.7.0=py38h06a4308_0
- matplotlib-base=3.7.0=py38h417a72b_0
- matplotlib-inline=0.1.6=py38h06a4308_0
- mccabe=0.7.0=pyhd3eb1b0_0
- mistune=0.8.4=py38h7b6447c_1000
- mkl=2021.4.0=h06a4308_640
- mkl-service=2.4.0=py38h7f8727e_0
- mkl_fft=1.3.1=py38hd3c417c_0
- mkl_random=1.2.2=py38h51133e4_0
- mock=4.0.3=pyhd3eb1b0_0
- mpc=1.1.0=h10f8cd9_1
- mpfr=4.0.2=hb69a4c5_1
- mpi=1.0=mpich
- mpich=3.3.2=external_0
- mpmath=1.2.1=py38h06a4308_0
- msgpack-python=1.0.3=py38hd09550d_0
- multipledispatch=0.6.0=py38_0
- munkres=1.1.4=py_0
- mypy_extensions=0.4.3=py38h06a4308_1
- nbclassic=0.5.2=py38h06a4308_0
- nbclient=0.5.13=py38h06a4308_0
- nbconvert=6.5.4=py38h06a4308_0
- nbformat=5.7.0=py38h06a4308_0
- ncurses=6.4=h6a678d5_0
- nest-asyncio=1.5.6=py38h06a4308_0
- nettle=3.7.3=hbbd107a_1
- networkx=2.8.4=py38h06a4308_0
- ninja=1.10.2=h06a4308_5
- ninja-base=1.10.2=hd09550d_5
- nltk=3.7=pyhd3eb1b0_0
- notebook=6.5.2=py38h06a4308_0
- notebook-shim=0.2.2=py38h06a4308_0
- nspr=4.33=h295c915_0
- nss=3.74=h0370c37_0
- numba=0.56.4=py38h417a72b_0
- numexpr=2.8.4=py38he184ba9_0
- numpy=1.23.5=py38h14f4228_0
- numpy-base=1.23.5=py38h31eccc5_0
- numpydoc=1.5.0=py38h06a4308_0
- oniguruma=6.9.7.1=h27cfd23_0
- openh264=2.1.1=h4ff587b_0
- openjpeg=2.4.0=h3ad879b_0
- openpyxl=3.0.10=py38h5eee18b_0
- openssl=1.1.1t=h7f8727e_0
- packaging=22.0=py38h06a4308_0
- pandas=1.5.3=py38h417a72b_0
- pandocfilters=1.5.0=pyhd3eb1b0_0
- panel=0.14.3=py38h06a4308_0
- param=1.12.3=py38h06a4308_0
- parsel=1.6.0=py38h06a4308_0
- parso=0.8.3=pyhd3eb1b0_0
- partd=1.2.0=pyhd3eb1b0_1
- pathspec=0.10.3=py38h06a4308_0
- patsy=0.5.3=py38h06a4308_0
- pcre=8.45=h295c915_0
- pep8=1.7.1=py38h06a4308_1
- pexpect=4.8.0=pyhd3eb1b0_3
- pickleshare=0.7.5=pyhd3eb1b0_1003
- pillow=9.4.0=py38h6a678d5_0
- pip=22.3.1=py38h06a4308_0
- pkgutil-resolve-name=1.3.10=py38h06a4308_0
- platformdirs=2.5.2=py38h06a4308_0
- plotly=5.9.0=py38h06a4308_0
- pluggy=1.0.0=py38h06a4308_1
- ply=3.11=py38_0
- pooch=1.4.0=pyhd3eb1b0_0
- poyo=0.5.0=pyhd3eb1b0_0
- prometheus_client=0.14.1=py38h06a4308_0
- prompt-toolkit=3.0.36=py38h06a4308_0
- prompt_toolkit=3.0.36=hd3eb1b0_0
- protego=0.1.16=py_0
- psutil=5.9.0=py38h5eee18b_0
- ptyprocess=0.7.0=pyhd3eb1b0_2
- pure_eval=0.2.2=pyhd3eb1b0_0
- py=1.11.0=pyhd3eb1b0_0
- pyasn1=0.4.8=pyhd3eb1b0_0
- pyasn1-modules=0.2.8=py_0
- pycodestyle=2.10.0=py38h06a4308_0
- pycparser=2.21=pyhd3eb1b0_0
- pyct=0.5.0=py38h06a4308_0
- pycurl=7.45.1=py38h8f2d780_0
- pydispatcher=2.0.5=py38h06a4308_2
- pydocstyle=6.3.0=py38h06a4308_0
- pyerfa=2.0.0=py38h27cfd23_0
- pyflakes=3.0.1=py38h06a4308_0
- pygments=2.11.2=pyhd3eb1b0_0
- pyhamcrest=2.0.2=pyhd3eb1b0_2
- pylint=2.16.2=py38h06a4308_0
- pylint-venv=2.3.0=py38h06a4308_0
- pyls-spyder=0.4.0=pyhd3eb1b0_0
- pyodbc=4.0.34=py38h6a678d5_0
- pyopenssl=23.0.0=py38h06a4308_0
- pyparsing=3.0.9=py38h06a4308_0
- pyqt=5.15.7=py38h6a678d5_1
- pyqt5-sip=12.11.0=py38h6a678d5_1
- pyqtwebengine=5.15.7=py38h6a678d5_1
- pyrsistent=0.18.0=py38heee7806_0
- pysocks=1.7.1=py38h06a4308_0
- pytables=3.7.0=py38hf19a122_1
- pytest=7.1.2=py38h06a4308_0
- python=3.8.16=h7a1cb2a_3
- python-dateutil=2.8.2=pyhd3eb1b0_0
- python-fastjsonschema=2.16.2=py38h06a4308_0
- python-lsp-black=1.2.1=py38h06a4308_0
- python-lsp-jsonrpc=1.0.0=pyhd3eb1b0_0
- python-lsp-server=1.7.1=py38h06a4308_0
- python-slugify=5.0.2=pyhd3eb1b0_0
- python-snappy=0.6.1=py38h6a678d5_0
- pytoolconfig=1.2.5=py38h06a4308_1
- pytorch=2.0.0=py3.8_cuda11.7_cudnn8.5.0_0
- pytorch-cuda=11.7=h778d358_3
- pytorch-mutex=1.0=cuda
- pytz=2022.7=py38h06a4308_0
- pyviz_comms=2.0.2=pyhd3eb1b0_0
- pywavelets=1.4.1=py38h5eee18b_0
- pyxdg=0.27=pyhd3eb1b0_0
- pyyaml=6.0=py38h5eee18b_1
- pyzmq=23.2.0=py38h6a678d5_0
- qdarkstyle=3.0.2=pyhd3eb1b0_0
- qstylizer=0.2.2=py38h06a4308_0
- qt-main=5.15.2=h327a75a_7
- qt-webengine=5.15.9=hd2b0992_4
- qtawesome=1.2.2=py38h06a4308_0
- qtconsole=5.4.0=py38h06a4308_0
- qtpy=2.2.0=py38h06a4308_0
- qtwebkit=5.212=h4eab89a_4
- queuelib=1.5.0=py38h06a4308_0
- readline=8.2=h5eee18b_0
- regex=2022.7.9=py38h5eee18b_0
- requests=2.28.1=py38h06a4308_0
- requests-file=1.5.1=pyhd3eb1b0_0
- rope=1.7.0=py38h06a4308_0
- rtree=1.0.1=py38h06a4308_0
- scikit-image=0.19.3=py38h6a678d5_1
- scikit-learn=1.2.1=py38h6a678d5_0
- scikit-learn-intelex=2023.0.2=py38h06a4308_0
- scipy=1.10.0=py38h14f4228_1
- scrapy=2.8.0=py38h06a4308_0
- seaborn=0.12.2=py38h06a4308_0
- secretstorage=3.3.1=py38h06a4308_0
- send2trash=1.8.0=pyhd3eb1b0_1
- service_identity=18.1.0=pyhd3eb1b0_1
- setuptools=65.6.3=py38h06a4308_0
- sip=6.6.2=py38h6a678d5_0
- six=1.16.0=pyhd3eb1b0_1
- smart_open=5.2.1=py38h06a4308_0
- snappy=1.1.9=h295c915_0
- sniffio=1.2.0=py38h06a4308_1
- snowballstemmer=2.2.0=pyhd3eb1b0_0
- sortedcontainers=2.4.0=pyhd3eb1b0_0
- soupsieve=2.3.2.post1=py38h06a4308_0
- sphinx=5.0.2=py38h06a4308_0
- sphinxcontrib-applehelp=1.0.2=pyhd3eb1b0_0
- sphinxcontrib-devhelp=1.0.2=pyhd3eb1b0_0
- sphinxcontrib-htmlhelp=2.0.0=pyhd3eb1b0_0
- sphinxcontrib-jsmath=1.0.1=pyhd3eb1b0_0
- sphinxcontrib-qthelp=1.0.3=pyhd3eb1b0_0
- sphinxcontrib-serializinghtml=1.1.5=pyhd3eb1b0_0
- spyder=5.4.1=py38h06a4308_0
- spyder-kernels=2.4.1=py38h06a4308_0
- sqlalchemy=1.4.39=py38h5eee18b_0
- sqlite=3.40.1=h5082296_0
- stack_data=0.2.0=pyhd3eb1b0_0
- statsmodels=0.13.5=py38h7deecbd_1
- sympy=1.11.1=py38h06a4308_0
- tabulate=0.8.10=py38h06a4308_0
- tbb=2021.7.0=hdb19cb5_0
- tbb4py=2021.7.0=py38hdb19cb5_0
- tblib=1.7.0=pyhd3eb1b0_0
- tenacity=8.0.1=py38h06a4308_1
- terminado=0.17.1=py38h06a4308_0
- text-unidecode=1.3=pyhd3eb1b0_0
- textdistance=4.2.1=pyhd3eb1b0_0
- threadpoolctl=2.2.0=pyh0d69192_0
- three-merge=0.1.1=pyhd3eb1b0_0
- tifffile=2021.7.2=pyhd3eb1b0_2
- tinycss2=1.2.1=py38h06a4308_0
- tk=8.6.12=h1ccaba5_0
- tldextract=3.2.0=pyhd3eb1b0_0
- tokenizers=0.11.4=py38h3dcd8bd_1
- toml=0.10.2=pyhd3eb1b0_0
- tomli=2.0.1=py38h06a4308_0
- tomlkit=0.11.1=py38h06a4308_0
- toolz=0.12.0=py38h06a4308_0
- torchaudio=2.0.0=py38_cu117
- torchtriton=2.0.0=py38
- torchvision=0.15.0=py38_cu117
- tornado=6.1=py38h27cfd23_0
- tqdm=4.64.1=py38h06a4308_0
- traitlets=5.7.1=py38h06a4308_0
- transformers=4.24.0=py38h06a4308_0
- twisted=22.2.0=py38h5eee18b_1
- typing-extensions=4.4.0=py38h06a4308_0
- typing_extensions=4.4.0=py38h06a4308_0
- ujson=5.4.0=py38h6a678d5_0
- unidecode=1.2.0=pyhd3eb1b0_0
- unixodbc=2.3.11=h5eee18b_0
- urllib3=1.26.14=py38h06a4308_0
- w3lib=1.21.0=pyhd3eb1b0_0
- watchdog=2.1.6=py38h06a4308_0
- wcwidth=0.2.5=pyhd3eb1b0_0
- webencodings=0.5.1=py38_1
- websocket-client=0.58.0=py38h06a4308_4
- werkzeug=2.2.2=py38h06a4308_0
- whatthepatch=1.0.2=py38h06a4308_0
- wheel=0.38.4=py38h06a4308_0
- widgetsnbextension=3.5.2=py38h06a4308_0
- wrapt=1.14.1=py38h5eee18b_0
- wurlitzer=3.0.2=py38h06a4308_0
- xarray=2022.11.0=py38h06a4308_0
- xz=5.2.10=h5eee18b_1
- yaml=0.2.5=h7b6447c_0
- yapf=0.31.0=pyhd3eb1b0_0
- zeromq=4.3.4=h2531618_0
- zfp=0.5.5=h295c915_6
- zict=2.1.0=py38h06a4308_0
- zipp=3.11.0=py38h06a4308_0
- zlib=1.2.13=h5eee18b_0
- zope=1.0=py38_1
- zope.interface=5.4.0=py38h7f8727e_0
- zstd=1.5.2=ha4553b6_0
- pip:
- docker-pycreds==0.4.0
- gitdb==4.0.10
- gitpython==3.1.31
- onnx==1.13.1
- opencv-python==4.2.0.34
- pathtools==0.1.2
- protobuf==3.20.3
- sentry-sdk==1.19.1
- setproctitle==1.3.2
- smmap==5.0.0
- torchmetrics==0.10.2
- wandb==0.14.2 | 14,701 | 31.59867 | 52 | yml |
null | EMSAFormer-main/inference_dataset.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
from copy import deepcopy
from datetime import datetime
from functools import partial
import getpass
import json
import os
from pprint import pprint
import sys
from time import time
import warnings
import cv2
import numpy as np
import torch
# torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
from torch.utils.data import DataLoader
from tqdm import tqdm
from nicr_mt_scene_analysis.data import move_batch_to_device
from nicr_mt_scene_analysis.data import mt_collate
from nicr_mt_scene_analysis.data import CollateIgnoredDict
from nicr_mt_scene_analysis.data.preprocessing.resize import get_fullres
from nicr_mt_scene_analysis.data.preprocessing.resize import get_fullres_key
from nicr_scene_analysis_datasets import ScanNet
from nicr_scene_analysis_datasets.dataset_base import OrientationDict
from nicr_scene_analysis_datasets.dataset_base import SampleIdentifier
from emsaformer.args import ArgParserEMSAFormer
from emsaformer.data import get_dataset
from emsaformer.model import EMSAFormer
from emsaformer.preprocessing import get_preprocessor
from emsaformer.weights import load_weights
# Upper bound for all confidence scores before they are fused into the
# "class + score" float encoding used by write_mapping_output (integer part:
# class/instance id, fractional part: score). Keeping scores strictly below
# 1.0 guarantees that casting the fused value back to int recovers the id.
_SCORE_MAX = 0.999
def _get_args():
    """Parse command-line arguments for dataset inference.

    Extends the common EMSAFormer argument parser with inference-specific
    options (split, ScanNet subsampling, network input resolution, batch
    size, output path, and output format(s)).

    Returns
    -------
    argparse.Namespace
        The parsed arguments.
    """
    parser = ArgParserEMSAFormer()

    # add additional arguments
    group = parser.add_argument_group('Inference')
    group.add_argument(
        '--inference-split',
        type=str,
        default='test',
        help="Dataset split to load."
    )
    group.add_argument(
        '--inference-scannet-subsample',
        type=int,
        default=100,
        choices=(5, 10, 50, 100, 200, 500),  # 5 only for mapping inference
        help="Subsample to use for ScanNet dataset."
    )
    group.add_argument(  # useful for appm context module
        '--inference-input-height',
        type=int,
        default=480,
        dest='validation_input_height',  # used in test phase
        help="Network input height for predicting on inference data."
    )
    group.add_argument(  # useful for appm context module
        '--inference-input-width',
        type=int,
        default=640,
        dest='validation_input_width',  # used in test phase
        help="Network input width for predicting on inference data."
    )
    group.add_argument(
        '--inference-batch-size',
        type=int,
        default=8,
        help="Batch size to use for inference."
    )
    group.add_argument(
        '--inference-output-path',
        type=str,
        default=None,
        help="Path where to write inference outputs to."
    )
    group.add_argument(
        '--inference-output-format',
        type=str,
        nargs='+',
        # BUGFIX: with nargs='+' the default must be a list; a plain string
        # default would be iterated character-wise in main(), so no output
        # format would ever match and nothing would be written
        default=['scannet-semantic'],
        choices=('scannet-semantic', 'scannet-instance', 'scannet-panoptic',
                 'mapping'),
        help="Output format(s) for inference."
    )
    group.add_argument(
        '--inference-output-write-ground-truth',
        action='store_true',
        default=False,
        help="For output format 'scannet-*', write ground-truth data."
    )
    group.add_argument(
        '--inference-output-ground-truth-max-depth',
        type=float,
        default=None,
        help="Mask all ground-truth annotations with depth larger then this "
             "value (in m) to void. By default, no masking is performed."
    )
    group.add_argument(
        '--inference-output-semantic-instance-shift',
        type=int,
        default=1000,
        choices=(1000, (1 << 16)),
        help="Shift to apply for writing ground-truth annotations for output "
             "format 'scannet-instance'. ScanNet benchmark by default uses "
             "1000 and encodes ground-truth instances as sem*1000+inst. "
             "However, for Hypersim, 1000 is too small, thus, we use "
             "(1<<16=2^16) instead. Note that shifting 16 bits also requires "
             "changing the output format as annotations cannot be stored in a "
             "png16 anymore. Similar to the panoptic encoding, we use a png8 "
             "with three channels instead: R: semantic class (uint8), G+B: "
             "instance id (uint16)."
    )
    group.add_argument(
        '--overwrite',
        action='store_true',
        default=False,
        help="Force overwriting of existing output files."
    )
    args = parser.parse_args()

    return args
def _semantic_and_instance_to_panoptic_bgr(semantic, instance):
assert semantic.max() <= np.iinfo('uint8').max
semantic_uint8 = semantic.astype('uint8')
assert instance.shape == semantic.shape
assert instance.max() <= np.iinfo('uint16').max
instance_uint16 = instance.astype('uint16')
r = semantic_uint8 # semantic class
g = (instance_uint16 >> 8).astype('uint8') # upper 8bit of instance id
b = (instance_uint16 & 0xFF).astype('uint8') # lower 8bit of instance id
# BGR for opencv
panoptic_img = np.stack([b, g, r], axis=2)
return panoptic_img
def write_scannet_panoptic_output(
        batch,
        prediction,
        output_path,
        max_instances_per_category,
        identifier_to_filename_mapper,
        max_depth=None,
        semantic_class_mapper=lambda x: x,
        write_gt=False
):
    """Write panoptic segmentation results in ScanNet benchmark format.

    Predictions are written to ``<output_path>/pred_path`` and, if
    ``write_gt`` is set and the batch provides a full-resolution 'panoptic'
    annotation, ground truth is written to ``<output_path>/gt_path``. Each
    output file is a three-channel png created by
    ``_semantic_and_instance_to_panoptic_bgr`` (R: semantic class uint8,
    G+B: instance id uint16).

    Parameters
    ----------
    batch : dict
        Collated input batch; 'identifier' is used to derive filenames and
        '_no_preprocessing'/'depth' for the optional depth masking.
    prediction : dict
        Postprocessed network output providing the
        'panoptic_segmentation_deeplab_*' keys.
    output_path : str
        Root directory for the output files.
    max_instances_per_category : int
        Shift used to decode the merged ground-truth panoptic label into
        semantic class (// shift) and instance id (% shift).
    identifier_to_filename_mapper : callable
        Maps a sample identifier tuple to an output filename.
    max_depth : float, optional
        If given, ground-truth pixels with depth larger than this value
        (same unit as the raw depth image, presumably mm - see caller)
        are set to void (0).
    semantic_class_mapper : callable, optional
        Optional mapping applied to semantic class indices.
    write_gt : bool, optional
        Whether to also write ground-truth annotations.
    """
    # For evaluating the panoptic segmentation, we need to save the
    # image in the following format:
    # R: semantic class (uint8), G+B: instance id (uint16)
    # We save the image in the following format:
    # unzip_root/
    #  |-- scene0707_00_000000.png
    #  |-- scene0707_00_000200.png
    #  |-- scene0707_00_000400.png
    #  ⋮

    # Note that, for Hypersim, semantic and panoptic_semantic (i.e. semantic
    # after merging semantic and instance) slightly differ for few images.
    # This is because there are some pixels that belong to a thing class but
    # are not assigned to any instance (instance=0), e.g., in scene ai_052_001,
    # a lamp is labeled as lamp but is not annotated as instance. Panoptic
    # merging assigns void for those pixels. There is no workaround for this
    # issue. Affected scenes: valid: ai_023_003, ai_041_003, ai_052_001,
    # ai_052_003 -> 1576566 pixels (0.03%); test: ai_005_001, ai_008_005,
    # ai_008_005, ai_022_001 -> 801359 pixels (0.01%).
    # Computing mIoU in [0, 1] to semantic / panoptic_semantic as ground truth
    # changes the result by ~0.0001-0.0002 - so it is not a big issue and
    # negligible.

    # ground-truth panoptic
    # read semantic and instance and combine them to panoptic
    if write_gt and get_fullres_key('panoptic') in batch:
        path = os.path.join(output_path, 'gt_path')
        os.makedirs(path, exist_ok=True)
        gt_panoptics = get_fullres(batch, 'panoptic').cpu().numpy()
        for i, (gt_panoptic) in enumerate(gt_panoptics):
            # extract semantic and instance from merged panoptic
            gt_semantic = gt_panoptic // max_instances_per_category
            gt_instance = gt_panoptic % max_instances_per_category

            # apply opt. class mapping
            gt_semantic = semantic_class_mapper(gt_semantic)

            # mask out all pixels with depth larger than max_depth
            if max_depth is not None:
                depth = batch['_no_preprocessing']['depth'][i]
                depth_mask = depth > max_depth
                gt_semantic[depth_mask] = 0
                gt_instance[depth_mask] = 0

            cv2.imwrite(
                os.path.join(
                    path,
                    identifier_to_filename_mapper(batch['identifier'][i])
                ),
                _semantic_and_instance_to_panoptic_bgr(gt_semantic, gt_instance)
            )

    # predicted panoptic
    path = os.path.join(output_path, 'pred_path')
    os.makedirs(path, exist_ok=True)
    panoptic_segmentation_semantic = get_fullres(prediction, 'panoptic_segmentation_deeplab_semantic_idx').cpu().numpy()
    panoptic_segmentation_semantic = semantic_class_mapper(panoptic_segmentation_semantic)  # map classes
    panoptic_segmentation_instance = get_fullres(prediction, 'panoptic_segmentation_deeplab_instance_idx').cpu().numpy()
    for b_idx in range(panoptic_segmentation_semantic.shape[0]):
        cv2.imwrite(
            os.path.join(
                path,
                identifier_to_filename_mapper(batch['identifier'][b_idx])
            ),
            _semantic_and_instance_to_panoptic_bgr(
                panoptic_segmentation_semantic[b_idx],
                panoptic_segmentation_instance[b_idx]
            )
        )
def write_scannet_semantic_output(
        batch,
        prediction,
        output_path,
        identifier_to_filename_mapper,
        max_depth=None,
        semantic_class_mapper=lambda x: x,
        write_gt=False
):
    """Write semantic segmentation results in ScanNet benchmark format.

    See: https://kaldir.vc.in.tum.de/scannet_benchmark/documentation#format-label2d
    and: https://github.com/ScanNet/ScanNet/blob/master/BenchmarkScripts/2d_evaluation/evalPixelLevelSemanticLabeling.py
    One png per sample is written flat into each target directory, e.g.:
        unzip_root/
         |-- scene0707_00_000000.png
         |-- scene0707_00_000200.png
         ⋮

    Three outputs are created below `output_path`: the optional ground
    truth ('gt_path'), the raw semantic decoder prediction
    ('pred_path_semantic'), and the semantic prediction after panoptic
    merging ('pred_path_panoptic_semantic').
    """
    # optionally dump the full-resolution ground-truth annotation
    if write_gt and get_fullres_key('semantic') in batch:
        gt_dir = os.path.join(output_path, 'gt_path')
        os.makedirs(gt_dir, exist_ok=True)
        gt_batch = get_fullres(batch, 'semantic').cpu().numpy()
        for idx, gt_img in enumerate(gt_batch):
            mapped_gt = semantic_class_mapper(gt_img)  # gt has void class
            if max_depth is not None:
                # set annotations beyond max_depth to void
                too_far = batch['_no_preprocessing']['depth'][idx] > max_depth
                mapped_gt[too_far] = 0
            fn = identifier_to_filename_mapper(batch['identifier'][idx])
            cv2.imwrite(os.path.join(gt_dir, fn), mapped_gt)

    # prediction of the semantic decoder
    pred_dir = os.path.join(output_path, 'pred_path_semantic')
    os.makedirs(pred_dir, exist_ok=True)
    sem_pred = get_fullres(prediction, 'semantic_segmentation_idx')
    sem_pred = sem_pred.to(torch.uint8).cpu().numpy()
    for idx, sem_img in enumerate(sem_pred):
        fn = identifier_to_filename_mapper(batch['identifier'][idx])
        # the decoder output does not contain void, shift classes by one
        cv2.imwrite(os.path.join(pred_dir, fn),
                    semantic_class_mapper(sem_img + 1))

    # semantic prediction after panoptic merging
    pan_dir = os.path.join(output_path, 'pred_path_panoptic_semantic')
    os.makedirs(pan_dir, exist_ok=True)
    pan_pred = get_fullres(prediction,
                           'panoptic_segmentation_deeplab_semantic_idx')
    pan_pred = pan_pred.to(torch.uint8).cpu().numpy()
    for idx, pan_img in enumerate(pan_pred):
        fn = identifier_to_filename_mapper(batch['identifier'][idx])
        # panoptic merging already introduces the void class
        cv2.imwrite(os.path.join(pan_dir, fn), semantic_class_mapper(pan_img))
def write_scannet_instance_output(
        batch,
        prediction,
        output_path,
        identifier_to_filename_mapper,
        shift=1000,
        max_depth=None,
        semantic_class_mapper=lambda x: x,
        write_gt=False
):
    """Write instance segmentation results in ScanNet benchmark format.

    Predictions go to ``<output_path>/pred_path_panoptic_instance``: one
    txt file per sample listing, per detected instance, a binary mask png
    (in 'predicted_masks/'), the semantic class, and the panoptic score.
    If ``write_gt`` is set and the batch contains full-resolution
    'semantic' and 'instance' annotations, ground truth goes to
    ``<output_path>/gt_path``, encoded depending on ``shift``:
    1000 -> single-channel png16 with label*1000+instance (ScanNet
    default); 1<<16 -> three-channel png8 (R: class, G+B: instance id).

    ``max_depth`` (same unit as the raw depth image) optionally masks
    ground-truth pixels beyond that distance to void; the
    ``semantic_class_mapper`` is applied to all semantic class indices.
    """
    # Scannet benchmark format for instance segmentation
    # see: https://kaldir.vc.in.tum.de/scannet_benchmark/documentation#format-instance2d
    # see: https://github.com/ScanNet/ScanNet/blob/master/BenchmarkScripts/2d_evaluation/evalInstanceLevelSemanticLabeling.py
    # prediction format:
    # root/
    #  |-- scene0707_00_000000.txt
    #  |-- scene0707_00_000200.txt
    #  |-- scene0707_00_000400.txt
    #  ⋮
    #  |-- predicted_masks/
    #     |-- scene0707_00_000000_000.png
    #     |-- scene0707_00_000000_001.png
    #     ⋮
    # with scene0707_00_000000.txt containing:
    # predicted_masks/scene0707_00_000000_000.png 33 0.7234
    # predicted_masks/scene0707_00_000000_001.png 5 0.9038

    # ground-truth semantic+instance
    # see: https://github.com/ScanNet/ScanNet/blob/3e5726500896748521a6ceb81271b0f5b2c0e7d2/BenchmarkScripts/2d_helpers/convert_scannet_instance_image.py
    # ScanNet benchmark by default uses 1000 and encodes ground-truth instances
    # as sem*1000+inst. However, for Hypersim, 1000 is too small, thus, we
    # use (1<<16=2^16) instead. Note that shifting 16 bits also requires
    # changing the output format as annotations cannot be stored in a png16
    # anymore. Similar to the panoptic encoding, we use a png8 with three
    # channels instead: R: semantic class (uint8), G+B: instance id (uint16).
    assert shift in (1000, (1 << 16))

    if write_gt and all(get_fullres_key(k) in batch for k in ('semantic',
                                                              'instance')):
        path = os.path.join(output_path, 'gt_path')
        os.makedirs(path, exist_ok=True)
        gt_semantic = get_fullres(batch, 'semantic').cpu().numpy()
        gt_instance = get_fullres(batch, 'instance').cpu().numpy()

        if 1000 == shift:
            # scannet default shift
            # apply opt. class mapping
            gt_semantic_instance = semantic_class_mapper(gt_semantic)
            # create combined label as label * 1000 + instance_id
            gt_semantic_instance = gt_semantic_instance.astype('uint16') * 1000
            gt_semantic_instance += gt_instance.astype('uint16')
            for i, gt in enumerate(gt_semantic_instance):
                # mask out all pixels with depth larger than max_depth
                if max_depth is not None:
                    depth = batch['_no_preprocessing']['depth'][i]
                    depth_mask = depth > max_depth
                    gt[depth_mask] = 0

                cv2.imwrite(
                    os.path.join(
                        path,
                        identifier_to_filename_mapper(batch['identifier'][i])
                    ),
                    gt
                )
        else:
            # scannet shift by 2^16 (three channel encoding)
            for i, (gt_sem, gt_ins) in enumerate(zip(gt_semantic, gt_instance)):
                # apply opt. class mapping
                gt_sem = semantic_class_mapper(gt_sem)

                # mask out all pixels with depth larger than max_depth
                if max_depth is not None:
                    depth = batch['_no_preprocessing']['depth'][i]
                    depth_mask = depth > max_depth
                    gt_sem[depth_mask] = 0
                    gt_ins[depth_mask] = 0

                cv2.imwrite(
                    os.path.join(
                        path,
                        identifier_to_filename_mapper(batch['identifier'][i])
                    ),
                    _semantic_and_instance_to_panoptic_bgr(gt_sem, gt_ins)
                )

    # panoptic instance prediction
    path = os.path.join(output_path, 'pred_path_panoptic_instance')
    mask_dir = 'predicted_masks'
    path_masks = os.path.join(path, mask_dir)
    os.makedirs(path, exist_ok=True)
    os.makedirs(path_masks, exist_ok=True)

    instance = get_fullres(prediction, 'panoptic_segmentation_deeplab_instance_idx').cpu().numpy()
    instance_meta = prediction['panoptic_segmentation_deeplab_instance_meta']
    for i, (instance_i, instance_meta_i) in enumerate(zip(instance,
                                                          instance_meta)):
        # write a txt file and corresponding masks for each example in batch
        basename = identifier_to_filename_mapper(batch['identifier'][i], ext='')
        txt_lines = []
        for instance_id in instance_meta_i:
            if instance_meta_i[instance_id]['area'] == 0:
                # empty instance (no offset was assigned to this center)
                continue

            # get mask ("everything non-zero is part of the prediction")
            mask = ((instance_i == instance_id)*255).astype(np.uint8)

            # save mask
            mask_fn = basename + f'_{len(txt_lines):03d}.png'
            cv2.imwrite(os.path.join(path_masks, mask_fn), mask)

            # prepare line for text file
            semantic_idx = semantic_class_mapper(
                instance_meta_i[instance_id]['semantic_idx']
            )
            panoptic_score = instance_meta_i[instance_id]['panoptic_score']
            txt_lines.append(
                f"{mask_dir}/{mask_fn} {semantic_idx} {panoptic_score:0.4f}\n"
            )

        with open(os.path.join(path, basename + '.txt'), 'w') as f:
            f.writelines(txt_lines)
def write_mapping_output(
        batch,
        prediction,
        output_path,
        instance_use_panoptic_score=True,
        semantic_class_mapper=lambda x: x,
        compressed=True
):
    """Write predictions as npz/json files for (MIRA) mapping.

    For each sample, npz files are written below `output_path` into
    'pred_semantic', 'pred_panoptic_semantic', 'pred_panoptic_instance',
    and 'pred_scene'; instance meta information goes as json into
    'pred_panoptic_instance_meta'. Class/instance id and score are fused
    into a single float32: integer part = id, fractional part = score
    (scores are clamped to [0, _SCORE_MAX] so the id is recoverable via
    an uint8/int cast).

    If `instance_use_panoptic_score` is set, the panoptic score
    (center score * mean semantic score of the instance) is stored for
    instances instead of the raw center score. `compressed` toggles
    np.savez_compressed vs. np.savez.
    """
    # we only write predictions (see MIRA dataset readers in
    # nicr_scene_analysis_datasets for loading)

    def _write_as_npz(dirname, tensor_to_write):
        # one npz per sample; the identifier tuple defines the subfolder
        # structure, its last element the filename
        path = os.path.join(output_path, dirname)
        for i, tensor in enumerate(tensor_to_write):
            path_i = os.path.join(path, *batch['identifier'][i][:-1])
            filename_i = batch['identifier'][i][-1] + '.npz'
            os.makedirs(path_i, exist_ok=True)
            if compressed:
                np.savez_compressed(os.path.join(path_i, filename_i), tensor)
            else:
                np.savez(os.path.join(path_i, filename_i), tensor)

    # semantic prediction (float32: class + score)
    sem_scores = get_fullres(prediction, 'semantic_segmentation_score')
    sem_scores = torch.clamp(sem_scores, min=0, max=_SCORE_MAX)
    sem_classes = get_fullres(prediction, 'semantic_segmentation_idx')
    sem_classes = sem_classes.to(torch.uint8)  # < 255 classes
    sem_classes += 1  # 0 = void, but output has no void class -> +1
    sem_scores = sem_scores.cpu().numpy()
    sem_classes = sem_classes.cpu().numpy()
    sem_classes = semantic_class_mapper(sem_classes)  # map classes
    sem_output = sem_classes.astype('float32') + sem_scores
    assert (sem_output.astype('uint8') == sem_classes).all()
    # convert to topk format (topk, h, w) with topk=1 here for now
    sem_output = sem_output[:, None, ...]
    _write_as_npz('pred_semantic', sem_output)

    # panoptic semantic prediction (float32: class + score)
    # note panoptic merging is done on CPU
    pan_sem_scores = get_fullres(
        prediction,
        'panoptic_segmentation_deeplab_semantic_score'
    )
    pan_sem_scores = torch.clamp(pan_sem_scores, min=0, max=_SCORE_MAX)
    pan_sem_classes = get_fullres(prediction, 'panoptic_segmentation_deeplab_semantic_idx')
    pan_sem_classes = pan_sem_classes.to(torch.uint8)  # < 255 classes
    pan_sem_scores = pan_sem_scores.cpu().numpy()
    pan_sem_classes = pan_sem_classes.cpu().numpy()
    pan_sem_classes = semantic_class_mapper(pan_sem_classes)  # map classes
    pan_sem_output = pan_sem_classes.astype('float32') + pan_sem_scores
    assert (pan_sem_output.astype('uint8') == pan_sem_classes).all()
    # convert to topk format (topk, h, w) with topk=1
    pan_sem_output = pan_sem_output[:, None, ...]
    _write_as_npz('pred_panoptic_semantic', pan_sem_output)

    # panoptic instance prediction
    if instance_use_panoptic_score:
        # use panoptic score instead of instance score
        # score: score_instance_center * (mean_semantic_score_of_instance)
        pan_ins_scores = get_fullres(
            prediction,
            'panoptic_segmentation_deeplab_panoptic_score'
        )
    else:
        # use raw instance score
        # score: score_instance_center
        pan_ins_scores = get_fullres(
            prediction,
            'panoptic_segmentation_deeplab_instance_score'
        )
    pan_ins_scores = torch.clamp(pan_ins_scores, min=0, max=_SCORE_MAX)
    pan_ins_ids = get_fullres(prediction, 'panoptic_segmentation_deeplab_instance_idx')
    pan_ins_scores = pan_ins_scores.cpu().numpy()
    pan_ins_ids = pan_ins_ids.cpu().numpy()
    pan_ins_output = pan_ins_ids.astype('float32') + pan_ins_scores
    _write_as_npz('pred_panoptic_instance', pan_ins_output)

    # panoptic instance meta
    pan_ins_meta = prediction['panoptic_segmentation_deeplab_instance_meta']
    path = os.path.join(output_path, 'pred_panoptic_instance_meta')
    for i, meta in enumerate(pan_ins_meta):
        # apply semantic class mapping
        meta_i = deepcopy(meta)  # copy to avoid modifying in place
        for k in meta_i:
            if 'semantic_idx' in meta_i[k]:  # filter instances without pixels
                meta_i[k]['semantic_idx'] = int(semantic_class_mapper(
                    meta_i[k]['semantic_idx'])
                )
        path_i = os.path.join(path, *batch['identifier'][i][:-1])
        filename_i = batch['identifier'][i][-1] + '.json'
        os.makedirs(path_i, exist_ok=True)
        with open(os.path.join(path_i, filename_i), 'w') as f:
            json.dump(meta_i, f, sort_keys=True, indent=4)

    # scene class prediction
    scene_scores = prediction['scene_class_score']
    scene_scores = torch.clamp(scene_scores, min=0, max=_SCORE_MAX)
    scene_classes = prediction['scene_class_idx']
    scene_scores = scene_scores.cpu().numpy()
    scene_classes = scene_classes.cpu().numpy()
    scene_output = scene_classes.astype('float32') + scene_scores
    _write_as_npz('pred_scene', scene_output)
def main():
    """Run inference over a dataset split and write the requested outputs.

    Steps: parse args, build per-camera datasets and dataloaders, load the
    EMSAFormer model and checkpoint weights, run batched inference on CUDA,
    and write each requested output format (scannet-semantic / -instance /
    -panoptic / mapping) to disk. A meta.json with command, args, and
    checkpoint info is appended in the output directory.
    """
    # args
    args = _get_args()

    if any(k in args.inference_output_format
           for k in ('scannet-semantic', 'scannet-instance',
                     'scannet-panoptic')):
        # ensure correct subsampling for ScanNet test split
        if 'scannet' == args.dataset and 'test' == args.inference_split:
            assert args.validation_scannet_subsample == 100

    # output path(s)
    if args.inference_output_path is None:
        # use weights path
        path, fn = os.path.split(args.weights_filepath)
        dir_name = f'inference_outputs_{os.path.splitext(fn)[0]}'
        args.inference_output_path = os.path.join(
            path,
            dir_name,
            args.dataset,
            args.inference_split
        )
    print(f"Writing inference outputs to: '{args.inference_output_path}'")
    os.makedirs(args.inference_output_path, exist_ok=True)

    # device (inference requires a CUDA device)
    device = torch.device('cuda')

    # data ---------------------------------------------------------------------
    # note that args.validation_scannet_subsample is used for ScanNet in test
    # phase, thus we overwrite it with args.inference_scannet_subsample
    args.validation_scannet_subsample = args.inference_scannet_subsample
    dataset = get_dataset(args, split=args.inference_split)

    # split dataset by camera -> batches of same spatial resolution
    datasets = tuple(
        deepcopy(dataset).filter_camera(camera)
        for camera in dataset.cameras
    )

    # build and set preprocessor
    preprocessor = get_preprocessor(
        args,
        dataset=dataset,
        phase='test',
        multiscale_downscales=None,
        keep_raw_inputs=True
    )
    for ds in datasets:
        ds.preprocessor = preprocessor

    # create dataloaders (one per camera)
    collate_fn = partial(
        mt_collate,
        type_blacklist=(np.ndarray, CollateIgnoredDict, OrientationDict,
                        SampleIdentifier)
    )
    dataloaders = tuple(
        DataLoader(
            ds,
            batch_size=args.inference_batch_size,
            shuffle=False,
            drop_last=False,
            collate_fn=collate_fn,
            pin_memory=True,
            num_workers=args.n_workers,
            persistent_workers=False
        )
        for ds in datasets
    )

    # max depth (parameter is given in m but we need it in mm)
    max_depth = None
    if args.inference_output_ground_truth_max_depth is not None:
        if 'scannet' != args.dataset:
            max_depth = args.inference_output_ground_truth_max_depth * 1000.0
        else:
            # Currently, we are using the depth image before preprocessing for
            # masking ground-truth annotations based on depth, as only before
            # preprocessing depth is in mm. However, for ScanNet, depth and RGB
            # are not registered and, thus, shapes may be different. As the
            # maximum depth is 10m for ScanNet, we simply disable the masking
            # for now.
            warnings.warn(
                "Masking ground-truth annotations based on "
                "`--inference-output-ground-truth-max-depth` disabled as "
                "dataset is ScanNet. Maximum distance is 10m."
            )

    # semantic class mapping --------------------------------------------------
    # ScanNet dataset only: map the 20-class setup to benchmark class ids
    if args.dataset == 'scannet' and 20 == args.scannet_semantic_n_classes:
        mapping = ScanNet.SEMANTIC_CLASSES_20_MAPPING_TO_BENCHMARK  # with void
        mapping = np.array(list(mapping.values()), dtype=np.uint8)
        semantic_class_mapper = lambda x: mapping[x]
    else:
        semantic_class_mapper = lambda x: x

    # identifier mapping ------------------------------------------------------
    # scannet-* output format only
    if 'scannet' == args.dataset:
        def _identifier_to_filename(identifier, ext='.png'):
            # format scene%04d_%02d_%06d.png
            camera, scene, id_ = identifier
            return f'{scene}_{int(id_):06d}{ext}'
    elif 'hypersim' == args.dataset:
        def _identifier_to_filename(identifier, ext='.png'):
            # format scene_camera%04d_%02d_%06d.png
            scene, camera, id_ = identifier
            return f'{scene}_{camera}_{int(id_):06d}{ext}'
    else:
        raise RuntimeError()

    # model -------------------------------------------------------------------
    model = EMSAFormer(args, dataset_config=dataset.config)

    # load weights
    print(f"Loading checkpoint: '{args.weights_filepath}'.")
    checkpoint = torch.load(args.weights_filepath, map_location='cpu')
    if 'epoch' in checkpoint:
        print(f"-> Epoch: {checkpoint['epoch']}")
    if args.debug and 'logs' in checkpoint:
        print(f"-> Logs/Metrics:")
        pprint(checkpoint['logs'])
    state_dict = checkpoint['state_dict']
    load_weights(args, model, state_dict)

    # set model to eval mode
    torch.set_grad_enabled(False)
    model.eval()
    model.to(device)

    # inference ---------------------------------------------------------------
    # write some meta data (appended to any existing meta.json)
    ts = time()
    meta = {
        'command': ' '.join(sys.argv),
        'args': vars(args),
        'timestamp': int(ts),
        'local_time': datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S'),
        'user': getpass.getuser(),
        'checkpoint': {}
    }
    if 'epoch' in checkpoint:
        meta['checkpoint']['epoch'] = checkpoint['epoch']
    if 'logs' in checkpoint:
        meta['checkpoint']['logs'] = {k: v.item() if torch.is_tensor(v) else v
                                      for k, v in checkpoint['logs'].items()}
    fp = os.path.join(args.inference_output_path, 'meta.json')
    meta_list = []
    # check for existing meta information
    if os.path.exists(fp):
        with open(fp, 'r') as f:
            meta_list = json.load(f)
    # write meta information
    meta_list.append(meta)
    with open(fp, 'w') as f:
        json.dump(meta_list, f, indent=4)

    # determine max instances per category (class)
    panoptic_post = model.decoders['panoptic_helper'].postprocessing
    max_instances_per_category = panoptic_post.max_instances_per_category

    # run inference and write outputs
    for i, dataloader in enumerate(dataloaders):
        camera = dataloader.dataset.camera
        for j, batch in tqdm(enumerate(dataloader),
                             total=len(dataloader),
                             desc=f'{i+1}/{len(dataloaders)} ({camera})'):
            # move batch to device
            batch = move_batch_to_device(batch, device=device)

            # apply model
            prediction = model(batch, do_postprocessing=True)

            # write outputs
            for output_format in args.inference_output_format:
                # determine and create output path if not exists; for the very
                # first batch, existing directories are only tolerated when
                # --overwrite was given
                output_path = os.path.join(
                    args.inference_output_path,
                    output_format.replace('-', '_'),
                )
                os.makedirs(output_path,
                            exist_ok=(args.overwrite or j != 0 or i != 0))

                if 'scannet-semantic' == output_format:
                    write_scannet_semantic_output(
                        batch=batch,
                        prediction=prediction,
                        output_path=output_path,
                        identifier_to_filename_mapper=_identifier_to_filename,
                        max_depth=max_depth,
                        semantic_class_mapper=semantic_class_mapper,
                        write_gt=args.inference_output_write_ground_truth
                    )
                elif 'scannet-instance' == output_format:
                    write_scannet_instance_output(
                        batch=batch,
                        prediction=prediction,
                        output_path=output_path,
                        identifier_to_filename_mapper=_identifier_to_filename,
                        shift=args.inference_output_semantic_instance_shift,
                        max_depth=max_depth,
                        semantic_class_mapper=semantic_class_mapper,
                        write_gt=args.inference_output_write_ground_truth
                    )
                elif 'scannet-panoptic' == output_format:
                    write_scannet_panoptic_output(
                        batch=batch,
                        prediction=prediction,
                        output_path=output_path,
                        max_instances_per_category=max_instances_per_category,
                        identifier_to_filename_mapper=_identifier_to_filename,
                        max_depth=max_depth,
                        semantic_class_mapper=semantic_class_mapper,
                        write_gt=args.inference_output_write_ground_truth
                    )
                elif 'mapping' == output_format:
                    write_mapping_output(
                        batch=batch,
                        prediction=prediction,
                        output_path=output_path,
                        instance_use_panoptic_score=True,
                        semantic_class_mapper=semantic_class_mapper,
                        compressed=True
                    )
# script entry point: run inference over the configured dataset split and
# write the selected output format(s) to disk
if __name__ == '__main__':
    main()
| 30,738 | 38.05845 | 153 | py |
null | EMSAFormer-main/inference_samples.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Mona Koehler <mona.koehler@tu-ilmenau.de>
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
.. codeauthor:: Soehnke Fischedick <soehnke-benedikt.fischedick@tu-ilmenau.de>
"""
from glob import glob
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import torch
from tqdm import tqdm
from nicr_mt_scene_analysis.data import move_batch_to_device
from nicr_mt_scene_analysis.data import mt_collate
from emsaformer.args import ArgParserEMSAFormer
from emsaformer.data import get_datahelper
from emsaformer.model import EMSAFormer
from emsaformer.preprocessing import get_preprocessor
from emsaformer.visualization import visualize_predictions
from emsaformer.weights import load_weights
def _get_args():
    """Parse command-line arguments for the sample-inference script.

    Builds the common EMSAFormer argument parser and extends it with an
    'Inference' group covering network input resolution, depth
    preprocessing (clipping/scaling), sample input path, output path,
    and result display.

    Returns:
        argparse.Namespace with all parsed arguments.
    """
    arg_parser = ArgParserEMSAFormer()

    # additional, inference-only arguments
    inference_group = arg_parser.add_argument_group('Inference')
    inference_group.add_argument(    # useful for appm context module
        '--inference-input-height',
        type=int,
        default=480,
        dest='validation_input_height',    # used in test phase
        help="Network input height for predicting on inference data."
    )
    inference_group.add_argument(    # useful for appm context module
        '--inference-input-width',
        type=int,
        default=640,
        dest='validation_input_width',    # used in test phase
        help="Network input width for predicting on inference data."
    )
    inference_group.add_argument(
        '--depth-max',
        type=float,
        default=None,
        help="Additional max depth values. Values above are set to zero as "
             "they are most likely not valid. Note, this clipping is applied "
             "before scaling the depth values."
    )
    inference_group.add_argument(
        '--depth-scale',
        type=float,
        default=1.0,
        help="Additional depth scaling factor to apply."
    )

    # default sample directory: './samples' next to this script
    samples_default_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'samples'
    )
    inference_group.add_argument(
        '--samples-path',
        type=str,
        default=samples_default_path,
        help="Directory containing the samples."
    )
    inference_group.add_argument(
        '--output-path',
        type=str,
        default=None,
        help="Directory to save the results."
    )
    inference_group.add_argument(
        '--show-results',
        action='store_true',
        default=False,
        help="Show results in a window."
    )

    return arg_parser.parse_args()
def _load_img(fp):
    """Load an image from disk; color images are returned in RGB order.

    The image is read unchanged (keeps e.g. 16-bit depth data intact).
    Single-channel images are returned as-is.
    """
    image = cv2.imread(fp, cv2.IMREAD_UNCHANGED)
    if image.ndim != 3:
        # single-channel image (e.g., depth) -> nothing to convert
        return image
    # OpenCV loads color images as BGR -> convert to RGB
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
def main():
    """Run inference on RGB-D sample images and visualize/store the results.

    Pipeline: parse args, build data helper and EMSAFormer model, load the
    given checkpoint, preprocess each (rgb, depth) sample pair, forward it
    through the model, and either write visualizations to disk, show them
    in a matplotlib window, or both.
    """
    args = _get_args()
    # assert all(x in args.input_modalities for x in ('rgb', 'depth', 'rgbd')), \
    #     "Only RGBD inference supported so far"
    # NOTE(review): inference is CUDA-only here; running on a CPU-only
    # machine will fail (also torch.load below has no map_location).
    device = torch.device('cuda')

    # data and model
    data = get_datahelper(args)
    dataset_config = data.dataset_config
    model = EMSAFormer(args, dataset_config=dataset_config)

    # load weights
    print(f"Loading checkpoint: '{args.weights_filepath}'")
    checkpoint = torch.load(args.weights_filepath)
    state_dict = checkpoint['state_dict']
    if 'epoch' in checkpoint:
        print(f"-> Epoch: {checkpoint['epoch']}")
    load_weights(args, model, state_dict, verbose=True)

    # inference mode: no gradients, eval mode (e.g., batchnorm/dropout)
    torch.set_grad_enabled(False)
    model.eval()
    model.to(device)

    # build preprocessor (test phase, no multiscale side outputs needed)
    preprocessor = get_preprocessor(
        args,
        dataset=data.datasets_valid[0],
        phase='test',
        multiscale_downscales=None
    )

    # get samples
    basepath = args.samples_path
    # Files are assumed to be in an rgb and depth folder
    rgb_filepaths = sorted(glob(os.path.join(basepath, 'rgb', '*.*')))
    depth_filepaths = sorted(glob(os.path.join(basepath, 'depth', '*.*')))
    # rgb and depth files must match pairwise by basename
    assert len(rgb_filepaths) == len(depth_filepaths)
    basenames_rgb = [os.path.basename(os.path.splitext(x)[0])
                     for x in rgb_filepaths]
    basenames_depth = [os.path.basename(os.path.splitext(x)[0])
                       for x in depth_filepaths]
    assert basenames_rgb == basenames_depth

    if args.output_path is not None:
        # output directory must already exist (subfolders are created below)
        assert os.path.exists(args.output_path)

    for fp_rgb, fp_depth in tqdm(zip(rgb_filepaths, depth_filepaths), total=len(rgb_filepaths)):
        # load rgb and depth image
        img_rgb = _load_img(fp_rgb)
        img_depth = _load_img(fp_depth).astype('float32')
        if args.depth_max is not None:
            # clip implausible far values to 0 (= invalid) BEFORE scaling
            img_depth[img_depth > args.depth_max] = 0
        img_depth *= args.depth_scale

        # preprocess sample
        sample = preprocessor({
            'rgb': img_rgb,
            'depth': img_depth,
            'identifier': os.path.basename(os.path.splitext(fp_rgb)[0])
        })

        # add batch axis as there is no dataloader
        batch = mt_collate([sample])
        batch = move_batch_to_device(batch, device=device)

        # apply model (with postprocessing to get final task outputs)
        predictions = model(batch, do_postprocessing=True)

        # visualize predictions
        preds_viz = visualize_predictions(
            predictions=predictions,
            batch=batch,
            dataset_config=dataset_config
        )

        if args.output_path is not None:
            # visualization keys to persist; each gets its own subfolder
            keys = [
                'semantic_segmentation_idx_fullres',
                'panoptic_segmentation_deeplab_semantic_idx_fullres',
                'panoptic_segmentation_deeplab_instance_idx_fullres',
                'instance_centers',
                'instance_offsets',
                'panoptic_orientations_fullres'
            ]
            for key in keys:
                # index 0: single-sample batch
                image = preds_viz[key][0]
                fp_out = os.path.join(
                    args.output_path,
                    key,
                    os.path.basename(fp_rgb)
                )
                # Create dir if not exists
                os.makedirs(os.path.dirname(fp_out), exist_ok=True)
                # visualizations are either PIL images or numpy arrays
                if isinstance(image, Image.Image):
                    image.save(fp_out)
                elif isinstance(image, np.ndarray):
                    # Convert to BGR (cv2.imwrite expects BGR)
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                    cv2.imwrite(fp_out, image)

        # show results
        if args.show_results:
            _, axs = plt.subplots(2, 4, figsize=(12, 6), dpi=150)
            [ax.set_axis_off() for ax in axs.ravel()]
            axs[0, 0].set_title('RGB')
            axs[0, 0].imshow(
                img_rgb
            )
            axs[0, 1].set_title('Depth')
            axs[0, 1].imshow(
                img_depth,
                interpolation='nearest'
            )
            axs[0, 2].set_title('Semantic')
            axs[0, 2].imshow(
                preds_viz['semantic_segmentation_idx_fullres'][0],
                interpolation='nearest'
            )
            axs[0, 3].set_title('Semantic (panoptic)')
            axs[0, 3].imshow(
                preds_viz['panoptic_segmentation_deeplab_semantic_idx_fullres'][0],
                interpolation='nearest'
            )
            axs[1, 0].set_title('Instance (panoptic)')
            axs[1, 0].imshow(
                preds_viz['panoptic_segmentation_deeplab_instance_idx_fullres'][0],
                interpolation='nearest'
            )
            axs[1, 1].set_title('Instance centers')
            axs[1, 1].imshow(
                preds_viz['instance_centers'][0]
            )
            axs[1, 2].set_title('Instance offsets')
            axs[1, 2].imshow(
                preds_viz['instance_offsets'][0]
            )
            axs[1, 3].set_title('Panoptic (with orientations)')
            axs[1, 3].imshow(
                preds_viz['panoptic_orientations_fullres'][0],
                interpolation='nearest'
            )

            plt.suptitle(
                f"Image: ({os.path.basename(fp_rgb)}, "
                f"{os.path.basename(fp_depth)}), "
                f"Model: {args.weights_filepath}, "
                f"Scene: {preds_viz['scene'][0]}"
            )
            plt.tight_layout()
            # fp = os.path.join('./', 'samples', 'results',
            #                   f"{sample['identifier']}.png")
            # plt.savefig(fp, bbox_inches='tight', pad_inches=0.05, dpi=150)
            plt.show()
# run inference only when executed as a script (not on import)
if __name__ == '__main__':
    main()
| 8,393 | 31.534884 | 96 | py |
null | EMSAFormer-main/main.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Soehnke Fischedick <soehnke-benedikt.fischedick@tu-ilmenau.de>
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
.. codeauthor:: Mona Koehler <mona.koehler@tu-ilmenau.de>
"""
from typing import Tuple
from copy import deepcopy
from datetime import datetime
import json
import os
from pprint import pprint
import shlex
import sys
from time import time
import traceback
import warnings
import numpy as np
import PIL.Image
import torch
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
from torchmetrics import MeanMetric
from tqdm import tqdm
import wandb
from nicr_mt_scene_analysis.checkpointing import CheckpointHelper
from nicr_mt_scene_analysis.data import move_batch_to_device
from nicr_mt_scene_analysis.data import infer_batch_size
from nicr_mt_scene_analysis.logging import CSVLogger
from nicr_mt_scene_analysis.testing.onnx import export_onnx_model
from nicr_mt_scene_analysis.utils import cprint
from nicr_mt_scene_analysis.utils import cprint_step
from emsaformer.args import ArgParserEMSAFormer
from emsaformer.data import get_datahelper
from emsaformer.data import parse_datasets
from emsaformer.loss_weighting import get_loss_weighting_module
from emsaformer.lr_scheduler import get_lr_scheduler
from emsaformer.model import EMSAFormer
from emsaformer.optimizer import get_optimizer
from emsaformer.preprocessing import get_preprocessor
from emsaformer.task_helper import get_task_helpers
from emsaformer.task_helper import TaskHelperType
from emsaformer.visualization import setup_shared_color_generators
from emsaformer.visualization import visualize
from emsaformer.weights import load_weights
class RunHelper:
    """Wraps model, task helpers, and loss weighting for train/valid steps.

    Responsibilities:
      - moves the model (and batches) to the given device,
      - delegates per-task losses/metrics to the task helpers,
      - reduces task losses into a single total loss via the loss
        weighting module,
      - accumulates per-step metrics (weighted by batch size) into
        per-epoch mean metrics,
      - tracks the best validation metrics seen so far ('*_best' entries).
    """
    def __init__(
        self,
        args,
        model: EMSAFormer,
        task_helpers: Tuple[TaskHelperType],
        device: torch.device,
        compile_model: bool
    ) -> None:
        super().__init__()
        # store args to have them later
        self.args = args
        self.model = model.to(device)
        if compile_model:
            # note: compilation does not work with Swin Transformer
            # backbones (see argument help in args.py)
            self.model = torch.compile(self.model)
        self._task_helpers = task_helpers
        for task_helper in self._task_helpers:
            task_helper.initialize(device)
        # some internal stuff
        self._device = device
        # caches best '*_best' validation metrics across epochs
        self._validation_best_metrics_cache = {}
        # maps log key -> MeanMetric accumulated over an epoch
        self._accumulated_step_metrics = {}
        # loss weighting
        self._loss_weighting_module = get_loss_weighting_module(args)

    def reset(self):
        # perform internal reset (e.g., after performing a sanity check)
        # reset loss weights
        self._loss_weighting_module.reset_weights()
        # reset internal caches
        self._validation_best_metrics_cache = {}
        self._accumulated_step_metrics = {}

    def _update_accumulated_step_metrics(self, logs, batch_size):
        # accumulate step logs into per-epoch means, weighted by batch size
        metrics = self._accumulated_step_metrics    # pep8
        for key, value in logs.items():
            # create metric object if it does not yet exist
            if key not in metrics:
                metrics[key] = MeanMetric().to(self._device)
            # update metric
            metrics[key].update(value, weight=batch_size)

    def set_training_mode(self) -> None:
        # enable gradients and train-mode layers (dropout, batchnorm, ...)
        torch.set_grad_enabled(True)
        self.model.train()

    def set_inference_mode(self) -> None:
        # disable gradients and switch to eval mode
        torch.set_grad_enabled(False)
        self.model.eval()

    def training_step(self, batch, batch_idx):
        """Forward a training batch and return the weighted total loss."""
        assert self.model.training

        # apply model
        batch = move_batch_to_device(batch, device=self._device)
        predictions_post = self.model(batch, do_postprocessing=True)

        # apply task helpers
        losses = {}
        logs = {}
        for task_helper in self._task_helpers:
            task_loss_dict, task_logs = task_helper.training_step(
                batch=batch,
                batch_idx=batch_idx,
                predictions_post=predictions_post
            )
            losses.update(task_loss_dict)
            logs.update(task_logs)

        # accumulate losses
        loss = self._loss_weighting_module.reduce_losses(losses, batch_idx)

        # add total loss to logs (detached copy, not part of the graph)
        logs['total_loss'] = loss.detach().clone()

        # update accumulated step metrics (prefixed with 'train_')
        self._update_accumulated_step_metrics(
            logs={f'train_{key}': value for key, value in logs.items()},
            batch_size=infer_batch_size(batch)
        )

        return loss

    def training_get_artifacts_and_metrics(self):
        """Return (artifacts, metrics) for the finished training epoch.

        Computes and resets all accumulated 'train_*' metrics.
        """
        artifacts, metrics = {}, {}

        # handle accumulated step metrics
        for key, metric in self._accumulated_step_metrics.items():
            if 'train' not in key:
                continue
            metrics[key] = metric.compute()
            # reset metric to be ready for next epoch
            metric.reset()

        return artifacts, metrics

    def validation_step(self, batch, batch_idx):
        """Forward a validation batch; return (total_loss, predictions)."""
        assert not self.model.training

        # apply model
        batch = move_batch_to_device(batch, device=self._device)
        predictions_post = self.model(batch, do_postprocessing=True)

        # apply task helpers
        losses = {}
        logs = {}
        for task_helper in self._task_helpers:
            task_loss_dict, task_logs = task_helper.validation_step(
                batch=batch,
                batch_idx=batch_idx,
                predictions_post=predictions_post
            )
            losses.update(task_loss_dict)
            logs.update(task_logs)

        # accumulate losses
        loss = self._loss_weighting_module.reduce_losses(losses, batch_idx)

        # add total loss to logs
        logs['total_loss'] = loss.detach().clone()

        # update accumulated step metrics (prefixed with 'valid_')
        self._update_accumulated_step_metrics(
            logs={f'valid_{key}': value for key, value in logs.items()},
            batch_size=infer_batch_size(batch)
        )

        return loss, predictions_post

    def validation_get_artifacts_examples_metrics(self):
        """Return (artifacts, examples, metrics) for the validation epoch.

        Collects the accumulated 'valid_*' metrics, the task helpers'
        epoch-end results, and updates/returns the best-so-far metrics
        ('*_best'). Higher is better for miou/acc/rq/sq/pq, lower is
        better for mae/rmse; other metrics are not tracked as 'best'.
        """
        artifacts, examples, metrics = {}, {}, {}

        # handle accumulated step metrics
        for key, metric in self._accumulated_step_metrics.items():
            if 'valid' not in key:
                continue
            metrics[key] = metric.compute()
            # reset metric to be ready for next epoch
            metric.reset()

        # apply task helpers
        for task_helper in self._task_helpers:
            task_result = task_helper.validation_epoch_end()
            task_artifacts, task_examples, task_logs = task_result
            metrics.update({f'valid_{key}': value
                            for key, value in task_logs.items()})
            artifacts.update({f'valid_{key}': value
                              for key, value in task_artifacts.items()})
            examples.update({f'valid_{key}': value
                             for key, value in task_examples.items()})

        # update cache for currently best metrics
        def force_tensor(v):
            # comparisons below require tensors; wrap plain numbers
            return v if isinstance(v, torch.Tensor) else torch.tensor(v)

        cache = self._validation_best_metrics_cache
        for key in metrics:
            # determine behavior
            if any(m in key for m in ('miou', 'acc', 'rq', 'sq', 'pq')):
                fn = torch.greater
                default = torch.tensor(-torch.inf)
            elif 'mae' in key or 'rmse' in key:
                fn = torch.less
                default = torch.tensor(torch.inf)
            else:
                continue

            # add or update entry in cache
            key_best = f'{key}_best'
            value_cur = metrics[key]
            value_best = cache.get(key_best, default)
            if fn(force_tensor(value_cur), force_tensor(value_best)).item():
                cache[key_best] = value_cur

        # add best metrics to current logs
        metrics.update(cache)

        return artifacts, examples, metrics
def main():
    """Entry point: set up data, model, and logging, then train or validate.

    Overall procedure:
      1. parse arguments, prepare results/artifacts/checkpoints/examples dirs,
      2. set up wandb logging (skipped for validation-only) and dump args,
      3. build data helper and EMSAFormer model, optionally load weights,
      4. create optimizer, lr scheduler, task helpers, and checkpointing,
      5. optionally run a single-batch sanity check,
      6. either run validation only (with optional visualization) or the
         full training loop with periodic validation, checkpointing,
         resume checkpoints, and CSV/wandb logging.
    """
    # Args & General Stuff -----------------------------------------------------
    parser = ArgParserEMSAFormer()
    args = parser.parse_args()

    # prepare results paths
    if not args.is_resumed_training:
        starttime = datetime.now().strftime('%Y_%m_%d-%H_%M_%S-%f')
        results_path = os.path.abspath(os.path.join(
            args.results_basepath,
            '_debug_runs' if args.debug else '',
            args.dataset.replace(':', '+'),
            f'run_{starttime}'
        ))
    else:
        # write results to same folder as in previous training
        results_path = args.resume_path
    os.makedirs(results_path, exist_ok=args.is_resumed_training)
    artifacts_path = os.path.join(results_path, 'artifacts')
    os.makedirs(artifacts_path, exist_ok=args.is_resumed_training)
    checkpoints_path = os.path.join(results_path, 'checkpoints')
    os.makedirs(checkpoints_path, exist_ok=args.is_resumed_training)
    examples_path = os.path.join(results_path, 'examples')
    os.makedirs(examples_path, exist_ok=args.is_resumed_training)
    print(f"Writing results to '{results_path}'.")

    # append some information to args (also ends up in args.json below)
    args.results_path = results_path
    args.artifacts_path = artifacts_path
    args.checkpoints_path = checkpoints_path
    args.examples_path = examples_path
    args.start_timestamp = int(time())

    if not args.validation_only:
        # set up wandb
        # convert tuples/lists to let them appear in parallel coordinate plots
        w_args = deepcopy(args)
        for k, v in dict(vars(w_args)).items():
            if isinstance(v, (list, tuple)):
                v_str = ', '.join(str(v_) for v_ in v)
                if not isinstance(v[0], str):
                    # prepend 's ' to make sure wandb handles it correctly
                    v_str = f's {v_str}'
                setattr(w_args, f'{k}_str', v_str)

        wandb.init(
            dir=results_path,
            entity='nicr',
            config=w_args,
            mode=args.wandb_mode,
            project=args.wandb_project,
            settings=wandb.Settings(start_method='fork')
        )
        # set epoch as default x axis
        wandb.run.define_metric('epoch')
        wandb.run.define_metric("*", step_metric='epoch', step_sync=True)

        # append some information to args
        args.wandb_name = wandb.run.name
        args.wandb_id = wandb.run.id
        args.wandb_url = wandb.run.url

    # dump args ------------------------------------------------------------
    if not args.is_resumed_training:
        # argv only if not resuming
        with open(os.path.join(args.results_path, 'argsv.txt'), 'w') as f:
            f.write(shlex.join(sys.argv))
            f.write('\n')
    with open(os.path.join(results_path, 'args.json'), 'w') as f:
        json.dump(vars(args), f, sort_keys=True, indent=4)

    # Data & Model -------------------------------------------------------------
    cprint_step(f"Get model and dataset")

    # get datahelper
    data = get_datahelper(args)
    if args.weights_filepath is not None:
        # full weights are loaded below, no need for pretrained backbone
        args.no_pretrained_backbone = True
    # get model
    model = EMSAFormer(args, dataset_config=data.dataset_config)

    # load weights (account for renamed or missing keys, specific dataset
    # combinations, pretraining configurations)
    if args.weights_filepath is not None:
        print(f"Loading (pretrained) weights from: '{args.weights_filepath}'.")
        checkpoint = torch.load(args.weights_filepath,
                                map_location=torch.device('cpu'))
        state_dict = checkpoint['state_dict']
        if 'epoch' in checkpoint:
            print(f"-> Epoch: {checkpoint['epoch']}")
        if args.debug and 'logs' in checkpoint:
            print(f"-> Logs/Metrics:")
            pprint(checkpoint['logs'])
        load_weights(args, model, state_dict, verbose=True)

    # set preprocessor to datasets (note, preprocessing depends on model)
    # collect all side-output downscales required by the decoders
    downscales = set()
    for decoder in model.decoders.values():
        downscales |= set(decoder.side_output_downscales)
    data.set_train_preprocessor(
        get_preprocessor(
            args,
            dataset=data.dataset_train,
            phase='train',
            multiscale_downscales=tuple(downscales)
        )
    )
    data.set_valid_preprocessor(
        get_preprocessor(
            args,
            dataset=data.datasets_valid[0],
            phase='test',
            multiscale_downscales=tuple(downscales) if args.debug else None
        )
    )

    # export onnx model to be able to debug the model's structure
    if args.debug:
        cprint_step(f"Export ONNX model")
        # use 'EXPORT_ONNX_MODELS=true python ...' to export the model
        from torch.onnx import TrainingMode

        # get some valid data
        batch = next(iter(data.train_dataloader))
        batch = {k: v for k, v in batch.items() if torch.is_tensor(v)}

        fp = os.path.join(results_path, 'model.onnx')
        if export_onnx_model(fp, model, (batch, {}),
                             training_mode=TrainingMode.EVAL,
                             force_export=False,
                             use_fallback=True):
            print(f"Wrote ONNX model to '{fp}'.")
        else:
            print("Export skipped. Set `EXPORT_ONNX_MODELS=true` to enable.")

    # Training Stuff -----------------------------------------------------------
    # logging (note, appends to existing metrics file)
    csv_logger = CSVLogger(filepath=os.path.join(results_path, 'metrics.csv'),
                           write_interval=1)

    # optimizer and lr scheduler
    optimizer = get_optimizer(args, model.parameters())
    lr_scheduler = get_lr_scheduler(args, optimizer)

    # get task helper
    task_helpers = get_task_helpers(args, data.dataset_train)

    # wrap model in run helper
    run = RunHelper(
        args,
        model=model,
        task_helpers=task_helpers,
        device=torch.device('cuda'),
        compile_model=args.compile_model,
    )

    # check for resumed training
    if args.resume_ckpt_filepath is not None:
        cprint_step(f"Resume training")
        checkpoint = torch.load(args.resume_ckpt_filepath,
                                map_location=torch.device('cpu'))
        print(f"Checkpoint: '{args.resume_ckpt_filepath}'")
        next_epoch = checkpoint['epoch'] + 1
        print(f"Last epoch: {checkpoint['epoch']}, next epoch: {next_epoch}")
        print("Replacing state dicts for model, optimizer, and lr scheduler.")
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
    else:
        # training starts from scratch
        next_epoch = 0

    # checkpointing
    if args.checkpointing_metrics is None:
        warnings.warn(
            "No checkpoints will be saved. Please provide the metrics by which "
            "you want to checkpoint the model weights with "
            "`--checkpoinintg-metrics`."
        )
    checkpoint_helper = CheckpointHelper(
        metric_names=args.checkpointing_metrics,
        debug=True    # args.debug
    )

    # Simple Sanity Check ------------------------------------------------------
    if not args.skip_sanity_check:
        # ensure that crucial parts (data, forward, metrics, ...) are working
        # as expected, the check is done by forwarding a single batch of all
        # dataloaders WITHOUT backpropagation.
        cprint_step(f"Perform sanity check")

        # disable forward stats tracking (e.g., batchnorm)
        for m in model.modules():
            if hasattr(m, 'track_running_stats'):
                m.track_running_stats = False

        # check training (single batch)
        batch = next(iter(data.train_dataloader))
        assert isinstance(run.training_step(batch, 0), torch.Tensor)
        assert run.training_get_artifacts_and_metrics()

        # re-enable forward stats tracking (e.g., batchnorm)
        for m in model.modules():
            if hasattr(m, 'track_running_stats'):
                m.track_running_stats = True

        # check validation (single batch for all valid sets)
        run.set_inference_mode()
        for valid_dataloader in data.valid_dataloaders:
            batch = next(iter(valid_dataloader))
            validation_result, _ = run.validation_step(batch, 0)
            assert isinstance(validation_result, torch.Tensor)
        result = run.validation_get_artifacts_examples_metrics()    # also resets
        assert result

        # check metrics for checkpointing
        artifacts, examples, metrics = result
        for ckpt_metric in args.checkpointing_metrics or []:
            assert checkpoint_helper._determine_checkpoint_metrics(
                ckpt_metric, metrics
            )

        # reset run helper states (loss weighting module and metric caches)
        run.reset()

        # everything seems to work
        print(f"Fine.")

    # Validation ---------------------------------------------------------------
    if args.validation_only:
        cprint_step(f"Run validation only")
        if args.visualize_validation:
            print("Writing visualizations to: "
                  f"'{args.visualization_output_path}'.")
            # use shared color generators to ensure consistent colors and to speed
            # up visualization
            setup_shared_color_generators(data.dataset_train.config)

        run.set_inference_mode()
        # batch_idx counts across all validation dataloaders
        batch_idx = 0
        for i, valid_dataloader in enumerate(data.valid_dataloaders):
            tqdm_desc = f'Validation {i+1}/{len(data.valid_dataloaders)}'
            tqdm_desc += f' ({valid_dataloader.dataset.camera})'
            for batch in tqdm(valid_dataloader,
                              total=len(valid_dataloader),
                              desc=tqdm_desc):
                _, predictions = run.validation_step(batch, batch_idx)
                if args.visualize_validation:
                    output_path = os.path.join(
                        args.visualization_output_path,
                        args.validation_split
                    )
                    visualize(
                        output_path=output_path,
                        batch=batch,
                        predictions=predictions,
                        dataset_config=data.dataset_train.config
                    )
                batch_idx += 1

        # get and print validation metrics
        _, _, metrics = run.validation_get_artifacts_examples_metrics()
        print("Validation results:")
        pprint(metrics)

        # stop here
        return

    # Training -----------------------------------------------------------------
    cprint_step(f"Start training")

    # overfitting
    if args.overfit_n_batches > 0:
        # force overfitting (training+validation) to overfit_n_batches batches
        # of the valid set
        data.enable_overfitting_mode(n_valid_batches=args.overfit_n_batches)

    # training loop
    try:
        for epoch in range(next_epoch, args.n_epochs):
            cprint(f"Epoch: {epoch:04d}/{args.n_epochs-1:04d}",
                   color='cyan', attrs=('bold',))
            epoch_logs = {'epoch': epoch, 'lr': lr_scheduler.get_last_lr()[0]}

            # training
            run.set_training_mode()
            for batch_idx, batch in tqdm(enumerate(data.train_dataloader),
                                         total=len(data.train_dataloader),
                                         desc='Training'):
                loss = run.training_step(batch, batch_idx)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            # get training metrics
            _, metrics = run.training_get_artifacts_and_metrics()
            epoch_logs.update(metrics)

            # validation scheduling: validate in the last part of the
            # training (after validation_skip) or at forced intervals
            if (args.validation_force_interval is None) or (epoch == 0):
                force = False
            else:
                force = (epoch % args.validation_force_interval) == 0

            if (epoch >= (args.n_epochs * args.validation_skip)) or force:
                run.set_inference_mode()
                # we have multiple valid datasets due to multiple resolutions
                batch_idx = 0
                for i, valid_dataloader in enumerate(data.valid_dataloaders):
                    if isinstance(valid_dataloader.dataset,
                                  torch.utils.data.Subset):
                        # overfitting mode (dataset is wrapped using Subset)
                        camera = valid_dataloader.dataset.dataset.camera
                    else:
                        camera = valid_dataloader.dataset.camera
                    tqdm_desc = (f'Validation {i+1}/'
                                 f'{len(data.valid_dataloaders)} ({camera})')
                    for batch in tqdm(valid_dataloader,
                                      total=len(valid_dataloader),
                                      desc=tqdm_desc):
                        _ = run.validation_step(batch, batch_idx)
                        batch_idx += 1

                # get validation artifacts and metrics
                artifacts, examples, metrics = \
                    run.validation_get_artifacts_examples_metrics()
                epoch_logs.update(metrics)

                # checkpointing
                do_create_checkpoint = checkpoint_helper.check_for_checkpoint(
                    logs=epoch_logs,
                    add_checkpoint_metrics_to_logs=True
                )
                if epoch >= (args.n_epochs * args.checkpointing_skip) or force:
                    # we are allowed to store checkpoints
                    for ckpt_metric in do_create_checkpoint:
                        if not do_create_checkpoint[ckpt_metric]:
                            # no new best value, skip checkpointing
                            continue
                        # create new checkpoint
                        if args.checkpointing_best_only:
                            suffix = '_best'
                        else:
                            suffix = f'_epoch_{epoch:04d}'
                        mapped_name = \
                            checkpoint_helper.metric_mapping_joined[ckpt_metric]
                        ckpt_filepath = os.path.join(
                            checkpoints_path, f'ckpt_{mapped_name}{suffix}.pth')

                        # save checkpoint
                        ckpt = {
                            'state_dict': model.state_dict(),
                            'epoch': epoch,
                            'logs': epoch_logs
                        }
                        torch.save(ckpt, ckpt_filepath)
                        print(f"Wrote checkpoint to: '{ckpt_filepath}'.")

                # store artifacts
                for key, value in artifacts.items():
                    fn = f'{key}__epoch_{epoch:04d}.npy'
                    if isinstance(value, torch.Tensor):
                        value = value.cpu().numpy()
                    np.save(os.path.join(artifacts_path, fn), value)

                # store / log examples
                wandb_examples = {}
                for key, value in examples.items():
                    fn = f'{key}__epoch_{epoch:04d}'
                    if isinstance(value, PIL.Image.Image):
                        value.save(os.path.join(examples_path, fn+'.png'),
                                   'PNG')
                        wandb_examples[key] = wandb.Image(value)
            else:
                wandb_examples = {}

            # update learning rate
            lr_scheduler.step()

            # resume checkpoint
            if ((epoch % args.resume_ckpt_interval) == 0 and epoch > 0) or \
                    (epoch == (args.n_epochs-1)):
                # save checkpoint containing state dict, optimizer, and lr
                # scheduler
                ckpt_filepath = os.path.join(checkpoints_path,
                                             f'ckpt_resume.pth')
                ckpt = {
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'logs': epoch_logs
                }
                # write checkpoint file in paranoid mode (tmp file + rename
                # so a crash while writing cannot corrupt the old checkpoint)
                torch.save(ckpt, ckpt_filepath+'.tmp')
                if os.path.isfile(ckpt_filepath):
                    # does exist only after first writing
                    os.remove(ckpt_filepath)
                os.rename(ckpt_filepath+'.tmp', ckpt_filepath)
                print(f"Wrote resume checkpoint to: '{ckpt_filepath}'.")

            # logging
            csv_logger.log(epoch_logs)
            wandb_logs = {**epoch_logs, **wandb_examples}
            wandb_logs = dict(sorted(wandb_logs.items()))
            wandb.log(wandb_logs, commit=True)
            if args.debug:
                print("Epoch logs:")
                pprint(epoch_logs)

    except Exception:
        # something went wrong -.-
        # NOTE(review): if the exception happens before the first epoch body
        # runs, `epoch`/`epoch_logs` are undefined and this handler raises a
        # NameError that masks the original error — confirm intended.
        # store checkpoint
        ckpt_filepath = os.path.join(checkpoints_path,
                                     f'ckpt_error__epoch_{epoch:04d}.pth')
        ckpt = {
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lr_scheduler': lr_scheduler.state_dict(),
            'epoch': epoch,
            'logs': epoch_logs
        }
        torch.save(ckpt, ckpt_filepath)
        print(f"Wrote checkpoint to: '{ckpt_filepath}'.")

        # log error
        log_filepath = os.path.join(results_path, 'error.log')
        with open(log_filepath, 'w') as f:
            traceback.print_exc(file=f)
        print(f"Wrote error log to: '{log_filepath}'.")

        # reraise error -> let the run crash
        raise

    # training done (marker file signals successful completion)
    with open(os.path.join(results_path, 'finished'), 'w') as f:
        pass
    csv_logger.write()

    cprint_step(f"Done")
# start training/validation only when executed as a script (not on import)
if __name__ == '__main__':
    main()
| 26,219 | 37.110465 | 80 | py |
null | EMSAFormer-main/emsaformer/__init__.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
| 96 | 18.4 | 63 | py |
null | EMSAFormer-main/emsaformer/args.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Soehnke Fischedick <soehnke-benedikt.fischedick@tu-ilmenau.de>
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
.. codeauthor:: Mona Koehler <mona.koehler@tu-ilmenau.de>
"""
import argparse as ap
import json
import os
import shlex
import shutil
import socket
from nicr_mt_scene_analysis.model.activation import KNOWN_ACTIVATIONS
from nicr_mt_scene_analysis.model.backbone import KNOWN_BACKBONES
from nicr_mt_scene_analysis.model.block import KNOWN_BLOCKS
from nicr_mt_scene_analysis.model.context_module import KNOWN_CONTEXT_MODULES
from nicr_mt_scene_analysis.model.encoder_decoder_fusion import KNOWN_ENCODER_DECODER_FUSIONS
from nicr_mt_scene_analysis.model.encoder_fusion import KNOWN_ENCODER_FUSIONS
from nicr_mt_scene_analysis.model.normalization import KNOWN_NORMALIZATIONS
from nicr_mt_scene_analysis.model.upsampling import KNOWN_UPSAMPLING_METHODS
from nicr_mt_scene_analysis.multi_task import KNOWN_TASKS
from nicr_mt_scene_analysis.task_helper.instance import KNOWN_INSTANCE_CENTER_LOSS_FUNCTIONS
from nicr_mt_scene_analysis.task_helper.normal import KNOWN_NORMAL_LOSS_FUNCTIONS
from .data import KNOWN_DATASETS
from .data import KNOWN_CLASS_WEIGHTINGS
from .decoder import KNOWN_DECODERS
from .lr_scheduler import KNOWN_LR_SCHEDULERS
from .optimizer import KNOWN_OPTIMIZERS
class Range(object):
    """Closed interval [start, end] usable as argparse ``choices``.

    Comparing a number against the instance (``==`` or ``in``) checks
    containment, which is exactly what argparse does when validating a
    value against ``choices``. Iteration yields the instance itself so
    that help/error messages print the interval instead of a huge list.
    """
    def __init__(self, start, end):
        # inclusive lower and upper bounds
        self.start = start
        self.end = end

    def __eq__(self, other):
        # a value "equals" the range iff it lies within both bounds
        within_lower = self.start <= other
        within_upper = other <= self.end
        return within_lower and within_upper

    def __contains__(self, item):
        # delegate to __eq__ so `x in r` behaves like `x == r`
        return self == item

    def __iter__(self):
        # argparse iterates `choices` for its help/error output
        return iter((self,))

    def __repr__(self):
        return f'[{self.start}, {self.end}]'
class ArgParserEMSAFormer(ap.ArgumentParser):
    def __init__(self, *args, **kwargs):
        """Set up all command-line arguments for EMSAFormer training/evaluation.

        Arguments are organized into argparse groups covering paths, tasks,
        network input, model components (encoders, context module, decoders
        for semantic/instance/normal tasks), training hyperparameters,
        dataset/augmentation options, validation/evaluation settings,
        checkpointing, resuming (via a 'resume' subcommand), debugging, and
        Weights & Biases logging. All positional/keyword arguments are
        forwarded to :class:`argparse.ArgumentParser`.
        """
        # use ArgumentDefaultsHelpFormatter unless a formatter_class is
        # explicitly passed by the caller
        formatter_class = kwargs.pop('formatter_class', None)
        formatter_class = formatter_class or ap.ArgumentDefaultsHelpFormatter
        super().__init__(*args, formatter_class=formatter_class, **kwargs)

        # paths ---------------------------------------------------------------
        group = self.add_argument_group('Paths')
        group.add_argument(
            '--results-basepath',
            type=str,
            default='./results',
            help="Path where to store training files."
        )
        group.add_argument(
            '--weights-filepath',
            type=str,
            default=None,
            help="Filepath to (last) checkpoint / weights for the entire model."
        )

        # network and multi-task -----------------------------------------------
        group = self.add_argument_group('Tasks')
        # -> multi-task parameters
        group.add_argument(
            '--tasks',
            nargs='+',
            type=str,
            choices=KNOWN_TASKS,
            default=('semantic',),
            help="Task(s) to perform."
        )
        group.add_argument(
            '--enable-panoptic',
            action='store_true',
            default=False,
            help="Enforces taskts 'semanic' and 'instance' to be combined for "
                 "panoptic segmentation"
        )

        # -> input
        group = self.add_argument_group('Input')
        group.add_argument(
            '--input-height',
            type=int,
            default=480,
            help="Network input height. Images will be resized to this height."
        )
        group.add_argument(
            '--input-width',
            type=int,
            default=640,
            help="Network input width. Images will be resized to this width."
        )
        group.add_argument(
            '--input-modalities',
            nargs='+',
            type=str,
            choices=('rgb', 'depth', 'rgbd'),
            default=('rgbd',),
            help="Input modalities to consider."
        )

        # -> whole model
        group = self.add_argument_group('Model')
        group.add_argument(
            '--compile-model',
            default=False,
            action='store_true',
            help="Enables compilation of the model. Dosn't work with "
                 "Swin Transformer backbones."
        )
        group.add_argument(
            '--normalization',
            type=str,
            default=None,
            choices=KNOWN_NORMALIZATIONS,
            help="[DEPRECATED - use encoder or decoder specific] Normalization "
                 "to apply in the whole model."
        )
        group.add_argument(
            '--activation',
            type=str,
            default='relu',
            choices=KNOWN_ACTIVATIONS,
            help="Activation to use in the whole model. "
                 "If Swin Transformer backbones are used, the activation "
                 "of the Encoder is always 'gelu'."
        )

        # -> encoder related parameters
        group = self.add_argument_group('Model: Encoder(s)')
        group.add_argument(
            '--no-pretrained-backbone',
            action='store_true',
            default=False,
            help="Disables loading of ImageNet pretrained weights for the "
                 "backbone(s). Useful for inference or inference timing."
        )
        group.add_argument(
            '--encoder-normalization',
            type=str,
            default='layernorm',
            choices=KNOWN_NORMALIZATIONS,
            help="Normalization to apply to the encoders."
        )
        group.add_argument(
            '--encoder-backbone-pretrained-weights-filepath',
            type=str,
            default=None,
            help="Path to pretrained (ImageNet) weights for the encoder "
                 "backbones. Use this argument if you want to initialize all "
                 "encoder backbones with the same weights. "
                 "If `weights-filepath` is given, the specified weights are "
                 "loaded subsequently and may replace the pretrained weights."
        )
        group.add_argument(
            '--encoder-fusion',
            choices=KNOWN_ENCODER_FUSIONS,
            default='se-add-uni-rgb',
            help="Determines how features of the depth (rgb) encoder are "
                 "fused to features of the other encoder."
        )

        # -> rgb encoder
        group = self.add_argument_group('Model: Encoder(s) -> RGB encoder')
        group.add_argument(
            '--rgb-encoder-backbone',
            type=str,
            choices=KNOWN_BACKBONES,
            default='resnet34',
            help="Backbone to use for RGB encoder."
        )
        group.add_argument(
            '--rgb-encoder-backbone-resnet-block',
            type=str,
            choices=KNOWN_BLOCKS,
            default='nonbottleneck1d',
            help="Block (type) to use in RGB encoder backbone."
        )
        group.add_argument(
            '--rgb-encoder-backbone-block',
            type=str,
            choices=KNOWN_BLOCKS,
            default=None,
            help="[DEPRECATED - use rgb-encoder-backbone-resnet-block] "
                 "Block (type) to use in RGB encoder backbone."
        )
        group.add_argument(
            '--rgb-encoder-backbone-pretrained-weights-filepath',
            type=str,
            default=None,
            help="Path to pretrained (ImageNet) weights for the rgb encoder "
                 "backbone. "
                 "If `weights-filepath` is given, the specified weights are "
                 "loaded subsequently and may replace the pretrained weights."
        )

        # -> depth encoder
        group = self.add_argument_group('Model: Encoder(s) -> depth encoder')
        group.add_argument(
            '--depth-encoder-backbone',
            type=str,
            choices=KNOWN_BACKBONES,
            default='resnet34',
            help="Backbone to use for depth encoder."
        )
        group.add_argument(
            '--depth-encoder-backbone-resnet-block',
            type=str,
            choices=KNOWN_BLOCKS,
            default='nonbottleneck1d',
            help="Block (type) to use in depth encoder backbone."
        )
        group.add_argument(
            '--depth-encoder-backbone-block',
            type=str,
            choices=KNOWN_BLOCKS,
            default=None,
            help="[DEPRECATED - use depth-encoder-backbone-resnet-block] "
                 "Block (type) to use in depth encoder backbone."
        )
        group.add_argument(
            '--depth-encoder-backbone-pretrained-weights-filepath',
            type=str,
            default=None,
            help="Path to pretrained (ImageNet) weights for the depth encoder "
                 "backbone. "
                 "If `weights-filepath` is given, the specified weights are "
                 "loaded subsequently and may replace the pretrained weights."
        )

        # -> rgbd encoder
        group = self.add_argument_group('Model: Encoder(s) -> RGB-D encoder')
        group.add_argument(
            '--rgbd-encoder-backbone',
            type=str,
            choices=KNOWN_BACKBONES,
            default='swin-multi-t-v2-128',
            help="Backbone to use for RGBD encoder."
        )
        group.add_argument(
            '--rgbd-encoder-backbone-resnet-block',
            type=str,
            choices=KNOWN_BLOCKS,
            default='nonbottleneck1d',
            help="Block (type) to use in RGBD encoder backbone."
        )
        group.add_argument(
            '--rgbd-encoder-backbone-pretrained-weights-filepath',
            type=str,
            default=None,
            help="Path to pretrained (ImageNet) weights for the rgbd encoder "
                 "backbone. "
                 "If `weights-filepath` is given, the specified weights are "
                 "loaded subsequently and may replace the pretrained weights."
        )

        # -> context module related parameters
        group = self.add_argument_group('Model: Context Module')
        group.add_argument(
            '--context-module',
            type=str,
            choices=KNOWN_CONTEXT_MODULES,
            default='ppm',
            help='Context module to use.'
        )
        group.add_argument(
            '--upsampling-context-module',
            choices=('nearest', 'bilinear'),
            default='bilinear',
            help="How features are upsampled in the context module. Bilinear "
                 "upsampling may cause problems when converting to TensorRT."
        )

        # -> decoder related parameters
        group = self.add_argument_group('Model: Decoder(s)')
        group.add_argument(
            '--encoder-decoder-skip-downsamplings',
            nargs='+',
            type=int,
            default=(4, 8, 16),
            help="Determines at which downsamplings skip connections from the "
                 "encoder to the decoder(s) should be created, e.g., '4, 8' "
                 "means skip connections after encoder stages at 1/4 and 1/8 "
                 "of the input size to the decoder."
        )
        group.add_argument(
            '--encoder-decoder-fusion',
            type=str,
            choices=KNOWN_ENCODER_DECODER_FUSIONS,
            default=None,
            help="[DEPRECATED - use parameter for each decoder] Determines "
                 "how features of the encoder (after fusing "
                 "encoder features) are fused into the decoders."
        )
        group.add_argument(
            '--upsampling-decoder',
            choices=KNOWN_UPSAMPLING_METHODS,
            default=None,
            help="[DEPRECATED - use parameter for each decoder] How features "
                 "are upsampled in the decoders. Bilinear upsampling may "
                 "cause problems when converting to TensorRT. 'learned-3x3*' "
                 "mimics bilinear interpolation with nearest interpolation "
                 "and adaptable 3x3 depth-wise convolution subsequently."
        )
        group.add_argument(
            '--upsampling-prediction',
            choices=KNOWN_UPSAMPLING_METHODS,
            default='learned-3x3-zeropad',
            help="How features are upsampled after the last decoder module to "
                 "match the NETWORK input resolution. Bilinear upsampling may "
                 "cause problems when converting to TensorRT. 'learned-3x3*' "
                 "mimics bilinear interpolation with nearest interpolation "
                 "and adaptable 3x3 depth-wise conv subsequently."
        )
        group.add_argument(
            '--decoder-normalization',
            type=str,
            default='batchnorm',
            choices=KNOWN_NORMALIZATIONS,
            help="Normalization to apply in the decoder."
        )

        # -> semantic related parameters
        group = self.add_argument_group('Model: Decoder(s) -> Semantic')
        group.add_argument(
            '--semantic-encoder-decoder-fusion',
            type=str,
            choices=KNOWN_ENCODER_DECODER_FUSIONS,
            default='swin-ln-select',
            help="Determines how features of the encoder (after fusing "
                 "encoder features) are fused into the semantic decoder."
        )
        group.add_argument(
            '--semantic-decoder',
            type=str,
            default='segformermlp',
            choices=KNOWN_DECODERS,
            help="Decoder type to use for semantic segmentation."
        )
        group.add_argument(
            '--semantic-decoder-block',
            type=str,
            default='nonbottleneck1d',
            choices=KNOWN_BLOCKS,
            help="[EMSANet decoder] Block (type) to use in semantic decoder."
        )
        group.add_argument(
            '--semantic-decoder-block-dropout-p',
            type=float,
            default=0.2,
            help="[EMSANet decoder] Dropout probability to use in semantic "
                 "decoder blocks (only for 'nonbottleneck1d')."
        )
        group.add_argument(
            '--semantic-decoder-n-blocks',
            type=int,
            default=3,
            help="[EMSANet decoder] Number of blocks to use in each semantic "
                 "decoder module."
        )
        group.add_argument(
            '--semantic-decoder-dropout-p',
            type=float,
            default=0.1,
            help="[SegFormerMLP decoder] Probability to use for feature "
                 "dropout (Dropout2d) in semantic decoder before task head."
        )
        group.add_argument(
            '--semantic-decoder-n-channels',
            type=int,
            default=(256, 128, 64, 64),
            nargs='+',
            help="[EMSANet decoder] Number of features maps (channels) to use "
                 "in each semantic decoder module. Length of tuple "
                 "determines the number of decoder modules. "
                 "[SegFormerMLP decoder] Embedding dimensions to use for main "
                 "branch and skip connections."
        )
        group.add_argument(
            '--semantic-decoder-downsamplings',
            type=int,
            default=(16, 8, 4),
            nargs='+',
            help="[EMSANet decoder] Downsampling at the end of each semantic "
                 "decoder module. Length of tuple must match "
                 "`--semantic-decoder-n-channels`."
        )
        group.add_argument(
            '--semantic-decoder-upsampling',
            choices=KNOWN_UPSAMPLING_METHODS,
            default='bilinear',
            help="[EMSANet decoder] How features are upsampled in the semantic "
                 "decoders. Bilinear upsampling may cause problems when "
                 "converting to TensorRT. 'learned-3x3*' mimics bilinear "
                 "interpolation with nearest interpolation and adaptable "
                 "3x3 depth-wise convolution subsequently."
                 "[SegFormerMLP decoder] How features are upsampled in the "
                 "semantic decoders. Only bilinear upsampling is supported."
        )

        # -> instance related parameters
        group = self.add_argument_group('Model: Decoder(s) -> Instance')
        group.add_argument(
            '--instance-encoder-decoder-fusion',
            type=str,
            choices=KNOWN_ENCODER_DECODER_FUSIONS,
            default='swin-ln-add',
            help="Determines how features of the encoder (after fusing "
                 "encoder features) are fused into the instance decoder."
        )
        group.add_argument(
            '--instance-decoder',
            type=str,
            default='emsanet',
            choices=KNOWN_DECODERS,
            help="Decoder type to use for instance segmentation."
        )
        group.add_argument(
            '--instance-decoder-block',
            type=str,
            default='nonbottleneck1d',
            choices=KNOWN_BLOCKS,
            help="[EMSANet decoder] Block (type) to use in instance decoder."
        )
        group.add_argument(
            '--instance-decoder-block-dropout-p',
            type=float,
            default=0.2,
            help="[EMSANet decoder] Dropout probability to use in instance "
                 "decoder blocks (only for 'nonbottleneck1d')."
        )
        group.add_argument(
            '--instance-decoder-n-blocks',
            type=int,
            default=3,
            help="[EMSANet decoder] Number of blocks to use in each instance "
                 "decoder module."
        )
        group.add_argument(
            '--instance-decoder-dropout-p',
            type=float,
            default=0.1,
            help="[SegFormerMLP decoder] Probability to use for feature "
                 "dropout (Dropout2d) in instance decoder before task head."
        )
        group.add_argument(
            '--instance-decoder-n-channels',
            type=int,
            default=(512, 256, 128),
            nargs='+',
            help="[EMSANet decoder] Number of features maps (channels) to use "
                 "in each instance decoder module. Length of tuple "
                 "determines the number of decoder modules. "
                 "[SegFormerMLP decoder] Embedding dimensions to use for main "
                 "branch and skip connections."
        )
        group.add_argument(
            '--instance-decoder-downsamplings',
            type=int,
            default=(16, 8, 4),
            nargs='+',
            help="[EMSANet decoder] Downsampling at the end of each instance "
                 "decoder module. Length of tuple must match "
                 "`--instance-decoder-n-channels`."
        )
        group.add_argument(
            '--instance-decoder-upsampling',
            choices=KNOWN_UPSAMPLING_METHODS,
            default='learned-3x3-zeropad',
            help="How features are upsampled in the instance decoders. "
                 "Bilinear upsampling may cause problems when converting to "
                 "TensorRT. 'learned-3x3*' mimics bilinear interpolation with "
                 "nearest interpolation and adaptable 3x3 depth-wise "
                 "convolution subsequently (EMSANet decoder only)."
        )
        group.add_argument(
            '--instance-center-sigma',
            type=int,
            default=8,
            help="Sigma to use for encoding instance centers. Instance "
                 "centers are encoded in a heatmap using a gauss up to "
                 "3*sigma. Note that `sigma` is adjusted when using "
                 "multiscale supervision as follows: "
                 "sigma_s = (4*`sigma`) // s for downscale of s."
        )
        group.add_argument(
            '--instance-center-heatmap-threshold',
            type=float,
            default=0.1,
            help="Threshold to use for filtering valid instances during "
                 "postprocessing the predicted center heatmaps. The order of "
                 "postprocessing operations is: threshold, nms, opt. masking, "
                 "top-k."
        )
        group.add_argument(
            '--instance-center-heatmap-nms-kernel-size',
            type=int,
            default=17,
            help="Kernel size for non-maximum suppression to use for "
                 "filtering the predicting instance center heatmaps during "
                 "postprocessing. The order of postprocessing operations is: "
                 "threshold, nms, opt. masking, top-k."
        )
        group.add_argument(
            '--instance-center-heatmap-apply-foreground-mask',
            action='store_true',
            default=False,
            help="Apply foreground mask to centers after non-maximum "
                 "suppression. This filters instance centers that do not "
                 "actually belong to the foreground and, thus, prevents "
                 "instance pixels after offset shifting being assigned to "
                 "such an instance center later on. The order of "
                 "postprocessing operations is: threshold, nms, opt. masking, "
                 "top-k."
        )
        group.add_argument(
            '--instance-center-heatmap-top-k',
            type=int,
            default=64,
            help="Top-k instances to finally select during postprocessing "
                 "instance center heatmaps. The order of postprocessing "
                 "operations is: threshold, nms, opt. masking, top-k.")
        group.add_argument(
            '--instance-center-encoding',
            type=str,
            choices=('deeplab', 'sigmoid'),
            default='sigmoid',
            help="Determines how to encode the predicted instance centers. "
                 "'deeplab' corresponds to simple linear encoding. "
                 "'sigmoid' forces the output to be in range [0., 1.] by "
                 "applying sigmoid activation."
        )
        group.add_argument(
            '--instance-offset-encoding',
            type=str,
            choices=('deeplab', 'relative', 'tanh'),
            default='tanh',
            help="Determines how to encode the predicted instance offset "
                 "vectors. 'deeplab' corresponds to absolute coordinates as "
                 "done in panoptic deeplab."
                 "'relative' means [-1., 1.] with respect to the"
                 "network input resolution. 'tanh is similar to 'relative' "
                 "but further forces [-1., 1.] by applying tanh activation."
                 "Note that this also affects instance target generation.")
        group.add_argument(
            '--instance-offset-distance-threshold',
            type=int,
            default=None,
            help="Distance threshold in pixels to mask out invalid instance "
                 "assignments. Pixels that are more than this threshold away"
                 "from the next instance center after offset shifting, are "
                 "assigned to the 'no instance id' (id=0). Note that this "
                 "masking may lead to thing segments without an instance id, "
                 "which have to be handled later on. During panoptic merging, "
                 "masked pixels are assigned to the void class."
        )

        # -> normal related parameters
        group = self.add_argument_group('Model: Decoder(s) -> Normal')
        group.add_argument(
            '--normal-encoder-decoder-fusion',
            type=str,
            choices=KNOWN_ENCODER_DECODER_FUSIONS,
            default='add-rgb',
            help="Determines how features of the encoder (after fusing "
                 "encoder features) are fused into the normal decoder."
        )
        group.add_argument(
            '--normal-decoder',
            type=str,
            default='emsanet',
            choices=KNOWN_DECODERS,
            help="Decoder type to use for normal segmentation."
        )
        group.add_argument(
            '--normal-decoder-block',
            type=str,
            default='nonbottleneck1d',
            choices=KNOWN_BLOCKS,
            help="[EMSANet decoder] Block (type) to use in normal decoder."
        )
        group.add_argument(
            '--normal-decoder-block-dropout-p',
            type=float,
            default=0.2,
            help="[EMSANet decoder] Dropout probability to use in normal "
                 "decoder blocks (only for 'nonbottleneck1d')."
        )
        group.add_argument(
            '--normal-decoder-n-blocks',
            type=int,
            default=3,
            help="[EMSANet decoder] Number of blocks to use in each normal "
                 "decoder module."
        )
        group.add_argument(
            '--normal-decoder-dropout-p',
            type=float,
            default=0.1,
            help="[SegFormerMLP decoder] Probability to use for feature "
                 "dropout (Dropout2d) in normal decoder before task head."
        )
        group.add_argument(
            '--normal-decoder-n-channels',
            type=int,
            default=(512, 256, 128),
            nargs='+',
            help="[EMSANet decoder] Number of features maps (channels) to use "
                 "in each normal decoder module. Length of tuple "
                 "determines the number of decoder modules. "
                 "[SegFormerMLP decoder] Embedding dimensions to use for main "
                 "branch and skip connections."
        )
        group.add_argument(
            '--normal-decoder-downsamplings',
            type=int,
            default=(16, 8, 4),
            nargs='+',
            help="[EMSANet decoder] Downsampling at the end of each normal "
                 "decoder module. Length of tuple must match "
                 "`--normal-decoder-n-channels`."
        )
        group.add_argument(
            '--normal-decoder-upsampling',
            choices=KNOWN_UPSAMPLING_METHODS,
            default='learned-3x3-zeropad',
            help="How features are upsampled in the normal decoders. "
                 "Bilinear upsampling may cause problems when converting to "
                 "TensorRT. 'learned-3x3*' mimics bilinear interpolation with "
                 "nearest interpolation and adaptable 3x3 depth-wise "
                 "convolution subsequently (EMSANet decoder only)."
        )

        # training ------------------------------------------------------------
        group = self.add_argument_group('Training')
        group.add_argument(
            '--dropout-p',
            type=float,
            default=0.1,
            help="Dropout probability to use in encoder blocks (only for "
                 "'nonbottleneck1d')."
        )
        group.add_argument(
            '--he-init',
            nargs='+',
            type=str,
            choices=('encoder-fusion', 'encoder-decoder-fusion',
                     'context-module',
                     'decoder'),
            default=('encoder-fusion', ),
            help="Initialize weights in given parts of the network using He "
                 "initialization instead of PyTorch's default initialization "
                 "(commonly used heuristic). Note that bias weights are "
                 "allways initialized using pytorch's default (commonly used "
                 "heuristic)."
        )
        group.add_argument(
            '--no-zero-init-decoder-residuals',
            action='store_true',
            default=False,
            help="Disables zero-initializing weights in the last BN in each "
                 "block, so that the residual branch starts with zeros, and "
                 "each residual block behaves like an identity."
        )
        group.add_argument(
            '--n-epochs',
            type=int,
            default=500,
            help="Number of epochs to train for."
        )
        group.add_argument(
            '--batch-size',
            type=int,
            default=8,
            help="Batch size to use for training."
        )
        group.add_argument(
            '--optimizer',
            type=str,
            choices=KNOWN_OPTIMIZERS,
            default='sgd',
            help="Optimizer to use."
        )
        group.add_argument(
            '--learning-rate',
            type=float,
            default=0.01,
            help="Maximum learning rate for a `batch-size` of 8. When using a "
                 "deviating batch size, the learning rate is scaled "
                 "automatically: lr = `learning-rate` * `batch-size`/8."
        )
        group.add_argument(
            '--learning-rate-scheduler',
            type=str,
            choices=KNOWN_LR_SCHEDULERS,
            default='onecycle',
            help="Learning rate scheduler to use. For parameters and details, "
                 "see implementation."
        )
        group.add_argument(
            '--momentum',
            type=float,
            default=0.9,
            help="Momentum to use."
        )
        group.add_argument(
            '--weight-decay',
            type=float,
            default=1e-4,
            help="Weight decay to use for all network weights."
        )
        group.add_argument(
            '--tasks-weighting',
            nargs='+',
            type=float,
            default=None,
            help="Task weighting to use for loss balancing. The tasks' "
                 "weights are assigned to the task in the order given by "
                 "`tasks`."
        )

        # -> semantic related parameters
        group = self.add_argument_group('Training -> Semantic')
        group.add_argument(
            '--semantic-class-weighting',
            type=str,
            choices=KNOWN_CLASS_WEIGHTINGS,
            default='median-frequency',
            help="Weighting mode to use for semantic classes to balance loss "
                 "during training"
        )
        group.add_argument(
            '--semantic-class-weighting-logarithmic-c',
            type=float,
            default=1.02,
            help="Parameter c for limiting the upper bound of the class "
                 "weights when `semantic-class-weighting` is 'logarithmic'. "
                 "Logarithmic class weighting is defined as 1 / ln(c+p_class)."
        )
        group.add_argument(
            "--semantic-loss-label-smoothing",
            type=float,
            default=0.0,
            help="Label smoothing factor to use in loss function for semantic "
                 "segmentation."
        )
        group.add_argument(
            '--semantic-no-multiscale-supervision',
            action='store_true',
            default=False,
            help="Disables multi-scale supervision for semantic decoder."
        )

        # -> instance related parameters
        group = self.add_argument_group('Training -> Instance')
        group.add_argument(
            '--instance-weighting',
            nargs=2,
            type=int,
            default=(2, 1),
            help="Weighting to use for instance task loss balancing with "
                 "format: 'center offset'. The resulting instance task loss "
                 "will then again be weighted with the weight given with "
                 "`tasks-weighting`."
        )
        group.add_argument(
            '--instance-center-loss',
            type=str,
            choices=KNOWN_INSTANCE_CENTER_LOSS_FUNCTIONS,
            default='mse',
            help='Loss function for instance centers.'
        )
        group.add_argument(
            '--instance-no-multiscale-supervision',
            action='store_true',
            default=False,
            help="Disables multi-scale supervision for instance decoder."
        )

        # -> orientation related parameters
        group = self.add_argument_group('Training -> Orientation')
        group.add_argument(
            '--orientation-kappa',
            type=float,
            default=1.0,
            help="Parameter kappa to use for VonMises loss."
        )

        # -> normal related parameters
        group = self.add_argument_group('Training -> Normal')
        group.add_argument(
            '--normal-loss',
            type=str,
            choices=KNOWN_NORMAL_LOSS_FUNCTIONS,
            default='l1',
            help='Loss function for normal.'
        )
        group.add_argument(
            '--normal-no-multiscale-supervision',
            action='store_true',
            default=False,
            help="Disables multi-scale supervision for normal decoder."
        )

        # -> scene related parameters
        group = self.add_argument_group('Training -> Scene')
        group.add_argument(
            "--scene-loss-label-smoothing",
            type=float,
            default=0.1,
            help="Label smoothing factor to use in loss function for scene "
                 "classification."
        )

        # dataset and augmentation --------------------------------------------
        group = self.add_argument_group('Dataset and Augmentation')
        group.add_argument(
            '--dataset',
            type=str,
            default='nyuv2',
            help="Dataset(s) to train/validate on. Use ':' to combine multiple"
                 "datasets. Note that the first dataset is used for "
                 "determining dataset/network/training parameters. Use "
                 "'dataset[camera,camera4]' to select specific cameras. "
                 f"Available datasets: {', '.join(KNOWN_DATASETS)}."
        )
        group.add_argument(
            '--dataset-path',
            type=str,
            default=None,
            help="Path(s) to dataset root(s). If not given, the path is "
                 "determined automatically using the distributed training "
                 "package. If no path(s) can be determined, data loading is "
                 "disabled. Use ':' to combine the paths for combined datasets."
        )
        group.add_argument(
            '--raw-depth',
            action='store_true',
            default=False,
            help="Whether to use the raw depth values instead of refined "
                 "depth values."
        )
        group.add_argument(
            '--use-original-scene-labels',
            action='store_true',
            default=False,
            help="Do not use unified scene class labels for domestic indoor "
                 "environments (Hypersim, NYUv2, ScanNet, and SUNRGB-D only)."
        )
        group.add_argument(
            '--aug-scale-min',
            type=float,
            default=1.0,
            help="Minimum scale for random rescaling during training."
        )
        group.add_argument(
            '--aug-scale-max',
            type=float,
            default=1.4,
            help="Maximum scale for random rescaling during training."
        )
        group.add_argument(
            '--cache-dataset',
            action='store_true',
            default=False,
            help="Cache dataset to speed up training."
        )
        group.add_argument(
            '--n-workers',
            type=int,
            default=8,
            help="Number of workers for data loading and preprocessing"
        )
        group.add_argument(
            '--subset-train',
            type=float,
            default=1.0,
            choices=Range(0.0, 1.0),
            help="Relative value to train on a subset of the train data. For "
                 "example if `subset-train`=0.2 and we have 100 train images, "
                 "then we train only on 20 images. These 20 images are chosen "
                 "randomly each epoch, except if `subset-deterministic` is "
                 "set."
        )
        group.add_argument(
            '--subset-deterministic',
            action='store_true',
            default=False,
            help="Use the same subset in each epoch and across different "
                 "training runs. Requires `subset-train` to be set."
        )
        group = self.add_argument_group('Dataset and Augmentation -> ScanNet')
        # -> ScanNet related parameters
        group.add_argument(
            '--scannet-subsample',
            type=int,
            default=50,
            choices=(50, 100, 200, 500),
            help="Subsample to use for ScanNet dataset for training."
        )
        group.add_argument(
            '--scannet-semantic-n-classes',
            type=int,
            default=40,
            choices=(20, 40, 200, 549),
            help="Number of semantic classes to use for ScanNet dataset."
        )
        # -> SUNRGB-D related parameters
        group = self.add_argument_group('Dataset and Augmentation -> SUNRGB-D')
        group.add_argument(
            '--sunrgbd-depth-do-not-force-mm',
            action='store_true',
            default=False,
            help="Do not force mm for SUNRGB-D depth values. Use this option "
                 "to evaluate weights of the EMSANet paper on SUNRGB-D."
        )
        # -> Hypersim related parameters
        group = self.add_argument_group('Dataset and Augmentation -> Hypersim')
        group.add_argument(
            '--hypersim-subsample',
            type=int,
            default=1,
            choices=(1, 2, 5, 10, 20),
            help="Subsample to use for ScanNet dataset for training."
        )

        # validation/evaluation ------------------------------------------------
        group = self.add_argument_group('Validation/Evaluation')
        group.add_argument(
            '--validation-only',
            action='store_true',
            default=False,
            help="No training, validation only. Requires `weights-filepath`."
        )
        group.add_argument(
            '--visualize-validation',
            default=False,
            action='store_true',
            help="Whether the validation images should be visualized."
        )
        group.add_argument(
            '--visualization-output-path',
            type=str,
            default=None,
            help="Path where to save visualized predictions. By default, a "
                 "new directory is created in the directory where the weights "
                 "come from. The filename of the weights is included in the "
                 "name of the visualization directory, so that it is evident "
                 "which weights have led to these visualizations."
        )
        group.add_argument(    # useful for appm context module
            '--validation-input-height',
            type=int,
            default=None,
            help="Network input height for validation. Images will be resized "
                 "to this height. If not given, `input-height` is used (same "
                 "height for training and validation). "
        )
        group.add_argument(    # useful for appm context module
            '--validation-input-width',
            type=int,
            default=None,
            help="Network input width for validation. Images will be resized "
                 "to this width. If not given, `input-width` is used (same "
                 "width for training and validation)."
        )
        group.add_argument(
            '--validation-batch-size',
            type=int,
            default=None,
            help="Batch size to use for validation. Can be typically 2-3 "
                 "times as large as the batch size for training. If not given "
                 "it will be set to 3 times `batch-size`."
        )
        group.add_argument(
            '--validation-split',
            type=str,
            default='valid',
            help="Dataset split(s) to use for validation. Use ':' to combine "
                 "the splits for combined datasets."
        )
        group.add_argument(
            '--validation-skip',
            type=float,
            default=0.0,
            help="Skip validation (metric calculation, example creation, and "
                 "checkpointing) in early epochs. For example, passing a"
                 "value of '0.2' and `n_epochs` of '500', skips validation "
                 "for the first 0.2*500 = 100 epochs. A value of '1.0' "
                 "disables validation at all."
        )
        group.add_argument(
            '--validation-force-interval',
            type=int,
            default=20,
            help="Force validation after every X epochs even when using "
                 "`validation-skip`. This allows to still see progress and "
                 "save checkpoints during training."
        )
        group.add_argument(
            '--validation-full-resolution',
            action='store_true',
            default=False,
            help="Whether to validate on full-resolution inputs (do not apply "
                 "any resizing to the inputs, for Cityscapes or "
                 "Hypersim dataset)."
        )
        # -> ScanNet related parameters
        group = self.add_argument_group('Validation/Evaluation -> ScanNet')
        group.add_argument(
            '--validation-scannet-subsample',
            type=int,
            default=100,
            choices=(50, 100, 200, 500),
            help="Subsample to use for ScanNet dataset for validation."
        )
        group.add_argument(
            '--validation-scannet-benchmark-mode',
            action='store_true',
            default=False,
            help="Enable benchmark mode for validation on ScanNet dataset, "
                 "i.e., mapping ignored classes to void "
                 "(`scannet-semantic-n-classes`=40/549 only)."
        )
        # -> checkpointing
        group = self.add_argument_group(
            'Validation/Evaluation -> Checkpointing'
        )
        group.add_argument(
            '--checkpointing-metrics',
            nargs='+',
            type=str,
            default=None,
            help="Metric(s) to use for checkpointing. For example "
                 "'miou bacc miou+bacc' leads to checkpointing when either "
                 "miou, bacc, or the sum of miou and bacc reaches its highest "
                 "value. Note that current implemention only supports "
                 "combining metrics using '+'. Omitted this parameter "
                 "disables checkpointing."
        )
        group.add_argument(
            '--checkpointing-best-only',
            action='store_true',
            default=False,
            help="Store only the best checkpoint."
        )
        group.add_argument(
            '--checkpointing-skip',
            type=float,
            default=0.0,
            help="Skip checkpointing in early epochs. For example, passing a"
                 "value of '0.2' and `n_epochs` of '500', skips checkpointing "
                 "for the first 0.2*500 = 100 epochs. A value of '1.0' "
                 "disables checkpointing at all."
        )

        # resuming ------------------------------------------------------------
        # 'resume' is implemented as a subcommand so that a previous run's
        # argsv.txt can be re-parsed in parse_args()
        subparsers = self.add_subparsers(
            parser_class=ap.ArgumentParser,    # important to avoid recursion
            # required=False,    # python >= 3.7 feature
            dest='action'
        )
        subparser = subparsers.add_parser(
            'resume',
            help="Resume previous training run with auto argument and "
                 "checkpoint detection, see `path` argument for details."
        )
        subparser.add_argument(
            'resume_path',
            type=str,
            default=None,
            help="Path to previous training run, e.g., './runs_xy'. All args "
                 "are automatically replaced with the args given in "
                 "'./runs_xy/argsv.txt'. Furthermore, `--resume-ckpt-filepath` "
                 "is set to './runs_xy/checkpoints/ckpt_resume.pth'. For "
                 "safety, a backup of the given run folder is created."
        )
        self.add_argument(
            '--resume-ckpt-filepath',
            type=str,
            default=None,
            help="Path to checkpoint file to resume training from. "
        )
        self.add_argument(
            '--resume-ckpt-interval',
            type=int,
            default=20,
            help="Write resume checkpoint containing state dicts for model, "
                 "optimizer, and lr scheduler every X epochs. "
                 "This allows resuming a previous training."
        )

        # debugging -----------------------------------------------------------
        self.add_argument(
            '--debug',
            action='store_true',
            default=False,
            help="Enables debug outputs (and exporting the model to ONNX)."
        )
        self.add_argument(
            '--skip-sanity-check',
            action='store_true',
            default=False,
            help="Disables the simple sanity check before training that "
                 "ensures that crucial parts (data, forward, metrics, ...) are "
                 "working as expected. The check is done by forwarding a "
                 "single batch of all dataloadera WITHOUT backpropagation."
        )
        self.add_argument(
            '--overfit-n-batches',
            type=int,
            default=-1,
            help="Forces to overfit on specified number of batches. Note that "
                 "for both training and validation samples are drawn from the "
                 "(first) validation loader without shuffling."
        )

        # Weights & Biases ----------------------------------------------------
        self.add_argument(
            '--wandb-mode',
            type=str,
            choices=('online', 'offline', 'disabled'),    # see wandb
            default='online',
            help="Mode for Weights & Biases"
        )
        self.add_argument(
            '--wandb-project',
            type=str,
            default='EMSAFormer',
            help="Project name for Weights & Biases"
        )
        self.add_argument(
            '--wandb-name',
            type=str,
            default=None,
            help="[DEPRECATED] Use `--wandb-project` instead."
        )

        # other parameters ----------------------------------------------------
        self.add_argument(
            '--hostname',
            type=str,
            default=socket.gethostname(),
            help="We are often interested in the hostname the code is running."
        )
        self.add_argument(
            '--notes',
            type=str,
            default='',
            help="Just to add some additional notes for this run."
        )
def parse_args(self, args=None, namespace=None, verbose=True):
# parse args
pa = super().parse_args(args=args, namespace=namespace)
def _warn(text):
if verbose:
print(f"[Warning] {text}")
# check for resumed training ------------------------------------------
if 'resume' == pa.action:
is_resumed_training = True
resume_path = pa.resume_path
# load args from file
args_fp = os.path.join(pa.resume_path, 'argsv.txt')
print(f"Resuming training with args from: '{args_fp}'.")
with open(args_fp, 'r') as f:
args_str = f.read().strip()
args_run = shlex.split(args_str)[1:] # remove script name
# create a backup of the given run folder
backup_number = 1
while 0 != backup_number: # was not successful
backup_path = os.path.normpath(pa.resume_path) # trailing /
backup_path += f'_before_resume{backup_number}'
if os.path.isdir(backup_path):
print(f"Found already existing backup: '{backup_path}'")
backup_number += 1
continue
print(f"Creating backup at: '{backup_path}'.")
shutil.copytree(src=pa.resume_path, dst=backup_path)
break
# set resume checkpoint filepath
ckpt_fp = os.path.join(pa.resume_path, 'checkpoints',
'ckpt_resume.pth')
assert os.path.isfile(ckpt_fp)
args_run.extend(
shlex.split(f'--resume-ckpt-filepath {shlex.quote(ckpt_fp)}')
)
# parse args again
pa = super().parse_args(args=args_run, namespace=namespace)
else:
is_resumed_training = False
resume_path = None
# store additional information
pa.resume_path = resume_path
pa.is_resumed_training = is_resumed_training
# convert nargs+ arguments from lists to tuples -----------------------
for k, v in dict(vars(pa)).items():
if isinstance(v, list):
setattr(pa, k, tuple(v))
# perform some initial argument checks
# weights filepaths ---------------------------------------------------
if pa.encoder_backbone_pretrained_weights_filepath is not None:
# check if filepaths for rgb and depth are not set
if any((pa.rgb_encoder_backbone_pretrained_weights_filepath is not None,
pa.depth_encoder_backbone_pretrained_weights_filepath is not None,
pa.rgbd_encoder_backbone_pretrained_weights_filepath is not None)):
raise ValueError(
"Only use `encoder-backbone-pretrained-weights-filepath` "
"if you want to initialize all used encoder backbones with "
"the same weights! "
"`rgb-encoder-backbone-pretrained-weights-filepath` and "
"`depth-encoder-backbone-pretrained-weights-filepath` and "
"`rgbd-encoder-backbone-pretrained-weights-filepath` must"
"not be set."
)
pa.rgb_encoder_backbone_pretrained_weights_filepath = \
pa.encoder_backbone_pretrained_weights_filepath
pa.depth_encoder_backbone_pretrained_weights_filepath = \
pa.encoder_backbone_pretrained_weights_filepath
pa.rgbd_encoder_backbone_pretrained_weights_filepath = \
pa.encoder_backbone_pretrained_weights_filepath
# this argument is not needed anymore
del pa.encoder_backbone_pretrained_weights_filepath
# model ---------------------------------------------------------------
# handle deprecated normalization choice for whole model
if pa.normalization is not None:
pa.encoder_normalization = pa.normalization
pa.decoder_normalization = pa.normalization
_warn("Forced `encoder-normalization` and `decoder-normalization`, "
f"to be '{pa.normalization}' as `normalization` was given.")
# handle deprecated resnet block choice for encoder
if pa.rgb_encoder_backbone_block is not None:
pa.rgb_encoder_backbone_resnet_block = pa.rgb_encoder_backbone_block
_warn("Forced `rgb-encoder-backbone-resnet-block`, to be "
f"'{pa.rgb_encoder_backbone_block}' as "
"`rgb-encoder-backbone-block` was given.")
if pa.depth_encoder_backbone_block is not None:
pa.depth_encoder_backbone_resnet_block = pa.depth_encoder_backbone_block
_warn("Forced `depth-encoder-backbone-resnet-block`, to be "
f"'{pa.depth_encoder_backbone_block}' as "
"`depth-encoder-backbone-block` was given.")
# handle deprecated encoder-decoder-fusion
if pa.encoder_decoder_fusion is not None:
pa.semantic_encoder_decoder_fusion = pa.encoder_decoder_fusion
_warn("Forced `semantic-encoder-decoder-fusion`, to be "
f"'{pa.encoder_decoder_fusion}' as `encoder-decoder-fusion` "
"was given.")
pa.instance_encoder_decoder_fusion = pa.encoder_decoder_fusion
_warn("Forced `semantic-encoder-decoder-fusion`, to be "
f"'{pa.encoder_decoder_fusion}' as `encoder-decoder-fusion` "
"was given.")
pa.normal_encoder_decoder_fusion = pa.encoder_decoder_fusion
_warn("Forced `normal-encoder-decoder-fusion`, to be "
f"'{pa.encoder_decoder_fusion}' as `encoder-decoder-fusion` "
"was given.")
# handle deprecated upsampling-decoder
if pa.upsampling_decoder is not None:
pa.semantic_decoder_upsampling = pa.upsampling_decoder
_warn("Forced `semantic-decoder-upsampling`, to be "
f"'{pa.upsampling_decoder}' as `upsampling-decoder` "
"was given.")
pa.instance_decoder_upsampling = pa.upsampling_decoder
_warn("Forced `instance-decoder-upsampling`, to be "
f"'{pa.upsampling_decoder}' as `upsampling-decoder` "
"was given.")
pa.normal_decoder_upsampling = pa.upsampling_decoder
_warn("Forced `normal-decoder-upsampling`, to be "
f"'{pa.upsampling_decoder}' as `upsampling-decoder` "
"was given.")
# disable encoder fusion if only one input modality is used
if 1 == len(pa.input_modalities):
pa.encoder_fusion = 'none'
_warn("Set `encoder-fusion` to 'none' as there is only one input "
"modality.")
# multi-task parameters -----------------------------------------------
if 'orientation' in pa.tasks:
if 'instance' not in pa.tasks:
raise ValueError("Task 'instance' is missing in `tasks` for "
"performing task 'orientation'.")
if pa.enable_panoptic:
if 'semantic' not in pa.tasks:
raise ValueError("Task 'semantic' is missing in `tasks` for "
"performing panoptic segmentation.")
if 'instance' not in pa.tasks:
raise ValueError("Task 'instance' is missing in `tasks` for "
"performing panoptic segmentation.")
# training ------------------------------------------------------------
if pa.batch_size != 8:
# the provided learning rate refers to the default batch size of 8
# when using different batch sizes, we need to adjust the learning
# rate accordingly
pa.learning_rate = pa.learning_rate * pa.batch_size / 8
_warn(f"Adapted learning rate to '{pa.learning_rate}' as the "
f"provided batch size differs from default batch size of 8.")
if pa.tasks_weighting is None:
# default weighting (required for inference or timing)
pa.tasks_weighting = (1,)*len(pa.tasks)
if len(pa.tasks_weighting) != len(pa.tasks):
raise ValueError("Length for given task weighting does not match "
f"number of tasks: {len(pa.tasks_weighting)} vs. "
f"{len(pa.tasks)}.")
# common failures for ScanNet
if 'scannet' in pa.dataset and pa.validation_scannet_benchmark_mode:
if pa.scannet_semantic_n_classes not in (40, 549):
raise ValueError(
"`validation-scannet-benchmark-mode` requires "
"`scannet-semantic-n-classes` to be 40 or 549."
)
# common failures for COCO
if 'coco' in pa.dataset:
if 'depth' in pa.input_modalities:
raise ValueError("COCO dataset does not feature depth data.")
if 'normal' in pa.tasks:
raise ValueError("COCO dataset does not feature surface "
"normals.")
if 'scene' in pa.tasks:
raise ValueError("Scene classification is not supported for "
"COCO dataset.")
if any(d in pa.dataset for d in ('cityscapes', 'hypersim', 'scannet')):
# Depth data for hypersim is clipped to the limit of png16 (uint16)
# during dataset preparation. To account for that and to ignore
# these pixels '--raw-depth' should be forced. Note, the actual
# amount of clipped pixels is quite small.
pa.raw_depth = True
_warn(f"Forced `raw-depth` as `dataset` is '{pa.dataset}'.")
# check whether provided decoder type supports multiscale supervision
decoders_with_ms = ('emsanet',)
if pa.semantic_decoder not in decoders_with_ms:
if not pa.semantic_no_multiscale_supervision:
pa.semantic_no_multiscale_supervision = True
_warn("Forced `semantic-no-multiscale-supervision` as "
f"`semantic-decoder` is '{pa.semantic_decoder}'.")
if pa.instance_decoder not in decoders_with_ms:
if not pa.instance_no_multiscale_supervision:
pa.instance_no_multiscale_supervision = True
_warn("Forced `instance-no-multiscale-supervision` as "
f"`instance-decoder` is '{pa.instance_decoder}'.")
if pa.normal_decoder not in decoders_with_ms:
if not pa.normal_no_multiscale_supervision:
pa.normal_no_multiscale_supervision = True
_warn("Forced `normal-no-multiscale-supervision` as "
f"`normal-decoder` is '{pa.normal_decoder}'.")
# evaluation ----------------------------------------------------------
if pa.validation_full_resolution:
if not any(d in pa.dataset for d in ('cityscapes', 'hypersim')):
# height/width in cityscapes and hypersim are multiple 32
raise ValueError(
"Validation with full resolution inputs is only supported"
"for 'cityscapes' or 'hypersim'."
)
# ensure that validation input size is set (None -> input size)
if pa.validation_input_width is None:
pa.validation_input_width = pa.input_width
if pa.validation_input_height is None:
pa.validation_input_height = pa.input_height
if pa.validation_batch_size is None:
pa.validation_batch_size = 3*pa.batch_size
_warn(f"`validation-batch-size` not given, using default: "
f"{pa.validation_batch_size}.")
# handle some common misconfigurations
if 'valid' == pa.validation_split and pa.dataset in ('nyuv2',
'sunrgbd'):
pa.validation_split = 'test'
_warn(f"Dataset '{pa.dataset}' does not have a 'valid' split, "
"using 'test' split instead.")
if pa.validation_skip > pa.checkpointing_skip:
_warn(f"Setting `checkpointing_skip` to '{pa.validation_skip}' as "
f"`validation_skip` is larger '{pa.validation_skip}'.")
# set default for pa.visualization_output_path and check that it does
# not already exist
if pa.visualize_validation:
if pa.visualization_output_path is None:
weights_dirpath, weights_filename = os.path.split(
pa.weights_filepath
)
pa.visualization_output_path = os.path.join(
weights_dirpath,
f'visualization_{os.path.splitext(weights_filename)[0]}'
)
if os.path.exists(pa.visualization_output_path):
raise ValueError(
"The path provided by `visualization-output-path` "
f"'{pa.visualization_output_path}' already exists. Please "
"provide a different path."
)
# other parameters ----------------------------------------------------
if pa.debug:
_warn("`debug` is set, enabling debug outputs and ONNX export "
"(use EXPORT_ONNX_MODELS=true python ...)")
# wandb
if pa.wandb_name is not None:
_warn("Parameter `wandb-name` is deprecated, use `wandb-project` "
"instead.")
pa.wandb_project = pa.wandb_name
# print args
if verbose:
args_str = json.dumps(vars(pa), indent=4, sort_keys=True)
print(f"Running with args:\n {args_str}")
# return parsed (and subsequently modified) args
return pa
| 60,757 | 40.930987 | 93 | py |
null | EMSAFormer-main/emsaformer/data.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Soehnke Fischedick <soehnke-benedikt.fischedick@tu-ilmenau.de>
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
from typing import Optional, Iterable, Tuple
from collections import OrderedDict
from copy import deepcopy
from dataclasses import asdict
from functools import partial
import re
import warnings
import numpy as np
from nicr_mt_scene_analysis.data import CollateIgnoredDict
from nicr_mt_scene_analysis.data import mt_collate
from nicr_mt_scene_analysis.data import RandomSamplerSubset
from torch.utils.data import DataLoader
from torch.utils.data import Subset
from nicr_scene_analysis_datasets.dataset_base import build_dataset_config
from nicr_scene_analysis_datasets.dataset_base import DatasetConfig
from nicr_scene_analysis_datasets.dataset_base import OrientationDict
from nicr_scene_analysis_datasets.dataset_base import SampleIdentifier
from nicr_scene_analysis_datasets.dataset_base import SemanticLabel
from nicr_scene_analysis_datasets.dataset_base import SemanticLabelList
from nicr_scene_analysis_datasets.pytorch import DatasetType
from nicr_scene_analysis_datasets.pytorch import KNOWN_DATASETS # noqa: F401
from nicr_scene_analysis_datasets.pytorch import KNOWN_CLASS_WEIGHTINGS # noqa: F401
from nicr_scene_analysis_datasets.pytorch import ConcatDataset
from nicr_scene_analysis_datasets.pytorch import get_dataset_class
from nicr_scene_analysis_datasets.pytorch import ScanNet
class ScanNetWithOrientations(ScanNet):
    """ScanNet variant that mimics (empty) instance orientations.

    ScanNet itself ships no instance-orientation annotations. When ScanNet
    is combined with datasets that do provide orientations (e.g., NYUv2 or
    SUNRGB-D), this wrapper serves empty OrientationDicts so that ScanNet
    instances are simply ignored when fitting the orientation estimation
    task. When ScanNet is used as main dataset,
    `copy_use_orientations_from` allows taking over the per-class
    'use_orientations' flags from another dataset.
    """

    def __init__(self, **kwargs):
        requested_sample_keys = kwargs['sample_keys']
        # delegate to ScanNet without the (unsupported) 'orientations' key
        kwargs['sample_keys'] = tuple(
            key for key in requested_sample_keys if key != 'orientations'
        )
        super().__init__(**kwargs)

        # restore the full key set and re-register the loaders so that
        # 'orientations' is served by `_load_orientations` below
        self._sample_keys = requested_sample_keys
        self.auto_register_sample_key_loaders()

        self._use_orientations_replaced = False

    def _load_orientations(self, idx):
        # ScanNet has no instance orientations -> always empty
        return OrientationDict({})

    @staticmethod
    def get_available_sample_keys(split: str) -> Tuple[str]:
        # pretend that 'orientations' is available for every split
        return ScanNet.SPLIT_SAMPLE_KEYS[split] + ('orientations',)

    def __getitem__(self, idx):
        if not self._use_orientations_replaced:
            warnings.warn(
                "You are using ScanNetWithOrientations without copying the "
                "'use_orientations' information from another dataset."
            )
        return super().__getitem__(idx)

    def copy_use_orientations_from(self, other_datataset):
        # take over the per-class 'use_orientations' flag from the given
        # dataset and rebuild our dataset config accordingly
        donor_labels = other_datataset.config.semantic_label_list

        rebuilt_labels = SemanticLabelList()
        unmatched_classes = []
        for label in self.config.semantic_label_list:
            if label.class_name not in donor_labels:
                # class unknown to the donor dataset -> keep label untouched
                rebuilt_labels.add_label(label)
                unmatched_classes.append(label.class_name)
            else:
                donor = donor_labels[donor_labels.index(label.class_name)]
                label_kwargs = asdict(label)
                label_kwargs['use_orientations'] = donor.use_orientations
                rebuilt_labels.add_label(SemanticLabel(**label_kwargs))

        if unmatched_classes:
            warnings.warn(
                f"{self.__class__.__name__}: Could not copy 'use_orientations' "
                f"information for classes: {unmatched_classes} from dataset "
                f"{other_datataset.__class__.__name__}."
            )

        # replace current dataset config
        self._config = build_dataset_config(
            semantic_label_list=rebuilt_labels,
            scene_label_list=self.config.scene_label_list,
            depth_stats=self.config.depth_stats
        )
        self._use_orientations_replaced = True
def parse_datasets(
    datasets_str: str,
    datasets_path_str: Optional[str] = None,
    datasets_split_str: Optional[str] = None
):
    """Parse dataset/path/split specification strings.

    Multiple datasets are joined with ':' (e.g., 'nyuv2:sunrgbd'); a dataset
    may carry an optional camera selection in brackets (e.g.,
    'sunrgbd[kv1,kv2]'). Paths and splits, if given, must contain the same
    number of ':'-separated entries as the dataset string.

    Parameters
    ----------
    datasets_str:
        ':'-joined dataset names, each optionally with '[camera,...]'.
    datasets_path_str:
        Optional ':'-joined dataset paths (one per dataset).
    datasets_split_str:
        Optional ':'-joined split names (one per dataset).

    Returns
    -------
    OrderedDict
        Maps each dataset name to a dict with keys 'path', 'split' (both
        None if not given), and 'cameras' (list of camera names or None).

    Raises
    ------
    ValueError
        If the number of paths or splits does not match the number of
        datasets.
    """
    misconfiguration_error = ValueError(
        "Detected dataset misconfiguration, i.e., different number of "
        f"datasets, paths or splits. Datasets: '{datasets_str}', paths: "
        f"'{datasets_path_str}', splits: '{datasets_split_str}'."
    )

    # ':' indicates joined datasets
    datasets = datasets_str.lower().split(':')
    if datasets_path_str is not None:
        # NOTE(review): lowercasing user-provided paths assumes lowercase
        # dataset directories -- confirm this is intended on case-sensitive
        # filesystems
        dataset_paths = datasets_path_str.lower().split(':')
        if len(dataset_paths) != len(datasets):
            raise misconfiguration_error
    if datasets_split_str is not None:
        dataset_splits = datasets_split_str.lower().split(':')
        if len(dataset_splits) != len(datasets):
            raise misconfiguration_error

    dataset_dict = OrderedDict()    # we may use dict in future here (py > 3.6)
    for i, dataset in enumerate(datasets):
        # handle complex dataset format (e.g., 'sunrgbd[kv1,kv2]');
        # fixed: ',' must be part of the second character class, otherwise a
        # multi-camera spec such as '[kv1,kv2]' yields multiple matches and
        # the assert below fails
        re_res = re.findall(r'([a-z0-9\_\-]+)\[?([a-z0-9\_\-\,]*)\]?',
                            dataset)
        assert len(re_res) == 1 and len(re_res[0]) == 2
        # parse results (dataset_name, cameras_str)
        ds_name, ds_cameras = re_res[0]
        # split cameras
        ds_cameras = ds_cameras.split(',') if ds_cameras else None

        assert ds_name not in dataset_dict, f"Got same '{ds_name}' twice."
        dataset_dict[ds_name] = {
            'path': None if datasets_path_str is None else dataset_paths[i],
            'split': None if datasets_split_str is None else dataset_splits[i],
            'cameras': ds_cameras
        }

    return dataset_dict
def get_dataset(args, split):
    """Instantiate the dataset(s) for the given split.

    `args.dataset` may describe several datasets joined with ':' (optionally
    with a camera selection such as 'sunrgbd[kv1,kv2]'). A single dataset is
    returned directly; multiple datasets are wrapped in a ConcatDataset.
    The sample keys to load are derived from `args.input_modalities` and
    `args.tasks`.

    Parameters
    ----------
    args:
        Parsed command-line arguments (argparse namespace).
    split:
        Split to load (e.g., 'train'); for joined datasets and splits other
        than 'train', may itself be a ':'-joined list of splits.

    Returns
    -------
    A single dataset instance or a ConcatDataset of all configured datasets.
    """
    # define default kwargs dict for all datasets
    dataset_depth_mode = 'raw' if args.raw_depth else 'refined'
    default_dataset_kwargs = {
        'cityscapes': {
            'depth_mode': dataset_depth_mode,
            'semantic_n_classes': 19,
            'disparity_instead_of_depth': False
        },
        'coco': {},
        'hypersim': {
            'depth_mode': dataset_depth_mode,
            'subsample': None,
            'scene_use_indoor_domestic_labels': not args.use_original_scene_labels
        },
        'nyuv2': {
            'depth_mode': dataset_depth_mode,
            'semantic_n_classes': 40,
            'scene_use_indoor_domestic_labels': not args.use_original_scene_labels
        },
        'scannet': {
            'depth_mode': dataset_depth_mode,
            'instance_semantic_mode': 'refined',    # use refined annotations
            'scene_use_indoor_domestic_labels': not args.use_original_scene_labels,
            'semantic_n_classes': args.scannet_semantic_n_classes,
            # NYUv2 color palette only exists for the 20/40-class setups
            'semantic_use_nyuv2_colors': args.scannet_semantic_n_classes in (20, 40)
        },
        'scenenetrgbd': {
            'depth_mode': dataset_depth_mode,
            'semantic_n_classes': 13
        },
        'sunrgbd': {
            'depth_mode': dataset_depth_mode,
            # note, EMSANet paper uses False for 'depth_force_mm'
            'depth_force_mm': not args.sunrgbd_depth_do_not_force_mm,
            'semantic_use_nyuv2_colors': True,
            'scene_use_indoor_domestic_labels': not args.use_original_scene_labels
        },
    }
    # prepare names, paths, and splits
    # ':' indicates joined datasets
    dataset_split = split.lower()
    n_datasets = len(parse_datasets(args.dataset))
    if 'train' == dataset_split and n_datasets > 1:
        # currently, we do not have an args for setting multiple train splits,
        # thus, we mimic the correct format
        dataset_split = ':'.join(['train'] * n_datasets)
    # parse full dataset information
    datasets = parse_datasets(
        datasets_str=args.dataset,
        datasets_path_str=args.dataset_path,
        datasets_split_str=dataset_split
    )
    # check if SUNRGB-D is combined with other datasets
    if 'sunrgbd' in datasets and len(datasets) > 1:
        # we need to force depth in mm
        warnings.warn(
            "Forcing `depth_force_mm` for SUNRGB-D, as it is combined with "
            f"other datasets. Datasets to load: {datasets}."
        )
        default_dataset_kwargs['sunrgbd']['depth_force_mm'] = True
    # determine sample keys
    sample_keys = list(args.input_modalities) + list(args.tasks)
    # add identifier for easier debugging and plotting
    sample_keys.append('identifier')
    # fix sample key for orientation
    if 'orientation' in sample_keys:
        idx = sample_keys.index('orientation')
        sample_keys[idx] = 'orientations'
    # instance task requires semantic for determining foreground
    if 'instance' in args.tasks and 'semantic' not in args.tasks:
        sample_keys.append('semantic')
    # rgbd (single encoder) modality still require rgb and depth
    if 'rgbd' in sample_keys:
        if 'rgb' not in sample_keys:
            sample_keys.append('rgb')
        if 'depth' not in sample_keys:
            sample_keys.append('depth')
        # remove rgbd key
        sample_keys.remove('rgbd')
    sample_keys = tuple(sample_keys)
    # get dataset instances
    dataset_instances = []
    for i, (dataset_name, dataset) in enumerate(datasets.items()):
        if 'none' == dataset['split']:
            # indicates that this dataset should not be loaded (e.g., for
            # training on ScanNet and SunRGB-D but validation only on SunRGB-D)
            continue
        # get dataset class
        if 'scannet' == dataset_name and 'orientations' in sample_keys:
            # we do not have orientation annotations for ScanNet, use
            # ScanNetWithOrientations as a simple workaround to mimic empty
            # OrientationDicts, however, this only makes sense if ScanNet is
            # is combined with another dataset that provides orientation
            warnings.warn(
                "Detected ScanNet dataset in dataset configuration: "
                f"{datasets} and training with orientation estimation. "
                "Switching to 'ScanNetWithOrientations' to mimic orientations."
            )
            Dataset = ScanNetWithOrientations
        else:
            Dataset = get_dataset_class(dataset_name)
        # get default kwargs for dataset
        dataset_kwargs = deepcopy(default_dataset_kwargs[dataset_name])
        # handle subsample for ScanNet
        if 'scannet' == dataset_name:
            if 'train' == dataset['split']:
                dataset_kwargs['subsample'] = args.scannet_subsample
            else:
                dataset_kwargs['subsample'] = args.validation_scannet_subsample
        # handle subsample for Hypersim
        if 'hypersim' == dataset_name:
            if 'train' == dataset['split']:
                dataset_kwargs['subsample'] = args.hypersim_subsample
        # check if all sample keys are available
        sample_keys_avail = Dataset.get_available_sample_keys(dataset['split'])
        sample_keys_missing = set(sample_keys) - set(sample_keys_avail)
        if sample_keys_missing:
            # this indicates a common problem, however, it also happens for
            # inference ScanNet on test split
            warnings.warn(
                f"Sample keys '{sample_keys_missing}' are not available for "
                f"dataset '{dataset_name}' and split '{dataset['split']}'. "
                "Removing them from sample keys."
            )
            # note, the reduced key set also applies to subsequent datasets
            sample_keys = tuple(set(sample_keys) - sample_keys_missing)
        # instantiate dataset object
        dataset_instance = Dataset(
            dataset_path=dataset['path'],
            split=dataset['split'],
            sample_keys=sample_keys,
            use_cache=args.cache_dataset,
            cache_disable_deepcopy=False,    # False as we modify samples inplace
            cameras=dataset['cameras'],
            **dataset_kwargs
        )
        dataset_instances.append(dataset_instance)
    if 1 == len(dataset_instances):
        # single dataset
        return dataset_instances[0]
    if isinstance(dataset_instances[0], ScanNetWithOrientations):
        # we switched from ScanNet to ScanNetWithOrientations as it is combined
        # with other datasets, however, we do not have valid 'use_orientations'
        # information for ScanNet, so we copy it from the next dataset
        dataset_instances[0].copy_use_orientations_from(dataset_instances[1])
    # concatenated datasets
    return ConcatDataset(dataset_instances[0], *dataset_instances[1:])
class DataHelper:
    """Bundles the training dataloader and per-dataset validation dataloaders.

    Additionally provides an overfitting mode in which both training and
    validation iterate the same few batches drawn from the (first)
    validation dataset.
    """

    def __init__(
        self,
        dataset_train: DatasetType,
        batch_size_train: int,
        datasets_valid: Iterable[DatasetType],
        batch_size_valid: Optional[int] = None,
        subset_train: float = 1.0,
        subset_deterministic: bool = False,
        n_workers: int = 8,
        persistent_worker: bool = False,
    ) -> None:
        # modified collate function: handles elements of different spatial
        # resolution and ignores numpy arrays, OrientationDicts,
        # CollateIgnoredDicts, and SampleIdentifiers
        batch_collate = partial(
            mt_collate,
            type_blacklist=(np.ndarray,
                            CollateIgnoredDict,
                            OrientationDict,
                            SampleIdentifier)
        )

        # training split/set with (optionally deterministic) subset sampling
        train_sampler = RandomSamplerSubset(
            data_source=dataset_train,
            subset=subset_train,
            deterministic=subset_deterministic
        )
        self._dataloader_train = DataLoader(
            dataset_train,
            batch_size=batch_size_train,
            sampler=train_sampler,
            drop_last=True,
            collate_fn=batch_collate,
            pin_memory=True,
            num_workers=n_workers,
            persistent_workers=persistent_worker
        )

        # validation split/set: one loader per given dataset
        valid_batch_size = batch_size_valid or 3*batch_size_train
        self._dataloaders_valid = tuple(
            DataLoader(
                ds,
                batch_size=valid_batch_size,
                shuffle=False,
                drop_last=False,
                collate_fn=batch_collate,
                pin_memory=True,
                num_workers=n_workers,
                persistent_workers=persistent_worker
            )
            for ds in datasets_valid
        )

        self._overfitting_enabled = False
        # the (first) valid dataset backs the overfitting mode; copy it right
        # away to ensure that no sample was drawn from it before
        self._overfitting_dataset = deepcopy(self.datasets_valid[0])

    def enable_overfitting_mode(self, n_valid_batches: int) -> None:
        """Switch to overfitting on `n_valid_batches` validation batches."""
        self._overfitting_enabled = True

        train_loader = self._dataloader_train
        batch_size = train_loader.batch_size
        n_samples = n_valid_batches * batch_size

        dataset = self._overfitting_dataset
        camera = dataset.cameras[0]
        if len(dataset.cameras) > 1:
            warnings.warn(
                "Overfitting dataset (valid split) contains multiple cameras. "
                f"Using first camera: '{camera}' to ensure samples of same "
                "spatial resolution."
            )
        dataset.filter_camera(camera)

        if n_samples > len(dataset):
            raise ValueError(
                f"Not enough data for overfitting. Tried to draw {n_samples} "
                f"samples from {len(dataset)}. Reduce the number of batches or "
                " the batch size for overfitting!"
            )

        self._overfitting_dataloader = DataLoader(
            Subset(dataset, tuple(range(n_samples))),
            batch_size=batch_size,
            shuffle=False,
            drop_last=True,
            collate_fn=train_loader.collate_fn,
            pin_memory=True,
            num_workers=train_loader.num_workers,
            persistent_workers=train_loader.persistent_workers
        )
        print("Enable overfitting mode (same data for training and validation) "
              f"with {n_valid_batches} batches (each with {batch_size} "
              "samples) from validation split.")

    @property
    def dataset_config(self) -> DatasetConfig:
        # the train split defines the dataset configuration
        return self._dataloader_train.dataset.config

    @property
    def dataset_train(self) -> DatasetType:
        return self._dataloader_train.dataset

    @property
    def datasets_valid(self) -> Tuple[DatasetType]:
        return tuple(dl.dataset for dl in self._dataloaders_valid)

    def set_train_preprocessor(self, preprocessor):
        self._dataloader_train.dataset.preprocessor = preprocessor

    def set_valid_preprocessor(self, preprocessor):
        for ds in self.datasets_valid:
            ds.preprocessor = preprocessor
        # the overfitting dataset mimics validation preprocessing
        self._overfitting_dataset.preprocessor = deepcopy(preprocessor)

    @property
    def train_dataloader(self) -> DataLoader:
        if self._overfitting_enabled:
            return self._overfitting_dataloader
        return self._dataloader_train

    @property
    def valid_dataloaders(self) -> Tuple[DataLoader]:
        if self._overfitting_enabled:
            return (self._overfitting_dataloader,)
        return self._dataloaders_valid
def get_datahelper(args) -> DataHelper:
    """Build a DataHelper from parsed command-line arguments.

    The validation dataset is split into one (deep-copied) dataset per
    camera so that each validation dataloader yields batches of a single
    spatial resolution.
    """
    train_dataset = get_dataset(args, 'train')
    valid_dataset = get_dataset(args, args.validation_split)

    # one validation dataset per camera -> same resolution within a loader
    valid_datasets = [
        deepcopy(valid_dataset).filter_camera(camera)
        for camera in valid_dataset.cameras
    ]

    # combine everything in a data helper
    return DataHelper(
        dataset_train=train_dataset,
        subset_train=args.subset_train,
        subset_deterministic=args.subset_deterministic,
        batch_size_train=args.batch_size,
        datasets_valid=valid_datasets,
        batch_size_valid=args.validation_batch_size,
        n_workers=args.n_workers,
        persistent_worker=args.cache_dataset,    # only if caching is enabled
    )
| 19,253 | 38.946058 | 87 | py |
null | EMSAFormer-main/emsaformer/decoder.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Soehnke Fischedick <soehnke-benedikt.fischedick@tu-ilmenau.de>
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
from typing import Tuple, Union
from torch import nn
from nicr_mt_scene_analysis.model.activation import get_activation_class
from nicr_mt_scene_analysis.model.block import get_block_class
from nicr_mt_scene_analysis.model.decoder import SemanticDecoder
from nicr_mt_scene_analysis.model.decoder import SemanticMLPDecoder
from nicr_mt_scene_analysis.model.decoder import InstanceDecoder
from nicr_mt_scene_analysis.model.decoder import InstanceMLPDecoder
from nicr_mt_scene_analysis.model.decoder import NormalDecoder
from nicr_mt_scene_analysis.model.decoder import NormalMLPDecoder
from nicr_mt_scene_analysis.model.decoder import PanopticHelper
from nicr_mt_scene_analysis.model.decoder import SceneClassificationDecoder
from nicr_mt_scene_analysis.model.encoder_decoder_fusion import get_encoder_decoder_fusion_class
from nicr_mt_scene_analysis.model.normalization import get_normalization_class
from nicr_mt_scene_analysis.model.postprocessing import get_postprocessing_class
from nicr_mt_scene_analysis.model.upsampling import get_upsampling_class
# decoder architectures selectable for the semantic/instance/normal decoders
KNOWN_DECODERS = (
    'emsanet',  # decoder used in EMSANet publication
    'segformermlp',  # MLP decoder used in EMSAFormer publication
)
def get_decoders(
    args,
    n_channels_in: int,
    downsampling_in: int,
    semantic_n_classes: int = 40,
    instance_normalized_offset: bool = True,
    instance_offset_distance_threshold: Union[None, int] = None,
    instance_sigmoid_for_center: bool = True,
    instance_tanh_for_offset: bool = True,
    panoptic_semantic_classes_is_thing: Tuple[bool, ...] = (True, )*40,
    panoptic_has_orientation: Tuple[bool, ...] = (True, )*40,
    normal_n_channels_out: int = 3,
    scene_n_channels_in: int = 512//2,
    scene_n_classes: int = 10,
    fusion_n_channels: Tuple[int, ...] = (512, 256, 128),
    **kwargs
) -> nn.ModuleDict:
    """Build the task decoders selected via `args.tasks`.

    Depending on `args.*_decoder`, either the EMSANet decoder or the
    SegFormer-style MLP decoder is instantiated for the semantic, instance,
    and normal tasks. If `args.enable_panoptic` is set, the semantic and
    instance decoders are wrapped in a single PanopticHelper. Additional
    `kwargs` are forwarded to the postprocessing factories.

    Parameters
    ----------
    args:
        Parsed command-line arguments (decoder types, channels, fusion, ...).
    n_channels_in:
        Number of channels of the (fused) encoder output.
    downsampling_in:
        Downsampling factor of the encoder output.
    semantic_n_classes:
        Number of semantic classes for the semantic decoder.
    instance_*:
        Configuration for the instance decoder and its postprocessing.
    panoptic_semantic_classes_is_thing / panoptic_has_orientation:
        Per-semantic-class flags used by the panoptic postprocessing.
    normal_n_channels_out:
        Number of output channels of the normal decoder.
    scene_n_channels_in / scene_n_classes:
        Input channels and class count for the scene classification decoder.
    fusion_n_channels:
        Channels of the encoder skip connections used for fusion.

    Returns
    -------
    nn.ModuleDict mapping decoder names ('semantic_decoder',
    'instance_decoder' / 'panoptic_helper', 'normal_decoder',
    'scene_decoder') to the instantiated modules.
    """
    # common parameters used in almost all encoders
    common_kwargs = {
        'n_channels_in': n_channels_in,
        'downsampling_in': downsampling_in,
        'fusion_n_channels': fusion_n_channels,
        'fusion_downsamplings': args.encoder_decoder_skip_downsamplings[::-1],
        'normalization': get_normalization_class(args.decoder_normalization),
        'activation': get_activation_class(args.activation),
        'prediction_upsampling': get_upsampling_class(args.upsampling_prediction)
    }
    decoders = {}
    # semantic segmentation
    if 'semantic' in args.tasks:
        decoder_type = args.semantic_decoder.lower()
        if 'emsanet' == decoder_type:
            semantic_decoder = SemanticDecoder(
                n_channels=args.semantic_decoder_n_channels,
                downsamplings=args.semantic_decoder_downsamplings,
                block=get_block_class(
                    args.semantic_decoder_block,
                    dropout_p=args.semantic_decoder_block_dropout_p
                ),
                n_blocks=args.semantic_decoder_n_blocks,
                fusion=get_encoder_decoder_fusion_class(args.semantic_encoder_decoder_fusion),
                n_classes=semantic_n_classes,
                postprocessing=get_postprocessing_class('semantic', **kwargs),
                upsampling=get_upsampling_class(args.semantic_decoder_upsampling),
                **common_kwargs
            )
        elif 'segformermlp' == decoder_type:
            semantic_decoder = SemanticMLPDecoder(
                n_channels=args.semantic_decoder_n_channels,
                fusion=get_encoder_decoder_fusion_class(args.semantic_encoder_decoder_fusion),
                n_classes=semantic_n_classes,
                downsampling_in_heads=4,
                dropout_p=args.semantic_decoder_dropout_p,
                postprocessing=get_postprocessing_class('semantic', **kwargs),
                upsampling=get_upsampling_class(args.semantic_decoder_upsampling),
                **common_kwargs
            )
        decoders['semantic_decoder'] = semantic_decoder
    # (class-agnostic) instance segmentation
    if 'instance' in args.tasks:
        instance_postprocessing = get_postprocessing_class(
            'instance',
            heatmap_threshold=args.instance_center_heatmap_threshold,
            heatmap_nms_kernel_size=args.instance_center_heatmap_nms_kernel_size,
            heatmap_apply_foreground_mask=args.instance_center_heatmap_apply_foreground_mask,
            top_k_instances=args.instance_center_heatmap_top_k,
            normalized_offset=instance_normalized_offset,
            offset_distance_threshold=instance_offset_distance_threshold,
            **kwargs
        )
        decoder_type = args.instance_decoder.lower()
        if 'emsanet' == decoder_type:
            instance_decoder = InstanceDecoder(
                n_channels=args.instance_decoder_n_channels,
                downsamplings=args.instance_decoder_downsamplings,
                block=get_block_class(
                    args.instance_decoder_block,
                    dropout_p=args.instance_decoder_block_dropout_p
                ),
                n_blocks=args.instance_decoder_n_blocks,
                fusion=get_encoder_decoder_fusion_class(args.instance_encoder_decoder_fusion),
                n_channels_per_task=32,    # default panoptic deeplab
                with_orientation=('orientation' in args.tasks),
                sigmoid_for_center=instance_sigmoid_for_center,
                tanh_for_offset=instance_tanh_for_offset,
                postprocessing=instance_postprocessing,
                upsampling=get_upsampling_class(args.instance_decoder_upsampling),
                **common_kwargs
            )
        elif 'segformermlp' == decoder_type:
            instance_decoder = InstanceMLPDecoder(
                n_channels=args.instance_decoder_n_channels,
                fusion=get_encoder_decoder_fusion_class(args.instance_encoder_decoder_fusion),
                n_channels_per_task=32,    # default panoptic deeplab
                with_orientation=('orientation' in args.tasks),
                sigmoid_for_center=instance_sigmoid_for_center,
                tanh_for_offset=instance_tanh_for_offset,
                downsampling_in_heads=4,
                dropout_p=args.instance_decoder_dropout_p,
                postprocessing=instance_postprocessing,
                upsampling=get_upsampling_class(args.instance_decoder_upsampling),
                **common_kwargs
            )
        decoders['instance_decoder'] = instance_decoder
    # panoptic segmentation
    if args.enable_panoptic:
        # the helper reuses both task decoders and merges their outputs in a
        # joint panoptic postprocessing
        panoptic_helper = PanopticHelper(
            semantic_decoder=semantic_decoder,
            instance_decoder=instance_decoder,
            postprocessing=get_postprocessing_class(
                'panoptic',
                semantic_postprocessing=semantic_decoder.postprocessing,
                instance_postprocessing=instance_decoder.postprocessing,
                semantic_classes_is_thing=panoptic_semantic_classes_is_thing,
                semantic_class_has_orientation=panoptic_has_orientation,
                compute_scores=True,
                **kwargs
            )
        )
        # replace dict with decoders (can only contain semantic and instance
        # up to now)
        decoders = {'panoptic_helper': panoptic_helper}
    # surface normal estimation
    if 'normal' in args.tasks:
        decoder_type = args.normal_decoder.lower()
        if 'emsanet' == decoder_type:
            normal_decoder = NormalDecoder(
                n_channels=args.normal_decoder_n_channels,
                downsamplings=args.normal_decoder_downsamplings,
                block=get_block_class(
                    args.normal_decoder_block,
                    dropout_p=args.normal_decoder_block_dropout_p
                ),
                n_blocks=args.normal_decoder_n_blocks,
                fusion=get_encoder_decoder_fusion_class(args.normal_encoder_decoder_fusion),
                n_channels_out=normal_n_channels_out,
                postprocessing=get_postprocessing_class('normal', **kwargs),
                upsampling=get_upsampling_class(args.normal_decoder_upsampling),
                **common_kwargs
            )
        elif 'segformermlp' == decoder_type:
            normal_decoder = NormalMLPDecoder(
                n_channels=args.normal_decoder_n_channels,
                fusion=get_encoder_decoder_fusion_class(args.normal_encoder_decoder_fusion),
                n_channels_out=normal_n_channels_out,
                downsampling_in_heads=4,
                dropout_p=args.normal_decoder_dropout_p,
                postprocessing=get_postprocessing_class('normal', **kwargs),
                upsampling=get_upsampling_class(args.normal_decoder_upsampling),
                **common_kwargs
            )
        decoders['normal_decoder'] = normal_decoder
    # scene classification
    if 'scene' in args.tasks:
        # the scene decoder operates on a different (context) input
        common_kwargs['n_channels_in'] = scene_n_channels_in
        scene_decoder = SceneClassificationDecoder(
            n_classes=scene_n_classes,
            postprocessing=get_postprocessing_class('scene', **kwargs),
            **common_kwargs
        )
        decoders['scene_decoder'] = scene_decoder
    return nn.ModuleDict(decoders)
| 9,414 | 45.608911 | 96 | py |
null | EMSAFormer-main/emsaformer/loss_weighting.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
from nicr_mt_scene_analysis.loss_weighting import FixedLossWeighting
from nicr_mt_scene_analysis.loss_weighting import LossWeightingType
from nicr_mt_scene_analysis.task_helper.base import get_total_loss_key
def get_loss_weighting_module(args) -> LossWeightingType:
    """Build the fixed loss-weighting module from the parsed CLI arguments.

    Each task listed in ``args.tasks`` is assigned the weight at the same
    position in ``args.tasks_weighting``. Task weights are then converted to
    flat loss weights whose keys match the total-loss keys produced by the
    task helpers.

    Parameters
    ----------
    args:
        Parsed argument namespace; must provide ``tasks``,
        ``tasks_weighting``, and ``instance_weighting``.

    Returns
    -------
    FixedLossWeighting
        Module holding one fixed weight per (total) loss key.

    Raises
    ------
    ValueError
        If ``args.tasks`` and ``args.tasks_weighting`` differ in length.
    """
    # we stick to fixed task weighting as none of the remaining was working well
    # note: this is real input validation, so raise instead of assert (which
    # is stripped when running with -O)
    if len(args.tasks) != len(args.tasks_weighting):
        raise ValueError(
            "Number of tasks and task weights must match: "
            f"{len(args.tasks)} != {len(args.tasks_weighting)}"
        )
    # assign weight to each task (based on positional order)
    tasks_weights = dict(zip(args.tasks, args.tasks_weighting))
    # convert task weights to loss weights (keys must match the later losses)
    # note, we consider only losses marked as total for weighting for now
    loss_weights = {}
    # handle orientation as it is part of the instance decoder
    if 'orientation' in args.tasks:
        loss_weights[get_total_loss_key('instance_orientation')] = \
            tasks_weights.pop('orientation')
    # handle instance keys
    if 'instance' in args.tasks:
        # overall weight for the instance task (not orientation!)
        weight_instance = tasks_weights.pop('instance')
        # additional weighting for both instance tasks (center and offset)
        weight_center, weight_offset = args.instance_weighting
        # to determine the final (flat) weights, values gets multiplied
        loss_weights[get_total_loss_key('instance_center')] = \
            weight_instance*weight_center
        loss_weights[get_total_loss_key('instance_offset')] = \
            weight_instance*weight_offset
    # for the remaining tasks, simply append the total loss suffix
    loss_weights.update({
        get_total_loss_key(task): value
        for task, value in tasks_weights.items()
    })
    return FixedLossWeighting(weights=loss_weights)
| 1,939 | 37.8 | 80 | py |
null | EMSAFormer-main/emsaformer/lr_scheduler.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
from torch.optim.lr_scheduler import OneCycleLR
KNOWN_LR_SCHEDULERS = ('onecycle', )

LrSchedulerType = OneCycleLR


def get_lr_scheduler(args, optimizer) -> LrSchedulerType:
    """Create the learning-rate scheduler selected via ``args``.

    The scheduler name (``args.learning_rate_scheduler``) is matched
    case-insensitively against :data:`KNOWN_LR_SCHEDULERS`.

    Parameters
    ----------
    args:
        Parsed argument namespace; must provide ``learning_rate_scheduler``
        and ``n_epochs``.
    optimizer:
        Optimizer whose per-group learning rates serve as ``max_lr``.

    Returns
    -------
    OneCycleLR

    Raises
    ------
    ValueError
        If the scheduler name is unknown.
    """
    scheduler_name = args.learning_rate_scheduler.lower()
    total_steps = args.n_epochs

    if scheduler_name not in KNOWN_LR_SCHEDULERS:
        raise ValueError(
            f"Unknown learning rate scheduler: '{scheduler_name}'"
        )

    # 'onecycle' is currently the only supported scheduler
    return OneCycleLR(
        optimizer,
        max_lr=[group['lr'] for group in optimizer.param_groups],
        total_steps=total_steps,
        div_factor=25,
        pct_start=0.1,
        anneal_strategy='cos',
        final_div_factor=1e4
    )
| 818 | 23.088235 | 70 | py |
null | EMSAFormer-main/emsaformer/model.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Soehnke Fischedick <soehnke-benedikt.fischedick@tu-ilmenau.de>
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
from typing import Any, Dict
from collections import ChainMap
from nicr_mt_scene_analysis.model.block import get_block_class
from nicr_mt_scene_analysis.model.backbone import get_backbone
from nicr_mt_scene_analysis.model.context_module import get_context_module
from nicr_mt_scene_analysis.model.encoder import get_encoder
from nicr_mt_scene_analysis.model.encoder_fusion import get_encoder_fusion_class
from nicr_mt_scene_analysis.model.encoder_decoder_fusion import get_encoder_decoder_fusion_class
from nicr_mt_scene_analysis.model.upsampling import Upsampling
from nicr_mt_scene_analysis.model.initialization import he_initialization
from nicr_mt_scene_analysis.model.initialization import zero_residual_initialization
import torch
from .data import DatasetConfig
from .decoder import get_decoders
class EMSAFormer(torch.nn.Module):
    """Multi-task scene-analysis network.

    The network is assembled from parsed CLI arguments and the dataset
    configuration: one backbone per input modality (rgb, depth, and/or a
    single fused rgbd backbone) is fused into a shared encoder, followed by
    a context module and a set of task-specific decoders (semantic,
    instance/orientation, normal, scene, panoptic helper) created via
    ``get_decoders``.
    """

    def __init__(
        self,
        args,
        dataset_config: DatasetConfig
    ) -> None:
        """Build encoder(s), context module, and decoders from ``args``.

        Parameters
        ----------
        args:
            Parsed argument namespace (input modalities, backbone names,
            decoder settings, initialization flags, ...).
        dataset_config:
            Dataset configuration providing semantic/scene label lists used
            to size the decoder outputs.
        """
        super().__init__()
        # store args and dataset parameters
        self.args = args
        self.dataset_config = dataset_config
        # get some dataset properties
        semantic_labels = dataset_config.semantic_label_list_without_void
        semantic_n_classes = len(semantic_labels)
        scene_n_classes = len(dataset_config.scene_label_list_without_void)
        panoptic_semantic_classes_is_thing = semantic_labels.classes_is_thing
        panoptic_use_orientation = tuple(semantic_labels.classes_use_orientations)
        # create encoder(s)
        # note: one backbone per requested modality; unused modalities stay
        # None and are ignored by `get_encoder`
        if 'rgb' in args.input_modalities:
            backbone_rgb = get_backbone(
                name=args.rgb_encoder_backbone,
                resnet_block=get_block_class(
                    args.rgb_encoder_backbone_resnet_block,
                    dropout_p=args.dropout_p
                ),
                n_input_channels=3,
                normalization=args.encoder_normalization,
                activation=args.activation,
                pretrained=not args.no_pretrained_backbone,
                pretrained_filepath=args.rgb_encoder_backbone_pretrained_weights_filepath
            )
        else:
            backbone_rgb = None
        if 'depth' in args.input_modalities:
            backbone_depth = get_backbone(
                name=args.depth_encoder_backbone,
                resnet_block=get_block_class(
                    args.depth_encoder_backbone_resnet_block,
                    dropout_p=args.dropout_p
                ),
                n_input_channels=1,
                normalization=args.encoder_normalization,
                activation=args.activation,
                pretrained=not args.no_pretrained_backbone,
                pretrained_filepath=args.depth_encoder_backbone_pretrained_weights_filepath
            )
        else:
            backbone_depth = None
        if 'rgbd' in args.input_modalities:
            backbone_rgbd = get_backbone(
                name=args.rgbd_encoder_backbone,
                resnet_block=get_block_class(
                    args.rgbd_encoder_backbone_resnet_block,
                    dropout_p=args.dropout_p
                ),
                n_input_channels=3+1,
                normalization=args.encoder_normalization,
                activation=args.activation,
                pretrained=not args.no_pretrained_backbone,
                pretrained_filepath=args.rgbd_encoder_backbone_pretrained_weights_filepath
            )
        else:
            backbone_rgbd = None
        # fuse encoder(s) in a shared module
        self.encoder = get_encoder(
            backbone_rgb=backbone_rgb,
            backbone_depth=backbone_depth,
            backbone_rgbd=backbone_rgbd,
            fusion=args.encoder_fusion,
            normalization=args.encoder_normalization,
            activation=args.activation,
            skip_downsamplings=args.encoder_decoder_skip_downsamplings
        )
        enc_downsampling = self.encoder.downsampling
        enc_n_channels_out = self.encoder.n_channels_out
        enc_skips_n_channels = self.encoder.skips_n_channels
        # create context module
        self.context_module = get_context_module(
            name=args.context_module,
            n_channels_in=enc_n_channels_out,
            n_channels_out=enc_n_channels_out,
            input_size=(args.input_height // enc_downsampling,
                        args.input_width // enc_downsampling),
            # context module only makes sense with batch normalization
            normalization='bn',
            activation=args.activation,
            upsampling=args.upsampling_context_module
        )
        # create decoder(s)
        # map the offset-encoding name to the two flags the decoders expect
        if args.instance_offset_encoding == 'tanh':
            instance_normalized_offset = True
            instance_tanh_for_offset = True
        elif args.instance_offset_encoding == 'relative':
            instance_normalized_offset = True
            instance_tanh_for_offset = False
        elif args.instance_offset_encoding == 'deeplab':
            instance_normalized_offset = False
            instance_tanh_for_offset = False
        else:
            raise NotImplementedError
        if args.instance_center_encoding == 'sigmoid':
            instance_sigmoid_for_center = True
        else:
            instance_sigmoid_for_center = False
        self.decoders = get_decoders(
            args,
            n_channels_in=enc_n_channels_out,
            downsampling_in=enc_downsampling,
            # semantic segmentation
            semantic_n_classes=semantic_n_classes,
            # instance segmentation
            instance_normalized_offset=instance_normalized_offset,
            instance_offset_distance_threshold=args.instance_offset_distance_threshold,
            instance_sigmoid_for_center=instance_sigmoid_for_center,
            instance_tanh_for_offset=instance_tanh_for_offset,
            # surface normal estimation
            normal_n_channels_out=3,
            # scene classification
            scene_n_channels_in=self.context_module.n_channels_reduction,
            scene_n_classes=scene_n_classes,
            # panoptic
            panoptic_semantic_classes_is_thing=panoptic_semantic_classes_is_thing,
            panoptic_has_orientation=panoptic_use_orientation,
            # other shared args
            # skips are consumed top-down by the decoders, hence reversed
            fusion_n_channels=enc_skips_n_channels[::-1],
        )
        # initialization
        debug_init = args.debug
        # apply he initialization to selected parts of the network
        for part in args.he_init:
            # whitelisted initialization
            cls = None
            if 'encoder-fusion' == part:
                cls = get_encoder_fusion_class(args.encoder_fusion)
            elif 'encoder-decoder-fusion' == part:
                cls = get_encoder_decoder_fusion_class(
                    args.encoder_decoder_fusion
                )
            if cls is not None:
                for n, m in self.named_modules():
                    if isinstance(m, cls):
                        he_initialization(m, name_hint=n, debug=debug_init)
            # (blacklisted) initialization
            if 'context-module' == part:
                he_initialization(self.context_module, debug=debug_init)
            elif 'decoder' == part:
                he_initialization(self.decoders,
                                  blacklist=(Upsampling,),
                                  debug=debug_init)
        # init last norm in residuals to zero to enforce identity on start
        if not args.no_zero_init_decoder_residuals:
            zero_residual_initialization(self.decoders, debug=debug_init)

    def forward(self, batch, do_postprocessing=False) -> Dict[str, Any]:
        """Run a forward pass through encoder, context module, and decoders.

        Parameters
        ----------
        batch:
            Batch dict providing the network inputs; depending on
            ``args.input_modalities`` the keys 'rgb' and/or 'depth' are read
            (for 'rgbd' both are read and concatenated along the channel
            dimension).
        do_postprocessing:
            If True, the decoders apply their postprocessing and the outputs
            of all decoders are merged into a single dict.

        Returns
        -------
        A merged dict if ``do_postprocessing`` is True, otherwise a list with
        one raw output per decoder (note: the return annotation only matches
        the postprocessed case).
        """
        # determine input
        enc_inputs = {}
        if 'rgbd' in self.args.input_modalities:
            rgb = batch['rgb']
            depth = batch['depth']
            rgbd = torch.cat([rgb, depth], dim=1)
            enc_inputs['rgbd'] = rgbd
        else:
            if 'rgb' in self.args.input_modalities:
                enc_inputs['rgb'] = batch['rgb']
            if 'depth' in self.args.input_modalities:
                enc_inputs['depth'] = batch['depth']
        # forward (fused) encoder(s)
        enc_outputs, enc_dec_skips = self.encoder(enc_inputs)
        # forward context module
        if len(self.args.input_modalities) == 2:
            # design choice up to now, use output of rgb encoder as input for
            # context module
            con_input = enc_outputs['rgb']
        else:
            # use the output of the decoder with the same name as the input
            assert len(enc_inputs) == 1    # only one input modality
            con_input = enc_outputs[list(enc_inputs.keys())[0]]
        con_outputs, con_context_outputs = self.context_module(con_input)
        # forward decoder(s)
        outputs = []
        for decoder in self.decoders.values():
            outputs.append(
                decoder(
                    (con_outputs, con_context_outputs), enc_dec_skips, batch,
                    do_postprocessing=do_postprocessing
                )
            )
        # simplify output if postprocessing was applied
        if do_postprocessing:
            outputs = dict(ChainMap(*outputs))
        return outputs
| 9,460 | 39.431624 | 96 | py |
null | EMSAFormer-main/emsaformer/optimizer.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
from typing import Union
from torch.optim import Adam
from torch.optim import AdamW
from torch.optim import RAdam
from torch.optim import SGD
KNOWN_OPTIMIZERS = ('adam', 'adamw', 'radam', 'sgd')

OptimizerType = Union[Adam, AdamW, RAdam, SGD]


def get_optimizer(args, parameters) -> OptimizerType:
    """Create the optimizer selected via ``args`` for the given parameters.

    The optimizer name (``args.optimizer``) is matched case-insensitively
    against :data:`KNOWN_OPTIMIZERS`.

    Parameters
    ----------
    args:
        Parsed argument namespace; must provide ``optimizer``,
        ``learning_rate``, ``weight_decay``, and, for SGD, ``momentum``.
    parameters:
        Iterable of parameters (or parameter groups) to optimize.

    Returns
    -------
    Adam | AdamW | RAdam | SGD

    Raises
    ------
    ValueError
        If ``args.optimizer`` does not name a known optimizer.
    """
    name = args.optimizer.lower()
    lr = args.learning_rate
    weight_decay = args.weight_decay

    if name not in KNOWN_OPTIMIZERS:
        raise ValueError(f"Unknown optimizer: '{name}'")

    # note: compare against the lowercased `name`; comparing against the raw
    # `args.optimizer` (as done previously) silently skipped all branches for
    # mixed-case names such as 'SGD' and crashed with UnboundLocalError
    if 'sgd' == name:
        optimizer = SGD(
            parameters,
            lr=lr,
            weight_decay=weight_decay,
            momentum=args.momentum,
            nesterov=True
        )
    elif 'adam' == name:
        optimizer = Adam(
            parameters,
            lr=lr,
            weight_decay=weight_decay,
            betas=(0.9, 0.999)
        )
    elif 'adamw' == name:
        optimizer = AdamW(
            parameters,
            lr=lr,
            weight_decay=weight_decay,
            betas=(0.9, 0.999)
        )
    else:   # 'radam' (guaranteed by the membership check above)
        optimizer = RAdam(
            parameters,
            lr=lr,
            betas=(0.9, 0.999),
            weight_decay=weight_decay,
        )

    return optimizer
| 1,424 | 22.75 | 63 | py |
null | EMSAFormer-main/emsaformer/preprocessing.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Soehnke Fischedick <soehnke-benedikt.fischedick@tu-ilmenau.de>
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
from typing import Optional, Tuple
from nicr_mt_scene_analysis.data.preprocessing import CloneEntries
from nicr_mt_scene_analysis.data.preprocessing import FullResCloner
from nicr_mt_scene_analysis.data.preprocessing import RandomCrop
from nicr_mt_scene_analysis.data.preprocessing import RandomHorizontalFlip
from nicr_mt_scene_analysis.data.preprocessing import InstanceClearStuffIDs
from nicr_mt_scene_analysis.data.preprocessing import InstanceTargetGenerator
from nicr_mt_scene_analysis.data.preprocessing import MultiscaleSupervisionGenerator
from nicr_mt_scene_analysis.data.preprocessing import NormalizeRGB
from nicr_mt_scene_analysis.data.preprocessing import NormalizeDepth
from nicr_mt_scene_analysis.data.preprocessing import OrientationTargetGenerator
from nicr_mt_scene_analysis.data.preprocessing import PanopticTargetGenerator
from nicr_mt_scene_analysis.data.preprocessing import RandomHSVJitter
from nicr_mt_scene_analysis.data.preprocessing import RandomResize
from nicr_mt_scene_analysis.data.preprocessing import Resize
from nicr_mt_scene_analysis.data.preprocessing import SemanticClassMapper
from nicr_mt_scene_analysis.data.preprocessing import ToTorchTensors
from nicr_scene_analysis_datasets import ScanNet
from torchvision.transforms import Compose
from .data import DatasetType
from .data import parse_datasets
def get_preprocessor(
    args,
    dataset: DatasetType,
    phase: str,
    multiscale_downscales: Optional[Tuple[int, ...]] = None,
    keep_raw_inputs=False
) -> Compose:
    """Build the ``Compose`` preprocessing pipeline for training/validation.

    The order of the preprocessors matters: e.g., instance-id clearing and
    panoptic-target generation must be applied before resizing so that the
    full-resolution copies used for evaluation stay consistent.

    Parameters
    ----------
    args:
        Parsed argument namespace (input sizes, augmentation ranges, task
        flags, dataset options, ...).
    dataset:
        Dataset instance providing its config and the sample keys.
    phase:
        Either 'train' (augmentation enabled) or 'test' (validation targets
        and full-resolution copies are generated).
    multiscale_downscales:
        Downscale factors for multiscale (side-output) supervision; ``None``
        disables multiscale target generation.
    keep_raw_inputs:
        If True, clone the raw inputs before preprocessing (e.g., for later
        visualization).

    Returns
    -------
    torchvision.transforms.Compose
        The assembled preprocessing pipeline.
    """
    assert phase in ('train', 'test')
    dataset_config = dataset.config
    sample_keys = dataset.sample_keys
    if args.visualize_validation or keep_raw_inputs:
        # clone raw inputs just to have them later for visualization
        transforms = [CloneEntries()]
    else:
        transforms = []
    # check if ScanNet benchmark mode is enabled -> handle remapping
    if 'test' == phase and args.validation_scannet_benchmark_mode:
        # enable ScanNet benchmark mode for validation ONLY, i.e., mapping
        # ignored classes to void (40 -> 20, 549 -> 200) to ignore them in
        # metrics
        assert args.scannet_semantic_n_classes in (40, 549)
        if 40 == args.scannet_semantic_n_classes:
            mapping = ScanNet.SEMANTIC_CLASSES_40_MAPPING_TO_BENCHMARK
        else:
            mapping = ScanNet.SEMANTIC_CLASSES_549_MAPPING_TO_BENCHMARK200
        classes_to_ignore = tuple(
            c_data
            for c_data, c_benchmark in mapping.items()
            if c_benchmark == 0 and c_data != 0    # ignore void
        )
        assert len(classes_to_ignore) in (40-20, 549-200)
        transforms.append(
            SemanticClassMapper(
                classes_to_map=classes_to_ignore,
                new_label=0,
            )
        )
    # check if SUNRGB-D is combined as main dataset with NYUv2, ScanNet or
    # Hypersim -> ignore last three classes (other*)
    datasets = tuple(parse_datasets(args.dataset).keys())
    if 'sunrgbd' == datasets[0]:
        if any(d in ('nyuv2', 'hypersim', 'scannet') for d in datasets[1:]):
            # map last three classes to void (ignore these classes in training/
            # validation)
            transforms.append(
                SemanticClassMapper(
                    classes_to_map=(38, 39, 40),
                    new_label=0,
                )
            )
    # instance preprocessing
    if 'instance' in sample_keys:
        # depending on the dataset and the applied division into stuff and
        # thing classes, the data may contain valid instance ids for instances
        # of stuff classes, we force id=0 (= no instance) for all stuff classes
        # including void to ensure that each stuff class is considered as a
        # single segment later
        # note that this preprocessor should be applied before resizing to
        # ensure that this requirement also applies to the full resolution
        # images that may be used for determining evaluation metrics
        is_thing = dataset_config.semantic_label_list.classes_is_thing
        transforms.append(
            InstanceClearStuffIDs(
                semantic_classes_is_thing=is_thing
            )
        )
    if 'train' == phase:
        # augmentation
        transforms.extend([
            RandomResize(
                min_scale=args.aug_scale_min,
                max_scale=args.aug_scale_max
            ),
            RandomCrop(
                crop_height=args.input_height,
                crop_width=args.input_width
            ),
            RandomHSVJitter(
                hue_jitter=10/(360/2),    # +-10 degree
                saturation_jitter=20/255,    # +- ~8%
                value_jitter=50/255,    # +- ~16%
            ),
            RandomHorizontalFlip(p=0.5)
        ])
    else:
        # panoptic targets are only required for validation
        # note: we need the panoptic targets in full resolution, so it is
        # important to have this preprocessor before the resize
        if 'semantic' in sample_keys and 'instance' in sample_keys:
            is_thing = dataset_config.semantic_label_list.classes_is_thing
            transforms.append(
                PanopticTargetGenerator(
                    semantic_classes_is_thing=is_thing,
                )
            )
        # create full-resolution copies of the relevant inputs (required for
        # resizing in inference and metrics)
        transforms.append(
            FullResCloner(
                keys_to_keep_fullres=(
                    'rgb', 'depth',    # resizing in inference
                    'semantic', 'normal', 'instance', 'panoptic'),    # eval!
                ignore_missing_keys=True    # not all keys may be available
            )
        )
        if not args.validation_full_resolution:
            # resize input images to network input resolution
            transforms.append(
                Resize(
                    height=args.validation_input_height,
                    width=args.validation_input_width,
                )
            )
    # handle mulitscale supervision
    if multiscale_downscales is not None:
        multiscale_keys = ['identifier']
        if 'semantic' in sample_keys:
            if not args.semantic_no_multiscale_supervision:
                multiscale_keys.append('semantic')
        if 'instance' in sample_keys:
            if not args.instance_no_multiscale_supervision:
                multiscale_keys.append('semantic')    # for thing vs. stuff
                multiscale_keys.append('instance')
                if 'orientations' in sample_keys:
                    multiscale_keys.append('orientations')
        if 'normal' in sample_keys:
            if not args.normal_no_multiscale_supervision:
                multiscale_keys.append('normal')
        if multiscale_keys:
            transforms.append(
                MultiscaleSupervisionGenerator(
                    downscales=multiscale_downscales,
                    keys=tuple(multiscale_keys)
                )
            )
    else:
        multiscale_downscales = ()
    # instance task
    if 'instance' in sample_keys:
        sigma = args.instance_center_sigma
        sigma_for_add_downscales = {
            downscale: (4*sigma) // downscale
            for downscale in multiscale_downscales
        }
        is_thing = dataset_config.semantic_label_list.classes_is_thing
        if args.instance_offset_encoding in ('relative', 'tanh'):
            normalized_offset = True
        else:
            normalized_offset = False
        transforms.append(
            InstanceTargetGenerator(
                sigma=sigma,
                semantic_classes_is_thing=is_thing,
                sigma_for_additional_downscales=sigma_for_add_downscales,
                normalized_offset=normalized_offset
            )
        )
    if 'orientations' in sample_keys:
        estimate_orientation = \
            dataset_config.semantic_label_list.classes_use_orientations
        transforms.append(
            OrientationTargetGenerator(
                semantic_classes_estimate_orientation=estimate_orientation
            )
        )
    # default preprocessing
    if 'rgb' in args.input_modalities or 'rgbd' in args.input_modalities:
        transforms.append(NormalizeRGB())
    if 'depth' in args.input_modalities or 'rgbd' in args.input_modalities:
        transforms.append(
            NormalizeDepth(
                depth_mean=dataset_config.depth_stats.mean,
                depth_std=dataset_config.depth_stats.std,
                raw_depth=args.raw_depth
            )
        )
    transforms.append(ToTorchTensors())
    # stack all transforms into a single preprocessor object
    preprocessor = Compose(transforms=transforms)
    if args.debug:
        # fixed duplicated word in the debug message ("for for")
        print(f"Preprocessor for phase: '{phase}':\n{preprocessor}")
    return preprocessor
| 9,116 | 37.795745 | 84 | py |
null | EMSAFormer-main/emsaformer/task_helper.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Soehnke Fischedick <soehnke-benedikt.fischedick@tu-ilmenau.de>
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
from typing import Tuple
from nicr_mt_scene_analysis.task_helper import NormalTaskHelper
from nicr_mt_scene_analysis.task_helper import SemanticTaskHelper
from nicr_mt_scene_analysis.task_helper import SceneTaskHelper
from nicr_mt_scene_analysis.task_helper import InstanceTaskHelper
from nicr_mt_scene_analysis.task_helper import PanopticTaskHelper
from nicr_mt_scene_analysis.task_helper import TaskHelperType
from .data import DatasetType
def get_task_helpers(
    args,
    dataset: DatasetType
) -> Tuple[TaskHelperType]:
    """Instantiate one task helper per task enabled in ``args``.

    Helpers are created in a fixed order: semantic, scene, normal,
    instance/orientation, and finally panoptic (if enabled).
    """
    helpers = []

    if 'semantic' in args.tasks:
        # class weights are derived from the dataset statistics
        semantic_class_weights = dataset.semantic_compute_class_weights(
            weight_mode=args.semantic_class_weighting,
            c=args.semantic_class_weighting_logarithmic_c,
            n_threads=4,
            debug=False
        )
        helpers.append(SemanticTaskHelper(
            n_classes=dataset.semantic_n_classes_without_void,
            class_weights=semantic_class_weights,
            label_smoothing=args.semantic_loss_label_smoothing,
            disable_multiscale_supervision=args.semantic_no_multiscale_supervision,
            examples_cmap=dataset.semantic_class_colors_without_void
        ))

    if 'scene' in args.tasks:
        helpers.append(SceneTaskHelper(
            n_classes=dataset.scene_n_classes_without_void,
            class_weights=None,
            label_smoothing=args.scene_loss_label_smoothing
        ))

    if 'normal' in args.tasks:
        helpers.append(NormalTaskHelper(
            loss_name=args.normal_loss,
            disable_multiscale_supervision=args.normal_no_multiscale_supervision
        ))

    if 'instance' in args.tasks or 'orientation' in args.tasks:
        # orientation estimation is handled by the instance task helper
        helpers.append(InstanceTaskHelper(
            semantic_n_classes=dataset.semantic_n_classes,
            semantic_classes_is_thing=dataset.config.semantic_label_list.classes_is_thing,
            loss_name_instance_center=args.instance_center_loss,
            disable_multiscale_supervision=args.instance_no_multiscale_supervision
        ))

    if args.enable_panoptic:
        helpers.append(PanopticTaskHelper(
            semantic_n_classes=dataset.semantic_n_classes,
            semantic_classes_is_thing=dataset.config.semantic_label_list.classes_is_thing,
            semantic_label_list=dataset.config.semantic_label_list,
        ))

    return tuple(helpers)
| 2,775 | 37.027397 | 94 | py |
null | EMSAFormer-main/emsaformer/visualization.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Soehnke Fischedick <soehnke-benedikt.fischedick@tu-ilmenau.de>
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
from typing import Any, Dict, Optional, Sequence, Union
import os
import warnings
import cv2
import numpy as np
import PIL
from nicr_mt_scene_analysis.data.preprocessing.clone import DEFAULT_CLONE_KEY
from nicr_mt_scene_analysis.data.preprocessing.resize import get_fullres_key
from nicr_mt_scene_analysis.types import BatchType
from nicr_mt_scene_analysis.visualization import InstanceColorGenerator
from nicr_mt_scene_analysis.visualization import PanopticColorGenerator
from nicr_mt_scene_analysis.visualization import visualize_heatmap
from nicr_mt_scene_analysis.visualization import visualize_semantic_pil
from nicr_mt_scene_analysis.visualization import visualize_instance_pil
from nicr_mt_scene_analysis.visualization import visualize_instance
from nicr_mt_scene_analysis.visualization import visualize_orientation
from nicr_mt_scene_analysis.visualization import visualize_instance_orientations
from nicr_mt_scene_analysis.visualization import visualize_instance_center
from nicr_mt_scene_analysis.visualization import visualize_instance_offset
from nicr_mt_scene_analysis.visualization import visualize_panoptic
from nicr_mt_scene_analysis.visualization import visualize_depth
from nicr_scene_analysis_datasets.dataset_base import DatasetConfig
from nicr_scene_analysis_datasets.utils.img import get_visual_distinct_colormap
# drawing style for instance-orientation overlays (dark background variant)
KWARGS_INSTANCE_ORIENTATION = {
    'thickness': 3,
    'font_size': 45,
    'bg_color': 0,
    'bg_color_font': 'black'
}
# drawing style for instance-orientation overlays (white background variant)
KWARGS_INSTANCE_ORIENTATION_WHITEBG = {
    'thickness': 3,
    'font_size': 45,
    'bg_color': 255,
    'bg_color_font': 'white'
}
# parameters passed to cv2.imwrite: maximum PNG compression
CV_WRITE_FLAGS = (cv2.IMWRITE_PNG_COMPRESSION, 9)
# module-level registry filled by `setup_shared_color_generators` so that all
# visualizations use consistent colors across batches/processes
_shared_color_generators = {
    'instance': None,
    'panoptic': None,
}
def setup_shared_color_generators(dataset_config: DatasetConfig) -> None:
    """Initialize the module-level shared color generators.

    Fills the ``_shared_color_generators`` registry with an instance color
    generator and a panoptic color generator derived from the dataset's
    semantic label list.
    """
    # instance color generator
    _shared_color_generators['instance'] = InstanceColorGenerator(
        cmap_without_void=get_visual_distinct_colormap(with_void=False)
    )

    # panoptic color generator
    semantic_labels = dataset_config.semantic_label_list
    _shared_color_generators['panoptic'] = PanopticColorGenerator(
        classes_colors=semantic_labels.colors,
        classes_is_thing=semantic_labels.classes_is_thing,
        max_instances=(1 << 16),    # we use 16 bit for shifting
        void_label=0
    )
def visualize(
    output_path: str,
    batch: BatchType,
    predictions: Dict[str, Any],
    dataset_config: DatasetConfig,
    use_shared_color_generators: bool = True,
) -> None:
    """Visualize ground truth and predictions and write them to disk.

    Writes ground-truth visualizations (including multiscale side outputs,
    if present) to ``<output_path>/gt`` and prediction visualizations to
    ``<output_path>/pred``.

    Parameters
    ----------
    output_path:
        Root directory for the written images.
    batch:
        (Ground-truth) batch dict as returned by the dataloader.
    predictions:
        Postprocessed network outputs.
    dataset_config:
        Dataset configuration providing label lists and colors.
    use_shared_color_generators:
        If True, use the module-level shared color generators (call
        ``setup_shared_color_generators`` beforehand) for consistent colors.
    """
    # color generators
    if use_shared_color_generators:
        instance_color_generator = _shared_color_generators['instance']
        panoptic_color_generator = _shared_color_generators['panoptic']
        if instance_color_generator is None or panoptic_color_generator is None:
            warnings.warn(
                "Shared color generators are not ready. Please call "
                "'setup_shared_color_generators' first."
            )
    else:
        instance_color_generator = None
        panoptic_color_generator = None
    # visualize ground truth
    gt_path = os.path.join(output_path, 'gt')
    batch_visualization = visualize_batches(
        batch=batch,
        dataset_config=dataset_config,
        instance_color_generator=instance_color_generator,
        panoptic_color_generator=panoptic_color_generator
    )
    save_visualization_result_dict(
        visualization_dict=batch_visualization,
        output_path=gt_path
    )
    # visualize ground truth for side outputs (downscaled images)
    additional_keys = ['_down_8', '_down_16', '_down_32']
    for key in additional_keys:
        if key not in batch:
            # we do not have side outputs
            continue
        # get batch dict for side output and copy identifier
        so_batch = batch[key]
        # bug fix: copy the identifiers from the parent batch (the previous
        # self-assignment `so_batch['identifier'] = so_batch['identifier']`
        # was a no-op)
        so_batch['identifier'] = batch['identifier']
        # visualize side output
        so_batch_visualization = visualize_batches(
            batch=so_batch,
            dataset_config=dataset_config,
            instance_color_generator=instance_color_generator,
            panoptic_color_generator=panoptic_color_generator
        )
        save_visualization_result_dict(
            visualization_dict=so_batch_visualization,
            output_path=os.path.join(gt_path, key)
        )
    # visualize predictions
    prediction_visualization = visualize_predictions(
        predictions=predictions,
        batch=batch,
        dataset_config=dataset_config,
        instance_color_generator=instance_color_generator,
        panoptic_color_generator=panoptic_color_generator
    )
    save_visualization_result_dict(
        visualization_dict=prediction_visualization,
        output_path=os.path.join(output_path, 'pred')
    )
def save_visualization_result_dict(
    visualization_dict: Dict[str, Any],
    output_path: str
) -> None:
    """Write a visualization result dict to disk.

    For each key (except 'identifier') one file per sample is written to
    ``<output_path>/<key>/<identifier>``: PIL images and numpy arrays are
    saved as PNG, everything else (e.g., scene labels) as text.

    Parameters
    ----------
    visualization_dict:
        Mapping from visualization name to a list with one entry per batch
        sample; must contain an 'identifier' key with per-sample path parts.
    output_path:
        Root directory for the written files.
    """
    os.makedirs(output_path, exist_ok=True)
    for key, value in visualization_dict.items():
        if key == 'identifier':
            continue
        for i, v in enumerate(value):
            out_filepath = os.path.join(
                output_path,
                key,
                *visualization_dict['identifier'][i]
            )
            os.makedirs(os.path.dirname(out_filepath), exist_ok=True)
            if isinstance(v, PIL.Image.Image):
                # value is a PIL image
                v.save(out_filepath + '.png')
            elif isinstance(v, np.ndarray):
                # value is an image given as numpy array, write with OpenCV
                if v.ndim == 3:
                    # bug fix: CV_WRITE_FLAGS are imwrite parameters and were
                    # wrongly passed as the `dst` argument of cv2.cvtColor;
                    # they now go to cv2.imwrite (for both gray and color)
                    v = cv2.cvtColor(v, cv2.COLOR_RGB2BGR)
                cv2.imwrite(out_filepath + '.png', v, CV_WRITE_FLAGS)
            else:
                # scene label
                with open(out_filepath + '.txt', 'w') as f:
                    f.write(str(v))
def blend_images(
    img1: np.ndarray,
    img2: np.ndarray,
    alpha: float = 0.2
) -> np.ndarray:
    """Alpha-composite ``img2`` onto ``img1`` where ``img2`` is nonzero.

    Pixels where ``img2`` is zero (all channels for 3-dim images) are copied
    unchanged from ``img1``; all other pixels are blended as
    ``(1-alpha)*img1 + alpha*img2`` and cast back to ``img1``'s dtype.

    Parameters
    ----------
    img1:
        Base image (2-dim or 3-dim array-like).
    img2:
        Overlay image with the same dtype and number of dimensions as
        ``img1``.
    alpha:
        Blend factor for ``img2`` in [0, 1].

    Returns
    -------
    np.ndarray
        New blended image; the inputs are not modified.
    """
    # ensure numpy arrays (also accepts array-likes such as PIL images);
    # removed a previously duplicated conversion block
    img1 = np.asanyarray(img1)
    img2 = np.asanyarray(img2)

    assert img1.dtype == img2.dtype
    assert img1.ndim == img2.ndim

    # blend only where img2 actually contains content
    if img2.ndim == 3:
        mask = np.any(img2 > 0, axis=2)
    else:
        mask = img2 > 0

    result = img1.copy()
    result[mask, ...] = (
        (1-alpha)*img1[mask, ...] + alpha*img2[mask, ...]
    ).astype(img1.dtype)

    return result
def _apply_mask(
img: np.ndarray,
mask: np.ndarray,
value: Union[np.ndarray, Sequence]
) -> None:
# apply mask inplace
img[mask, ...] = value
return img
def _copy_and_apply_mask(
img: np.ndarray,
mask: np.ndarray,
value: Union[np.ndarray, Sequence]
) -> np.ndarray:
# copy img and apply mask
return _apply_mask(img.copy(), mask, value)
def visualize_batches(
    batch: BatchType,
    dataset_config: DatasetConfig,
    instance_color_generator: Optional[InstanceColorGenerator] = None,
    panoptic_color_generator: Optional[PanopticColorGenerator] = None,
) -> Dict[str, Any]:
    """Create ground-truth visualizations for all samples of a batch.

    Depending on the keys present in ``batch``, visualizations for raw
    (unpreprocessed) inputs, semantic, instance (incl. center/offset
    targets), orientation, panoptic, and scene are created.

    Parameters
    ----------
    batch:
        Batch dict; tensor entries are assumed to be (batch, ...) shaped and
        are moved to CPU/numpy before visualization.
    dataset_config:
        Dataset configuration providing label lists and colors.
    instance_color_generator, panoptic_color_generator:
        Optional shared color generators for consistent colors across calls.

    Returns
    -------
    Dict[str, Any]
        Mapping from visualization name to a list with one image (or scene
        label string) per sample; 'identifier' is passed through so the
        result can be written with ``save_visualization_result_dict``.
    """
    # note, we use PIL whenever an image with palette is useful
    # semantic colors
    colors = dataset_config.semantic_label_list.colors_array    # with void
    # create dict storing the result
    result_dict = {}
    result_dict['identifier'] = batch['identifier']
    # dump inputs and targets without preprocessing ----------------------------
    if DEFAULT_CLONE_KEY in batch:
        batch_np = batch[DEFAULT_CLONE_KEY]
        # inputs
        if 'rgb' in batch_np:
            result_dict[f'{DEFAULT_CLONE_KEY}_rgb'] = list(batch_np['rgb'])
        if 'depth' in batch_np:
            result_dict[f'{DEFAULT_CLONE_KEY}_depth'] = [
                visualize_depth(img) for img in batch_np['depth']
            ]
        # semantic
        if 'semantic' in batch_np:
            result_dict[f'{DEFAULT_CLONE_KEY}_semantic'] = [
                visualize_semantic_pil(img, colors=colors)
                for img in batch_np['semantic']
            ]
        # instance
        if 'instance' in batch_np:
            result_dict[f'{DEFAULT_CLONE_KEY}_instance'] = [
                visualize_instance_pil(
                    instance_img=img,
                    shared_color_generator=instance_color_generator
                )
                for img in batch_np['instance']
            ]
        # orientation
        if 'orientations' in batch_np:
            result_dict[f'{DEFAULT_CLONE_KEY}_orientations'] = [
                visualize_instance_orientations(
                    *data,
                    shared_color_generator=instance_color_generator,
                    **KWARGS_INSTANCE_ORIENTATION
                ) for data in zip(batch_np['instance'],
                                  batch_np['orientations'])
            ]
            result_dict[f'{DEFAULT_CLONE_KEY}_orientations_white_bg'] = [
                visualize_instance_orientations(
                    *data,
                    shared_color_generator=instance_color_generator,
                    **KWARGS_INSTANCE_ORIENTATION_WHITEBG
                ) for data in zip(batch_np['instance'],
                                  batch_np['orientations'])
            ]
        # scene classification
        if 'scene' in batch_np:
            result_dict[f'{DEFAULT_CLONE_KEY}_scene'] = [
                dataset_config.scene_label_list[s].class_name
                for s in batch_np['scene']
            ]
    else:
        # we do not have the batch data without preprocessing
        batch_np = {}
    # semantic -----------------------------------------------------------------
    if 'semantic' in batch:
        # semantic may have changed due to mapping some classes to void
        result_dict[f'semantic'] = [
            visualize_semantic_pil(img, colors=colors)
            for img in batch['semantic'].cpu().numpy()
        ]
    # instance -----------------------------------------------------------------
    if 'instance' in batch:
        # instance may have changed due to selecting thing classes
        result_dict['instance'] = [
            visualize_instance_pil(
                instance_img=img,
                shared_color_generator=instance_color_generator
            )
            for img in batch['instance'].cpu().numpy()
        ]
        result_dict['instance_white_bg'] = [
            # use foreground mask to change background color to white
            _apply_mask(
                img=visualize_instance(
                    instance_img=img,
                    shared_color_generator=instance_color_generator
                ),
                mask=np.logical_not(fg),
                value=(255, 255, 255)
            )
            for img, fg in zip(batch['instance'].cpu().numpy(),
                               batch['instance_foreground'].cpu().numpy())
        ]
        result_dict['instance_center'] = [
            visualize_instance_center(center_img=img)
            for img in batch['instance_center'].cpu().numpy()
        ]
        result_dict['instance_offset'] = [
            # note: offsets are stored channels-first, hence the transpose
            visualize_instance_offset(
                offset_img=img.transpose(1, 2, 0),
                foreground_mask=fg
            )
            for img, fg in zip(batch['instance_offset'].cpu().numpy(),
                               batch['instance_foreground'].cpu().numpy())
        ]
    # orientation --------------------------------------------------------------
    if 'orientation' in batch:
        # instance orientation may have changed due to selecting thing classes
        # 2d dense orientation with black/white background
        result_dict['orientation'] = [
            # use foreground mask to change background color to black
            _apply_mask(
                img=visualize_orientation(o.transpose(1, 2, 0)),
                mask=np.logical_not(fg),
                value=(0, 0, 0)
            )
            for o, fg in zip(batch['orientation'].cpu().numpy(),
                             batch['orientation_foreground'].cpu().numpy())
        ]
        result_dict['orientation_white_bg'] = [
            # change background color to white
            _copy_and_apply_mask(
                img=o_img,
                mask=np.logical_not(fg),
                value=(255, 255, 255)
            )
            for o_img, fg in zip(result_dict['orientation'],
                                 batch['orientation_foreground'].cpu().numpy())
        ]
        # orientation with outline
        result_dict[f'orientations'] = [
            visualize_instance_orientations(
                *data,
                shared_color_generator=instance_color_generator,
                draw_outline=True,
                **KWARGS_INSTANCE_ORIENTATION
            )
            for data in zip(batch['instance'].cpu().numpy(),
                            batch['orientations_present'])
        ]
        result_dict[f'orientations_white_bg'] = [
            visualize_instance_orientations(
                *data,
                shared_color_generator=instance_color_generator,
                draw_outline=True,
                **KWARGS_INSTANCE_ORIENTATION_WHITEBG
            )
            for data in zip(batch['instance'].cpu().numpy(),
                            batch['orientations_present'])
        ]
    # panoptic -----------------------------------------------------------------
    if 'panoptic' in batch:
        sem_labels = dataset_config.semantic_label_list
        result_dict['panoptic'] = [
            visualize_panoptic(
                panoptic_img=img,
                semantic_classes_colors=sem_labels.colors,
                semantic_classes_is_thing=sem_labels.classes_is_thing,
                max_instances=1 << 16,
                void_label=0,
                shared_color_generator=panoptic_color_generator
            )
            for img in batch['panoptic'].cpu().numpy()
        ]
    # panoptic + orientation ---------------------------------------------------
    # panoptic image overlayed with orientation as text
    if 'panoptic' in batch and 'orientations_present' in batch:
        result_dict['panoptic_orientations'] = [
            _copy_and_apply_mask(
                img=panoptic_img,
                mask=visualize_instance_orientations(
                    instance_img=instance,
                    orientations=orientations,
                    shared_color_generator=instance_color_generator,
                    draw_outline=False,
                    thickness=3,
                    font_size=45,
                    bg_color=0,
                    bg_color_font='black'
                ).any(axis=-1),    # text mask
                value=(255, 255, 255)    # white text color
            )
            for panoptic_img, instance, orientations in zip(
                result_dict['panoptic'],
                batch['instance'].cpu().numpy(),
                batch['orientations_present']
            )
        ]
    return result_dict
def visualize_predictions(
    predictions: Dict[str, Any],
    batch: BatchType,
    dataset_config: DatasetConfig,
    instance_color_generator: Optional[InstanceColorGenerator] = None,
    panoptic_color_generator: Optional[PanopticColorGenerator] = None,
) -> Dict[str, Any]:
    """Create color visualizations for (postprocessed) network predictions.

    For every prediction key present in `predictions`, a list of
    visualization images (one entry per batch element) is created:
    semantic segmentation (class/score), instance outputs (segmentation,
    centers, offsets, dense orientation with several fore-/background
    maskings), instance orientations rendered as text, panoptic
    segmentation (label/score and its semantic/instance parts), combined
    panoptic+orientation images, and scene classification labels.
    Fullres variants are handled via `get_fullres_key` where available.

    Parameters
    ----------
    predictions : Dict[str, Any]
        (Postprocessed) model outputs; only keys that are present get
        visualized, so the function works for any task subset.
    batch : BatchType
        Input batch; used for gt instances/foreground masks and, if
        present, the rgb image for blending.
    dataset_config : DatasetConfig
        Provides the semantic/scene label lists (colors, thing classes,
        classes that use orientations).
    instance_color_generator : Optional[InstanceColorGenerator]
        Shared generator for consistent instance colors across images.
    panoptic_color_generator : Optional[PanopticColorGenerator]
        Shared generator for consistent panoptic colors across images.

    Returns
    -------
    Dict[str, Any]
        Mapping from visualization name to a list of images (one per
        batch element); also carries through the batch 'identifier'.
    """
    # note, we use PIL whenever an image with palette is useful
    # semantic colors and class indices with orientation
    colors = dataset_config.semantic_label_list.colors_array
    use_orientation_class_indices = np.where(
        dataset_config.semantic_label_list.classes_use_orientations
    )[0]
    # create dict for results
    result_dict = {}
    result_dict['identifier'] = batch['identifier']
    # semantic -----------------------------------------------------------------
    # -> predicted class
    key = 'semantic_segmentation_idx'
    if key in predictions:
        for k in (key, get_fullres_key(key)):  # plain output and fullres
            result_dict[k] = [
                # colors[1:]: skip the void color, predictions are without void
                visualize_semantic_pil(img, colors=colors[1:])
                for img in predictions[k].cpu().numpy()
            ]
    # -> predicted class score
    key = 'semantic_segmentation_score'
    if key in predictions:
        for k in (key, get_fullres_key(key)):  # plain output and fullres
            result_dict[k] = [
                visualize_heatmap(img, cmap='jet')
                for img in predictions[k].cpu().numpy()
            ]
    # instance -----------------------------------------------------------------
    # -> instance segmentation using gt foreground mask (dataset eval only)
    key = 'instance_segmentation_gt_foreground'
    if key in predictions:
        for k in (key, get_fullres_key(key)):  # plain output and fullres
            result_dict[k] = [
                visualize_instance_pil(
                    instance_img=img,
                    shared_color_generator=instance_color_generator
                )
                for img in predictions[k].cpu().numpy()
            ]
    # raw predictions of instance head (there are no fullres versions)
    # -> instance centers
    key = 'instance_centers'
    if key in predictions:
        result_dict[key] = [
            # img[0]: center heatmap is a single-channel prediction
            visualize_instance_center(center_img=img[0])
            for img in predictions[key].cpu().numpy()
        ]
    # -> instance offsets (and masked versions)
    key = 'instance_offsets'
    if key in predictions:
        # plain network output without any mask
        result_dict[key] = [
            # transpose: CHW -> HWC for visualization
            visualize_instance_offset(img.transpose(1, 2, 0))
            for img in predictions[key].cpu().numpy()
        ]
        # masked with gt foreground
        key_fg = 'instance_foreground'
        if key_fg in batch:
            result_dict[key+'_gt_foreground'] = [
                _copy_and_apply_mask(
                    img=img_offset,
                    mask=np.logical_not(fg),
                    value=(255, 255, 255)
                ) for img_offset, fg in zip(result_dict[key],
                                            batch[key_fg].cpu().numpy())
            ]
        # masked with predicted foreground for panoptic segmentation
        key_fg = 'panoptic_foreground_mask'
        if key_fg in predictions:
            result_dict[key+'_pred_foreground'] = [
                _copy_and_apply_mask(
                    img=img_offset,
                    mask=np.logical_not(fg),
                    value=(255, 255, 255)
                ) for img_offset, fg in zip(result_dict[key],
                                            predictions[key_fg].cpu().numpy())
            ]
    # (instance) orientation ---------------------------------------------------
    # -> 2d dense raw orientation with black/white background (there is no
    #    fullres version)
    key = 'instance_orientation'
    if key in predictions:
        # plain network output without any mask
        result_dict[key] = [
            visualize_orientation(img.transpose(1, 2, 0))
            for img in predictions[key].cpu().numpy()
        ]
        # masked with gt foreground
        key_fg = 'orientation_foreground'
        if key_fg in batch:
            result_dict[key+'_gt_foreground'] = [
                _copy_and_apply_mask(
                    img=img_o,
                    mask=np.logical_not(fg),
                    value=(0, 0, 0)
                )
                for img_o, fg in zip(
                    result_dict[key],
                    batch['orientation_foreground'].cpu().numpy()
                )
            ]
        # masked with gt foreground and white bg
        key_fg = 'orientation_foreground'
        if key_fg in batch:
            result_dict[key+'_gt_foreground_white_bg'] = [
                _copy_and_apply_mask(
                    img=img_o,
                    mask=np.logical_not(fg),
                    value=(255, 255, 255)
                )
                for img_o, fg in zip(
                    result_dict[key],
                    batch['orientation_foreground'].cpu().numpy()
                )
            ]
        # masked with predicted foreground for panoptic
        key_semantic = 'panoptic_segmentation_deeplab_semantic_idx'
        if key_semantic in predictions:
            # foreground = pixels predicted as a class that uses orientations
            fg_masks = [
                np.isin(sem, use_orientation_class_indices)    # both with void
                for sem in predictions[key_semantic].cpu().numpy()
            ]
            # black bg
            result_dict[key+'_pred_foreground'] = [
                _copy_and_apply_mask(
                    img=img_o,
                    mask=np.logical_not(fg),
                    value=(0, 0, 0)
                )
                for img_o, fg in zip(result_dict[key], fg_masks)
            ]
            # white bg
            result_dict[key+'_pred_foreground_white_bg'] = [
                _copy_and_apply_mask(
                    img=img_o,
                    mask=np.logical_not(fg),
                    value=(255, 255, 255)
                )
                for img_o, fg in zip(result_dict[key], fg_masks)
            ]
    # orientations with outline
    # -> predicted orientations with gt instances and gt foreground (dataset
    #    eval only, there is no fullres version)
    key = 'orientations_gt_instance_gt_orientation_foreground'
    if key in predictions:
        result_dict[key] = [
            visualize_instance_orientations(
                *data,
                shared_color_generator=instance_color_generator,
                draw_outline=True,
                **KWARGS_INSTANCE_ORIENTATION
            )
            for data in zip(
                batch['instance'].cpu().numpy(),
                predictions['orientations_gt_instance_gt_orientation_foreground']
            )
        ]
        result_dict[key+'_white_bg'] = [
            visualize_instance_orientations(
                *data,
                shared_color_generator=instance_color_generator,
                draw_outline=True,
                **KWARGS_INSTANCE_ORIENTATION_WHITEBG
            )
            for data in zip(
                batch['instance'].cpu().numpy(),
                predictions['orientations_gt_instance_gt_orientation_foreground']
            )
        ]
    # -> predicted orientations with panoptic instances
    key = 'orientations_panoptic_segmentation_deeplab_instance'
    if key in predictions:
        key_instance = 'panoptic_segmentation_deeplab_instance_idx'
        key_semantic = 'panoptic_segmentation_deeplab_semantic_idx'
        # visualize for both plain output and fullres
        for k_r, k_i, k_s in zip(
            (key, get_fullres_key(key)),
            (key_instance, get_fullres_key(key_instance)),
            (key_semantic, get_fullres_key(key_semantic))
        ):
            # get foreground masks and instance images
            fg_masks = np.isin(predictions[k_s].cpu().numpy(),
                               use_orientation_class_indices)    # both with void
            # NOTE(review): tensor.cpu().numpy() may share memory with the
            # tensor; the in-place zeroing below could then also modify
            # predictions[k_i] -- confirm this is intended
            instance_imgs = predictions[k_i].cpu().numpy()
            instance_imgs[np.logical_not(fg_masks)] = 0
            # black bg
            result_dict[k_r] = [
                visualize_instance_orientations(
                    *data,
                    shared_color_generator=instance_color_generator,
                    draw_outline=True,
                    **KWARGS_INSTANCE_ORIENTATION
                )
                for data in zip(instance_imgs, predictions[key])
            ]
            # white bg
            result_dict[k_r+'_white_bg'] = [
                visualize_instance_orientations(
                    *data,
                    shared_color_generator=instance_color_generator,
                    draw_outline=True,
                    **KWARGS_INSTANCE_ORIENTATION_WHITEBG
                )
                for data in zip(instance_imgs, predictions[key])
            ]
    # panoptic segmentation ----------------------------------------------------
    sem_labels = dataset_config.semantic_label_list
    # -> predicted label
    key = 'panoptic_segmentation_deeplab'
    if key in predictions:
        for k in (key, get_fullres_key(key)):  # plain output and fullres
            result_dict[k] = [
                visualize_panoptic(
                    panoptic_img=img,
                    semantic_classes_colors=sem_labels.colors,
                    semantic_classes_is_thing=sem_labels.classes_is_thing,
                    max_instances=(1 << 16),
                    void_label=0,
                    shared_color_generator=panoptic_color_generator
                )
                for img in predictions[k].cpu().numpy()
            ]
    # -> predicted score
    key = 'panoptic_segmentation_deeplab_panoptic_score'
    if key in predictions:
        for k in (key, get_fullres_key(key)):  # plain output and fullres
            result_dict[k] = [
                visualize_heatmap(img, cmap='jet')
                for img in predictions[k].cpu().numpy()
            ]
    # -> predicted semantic label
    key = 'panoptic_segmentation_deeplab_semantic_idx'
    if key in predictions:
        for k in (key, get_fullres_key(key)):  # plain output and fullres
            result_dict[k] = [
                visualize_semantic_pil(img, sem_labels.colors_array)
                for img in predictions[k].cpu().numpy()
            ]
    # -> predicted semantic score
    key = 'panoptic_segmentation_deeplab_semantic_score'
    if key in predictions:
        for k in (key, get_fullres_key(key)):  # plain output and fullres
            result_dict[k] = [
                visualize_heatmap(img, cmap='jet')
                for img in predictions[k].cpu().numpy()
            ]
    # -> predicted instance label
    key = 'panoptic_segmentation_deeplab_instance_idx'
    if key in predictions:
        for k in (key, get_fullres_key(key)):  # plain output and fullres
            result_dict[k] = [
                visualize_instance_pil(
                    instance_img=img,
                    shared_color_generator=instance_color_generator
                )
                for img in predictions[k].cpu().numpy()
            ]
    # -> predicted instance score
    key = 'panoptic_segmentation_deeplab_instance_score'
    if key in predictions:
        for k in (key, get_fullres_key(key)):  # plain output and fullres
            result_dict[k] = [
                visualize_heatmap(img, cmap='jet')
                for img in predictions[k].cpu().numpy()
            ]
    # everything combined ------------------------------------------------------
    # panoptic segmentation and orientations and rgb overlayed with both
    # (only fullres!)
    if all((
        'panoptic_segmentation_deeplab' in predictions,
        'orientations_panoptic_segmentation_deeplab_instance' in predictions,
    )):
        key_semantic = get_fullres_key('panoptic_segmentation_deeplab_semantic_idx')
        key_instance = get_fullres_key('panoptic_segmentation_deeplab_instance_idx')
        # create orientation images with text but without outline
        fg_masks = np.isin(predictions[key_semantic].cpu().numpy(),
                           use_orientation_class_indices)    # both with void
        # NOTE(review): same potential in-place modification of
        # predictions[key_instance] as above (shared numpy memory)
        instance_imgs = predictions[key_instance].cpu().numpy()
        instance_imgs[np.logical_not(fg_masks)] = 0
        orientation_imgs = [
            visualize_instance_orientations(
                *data,
                shared_color_generator=instance_color_generator,
                draw_outline=False,
                **KWARGS_INSTANCE_ORIENTATION
            )
            for data in zip(
                instance_imgs,
                predictions['orientations_panoptic_segmentation_deeplab_instance']
            )
        ]
        result_dict[get_fullres_key('panoptic_orientations')] = [
            _copy_and_apply_mask(
                img=panoptic_img,
                mask=orientation_img.any(axis=-1),    # text mask
                value=(255, 255, 255)    # white text color
            )
            for panoptic_img, orientation_img in zip(
                result_dict[get_fullres_key('panoptic_segmentation_deeplab')],
                orientation_imgs
            )
        ]
        # save the mask for the orientation
        result_dict['panoptic_orientations_mask'] = [
            orientation_img
            for orientation_img in orientation_imgs
        ]
        if DEFAULT_CLONE_KEY in batch:
            result_dict[get_fullres_key('panoptic_orientations_rgb')] = [
                blend_images(
                    img1=panoptic_orientation, img2=rgb, alpha=0.5
                )
                for panoptic_orientation, rgb in zip(
                    result_dict[get_fullres_key('panoptic_orientations')],
                    batch[DEFAULT_CLONE_KEY]['rgb']
                )
            ]
    # scene classification -----------------------------------------------------
    if 'scene_class_idx' in predictions:
        result_dict['scene'] = [
            dataset_config.scene_label_list_without_void[s].class_name
            for s in predictions['scene_class_idx']
        ]
    return result_dict
| 29,466 | 36.39467 | 84 | py |
null | EMSAFormer-main/emsaformer/weights.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
import torch
from nicr_scene_analysis_datasets import ScanNet
def load_weights(args, model, state_dict, verbose=True):
    """Load a (possibly deviating) checkpoint state dict into `model`.

    This function accounts for:
    - renamed keys, e.g., fused_encoders.* -> encoder.*
    - extra keys, e.g., a checkpoint trained with deviating tasks
    - a different number of scene classes (pretrained weights are skipped)
    - specific dataset/pretraining combinations for the semantic head,
      e.g., SUNRGB-D (37 classes) <-> NYUv2/Hypersim/ScanNet (40 classes)
      and ScanNet benchmark mappings (40 -> 20, 549 -> 200 classes)

    Parameters
    ----------
    args :
        Parsed command-line arguments; `tasks`, `dataset`,
        `validation_scannet_benchmark_mode`, and
        `scannet_semantic_n_classes` are used.
    model : torch.nn.Module
        Model to load the (adapted) weights into (strict loading).
    state_dict : dict
        Checkpoint state dict to load; adapted before loading.
    verbose : bool
        If True, print information about every adaptation performed.
    """
    print_ = print if verbose else lambda *a, **k: None

    # get current model state dict
    model_state_dict = model.state_dict()

    # the encoder key was renamed from fused_encoders.* to encoder.*
    state_dict = {
        k.replace('fused_encoders.', 'encoder.'): v
        for k, v in state_dict.items()
    }

    if len(state_dict) != len(model_state_dict):
        # loaded state dict is different, run a deeper analysis
        # this can happen if a model trained with deviating tasks is loaded
        # (e.g., pre-training on hypersim with normals)
        # we try to remove the extra keys
        for key in list(state_dict.keys()):
            if key not in model_state_dict:
                print_(f"Removing '{key}' from loaded state dict as the "
                       "current model does not contain such key.")
                _ = state_dict.pop(key)

    # scene classes may differ, e.g., when using pretrained weights on
    # Hypersim for a subsequent training, we skip loading these pretrained
    # weights
    for key in list(state_dict.keys()):
        if all(n in key for n in ('scene_decoder', 'head')):
            # note: `state_dict` holds the pretrained weights, while
            # `model_state_dict` reflects the current model (the original
            # code had both assignments swapped, which led to a misleading
            # printed message; the comparison itself was not affected)
            n_classes_pretraining = state_dict[key].shape[0]
            n_classes_current = model_state_dict[key].shape[0]
            if n_classes_current != n_classes_pretraining:
                print_(f"Skipping '{key}' as the number of scene classes "
                       f"differs {n_classes_current} (current) vs. "
                       f"{n_classes_pretraining} (pretraining).")
                # we simply use the random weights of the current model
                state_dict[key] = model_state_dict[key]

    if 'semantic' in args.tasks:
        if args.dataset.startswith('nyuv2'):    # first (main) dataset
            # nyuv2 uses 40 semantic classes, when using a checkpoint
            # pretrained on sunrgbd with 37, we can still copy the weights
            # for 37 classes
            for key, weight in list(state_dict.items()):
                if all(n in key for n in ('semantic_decoder', 'head', 'conv')):
                    if weight.shape[0] == 37:
                        weight_sunrgbd = weight.clone()
                        # we simply copy the random weights of the current
                        # model first
                        state_dict[key] = model_state_dict[key]
                        # and then overwrite the first 37 channels
                        print_(f"Reusing 37/40 channels in '{key}'.")
                        state_dict[key][:37, ...] = weight_sunrgbd

        if args.dataset.startswith('sunrgbd'):    # first (main) dataset
            # sunrgbd has only 37 semantic classes, however these classes
            # match the first 37 classes of nyuv2, scannet and hypersim
            # (40 classes), so, if we detect weights with 40 output
            # channels (filter and bias) in a semantic head, we keep the
            # first 37 channels
            for key, weight in list(state_dict.items()):
                if all(n in key for n in ('semantic_decoder', 'head', 'conv')):
                    if weight.shape[0] == 40:
                        print_(f"Removing last 3 channels in '{key}'.")
                        state_dict[key] = weight[:37, ...]

        elif args.dataset.startswith('scannet'):    # first (main) dataset
            # check if training (e.g., pretraining on hypersim) was done
            # with more classes, we can handle two cases 40 -> 20 and
            # 549 -> 200
            if not args.validation_scannet_benchmark_mode:
                # otherwise, we already would have 20 / 200 classes

                # get mapping and mask
                if 20 == args.scannet_semantic_n_classes:
                    mapping = ScanNet.SEMANTIC_CLASSES_40_MAPPING_TO_BENCHMARK
                else:
                    mapping = ScanNet.SEMANTIC_CLASSES_549_MAPPING_TO_BENCHMARK200
                mask = torch.tensor([
                    c_benchmark != 0    # class is not ignored
                    for c_data, c_benchmark in mapping.items()
                    if c_data != 0    # skip void class
                ], dtype=torch.bool)

                # check weights of semantic heads and remove ignored classes
                for key, weight in list(state_dict.items()):
                    if all(n in key for n in ('semantic_decoder', 'head',
                                              'conv')):
                        if weight.shape[0] == mask.shape[0]:
                            print_("Removing channels for ignored classes "
                                   f"in '{key}'.")
                            state_dict[key] = weight[mask, ...]

        # remove all semantic weights if shape still does not match,
        # happens, e.g., when using pretrained weights from scannet with
        # 20 classes for sunrbgd or nyuv2
        for key, weight in list(state_dict.items()):
            if all(n in key for n in ('semantic_decoder', 'head', 'conv')):
                if weight.shape != model_state_dict[key].shape:
                    # note: fixed missing space ("as the") in this message
                    print_(f"Removing '{key}' from loaded state dict as "
                           f"the shape does not match: {weight.shape} "
                           f"vs. {model_state_dict[key].shape}.")
                    # we simply use the random weights of the current
                    # model
                    state_dict[key] = model_state_dict[key]

    model.load_state_dict(state_dict, strict=True)
| 5,946 | 47.349593 | 82 | py |
null | EMSAFormer-main/emsaformer/tests/__init__.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
Note that this file is import for test discovery.
"""
| 147 | 20.142857 | 63 | py |
null | EMSAFormer-main/emsaformer/tests/conftest.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
import os
import shutil
import pytest
def pytest_addoption(parser):
    """Register additional command-line flags for the test session."""
    # all custom flags are simple boolean switches that default to off
    for flag in ('--keep-files', '--force-onnx-export', '--show-results'):
        parser.addoption(flag, action='store_true', default=False)
def pytest_configure(config):
    """Translate enabled command-line flags into environment variables.

    The environment variables are consumed elsewhere,
    see: nicr_mt_scene_analysis/testing/__init__.py
    """
    flag_to_env_var = {
        '--force-onnx-export': 'EXPORT_ONNX_MODELS',
        '--show-results': 'SHOW_RESULTS',
    }
    for flag, env_var in flag_to_env_var.items():
        if config.getoption(flag):
            os.environ[env_var] = str(True)
@pytest.fixture(scope='session')
def keep_files(request):
    """Session-scoped value of the '--keep-files' flag (see pytest_addoption)."""
    return request.config.getoption('--keep-files')
@pytest.fixture(scope='session')
def tmp_path(tmpdir_factory, keep_files):
    """Session-scoped temporary directory for test artifacts.

    Yields a fresh directory created via pytest's `tmpdir_factory` and
    removes it on teardown unless '--keep-files' was given.
    """
    # see: https://docs.pytest.org/en/6.2.x/reference.html#tmpdir-factory
    # use '--basetemp' to change default path
    # -> BE AWARE <- --basetemp is cleared on start
    path = tmpdir_factory.mktemp('emsaformer')
    print(f"\nWriting temporary files to '{path}'")
    if keep_files:
        # note: removed a stray trailing quote from the original message
        print("Files are kept and require to be deleted manually.")
    yield path

    # teardown (delete if it was created)
    if os.path.exists(path) and not keep_files:
        shutil.rmtree(path)
| 1,356 | 28.5 | 79 | py |
null | EMSAFormer-main/emsaformer/tests/test_emsanet_model_weights.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Mona Koehler <mona.koehler@tu-ilmenau.de>
"""
from nicr_mt_scene_analysis.testing.onnx import export_onnx_model
import onnx
import torch
from emsaformer.args import ArgParserEMSAFormer
from emsaformer.data import get_datahelper
from emsaformer.model import EMSAFormer
def test_weights():
    """
    Test that all weights are part of the state dict and exported correctly
    to ONNX: two models built from the same state dict must produce ONNX
    files with byte-identical initializers.
    """
    # args and data needed for building a model
    parser = ArgParserEMSAFormer()
    args = parser.parse_args('', verbose=False)
    args.no_pretrained_backbone = True
    args.dropout_p = 0
    args.validation_batch_size = 2
    args.input_modalities = ['rgb', 'depth']
    args.encoder_normalization = 'bn'
    args.encoder_decoder_fusion = 'add-rgb'
    args.semantic_decoder = 'emsanet'
    args.semantic_decoder_n_channels = (512, 256, 128)
    args.semantic_decoder_upsampling = 'learned-3x3-zeropad'
    args.semantic_encoder_decoder_fusion = 'add-rgb'
    data = get_datahelper(args)
    dataset_config = data.dataset_config

    # build model and extract weights
    model_1 = EMSAFormer(args, dataset_config=dataset_config)
    model_1.eval()
    state_dict_1 = model_1.state_dict()

    # build second model and load weights from first model
    model_2 = EMSAFormer(args, dataset_config=dataset_config)
    model_2.load_state_dict(state_dict_1)
    model_2.eval()

    # prepare input for onnx export
    batch_size = 3
    input_shape = (480, 640)
    batch = {}
    if 'rgb' in args.input_modalities:
        batch['rgb'] = torch.randn((batch_size, 3)+input_shape)
    if 'depth' in args.input_modalities:
        batch['depth'] = torch.randn((batch_size, 1)+input_shape)
    if 'rgbd' in args.input_modalities:
        batch['rgb'] = torch.randn((batch_size, 3)+input_shape)
        batch['depth'] = torch.randn((batch_size, 1)+input_shape)
    x = (batch, {'do_postprocessing': False})

    # export both models to onnx
    export_onnx_model('model_1.onnx', model_1, x, force_export=True)
    export_onnx_model('model_2.onnx', model_2, x, force_export=True)

    # load onnx models again
    onnx_model_1 = onnx.load('model_1.onnx')
    onnx_model_2 = onnx.load('model_2.onnx')

    # test if weights for each layer are the same between first and second
    # model; note: the original code used a bare `raise` here, which fails
    # with "No active exception to re-raise" instead of a meaningful
    # assertion error
    for l1, l2 in zip(onnx_model_1.graph.initializer,
                      onnx_model_2.graph.initializer):
        assert l1.raw_data == l2.raw_data, \
            f"Exported weights differ for initializer '{l1.name}'"
| 2,485 | 33.054795 | 80 | py |
null | EMSAFormer-main/emsaformer/tests/test_interface_dataset.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Soehnke Fischedick <soehnke-benedikt.fischedick@tu-ilmenau.de>
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
import pytest
import time
from nicr_scene_analysis_datasets.utils.testing import DATASET_PATH_DICT
from emsaformer.args import ArgParserEMSAFormer
from emsaformer.data import get_datahelper
from emsaformer.preprocessing import get_preprocessor
from emsaformer.data import KNOWN_DATASETS
@pytest.mark.parametrize('dataset', KNOWN_DATASETS)
def test_data_helper(dataset):
    """Smoke test for the data helper: draw a few batches per split."""
    # 'coco' is used with rgb only, all other datasets with rgb + depth
    modalities = ('rgb',) if 'coco' == dataset else ('rgb', 'depth')
    args = ArgParserEMSAFormer().parse_args(
        ['--dataset', dataset,
         '--dataset-path', DATASET_PATH_DICT[dataset],
         '--input-modalities', *modalities],
        verbose=False)
    data = get_datahelper(args)

    # iterate up to 11 batches of the train loader and of the first
    # validation loader; every batch must exist
    for loader in (data.train_dataloader, data.valid_dataloaders[0]):
        for i, batch in enumerate(loader):
            assert batch is not None
            if i == 10:
                break
def test_data_caching():
    """Test dataset caching.

    Iterating the cached validation dataloader must become faster once the
    cache of all workers is filled, while the loaded data must stay
    identical (checked with a simple checksum over the rgb images).
    """
    dataset = 'nyuv2'
    dataset_path = DATASET_PATH_DICT[dataset]
    n_workers = 4    # note: fixed typo, was 'n_worksers'

    parser = ArgParserEMSAFormer()
    args = parser.parse_args('', verbose=False)
    # replace some args
    args.dataset = dataset
    args.dataset_path = dataset_path
    args.n_workers = n_workers
    args.cache_dataset = True

    data = get_datahelper(args)
    data.set_valid_preprocessor(
        get_preprocessor(
            args,
            dataset=data.datasets_valid[0],
            phase='test',
            multiscale_downscales=None
        )
    )

    # iteration should be faster in later runs (after cache of all workers
    # is ready)
    simple_sums = []
    durations = []
    data_loader = data.valid_dataloaders[0]
    for _ in range(4*n_workers):
        start = time.time()
        # renamed from 'sum', which shadowed the builtin of the same name
        rgb_sum = 0
        for sample in data_loader:
            for rgb in sample['rgb']:
                rgb_sum += rgb.numpy().sum()
        end = time.time()

        simple_sums.append(rgb_sum)
        durations.append(end-start)
        # print(simple_sums[-1], durations[-1])

    # note that all workers have to cache the dataset
    assert all(d < durations[0] for d in durations[-2*n_workers:])
    assert all(s == simple_sums[0] for s in simple_sums[-2*n_workers:])
| 2,551 | 27.355556 | 78 | py |
null | EMSAFormer-main/emsaformer/tests/test_interface_decoders.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Soehnke Fischedick <soehnke-benedikt.fischedick@tu-ilmenau.de>
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
import os
import pytest
import torch
from nicr_mt_scene_analysis.testing.onnx import export_onnx_model
from emsaformer.args import ArgParserEMSAFormer
from emsaformer.decoder import get_decoders
class ForwardHelper(torch.nn.Module):
    """Wrapper module that applies every given decoder to the same inputs.

    Outputs are returned as a list in the iteration order of the given
    decoder mapping.
    """

    def __init__(self, decoders):
        super().__init__()
        self.decoders = decoders

    def forward(self, x, skips, batch, do_postprocessing=False):
        # run each decoder on the identical inputs and collect the results
        return [
            dec(x, skips, batch, do_postprocessing=do_postprocessing)
            for dec in self.decoders.values()
        ]
def decoders_test(args, do_postprocessing, training, tmp_path):
    """Build all task decoders for `args`, run them on random inputs, check
    basic output properties, and (if feasible) export them to ONNX.

    Parameters:
        args: parsed EMSAFormer arguments; `args.tasks` and
            `args.enable_panoptic` determine the expected number of decoders.
        do_postprocessing: whether the decoders should postprocess outputs.
        training: if False, the model is switched to eval mode.
        tmp_path: directory for the exported ONNX file.
    """
    # create decoders
    debug = True
    decoders = get_decoders(
        args,
        n_channels_in=512,
        downsampling_in=32,
        semantic_n_blocks=3,
        instance_n_blocks=2,
        normal_n_blocks=1,
        scene_n_channels_in=512//2,
        fusion_n_channels=(256, 128, 64),
        debug=debug
    )
    input_h, input_w = (480, 640)
    downsampling_in = 32
    # derive the expected number of decoder modules from the task list
    n_decoders = len(args.tasks)
    if 'orientation' in args.tasks:
        # orientation task is handled in instance decoder
        n_decoders -= 1
    if args.enable_panoptic:
        # panoptic task fuses semantic and instance
        n_decoders -= 1
    assert len(decoders) == n_decoders

    # create model containing all decoders
    model = ForwardHelper(decoders)
    if not training:
        model.eval()

    # set up inputs for decoders
    x = (
        # output of context module
        torch.rand(3, 512, input_h//downsampling_in, input_w//downsampling_in),
        # at least one context branch (from GAP)
        (torch.rand(3, 512//2, 1, 1),)
    )
    # strings are used to prevent casting keys from int to tensor(int)
    # while exporting to ONNX
    skips = {
        '16': {'rgb': torch.rand(3, 256, input_h//16, input_w//16),
               'depth': torch.rand(3, 256, input_h//16, input_w//16)},
        '8': {'rgb': torch.rand(3, 128, input_h//8, input_w//8),
              'depth': torch.rand(3, 128, input_h//8, input_w//8)},
        '4': {'rgb': torch.rand(3, 64, input_h//4, input_w//4),
              'depth': torch.rand(3, 64, input_h//4, input_w//4)},
    }
    batch = {}
    if 'instance' in args.tasks:
        # pure instance segmentation task requires gt foreground mask
        batch['instance_foreground'] = torch.ones((3, input_h, input_w),
                                                  dtype=torch.bool)
    if 'orientation' in args.tasks:
        # orientation estimation requires a gt segmentation and foreground mask
        batch['instance'] = torch.ones((3, input_h, input_w), dtype=torch.bool)
        batch['orientation_foreground'] = torch.ones((3, input_h, input_w),
                                                     dtype=torch.bool)
    if not training and do_postprocessing:
        # for inference postprocessing, inputs in full resolution are required
        batch['rgb_fullres'] = torch.randn((3, 3, input_h, input_w))
        batch['depth_fullres'] = torch.randn((3, 1, input_h, input_w))

    # apply decoders
    outputs = model(x, skips, batch,
                    do_postprocessing=do_postprocessing)

    # perform some basic checks
    assert len(outputs) == n_decoders
    if not do_postprocessing:
        # output of decoder(s) is returned: tuple(outputs, side_outputs)
        for output in outputs:
            assert isinstance(output, tuple)
            assert len(output) == 2
    else:
        # postprocessed output of decoders is returned: dict
        for output in outputs:
            assert isinstance(output, dict)
            assert len(output)

    # export decoders to ONNX
    if not training and do_postprocessing:
        # stop here: inference postprocessing is challenging
        return

    # determine filename and filepath (encodes tasks and test setting)
    tasks_str = '+'.join(args.tasks)
    if args.enable_panoptic:
        tasks_str += '+panoptic'
    filename = f'decoders_{tasks_str}'
    filename += f'__train_{training}'
    filename += f'__post_{do_postprocessing}'
    filename += '.onnx'
    filepath = os.path.join(tmp_path, filename)

    # export
    x = (x, skips, batch, {'do_postprocessing': do_postprocessing})
    export_onnx_model(filepath, model, x)
@pytest.mark.parametrize('enable_panoptic', (False, True))
@pytest.mark.parametrize('do_postprocessing', (False, True))
@pytest.mark.parametrize('training', (False, True))
def test_decoders_full_mt_emsanet(enable_panoptic, do_postprocessing, training,
                                  tmp_path):
    """Test EMSANet decoders in full mt setting"""
    # note: the original code first called `parser.parse_args('', ...)` and
    # immediately discarded the result; the redundant call was removed
    parser = ArgParserEMSAFormer()
    args = parser.parse_args([
        # semantic
        '--semantic-decoder', 'emsanet',
        '--semantic-decoder-n-channels', '512', '256', '128',
        '--semantic-decoder-upsampling', 'learned-3x3-zeropad',
        '--semantic-encoder-decoder-fusion', 'add-rgb',
        # instance
        '--instance-decoder', 'emsanet',
        '--instance-decoder-n-channels', '512', '256', '128',
        '--instance-decoder-upsampling', 'learned-3x3-zeropad',
        '--instance-encoder-decoder-fusion', 'add-depth',  # test depth
        # normal
        '--normal-decoder', 'emsanet',
        '--normal-decoder-n-channels', '512', '256', '128',
        '--normal-decoder-upsampling', 'learned-3x3-zeropad',
        '--normal-encoder-decoder-fusion', 'add-rgb',
    ], verbose=False)
    args.tasks = ('semantic',
                  'instance', 'orientation',
                  'normal',
                  'scene')
    args.enable_panoptic = enable_panoptic

    decoders_test(args,
                  do_postprocessing=do_postprocessing,
                  training=training,
                  tmp_path=tmp_path)
@pytest.mark.parametrize('enable_panoptic', (False, True))
@pytest.mark.parametrize('do_postprocessing', (False, True))
@pytest.mark.parametrize('training', (False, True))
def test_decoders_full_mt_segformermlp(enable_panoptic, do_postprocessing,
                                       training, tmp_path):
    """Test SegFormerMLP decoders in full mt setting"""
    # all three dense decoders share the same SegFormerMLP configuration;
    # only the instance decoder fuses the depth branch (test depth)
    cli = []
    for task in ('semantic', 'instance', 'normal'):
        fusion = 'select-depth' if task == 'instance' else 'select-rgb'
        cli += [
            f'--{task}-decoder', 'segformermlp',
            f'--{task}-decoder-n-channels', '256', '128', '64', '64',
            f'--{task}-decoder-upsampling', 'bilinear',
            f'--{task}-encoder-decoder-fusion', fusion,
        ]
    args = ArgParserEMSAFormer().parse_args(cli, verbose=False)
    args.tasks = ('semantic', 'instance', 'orientation', 'normal', 'scene')
    args.enable_panoptic = enable_panoptic

    decoders_test(args,
                  do_postprocessing=do_postprocessing,
                  training=training,
                  tmp_path=tmp_path)
@pytest.mark.parametrize('do_postprocessing', (False, True))
@pytest.mark.parametrize('training', (False, True))
def test_decoders_panoptic_mixed(do_postprocessing, training, tmp_path):
    """Test decoders in panoptic setting with mixed decoder types"""
    semantic_cli = [
        '--semantic-decoder', 'segformermlp',
        '--semantic-decoder-n-channels', '256', '128', '64', '64',
        '--semantic-decoder-upsampling', 'bilinear',
        '--semantic-encoder-decoder-fusion', 'select-depth',  # test depth
    ]
    # instance decoder uses the default EMSANet configuration
    instance_cli = [
        '--instance-decoder', 'emsanet',
        '--instance-decoder-n-channels', '512', '256', '128',
        '--instance-decoder-upsampling', 'learned-3x3-zeropad',
        '--instance-encoder-decoder-fusion', 'add-rgb',
    ]
    common_cli = ['--tasks', 'semantic', 'instance', '--enable-panoptic']

    args = ArgParserEMSAFormer().parse_args(
        semantic_cli + instance_cli + common_cli, verbose=False)

    decoders_test(args,
                  do_postprocessing=do_postprocessing,
                  training=training,
                  tmp_path=tmp_path)
| 8,676 | 36.240343 | 79 | py |
null | EMSAFormer-main/emsaformer/tests/test_interface_emsaformer_model.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
.. codeauthor:: Soehnke Fischedick <soehnke-benedikt.fischedick@tu-ilmenau.de>
"""
import os
from nicr_mt_scene_analysis.testing.onnx import export_onnx_model
import pytest
import torch
from emsaformer.args import ArgParserEMSAFormer
from emsaformer.data import get_dataset
from emsaformer.model import EMSAFormer
def model_test(tasks,
               panoptic_enabled,
               modalities,
               backbone,
               do_postprocessing,
               training,
               tmp_path,
               additional_args=None):
    """Build an EMSAFormer model for the given setting, run it on random
    inputs, check basic output properties, and (if feasible) export it to
    ONNX.

    Parameters:
        tasks: tasks to enable (e.g., ('semantic', 'instance')).
        panoptic_enabled: whether to enable the panoptic task.
        modalities: input modalities (e.g., ('rgbd',) or ('rgb', 'depth')).
        backbone: encoder backbone name.
        do_postprocessing: whether the model should postprocess outputs.
        training: if False, the model is switched to eval mode.
        tmp_path: directory for the exported ONNX file.
        additional_args: optional extra command-line arguments; note: the
            original signature used a mutable default ([]), replaced by the
            None sentinel (behavior is unchanged for all callers).
    """
    additional_args = additional_args or []

    parser = ArgParserEMSAFormer()
    args = parser.parse_args([
        '--input-modalities', *modalities,
        '--tasks', *tasks,
        '--instance-encoder-decoder-fusion', 'swin-add-rgb' if len(modalities) > 1 else 'swin-add',
        '--semantic-encoder-decoder-fusion', 'swin-select-rgb' if len(modalities) > 1 else 'swin-select',
        '--normal-encoder-decoder-fusion', 'swin-add-rgb' if len(modalities) > 1 else 'swin-add',
        '--rgbd-encoder-backbone', backbone,
        '--rgb-encoder-backbone', backbone,
        '--depth-encoder-backbone', backbone,
        '--no-pretrained-backbone',
        '--dataset', 'nyuv2',
        *additional_args
    ], verbose=False)
    # replace some args
    args.enable_panoptic = panoptic_enabled

    dataset = get_dataset(args, split='train')
    dataset_config = dataset.config

    # create model
    model = EMSAFormer(args, dataset_config=dataset_config)
    if not training:
        model.eval()

    # determine input
    batch_size = 3
    input_shape = (480, 640)
    batch = {}
    if 'rgb' in args.input_modalities or 'rgbd' in args.input_modalities:
        batch['rgb'] = torch.randn((batch_size, 3)+input_shape)
    if 'depth' in args.input_modalities or 'rgbd' in args.input_modalities:
        batch['depth'] = torch.randn((batch_size, 1)+input_shape)
    if 'instance' in tasks:
        # pure instance segmentation task requires gt foreground mask
        batch['instance_foreground'] = torch.ones(
            (batch_size, 1)+input_shape,
            dtype=torch.bool
        )
    if 'orientation' in tasks:
        # orientation estimation requires a gt segmentation and foreground mask
        batch['instance'] = torch.ones(
            (batch_size, 1)+input_shape,
            dtype=torch.bool
        )
        batch['orientation_foreground'] = torch.ones(
            (batch_size, 1)+input_shape,
            dtype=torch.bool
        )
    if not training and do_postprocessing:
        # for inference postprocessing, inputs in full resolution are required
        if 'rgb' in batch:
            batch['rgb_fullres'] = batch['rgb'].clone()
        if 'depth' in batch:
            batch['depth_fullres'] = batch['depth'].clone()

    # apply model
    outputs = model(batch, do_postprocessing=do_postprocessing)

    # some simple checks for output
    if do_postprocessing:
        assert isinstance(outputs, dict)
    else:
        assert isinstance(outputs, list)
    assert outputs

    # export model to ONNX
    if not training and do_postprocessing:
        # stop here: inference postprocessing is challenging (no onnx export)
        return

    # determine filename and filepath (encodes modalities, tasks, backbone)
    tasks_str = '+'.join(tasks)
    if panoptic_enabled:
        tasks_str += '+panoptic'
    modalities_str = '+'.join(modalities)
    filename = f'model_{modalities_str}_{tasks_str}'
    filename += f'__backbone_{backbone}'
    filename += f'__train{training}'
    filename += f'__post_{do_postprocessing}'
    filename += '.onnx'
    filepath = os.path.join(tmp_path, filename)

    # export
    # note, the last element in input tuple is interpreted as named args
    # if no named args should be passed use
    x = (batch, {'do_postprocessing': do_postprocessing})
    export_onnx_model(filepath, model, x)
@pytest.mark.parametrize('tasks', (('semantic',),
                                   ('semantic', 'instance'),
                                   ('semantic', 'instance', 'orientation'),
                                   ('semantic', 'instance', 'orientation',
                                    'scene', 'normal')))
@pytest.mark.parametrize('modalities', (('rgbd',),
                                        ('depth',),
                                        ('rgb', 'depth')))
@pytest.mark.parametrize('backbone', ('swin-t', 'swin-t-v2',
                                      'swin-t-128', 'swin-t-v2-128'))
@pytest.mark.parametrize('do_postprocessing', (False, True))
@pytest.mark.parametrize('training', (False, True))
def test_model(tasks, modalities, backbone, do_postprocessing,
               training, tmp_path):
    """Test entire EMSAFormer model"""
    # note: the original parametrization listed ('rgbd',) twice, running the
    # same configuration two times; the duplicate was removed
    # (possibly ('rgb',) was intended -- TODO confirm)
    model_test(tasks, False, modalities, backbone,
               do_postprocessing, training, tmp_path)
@pytest.mark.parametrize('tasks', (('semantic', 'instance'),
('semantic', 'instance', 'orientation'),
('semantic', 'instance', 'orientation',
'scene', 'normal')))
@pytest.mark.parametrize('do_postprocessing', (False, True))
@pytest.mark.parametrize('training', (False, True))
def test_model_panoptic(tasks, do_postprocessing, training, tmp_path):
"""Test entire EMSAFormer model (panoptic - single encoder)"""
model_test(
tasks=tasks,
panoptic_enabled=True,
modalities=('rgbd',),
backbone='swin-multi-t-v2-128',
do_postprocessing=do_postprocessing,
training=training,
tmp_path=tmp_path
)
@pytest.mark.parametrize('do_postprocessing', (False, True))
@pytest.mark.parametrize('training', (False, True))
def test_model_less_downsampling_skips(do_postprocessing, training, tmp_path):
"""Test EMSAFormer model with less downsampling and less skip connections"""
model_test(
tasks=('semantic', 'instance'),
panoptic_enabled=True,
modalities=('rgbd',),
backbone='swin-multi-t-v2-128',
do_postprocessing=do_postprocessing,
training=training,
tmp_path=tmp_path,
additional_args=[
'--semantic-decoder-n-blocks', '1',
'--instance-decoder-n-blocks', '1',
'--encoder-decoder-skip-downsamplings', '4', '8',
'--semantic-decoder-n-channels', '256', '128', '64',
'--instance-decoder-n-channels', '256', '128', '64'
]
)
| 6,571 | 36.554286 | 105 | py |
null | EMSAFormer-main/emsaformer/tests/test_interface_emsanet_model.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
import os
from nicr_mt_scene_analysis.testing.onnx import export_onnx_model
import pytest
import torch
from emsaformer.args import ArgParserEMSAFormer
from emsaformer.data import get_dataset
from emsaformer.model import EMSAFormer
def model_test_emsanet(tasks,
panoptic_enabled,
modalities,
backbone,
activation,
do_postprocessing,
training,
tmp_path,
additional_args=[]):
parser = ArgParserEMSAFormer()
args = parser.parse_args([
'--input-modalities', *modalities,
'--tasks', *tasks,
'--encoder-decoder-fusion', 'add-rgb' if len(modalities) > 1 else 'add',
'--rgb-encoder-backbone', backbone,
'--depth-encoder-backbone', backbone,
'--encoder-normalization', 'bn',
'--semantic-decoder', 'emsanet',
'--semantic-decoder-n-channels', '512', '256', '128',
'--semantic-decoder-upsampling', 'learned-3x3-zeropad',
'--no-pretrained-backbone',
'--activation', activation,
'--dataset', 'nyuv2',
*additional_args
], verbose=False)
# replace some args
args.enable_panoptic = panoptic_enabled
dataset = get_dataset(args, split='train')
dataset_config = dataset.config
# create model
model = EMSAFormer(args, dataset_config=dataset_config)
if not training:
model.eval()
# determine input
batch_size = 3
input_shape = (480, 640)
batch = {}
if 'rgb' in args.input_modalities:
batch['rgb'] = torch.randn((batch_size, 3)+input_shape)
if 'depth' in args.input_modalities:
batch['depth'] = torch.randn((batch_size, 1)+input_shape)
if 'instance' in tasks:
# pure instance segmentation task requires gt foreground mask
batch['instance_foreground'] = torch.ones(
(batch_size, 1)+input_shape,
dtype=torch.bool
)
if 'orientation' in tasks:
# orientation estimation requires a gt segmentation and foreground mask
batch['instance'] = torch.ones(
(batch_size, 1)+input_shape,
dtype=torch.bool
)
batch['orientation_foreground'] = torch.ones(
(batch_size, 1)+input_shape,
dtype=torch.bool
)
if not training and do_postprocessing:
# for inference postprocessing, inputs in full resolution are required
if 'rgb' in batch:
batch['rgb_fullres'] = batch['rgb'].clone()
if 'depth' in batch:
batch['depth_fullres'] = batch['depth'].clone()
# apply model
outputs = model(batch, do_postprocessing=do_postprocessing)
# some simple checks for output
if do_postprocessing:
assert isinstance(outputs, dict)
else:
assert isinstance(outputs, list)
assert outputs
# export model to ONNX
if not training and do_postprocessing:
# stop here: inference postprocessing is challenging (no onnx export)
return
# determine filename and filepath
tasks_str = '+'.join(tasks)
if panoptic_enabled:
tasks_str += '+panoptic'
modalities_str = '+'.join(modalities)
filename = f'model_{modalities_str}_{tasks_str}'
filename += f'__backbone_{backbone}'
filename += f'__act_{activation}'
filename += f'__train{training}'
filename += f'__post_{do_postprocessing}'
filename += '.onnx'
filepath = os.path.join(tmp_path, filename)
# export
# note, the last element in input tuple is interpreted as named args
# if no named args should be passed use
x = (batch, {'do_postprocessing': do_postprocessing})
export_onnx_model(filepath, model, x)
@pytest.mark.parametrize('tasks', (('semantic',),
('semantic', 'instance'),
('semantic', 'instance', 'orientation'),
('semantic', 'instance', 'orientation',
'scene', 'normal')))
@pytest.mark.parametrize('modalities', (('rgb',),
('depth',),
('rgb', 'depth')))
@pytest.mark.parametrize('backbone', ('resnet18', 'resnet50', 'resnet34se'))
@pytest.mark.parametrize('activation', ('relu', 'swish'))
@pytest.mark.parametrize('do_postprocessing', (False, True))
@pytest.mark.parametrize('training', (False, True))
def test_model_emsanet(tasks, modalities, backbone, activation, do_postprocessing,
training, tmp_path):
"""Test entire EMSANet model"""
model_test_emsanet(tasks, False, modalities, backbone, activation,
do_postprocessing, training, tmp_path)
@pytest.mark.parametrize('tasks', (('semantic', 'instance'),
('semantic', 'instance', 'orientation'),
('semantic', 'instance', 'orientation',
'scene', 'normal')))
@pytest.mark.parametrize('do_postprocessing', (False, True))
@pytest.mark.parametrize('training', (False, True))
def test_model_emsanet_panoptic(tasks, do_postprocessing, training, tmp_path):
"""Test entire EMSANet model (panoptic)"""
model_test_emsanet(
tasks=tasks,
panoptic_enabled=True,
modalities=('rgb', 'depth'),
backbone='resnet18',
activation='relu',
do_postprocessing=do_postprocessing,
training=training,
tmp_path=tmp_path
)
@pytest.mark.parametrize('do_postprocessing', (False, True))
@pytest.mark.parametrize('training', (False, True))
def test_model_emsanet_less_downsampling_skips(do_postprocessing, training, tmp_path):
"""Test EMSANet model with less downsampling and less skip connections"""
model_test_emsanet(
tasks=('semantic', 'instance'),
panoptic_enabled=True,
modalities=('rgb', 'depth'),
backbone='resnet18-d16',
activation='relu',
do_postprocessing=do_postprocessing,
training=training,
tmp_path=tmp_path,
additional_args=[
'--semantic-decoder-n-blocks', '1',
'--instance-decoder-n-blocks', '1',
'--encoder-decoder-skip-downsamplings', '4', '8',
]
)
| 6,470 | 35.559322 | 86 | py |
null | EMSAFormer-main/emsaformer/tests/test_interface_preprocessing.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Daniel Seichter <daniel.seichter@tu-ilmenau.de>
"""
from functools import partial
from nicr_mt_scene_analysis.data import mt_collate
from nicr_mt_scene_analysis.data import CollateIgnoredDict
from nicr_mt_scene_analysis.testing.preprocessing import show_results
from nicr_mt_scene_analysis.testing.preprocessing import SHOW_RESULTS
from nicr_scene_analysis_datasets.dataset_base import OrientationDict
from nicr_scene_analysis_datasets.dataset_base import SampleIdentifier
from nicr_scene_analysis_datasets.utils.testing import DATASET_PATH_DICT
import numpy as np
import pytest
import torch
from emsaformer.args import ArgParserEMSAFormer
from emsaformer.data import get_dataset
from emsaformer.preprocessing import get_preprocessor
@pytest.mark.parametrize('dataset', ('nyuv2', 'sunrgbd', 'hypersim'))
@pytest.mark.parametrize('tasks', (('semantic',),
('semantic', 'instance'),
('instance', 'orientation'),
('semantic', 'instance', 'orientation'),
('semantic', 'instance', 'orientation',
'scene', 'normal')))
@pytest.mark.parametrize('modalities', (('rgb',),
('depth',),
('rgb', 'depth')))
@pytest.mark.parametrize('phase', (('train', 'test')))
@pytest.mark.parametrize('multiscale', (False, True))
def test_preprocessing(dataset, tasks, modalities, phase, multiscale):
"""Test entire EMSANet/EMSAFormer preprocessing"""
# drop normal task for SUNRGB-D
if dataset not in ('hypersim', 'nyuv2'):
tasks = tuple(t for t in tasks if t != 'normal')
parser = ArgParserEMSAFormer()
args = parser.parse_args('', verbose=False)
args.tasks = tasks
args.input_modalities = modalities
args.dataset = dataset
args.dataset_path = DATASET_PATH_DICT[dataset]
if dataset in ('cityscapes', 'hypersim'):
args.raw_depth = True
dataset = get_dataset(args, 'train')
preprocessor = get_preprocessor(
args=args,
dataset=dataset,
phase=phase,
multiscale_downscales=(8, 16, 32) if multiscale else None
)
dataset.preprocessor = preprocessor
for sample_pre in dataset:
if SHOW_RESULTS:
# use 'SHOW_RESULTS=true pytest ...'
sample = sample_pre.pop('_no_preprocessing')
show_results(sample, sample_pre, "Preprocessing")
else:
break
show_results({}, sample_pre, "Preprocessing")
# we use a modified collate function to handle elements of different
# spatial resolution and to ignore numpy arrays, dicts containing
# orientations (OrientationDict), and simple tuples storing shapes
collate_fn = partial(mt_collate,
type_blacklist=(np.ndarray,
CollateIgnoredDict,
OrientationDict,
SampleIdentifier))
# test with data loader (and collate function)
loader = torch.utils.data.DataLoader(
dataset,
batch_size=2,
num_workers=0,
shuffle=True,
drop_last=True,
collate_fn=collate_fn
)
for sample_pre in loader:
break
| 3,402 | 35.98913 | 75 | py |
null | EMSAFormer-main/emsaformer/tests/test_metrics_with_model.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Soehnke Fischedick <soehnke-benedikt.fischedick@tu-ilmenau.de>
"""
import json
import os
import torch
import numpy as np
import pytest
import PIL.Image as Image
from tqdm import tqdm
from nicr_mt_scene_analysis import metric
from nicr_mt_scene_analysis.data import move_batch_to_device
from nicr_mt_scene_analysis.data.preprocessing.resize import get_fullres
from nicr_mt_scene_analysis.utils.panoptic_merge import deeplab_merge_batch
from nicr_scene_analysis_datasets.utils.testing import DATASET_PATH_DICT
from panopticapi.utils import IdGenerator
from panopticapi.evaluation import pq_compute
from emsaformer.args import ArgParserEMSAFormer
from emsaformer.data import get_datahelper
from emsaformer.model import EMSAFormer
from emsaformer.preprocessing import get_preprocessor
from emsaformer.weights import load_weights
PQ_TEST_WEIGHT_DICT = {
'nyuv2': '/results_nas/emsanet_transformer/_released/nyuv2/nyuv2_swin_multi_t_v2_128_segformermlp_decoder.pth',
'sunrgbd': '/results_nas/emsanet_transformer/_released/sunrgbd/sunrgbd_swin_multi_t_v2_128_segformermlp_decoder.pth'
}
@pytest.mark.parametrize('dataset', ('nyuv2', 'sunrgbd'))
def test_compare_pq_with_panopticapi(tmp_path, dataset):
"""Test that compares our pq to the panopticapi"""
parser = ArgParserEMSAFormer()
args = parser.parse_args(['--dataset', dataset], verbose=False)
args.wandb_mode = 'disabled'
args.tasks = ('instance', 'semantic', 'scene', 'orientation')
args.enable_panoptic = True
args.no_pretrained_backbone = True
args.dataset_path = DATASET_PATH_DICT[args.dataset]
args.validation_batch_size = 4
label_divisor = (1 << 16)
data = get_datahelper(args)
dataset_config = data.dataset_config
n_semantic_classes = len(dataset_config.semantic_label_list)
is_thing = dataset_config.semantic_label_list.classes_is_thing
# we process only 25 batches of each camera to speed up testing, use
# -1 or 0 to process all batches of a camera
batches_per_cam = 25
model = EMSAFormer(args, dataset_config=data.dataset_config)
model = model.to(torch.device('cpu'))
checkpoint = torch.load(PQ_TEST_WEIGHT_DICT[dataset])
load_weights(args, model, checkpoint['state_dict'])
model.eval()
# set preprocessor to datasets (note, preprocessing depends on model)
downscales = set()
for decoder in model.decoders.values():
downscales |= set(decoder.side_output_downscales)
data.set_valid_preprocessor(
get_preprocessor(
args,
dataset=data.datasets_valid[0],
phase='test',
multiscale_downscales=tuple(downscales) if args.debug else None
)
)
pq_metric = metric.PanopticQuality(
num_categories=n_semantic_classes,
ignored_label=0,
max_instances_per_category=label_divisor,
offset=256**3,
is_thing=is_thing
)
gt_path = os.path.join(tmp_path, 'gt')
os.makedirs(gt_path, exist_ok=True)
pred_path = os.path.join(tmp_path, 'pred')
os.makedirs(pred_path, exist_ok=True)
img_ctr = 0
categorys = []
for idx, label in enumerate(dataset_config.semantic_label_list):
label_dict = {}
label_dict['supercategory'] = label.class_name
label_dict['name'] = label.class_name
label_dict['id'] = idx
label_dict['isthing'] = int(label.is_thing)
label_dict['color'] = [int(a) for a in label.color]
categorys.append(label_dict)
categorys_list = categorys.copy()
categorys = {cat['id']: cat for cat in categorys}
categorys_json_file = os.path.join(tmp_path, 'categories.json')
with open(categorys_json_file, 'w') as f:
json.dump(categorys, f)
# Modified copy of the original code from
# https://github.com/cocodataset/panopticapi/blob/master/converters/2channels2panoptic_coco_format.py#L38
def convert(image_id, pan, categories, file_name, segmentations_folder,
VOID=0, OFFSET=label_divisor):
h, w = pan.shape
pan_format = np.zeros((h, w, 3), dtype=np.uint8)
id_generator = IdGenerator(categories)
uids = np.unique(pan)
segm_info = []
for el in uids:
sem = el // OFFSET
if sem == VOID:
continue
if sem not in categories:
raise KeyError('Unknown semantic label {}'.format(sem))
mask = pan == el
segment_id, color = id_generator.get_id_and_color(sem)
pan_format[mask] = color
segm_info.append({"id": segment_id,
"category_id": int(sem),
"iscrowd": 0,
"area": int(mask.sum())})
annotation = {'image_id': image_id,
'file_name': file_name,
'segments_info': segm_info}
Image.fromarray(pan_format).save(os.path.join(segmentations_folder, file_name))
return annotation
annotations_gt = []
annotations_pred = []
images = []
for cam in data.valid_dataloaders:
for idx, batch in tqdm(enumerate(cam), total=len(cam)):
if batches_per_cam > 0 and idx > batches_per_cam:
break
batch = move_batch_to_device(batch, torch.device('cpu'))
with torch.no_grad():
output = model(batch, do_postprocessing=True)
# batch = move_batch_to_device(batch, torch.device("cpu"))
semantic_batch = get_fullres(batch, 'semantic')
instance_batch = get_fullres(batch, 'instance')
instance_fg = instance_batch != 0
panoptic_targets, _ = deeplab_merge_batch(semantic_batch,
instance_batch,
instance_fg,
label_divisor,
np.where(is_thing)[0],
0)
panoptic_preds = output['panoptic_segmentation_deeplab_fullres']
panoptic_targets = panoptic_targets.cpu()
panoptic_preds = panoptic_preds.cpu()
pq_metric.update(panoptic_preds, panoptic_targets)
for target_img, pred_img in zip(panoptic_targets, panoptic_preds):
pan_img_name = f'{img_ctr:05d}.png'
annotation = convert(img_ctr, target_img, categorys, pan_img_name,
gt_path, VOID=0, OFFSET=label_divisor)
annotations_gt.append(annotation)
annotation = convert(img_ctr, pred_img, categorys, pan_img_name,
pred_path, VOID=0, OFFSET=label_divisor)
annotations_pred.append(annotation)
image_dict = {}
image_dict['file_name'] = pan_img_name
image_dict['id'] = img_ctr
images.append(image_dict)
img_ctr += 1
pq_result = pq_metric.compute()
coco_dict_gt = {}
coco_dict_gt['images'] = images
coco_dict_gt['annotations'] = annotations_gt
coco_dict_gt['categories'] = categorys_list
coco_dict_pred = {}
coco_dict_pred['images'] = images
coco_dict_pred['annotations'] = annotations_pred
coco_dict_pred['categories'] = categorys_list
pan_gt_json_file = os.path.join(tmp_path, 'panoptic_gt.json')
pan_pred_json_file = os.path.join(tmp_path, 'panoptic_pred.json')
with open(pan_gt_json_file, 'w') as f:
json.dump(coco_dict_gt, f)
with open(pan_pred_json_file, 'w') as f:
json.dump(coco_dict_pred, f)
pq_coco_panoptic_api = pq_compute(
pan_gt_json_file, pan_pred_json_file,
gt_path, pred_path
)
np.testing.assert_almost_equal(pq_result['all_pq'],
pq_coco_panoptic_api['All']['pq'],
decimal=9)
np.testing.assert_almost_equal(pq_result['all_sq'],
pq_coco_panoptic_api['All']['sq'],
decimal=9)
np.testing.assert_almost_equal(pq_result['all_rq'],
pq_coco_panoptic_api['All']['rq'],
decimal=9)
np.testing.assert_almost_equal(pq_result['things_pq'],
pq_coco_panoptic_api['Things']['pq'],
decimal=9)
np.testing.assert_almost_equal(pq_result['things_sq'],
pq_coco_panoptic_api['Things']['sq'],
decimal=9)
np.testing.assert_almost_equal(pq_result['things_rq'],
pq_coco_panoptic_api['Things']['rq'],
decimal=9)
np.testing.assert_almost_equal(pq_result['stuff_pq'],
pq_coco_panoptic_api['Stuff']['pq'],
decimal=9)
np.testing.assert_almost_equal(pq_result['stuff_sq'],
pq_coco_panoptic_api['Stuff']['sq'],
decimal=9)
np.testing.assert_almost_equal(pq_result['stuff_rq'],
pq_coco_panoptic_api['Stuff']['rq'],
decimal=9)
| 9,468 | 39.465812 | 120 | py |
null | EMSAFormer-main/emsaformer/tests/test_semantic_loss.py | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Mona Koehler <mona.koehler@tu-ilmenau.de>
"""
import numpy as np
import torch
from torch import nn
from nicr_mt_scene_analysis.loss.ce import CrossEntropyLossSemantic
DEVICE = 'cuda:0' if torch.cuda.is_available() else 'cpu'
# copied from: https://github.com/TUI-NICR/ESANet/blob/main/src/utils.py#L18-L50
class CrossEntropyLossPrevious(nn.Module):
def __init__(self, device, weight):
super(CrossEntropyLossPrevious, self).__init__()
self.weight = torch.tensor(weight).to(device)
self.num_classes = len(self.weight) + 1 # +1 for void
if self.num_classes < 2 ** 8:
self.dtype = torch.uint8
else:
self.dtype = torch.int16
self.ce_loss = nn.CrossEntropyLoss(
torch.from_numpy(np.array(weight)).float(),
reduction='none',
ignore_index=-1
)
self.ce_loss.to(device)
def forward(self, inputs_scales, targets_scales):
losses = []
for inputs, targets in zip(inputs_scales, targets_scales):
# mask = targets > 0
targets_m = targets.clone()
targets_m -= 1
loss_all = self.ce_loss(inputs, targets_m.long())
number_of_pixels_per_class = \
torch.bincount(targets.flatten().type(self.dtype),
minlength=self.num_classes)
divisor_weighted_pixel_sum = \
torch.sum(number_of_pixels_per_class[
1:] * self.weight) # without void
losses.append(torch.sum(loss_all) / divisor_weighted_pixel_sum)
# losses.append(torch.sum(loss_all) / torch.sum(mask.float()))
return losses
def test_loss():
"""
make sure that new loss implementation outputs the same values as the old
loss implementation
"""
class_weights = torch.tensor(
[0.2650426, 0.5533999, 0.42025763, 0.34482047, 0.7993162,
0.49264285, 1.1026958, 0.78996897, 0.76780474, 0.36996013,
1.6053797, 0.97266424, 0.63303965, 0.73651886, 0.92407864,
0.59753835, 0.4705898, 1.7916499, 0.61840767, 1.1446692,
1.1642636, 1.081512, 1.8748288, 0.6763455, 1.0289167,
4.0649543, 1.5289997, 0.42058772, 3.60466, 0.53412074,
1.246997, 2.2661245, 0.9652696, 3.0297952, 5.316681,
1.0555762, 6.7779245, 1.0640355, 1.2999853, 1.1953188],
)
# loss object from new loss implementation
loss_object = CrossEntropyLossSemantic(
weights=class_weights.to(DEVICE),
label_smoothing=0.0,
weighted_reduction=True
)
# loss object from old loss implementation
loss_object_previous = CrossEntropyLossPrevious(
device=DEVICE,
weight=class_weights
)
# generate random prediction and target
pred = tuple(
torch.rand(size=(2, 40, int(480 / (2 ** i)), int(640 / (2 ** i))),
device=DEVICE)
for i in [0, 3, 4, 5]
)
target = tuple(
torch.randint(size=(2, int(480 / (2 ** i)), int(640 / (2 ** i))),
low=0,
high=40,
device=DEVICE)
for i in [0, 3, 4, 5]
)
# compute loss with new as well as with old loss object
loss = loss_object(pred, target)
loss_previous = loss_object_previous(pred, target)
# test if the computed losses are the same between both implementations
assert torch.allclose(
torch.tensor([loss[i][0] for i in range(4)]),
torch.tensor([loss_previous[i] for i in range(4)])
)
| 3,614 | 33.759615 | 80 | py |
fancyimpute | fancyimpute-master/.travis.yml | sudo: false # Use container-based infrastructure
language: python
env:
global:
- KERAS_BACKEND=tensorflow
- CUDA_VISIBLE_DEVICES=""
matrix:
include:
- python: 3.6
before_install:
# Commands below copied from: http://conda.pydata.org/docs/travis.html
# We do this conditionally because it saves us some downloading if the
# version is the same.
- wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
- bash miniconda.sh -b -p $HOME/miniconda
- export PATH="$HOME/miniconda/bin:$PATH"
# reset the shell's lookup table for program name to path mappings
- hash -r
- conda config --set always_yes yes --set changeps1 no
- conda update -q conda
# Useful for debugging any issues with conda
- conda info -a
addons:
apt:
packages:
# Even though I'm installing cvxopt via conda, still seem to need these:
- liblapack-dev
- libatlas-base-dev
install:
- >
conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION
numpy=1.19.5 keras=2.4.3 scipy nose pandas matplotlib cvxopt scikit-learn
- source activate test-environment
- conda install -c cvxgrp scs=1.2.6
- pip install tensorflow==2.5
- pip install -r requirements.txt
- pip install .
- pip install coveralls
- export PACKAGE_DIR=`pwd`
script:
- cd $PACKAGE_DIR
- nosetests test --with-coverage --cover-package=fancyimpute
after_success:
- coveralls
deploy:
provider: pypi
distributions: sdist
user: openvax
password: # See http://docs.travis-ci.com/user/encryption-keys/
secure: "AAzTof2771B8tjg2PzCFfctNUbJ6BcQIkH3skpKJvoyWmL0U/fqnGF6zpK0QApJBqTX/xygYhLSfKWZ788FWwyaHW6Hgw8UQ1eHJPurjC9P8O/OWYRhK3r9J7dEPL4+uHfD67C7C+JGCl9BQk8+dRGYDOJ9kx32Eown8wtaoNY7ykLwq/mXsJcm+NjvfJzA7xE4TbGlL1RFDidUkwZ4YgWtGFcfEtVZlO+pEqeprLr/PBQap6K6WPA5yjQKziaqw5DSjMAU5TVDoZgIMu3/uxUJS6EGYs7FvRM961oEFXs9QvhDz+VtKr1kY8wGR1kJXes41NDr8fq9MqBAGcz3yxHeEP1wU1Aukfbw6QUQqQ7rUWFVKSqeVAq7Phirz7RHWslXl9dSoK2REQA3C8sXggmj198YhEq7QufxzTkD4KCDj+jutbMURZI5re6oetLqBz+8zExywXLKgVtTlUnokJ9R5Fnl0E1B4LMHXRvus71+vLQfv2gCt5OWRxzUfUFzpMdkXG2FDmjFGdBw6OWMhS1W+B19ht6Ho4SoN0Tj3YzvZt2AEwShm1i0LA8ITSN1lQdEucdz0kAhvXVRJtcGa4y48/uT9e8gzeyDyANvJ1RAbCsj3/kazucZH9I0b0lRyMiadtj7mfQwnU9MXCJzG7e912sGJDImyiTXqTQfw1Us="
on:
branch: master
condition: $TRAVIS_PYTHON_VERSION = "3.6"
| 2,333 | 42.222222 | 698 | yml |
fancyimpute | fancyimpute-master/README.md | [](https://travis-ci.org/iskandr/fancyimpute) [](https://coveralls.io/github/iskandr/fancyimpute?branch=master) [](http://dx.doi.org/10.5281/zenodo.51773)

A variety of matrix completion and imputation algorithms implemented in Python 3.6.
To install:
`pip install fancyimpute`
If you run into `tensorflow` problems and use anaconda, you can try to fix them with `conda install cudatoolkit`.
## Important Caveats
(1) This project is in "bare maintenance" mode. That means we are not planning on adding more imputation algorithms or features (but might if we get inspired). Please do report bugs, and we'll try to fix them. Also, we are happy to take pull requests for more algorithms and/or features.
(2) `IterativeImputer` started its life as a `fancyimpute` original, but was then merged into `scikit-learn` and we deleted it from `fancyimpute` in favor of the better-tested `sklearn` version. As a convenience, you can still `from fancyimpute import IterativeImputer`, but under the hood it's just doing `from sklearn.impute import IterativeImputer`. That means if you update `scikit-learn` in the future, you may also change the behavior of `IterativeImputer`.
## Usage
```python
from fancyimpute import KNN, NuclearNormMinimization, SoftImpute, BiScaler
# X is the complete data matrix
# X_incomplete has the same values as X except a subset have been replace with NaN
# Use 3 nearest rows which have a feature to fill in each row's missing features
X_filled_knn = KNN(k=3).fit_transform(X_incomplete)
# matrix completion using convex optimization to find low-rank solution
# that still matches observed values. Slow!
X_filled_nnm = NuclearNormMinimization().fit_transform(X_incomplete)
# Instead of solving the nuclear norm objective directly, instead
# induce sparsity using singular value thresholding
X_incomplete_normalized = BiScaler().fit_transform(X_incomplete)
X_filled_softimpute = SoftImpute().fit_transform(X_incomplete_normalized)
# print mean squared error for the imputation methods above
nnm_mse = ((X_filled_nnm[missing_mask] - X[missing_mask]) ** 2).mean()
print("Nuclear norm minimization MSE: %f" % nnm_mse)
softImpute_mse = ((X_filled_softimpute[missing_mask] - X[missing_mask]) ** 2).mean()
print("SoftImpute MSE: %f" % softImpute_mse)
knn_mse = ((X_filled_knn[missing_mask] - X[missing_mask]) ** 2).mean()
print("knnImpute MSE: %f" % knn_mse)
```
## Algorithms
* `SimpleFill`: Replaces missing entries with the mean or median of each column.
* `KNN`: Nearest neighbor imputations which weights samples using the mean squared difference
on features for which two rows both have observed data.
* `SoftImpute`: Matrix completion by iterative soft thresholding of SVD decompositions. Inspired by the [softImpute](https://web.stanford.edu/~hastie/swData/softImpute/vignette.html) package for R, which is based on [Spectral Regularization Algorithms for Learning Large Incomplete Matrices](http://web.stanford.edu/~hastie/Papers/mazumder10a.pdf) by Mazumder et. al.
* `IterativeImputer`: A strategy for imputing missing values by modeling each feature with missing values as a function of other features in a round-robin fashion. A stub that links to `scikit-learn`'s [IterativeImputer](https://scikit-learn.org/stable/modules/generated/sklearn.impute.IterativeImputer.html).
* `IterativeSVD`: Matrix completion by iterative low-rank SVD decomposition. Should be similar to SVDimpute from [Missing value estimation methods for DNA microarrays](http://www.ncbi.nlm.nih.gov/pubmed/11395428) by Troyanskaya et. al.
* `MatrixFactorization`: Direct factorization of the incomplete matrix into low-rank `U` and `V`, with per-row and per-column biases, as well as a global bias. Solved by SGD in pure numpy.
* `NuclearNormMinimization`: Simple implementation of [Exact Matrix Completion via Convex Optimization](http://statweb.stanford.edu/~candes/papers/MatrixCompletion.pdf
) by Emmanuel Candes and Benjamin Recht using [cvxpy](http://www.cvxpy.org). Too slow for large matrices.
* `BiScaler`: Iterative estimation of row/column means and standard deviations to get doubly normalized
matrix. Not guaranteed to converge but works well in practice. Taken from [Matrix Completion and Low-Rank SVD via Fast Alternating Least Squares](http://arxiv.org/abs/1410.2596).
## Citation
If you use `fancyimpute` in your academic publication, please cite it as follows:
```bibtex
@software{fancyimpute,
author = {Alex Rubinsteyn and Sergey Feldman},
title={fancyimpute: An Imputation Library for Python},
url = {https://github.com/iskandr/fancyimpute},
version = {0.7.0},
date = {2016},
}
```
| 5,019 | 57.372093 | 465 | md |
fancyimpute | fancyimpute-master/setup.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import re
from setuptools import setup
package_name = "fancyimpute"
readme_dir = os.path.dirname(__file__)
readme_filename = os.path.join(readme_dir, "README.md")
try:
with open(readme_filename, "r") as f:
readme_markdown = f.read()
except:
logging.warn("Failed to load %s" % readme_filename)
readme_markdown = ""
with open("%s/__init__.py" % package_name, "r") as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
if __name__ == "__main__":
setup(
name=package_name,
version=version,
description="Matrix completion and feature imputation algorithms",
author="Alex Rubinsteyn, Sergey Feldman",
author_email="alex.rubinsteyn@gmail.com",
url="https://github.com/iskandr/%s" % package_name,
license="http://www.apache.org/licenses/LICENSE-2.0.html",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
install_requires=[
"knnimpute>=0.1.0",
"scikit-learn>=0.24.2",
# used by NuclearNormMinimization
"cvxpy",
"cvxopt",
"pytest",
"nose",
],
long_description=readme_markdown,
long_description_content_type="text/markdown",
packages=[package_name],
)
| 2,228 | 32.772727 | 99 | py |
fancyimpute | fancyimpute-master/experiments/complete_faces.py | from os import mkdir
from os.path import exists, join
from collections import defaultdict
import pylab
from sklearn.datasets import fetch_lfw_people
from sklearn.impute import IterativeImputer
import numpy as np
from fancyimpute import (
SimpleFill,
IterativeSVD,
SoftImpute,
BiScaler,
KNN
)
from fancyimpute.common import masked_mae, masked_mse
def remove_pixels(
        full_images,
        missing_square_size=32,
        random_seed=0):
    """Return copies of images with one random square of pixels set to NaN.

    Parameters
    ----------
    full_images : sequence of arrays, each (height, width[, channels])
        Complete input images. Integer dtypes (e.g. uint8) are converted
        to float32 so that NaN can be assigned; float inputs are copied.
    missing_square_size : int
        Side length in pixels of the square region replaced with NaN.
    random_seed : int
        Seed controlling the placement of the square, for reproducibility.

    Returns
    -------
    np.ndarray of float32 with shape (n_images, height, width[, channels]).
    """
    np.random.seed(random_seed)
    incomplete_faces = []
    n_faces = len(full_images)
    height, width = full_images[0].shape[:2]
    for i in range(n_faces):
        # cast to float before assigning NaN: a plain .copy() keeps the
        # input dtype and NaN cannot be stored in an integer array
        image = np.asarray(full_images[i], dtype=np.float32).copy()
        # choose the top-left corner so the square always fits inside
        start_x = np.random.randint(
            low=0,
            high=height - missing_square_size + 1)
        start_y = np.random.randint(
            low=0,
            high=width - missing_square_size + 1)
        image[
            start_x: start_x + missing_square_size,
            start_y: start_y + missing_square_size] = np.nan
        incomplete_faces.append(image)
    return np.array(incomplete_faces, dtype=np.float32)
def rescale_pixel_values(images, order="C"):
    """Rescale the range of values in images to lie in [0, 1].

    Parameters
    ----------
    images : array-like
        Pixel data of any shape; converted to float32.
    order : str
        Memory layout ("C" or "F") passed through to np.asarray.

    Returns
    -------
    np.ndarray of float32 with the minimum mapped to 0 and the maximum
    to 1. A constant-valued input returns all zeros instead of dividing
    by zero (which would produce NaN).
    """
    images = np.asarray(images, order=order).astype("float32")
    images -= images.min()
    max_value = images.max()
    # guard the degenerate constant-image case: range is 0 after the
    # shift, so dividing would yield NaN everywhere
    if max_value > 0:
        images /= max_value
    return images
def color_balance(images):
    """Rescale each image's color channels toward the dataset-wide balance.

    For every image, each of the three channels is multiplied so that its
    share of the image's total intensity matches that channel's share of
    the intensity across the whole batch. Results are clipped to [0, 255].

    Parameters
    ----------
    images : np.ndarray of shape (n_images, height, width, 3)

    Returns
    -------
    np.ndarray of float32, same shape; the input array is not modified.
    """
    balanced = images.astype("float32")
    # per-channel views of the float copy (no data is duplicated)
    channels = [balanced[:, :, :, c] for c in range(3)]
    intensity = channels[0] + channels[1] + channels[2]
    grand_total = intensity.sum()
    # fraction of total intensity each channel contributes batch-wide;
    # these are the targets every individual image is pulled toward
    target_fractions = [chan.sum() / grand_total for chan in channels]
    for i in range(balanced.shape[0]):
        frame = balanced[i]
        frame_total = intensity[i].sum()
        for c in range(3):
            observed_fraction = channels[c][i].sum() / frame_total
            frame[:, :, c] *= target_fractions[c] / observed_fraction
        # keep pixel values in the valid 8-bit display range
        np.clip(frame, 0, 255, out=frame)
    return balanced
class ResultsTable(object):
    """Runs several imputation solvers on a set of images with artificially
    removed pixel squares, records MSE/MAE per solver, and saves sample
    reconstructed images plus an HTML summary table.
    """

    def __init__(
            self,
            images_dict,
            percent_missing=0.25,
            saved_image_stride=25,
            dirname="face_images",
            scale_rows=False,
            center_rows=False):
        """
        Parameters
        ----------
        images_dict : dict
            label -> image array; all images must share one shape.
        percent_missing : float
            Fraction of pixels to blank out (as one square) per image.
        saved_image_stride : int
            Save every Nth image for visual inspection.
        dirname : str
            Output directory for saved images and the HTML table.
        scale_rows, center_rows : bool
            Passed through to the BiScaler normalizer.
        """
        self.images_dict = images_dict
        self.labels = list(sorted(images_dict.keys()))
        self.images_array = np.array(
            [images_dict[k] for k in self.labels]).astype("float32")
        self.image_shape = self.images_array[0].shape
        self.width, self.height = self.image_shape[:2]
        # Color iff images have a trailing RGB axis of size 3.
        self.color = (len(self.image_shape) == 3) and (self.image_shape[2] == 3)
        if self.color:
            self.images_array = color_balance(self.images_array)
        self.n_pixels = self.width * self.height
        self.n_features = self.n_pixels * (3 if self.color else 1)
        self.n_images = len(self.images_array)
        print("[ResultsTable] # images = %d, color=%s # features = %d, shape = %s" % (
            self.n_images, self.color, self.n_features, self.image_shape))
        self.flattened_array_shape = (self.n_images, self.n_features)
        self.flattened_images = self.images_array.reshape(self.flattened_array_shape)
        n_missing_pixels = int(self.n_pixels * percent_missing)
        missing_square_size = int(np.sqrt(n_missing_pixels))
        print("[ResultsTable] n_missing_pixels = %d, missing_square_size = %d" % (
            n_missing_pixels, missing_square_size))
        self.incomplete_images = remove_pixels(
            self.images_array,
            missing_square_size=missing_square_size)
        print("[ResultsTable] Incomplete images shape = %s" % (
            self.incomplete_images.shape,))
        self.flattened_incomplete_images = self.incomplete_images.reshape(
            self.flattened_array_shape)
        self.missing_mask = np.isnan(self.flattened_incomplete_images)
        self.normalizer = BiScaler(
            scale_rows=scale_rows,
            center_rows=center_rows,
            min_value=self.images_array.min(),
            max_value=self.images_array.max())
        self.incomplete_normalized = self.normalizer.fit_transform(
            self.flattened_incomplete_images)
        self.saved_image_indices = list(
            range(0, self.n_images, saved_image_stride))
        self.saved_images = defaultdict(dict)
        self.dirname = dirname
        self.mse_dict = {}
        self.mae_dict = {}
        self.save_images(self.images_array, "original", flattened=False)
        self.save_images(self.incomplete_images, "incomplete", flattened=False)

    def ensure_dir(self, dirname):
        # Create the directory on first use (not thread-safe, fine here).
        if not exists(dirname):
            print("Creating directory: %s" % dirname)
            mkdir(dirname)

    def save_images(self, images, base_filename, flattened=True):
        """Save every `saved_image_stride`-th image under its label's subdir."""
        self.ensure_dir(self.dirname)
        for i in self.saved_image_indices:
            label = self.labels[i].lower().replace(" ", "_")
            image = images[i, :].copy()
            if flattened:
                image = image.reshape(self.image_shape)
            image[np.isnan(image)] = 0
            figure = pylab.gcf()
            axes = pylab.gca()
            extra_kwargs = {}
            # BUGFIX: the condition was inverted. matplotlib ignores cmap
            # for RGB data, but *grayscale* (2D) images need cmap="gray"
            # to avoid being rendered with the default colormap.
            if not self.color:
                extra_kwargs["cmap"] = "gray"
            assert image.min() >= 0, "Image can't contain negative numbers"
            # Heuristic: treat [0, 1]-ranged data as normalized and
            # rescale into 8-bit range before saving.
            if image.max() <= 1:
                image *= 256
                image[image > 255] = 255
            axes.imshow(image.astype("uint8"), **extra_kwargs)
            axes.get_xaxis().set_visible(False)
            axes.get_yaxis().set_visible(False)
            filename = base_filename + ".png"
            subdir = join(self.dirname, label)
            self.ensure_dir(subdir)
            path = join(subdir, filename)
            figure.savefig(
                path,
                bbox_inches='tight')
            self.saved_images[i][base_filename] = path

    def add_entry(self, solver, name):
        """Run one solver on the normalized incomplete data and record errors."""
        print("Running %s" % name)
        completed_normalized = solver.fit_transform(self.incomplete_normalized)
        completed = self.normalizer.inverse_transform(completed_normalized)
        # Errors are evaluated only on the artificially-removed entries.
        mae = masked_mae(
            X_true=self.flattened_images,
            X_pred=completed,
            mask=self.missing_mask)
        mse = masked_mse(
            X_true=self.flattened_images,
            X_pred=completed,
            mask=self.missing_mask)
        print("==> %s: MSE=%0.4f MAE=%0.4f" % (name, mse, mae))
        self.mse_dict[name] = mse
        self.mae_dict[name] = mae
        self.save_images(completed, base_filename=name)

    def sorted_errors(self):
        """
        Generator for (rank, name, MSE, MAE) sorted by increasing MAE
        """
        for i, (name, mae) in enumerate(
                sorted(self.mae_dict.items(), key=lambda x: x[1])):
            yield(i + 1, name, self.mse_dict[name], self.mae_dict[name],)

    def print_sorted_errors(self):
        """Print one line per solver, best (lowest MAE) first."""
        for (rank, name, mse, mae) in self.sorted_errors():
            print("%d) %s: MSE=%0.4f MAE=%0.4f" % (
                rank,
                name,
                mse,
                mae))

    def save_html_table(self, filename="results_table.html"):
        """Write (and return) an HTML table of solver errors sorted by MAE."""
        html = """
            <table>
            <th>
                <td>Rank</td>
                <td>Name</td>
                <td>Mean Squared Error</td>
                <td>Mean Absolute Error</td>
            </th>
        """
        for (rank, name, mse, mae) in self.sorted_errors():
            html += """
            <tr>
                <td>%d</td>
                <td>%s</td>
                <td>%0.4f</td>
                <td>%0.4f</td>
            </tr>
            """ % (rank, name, mse, mae)
        html += "</table>"
        self.ensure_dir(self.dirname)
        path = join(self.dirname, filename)
        with open(path, "w") as f:
            f.write(html)
        return html
def image_per_label(images, label_indices, label_names, max_size=2000):
    """Pick one representative image per label.

    Images are grouped by their normalized (lower-cased, underscored)
    label name; for each label, the image with the smallest pixel
    standard deviation is kept — a rough heuristic for the "plainest"
    picture. Labels are processed in sorted order and at most
    ``max_size`` entries are returned (pass a falsy ``max_size`` for
    no limit).
    """
    grouped = defaultdict(list)
    for i, label_idx in enumerate(label_indices):
        key = label_names[label_idx].lower().strip().replace(" ", "_")
        grouped[key].append(images[i])
    chosen = {}
    for key, candidates in sorted(grouped.items()):
        # Min-variance image as an arbitrary "most typical" choice.
        chosen[key] = min(candidates, key=lambda candidate: candidate.std())
        if max_size and len(chosen) >= max_size:
            break
    return chosen
def get_lfw(max_size=None):
    """Return a dict mapping one person name to one LFW face image each.

    Downloads (or loads the cached copy of) the Labeled Faces in the Wild
    dataset with color images; at most ``max_size`` people are kept.
    """
    dataset = fetch_lfw_people(color=True)
    # keep only one image per person
    return image_per_label(
        dataset.images,
        dataset.target,
        dataset.target_names,
        max_size=max_size)
if __name__ == "__main__":
    # Benchmark script: fetch face images, corrupt them, then compare a
    # range of imputation solvers on the same corrupted data.
    images_dict = get_lfw(max_size=2000)
    table = ResultsTable(
        images_dict=images_dict,
        scale_rows=False,
        center_rows=False)
    for negative_log_regularization_weight in [2, 3, 4]:
        # NOTE(review): regularization_weight is computed but never passed
        # to IterativeImputer, so all three runs are identical apart from
        # the name — confirm whether it was meant to be a solver argument.
        regularization_weight = 10.0 ** -negative_log_regularization_weight
        table.add_entry(
            solver=IterativeImputer(
                n_nearest_features=80,
                max_iter=50
            ),
            name="IterativeImputer_%d" % negative_log_regularization_weight)
    for fill_method in ["mean", "median"]:
        table.add_entry(
            solver=SimpleFill(fill_method=fill_method),
            name="SimpleFill_%s" % fill_method)
    for k in [1, 3, 7]:
        table.add_entry(
            solver=KNN(
                k=k,
                orientation="rows"),
            name="KNN_k%d" % (k,))
    for shrinkage_value in [25, 50, 100]:
        # SoftImpute without rank constraints
        table.add_entry(
            solver=SoftImpute(
                shrinkage_value=shrinkage_value),
            name="SoftImpute_lambda%d" % (shrinkage_value,))
    for rank in [10, 20, 40]:
        table.add_entry(
            solver=IterativeSVD(
                rank=rank,
                init_fill_method="zero"),
            name="IterativeSVD_rank%d" % (rank,))
    table.save_html_table()
    table.print_sorted_errors()
| 10,454 | 33.50495 | 86 | py |
fancyimpute | fancyimpute-master/experiments/readme_example.py | import numpy as np
from fancyimpute import (
BiScaler,
KNN,
NuclearNormMinimization,
SoftImpute,
SimpleFill
)
n = 200
m = 20
inner_rank = 4
# Build an exactly rank-4 n x m matrix as the ground truth.
X = np.dot(np.random.randn(n, inner_rank), np.random.randn(inner_rank, m))
print("Mean squared element: %0.4f" % (X ** 2).mean())
# X is a data matrix which we're going to randomly drop entries from
missing_mask = np.random.rand(*X.shape) < 0.1
X_incomplete = X.copy()
# missing entries indicated with NaN
X_incomplete[missing_mask] = np.nan
# Baseline: fill each missing entry with its column mean.
meanFill = SimpleFill("mean")
X_filled_mean = meanFill.fit_transform(X_incomplete)
# Use 3 nearest rows which have a feature to fill in each row's missing features
knnImpute = KNN(k=3)
X_filled_knn = knnImpute.fit_transform(X_incomplete)
# matrix completion using convex optimization to find low-rank solution
# that still matches observed values. Slow!
X_filled_nnm = NuclearNormMinimization().fit_transform(X_incomplete)
# Instead of solving the nuclear norm objective directly, instead
# induce sparsity using singular value thresholding
softImpute = SoftImpute()
# simultaneously normalizes the rows and columns of your observed data,
# sometimes useful for low-rank imputation methods
biscaler = BiScaler()
# rescale both rows and columns to have zero mean and unit variance
X_incomplete_normalized = biscaler.fit_transform(X_incomplete)
X_filled_softimpute_normalized = softImpute.fit_transform(X_incomplete_normalized)
# Undo the row/column normalization to get back to the original scale.
X_filled_softimpute = biscaler.inverse_transform(X_filled_softimpute_normalized)
# Run SoftImpute once more without normalization, for comparison.
X_filled_softimpute_no_biscale = softImpute.fit_transform(X_incomplete)
# MSE is evaluated only on the entries that were dropped.
meanfill_mse = ((X_filled_mean[missing_mask] - X[missing_mask]) ** 2).mean()
print("meanFill MSE: %f" % meanfill_mse)
# print mean squared error for the imputation methods above
nnm_mse = ((X_filled_nnm[missing_mask] - X[missing_mask]) ** 2).mean()
print("Nuclear norm minimization MSE: %f" % nnm_mse)
softImpute_mse = ((X_filled_softimpute[missing_mask] - X[missing_mask]) ** 2).mean()
print("SoftImpute MSE: %f" % softImpute_mse)
softImpute_no_biscale_mse = (
    (X_filled_softimpute_no_biscale[missing_mask] - X[missing_mask]) ** 2).mean()
print("SoftImpute without BiScale MSE: %f" % softImpute_no_biscale_mse)
knn_mse = ((X_filled_knn[missing_mask] - X[missing_mask]) ** 2).mean()
print("knnImpute MSE: %f" % knn_mse)
| 2,321 | 34.181818 | 84 | py |
fancyimpute | fancyimpute-master/fancyimpute/__init__.py | from __future__ import absolute_import, print_function, division
from .solver import Solver
from .nuclear_norm_minimization import NuclearNormMinimization
from .matrix_factorization import MatrixFactorization
from .iterative_svd import IterativeSVD
from .simple_fill import SimpleFill
from .soft_impute import SoftImpute
from .scaler import BiScaler
from .knn import KNN
from .similarity_weighted_averaging import SimilarityWeightedAveraging
# while iterative imputer is experimental in sklearn, we need this
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
__version__ = "0.7.0"
__all__ = [
"Solver",
"NuclearNormMinimization",
"MatrixFactorization",
"IterativeSVD",
"SimpleFill",
"SoftImpute",
"BiScaler",
"KNN",
"SimilarityWeightedAveraging",
"IterativeImputer",
]
| 865 | 26.935484 | 70 | py |
fancyimpute | fancyimpute-master/fancyimpute/common.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import importlib
import numpy as np
def import_from(module, name):
    """Dynamically import and return attribute ``name`` from ``module``.

    Example:
        grid = import_from('sklearn.model_selection', 'GridSearchCV')
    behaves like:
        from sklearn.model_selection import GridSearchCV as grid
    """
    loaded_module = importlib.import_module(module)
    return getattr(loaded_module, name)
def masked_mae(X_true, X_pred, mask):
    """Mean absolute error restricted to the entries selected by ``mask``."""
    residuals = X_true[mask] - X_pred[mask]
    return np.abs(residuals).mean()
def masked_mse(X_true, X_pred, mask):
    """Mean squared error restricted to the entries selected by ``mask``."""
    residuals = X_true[mask] - X_pred[mask]
    return (residuals ** 2).mean()
def generate_random_column_samples(column):
    """Draw replacement values for the missing (NaN) entries of a 1D column.

    Samples come from a normal distribution fit to the column's observed
    mean and standard deviation (via the global NumPy RNG). Note the
    inconsistent-but-preserved return shapes: if *every* value is missing,
    an all-zero array of the full column length is returned; otherwise
    the result has one entry per missing value.
    """
    col_mask = np.isnan(column)
    n_missing = np.sum(col_mask)
    if n_missing == len(column):
        # No observed data to fit a distribution to; fall back to zeros.
        # (logging.warn is a deprecated alias; use logging.warning.)
        logging.warning("No observed values in column")
        return np.zeros_like(column)

    mean = np.nanmean(column)
    std = np.nanstd(column)

    if np.isclose(std, 0):
        # Degenerate distribution: all observed values (nearly) identical.
        return np.array([mean] * n_missing)
    else:
        return np.random.randn(n_missing) * std + mean
def choose_solution_using_percentiles(
        X_original,
        solutions,
        parameters=None,
        verbose=False,
        percentiles=tuple(range(10, 100, 10))):
    """
    It's tricky to pick a single matrix out of all the candidate
    solutions with differing shrinkage thresholds.

    Our heuristic is to pick the matrix whose percentiles match best
    between the missing and observed data.

    Parameters
    ----------
    X_original : np.ndarray
        Matrix whose NaN entries mark the imputed positions.

    solutions : list of np.ndarray
        Candidate completed matrices, same shape as X_original.

    parameters : list, optional
        Per-candidate labels used only in verbose output.

    verbose : bool

    percentiles : iterable of int
        Percentile levels compared between observed and imputed values.
        (The default is a tuple rather than a list to avoid the mutable
        default argument pitfall.)

    Returns the candidate whose best-matching column has the lowest
    percentile MSE; columns with fewer than two observed or two missing
    values are skipped.
    """
    missing_mask = np.isnan(X_original)
    min_mse = np.inf
    best_solution = None
    for i, candidate in enumerate(solutions):
        for col_idx in range(X_original.shape[1]):
            col_data = candidate[:, col_idx]
            col_missing = missing_mask[:, col_idx]
            col_observed = ~col_missing
            # Need at least two values on each side for percentiles to
            # be meaningful.
            if col_missing.sum() < 2:
                continue
            elif col_observed.sum() < 2:
                continue
            missing_data = col_data[col_missing]
            observed_data = col_data[col_observed]
            missing_percentiles = np.array([
                np.percentile(missing_data, p)
                for p in percentiles])
            observed_percentiles = np.array([
                np.percentile(observed_data, p)
                for p in percentiles])
            mse = np.mean((missing_percentiles - observed_percentiles) ** 2)
            if mse < min_mse:
                min_mse = mse
                best_solution = candidate
            if verbose:
                print("Candidate #%d/%d%s: %f" % (
                    i + 1,
                    len(solutions),
                    (" (parameter=%s) " % parameters[i]
                     if parameters is not None
                     else ""),
                    mse))
    return best_solution
| 3,268 | 30.432692 | 76 | py |
fancyimpute | fancyimpute-master/fancyimpute/dictionary_helpers.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper functions for incomplete matrices represented using dictionaries.
"""
from collections import defaultdict
import numpy as np
from scipy.sparse import dok_matrix
def dense_nan_matrix(shape, dtype):
    """Return a dense array of the given shape filled entirely with NaN."""
    return np.full(shape, np.nan, dtype=dtype)
def collect_nested_keys(nested_dict):
    """Return (sorted outer keys, sorted union of all inner keys)."""
    outer_key_list = sorted(nested_dict)
    inner_key_set = set()
    for inner_dict in nested_dict.values():
        inner_key_set.update(inner_dict.keys())
    return outer_key_list, sorted(inner_key_set)
def nested_key_indices(nested_dict):
    """
    Give an ordering to the outer and inner keys used in a dictionary that
    maps to dictionaries.

    Returns two dicts: outer key -> index and inner key -> index, with
    indices assigned in sorted key order.
    """
    outer_keys = sorted(nested_dict)
    inner_key_set = set()
    for inner_dict in nested_dict.values():
        inner_key_set.update(inner_dict.keys())
    outer_key_indices = {key: i for (i, key) in enumerate(outer_keys)}
    inner_key_indices = {key: i for (i, key) in enumerate(sorted(inner_key_set))}
    return outer_key_indices, inner_key_indices
def flattened_nested_key_indices(nested_dict):
    """
    Combine the outer and inner keys of nested dictionaries into a single
    ordering.

    Returns one dict mapping every key (outer or inner) to its index in
    the sorted union of both key sets.
    """
    all_keys = set(nested_dict)
    for inner_dict in nested_dict.values():
        all_keys.update(inner_dict.keys())
    return {key: i for (i, key) in enumerate(sorted(all_keys))}
def index_dict_to_sorted_list(key_indices):
    """Invert a key -> index mapping into a list where list[index] == key."""
    ordered = [None] * len(key_indices)
    for key, position in key_indices.items():
        ordered[position] = key
    return ordered
def array_from_nested_dictionary(
        nested_dict,
        array_fn,
        dtype="float32",
        square_result=False):
    """
    Convert a nested dictionary (outer_key -> inner_key -> value) into a
    2D array with one row per outer key and one column per inner key.

    Parameters
    ----------
    nested_dict : dict
        Dictionary which contains dictionaries

    array_fn : function
        Takes shape and dtype as arguments, returns empty array.

    dtype : dtype
        NumPy dtype of result array

    square_result : bool
        Combine keys from outer and inner dictionaries.

    Returns array and sorted lists of the outer and inner keys.
    """
    if square_result:
        # Rows and columns share one combined index space, so the result
        # is square even if a key only ever appears on one side.
        outer_key_indices = inner_key_indices = flattened_nested_key_indices(
            nested_dict)
    else:
        outer_key_indices, inner_key_indices = nested_key_indices(
            nested_dict)
    n_rows = len(outer_key_indices)
    n_cols = len(inner_key_indices)
    shape = (n_rows, n_cols)
    result = array_fn(shape, dtype)
    for outer_key, sub_dictionary in nested_dict.items():
        i = outer_key_indices[outer_key]
        for inner_key, value in sub_dictionary.items():
            j = inner_key_indices[inner_key]
            result[i, j] = value
    outer_key_list = index_dict_to_sorted_list(outer_key_indices)
    inner_key_list = index_dict_to_sorted_list(inner_key_indices)
    return result, outer_key_list, inner_key_list
def sparse_dok_matrix_from_nested_dictionary(
        nested_dict,
        dtype="float32",
        square_result=False):
    """Like array_from_nested_dictionary, but absent entries stay 0 in a
    scipy dok_matrix instead of NaN in a dense array."""
    return array_from_nested_dictionary(
        nested_dict,
        array_fn=dok_matrix,
        dtype=dtype,
        square_result=square_result)
def dense_matrix_from_nested_dictionary(
        nested_dict,
        dtype="float32",
        square_result=False):
    """Like array_from_nested_dictionary, returning a dense array whose
    absent entries are NaN (marking them as "missing")."""
    return array_from_nested_dictionary(
        nested_dict,
        array_fn=dense_nan_matrix,
        dtype=dtype,
        square_result=square_result)
def matrix_to_pair_dictionary(
        X, row_keys=None, column_keys=None, filter_fn=None):
    """Flatten a matrix into a {(row_key, column_key): value} dictionary.

    X : numpy.ndarray

    row_keys : dict
        Maps row index -> row name. If omitted, the identity mapping over
        row indices is used.

    column_keys : dict
        Maps column index -> column name. If omitted and the matrix is
        square, the row mapping is reused; otherwise the identity mapping
        over column indices is used.

    filter_fn : function
        If given then only add elements for which this function returns True.
    """
    n_rows, n_cols = X.shape
    if row_keys is None:
        row_keys = {i: i for i in range(n_rows)}
    if column_keys is None:
        if n_rows == n_cols:
            # Square matrices share one key space for rows and columns.
            column_keys = row_keys
        else:
            column_keys = {j: j for j in range(n_cols)}
    if len(row_keys) != n_rows:
        raise ValueError("Need %d row keys but got list of length %d" % (
            n_rows,
            len(row_keys)))
    if len(column_keys) != n_cols:
        raise ValueError("Need %d column keys but got list of length %d" % (
            n_cols,
            len(column_keys)))
    pairs = {}
    for i, row in enumerate(X):
        row_key = row_keys[i]
        for j, value in enumerate(row):
            if filter_fn and not filter_fn(value):
                continue
            pairs[(row_key, column_keys[j])] = value
    return pairs
def curry_pair_dictionary(key_pair_dict, default_value=0.0):
    """
    Transform dictionary from pairs of keys to dict -> dict -> float

    ``default_value`` is currently unused; it is kept so existing callers
    that pass it keep working.
    """
    curried = defaultdict(dict)
    for (outer_key, inner_key), value in key_pair_dict.items():
        curried[outer_key][inner_key] = value
    return curried
def uncurry_nested_dictionary(curried_dict):
    """
    Transform dictionary from (key_a -> key_b -> float) to
    (key_a, key_b) -> float
    """
    return {
        (outer_key, inner_key): value
        for outer_key, inner_dict in curried_dict.items()
        for inner_key, value in inner_dict.items()
    }
def matrix_to_nested_dictionary(
        X,
        row_keys=None,
        column_keys=None,
        filter_fn=None):
    """Convert a matrix into a nested row_key -> column_key -> value dict.

    Arguments are forwarded to matrix_to_pair_dictionary; the flat pair
    dictionary is then curried into nested form.
    """
    pair_dict = matrix_to_pair_dictionary(
        X,
        row_keys=row_keys,
        column_keys=column_keys,
        filter_fn=filter_fn)
    return curry_pair_dictionary(pair_dict)
def pair_dict_key_sets(pair_dict):
    """Return the sets of first (row) and second (column) pair-key elements."""
    row_keys = {pair[0] for pair in pair_dict}
    column_keys = {pair[1] for pair in pair_dict}
    return row_keys, column_keys
def array_from_pair_dictionary(
        pair_dict,
        array_fn,
        dtype="float32",
        square_result=False):
    """
    Convert a dictionary whose keys are pairs (k1, k2) into a sparse
    or incomplete array.

    Parameters
    ----------
    pair_dict : dict
        Dictionary from pairs of keys to values.

    array_fn : function
        Takes shape and dtype as arguments, returns empty array.

    dtype : dtype
        NumPy dtype of result array

    square_result : bool
        Combine keys from rows and columns

    Returns array and sorted lists of the row and column keys.
    """
    row_key_set = {pair[0] for pair in pair_dict}
    column_key_set = {pair[1] for pair in pair_dict}
    if square_result:
        # Rows and columns share one combined, sorted key space.
        row_key_list = column_key_list = sorted(row_key_set | column_key_set)
        row_key_indices = column_key_indices = {
            key: i for (i, key) in enumerate(row_key_list)
        }
    else:
        row_key_list = sorted(row_key_set)
        column_key_list = sorted(column_key_set)
        row_key_indices = {key: i for (i, key) in enumerate(row_key_list)}
        column_key_indices = {key: i for (i, key) in enumerate(column_key_list)}
    shape = (len(row_key_list), len(column_key_list))
    result = array_fn(shape, dtype)
    for (row_key, column_key), value in pair_dict.items():
        result[row_key_indices[row_key], column_key_indices[column_key]] = value
    return result, row_key_list, column_key_list
def sparse_dok_matrix_from_pair_dictionary(
        pair_dict,
        dtype="float32",
        square_result=False):
    """Like array_from_pair_dictionary, but absent entries stay 0 in a
    scipy dok_matrix instead of NaN in a dense array."""
    return array_from_pair_dictionary(
        pair_dict,
        array_fn=dok_matrix,
        dtype=dtype,
        square_result=square_result)
def dense_matrix_from_pair_dictionary(
        pair_dict,
        dtype="float32",
        square_result=False):
    """Like array_from_pair_dictionary, returning a dense array whose
    absent entries are NaN (marking them as "missing")."""
    return array_from_pair_dictionary(
        pair_dict,
        array_fn=dense_nan_matrix,
        dtype=dtype,
        square_result=square_result)
def transpose_nested_dictionary(nested_dict):
    """
    Given a nested dictionary from k1 -> k2 -> value
    transpose its outer and inner keys so it maps
    k2 -> k1 -> value.
    """
    transposed = defaultdict(dict)
    for outer_key, inner_dict in nested_dict.items():
        for inner_key, value in inner_dict.items():
            transposed[inner_key][outer_key] = value
    return transposed
def reverse_lookup_from_nested_dict(values_dict):
    """
    Create reverse-lookup dictionary mapping each row key to a list of
    pairs: [(column_key, value), ...]

    Parameters
    ----------
    values_dict : dict
        column_key -> row_key -> value

    Returns dictionary mapping row_key -> [(column_key, value)]
    """
    reverse_lookup = defaultdict(list)
    for column_key, column_dict in values_dict.items():
        for row_key, value in column_dict.items():
            entry = (column_key, value)
            reverse_lookup[row_key].append(entry)
    return reverse_lookup
| 9,665 | 28.379939 | 80 | py |
fancyimpute | fancyimpute-master/fancyimpute/iterative_svd.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_array
import numpy as np
from .solver import Solver
from .common import masked_mae
F32PREC = np.finfo(np.float32).eps
class IterativeSVD(Solver):
    """Impute missing entries by repeatedly fitting a truncated SVD to the
    current fill and copying the low-rank reconstruction back into the
    missing positions (svdImpute-style), until the change on the missing
    entries drops below ``convergence_threshold``.
    """

    def __init__(
            self,
            rank=10,
            convergence_threshold=0.00001,
            max_iters=200,
            gradual_rank_increase=True,
            svd_algorithm="arpack",
            init_fill_method="zero",
            min_value=None,
            max_value=None,
            verbose=True):
        """
        rank : int
            Target rank of the truncated SVD approximation.

        convergence_threshold : float
            Relative sum-of-squared-change on missing entries below which
            iteration stops.

        max_iters : int

        gradual_rank_increase : bool
            Start at rank 1 and double each iteration up to ``rank``.

        svd_algorithm : str
            Passed to sklearn's TruncatedSVD.

        init_fill_method : str
            Initial fill used by the Solver base class before iterating.

        min_value, max_value : float
            Clipping bounds applied to each reconstruction.

        verbose : bool
        """
        Solver.__init__(
            self,
            fill_method=init_fill_method,
            min_value=min_value,
            max_value=max_value)
        self.rank = rank
        self.max_iters = max_iters
        self.svd_algorithm = svd_algorithm
        self.convergence_threshold = convergence_threshold
        self.gradual_rank_increase = gradual_rank_increase
        self.verbose = verbose

    def _converged(self, X_old, X_new, missing_mask):
        """Relative squared change on the missing entries vs. threshold."""
        # check for convergence
        old_missing_values = X_old[missing_mask]
        new_missing_values = X_new[missing_mask]
        difference = old_missing_values - new_missing_values
        ssd = np.sum(difference ** 2)
        old_norm_squared = (old_missing_values ** 2).sum()
        # edge cases
        if old_norm_squared == 0 or \
                (old_norm_squared < F32PREC and ssd > F32PREC):
            # Previous fill was (near) zero: ratio undefined/unstable, so
            # report "not converged" and keep iterating.
            return False
        else:
            return (ssd / old_norm_squared) < self.convergence_threshold

    def solve(self, X, missing_mask):
        X = check_array(X, force_all_finite=False)
        observed_mask = ~missing_mask
        # NOTE(review): X_filled aliases X (no copy), so the array passed
        # in is updated in place below — presumably the Solver base class
        # hands in its own filled copy; confirm before relying on X here.
        X_filled = X
        for i in range(self.max_iters):
            # deviation from original svdImpute algorithm:
            # gradually increase the rank of our approximation
            if self.gradual_rank_increase:
                curr_rank = min(2 ** i, self.rank)
            else:
                curr_rank = self.rank
            tsvd = TruncatedSVD(curr_rank, algorithm=self.svd_algorithm)
            X_reduced = tsvd.fit_transform(X_filled)
            X_reconstructed = tsvd.inverse_transform(X_reduced)
            X_reconstructed = self.clip(X_reconstructed)
            # MAE on observed entries only; used for progress reporting.
            mae = masked_mae(
                X_true=X,
                X_pred=X_reconstructed,
                mask=observed_mask)
            if self.verbose:
                print(
                    "[IterativeSVD] Iter %d: observed MAE=%0.6f" % (
                        i + 1, mae))
            # Convergence is checked against the fill from the previous
            # iteration, *before* overwriting the missing entries.
            converged = self._converged(
                X_old=X_filled,
                X_new=X_reconstructed,
                missing_mask=missing_mask)
            X_filled[missing_mask] = X_reconstructed[missing_mask]
            if converged:
                break
        return X_filled
| 3,371 | 35.258065 | 74 | py |
fancyimpute | fancyimpute-master/fancyimpute/knn.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from knnimpute import knn_impute_few_observed, knn_impute_with_argpartition
from sklearn.utils import check_array
from .solver import Solver
class KNN(Solver):
    """
    k-Nearest Neighbors imputation for arrays with missing data.
    Works only on dense arrays with at most a few thousand rows.

    Assumes that each feature has been centered and rescaled to have
    mean 0 and variance 1.

    Inspired by the implementation of kNNImpute from the R package
    imputation.
    See here:
    https://www.rdocumentation.org/packages/imputation/versions/2.0.3/topics/kNNImpute
    """

    def __init__(
            self,
            k=5,
            orientation="rows",
            use_argpartition=False,
            print_interval=100,
            min_value=None,
            max_value=None,
            normalizer=None,
            verbose=True):
        """
        Parameters
        ----------
        k : int
            Number of neighboring rows to use for imputation.

        orientation : str
            Which axis of the input matrix should be treated as a sample
            (default is "rows" but can also be "columns")

        use_argpartition : bool
            Use a more naive implementation of kNN imputation which calls
            numpy.argpartition for each row/column pair. May give NaN if fewer
            than k neighbors are available for a missing value.

        print_interval : int
            How often (in rows) the underlying impute function logs progress.

        min_value : float
            Minimum possible imputed value

        max_value : float
            Maximum possible imputed value

        normalizer : object
            Any object (such as BiScaler) with fit() and transform() methods

        verbose : bool
        """
        Solver.__init__(
            self,
            min_value=min_value,
            max_value=max_value,
            normalizer=normalizer)
        self.k = k
        self.verbose = verbose
        self.orientation = orientation
        self.print_interval = print_interval
        # Select the kNN implementation once, at construction time.
        if use_argpartition:
            self._impute_fn = knn_impute_with_argpartition
        else:
            self._impute_fn = knn_impute_few_observed

    def solve(self, X, missing_mask):
        X = check_array(X, force_all_finite=False)
        # "columns" orientation is handled by transposing in and out.
        if self.orientation == "columns":
            X = X.T
            missing_mask = missing_mask.T
        elif self.orientation != "rows":
            raise ValueError(
                "Orientation must be either 'rows' or 'columns', got: %s" % (
                    self.orientation,))
        X_imputed = self._impute_fn(
            X=X,
            missing_mask=missing_mask,
            k=self.k,
            verbose=self.verbose,
            print_interval=self.print_interval)
        # The impute function may leave NaNs when a value has no usable
        # neighbors; fall back to the pre-filled values already in X.
        failed_to_impute = np.isnan(X_imputed)
        n_missing_after_imputation = failed_to_impute.sum()
        if n_missing_after_imputation != 0:
            if self.verbose:
                print("[KNN] Warning: %d/%d still missing after imputation, replacing with 0" % (
                    n_missing_after_imputation,
                    X.shape[0] * X.shape[1]))
            # NOTE(review): X here appears to be the matrix already filled
            # by the Solver base class (default fill "zero"), which is why
            # the message says "replacing with 0" — confirm against Solver.
            X_imputed[failed_to_impute] = X[failed_to_impute]
        if self.orientation == "columns":
            X_imputed = X_imputed.T
        return X_imputed
| 3,824 | 31.415254 | 97 | py |
fancyimpute | fancyimpute-master/fancyimpute/matrix_factorization.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sklearn.utils import check_array
from .solver import Solver
from .common import masked_mae
class MatrixFactorization(Solver):
    """Biased matrix factorization trained with SGD on the observed
    entries; missing entries are predicted from the learned factors."""

    def __init__(
            self,
            rank=40,
            learning_rate=0.01,
            max_iters=50,
            shrinkage_value=0,
            min_value=None,
            max_value=None,
            verbose=True,
    ):
        """
        Train a matrix factorization model to predict empty
        entries in a matrix. Mostly copied (with permission) from:
        https://blog.insightdatascience.com/explicit-matrix-factorization-als-sgd-and-all-that-jazz-b00e4d9b21ea

        Params
        ======
        rank : (int)
            Number of latent factors to use in matrix
            factorization model

        learning_rate : (float)
            Learning rate for optimizer

        max_iters : (int)
            Number of max_iters to train for

        shrinkage_value : (float)
            Regularization term for sgd penalty

        min_value : float
            Smallest possible imputed value

        max_value : float
            Largest possible imputed value

        verbose : (bool)
            Whether or not to printout training progress
        """
        Solver.__init__(self, min_value=min_value, max_value=max_value)
        self.rank = rank
        self.learning_rate = learning_rate
        self.max_iters = max_iters
        self.shrinkage_value = shrinkage_value
        self._v = verbose

    def solve(self, X, missing_mask):
        """ Train model for max_iters iterations from scratch."""
        X = check_array(X, force_all_finite=False)
        (n_samples, n_features) = X.shape
        observed_mask = ~missing_mask
        training_indices = list(zip(*np.where(observed_mask)))
        # Random factor initialization; biases start at zero and the
        # global bias is the mean of the observed entries.
        self.user_vecs = np.random.normal(scale=1.0 / self.rank, size=(n_samples, self.rank))
        self.item_vecs = np.random.normal(scale=1.0 / self.rank, size=(n_features, self.rank))
        self.user_bias = np.zeros(n_samples)
        self.item_bias = np.zeros(n_features)
        self.global_bias = np.mean(X[observed_mask])
        for i in range(self.max_iters):
            # to do: early stopping
            if (i + 1) % 10 == 0 and self._v:
                X_reconstruction = self.clip(self.predict_all())
                mae = masked_mae(X_true=X, X_pred=X_reconstruction, mask=observed_mask)
                print("[MatrixFactorization] Iter %d: observed MAE=%0.6f rank=%d" % (i + 1, mae, self.rank))
            # Shuffle the observed entries each epoch before SGD.
            np.random.shuffle(training_indices)
            self.sgd(X, training_indices)
            # (dead "i += 1" removed: the loop variable is reassigned by
            # range() on every iteration, so incrementing it did nothing)
        X_filled = X.copy()
        X_filled[missing_mask] = self.clip(self.predict_all()[missing_mask])
        return X_filled

    def sgd(self, X, training_indices):
        """One SGD epoch over the given (row, column) observed entries."""
        # to do: batch learning
        for (u, i) in training_indices:
            prediction = self.predict(u, i)
            e = X[u, i] - prediction  # error

            # Update biases
            self.user_bias[u] += self.learning_rate * (e - self.shrinkage_value * self.user_bias[u])
            self.item_bias[i] += self.learning_rate * (e - self.shrinkage_value * self.item_bias[i])

            # Update latent factors
            self.user_vecs[u, :] += self.learning_rate * (
                e * self.item_vecs[i, :] - self.shrinkage_value * self.user_vecs[u, :]
            )
            self.item_vecs[i, :] += self.learning_rate * (
                e * self.user_vecs[u, :] - self.shrinkage_value * self.item_vecs[i, :]
            )

    def predict(self, u, i):
        """ Single user and item prediction."""
        prediction = self.global_bias + self.user_bias[u] + self.item_bias[i]
        prediction += self.user_vecs[u, :].dot(self.item_vecs[i, :].T)
        return prediction

    def predict_all(self):
        """ Predict ratings for every user and item."""
        predictions = self.user_vecs.dot(self.item_vecs.T)
        predictions += self.global_bias + self.user_bias[:, np.newaxis] + self.item_bias[np.newaxis, :]
        return predictions
| 4,625 | 35.714286 | 112 | py |
fancyimpute | fancyimpute-master/fancyimpute/nuclear_norm_minimization.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cvxpy
from .solver import Solver
from sklearn.utils import check_array
class NuclearNormMinimization(Solver):
    """
    Simple implementation of "Exact Matrix Completion via Convex Optimization"
    by Emmanuel Candes and Benjamin Recht using cvxpy.

    Minimizes the nuclear norm of the completed matrix subject to staying
    within ``error_tolerance`` of the observed entries.
    """

    def __init__(
            self,
            require_symmetric_solution=False,
            min_value=None,
            max_value=None,
            error_tolerance=0.0001,
            max_iters=50000,
            verbose=True):
        """
        Parameters
        ----------
        require_symmetric_solution : bool
            Add symmetry constraint to convex problem

        min_value : float
            Smallest possible imputed value

        max_value : float
            Largest possible imputed value

        error_tolerance : float
            Degree of error allowed on reconstructed values. If omitted then
            defaults to 0.0001

        max_iters : int
            Maximum number of iterations for the convex solver

        verbose : bool
            Print debug info
        """
        Solver.__init__(
            self,
            min_value=min_value,
            max_value=max_value)
        self.require_symmetric_solution = require_symmetric_solution
        self.error_tolerance = error_tolerance
        self.max_iters = max_iters
        self.verbose = verbose

    def _constraints(self, X, missing_mask, S, error_tolerance):
        """
        Build the cvxpy constraint list for the completion problem.

        Parameters
        ----------
        X : np.array
            Data matrix with missing values filled in

        missing_mask : np.array
            Boolean array indicating where missing values were

        S : cvxpy.Variable
            Representation of solution variable
        """
        ok_mask = ~missing_mask
        # Compare the solution to the data only at observed positions;
        # multiplying by the mask zeroes out the missing entries.
        masked_X = cvxpy.multiply(ok_mask, X)
        masked_S = cvxpy.multiply(ok_mask, S)
        abs_diff = cvxpy.abs(masked_S - masked_X)
        close_to_data = abs_diff <= error_tolerance
        constraints = [close_to_data]
        if self.require_symmetric_solution:
            constraints.append(S == S.T)
        if self.min_value is not None:
            constraints.append(S >= self.min_value)
        if self.max_value is not None:
            constraints.append(S <= self.max_value)
        return constraints

    def _create_objective(self, m, n):
        """
        Parameters
        ----------
        m, n : int
            Dimensions of the solution matrix

        Returns the objective function and a variable representing the
        solution to the convex optimization problem.
        """
        # S is the completed matrix
        shape = (m, n)
        S = cvxpy.Variable(shape, name="S")
        # Nuclear norm (sum of singular values) is the convex surrogate
        # for matrix rank used throughout this formulation.
        norm = cvxpy.norm(S, "nuc")
        objective = cvxpy.Minimize(norm)
        return S, objective

    def solve(self, X, missing_mask):
        X = check_array(X, force_all_finite=False)
        m, n = X.shape
        S, objective = self._create_objective(m, n)
        constraints = self._constraints(
            X=X,
            missing_mask=missing_mask,
            S=S,
            error_tolerance=self.error_tolerance)
        problem = cvxpy.Problem(objective, constraints)
        problem.solve(
            verbose=self.verbose,
            solver=cvxpy.CVXOPT,
            max_iters=self.max_iters,
            # use_indirect, see: https://github.com/cvxgrp/cvxpy/issues/547
            use_indirect=False)
        return S.value
| 3,999 | 30.007752 | 78 | py |