text
stringlengths
1
93.6k
trainer.eval_classifier(test_loader, "test", 0)
def main(args):
    """Workflow entry point: build the run config from *args*, then train.

    Relies on the project-level ``setup`` and ``train`` helpers imported
    elsewhere in this file.
    """
    run_cfg = setup(args)
    train(run_cfg, args)
if __name__ == '__main__':
    # Parse CLI flags with the project's default parser and hand off to main().
    main(default_argument_parser().parse_args())
# <FILESEP>
import os
import argparse
import pandas as pd
import numpy as np
import config
from clip.clip_rewarded_ppo import CLIPRewardedPPO
# Command-line interface for evaluating a trained CARLA agent.
# Fixes: help strings now match the actual defaults (localhost / 2020), and the
# --town help no longer repeats the --density copy-paste text.
parser = argparse.ArgumentParser(description="Eval a CARLA agent")
parser.add_argument("--host", default="localhost", type=str, help="IP of the host server (default: localhost)")
parser.add_argument("--port", default=2020, type=int, help="TCP port to listen to (default: 2020)")
parser.add_argument("--model", type=str, default="./model_400000_steps.zip", help="Path to a model evaluate")
# NOTE(review): action="store_false" makes these default to True and the flag
# *disables* the behavior, which reads inverted against the flag names and help
# text ("--no_render" True means rendering is on). Left as-is to preserve the
# CLI contract — confirm intended semantics with the callers of args["no_render"]
# and args["no_record_video"].
parser.add_argument("--no_render", action="store_false", help="If True, render the environment")
parser.add_argument("--fps", type=int, default=15, help="FPS to render the environment")
parser.add_argument("--no_record_video", action="store_false", help="If True, record video of the evaluation")
parser.add_argument("--config", type=str, default="vlm_rl", help="Config to use (default: vlm_rl)")
parser.add_argument("--seed", type=int, default=101, help="random seed")
parser.add_argument("--device", type=str, default="cuda:0", help="cpu, cuda:0, cuda:1, cuda:2")
parser.add_argument("--density", choices=['empty', 'regular', 'dense'], default="regular",
                    help="different traffic densities")
parser.add_argument("--town", choices=['Town01', 'Town02', 'Town03', 'Town04', 'Town05'], default="Town02",
                    help="CARLA town/map to evaluate in")
args = vars(parser.parse_args())
# Load the named config preset, then apply the CLI overrides for seed/device.
CONFIG = config.set_config(args["config"])
CONFIG.seed = args["seed"]
CONFIG.algorithm_params.device = args["device"]
from stable_baselines3 import PPO, DDPG, SAC
from clip.clip_rewarded_sac import CLIPRewardedSAC
from utils import VideoRecorder, parse_wrapper_class
from carla_env.state_commons import create_encode_state_fn
from carla_env.rewards import reward_functions
from carla_env.wrappers import vector, get_displacement_vector
from carla_env.envs.carla_route_env import CarlaRouteEnv
from eval_plots import plot_eval, summary_eval
def convert_state(state):
    """Convert a single environment observation into batched model input.

    The segmentation camera image is moved from HWC to CHW layout, and every
    entry ('seg_camera', 'waypoints', 'vehicle_measures') gains a leading
    batch axis of size 1 by wrapping it in a one-element array.
    """
    seg_chw = np.transpose(state['seg_camera'], (2, 0, 1))
    return {
        'seg_camera': np.array([seg_chw]),
        'waypoints': np.array([state['waypoints']]),
        'vehicle_measures': np.array([state['vehicle_measures']]),
    }
def run_eval(env, model, model_path=None, record_video=False, eval_suffix=''):
model_name = os.path.basename(model_path)
log_path = os.path.join(os.path.dirname(model_path), 'eval{}'.format(eval_suffix))
os.makedirs(log_path, exist_ok=True)
video_path = os.path.join(log_path, model_name.replace(".zip", "_eval.avi"))
csv_path = os.path.join(log_path, model_name.replace(".zip", "_eval.csv"))
model_id = f"{model_path.split('/')[-2]}-{model_name.split('_')[-2]}"
state = env.reset()
columns = ["model_id", "episode", "step", "throttle", "steer", "vehicle_location_x", "vehicle_location_y",
"reward", "distance", "speed", "center_dev", "angle_next_waypoint", "waypoint_x", "waypoint_y",
"route_x", "route_y", "routes_completed", "collision_speed", "collision_interval", "CPS", "CPM"
]
df = pd.DataFrame(columns=columns)
# Init video recording
if record_video:
rendered_frame = env.render(mode="rgb_array")
print("Recording video to {} ({}x{}x{}@{}fps)".format(video_path, *rendered_frame.shape,
int(env.fps)))
video_recorder = VideoRecorder(video_path,
frame_size=rendered_frame.shape,
fps=env.fps)
video_recorder.add_frame(rendered_frame)
else:
video_recorder = None
episode_idx = 0
# While non-terminal state
print("Episode ", episode_idx)
saved_route = False
while episode_idx < 10:
env.extra_info.append("Evaluation")
action, _states = model.predict(state, deterministic=True)