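        # Optional "advanced test": sweep the exploration coefficient over
        # exploration_coeff_n multiples of exploration_coeff_mult, evaluate the
        # agent on num_test_seeds held-out levels at each setting, and log the
        # mean return for every coefficient to wandb in a single step.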
        if args.advanced_test:
            advanced_stats = {}
            all_exp_c = np.arange(args.exploration_coeff_n) * args.exploration_coeff_mult
            for exp_c in all_exp_c:
                test_reward, train_td_loss = eval_policy(
                    args, agent, args.num_test_seeds, print_score=False,
                    advanced_test=True, exploration_coeff=exp_c)
                advanced_stats['Advanced explore / {}'.format(exp_c)] = np.mean(test_reward)
            wandb.log(advanced_stats, step=t * args.num_processes)
        test_ppo_normalised_reward = ppo_normalise_reward(mean_test_rewards, args.env_name)
        train_ppo_normalised_reward = ppo_normalise_reward(mean_train_rewards, args.env_name)
        test_min_max_normalised_reward = min_max_normalise_reward(mean_test_rewards, args.env_name)
        train_min_max_normalised_reward = min_max_normalise_reward(mean_train_rewards, args.env_name)
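        # Periodic wandb logging: headline train/test returns, their normalised
        # variants, throughput, and summary statistics (mean/std/skew/kurtosis)
        # of the recent rewards held in reward_stats_deque.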
        if t % 1 == 0:  # log every iteration; raise the modulus to thin out logging
            all_reward = np.array(reward_stats_deque).reshape(-1)
            # table = wandb.Table(data=[[r] for r in all_reward], columns=["reward"])
            reward_mean = np.mean(all_reward)
            reward_std = np.std(all_reward)
            reward_skew = skew(all_reward)
            reward_kurtosis = kurtosis(all_reward)
            wandb.log(
                {
                    "Test / Evaluation Returns": mean_test_rewards,
                    "Train / Evaluation Returns": mean_train_rewards,
                    "Generalization Gap": mean_train_rewards - mean_test_rewards,
                    # note: the plain "(normalised)" keys duplicate the PPO-normalised values
                    "Test / Evaluation Returns (normalised)": test_ppo_normalised_reward,
                    "Train / Evaluation Returns (normalised)": train_ppo_normalised_reward,
                    "Test / Evaluation Returns (ppo normalised)": test_ppo_normalised_reward,
                    "Train / Evaluation Returns (ppo normalised)": train_ppo_normalised_reward,
                    "Test / Evaluation Returns (min-max normalised)": test_min_max_normalised_reward,
                    "Train / Evaluation Returns (min-max normalised)": train_min_max_normalised_reward,
                    "Time per step": (time.time() - start_time) / t,
                    "Reward / mean": reward_mean,
                    "Reward / std": reward_std,
                    "Reward / skew": reward_skew,
                    "Reward / kurtosis": reward_kurtosis,
                },
                step=t * args.num_processes,
            )
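        # Mirror the headline metrics to the local logger as well.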
logger.logkv("minutes elapse", (time.time() - start_time) / 60)
logger.logkv("time / step", (time.time() - start_time) / t)
logger.logkv("train / total_num_steps", t * args.num_processes)
logger.logkv("train / mean_episode_reward", mean_train_rewards)
logger.logkv("test / mean_episode_reward", mean_test_rewards)
# logger.logkv("train/median_episode_reward", np.median(episode_rewards))
# logger.logkv("train/loss", np.mean(episode_loss))
# logger.logkv("test / average_reward", mean_test_rewards)
# logger.logkv("test/median_reward", np.median(rewards))
# logger.logkv("test/average_q_values", avg_Q)
# logger.logkv("time/epsilon", scheduler.get_value(T))
logger.dumpkvs()
        # if t % 1000 == 0 and t > 1:
        #     hist = np.array(reward_stats_deque).reshape(-1)
        #     hist = np.histogram(hist, density=True)
        #     hist = wandb.Histogram(np_histogram=hist, num_bins=100)
        #     wandb.log({'Reward / reward_histogram': hist}, step=t * args.num_processes)
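    # Training finished: run a final, larger evaluation on final_num_test_seeds
    # levels (optionally recording the final rollouts) and log raw and
    # PPO-normalised aggregate returns.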
print(f"\nLast update: Evaluating on {args.final_num_test_seeds} test levels...\n ")
final_eval_episode_rewards = eval_policy(
args, agent, args.final_num_test_seeds, record=args.record_final_eval
)
mean_final_eval_episode_rewards = np.mean(final_eval_episode_rewards)
median_final_eval_episide_rewards = np.median(final_eval_episode_rewards)
print("Mean Final Evaluation Rewards: ", mean_final_eval_episode_rewards)
print("Median Final Evaluation Rewards: ", median_final_eval_episide_rewards)
wandb.log(
{
"Mean Final Evaluation Rewards": mean_final_eval_episode_rewards,
"Median Final Evaluation Rewards": median_final_eval_episide_rewards,
"Mean Final Evaluation Rewards (normalised)": ppo_normalise_reward(
mean_final_eval_episode_rewards, args.env_name
),
"Median Final Evaluation Rewards (normalised)": ppo_normalise_reward(
median_final_eval_episide_rewards, args.env_name
),
}
)
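    # Optionally persist the Q-network weights together with the run
    # configuration, and upload the checkpoint to wandb.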
    if args.save_model:
        print(f"Saving model to {args.model_path}")
        if "models" not in os.listdir():
            os.mkdir("models")
        torch.save(
            {
                "model_state_dict": agent.Q.state_dict(),
                "args": vars(args),
            },
            args.model_path,
        )
        wandb.save(args.model_path)


def generate_seeds(num_seeds, base_seed=0):