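# Store the latest transition (observation, action, value estimate, reward, mask, level seed) in the rollout storage.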
rollouts.insert(next_state, action, value.unsqueeze(1), torch.Tensor(reward), masks, level_seeds)
state = next_state
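# Every 256 steps (when autodrq is enabled): bootstrap a value for the last observation,
# compute returns, refresh the replay buffer's UCB scores, and reset the rollout storage.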
if args.autodrq and (t + 1) % 256 == 0:
    with torch.no_grad():
        obs_id = rollouts.obs[-1]
        next_value = agent.get_value(obs_id).unsqueeze(1).detach()
    rollouts.compute_returns(next_value, args.gamma, args.gae_lambda)
    replay_buffer.update_ucb_values(rollouts)
    rollouts.after_update()
# Train agent after collecting sufficient data
if t % args.train_freq == 0 and t >= args.start_timesteps:
    for _ in range(args.opt_step_per_interaction):
        loss, grad_magnitude, weight = agent.train(replay_buffer)
        if t % 500 == 0:
            wandb.log(
                {
                    "Value Loss": loss,
                    "Gradient magnitude": grad_magnitude,
                },
                step=t * args.num_processes,
            )
            if args.qrdqn and args.qrdqn_bootstrap and not args.PER:
                wandb.log(
                    {"weights": torch.mean(weight).item()},
                    step=t * args.num_processes,
                )
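# Periodically log the effective rank of the Q-network.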
if t % 500 == 0:
    effective_rank = agent.Q.effective_rank()
    wandb.log({"Effective Rank of DQN": effective_rank}, step=t * args.num_processes)
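# Periodically log how often each training seed is sampled and how that correlates
# with the per-seed value errors (gaps) and empirical returns (returns).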
if (t + 1) % int((num_steps - 1) / 10) == 0:
    if args.track_seed_weights and not args.PER:
        count_data = [
            [seed, count] for seed, count in agent.seed_weights.items()
        ]
        total_weight = sum(agent.seed_weights.values())
        count_data = [[seed, count / total_weight] for seed, count in count_data]
        table = wandb.Table(data=count_data, columns=["Seed", "Weight"])
        wandb.log(
            {
                f"Seed Sampling Distribution at time {t}": wandb.plot.bar(
                    table, "Seed", "Weight", title="Sampling distribution of levels"
                )
            }
        )
        correlation1 = np.corrcoef(gaps, list(agent.seed_weights.values()))[0][1]
        correlation2 = np.corrcoef(returns, list(agent.seed_weights.values()))[0][1]
        wandb.log(
            {
                "Correlation between value error and number of samples": correlation1,
                "Correlation between empirical return and number of samples": correlation2,
            }
        )
    else:
        seed2weight = replay_buffer.weights_per_seed()
        weight_data = [
            [seed, weight] for seed, weight in seed2weight.items()
        ]
        correlation1 = np.corrcoef(gaps, list(seed2weight.values()))[0][1]
        correlation2 = np.corrcoef(returns, list(seed2weight.values()))[0][1]
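# Periodic evaluation on held-out test levels and on the training seeds.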
if t >= args.start_timesteps and t % args.eval_freq == 0:
    if args.record_td_error:
        # First call evaluates on held-out test levels; second call evaluates on the training seeds.
        test_reward, test_td_loss = eval_policy(args, agent, args.num_test_seeds, print_score=False)
        train_reward, train_td_loss = eval_policy(
            args,
            agent,
            args.num_test_seeds,
            start_level=0,
            num_levels=args.num_train_seeds,
            seeds=seeds,
            print_score=False,
        )
        mean_test_rewards = np.mean(test_reward)
        mean_train_rewards = np.mean(train_reward)
        wandb.log(
            {
                "Train / td loss": train_td_loss,
                "Test / td loss": test_td_loss,
                "td loss difference": test_td_loss - train_td_loss,
            },
            step=t * args.num_processes,
        )
    else:
        mean_test_rewards = np.mean(eval_policy(args, agent, args.num_test_seeds, print_score=False))
        mean_train_rewards = np.mean(
            eval_policy(
                args,
                agent,
                args.num_test_seeds,
                start_level=0,
                num_levels=args.num_train_seeds,
                seeds=seeds,
                print_score=False,
            )
        )