"ucb / mean": torch.max(mean, 1)[0].mean().item(),
|
"ucb / upper var": torch.max(upper_var, 1)[0].mean().item(),
|
"ucb / var": torch.max(var, 1)[0].mean().item(),
|
"ucb / value": torch.max(value, 1)[0].mean().item()
|
}
|
# print(stats)
|
wandb.log(stats, step=t * args.num_processes)
|
elif args.qrdqn and args.qrdqn_bootstrap and not args.bootstrap_dqn:
    decay = args.ucb_c
    with torch.no_grad():
        # epistemic (between-head) and aleatoric (within-head) variance; see sketch 2 below
        mean, eps_var, ale_var = agent.get_bootstrapped_uncertainty(state)
        total_var = torch.sqrt(eps_var + ale_var)  # std of the combined variance
        eps_var, ale_var = torch.sqrt(eps_var), torch.sqrt(ale_var)  # convert variances to stds
        mean = mean.mean(axis=1)  # average the per-head means
        if args.thompson_sampling:
            # perturb the epistemic std with Gaussian noise instead of adding a fixed bonus
            eps_var = eps_var * torch.randn(eps_var.shape, device=eps_var.device)
        # value = mean + decay * eps_var * torch.randn(eps_var.shape, device=eps_var.device)
        # value = mean + decay * eps_var
        if args.diff_epsilon_schedule:
            # loguniform_decay: per-worker exploration coefficients defined elsewhere
            value = mean + loguniform_decay.expand(args.num_processes, mean.size(1)) * eps_var
        elif args.total_uncertainty:
            value = mean + decay * total_var
        elif args.ale_uncertainty:
            value = mean + decay * ale_var
        else:
            value = mean + decay * eps_var
        action = value.argmax(1).reshape(-1, 1)
    if t % 500 == 0:
        stats = {
            "ucb / mean": torch.max(mean, 1)[0].mean().item(),
            "ucb / eps uncertainty": torch.max(eps_var, 1)[0].mean().item(),
            "ucb / ale uncertainty": torch.max(ale_var, 1)[0].mean().item(),
            "ucb / value": torch.max(value, 1)[0].mean().item()
        }
        wandb.log(stats, step=t * args.num_processes)
elif args.qrdqn and args.qrdqn_bootstrap and args.bootstrap_dqn:
    if t % 30 == 0:
        # switch the active bootstrap head every 30 steps
        curr_index = np.random.randint(args.n_ensemble)
    with torch.no_grad():
        all_quantiles = agent.Q.single_quantile(state, curr_index)  # (B, atom, action); see sketch 3 below
        value = all_quantiles.mean(axis=1)
    action = value.argmax(1).reshape(-1, 1)
    if t % 500 == 0:
        stats = {
            "current_idx": curr_index,
            "ucb / value": torch.max(value, 1)[0].mean().item()
        }
        wandb.log(stats, step=t * args.num_processes)
elif args.bootstrap_dqn_ucb and args.bootstrap_dqn:
    # UCB over the bootstrap ensemble: mean and std of Q-values across heads (see sketch 4 below)
    mean, std = agent.get_bootstrap_dqn_values(state)
    decay = args.ucb_c
    value = mean + decay * std
    action = value.argmax(1).reshape(-1, 1)
    if t % 500 == 0:
        stats = {
            "ucb / factor": decay,
            "ucb / mean": torch.mean(mean).item(),
            "ucb / std": torch.mean(std).item()
        }
        wandb.log(stats, step=t * args.num_processes)
elif args.bootstrap_dqn:
    for i in range(args.num_processes):
        if len(action_deque[i]) == 0:
            # print(f'sampling new head for {i}')
            # an empty action deque (presumably a segment/episode boundary) triggers
            # resampling of the bootstrap head that drives worker i
            agent.current_bootstrap_head[i] = np.random.randint(args.n_ensemble)
    action, value = agent.select_action(state)
    cur_epsilon = epsilon(t)  # annealed schedule defined elsewhere; see sketch 5 below
    for i in range(args.num_processes):
        if np.random.uniform() < cur_epsilon:
            action[i] = torch.LongTensor([envs.action_space.sample()]).to(args.device)
    if t % 500 == 0:
        wandb.log({"Current Epsilon": cur_epsilon}, step=t * args.num_processes)
elif args.diff_epsilon_schedule:
    # per-worker epsilons, spread geometrically across workers (see sketch 6 below for concrete values)
    cur_epsilon = args.diff_eps_schedule_base ** (1 + np.arange(args.num_processes) / (args.num_processes - 1) * args.diff_eps_schedule_exp)
    action, value = agent.select_action(state)
    for i in range(args.num_processes):
        if np.random.uniform() < cur_epsilon[i]:
            action[i] = torch.LongTensor([envs.action_space.sample()]).to(args.device)
elif args.eps_z:
    cur_epsilon = epsilon(t)
    action, value = agent.select_action(state)
    for i in range(args.num_processes):
        if n[i] == 0:
            # not currently repeating: with probability epsilon, sample a repeat duration n
            # from (ez_n, ez_prob) and a random action omega to repeat (see sketch 7 below)
            if np.random.uniform() < cur_epsilon:
                n[i] = np.random.choice(ez_n, 1, p=ez_prob)
                omega[i] = envs.action_space.sample()
                action[i] = torch.LongTensor([omega[i]]).to(args.device)
        else:
            # keep repeating the stored random action until the duration runs out
            action[i] = torch.LongTensor([omega[i]]).to(args.device)
            n[i] = n[i] - 1
elif args.noisy_layers:
    if t % args.train_freq == 0:
        # resample the parameter noise at the same frequency as training (see sketch 8 below)
        agent.Q.reset_noise()
    action, value = agent.select_action(state)
else:
    cur_epsilon = epsilon(t)
    action, value = agent.select_action(state)
    for i in range(args.num_processes):
        if np.random.uniform() < cur_epsilon:
            action[i] = torch.LongTensor([envs.action_space.sample()]).to(args.device)
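
The branch whose logging dict opens this excerpt is truncated above, but the logged keys (mean, var, upper var, value) point to a UCB-style rule built from the quantile distribution of a QR-DQN. Sketch 1 is a minimal reconstruction under that assumption; the function name, the tensor shapes, and the use of the upper half of the atoms for upper_var are guesses rather than the repository's code.

# --- sketch 1: UCB-style value from QR-DQN quantiles (illustrative) ---
import torch

def quantile_ucb_value(quantiles: torch.Tensor, c: float):
    """quantiles: (batch, n_atoms, n_actions) -> (value, mean, var, upper_var), each (batch, n_actions)."""
    mean = quantiles.mean(dim=1)                       # expected return per action
    var = quantiles.var(dim=1, unbiased=False)         # spread over all atoms
    upper = quantiles[:, quantiles.size(1) // 2:, :]   # optimistic (upper) half of the atoms
    upper_var = upper.var(dim=1, unbiased=False)
    value = mean + c * torch.sqrt(upper_var)           # UCB-style exploration bonus
    return value, mean, var, upper_var

# usage (hypothetical): value, mean, var, upper_var = quantile_ucb_value(all_quantiles, args.ucb_c)
#                       action = value.argmax(1).reshape(-1, 1)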
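
agent.get_bootstrapped_uncertainty is not shown in this excerpt. Sketch 2 gives one plausible decomposition that matches how that branch uses its outputs (a per-head mean that is later averaged over heads, plus epistemic and aleatoric variances per action); the shapes and the function body are assumptions.

# --- sketch 2: mean / epistemic / aleatoric decomposition for a bootstrapped QR-DQN (illustrative) ---
import torch

def bootstrapped_uncertainty(quantiles: torch.Tensor):
    """quantiles: (batch, n_heads, n_atoms, n_actions) from every ensemble head.

    Returns the per-head means (batch, n_heads, n_actions), the epistemic variance
    (disagreement between heads) and the aleatoric variance (average within-head
    quantile spread), both of shape (batch, n_actions).
    """
    per_head_mean = quantiles.mean(dim=2)
    eps_var = per_head_mean.var(dim=1, unbiased=False)
    ale_var = quantiles.var(dim=2, unbiased=False).mean(dim=1)
    return per_head_mean, eps_var, ale_var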
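
agent.Q.single_quantile is likewise not shown. Sketch 3 is a hypothetical ensemble module whose single_quantile returns the (B, atom, action) tensor that branch expects; the layer sizes and overall structure are placeholders, not the actual network.

# --- sketch 3: ensemble of quantile heads with a single_quantile accessor (illustrative) ---
import torch
import torch.nn as nn

class EnsembleQRDQN(nn.Module):
    def __init__(self, obs_dim: int, n_actions: int, n_atoms: int = 51, n_ensemble: int = 5):
        super().__init__()
        self.n_actions, self.n_atoms = n_actions, n_atoms
        self.torso = nn.Sequential(nn.Linear(obs_dim, 256), nn.ReLU())
        self.heads = nn.ModuleList([nn.Linear(256, n_atoms * n_actions) for _ in range(n_ensemble)])

    def single_quantile(self, x: torch.Tensor, k: int) -> torch.Tensor:
        """Quantile estimates of head k only, shaped (batch, n_atoms, n_actions)."""
        return self.heads[k](self.torso(x)).view(-1, self.n_atoms, self.n_actions)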
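
agent.get_bootstrap_dqn_values presumably reduces the bootstrap ensemble to a mean and a standard deviation per action, since the branch forms mean + ucb_c * std. Sketch 4 shows that reduction for an assumed (batch, n_heads, n_actions) input.

# --- sketch 4: mean and std of Q-values across bootstrap heads (illustrative) ---
import torch

def bootstrap_dqn_values(q_all_heads: torch.Tensor):
    """q_all_heads: (batch, n_heads, n_actions) -> mean, std, each (batch, n_actions)."""
    mean = q_all_heads.mean(dim=1)                  # ensemble average per action
    std = q_all_heads.std(dim=1, unbiased=False)    # head disagreement used as the UCB bonus
    return mean, std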
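
The epsilon(t) schedule used by the bootstrap-DQN and plain epsilon-greedy branches is defined elsewhere in the script. Sketch 5 is an illustrative linear-decay stand-in; the names and default hyper-parameters are not taken from the repository.

# --- sketch 5: an annealed epsilon schedule (illustrative) ---
def make_epsilon_schedule(eps_start: float = 1.0, eps_final: float = 0.01, decay_steps: int = 250_000):
    """Return a function t -> epsilon that decays linearly to eps_final and then stays there."""
    def epsilon(t: int) -> float:
        frac = min(t / decay_steps, 1.0)
        return eps_start + frac * (eps_final - eps_start)
    return epsilon

# usage (hypothetical): epsilon = make_epsilon_schedule(); cur_epsilon = epsilon(t)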
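
For the diff_epsilon_schedule branch, sketch 6 simply evaluates the per-worker formula with Ape-X-style values (base 0.4, exponent 7, 8 workers) to show how exploration is spread across workers; the script's actual defaults may differ.

# --- sketch 6: concrete values for the per-worker epsilon schedule ---
import numpy as np

base, exp, num_processes = 0.4, 7.0, 8  # assumed values, not the script's defaults
eps = base ** (1 + np.arange(num_processes) / (num_processes - 1) * exp)
print(eps)  # ~ [0.4, 0.16, 0.064, 0.0256, 0.0102, 0.0041, 0.0016, 0.0007]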
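
The ez_n / ez_prob arrays used by the eps_z branch are built elsewhere. Sketch 7 shows the usual construction for temporally-extended epsilon-greedy, a truncated zeta distribution over repeat durations; the truncation point and exponent here are assumptions.

# --- sketch 7: repeat-duration distribution for eps_z-greedy (illustrative) ---
import numpy as np

def make_ez_duration_distribution(max_n: int = 10_000, mu: float = 2.0):
    """Truncated zeta distribution over repeat durations n, as in temporally-extended
    epsilon-greedy (Dabney et al., 2020): P(n) proportional to n ** (-mu)."""
    ez_n = np.arange(1, max_n + 1)
    ez_prob = ez_n.astype(np.float64) ** (-mu)
    ez_prob /= ez_prob.sum()
    return ez_n, ez_prob

# usage (hypothetical): ez_n, ez_prob = make_ez_duration_distribution()
#                       duration = np.random.choice(ez_n, p=ez_prob)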
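
The noisy_layers branch only calls agent.Q.reset_noise(). Sketch 8 is a generic factorised-Gaussian NoisyLinear layer (Fortunato et al., 2018) to show what reset_noise typically resamples; it is not the repository's implementation.

# --- sketch 8: factorised-Gaussian noisy linear layer (illustrative) ---
import math
import torch
import torch.nn as nn
import torch.nn.functional as F

class NoisyLinear(nn.Module):
    def __init__(self, in_features: int, out_features: int, sigma0: float = 0.5):
        super().__init__()
        self.in_features, self.out_features = in_features, out_features
        self.weight_mu = nn.Parameter(torch.empty(out_features, in_features))
        self.weight_sigma = nn.Parameter(torch.empty(out_features, in_features))
        self.bias_mu = nn.Parameter(torch.empty(out_features))
        self.bias_sigma = nn.Parameter(torch.empty(out_features))
        self.register_buffer("weight_eps", torch.zeros(out_features, in_features))
        self.register_buffer("bias_eps", torch.zeros(out_features))
        bound = 1.0 / math.sqrt(in_features)
        nn.init.uniform_(self.weight_mu, -bound, bound)
        nn.init.uniform_(self.bias_mu, -bound, bound)
        nn.init.constant_(self.weight_sigma, sigma0 / math.sqrt(in_features))
        nn.init.constant_(self.bias_sigma, sigma0 / math.sqrt(in_features))
        self.reset_noise()

    @staticmethod
    def _f(x: torch.Tensor) -> torch.Tensor:
        # factorised-noise transform f(x) = sign(x) * sqrt(|x|)
        return x.sign() * x.abs().sqrt()

    def reset_noise(self) -> None:
        """Resample the factorised noise, as agent.Q.reset_noise() does above."""
        eps_in = self._f(torch.randn(self.in_features, device=self.weight_mu.device))
        eps_out = self._f(torch.randn(self.out_features, device=self.weight_mu.device))
        self.weight_eps.copy_(torch.outer(eps_out, eps_in))
        self.bias_eps.copy_(eps_out)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        weight = self.weight_mu + self.weight_sigma * self.weight_eps
        bias = self.bias_mu + self.bias_sigma * self.bias_eps
        return F.linear(x, weight, bias)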