os.environ["WANDB_BASE_URL"] = "https://api.fairwandb.ai"
os.environ["WANDB_API_KEY"] = "092a14187f6f01d8d2df67e8145ed4b16ba8bc9d"
num_levels = 1
level_sampler_args = dict(
    num_actors=args.num_processes,
    strategy=args.level_replay_strategy,
)
envs, level_sampler = make_dqn_lr_venv(
    num_envs=args.num_processes,
    env_name=args.env_name,
    seeds=seeds,
    device=args.device,
    num_levels=num_levels,
    start_level=args.start_level,
    no_ret_normalization=args.no_ret_normalization,
    distribution_mode=args.distribution_mode,
    paint_vel_info=args.paint_vel_info,
    use_sequential_levels=args.use_sequential_levels,
    level_sampler_args=level_sampler_args,
    attach_task_id=args.attach_task_id,
)
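# `level_sampler` controls which training seeds/levels the vectorized env
# resets to, according to `args.level_replay_strategy` (a level-replay scheme
# in the spirit of Prioritized Level Replay; the sampler API is project-specific).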
if args.atc:
    args.drq = True
    agent = ATCAgent(args, envs)
else:
    agent = DQNAgent(args, envs)

replay_buffer = make_buffer(args, envs)

level_seeds = torch.zeros(args.num_processes)
if level_sampler:
    state, level_seeds = envs.reset()
else:
    state = envs.reset()
level_seeds = level_seeds.unsqueeze(-1)  # shape: (num_processes, 1)
if args.autodrq:
    rollouts = RolloutStorage(256, args.num_processes, envs.observation_space.shape, envs.action_space)
    rollouts.obs[0].copy_(state)
    rollouts.to(args.device)
# Per-seed bookkeeping: value estimates, observed returns, and their gap.
estimates = [0 for _ in range(args.num_train_seeds)]
returns = [0 for _ in range(args.num_train_seeds)]
gaps = [0 for _ in range(args.num_train_seeds)]
episode_reward = 0

# Per-actor deques buffering the last `multi_step` transitions for n-step returns.
state_deque: List[deque] = [deque(maxlen=args.multi_step) for _ in range(args.num_processes)]
reward_deque: List[deque] = [deque(maxlen=args.multi_step) for _ in range(args.num_processes)]
action_deque: List[deque] = [deque(maxlen=args.multi_step) for _ in range(args.num_processes)]
expect_new_seed: List[bool] = [False for _ in range(args.num_processes)]
reward_stats_deque: List[deque] = [deque(maxlen=500) for _ in range(args.num_processes)]
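# Once a deque holds `multi_step` entries, an n-step target can be assembled
# from it. A minimal sketch, assuming the usual discounted n-step return and a
# discount factor `args.gamma` (not shown in this excerpt):
#     n_step_reward = sum(args.gamma ** i * r for i, r in enumerate(reward_deque[k]))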
num_steps = int(args.T_max // args.num_processes)

epsilon_start = 1.0
epsilon_final = args.end_eps
epsilon_decay = args.eps_decay_period

def epsilon(t):
    # Exponential decay from epsilon_start toward epsilon_final, measured from
    # the end of the random-action warmup (`args.start_timesteps`).
    return epsilon_final + (epsilon_start - epsilon_final) * np.exp(
        -1.0 * (t - args.start_timesteps) / epsilon_decay
    )
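# Illustrative numbers (not from the original config): with end_eps=0.05 and
# eps_decay_period=10_000,
#     epsilon(args.start_timesteps)          == 1.0
#     epsilon(args.start_timesteps + 10_000) ~= 0.05 + 0.95 * np.exp(-1) ~= 0.40
# i.e. the gap to end_eps shrinks by a factor of e every eps_decay_period steps.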
start_time = time.time()
curr_index = 0
#### Log-uniform parameters ####
# One exploration coefficient per actor, spaced geometrically between
# ucb_c * base and ucb_c * base ** (1 + exp).
loguniform_decay = args.ucb_c * args.diff_eps_schedule_base ** (
    1 + np.arange(args.num_processes) / (args.num_processes - 1) * args.diff_eps_schedule_exp
)
loguniform_decay = torch.from_numpy(loguniform_decay).to(args.device).unsqueeze(1)
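# For example (hypothetical values): num_processes=8, diff_eps_schedule_base=0.4,
# diff_eps_schedule_exp=7 give exponent 1 + i/7 * 7 = 1 + i for actor i, so the
# coefficients ucb_c * 0.4 ** (1 + i) fall geometrically across actors, much
# like the per-actor epsilon schedules used in Ape-X.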
#### epsilon-z parameters ####
n = np.zeros(args.num_processes)
omega = np.zeros(args.num_processes)
# Power-law (zeta) distribution over action-repeat durations.
ez_prob = 1 / np.arange(1, args.eps_z_n + 1) ** args.eps_z_mu
ez_prob /= np.sum(ez_prob)
ez_n = np.arange(1, args.eps_z_n + 1)
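# This appears to implement temporally-extended epsilon-greedy ("ez-greedy"):
# with probability epsilon, an action is held for a duration n drawn from the
# power-law distribution above, p(n) ∝ n**(-eps_z_mu). A sketch of the draw
# (illustrative, not part of the original excerpt):
#     duration = np.random.choice(ez_n, p=ez_prob)
# e.g. eps_z_mu = 2 draws durations 1, 2, 3 with weights 1, 1/4, 1/9.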
for t in range(num_steps):
    if t < args.start_timesteps:
        # Warmup phase: act uniformly at random to seed the replay buffer.
        action = (
            torch.LongTensor([envs.action_space.sample() for _ in range(args.num_processes)])
            .reshape(-1, 1)
            .to(args.device)
        )
        value = agent.get_value(state)
    elif args.explore_strat == "qrdqn_ucb":
        # UCB-style action selection over the quantile network's estimates:
        # exploit the mean and add a decaying confidence bonus on the variance.
        _, mean, var, upper_var = agent.get_quantile(state)
        decay = args.ucb_c * np.sqrt(np.log(t + 1) / (t + 1))
        value = mean + decay * var
        action = value.argmax(1).reshape(-1, 1)
        if t % 500 == 0:
            stats = {
                "ucb / factor": decay,