text
stringlengths 1
93.6k
|
|---|
n_action = action_deque[i][j]
|
replay_buffer.add(
|
n_state,
|
n_action,
|
next_state[i],
|
n_reward,
|
np.uint8(done[i]),
|
level_seeds[i],
|
)
|
expect_new_seed[i] = True
|
############################################################
|
state = next_state
|
for info in infos:
|
if "episode" in info.keys():
|
eval_episode_rewards.append(info["episode"]["r"])
|
if progressbar:
|
progressbar.update(1)
|
if record:
|
for video in eval_envs.get_videos():
|
wandb.log({"evaluation_behaviour": video})
|
eval_envs.close()
|
if progressbar:
|
progressbar.close()
|
avg_reward = sum(eval_episode_rewards) / len(eval_episode_rewards)
|
if print_score:
|
print("---------------------------------------")
|
print(f"Evaluation over {num_episodes} episodes: {avg_reward}")
|
print("---------------------------------------")
|
############################################################
|
if args.record_td_error:
|
with torch.no_grad():
|
n_batch = 2
|
loss = 0
|
for _ in range(n_batch):
|
_, batch_loss, _ = policy.loss(replay_buffer)
|
loss += batch_loss.item()
|
loss /= n_batch * args.batch_size
|
del replay_buffer
|
return eval_episode_rewards, loss
|
############################################################
|
return eval_episode_rewards
|
def multi_step_reward(rewards, gamma):
    """Return the discounted sum of *rewards* under discount factor *gamma*.

    The reward at index k is weighted by ``gamma ** k``; an empty
    sequence yields ``0.0``.
    """
    return sum((r * gamma ** k for k, r in enumerate(rewards)), 0.0)
|
def new_episode(value, estimates, level_seed, i, step):
    """Record the start-state value estimate for a freshly started episode.

    Stores ``value[i]`` (as a Python scalar) in *estimates* under
    *level_seed*, and logs the same scalar to wandb at *step*.
    """
    # Hoist the tensor->scalar conversion so it happens once.
    estimate = value[i].item()
    estimates[level_seed] = estimate
    wandb.log(
        {f"Start State Value Estimate for Level {level_seed}": estimate},
        step=step,
    )
|
def plot_level_returns(level_seeds, returns, estimates, gaps, episode_reward, i, step):
    """Log the empirical return for the level episode *i* was run on.

    Updates *returns* with the achieved episode reward and *gaps* with the
    difference between the achieved reward and the previously recorded
    start-state estimate, then logs the return to wandb at *step*.
    """
    seed = level_seeds[i][0].item()
    # Estimate must be read before any caller-side mutation; gap is
    # (empirical return) - (start-state value estimate) for this seed.
    gap = episode_reward - estimates[seed]
    returns[seed] = episode_reward
    gaps[seed] = gap
    wandb.log({f"Empirical Return for Level {seed}": episode_reward}, step=step)
|
if __name__ == "__main__":
    args = parser.parse_args()

    # Fix: the original unconditionally set the root logger to INFO and then
    # immediately re-set (or disabled) it in the branch below — the first
    # call was dead code and has been removed. Behavior is unchanged:
    # INFO-level logging with --verbose, otherwise logging fully disabled.
    if args.verbose:
        logging.getLogger().setLevel(logging.INFO)
    else:
        logging.disable(logging.CRITICAL)

    # Either replay a fixed seed set from disk or generate a fresh one.
    if args.seed_path:
        train_seeds = load_seeds(args.seed_path)
    else:
        train_seeds = generate_seeds(args.num_train_seeds, args.base_seed)

    train(args, train_seeds)
|
# <FILESEP>
|
from .categories import NodeCategories
|
from .core.partial_prompt import PartialPrompt
|
class RandomPromptScheduleGenerator:
|
NODE_NAME = "Random Prompt Schedule Generator"
|
ICON = "🖺"
|
@classmethod
|
def INPUT_TYPES(cls):
|
return {
|
"required": {
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.