import os
import gym
import torch
import torch.multiprocessing as mp
import torch.optim as optim
# Local imports assumed from the surrounding project (the modules themselves
# are not shown in this excerpt):
from model import Model, Shared_grad_buffers, Shared_obs_stats
from utils import TrafficLight, Counter
from train import train
from chief import chief
from test import test


class Params:
    # Hyperparameters for distributed PPO. Earlier fields (e.g. self.lr, used
    # below when building the optimizer) are cut off in this excerpt.
    def __init__(self):
        self.gae_param = 0.95  # GAE lambda
        self.clip = 0.2        # PPO clipping epsilon
        self.ent_coeff = 0.    # entropy bonus coefficient
        self.num_epoch = 10
        self.num_steps = 1000
        self.exploration_size = 1000
        self.num_processes = 4
        self.update_treshold = self.num_processes - 1  # workers that must report before the chief updates
        self.max_episode_length = 10000
        self.seed = 1
        self.env_name = 'InvertedPendulum-v1'
        #self.env_name = 'Reacher-v1'
        #self.env_name = 'Pendulum-v0'
        #self.env_name = 'Hopper-v1'
        #self.env_name = 'Ant-v1'
        #self.env_name = 'Humanoid-v1'
        #self.env_name = 'HalfCheetah-v1'
if __name__ == '__main__':
    os.environ['OMP_NUM_THREADS'] = '1'  # one OpenMP thread per worker process
    params = Params()
    torch.manual_seed(params.seed)
    env = gym.make(params.env_name)
    num_inputs = env.observation_space.shape[0]
    num_outputs = env.action_space.shape[0]

    # Shared synchronisation primitives, model, gradient buffers and
    # observation statistics, all placed in shared memory.
    traffic_light = TrafficLight()
    counter = Counter()
    shared_model = Model(num_inputs, num_outputs)
    shared_model.share_memory()
    shared_grad_buffers = Shared_grad_buffers(shared_model)
    #shared_grad_buffers.share_memory()
    shared_obs_stats = Shared_obs_stats(num_inputs)
    #shared_obs_stats.share_memory()
    optimizer = optim.Adam(shared_model.parameters(), lr=params.lr)
    test_n = torch.Tensor([0])
    test_n.share_memory_()

    # One evaluation process, one chief that applies the accumulated
    # gradients, and num_processes training workers.
    processes = []
    p = mp.Process(target=test, args=(params.num_processes, params, shared_model, shared_obs_stats, test_n))
    p.start()
    processes.append(p)
    p = mp.Process(target=chief, args=(params.num_processes, params, traffic_light, counter, shared_model, shared_grad_buffers, optimizer))
    p.start()
    processes.append(p)
    for rank in range(params.num_processes):
        p = mp.Process(target=train, args=(rank, params, traffic_light, counter, shared_model, shared_grad_buffers, shared_obs_stats, test_n))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
# ---------------------------------------------------------------------------
# Embedding dropout (separate file)
# ---------------------------------------------------------------------------
import numpy as np
import torch
from torch import nn
"""
Code from https://github.com/salesforce/awd-lstm-lm
paper: https://arxiv.org/pdf/1708.02182.pdf (see Section 4.3)
"""
class EmbeddingDropout(nn.Module):
    """
    Embedding layer with dropout applied to word 'types' rather than 'tokens',
    as suggested in https://arxiv.org/pdf/1512.05287.pdf: if embedding_dropout
    != 0, whole rows of the embedding weight matrix are zeroed before the
    lookup (from |V| -> embedding_dim), so every occurrence of a dropped word
    id maps to the zero vector, and the surviving rows are rescaled by
    1 / (1 - embedding_dropout).
    """
    def __init__(self, num_embeddings, embedding_dim, embedding_dropout=0.):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.dropoute = embedding_dropout
        self.embed = nn.Embedding(num_embeddings=self.num_embeddings,
                                  embedding_dim=self.embedding_dim)

    def forward(self, words):
        if self.dropoute and self.training:
            # Bernoulli mask with one decision per vocabulary row (word type),
            # expanded across the embedding dimension and rescaled.
            mask = self.embed.weight.data.new().resize_(
                (self.embed.weight.size(0), 1)).bernoulli_(
                1 - self.dropoute).expand_as(self.embed.weight) / (1 - self.dropoute)
            masked_embed_weight = mask * self.embed.weight
        else:
            masked_embed_weight = self.embed.weight
        padding_idx = self.embed.padding_idx  # be careful here to use the same 'padding_idx' name
        if padding_idx is None:
            padding_idx = -1
        return torch.nn.functional.embedding(
            words, masked_embed_weight, padding_idx, self.embed.max_norm,
            self.embed.norm_type, self.embed.scale_grad_by_freq, self.embed.sparse)
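
# Minimal usage sketch (hypothetical sizes and token ids): with the module in
# train() mode and embedding_dropout > 0, every position holding a dropped
# word id receives the zero vector, while surviving rows are rescaled.
#
#   emb = EmbeddingDropout(num_embeddings=100, embedding_dim=8,
#                          embedding_dropout=0.5)
#   emb.train()
#   words = torch.tensor([[1, 2, 1], [3, 1, 4]])  # (batch, seq) token ids
#   out = emb(words)                              # shape (2, 3, 8)
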
def embedded_dropout(embed, words, dropout=0.1):