repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
TapNet | TapNet-master/miniImageNet_TapNet/utils/model_TapNet_ResNet12.py | import cupy as cp
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
from chainer import cuda
from utils.rank_nullspace import nullspace_gpu
class TapNet(object):
def __init__(self, nb_class_train, nb_class_test, input_size, dimension,
             n_shot, gpu=-1):
    """
    Args
      nb_class_train (int): number of classes in a training episode
      nb_class_test (int): number of classes in a test episode
      input_size (int): dimension of input vector
      dimension (int) : dimension of embedding space
      n_shot (int) : number of shots
      gpu (int): device id; a negative value selects CPU execution
    """
    self.nb_class_train = nb_class_train
    self.nb_class_test = nb_class_test
    self.input_size = input_size
    self.dimension = dimension
    self.n_shot = n_shot
    # create chain
    # Build the ResNet-12 embedding network plus the per-class reference
    # layer l_phi, then move all parameters to the requested device.
    self.chain = self._create_chain()
    self.set_gpu(gpu)
# Set up methods
# ---------------
@property
def xp(self):
if self.gpu<0:
return np
else:
return cp
def set_gpu(self, gpu):
self.gpu = gpu
if self.gpu < 0:
self.chain.to_cpu()
else:
self.chain.to_gpu()
def set_optimizer(self, optimizer):
    """Attach a chainer optimizer to the chain.

    NOTE(review): use_cleargrads(use=False) keeps the legacy zerograds
    behaviour that train() relies on; use_cleargrads is deprecated in
    newer chainer releases — confirm against the pinned chainer version.
    """
    self.optimizer = optimizer
    self.optimizer.setup(self.chain)
    self.optimizer.use_cleargrads(use=False)
def _create_chain(self):
    """Build the ResNet-12 embedding network.

    Four residual stages (64 -> 128 -> 256 -> 512 channels).  Each stage has
    three 3x3 conv+BN layers on the main path and a 3x3 conv+BN shortcut
    (the *_r layers) that is added before the stage's final ReLU in
    encoder().  l_phi maps the final embedding (self.dimension) to one
    reference vector per training class.
    """
    chain = chainer.Chain(
        # stage 1: input -> 64 channels
        l_conv1_1=L.Convolution2D(None,64,(3,3), pad=1),
        l_norm1_1=L.BatchNormalization(64),
        l_conv1_2=L.Convolution2D(64,64,(3,3), pad=1),
        l_norm1_2=L.BatchNormalization(64),
        l_conv1_3=L.Convolution2D(64,64,(3,3), pad=1),
        l_norm1_3=L.BatchNormalization(64),
        l_conv1_r=L.Convolution2D(None,64,(3,3), pad=1),
        l_norm1_r=L.BatchNormalization(64),
        # stage 2: 64 -> 128 channels
        l_conv2_1=L.Convolution2D(64,128,(3,3), pad=1),
        l_norm2_1=L.BatchNormalization(128),
        l_conv2_2=L.Convolution2D(128,128,(3,3), pad=1),
        l_norm2_2=L.BatchNormalization(128),
        l_conv2_3=L.Convolution2D(128,128,(3,3), pad=1),
        l_norm2_3=L.BatchNormalization(128),
        l_conv2_r=L.Convolution2D(64,128,(3,3), pad=1),
        l_norm2_r=L.BatchNormalization(128),
        # stage 3: 128 -> 256 channels
        l_conv3_1=L.Convolution2D(128,256,(3,3), pad=1),
        l_norm3_1=L.BatchNormalization(256),
        l_conv3_2=L.Convolution2D(256,256,(3,3), pad=1),
        l_norm3_2=L.BatchNormalization(256),
        l_conv3_3=L.Convolution2D(256,256,(3,3), pad=1),
        l_norm3_3=L.BatchNormalization(256),
        l_conv3_r=L.Convolution2D(128,256,(3,3), pad=1),
        l_norm3_r=L.BatchNormalization(256),
        # stage 4: 256 -> 512 channels
        l_conv4_1=L.Convolution2D(256,512,(3,3), pad=1),
        l_norm4_1=L.BatchNormalization(512),
        l_conv4_2=L.Convolution2D(512,512,(3,3), pad=1),
        l_norm4_2=L.BatchNormalization(512),
        l_conv4_3=L.Convolution2D(512,512,(3,3), pad=1),
        l_norm4_3=L.BatchNormalization(512),
        l_conv4_r=L.Convolution2D(256,512,(3,3), pad=1),
        l_norm4_r=L.BatchNormalization(512),
        # linear head producing per-class reference vectors
        l_phi=L.Linear(self.dimension, self.nb_class_train),
    )
    return chain
# Train methods
# ---------------
def encoder(self, x, batchsize, train=True):
    """Embed a batch of flattened 84x84x3 images with the ResNet-12.

    Args:
        x: flattened image batch; reshaped to (batchsize, 84, 84, 3)
           then transposed to NCHW.
        batchsize (int): number of images in x.
        train (bool): chainer 'train' config (affects BN/dropout).

    Returns:
        h_t: (batchsize, -1) embedding after global average pooling.
    """
    with chainer.using_config('train', train):
        x2 = F.reshape(x, (batchsize,84,84,3))
        x3 = F.transpose(x2, [0,3,1,2])
        # ---- stage 1: conv(BN) x3 + shortcut, residual add before ReLU ----
        c1_r=self.chain.l_conv1_r(x3)
        n1_r=self.chain.l_norm1_r(c1_r)
        c1_1=self.chain.l_conv1_1(x3)
        n1_1=self.chain.l_norm1_1(c1_1)
        a1_1=F.relu(n1_1)
        c1_2=self.chain.l_conv1_2(a1_1)
        n1_2=self.chain.l_norm1_2(c1_2)
        a1_2=F.relu(n1_2)
        c1_3=self.chain.l_conv1_3(a1_2)
        n1_3=self.chain.l_norm1_3(c1_3)
        a1_3=F.relu(n1_3+n1_r)
        p1=F.max_pooling_2d(a1_3,2)
        # note: stage 1 uses a higher dropout ratio than the later stages
        p1=F.dropout(p1,ratio=0.3)
        # ---- stage 2 ----
        c2_r=self.chain.l_conv2_r(p1)
        n2_r=self.chain.l_norm2_r(c2_r)
        c2_1=self.chain.l_conv2_1(p1)
        n2_1=self.chain.l_norm2_1(c2_1)
        a2_1=F.relu(n2_1)
        c2_2=self.chain.l_conv2_2(a2_1)
        n2_2=self.chain.l_norm2_2(c2_2)
        a2_2=F.relu(n2_2)
        c2_3=self.chain.l_conv2_3(a2_2)
        n2_3=self.chain.l_norm2_3(c2_3)
        a2_3=F.relu(n2_3+n2_r)
        p2=F.max_pooling_2d(a2_3,2)
        p2=F.dropout(p2, ratio=0.2)
        # ---- stage 3 ----
        c3_r=self.chain.l_conv3_r(p2)
        n3_r=self.chain.l_norm3_r(c3_r)
        c3_1=self.chain.l_conv3_1(p2)
        n3_1=self.chain.l_norm3_1(c3_1)
        a3_1=F.relu(n3_1)
        c3_2=self.chain.l_conv3_2(a3_1)
        n3_2=self.chain.l_norm3_2(c3_2)
        a3_2=F.relu(n3_2)
        c3_3=self.chain.l_conv3_3(a3_2)
        n3_3=self.chain.l_norm3_3(c3_3)
        a3_3=F.relu(n3_3+n3_r)
        p3=F.max_pooling_2d(a3_3,2)
        p3=F.dropout(p3,ratio=0.2)
        # ---- stage 4 ----
        c4_r=self.chain.l_conv4_r(p3)
        n4_r=self.chain.l_norm4_r(c4_r)
        c4_1=self.chain.l_conv4_1(p3)
        n4_1=self.chain.l_norm4_1(c4_1)
        a4_1=F.relu(n4_1)
        c4_2=self.chain.l_conv4_2(a4_1)
        n4_2=self.chain.l_norm4_2(c4_2)
        a4_2=F.relu(n4_2)
        c4_3=self.chain.l_conv4_3(a4_2)
        n4_3=self.chain.l_norm4_3(c4_3)
        a4_3=F.relu(n4_3+n4_r)
        p4=F.max_pooling_2d(a4_3,2)
        p4=F.dropout(p4, ratio=0.2)
        # global average pool (6x6 spatial map after four 2x pools of 84x84)
        p5=F.average_pooling_2d(p4,6)
        h_t=F.reshape(p5, (batchsize,-1))
    return h_t
def Projection_Space(self, average_key, batchsize, nb_class, train=True, phi_ind=None):
    """Build the task-adaptive projection matrix M.

    Computes the null space of (normalized, mean-subtracted phi rows minus
    normalized class means) and broadcasts it to the query batch.

    Args:
        average_key: per-class mean embeddings, (nb_class, dimension).
        batchsize (int): number of query samples M is broadcast to.
        nb_class (int): number of classes in the episode.
        train (bool): when False, phi rows are gathered by phi_ind
            (the matched subset selected by select_phi at test time).
        phi_ind: row indices into l_phi.W when train is False.

    Returns:
        M: (batchsize, dimension, dimension - nb_class) projection basis.
    """
    c_t = average_key
    eps=1e-6
    if train == True:
        Phi_tmp = self.chain.l_phi.W
    else:
        Phi_data = self.chain.l_phi.W.data
        Phi_tmp = chainer.Variable(Phi_data[phi_ind,:])
    # accumulate the sum of phi rows (used to subtract the class average)
    for i in range(nb_class):
        if i == 0:
            Phi_sum = Phi_tmp[i]
        else:
            Phi_sum += Phi_tmp[i]
    # nb_class * phi_k - sum(phi) == (nb_class) * (phi_k - mean(phi))
    Phi = nb_class*(Phi_tmp)-F.broadcast_to(Phi_sum,(nb_class,self.dimension))
    # L2-normalize phi rows and class means before taking the difference
    power_Phi = F.sqrt(F.sum(Phi*Phi, axis=1))
    power_Phi = F.transpose(F.broadcast_to(power_Phi, [self.dimension,nb_class]))
    Phi = Phi/(power_Phi+eps)
    power_c = F.sqrt(F.sum(c_t*c_t, axis=1))
    power_c = F.transpose(F.broadcast_to(power_c, [self.dimension,nb_class]))
    c_tmp = c_t/(power_c+eps)
    null=Phi - c_tmp
    # null space computed on raw arrays (no gradient through the SVD)
    M = nullspace_gpu(null.data)
    M = F.broadcast_to(M,[batchsize, self.dimension, self.dimension-nb_class])
    return M
def compute_power(self, batchsize,key,M, nb_class, train=True,phi_ind=None):
    """Squared norms of phi rows and query keys inside the projected space.

    Returns pow_t of shape (batchsize, nb_class): ||phi_k M||^2 + ||key M||^2,
    used as the quadratic term of the distance-based classifier logits.
    """
    if train == True:
        Phi_out = self.chain.l_phi.W
    else:
        # test time: use only the phi rows matched to the test classes
        Phi_data = self.chain.l_phi.W.data
        Phi_out = chainer.Variable(Phi_data[phi_ind,:])
    Phi_out_batch = F.broadcast_to(Phi_out,[batchsize,nb_class, self.dimension])
    PhiM = F.batch_matmul(Phi_out_batch,M)
    PhiMs = F.sum(PhiM*PhiM,axis=2)
    key_t = F.reshape(key,[batchsize,1,self.dimension])
    keyM = F.batch_matmul(key_t,M)
    keyMs = F.sum(keyM*keyM, axis=2)
    keyMs = F.broadcast_to(keyMs, [batchsize,nb_class])
    pow_t = PhiMs + keyMs
    return pow_t
def compute_power_avg_phi(self, batchsize, nb_class, average_key, train=False):
    """Pairwise squared-norm term between class means and ALL phi rows.

    Returns (len(average_key), len(Phi)) with ||c_i||^2 + ||phi_j||^2 in
    each cell; select_phi subtracts this from 2*phi(c) to rank matches.
    """
    avg_pow = F.sum(average_key*average_key,axis=1)
    Phi = self.chain.l_phi.W
    Phis = F.sum(Phi*Phi,axis=1)
    avg_pow_bd = F.broadcast_to(F.reshape(avg_pow,[len(avg_pow),1]),[len(avg_pow),len(Phis)])
    wzs_bd = F.broadcast_to(F.reshape(Phis,[1,len(Phis)]),[len(avg_pow),len(Phis)])
    pow_avg = avg_pow_bd + wzs_bd
    return pow_avg
def compute_loss(self, t_data, r_t, pow_t, batchsize,nb_class, train=True):
    """Softmax cross-entropy over distance-based logits.

    Logits u = 2*phi(r_t) - (||phi||^2 + ||r||^2) are, up to a constant,
    the negated squared distances between projected queries and phi rows.
    """
    t = chainer.Variable(self.xp.array(t_data, dtype=self.xp.int32))
    u = 2*self.chain.l_phi(r_t)-pow_t
    return F.softmax_cross_entropy(u,t)
def compute_accuracy(self, t_data, r_t, pow_t,batchsize, nb_class, phi_ind=None):
    """Per-query correctness mask (boolean array) for a test episode.

    Restricts the logits to the phi rows matched to the test classes
    (phi_ind) before taking the argmax.
    """
    ro = 2*self.chain.l_phi(r_t)
    ro_t = chainer.Variable(ro.data[:,phi_ind])
    u = ro_t-pow_t
    t_est = self.xp.argmax(F.softmax(u).data, axis=1)
    return (t_est == self.xp.array(t_data))
def select_phi(self, average_key, avg_pow):
    """Greedily assign each test class a distinct best-matching phi row.

    Classes are processed in order; each takes its highest-scoring phi row
    (score = 2*phi(c) - ||c||^2 - ||phi||^2) that no earlier class claimed.

    Args:
        average_key: per-class mean embeddings of the test episode.
        avg_pow: output of compute_power_avg_phi for the same means.

    Returns:
        list of selected phi row indices, one per test class.
    """
    u_avg = 2*self.chain.l_phi(average_key).data
    u_avg = u_avg - avg_pow.data
    # NOTE(review): cp.asnumpy requires cupy even on the CPU path — confirm
    # this model is only ever evaluated on GPU.
    u_avg_ind = cp.asnumpy(self.xp.argsort(u_avg, axis=1))
    phi_ind = np.zeros(self.nb_class_test)
    for i in range(self.nb_class_test):
        if i == 0:
            # BUG FIX: np.int was removed in NumPy 1.24; the builtin int
            # is the documented drop-in replacement.
            phi_ind[i] = int(u_avg_ind[i, self.nb_class_train-1])
        else:
            # walk down the sorted scores until an unclaimed row is found
            k = self.nb_class_train-1
            while u_avg_ind[i,k] in phi_ind[:i]:
                k = k-1
            phi_ind[i] = int(u_avg_ind[i,k])
    return phi_ind.tolist()
def train(self, images, labels):
    """
    Train a minibatch of episodes

    The first nb_class_train * n_shot embeddings form the support set
    (averaged into per-class keys); the remainder are queries.  Queries are
    projected through M before classification, and one optimizer step is
    taken on the episode loss.

    Returns the scalar loss value (raw array, not a Variable).
    """
    images = self.xp.stack(images)
    batchsize = images.shape[0]
    loss = 0
    key = self.encoder(images, batchsize, train=True)
    support_set = key[:self.nb_class_train*self.n_shot,:]
    query_set = key[self.nb_class_train*self.n_shot:,:]
    # per-class mean of the support embeddings (the "average key")
    average_key = F.mean(F.reshape(support_set,[self.n_shot,self.nb_class_train,-1]),axis=0)
    batchsize_q = len(query_set.data)
    M = self.Projection_Space(average_key, batchsize_q, self.nb_class_train)
    # r_t = M M^T q : the query re-expressed through the projection basis
    r_t = F.reshape(F.batch_matmul(M,F.batch_matmul(M,query_set,transa=True)),(batchsize_q,-1))
    pow_t = self.compute_power(batchsize_q,query_set,M,self.nb_class_train)
    loss = self.compute_loss(labels[self.nb_class_train*self.n_shot:], r_t, pow_t, batchsize_q,self.nb_class_train)
    # zerograds matches use_cleargrads(use=False) set in set_optimizer
    self.chain.zerograds()
    loss.backward()
    self.optimizer.update()
    return loss.data
def evaluate(self, images, labels):
    """
    Evaluate accuracy score

    Mirrors train() but at test time: phi rows are first matched to the
    test classes via select_phi, and the projection space / powers are
    built from that matched subset only.

    Returns a one-element list holding the per-query correctness mask.
    """
    nb_class = self.nb_class_test
    images = self.xp.stack(images)
    batchsize = images.shape[0]
    accs = []
    key = self.encoder(images,batchsize, train=False)
    support_set = key[:nb_class*self.n_shot,:]
    query_set = key[nb_class*self.n_shot:,:]
    average_key = F.mean(F.reshape(support_set,[self.n_shot,nb_class,-1]),axis=0)
    batchsize_q = len(query_set.data)
    pow_avg = self.compute_power_avg_phi(batchsize_q, nb_class, average_key, train=False)
    # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is the
    # documented drop-in replacement.
    phi_ind = [int(ind) for ind in self.select_phi(average_key,pow_avg)]
    M = self.Projection_Space(average_key, batchsize_q,nb_class, train=False, phi_ind=phi_ind)
    r_t = F.reshape(F.batch_matmul(M,F.batch_matmul(M,query_set,transa=True)),(batchsize_q,-1))
    pow_t = self.compute_power(batchsize_q,query_set,M,nb_class, train=False, phi_ind=phi_ind)
    accs_tmp = self.compute_accuracy(labels[nb_class*self.n_shot:], r_t, pow_t, batchsize_q, nb_class, phi_ind=phi_ind)
    accs.append(accs_tmp)
    return accs
def decay_learning_rate(self, decaying_parameter=0.5):
self.optimizer.alpha=self.optimizer.alpha*decaying_parameter
| 12,091 | 34.253644 | 123 | py |
TapNet | TapNet-master/miniImageNet_TapNet/data/__init__.py | 1 | 0 | 0 | py | |
acl-anthology-helper | acl-anthology-helper-master/toy.py | 0 | 0 | 0 | py | |
acl-anthology-helper | acl-anthology-helper-master/src/__init__.py | 0 | 0 | 0 | py | |
acl-anthology-helper | acl-anthology-helper-master/src/modules/anthology_sqlite.py | """
@Reference:
使用SQLite
https://www.liaoxuefeng.com/wiki/1016959663602400/1017801751919456
Python自带的Sqlite支持shell命令行交互模式吗?
https://www.zhihu.com/question/62897833/answer/559922232
"""
import os
import sqlite3
from logging import DEBUG
from src.modules.retriever import Retriever
from src.modules.logger import MyLogger
from src.modules.constants import DBConsts
class AnthologySqlite(object):
    """sqlite3 front-end for storing scraped ACL anthology data.

    Owns a Retriever (for the scraped data), a logger, and a single
    connection/cursor pair that db_connect()/db_disconnect() manage.
    """

    _class_name = "Anthology Sqlite"

    def __init__(self, cache_enable=True, log_path='', db_dir='./database'):
        self.retriever = Retriever(cache_enable=cache_enable)
        self.anthology = self.retriever.load_anthology()  # use local cache
        self.logger = MyLogger(self._class_name, DEBUG, log_path)
        if not os.path.exists(db_dir):
            os.makedirs(db_dir)
            self.logger.warning(f'{db_dir} did not exist, and has been created now.')
        self.db_path = os.path.join(db_dir, DBConsts.ANTHOLOGY_DB)
        self.conn = None
        self.cursor = None

    def db_connect(self):
        """Open the database file and create a cursor."""
        self.conn = sqlite3.connect(self.db_path)
        self.cursor = self.conn.cursor()

    def db_disconnect(self):
        """Commit the pending transaction and release cursor + connection."""
        self.cursor.close()
        # commit the transaction
        self.conn.commit()
        # close the connection
        self.conn.close()
        # BUG FIX: null out the handles so __del__ (or a second disconnect)
        # does not try to close an already-closed connection.
        self.cursor = None
        self.conn = None

    def create_tables(self):
        """Create the conference and paper tables if they do not exist.

        BUG FIX: the original DDL used MySQL's AUTO_INCREMENT keyword,
        which is a syntax error in SQLite.  In SQLite an INTEGER PRIMARY
        KEY column auto-increments on its own (rowid alias).
        """
        self.db_connect()
        # conferences
        self.cursor.execute('''create table if not exists conference(
                                   id integer primary key,
                                   conf_content char(20) not null,
                                   venue char(20),
                                   year integer,
                                   link char(50),
                                   volume_size integer);
                            ''')
        # papers
        self.cursor.execute('''create table if not exists paper(
                                   id integer primary key,
                                   title char(100) not null,
                                   year integer,
                                   url char(50),
                                   authors text,
                                   abstract text,
                                   conf_content varchar(20));
                            ''')
        self.db_disconnect()

    def shell(self):
        """Minimal interactive SQL shell; a blank line exits."""
        self.db_connect()
        buffer = ""
        print("Enter your SQL commands to execute in sqlite3.")
        print("Enter a blank line to exit.")
        while True:
            line = input()
            if line == "":
                break
            buffer += line
            # only execute once the accumulated text is a complete statement
            if sqlite3.complete_statement(buffer):
                try:
                    buffer = buffer.strip()
                    self.cursor.execute(buffer)
                    if buffer.lstrip().upper().startswith("SELECT"):
                        print(self.cursor.fetchall())
                except sqlite3.Error as e:
                    print("An error occurred:", e.args[0])
                buffer = ""
        self.db_disconnect()

    def __del__(self):
        # Best-effort cleanup; db_disconnect() nulls the handles, so this
        # only fires when the connection is still open.
        if getattr(self, 'conn', None) is not None or getattr(self, 'cursor', None) is not None:
            try:
                self.db_disconnect()
            except Exception:
                # never raise from a destructor
                pass
| 3,073 | 32.78022 | 108 | py |
acl-anthology-helper | acl-anthology-helper-master/src/modules/papers.py | """
@Desc:
"""
from tqdm import tqdm
import requests
from bs4 import BeautifulSoup as Soup
from .logger import MyLogger
from src.common.string_tools import StringTools
class Paper(object):
    """A single anthology paper plus free-form reviewer notes."""

    def __init__(self, title, year, url, authors=None, abstrat="", conf_content="", venue=""):
        # BUG FIX: `authors` previously defaulted to a shared mutable list
        # ([]); every Paper created without authors aliased the same object.
        # NOTE(review): the parameter name `abstrat` is a typo for
        # `abstract`, kept for backward compatibility with keyword callers.
        self.title = title
        self.year = year
        self.url = url
        self.authors = [] if authors is None else authors
        self.abstract = abstrat
        self.conf_content = conf_content
        self.venue = venue
        self.reader_desc = ''  # reviewer notes accumulated by add_desc()

    def add_desc(self, desc: str):
        """Append one reviewer note, terminated by '||' and a newline."""
        self.reader_desc += desc + '||\n'

    def __repr__(self):
        repr_content = f'\nPaper "{self.title}":\n'
        for attribute_name, attribute_value in self.__dict__.items():
            if attribute_name == "title":
                continue
            repr_content += f"{attribute_name} : {attribute_value};\t"
        return repr_content
class PaperList(object):
    """An ordered collection of Paper objects with filtering helpers."""

    def __init__(self, papers=None, logger=None):
        # BUG FIX: `papers` previously defaulted to a shared mutable list
        # ([]); every PaperList created without papers aliased the same list.
        self.name = "PaperList"
        self.papers = [] if papers is None else papers
        self.logger = logger

    @property
    def size(self):
        """Number of papers held."""
        return len(self.papers)

    @classmethod
    def init_from_volumes_response(cls, conf, conf_content, year, url, logger=None):
        """Scrape one volume page, e.g. https://aclanthology.org/volumes/2021.acl-long/"""
        _paper_list = PaperList([], logger)
        response = requests.get(url)
        page = Soup(response.content, "html.parser")
        # get info from infobox ===========
        # the first is not a paper.
        infobox_set = page.find_all("p", {"class": "d-sm-flex align-items-stretch"})[1:]
        for one in infobox_set:
            infobox = one.find_all("span", {"class", "d-block"})[1]
            title_with_href = infobox.find("a", {"class": "align-middle"})
            title = title_with_href.get_text().strip()
            href = title_with_href.get("href")
            url = f'https://aclanthology.org{href[:-1]}.pdf'
            # authors (the first anchor is the title link, skip it)
            authors = []
            skip_first = True
            for author in infobox.find_all("a"):
                if skip_first:
                    skip_first = False
                    continue
                authors.append(author.get_text().strip())
            _paper_list.papers.append(Paper(title, year, url, authors, conf_content=conf_content, venue=conf))
        # get info from abstract ===========
        abstract_set = page.find_all("div", {"class", "card-body p-3 small"})
        # If the counts do not match, skip loading abstracts entirely.
        if len(abstract_set) == len(_paper_list):
            for one, paper in zip(abstract_set, _paper_list):
                abstract = one.get_text().strip()
                paper.abstract = abstract
        return _paper_list

    @classmethod
    def init_from_events_response(cls, conf, conf_content, year, r_content, logger=None):
        """Scrape one event page, e.g. https://aclanthology.org/events/acl-2021/"""
        _paper_list = PaperList([], logger)
        page = Soup(r_content, "html.parser")
        # segment of papers for the requested conf_content anchor
        core = page.find("div", {"id": conf_content})
        # get info from infobox ===========
        # the first is not a paper.
        infobox_set = core.find_all("p", {"class": "d-sm-flex align-items-stretch"})[1:]
        for one in tqdm(infobox_set, desc='parsing infobox_set'):
            infobox = one.find_all("span", {"class", "d-block"})[1]
            title_with_href = infobox.find("a", {"class": "align-middle"})
            title = title_with_href.get_text().strip()
            href = title_with_href.get("href")
            url = f'https://aclanthology.org{href[:-1]}.pdf'
            # authors (the first anchor is the title link, skip it)
            authors = []
            skip_first = True
            for author in infobox.find_all("a"):
                if skip_first:
                    skip_first = False
                    continue
                authors.append(author.get_text().strip())
            _paper_list.papers.append(Paper(title, year, url, authors, conf_content=conf_content, venue=conf))
        # get info from abstract ===========
        abstract_set = core.find_all("div", {"class", "card-body p-3 small"})
        # If the counts do not match, skip loading abstracts entirely.
        if len(abstract_set) == len(_paper_list):
            for one, paper in tqdm(zip(abstract_set, _paper_list), desc='parsing abstract_set'):
                abstract = one.get_text().strip()
                paper.abstract = abstract
        return _paper_list

    def add_logger(self, logger: "MyLogger"):
        self.logger = logger

    '''
    ============================ filters ============================
    '''
    def containing_filter(self, attr: str, keyword: str):
        """Keep papers whose attribute `attr` contains `keyword`."""
        filtered = []
        for paper in self.papers:
            # SECURITY/IDIOM FIX: use getattr instead of eval on a format
            # string built from caller input.
            if StringTools.contain(getattr(paper, attr), keyword):
                paper.add_desc(f'filtered by containing "{keyword}" in {attr}')
                filtered.append(paper)
        if isinstance(self.logger, MyLogger):
            self.logger.info(
                f'filtered by containing "{keyword}" in {attr} for {len(self.papers)} papers,'
                f' remaining {len(filtered)}')
        return PaperList(filtered)

    def or_containing_filter(self, attr: str, keywords: list):
        """Keep papers whose attribute `attr` contains ANY of `keywords`."""
        filtered = []
        for paper in self.papers:
            # SECURITY/IDIOM FIX: getattr instead of eval (see containing_filter)
            if StringTools.multi_or_contain(getattr(paper, attr), keywords):
                paper.add_desc(f'filtered by containing [{" or ".join(keywords)}] in {attr}')
                filtered.append(paper)
        if isinstance(self.logger, MyLogger):
            self.logger.info(
                f'filtered by containing [{" or ".join(keywords)}] in {attr} for {len(self.papers)} papers,'
                f' remaining {len(filtered)}')
        return PaperList(filtered)

    def and_containing_filter(self, attr: str, keywords: list):
        """Keep papers whose attribute `attr` contains ALL of `keywords`."""
        filtered = []
        for paper in self.papers:
            # SECURITY/IDIOM FIX: getattr instead of eval (see containing_filter)
            if StringTools.multi_and_contain(getattr(paper, attr), keywords):
                paper.add_desc(f'filtered by containing [{" and ".join(keywords)}] in {attr}')
                filtered.append(paper)
        if isinstance(self.logger, MyLogger):
            self.logger.info(
                f'filtered by containing [{" and ".join(keywords)}] in {attr} for {len(self.papers)} papers,'
                f' remaining {len(filtered)}')
        return PaperList(filtered)

    def group(self, attr: str) -> dict:
        """Bucket papers into {attribute value: PaperList}."""
        group_dict = {}
        for paper in self.papers:
            # SECURITY/IDIOM FIX: getattr instead of eval (see containing_filter)
            key = getattr(paper, attr)
            if key not in group_dict:
                group_dict[key] = PaperList(papers=[])
            group_dict[key].papers.append(paper)
        return group_dict

    def items(self):
        return self.papers

    def __and__(self, other):
        # set intersection by object identity/hash; order is not preserved
        new = PaperList(papers=list(set(self.papers) & set(other.papers)), logger=self.logger)
        return new

    def __or__(self, other):
        new = PaperList(papers=list(set(self.papers) | set(other.papers)), logger=self.logger)
        return new

    def __iter__(self):
        for paper in self.papers:
            yield paper

    def __call__(self, *args, **kwargs):
        return self.papers

    def __repr__(self):
        repr_content = f'\n'
        for one in self.papers:
            repr_content += str(one)
        return repr_content

    def __len__(self):
        return len(self.papers)
| 7,445 | 36.606061 | 110 | py |
acl-anthology-helper | acl-anthology-helper-master/src/modules/retriever.py | """
@Desc:
"""
import requests
from logging import DEBUG
from src.modules.constants import CACHE_DIR
from src.modules.conferences import Anthology
from src.modules.papers import PaperList
from src.modules.statistics import Statistics as stats
from src.modules.statistics import Stat
from src.modules.logger import MyLogger
from src.modules.cache import LocalCache
class Retriever(object):
    """Fetches paper lists from aclanthology.org, with optional local cache.

    The cache (LocalCache) stores plain data only: the anthology's confs
    dict and raw paper lists — never objects that hold a logger.
    """

    _class_name = "ACL Anthology Retriever"

    def __init__(self, cache_enable=True, cache_dir=CACHE_DIR, log_path=''):
        self.homepage_url = "https://aclanthology.org"
        self.logger = MyLogger('retriever', DEBUG, log_path)
        self.cache_enable = cache_enable
        if self.cache_enable:
            self.cache = LocalCache('retriever', cache_dir, self.logger)
            self.cache.smart_load()
        else:
            self.cache = None

    def load_anthology(self):
        """
        :return:
        note: Anthology cannot be serialized, because of containing logging

        Returns a cached Anthology when available; otherwise scrapes the
        homepage (parse_htmls) and stores only the confs dict in the cache.
        """
        _cache_key = 'anthology'
        if self.cache and _cache_key in self.cache:
            conf_dict = self.cache[_cache_key]
            anthology = Anthology(confs=conf_dict, logger=self.logger)
        else:
            anthology = Anthology(confs={}, logger=self.logger)
            anthology.parse_htmls()
            if self.cache_enable:
                self.cache[_cache_key] = anthology.confs
                self.cache.store()
        return anthology

    def get_paper_list_from_volumes(self, conf, conf_content, year, url):
        # Direct scrape of a volumes URL; not cached.
        return PaperList.init_from_volumes_response(conf, conf_content, year, url, self.logger)

    def _get_paper_list(self, conf, year, conf_content):
        """
        :return:
        note: PaperList cannot be serialized, because of containing logging

        Cached by conf_content key; only the raw paper list is stored.
        """
        if self.cache and conf_content in self.cache:
            paper_list = self.cache[conf_content]
            paper_list_obj = PaperList(papers=paper_list, logger=self.logger)
        else:
            target_url = f"{self.homepage_url}/events/{conf}-{year}/#{conf_content}"
            response = requests.get(target_url)
            paper_list_obj = PaperList.init_from_events_response(conf, conf_content, year, response.content,
                                                                self.logger)
            if self.cache_enable:
                self.cache[conf_content] = paper_list_obj.papers
                self.cache.store()
        return paper_list_obj

    def _collect_stats(self, conf_content, papers):
        # Record the paper count for this conf_content in the global stats.
        stats.add(Stat(f'{conf_content}').add_attr('papers', len(papers)))
        self.logger.info(stats.repr())

    # Convenience per-venue entry points; each builds a fresh Retriever.
    @classmethod
    def acl(cls, year, conf_content, cache_enable=True) -> PaperList:
        retriever = Retriever(cache_enable=cache_enable)
        papers = retriever._get_paper_list("acl", year, conf_content)
        retriever._collect_stats(conf_content, papers)
        return papers

    @classmethod
    def naacl(cls, year, conf_content, cache_enable=True) -> PaperList:
        retriever = Retriever(cache_enable=cache_enable)
        papers = retriever._get_paper_list("naacl", year, conf_content)
        retriever._collect_stats(conf_content, papers)
        return papers

    @classmethod
    def emnlp(cls, year, conf_content, cache_enable=True) -> PaperList:
        retriever = Retriever(cache_enable=cache_enable)
        papers = retriever._get_paper_list("emnlp", year, conf_content)
        retriever._collect_stats(conf_content, papers)
        return papers

    def __repr__(self):
        repr_content = f"========== {self._class_name}: ============\n"
        for attribute_name, attribute_value in self.__dict__.items():
            repr_content += f"{str(attribute_name).upper()} : {attribute_value}\n"
        return repr_content
| 3,827 | 38.463918 | 108 | py |
acl-anthology-helper | acl-anthology-helper-master/src/modules/constants.py | """
@Desc:
"""
# Default on-disk cache directory used by Retriever/AnthologyMySQL LocalCache.
CACHE_DIR = './cache'
class DBConsts(object):
    """Database file and table names shared by the sqlite/MySQL front-ends."""
    ANTHOLOGY_DB = 'anthology.db'
    CONF_TABLE = 'conference'
    PAPER_TABLE = 'paper'
class ConfConsts(object):
    """Labels for the two event groups on the anthology homepage."""
    ACL_EVENTS = "ACL Events"
    NON_ACL_EVENTS = "NON-ACL Events"
| 249 | 14.625 | 37 | py |
acl-anthology-helper | acl-anthology-helper-master/src/modules/parallel_downloader.py | """
@reference:
Python 3 爬虫|第4章:多进程并发下载
https://madmalls.com/blog/post/multi-process-for-python3/
并行有诸多限制:
1.设计并行下载类的时候不能引入带锁的东西(如logging)
2.如果子任务报异常需要设计处理handler
"""
from logging import Logger, DEBUG
import os
import requests
from src.modules.logger import MyLogger
from src.modules.papers import Paper, PaperList
from src.common.string_tools import StringTools
from src.common.file_tools import FileTools
from multiprocessing import Pool
class PaperDownloader(object):
    """Multiprocess PDF downloader.

    Per the module notes, worker code must stay picklable: download() is a
    staticmethod that takes no logger/lock-bearing objects and reports
    failures with print() instead of raising.
    """

    def __init__(self, download_dir='./download', logger=None):
        self._download_dir = download_dir
        self._logger = logger if logger else MyLogger('downloader', DEBUG)

    def set_downlard_dir(self, download_dir: str):
        # NOTE(review): method name keeps the original 'downlard' typo —
        # renaming would break existing callers.
        if not os.path.isdir(download_dir):
            raise FileExistsError(f'The input directory -{download_dir}- is invalid.')
        self._download_dir = download_dir

    @staticmethod
    def download(paper: Paper, download_dir, prefix_path):
        """Fetch one paper's PDF into download_dir/prefix_path.

        Exceptions are swallowed (printed) so one bad paper does not kill
        the worker process.
        """
        try:
            prefix = os.path.join(download_dir, prefix_path)
            os.makedirs(prefix, exist_ok=True)
            fpath = os.path.join(prefix, f'{StringTools.filename_norm(paper.title)}.pdf')
            r = requests.get(paper.url)
            with open(fpath, "wb") as f:
                f.write(r.content)
        except Exception as e:
            print(f'{paper} download failed, the exception is :{e}')

    def multi_download(self, papers: PaperList, prefix_dir):
        """Download all papers in parallel (one worker per CPU core),
        then write a papers_info.txt summary next to the PDFs."""
        cpu_cores = os.cpu_count()  # number of parallel workers
        download_dir = self._download_dir
        self._logger.info(
            f'Papers multi_download(parallel) starts, cpus: {cpu_cores},'
            f' download_dir: {os.path.abspath(download_dir)},'
            f' papers: {papers.size}')
        params = []
        for paper in papers.items():
            params.append((paper, download_dir, prefix_dir))
        with Pool(cpu_cores) as process:
            process.starmap(self.download, params)
        self._logger.info('All subprocesses done.')
        prefix = os.path.join(self._download_dir, prefix_dir)
        FileTools.info_to_file(papers, os.path.join(prefix, 'papers_info.txt'))
| 2,153 | 36.789474 | 89 | py |
acl-anthology-helper | acl-anthology-helper-master/src/modules/downloader.py | """
@reference:
python下载文件的三种方法
https://www.jianshu.com/p/e137b03a1cd2
"""
from logging import DEBUG
import os
import requests
from tqdm import tqdm
from src.modules.logger import MyLogger
from src.modules.papers import Paper, PaperList
from src.common.string_tools import StringTools
from src.common.file_tools import FileTools
class Downloader(object):
    """Abstract base for downloaders; subclasses implement the stubs below."""

    def __init__(self, download_dir='./download', logger=None):
        self.download_dir = download_dir
        self.logger = logger if logger else MyLogger('downloader', DEBUG)

    def set_downlard_dir(self, path):
        # stub — overridden by subclasses (name keeps the original typo)
        pass

    def download(self, obj, prefix_dir, verbose=False):
        # stub — download a single object
        pass

    def multi_download(self, objs, prefix_dir, verbose=False):
        # stub — download a collection of objects
        pass
class PaperDownloader(Downloader):
    """Sequential (single-process) PDF downloader for PaperList objects."""

    def set_downlard_dir(self, download_dir: str):
        # NOTE(review): method name keeps the original 'downlard' typo —
        # renaming would break existing callers.
        if not os.path.isdir(download_dir):
            raise FileExistsError(f'The input directory -{download_dir}- is invalid.')
        self.download_dir = download_dir

    def download(self, paper: Paper, prefix_dir, verbose=False):
        """Fetch one paper's PDF into download_dir/prefix_dir."""
        prefix = os.path.join(self.download_dir, prefix_dir)
        # FIX: exist_ok=True removes the check-then-create race of the
        # original `if not os.path.exists(...)` guard and matches the
        # parallel downloader's behaviour.
        os.makedirs(prefix, exist_ok=True)
        fpath = os.path.join(prefix, f'{StringTools.filename_norm(paper.title)}.pdf')
        r = requests.get(paper.url)
        with open(fpath, "wb") as f:
            f.write(r.content)
        if verbose:
            self.logger.info(f'paper downloaded at {fpath}')

    def multi_download(self, papers: PaperList, prefix_dir, verbose=False):
        """
        :param papers:
        :param prefix_path:
        :param verbose:
        :return:
        without multiprocessing.

        Downloads every paper in sequence; failures are logged and collected
        rather than aborting the run.  Writes papers_info.txt and
        download_info.txt summaries next to the PDFs.
        """
        self.logger.info(f'Papers multi_download(without multi-processing) starts, papers: {papers.size}')
        success = 0
        fails = []
        for paper in tqdm(papers, desc=f"multi downloading", total=papers.size):
            try:
                self.download(paper, prefix_dir)
                success += 1
            except Exception as e:
                self.logger.warning(f'{paper} download failed, the exception is :{e}')
                fails.append(paper)
        self.logger.info('All subprocesses done.')
        prefix = os.path.join(self.download_dir, prefix_dir)
        FileTools.info_to_file(papers, os.path.join(prefix, 'papers_info.txt'))
        FileTools.info_to_file(f'{success} downloaded, total: {papers.size}\nfailed papers:\n{fails}',
                               os.path.join(prefix, 'download_info.txt'))
| 2,540 | 34.291667 | 106 | py |
acl-anthology-helper | acl-anthology-helper-master/src/modules/logger.py | """
@Desc:
Rrinting log to both screen and files.
@Reference:
https://xnathan.com/2017/03/09/logging-output-to-screen-and-file/
"""
import logging
import os
from logging import Logger
from logging import NOTSET
class MyLogger(Logger):
    """Logger that writes to both a file and the console.

    NOTE(review): handlers are added unconditionally in _init, so creating
    two MyLogger instances with the same log_path duplicates output lines —
    confirm callers construct each logger only once.
    """

    def __init__(self, name, level=NOTSET, log_path=''):
        super(MyLogger, self).__init__(name, level)
        # default log file: log/<name>_log.txt
        self.log_path = log_path if log_path else f'log/{name}_log.txt'
        self._init()

    def _init(self):
        """Configure level, formatter, and the file + stream handlers."""
        warns = ''
        self.setLevel(self.level)
        formatter = logging.Formatter(
            "%(asctime)s %(pathname)s %(filename)s %(funcName)s %(lineno)s %(levelname)s - %(message)s",
            "%Y-%m-%d %H:%M:%S")
        # FileHandler: write log records to the log file
        if not os.path.exists(self.log_path):
            os.makedirs(os.path.dirname(self.log_path), exist_ok=True)
            warns = f'{self.log_path} did not exist, and has been created now.'
        fh = logging.FileHandler(f'{self.log_path}', encoding='utf-8')
        fh.setLevel(self.level)
        fh.setFormatter(formatter)
        # StreamHandler: echo log records to the console
        ch = logging.StreamHandler()
        ch.setLevel(self.level)
        ch.setFormatter(formatter)
        # register both handlers
        self.addHandler(ch)
        self.addHandler(fh)
        # output warning
        if warns:
            self.warning(warns)
        self.info(f'MyLogger instance {self.name} has been set. level: {self.level}, log_path: {os.path.abspath(self.log_path)}')
| 1,479 | 29.204082 | 129 | py |
acl-anthology-helper | acl-anthology-helper-master/src/modules/conferences.py | """
@Desc:
"""
import requests
import json
import itertools
from tqdm import tqdm
from bs4 import BeautifulSoup as Soup
from .logger import MyLogger
from .constants import ConfConsts
from src.common.serialization_tools import MyEncoder
class Conference(object):
    """One venue row scraped from the anthology homepage.

    conf_contents maps a year string to the list of ConfContent entries
    published under this venue in that year.
    """

    def __init__(self, name, label, link):
        self.name = name
        # either "ACL Events" or "NON-ACL Events"
        self.label = label
        self.link = link
        self.conf_contents = {}

    @property
    def size(self):
        """Total number of contents across every year."""
        return sum(len(entries) for entries in self.conf_contents.values())

    def __iter__(self):
        # iterate the year keys, mirroring plain dict iteration
        yield from self.conf_contents

    def __call__(self, *args, **kwargs):
        return self.conf_contents

    def __repr__(self):
        dumped = json.dumps(self.conf_contents, indent=4, cls=MyEncoder)
        return f'Conference {self.name}:\n{dumped}'

    def __str__(self):
        return repr(self)
class ConfContent(object):
    """One volume/track entry (e.g. 2021.acl-long) under a Conference."""

    def __init__(self, name, full_name, venue, year, link, volume_size):
        self.name = name
        self.full_name = full_name
        self.venue = venue
        self.year = year
        self.link = link
        self.volume_size = volume_size

    def __repr__(self):
        # identified by its short anchor name
        return self.name

    def __str__(self):
        return repr(self)
class Anthology(object):
    """The scraped view of aclanthology.org: two groups of Conference objects.

    confs maps ConfConsts.ACL_EVENTS / NON_ACL_EVENTS to lists of
    Conference; parse_htmls() fills it from the live site.
    """

    def __init__(self, confs=None, logger=None):
        if confs is None:
            confs = {ConfConsts.ACL_EVENTS: [], ConfConsts.NON_ACL_EVENTS: []}
        elif not isinstance(confs, dict):
            raise ValueError("confs should be dict.")
        self.name = "Anthology"
        self.homepage_url = "https://aclanthology.org"
        self.confs = confs
        self.logger = logger
        # initialization
        self.init_confs()

    def init_confs(self):
        # make sure both group keys exist even for a partially-filled dict
        if ConfConsts.ACL_EVENTS not in self.confs:
            self.confs[ConfConsts.ACL_EVENTS] = []
        if ConfConsts.NON_ACL_EVENTS not in self.confs:
            self.confs[ConfConsts.NON_ACL_EVENTS] = []

    @property
    def size(self):
        """Total number of conferences across both groups."""
        return sum([len(conferences) for conferences in self.confs.values()])

    def parse_htmls(self):
        """Scrape the homepage, then each conference's own page."""
        self.fill_with_conf_infos()
        self.fill_with_conf_contents()

    def fill_with_conf_infos(self):
        """Parse the homepage's two <tbody> tables into Conference objects."""
        response = requests.get(self.homepage_url)
        acl_homepage = Soup(response.content, "html.parser")
        events = acl_homepage.find_all("tbody")
        acl_events = events[0]
        non_ack_events = events[1]
        # acl events
        rows = acl_events.find_all("tr", {"class", "text-center"})
        for row in tqdm(rows, desc='parsing acl events'):
            conf = row.find("th")
            conf_name = conf.get_text().strip()
            # SIGs are not conferences
            if conf_name == "SIGs":
                continue
            conf_link = f'https://aclanthology.org/{conf.find("a").get("href")}'
            self.confs[ConfConsts.ACL_EVENTS].append(Conference(conf_name, ConfConsts.ACL_EVENTS, conf_link))
        # non-acl events
        rows = non_ack_events.find_all("tr", {"class", "text-center"})
        for row in tqdm(rows, desc='parsing non-acl events'):
            conf = row.find("th")
            conf_name = conf.get_text().strip()
            conf_link = f'{self.homepage_url}{conf.find("a").get("href")}'
            self.confs[ConfConsts.NON_ACL_EVENTS].append(Conference(conf_name, ConfConsts.NON_ACL_EVENTS, conf_link))

    def fill_with_conf_contents(self):
        """Visit every conference page and collect its per-year contents."""
        for conf in tqdm(itertools.chain(*self.confs.values()), total=self.size, desc='parsing all conf_contents'):
            response = requests.get(conf.link)
            conf_html = Soup(response.content, "html.parser")
            for year_conf_html in conf_html.find_all("div", {"class", "row"}):
                # rows without an <h4> heading are not year sections
                if year_conf_html.find("h4") is None:
                    continue
                year = year_conf_html.find("h4").get_text()  # year
                conf.conf_contents[year] = []
                # traverse contents
                for content in year_conf_html.find_all("li"):
                    a = content.find("a")
                    href = a.get("href")
                    name = href.split("/")[2]
                    full_name = a.get_text().strip()
                    link = f'{self.homepage_url}{href}'
                    # is blank for html.
                    volume_size = int(content.find("span").get_text().split(" ")[0].strip())
                    conf.conf_contents[year].append(ConfContent(name, full_name, conf.name, year, link, volume_size))

    def add_logger(self, logger: MyLogger):
        self.logger = logger

    def items(self):
        return self.confs.items()

    def __iter__(self):
        # iterate the group labels (dict keys)
        for conf in self.confs:
            yield conf

    def __call__(self, *args, **kwargs):
        return self.confs

    def __repr__(self):
        return 'Anthology:\n' + json.dumps(self.confs, indent=4, cls=MyEncoder)

    def __str__(self):
        return self.__repr__()

    def __len__(self):
        # same quantity as the `size` property
        return sum([len(one) for one in self.confs.values()])
| 5,098 | 32.993333 | 117 | py |
acl-anthology-helper | acl-anthology-helper-master/src/modules/anthology_mysql.py | """
@Reference:
"""
import os
import itertools
import pymysql
from tqdm import tqdm
from logging import DEBUG
from src.modules.constants import CACHE_DIR
from src.modules.retriever import Retriever
from src.modules.logger import MyLogger
from src.modules.constants import DBConsts
from src.configuration.mysql_cfg import MySQLCFG
from src.modules.cache import LocalCache
from src.modules.papers import Paper
from src.modules.conferences import ConfContent
class AnthologyMySQL(object):
    """Persist the crawled ACL Anthology (conference volumes + papers) into MySQL.

    A :class:`LocalCache` records which conferences/volumes were already
    inserted so :meth:`load_data` can resume after an interruption.
    """

    _class_name = "Anthology MySQL"

    def __init__(self, cache_enable=True, cache_dir=CACHE_DIR, log_path='', db_dir='./database'):
        self.retriever = Retriever(cache_enable=cache_enable)
        self.anthology = self.retriever.load_anthology()  # use local cache
        self.logger = MyLogger(self._class_name, DEBUG, log_path)
        if not os.path.exists(db_dir):
            os.makedirs(db_dir)
            self.logger.warning(f'{db_dir} did not exist, and has been created now.')
        self.db_path = os.path.join(db_dir, DBConsts.ANTHOLOGY_DB)
        self.conn = None
        self.cursor = None
        self.cache_enable = cache_enable
        if self.cache_enable:
            self.cache = LocalCache('anthology', cache_dir, self.logger)
            self.cache.smart_load()
        else:
            self.cache = None

    def db_connect(self):
        """Open a connection and cursor (kept on the instance)."""
        self.conn = pymysql.connect(host=MySQLCFG.HOST,
                                    port=MySQLCFG.PORT,
                                    user=MySQLCFG.USER,
                                    password=MySQLCFG.PASSWORD,
                                    db=MySQLCFG.DB,
                                    charset='utf8mb4')
        self.cursor = self.conn.cursor()

    def db_close_with_commit(self):
        """Close the cursor, commit the transaction, close the connection."""
        self.cursor.close()
        self.conn.commit()
        self.conn.close()

    def db_close_without_commit(self):
        """Close cursor and connection without committing (read-only paths)."""
        self.cursor.close()
        self.conn.close()

    def create_tables(self):
        """Create the conference and paper tables if they do not exist."""
        self.db_connect()
        # conferences (table name is a trusted constant, safe to interpolate)
        self.cursor.execute(f'''create table if not exists {DBConsts.CONF_TABLE}(id int primary key auto_increment,
                            conf_content varchar(20) not null,
                            venue varchar(20),
                            year int,
                            link char(100),
                            volume_size int);
                            ''')
        # papers
        self.cursor.execute(f'''create table if not exists {DBConsts.PAPER_TABLE}(id int primary key auto_increment,
                            title varchar(1000) not null,
                            year int,
                            url char(100),
                            authors text,
                            abstract text,
                            conf_content varchar(20),
                            venue varchar(20));
                            ''')
        self.db_close_with_commit()

    def insert_a_conf_content(self, conf_content: ConfContent):
        """Insert one volume row; caller manages connection and commit.

        Uses parameterized placeholders so pymysql escapes the values —
        the previous f-string interpolation was injectable and broke on quotes.
        """
        self.cursor.execute(
            f'''insert ignore into {DBConsts.CONF_TABLE}
            (conf_content, venue, year, link, volume_size)
            values (%s, %s, %s, %s, %s);''',
            (conf_content.name, conf_content.venue, conf_content.year,
             conf_content.link, conf_content.volume_size))

    def insert_a_paper(self, paper: Paper):
        """Insert one paper row; caller manages connection and commit.

        Parameterized placeholders replace the manual quote-escaping
        (``title.replace("'", "\\'")``) of the original implementation.
        """
        self.cursor.execute(
            f'''insert ignore into {DBConsts.PAPER_TABLE}
            (title, year, url, authors, abstract, conf_content, venue)
            values (%s, %s, %s, %s, %s, %s, %s);''',
            (paper.title, paper.year, paper.url, ", ".join(paper.authors),
             paper.abstract, paper.conf_content, paper.venue))

    def load_data(self):
        """Crawl every conference volume and insert its papers.

        Commits and updates the resume cache after each volume so an
        interrupted run can pick up where it stopped.
        """
        self.db_connect()
        self.logger.info("Start loading data...")
        for conf in itertools.chain(*self.anthology.confs.values()):
            # Conference fully loaded in a previous run — skip it.
            if self.cache and self.cache.get(conf.name, False):
                self.logger.info(f"{conf.name} found in cache.")
                continue
            else:
                self.logger.info(f"Start loading {conf.name}...")
            for conf_content in tqdm(itertools.chain(*conf.conf_contents.values()),
                                     total=conf.size,
                                     desc=f'loading {conf.name}...'):
                # Volume already inserted in a previous run — skip it.
                if self.cache and self.cache.get(conf_content.name, False):
                    continue
                else:
                    papers = self.retriever.get_paper_list_from_volumes(conf_content.venue,
                                                                        conf_content.name,
                                                                        conf_content.year,
                                                                        conf_content.link)
                    for paper in papers.items():
                        # insert the paper rows
                        self.insert_a_paper(paper)
                    # insert the volume row
                    self.insert_a_conf_content(conf_content)
                    self.conn.commit()  # commit this volume
                    # Guarded: the original crashed here when cache_enable=False.
                    if self.cache:
                        self.cache[conf_content.name] = True
                        self.cache.store()
            if self.cache:
                self.cache[conf.name] = True
                self.cache.store()
            self.logger.info(f"Finished!")
        self.db_close_with_commit()

    def get_conferences(self):
        """Return every row of the conference table."""
        self.db_connect()
        self.cursor.execute(f'SELECT * FROM `{DBConsts.CONF_TABLE}`')
        result = self.cursor.fetchall()
        self.db_close_without_commit()
        return result

    def size_of_confs(self):
        """Return the row count of the conference table (as fetchall() output)."""
        self.db_connect()
        self.cursor.execute(f'SELECT count(*) FROM `{DBConsts.CONF_TABLE}`')
        result = self.cursor.fetchall()
        self.db_close_without_commit()
        return result

    def size_of_papers(self):
        """Return the row count of the paper table (as fetchall() output)."""
        self.db_connect()
        self.cursor.execute(f'SELECT count(*) FROM `{DBConsts.PAPER_TABLE}`')
        result = self.cursor.fetchall()
        self.db_close_without_commit()
        return result
| 6,803 | 40.487805 | 117 | py |
acl-anthology-helper | acl-anthology-helper-master/src/modules/statistics.py | """
@Desc:
"""
import json
class Stat(object):
    """A single named statistic holding an arbitrary key/value attribute bag."""

    def __init__(self, name):
        self.name = name
        self._attrs = dict()  # collect anything needed

    def attrs(self):
        """Return the underlying attribute dict."""
        return self._attrs

    def add_attr(self, key, val):
        """Set one attribute and return self to allow chaining."""
        self._attrs[key] = val
        return self

    def __repr__(self):
        # Bug fix: the original built a *set* literal
        # {'stat name', self.name, 'attrs', self.attrs} — commas instead of
        # colons, and the bound method instead of the dict — which json.dumps
        # cannot serialize (TypeError) and which lost the key/value pairing.
        info = {'stat name': self.name,
                'attrs': self._attrs}
        return json.dumps(info, indent=4)
class Statistics(object):
    """Process-wide registry mapping stat names to their attribute dicts."""

    collections = dict()

    @classmethod
    def add(cls, stat: Stat):
        """Register *stat*; return False when the name is already taken."""
        if stat.name not in cls.collections:
            cls.collections[stat.name] = stat.attrs()
            return True
        return False

    @classmethod
    def update(cls, stat: Stat, key, val):
        """Set one attribute of an already-registered stat; False if absent."""
        if stat.name not in cls.collections:
            return False
        cls.collections[stat.name][key] = val
        return True

    @classmethod
    def repr(cls):
        """Pretty-print the whole registry as indented JSON."""
        return json.dumps(cls.collections, indent=4)
| 1,006 | 20.891304 | 55 | py |
acl-anthology-helper | acl-anthology-helper-master/src/modules/cache.py | """
@reference:
python dict 保存为pickle格式
https://blog.csdn.net/rosefun96/article/details/90633786
"""
import os
import pickle
import json
from logging import DEBUG
from src.modules.logger import MyLogger
class Cache(object):
    """Simple in-memory key/value cache with dict-like access."""

    def __init__(self, name, local_dir='./cache', logger=None):
        self._name = name
        self._local_dir = local_dir
        # Fall back to a default logger when none is supplied.
        self._logger = logger if logger else MyLogger('cache', DEBUG)
        self._cache = dict()

    def add(self, key, val):
        """Insert only when absent; warn and return False on duplicates."""
        if key not in self._cache:
            self._cache[key] = val
            return True
        self._logger.warning(f'key exits already, add failed.')
        return False

    def put(self, key, val):
        """Insert or overwrite unconditionally."""
        self._cache[key] = val

    def get(self, key, default):
        """Return the cached value, or *default* when the key is absent."""
        return self._cache.get(key, default)

    def __iter__(self):
        # Iterate the cached keys, like a plain dict.
        return iter(self._cache)

    def __setitem__(self, key, val):
        self._cache[key] = val

    def __getitem__(self, key):
        return self._cache[key]

    def __repr__(self):
        return json.dumps(self._cache, indent=4)
class LocalCache(Cache):
    """A Cache that can persist itself to / restore itself from a pickle file."""

    def __init__(self, name, local_dir='./cache', logger=None):
        super(LocalCache, self).__init__(name, local_dir, logger)
        self.local_path = ''  # set once the cache has been stored or loaded

    def store(self, local_path=''):
        """Pickle the cache to *local_path* (default: <local_dir>/<name>.pkl)."""
        if not local_path:
            if not os.path.exists(self._local_dir):
                os.makedirs(self._local_dir, exist_ok=True)
                self._logger.warning(f'{self._local_dir} did not exist, and has been created now.')
            local_path = f'{os.path.join(self._local_dir, self._name)}.pkl'
        with open(local_path, 'wb') as fw:  # Pickling
            pickle.dump(self._cache, fw, protocol=pickle.HIGHEST_PROTOCOL)
        self.local_path = local_path

    def load(self, local_path=''):
        """Load the cache from disk; raises if the file does not exist."""
        if not local_path:
            local_path = f'{os.path.join(self._local_dir, self._name)}.pkl'
        with open(local_path, 'rb') as fr:
            self._cache = pickle.load(fr)
        self.local_path = local_path

    def smart_load(self, local_path=''):
        """Like :meth:`load`, but only warns (no exception) when the file is missing.

        :param local_path: optional explicit pickle path.
        """
        if not local_path:
            local_path = f'{os.path.join(self._local_dir, self._name)}.pkl'
        if os.path.exists(local_path):
            with open(local_path, 'rb') as fr:
                cache = pickle.load(fr)
            self._cache = cache
            self.local_path = local_path
        else:
            self._logger.warning(f'load failed. "{local_path} does not exist."')

    def clear(self):
        """Clear both the in-memory dict and the on-disk pickle (if any)."""
        self._cache = dict()
        if self.local_path:
            os.remove(self.local_path)
            self.local_path = ''

    @classmethod
    def load_from(cls, local_path):
        """Build a LocalCache from an existing pickle file.

        Bug fix: the original check was inverted — it raised
        FileNotFoundError when the file *did* exist, so this constructor
        could never succeed on a valid path.
        """
        if not os.path.exists(local_path):
            raise FileNotFoundError(local_path)
        cache_name = os.path.basename(local_path).split('.')[0]
        new = LocalCache(cache_name)
        new.load(local_path)
        new.local_path = local_path  # record it; useful when clear() is called
        return new
| 3,280 | 29.663551 | 99 | py |
acl-anthology-helper | acl-anthology-helper-master/src/modules/__init__.py | from .constants import *
from .retriever import Retriever | 57 | 28 | 32 | py |
acl-anthology-helper | acl-anthology-helper-master/src/common/database_tools.py | from src.modules.papers import Paper, PaperList
class MySQLTools(object):
    """Helpers converting raw MySQL result rows into Paper objects."""

    @classmethod
    def dict_to_paper(cls, result: dict):
        """Build a Paper from one result row (a dict keyed by column name).

        NOTE(review): the keyword is spelled ``abstrat`` — presumably it
        matches the Paper constructor's parameter name; verify there.
        """
        return Paper(
            title=result['title'],
            year=result['year'],
            url=result['url'],
            authors=result['authors'].split(', '),
            abstrat=result['abstract'],
            conf_content=result['conf_content'],
            venue=result['venue'],
        )

    @classmethod
    def list_to_papers(cls, result: list):
        """Convert a list of result rows into a PaperList."""
        converted = [cls.dict_to_paper(row) for row in result]
        return PaperList(papers=converted)
| 671 | 28.217391 | 53 | py |
acl-anthology-helper | acl-anthology-helper-master/src/common/string_tools.py | class StringTools(object):
@classmethod
def match(cls, one: str, two: str):
return one.lower() == two.lower()
@classmethod
def contain(cls, text: str, keyword: str):
return keyword.lower() in text.lower()
@classmethod
def multi_or_contain(cls, text: str, keywords: list):
return bool(sum([one.lower() in text.lower() for one in keywords]))
@classmethod
def multi_and_contain(cls, text: str, keywords: list):
return bool(sum([one.lower() in text.lower() for one in keywords]) == len(keywords))
@classmethod
def isssymbols(cls, c: chr):
"""
:param c: character
:return:
is special symbols like !@#$%^%......
"""
return str.isalnum(c) | str.isspace(c)
@classmethod
def _is_valid_for_file(cls, c: chr):
"""
:param c: character
:return:
is special symbols like !@#$%^%......
"""
return False if c in "\\/:*?*<>|" else True
@classmethod
def filename_norm(cls, string: str):
return ''.join(filter(cls._is_valid_for_file, string))
| 1,121 | 27.769231 | 92 | py |
acl-anthology-helper | acl-anthology-helper-master/src/common/file_tools.py | import os
class FileTools(object):
    """Small file-system helpers."""

    @classmethod
    def info_to_file(cls, info, local_path: str):
        """Write str(info) to *local_path* as UTF-8, replacing any content."""
        with open(local_path, 'w', encoding='utf-8') as fw:
            fw.write(f'{info}')
| 195 | 23.5 | 59 | py |
acl-anthology-helper | acl-anthology-helper-master/src/common/__init__.py | 0 | 0 | 0 | py | |
acl-anthology-helper | acl-anthology-helper-master/src/common/serialization_tools.py | """
@Reference:
https://blog.csdn.net/dou_being/article/details/82290588
https://blog.csdn.net/zywvvd/article/details/106131555
"""
import json
class MyEncoder(json.JSONEncoder):
    """JSON encoder that falls back to str() for objects json cannot
    serialize natively (e.g. bytes, sets), instead of raising."""

    def default(self, obj):
        """Return a serializable stand-in for *obj*.

        The base implementation always raises TypeError for unsupported
        types; catching exactly TypeError (instead of a bare Exception, as
        the original did) avoids masking unrelated errors.
        """
        try:
            return super().default(obj)
        except TypeError:
            return str(obj)
| 434 | 19.714286 | 56 | py |
acl-anthology-helper | acl-anthology-helper-master/tasks/search_paper.py | """
@Desc:
@Reference:
https://github.com/lizhenggan/ABuilder
pip install a-sqlbuilder
"""
import sys
sys.path.insert(0, '') # 在tasks文件夹中可以直接运行程序
from typing import List
import os
from ABuilder.ABuilder import ABuilder
from src.modules.downloader import PaperDownloader
from src.common.file_tools import FileTools
from tasks.basic_task import BasicTask
from src.common.database_tools import MySQLTools
class MyTask(BasicTask):
    """Search task: query papers matching several keywords at once."""

    @classmethod
    def multi_keywords_query_papers(cls, keywords: List[str], conf_contents: list, years_limit: list):
        """
        Query papers limited to the given venues and years, then keep only
        those matching every keyword (each keyword may match in title OR
        abstract; successive iterations AND the keywords together).
        """
        data = ABuilder().table('paper') \
            .where({"year": ["in", years_limit]}) \
            .where({"venue": ["in", conf_contents]}).query()
        papers = MySQLTools.list_to_papers(data)
        filtered = papers
        for keyword in keywords:
            filtered = filtered.containing_filter('title', keyword) | \
                       filtered.containing_filter('abstract', keyword)
        return filtered

    @classmethod
    def run(cls):
        # Survey for text generation
        conf_contents_limit = ['ACL', 'EMNLP', 'NAACL', 'Findings']
        downloader = PaperDownloader()
        keywords = ['event', 'story generation']
        years_limit = list(range(2021, 2023))
        fields = ['title', 'abstract']  # NOTE(review): unused — candidate for removal
        papers = cls.multi_keywords_query_papers(keywords, conf_contents_limit, years_limit)
        downloader.logger.info(f'The size of papers: {papers.size}')
        group = papers.group('conf_content')  # NOTE(review): unused — candidate for removal
        print(papers)
if __name__ == '__main__':
MyTask.run()
| 1,606 | 28.759259 | 102 | py |
acl-anthology-helper | acl-anthology-helper-master/tasks/database.py | from src.configuration.mysql_cfg import MySQLCFG
class Config(object):
    """Base configuration (empty marker class)."""
    pass


class Proconfig(Config):
    """Production configuration (not filled in yet)."""
    pass


class Devconfig(Config):
    """Development configuration, populated from MySQLCFG."""
    debug = True
    # Bug fix: the URI previously interpolated USER twice
    # ('user:user@host') instead of the SQLAlchemy-style
    # 'user:password@host', so the connection string carried no password.
    DATABASE_URI = f'mysql+pymysql://{MySQLCFG.USER}:{MySQLCFG.PASSWORD}@{MySQLCFG.HOST}:{MySQLCFG.PORT}/{MySQLCFG.DB}'
    data_host = MySQLCFG.HOST
    data_pass = MySQLCFG.PASSWORD
    data_user = MySQLCFG.USER
    database = MySQLCFG.DB
    data_port = MySQLCFG.PORT
    charset = 'utf8mb4'
database = Devconfig
| 476 | 18.875 | 115 | py |
acl-anthology-helper | acl-anthology-helper-master/tasks/parallel_download_task.py | """
@Desc:
"""
import sys
sys.path.insert(0, '..') # 在tasks文件夹中可以直接运行程序
import os
from src.modules import Retriever
from src.modules.parallel_downloader import PaperDownloader
class ParallelDownloadTask(object):
    """Interactively download papers matching a keyword from several venues."""

    @classmethod
    def _filter_and_download(cls, papers, keyword, conf_content):
        """Keep papers whose title OR abstract contains *keyword*, then
        download them into <keyword>/<conf_content>.

        Extracted helper: the three public methods previously duplicated
        this filter-and-download logic verbatim.
        """
        downloader = PaperDownloader()
        filtered = papers.containing_filter('title', keyword) | papers.containing_filter('abstract', keyword)
        downloader.multi_download(filtered, os.path.join(keyword, conf_content))

    @classmethod
    def acl_long_download(cls, keyword: str):
        """Download matching papers from the ACL 2021 long-paper volume."""
        conf_content = '2021-acl-long'
        papers = Retriever.acl(2021, conf_content, True)  # use local cache
        cls._filter_and_download(papers, keyword, conf_content)

    @classmethod
    def naacl_main_download(cls, keyword: str):
        """Download matching papers from the NAACL 2021 main volume."""
        conf_content = '2021-naacl-main'
        papers = Retriever.naacl(2021, conf_content, True)  # use local cache
        cls._filter_and_download(papers, keyword, conf_content)

    @classmethod
    def emnlp_main_download(cls, keyword: str):
        """Download matching papers from the EMNLP 2020 main volume."""
        conf_content = '2020-emnlp-main'
        papers = Retriever.emnlp(2020, conf_content, True)  # use local cache
        cls._filter_and_download(papers, keyword, conf_content)

    @classmethod
    def run(cls):
        """Prompt for keywords until a blank line; download for each one."""
        while True:
            keyword = input('\ntype a keyword(blank will exit): ')
            if not keyword.strip():
                break
            cls.acl_long_download(keyword)
            cls.naacl_main_download(keyword)
            cls.emnlp_main_download(keyword)
if __name__ == '__main__':
ParallelDownloadTask.run()
| 1,830 | 33.54717 | 109 | py |
acl-anthology-helper | acl-anthology-helper-master/tasks/basic_task.py | """
@Desc:
@Reference:
https://github.com/lizhenggan/ABuilder
"""
import sys
sys.path.insert(0, '..') # 在tasks文件夹中可以直接运行程序
import os
from ABuilder.ABuilder import ABuilder
from src.modules.downloader import PaperDownloader
from src.modules.papers import Paper, PaperList
from src.modules.anthology_mysql import AnthologyMySQL
from src.common.database_tools import MySQLTools
class BasicTask(object):
    """End-to-end pipeline: crawl papers into MySQL, query by keyword,
    and download the matching PDFs."""

    @classmethod
    def load_data_to_db(cls):
        """
        Load the crawled paper data into the database.
        """
        db = AnthologyMySQL(cache_enable=True)
        db.create_tables()
        db.load_data()  # crawl the data and insert it into the database

    @classmethod
    def query_papers(cls, keyword: str, conf_contents: list, years_limit: list):
        """
        Query papers whose title or abstract contains *keyword*, limited to
        the given venues and years.
        """
        data = ABuilder().table('paper') \
            .where({"year": ["in", years_limit]}) \
            .where({"venue": ["in", conf_contents]}).query()
        papers = MySQLTools.list_to_papers(data)
        filtered = papers.containing_filter('title', keyword) | papers.containing_filter('abstract', keyword)
        return filtered

    @classmethod
    def download_papers(cls, papers: PaperList, keyword, conf_content):
        """
        Download the given papers into <keyword>/<conf_content>.
        """
        downloader = PaperDownloader()
        downloader.multi_download(papers, os.path.join(keyword, conf_content))

    @classmethod
    def run(cls):
        # Build/refresh the DB once, then loop on interactive keyword queries.
        cls.load_data_to_db()
        downloader = PaperDownloader()
        years_limit = list(range(2016, 2023))
        conf_contents_limit = ['ACL', 'EMNLP', 'TACL', 'NAACL']
        while True:
            keyword = input('\ntype a keyword(blank will exit): ')
            if not keyword.strip():
                break
            papers = cls.query_papers(keyword, conf_contents_limit, years_limit)
            print(f'The size of papers: {papers.size}')
            # Download grouped per volume so files land in per-venue folders.
            group = papers.group('conf_content')
            for conf_content, papers_obj in group.items():
                downloader.multi_download(papers_obj, os.path.join(keyword, conf_content))
if __name__ == '__main__':
BasicTask.run()
| 2,072 | 29.485294 | 109 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/compare_rnn.py | import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
from tensorboard.backend.event_processing import event_accumulator
def read_data(load_dir, tag="perf/avg_reward_100"):
    """Scan every TensorBoard event file in *load_dir* and return the scalar
    series for *tag* as a numpy array, or None when no file holds a complete
    (2500-point) series."""
    events = os.listdir(load_dir)
    for event in events:
        path = os.path.join(load_dir, event)
        # Load scalars only (capped at 2500 points); drop histograms, images
        # and audio to keep memory usage down.
        ea = event_accumulator.EventAccumulator(path, size_guidance={
            event_accumulator.COMPRESSED_HISTOGRAMS: 0,
            event_accumulator.IMAGES: 0,
            event_accumulator.AUDIO: 0,
            event_accumulator.SCALARS: 2500,
            event_accumulator.HISTOGRAMS: 0,
        })
        ea.Reload()
        tags = ea.Tags()
        if tag not in tags["scalars"]: continue
        # Require a complete run; partial logs are ignored.
        if len(ea.Scalars(tag)) == 2500:
            return np.array([s.value for s in ea.Scalars(tag)])
    return None
def plot_rewards_curve(save_path,
                       load_path_gru,
                       load_path_rgu,
                       load_path_lstm,
                       n_seeds=8,
                       n_workers=8,
                       ):
    """Average per-worker reward curves of three RNN variants across seeds
    and plot them with seaborn.

    NOTE(review): *save_path* is currently unused — the figure is only
    shown, never saved to that path.
    """
    gru_data = np.zeros((n_seeds, 2500))
    rgu_data = np.zeros((n_seeds, 2500))
    lstm_data = np.zeros((n_seeds, 2500))
    count = 0  # number of (seed, worker) runs skipped for incomplete logs
    for seed_idx in tqdm(range(n_seeds)):
        gru_workers = []
        rgu_workers = []
        lstm_workers = []
        for worker in range(n_workers):
            gru_event = read_data(load_dir=load_path_gru+f"_{seed_idx+1}_{worker}")
            rgu_event = read_data(load_dir=load_path_rgu+f"_{seed_idx+1}_{worker}")
            lstm_event = read_data(load_dir=load_path_lstm+f"_{seed_idx+1}_{worker}")
            # Keep a worker only when all three variants have a complete log,
            # so the three curves average over the same set of runs.
            if not(gru_event is None or lstm_event is None or rgu_event is None):
                gru_workers += [gru_event]
                rgu_workers += [rgu_event]
                lstm_workers += [lstm_event]
            else:
                count += 1
        # NOTE(review): .mean(axis=0) raises when every worker of a seed was
        # skipped (empty list) — consider guarding.
        gru_data[seed_idx] = np.array(gru_workers).mean(axis=0)
        rgu_data[seed_idx] = np.array(rgu_workers).mean(axis=0)
        lstm_data[seed_idx] = np.array(lstm_workers).mean(axis=0)
    # Long-format rows for seaborn: one row per (seed, episode, variant).
    data = []
    for seed_idx in range(n_seeds):
        for i in range(2500):
            data += [{'Episode': i, 'Reward': gru_data[seed_idx][i], "RNN Type": "GRU"}]
            data += [{'Episode': i, 'Reward': rgu_data[seed_idx][i], "RNN Type": "RGU"}]
            data += [{'Episode': i, 'Reward': lstm_data[seed_idx][i], "RNN Type": "LSTM"}]
    df = pd.DataFrame(data)
    sns.lineplot(x="Episode", y="Reward", hue="RNN Type", data=df)
    plt.show()
if __name__ == "__main__":
plot_rewards_curve(
'./gru_lstm.png',
'./logs_gru/HS_GRU_2',
'./logs_rgu/HS_RGU_2',
'./logs_lstm/HS_LSTM_2'
) | 2,697 | 31.506024 | 90 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/viz_featmaps.py | import numpy as np
import matplotlib.pyplot as plt
# Display five randomly chosen channels of a saved feature-map tensor.
# Assumes featmaps has shape (batch, channels, H, W) — TODO confirm against
# the code that wrote the .npy file.
path = "featmaps/featmaps_7500_5.npy"
featmaps = np.load(path)
rand_idxs = np.random.randint(0,featmaps.shape[1], 5)
for idx in rand_idxs:
    featmap = featmaps[0,idx,:,:]
    # *0.5+0.5 maps [-1, 1] into [0, 1] for display — presumably the maps
    # are tanh-normalized; verify.
    plt.imshow(featmap*0.5+0.5, cmap='gray')
    plt.show()
| 289 | 21.307692 | 53 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/main_1d.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from collections import namedtuple
from common.shared_optim import SharedAdam, SharedRMSprop
from Harlow_1D.train import train, train_stacked
from models.a3c_lstm_simple import A3C_LSTM, A3C_StackedLSTM
if __name__ == "__main__":
mp.set_start_method("spawn")
os.environ['OMP_NUM_THREADS'] = '1'
parser = argparse.ArgumentParser(description='Paramaters')
parser.add_argument('-c', '--config', type=str, default="Harlow_1D/config.yaml", help='path of config file')
args = parser.parse_args()
with open(args.config, 'r', encoding="utf-8") as fin:
config = yaml.load(fin, Loader=yaml.FullLoader)
n_seeds = 8
base_seed = config["seed"]
base_run_title = config["run-title"]
for seed_idx in range(1, n_seeds + 1):
config["run-title"] = base_run_title + f"_{seed_idx}"
config["seed"] = base_seed * seed_idx
exp_path = os.path.join(config["save-path"], config["run-title"])
if not os.path.isdir(exp_path):
os.mkdir(exp_path)
out_path = os.path.join(exp_path, os.path.basename(args.config))
with open(out_path, 'w') as fout:
yaml.dump(config, fout)
############## Start Here ##############
print(f"> Running {config['run-title']} {config['mode']} using {config['optimizer']}")
if config["mode"] == "vanilla":
shared_model = A3C_LSTM(
config["task"]["input-dim"],
config["agent"]["mem-units"],
config["task"]["num-actions"],
config["agent"]["cell-type"]
)
elif config["mode"] == "stacked":
shared_model = A3C_StackedLSTM(
config["task"]["input-dim"],
config["agent"]["mem-units"],
config["task"]["num-actions"],
device=config["device"]
)
else:
raise ValueError(config["mode"])
shared_model.share_memory()
shared_model.to(config['device'])
print(shared_model)
optim_class = SharedAdam if config["optimizer"] == "adam" else SharedRMSprop
optimizer = optim_class(shared_model.parameters(), lr=config["agent"]["lr"])
optimizer.share_memory()
processes = []
T.manual_seed(config["seed"])
np.random.seed(config["seed"])
T.random.manual_seed(config["seed"])
if config["resume"]:
filepath = os.path.join(
config["save-path"],
config["load-title"],
f"{config['load-title']}_{config['start-episode']}.pt"
)
print(f"> Loading Checkpoint {filepath}")
shared_model.load_state_dict(T.load(filepath)["state_dict"])
train_target = train_stacked if config["mode"] == "stacked" else train
for rank in range(config["agent"]["n-workers"]):
p = mp.Process(target=train_target, args=(
config,
shared_model,
optimizer,
rank,
))
p.start()
processes += [p]
for p in processes:
p.join()
| 3,414 | 30.915888 | 113 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/plot_training_curve.py | import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
from tensorboard.backend.event_processing import event_accumulator
def read_data(load_dir, tag="perf/avg_reward_100"):
    """Return the scalar series for *tag* from the first complete
    (2500-point) TensorBoard event file in *load_dir*, else None.

    NOTE(review): duplicated verbatim from compare_rnn.py — consider moving
    to a shared module.
    """
    events = os.listdir(load_dir)
    for event in events:
        path = os.path.join(load_dir, event)
        # Scalars only (capped at 2500); histograms/images/audio disabled.
        ea = event_accumulator.EventAccumulator(path, size_guidance={
            event_accumulator.COMPRESSED_HISTOGRAMS: 0,
            event_accumulator.IMAGES: 0,
            event_accumulator.AUDIO: 0,
            event_accumulator.SCALARS: 2500,
            event_accumulator.HISTOGRAMS: 0,
        })
        ea.Reload()
        tags = ea.Tags()
        if tag not in tags["scalars"]: continue
        if len(ea.Scalars(tag)) == 2500:
            return np.array([s.value for s in ea.Scalars(tag)])
    return None
def plot_rewards_curve(save_path,
                       load_path_lstm,
                       n_seeds=8,
                       n_workers=8,
                       ):
    """Average per-worker LSTM reward curves across seeds and plot the mean
    with a standard-deviation band.

    NOTE(review): *save_path* is unused — the figure is shown, not saved.
    """
    lstm_data = np.zeros((n_seeds, 2500))
    count = 0  # number of (seed, worker) runs skipped for incomplete logs
    for seed_idx in tqdm(range(n_seeds)):
        lstm_workers = []
        for worker in range(n_workers):
            lstm_event = read_data(load_dir=load_path_lstm+f"_{seed_idx+1}_{worker}")
            if lstm_event is not None:
                lstm_workers += [lstm_event]
            else:
                count += 1
        # NOTE(review): raises when every worker of a seed was skipped.
        lstm_data[seed_idx] = np.array(lstm_workers).mean(axis=0)
    # Long-format rows for seaborn: one row per (seed, episode).
    data = []
    for seed_idx in range(n_seeds):
        for i in range(2500):
            data += [{'Episode': i, 'Reward': lstm_data[seed_idx][i], "RNN Type": "LSTM"}]
    df = pd.DataFrame(data)
    sns.lineplot(x="Episode", y="Reward", data=df, ci="sd")
    plt.show()
if __name__ == "__main__":
plot_rewards_curve(
'./harlow_final_training.png',
'./logs_final/Harlow_Final_LSTM',
) | 1,875 | 26.588235 | 90 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/vis_simple.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from datetime import datetime
from collections import namedtuple
from Harlow_Simple.harlow import HarlowSimple
from models.a3c_lstm_simple import A3C_LSTM
def run_episode(agent, env, device="cpu"):
    """Roll the agent through one episode, taking the argmax action each step.

    NOTE(review): inference is not wrapped in torch.no_grad() — gradients
    are tracked unnecessarily; consider adding it.
    """
    agent.eval()
    done = False
    state = env.reset()
    # Meta-RL conditioning inputs: previous action (one-hot, 3 actions here)
    # and previous reward, both starting at zero.
    p_action, p_reward = [0,0,0], 0
    ht, ct = agent.get_init_states(device)
    while not done:
        logit, _, (ht, ct) = agent(
            T.tensor([state]).float().to(device), (
            T.tensor([p_action]).float().to(device),
            T.tensor([[p_reward]]).float().to(device)),
            (ht, ct)
        )
        # Greedy action (argmax of softmax == argmax of logits).
        action = T.argmax(F.softmax(logit, dim=-1), -1)
        state, reward, done, _ = env.step(action)
        p_action = np.eye(env.n_actions)[action]
        p_reward = reward
    # NOTE(review): trailing reset presumably flushes the environment's
    # visualization output (env was built with save_path) — confirm.
    env.reset()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Paramaters')
parser.add_argument('-c', '--config', type=str, default="Harlow_Simple/config.yaml", help='path of config file')
args = parser.parse_args()
with open(args.config, 'r', encoding="utf-8") as fin:
config = yaml.load(fin, Loader=yaml.FullLoader)
load_path = config["load-path"]
save_path = os.path.join(config["save-path"], config["run-title"], config["run-title"]+"_{epi:04d}.gif")
agent = A3C_LSTM(
config["task"]["input-dim"],
config["agent"]["mem-units"],
config["task"]["num-actions"],
)
agent.load_state_dict(T.load(load_path)["state_dict"])
env = HarlowSimple(visualize=True, save_interval=1, save_path=save_path)
run_episode(agent, env)
| 1,850 | 25.826087 | 117 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/plot.py | import os
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Aggregate per-worker reward logs across seeds and plot mean trial
    # performance per training quantile (Harlow learning-to-learn curve).
    all_rewards = []
    base_path = "ckpt"
    run_title = "Harlow_Final_LSTM"
    n_seeds = 8
    n_workers = 8
    for seed in range(1, n_seeds+1):
        run = run_title + f"_{seed}"
        run_rewards = []
        for worker in range(n_workers):
            path = os.path.join(base_path, run, f"rewards_{worker}.npy")
            if os.path.exists(path):
                rewards = np.load(path)
                run_rewards += [rewards[:2500]]
        # Average a seed's workers; assumes each rewards array is
        # (episodes, trials) — TODO confirm against the training code.
        all_rewards += [np.array(run_rewards).mean(axis=0)]
    all_rewards = np.stack(all_rewards)
    # Episode boundaries of the five training quantiles.
    quantiles = [0, 500, 1000, 1500, 2000, 2500]
    n_quantiles = len(quantiles)-1
    n_trials = all_rewards.shape[2]
    for i in range(n_quantiles):
        line = []
        stds = []
        for j in range(n_trials):
            # Rewards of all seeds for trial j within this quantile.
            q = all_rewards[:,quantiles[i]:quantiles[i+1],j]
            performance = q.mean(axis=1)
            line += [performance.mean()*100]
            stds += [(performance.std()*100)]
        plt.errorbar(np.arange(1,7), line, fmt='o-', yerr=stds)
    # Chance level for a two-option choice.
    plt.plot([1,6], [50,50], '--')
    plt.xlabel("Trial")
    plt.ylabel("Performance (%)")
    plt.legend(["Random", "1st", "2nd", "3rd", "4th", "Final"], title="Training Quantile")
    plt.title("Harlow Task")
    plt.show()
| 1,354 | 29.795455 | 90 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/main_psychlab.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from collections import namedtuple
from common.shared_optim import SharedAdam, SharedRMSprop
from Harlow_PsychLab.train import train, train_stacked
from Harlow_PsychLab.harlow import HarlowWrapper
from models.a3c_lstm import A3C_LSTM, A3C_StackedLSTM
from models.a3c_conv_lstm import A3C_ConvLSTM, A3C_ConvStackedLSTM
from models.resnet_lstm import ResNet_LSTM
if __name__ == "__main__":
mp.set_start_method("spawn")
os.environ['OMP_NUM_THREADS'] = '1'
parser = argparse.ArgumentParser(description='Paramaters')
parser.add_argument('-c', '--config', type=str,
default="/home/bkhmsi/Documents/Projects/lab/Meta-RL-Harlow/Harlow_PsychLab/config.yaml",
help='path of config file')
parser.add_argument('--length', type=int, default=3600,
help='Number of steps to run the agent')
parser.add_argument('--width', type=int, default=512,
help='Horizontal size of the observations')
parser.add_argument('--height', type=int, default=512,
help='Vertical size of the observations')
parser.add_argument('--fps', type=int, default=60,
help='Number of frames per second')
parser.add_argument('--runfiles_path', type=str, default=None,
help='Set the runfiles path to find DeepMind Lab data')
parser.add_argument('--level_script', type=str,
default='contributed/psychlab/harlow',
help='The environment level script to load')
parser.add_argument('--record', type=str, default=None,
help='Record the run to a demo file')
parser.add_argument('--demo', type=str, default=None,
help='Play back a recorded demo file')
parser.add_argument('--demofiles', type=str, default=None,
help='Directory for demo files')
parser.add_argument('--video', type=str, default=None,
help='Record the demo run as a video')
args = parser.parse_args()
with open(args.config, 'r', encoding="utf-8") as fin:
config = yaml.load(fin, Loader=yaml.FullLoader)
task_config = {
'fps': str(args.fps),
'width': str(args.width),
'height': str(args.height)
}
if args.record:
task_config['record'] = args.record
if args.demo:
task_config['demo'] = args.demo
if args.demofiles:
task_config['demofiles'] = args.demofiles
if args.video:
task_config['video'] = args.video
n_seeds = 1
base_seed = config["seed"]
base_run_title = config["run-title"]
for seed_idx in range(1, n_seeds + 1):
config["run-title"] = base_run_title + f"_{seed_idx}"
config["seed"] = base_seed * seed_idx
exp_path = os.path.join(config["save-path"], config["run-title"])
if not os.path.isdir(exp_path):
os.mkdir(exp_path)
out_path = os.path.join(exp_path, os.path.basename(args.config))
with open(out_path, 'w') as fout:
yaml.dump(config, fout)
############## Start Here ##############
print(f"> Running {config['run-title']} {config['mode']} using {config['optimizer']}")
if config["mode"] == "resnet":
shared_model = ResNet_LSTM(config["agent"], config["task"]["num-actions"])
elif config["mode"] == "conv-stacked":
shared_model = A3C_ConvStackedLSTM(config["agent"], config["task"]["num-actions"])
elif config["mode"] == "stacked":
shared_model = A3C_StackedLSTM(config["agent"], config["task"]["num-actions"])
elif config["mode"] == "conv-vanilla":
shared_model = A3C_ConvLSTM(config["agent"], config["task"]["num-actions"])
elif config["mode"] == "vanilla":
shared_model = A3C_LSTM(config["agent"], config["task"]["num-actions"])
else:
raise ValueError(config["mode"])
print(shared_model)
shared_model.share_memory()
shared_model.to(config['device'])
optim_class = SharedAdam if config["optimizer"] == "adam" else SharedRMSprop
optimizer = optim_class(shared_model.parameters(), lr=config["agent"]["lr"])
optimizer.share_memory()
processes = []
update_counter = 0
T.manual_seed(config["seed"])
np.random.seed(config["seed"])
T.random.manual_seed(config["seed"])
if config["copy-encoder"]:
filepath = os.path.join(
config["save-path"],
config["load-title"],
f"{config['load-title']}_{config['start-episode']}.pt"
)
print(f"> Copying Encoder from {filepath}")
pretrained_dict = T.load(filepath, map_location=T.device(config["device"]))["state_dict"]
load_dict = shared_model.state_dict()
for k, v in pretrained_dict.items():
if k in "encoder": load_dict[k] = v
shared_model.load_state_dict(load_dict)
if config["resume"]:
filepath = os.path.join(
config["save-path"],
config["load-title"],
f"{config['load-title']}_{config['start-episode']:04d}.pt"
)
print(f"> Loading Checkpoint {filepath}")
model_data = T.load(filepath, map_location=T.device(config["device"]))
update_counter = model_data["update_counter"]
pretrained_dict = model_data["state_dict"]
# load_dict = {}
# for i, (k, v) in enumerate(pretrained_dict.items()):
# load_dict[k] = v if i < 6 else eval(f"shared_model.{k}")
shared_model.load_state_dict(pretrained_dict)
train_target = train_stacked if "stacked" in config["mode"] else train
for rank in range(config["agent"]["n-workers"]):
p = mp.Process(target=train_target, args=(
config,
shared_model,
optimizer,
rank,
task_config,
update_counter,
))
p.start()
processes += [p]
for p in processes:
p.join()
| 6,521 | 37.591716 | 114 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/run_episode.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
import deepmind_lab as lab
from tqdm import tqdm
from collections import namedtuple
from common.shared_optim import SharedAdam, SharedRMSprop
from Harlow_PsychLab.train import train, train_stacked
from Harlow_PsychLab.harlow import HarlowWrapper
from models.a3c_lstm import A3C_LSTM, A3C_StackedLSTM
from models.a3c_conv_lstm import A3C_ConvLSTM, A3C_ConvStackedLSTM
if __name__ == "__main__":
    # Run a single evaluation episode of a trained agent in the Harlow
    # PsychLab level, optionally dumping feature maps and an episode GIF.
    # "spawn" + single-threaded OpenMP: each process gets its own
    # DeepMind Lab instance without thread contention.
    mp.set_start_method("spawn")
    os.environ['OMP_NUM_THREADS'] = '1'

    parser = argparse.ArgumentParser(description='Paramaters')
    parser.add_argument('-c', '--config',  type=str,
                        default="/home/bkhmsi/Documents/Projects/lab/Meta-RL-Harlow/Harlow_PsychLab/config.yaml",
                        help='path of config file')
    parser.add_argument('--length', type=int, default=3600,
                        help='Number of steps to run the agent')
    parser.add_argument('--width', type=int, default=84,
                        help='Horizontal size of the observations')
    parser.add_argument('--height', type=int, default=84,
                        help='Vertical size of the observations')
    parser.add_argument('--fps', type=int, default=60,
                        help='Number of frames per second')
    parser.add_argument('--runfiles_path', type=str, default=None,
                        help='Set the runfiles path to find DeepMind Lab data')
    parser.add_argument('--level_script', type=str,
                        default='contributed/psychlab/harlow',
                        help='The environment level script to load')
    parser.add_argument('--record', type=str, default=None,
                        help='Record the run to a demo file')
    parser.add_argument('--demo', type=str, default=None,
                        help='Play back a recorded demo file')
    parser.add_argument('--demofiles', type=str, default=None,
                        help='Directory for demo files')
    parser.add_argument('--video', type=str, default=None,
                        help='Record the demo run as a video')
    args = parser.parse_args()

    with open(args.config, 'r', encoding="utf-8") as fin:
        config = yaml.load(fin, Loader=yaml.FullLoader)

    # DeepMind Lab level settings — all values must be strings.
    task_config = {
        'fps': str(args.fps),
        'width': str(args.width),
        'height': str(args.height)
    }
    if args.record:
        task_config['record'] = args.record
    if args.demo:
        task_config['demo'] = args.demo
    if args.demofiles:
        task_config['demofiles'] = args.demofiles
    if args.video:
        task_config['video'] = args.video

    n_seeds = 1
    device = config["device"]

    ############## Start Here ##############
    print(f"> Running {config['run-title']} {config['mode']}")

    # Pick the architecture matching the config's "mode".
    if config["mode"] == "conv-stacked":
        agent = A3C_ConvStackedLSTM(config["agent"], config["task"]["num-actions"])
    elif config["mode"] == "stacked":
        agent = A3C_StackedLSTM(config["agent"], config["task"]["num-actions"])
    elif config["mode"] == "conv-vanilla":
        agent = A3C_ConvLSTM(config["agent"], config["task"]["num-actions"])
    elif config["mode"] == "vanilla":
        agent = A3C_LSTM(config["agent"], config["task"]["num-actions"])
    else:
        raise ValueError(config["mode"])

    filepath = os.path.join(
        config["save-path"],
        config["load-title"],
        f"{config['load-title']}_{config['start-episode']:04d}.pt"
    )
    print(f"> Loading Checkpoint {filepath}")
    agent.load_state_dict(T.load(filepath, map_location=T.device(config["device"]))["state_dict"])

    lab_env = lab.Lab("contributed/psychlab/harlow", ['RGB_INTERLEAVED'], config=task_config)
    env = HarlowWrapper(lab_env, config, 0)

    print(agent)
    agent.to(config['device'])
    agent.eval()

    # Pure inference: no gradients needed for the rollout.
    with T.no_grad():
        done = False
        state = env.reset()
        # One-hot previous action and previous reward are fed back to the agent.
        p_action, p_reward = [0]*config["task"]["num-actions"], 0
        episode_reward = 0
        # NOTE(review): assumes a two-level (stacked) recurrent agent —
        # the "vanilla" modes above expose a single hidden state; confirm
        # this script is only used with stacked checkpoints.
        ht1, ct1 = agent.get_init_states(1, device)
        ht2, ct2 = agent.get_init_states(2, device)
        while not done:
            logit, value, (ht1, ct1), (ht2, ct2) = agent(
                T.tensor([state]).to(device), (
                    T.tensor([p_action]).float().to(device),
                    T.tensor([[p_reward]]).float().to(device)),
                (ht1, ct1), (ht2, ct2)
            )

            logit = logit.squeeze(0)
            prob = F.softmax(logit, dim=-1)
            action = prob.multinomial(num_samples=1).detach()
            state, reward, done, _ = env.step(int(action))

            # NOTE(review): float equality against 0.2 — presumably the
            # scaled fixation reward (1/5); confirm against the reward scheme.
            if reward == 0.2 and config["save-featmaps"]:
                state, _, _, _ = env.step(0)
                state, _, _, _ = env.step(1)
                layer = 9
                path = f"/home/bkhmsi/Documents/Projects/lab/Meta-RL-Harlow/featmaps_{config['start-episode']:04d}_{layer}.npy"
                agent.save_featmaps(T.tensor([state]), path, layer)
                print("> Feature Maps Saved")
                exit()

            episode_reward += reward
            p_action = np.eye(env.num_actions)[int(action)]
            p_reward = reward

    env.save_frames(f"/home/bkhmsi/Documents/Projects/lab/Meta-RL-Harlow/sample_{config['start-episode']:04d}.gif")
    print(f"Episode Reward: {episode_reward}")
| 5,563 | 35.605263 | 127 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/main_psychlab_single.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
import deepmind_lab as lab
from tqdm import tqdm
from collections import namedtuple
from common.shared_optim import SharedAdam, SharedRMSprop
from Harlow_PsychLab.train import train, train_stacked
from Harlow_PsychLab.harlow import HarlowWrapper
from models.a3c_lstm import A3C_LSTM, A3C_StackedLSTM
from models.a3c_conv_lstm import A3C_ConvLSTM, A3C_ConvStackedLSTM
from models.densenet_lstm import DenseNet_StackedLSTM
if __name__ == "__main__":
    # Single-process A3C-style training of an agent on the Harlow PsychLab
    # level: n-step rollouts, GAE advantages, entropy-regularized policy loss.
    # (Removed: a fully commented-out duplicate of the training loop for the
    # stacked-LSTM variant, and a dead snapshot/exit debug stub.)
    mp.set_start_method("spawn")
    os.environ['OMP_NUM_THREADS'] = '1'

    parser = argparse.ArgumentParser(description='Paramaters')
    parser.add_argument('-c', '--config',  type=str,
                        default="/home/bkhmsi/Documents/Projects/lab/Meta-RL-Harlow/Harlow_PsychLab/config.yaml",
                        help='path of config file')
    parser.add_argument('--length', type=int, default=3600,
                        help='Number of steps to run the agent')
    parser.add_argument('--width', type=int, default=84,
                        help='Horizontal size of the observations')
    parser.add_argument('--height', type=int, default=84,
                        help='Vertical size of the observations')
    parser.add_argument('--fps', type=int, default=60,
                        help='Number of frames per second')
    parser.add_argument('--runfiles_path', type=str, default=None,
                        help='Set the runfiles path to find DeepMind Lab data')
    parser.add_argument('--level_script', type=str,
                        default='contributed/psychlab/harlow',
                        help='The environment level script to load')
    parser.add_argument('--record', type=str, default=None,
                        help='Record the run to a demo file')
    parser.add_argument('--demo', type=str, default=None,
                        help='Play back a recorded demo file')
    parser.add_argument('--demofiles', type=str, default=None,
                        help='Directory for demo files')
    parser.add_argument('--video', type=str, default=None,
                        help='Record the demo run as a video')
    args = parser.parse_args()

    with open(args.config, 'r', encoding="utf-8") as fin:
        config = yaml.load(fin, Loader=yaml.FullLoader)

    # DeepMind Lab level settings — all values must be strings.
    task_config = {
        'fps': str(args.fps),
        'width': str(args.width),
        'height': str(args.height)
    }
    if args.record:
        task_config['record'] = args.record
    if args.demo:
        task_config['demo'] = args.demo
    if args.demofiles:
        task_config['demofiles'] = args.demofiles
    if args.video:
        task_config['video'] = args.video

    n_seeds = 1
    device = config["device"]
    base_seed = config["seed"]
    base_run_title = config["run-title"]
    for seed_idx in range(1, n_seeds + 1):
        # Derive a per-seed run title/seed so repeated runs don't collide.
        config["run-title"] = base_run_title + f"_{seed_idx}"
        config["seed"] = base_seed * seed_idx

        exp_path = os.path.join(config["save-path"], config["run-title"])
        if not os.path.isdir(exp_path):
            os.mkdir(exp_path)

        # Snapshot the effective config next to the checkpoints.
        out_path = os.path.join(exp_path, os.path.basename(args.config))
        with open(out_path, 'w') as fout:
            yaml.dump(config, fout)

        ############## Start Here ##############
        print(f"> Running {config['run-title']} {config['mode']} using {config['optimizer']}")

        params = (config["agent"], config["task"]["num-actions"])
        if config["mode"] == "densenet-stacked":
            agent = DenseNet_StackedLSTM(*params)
        elif config["mode"] == "conv-stacked":
            agent = A3C_ConvStackedLSTM(*params)
        elif config["mode"] == "stacked":
            agent = A3C_StackedLSTM(*params)
        elif config["mode"] == "conv-vanilla":
            agent = A3C_ConvLSTM(*params)
        elif config["mode"] == "vanilla":
            agent = A3C_LSTM(*params)
        else:
            raise ValueError(config["mode"])

        print(agent)
        agent.to(config['device'])

        optim_class = T.optim.RMSprop if config["optimizer"] == "rmsprop" else T.optim.AdamW
        optimizer = optim_class(agent.parameters(), lr=config["agent"]["lr"])

        T.manual_seed(config["seed"])
        np.random.seed(config["seed"])
        T.random.manual_seed(config["seed"])

        update_counter = 0

        if config["copy-encoder"]:
            # Warm-start only the encoder weights from a pretrained checkpoint.
            filepath = os.path.join(
                config["save-path"],
                config["load-title"],
                f"{config['load-title']}_{config['start-episode']}.pt"
            )
            print(f"> Copying Encoder from {filepath}")
            pretrained_dict = T.load(filepath, map_location=T.device(config["device"]))["state_dict"]
            load_dict = {}
            for k, v in pretrained_dict.items():
                # NOTE(review): eval() on state-dict keys is fragile — keys
                # containing numeric sub-modules (e.g. "encoder.0.weight") are
                # not valid attribute expressions; confirm the checkpoint's
                # key layout before relying on this.
                load_dict[k] = v if "encoder" in k else eval(f"agent.{k}")
            agent.load_state_dict(load_dict)

        if config["resume"]:
            # Resume the full model and the update counter.
            filepath = os.path.join(
                config["save-path"],
                config["load-title"],
                f"{config['load-title']}_{config['start-episode']:04d}.pt"
            )
            print(f"> Loading Checkpoint {filepath}")
            model_data = T.load(filepath, map_location=T.device(config["device"]))
            update_counter = model_data["update_counter"]
            agent.load_state_dict(model_data["state_dict"])

        if config["freeze-encoder"]:
            print("> Freezing Encoder")
            for param in agent.encoder.parameters():
                param.requires_grad = False

        lab_env = lab.Lab("contributed/psychlab/harlow", ['RGB_INTERLEAVED'], config=task_config)
        env = HarlowWrapper(lab_env, config, 0)
        agent.train()

        ### hyper-parameters ###
        gamma = config["agent"]["gamma"]
        gae_lambda = config["agent"]["gae-lambda"]
        val_coeff = config["agent"]["value-loss-weight"]
        entropy_coeff = config["agent"]["entropy-weight"]
        n_step_update = config["agent"]["n-step-update"]

        writer = SummaryWriter(log_dir=os.path.join(config["log-path"], config["run-title"]))
        save_path = os.path.join(config["save-path"], config["run-title"], config["run-title"]+"_{epi:04d}")
        save_interval = config["save-interval"]

        done = True
        state = env.reset()
        p_action, p_reward = [0]*config["task"]["num-actions"], 0

        episode_reward = 0
        total_rewards = []

        # Training runs indefinitely; interrupt the process to stop.
        while True:
            # Fresh LSTM state at episode boundaries; otherwise truncate BPTT
            # by detaching the carried hidden state.
            if done:
                ht, ct = agent.get_init_states(device)
            else:
                ht, ct = ht.detach(), ct.detach()

            values = []
            log_probs = []
            rewards = []
            entropies = []

            # Roll out up to n_step_update environment steps.
            for _ in range(n_step_update):
                logit, value, (ht, ct) = agent(
                    T.tensor([state]).to(device), (
                        T.tensor([p_action]).float().to(device),
                        T.tensor([[p_reward]]).float().to(device)),
                    (ht, ct)
                )

                logit = logit.squeeze(0)
                prob = F.softmax(logit, dim=-1)
                log_prob = F.log_softmax(logit, dim=-1)
                entropy = -(log_prob * prob).sum(1, keepdim=True)
                entropies += [entropy]

                action = prob.multinomial(num_samples=1).detach()
                log_prob = log_prob.gather(1, action)

                state, reward, done, _ = env.step(int(action))

                episode_reward += reward
                p_action = np.eye(env.num_actions)[int(action)]
                p_reward = reward

                log_probs += [log_prob]
                values += [value]
                rewards += [reward]

                if done:
                    state = env.reset()
                    total_rewards += [episode_reward]
                    avg_reward_100 = np.array(total_rewards[-100:]).mean()
                    writer.add_scalar("perf/reward_t", episode_reward, env.episode_num)
                    writer.add_scalar("perf/avg_reward_100", avg_reward_100, env.episode_num)
                    episode_reward = 0
                    if env.episode_num % save_interval == 0:
                        T.save({
                            "state_dict": agent.state_dict(),
                            "avg_reward_100": avg_reward_100,
                            "update_counter": update_counter
                        }, save_path.format(epi=env.episode_num) + ".pt")
                    break

            # Bootstrap the return from the critic unless the episode ended.
            R = T.zeros(1, 1).to(device)
            if not done:
                _, value, _ = agent(
                    T.tensor([state]).to(device), (
                        T.tensor([p_action]).float().to(device),
                        T.tensor([[p_reward]]).float().to(device)),
                    (ht, ct)
                )
                R = value.detach()

            values += [R]
            policy_loss = 0
            value_loss = 0
            gae = T.zeros(1, 1).to(device)
            for i in reversed(range(len(rewards))):
                R = gamma * R + rewards[i]
                advantage = R - values[i]
                value_loss = value_loss + 0.5 * advantage.pow(2)

                # Generalized Advantage Estimation
                delta_t = rewards[i] + gamma * values[i + 1] - values[i]
                gae = gae * gamma * gae_lambda + delta_t

                policy_loss = policy_loss - \
                    log_probs[i] * gae.detach() - entropy_coeff * entropies[i]

            loss = policy_loss + val_coeff * value_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            update_counter += 1
            writer.add_scalar("losses/total_loss", loss.item(), update_counter)
| 14,014 | 36.573727 | 114 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/pretrain/evaluate.py | import torch as T
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
from utils import get_test_loader
# Published pretrained CIFAR checkpoints consumed by cifar10()/cifar100()
# below (keys match the dataset name passed to model_zoo.load_url).
model_urls = {
    'cifar10': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar10-d875770b.pth',
    'cifar100': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar100-3a55a987.pth',
}
class CIFAR(nn.Module):
    """Convolutional backbone followed by a single linear classification head.

    Args:
        features: an ``nn.Sequential`` feature extractor (see ``make_layers``).
        n_channel: flattened feature size produced by the backbone.
        num_classes: size of the output logit vector.
    """

    def __init__(self, features, n_channel, num_classes):
        super(CIFAR, self).__init__()
        assert isinstance(features, nn.Sequential), type(features)
        self.features = features
        self.classifier = nn.Sequential(nn.Linear(n_channel, num_classes))
        # Echo the architecture at construction time (original behavior).
        print(self.features)
        print(self.classifier)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)
def make_layers(cfg, batch_norm=False):
    """Build a VGG-style ``nn.Sequential`` from a layer spec.

    Each entry of ``cfg`` is either ``'M'`` (2x2 max-pool), an int (3x3 conv
    with that many output channels, padding 1), or an ``(out_channels,
    padding)`` tuple. With ``batch_norm``, every conv is followed by a
    non-affine BatchNorm before the ReLU.

    (Fix: dropped the unused ``enumerate`` index from the loop.)
    """
    layers = []
    in_channels = 3  # RGB input
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            padding = v[1] if isinstance(v, tuple) else 1
            out_channels = v[0] if isinstance(v, tuple) else v
            conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(out_channels, affine=False), nn.ReLU()]
            else:
                layers += [conv2d, nn.ReLU()]
            in_channels = out_channels
    return nn.Sequential(*layers)
def cifar10(n_channel, pretrained=None):
    """Construct the CIFAR-10 model; load the published checkpoint if ``pretrained`` is set."""
    cfg = [
        n_channel, n_channel, 'M',
        2 * n_channel, 2 * n_channel, 'M',
        4 * n_channel, 4 * n_channel, 'M',
        (8 * n_channel, 0), 'M',
    ]
    model = CIFAR(make_layers(cfg, batch_norm=True), n_channel=8 * n_channel, num_classes=10)
    if pretrained is not None:
        m = model_zoo.load_url(model_urls['cifar10'])
        state_dict = m.state_dict() if isinstance(m, nn.Module) else m
        assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
        model.load_state_dict(state_dict)
    return model
def cifar100(n_channel, pretrained=None):
    """Construct the CIFAR-100 model; load the published checkpoint if ``pretrained`` is set."""
    cfg = [
        n_channel, n_channel, 'M',
        2 * n_channel, 2 * n_channel, 'M',
        4 * n_channel, 4 * n_channel, 'M',
        (8 * n_channel, 0), 'M',
    ]
    model = CIFAR(make_layers(cfg, batch_norm=True), n_channel=8 * n_channel, num_classes=100)
    if pretrained is not None:
        m = model_zoo.load_url(model_urls['cifar100'])
        state_dict = m.state_dict() if isinstance(m, nn.Module) else m
        assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
        model.load_state_dict(state_dict)
    return model
if __name__ == "__main__":
    # Evaluate the pretrained CIFAR-100 reference model on the test split.
    device = 'cuda'
    model = cifar100(128, pretrained=True).to(device)
    model.eval()

    test_loader = get_test_loader("./data", num_classes=100, batch_size=32)

    total, correct = 0, 0
    # Fix: inference only — disable autograd to avoid building graphs
    # (saves memory and time; results are unchanged).
    with T.no_grad():
        for data in test_loader:
            images, labels = data[0].to(device), data[1].to(device)
            outputs = model(images)
            _, predicted = T.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    acc = correct / total
    print(f"Testing Accuracy: {acc*100:.4f}%")
| 3,243 | 36.287356 | 122 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/pretrain/utils.py | """
Create train, valid, test iterators for CIFAR-10 [1].
Easily extended to MNIST, CIFAR-100 and Imagenet.
[1]: https://discuss.pytorch.org/t/feedback-on-pytorch-for-kaggle-competitions/2252/4
"""
import torch
import imageio
import numpy as np
import matplotlib.pyplot as plt
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
def pad_image(image, imsize=84):
    """Place ``image`` at a uniformly random offset on a black square canvas.

    Args:
        image: HxWx3 array-like with H, W <= ``imsize``.
        imsize: side length of the square output canvas (default 84; the
            previously hard-coded value, now a backward-compatible parameter).

    Returns:
        (imsize, imsize, 3) float array with the image at a random offset.
    """
    image = np.array(image)
    full_image = np.zeros((imsize, imsize, 3))
    # Fix: +1 makes the high bound inclusive of the last valid offset, so an
    # image that exactly fits (H == imsize) gets offset 0 instead of crashing
    # in np.random.randint(0), and bottom/right-flush placements are possible.
    rand_x = np.random.randint(imsize - image.shape[0] + 1)
    rand_y = np.random.randint(imsize - image.shape[1] + 1)
    full_image[
        rand_x:rand_x + image.shape[0],
        rand_y:rand_y + image.shape[1], :
    ] = image
    return full_image
def get_train_valid_loader(data_dir,
                           num_classes,
                           batch_size,
                           val_batch_size,
                           augment,
                           random_seed,
                           valid_size=0.1,
                           shuffle=True,
                           num_workers=4,
                           pin_memory=False):
    """
    Utility function for loading and returning train and valid
    multi-process iterators over the CIFAR-10/100 dataset.
    If using CUDA, num_workers should be set to 1 and pin_memory to True.
    Params
    ------
    - data_dir: path directory to the dataset.
    - num_classes: 10 selects CIFAR-10, anything else CIFAR-100.
    - batch_size: how many samples per batch to load.
    - val_batch_size: batch size for the validation iterator.
    - augment: whether to apply the data augmentation scheme
      (random horizontal flip). Only applied on the train split.
    - random_seed: fix seed for reproducibility.
    - valid_size: percentage split of the training set used for
      the validation set. Should be a float in the range [0, 1].
    - shuffle: whether to shuffle the train/validation indices.
    - num_workers: number of subprocesses to use when loading the dataset.
    - pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
      True if using GPU.
    Returns
    -------
    - train_loader: training set iterator.
    - valid_loader: validation set iterator.
    """
    error_msg = "[!] valid_size should be in the range [0, 1]."
    assert ((valid_size >= 0) and (valid_size <= 1)), error_msg

    # define transforms
    valid_transform = transforms.Compose([
        transforms.ToTensor()
    ])
    # Fix: the `augment` flag was previously ignored — the flip was applied
    # unconditionally. It now controls augmentation as documented.
    if augment:
        train_transform = transforms.Compose([
            transforms.RandomHorizontalFlip(p=0.2),
            transforms.ToTensor()
        ])
    else:
        train_transform = transforms.Compose([
            transforms.ToTensor()
        ])

    dataset = datasets.CIFAR10 if num_classes == 10 else datasets.CIFAR100

    # load the dataset twice so train/valid can carry different transforms
    train_dataset = dataset(
        root=data_dir,
        train=True,
        download=True,
        transform=train_transform,
    )

    valid_dataset = dataset(
        root=data_dir,
        train=True,
        download=True,
        transform=valid_transform,
    )

    num_train = len(train_dataset)
    indices = list(range(num_train))
    split = int(np.floor(valid_size * num_train))

    if shuffle:
        # Seed before shuffling so the split is reproducible.
        np.random.seed(random_seed)
        np.random.shuffle(indices)

    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        sampler=train_sampler,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=val_batch_size,
        sampler=valid_sampler,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )

    return (train_loader, valid_loader)
def get_test_loader(data_dir,
                    num_classes,
                    batch_size,
                    shuffle=True,
                    num_workers=4,
                    pin_memory=False):
    """
    Utility function for loading and returning a multi-process
    test iterator over the CIFAR-10/100 dataset.
    If using CUDA, num_workers should be set to 1 and pin_memory to True.
    Params
    ------
    - data_dir: path directory to the dataset.
    - num_classes: 10 selects CIFAR-10, anything else CIFAR-100.
    - batch_size: how many samples per batch to load.
    - shuffle: whether to shuffle the dataset after every epoch.
    - num_workers: number of subprocesses to use when loading the dataset.
    - pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
      True if using GPU.
    Returns
    -------
    - data_loader: test set iterator.
    """
    # Normalize each channel from [0, 1] to [-1, 1].
    normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    dataset_cls = datasets.CIFAR10 if num_classes == 10 else datasets.CIFAR100
    test_set = dataset_cls(
        root=data_dir,
        train=False,
        download=True,
        transform=transform,
    )

    return torch.utils.data.DataLoader(
        test_set,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
if __name__ == "__main__":
    # Visual smoke test: pad one CIFAR test image onto the 84x84 canvas.
    loader = get_test_loader("./data", num_classes=100, batch_size=1)
    image, _ = next(iter(loader))
    # Fix: the DataLoader yields CHW tensors, but pad_image expects an HWC
    # array (the old code padded CHW directly, which fails on assignment).
    # The padded result is already HWC, so no moveaxis is needed for imshow.
    hwc = image[0].permute(1, 2, 0).numpy()
    padded = pad_image(hwc)
    plt.imshow(padded)
    plt.show()
Meta-RL-Harlow | Meta-RL-Harlow-master/pretrain/train.py | import os
import yaml
import argparse
import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from tqdm import tqdm
from copy import deepcopy
from torch.utils.tensorboard import SummaryWriter
from utils import get_train_valid_loader, get_test_loader
class ConvNet(nn.Module):
    """Small two-conv encoder with an MLP softmax head for encoder pretraining.

    NOTE(review): the inline output-shape comments in the original
    ((16, 20, 20) then (32, 9, 9)) imply an 84x84 input, but
    ``nn.Linear(128, ...)`` only matches a flattened size of 128
    (= 32 * 2 * 2, i.e. 28x28 input) — confirm the intended input size.
    """

    def __init__(self, num_classes):
        super(ConvNet, self).__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=(8, 8), stride=(4, 4)),
            nn.Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2)),
        )
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, num_classes),
            nn.Softmax(dim=-1),
        )

    def forward(self, image):
        features = self.encoder(image)
        return self.classifier(features)
class Trainer:
    """Supervised pretraining harness for ConvNet on CIFAR-10/100.

    Builds the model, data loaders, optimizer, LR scheduler and TensorBoard
    writer from ``config``; ``train`` runs with early stopping on validation
    loss and checkpoints the best model to ``self.filepath``.
    """

    def __init__(self, config):
        self.device = config["device"]
        self.model = ConvNet(num_classes=config["num-classes"])
        self.model.to(self.device)

        if config["resume"]:
            print("> Loading Checkpoint")
            self.model.load_state_dict(T.load(config["load-path"]))

        self.train_loader, self.val_loader = get_train_valid_loader(
            config["data-path"],
            config["num-classes"],
            config["batch-size"],
            config["val-batch-size"],
            config["augment"],
            config["seed"],
            config["valid-size"],
            config["shuffle"],
            config["num-workers"]
        )

        self.test_loader = get_test_loader(
            config["data-path"],
            config["num-classes"],
            config["batch-size"],
            config["shuffle"],
            config["num-workers"],
            config["pin-memory"]
        )

        self.criterion = nn.CrossEntropyLoss()
        self.optim = T.optim.AdamW(self.model.parameters(), lr=config["lr-init"], weight_decay=config["weight-decay"])
        self.writer = SummaryWriter(log_dir=os.path.join("logs", config["run-title"]))
        # NOTE(review): this scheduler is created but never stepped anywhere
        # in the class — confirm whether reduce_lr.step(val_loss) is intended.
        self.reduce_lr = T.optim.lr_scheduler.ReduceLROnPlateau(self.optim, factor=config["lr-factor"], patience=config["lr-patience"], min_lr=config["lr-min"])

        self.stopping_patience = config["stopping-patience"]
        self.stopping_delta = config["stopping-delta"]
        self.filepath = os.path.join(config["save-path"], config["run-title"], config["run-title"]+".pt")

    def train_epoch(self, epoch, pbar):
        """Run one training epoch; return (mean train loss, mean val loss)."""
        self.model.train()
        train_loss = np.zeros(len(self.train_loader))
        for i, (inputs, labels) in enumerate(self.train_loader):
            inputs = inputs.float().to(self.device)
            labels = labels.to(self.device)

            self.optim.zero_grad()
            outputs = self.model(inputs)
            loss = self.criterion(outputs, labels)
            loss.backward()
            self.optim.step()

            train_loss[i] = loss.item()
            # Fix: include the current batch in the running mean
            # (was train_loss[:i], which lags one batch and shows 0 at i=0).
            pbar.set_description(f"Epoch {epoch} | Loss: {train_loss[:i+1].sum()/(i+1):.4f} | ({i}/{len(self.train_loader)})")

        val_loss = self.validate_epoch()
        return train_loss.mean(), val_loss

    def validate_epoch(self):
        """Return the mean validation loss (gradients disabled)."""
        self.model.eval()
        val_loss = np.zeros(len(self.val_loader))
        # Fix: inference only — no autograd graphs needed.
        with T.no_grad():
            for i, (inputs, labels) in enumerate(self.val_loader):
                inputs = inputs.float().to(self.device)
                labels = labels.to(self.device)
                outputs = self.model(inputs)
                loss = self.criterion(outputs, labels)
                val_loss[i] = loss.item()
        return val_loss.mean()

    def evaluate(self, load_path):
        """Return test-set accuracy (in [0, 1]) of the checkpoint at ``load_path``."""
        total, correct = 0, 0
        self.model.load_state_dict(T.load(load_path, map_location=T.device(self.device)))
        self.model.eval()
        # Fix: inference only — no autograd graphs needed.
        with T.no_grad():
            for data in self.test_loader:
                images, labels = data[0].to(self.device), data[1].to(self.device)
                outputs = self.model(images)
                _, predicted = T.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        return correct / total

    def train(self, epochs):
        """Train for up to ``epochs`` epochs with early stopping on val loss."""
        stopping_counter = 0
        best_val_loss = np.inf
        progress = tqdm(range(epochs))
        for epoch in progress:
            ########## Training ##########
            train_loss, val_loss = self.train_epoch(epoch, progress)

            self.writer.add_scalar("loss/train", train_loss, epoch)
            self.writer.add_scalar("loss/val", val_loss, epoch)
            progress.write(f"Epoch {epoch}/{epochs}\t| Train Loss {train_loss:.5f} | Val Loss {val_loss:.5f}")

            # Checkpoint only on a sufficiently large improvement; otherwise
            # count towards early stopping.
            if val_loss < best_val_loss and abs(val_loss-best_val_loss) > self.stopping_delta :
                stopping_counter = 0
                best_val_loss = val_loss
                T.save(self.model.state_dict(), self.filepath)
            else:
                stopping_counter += 1

            if stopping_counter > self.stopping_patience:
                break

        self.writer.close()
if __name__ == "__main__":
    # Entry point: load the YAML config, then train and/or evaluate.
    parser = argparse.ArgumentParser(description='Paramaters')
    parser.add_argument('-c', '--config',  type=str, default="config.yaml", help='path of config file')
    args = parser.parse_args()

    with open(args.config, 'r', encoding="utf-8") as cfg_file:
        config = yaml.load(cfg_file, Loader=yaml.FullLoader)

    # Make sure the run directory exists before any checkpoint is written.
    exp_path = os.path.join(config["save-path"], config["run-title"])
    if not os.path.isdir(exp_path):
        os.mkdir(exp_path)

    trainer = Trainer(config)

    if config["train"]:
        print("> Training")
        trainer.train(config["epochs"])

    if config["test"]:
        print("> Testing")
        acc = trainer.evaluate(config["load-path"])
        print(f"Testing Accuracy: {acc*100:.4f}%")
| 5,955 | 32.088889 | 160 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/common/shared_optim.py | import math
import torch as T
import torch.optim as optim
class SharedAdam(optim.Adam):
    """Implements Adam algorithm with shared states.

    State tensors (``step``, ``exp_avg``, ``exp_avg_sq``) are allocated
    eagerly in ``__init__`` so that ``share_memory()`` can move them into
    shared memory before worker processes fork; the base optimizer would
    otherwise create them lazily on the first ``step()``, after the fork.
    """

    def __init__(self,
                 params,
                 lr=1e-3,
                 betas=(0.9, 0.999),
                 eps=1e-8,
                 weight_decay=0):
        super(SharedAdam, self).__init__(params, lr, betas, eps, weight_decay)

        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'] = T.zeros(1)
                state['exp_avg'] = p.data.new().resize_as_(p.data).zero_()
                state['exp_avg_sq'] = p.data.new().resize_as_(p.data).zero_()

    def share_memory(self):
        """Move all optimizer state tensors into shared memory."""
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'].share_memory_()
                state['exp_avg'].share_memory_()
                state['exp_avg_sq'].share_memory_()

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                if group['weight_decay'] != 0:
                    # Fix: keyword form — the positional (Number, Tensor)
                    # overload of add() is deprecated/removed in recent PyTorch.
                    grad = grad.add(p.data, alpha=group['weight_decay'])

                # Decay the first and second moment running average coefficient
                # (fix: keyword forms, consistent with the addcmul_ call below).
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                denom = exp_avg_sq.sqrt().add_(group['eps'])

                bias_correction1 = 1 - beta1 ** state['step'].item()
                bias_correction2 = 1 - beta2 ** state['step'].item()
                step_size = group['lr'] * math.sqrt(
                    bias_correction2) / bias_correction1

                p.data.addcdiv_(exp_avg, denom, value=-step_size)

        return loss
# Non-centered RMSprop update with shared statistics (without momentum)
class SharedRMSprop(optim.RMSprop):
    """Implements RMSprop algorithm with shared states.

    Like ``SharedAdam``, state is allocated eagerly in ``__init__`` so that
    ``share_memory()`` can place it in shared memory before workers fork.
    """

    def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0):
        super(SharedRMSprop, self).__init__(params, lr=lr, alpha=alpha, eps=eps, weight_decay=weight_decay, momentum=0, centered=False)

        # State initialisation (must be done before step, else will not be shared between threads)
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'] = p.data.new().resize_(1).zero_()
                state['square_avg'] = p.data.new().resize_as_(p.data).zero_()

    def share_memory(self):
        """Move all optimizer state tensors into shared memory."""
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'].share_memory_()
                state['square_avg'].share_memory_()

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]

                square_avg = state['square_avg']
                alpha = group['alpha']

                state['step'] += 1

                if group['weight_decay'] != 0:
                    # Fix: keyword form — the positional (Number, Tensor)
                    # overload of add() is deprecated/removed in recent PyTorch.
                    grad = grad.add(p.data, alpha=group['weight_decay'])

                # g = αg + (1 - α)Δθ^2
                square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha)
                # θ ← θ - ηΔθ/√(g + ε)
                avg = square_avg.sqrt().add_(group['eps'])
                p.data.addcdiv_(grad, avg, value=-group['lr'])

        return loss
Meta-RL-Harlow | Meta-RL-Harlow-master/Harlow_PsychLab/harlow.py | import os
import imageio
import numpy as np
PIXELS_PER_ACTION = 1
class HarlowWrapper:
"""A gym-like wrapper environment for DeepMind Lab.
Attributes:
env: The corresponding DeepMind Lab environment.
max_length: Maximum number of frames
Args:
env (deepmind_lab.Lab): DeepMind Lab environment.
"""
    def __init__(self, env, config, rank):
        """Wrap a DeepMind Lab env with trial/episode bookkeeping and GIF recording.

        Args:
            env: a constructed ``deepmind_lab.Lab`` instance.
            config: run configuration dict (task limits, save paths, mode).
            rank: worker index (stored; used for multi-worker runs).
        """
        # ImageNet statistics kept for the (currently disabled) normalization
        # variants in _preprocess.
        self.imagenet_mean = [0.485, 0.456, 0.406]
        self.imagenet_std = [0.229, 0.224, 0.225]
        self.env = env
        self.max_length = config["task"]["max-length"]
        self.num_trials = config["task"]["num-trials"]
        self.reward_scheme = config["task"]["reward-scheme"]
        self.save_interval = config["save-interval"]
        # Template path for per-episode GIFs; episode number filled in later.
        self.save_path = os.path.join(config["save-path"], config["run-title"], config["run-title"]+"_{epi:04d}.gif")
        self.rank = rank
        self.frames = []
        self.num_actions = config["task"]["num-actions"] # {no-op, left, right}
        self.episode_num = config["start-episode"]
        self.trial_num = 0
        # Prime the environment once so the wrapper starts from a fresh episode.
        self.reset()
        self.mode = config["mode"]
        self.prev_reward = 0
        self.f_counter = 0
    def step(self, action, repeat=4):
        '''
        Advance the environment by one agent action (held for `repeat`
        frames) and return (observation, reward, done, timestep).

        Raw reward values from the level script:
            fixation 1.00
            correct image 5.00
            wrong image -5.00 
            Time Penalty -0.01
        '''
        action_vec = self._create_action(action)
        obs = self.env.observations()
        reward = self.env.step(action_vec, num_steps=repeat)
        # Record the frame for the episode GIF.
        self.frames += [obs['RGB_INTERLEAVED']]
        # A +/-5 reward marks the end of a trial (an image was chosen).
        if reward in [-5, 5]:
            self.trial_num += 1
        # Re-map the raw reward according to the configured reward scheme.
        reward = self._return_reward(reward)
        self.prev_reward = reward
        last_frame = obs['RGB_INTERLEAVED']
        # NOTE(review): overrides the reward with -2 when the frame has fewer
        # than 4900 zero-valued entries — presumably a penalty for looking
        # away from the (mostly black) screen; confirm against the level.
        if (last_frame==0).sum() < 4900:
            reward = -2
        timestep = self.num_steps()
        # Episode ends when the env stops, the frame budget is exhausted,
        # or the configured number of trials has been completed.
        done = not self.env.is_running() or timestep > self.max_length or self.trial_num >= self.num_trials
        return self._preprocess(obs['RGB_INTERLEAVED']), reward, done, timestep
def reset(self):
self.env.reset()
obs = self.env.observations()
if len(self.frames) > 0:
self.episode_num += 1
if self.episode_num > 0 and len(self.frames) > 0 and (self.episode_num % self.save_interval) == 0:
filepath = self.save_path.format(epi=self.episode_num)
imageio.mimsave(filepath, self.frames)
self.trial_num = 0
self.frames = []
return self._preprocess(obs['RGB_INTERLEAVED'])
def num_steps(self):
return self.env.num_steps()
def snapshot(self, filepath=None):
obs = self.env.observations()['RGB_INTERLEAVED']
if filepath is None:
filepath = os.path.join(os.path.dirname(self.save_path), "snapshot.png")
imageio.imsave(filepath, obs)
def save_frames(self, path):
imageio.mimsave(path, self.frames)
def _preprocess(self, obs):
obs = obs.astype(np.float32)
obs = obs / 255.0
# obs = (obs - 0.5) / 0.5
# obs = obs - self.imagenet_mean
# obs = obs / self.imagenet_std
return np.einsum('ijk->kij', obs)
def _return_reward(self, reward):
if self.reward_scheme == 0:
return reward / 5.
elif self.reward_scheme == 1:
if reward in [-5, 1]: reward = 0
return reward / 5.
elif self.reward_scheme == 2:
if reward == -5: reward = 0
return reward / 5.
else:
return reward
def _create_action(self, action):
"""
action: no-op (0), left (1), right(-1)
"""
# map_actions = [0, PIXELS_PER_ACTION, -PIXELS_PER_ACTION]
# map_actions = [0, 2, -2, 2, -2, 3, -3]
# map_actions = [4.4, -4.4]
speed = 5
if self.prev_reward != 0:
speed = 30
map_actions = [speed, -speed]
return np.array([map_actions[action],0,0,0,0,0,0], dtype=np.intc) | 3,639 | 28.12 | 113 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/Harlow_PsychLab/train.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from datetime import datetime
from collections import namedtuple
import deepmind_lab as lab
from Harlow_PsychLab.harlow import HarlowWrapper
from models.a3c_lstm import A3C_LSTM, A3C_StackedLSTM
from models.a3c_conv_lstm import A3C_ConvLSTM, A3C_ConvStackedLSTM
from models.resnet_lstm import ResNet_LSTM
def ensure_shared_grads(model, shared_model):
    """Copy the worker model's gradients onto the shared model.

    Bails out as soon as one shared parameter already carries a gradient,
    on the assumption that all shared gradients get populated together.
    """
    pairs = zip(model.parameters(), shared_model.parameters())
    for local_param, shared_param in pairs:
        if shared_param.grad is None:
            shared_param._grad = local_param.grad
        else:
            return
def train(config,
    shared_model,
    optimizer,
    rank,
    task_config,
    counter,
    ):
    """A3C worker loop for the Harlow task (single-RNN agents).

    Builds its own Lab environment and local agent, repeatedly syncs weights
    from `shared_model`, rolls out up to `n_step_update` environment steps,
    then applies an n-step actor-critic update with GAE and pushes the local
    gradients into `shared_model` through `optimizer`.

    Args:
        config: experiment configuration dict.
        shared_model: globally shared agent receiving the gradients.
        optimizer: optimizer bound to the shared model's parameters.
        rank: worker index (offsets the RNG seeds, suffixes the log dir).
        task_config: DeepMind Lab level configuration.
        counter: starting value for this worker's update counter.
    """
    # Per-worker deterministic seeding.
    T.manual_seed(config["seed"] + rank)
    np.random.seed(config["seed"] + rank)
    T.random.manual_seed(config["seed"] + rank)
    device = config["device"]
    lab_env = lab.Lab("contributed/psychlab/harlow", ['RGB_INTERLEAVED'], config=task_config)
    env = HarlowWrapper(lab_env, config, rank)
    # Pick the agent architecture from the run mode.
    if config["mode"] == "resnet":
        agent = ResNet_LSTM(config["agent"], env.num_actions)
    elif config["mode"] == "conv-vanilla":
        agent = A3C_ConvLSTM(config["agent"], env.num_actions)
    elif config["mode"] == "vanilla":
        agent = A3C_LSTM(config["agent"], env.num_actions)
    else:
        raise ValueError(config["mode"])
    agent.to(device)
    agent.train()
    ### hyper-parameters ###
    gamma = config["agent"]["gamma"]
    gae_lambda = config["agent"]["gae-lambda"]
    val_coeff = config["agent"]["value-loss-weight"]
    entropy_coeff = config["agent"]["entropy-weight"]
    n_step_update = config["agent"]["n-step-update"]
    writer = SummaryWriter(log_dir=os.path.join(config["log-path"], config["run-title"] + f"_{rank}"))
    save_path = os.path.join(config["save-path"], config["run-title"], config["run-title"]+"_{epi:04d}")
    save_interval = config["save-interval"]
    done = True
    state = env.reset()
    # Previous action (one-hot) and reward, fed back to the agent each step.
    p_action, p_reward = [0]*config["task"]["num-actions"], 0
    print('='*50)
    print(f"Starting Worker {rank}")
    print('='*50)
    episode_reward = 0
    update_counter = counter
    total_rewards = []
    while True:
        # Sync the local agent with the shared weights before each rollout.
        agent.load_state_dict(shared_model.state_dict())
        if done:
            rnn_state = agent.get_init_states(device)
        else:
            # Truncated BPTT: carry the state across rollouts but cut the graph.
            if config["agent"]["cell-type"] == "lstm":
                rnn_state = rnn_state[0].detach(), rnn_state[1].detach()
            else:
                rnn_state = rnn_state.detach()
        values = []
        log_probs = []
        rewards = []
        entropies = []
        # Roll out up to n_step_update steps (or until the episode ends).
        for _ in range(n_step_update):
            logit, value, rnn_state = agent(
                T.tensor([state]).to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                rnn_state
            )
            logit = logit.squeeze(0)
            prob = F.softmax(logit, dim=-1)
            log_prob = F.log_softmax(logit, dim=-1)
            entropy = -(log_prob * prob).sum(1, keepdim=True)
            entropies += [entropy]
            # Sample the action from the current policy.
            action = prob.multinomial(num_samples=1).detach()
            log_prob = log_prob.gather(1, action)
            state, reward, done, _ = env.step(int(action))
            # if done:
            #     env.save_frames(os.path.join(config["save-path"], "frames.gif"))
            #     exit()
            episode_reward += reward
            p_action = np.eye(env.num_actions)[int(action)]
            p_reward = reward
            log_probs += [log_prob]
            values += [value]
            rewards += [reward]
            if done:
                # Episode finished: log, checkpoint periodically, end rollout.
                state = env.reset()
                total_rewards += [episode_reward]
                avg_reward_100 = np.array(total_rewards[-100:]).mean()
                writer.add_scalar("perf/reward_t", episode_reward, env.episode_num)
                writer.add_scalar("perf/avg_reward_100", avg_reward_100, env.episode_num)
                episode_reward = 0
                if env.episode_num % save_interval == 0:
                    T.save({
                        "state_dict": shared_model.state_dict(),
                        "avg_reward_100": avg_reward_100,
                        "update_counter": update_counter
                    }, save_path.format(epi=env.episode_num) + ".pt")
                break
        # Bootstrap value: 0 at episode end, V(s_T) otherwise.
        R = T.zeros(1, 1).to(device)
        if not done:
            _, value, _ = agent(
                T.tensor([state]).to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                rnn_state
            )
            R = value.detach()
        values += [R]
        policy_loss = 0
        value_loss = 0
        gae = T.zeros(1, 1).to(device)
        # Walk the rollout backwards accumulating n-step returns and GAE.
        for i in reversed(range(len(rewards))):
            R = gamma * R + rewards[i]
            advantage = R - values[i]
            value_loss = value_loss + 0.5 * advantage.pow(2)
            # Generalized Advantage Estimation
            delta_t = rewards[i] + gamma * values[i + 1] - values[i]
            gae = gae * gamma * gae_lambda + delta_t
            policy_loss = policy_loss - \
                log_probs[i] * gae.detach() - entropy_coeff * entropies[i]
        loss = policy_loss + val_coeff * value_loss
        optimizer.zero_grad()
        loss.backward()
        # Publish local gradients to the shared model, then step.
        ensure_shared_grads(agent, shared_model)
        optimizer.step()
        update_counter += 1
        writer.add_scalar("losses/total_loss", loss.item(), update_counter)
def train_stacked(config,
    shared_model,
    optimizer,
    rank,
    task_config,
    counter,
    ):
    """A3C worker loop for the Harlow task (stacked two-LSTM agents).

    Same scheme as `train`: sync from `shared_model`, roll out up to
    `n_step_update` steps, apply an n-step actor-critic update with GAE, and
    push gradients back through `optimizer`. The agent carries two LSTM
    states ((ht1, ct1) and (ht2, ct2)) instead of one.

    Args:
        config: experiment configuration dict.
        shared_model: globally shared agent receiving the gradients.
        optimizer: optimizer bound to the shared model's parameters.
        rank: worker index (offsets the RNG seeds, suffixes the log dir).
        task_config: DeepMind Lab level configuration.
        counter: starting value for this worker's update counter.
    """
    T.manual_seed(config["seed"] + rank)
    np.random.seed(config["seed"] + rank)
    T.random.manual_seed(config["seed"] + rank)
    device = config["device"]
    lab_env = lab.Lab("contributed/psychlab/harlow", ['RGB_INTERLEAVED'], config=task_config)
    env = HarlowWrapper(lab_env, config, rank)
    if config["mode"] == "conv-stacked":
        agent = A3C_ConvStackedLSTM(config["agent"], env.num_actions)
    elif config["mode"] == "stacked":
        agent = A3C_StackedLSTM(config["agent"], env.num_actions)
    else:
        raise ValueError(config["mode"])
    agent.to(device)
    agent.train()
    ### hyper-parameters ###
    gamma = config["agent"]["gamma"]
    gae_lambda = config["agent"]["gae-lambda"]
    val_coeff = config["agent"]["value-loss-weight"]
    entropy_coeff = config["agent"]["entropy-weight"]
    n_step_update = config["agent"]["n-step-update"]
    writer = SummaryWriter(log_dir=os.path.join(config["log-path"], config["run-title"] + f"_{rank}"))
    save_path = os.path.join(config["save-path"], config["run-title"], config["run-title"]+"_{epi:04d}")
    save_interval = config["save-interval"]
    done = True
    state = env.reset()
    p_action, p_reward = [0]*config["task"]["num-actions"], 0
    print('='*50)
    print(f"Starting Worker {rank}")
    print('='*50)
    episode_reward = 0
    # Bug fix: start from the caller-supplied counter (was hard-coded to 0,
    # inconsistent with `train`), so resumed runs keep a monotonic counter.
    update_counter = counter
    total_rewards = []
    while True:
        agent.load_state_dict(shared_model.state_dict())
        if done:
            ht1, ct1 = agent.get_init_states(1, device)
            ht2, ct2 = agent.get_init_states(2, device)
        else:
            # Truncated BPTT: carry both LSTM states but cut the graph.
            ht1, ct1 = ht1.detach(), ct1.detach()
            ht2, ct2 = ht2.detach(), ct2.detach()
        values = []
        log_probs = []
        rewards = []
        entropies = []
        for _ in range(n_step_update):
            logit, value, (ht1, ct1), (ht2, ct2) = agent(
                T.tensor([state]).to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                (ht1, ct1), (ht2, ct2)
            )
            logit = logit.squeeze(0)
            prob = F.softmax(logit, dim=-1)
            log_prob = F.log_softmax(logit, dim=-1)
            entropy = -(log_prob * prob).sum(1, keepdim=True)
            entropies += [entropy]
            action = prob.multinomial(num_samples=1).detach()
            log_prob = log_prob.gather(1, action)
            state, reward, done, _ = env.step(int(action))
            episode_reward += reward
            p_action = np.eye(env.num_actions)[int(action)]
            p_reward = reward
            log_probs += [log_prob]
            values += [value]
            rewards += [reward]
            if done:
                state = env.reset()
                total_rewards += [episode_reward]
                avg_reward_100 = np.array(total_rewards[-100:]).mean()
                writer.add_scalar("perf/reward_t", episode_reward, env.episode_num)
                writer.add_scalar("perf/avg_reward_100", avg_reward_100, env.episode_num)
                episode_reward = 0
                if env.episode_num % save_interval == 0:
                    T.save({
                        "state_dict": shared_model.state_dict(),
                        "avg_reward_100": avg_reward_100,
                        "update_counter": update_counter
                    }, save_path.format(epi=env.episode_num) + ".pt")
                break
        # Bootstrap value: 0 at episode end, V(s_T) otherwise.
        R = T.zeros(1, 1).to(device)
        if not done:
            _, value, _, _ = agent(
                T.tensor([state]).to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                (ht1, ct1), (ht2, ct2)
            )
            R = value.detach()
        values += [R]
        policy_loss = 0
        value_loss = 0
        gae = T.zeros(1, 1).to(device)
        for i in reversed(range(len(rewards))):
            R = gamma * R + rewards[i]
            advantage = R - values[i]
            value_loss = value_loss + 0.5 * advantage.pow(2)
            # Generalized Advantage Estimation
            delta_t = rewards[i] + gamma * values[i + 1] - values[i]
            gae = gae * gamma * gae_lambda + delta_t
            policy_loss = policy_loss - \
                log_probs[i] * gae.detach() - entropy_coeff * entropies[i]
        loss = policy_loss + val_coeff * value_loss
        optimizer.zero_grad()
        loss.backward()
        ensure_shared_grads(agent, shared_model)
        optimizer.step()
        update_counter += 1
        writer.add_scalar("losses/total_loss", loss.item(), update_counter)
Meta-RL-Harlow | Meta-RL-Harlow-master/models/dnd.py | import torch as T
import torch.nn.functional as F
# constants
# Similarity metrics accepted by compute_similarities().
ALL_KERNELS = ['cosine', 'l1', 'l2']
# Retrieval policies understood by DND._get_memory().
ALL_POLICIES = ['1NN']
class DND:
"""The differentiable neural dictionary (DND) class. This enables episodic
recall in a neural network.
notes:
- a memory is a row vector
Parameters
----------
dict_len : int
the maximial len of the dictionary
memory_dim : int
the dim or len of memory i, we assume memory_i is a row vector
kernel : str
the metric for memory search
Attributes
----------
encoding_off : bool
if True, stop forming memories
retrieval_off : type
if True, stop retrieving memories
reset_memory : func;
if called, clear the dictionary
check_config : func
check the class config
"""
def __init__(self, dict_len, key_dim, memory_dim, kernel='l2'):
# params
self.dict_len = dict_len
self.kernel = kernel
self.key_dim = key_dim
self.memory_dim = memory_dim
# dynamic state
self.encoding_off = False
self.retrieval_off = False
# allocate space for memories
self.reset_memory()
# check everything
self.check_config()
def reset_memory(self):
self.pointer = 0
self.overflow = False
self.keys = T.empty(self.dict_len, self.key_dim)
self.vals = T.empty(self.dict_len, self.memory_dim)
def check_config(self):
assert self.dict_len > 0
assert self.kernel in ALL_KERNELS
def inject_memories(self, input_keys, input_vals):
"""Inject pre-defined keys and values
Parameters
----------
input_keys : list
a list of memory keys
input_vals : list
a list of memory content
"""
assert len(input_keys) == len(input_vals)
for k, v in zip(input_keys, input_vals):
self.save_memory(k, v)
def save_memory(self,
memory_key,
memory_val,
replace_similar=False,
threshold=0
):
"""Save an episodic memory to the dictionary
Parameters
----------
memory_key : a row vector
a DND key, used to for memory search
memory_val : a row vector
a DND value, representing the memory content
"""
if self.encoding_off:
return
# add new memory to the the dictionary
# get data is necessary for gradient reason
replaced = False
if replace_similar and (self.pointer > 0 or self.overflow):
similarities = compute_similarities(memory_key, self.keys[:self.pointer], self.kernel)
closest_idx = T.argmax(similarities)
if similarities[closest_idx] > threshold:
self.keys[closest_idx] = T.squeeze(memory_key.data)
self.vals[closest_idx] = T.squeeze(memory_val.data)
replaced = True
if not replace_similar or not replaced:
self.keys[self.pointer] = T.squeeze(memory_key.data)
self.vals[self.pointer] = T.squeeze(memory_val.data)
self.pointer += 1
if self.pointer >= self.dict_len:
self.pointer = 0
self.overflow = True
def get_memory(self, query_key, threshold=-1):
"""Perform a 1-NN search over dnd
Parameters
----------
query_key : a row vector
a DND key, used to for memory search
Returns
-------
a row vector
a DND value, representing the memory content
"""
# if no memory, return the zero vector
if (self.pointer == 0 and not self.overflow) or self.retrieval_off:
return _empty_memory(self.memory_dim)
# compute similarity(query, memory_i ), for all i
similarities = compute_similarities(query_key, self.keys[:self.pointer], self.kernel)
# get the best-match memory
best_memory_val = self._get_memory(similarities, threshold)
return best_memory_val
def _get_memory(self, similarities, threshold, policy='1NN'):
"""get the episodic memory according to some policy
e.g. if the policy is 1nn, return the best matching memory
e.g. the policy can be based on the rational model
Parameters
----------
similarities : a vector of len #memories
the similarity between query vs. key_i, for all i
policy : str
the retrieval policy
Returns
-------
a row vector
a DND value, representing the memory content
"""
best_memory_val = None
if policy is '1NN':
best_memory_id = T.argmax(similarities)
if threshold <= 0 or similarities[best_memory_id] > threshold:
best_memory_val = self.vals[best_memory_id].unsqueeze(0)
else:
best_memory_val = _empty_memory(self.memory_dim)
else:
raise ValueError(f'unrecog recall policy: {policy}')
return best_memory_val
"""helpers"""
def compute_similarities(query_key, key_list, metric):
"""Compute the similarity between query vs. key_i for all i
i.e. compute q M, w/ q: 1 x key_dim, M: key_dim x #keys
Parameters
----------
query_key : a vector
Description of parameter `query_key`.
key_list : list
Description of parameter `key_list`.
metric : str
Description of parameter `metric`.
Returns
-------
a row vector w/ len #memories
the similarity between query vs. key_i, for all i
"""
# reshape query to 1 x key_dim
q = query_key.data.view(1, -1)
# compute similarities
if metric == 'cosine':
similarities = F.cosine_similarity(q.float(), key_list.float())
elif metric == 'l1':
similarities = - F.pairwise_distance(q, key_list, p=1)
elif metric == 'l2':
similarities = - F.pairwise_distance(q, key_list, p=2)
else:
raise ValueError(f'unrecog metric: {metric}')
return similarities
def _empty_memory(memory_dim):
"""Get a empty memory, assuming the memory is a row vector
"""
return T.zeros(1, memory_dim)
| 6,304 | 29.756098 | 98 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/models/ep_lstm.py | from typing import (
Tuple,
List,
Optional,
Dict,
Callable,
Union,
cast,
)
from collections import namedtuple
from abc import ABC, abstractmethod
from dataclasses import dataclass
import numpy as np
import torch as T
from torch import nn
from torch.nn import functional as F
from torch import Tensor
from models.ep_lstm_cell import EpLSTMCell
@dataclass
class EpLSTMCell_Builder:
    """Hyper-parameter bundle that constructs EpLSTMCell instances."""
    hidden_size : int
    # Dropout on the cell input (applied in apply_input_kernel).
    vertical_dropout : float = 0.0
    # Dropout on the recurrent state.
    recurrent_dropout : float = 0.0
    # How recurrent dropout is applied: 'gal_tied', 'gal_gates', or 'semeniuta'
    # (see EpLSTMCell.apply_recurrent_kernel / forward).
    recurrent_dropout_mode : str = 'gal_tied'
    # NOTE(review): declared but not read anywhere visible in this file.
    input_kernel_initialization : str = 'xavier_uniform'
    # Name in the cell's ACTIVATIONS table, or a callable.
    recurrent_activation : str = 'sigmoid'
    # If True the input gate is tied to the forget gate (it = 1 - ft).
    tied_forget_gate : bool = False
    def make(self, input_size: int):
        # Build a plain (eager) cell for the given input width.
        return EpLSTMCell(input_size, self)
    def make_scripted(self, *p, **ks):
        # TorchScript-compiled variant of make().
        return T.jit.script(self.make(*p, **ks))
class EpLSTM_Layer(nn.Module):
    """Runs one EpLSTMCell over a whole (inputs, memories) sequence."""
    def reorder_inputs(self, inputs: Union[List[T.Tensor], T.Tensor]):
        #^ inputs : [t b i]
        # Reverse time for a backward-direction layer; identity otherwise.
        if self.direction == 'backward':
            return inputs[::-1]
        return inputs
    def __init__(
        self,
        cell: EpLSTMCell,
        direction='forward',
        batch_first=False,
    ):
        super().__init__()
        # batch_first may be a single bool (applied to both input and output)
        # or a (input, output) pair.
        if isinstance(batch_first, bool):
            batch_first = (batch_first, batch_first)
        self.batch_first = batch_first
        self.direction = direction
        self.cell_: EpLSTMCell = cell
    @T.jit.ignore
    def forward(self, inputs, state_t0):
        # inputs is a pair: the step inputs x_t and the episodic memories m_t.
        x_t, m_t = inputs
        if self.batch_first[0]:
            #^ x_t : [b t i]
            x_t = x_t.transpose(1, 0)
            #^ x_t : [t b i]
        # x_t = x_t.unbind(0)
        if state_t0 is None:
            state_t0 = self.cell_.get_init_state(x_t)
        x_t = self.reorder_inputs(x_t)
        # Step the cell over time; returns per-step hidden states + final state.
        sequence, state = self.cell_.loop(x_t, m_t, state_t0)
        #^ sequence : t * [b h]
        sequence = self.reorder_inputs(sequence)
        sequence = T.stack(sequence)
        #^ sequence : [t b h]
        if self.batch_first[1]:
            sequence = sequence.transpose(1, 0)
            #^ sequence : [b t h]
        return sequence, state
class EpLSTM(nn.Module):
    """Stack of EpLSTM_Layer wrappers around EpLSTMCell.

    Extra keyword arguments are forwarded to EpLSTMCell_Builder
    (hidden_size, dropout settings, etc.).
    """
    def __init__(
        self,
        input_size : int,
        num_layers : int,
        batch_first : bool = False,
        scripted : bool = True,
        *args, **kargs,
    ):
        super().__init__()
        self._cell_builder = EpLSTMCell_Builder(*args, **kargs)
        Dh = self._cell_builder.hidden_size
        def make(isize: int):
            # cell = self._cell_builder.make_scripted(isize)
            cell = self._cell_builder.make(isize)
            # Bug fix: the input size used to be passed positionally into
            # EpLSTM_Layer's `direction` parameter; pass batch_first only.
            return EpLSTM_Layer(cell, batch_first=batch_first)
        # First layer consumes the raw input width; later layers consume Dh.
        rnns = [
            make(input_size),
            *[
                make(Dh)
                for _ in range(num_layers - 1)
            ],
        ]
        self.rnn = nn.Sequential(*rnns)
        self.input_size = input_size
        self.hidden_size = self._cell_builder.hidden_size
        self.num_layers = num_layers
    def __repr__(self):
        # Bug fix: the old repr referenced a nonexistent `self.bidirectional`
        # (AttributeError when printed) and never closed its parenthesis.
        return (
            f'${self.__class__.__name__}'
            + '('
            + f'in={self.input_size}, '
            + f'hid={self.hidden_size}, '
            + f'layers={self.num_layers}'
            + '; '
            + str(self._cell_builder)
            + ')'
        )
    def forward(self, inputs, state_t0=None):
        # NOTE(review): every layer receives the same state_t0, and layers
        # past the first would receive a bare tensor rather than the
        # (x_t, m_t) pair — confirm callers only use num_layers == 1.
        for rnn in self.rnn:
            inputs, state = rnn(inputs, state_t0)
        return inputs, state
    def reset_parameters(self):
        for rnn in self.rnn:
            rnn.cell_.reset_parameters_()
Meta-RL-Harlow | Meta-RL-Harlow-master/models/ep_lstm_cell.py | from typing import (
Tuple,
List,
Optional,
Dict,
Callable,
Union,
cast,
)
from collections import namedtuple
from dataclasses import dataclass
import numpy as np
import torch as T
from torch import nn
from torch import Tensor
from torch.nn import functional as F
# from models.ep_lstm import EpLSTMCell_Builder
# constants
# Five gates per cell: input (I), forget (F), cell candidate (G), output (O),
# plus the reinstatement gate (R) that admits the episodic memory m_t
# (see EpLSTMCell.forward: ct = ft*c + it*gt + rt*tanh(mt)).
N_GATES = 5
GateSpans = namedtuple('GateSpans', ['I', 'F', 'G', 'O', 'R'])
# Lookup used by EpLSTMCell when the recurrent activation is given by name.
ACTIVATIONS = {
    'sigmoid': nn.Sigmoid(),
    'tanh': nn.Tanh(),
    'hard_tanh': nn.Hardtanh(),
    'relu': nn.ReLU(),
}
class EpLSTMCell(nn.Module):
    """LSTM cell with an extra reinstatement gate for an episodic memory m_t.

    `args` is an EpLSTMCell_Builder-like config providing hidden_size,
    dropout settings, recurrent_activation and tied_forget_gate.
    """
    def __repr__(self):
        return (
            f'{self.__class__.__name__}('
            + ', '.join(
                [
                    f'in: {self.Dx}',
                    f'hid: {self.Dh}',
                    f'rdo: {self.recurrent_dropout_p} @{self.recurrent_dropout_mode}',
                    f'vdo: {self.vertical_dropout_p}'
                ]
            )
            +')'
        )
    def __init__(
        self,
        input_size: int,
        args,
    ):
        super().__init__()
        self._args = args
        self.Dx = input_size
        self.Dh = args.hidden_size
        # One fused linear map per side, chunked into the 5 gates later.
        self.recurrent_kernel = nn.Linear(self.Dh, self.Dh * N_GATES)
        self.input_kernel = nn.Linear(self.Dx, self.Dh * N_GATES)
        self.recurrent_dropout_p = args.recurrent_dropout or 0.0
        self.vertical_dropout_p = args.vertical_dropout or 0.0
        self.recurrent_dropout_mode = args.recurrent_dropout_mode
        self.recurrent_dropout = nn.Dropout(self.recurrent_dropout_p)
        self.vertical_dropout = nn.Dropout(self.vertical_dropout_p)
        self.tied_forget_gate = args.tied_forget_gate
        # Accept either a name from ACTIVATIONS or a callable.
        if isinstance(args.recurrent_activation, str):
            self.fun_rec = ACTIVATIONS[args.recurrent_activation]
        else:
            self.fun_rec = args.recurrent_activation
        self.reset_parameters_()
    # @T.jit.ignore
    def get_recurrent_weights(self):
        # type: () -> Tuple[GateSpans, GateSpans]
        # Split the fused recurrent weight/bias into per-gate spans.
        W = self.recurrent_kernel.weight.chunk(5, 0)
        b = self.recurrent_kernel.bias.chunk(5, 0)
        W = GateSpans(W[0], W[1], W[2], W[3], W[4])
        b = GateSpans(b[0], b[1], b[2], b[3], b[4])
        return W, b
    # @T.jit.ignore
    def get_input_weights(self):
        # type: () -> Tuple[GateSpans, GateSpans]
        # Split the fused input weight/bias into per-gate spans.
        W = self.input_kernel.weight.chunk(5, 0)
        b = self.input_kernel.bias.chunk(5, 0)
        W = GateSpans(W[0], W[1], W[2], W[3], W[4])
        b = GateSpans(b[0], b[1], b[2], b[3], b[4])
        return W, b
    @T.jit.ignore
    def reset_parameters_(self):
        # Orthogonal recurrent weights, Xavier input weights, zero biases —
        # except the forget-gate bias, initialized to 1.
        rw, rb = self.get_recurrent_weights()
        iw, ib = self.get_input_weights()
        nn.init.zeros_(self.input_kernel.bias)
        nn.init.zeros_(self.recurrent_kernel.bias)
        nn.init.ones_(rb.F)
        #^ forget bias
        for W in rw:
            nn.init.orthogonal_(W)
        for W in iw:
            nn.init.xavier_uniform_(W)
    # @T.jit.export
    @T.jit.ignore
    def get_init_state(self, input: Tensor) -> Tuple[Tensor, Tensor]:
        # `input` is time-major [t b i]; batch size is dim 1.
        batch_size = input.shape[1]
        zeros = T.zeros(batch_size, self.Dh, device=input.device)
        return (zeros, zeros)
    def apply_input_kernel(self, xt: Tensor) -> List[Tensor]:
        # Vertical dropout on the input, then project to all 5 gate blocks.
        xto = self.vertical_dropout(xt)
        out = self.input_kernel(xto).chunk(5, 1)
        return out
    def apply_recurrent_kernel(self, h_tm1: Tensor):
        #^ h_tm1 : [b h]
        mode = self.recurrent_dropout_mode
        if mode == 'gal_tied':
            # One dropout mask shared by all gates.
            hto = self.recurrent_dropout(h_tm1)
            out = self.recurrent_kernel(hto)
            #^ out : [b 5*h]
            outs = out.chunk(5, -1)
        elif mode == 'gal_gates':
            # A fresh dropout mask per gate.
            outs = []
            WW, bb = self.get_recurrent_weights()
            for i in range(5):
                hto = self.recurrent_dropout(h_tm1)
                outs.append(F.linear(hto, WW[i], bb[i]))
        else:
            # No recurrent-state dropout ('semeniuta' drops gt in forward).
            outs = self.recurrent_kernel(h_tm1).chunk(5, -1)
        return outs
    def forward(self, xt, mt, state):
        # type: (Tensor, Tensor, Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]
        #^ inputs.xt : [b i]
        #^ state.h : [b h]
        (h_tm1, c_tm1) = state
        Xi, Xf, Xg, Xo, Xr = self.apply_input_kernel(xt)
        Hi, Hf, Hg, Ho, Hr = self.apply_recurrent_kernel(h_tm1)
        ft = self.fun_rec(Xf + Hf)
        ot = self.fun_rec(Xo + Ho)
        if self.tied_forget_gate:
            it = 1.0 - ft
        else:
            it = self.fun_rec(Xi + Hi)
        gt = T.tanh(Xg + Hg)
        if self.recurrent_dropout_mode == 'semeniuta':
            #* https://arxiv.org/abs/1603.05118
            gt = self.recurrent_dropout(gt)
        # Reinstatement gate: how much of the episodic memory mt to add.
        rt = self.fun_rec(Xr + Hr)
        ct = (ft * c_tm1) + (it * gt) + (rt * T.tanh(mt))
        ht = ot * T.tanh(ct)
        return ht, (ht, ct)
    # @T.jit.export
    @T.jit.ignore
    def loop(self, inputs, memories, state_t0, mask=None):
        # type: (Tensor, Tensor, Tuple[Tensor, Tensor], Optional[List[Tensor]]) -> Tuple[List[Tensor], Tuple[Tensor, Tensor]]
        '''
        This loops over t (time) steps
        '''
        #^ inputs : t * [b i]
        #^ memories : t * [b i]
        #^ state_t0[i] : [b s]
        #^ out : [t b h]
        state = state_t0
        outs = []
        for xt, mt in zip(inputs, memories):
            ht, state = self(xt, mt, state)
            outs.append(ht)
        return outs, state
| 5,570 | 28.47619 | 125 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/models/a3c_conv_lstm.py | import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
# Pre-trained CIFAR checkpoints used to warm-start the conv encoder
# (the first 24 entries of the cifar100 state dict are loaded below).
model_urls = {
    'cifar10': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar10-d875770b.pth',
    'cifar100': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar100-3a55a987.pth',
}
def make_layers(cfg, batch_norm=False, in_channels=3):
    """Build a VGG-style conv stack from a config list.

    Each entry of `cfg` is either 'M' (MaxPool2d(kernel=2, stride=4)),
    an int `c` (3x3 conv to `c` channels, padding 1), or a tuple
    `(c, p)` (3x3 conv to `c` channels with padding `p`). Convs are
    followed by optional (non-affine) BatchNorm and an ELU.

    Returns:
        nn.Sequential of the assembled layers.
    """
    layers = []
    # Fix: the loop index from enumerate() was never used.
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=4)]
        else:
            padding = v[1] if isinstance(v, tuple) else 1
            out_channels = v[0] if isinstance(v, tuple) else v
            conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(out_channels, affine=False), nn.ELU()] # this was ReLU
            else:
                layers += [conv2d, nn.ELU()] # this was ReLU
            in_channels = out_channels
    return nn.Sequential(*layers)
class Encoder(nn.Module):
    """Three-stage conv encoder (n, 2n, 4n channels) with batch norm."""
    def __init__(self, n_channel):
        super(Encoder, self).__init__()
        cfg = [
            n_channel,
            n_channel,
            'M',
            2*n_channel,
            2*n_channel,
            'M',
            4*n_channel,
            4*n_channel,
            'M',
            # (8*n_channel, 0),
            # 'M'
        ]
        self.features = make_layers(cfg, batch_norm=True)
    def forward(self, inputs):
        return self.features(inputs)
class A3C_ConvLSTM(nn.Module):
    """Actor-critic agent: conv Encoder -> single LSTM -> actor/critic heads.

    When `pretrained` is True the encoder is warm-started from the first 24
    entries of the cifar100 checkpoint (downloaded via model_zoo) and frozen.
    """
    def __init__(self, config, num_actions, pretrained=True):
        super(A3C_ConvLSTM, self).__init__()
        self.encoder = Encoder(config["conv-nchannels"])
        if pretrained:
            # Downloads the checkpoint on first use (network access required).
            m = model_zoo.load_url(model_urls['cifar100'], map_location=T.device('cpu'))
            pretrained_dict = m.state_dict() if isinstance(m, nn.Module) else m
            # for i, (k, v) in enumerate(pretrained_dict.items()): print(i, k, v.size())
            # Keep only the first 24 tensors (the conv trunk).
            pretrained_dict = {
                k: v for i, (k, v) in enumerate(pretrained_dict.items())
                if i < 24
            }
            self.encoder.load_state_dict(pretrained_dict)
            for param in self.encoder.parameters():
                param.requires_grad = False
        # LSTM input = flattened features (2048) + prev reward + prev action one-hot.
        self.working_memory = nn.LSTM(2048+1+num_actions, config["mem-units"])
        self.actor = nn.Linear(config["mem-units"], num_actions)
        self.critic = nn.Linear(config["mem-units"], 1)
        # initialize actor and critic weights
        T.nn.init.orthogonal_(self.actor.weight.data, 0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight.data, 1)
        self.critic.bias.data.fill_(0)
    def forward(self, obs, p_input, mem_state=None):
        """One step: returns (action_logits, value_estimate, new LSTM state).

        `p_input` is a (prev-action one-hot, prev-reward) tensor pair that is
        concatenated onto the flattened conv features.
        """
        if mem_state is None:
            # NOTE(review): default device here is 'cuda' (see get_init_states)
            # — breaks on CPU-only runs unless a state is always passed in.
            mem_state = self.get_init_states(layer=1)
        feats = self.encoder(obs)
        feats = feats.view(feats.size(0), -1)
        mem_input = T.cat((feats, *p_input), dim=-1).unsqueeze(0)
        h_t, mem_state = self.working_memory(mem_input, mem_state)
        action_logits = self.actor(h_t)
        value_estimate = self.critic(h_t)
        return action_logits, value_estimate, mem_state
    def get_init_states(self, layer, device='cuda'):
        # Zero LSTM state; `layer` is unused (kept for call-site symmetry with
        # the stacked variant below).
        hsize = self.working_memory.hidden_size
        h0 = T.zeros(1, 1, hsize).float().to(device)
        c0 = T.zeros(1, 1, hsize).float().to(device)
        return (h0, c0)
class A3C_ConvStackedLSTM(nn.Module):
    """Actor-critic agent with two stacked LSTMs over conv features.

    LSTM-1 sees (features, prev reward); LSTM-2 sees (features, LSTM-1
    output, prev action) and feeds the actor/critic heads.
    """
    def __init__(self, config, num_actions, pretrained=True):
        super(A3C_ConvStackedLSTM, self).__init__()
        self.encoder = Encoder(config["conv-nchannels"])
        if pretrained:
            # Warm-start the conv trunk from the cifar100 checkpoint (first 24
            # tensors) and freeze it.
            m = model_zoo.load_url(model_urls['cifar100'], map_location=T.device('cpu'))
            pretrained_dict = m.state_dict() if isinstance(m, nn.Module) else m
            pretrained_dict = {
                k: v for i, (k, v) in enumerate(pretrained_dict.items())
                if i < 24
            }
            self.encoder.load_state_dict(pretrained_dict)
            for param in self.encoder.parameters():
                param.requires_grad = False
        self.actor = nn.Linear(128, num_actions)
        self.critic = nn.Linear(128, 1)
        self.lstm_1 = nn.LSTM(2048+1, config["mem-units"])
        self.lstm_2 = nn.LSTM(2048+config["mem-units"]+num_actions, 128)
        # initialize actor and critic weights
        T.nn.init.orthogonal_(self.actor.weight.data, 0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight.data, 1)
        self.critic.bias.data.fill_(0)
    def forward(self, obs, p_input, state_1=None, state_2=None):
        """One step: returns (action_logits, value, state_1, state_2)."""
        p_action, p_reward = p_input
        if state_1 is None:
            state_1 = self.get_init_states(layer=1)
        if state_2 is None:
            state_2 = self.get_init_states(layer=2)
        feats = self.encoder(obs)
        feats = feats.view(feats.size(0), -1)
        # Layer 1 input: conv features + previous reward.
        input_1 = T.cat((feats, p_reward), dim=-1)
        if len(input_1.size()) == 2:
            input_1 = input_1.unsqueeze(0)
        output_1, state_1 = self.lstm_1(input_1, state_1)
        # Layer 2 input: conv features + layer-1 output + previous action.
        input_2 = T.cat((feats, output_1.squeeze(0), p_action), dim=-1)
        if len(input_2.size()) == 2:
            input_2 = input_2.unsqueeze(0)
        output_2, state_2 = self.lstm_2(input_2, state_2)
        action_logits = self.actor(output_2)
        value_estimate = self.critic(output_2)
        return action_logits, value_estimate, state_1, state_2
    def get_init_states(self, layer, device='cuda'):
        # Zero state sized for the requested LSTM layer.
        # NOTE(review): default device is 'cuda' — breaks on CPU-only runs
        # unless states are always passed in explicitly.
        hsize = self.lstm_1.hidden_size if layer == 1 else self.lstm_2.hidden_size
        h0 = T.zeros(1, 1, hsize).float().to(device)
        c0 = T.zeros(1, 1, hsize).float().to(device)
        return (h0, c0)
    def save_featmaps(self, obs, path, layer=5):
        """Dump the encoder's feature maps up to `layer` to an .npy file."""
        featmaps = self.encoder.features[:layer+1](obs)
        np.save(path, featmaps)
| 6,009 | 33.94186 | 104 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/models/a3c_dnd_lstm.py | """
A DND-based LSTM based on ...
Ritter, et al. (2018).
Been There, Done That: Meta-Learning with Episodic Recall.
Proceedings of the International Conference on Machine Learning (ICML).
"""
import torch as T
import torch.nn as nn
import torch.nn.functional as F
from models.dnd import DND
from models.ep_lstm import EpLSTM
class A2C_DND_LSTM(nn.Module):
    """Advantage actor-critic agent with a DND episodic memory feeding an
    episodic LSTM (Ritter et al., 2018, "Been There, Done That").

    Args:
        input_dim: width of the concatenated (state, prev action, prev
            reward, timestep) input vector.
        hidden_dim: LSTM hidden size; also the DND key/value width.
        num_actions: size of the action space.
        dict_len: capacity of the DND.
        kernel: DND similarity metric ('l2', 'l1' or 'cosine').
        bias: stored but unused here (kept for interface compatibility).
    """
    def __init__(self,
            input_dim,
            hidden_dim,
            num_actions,
            dict_len,
            kernel='l2',
            bias=True
        ):
        super(A2C_DND_LSTM, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.bias = bias
        # long-term memory
        # Bug fix: DND's signature is (dict_len, key_dim, memory_dim, kernel)
        # (models/dnd.py); the old call DND(dict_len, hidden_dim, kernel)
        # passed the kernel string as memory_dim.
        self.dnd = DND(dict_len, hidden_dim, hidden_dim, kernel=kernel)
        # short-term memory
        self.ep_lstm = EpLSTM(
            input_size=input_dim,
            hidden_size=hidden_dim,
            num_layers=1,
            batch_first=False
        )
        # initial states of LSTM (learned)
        self.h0 = nn.Parameter(T.randn(1, self.ep_lstm.hidden_size).float())
        self.c0 = nn.Parameter(T.randn(1, self.ep_lstm.hidden_size).float())
        # actor-critic networks
        self.actor = nn.Linear(hidden_dim, num_actions)
        self.critic = nn.Linear(hidden_dim, 1)
        self.reset_parameters()
    def reset_parameters(self):
        # reset lstm parameters
        self.ep_lstm.reset_parameters()
        # reset initial states
        T.nn.init.normal_(self.h0)
        T.nn.init.normal_(self.c0)
        # initialize actor and critic weights
        T.nn.init.orthogonal_(self.actor.weight, gain=0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight, gain=1.0)
        self.critic.bias.data.fill_(0)
    def forward(self, data, cue, mem_state):
        """One step: episodic recall keyed on `cue`, then an LSTM update.

        Args:
            data: (state, prev action, prev reward, timestep) tensors,
                concatenated into the LSTM input.
            cue: DND query key for episodic recall.
            mem_state: (h, c) LSTM state, or None to use the learned initials.
        Returns:
            (action_logits, value_estimate, (h_t, c_t))
        """
        state, p_action, p_reward, timestep = data
        x_t = T.cat((state, p_action, p_reward, timestep), dim=-1)
        if mem_state is None:
            mem_state = (self.h0, self.c0)
        # Retrieved memory is injected through the cell's reinstatement gate.
        m_t = self.dnd.get_memory(cue)
        _, (h_t, c_t) = self.ep_lstm((x_t.unsqueeze(1), m_t.unsqueeze(1)), mem_state)
        action_logits = self.actor(h_t)
        value_estimate = self.critic(h_t)
        return action_logits, value_estimate, (h_t, c_t)
    def pick_action(self, action_distribution):
        """action selection by sampling from a multinomial.
        Parameters
        ----------
        action_distribution : 1d T.tensor
            action distribution, pi(a|s)
        Returns
        -------
        T.tensor(int), T.tensor(float)
            sampled action, log_prob(sampled action)
        """
        m = T.distributions.Categorical(action_distribution)
        a_t = m.sample()
        log_prob_a_t = m.log_prob(a_t)
        return a_t, log_prob_a_t
    def get_init_states(self):
        return (self.h0, self.c0)
    def turn_off_encoding(self):
        self.dnd.encoding_off = True
    def turn_on_encoding(self):
        self.dnd.encoding_off = False
    def turn_off_retrieval(self):
        self.dnd.retrieval_off = True
    def turn_on_retrieval(self):
        self.dnd.retrieval_off = False
    def reset_memory(self):
        self.dnd.reset_memory()
    def save_memory(self, mem_key, mem_val):
        self.dnd.save_memory(mem_key, mem_val)
    def retrieve_memory(self, query_key):
        return self.dnd.get_memory(query_key)
    def get_all_mems(self):
        # NOTE(review): len(self.dnd.keys) is the full buffer length
        # (dict_len), so unwritten rows are included before the DND fills up.
        n_mems = len(self.dnd.keys)
        K = [self.dnd.keys[i] for i in range(n_mems)]
        V = [self.dnd.vals[i] for i in range(n_mems)]
        return K, V
| 3,612 | 27.448819 | 85 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/models/a3c_lstm_simple.py | import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from models.rgu import RGUnit
# Recurrent-cell registry used by A3C_LSTM's `cell_type` argument.
CELLS = {
    'lstm': nn.LSTM,
    'gru': nn.GRU,
    'rgu': RGUnit
}
class A3C_LSTM(nn.Module):
    """Actor-critic agent: small MLP encoder -> one recurrent cell -> heads.

    NOTE(review): `input_dim` is accepted but unused — the encoder hard-codes
    a 9-feature input; confirm against callers before relying on it.
    """
    def __init__(self, input_dim, hidden_size, num_actions, cell_type="lstm"):
        super(A3C_LSTM, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(9, 64),
            nn.ReLU(),
            nn.Linear(64, 128),
            nn.ReLU(),
        )
        # Pick the recurrent cell class from the CELLS registry.
        rnn = CELLS[cell_type]
        self.cell_type = cell_type
        # Cell input = encoded features (128) + prev action one-hot + prev reward.
        self.working_memory = rnn(128+num_actions+1, hidden_size)
        self.actor = nn.Linear(hidden_size, num_actions)
        self.critic = nn.Linear(hidden_size, 1)
        # initialize actor and critic weights
        T.nn.init.orthogonal_(self.actor.weight.data, 0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight.data, 1)
        self.critic.bias.data.fill_(0)
    def forward(self, obs, p_input, mem_state=None):
        """One step: returns (action_logits, value_estimate, new cell state)."""
        if mem_state is None:
            mem_state = self.get_init_states()
        feats = self.encoder(obs)
        # Concatenate the previous-action/previous-reward feedback.
        mem_input = T.cat((feats, *p_input), dim=-1)
        if len(mem_input.size()) == 2:
            mem_input = mem_input.unsqueeze(0)
        h_t, mem_state = self.working_memory(mem_input, mem_state)
        action_logits = self.actor(h_t)
        value_estimate = self.critic(h_t)
        return action_logits, value_estimate, mem_state
    def get_init_states(self, device='cpu'):
        # LSTM/RGU cells take an (h, c) pair; GRU takes a single tensor.
        h0 = T.zeros(1, 1, self.working_memory.hidden_size).float().to(device)
        c0 = T.zeros(1, 1, self.working_memory.hidden_size).float().to(device)
        return (h0, c0) if self.cell_type in ["lstm", "rgu"] else h0
class A3C_StackedLSTM(nn.Module):
    """Actor-critic agent with two stacked LSTMs over MLP-encoded features.

    LSTM-1 consumes the encoded features; LSTM-2 consumes (LSTM-1 hidden,
    prev reward, prev action) and feeds the actor/critic heads.
    NOTE(review): `input_dim` is stored but the encoder hard-codes a
    9-feature input; confirm against callers.
    """
    def __init__(self,
            input_dim,
            hidden_dim,
            num_actions,
            device="cpu",
        ):
        super(A3C_StackedLSTM, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.device = device
        feat_dim = 128
        self.encoder = nn.Sequential(
            nn.Linear(9, 64),
            nn.ReLU(),
            nn.Linear(64, feat_dim),
            nn.ReLU(),
        )
        # short-term memory
        # self.lstm_1 = nn.LSTM(feat_dim+1, hidden_dim)
        # self.lstm_2 = nn.LSTM(feat_dim+num_actions+hidden_dim, hidden_dim // 2)
        self.lstm_1 = nn.LSTM(feat_dim, hidden_dim)
        self.lstm_2 = nn.LSTM(hidden_dim+1+num_actions, hidden_dim)
        self.actor = nn.Linear(hidden_dim, num_actions)
        self.critic = nn.Linear(hidden_dim, 1)
        self.reset_parameters()
    def reset_parameters(self):
        # Orthogonal head init: small gain for the policy, unit for the value.
        T.nn.init.orthogonal_(self.actor.weight, gain=0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight, gain=1.0)
        self.critic.bias.data.fill_(0)
    def forward(self, obs, p_input, state_1, state_2):
        """One step: returns (logits, value, (h1, c1), (h2, c2))."""
        p_action, p_reward = p_input
        feats = self.encoder(obs)
        # x_t1 = T.cat((feats, p_reward), dim=-1).unsqueeze(1)
        # x_t1 = T.cat((feats, p_action, p_reward), dim=-1).unsqueeze(1)
        _, (h_t1, c_t1) = self.lstm_1(feats.unsqueeze(1), state_1)
        # Layer-2 input: layer-1 hidden + prev reward + prev action one-hot.
        x_t2 = T.cat((h_t1.squeeze(0), p_reward, p_action), dim=-1).unsqueeze(1)
        _, (h_t2, c_t2) = self.lstm_2(x_t2, state_2)
        action_logits = self.actor(h_t2)
        value_estimate = self.critic(h_t2)
        return action_logits, value_estimate, (h_t1, c_t1), (h_t2, c_t2)
    def get_init_states(self, layer=1):
        # Zero state sized for the requested LSTM layer, on self.device.
        hidden_size = self.lstm_1.hidden_size if layer == 1 else self.lstm_2.hidden_size
        h0 = T.zeros(1, 1, hidden_size).float().to(self.device)
        c0 = T.zeros(1, 1, hidden_size).float().to(self.device)
        return (h0, c0)
| 3,913 | 29.341085 | 88 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/models/densenet_lstm.py | import numpy as np
import torch as T
import torchvision
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
    """ImageNet-pretrained DenseNet-161 feature extractor.

    Drops the final classifier and returns the pooled 2208-d feature
    vector per image.

    Args:
        freeze: when True (default), all parameters have requires_grad
            disabled so the backbone is not trained.
    """
    def __init__(self, freeze = True):
        super(Encoder,self).__init__()
        original_model = torchvision.models.densenet161(pretrained=True)
        # keep everything but the final classification layer
        self.features = T.nn.Sequential(*list(original_model.children())[:-1])
        # Fix: honor the `freeze` flag. Previously parameters were frozen
        # unconditionally, making the argument dead.
        if freeze:
            for param in self.parameters():
                param.requires_grad = False
    def forward(self, x):
        """Return pooled, flattened DenseNet features for a batch of images."""
        x = self.features(x)
        x = F.relu(x, inplace=True)
        # 7x7 average pool collapses the spatial dims -> (batch, 2208)
        x = F.avg_pool2d(x, kernel_size=7).view(x.size(0), -1)
        return x
class DenseNet_StackedLSTM(nn.Module):
    """Actor-critic over frozen DenseNet features with two stacked LSTMs.

    LSTM 1 consumes (features, previous reward); LSTM 2 consumes
    (features, LSTM-1 output, previous one-hot action) and feeds the heads.
    """

    def __init__(self, config, num_actions, pretrained=True):
        super(DenseNet_StackedLSTM, self).__init__()
        self.encoder = Encoder(freeze=True)
        self.actor = nn.Linear(256, num_actions)
        self.critic = nn.Linear(256, 1)
        # 2208 = DenseNet-161 pooled feature width
        self.lstm_1 = nn.LSTM(2208+1, config["mem-units"])
        self.lstm_2 = nn.LSTM(2208+config["mem-units"]+num_actions, 256)
        # initialize actor and critic weights (orthogonal; small policy gain)
        T.nn.init.orthogonal_(self.actor.weight.data, 0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight.data, 1)
        self.critic.bias.data.fill_(0)

    def forward(self, obs, p_input, state_1=None, state_2=None):
        """One step; returns (action_logits, value_estimate, state_1, state_2)."""
        p_action, p_reward = p_input
        if state_1 is None:
            state_1 = self.get_init_states(layer=1)
        if state_2 is None:
            state_2 = self.get_init_states(layer=2)
        feats = self.encoder(obs)
        input_1 = T.cat((feats, p_reward), dim=-1)
        # LSTMs expect a leading time dimension
        if len(input_1.size()) == 2:
            input_1 = input_1.unsqueeze(0)
        output_1, state_1 = self.lstm_1(input_1, state_1)
        input_2 = T.cat((feats, output_1.squeeze(0), p_action), dim=-1)
        if len(input_2.size()) == 2:
            input_2 = input_2.unsqueeze(0)
        output_2, state_2 = self.lstm_2(input_2, state_2)
        action_logits = self.actor(output_2)
        value_estimate = self.critic(output_2)
        return action_logits, value_estimate, state_1, state_2

    def get_init_states(self, layer, device='cuda'):
        """Zero (h, c) state for the given layer; NOTE: defaults to CUDA."""
        hsize = self.lstm_1.hidden_size if layer == 1 else self.lstm_2.hidden_size
        h0 = T.zeros(1, 1, hsize).float().to(device)
        c0 = T.zeros(1, 1, hsize).float().to(device)
        return (h0, c0)

    def save_featmaps(self, obs, path, layer=5):
        """Save intermediate DenseNet feature maps (up to `layer`) to `path`.

        NOTE(review): `featmaps` is a torch tensor; np.save may require an
        explicit .detach().cpu().numpy() depending on device/grad state.
        """
        featmaps = self.encoder.features[:layer+1](obs)
        np.save(path, featmaps)
| 2,587 | 31.759494 | 82 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/models/a3c_lstm.py | import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class A3C_LSTM(nn.Module):
    """Convolutional A3C agent with a single LSTM working memory.

    The LSTM step input is the 256-d conv feature vector concatenated
    with the one-hot previous action and the previous reward.
    """

    def __init__(self, config, num_actions):
        super(A3C_LSTM, self).__init__()
        # conv stack -> flatten -> 256-d feature vector
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=(8, 8), stride=(4, 4)),
            nn.Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2)),
            nn.Flatten(),
            nn.Linear(7200, 256),
            nn.ReLU()
        )
        self.actor = nn.Linear(config["mem-units"], num_actions)
        self.critic = nn.Linear(config["mem-units"], 1)
        # LSTM input: features + one-hot previous action + previous reward
        self.working_memory = nn.LSTM(256 + num_actions + 1, config["mem-units"])
        # orthogonal initialization of the policy/value heads
        T.nn.init.orthogonal_(self.actor.weight.data, 0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight.data, 1)
        self.critic.bias.data.fill_(0)

    def forward(self, obs, p_input, mem_state=None):
        """One step; returns (action_logits, value_estimate, new LSTM state)."""
        if mem_state is None:
            mem_state = self.get_init_states()
        features = self.encoder(obs)
        step_input = T.cat((features, *p_input), dim=-1).unsqueeze(0)
        h_t, mem_state = self.working_memory(step_input, mem_state)
        return self.actor(h_t), self.critic(h_t), mem_state

    def get_init_states(self, device='cpu'):
        """Zero (h, c) LSTM state on the requested device."""
        size = self.working_memory.hidden_size
        h0 = T.zeros(1, 1, size).float().to(device)
        c0 = T.zeros(1, 1, size).float().to(device)
        return (h0, c0)
class A3C_StackedLSTM(nn.Module):
    """Convolutional A3C agent with two stacked LSTMs.

    LSTM 1 consumes (conv features, previous reward); LSTM 2 consumes
    (conv features, LSTM-1 output, previous one-hot action).
    """

    def __init__(self, config, num_actions):
        super(A3C_StackedLSTM, self).__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=(8, 8), stride=(4, 4)), # output: (16, 20, 20)
            nn.ReLU(),
            nn.Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2)), # output: (32, 9, 9)
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(7200, 256),
            nn.ReLU()
        )
        self.actor = nn.Linear(128, num_actions)
        self.critic = nn.Linear(128, 1)
        self.lstm_1 = nn.LSTM(256+1, config["mem-units"])
        self.lstm_2 = nn.LSTM(256+config["mem-units"]+num_actions, 128)
        # initialize actor and critic weights (orthogonal; small policy gain)
        T.nn.init.orthogonal_(self.actor.weight.data, 0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight.data, 1)
        self.critic.bias.data.fill_(0)

    def forward(self, obs, p_input, state_1=None, state_2=None):
        """One step; returns (action_logits, value_estimate, state_1, state_2)."""
        p_action, p_reward = p_input
        if state_1 is None:
            state_1 = self.get_init_states(layer=1)
        if state_2 is None:
            state_2 = self.get_init_states(layer=2)
        feats = self.encoder(obs)
        # unsqueeze adds the time dimension expected by nn.LSTM
        input_1 = T.cat((feats, p_reward), dim=-1).unsqueeze(0)
        output_1, state_1 = self.lstm_1(input_1, state_1)
        input_2 = T.cat((feats, output_1.squeeze(0), p_action), dim=-1).unsqueeze(0)
        output_2, state_2 = self.lstm_2(input_2, state_2)
        action_logits = self.actor(output_2)
        value_estimate = self.critic(output_2)
        return action_logits, value_estimate, state_1, state_2

    def get_init_states(self, layer, device='cpu'):
        """Zero (h, c) state sized for the requested layer."""
        hsize = self.lstm_1.hidden_size if layer == 1 else self.lstm_2.hidden_size
        h0 = T.zeros(1, 1, hsize).float().to(device)
        c0 = T.zeros(1, 1, hsize).float().to(device)
        return (h0, c0)
Meta-RL-Harlow | Meta-RL-Harlow-master/models/rgu_cell.py | from typing import (
Tuple,
List,
Optional,
Dict,
Callable,
Union,
cast,
)
from collections import namedtuple
from abc import ABC, abstractmethod
from dataclasses import dataclass
import torch as T
from torch import nn
from torch.nn import functional as F
from torch import Tensor
import pdb
# Public API of this module.
__all__ = [
    'RGUnit',
    'ReciprocallyGated_Cell',
    'ReciprocallyGated_Cell_Builder',
]
# Named pair of gate tensors: `gh` drives the hidden state, `gc` the cell state.
GateSpans = namedtuple('GateSpans', ['gh', 'gc'])
# Activation modules selectable by name via the cell builder config.
ACTIVATIONS = {
    'sigmoid': nn.Sigmoid(),
    'tanh': nn.Tanh(),
    'hard_tanh': nn.Hardtanh(),
    'relu': nn.ReLU(),
}
class ReciprocallyGated_Cell(nn.Module):
    '''
    Reciprocally gated recurrent cell: the hidden state and the cell
    memory each gate the other's update ("reciprocal" gating).

    Adapted from:
    https://papers.nips.cc/paper/7775-task-driven-convolutional-recurrent-models-of-the-visual-system
    arxiv:1807.00053
    with modifications.
    '''
    def __repr__(self):
        # compact summary: input width, hidden width, dropout probabilities
        return (
            f'{self.__class__.__name__}('
            + ', '.join(
                [
                    f'in: {self.Dx}',
                    f'hid: {self.Dh}',
                    f'rdo: {self.recurrent_dropout_p}',
                    f'vdo: {self.vertical_dropout_p}',
                ]
            )
            +')'
        )
    def __init__(
        self,
        input_size: int,
        args,
    ):
        # `args` is the builder/config object carrying hidden_size,
        # dropout probabilities and activation names.
        super().__init__()
        self._args = args
        self.Dx = input_size
        self.Dh = args.hidden_size
        # self.recurrent_kernel = nn.Linear(self.Dh, self.Dh * 2)
        # self.cell_memory_kernel = nn.Linear(self.Dh, self.Dh * 2)
        # self.input_kernel = nn.Linear(self.Dx, self.Dh * 2)
        # NOTE(review): all three kernels use in_channels=self.Dh — including
        # the input kernel, where self.Dx might be expected — and
        # out_channels=2 does not obviously match the [b h] shape
        # annotations in the apply_* helpers below. Confirm how this cell
        # is driven before relying on these dimensions.
        self.recurrent_kernel = nn.Conv1d(
            in_channels=self.Dh,
            out_channels=2,
            kernel_size=3,
            stride=1
        )
        self.cell_memory_kernel = nn.Conv1d(
            in_channels=self.Dh,
            out_channels=2,
            kernel_size=3,
            stride=1
        )
        self.input_kernel = nn.Conv1d(
            in_channels=self.Dh,
            out_channels=2,
            kernel_size=3,
            stride=1
        )
        # `or 0.0` maps a None config value to "no dropout"
        self.recurrent_dropout_p = args.recurrent_dropout or 0.0
        self.vertical_dropout_p = args.vertical_dropout or 0.0
        self.recurrent_dropout = nn.Dropout(self.recurrent_dropout_p)
        self.vertical_dropout = nn.Dropout(self.vertical_dropout_p)
        self.fun_gate = ACTIVATIONS[args.gate_activation]
        self.fun_main = ACTIVATIONS[args.activation]
        self.reset_parameters_()
    # @T.jit.ignore
    def get_recurrent_weights(self):
        # type: () -> Tuple[GateSpans, GateSpans]
        # split the recurrent kernel's weight/bias into the two gate halves
        W = self.recurrent_kernel.weight.chunk(2, 0)
        b = self.recurrent_kernel.bias.chunk(2, 0)
        W = GateSpans(W[0], W[1])
        b = GateSpans(b[0], b[1])
        return W, b
    # @T.jit.ignore
    def get_cell_memory_weights(self):
        # type: () -> Tuple[GateSpans, GateSpans]
        # split the cell-memory kernel's weight/bias into the two gate halves
        W = self.cell_memory_kernel.weight.chunk(2, 0)
        b = self.cell_memory_kernel.bias.chunk(2, 0)
        W = GateSpans(W[0], W[1])
        b = GateSpans(b[0], b[1])
        return W, b
    @T.jit.ignore
    def reset_parameters_(self):
        """Orthogonal recurrent weights, Xavier cell-memory weights,
        zero cell-memory bias, recurrent bias at 0.5."""
        rw, rb = self.get_recurrent_weights()
        iw, ib = self.get_cell_memory_weights()
        nn.init.zeros_(self.cell_memory_kernel.bias)
        nn.init.constant_(self.recurrent_kernel.bias, 0.5)
        for W in rw:
            nn.init.orthogonal_(W)
        for W in iw:
            nn.init.xavier_uniform_(W)
    @T.jit.export
    def get_init_state(self, input: Tensor) -> Tuple[Tensor, Tensor]:
        """Zero (h, c) state; batch size taken from input's dim 1 ([t b i])."""
        batch_size = input.shape[1]
        zeros = T.zeros(batch_size, self.Dh, device=input.device)
        return (zeros, zeros)
    def apply_input_kernel(self, xt: Tensor) -> List[Tensor]:
        #^ xt : [b h]
        # vertical dropout is applied to the feed-forward (input) path
        xto = self.vertical_dropout(xt)
        out = self.input_kernel(xto).chunk(2, 1)
        #^ out : [b h]
        return out
    def apply_recurrent_kernel(self, h_tm1: Tensor) -> List[Tensor]:
        #^ h_tm1 : [b h]
        # recurrent dropout is applied to the hidden-to-hidden path
        hto = self.recurrent_dropout(h_tm1)
        out = self.recurrent_kernel(hto).chunk(2, 1)
        #^ out : [b h]
        return out
    def apply_cell_memory_kernel(self, c_tm1: Tensor) -> List[Tensor]:
        # no dropout on the cell-memory path
        out = self.cell_memory_kernel(c_tm1).chunk(2, 1)
        return out
    def forward(self, input, state):
        # type: (Tensor, Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]
        #^ input : [b i]
        #^ state.h : [b h]
        h_tm1, c_tm1 = state
        # each kernel produces one contribution for the cell path (…c)
        # and one for the hidden path (…h)
        Cc, Ch = self.apply_cell_memory_kernel(c_tm1)
        Hc, Hh = self.apply_recurrent_kernel(h_tm1)
        Xc, Xh = self.apply_input_kernel(input)
        # reciprocal gating: the cell gates the hidden update and vice versa
        gh = (1 - self.fun_gate(Ch)) * Xh + (1 - self.fun_gate(Hh)) * h_tm1
        gc = (1 - self.fun_gate(Hc)) * Xc + (1 - self.fun_gate(Cc)) * c_tm1
        ht = self.fun_main(gh)
        ct = self.fun_main(gc)
        return ht, (ht, ct)
    @T.jit.export
    def loop(self, inputs, state_t0, mask=None):
        # type: (List[Tensor], Tuple[Tensor, Tensor], Optional[List[Tensor]]) -> List[Tensor]
        '''
        This loops over t (time) steps.

        NOTE(review): actually returns (outs, state), not just the list
        declared in the type comment; `mask` is accepted but unused.
        '''
        #^ inputs : t * [b i]
        #^ state_t0[i] : [b s]
        #^ out : [t b h]
        state = state_t0
        outs = []
        for xt in inputs:
            ht, state = self(xt, state)
            outs.append(ht)
        return outs, state
Meta-RL-Harlow | Meta-RL-Harlow-master/models/rgu.py | from typing import (
Tuple,
List,
Optional,
Dict,
Callable,
Union,
cast,
)
from collections import namedtuple
from abc import ABC, abstractmethod
from dataclasses import dataclass
import numpy as np
import torch as T
from torch import nn
from torch.nn import functional as F
from torch import Tensor
from models.rgu_cell import ReciprocallyGated_Cell
@dataclass
class ReciprocallyGated_Cell_Builder:
    """Configuration/factory for ReciprocallyGated_Cell instances.

    The builder itself doubles as the cell's `args` config object, so its
    field names must match the attributes the cell reads.
    """
    # input_size: int
    hidden_size: int
    vertical_dropout : float = 0.0
    recurrent_dropout : float = 0.0
    input_kernel_initialization : str = 'xavier_uniform'
    gate_activation : str = 'sigmoid'
    activation : str = 'tanh'
    def make(self, input_size: int):
        # build one cell with this configuration
        return ReciprocallyGated_Cell(input_size, self)
class RGU_Layer(nn.Module):
    """Runs a recurrent cell over a full sequence.

    `batch_first` may be a single bool (applied to both input and output)
    or an (input, output) pair of bools.
    """
    def __init__(
        self,
        cell,
        direction='forward',
        batch_first=False,
    ):
        super().__init__()
        if isinstance(batch_first, bool):
            batch_first = (batch_first, batch_first)
        self.batch_first = batch_first
        # NOTE(review): `direction` is stored but never read in this class.
        self.direction = direction
        self.cell_ = cell
    @T.jit.ignore
    def forward(self, input, state_t0, return_state=None):
        # NOTE(review): `return_state` is accepted but unused; the state is
        # always returned alongside the sequence.
        if self.batch_first[0]:
            #^ input : [b t i]
            input = input.transpose(1, 0)
        #^ input : [t b i]
        inputs = input.unbind(0)
        if state_t0 is None:
            # let the cell build a zero state sized for this batch
            state_t0 = self.cell_.get_init_state(input)
        sequence, state = self.cell_.loop(inputs, state_t0)
        #^ sequence : t * [b h]
        sequence = T.stack(sequence)
        #^ sequence : [t b h]
        if self.batch_first[1]:
            sequence = sequence.transpose(1, 0)
            #^ sequence : [b t h]
        return sequence, state
class RGUnit(nn.Module):
    """Stack of reciprocally-gated recurrent layers.

    Extra positional/keyword arguments are forwarded to
    ReciprocallyGated_Cell_Builder (hidden_size, dropouts, activations).
    """
    def __init__(
        self,
        input_size : int,
        num_layers : int,
        batch_first : bool = False,
        scripted : bool = True,
        *args, **kargs,
    ):
        super().__init__()
        self._cell_builder = ReciprocallyGated_Cell_Builder(*args, **kargs)
        Dh = self._cell_builder.hidden_size
        def make(isize: int):
            # one recurrent layer wrapped around a fresh cell
            cell = self._cell_builder.make(isize)
            # Fix: pass batch_first by keyword only. The second positional
            # parameter of RGU_Layer is `direction`, so the old call
            # RGU_Layer(cell, isize, batch_first=...) stored the input size
            # as the (unused) direction.
            return RGU_Layer(cell, batch_first=batch_first)
        # first layer consumes input_size, subsequent layers consume Dh
        rnns = [
            make(input_size),
            *[
                make(Dh)
                for _ in range(num_layers - 1)
            ],
        ]
        self.rnn = nn.Sequential(*rnns)
        self.input_size = input_size
        self.hidden_size = self._cell_builder.hidden_size
        self.num_layers = num_layers
    def __repr__(self):
        # Fix: the previous implementation referenced a non-existent
        # `self.bidirectional` attribute and left the parenthesis
        # unbalanced, so repr() raised AttributeError.
        return (
            f'{self.__class__.__name__}'
            + '('
            + f'in={self.input_size}, '
            + f'hid={self.hidden_size}, '
            + f'layers={self.num_layers}'
            + '; '
            + str(self._cell_builder)
            + ')'
        )
    def forward(self, inputs, state_t0=None):
        """Run all layers; returns (outputs, last layer's final state).

        NOTE(review): every layer receives the same `state_t0` (behavior
        preserved from the original); confirm whether deeper layers should
        start from None instead.
        """
        for rnn in self.rnn:
            inputs, state = rnn(inputs, state_t0)
        return inputs, state
    def reset_parameters(self):
        """Re-initialize every layer's cell parameters."""
        for rnn in self.rnn:
            rnn.cell_.reset_parameters_()
if __name__ == "__main__":
    # Smoke test: build a single-layer RGU stack and run one forward pass.
    rgu = RGUnit(
        input_size=128,
        hidden_size=256,
        num_layers=1,
        batch_first=False
    )
    # batch_first=False means [t, b, i]: one time step, batch of 16
    x_t = T.rand(1, 16, 128)
    # NOTE(review): the initial state has batch size 1 while x_t has batch
    # size 16 — confirm these shapes are compatible inside the cell.
    state_0 = (T.zeros(1, 256), T.zeros(1, 256))
    h_t, state_t = rgu(x_t, state_0)
    import pdb; pdb.set_trace()  # NOTE(review): debug breakpoint left in
Meta-RL-Harlow | Meta-RL-Harlow-master/models/resnet_lstm.py | import numpy as np
import torch as T
import torchvision
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
    """ResNet-18 feature extractor (randomly initialized, classifier dropped).

    Returns the pooled 512-d feature vector per input image.
    """
    def __init__(self):
        super(Encoder,self).__init__()
        # pretrained=False: weights are trained from scratch with the agent
        original_model = torchvision.models.resnet18(pretrained=False)
        # keep everything but the final fully-connected classifier
        self.features = T.nn.Sequential(*list(original_model.children())[:-1])
    def forward(self, x):
        """Return flattened features, one row per sample.

        Fix: flatten using the actual batch dimension instead of the
        hard-coded batch size of 1, so batched input also works
        (identical result for batch size 1).
        """
        x = self.features(x)
        return x.view(x.size(0), -1)
class ResNet_LSTM(nn.Module):
    """Actor-critic with a ResNet-18 encoder and a single LSTM.

    The LSTM step input is (features, previous reward, previous one-hot
    action) as concatenated by the caller via `p_input`.
    """
    def __init__(self, config, num_actions, pretrained=True):
        super(ResNet_LSTM, self).__init__()
        self.encoder = Encoder()
        self.cell_type = config["cell-type"]
        # for param in self.encoder.parameters():
        #     param.requires_grad = False
        # 512 = ResNet-18 pooled feature width
        self.lstm = nn.LSTM(512+1+num_actions, config["mem-units"])
        self.actor = nn.Linear(config["mem-units"], num_actions)
        self.critic = nn.Linear(config["mem-units"], 1)
        # initialize actor and critic weights (orthogonal; small policy gain)
        T.nn.init.orthogonal_(self.actor.weight.data, 0.01)
        self.actor.bias.data.fill_(0)
        T.nn.init.orthogonal_(self.critic.weight.data, 1)
        self.critic.bias.data.fill_(0)
    def forward(self, obs, p_input, state):
        """One step; returns (action_logits, value_estimate, new LSTM state)."""
        feats = self.encoder(obs)
        # unsqueeze adds the time dimension expected by nn.LSTM
        x_t = T.cat((feats, *p_input), dim=-1).unsqueeze(0)
        output, state_out = self.lstm(x_t, state)
        action_logits = self.actor(output)
        value_estimate = self.critic(output)
        return action_logits, value_estimate, state_out
    def get_init_states(self, device='cuda'):
        """Zero initial state (NOTE: defaults to CUDA).

        NOTE(review): returns a bare h0 for non-"lstm" cell types even
        though self.lstm is always an nn.LSTM, which expects the (h, c)
        tuple — confirm non-lstm configurations are valid here.
        """
        hsize = self.lstm.hidden_size
        h0 = T.zeros(1, 1, hsize).float().to(device)
        c0 = T.zeros(1, 1, hsize).float().to(device)
        return (h0, c0) if self.cell_type == "lstm" else h0
Meta-RL-Harlow | Meta-RL-Harlow-master/Harlow_1D/harlow.py | import os
import sys
import imageio
import numpy as np
import matplotlib.pyplot as plt
"""helpers"""
def _binary2int(binary):
return (binary * 2**np.arange(binary.shape[0]-1, -1, -1)).sum()
def _int2binary(decimal, length=10):
return np.array([int(x) for x in format(decimal, f'#0{length+2}b')[2:]])
class Harlow_1D:
    """A 1D episodic variant of the Harlow Task.

    The agent shifts a circular 1-D strip left/right under a fixed
    receptive field.  Reaching the fixation cross yields a small reward
    and reveals two objects on either side of the center; reaching the
    episode's rewarded object yields +obj_reward, the other object
    -obj_reward.  The same (obj_1, obj_2) pair is reused for `n_trials`
    trials per episode, so the agent can learn which one pays.
    """
    def __init__(self,
        verbose = False,
        visualize = False,
        save_path = None,
        save_interval = None
    ):
        '''environment constants'''
        self.max_length = 250           # hard cap on steps per episode
        self.n_trials = 6
        self.n_actions = 2
        self.n_objects = 1000           # pool of distinct object ids
        self.n_episodes = 2500
        self.state_len = 17 # size of state
        self.obs_length = 8 # size of receptive field
        self.obj_offset = 3             # object distance from the center
        self.fix_reward = 0.2
        self.obj_reward = 1
        self.time_step = 0
        self.map_action = [1, -1]       # action index -> roll direction
        self.episode_num = 0
        self.verbose = verbose
        self.visualize = visualize
        self.center = self.state_len // 2
        # per-episode, per-trial success flags (1 = rewarded object chosen)
        self.reward_counter = np.zeros((self.n_episodes,self.n_trials))
        if self.visualize:
            self.frames = []
            self._create_palette()
            self.save_path = save_path
            self.save_interval = save_interval
    @property
    def current(self):
        # value of the cell under the agent (center of the strip)
        return self.state[self.center]
    def _place_objects(self):
        """Reveal the two episode objects on either side of the center."""
        self.state[self.center-self.obj_offset] = self.obj_1
        self.state[self.center+self.obj_offset] = self.obj_2
        self.state[self.center] = 0
        # randomize which side each object appears on
        swap = np.random.rand() < 0.5
        if swap:
            self.state[self.center-self.obj_offset] = self.obj_2
            self.state[self.center+self.obj_offset] = self.obj_1
    def _place_fixation(self):
        """Clear the strip and place the fixation cross away from the agent."""
        self.state = np.zeros(self.state_len)
        if self.pointer > self.center:
            self.state[self.center - self.obj_offset] = 1
        else:
            self.state[self.center + self.obj_offset] = 1
    def observation(self):
        """Centered window of the strip (the agent's receptive field).

        NOTE(review): with state_len=17 and obs_length=8 this slice returns
        9 cells (17 - 2*4), one more than obs_length — confirm intended.
        """
        offset = (self.state_len - self.obs_length) // 2
        return self.state[offset:-offset]
    def step(self, action):
        """Apply `action` (index into map_action: 0 -> +1 roll, 1 -> -1 roll).

        Returns (observation, reward, done, time_step).
        """
        self.time_step += 1
        reward = 0
        # moving the agent == rolling the strip in the opposite direction
        self.state = np.roll(self.state, self.map_action[action])
        self.pointer -= self.map_action[action]
        # wrap the pointer around the circular strip
        if self.pointer >= self.state_len:
            self.pointer = 0
        elif self.pointer < 0:
            self.pointer = self.state_len - 1
        if self.current != 0 and self.visualize:
            self._add_frames(self.observation())
        if self.current == 1:
            # fixation reached: small reward, then reveal the objects
            reward = self.fix_reward
            self._place_objects()
        elif self.current == self.obj_1:
            # obj_1 reached: reward depends on which object pays this episode
            reward = self.obj_reward if self.reward_obj else -self.obj_reward
            if self.reward_obj:
                self.reward_counter[self.episode_num-1][self.trial_num] = 1
            self.trial_num += 1
            self._place_fixation()
        elif self.current == self.obj_2:
            reward = self.obj_reward if not self.reward_obj else -self.obj_reward
            if not self.reward_obj:
                self.reward_counter[self.episode_num-1][self.trial_num] = 1
            self.trial_num += 1
            self._place_fixation()
        obs = self.observation()
        if self.visualize:
            self._add_frames(obs)
        if self.verbose:
            print(f"Observation: {obs}")
            print(f"Reward: {reward} | Pointer: {self.pointer}")
        done = self.trial_num >= self.n_trials or self.time_step >= self.max_length
        return obs, reward, done, self.time_step
    def reset(self):
        """Start a new episode; returns the first observation."""
        self.trial_num = 0
        self.time_step = 0
        self.episode_num += 1
        self.pointer = self.center
        # periodically flush accumulated visualization frames to a GIF
        if self.visualize and len(self.frames) > 0 and self.episode_num % self.save_interval == 0:
            self._save_frames()
            self.frames = []
        # initialize state
        self.state = np.zeros(self.state_len)
        self.state[self.center] = 1
        # NOTE(review): randint's upper bound is exclusive, so shift is in
        # [-obj_offset, obj_offset-1] — confirm the asymmetry is intended.
        shift = np.random.randint(-self.obj_offset, self.obj_offset)
        if shift == 0: shift = 1
        self.state = np.roll(self.state, shift)
        self.pointer -= shift
        obs = self.observation()
        if self.visualize:
            self._add_frames(obs)
        if self.verbose:
            print(f"Observation: {obs}")
            print(f"Pointer: {self.pointer}")
        # episode objects
        # NOTE(review): randint(size=2) can draw two equal ids, which would
        # make the two objects indistinguishable — confirm acceptable.
        self.obj_1, self.obj_2 = np.random.randint(
            low=2,
            high=self.n_objects+2,
            size=2
        )
        self.reward_obj = np.random.rand() < 0.5
        # normalize ids into (0, 1] so observation values stay small
        self.obj_1 /= self.n_objects
        self.obj_2 /= self.n_objects
        return obs
    def _visualize_obs(self, obs):
        """Render one observation row as an RGB image array."""
        size = 20
        background = np.ones((size*5, size*(obs.shape[0]), 3), dtype=np.uint8) * 255
        bar = np.zeros((size, size*(obs.shape[0]), 3), dtype=np.uint8)
        for i, cell in enumerate(obs):
            if cell == 1:
                # draw fixation cross
                bar[0:9,i*size:i*size+9] = [255, 0, 0]
                bar[0:9,i*size+11:i*size+20] = [255, 0, 0]
                bar[11:20,i*size+11:i*size+20] = [255, 0, 0]
                bar[11:20,i*size:i*size+9] = [255, 0, 0]
            elif cell > 0:
                # colored square identifying the object (undo normalization)
                idx = int(cell*self.n_objects)
                bar[:,i*size:i*size+size] = self.palette[idx]
        background[size*2:size*3] = bar
        return background
    def _create_palette(self):
        """Random RGB color per object id.

        NOTE(review): duplicate colors are skipped without replacement, so
        the palette can end up shorter than n_objects.
        """
        self.palette = []
        for _ in range(self.n_objects):
            color = list(np.random.choice(range(256), size=3))
            if color not in self.palette:
                self.palette += [color]
    def _add_frames(self, obs):
        # repeat each frame so the GIF lingers on every step
        bar = self._visualize_obs(obs)
        for _ in range(10):
            self.frames += [bar]
    def _save_frames(self):
        # save_path is a template with an {epi} placeholder
        filepath = self.save_path.format(epi=self.episode_num)
        imageio.mimsave(filepath, self.frames)
if __name__ == "__main__":
    # Interactive smoke test of the environment.
    env = Harlow_1D(verbose=True)
    # Fix: reset() must run before step() — the strip (`self.state`,
    # `self.trial_num`, the episode objects) only exists after a reset.
    env.reset()
    while True:
        action = int(input("Left (1) or Right (2): "))
        if action <= 0:
            break
        # Fix: map keyboard input 1/2 onto action indices 0/1;
        # `map_action` has only two entries, so a raw 2 would IndexError.
        _, _, done, _ = env.step(action - 1)
        if done:
            env.reset()
| 6,298 | 29.138756 | 98 | py |
Meta-RL-Harlow | Meta-RL-Harlow-master/Harlow_1D/train.py | import os
import yaml
import pickle
import argparse
import numpy as np
import torch as T
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from datetime import datetime
from collections import namedtuple
from Harlow_1D.harlow import Harlow_1D
from models.a3c_lstm_simple import A3C_LSTM, A3C_StackedLSTM
def ensure_shared_grads(model, shared_model):
    """Hand the worker's gradients to the shared A3C model.

    Walks the two parameter lists in lockstep and points each shared
    parameter's gradient at the worker's gradient tensor. Bails out as
    soon as a shared parameter already has a gradient, leaving the
    remaining shared gradients untouched.
    """
    for local_p, shared_p in zip(model.parameters(),
                                 shared_model.parameters()):
        if shared_p.grad is not None:
            # shared model already holds gradients — nothing to do
            return
        shared_p._grad = local_p.grad
def train(config,
        shared_model,
        optimizer,
        rank
    ):
    """A3C worker loop for the single-cell agent on Harlow_1D.

    Runs episodes, collects `n-step-update`-step rollouts, computes the
    GAE actor-critic loss, and pushes gradients into `shared_model`
    through the shared `optimizer`. `rank` identifies the worker for
    seeding and logging.
    """
    # per-worker seeding so workers explore differently
    T.manual_seed(config["seed"] + rank)
    np.random.seed(config["seed"] + rank)
    T.random.manual_seed(config["seed"] + rank)
    device = config["device"]
    env = Harlow_1D()
    if config["mode"] == "vanilla":
        agent = A3C_LSTM(
            config["task"]["input-dim"],
            config["agent"]["mem-units"],
            config["task"]["num-actions"],
            config["agent"]["cell-type"]
        )
    else:
        raise ValueError(config["mode"])
    agent.to(device)
    agent.train()
    ### hyper-parameters ###
    gamma = config["agent"]["gamma"]
    gae_lambda = config["agent"]["gae-lambda"]
    val_coeff = config["agent"]["value-loss-weight"]
    entropy_coeff = config["agent"]["entropy-weight"]
    n_step_update = config["agent"]["n-step-update"]
    writer = SummaryWriter(log_dir=os.path.join(config["log-path"], config["run-title"] + f"_{rank}"))
    save_path = os.path.join(config["save-path"], config["run-title"], config["run-title"]+"_{epi:04d}")
    save_interval = config["save-interval"]
    cell_type = config["agent"]["cell-type"]
    done = True
    state = env.reset()
    p_action, p_reward = [0]*config["task"]["num-actions"], 0
    print('='*50)
    print(f"Starting Worker {rank}")
    print('='*50)
    episode_reward = 0
    update_counter = 0
    total_rewards = []
    while True:
        # sync local weights with the shared model before each rollout
        agent.load_state_dict(shared_model.state_dict())
        if done:
            # fresh recurrent state at episode boundaries
            rnn_state = agent.get_init_states(device)
        else:
            # truncated BPTT: carry state across rollouts but cut the graph
            if cell_type == "lstm":
                rnn_state = rnn_state[0].detach(), rnn_state[1].detach()
            elif cell_type == "gru":
                rnn_state = rnn_state.detach()
        values = []
        log_probs = []
        rewards = []
        entropies = []
        for _ in range(n_step_update):
            logit, value, rnn_state = agent(
                T.tensor([state]).float().to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                rnn_state
            )
            logit = logit.squeeze(0)
            prob = F.softmax(logit, dim=-1)
            log_prob = F.log_softmax(logit, dim=-1)
            entropy = -(log_prob * prob).sum(1, keepdim=True)
            entropies += [entropy]
            # sample an action; detach so the sample is not backpropped
            action = prob.multinomial(num_samples=1).detach()
            log_prob = log_prob.gather(1, action)
            state, reward, done, _ = env.step(int(action))
            episode_reward += reward
            # feed back the chosen action (one-hot) and obtained reward
            p_action = np.eye(env.n_actions)[int(action)]
            p_reward = reward
            log_probs += [log_prob]
            values += [value]
            rewards += [reward]
            if done:
                state = env.reset()
                total_rewards += [episode_reward]
                avg_reward_100 = np.array(total_rewards[-100:]).mean()
                writer.add_scalar("perf/reward_t", episode_reward, env.episode_num)
                writer.add_scalar("perf/avg_reward_100", avg_reward_100, env.episode_num)
                episode_reward = 0
                if env.episode_num % save_interval == 0:
                    T.save({
                        "state_dict": shared_model.state_dict(),
                        "avg_reward_100": avg_reward_100,
                    }, save_path.format(epi=env.episode_num) + ".pt")
                break
        # bootstrap the return with the critic unless the episode ended
        R = T.zeros(1, 1).to(device)
        if not done:
            _, value, _ = agent(
                T.tensor([state]).float().to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                rnn_state
            )
            R = value.detach()
        values += [R]
        policy_loss = 0
        value_loss = 0
        gae = T.zeros(1, 1).to(device)
        for i in reversed(range(len(rewards))):
            R = gamma * R + rewards[i]
            advantage = R - values[i]
            value_loss = value_loss + 0.5 * advantage.pow(2)
            # Generalized Advantage Estimation
            delta_t = rewards[i] + gamma * values[i + 1] - values[i]
            gae = gae * gamma * gae_lambda + delta_t
            policy_loss = policy_loss - \
                log_probs[i] * gae.detach() - entropy_coeff * entropies[i]
        loss = policy_loss + val_coeff * value_loss
        optimizer.zero_grad()
        loss.backward()
        # push local gradients into the shared model parameters
        ensure_shared_grads(agent, shared_model)
        optimizer.step()
        update_counter += 1
        writer.add_scalar("losses/total_loss", loss.item(), update_counter)
        if env.episode_num > env.n_episodes:
            np.save(os.path.join(os.path.dirname(save_path), f"rewards_{rank}.npy"), env.reward_counter)
            break
def train_stacked(config,
        shared_model,
        optimizer,
        rank
    ):
    """A3C worker loop for the stacked-LSTM agent on Harlow_1D.

    Mirrors `train`, but the agent carries two recurrent states (one per
    LSTM layer). Gradients are pushed into `shared_model` through the
    shared `optimizer`. `rank` identifies the worker for seeding/logging.
    """
    # per-worker seeding so workers explore differently
    T.manual_seed(config["seed"] + rank)
    np.random.seed(config["seed"] + rank)
    T.random.manual_seed(config["seed"] + rank)
    device = config["device"]
    env = Harlow_1D()
    agent = A3C_StackedLSTM(
        config["task"]["input-dim"],
        config["agent"]["mem-units"],
        config["task"]["num-actions"],
        device=config["device"]
    )
    agent.to(device)
    agent.train()
    ### hyper-parameters ###
    gamma = config["agent"]["gamma"]
    gae_lambda = config["agent"]["gae-lambda"]
    val_coeff = config["agent"]["value-loss-weight"]
    entropy_coeff = config["agent"]["entropy-weight"]
    n_step_update = config["agent"]["n-step-update"]
    writer = SummaryWriter(log_dir=os.path.join(config["log-path"], config["run-title"] + f"_{rank}"))
    save_path = os.path.join(config["save-path"], config["run-title"], config["run-title"]+"_{epi:04d}")
    save_interval = config["save-interval"]
    done = True
    state = env.reset()
    p_action, p_reward = [0]*config["task"]["num-actions"], 0
    print('='*50)
    print(f"Starting Worker {rank}")
    print('='*50)
    episode_reward = 0
    update_counter = 0
    total_rewards = []
    while True:
        # Fix: sync local weights with the shared model before each rollout.
        # This line was missing here (unlike the sibling `train` and
        # `train_episodic` loops), so the worker's local policy never
        # received the optimizer's updates to the shared parameters.
        agent.load_state_dict(shared_model.state_dict())
        if done:
            # fresh recurrent states at episode boundaries
            h_t1, c_t1 = agent.get_init_states(layer=1)
            h_t2, c_t2 = agent.get_init_states(layer=2)
        else:
            # truncated BPTT: carry states across rollouts but cut the graph
            h_t1, c_t1 = h_t1.detach(), c_t1.detach()
            h_t2, c_t2 = h_t2.detach(), c_t2.detach()
        values = []
        log_probs = []
        rewards = []
        entropies = []
        for _ in range(n_step_update):
            logit, value, (h_t1, c_t1), (h_t2, c_t2) = agent(
                T.tensor([state]).float().to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                (h_t1, c_t1), (h_t2, c_t2)
            )
            logit = logit.squeeze(0)
            prob = F.softmax(logit, dim=-1)
            log_prob = F.log_softmax(logit, dim=-1)
            entropy = -(log_prob * prob).sum(1, keepdim=True)
            entropies += [entropy]
            # sample an action; detach so the sample is not backpropped
            action = prob.multinomial(num_samples=1).detach()
            log_prob = log_prob.gather(1, action)
            state, reward, done, _ = env.step(int(action))
            episode_reward += reward
            # feed back the chosen action (one-hot) and obtained reward
            p_action = np.eye(env.n_actions)[int(action)]
            p_reward = reward
            log_probs += [log_prob]
            values += [value]
            rewards += [reward]
            if done:
                state = env.reset()
                total_rewards += [episode_reward]
                avg_reward_100 = np.array(total_rewards[-100:]).mean()
                writer.add_scalar("perf/reward_t", episode_reward, env.episode_num)
                writer.add_scalar("perf/avg_reward_100", avg_reward_100, env.episode_num)
                episode_reward = 0
                if env.episode_num % save_interval == 0:
                    T.save({
                        "state_dict": shared_model.state_dict(),
                        "avg_reward_100": avg_reward_100,
                        "update_counter": update_counter,
                    }, save_path.format(epi=env.episode_num) + ".pt")
                break
        # bootstrap the return with the critic unless the episode ended
        R = T.zeros(1, 1).to(device)
        if not done:
            _, value, _, _ = agent(
                T.tensor([state]).float().to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                (h_t1, c_t1), (h_t2, c_t2)
            )
            R = value.detach()
        values += [R]
        policy_loss = 0
        value_loss = 0
        gae = T.zeros(1, 1).to(device)
        for i in reversed(range(len(rewards))):
            R = gamma * R + rewards[i]
            advantage = R - values[i]
            value_loss = value_loss + 0.5 * advantage.pow(2)
            # Generalized Advantage Estimation
            delta_t = rewards[i] + gamma * values[i + 1] - values[i]
            gae = gae * gamma * gae_lambda + delta_t
            policy_loss = policy_loss - \
                log_probs[i] * gae.detach() - entropy_coeff * entropies[i]
        loss = policy_loss + val_coeff * value_loss
        optimizer.zero_grad()
        loss.backward()
        # push local gradients into the shared model parameters
        ensure_shared_grads(agent, shared_model)
        optimizer.step()
        update_counter += 1
        writer.add_scalar("losses/total_loss", loss.item(), update_counter)
        if env.episode_num > env.n_episodes:
            np.save(os.path.join(os.path.dirname(save_path), f"rewards_{rank}.npy"), env.reward_counter)
            break
def train_episodic(config,
        shared_model,
        optimizer,
        rank
    ):
    """A3C worker loop for the episodic-memory (DND) agent variant.

    Mirrors `train`, with an extra episodic-memory write
    (`agent.save_memory`) after every environment step.
    """
    # per-worker seeding so workers explore differently
    T.manual_seed(config["seed"] + rank)
    np.random.seed(config["seed"] + rank)
    T.random.manual_seed(config["seed"] + rank)
    device = config["device"]
    env = Harlow_1D()
    if config["mode"] == "vanilla":
        agent = A3C_LSTM(
            config["task"]["input-dim"],
            config["agent"]["mem-units"],
            config["task"]["num-actions"],
        )
    elif config["mode"] == "episodic":
        # NOTE(review): A3C_DND_LSTM is not among this module's visible
        # imports — this branch would raise NameError; confirm the import.
        agent =A3C_DND_LSTM(
            config["task"]["input-dim"],
            config["agent"]["mem-units"],
            config["task"]["num-actions"],
            config["agent"]["dict-len"],
            config["agent"]["dict-kernel"]
        )
    else:
        raise ValueError(config["mode"])
    agent.to(device)
    agent.train()
    ### hyper-parameters ###
    gamma = config["agent"]["gamma"]
    gae_lambda = config["agent"]["gae-lambda"]
    val_coeff = config["agent"]["value-loss-weight"]
    entropy_coeff = config["agent"]["entropy-weight"]
    n_step_update = config["agent"]["n-step-update"]
    writer = SummaryWriter(log_dir=os.path.join(config["log-path"], config["run-title"] + f"_{rank}"))
    save_path = os.path.join(config["save-path"], config["run-title"], config["run-title"]+"_{epi:04d}")
    save_interval = config["save-interval"]
    done = True
    state = env.reset()
    # NOTE(review): hard-coded 3-element previous-action vector; the
    # sibling loops use [0]*config["task"]["num-actions"] — confirm
    # num-actions == 3 here.
    p_action, p_reward = [0,0,0], 0
    print('='*50)
    print(f"Starting Trainer {rank}")
    print('='*50)
    episode_reward = 0
    update_counter = 0
    total_rewards = []
    # enable episodic-memory reads and writes for this run
    agent.turn_on_encoding()
    agent.turn_on_retrieval()
    # agent.turn_off_encoding()
    # agent.turn_off_retrieval()
    while True:
        # sync local weights with the shared model before each rollout
        agent.load_state_dict(shared_model.state_dict())
        if done:
            ht, ct = agent.get_init_states(device)
        else:
            # truncated BPTT: carry state across rollouts but cut the graph
            ht, ct = ht.detach(), ct.detach()
        values = []
        log_probs = []
        rewards = []
        entropies = []
        for _ in range(n_step_update):
            # NOTE(review): `rnn_state` returned here is never unpacked back
            # into (ht, ct), so every step of this rollout feeds the agent
            # the same recurrent state — compare `train`, which reassigns
            # it each step; confirm whether this is intended.
            logit, value, rnn_state, feats = agent(
                T.tensor([state]).float().to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                (ht, ct)
            )
            logit = logit.squeeze(0)
            prob = F.softmax(logit, dim=-1)
            log_prob = F.log_softmax(logit, dim=-1)
            entropy = -(log_prob * prob).sum(1, keepdim=True)
            entropies += [entropy]
            action = prob.multinomial(num_samples=1).detach()
            log_prob = log_prob.gather(1, action)
            state, reward, done, _ = env.step(int(action))
            # if reward > 0:
            # write the current features/cell state into episodic memory
            agent.save_memory(feats, ct)
            episode_reward += reward
            # feed back the chosen action (one-hot) and obtained reward
            p_action = np.eye(env.n_actions)[int(action)]
            p_reward = reward
            log_probs += [log_prob]
            values += [value]
            rewards += [reward]
            if done:
                state = env.reset()
                total_rewards += [episode_reward]
                avg_reward_100 = np.array(total_rewards[-100:]).mean()
                writer.add_scalar("perf/reward_t", episode_reward, env.episode_num)
                writer.add_scalar("perf/avg_reward_100", avg_reward_100, env.episode_num)
                episode_reward = 0
                # only every 4th worker writes checkpoints
                if env.episode_num % save_interval == 0 and rank % 4 == 0:
                    T.save({
                        "state_dict": shared_model.state_dict(),
                        "avg_reward_100": avg_reward_100,
                        "update_counter": update_counter
                    }, save_path.format(epi=env.episode_num) + ".pt")
                break
        # bootstrap the return with the critic unless the episode ended
        R = T.zeros(1, 1).to(device)
        if not done:
            _, value, _, _ = agent(
                T.tensor([state]).float().to(device), (
                T.tensor([p_action]).float().to(device),
                T.tensor([[p_reward]]).float().to(device)),
                (ht, ct)
            )
            R = value.detach()
        values += [R]
        policy_loss = 0
        value_loss = 0
        gae = T.zeros(1, 1).to(device)
        for i in reversed(range(len(rewards))):
            R = gamma * R + rewards[i]
            advantage = R - values[i]
            value_loss = value_loss + 0.5 * advantage.pow(2)
            # Generalized Advantage Estimation
            delta_t = rewards[i] + gamma * values[i + 1] - values[i]
            gae = gae * gamma * gae_lambda + delta_t
            policy_loss = policy_loss - \
                log_probs[i] * gae.detach() - entropy_coeff * entropies[i]
        loss = policy_loss + val_coeff * value_loss
        optimizer.zero_grad()
        loss.backward()
        # push local gradients into the shared model parameters
        ensure_shared_grads(agent, shared_model)
        optimizer.step()
        update_counter += 1
        writer.add_scalar("losses/total_loss", loss.item(), update_counter)
        if env.episode_num > env.n_episodes:
            if rank % 2 == 0:
                np.save(os.path.join(os.path.dirname(save_path), f"{rank}_rewards.npy"), env.reward_counter)
            break
| 15,561 | 30.502024 | 108 | py |
FEAT | FEAT-master/pretrain.py | import argparse
import os
import os.path as osp
import shutil
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from model.models.classifier import Classifier
from model.dataloader.samplers import CategoriesSampler
from model.utils import pprint, set_gpu, ensure_path, Averager, Timer, count_acc, euclidean_metric
from tensorboardX import SummaryWriter
from tqdm import tqdm
# pre-train model, compute validation acc after 500 epoches
if __name__ == '__main__':
    # Pre-train a standard classifier on all training classes; periodically
    # evaluates it as a few-shot learner (1-shot, all-way) on the validation
    # split, tracking both a distance-based and a similarity-based accuracy.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--max_epoch', type=int, default=500)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
    parser.add_argument('--dataset', type=str, default='MiniImageNet', choices=['MiniImageNet', 'TieredImagenet', 'CUB'])
    parser.add_argument('--backbone_class', type=str, default='Res12', choices=['ConvNet', 'Res12'])
    parser.add_argument('--schedule', type=int, nargs='+', default=[75, 150, 300], help='Decrease learning rate at these epochs.')
    parser.add_argument('--gamma', type=float, default=0.1)
    parser.add_argument('--query', type=int, default=15)
    # NOTE(review): argparse `type=bool` converts any non-empty string
    # (including "False") to True — passing --resume False still resumes.
    parser.add_argument('--resume', type=bool, default=False)
    args = parser.parse_args()
    args.orig_imsize = -1
    pprint(vars(args))
    # Checkpoint directory: <dataset>-<backbone>-Pre/<lr>_<gamma>_<schedule>
    save_path1 = '-'.join([args.dataset, args.backbone_class, 'Pre'])
    save_path2 = '_'.join([str(args.lr), str(args.gamma), str(args.schedule)])
    args.save_path = osp.join(save_path1, save_path2)
    if not osp.exists(save_path1):
        os.mkdir(save_path1)
    ensure_path(args.save_path)
    if args.dataset == 'MiniImageNet':
        # Handle MiniImageNet
        from model.dataloader.mini_imagenet import MiniImageNet as Dataset
    elif args.dataset == 'CUB':
        from model.dataloader.cub import CUB as Dataset
    elif args.dataset == 'TieredImagenet':
        from model.dataloader.tiered_imagenet import tieredImageNet as Dataset
    else:
        raise ValueError('Non-supported Dataset.')
    trainset = Dataset('train', args, augment=True)
    train_loader = DataLoader(dataset=trainset, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True)
    args.num_class = trainset.num_class
    valset = Dataset('val', args)
    # Each validation episode draws 1 support + `query` query images per class.
    val_sampler = CategoriesSampler(valset.label, 200, valset.num_class, 1 + args.query) # test on 16-way 1-shot
    val_loader = DataLoader(dataset=valset, batch_sampler=val_sampler, num_workers=8, pin_memory=True)
    args.way = valset.num_class
    args.shot = 1
    # construct model
    model = Classifier(args)
    # Adam for the ConvNet backbone, SGD+Nesterov for ResNet backbones.
    if 'Conv' in args.backbone_class:
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=0.0005)
    elif 'Res' in args.backbone_class:
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, nesterov=True, weight_decay=0.0005)
    else:
        raise ValueError('No Such Encoder')
    criterion = torch.nn.CrossEntropyLoss()
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
        if args.ngpu > 1:
            model.encoder = torch.nn.DataParallel(model.encoder, device_ids=list(range(args.ngpu)))
        model = model.cuda()
        criterion = criterion.cuda()
    # Save only the model weights under the given name.
    def save_model(name):
        torch.save(dict(params=model.state_dict()), osp.join(args.save_path, name + '.pth'))
    # Save a full resumable checkpoint (closure over epoch/trlog/optimizer/...).
    def save_checkpoint(is_best, filename='checkpoint.pth.tar'):
        state = {'epoch': epoch + 1,
                 'args': args,
                 'state_dict': model.state_dict(),
                 'trlog': trlog,
                 'val_acc_dist': trlog['max_acc_dist'],
                 'val_acc_sim': trlog['max_acc_sim'],
                 'optimizer' : optimizer.state_dict(),
                 'global_count': global_count}
        torch.save(state, osp.join(args.save_path, filename))
        if is_best:
            shutil.copyfile(osp.join(args.save_path, filename), osp.join(args.save_path, 'model_best.pth.tar'))
    if args.resume == True:
        # load checkpoint
        state = torch.load(osp.join(args.save_path, 'model_best.pth.tar'))
        init_epoch = state['epoch']
        resumed_state = state['state_dict']
        # resumed_state = {'module.'+k:v for k,v in resumed_state.items()}
        model.load_state_dict(resumed_state)
        trlog = state['trlog']
        optimizer.load_state_dict(state['optimizer'])
        initial_lr = optimizer.param_groups[0]['lr']
        global_count = state['global_count']
    else:
        # Fresh run: initialize the training log and counters.
        init_epoch = 1
        trlog = {}
        trlog['args'] = vars(args)
        trlog['train_loss'] = []
        trlog['val_loss_dist'] = []
        trlog['val_loss_sim'] = []
        trlog['train_acc'] = []
        trlog['val_acc_sim'] = []
        trlog['val_acc_dist'] = []
        trlog['max_acc_dist'] = 0.0
        trlog['max_acc_dist_epoch'] = 0
        trlog['max_acc_sim'] = 0.0
        trlog['max_acc_sim_epoch'] = 0
        initial_lr = args.lr
        global_count = 0
    timer = Timer()
    writer = SummaryWriter(logdir=args.save_path)
    for epoch in range(init_epoch, args.max_epoch + 1):
        # refine the step-size: decay lr by gamma at each scheduled epoch
        if epoch in args.schedule:
            initial_lr *= args.gamma
            for param_group in optimizer.param_groups:
                param_group['lr'] = initial_lr
        model.train()
        tl = Averager()
        ta = Averager()
        for i, batch in enumerate(train_loader, 1):
            global_count = global_count + 1
            if torch.cuda.is_available():
                data, label = [_.cuda() for _ in batch]
                label = label.type(torch.cuda.LongTensor)
            else:
                data, label = batch
                label = label.type(torch.LongTensor)
            logits = model(data)
            loss = criterion(logits, label)
            acc = count_acc(logits, label)
            writer.add_scalar('data/loss', float(loss), global_count)
            writer.add_scalar('data/acc', float(acc), global_count)
            if (i-1) % 100 == 0:
                print('epoch {}, train {}/{}, loss={:.4f} acc={:.4f}'.format(epoch, i, len(train_loader), loss.item(), acc))
            tl.add(loss.item())
            ta.add(acc)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        tl = tl.item()
        ta = ta.item()
        # Validate every 5th epoch early on, and every epoch once past epoch 100.
        if epoch > 100 or (epoch-1) % 5 == 0:
            model.eval()
            vl_dist = Averager()
            va_dist = Averager()
            vl_sim = Averager()
            va_sim = Averager()
            print('[Dist] best epoch {}, current best val acc={:.4f}'.format(trlog['max_acc_dist_epoch'], trlog['max_acc_dist']))
            print('[Sim] best epoch {}, current best val acc={:.4f}'.format(trlog['max_acc_sim_epoch'], trlog['max_acc_sim']))
            # test performance with Few-Shot
            label = torch.arange(valset.num_class).repeat(args.query)
            if torch.cuda.is_available():
                label = label.type(torch.cuda.LongTensor)
            else:
                label = label.type(torch.LongTensor)
            with torch.no_grad():
                for i, batch in tqdm(enumerate(val_loader, 1)):
                    if torch.cuda.is_available():
                        data, _ = [_.cuda() for _ in batch]
                    else:
                        data, _ = batch
                    # First num_class images are the 1-shot supports, rest are queries.
                    data_shot, data_query = data[:valset.num_class], data[valset.num_class:] # 16-way test
                    logits_dist, logits_sim = model.forward_proto(data_shot, data_query, valset.num_class)
                    loss_dist = F.cross_entropy(logits_dist, label)
                    acc_dist = count_acc(logits_dist, label)
                    loss_sim = F.cross_entropy(logits_sim, label)
                    acc_sim = count_acc(logits_sim, label)
                    vl_dist.add(loss_dist.item())
                    va_dist.add(acc_dist)
                    vl_sim.add(loss_sim.item())
                    va_sim.add(acc_sim)
            vl_dist = vl_dist.item()
            va_dist = va_dist.item()
            vl_sim = vl_sim.item()
            va_sim = va_sim.item()
            writer.add_scalar('data/val_loss_dist', float(vl_dist), epoch)
            writer.add_scalar('data/val_acc_dist', float(va_dist), epoch)
            writer.add_scalar('data/val_loss_sim', float(vl_sim), epoch)
            writer.add_scalar('data/val_acc_sim', float(va_sim), epoch)
            print('epoch {}, val, loss_dist={:.4f} acc_dist={:.4f} loss_sim={:.4f} acc_sim={:.4f}'.format(epoch, vl_dist, va_dist, vl_sim, va_sim))
            # Track the best checkpoint under each of the two metrics separately.
            if va_dist > trlog['max_acc_dist']:
                trlog['max_acc_dist'] = va_dist
                trlog['max_acc_dist_epoch'] = epoch
                save_model('max_acc_dist')
                save_checkpoint(True)
            if va_sim > trlog['max_acc_sim']:
                trlog['max_acc_sim'] = va_sim
                trlog['max_acc_sim_epoch'] = epoch
                save_model('max_acc_sim')
                save_checkpoint(True)
            trlog['train_loss'].append(tl)
            trlog['train_acc'].append(ta)
            trlog['val_loss_dist'].append(vl_dist)
            trlog['val_acc_dist'].append(va_dist)
            trlog['val_loss_sim'].append(vl_sim)
            trlog['val_acc_sim'].append(va_sim)
            save_model('epoch-last')
        print('ETA:{}/{}'.format(timer.measure(), timer.measure(epoch / args.max_epoch)))
    writer.close()
    # NOTE(review): leftover debugger breakpoint — halts the process after
    # training finishes; remove for unattended runs.
    import pdb
    pdb.set_trace() | 9,931 | 42.946903 | 147 | py |
FEAT | FEAT-master/train_fsl.py | import numpy as np
import torch
from model.trainer.fsl_trainer import FSLTrainer
from model.utils import (
pprint, set_gpu,
get_command_line_parser,
postprocess_args,
)
# from ipdb import launch_ipdb_on_exception
if __name__ == '__main__':
    # Entry point: parse CLI options, train the few-shot learner, then
    # evaluate the best checkpoint on the test split and record the result.
    parser = get_command_line_parser()
    args = postprocess_args(parser.parse_args())
    # with launch_ipdb_on_exception():
    pprint(vars(args))
    set_gpu(args.gpu)
    trainer = FSLTrainer(args)
    trainer.train()
    trainer.evaluate_test()
    trainer.final_record()
    print(args.save_path)
| 561 | 20.615385 | 48 | py |
FEAT | FEAT-master/model/data_parallel.py | from torch.nn.parallel import DataParallel
import torch
from torch.nn.parallel._functions import Scatter
from torch.nn.parallel.parallel_apply import parallel_apply
def scatter(inputs, target_gpus, chunk_sizes, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.

    Tuples/lists/dicts are recursed into element-wise; any other non-tensor
    object is replicated once per entry of ``target_gpus``.
    """
    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            try:
                return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
            # Fix: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception. The debug dump + quit()
            # behavior on a failed scatter is kept as-is.
            except Exception:
                print('obj', obj.size())
                print('dim', dim)
                print('chunk_sizes', chunk_sizes)
                quit()
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        # Non-collection, non-tensor objects are replicated once per device.
        return [obj for _ in target_gpus]
    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, chunk_sizes, dim=0):
    r"""Scatter positional and keyword arguments across devices, padding the
    shorter of the two lists so they line up one-to-one per device."""
    scattered_args = scatter(inputs, target_gpus, chunk_sizes, dim) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, chunk_sizes, dim) if kwargs else []
    # Pad with empty tuples / dicts until both lists have equal length.
    while len(scattered_args) < len(scattered_kwargs):
        scattered_args.append(())
    while len(scattered_kwargs) < len(scattered_args):
        scattered_kwargs.append({})
    return tuple(scattered_args), tuple(scattered_kwargs)
class BalancedDataParallel(DataParallel):
    """DataParallel variant that assigns a custom (usually smaller) batch
    share ``gpu0_bsz`` to GPU 0, which also hosts the gathered outputs.

    With ``gpu0_bsz == 0`` GPU 0 is skipped entirely for the forward pass.
    """
    def __init__(self, gpu0_bsz, *args, **kwargs):
        # gpu0_bsz: number of samples routed to the first device.
        self.gpu0_bsz = gpu0_bsz
        super().__init__(*args, **kwargs)
    def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            # No devices configured: run on the module directly.
            return self.module(*inputs, **kwargs)
        if self.gpu0_bsz == 0:
            # Exclude GPU 0 from receiving any input chunk.
            device_ids = self.device_ids[1:]
        else:
            device_ids = self.device_ids
        inputs, kwargs = self.scatter(inputs, kwargs, device_ids)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        # Replicate on ALL devices, then drop GPU 0's replica if it gets no data.
        replicas = self.replicate(self.module, self.device_ids)
        if self.gpu0_bsz == 0:
            replicas = replicas[1:]
        outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
        return self.gather(outputs, self.output_device)
    def parallel_apply(self, replicas, device_ids, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, device_ids)
    def scatter(self, inputs, kwargs, device_ids):
        # Compute per-device chunk sizes: gpu0 gets gpu0_bsz, the remainder is
        # split evenly across the other devices.
        bsz = inputs[0].size(self.dim)
        num_dev = len(self.device_ids)
        gpu0_bsz = self.gpu0_bsz
        bsz_unit = (bsz - gpu0_bsz) // (num_dev - 1)
        if gpu0_bsz < bsz_unit:
            chunk_sizes = [gpu0_bsz] + [bsz_unit] * (num_dev - 1)
            # Distribute the rounding remainder over the non-gpu0 devices.
            delta = bsz - sum(chunk_sizes)
            for i in range(delta):
                chunk_sizes[i + 1] += 1
            if gpu0_bsz == 0:
                chunk_sizes = chunk_sizes[1:]
        else:
            # gpu0 share is not smaller than the others: fall back to the
            # default equal split.
            return super().scatter(inputs, kwargs, device_ids)
        return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
| 3,764 | 40.373626 | 84 | py |
FEAT | FEAT-master/model/utils.py | import os
import shutil
import time
import pprint
import torch
import argparse
import numpy as np
def one_hot(indices, depth):
    """
    Returns a one-hot tensor.
    This is a PyTorch equivalent of Tensorflow's tf.one_hot.
    Parameters:
      indices:  a (n_batch, m) Tensor or (m) Tensor.
      depth: a scalar. Represents the depth of the one hot dimension.
    Returns: a (n_batch, m, depth) Tensor or (m, depth) Tensor.
    """
    # Allocate a zero tensor with one extra trailing dimension of size `depth`.
    encoded = torch.zeros(indices.size() + torch.Size([depth]))
    if indices.is_cuda:
        encoded = encoded.cuda()
    # Unsqueeze the indices so scatter_ can place a 1 per index along dim 1.
    expanded_idx = indices.view(indices.size() + torch.Size([1]))
    return encoded.scatter_(1, expanded_idx, 1)
def set_gpu(x):
    """Expose only the GPUs in the comma-separated device-id string *x*."""
    os.environ.update({'CUDA_VISIBLE_DEVICES': x})
    print('using gpu:', x)
def ensure_path(dir_path, scripts_to_save=None):
    """Create *dir_path* for an experiment, interactively offering to wipe it
    if it already exists; optionally snapshot source files/dirs into
    ``dir_path/scripts``.

    NOTE: blocks on ``input()`` when the directory exists, so this is only
    suitable for interactive runs.
    """
    if os.path.exists(dir_path):
        # Any answer other than an explicit 'n' confirms removal.
        if input('{} exists, remove? ([y]/n)'.format(dir_path)) != 'n':
            shutil.rmtree(dir_path)
            os.mkdir(dir_path)
    else:
        os.mkdir(dir_path)
    print('Experiment dir : {}'.format(dir_path))
    if scripts_to_save is not None:
        script_path = os.path.join(dir_path, 'scripts')
        if not os.path.exists(script_path):
            os.makedirs(script_path)
        for src_file in scripts_to_save:
            dst_file = os.path.join(dir_path, 'scripts', os.path.basename(src_file))
            print('copy {} to {}'.format(src_file, dst_file))
            # Directories are copied recursively, files individually.
            if os.path.isdir(src_file):
                shutil.copytree(src_file, dst_file)
            else:
                shutil.copyfile(src_file, dst_file)
class Averager():
    """Incrementally maintains the running mean of added values."""

    def __init__(self):
        self.n = 0  # number of values seen so far
        self.v = 0  # current mean (0 before any value is added)

    def add(self, x):
        # Running-mean update: new_mean = (old_mean * n + x) / (n + 1).
        total = self.v * self.n + x
        self.n += 1
        self.v = total / self.n

    def item(self):
        """Return the current mean."""
        return self.v
def count_acc(logits, label):
    """Return the fraction of rows of *logits* whose argmax equals *label*."""
    pred = logits.argmax(dim=1)
    correct = (pred == label)
    if torch.cuda.is_available():
        return correct.type(torch.cuda.FloatTensor).mean().item()
    return correct.type(torch.FloatTensor).mean().item()
def euclidean_metric(a, b):
    """Pairwise negative squared Euclidean distance between rows of *a* (n, d)
    and rows of *b* (m, d); returns an (n, m) tensor (larger = closer)."""
    n, m = a.shape[0], b.shape[0]
    # Broadcast both operands to (n, m, d), then reduce over the feature dim.
    diff = a.unsqueeze(1).expand(n, m, -1) - b.unsqueeze(0).expand(n, m, -1)
    return -diff.pow(2).sum(dim=2)
class Timer():
    """Wall-clock timer; `measure` reports elapsed time in a human unit."""

    def __init__(self):
        self.o = time.time()  # origin timestamp

    def measure(self, p=1):
        """Return elapsed/p as a string: '<x>s', '<x>m', or '<x.x>h'."""
        elapsed = int((time.time() - self.o) / p)
        if elapsed >= 3600:
            return '{:.1f}h'.format(elapsed / 3600)
        elif elapsed >= 60:
            return '{}m'.format(round(elapsed / 60))
        else:
            return '{}s'.format(elapsed)
# Shared pretty-printer instance used by the module-level pprint helper.
_utils_pp = pprint.PrettyPrinter()
def pprint(x):
    """Pretty-print *x* (note: shadows the stdlib ``pprint`` module name)."""
    return _utils_pp.pprint(x)
def compute_confidence_interval(data):
    """
    Compute 95% confidence interval
    :param data: An array of mean accuracy (or mAP) across a number of sampled episodes.
    :return: (mean, half-width) of the 95% confidence interval for this data.
    """
    arr = np.asarray(data, dtype=np.float64)
    mean = arr.mean()
    # 1.96 is the z-score of the two-sided 95% interval for a normal dist.
    half_width = 1.96 * arr.std() / np.sqrt(arr.size)
    return mean, half_width
def postprocess_args(args):
    """Derive `args.num_classes` and build `args.save_path` from the parsed
    CLI options; creates the first-level checkpoint directory if missing.

    Returns the same (mutated) args namespace.
    """
    args.num_classes = args.way
    # First path level encodes the task setup, second level the hyper-params.
    save_path1 = '-'.join([args.dataset, args.model_class, args.backbone_class, '{:02d}w{:02d}s{:02}q'.format(args.way, args.shot, args.query)])
    save_path2 = '_'.join([str('_'.join(args.step_size.split(','))), str(args.gamma),
                           'lr{:.2g}mul{:.2g}'.format(args.lr, args.lr_mul),
                           str(args.lr_scheduler),
                           'T1{}T2{}'.format(args.temperature, args.temperature2),
                           'b{}'.format(args.balance),
                           'bsz{:03d}'.format( max(args.way, args.num_classes)*(args.shot+args.query) ),
                           # str(time.strftime('%Y%m%d_%H%M%S'))
                           ])
    # Tag the path with run variants: pre-trained init, distance metric,
    # frozen BN statistics, and disabled augmentation.
    if args.init_weights is not None:
        save_path1 += '-Pre'
    if args.use_euclidean:
        save_path1 += '-DIS'
    else:
        save_path1 += '-SIM'
    if args.fix_BN:
        save_path2 += '-FBN'
    if not args.augment:
        save_path2 += '-NoAug'
    if not os.path.exists(os.path.join(args.save_dir, save_path1)):
        os.mkdir(os.path.join(args.save_dir, save_path1))
    args.save_path = os.path.join(args.save_dir, save_path1, save_path2)
    return args
def get_command_line_parser():
    """Build the argparse parser holding every training/evaluation option.

    Returns the parser (not yet parsed) so callers can add or override args.
    """
    parser = argparse.ArgumentParser()
    # Episode schedule
    parser.add_argument('--max_epoch', type=int, default=200)
    parser.add_argument('--episodes_per_epoch', type=int, default=100)
    parser.add_argument('--num_eval_episodes', type=int, default=600)
    # Model / backbone / dataset selection
    parser.add_argument('--model_class', type=str, default='FEAT',
                        choices=['MatchNet', 'ProtoNet', 'BILSTM', 'DeepSet', 'GCN', 'FEAT', 'FEATSTAR', 'SemiFEAT', 'SemiProtoFEAT']) # None for MatchNet or ProtoNet
    parser.add_argument('--use_euclidean', action='store_true', default=False)
    parser.add_argument('--backbone_class', type=str, default='ConvNet',
                        choices=['ConvNet', 'Res12', 'Res18', 'WRN'])
    parser.add_argument('--dataset', type=str, default='MiniImageNet',
                        choices=['MiniImageNet', 'TieredImageNet', 'CUB'])
    # Episode shape: N-way K-shot with Q query samples (train vs eval)
    parser.add_argument('--way', type=int, default=5)
    parser.add_argument('--eval_way', type=int, default=5)
    parser.add_argument('--shot', type=int, default=1)
    parser.add_argument('--eval_shot', type=int, default=1)
    parser.add_argument('--query', type=int, default=15)
    parser.add_argument('--eval_query', type=int, default=15)
    parser.add_argument('--balance', type=float, default=0)
    parser.add_argument('--temperature', type=float, default=1)
    parser.add_argument('--temperature2', type=float, default=1) # the temperature in the
    # optimization parameters
    parser.add_argument('--orig_imsize', type=int, default=-1) # -1 for no cache, and -2 for no resize, only for MiniImageNet and CUB
    parser.add_argument('--lr', type=float, default=0.0001)
    parser.add_argument('--lr_mul', type=float, default=10)
    parser.add_argument('--lr_scheduler', type=str, default='step', choices=['multistep', 'step', 'cosine'])
    parser.add_argument('--step_size', type=str, default='20')
    parser.add_argument('--gamma', type=float, default=0.2)
    parser.add_argument('--fix_BN', action='store_true', default=False) # means we do not update the running mean/var in BN, not to freeze BN
    parser.add_argument('--augment', action='store_true', default=False)
    parser.add_argument('--multi_gpu', action='store_true', default=False)
    parser.add_argument('--gpu', default='0')
    parser.add_argument('--init_weights', type=str, default=None)
    # usually untouched parameters
    parser.add_argument('--mom', type=float, default=0.9)
    parser.add_argument('--weight_decay', type=float, default=0.0005) # we find this weight decay value works the best
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--log_interval', type=int, default=50)
    parser.add_argument('--eval_interval', type=int, default=1)
    parser.add_argument('--save_dir', type=str, default='./checkpoints')
    return parser
| 7,275 | 38.32973 | 166 | py |
FEAT | FEAT-master/model/logger.py | import json
import os.path as osp
import numpy as np
from collections import defaultdict, OrderedDict
from tensorboardX import SummaryWriter
class ConfigEncoder(json.JSONEncoder):
    """JSON encoder that serializes classes, enum members, and callables by
    their dotted import path instead of raising TypeError."""

    def default(self, o):
        # Bug fix: `Enum` was referenced but never imported, so any non-type
        # object reaching the isinstance check raised NameError.
        from enum import Enum
        if isinstance(o, type):
            return {'$class': o.__module__ + "." + o.__name__}
        elif isinstance(o, Enum):
            return {
                '$enum': o.__module__ + "." + o.__class__.__name__ + '.' + o.name
            }
        elif callable(o):
            return {
                '$function': o.__module__ + "." + o.__name__
            }
        # Anything else: defer to the base class (raises TypeError).
        return json.JSONEncoder.default(self, o)
class Logger(object):
    """Scalar logger that mirrors every value to TensorBoard (tensorboardX)
    and to an in-memory dict that `dump()` writes to `scalars.json`."""
    def __init__(self, args, log_dir, **kwargs):
        self.logger_path = osp.join(log_dir, 'scalars.json')
        # Extra kwargs are forwarded to tensorboardX's SummaryWriter.
        self.tb_logger = SummaryWriter(
            logdir=osp.join(log_dir, 'tflogger'),
            **kwargs,
        )
        # Persist the run configuration next to the scalar log.
        self.log_config(vars(args))
        # scalars[key][counter] = value, insertion-ordered per key.
        self.scalars = defaultdict(OrderedDict)
    def add_scalar(self, key, value, counter):
        # Each (key, counter) pair may only be written once.
        assert self.scalars[key].get(counter, None) is None, 'counter should be distinct'
        self.scalars[key][counter] = value
        self.tb_logger.add_scalar(key, value, counter)
    def log_config(self, variant_data):
        """Write the run config as configs.json beside the scalar log."""
        config_filepath = osp.join(osp.dirname(self.logger_path), 'configs.json')
        with open(config_filepath, "w") as fd:
            json.dump(variant_data, fd, indent=2, sort_keys=True, cls=ConfigEncoder)
    def dump(self):
        """Flush all collected scalars to scalars.json (overwrites)."""
        with open(self.logger_path, 'w') as fd:
            json.dump(self.scalars, fd, indent=2) | 1,621 | 35.863636 | 89 | py |
FEAT | FEAT-master/model/__init__.py | 0 | 0 | 0 | py | |
FEAT | FEAT-master/model/trainer/base.py | import abc
import torch
import os.path as osp
from model.utils import (
ensure_path,
Averager, Timer, count_acc,
compute_confidence_interval,
)
from model.logger import Logger
class Trainer(object, metaclass=abc.ABCMeta):
    """Abstract training-loop base: owns the logger, step/epoch counters,
    stage timers, and best-accuracy bookkeeping. Subclasses implement
    train/evaluate/evaluate_test/final_record and must provide
    `self.model`, `self.optimizer`, and `self.val_loader`."""
    def __init__(self, args):
        self.args = args
        # ensure_path(
        #     self.args.save_path,
        #     scripts_to_save=['model/models', 'model/networks', __file__],
        # )
        self.logger = Logger(args, osp.join(args.save_path))
        self.train_step = 0
        self.train_epoch = 0
        self.max_steps = args.episodes_per_epoch * args.max_epoch
        # Stage timers: data loading, forward, backward, optimizer step.
        self.dt, self.ft = Averager(), Averager()
        self.bt, self.ot = Averager(), Averager()
        self.timer = Timer()
        # train statistics
        self.trlog = {}
        self.trlog['max_acc'] = 0.0
        self.trlog['max_acc_epoch'] = 0
        self.trlog['max_acc_interval'] = 0.0
    @abc.abstractmethod
    def train(self):
        pass
    @abc.abstractmethod
    def evaluate(self, data_loader):
        pass
    @abc.abstractmethod
    def evaluate_test(self, data_loader):
        pass
    @abc.abstractmethod
    def final_record(self):
        pass
    def try_evaluate(self, epoch):
        """Run validation every `eval_interval` epochs and checkpoint the
        model whenever validation accuracy matches or beats the best so far."""
        args = self.args
        if self.train_epoch % args.eval_interval == 0:
            vl, va, vap = self.evaluate(self.val_loader)
            self.logger.add_scalar('val_loss', float(vl), self.train_epoch)
            self.logger.add_scalar('val_acc', float(va), self.train_epoch)
            print('epoch {}, val, loss={:.4f} acc={:.4f}+{:.4f}'.format(epoch, vl, va, vap))
            if va >= self.trlog['max_acc']:
                self.trlog['max_acc'] = va
                self.trlog['max_acc_interval'] = vap
                self.trlog['max_acc_epoch'] = self.train_epoch
                self.save_model('max_acc')
    def try_logging(self, tl1, tl2, ta, tg=None):
        """Every `log_interval` steps, print and log the running totals
        (tl1=total loss, tl2=loss, ta=accuracy, tg=optional grad norm)."""
        args = self.args
        if self.train_step % args.log_interval == 0:
            print('epoch {}, train {:06g}/{:06g}, total loss={:.4f}, loss={:.4f} acc={:.4f}, lr={:.4g}'
                  .format(self.train_epoch,
                          self.train_step,
                          self.max_steps,
                          tl1.item(), tl2.item(), ta.item(),
                          self.optimizer.param_groups[0]['lr']))
            self.logger.add_scalar('train_total_loss', tl1.item(), self.train_step)
            self.logger.add_scalar('train_loss', tl2.item(), self.train_step)
            self.logger.add_scalar('train_acc', ta.item(), self.train_step)
            if tg is not None:
                self.logger.add_scalar('grad_norm', tg.item(), self.train_step)
            print('data_timer: {:.2f} sec, ' \
                  'forward_timer: {:.2f} sec,' \
                  'backward_timer: {:.2f} sec, ' \
                  'optim_timer: {:.2f} sec'.format(
                      self.dt.item(), self.ft.item(),
                      self.bt.item(), self.ot.item())
                  )
            self.logger.dump()
    def save_model(self, name):
        """Save model weights to `<save_path>/<name>.pth` under key 'params'."""
        torch.save(
            dict(params=self.model.state_dict()),
            osp.join(self.args.save_path, name + '.pth')
        )
    def __str__(self):
        return "{}({})".format(
            self.__class__.__name__,
            self.model.__class__.__name__
        )
| 3,407 | 33.77551 | 103 | py |
FEAT | FEAT-master/model/trainer/fsl_trainer.py | import time
import os.path as osp
import numpy as np
import torch
import torch.nn.functional as F
from model.trainer.base import Trainer
from model.trainer.helpers import (
get_dataloader, prepare_model, prepare_optimizer,
)
from model.utils import (
pprint, ensure_path,
Averager, Timer, count_acc, one_hot,
compute_confidence_interval,
)
from tensorboardX import SummaryWriter
from collections import deque
from tqdm import tqdm
class FSLTrainer(Trainer):
    """Episodic few-shot trainer: optimizes the model on sampled episodes,
    validates each epoch, and reports test accuracy with a 95% CI."""
    def __init__(self, args):
        super().__init__(args)
        self.train_loader, self.val_loader, self.test_loader = get_dataloader(args)
        self.model, self.para_model = prepare_model(args)
        self.optimizer, self.lr_scheduler = prepare_optimizer(self.model, args)
    def prepare_label(self):
        """Build episode labels: `label` for the query set (way*query) and
        `label_aux` for support+query, matching the sampler's class order."""
        args = self.args
        # prepare one-hot label
        label = torch.arange(args.way, dtype=torch.int16).repeat(args.query)
        label_aux = torch.arange(args.way, dtype=torch.int8).repeat(args.shot + args.query)
        label = label.type(torch.LongTensor)
        label_aux = label_aux.type(torch.LongTensor)
        if torch.cuda.is_available():
            label = label.cuda()
            label_aux = label_aux.cuda()
        return label, label_aux
    def train(self):
        """Main optimization loop; checkpoints via try_evaluate each epoch."""
        args = self.args
        self.model.train()
        # fix_BN: keep encoder BatchNorm in eval mode (running stats frozen).
        if self.args.fix_BN:
            self.model.encoder.eval()
        # start FSL training
        label, label_aux = self.prepare_label()
        for epoch in range(1, args.max_epoch + 1):
            self.train_epoch += 1
            self.model.train()
            if self.args.fix_BN:
                self.model.encoder.eval()
            tl1 = Averager()
            tl2 = Averager()
            ta = Averager()
            start_tm = time.time()
            for batch in self.train_loader:
                self.train_step += 1
                if torch.cuda.is_available():
                    data, gt_label = [_.cuda() for _ in batch]
                else:
                    # NOTE: gt_label is unpacked but unused; episode labels
                    # come from prepare_label() instead.
                    data, gt_label = batch[0], batch[1]
                data_tm = time.time()
                self.dt.add(data_tm - start_tm)
                # get saved centers
                logits, reg_logits = self.para_model(data)
                if reg_logits is not None:
                    loss = F.cross_entropy(logits, label)
                    # Auxiliary regularization term weighted by args.balance.
                    total_loss = loss + args.balance * F.cross_entropy(reg_logits, label_aux)
                else:
                    loss = F.cross_entropy(logits, label)
                    # NOTE(review): recomputes the same cross_entropy as
                    # `loss`; `total_loss = loss` would be equivalent.
                    total_loss = F.cross_entropy(logits, label)
                tl2.add(loss)
                forward_tm = time.time()
                self.ft.add(forward_tm - data_tm)
                acc = count_acc(logits, label)
                tl1.add(total_loss.item())
                ta.add(acc)
                self.optimizer.zero_grad()
                total_loss.backward()
                backward_tm = time.time()
                self.bt.add(backward_tm - forward_tm)
                self.optimizer.step()
                optimizer_tm = time.time()
                self.ot.add(optimizer_tm - backward_tm)
                # refresh start_tm
                start_tm = time.time()
            self.lr_scheduler.step()
            self.try_evaluate(epoch)
            print('ETA:{}/{}'.format(
                self.timer.measure(),
                self.timer.measure(self.train_epoch / args.max_epoch))
            )
        torch.save(self.trlog, osp.join(args.save_path, 'trlog'))
        self.save_model('epoch-last')
    def evaluate(self, data_loader):
        """Evaluate on `num_eval_episodes` episodes; returns mean loss, mean
        accuracy, and the accuracy's 95% CI half-width."""
        # restore model args
        args = self.args
        # evaluation mode
        self.model.eval()
        record = np.zeros((args.num_eval_episodes, 2)) # loss and acc
        label = torch.arange(args.eval_way, dtype=torch.int16).repeat(args.eval_query)
        label = label.type(torch.LongTensor)
        if torch.cuda.is_available():
            label = label.cuda()
        print('best epoch {}, best val acc={:.4f} + {:.4f}'.format(
            self.trlog['max_acc_epoch'],
            self.trlog['max_acc'],
            self.trlog['max_acc_interval']))
        with torch.no_grad():
            for i, batch in enumerate(data_loader, 1):
                if torch.cuda.is_available():
                    data, _ = [_.cuda() for _ in batch]
                else:
                    data = batch[0]
                logits = self.model(data)
                loss = F.cross_entropy(logits, label)
                acc = count_acc(logits, label)
                record[i-1, 0] = loss.item()
                record[i-1, 1] = acc
        # Sanity check: loader yielded exactly as many episodes as expected.
        assert(i == record.shape[0])
        vl, _ = compute_confidence_interval(record[:,0])
        va, vap = compute_confidence_interval(record[:,1])
        # train mode
        self.model.train()
        if self.args.fix_BN:
            self.model.encoder.eval()
        return vl, va, vap
    def evaluate_test(self):
        """Load the best ('max_acc') checkpoint and run 10000 test episodes;
        stores test loss/acc/CI in trlog and returns them."""
        # restore model args
        args = self.args
        # evaluation mode
        self.model.load_state_dict(torch.load(osp.join(self.args.save_path, 'max_acc.pth'))['params'])
        self.model.eval()
        record = np.zeros((10000, 2)) # loss and acc
        label = torch.arange(args.eval_way, dtype=torch.int16).repeat(args.eval_query)
        label = label.type(torch.LongTensor)
        if torch.cuda.is_available():
            label = label.cuda()
        print('best epoch {}, best val acc={:.4f} + {:.4f}'.format(
            self.trlog['max_acc_epoch'],
            self.trlog['max_acc'],
            self.trlog['max_acc_interval']))
        with torch.no_grad():
            for i, batch in tqdm(enumerate(self.test_loader, 1)):
                if torch.cuda.is_available():
                    data, _ = [_.cuda() for _ in batch]
                else:
                    data = batch[0]
                logits = self.model(data)
                loss = F.cross_entropy(logits, label)
                acc = count_acc(logits, label)
                record[i-1, 0] = loss.item()
                record[i-1, 1] = acc
        assert(i == record.shape[0])
        vl, _ = compute_confidence_interval(record[:,0])
        va, vap = compute_confidence_interval(record[:,1])
        self.trlog['test_acc'] = va
        self.trlog['test_acc_interval'] = vap
        self.trlog['test_loss'] = vl
        print('best epoch {}, best val acc={:.4f} + {:.4f}\n'.format(
            self.trlog['max_acc_epoch'],
            self.trlog['max_acc'],
            self.trlog['max_acc_interval']))
        print('Test acc={:.4f} + {:.4f}\n'.format(
            self.trlog['test_acc'],
            self.trlog['test_acc_interval']))
        return vl, va, vap
    def final_record(self):
        # save the best performance in a txt file (file NAME encodes acc+CI)
        with open(osp.join(self.args.save_path, '{}+{}'.format(self.trlog['test_acc'], self.trlog['test_acc_interval'])), 'w') as f:
            f.write('best epoch {}, best val acc={:.4f} + {:.4f}\n'.format(
                self.trlog['max_acc_epoch'],
                self.trlog['max_acc'],
                self.trlog['max_acc_interval']))
            f.write('Test acc={:.4f} + {:.4f}\n'.format(
                self.trlog['test_acc'],
                self.trlog['test_acc_interval'])) | 7,495 | 35.038462 | 132 | py |
FEAT | FEAT-master/model/trainer/__init__.py | 0 | 0 | 0 | py | |
FEAT | FEAT-master/model/trainer/helpers.py | import torch
import torch.nn as nn
import numpy as np
import torch.optim as optim
from torch.utils.data import DataLoader
from model.dataloader.samplers import CategoriesSampler, RandomSampler, ClassSampler
from model.models.protonet import ProtoNet
from model.models.matchnet import MatchNet
from model.models.feat import FEAT
from model.models.featstar import FEATSTAR
from model.models.deepset import DeepSet
from model.models.bilstm import BILSTM
from model.models.graphnet import GCN
from model.models.semi_feat import SemiFEAT
from model.models.semi_protofeat import SemiProtoFEAT
class MultiGPUDataloader:
    """Groups every `num_device` consecutive batches of a dataloader into one
    yield, stacking corresponding tensors along a new leading dimension."""

    def __init__(self, dataloader, num_device):
        self.dataloader = dataloader
        self.num_device = num_device

    def __len__(self):
        # Leftover batches that cannot fill a full device group are dropped.
        return len(self.dataloader) // self.num_device

    def __iter__(self):
        it = iter(self.dataloader)
        while True:
            try:
                # Two slots: one per element of each (data, label)-style batch.
                gathered = ([], [])
                for _ in range(self.num_device):
                    sample = next(it)
                    for slot_idx, tensor in enumerate(sample):
                        gathered[slot_idx].append(tensor[None])
                # Lazily concatenate each slot along the new device axis.
                yield (torch.cat(parts, dim=0) for parts in gathered)
            except StopIteration:
                return
def get_dataloader(args):
    """Build episodic (train, val, test) DataLoaders for the chosen dataset.

    Side effect: sets `args.num_class` from the training split. The test
    sampler draws a fixed 10000 episodes.
    """
    if args.dataset == 'MiniImageNet':
        # Handle MiniImageNet
        from model.dataloader.mini_imagenet import MiniImageNet as Dataset
    elif args.dataset == 'CUB':
        from model.dataloader.cub import CUB as Dataset
    elif args.dataset == 'TieredImageNet':
        from model.dataloader.tiered_imagenet import tieredImageNet as Dataset
    else:
        raise ValueError('Non-supported Dataset.')
    # Scale episodes/workers with the device count when multi_gpu is set.
    num_device = torch.cuda.device_count()
    num_episodes = args.episodes_per_epoch*num_device if args.multi_gpu else args.episodes_per_epoch
    num_workers=args.num_workers*num_device if args.multi_gpu else args.num_workers
    trainset = Dataset('train', args, augment=args.augment)
    args.num_class = trainset.num_class
    train_sampler = CategoriesSampler(trainset.label,
                                      num_episodes,
                                      max(args.way, args.num_classes),
                                      args.shot + args.query)
    train_loader = DataLoader(dataset=trainset,
                              num_workers=num_workers,
                              batch_sampler=train_sampler,
                              pin_memory=True)
    #if args.multi_gpu and num_device > 1:
        #train_loader = MultiGPUDataloader(train_loader, num_device)
        #args.way = args.way * num_device
    valset = Dataset('val', args)
    val_sampler = CategoriesSampler(valset.label,
                            args.num_eval_episodes,
                            args.eval_way, args.eval_shot + args.eval_query)
    val_loader = DataLoader(dataset=valset,
                            batch_sampler=val_sampler,
                            num_workers=args.num_workers,
                            pin_memory=True)
    testset = Dataset('test', args)
    test_sampler = CategoriesSampler(testset.label,
                            10000, # args.num_eval_episodes,
                            args.eval_way, args.eval_shot + args.eval_query)
    test_loader = DataLoader(dataset=testset,
                            batch_sampler=test_sampler,
                            num_workers=args.num_workers,
                            pin_memory=True)
    return train_loader, val_loader, test_loader
def prepare_model(args):
    """Instantiate the model named by `args.model_class`, optionally load
    pre-trained encoder weights, and move it to the available device.

    Returns (model, para_model); with multi_gpu the encoder is wrapped in
    nn.DataParallel, otherwise both returns reference the same module.
    """
    # NOTE(review): eval() on a CLI-provided string — acceptable here because
    # argparse restricts --model_class to a fixed choices list.
    model = eval(args.model_class)(args)
    # load pre-trained model (no FC weights)
    if args.init_weights is not None:
        model_dict = model.state_dict()
        pretrained_dict = torch.load(args.init_weights)['params']
        if args.backbone_class == 'ConvNet':
            # ConvNet checkpoints store raw encoder keys; add the prefix.
            pretrained_dict = {'encoder.'+k: v for k, v in pretrained_dict.items()}
        # Keep only keys that exist in the current model (drops FC weights).
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        print(pretrained_dict.keys())
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    if args.multi_gpu:
        model.encoder = nn.DataParallel(model.encoder, dim=0)
        para_model = model.to(device)
    else:
        para_model = model.to(device)
    return model, para_model
def prepare_optimizer(model, args):
    """Build the optimizer and LR scheduler for *model*.

    Non-encoder ("top") parameters get a learning rate of lr * lr_mul.
    Returns (optimizer, lr_scheduler); raises ValueError on an unknown
    scheduler name.
    """
    top_para = [v for k, v in model.named_parameters() if 'encoder' not in k]
    param_groups = [
        {'params': model.encoder.parameters()},
        {'params': top_para, 'lr': args.lr * args.lr_mul},
    ]
    # as in the literature, we use ADAM for ConvNet and SGD for other backbones
    if args.backbone_class == 'ConvNet':
        optimizer = optim.Adam(
            param_groups,
            lr=args.lr,
            # weight_decay=args.weight_decay, do not use weight_decay here
        )
    else:
        optimizer = optim.SGD(
            param_groups,
            lr=args.lr,
            momentum=args.mom,
            nesterov=True,
            weight_decay=args.weight_decay,
        )
    scheduler_name = args.lr_scheduler
    if scheduler_name == 'step':
        lr_scheduler = optim.lr_scheduler.StepLR(
            optimizer,
            step_size=int(args.step_size),
            gamma=args.gamma,
        )
    elif scheduler_name == 'multistep':
        lr_scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=[int(_) for _ in args.step_size.split(',')],
            gamma=args.gamma,
        )
    elif scheduler_name == 'cosine':
        lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            args.max_epoch,
            eta_min=0  # a tuning parameter
        )
    else:
        raise ValueError('No Such Scheduler')
    return optimizer, lr_scheduler
| 6,374 | 38.351852 | 100 | py |
FEAT | FEAT-master/model/networks/dropblock.py | import torch
import torch.nn.functional as F
from torch import nn
from torch.distributions import Bernoulli
class DropBlock(nn.Module):
    """DropBlock regularization (Ghiasi et al.): zeroes out contiguous
    ``block_size`` x ``block_size`` regions of the feature map instead of
    independent activations, then rescales the surviving activations so the
    expected sum is preserved.
    """
    def __init__(self, block_size):
        super(DropBlock, self).__init__()
        # side length of each square region to drop
        self.block_size = block_size
    def forward(self, x, gamma):
        """Apply DropBlock to ``x`` with per-seed Bernoulli rate ``gamma``.

        Only active in training mode; at eval time the input is returned
        unchanged.
        """
        # shape: (bsize, channels, height, width)
        if self.training:
            batch_size, channels, height, width = x.shape
            bernoulli = Bernoulli(gamma)
            # sample drop "seeds" only at positions where a full block fits
            mask = bernoulli.sample((batch_size, channels, height - (self.block_size - 1), width - (self.block_size - 1)))
            if torch.cuda.is_available():
                mask = mask.cuda()
            # expand each seed into a block_size x block_size zero region
            block_mask = self._compute_block_mask(mask)
            # rescale by (total cells / kept cells) to keep the expectation
            countM = block_mask.size()[0] * block_mask.size()[1] * block_mask.size()[2] * block_mask.size()[3]
            count_ones = block_mask.sum()
            return block_mask * x * (countM / count_ones)
        else:
            return x
    def _compute_block_mask(self, mask):
        """Turn the sampled seed mask into a keep-mask (1 = keep, 0 = drop).

        Each nonzero seed is dilated into a block of zeros in the output;
        padding aligns the seed grid with the full feature-map size.
        """
        left_padding = int((self.block_size-1) / 2)
        right_padding = int(self.block_size / 2)
        batch_size, channels, height, width = mask.shape
        non_zero_idxs = mask.nonzero()
        nr_blocks = non_zero_idxs.shape[0]
        # relative (row, col) offsets covering one block_size x block_size patch
        offsets = torch.stack(
            [
                torch.arange(self.block_size).view(-1, 1).expand(self.block_size, self.block_size).reshape(-1), # - left_padding,
                torch.arange(self.block_size).repeat(self.block_size), #- left_padding
            ]
        ).t()
        # prepend zero offsets for the (batch, channel) index dimensions
        offsets = torch.cat((torch.zeros(self.block_size**2, 2).long(), offsets.long()), 1)
        if torch.cuda.is_available():
            offsets = offsets.cuda()
        if nr_blocks > 0:
            # broadcast every seed against every in-block offset
            non_zero_idxs = non_zero_idxs.repeat(self.block_size ** 2, 1)
            offsets = offsets.repeat(nr_blocks, 1).view(-1, 4)
            offsets = offsets.long()
            block_idxs = non_zero_idxs + offsets
            #block_idxs += left_padding
            padded_mask = F.pad(mask, (left_padding, right_padding, left_padding, right_padding))
            # mark every cell covered by some block as "dropped"
            padded_mask[block_idxs[:, 0], block_idxs[:, 1], block_idxs[:, 2], block_idxs[:, 3]] = 1.
        else:
            padded_mask = F.pad(mask, (left_padding, right_padding, left_padding, right_padding))
        # invert: 1 where the activation is kept, 0 inside dropped blocks
        block_mask = 1 - padded_mask#[:height, :width]
        return block_mask
| 2,392 | 37.596774 | 129 | py |
FEAT | FEAT-master/model/networks/convnet.py | import torch.nn as nn
# Basic ConvNet with Pooling layer
def conv_block(in_channels, out_channels):
    """One ConvNet stage: 3x3 conv -> BatchNorm -> ReLU -> 2x2 max-pool.

    Halves the spatial resolution of the input.
    """
    layers = [
        nn.Conv2d(in_channels, out_channels, 3, padding=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(),
        nn.MaxPool2d(2),
    ]
    return nn.Sequential(*layers)
class ConvNet(nn.Module):
    """Standard 4-stage ConvNet backbone for few-shot learning.

    Each stage is conv3x3 -> BN -> ReLU -> 2x2 max-pool; a final 5x5 max-pool
    collapses the remaining grid, producing a flat ``z_dim`` feature vector.
    """
    def __init__(self, x_dim=3, hid_dim=64, z_dim=64):
        super().__init__()
        def _stage(cin, cout):
            # one conv stage, halving spatial resolution
            return nn.Sequential(
                nn.Conv2d(cin, cout, 3, padding=1),
                nn.BatchNorm2d(cout),
                nn.ReLU(),
                nn.MaxPool2d(2),
            )
        widths = [x_dim, hid_dim, hid_dim, hid_dim, z_dim]
        self.encoder = nn.Sequential(
            *(_stage(cin, cout) for cin, cout in zip(widths, widths[1:]))
        )
    def forward(self, x):
        feats = self.encoder(x)
        # collapse the remaining 5x5 grid to a single spatial cell
        feats = nn.MaxPool2d(5)(feats)
        return feats.view(feats.size(0), -1)
| 735 | 23.533333 | 59 | py |
FEAT | FEAT-master/model/networks/res12.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from model.networks.dropblock import DropBlock
# This ResNet network was designed following the practice of the following papers:
# TADAM: Task dependent adaptive metric for improved few-shot learning (Oreshkin et al., in NIPS 2018) and
# A Simple Neural Attentive Meta-Learner (Mishra et al., in ICLR 2018).
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """ResNet-12 residual block: three 3x3 conv/BN stages with LeakyReLU.

    Down-sampling is performed by the trailing max-pool (size ``stride``),
    not by strided convolutions.  After the residual addition, the block can
    optionally apply DropBlock (with an annealed keep-rate) or plain dropout.
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, drop_rate=0.0, drop_block=False, block_size=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.LeakyReLU(0.1)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv3x3(planes, planes)
        self.bn3 = nn.BatchNorm2d(planes)
        # all three convs are unit-stride; this pool does the down-sampling
        self.maxpool = nn.MaxPool2d(stride)
        self.downsample = downsample
        self.stride = stride
        self.drop_rate = drop_rate
        # counts forward calls; used to anneal the DropBlock keep-rate
        self.num_batches_tracked = 0
        self.drop_block = drop_block
        self.block_size = block_size
        self.DropBlock = DropBlock(block_size=self.block_size)
    def forward(self, x):
        self.num_batches_tracked += 1
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        # project the identity branch when the channel count changes
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        out = self.maxpool(out)
        if self.drop_rate > 0:
            if self.drop_block == True:
                feat_size = out.size()[2]
                # keep-rate anneals linearly from ~1.0 down to a floor of
                # (1 - drop_rate) over the first 20*2000 forward calls
                keep_rate = max(1.0 - self.drop_rate / (20*2000) * (self.num_batches_tracked), 1.0 - self.drop_rate)
                # convert the keep-rate into the per-seed Bernoulli rate,
                # correcting for block area and the valid seed region
                gamma = (1 - keep_rate) / self.block_size**2 * feat_size**2 / (feat_size - self.block_size + 1)**2
                out = self.DropBlock(out, gamma=gamma)
            else:
                out = F.dropout(out, p=self.drop_rate, training=self.training, inplace=True)
        return out
class ResNet(nn.Module):
    """ResNet-12 backbone (TADAM / SNAIL style).

    Four stages widening 64 -> 160 -> 320 -> 640, each a single ``block``
    whose internal max-pool performs the down-sampling.  DropBlock is only
    enabled on the last two stages.  Returns a flat feature vector.
    """
    def __init__(self, block=BasicBlock, keep_prob=1.0, avg_pool=True, drop_rate=0.1, dropblock_size=5):
        self.inplanes = 3
        super(ResNet, self).__init__()
        self.layer1 = self._make_layer(block, 64, stride=2, drop_rate=drop_rate)
        self.layer2 = self._make_layer(block, 160, stride=2, drop_rate=drop_rate)
        self.layer3 = self._make_layer(block, 320, stride=2, drop_rate=drop_rate,
                                       drop_block=True, block_size=dropblock_size)
        self.layer4 = self._make_layer(block, 640, stride=2, drop_rate=drop_rate,
                                       drop_block=True, block_size=dropblock_size)
        if avg_pool:
            self.avgpool = nn.AvgPool2d(5, stride=1)
        self.keep_prob = keep_prob
        self.keep_avg_pool = avg_pool
        self.dropout = nn.Dropout(p=1 - self.keep_prob, inplace=False)
        self.drop_rate = drop_rate
        # Kaiming init for convs (leaky-relu gain), unit/zero init for BN
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                        nonlinearity='leaky_relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
    def _make_layer(self, block, planes, stride=1, drop_rate=0.0, drop_block=False, block_size=1):
        """Create one stage consisting of a single residual block."""
        if stride == 1 and self.inplanes == planes * block.expansion:
            downsample = None
        else:
            # 1x1 projection (unit stride: spatial change happens in the block)
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stage = block(self.inplanes, planes, stride, downsample,
                      drop_rate, drop_block, block_size)
        self.inplanes = planes * block.expansion
        return nn.Sequential(stage)
    def forward(self, x):
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        if self.keep_avg_pool:
            x = self.avgpool(x)
        return x.view(x.size(0), -1)
def Res12(keep_prob=1.0, avg_pool=False, **kwargs):
    """Factory for a ResNet-12 backbone built from ``BasicBlock``s."""
    return ResNet(BasicBlock, keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)
| 4,705 | 36.349206 | 125 | py |
FEAT | FEAT-master/model/networks/WRN28.py | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
import sys
import numpy as np
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 convolution with padding 1 and a bias term."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=True,
    )
def conv_init(m):
    """Initialize a module in Wide-ResNet style.

    Conv layers get Xavier-uniform weights with gain sqrt(2) and zero bias;
    BatchNorm layers get unit weight and zero bias.  Other module types are
    left untouched.  Intended for use with ``net.apply(conv_init)``.

    Args:
        m: a single ``nn.Module`` (as passed by ``Module.apply``).
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # use the in-place initializers: the non-underscore variants
        # (init.xavier_uniform / init.constant) are deprecated in PyTorch
        init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        init.constant_(m.bias, 0)
    elif classname.find('BatchNorm') != -1:
        init.constant_(m.weight, 1)
        init.constant_(m.bias, 0)
class wide_basic(nn.Module):
    """Pre-activation wide residual block: BN-ReLU-conv, dropout, BN-ReLU-conv.

    The second convolution carries the stride; a 1x1 projection shortcut is
    added whenever the shape changes, otherwise the identity is used.
    """
    def __init__(self, in_planes, planes, dropout_rate, stride=1):
        super(wide_basic, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=True)
        if stride == 1 and in_planes == planes:
            self.shortcut = nn.Sequential()
        else:
            # projection shortcut to match channels/resolution
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
            )
    def forward(self, x):
        branch = self.dropout(self.conv1(F.relu(self.bn1(x))))
        branch = self.conv2(F.relu(self.bn2(branch)))
        return branch + self.shortcut(x)
class Wide_ResNet(nn.Module):
    """Wide ResNet backbone (Zagoruyko & Komodakis).

    ``depth`` must be of the form 6n+4; ``widen_factor`` (k) scales the
    channel counts [16, 16k, 32k, 64k].  The forward pass ends with a 21x21
    average pool, so after the two strided groups it expects a 21x21 feature
    map (e.g. 84x84 inputs) and returns a flat (batch, 64k) embedding.
    """
    def __init__(self, depth, widen_factor, dropout_rate):
        super(Wide_ResNet, self).__init__()
        self.in_planes = 16
        assert ((depth-4)%6 ==0), 'Wide-resnet depth should be 6n+4'
        n = int((depth-4)/6)
        k = widen_factor
        print('| Wide-Resnet %dx%d' %(depth, k))
        widths = [16, 16*k, 32*k, 64*k]
        self.conv1 = conv3x3(3, widths[0])
        # group 1 keeps resolution; groups 2 and 3 each halve it
        self.layer1 = self._wide_layer(wide_basic, widths[1], n, dropout_rate, stride=1)
        self.layer2 = self._wide_layer(wide_basic, widths[2], n, dropout_rate, stride=2)
        self.layer3 = self._wide_layer(wide_basic, widths[3], n, dropout_rate, stride=2)
        self.bn1 = nn.BatchNorm2d(widths[3], momentum=0.9)
    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
        """Stack ``num_blocks`` blocks; only the first one may be strided."""
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, dropout_rate, s))
            self.in_planes = planes
        return nn.Sequential(*blocks)
    def forward(self, x):
        out = self.conv1(x)
        out = self.layer3(self.layer2(self.layer1(out)))
        out = F.relu(self.bn1(out))
        out = F.avg_pool2d(out, 21)
        return out.view(out.size(0), -1)
FEAT | FEAT-master/model/networks/res18.py | import torch.nn as nn
__all__ = ['resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Return a bias-free 1x1 (pointwise) convolution."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                     bias=False)
class BasicBlock(nn.Module):
    """Standard two-convolution residual block (torchvision style).

    The first 3x3 conv carries the stride; an optional ``downsample`` branch
    projects the identity to the new shape before the residual addition.
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Three-convolution bottleneck residual block (torchvision style).

    1x1 reduce -> 3x3 (strided) -> 1x1 expand by ``expansion``; an optional
    ``downsample`` branch projects the identity before the addition.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet backbone without the final FC layer.

    Compared to torchvision, the stem is a single 3x3 stride-1 convolution
    (no 7x7 conv / max-pool), which suits the smaller images used in
    few-shot learning.  Returns a flat feature vector after global average
    pooling.

    Args:
        block: residual block class (``BasicBlock`` or ``Bottleneck``).
        layers: number of blocks per stage.  NOTE: changed from a mutable
            list default to a tuple — a mutable default argument is shared
            across calls and is a classic Python pitfall (here it was never
            mutated, but the tuple removes the risk at no cost).
        zero_init_residual: zero-init the last BN of each residual branch so
            each block starts as an identity
            (https://arxiv.org/abs/1706.02677).
    """
    def __init__(self, block=BasicBlock, layers=(2, 2, 2, 2), zero_init_residual=False):
        super(ResNet, self).__init__()
        self.inplanes = 64
        # light-weight stem for small inputs
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch, so that the
        # residual branch starts with zeros and each block behaves like an
        # identity mapping (improves accuracy by ~0.2-0.3% per
        # https://arxiv.org/abs/1706.02677).
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of ``blocks`` residual blocks (first may stride)."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection so the identity matches the residual shape
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        stage.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)
    def forward(self, x):
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        return x.view(x.size(0), -1)
def resnet10(**kwargs):
    """Factory for ResNet-10 (one BasicBlock per stage)."""
    return ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
def resnet18(**kwargs):
    """Factory for ResNet-18 (two BasicBlocks per stage)."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet34(**kwargs):
    """Factory for ResNet-34 (BasicBlocks, [3, 4, 6, 3] layout)."""
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
def resnet50(**kwargs):
    """Factory for ResNet-50 (Bottlenecks, [3, 4, 6, 3] layout)."""
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
def resnet101(**kwargs):
    """Factory for ResNet-101 (Bottlenecks, [3, 4, 23, 3] layout)."""
    return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
def resnet152(**kwargs):
    """Factory for ResNet-152 (Bottlenecks, [3, 8, 36, 3] layout)."""
    return ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
FEAT | FEAT-master/model/models/base.py | import torch
import torch.nn as nn
import numpy as np
class FewShotModel(nn.Module):
    """Base class for few-shot models: a backbone encoder plus episode logic.

    Subclasses implement ``_forward`` to turn instance embeddings and
    support/query index splits into logits.
    """
    def __init__(self, args):
        super().__init__()
        self.args = args
        # imports are local so only the selected backbone must be importable
        if args.backbone_class == 'ConvNet':
            from model.networks.convnet import ConvNet
            self.encoder = ConvNet()
        elif args.backbone_class == 'Res12':
            hdim = 640
            from model.networks.res12 import ResNet
            self.encoder = ResNet()
        elif args.backbone_class == 'Res18':
            hdim = 512
            from model.networks.res18 import ResNet
            self.encoder = ResNet()
        elif args.backbone_class == 'WRN':
            hdim = 640
            from model.networks.WRN28 import Wide_ResNet
            # dropout fixed to 0.5 here; tuning it may achieve better results
            self.encoder = Wide_ResNet(28, 10, 0.5)
        else:
            raise ValueError('')
    def split_instances(self, data):
        """Return (support_idx, query_idx) index tensors for one episode.

        ``data`` is unused; the split depends only on the episode sizes in
        ``self.args`` (train sizes when ``self.training``, eval sizes
        otherwise).  Shapes: (1, shot, way) and (1, query, way).
        """
        args = self.args
        if self.training:
            way, shot, query = args.way, args.shot, args.query
        else:
            way, shot, query = args.eval_way, args.eval_shot, args.eval_query
        support_idx = torch.Tensor(np.arange(way * shot)).long().view(1, shot, way)
        query_idx = torch.Tensor(
            np.arange(way * shot, way * (shot + query))).long().view(1, query, way)
        return support_idx, query_idx
    def forward(self, x, get_feature=False):
        if get_feature:
            # caller only wants the raw embeddings
            return self.encoder(x)
        # feature extraction
        x = x.squeeze(0)
        instance_embs = self.encoder(x)
        num_inst = instance_embs.shape[0]
        # split support / query indices for the few-shot episode
        support_idx, query_idx = self.split_instances(x)
        if not self.training:
            return self._forward(instance_embs, support_idx, query_idx)
        logits, logits_reg = self._forward(instance_embs, support_idx, query_idx)
        return logits, logits_reg
    def _forward(self, x, support_idx, query_idx):
        raise NotImplementedError('Suppose to be implemented by subclass')
FEAT | FEAT-master/model/models/graphnet.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
import math
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from itertools import permutations
import scipy.sparse as sp
class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907

    Computes ``adj @ (input @ W) (+ bias)`` where ``adj`` is expected to be
    a (sparse) normalized adjacency matrix.
    """
    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            # keep a named (None) parameter slot so state_dict stays consistent
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        """Uniform init in [-1/sqrt(out_features), 1/sqrt(out_features)]."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)
    def forward(self, input, adj):
        # feature transform first, then (sparse) neighborhood aggregation
        transformed = torch.mm(input, self.weight)
        aggregated = torch.spmm(adj, transformed)
        if self.bias is None:
            return aggregated
        return aggregated + self.bias
    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
def normalize(mx):
    """Row-normalize a (sparse) matrix so each row sums to 1."""
    row_sums = np.array(mx.sum(1))
    inv = np.power(row_sums, -1).flatten()
    # rows that sum to zero would produce inf; zero them out instead
    inv[np.isinf(inv)] = 0.
    return sp.diags(inv).dot(mx)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor.

    The matrix is converted to COO format and cast to float32; the result is
    moved to the GPU when one is available, matching the rest of the model.

    Args:
        sparse_mx: any scipy sparse matrix.

    Returns:
        A ``torch`` sparse COO tensor with the same shape and values.
    """
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    # torch.sparse.FloatTensor is a deprecated constructor; use the
    # torch.sparse_coo_tensor factory instead
    result = torch.sparse_coo_tensor(indices, values, shape)
    if torch.cuda.is_available():
        result = result.cuda()
    return result
class GraphFunc(nn.Module):
    """Two-layer GCN set-transformation with a residual connection.

    For each set in the batch it builds a class-adjacency graph from
    ``graph_label`` (elements of the same class are fully connected), then
    refines the embeddings with gc1 -> ReLU -> dropout -> gc2 and adds the
    input back.
    """
    def __init__(self, z_dim):
        super(GraphFunc, self).__init__()
        """
        DeepSets Function
        """
        # expand to 4*z_dim, then project back to z_dim
        self.gc1 = GraphConvolution(z_dim, z_dim * 4)
        self.gc2 = GraphConvolution(z_dim * 4, z_dim)
        self.z_dim = z_dim
    def forward(self, graph_input_raw, graph_label):
        """
        set_input, seq_length, set_size, dim

        ``graph_input_raw``: (set_length, set_size, z_dim) embeddings.
        ``graph_label``: per-element class labels (shared across the batch),
        used only to build the adjacency matrix.
        Returns a tensor of the same shape as ``graph_input_raw``.
        """
        set_length, set_size, dim = graph_input_raw.shape
        assert(dim == self.z_dim)
        set_output_list = []
        for g_index in range(set_length):
            graph_input = graph_input_raw[g_index, :]
            # construct the adj matrix: connect every ordered pair of
            # elements that share a class label
            unique_class = np.unique(graph_label)
            edge_set = []
            for c in unique_class:
                current_index = np.where(graph_label == c)[0].tolist()
                if len(current_index) > 1:
                    edge_set.append(np.array(list(permutations(current_index, 2))))
            if len(edge_set) == 0:
                # no class has more than one element: empty (zero) adjacency
                adj = sp.coo_matrix((np.array([0]), (np.array([0]), np.array([0]))),
                                    shape=(graph_label.shape[0], graph_label.shape[0]),
                                    dtype=np.float32)
            else:
                edge_set = np.concatenate(edge_set, 0)
                adj = sp.coo_matrix((np.ones(edge_set.shape[0]), (edge_set[:, 0], edge_set[:, 1])),
                                    shape=(graph_label.shape[0], graph_label.shape[0]),
                                    dtype=np.float32)
            # symmetrize, add self-loops, row-normalize, convert to torch
            adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
            adj = normalize(adj + sp.eye(adj.shape[0]))
            adj = sparse_mx_to_torch_sparse_tensor(adj)
            # do GCN process (residual two-layer refinement)
            residual = graph_input
            graph_input = F.relu(self.gc1(graph_input, adj))
            graph_input = F.dropout(graph_input, 0.5, training=self.training)
            graph_input = self.gc2(graph_input, adj)
            set_output = residual + graph_input
            set_output_list.append(set_output)
        return torch.stack(set_output_list)
class GCN(FewShotModel):
    """Few-shot classifier whose prototypes are refined by a GCN.

    Class prototypes (support means) are passed through ``GraphFunc`` before
    being compared to the query embeddings by negative squared Euclidean
    distance or cosine similarity.
    """
    def __init__(self, args):
        super().__init__(args)
        # embedding dimension of the chosen backbone
        if args.backbone_class == 'ConvNet':
            hdim = 64
        elif args.backbone_class == 'Res12':
            hdim = 640
        elif args.backbone_class == 'Res18':
            hdim = 512
        elif args.backbone_class == 'WRN':
            hdim = 640
        else:
            raise ValueError('')
        self.graph_func = GraphFunc(hdim)
    def _forward(self, instance_embs, support_idx, query_idx):
        emb_dim = instance_embs.size(-1)
        # organize support/query data
        support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))
        # get mean of the support
        proto = support.mean(dim=1) # Ntask x NK x d
        num_batch = proto.shape[0]
        num_proto = proto.shape[1]
        num_query = np.prod(query_idx.shape[-2:])
        # query: (num_batch, num_query, num_proto, num_emb)
        # proto: (num_batch, num_proto, num_emb)
        # one label per prototype (prototypes are all distinct classes), used
        # only to build the GCN adjacency
        if self.training:
            graph_label = torch.arange(self.args.way).long()
        else:
            graph_label = torch.arange(self.args.eval_way).long()
        proto = self.graph_func(proto, graph_label)
        if self.args.use_euclidean:
            # negative squared distance, scaled by the temperature
            query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
            proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
            proto = proto.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
            logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else:
            proto = F.normalize(proto, dim=-1) # normalize for cosine distance
            query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
            logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
            logits = logits.view(-1, num_proto)
        # do not use contrastive regularization for GCN (since there is only one single instance class in each auxiliary task)
        if self.training:
            return logits, None
        else:
            return logits
| 6,810 | 37.480226 | 128 | py |
FEAT | FEAT-master/model/models/deepset.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
class DeepSetsFunc(nn.Module):
    """DeepSets-style set transformation with a residual connection.

    For each element, a leave-one-out context is aggregated over the other
    set members, transformed, concatenated with the element itself, and the
    result is added back to the input.
    """
    def __init__(self, z_dim):
        super(DeepSetsFunc, self).__init__()
        """
        DeepSets Function
        """
        # gen1/gen2 transform the aggregated context
        self.gen1 = nn.Linear(z_dim, z_dim * 4)
        self.gen2 = nn.Linear(z_dim*4, z_dim)
        # gen3/gen4 transform [element; context] back to z_dim
        self.gen3 = nn.Linear(z_dim * 2, z_dim * 4)
        self.gen4 = nn.Linear(z_dim*4, z_dim)
        self.z_dim = z_dim
    def forward(self, set_input):
        """
        set_input, seq_length, set_size, dim

        ``set_input``: (set_length, set_size, z_dim); returns the same shape.
        """
        set_length, set_size, dim = set_input.shape
        assert(dim == self.z_dim)
        # zero-diagonal mask: each element's own contribution is masked out
        mask_one = torch.ones(set_size, set_size) - torch.eye(set_size, set_size)
        mask_one = mask_one.view(1, set_size, set_size, 1)
        if torch.cuda.is_available():
            mask_one = mask_one.cuda()
        # NOTE(review): despite the name, the aggregation here is an
        # element-wise MAX over the other members; since masked entries are
        # multiplied by 0 (not -inf), zeros can dominate negative features.
        combined_mean = torch.mul(set_input.unsqueeze(2), mask_one).max(1)[0] # 75 x 6 x 64, we can also try max here
        # do a bilinear transformation
        combined_mean = F.relu(self.gen1(combined_mean.view(-1, self.z_dim)))
        combined_mean = self.gen2(combined_mean)
        combined_mean_cat = torch.cat([set_input.contiguous().view(-1, self.z_dim), combined_mean], 1)
        # do linear transformation
        combined_mean_cat = F.relu(self.gen3(combined_mean_cat))
        combined_mean_cat = self.gen4(combined_mean_cat)
        combined_mean_cat = combined_mean_cat.view(-1, set_size, self.z_dim)
        # residual connection back to the raw set
        set_output = set_input + combined_mean_cat
        return set_output
class DeepSet(FewShotModel):
    """Few-shot classifier whose prototypes are adapted by a DeepSets function.

    Prototypes (support means) are transformed jointly by ``DeepSetsFunc``;
    queries are scored by negative squared Euclidean distance or cosine
    similarity.  During training an auxiliary task over support+query
    instances provides a contrastive regularization term.
    """
    def __init__(self, args):
        super().__init__(args)
        # embedding dimension of the chosen backbone
        if args.backbone_class == 'ConvNet':
            hdim = 64
        elif args.backbone_class == 'Res12':
            hdim = 640
        elif args.backbone_class == 'Res18':
            hdim = 512
        elif args.backbone_class == 'WRN':
            hdim = 640
        else:
            raise ValueError('')
        self.set_func = DeepSetsFunc(hdim)
    def _forward(self, instance_embs, support_idx, query_idx):
        emb_dim = instance_embs.size(-1)
        # organize support/query data
        support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))
        # get mean of the support
        proto = support.mean(dim=1) # Ntask x NK x d
        num_batch = proto.shape[0]
        num_proto = proto.shape[1]
        num_query = np.prod(query_idx.shape[-2:])
        # query: (num_batch, num_query, num_proto, num_emb)
        # proto: (num_batch, num_proto, num_emb)
        # adapt the prototypes jointly as one set
        proto = self.set_func(proto)
        if self.args.use_euclidean:
            query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
            proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
            proto = proto.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
            logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else:
            proto = F.normalize(proto, dim=-1) # normalize for cosine distance
            query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
            logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
            logits = logits.view(-1, num_proto)
        # for regularization: build an auxiliary task over all instances,
        # grouped per class ([0, 2, 1, 3] permute makes rows way-major)
        if self.training:
            aux_task = torch.cat([support.view(1, self.args.shot, self.args.way, emb_dim),
                                  query.view(1, self.args.query, self.args.way, emb_dim)], 1) # T x (K+Kq) x N x d
            num_query = np.prod(aux_task.shape[1:3])
            aux_task = aux_task.permute([0, 2, 1, 3])
            aux_task = aux_task.contiguous().view(-1, self.args.shot + self.args.query, emb_dim)
            # apply the transformation over the Aug Task
            aux_emb = self.set_func(aux_task) # T x N x (K+Kq) x d
            # compute class mean
            aux_emb = aux_emb.view(num_batch, self.args.way, self.args.shot + self.args.query, emb_dim)
            aux_center = torch.mean(aux_emb, 2) # T x N x d
            if self.args.use_euclidean:
                aux_task = aux_task.contiguous().view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
                aux_center = aux_center.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
                aux_center = aux_center.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
                logits_reg = - torch.sum((aux_center - aux_task) ** 2, 2) / self.args.temperature2
            else:
                aux_center = F.normalize(aux_center, dim=-1) # normalize for cosine distance
                aux_task = aux_task.contiguous().view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
                logits_reg = torch.bmm(aux_task, aux_center.permute([0,2,1])) / self.args.temperature2
                logits_reg = logits_reg.view(-1, num_proto)
            return logits, logits_reg
        else:
            return logits
| 5,338 | 43.865546 | 117 | py |
FEAT | FEAT-master/model/models/semi_protofeat.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
from model.utils import one_hot
class ScaledDotProductAttention(nn.Module):
    ''' Scaled Dot-Product Attention

    Computes softmax(q k^T / temperature) v, also returning the attention
    weights (after dropout) and their log-softmax.
    '''
    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)
    def forward(self, q, k, v):
        # similarity of every query to every key, scaled by the temperature
        scores = torch.bmm(q, k.transpose(1, 2)) / self.temperature
        log_attn = F.log_softmax(scores, 2)
        weights = self.dropout(self.softmax(scores))
        output = torch.bmm(weights, v)
        return output, weights, log_attn
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module

    Projects q/k/v into ``n_head`` sub-spaces, applies scaled dot-product
    attention per head, concatenates the heads, projects back to ``d_model``
    and applies dropout + a residual LayerNorm.
    '''
    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        # joint projections for all heads at once
        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
        # init scaled by fan-in + per-head fan-out
        nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
        self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
        self.layer_norm = nn.LayerNorm(d_model)
        self.fc = nn.Linear(n_head * d_v, d_model)
        nn.init.xavier_normal_(self.fc.weight)
        self.dropout = nn.Dropout(dropout)
    def forward(self, q, k, v):
        """Attend ``q`` over ``k``/``v``; all inputs are (batch, len, d_model).

        Returns a tensor shaped like ``q`` (residual + LayerNorm applied).
        """
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        sz_b, len_v, _ = v.size()
        residual = q
        # split the joint projection into per-head chunks
        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
        # fold heads into the batch dimension for batched attention
        q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
        k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
        v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
        output, attn, log_attn = self.attention(q, k, v)
        # unfold heads and concatenate them along the feature dimension
        output = output.view(n_head, sz_b, len_q, d_v)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
        output = self.dropout(self.fc(output))
        output = self.layer_norm(output + residual)
        return output
class SemiProtoFEAT(FewShotModel):
    """Semi-supervised FEAT: transformer-adapted supports + soft-label protos.

    Support embeddings are adapted with self-attention over the whole episode,
    then prototypes are re-estimated from the support set plus an unlabeled
    pool (here the query set) via soft assignments.  During training a
    contrastive regularization over an auxiliary task is added.
    """
    def __init__(self, args):
        super().__init__(args)
        # embedding dimension of the chosen backbone
        if args.backbone_class == 'ConvNet':
            hdim = 64
        elif args.backbone_class == 'Res12':
            hdim = 640
        elif args.backbone_class == 'Res18':
            hdim = 512
        elif args.backbone_class == 'WRN':
            hdim = 640
        else:
            raise ValueError('')
        self.slf_attn = MultiHeadAttention(1, hdim, hdim, hdim, dropout=0.5)
    def get_proto(self, x_shot, x_pool):
        # get the prototypes based w/ an unlabeled pool set
        # x_shot: (num_batch, num_shot, num_way, emb_dim) labeled supports
        # x_pool: unlabeled instances, soft-assigned to the shot prototypes
        num_batch, num_shot, num_way, emb_dim = x_shot.shape
        num_pool_shot = x_pool.shape[1]
        num_pool = num_pool_shot * num_way
        # hard one-hot labels for the supports
        label_support = torch.arange(num_way).repeat(num_shot).type(torch.LongTensor)
        label_support_onehot = one_hot(label_support, num_way)
        label_support_onehot = label_support_onehot.unsqueeze(0).repeat([num_batch, 1, 1])
        if torch.cuda.is_available():
            label_support_onehot = label_support_onehot.cuda()
        proto_shot = x_shot.mean(dim = 1)
        # similarity of every pool instance to the shot prototypes
        if self.args.use_euclidean:
            dis = - torch.sum((proto_shot.unsqueeze(1).expand(num_batch, num_pool, num_way, emb_dim).contiguous().view(num_batch*num_pool, num_way, emb_dim) - x_pool.view(-1, emb_dim).unsqueeze(1)) ** 2, 2) / self.args.temperature
        else:
            dis = torch.bmm(x_pool.view(num_batch, -1, emb_dim), F.normalize(proto_shot, dim=-1).permute([0,2,1])) / self.args.temperature
        dis = dis.view(num_batch, -1, num_way)
        # soft assignment of pool instances to classes
        z_hat = F.softmax(dis, dim=2)
        z = torch.cat([label_support_onehot, z_hat], dim = 1) # (num_batch, n_shot + n_pool, n_way)
        h = torch.cat([x_shot.view(num_batch, -1, emb_dim), x_pool.view(num_batch, -1, emb_dim)], dim = 1) # (num_batch, n_shot + n_pool, n_embedding)
        # weighted class means over supports + soft-labeled pool
        proto = torch.bmm(z.permute([0,2,1]), h)
        sum_z = z.sum(dim = 1).view((num_batch, -1, 1))
        proto = proto / sum_z
        return proto
    def _forward(self, instance_embs, support_idx, query_idx):
        emb_dim = instance_embs.size(-1)
        # organize support/query data
        support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))
        num_batch = support.shape[0]
        num_shot, num_way = support.shape[1], support.shape[2]
        num_query = np.prod(query_idx.shape[-2:])
        # transformation: adapt supports by attending over the whole episode
        whole_set = torch.cat([support.view(num_batch, -1, emb_dim), query.view(num_batch, -1, emb_dim)], 1)
        support = self.slf_attn(support.view(num_batch, -1, emb_dim), whole_set, whole_set).view(num_batch, num_shot, num_way, emb_dim)
        # get mean of the support
        proto = self.get_proto(support, query) # we can also use adapted query set here to achieve better results
        # proto = support.mean(dim=1) # Ntask x NK x d
        num_proto = proto.shape[1]
        # query: (num_batch, num_query, num_proto, num_emb)
        # proto: (num_batch, num_proto, num_emb)
        if self.args.use_euclidean:
            query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
            proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
            proto = proto.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
            logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else:
            proto = F.normalize(proto, dim=-1) # normalize for cosine distance
            query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
            logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
            logits = logits.view(-1, num_proto)
        # for regularization: auxiliary task over all instances, adapted by
        # the same attention module, scored against its own class means
        if self.training:
            aux_task = torch.cat([support.view(1, self.args.shot, self.args.way, emb_dim),
                                  query.view(1, self.args.query, self.args.way, emb_dim)], 1) # T x (K+Kq) x N x d
            num_query = np.prod(aux_task.shape[1:3])
            aux_task = aux_task.permute([0, 2, 1, 3])
            aux_task = aux_task.contiguous().view(-1, self.args.shot + self.args.query, emb_dim)
            # apply the transformation over the Aug Task
            aux_emb = self.slf_attn(aux_task, aux_task, aux_task) # T x N x (K+Kq) x d
            # compute class mean
            aux_emb = aux_emb.view(num_batch, self.args.way, self.args.shot + self.args.query, emb_dim)
            aux_center = torch.mean(aux_emb, 2) # T x N x d
            if self.args.use_euclidean:
                aux_task = aux_task.permute([1,0,2]).contiguous().view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
                aux_center = aux_center.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
                aux_center = aux_center.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
                logits_reg = - torch.sum((aux_center - aux_task) ** 2, 2) / self.args.temperature2
            else:
                aux_center = F.normalize(aux_center, dim=-1) # normalize for cosine distance
                aux_task = aux_task.permute([1,0,2]).contiguous().view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
                logits_reg = torch.bmm(aux_task, aux_center.permute([0,2,1])) / self.args.temperature2
                logits_reg = logits_reg.view(-1, num_proto)
            return logits, logits_reg
        else:
            return logits
| 8,560 | 45.781421 | 230 | py |
FEAT | FEAT-master/model/models/protonet.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
# Note: As in Protonet, we use Euclidean Distances here, you can change to the Cosine Similarity by replace
# TRUE in line 30 as self.args.use_euclidean
class ProtoNet(FewShotModel):
    """Prototypical Network: score each query by its distance to the mean
    embedding (prototype) of every class in the episode."""

    def __init__(self, args):
        super().__init__(args)

    def _forward(self, instance_embs, support_idx, query_idx):
        emb_dim = instance_embs.size(-1)

        # Gather support/query embeddings into their episode layouts:
        # (batch, shot/query, way, emb_dim).
        support = instance_embs[support_idx.flatten()].view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.flatten()].view(*(query_idx.shape + (-1,)))

        # Prototype = mean over the shot dimension -> Ntask x NK x d.
        proto = support.mean(dim=1)
        num_batch, num_proto = proto.shape[0], proto.shape[1]
        num_query = np.prod(query_idx.shape[-2:])

        # NOTE: the metric is hard-wired to the Euclidean branch here
        # (replace True with self.args.use_euclidean to restore the switch).
        if True:
            # Pair every query with every prototype and use negative squared
            # Euclidean distance as the logit.
            query = query.view(-1, emb_dim).unsqueeze(1)  # (Nbatch*Nq*Nw, 1, d)
            proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim)
            proto = proto.contiguous().view(num_batch * num_query, num_proto, emb_dim)
            logits = -torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else:
            # Cosine-similarity variant: normalize prototypes, then one bmm.
            proto = F.normalize(proto, dim=-1)
            query = query.view(num_batch, -1, emb_dim)  # (Nbatch, Nq*Nw, d)
            logits = torch.bmm(query, proto.permute([0, 2, 1])) / self.args.temperature
            logits = logits.view(-1, num_proto)

        # Training callers expect a (logits, regularizer) pair; ProtoNet has
        # no auxiliary loss, so the second element is None.
        return (logits, None) if self.training else logits
| 2,007 | 40.833333 | 137 | py |
FEAT | FEAT-master/model/models/bilstm.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from model.models import FewShotModel
class BidirectionalLSTM(nn.Module):
    def __init__(self, layer_sizes, vector_dim):
        super(BidirectionalLSTM, self).__init__()
        """
        Initializes a multi layer bidirectional LSTM.
        :param layer_sizes: list whose length is the number of layers and whose
            first entry is the per-direction hidden size, e.g. [100, 100, 100]
            gives a 3-layer LSTM of width 100.
        :param vector_dim: dimensionality of the input feature vectors.
        """
        self.hidden_size = layer_sizes[0]
        self.vector_dim = vector_dim
        self.num_layers = len(layer_sizes)

        self.lstm = nn.LSTM(input_size=self.vector_dim,
                            num_layers=self.num_layers,
                            hidden_size=self.hidden_size,
                            bidirectional=True)

    def forward(self, inputs, batch_size):
        """
        Runs the bidirectional LSTM and adds a residual skip connection.
        :param inputs: tensor of shape [sequence_length, batch_size, vector_dim].
        :param batch_size: batch dimension used to size the initial states.
        :return: tensor of the same shape as *inputs* (LSTM output + inputs).
        """
        # NOTE(review): the initial hidden/cell states are *random* on every
        # call (torch.rand), not zeros — unusual, but preserved from the
        # original implementation.
        # `Variable(..., requires_grad=False)` is deprecated since PyTorch 0.4;
        # a plain tensor is fully equivalent here.
        c0 = torch.rand(self.lstm.num_layers * 2, batch_size, self.lstm.hidden_size)
        h0 = torch.rand(self.lstm.num_layers * 2, batch_size, self.lstm.hidden_size)
        if torch.cuda.is_available():
            c0 = c0.cuda()
            h0 = h0.cuda()
        output, (hn, cn) = self.lstm(inputs, (h0, c0))

        # Residual addition: valid because callers size the LSTM so that
        # 2 * hidden_size == vector_dim (see BILSTM: layer_sizes=[hdim // 2]).
        output = output + inputs
        return output  # , hn, cn
class BILSTM(FewShotModel):
    # Few-shot classifier that adapts the class prototypes with a
    # bidirectional LSTM over the prototype set (set-to-set adaptation),
    # plus a contrastive auxiliary loss during training.
    def __init__(self, args):
        super().__init__(args)
        # Embedding width depends on the backbone encoder.
        if args.backbone_class == 'ConvNet':
            hdim = 64
        elif args.backbone_class == 'Res12':
            hdim = 640
        elif args.backbone_class == 'Res18':
            hdim = 512
        elif args.backbone_class == 'WRN':
            hdim = 640
        else:
            raise ValueError('')

        # Per-direction hidden size hdim // 2 so the BiLSTM output (2x hidden)
        # matches hdim and the residual addition inside BidirectionalLSTM works.
        self.bilstm = BidirectionalLSTM(layer_sizes=[hdim // 2],
                                        vector_dim = hdim)

    def _forward(self, instance_embs, support_idx, query_idx):
        emb_dim = instance_embs.size(-1)

        # organize support/query data: (batch, shot/query, way, emb_dim)
        support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))

        # get mean of the support
        proto = support.mean(dim=1) # Ntask x NK x d
        num_batch = proto.shape[0]
        num_proto = proto.shape[1]
        num_query = np.prod(query_idx.shape[-2:])

        # query: (num_batch, num_query, num_proto, num_emb)
        # proto: (num_batch, num_proto, num_emb)
        # BiLSTM expects (seq_len, batch, dim): treat the prototype set as
        # the sequence, then permute back.
        proto = self.bilstm(proto.permute([1, 0, 2]), num_batch)
        proto = proto.permute([1, 0, 2])

        if self.args.use_euclidean:
            # Negative squared Euclidean distance between each query and each
            # adapted prototype.
            query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
            proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
            proto = proto.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
            logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else:
            proto = F.normalize(proto, dim=-1) # normalize for cosine distance
            query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
            logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
            logits = logits.view(-1, num_proto)

        # for regularization: during training, adapt all instances of each
        # class (support + query together) and pull them toward their adapted
        # class centers.
        if self.training:
            aux_task = torch.cat([support.view(1, self.args.shot, self.args.way, emb_dim),
                                  query.view(1, self.args.query, self.args.way, emb_dim)], 1) # T x (K+Kq) x N x d
            num_query = np.prod(aux_task.shape[1:3])
            aux_task = aux_task.permute([0, 2, 1, 3])
            aux_task = aux_task.contiguous().view(-1, self.args.shot + self.args.query, emb_dim)
            # apply the transformation over the Aug Task
            # (one BiLSTM "batch" entry per class of the episode)
            aux_emb = self.bilstm(aux_task.permute([1, 0, 2]), num_batch * self.args.way) # T x N x (K+Kq) x d
            aux_emb = aux_emb.permute([1, 0, 2])
            # compute class mean
            aux_emb = aux_emb.view(num_batch, self.args.way, self.args.shot + self.args.query, emb_dim)
            aux_center = torch.mean(aux_emb, 2) # T x N x d

            if self.args.use_euclidean:
                aux_task = aux_task.contiguous().view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
                aux_center = aux_center.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
                aux_center = aux_center.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
                logits_reg = - torch.sum((aux_center - aux_task) ** 2, 2) / self.args.temperature2
            else:
                aux_center = F.normalize(aux_center, dim=-1) # normalize for cosine distance
                aux_task = aux_task.contiguous().view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
                logits_reg = torch.bmm(aux_task, aux_center.permute([0,2,1])) / self.args.temperature2
                logits_reg = logits_reg.view(-1, num_proto)

            return logits, logits_reg
        else:
            return logits
| 5,746 | 45.723577 | 118 | py |
FEAT | FEAT-master/model/models/classifier.py | import torch
import torch.nn as nn
import numpy as np
from model.utils import euclidean_metric
import torch.nn.functional as F
class Classifier(nn.Module):
    """Backbone encoder plus a linear head, used for supervised pre-training."""

    def __init__(self, args):
        super().__init__()
        self.args = args
        # Import only the requested backbone so the others are never loaded.
        if args.backbone_class == 'ConvNet':
            from model.networks.convnet import ConvNet
            self.encoder = ConvNet()
            hdim = 64
        elif args.backbone_class == 'Res12':
            from model.networks.res12 import ResNet
            self.encoder = ResNet()
            hdim = 640
        elif args.backbone_class == 'Res18':
            from model.networks.res18 import ResNet
            self.encoder = ResNet()
            hdim = 512
        elif args.backbone_class == 'WRN':
            from model.networks.WRN28 import Wide_ResNet
            self.encoder = Wide_ResNet(28, 10, 0.5)
            hdim = 640
        else:
            raise ValueError('')
        self.fc = nn.Linear(hdim, args.num_class)

    def forward(self, data, is_emb = False):
        """Embed *data*; apply the linear head unless is_emb is True."""
        embedding = self.encoder(data)
        return embedding if is_emb else self.fc(embedding)

    def forward_proto(self, data_shot, data_query, way = None):
        """Return (Euclidean, cosine) nearest-prototype logits for an episode.

        Prototypes are class means of the encoded shots; *way* defaults to the
        full class count.
        """
        if way is None:
            way = self.args.num_class
        proto = self.encoder(data_shot).reshape(self.args.shot, way, -1).mean(dim=0)
        query = self.encoder(data_query)

        logits_dist = euclidean_metric(query, proto)
        logits_sim = torch.mm(query, F.normalize(proto, p=2, dim=-1).t())
        return logits_dist, logits_sim
FEAT | FEAT-master/model/models/featstar.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
# No-Reg for FEAT-STAR here
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(Q K^T / temperature) V.

    Returns (output, attention weights, log attention weights).
    """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v):
        # Similarity scores scaled by the temperature.
        scores = torch.bmm(q, k.transpose(1, 2)) / self.temperature
        # Log-probabilities are computed from the same scaled scores.
        log_attn = F.log_softmax(scores, 2)
        # Normalize, then apply dropout to the attention weights.
        attn = self.dropout(self.softmax(scores))
        return torch.bmm(attn, v), attn, log_attn
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module '''
    # n_head: number of attention heads; d_model: embedding width;
    # d_k / d_v: per-head query-key and value widths.
    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v

        # Joint projections for all heads; split into heads in forward().
        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
        nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))

        # Scale attention logits by sqrt(d_k) (standard transformer scaling).
        self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
        self.layer_norm = nn.LayerNorm(d_model)

        self.fc = nn.Linear(n_head * d_v, d_model)
        nn.init.xavier_normal_(self.fc.weight)
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v):
        # q/k/v: (batch, len, d_model); returns (batch, len_q, d_model).
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head

        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        sz_b, len_v, _ = v.size()

        residual = q  # saved for the post-attention residual connection
        # Project and split into heads: (b, len, n_head, d_head).
        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)

        # Fold the head dimension into the batch dimension for bmm attention.
        q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
        k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
        v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv

        output, attn, log_attn = self.attention(q, k, v)

        # Un-fold heads and concatenate them along the feature dimension.
        output = output.view(n_head, sz_b, len_q, d_v)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)

        # Output projection + dropout, then residual connection and LayerNorm.
        output = self.dropout(self.fc(output))
        output = self.layer_norm(output + residual)

        return output
class FEATSTAR(FewShotModel):
    # FEAT* variant: the transformer adapts the prototypes *together with*
    # each query instance (query concatenated to the prototype set before
    # attention). No auxiliary regularization loss is produced.
    def __init__(self, args):
        super().__init__(args)
        # Embedding width of the chosen backbone.
        if args.backbone_class == 'ConvNet':
            hdim = 64
        elif args.backbone_class == 'Res12':
            hdim = 640
        elif args.backbone_class == 'Res18':
            hdim = 512
        elif args.backbone_class == 'WRN':
            hdim = 640
        else:
            raise ValueError('')

        # Single-head self-attention used as the set-to-set adaptation function.
        self.slf_attn = MultiHeadAttention(1, hdim, hdim, hdim, dropout=0.5)

    def _forward(self, instance_embs, support_idx, query_idx):
        emb_dim = instance_embs.size(-1)

        # organize support/query data: (batch, shot/query, way, emb_dim)
        support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))

        # get mean of the support
        proto = support.mean(dim=1) # Ntask x NK x d
        num_batch = proto.shape[0]
        num_proto = proto.shape[1]
        num_query = np.prod(query_idx.shape[-2:])

        # Pair every query with its own copy of the prototype set, so each
        # attention "set" is (prototypes + one query).
        query = query.view(-1, emb_dim).unsqueeze(1)
        proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
        proto = proto.view(num_batch*num_query, num_proto, emb_dim)

        # refine by Transformer
        combined = torch.cat([proto, query], 1) # Nk x (N + 1) x d, batch_size = NK
        combined = self.slf_attn(combined, combined, combined)
        # compute distance for all batches: split back into prototypes/query
        proto, query = combined.split(num_proto, 1)

        if self.args.use_euclidean:
            query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
            logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else: # cosine similarity: more memory efficient
            proto = F.normalize(proto, dim=-1) # normalize for cosine distance
            logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
            logits = logits.view(-1, num_proto)

        # No-Reg: the second element of the training tuple is always None.
        return logits, None
| 4,959 | 37.153846 | 114 | py |
FEAT | FEAT-master/model/models/matchnet.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
from model.utils import one_hot
# Note: This is the MatchingNet without FCE
# it predicts an instance based on nearest neighbor rule (not Nearest center mean)
class MatchNet(FewShotModel):
    # Matching-Network-style classifier without FCE: each query is scored by
    # cosine similarity to every individual support instance (nearest
    # neighbour rule), not to class means.
    def __init__(self, args):
        super().__init__(args)

    def _forward(self, instance_embs, support_idx, query_idx):
        emb_dim = instance_embs.size(-1)

        # organize support/query data: (batch, shot/query, way, emb_dim)
        support = instance_embs[support_idx.flatten()].view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.flatten()].view( *(query_idx.shape + (-1,)))

        # One-hot labels of the support set; way/shot differ between training
        # and evaluation episodes.
        if self.training:
            label_support = torch.arange(self.args.way).repeat(self.args.shot).type(torch.LongTensor)
            label_support_onehot = one_hot(label_support, self.args.way)
        else:
            label_support = torch.arange(self.args.eval_way).repeat(self.args.eval_shot).type(torch.LongTensor)
            label_support_onehot = one_hot(label_support, self.args.eval_way)
        if torch.cuda.is_available():
            label_support_onehot = label_support_onehot.cuda() # KN x N

        # get mean of the support
        num_batch = support.shape[0]
        num_way = support.shape[2]
        num_support = np.prod(support.shape[1:3])
        num_query = np.prod(query_idx.shape[-2:])
        support = support.view(num_batch, num_support, emb_dim) # Ntask x NK x d
        label_support_onehot = label_support_onehot.unsqueeze(0).repeat(num_batch, 1, 1)

        # query: (num_batch, num_query, num_proto, num_emb)
        # proto: (num_batch, num_proto, num_emb)
        support = F.normalize(support, dim=-1) # normalize for cosine distance
        query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
        # Cosine similarity of every query to every support instance, then
        # summed per class through the one-hot label matrix.
        logits = torch.bmm(query, support.permute([0,2,1]))
        logits = torch.bmm(logits, label_support_onehot) / self.args.temperature # KqN x N
        logits = logits.view(-1, num_way)

        if self.training:
            return logits, None
        else:
            return logits
| 2,299 | 40.818182 | 133 | py |
FEAT | FEAT-master/model/models/__init__.py | from model.models.base import FewShotModel
| 43 | 21 | 42 | py |
FEAT | FEAT-master/model/models/feat.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
class ScaledDotProductAttention(nn.Module):
    """Attention(Q, K, V) = softmax(QK^T / temperature) V.

    Also returns the (dropped-out) attention weights and their log-softmax.
    """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v):
        # Raw similarity logits, scaled down by the temperature.
        raw = torch.bmm(q, k.permute(0, 2, 1))
        raw = raw / self.temperature
        log_attn = F.log_softmax(raw, 2)
        # Attention distribution with dropout applied to the weights.
        weights = self.dropout(self.softmax(raw))
        output = torch.bmm(weights, v)
        return output, weights, log_attn
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module '''
    # n_head: number of attention heads; d_model: embedding width;
    # d_k / d_v: per-head query-key and value widths.
    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v

        # Joint projections for all heads; split into heads in forward().
        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
        nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))

        # Scale attention logits by sqrt(d_k) (standard transformer scaling).
        self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
        self.layer_norm = nn.LayerNorm(d_model)

        self.fc = nn.Linear(n_head * d_v, d_model)
        nn.init.xavier_normal_(self.fc.weight)
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v):
        # q/k/v: (batch, len, d_model); returns (batch, len_q, d_model).
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head

        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        sz_b, len_v, _ = v.size()

        residual = q  # saved for the post-attention residual connection
        # Project and split into heads: (b, len, n_head, d_head).
        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)

        # Fold the head dimension into the batch dimension for bmm attention.
        q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
        k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
        v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv

        output, attn, log_attn = self.attention(q, k, v)

        # Un-fold heads and concatenate them along the feature dimension.
        output = output.view(n_head, sz_b, len_q, d_v)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)

        # Output projection + dropout, then residual connection and LayerNorm.
        output = self.dropout(self.fc(output))
        output = self.layer_norm(output + residual)

        return output
class FEAT(FewShotModel):
    # FEAT: class prototypes are adapted per-task with a set-to-set
    # transformer; during training an auxiliary contrastive loss pulls all
    # instances of a class toward their adapted class center.
    def __init__(self, args):
        super().__init__(args)
        # Embedding width of the chosen backbone.
        if args.backbone_class == 'ConvNet':
            hdim = 64
        elif args.backbone_class == 'Res12':
            hdim = 640
        elif args.backbone_class == 'Res18':
            hdim = 512
        elif args.backbone_class == 'WRN':
            hdim = 640
        else:
            raise ValueError('')

        # Single-head self-attention used as the set-to-set adaptation function.
        self.slf_attn = MultiHeadAttention(1, hdim, hdim, hdim, dropout=0.5)

    def _forward(self, instance_embs, support_idx, query_idx):
        emb_dim = instance_embs.size(-1)

        # organize support/query data: (batch, shot/query, way, emb_dim)
        support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))

        # get mean of the support
        proto = support.mean(dim=1) # Ntask x NK x d
        num_batch = proto.shape[0]
        num_proto = proto.shape[1]
        num_query = np.prod(query_idx.shape[-2:])

        # query: (num_batch, num_query, num_proto, num_emb)
        # proto: (num_batch, num_proto, num_emb)
        # Task-specific adaptation: self-attention over the prototype set.
        proto = self.slf_attn(proto, proto, proto)
        if self.args.use_euclidean:
            query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
            proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
            proto = proto.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
            logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else:
            proto = F.normalize(proto, dim=-1) # normalize for cosine distance
            query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
            logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
            logits = logits.view(-1, num_proto)

        # for regularization: during training, adapt all instances of each
        # class (support + query together) and pull them toward their adapted
        # class centers.
        if self.training:
            aux_task = torch.cat([support.view(1, self.args.shot, self.args.way, emb_dim),
                                  query.view(1, self.args.query, self.args.way, emb_dim)], 1) # T x (K+Kq) x N x d
            num_query = np.prod(aux_task.shape[1:3])
            aux_task = aux_task.permute([0, 2, 1, 3])
            aux_task = aux_task.contiguous().view(-1, self.args.shot + self.args.query, emb_dim)
            # apply the transformation over the Aug Task
            aux_emb = self.slf_attn(aux_task, aux_task, aux_task) # T x N x (K+Kq) x d
            # compute class mean
            aux_emb = aux_emb.view(num_batch, self.args.way, self.args.shot + self.args.query, emb_dim)
            aux_center = torch.mean(aux_emb, 2) # T x N x d

            if self.args.use_euclidean:
                aux_task = aux_task.permute([1,0,2]).contiguous().view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
                aux_center = aux_center.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
                aux_center = aux_center.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
                logits_reg = - torch.sum((aux_center - aux_task) ** 2, 2) / self.args.temperature2
            else:
                aux_center = F.normalize(aux_center, dim=-1) # normalize for cosine distance
                aux_task = aux_task.permute([1,0,2]).contiguous().view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
                logits_reg = torch.bmm(aux_task, aux_center.permute([0,2,1])) / self.args.temperature2
                logits_reg = logits_reg.view(-1, num_proto)

            return logits, logits_reg
        else:
            return logits
| 6,494 | 42.590604 | 119 | py |
FEAT | FEAT-master/model/models/semi_feat.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention over batched 3-D tensors.

    forward(q, k, v) -> (output, attention weights, log attention weights).
    """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v):
        # Temperature-scaled similarity between queries and keys.
        similarity = q @ k.transpose(1, 2)
        similarity = similarity / self.temperature
        log_attn = F.log_softmax(similarity, 2)
        # Attention distribution, with dropout on the weights.
        attn = self.softmax(similarity)
        attn = self.dropout(attn)
        # Weighted sum of the values.
        return attn @ v, attn, log_attn
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module '''
    # n_head: number of attention heads; d_model: embedding width;
    # d_k / d_v: per-head query-key and value widths.
    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v

        # Joint projections for all heads; split into heads in forward().
        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
        nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))

        # Scale attention logits by sqrt(d_k) (standard transformer scaling).
        self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
        self.layer_norm = nn.LayerNorm(d_model)

        self.fc = nn.Linear(n_head * d_v, d_model)
        nn.init.xavier_normal_(self.fc.weight)
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v):
        # q/k/v: (batch, len, d_model); returns (batch, len_q, d_model).
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head

        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        sz_b, len_v, _ = v.size()

        residual = q  # saved for the post-attention residual connection
        # Project and split into heads: (b, len, n_head, d_head).
        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)

        # Fold the head dimension into the batch dimension for bmm attention.
        q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
        k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
        v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv

        output, attn, log_attn = self.attention(q, k, v)

        # Un-fold heads and concatenate them along the feature dimension.
        output = output.view(n_head, sz_b, len_q, d_v)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)

        # Output projection + dropout, then residual connection and LayerNorm.
        output = self.dropout(self.fc(output))
        output = self.layer_norm(output + residual)

        return output
class SemiFEAT(FewShotModel):
    # Transductive/semi-supervised variant of FEAT: the prototypes attend
    # over the *whole* episode (prototypes + unlabeled query instances), so
    # the query pool influences the adapted prototypes.
    def __init__(self, args):
        super().__init__(args)
        # Embedding width of the chosen backbone.
        if args.backbone_class == 'ConvNet':
            hdim = 64
        elif args.backbone_class == 'Res12':
            hdim = 640
        elif args.backbone_class == 'Res18':
            hdim = 512
        elif args.backbone_class == 'WRN':
            hdim = 640
        else:
            raise ValueError('')

        # Single-head attention used as the set-to-set adaptation function.
        self.slf_attn = MultiHeadAttention(1, hdim, hdim, hdim, dropout=0.5)

    def _forward(self, instance_embs, support_idx, query_idx):
        emb_dim = instance_embs.size(-1)

        # organize support/query data: (batch, shot/query, way, emb_dim)
        support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
        query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))

        # get mean of the support
        proto = support.mean(dim=1) # Ntask x NK x d
        num_batch = proto.shape[0]
        num_proto = proto.shape[1]
        num_query = np.prod(query_idx.shape[-2:])

        # query: (num_batch, num_query, num_proto, num_emb)
        # proto: (num_batch, num_proto, num_emb)
        # Keys/values are the full episode (prototypes + queries); only the
        # prototypes are used as attention queries.
        whole_set = torch.cat([proto, query.view(num_batch, -1, emb_dim)], 1)
        proto = self.slf_attn(proto, whole_set, whole_set)
        if self.args.use_euclidean:
            query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
            proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
            proto = proto.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
            logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
        else:
            proto = F.normalize(proto, dim=-1) # normalize for cosine distance
            query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
            logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
            logits = logits.view(-1, num_proto)

        # for regularization: during training, adapt all instances of each
        # class (support + query together) and pull them toward their adapted
        # class centers.
        if self.training:
            aux_task = torch.cat([support.view(1, self.args.shot, self.args.way, emb_dim),
                                  query.view(1, self.args.query, self.args.way, emb_dim)], 1) # T x (K+Kq) x N x d
            num_query = np.prod(aux_task.shape[1:3])
            aux_task = aux_task.permute([0, 2, 1, 3])
            aux_task = aux_task.contiguous().view(-1, self.args.shot + self.args.query, emb_dim)
            # apply the transformation over the Aug Task
            aux_emb = self.slf_attn(aux_task, aux_task, aux_task) # T x N x (K+Kq) x d
            # compute class mean
            aux_emb = aux_emb.view(num_batch, self.args.way, self.args.shot + self.args.query, emb_dim)
            aux_center = torch.mean(aux_emb, 2) # T x N x d

            if self.args.use_euclidean:
                aux_task = aux_task.permute([1,0,2]).contiguous().view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
                aux_center = aux_center.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
                aux_center = aux_center.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
                logits_reg = - torch.sum((aux_center - aux_task) ** 2, 2) / self.args.temperature2
            else:
                aux_center = F.normalize(aux_center, dim=-1) # normalize for cosine distance
                aux_task = aux_task.permute([1,0,2]).contiguous().view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
                logits_reg = torch.bmm(aux_task, aux_center.permute([0,2,1])) / self.args.temperature2
                logits_reg = logits_reg.view(-1, num_proto)

            return logits, logits_reg
        else:
            return logits
| 6,584 | 42.9 | 119 | py |
FEAT | FEAT-master/model/dataloader/tiered_imagenet.py | from __future__ import print_function
import os
import os.path as osp
import numpy as np
import pickle
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image
# Set the appropriate paths of the datasets here.
THIS_PATH = osp.dirname(__file__)
ROOT_PATH1 = osp.abspath(osp.join(THIS_PATH, '..', '..', '..'))
ROOT_PATH2 = osp.abspath(osp.join(THIS_PATH, '..', '..'))
IMAGE_PATH = osp.join(ROOT_PATH1, 'data/tieredimagenet/')
SPLIT_PATH = osp.join(ROOT_PATH2, 'data/miniimagenet/split')
def buildLabelIndex(labels):
    """Map each distinct label to the list of positions where it occurs.

    :param labels: iterable of (hashable) labels.
    :return: dict {label: [index, ...]} in first-appearance order.
    """
    label2inds = {}
    for idx, label in enumerate(labels):
        label2inds.setdefault(label, []).append(idx)
    return label2inds
def load_data(file):
    """Unpickle *file*, falling back to latin1 decoding for Python-2 pickles.

    :param file: path to a pickle file.
    :return: the unpickled object.
    :raises OSError: if the file cannot be opened (no longer silently retried).
    """
    try:
        with open(file, 'rb') as fo:
            return pickle.load(fo)
    except (UnicodeDecodeError, pickle.UnpicklingError):
        # Pickles written by Python 2 store 8-bit str objects; re-reading with
        # latin1 maps every byte 1:1 so the payload survives unchanged.
        # (The original used a bare `except:`, which also swallowed
        # KeyboardInterrupt and file errors — narrowed here.)
        with open(file, 'rb') as f:
            u = pickle._Unpickler(f)
            u.encoding = 'latin1'
            return u.load()
# Per-split (image .npz archive, label .pkl) file pairs for tieredImageNet.
file_path = {'train':[os.path.join(IMAGE_PATH, 'train_images.npz'), os.path.join(IMAGE_PATH, 'train_labels.pkl')],
             'val':[os.path.join(IMAGE_PATH, 'val_images.npz'), os.path.join(IMAGE_PATH,'val_labels.pkl')],
             'test':[os.path.join(IMAGE_PATH, 'test_images.npz'), os.path.join(IMAGE_PATH, 'test_labels.pkl')]}
class tieredImageNet(data.Dataset):
    # tieredImageNet dataset: images stored as a single .npz array per split,
    # labels as a pickle; raw label ids are re-mapped to consecutive integers.
    def __init__(self, setname, args, augment=False):
        assert(setname=='train' or setname=='val' or setname=='test')
        image_path = file_path[setname][0]
        label_path = file_path[setname][1]

        data_train = load_data(label_path)
        labels = data_train['labels']
        self.data = np.load(image_path)['images']

        # Re-map raw labels to consecutive ids 0..num_class-1 in
        # first-appearance order.
        # NOTE(review): reconstructed from a whitespace-mangled dump — the
        # `label.append(lb)` is assumed to sit at loop level (one entry per
        # image); confirm against the original file.
        label = []
        lb = -1
        self.wnids = []
        for wnid in labels:
            if wnid not in self.wnids:
                self.wnids.append(wnid)
                lb += 1
            label.append(lb)

        self.label = label
        self.num_class = len(set(label))

        # Data augmentation only for the training split.
        if augment and setname == 'train':
            transforms_list = [
                  transforms.RandomCrop(84, padding=8),
                  transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                  transforms.RandomHorizontalFlip(),
                  transforms.ToTensor(),
                ]
        else:
            transforms_list = [
                  transforms.ToTensor(),
                ]

        # Transformation: each backbone was trained with its own
        # normalization statistics.
        if args.backbone_class == 'ConvNet':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(np.array([0.485, 0.456, 0.406]),
                                     np.array([0.229, 0.224, 0.225]))
            ])
        elif args.backbone_class == 'ResNet':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(np.array([x / 255.0 for x in [125.3, 123.0, 113.9]]),
                                     np.array([x / 255.0 for x in [63.0, 62.1, 66.7]]))
            ])
        elif args.backbone_class == 'Res12':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(np.array([x / 255.0 for x in [120.39586422, 115.59361427, 104.54012653]]),
                                     np.array([x / 255.0 for x in [70.68188272, 68.27635443, 72.54505529]]))
            ])
        elif args.backbone_class == 'Res18':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
        elif args.backbone_class == 'WRN':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
        else:
            raise ValueError('Non-supported Network Types. Please Revise Data Pre-Processing Scripts.')

    def __getitem__(self, index):
        # Images are stored as uint8 arrays; convert via PIL for transforms.
        img, label = self.data[index], self.label[index]
        img = self.transform(Image.fromarray(img))
        return img, label

    def __len__(self):
        return len(self.data)
| 4,423 | 35.561983 | 114 | py |
FEAT | FEAT-master/model/dataloader/cub.py | import os.path as osp
import PIL
from PIL import Image
import numpy as np
from torch.utils.data import Dataset
from torchvision import transforms
THIS_PATH = osp.dirname(__file__)
ROOT_PATH1 = osp.abspath(osp.join(THIS_PATH, '..', '..', '..'))
ROOT_PATH2 = osp.abspath(osp.join(THIS_PATH, '..', '..'))
IMAGE_PATH = osp.join(ROOT_PATH1, 'data/cub')
SPLIT_PATH = osp.join(ROOT_PATH2, 'data/cub/split')
CACHE_PATH = osp.join(ROOT_PATH2, '.cache/')
# This is for the CUB dataset
# It is notable, we assume the cub images are cropped based on the given bounding boxes
# The concept labels are based on the attribute value, which are for further use (and not used in this work)
class CUB(Dataset):
    # CUB-200 birds dataset; images are assumed to be pre-cropped using the
    # provided bounding boxes. Samples are listed in a per-split CSV of
    # (filename, class-wnid) rows.
    def __init__(self, setname, args, augment=False):
        im_size = args.orig_imsize
        txt_path = osp.join(SPLIT_PATH, setname + '.csv')
        # NOTE(review): this read is unused — parse_csv() re-reads the file.
        lines = [x.strip() for x in open(txt_path, 'r').readlines()][1:]

        cache_path = osp.join( CACHE_PATH, "{}.{}.{}.pt".format(self.__class__.__name__, setname, im_size) )

        # im_size == -1 disables the image cache entirely.
        self.use_im_cache = ( im_size != -1 ) # not using cache
        if self.use_im_cache:
            # NOTE(review): `torch` and `identity` are used below but do not
            # appear in this file's visible imports — the cache path would
            # raise NameError as-is; confirm against the full file.
            if not osp.exists(cache_path):
                print('* Cache miss... Preprocessing {}...'.format(setname))
                # Negative sizes mean "no resize"; otherwise pre-resize once.
                resize_ = identity if im_size < 0 else transforms.Resize(im_size)
                data, label = self.parse_csv(txt_path)
                self.data = [ resize_(Image.open(path).convert('RGB')) for path in data ]
                self.label = label
                print('* Dump cache from {}'.format(cache_path))
                torch.save({'data': self.data, 'label': self.label }, cache_path)
            else:
                print('* Load cache from {}'.format(cache_path))
                cache = torch.load(cache_path)
                self.data = cache['data']
                self.label = cache['label']
        else:
            self.data, self.label = self.parse_csv(txt_path)

        self.num_class = np.unique(np.array(self.label)).shape[0]
        image_size = 84

        # Random augmentation only for the training split.
        if augment and setname == 'train':
            transforms_list = [
                  transforms.RandomResizedCrop(image_size),
                  transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                  transforms.RandomHorizontalFlip(),
                  transforms.ToTensor(),
                ]
        else:
            transforms_list = [
                  transforms.Resize(92),
                  transforms.CenterCrop(image_size),
                  transforms.ToTensor(),
                ]

        # Transformation: normalization statistics are backbone-specific.
        if args.backbone_class == 'ConvNet':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(np.array([0.485, 0.456, 0.406]),
                                     np.array([0.229, 0.224, 0.225]))
            ])
        elif args.backbone_class == 'Res12':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(np.array([x / 255.0 for x in [120.39586422, 115.59361427, 104.54012653]]),
                                     np.array([x / 255.0 for x in [70.68188272, 68.27635443, 72.54505529]]))
            ])
        elif args.backbone_class == 'Res18':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
        elif args.backbone_class == 'WRN':
            self.transform = transforms.Compose(
                transforms_list + [
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
        else:
            raise ValueError('Non-supported Network Types. Please Revise Data Pre-Processing Scripts.')

    def parse_csv(self, txt_path):
        # Parse (name, wnid) rows; classes are numbered 0..N-1 in
        # first-appearance order of their wnid.
        data = []
        label = []
        lb = -1
        self.wnids = []
        lines = [x.strip() for x in open(txt_path, 'r').readlines()][1:]

        for l in lines:
            context = l.split(',')
            name = context[0]
            wnid = context[1]
            path = osp.join(IMAGE_PATH, name)
            if wnid not in self.wnids:
                self.wnids.append(wnid)
                lb += 1

            data.append(path)
            label.append(lb)

        return data, label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        data, label = self.data[i], self.label[i]
        # Cached entries are already PIL images; otherwise load from disk.
        if self.use_im_cache:
            image = self.transform(data)
        else:
            image = self.transform(Image.open(data).convert('RGB'))
        return image, label
| 4,840 | 38.040323 | 112 | py |
FEAT | FEAT-master/model/dataloader/mini_imagenet.py | import torch
import os.path as osp
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm import tqdm
import numpy as np
THIS_PATH = osp.dirname(__file__)
ROOT_PATH = osp.abspath(osp.join(THIS_PATH, '..', '..'))
ROOT_PATH2 = osp.abspath(osp.join(THIS_PATH, '..', '..', '..'))
IMAGE_PATH1 = osp.join(ROOT_PATH2, 'data/miniimagenet/images')
SPLIT_PATH = osp.join(ROOT_PATH, 'data/miniimagenet/split')
CACHE_PATH = osp.join(ROOT_PATH, '.cache/')
def identity(x):
    """Return *x* unchanged; used in place of a Resize transform when im_size < 0."""
    return x
class MiniImageNet(Dataset):
    """MiniImageNet few-shot dataset.

    Reads a split CSV (``train``/``val``/``test``) of ``name,wnid`` rows,
    optionally caches decoded (and pre-resized) images in memory via a
    ``.pt`` cache file, and applies a backbone-specific torchvision
    transform pipeline.

    Usage:
        ``MiniImageNet('train', args, augment=True)``
    """

    def __init__(self, setname, args, augment=False):
        im_size = args.orig_imsize
        csv_path = osp.join(SPLIT_PATH, setname + '.csv')
        cache_path = osp.join(CACHE_PATH, "{}.{}.{}.pt".format(self.__class__.__name__, setname, im_size))

        # im_size == -1 disables the in-memory image cache entirely.
        self.use_im_cache = (im_size != -1)
        if self.use_im_cache:
            if not osp.exists(cache_path):
                print('* Cache miss... Preprocessing {}...'.format(setname))
                # im_size < 0 (other than -1) keeps original resolution.
                resize_ = identity if im_size < 0 else transforms.Resize(im_size)
                data, label = self.parse_csv(csv_path, setname)
                self.data = [resize_(Image.open(path).convert('RGB')) for path in data]
                self.label = label
                print('* Dump cache from {}'.format(cache_path))
                torch.save({'data': self.data, 'label': self.label}, cache_path)
            else:
                print('* Load cache from {}'.format(cache_path))
                cache = torch.load(cache_path)
                self.data = cache['data']
                self.label = cache['label']
        else:
            # No cache: keep file paths and decode lazily in __getitem__.
            self.data, self.label = self.parse_csv(csv_path, setname)

        self.num_class = len(set(self.label))

        image_size = 84
        if augment and setname == 'train':
            transforms_list = [
                transforms.RandomResizedCrop(image_size),
                transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
            ]
        else:
            transforms_list = [
                transforms.Resize(92),
                transforms.CenterCrop(image_size),
                transforms.ToTensor(),
            ]

        # Backbone-specific normalization appended to the shared pipeline.
        if args.backbone_class == 'ConvNet':
            normalize = transforms.Normalize(np.array([0.485, 0.456, 0.406]),
                                             np.array([0.229, 0.224, 0.225]))
        elif args.backbone_class == 'Res12':
            normalize = transforms.Normalize(np.array([x / 255.0 for x in [120.39586422, 115.59361427, 104.54012653]]),
                                             np.array([x / 255.0 for x in [70.68188272, 68.27635443, 72.54505529]]))
        elif args.backbone_class in ('Res18', 'WRN'):
            # Res18 and WRN share the standard ImageNet statistics
            # (the original duplicated this branch verbatim).
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                             std=[0.229, 0.224, 0.225])
        else:
            raise ValueError('Non-supported Network Types. Please Revise Data Pre-Processing Scripts.')
        self.transform = transforms.Compose(transforms_list + [normalize])

    def parse_csv(self, csv_path, setname):
        """Parse the split CSV into (paths, labels); also fills ``self.wnids``.

        ``setname`` is unused but kept for interface compatibility with callers.
        """
        data = []
        label = []
        lb = -1  # running class index, bumped on each unseen wnid
        self.wnids = []
        # Context manager fixes the file-handle leak of the original
        # `open(...).readlines()` expression.
        with open(csv_path, 'r') as f:
            lines = [x.strip() for x in f.readlines()][1:]  # skip header row
        for l in tqdm(lines, ncols=64):
            name, wnid = l.split(',')
            path = osp.join(IMAGE_PATH1, name)
            if wnid not in self.wnids:
                self.wnids.append(wnid)
                lb += 1
            data.append(path)
            label.append(lb)
        return data, label

    def __len__(self):
        # Dataset size == number of parsed image entries.
        return len(self.data)

    def __getitem__(self, i):
        """Return ``(transformed_image, label)`` for dataset index *i*."""
        data, label = self.data[i], self.label[i]
        if self.use_im_cache:
            # Cached entries are already decoded PIL images.
            image = self.transform(data)
        else:
            # Otherwise *data* is a file path: decode on the fly.
            image = self.transform(Image.open(data).convert('RGB'))
        return image, label
| 4,581 | 36.252033 | 112 | py |
FEAT | FEAT-master/model/dataloader/samplers.py | import torch
import numpy as np
class CategoriesSampler():
    """Yield few-shot episode batches: *n_cls* random classes, *n_per* samples each.

    Each yielded batch is a flat LongTensor of dataset indices arranged by
    stack -> transpose -> flatten, so sample ``i`` of every chosen class
    appears before sample ``i + 1`` of any class.
    """

    def __init__(self, label, n_batch, n_cls, n_per):
        self.n_batch = n_batch  # episodes per epoch
        self.n_cls = n_cls      # "way"
        self.n_per = n_per      # "shot" + "query"
        label = np.array(label)
        # m_ind[c] holds every dataset position whose label equals c.
        self.m_ind = []
        for c in range(max(label) + 1):
            positions = np.argwhere(label == c).reshape(-1)
            self.m_ind.append(torch.from_numpy(positions))

    def __len__(self):
        return self.n_batch

    def __iter__(self):
        for _ in range(self.n_batch):
            # Pick n_cls distinct classes, then n_per distinct samples each.
            picked = torch.randperm(len(self.m_ind))[:self.n_cls]
            per_class = []
            for c in picked:
                candidates = self.m_ind[c]
                chosen = torch.randperm(len(candidates))[:self.n_per]
                per_class.append(candidates[chosen])
            yield torch.stack(per_class).t().reshape(-1)
class RandomSampler():
    """Yield *n_batch* batches of *n_per* indices drawn uniformly without replacement."""

    def __init__(self, label, n_batch, n_per):
        self.n_batch = n_batch
        self.n_per = n_per
        self.label = np.array(label)
        self.num_label = self.label.shape[0]

    def __len__(self):
        return self.n_batch

    def __iter__(self):
        for _ in range(self.n_batch):
            yield torch.randperm(self.num_label)[:self.n_per]
# sample for each class
class ClassSampler():
    """Iterate over classes in order, yielding a shuffled index tensor per class.

    With ``n_per=None`` every index of the class is yielded; otherwise only
    the first *n_per* of the shuffled order.
    """

    def __init__(self, label, n_per=None):
        self.n_per = n_per
        label = np.array(label)
        # m_ind[c] holds every dataset position whose label equals c.
        self.m_ind = []
        for c in range(max(label) + 1):
            positions = np.argwhere(label == c).reshape(-1)
            self.m_ind.append(torch.from_numpy(positions))

    def __len__(self):
        return len(self.m_ind)

    def __iter__(self):
        for c in torch.arange(len(self.m_ind)):
            indices = self.m_ind[int(c)]
            order = torch.randperm(len(indices))
            if self.n_per is not None:
                order = order[:self.n_per]
            yield indices[order]
# for ResNet Fine-Tune, which output the same index of task examples several times
class InSetSampler():
    """Yield *n_batch* sub-batches of size *n_sbatch* drawn from a fixed index pool."""

    def __init__(self, n_batch, n_sbatch, pool):  # pool is a tensor
        self.n_batch = n_batch
        self.n_sbatch = n_sbatch
        self.pool = pool
        self.pool_size = pool.shape[0]

    def __len__(self):
        return self.n_batch

    def __iter__(self):
        for _ in range(self.n_batch):
            pick = torch.randperm(self.pool_size)[:self.n_sbatch]
            yield self.pool[pick]
yield batch | 2,586 | 27.119565 | 82 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/setup.py | import os
# Packaging script for the PyGame Learning Environment (PLE).
from setuptools import find_packages
from setuptools import setup
# Absolute path of this file's directory (conventional boilerplate; not
# referenced by the setup() call below).
here = os.path.abspath(os.path.dirname(__file__))
# Runtime dependencies installed alongside the package.
install_requires = [
    "numpy",
    "Pillow"
]
setup(
    name='ple',
    version='0.0.1',
    description='PyGame Learning Environment',
    classifiers=[
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2.7",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    url='https://github.com/ntasfi/PyGame-Learning-Environment',
    author='Norman Tasfi',
    author_email='first letter of first name plus last at googles email service.',
    keywords='',
    license="MIT",
    packages=find_packages(),
    include_package_data=False,
    zip_safe=False,
    install_requires=install_requires
)
| 874 | 24.735294 | 79 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/ple.py | import numpy as np
from PIL import Image # pillow
import sys
import pygame
from .games.base.pygamewrapper import PyGameWrapper
class PLE(object):
    """
    ple.PLE(
        game, fps=30,
        frame_skip=1, num_steps=1,
        reward_values={}, force_fps=True,
        display_screen=False, add_noop_action=True,
        NOOP=K_F15, state_preprocessor=None,
        rng=24
    )
    Main wrapper that interacts with games.
    Provides a similar interface to Arcade Learning Environment.
    Parameters
    ----------
    game: Class from ple.games.base
        The game the PLE environment manipulates and maintains.
    fps: int (default: 30)
        The desired frames per second we want to run our game at.
        Typical settings are 30 and 60 fps.
    frame_skip: int (default: 1)
        The number of times we skip getting observations while
        repeating an action.
    num_steps: int (default: 1)
        The number of times we repeat an action.
    reward_values: dict
        This contains the rewards we wish to give our agent based on
        different actions in game. The current defaults are as follows:
        .. code-block:: python
            rewards = {
                "positive": 1.0,
                "negative": -1.0,
                "tick": 0.0,
                "loss": -5.0,
                "win": 5.0
            }
        Tick is given to the agent at each game step. You can selectively
        adjust the rewards by passing a dictionary with the key you want to
        change. Eg. If we want to adjust the negative reward and the tick
        reward we would pass in the following:
        .. code-block:: python
            rewards = {
                "negative": -2.0,
                "tick": -0.01
            }
        Keep in mind that the tick is applied at each frame. If the game is
        running at 60fps the agent will get a reward of 60*tick.
    force_fps: bool (default: True)
        If False PLE delays between game.step() calls to ensure the fps is
        specified. If not PLE passes an elapsed time delta to ensure the
        game steps by an amount of time consistent with the specified fps.
        This is usually set to True as it allows the game to run as fast as
        possible which speeds up training.
    display_screen: bool (default: False)
        If we draw updates to the screen. Disabling this speeds up
        interaction speed. This can be toggled to True during testing phases
        so you can observe the agents progress.
    add_noop_action: bool (default: True)
        This inserts the NOOP action specified as a valid move the agent
        can make.
    state_preprocessor: python function (default: None)
        Python function which takes a dict representing game state and
        returns a numpy array.
    rng: numpy.random.RandomState, int, array_like or None. (default: 24)
        Number generator which is used by PLE and the games.
    """

    def __init__(self,
                 game, fps=30, frame_skip=1, num_steps=1,
                 reward_values=None, force_fps=True, display_screen=False,
                 add_noop_action=True, state_preprocessor=None, rng=24):
        self.game = game
        self.fps = fps
        self.frame_skip = frame_skip
        self.NOOP = None
        self.num_steps = num_steps
        self.force_fps = force_fps
        self.display_screen = display_screen
        self.add_noop_action = add_noop_action
        self.last_action = []
        self.action = []
        self.previous_score = 0
        self.frame_count = 0
        # update the scores of games with values we pick.
        # (default is None rather than a shared mutable {} default argument;
        # behavior is unchanged since an empty dict was falsy too)
        if reward_values:
            self.game.adjustRewards(reward_values)
        if isinstance(self.game, PyGameWrapper):
            if isinstance(rng, np.random.RandomState):
                self.rng = rng
            else:
                self.rng = np.random.RandomState(rng)
            # some pygame games preload the images
            # to speed resetting and inits up.
            pygame.display.set_mode((1, 1), pygame.NOFRAME)
        else:
            # in order to use doom, install following https://github.com/openai/doom-py
            from .games.base.doomwrapper import DoomWrapper
            if isinstance(self.game, DoomWrapper):
                self.rng = rng
        self.game.setRNG(self.rng)
        self.init()
        self.state_preprocessor = state_preprocessor
        self.state_dim = None
        if self.state_preprocessor is not None:
            self.state_dim = self.game.getGameState()
            if self.state_dim is None:
                raise ValueError(
                    "Asked to return non-visual state on game that does not support it!")
            else:
                self.state_dim = self.state_preprocessor(self.state_dim).shape
        if game.allowed_fps is not None and self.fps != game.allowed_fps:
            # Bug fix: the original formatted allowed_fps into both slots,
            # hiding the fps the caller actually requested.
            raise ValueError("Game requires %dfps, was given %d." %
                             (game.allowed_fps, self.fps))

    def _tick(self):
        """
        Calculates the elapsed time between frames or ticks.
        """
        if self.force_fps:
            # Fixed timestep: pretend exactly 1/fps seconds (in ms) elapsed.
            return 1000.0 / self.fps
        else:
            # Real-time: let the game clock throttle and report elapsed ms.
            return self.game.tick(self.fps)

    def init(self):
        """
        Initializes the game. This depends on the game and could include
        doing things such as setting up the display, clock etc.
        This method should be explicitly called.
        """
        self.game._setup()
        self.game.init()  # this is the games setup/init

    def getActionSet(self):
        """
        Gets the actions the game supports. Optionally inserts the NOOP
        action if PLE has add_noop_action set to True.
        Returns
        --------
        list of pygame.constants
            The agent can simply select the index of the action
            to perform.
        """
        actions = self.game.actions
        # Games may expose actions as a dict (or an already-extracted view /
        # sequence); reduce either form to a plain list of key constants.
        # Bug fix: the original Python-3 branch referenced the undefined name
        # ``dict_values`` and raised a NameError for non-dict actions.
        if isinstance(actions, dict):
            actions = list(actions.values())
        else:
            actions = list(actions)
        if self.add_noop_action:
            actions.append(self.NOOP)
        return actions

    def getFrameNumber(self):
        """
        Gets the current number of frames the agent has seen
        since PLE was initialized.
        Returns
        --------
        int
        """
        return self.frame_count

    def game_over(self):
        """
        Returns True if the game has reached a terminal state and
        False otherwise.
        This state is game dependent.
        Returns
        -------
        bool
        """
        return self.game.game_over()

    def score(self):
        """
        Gets the score the agent currently has in game.
        Returns
        -------
        int
        """
        return self.game.getScore()

    def lives(self):
        """
        Gets the number of lives the agent has left. Not all games have
        the concept of lives.
        Returns
        -------
        int
        """
        return self.game.lives

    def reset_game(self):
        """
        Performs a reset of the games to a clean initial state.
        """
        self.last_action = []
        self.action = []
        self.previous_score = 0.0
        self.game.reset()

    def getScreenRGB(self):
        """
        Gets the current game screen in RGB format.
        Returns
        --------
        numpy uint8 array
            Returns a numpy array with the shape (width, height, 3).
        """
        return self.game.getScreenRGB()

    def getScreenGrayscale(self):
        """
        Gets the current game screen in Grayscale format. Converts from RGB using relative luminance.
        Returns
        --------
        numpy uint8 array
            Returns a numpy array with the shape (width, height).
        """
        frame = self.getScreenRGB()
        # Relative-luminance weights for the R, G and B channels.
        frame = 0.21 * frame[:, :, 0] + 0.72 * \
            frame[:, :, 1] + 0.07 * frame[:, :, 2]
        frame = np.round(frame).astype(np.uint8)
        return frame

    def saveScreen(self, filename):
        """
        Saves the current screen to png file.
        Parameters
        ----------
        filename : string
            The path with filename to where we want the image saved.
        """
        frame = Image.fromarray(self.getScreenRGB())
        frame.save(filename)

    def getScreenDims(self):
        """
        Gets the games screen dimensions.
        Returns
        -------
        tuple of int
            Returns a tuple of the following format (screen_width, screen_height).
        """
        return self.game.getScreenDims()

    def getGameStateDims(self):
        """
        Gets the games non-visual state dimensions.
        Returns
        -------
        tuple of int or None
            Returns a tuple of the state vectors shape or None if the game does not support it.
        """
        return self.state_dim

    def getGameState(self):
        """
        Gets a non-visual state representation of the game.
        This can include items such as player position, velocity, ball location and velocity etc.
        Returns
        -------
        dict or None
            It returns a dict of game information. This greatly depends on the game in question and must be referenced against each game.
            If no state is available or supported None will be returned back.
        """
        state = self.game.getGameState()
        if state is not None:
            if self.state_preprocessor is not None:
                return self.state_preprocessor(state)
            return state
        else:
            raise ValueError(
                "Was asked to return state vector for game that does not support it!")

    def act(self, action):
        """
        Perform an action on the game. We lockstep frames with actions. If act is not called the game will not run.
        Parameters
        ----------
        action : int
            The index of the action we wish to perform. The index usually corresponds to the index item returned by getActionSet().
        Returns
        -------
        int
            Returns the reward that the agent has accumulated while performing the action.
        """
        # The same action is repeated frame_skip times; rewards accumulate.
        return sum(self._oneStepAct(action) for i in range(self.frame_skip))

    def _draw_frame(self):
        """
        Decides if the screen will be drawn too
        """
        self.game._draw_frame(self.display_screen)

    def _oneStepAct(self, action):
        """
        Performs an action on the game. Checks if the game is over or if the provided action is valid based on the allowed action set.
        """
        if self.game_over():
            return 0.0
        if action not in self.getActionSet():
            action = self.NOOP
        self._setAction(action)
        for i in range(self.num_steps):
            time_elapsed = self._tick()
            self.game.step(time_elapsed)
            self._draw_frame()
        self.frame_count += self.num_steps
        return self._getReward()

    def _setAction(self, action):
        """
        Instructs the game to perform an action if its not a NOOP
        """
        if action is not None:
            self.game._setAction(action, self.last_action)
        self.last_action = action

    def _getReward(self):
        """
        Returns the reward the agent has gained as the difference between the last action and the current one.
        """
        reward = self.game.getScore() - self.previous_score
        self.previous_score = self.game.getScore()
        return reward
| 11,976 | 27.314421 | 137 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/__init__.py | from .ple import PLE
| 21 | 10 | 20 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/waterworld.py | import pygame
import sys
import math
#import .base
from .base.pygamewrapper import PyGameWrapper
from .utils.vec2d import vec2d
from .utils import percent_round_int
from pygame.constants import K_w, K_a, K_s, K_d
from .primitives import Player, Creep
class WaterWorld(PyGameWrapper):
    """
    Based Karpthy's WaterWorld in `REINFORCEjs`_.
    .. _REINFORCEjs: https://github.com/karpathy/reinforcejs
    Parameters
    ----------
    width : int
        Screen width.
    height : int
        Screen height, recommended to be same dimension as width.
    num_creeps : int (default: 3)
        The number of creeps on the screen at once.
    """

    def __init__(self,
                 width=48,
                 height=48,
                 num_creeps=3):
        actions = {
            "up": K_w,
            "left": K_a,
            "right": K_d,
            "down": K_s
        }
        PyGameWrapper.__init__(self, width, height, actions=actions)
        self.BG_COLOR = (255, 255, 255)
        self.N_CREEPS = num_creeps
        # Index 0 = GOOD (positive reward), index 1 = BAD (negative reward).
        self.CREEP_TYPES = ["GOOD", "BAD"]
        self.CREEP_COLORS = [(40, 140, 40), (150, 95, 95)]
        radius = percent_round_int(width, 0.047)
        self.CREEP_RADII = [radius, radius]
        self.CREEP_REWARD = [
            self.rewards["positive"],
            self.rewards["negative"]]
        # Speeds scale with screen width so behavior is resolution-independent.
        self.CREEP_SPEED = 0.25 * width
        self.AGENT_COLOR = (60, 60, 140)
        self.AGENT_SPEED = 0.25 * width
        self.AGENT_RADIUS = radius
        self.AGENT_INIT_POS = (self.width / 2, self.height / 2)
        self.creep_counts = {
            "GOOD": 0,
            "BAD": 0
        }
        self.dx = 0
        self.dy = 0
        self.player = None
        self.creeps = None

    def _handle_player_events(self):
        """Translate pygame key events into an (dx, dy) acceleration request."""
        self.dx = 0
        self.dy = 0
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.KEYDOWN:
                key = event.key
                if key == self.actions["left"]:
                    self.dx -= self.AGENT_SPEED
                if key == self.actions["right"]:
                    self.dx += self.AGENT_SPEED
                if key == self.actions["up"]:
                    self.dy -= self.AGENT_SPEED
                if key == self.actions["down"]:
                    self.dy += self.AGENT_SPEED

    def _add_creep(self):
        """Spawn one creep of a random type, at least 1.5px away from the player."""
        creep_type = self.rng.choice([0, 1])
        creep = None
        pos = (0, 0)
        dist = 0.0
        # Rejection-sample positions until the creep is clear of the player.
        while dist < 1.5:
            radius = self.CREEP_RADII[creep_type] * 1.5
            # Bug fix: the original sampled BOTH coordinates against
            # self.height, which could spawn creeps off-screen horizontally
            # on non-square screens. Sample x against width, y against height.
            pos = (self.rng.uniform(radius, self.width - radius),
                   self.rng.uniform(radius, self.height - radius))
            dist = math.sqrt(
                (self.player.pos.x - pos[0])**2 + (self.player.pos.y - pos[1])**2)
        creep = Creep(
            self.CREEP_COLORS[creep_type],
            self.CREEP_RADII[creep_type],
            pos,
            self.rng.choice([-1, 1], 2),
            self.rng.rand() * self.CREEP_SPEED,
            self.CREEP_REWARD[creep_type],
            self.CREEP_TYPES[creep_type],
            self.width,
            self.height,
            self.rng.rand()
        )
        self.creeps.add(creep)
        self.creep_counts[self.CREEP_TYPES[creep_type]] += 1

    def getGameState(self):
        """
        Returns
        -------
        dict
            * player x position.
            * player y position.
            * player x velocity.
            * player y velocity.
            * player distance to each creep
        """
        state = {
            "player_x": self.player.pos.x,
            "player_y": self.player.pos.y,
            "player_velocity_x": self.player.vel.x,
            "player_velocity_y": self.player.vel.y,
            "creep_dist": {
                "GOOD": [],
                "BAD": []
            },
            "creep_pos": {
                "GOOD": [],
                "BAD": []
            }
        }
        for c in self.creeps:
            dist = math.sqrt((self.player.pos.x - c.pos.x) **
                             2 + (self.player.pos.y - c.pos.y)**2)
            state["creep_dist"][c.TYPE].append(dist)
            state["creep_pos"][c.TYPE].append([c.pos.x, c.pos.y])
        return state

    def getScore(self):
        # Accumulated reward so far this episode.
        return self.score

    def game_over(self):
        """
        Return bool if the game has 'finished'
        """
        # Episode ends once every GOOD creep has been collected.
        return (self.creep_counts['GOOD'] == 0)

    def init(self):
        """
        Starts/Resets the game to its inital state
        """
        self.creep_counts = {"GOOD": 0, "BAD": 0}
        # Reuse the player/creep containers across resets when possible.
        if self.player is None:
            self.player = Player(
                self.AGENT_RADIUS, self.AGENT_COLOR,
                self.AGENT_SPEED, self.AGENT_INIT_POS,
                self.width, self.height
            )
        else:
            self.player.pos = vec2d(self.AGENT_INIT_POS)
            self.player.vel = vec2d((0.0, 0.0))
        if self.creeps is None:
            self.creeps = pygame.sprite.Group()
        else:
            self.creeps.empty()
        for i in range(self.N_CREEPS):
            self._add_creep()
        self.score = 0
        self.ticks = 0
        self.lives = -1

    def step(self, dt):
        """
        Perform one step of game emulation.
        """
        dt /= 1000.0  # pygame reports milliseconds; physics uses seconds
        self.screen.fill(self.BG_COLOR)
        self.score += self.rewards["tick"]
        self._handle_player_events()
        self.player.update(self.dx, self.dy, dt)
        # Collisions consume the creep (dokill=True) and pay out its reward;
        # a replacement creep is spawned to keep the population constant.
        hits = pygame.sprite.spritecollide(self.player, self.creeps, True)
        for creep in hits:
            self.creep_counts[creep.TYPE] -= 1
            self.score += creep.reward
            self._add_creep()
        if self.creep_counts["GOOD"] == 0:
            self.score += self.rewards["win"]
        self.creeps.update(dt)
        self.player.draw(self.screen)
        self.creeps.draw(self.screen)
if __name__ == "__main__":
    # Manual smoke test: play WaterWorld interactively in a 256x256 window.
    import numpy as np
    pygame.init()
    game = WaterWorld(width=256, height=256, num_creeps=10)
    game.screen = pygame.display.set_mode(game.getScreenDims(), 0, 32)
    game.clock = pygame.time.Clock()
    game.rng = np.random.RandomState(24)
    game.init()
    # tick_busy_loop caps the loop at 30 fps and returns the elapsed
    # milliseconds, which step() uses as its time delta.
    while True:
        dt = game.clock.tick_busy_loop(30)
        game.step(dt)
        pygame.display.update()
| 6,382 | 25.595833 | 82 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/raycastmaze.py |
#import .base
from .base.pygamewrapper import PyGameWrapper
import pygame
import numpy as np
import math
from .raycast import RayCastPlayer
from pygame.constants import K_w, K_a, K_d, K_s
class RaycastMaze(PyGameWrapper, RayCastPlayer):
    """
    Parameters
    ----------
    init_pos : tuple of int (default: (1,1))
        The position the player starts on in the grid. The grid is zero indexed.
    resolution : int (default: 1)
        This instructs the Raycast engine on how many vertical lines to use when drawing the screen. The number is equal to the width / resolution.
    move_speed : int (default: 20)
        How fast the agent moves forwards or backwards.
    turn_speed : int (default: 13)
        The speed at which the agent turns left or right.
    map_size : int (default: 10)
        The size of the maze that is generated. Must be greater then 5. Can be incremented to increase difficulty by adjusting the attribute between game resets.
    width : int (default: 48)
        Screen width.
    height : int (default: 48)
        Screen height, recommended to be same dimension as width.
    init_pos_distance_to_target : int (default None aka. map_size*map_size)
        Useful for curriculum learning, slowly move target away from init position to improve learning
    """

    def __init__(self,
                 init_pos=(1, 1), resolution=1,
                 move_speed=20, turn_speed=13,
                 map_size=10, height=48, width=48, init_pos_distance_to_target=None):
        assert map_size > 5, "map_size must be gte 5"
        # do not change
        init_dir = (1.0, 0.0)
        init_plane = (0.0, 0.66)
        # 0 = corridor, 1 = wall, 2 = target block (drawn red).
        block_types = {
            0: {
                "pass_through": True,
                "color": None
            },
            1: {
                "pass_through": False,
                "color": (255, 255, 255)
            },
            2: {
                "pass_through": False,
                "color": (255, 100, 100)
            }
        }
        actions = {
            "forward": K_w,
            "left": K_a,
            "right": K_d,
            "backward": K_s
        }
        PyGameWrapper.__init__(self, width, height, actions=actions)
        RayCastPlayer.__init__(self, None,
                               init_pos, init_dir, width, height, resolution,
                               move_speed, turn_speed, init_plane, actions, block_types)
        if init_pos_distance_to_target is None:
            init_pos_distance_to_target = map_size * map_size
        self.init_pos_distance_to_target = max(1, init_pos_distance_to_target)
        self.init_pos = np.array([init_pos], dtype=np.float32)
        self.init_dir = np.array([init_dir], dtype=np.float32)
        self.init_plane = np.array([init_plane], dtype=np.float32)
        self.obj_loc = None
        self.map_size = map_size
        self.is_game_over = False

    def _make_maze(self, complexity=0.75, density=0.75):
        """
        ty wikipedia?
        Generates a random maze grid (1 = wall, 0 = corridor).
        NOTE: the deprecated ``RandomState.random_integers`` calls (removed
        in modern NumPy) are replaced by equivalent ``randint`` calls; since
        randint's upper bound is exclusive, each bound gains +1 to preserve
        the original inclusive range.
        """
        dim = int(np.floor(self.map_size / 2) * 2 + 1)
        shape = (dim, dim)
        complexity = int(complexity * (5 * (shape[0] + shape[1])))
        density = int(density * (shape[0] // 2 * shape[1] // 2))
        # Build actual maze
        Z = np.zeros(shape, dtype=bool)
        # Fill borders
        Z[0, :] = Z[-1, :] = 1
        Z[:, 0] = Z[:, -1] = 1
        # Make isles
        for i in range(density):
            x = self.rng.randint(0, shape[1] // 2 + 1) * 2
            y = self.rng.randint(0, shape[0] // 2 + 1) * 2
            Z[y, x] = 1
            for j in range(complexity):
                neighbours = []
                if x > 1:
                    neighbours.append((y, x - 2))
                if x < shape[1] - 2:
                    neighbours.append((y, x + 2))
                if y > 1:
                    neighbours.append((y - 2, x))
                if y < shape[0] - 2:
                    neighbours.append((y + 2, x))
                if len(neighbours):
                    y_, x_ = neighbours[
                        self.rng.randint(0, len(neighbours))]
                    if Z[y_, x_] == 0:
                        Z[y_, x_] = 1
                        Z[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1
                        x, y = x_, y_
        return Z.astype(int)

    def getGameState(self):
        """
        Returns
        -------
        None
            Does not have a non-visual representation of game state.
            Would be possible to return the location of the maze end.
        """
        return None

    def getScore(self):
        # Accumulated reward so far this episode.
        return self.score

    def game_over(self):
        # Set by step() once the target has been reached in sight.
        return self.is_game_over

    def getFiltredPositions(self, pos_input, pos_list, wall_list):
        """Expand one flood-fill cell: record reachable corridor neighbours
        in *pos_list* (with step distance) and bordering walls in *wall_list*.
        Both lists are mutated in place; *pos_input* is marked as checked via
        its list entry."""
        pos_check = pos_input['pos']
        if self.map_[pos_check[0], pos_check[1]] == 0:
            # Visit the cell itself plus its 4-neighbourhood.
            for y, x in [(0, 0), (-1, 0), (1, 0), (0, -1), (0, 1)]:
                if self.map_[pos_check[0] + y, pos_check[1] + x] == 0:
                    # aisle (corridor cell)
                    if not any(it for it in pos_list if it['pos'][0] == pos_check[0] + y and it['pos'][1] == pos_check[1] + x):
                        pos_list.append({
                            'pos': [pos_check[0] + y, pos_check[1] + x],
                            'dist': pos_input['dist'] + (0 if (x == 0 and y == 0) else 1),
                            'checked': (x == 0 and y == 0)
                        })
                    else:
                        for it in pos_list:
                            if it['pos'][0] == pos_check[0] + y and it['pos'][1] == pos_check[1] + x:
                                it['checked'] = True
                                break
                else:
                    # wall
                    if not any(it for it in wall_list if it['pos'][0] == pos_check[0] + y and it['pos'][1] == pos_check[1] + x):
                        wall_list.append({
                            'pos': [pos_check[0] + y, pos_check[1] + x],
                            'dist': pos_input['dist'] + (0 if (x == 0 and y == 0) else 1)
                        })

    def init(self):
        """Generate a fresh maze and place the target within the configured
        flood-fill distance of the start position."""
        self.score = 0  # reset score
        self.is_game_over = False
        self.pos = np.copy(self.init_pos)
        self.dir = np.copy(self.init_dir)
        self.plane = np.copy(self.init_plane)
        self.map_ = self._make_maze()
        # Flood fill from the start cell to learn which walls are reachable
        # and how many steps away each one is.
        pos_list = []
        wall_list = []
        check_list = []
        pos_input = {
            # np.int was removed from NumPy; the builtin int is equivalent here.
            'pos': self.pos.astype(int)[0],
            'dist': 0,
            'checked': False
        }
        pos_list.append(pos_input)
        check_list.append(pos_input)
        while len(check_list):
            for pos_each in check_list:
                self.getFiltredPositions(pos_each, pos_list, wall_list)
            check_list = [it for it in pos_list if not it['checked']]
        available_positions = []
        for y in range(self.map_size + 1):
            for x in range(self.map_size + 1):
                # in a wall
                if self.map_[y, x] == 1:
                    # check access to this point
                    if any(it for it in wall_list if it['dist'] <= self.init_pos_distance_to_target and it['pos'][0] == y and it['pos'][1] == x):
                        available_positions.append([y, x])
        self.obj_loc = np.array([available_positions[self.rng.randint(0, high=len(available_positions))]])
        self.map_[self.obj_loc[0][0], self.obj_loc[0][1]] = 2
        if self.angle_to_obj_rad() < 1.5:
            # turn away from target at init state
            self.dir *= -1.0
            self.plane *= -1.0

    def reset(self):
        self.init()

    def normalize(self, vector):
        """Scale *vector* (shape (1, 2)) to unit length IN PLACE and return it."""
        norm = math.sqrt(vector[0][0] ** 2 + vector[0][1] ** 2)
        vector[0][0] /= norm
        vector[0][1] /= norm
        return vector

    def step(self, dt):
        """Advance one frame: handle input, raycast-render, detect the win."""
        self.screen.fill((0, 0, 0))
        # Floor: grey rectangle covering the lower half of the screen.
        pygame.draw.rect(self.screen, (92, 92, 92),
                         (0, self.height / 2, self.width, self.height))
        if not self.is_game_over:
            self.score += self.rewards["tick"]
        self._handle_player_events(dt)
        # Paint each vertical raycast strip (column, top, bottom, color).
        c, t, b, col = self.draw()
        for i in range(len(c)):
            color = (col[i][0], col[i][1], col[i][2])
            p0 = (c[i], t[i])
            p1 = (c[i], b[i])
            pygame.draw.line(self.screen, color, p0, p1, self.resolution)
        dist = np.sqrt(np.sum((self.pos[0] - (self.obj_loc[0] + 0.5))**2.0))
        # Close to target object and in sight
        if dist < 1.1 and self.angle_to_obj_rad() < 0.8:
            self.score += self.rewards["win"]
            self.is_game_over = True

    def angle_to_obj_rad(self):
        """Angle in radians between the view direction and the target cell centre."""
        dir_to_loc = (self.obj_loc + 0.5) - self.pos
        dir_to_loc = self.normalize(dir_to_loc)
        # normalize() mutates its argument, so work on a copy of self.dir.
        dir_norm = self.normalize(np.copy(self.dir))
        angle_rad = np.arccos(np.dot(dir_to_loc[0], dir_norm[0]))
        return angle_rad
if __name__ == "__main__":
    # Manual smoke test: play the maze interactively at 60 fps.
    import numpy as np
    fps = 60
    pygame.init()
    game = RaycastMaze(
        height=256,
        width=256,
        map_size=10
    )
    game.screen = pygame.display.set_mode(game.getScreenDims(), 0, 32)
    game.clock = pygame.time.Clock()
    game.rng = np.random.RandomState(24)
    game.init()
    while True:
        # tick_busy_loop caps at fps and returns elapsed milliseconds.
        dt = game.clock.tick_busy_loop(fps)
        if game.game_over():
            # Regenerate the maze and keep playing.
            print("Game over!")
            print("Resetting!")
            game.reset()
        game.step(dt)
        pygame.display.update()
| 9,656 | 32.415225 | 161 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/snake.py | import pygame
import sys
import math
#import .base
from .base.pygamewrapper import PyGameWrapper
from pygame.constants import K_w, K_a, K_s, K_d
from .utils.vec2d import vec2d
from .utils import percent_round_int
class Food(pygame.sprite.Sprite):
    """The edible square; relocates to a random free grid cell when eaten."""

    def __init__(self, pos_init, width, color,
                 SCREEN_WIDTH, SCREEN_HEIGHT, rng):
        pygame.sprite.Sprite.__init__(self)
        self.pos = vec2d(pos_init)
        self.color = color
        self.SCREEN_WIDTH = SCREEN_WIDTH
        self.SCREEN_HEIGHT = SCREEN_HEIGHT
        self.width = width
        self.rng = rng
        # Black is the colorkey, so only the colored square is visible.
        surf = pygame.Surface((width, width))
        surf.fill((0, 0, 0, 0))
        surf.set_colorkey((0, 0, 0))
        pygame.draw.rect(surf, color, (0, 0, self.width, self.width), 0)
        self.image = surf
        self.rect = self.image.get_rect()
        self.rect.center = pos_init

    def new_position(self, snake):
        """Pick a fresh grid-aligned location that avoids the snake's body."""
        candidate = snake.body[0].pos
        occupied = [seg.pos for seg in snake.body]
        # Keep drawing candidates until one is clear of the body.
        while (candidate in occupied):
            _x = self.rng.choice(range(
                self.width * 2, self.SCREEN_WIDTH - self.width * 2, self.width
            ))
            _y = self.rng.choice(range(
                self.width * 2, self.SCREEN_HEIGHT - self.width * 2, self.width
            ))
            candidate = vec2d((_x, _y))
        self.pos = candidate
        self.rect.center = (self.pos.x, self.pos.y)

    def draw(self, screen):
        screen.blit(self.image, self.rect.center)
class SnakeSegment(pygame.sprite.Sprite):
    """One square piece of the snake's body."""

    def __init__(self, pos_init, width, height, color):
        pygame.sprite.Sprite.__init__(self)
        self.pos = vec2d(pos_init)
        self.color = color
        self.width = width
        self.height = height
        # Black is the colorkey, so only the colored rectangle is visible.
        surf = pygame.Surface((width, height))
        surf.fill((0, 0, 0))
        surf.set_colorkey((0, 0, 0))
        pygame.draw.rect(surf, color, (0, 0, self.width, self.height), 0)
        self.image = surf
        # use half the size (smaller hitbox than the drawn square)
        self.rect = pygame.Rect(pos_init, (self.width / 2, self.height / 2))
        self.rect.center = pos_init

    def draw(self, screen):
        screen.blit(self.image, self.rect.center)
# basically just holds onto all of them
class SnakePlayer():
    """The snake: head plus trailing segments, movement and directional hitbox."""

    def __init__(self, speed, length, pos_init, width,
                 color, SCREEN_WIDTH, SCREEN_HEIGHT):
        # Heading as a unit direction vector; starts moving right.
        self.dir = vec2d((1, 0))
        self.speed = speed
        self.pos = vec2d(pos_init)
        self.color = color
        self.width = width
        self.length = length
        self.body = []
        # When True, update_hitbox() rebuilds the head's hitbox surface
        # (set again on every direction change by the game's event handler).
        self.update_head = True
        # build our body up
        for i in range(self.length):
            self.body.append(
                # makes a neat "zapping" in effect
                SnakeSegment(
                    (self.pos.x - (width) * i, self.pos.y),
                    self.width,
                    self.width,
                    tuple([c - 100 for c in self.color]
                          ) if i == 0 else self.color  # head gets a darker tint
                )
            )
        # we dont add the first few because it cause never actually hit it
        self.body_group = pygame.sprite.Group()
        self.head = self.body[0]

    def update(self, dt):
        # Each trailing segment eases toward the one ahead of it
        # (exponential smoothing with factor 0.1), producing a fluid tail.
        for i in range(self.length - 1, 0, -1):
            scale = 0.1
            self.body[i].pos = vec2d((
                ((1.0 - scale) *
                 self.body[i - 1].pos.x + scale * self.body[i].pos.x),
                ((1.0 - scale) *
                 self.body[i - 1].pos.y + scale * self.body[i].pos.y)
            ))
            self.body[i].rect.center = (self.body[i].pos.x, self.body[i].pos.y)
        # The head advances in a straight line along the current direction.
        self.head.pos.x += self.dir.x * self.speed * dt
        self.head.pos.y += self.dir.y * self.speed * dt
        self.update_hitbox()

    def update_hitbox(self):
        # need to make a small rect pointing the direction the snake is
        # instead of counting the entire head square as a hit box, since
        # the head touchs the body on turns and causes game overs.
        x = self.head.pos.x
        y = self.head.pos.y
        # Moving vertically: thin horizontal strip offset ahead of the head.
        if self.dir.x == 0:
            w = self.width
            h = percent_round_int(self.width, 0.25)
            if self.dir.y == 1:
                y += percent_round_int(self.width, 1.0)
            if self.dir.y == -1:
                y -= percent_round_int(self.width, 0.25)
        # Moving horizontally: thin vertical strip offset ahead of the head.
        if self.dir.y == 0:
            w = percent_round_int(self.width, 0.25)
            h = self.width
            if self.dir.x == 1:
                x += percent_round_int(self.width, 1.0)
            if self.dir.x == -1:
                x -= percent_round_int(self.width, 0.25)
        if self.update_head:
            # Rebuild the (red) hitbox surface only after a direction change.
            image = pygame.Surface((w, h))
            image.fill((0, 0, 0))
            image.set_colorkey((0, 0, 0))
            pygame.draw.rect(
                image,
                (255, 0, 0),
                (0, 0, w, h),
                0
            )
            self.head.image = image
            self.head.rect = self.head.image.get_rect()
            self.update_head = False
        self.head.rect.center = (x, y)

    def grow(self):
        # Append a segment at the tail; alternate the red/blue channel offset
        # so consecutive segments are visually distinguishable.
        self.length += 1
        add = 100 if self.length % 2 == 0 else -100
        color = (self.color[0] + add, self.color[1], self.color[2] + add)
        last = self.body[-1].pos
        self.body.append(
            SnakeSegment(
                (last.x, last.y),  # initially off screen?
                self.width,
                self.width,
                color
            )
        )
        if self.length > 3:  # we cant actually hit another segment until this point.
            self.body_group.add(self.body[-1])

    def draw(self, screen):
        # Tail-to-head order so the head is drawn on top.
        for b in self.body[::-1]:
            b.draw(screen)
class Snake(PyGameWrapper):
    """
    Classic snake: eat food to grow, die on wall or self collision.

    Parameters
    ----------
    width : int
        Screen width.
    height : int
        Screen height, recommended to be same dimension as width.
    init_length : int (default: 3)
        The starting number of segments the snake has. Do not set below 3 segments. Has issues with hitbox detection with the body for lower values.
    """

    def __init__(self,
                 width=64,
                 height=64,
                 init_length=3):

        actions = {
            "up": K_w,
            "left": K_a,
            "right": K_d,
            "down": K_s
        }

        PyGameWrapper.__init__(self, width, height, actions=actions)

        # sizes/speed scale with the screen so behavior is resolution-independent
        self.speed = percent_round_int(width, 0.45)

        self.player_width = percent_round_int(width, 0.05)
        self.food_width = percent_round_int(width, 0.09)
        self.player_color = (100, 255, 100)
        self.food_color = (255, 100, 100)

        self.INIT_POS = (width / 2, height / 2)
        self.init_length = init_length

        self.BG_COLOR = (25, 25, 25)

    def _handle_player_events(self):
        """Consume pygame events and steer the snake.

        A 180-degree reversal is rejected (e.g. cannot go left while the
        current x-direction is right); the head hitbox is flagged for
        rebuild after any keypress.
        """
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()

            if event.type == pygame.KEYDOWN:
                key = event.key

                #left = -1
                #right = 1
                #up = -1
                #down = 1

                if key == self.actions["left"] and self.player.dir.x != 1:
                    self.player.dir = vec2d((-1, 0))

                if key == self.actions["right"] and self.player.dir.x != -1:
                    self.player.dir = vec2d((1, 0))

                if key == self.actions["up"] and self.player.dir.y != 1:
                    self.player.dir = vec2d((0, -1))

                if key == self.actions["down"] and self.player.dir.y != -1:
                    self.player.dir = vec2d((0, 1))

                self.player.update_head = True

    def getGameState(self):
        """
        Returns
        -------
        dict
            * snake head x position.
            * snake head y position.
            * food x position.
            * food y position.
            * distance from head to each snake segment.

            See code for structure.
        """
        state = {
            "snake_head_x": self.player.head.pos.x,
            "snake_head_y": self.player.head.pos.y,
            "food_x": self.food.pos.x,
            "food_y": self.food.pos.y,
            "snake_body": [],
            "snake_body_pos": [],
        }

        # Euclidean distance from the head to every body segment, in body order
        for s in self.player.body:
            dist = math.sqrt((self.player.head.pos.x - s.pos.x)
                             ** 2 + (self.player.head.pos.y - s.pos.y)**2)
            state["snake_body"].append(dist)
            state["snake_body_pos"].append([s.pos.x, s.pos.y])

        return state

    def getScore(self):
        """Return the current accumulated score."""
        return self.score

    def game_over(self):
        """Return True once a collision has set lives to -1."""
        return self.lives == -1

    def init(self):
        """
        Starts/Resets the game to its inital state
        """
        self.player = SnakePlayer(
            self.speed,
            self.init_length,
            self.INIT_POS,
            self.player_width,
            self.player_color,
            self.width,
            self.height
        )

        self.food = Food((0, 0),
                         self.food_width,
                         self.food_color,
                         self.width,
                         self.height,
                         self.rng
                         )

        # place the food so it does not overlap the snake
        self.food.new_position(self.player)

        self.score = 0
        self.ticks = 0
        self.lives = 1

    def step(self, dt):
        """
        Perform one step of game emulation.
        """
        dt /= 1000.0  # milliseconds -> seconds
        self.ticks += 1
        self.screen.fill(self.BG_COLOR)
        self._handle_player_events()
        self.score += self.rewards["tick"]

        # food pickup: grow one segment and respawn the food
        hit = pygame.sprite.collide_rect(self.player.head, self.food)

        if hit:  # it hit
            self.score += self.rewards["positive"]
            self.player.grow()
            self.food.new_position(self.player)

        # self-collision against the body segments registered for hits
        hits = pygame.sprite.spritecollide(
            self.player.head, self.player.body_group, False)
        if len(hits) > 0:
            self.lives = -1

        # wall collision on either axis
        x_check = (
            self.player.head.pos.x < 0) or (
            self.player.head.pos.x +
            self.player_width /
            2 > self.width)
        y_check = (
            self.player.head.pos.y < 0) or (
            self.player.head.pos.y +
            self.player_width /
            2 > self.height)

        if x_check or y_check:
            self.lives = -1

        if self.lives <= 0.0:
            self.score += self.rewards["loss"]

        # NOTE: movement is applied after collision checks, so collisions
        # are evaluated against the previous frame's position
        self.player.update(dt)

        self.player.draw(self.screen)
        self.food.draw(self.screen)
if __name__ == "__main__":
    import numpy as np

    # manual smoke test: play the game interactively at 30 FPS
    pygame.init()
    game = Snake(width=128, height=128)
    game.screen = pygame.display.set_mode(game.getScreenDims(), 0, 32)
    game.clock = pygame.time.Clock()
    game.rng = np.random.RandomState(24)
    game.init()

    while True:
        # restart immediately whenever the snake dies
        if game.game_over():
            game.init()
        frame_ms = game.clock.tick_busy_loop(30)
        game.step(frame_ms)
        pygame.display.update()
| 11,320 | 26.747549 | 148 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/pixelcopter.py | import math
import sys
#import .base
from .base.pygamewrapper import PyGameWrapper
import pygame
from pygame.constants import K_w, K_s
from .utils.vec2d import vec2d
class Block(pygame.sprite.Sprite):
    """A scrolling rectangular obstacle ("gate block") the player must avoid."""

    def __init__(self, pos_init, speed, SCREEN_WIDTH, SCREEN_HEIGHT):
        pygame.sprite.Sprite.__init__(self)

        self.pos = vec2d(pos_init)
        self.speed = speed
        self.SCREEN_WIDTH = SCREEN_WIDTH
        self.SCREEN_HEIGHT = SCREEN_HEIGHT

        # block size is a fixed fraction of the screen
        self.width = int(SCREEN_WIDTH * 0.1)
        self.height = int(SCREEN_HEIGHT * 0.2)

        surf = pygame.Surface((self.width, self.height))
        surf.fill((0, 0, 0, 0))
        surf.set_colorkey((0, 0, 0))  # black is transparent
        pygame.draw.rect(
            surf, (120, 240, 80), (0, 0, self.width, self.height), 0)

        self.image = surf
        self.rect = self.image.get_rect()
        self.rect.center = pos_init

    def update(self, dt):
        # scroll left at a constant speed
        self.pos.x -= self.speed * dt
        self.rect.center = (self.pos.x, self.pos.y)
class HelicopterPlayer(pygame.sprite.Sprite):
    """The player square with momentum-based climb/fall physics."""

    def __init__(self, speed, SCREEN_WIDTH, SCREEN_HEIGHT):
        pygame.sprite.Sprite.__init__(self)

        pos_init = (int(SCREEN_WIDTH * 0.35), SCREEN_HEIGHT / 2)
        self.pos = vec2d(pos_init)
        self.speed = speed
        self.climb_speed = speed * -0.875  # -0.0175
        self.fall_speed = speed * 0.09  # 0.0019
        self.momentum = 0

        self.width = SCREEN_WIDTH * 0.05
        self.height = SCREEN_HEIGHT * 0.05

        surf = pygame.Surface((self.width, self.height))
        surf.fill((0, 0, 0, 0))
        surf.set_colorkey((0, 0, 0))
        pygame.draw.rect(
            surf, (255, 255, 255), (0, 0, self.width, self.height), 0)

        self.image = surf
        self.rect = self.image.get_rect()
        self.rect.center = pos_init

    def update(self, is_climbing, dt):
        # accelerate up while climbing, otherwise fall; apply 1% drag
        accel = self.climb_speed if is_climbing else self.fall_speed
        self.momentum = (self.momentum + accel * dt) * 0.99
        self.pos.y += self.momentum
        self.rect.center = (self.pos.x, self.pos.y)
class Terrain(pygame.sprite.Sprite):
    """A scrolling column with a ceiling slab and a floor slab, gap between."""

    def __init__(self, pos_init, speed, SCREEN_WIDTH, SCREEN_HEIGHT):
        pygame.sprite.Sprite.__init__(self)

        self.pos = vec2d(pos_init)
        self.speed = speed
        self.width = int(SCREEN_WIDTH * 0.1)

        surf = pygame.Surface((self.width, SCREEN_HEIGHT * 1.5))
        surf.fill((0, 0, 0, 0))
        surf.set_colorkey((0, 0, 0))

        color = (120, 240, 80)
        # ceiling slab (top rect)
        pygame.draw.rect(
            surf, color, (0, 0, self.width, SCREEN_HEIGHT * 0.5), 0)
        # floor slab (bot rect), leaving the playable gap in between
        pygame.draw.rect(
            surf, color,
            (0, SCREEN_HEIGHT * 1.05, self.width, SCREEN_HEIGHT * 0.5), 0)

        self.image = surf
        self.rect = self.image.get_rect()
        self.rect.center = pos_init

    def update(self, dt):
        # scroll left at a constant speed
        self.pos.x -= self.speed * dt
        self.rect.center = (self.pos.x, self.pos.y)
class Pixelcopter(PyGameWrapper):
    """
    Helicopter-style side-scroller: hold "up" to climb, release to fall,
    avoid terrain and gate blocks.

    Parameters
    ----------
    width : int
        Screen width.
    height : int
        Screen height, recommended to be same dimension as width.
    """

    def __init__(self, width=48, height=48):
        actions = {
            "up": K_w
        }

        PyGameWrapper.__init__(self, width, height, actions=actions)

        self.is_climbing = False
        # scroll/physics speed scales with screen width
        self.speed = 0.0004 * width

    def _handle_player_events(self):
        """Set is_climbing for this frame from a held/pressed "up" key."""
        self.is_climbing = False
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()

            if event.type == pygame.KEYDOWN:
                key = event.key
                if key == self.actions['up']:
                    self.is_climbing = True

    def getGameState(self):
        """
        Gets a non-visual state representation of the game.

        Returns
        -------
        dict
            * player y position.
            * player velocity.
            * player distance to floor.
            * player distance to ceiling.
            * next block x distance to player.
            * next blocks top y location,
            * next blocks bottom y location.

            See code for structure.
        """
        # find the nearest block that is still ahead of the player
        min_dist = 999
        min_block = None
        for b in self.block_group:  # Groups do not return in order
            dist_to = b.pos.x - self.player.pos.x
            if dist_to > 0 and dist_to < min_dist:
                min_block = b
                min_dist = dist_to

        # NOTE(review): if every block is behind the player, min_block stays
        # None and the state lookup below would raise — appears to rely on
        # step() always respawning a block ahead; confirm before changing.
        current_terrain = pygame.sprite.spritecollide(
            self.player, self.terrain_group, False)[0]

        state = {
            "player_y": self.player.pos.y,
            "player_vel": self.player.momentum,
            "player_dist_to_ceil": self.player.pos.y - (current_terrain.pos.y - self.height * 0.25),
            "player_dist_to_floor": (current_terrain.pos.y + self.height * 0.25) - self.player.pos.y,
            "next_gate_dist_to_player": min_dist,
            "next_gate_block_top": min_block.pos.y,
            "next_gate_block_bottom": min_block.pos.y + min_block.height
        }

        return state

    def getScreenDims(self):
        """Return the (width, height) of the game screen."""
        return self.screen_dim

    def getActions(self):
        """Return the pygame key constants usable as actions."""
        return self.actions.values()

    def getScore(self):
        """Return the current accumulated score."""
        return self.score

    def game_over(self):
        """Return True once all lives are spent."""
        return self.lives <= 0.0

    def init(self):
        """Start/reset the game: fresh player, one block, initial terrain."""
        self.score = 0.0
        self.lives = 1.0

        self.player = HelicopterPlayer(
            self.speed,
            self.width,
            self.height
        )

        self.player_group = pygame.sprite.Group()
        self.player_group.add(self.player)

        self.block_group = pygame.sprite.Group()
        self._add_blocks()

        self.terrain_group = pygame.sprite.Group()
        self._add_terrain(0, self.width * 4)

    def _add_terrain(self, start, end):
        """Append terrain columns covering screen x range [start, end)."""
        w = int(self.width * 0.1)
        # each block takes up 10 units.
        steps = range(start + int(w / 2), end + int(w / 2), w)
        y_jitter = []

        # sinusoidal gap centerline with per-episode random frequency/phase
        freq = 4.5 / self.width + self.rng.uniform(-0.01, 0.01)
        for step in steps:
            jitter = (self.height * 0.125) * \
                math.sin(freq * step + self.rng.uniform(0.0, 0.5))
            y_jitter.append(jitter)

        y_pos = [int((self.height / 2.0) + y_jit) for y_jit in y_jitter]

        for i in range(0, len(steps)):
            self.terrain_group.add(Terrain(
                (steps[i], y_pos[i]),
                self.speed,
                self.width,
                self.height
            )
            )

    def _add_blocks(self):
        """Spawn one gate block just off the right edge at a random height."""
        x_pos = self.rng.randint(self.width, int(self.width * 1.5))
        y_pos = self.rng.randint(
            int(self.height * 0.25),
            int(self.height * 0.75)
        )
        self.block_group.add(
            Block(
                (x_pos, y_pos),
                self.speed,
                self.width,
                self.height
            )
        )

    def reset(self):
        """Alias for init(); restarts the game."""
        self.init()

    def step(self, dt):
        """Advance the game one frame: move, collide, score, redraw."""
        self.screen.fill((0, 0, 0))
        self._handle_player_events()

        self.score += self.rewards["tick"]

        self.player.update(self.is_climbing, dt)
        self.block_group.update(dt)
        self.terrain_group.update(dt)

        # any gate-block contact costs a life
        hits = pygame.sprite.spritecollide(
            self.player, self.block_group, False)
        for creep in hits:
            self.lives -= 1

        # terrain contact: compare against the ceiling/floor of the gap
        hits = pygame.sprite.spritecollide(
            self.player, self.terrain_group, False)
        for t in hits:
            if self.player.pos.y - self.player.height <= t.pos.y - self.height * 0.25:
                self.lives -= 1

            if self.player.pos.y >= t.pos.y + self.height * 0.25:
                self.lives -= 1

        # passing the last live block scores and spawns the next one
        for b in self.block_group:
            if b.pos.x <= self.player.pos.x and len(self.block_group) == 1:
                self.score += self.rewards["positive"]
                self._add_blocks()

            if b.pos.x <= -b.width:
                b.kill()

        for t in self.terrain_group:
            if t.pos.x <= -t.width:
                self.score += self.rewards["positive"]
                t.kill()

        if self.player.pos.y < self.height * 0.125:  # its above
            self.lives -= 1

        if self.player.pos.y > self.height * 0.875:  # its below the lowest possible block
            self.lives -= 1

        if len(self.terrain_group) <= (
                10 + 3):  # 10% per terrain, offset of ~2 with 1 extra
            self._add_terrain(self.width, self.width * 5)

        if self.lives <= 0.0:
            self.score += self.rewards["loss"]

        self.player_group.draw(self.screen)
        self.block_group.draw(self.screen)
        self.terrain_group.draw(self.screen)
if __name__ == "__main__":
    import numpy as np

    # manual smoke test: play the game interactively at 30 FPS
    pygame.init()
    game = Pixelcopter(width=256, height=256)
    game.screen = pygame.display.set_mode(game.getScreenDims(), 0, 32)
    game.clock = pygame.time.Clock()
    game.rng = np.random.RandomState(24)
    game.init()

    while True:
        # restart immediately on death
        if game.game_over():
            game.reset()
        frame_ms = game.clock.tick_busy_loop(30)
        game.step(frame_ms)
        pygame.display.update()
| 9,494 | 26.521739 | 101 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/raycast.py | import pdb
import time
import os
import sys
import pygame
import numpy as np
from pygame.constants import K_w, K_a, K_d, K_s
import copy
class RayCastPlayer():
    """
    Loosely based on code from Lode's `Computer Graphics Tutorial`_.

    .. _Computer Graphics Tutorial: http://lodev.org/cgtutor/raycasting.html

    Takes input from key presses and traverses a map.

    Rendering is a vectorized DDA raycaster: one ray is cast per
    ``resolution`` pixels of screen width, and ``draw`` returns the column
    geometry and colors to paint.
    """

    def __init__(self, map_, init_pos, init_dir,
                 width, height, resolution, move_speed,
                 turn_speed, plane, actions, block_types):
        # map_: 2D integer grid; 0 is walkable, >0 indexes block_types
        self.actions = actions
        self.map_ = map_
        self.width = width
        self.height = height

        self.pos = np.array([init_pos], dtype=np.float32)
        self.dir = np.array([init_dir], dtype=np.float32)
        # camera plane, perpendicular to dir; its length controls the FOV
        self.plane = np.array([plane], dtype=np.float32)
        self.resolution = resolution
        self.move_speed = move_speed
        self.turn_speed = turn_speed
        self.eps = 1e-7  # guards divisions by near-zero ray components
        self.block_types = block_types

    def _handle_player_events(self, dt):
        """Apply movement/rotation key presses for the elapsed frame (dt in ms)."""
        dt = dt / 1000.0

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()

            if event.type == pygame.KEYDOWN:
                key = event.key

                # tentative destination; only applied when the target cell
                # is a pass-through block type
                new_location = self.pos
                if key == self.actions["forward"]:
                    new_location = self.pos + self.dir * self.move_speed * dt

                if key == self.actions["backward"]:
                    new_location = self.pos - self.dir * self.move_speed * dt

                new_location = new_location.astype(int)
                newX, newY = new_location[0, :]

                if newX < self.map_.shape[0] and newY < self.map_.shape[1]:
                    new_map = self.map_[newX, newY]

                    if self.block_types[new_map]["pass_through"]:
                        if key == self.actions["forward"]:
                            self.pos[0, 0] += self.dir[0, 0] * \
                                self.move_speed * dt
                            self.pos[0, 1] += self.dir[0, 1] * \
                                self.move_speed * dt

                        if key == self.actions["backward"]:
                            self.pos[0, 0] -= self.dir[0, 0] * \
                                self.move_speed * dt
                            self.pos[0, 1] -= self.dir[0, 1] * \
                                self.move_speed * dt

                # rotations: multiply dir and plane by a 2D rotation matrix
                if key == self.actions["right"]:
                    X_TURN = np.cos(self.turn_speed * dt)
                    Y_TURN = np.sin(self.turn_speed * dt)

                    _dirX = self.dir[0, 0] * X_TURN - self.dir[0, 1] * Y_TURN
                    _dirY = self.dir[0, 0] * Y_TURN + self.dir[0, 1] * X_TURN

                    _planeX = self.plane[0, 0] * \
                        X_TURN - self.plane[0, 1] * Y_TURN
                    _planeY = self.plane[0, 0] * \
                        Y_TURN + self.plane[0, 1] * X_TURN

                    self.dir[0, 0] = _dirX
                    self.dir[0, 1] = _dirY
                    self.plane[0, 0] = _planeX
                    self.plane[0, 1] = _planeY

                if key == self.actions["left"]:
                    X_INV_TURN = np.cos(-self.turn_speed * dt)
                    Y_INV_TURN = np.sin(-self.turn_speed * dt)

                    _dirX = self.dir[0, 0] * X_INV_TURN - \
                        self.dir[0, 1] * Y_INV_TURN
                    _dirY = self.dir[0, 0] * Y_INV_TURN + \
                        self.dir[0, 1] * X_INV_TURN

                    _planeX = self.plane[0, 0] * X_INV_TURN - \
                        self.plane[0, 1] * Y_INV_TURN
                    _planeY = self.plane[0, 0] * Y_INV_TURN + \
                        self.plane[0, 1] * X_INV_TURN

                    self.dir[0, 0] = _dirX
                    self.dir[0, 1] = _dirY
                    self.plane[0, 0] = _planeX
                    self.plane[0, 1] = _planeY

    def draw(self):
        """Cast all rays and return per-column draw data.

        Returns
        -------
        [cameraX, tops, bottoms, coloring] : list of int ndarrays
            Column x coordinates, wall-slice top/bottom y coordinates
            (clamped to the screen), and per-column RGB colors.
        """
        #N = width/resolution
        # N,2
        cameraX = np.arange(
            0.0,
            self.width,
            self.resolution).astype(
            np.float32)[
            :,
            np.newaxis]
        cameraX = 2.0 * cameraX / float(self.width) - 1.0

        # set the rayPos to the players current position
        ray_pos = np.tile(self.pos, [cameraX.shape[0], 1])  # N,2

        # ray direction
        ray_dir = self.dir + self.plane * cameraX  # N,2

        # which box of the map we're in
        map_ = ray_pos.astype(int)

        ray_pow = np.power(ray_dir, 2.0) + self.eps
        ray_div = ray_pow[:, 0] / (ray_pow[:, 1])
        # distance along the ray between successive x / y grid lines
        delta_dist = np.sqrt(
            1.0 + np.array([1.0 / (ray_div), ray_div])).T  # N,2

        # step direction (+1/-1) per axis  # N,2
        step = np.ones(ray_dir.shape).astype(int)
        step[ray_dir[:, 0] < 0, 0] = -1
        step[ray_dir[:, 1] < 0, 1] = -1

        # distance from the ray origin to the first x / y grid line  # N,2
        side_dist = (map_ + 1.0 - ray_pos) * delta_dist
        _value = (ray_pos - map_) * delta_dist

        side_dist[ray_dir[:, 0] < 0, 0] = _value[ray_dir[:, 0] < 0, 0]
        side_dist[ray_dir[:, 1] < 0, 1] = _value[ray_dir[:, 1] < 0, 1]

        side_dist, delta_dist, map_, side = self._DDA(
            side_dist, delta_dist, map_, step)

        # perpendicular wall distance (avoids the fisheye distortion)
        perpWallDistX = (map_[:, 0] - ray_pos[:, 0] + (1.0 - step[:, 0]) / 2.0)
        perpWallDistX = perpWallDistX / (ray_dir[:, 0] + self.eps)
        perpWallDistX = perpWallDistX[:, np.newaxis]

        perpWallDistY = (map_[:, 1] - ray_pos[:, 1] + (1.0 - step[:, 1]) / 2.0)
        perpWallDistY = perpWallDistY / (ray_dir[:, 1] + self.eps)
        perpWallDistY = perpWallDistY[:, np.newaxis]

        perpWallDist = perpWallDistY
        perpWallDist[side == 0] = perpWallDistX[side == 0]

        # projected wall-slice height and clamped top/bottom screen rows
        lineHeights = (self.height / (perpWallDist + self.eps)).astype(int)

        tops = -(lineHeights) / 2.0 + self.height / 2.0
        tops[tops < 0] = 0.0
        tops = tops.astype(int)

        bottoms = lineHeights / 2.0 + self.height / 2.0
        bottoms[bottoms >= self.height] = self.height - 1
        bottoms = bottoms.astype(int)

        # per-column base color from the block type each ray hit
        visible_blocks = self.map_[map_[:, 0], map_[:, 1]]
        coloring = np.ones((bottoms.shape[0], 3)) * 255.0

        for k in self.block_types.keys():
            # BUGFIX: the guard previously tested the block-type dict itself
            # (never None) instead of its color; skip colorless block types.
            if self.block_types[k]["color"] is not None:
                c = self.block_types[k]["color"]
                sel = visible_blocks == k
                coloring[sel] = np.tile(c, [bottoms.shape[0], 1])[sel]

        # darken with distance, then dim y-side walls for a lighting cue
        shading = np.abs(perpWallDist * 15) * 1.5
        coloring = coloring - shading
        coloring = np.clip(coloring, 0, 255)
        coloring[(side == 1.0).flatten(), :] *= 0.65  # lighting apparently

        cameraX = np.arange(0, self.width, self.resolution)
        returns = [cameraX, tops, bottoms, coloring]

        return [r.astype(int) for r in returns]

    def _DDA(self, side_dist, delta_dist, map_, step):
        """Vectorized DDA: advance every ray until each hits a solid cell.

        Returns the updated (side_dist, delta_dist, map_, side), where side
        is 0 for an x-axis wall hit and 1 for a y-axis wall hit.

        tested against for-loop version using line_profiler:
        for-loop takes about 0.005968s per call, this version 0.000416s.
        """
        hits = np.zeros((map_.shape[0], 1))
        side = np.zeros((map_.shape[0], 1))

        while np.sum(hits) < side_dist.shape[0]:
            # only update values that havent hit a wall. So are 0 still.
            # BUGFIX: np.bool was removed in NumPy 1.24; use builtin bool.
            update_mask = np.logical_not(hits).astype(bool)

            # step along whichever axis has the nearer grid boundary
            mask = (side_dist[:, 0] < side_dist[:, 1])[:, np.newaxis]

            sel = (update_mask & mask).flatten()
            side_dist[sel, 0] += delta_dist[sel, 0]
            map_[sel, 0] += step[sel, 0]
            side[sel] = np.zeros(side.shape)[sel]

            sel = (update_mask & ~mask).flatten()
            side_dist[sel, 1] += delta_dist[sel, 1]
            map_[sel, 1] += step[sel, 1]
            side[sel] = np.ones(side.shape)[sel]

            # once a ray hits a solid cell (map > 0) it stays flagged
            hits = np.logical_or(
                hits, (self.map_[
                    map_[
                        :, 0], map_[
                        :, 1]] > 0)[
                    :, np.newaxis])

        return side_dist, delta_dist, map_, side
def make_map(dim):
    """Return a dim x dim float grid whose one-cell border is wall (1.0)."""
    grid = np.zeros((dim, dim))
    # mark all four border rows/columns as solid wall
    for edge in (grid[0, :], grid[-1, :], grid[:, 0], grid[:, -1]):
        edge[:] = 1.0
    return grid
def make_box(grid, p0, p1, fill=0, isFilled=True):
    """Stamp a box spanning p0..p1 onto grid in place and return grid.

    With isFilled=True the half-open region [x0:x1, y0:y1] is filled;
    otherwise only the outline is drawn (horizontal edges span y0..y1
    inclusive, vertical edges span x0..x1-1, matching the original shape).
    """
    x0, y0 = p0
    x1, y1 = p1
    if isFilled:
        grid[x0:x1, y0:y1] = fill
        return grid
    grid[x0, y0:y1 + 1] = fill
    grid[x1, y0:y1 + 1] = fill
    grid[x0:x1, y0] = fill
    grid[x0:x1, y1] = fill
    return grid
if __name__ == "__main__":
    # Build a 15x15 demo level: bordered map plus hand-placed rooms.
    map_grid = make_map(15)

    # block id -> walkability and render color (0 is walkable/invisible)
    block_types = {
        0: {
            "pass_through": True,
            "color": None
        },
        1: {
            "pass_through": False,
            "color": (255, 255, 255)
        },
        2: {
            "pass_through": False,
            "color": (220, 100, 100)
        },
        3: {
            "pass_through": False,
            "color": (100, 220, 100)
        },
        4: {
            "pass_through": False,
            "color": (100, 100, 220)
        }
    }

    # walls/rooms, then doorways carved with fill=0
    map_grid = make_box(map_grid, (5, 5), (9, 9), fill=2, isFilled=False)
    map_grid = make_box(map_grid, (8, 8), (14, 14), fill=3, isFilled=True)
    map_grid = make_box(map_grid, (1, 2), (3, 9), fill=4, isFilled=False)

    map_grid = make_box(map_grid, (11, 6), (12, 11), fill=0, isFilled=True)
    map_grid = make_box(map_grid, (6, 11), (12, 12), fill=0, isFilled=True)
    map_grid = make_box(map_grid, (2, 6), (7, 7), fill=0, isFilled=True)

    # randomize wall block types (2..4) for a varied look
    map_grid[map_grid > 0] = np.random.randint(
        2, high=5, size=map_grid[map_grid > 0].shape)

    # player/camera configuration
    init_dir = (1.0, 0.0)
    init_pos = (1, 1)
    width = 128
    height = 128
    resolution = 1
    move_speed = 15
    turn_speed = 10.5
    plane = (0.0, 0.66)
    actions = {
        "forward": K_w,
        "left": K_a,
        "right": K_d,
        "backward": K_s
    }

    rc = RayCastPlayer(
        map_grid,
        init_pos,
        init_dir,
        width,
        height,
        resolution,
        move_speed,
        turn_speed,
        plane,
        actions,
        block_types
    )

    pygame.init()
    screen = pygame.display.set_mode((width, height), 0, 24)
    clock = pygame.time.Clock()

    # main loop: grey floor, black ceiling, one vertical line per ray column
    while True:
        dt = clock.tick(60)
        screen.fill((0, 0, 0))
        pygame.draw.rect(screen, (92, 92, 92), (0, height / 2, width, height))

        rc._handle_player_events(dt)
        c, t, b, col = rc.draw()

        for i in range(len(c)):
            pygame.draw.line(screen, (col[i][0], col[i][1], col[i][2]), (c[
                i], t[i]), (c[i], b[i]), rc.resolution)

        pygame.display.update()
| 10,881 | 29.914773 | 79 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/puckworld.py | import pygame
import sys
import math
#import .base
from .base.pygamewrapper import PyGameWrapper
from pygame.constants import K_w, K_a, K_s, K_d
from .primitives import Player, Creep
from .utils.vec2d import vec2d
from .utils import percent_round_int
class PuckCreep(pygame.sprite.Sprite):
    """The "bad" creep: a large translucent disc that chases the player."""

    def __init__(self, pos_init, attr, SCREEN_WIDTH, SCREEN_HEIGHT):
        pygame.sprite.Sprite.__init__(self)
        self.pos = vec2d(pos_init)
        self.attr = attr
        self.SCREEN_WIDTH = SCREEN_WIDTH
        self.SCREEN_HEIGHT = SCREEN_HEIGHT

        outer = self.attr["radius_outer"]
        surf = pygame.Surface((outer * 2, outer * 2))
        surf.fill((0, 0, 0, 0))
        surf.set_colorkey((0, 0, 0))

        # translucent outer halo
        pygame.draw.circle(
            surf, self.attr["color_outer"], (outer, outer), outer, 0)
        surf.set_alpha(int(255 * 0.75))
        # solid inner core
        pygame.draw.circle(
            surf, self.attr["color_center"], (outer, outer),
            self.attr["radius_center"], 0)

        self.image = surf
        self.rect = self.image.get_rect()
        self.rect.center = pos_init

    def update(self, ndx, ndy, dt):
        # move along the supplied (normalized) direction at configured speed
        self.pos.x += ndx * self.attr['speed'] * dt
        self.pos.y += ndy * self.attr['speed'] * dt
        self.rect.center = (self.pos.x, self.pos.y)
class PuckWorld(PyGameWrapper):
    """
    Based Karpthy's PuckWorld in `REINFORCEjs`_.

    .. _REINFORCEjs: https://github.com/karpathy/reinforcejs

    Parameters
    ----------
    width : int
        Screen width.
    height : int
        Screen height, recommended to be same dimension as width.
    """

    def __init__(self,
                 width=64,
                 height=64):

        actions = {
            "up": K_w,
            "left": K_a,
            "right": K_d,
            "down": K_s
        }

        PyGameWrapper.__init__(self, width, height, actions=actions)

        # sizes/speeds scale with screen width for resolution independence
        self.CREEP_BAD = {
            "radius_center": percent_round_int(width, 0.047),
            "radius_outer": percent_round_int(width, 0.265),
            "color_center": (110, 45, 45),
            "color_outer": (150, 95, 95),
            "speed": 0.05 * width
        }

        self.CREEP_GOOD = {
            "radius": percent_round_int(width, 0.047),
            "color": (40, 140, 40)
        }

        self.AGENT_COLOR = (60, 60, 140)
        self.AGENT_SPEED = 0.2 * width
        self.AGENT_RADIUS = percent_round_int(width, 0.047)
        self.AGENT_INIT_POS = (
            self.AGENT_RADIUS * 1.5,
            self.AGENT_RADIUS * 1.5)

        self.BG_COLOR = (255, 255, 255)
        self.dx = 0
        self.dy = 0
        self.ticks = 0

    def _handle_player_events(self):
        """Translate pressed action keys into this frame's (dx, dy) thrust."""
        self.dx = 0.0
        self.dy = 0.0
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()

            if event.type == pygame.KEYDOWN:
                key = event.key
                if key == self.actions["left"]:
                    self.dx -= self.AGENT_SPEED

                if key == self.actions["right"]:
                    self.dx += self.AGENT_SPEED

                if key == self.actions["up"]:
                    self.dy -= self.AGENT_SPEED

                if key == self.actions["down"]:
                    self.dy += self.AGENT_SPEED

    def getGameState(self):
        """
        Gets a non-visual state representation of the game.

        Returns
        -------
        dict
            * player x position.
            * player y position.
            * players x velocity.
            * players y velocity.
            * good creep x position.
            * good creep y position.
            * bad creep x position.
            * bad creep y position.

            See code for structure.
        """
        state = {
            "player_x": self.player.pos.x,
            "player_y": self.player.pos.y,
            "player_velocity_x": self.player.vel.x,
            "player_velocity_y": self.player.vel.y,
            "good_creep_x": self.good_creep.pos.x,
            "good_creep_y": self.good_creep.pos.y,
            "bad_creep_x": self.bad_creep.pos.x,
            "bad_creep_y": self.bad_creep.pos.y
        }

        return state

    def getScore(self):
        """Return the current accumulated score."""
        return self.score

    def game_over(self):
        """
        Return bool if the game has 'finished'
        """
        # PuckWorld is a continuing task; it never terminates on its own
        return False

    def _rngCreepPos(self):
        """Return a random (x, y) away from the screen border."""
        r = self.CREEP_GOOD['radius']
        x = self.rng.uniform(r * 3, self.width - r * 2.5)
        y = self.rng.uniform(r * 3, self.height - r * 2.5)
        return (x, y)

    def init(self):
        """
        Starts/Resets the game to its inital state
        """
        self.player = Player(
            self.AGENT_RADIUS,
            self.AGENT_COLOR,
            self.AGENT_SPEED,
            self.AGENT_INIT_POS,
            self.width,
            self.height)

        self.good_creep = Creep(
            self.CREEP_GOOD['color'],
            self.CREEP_GOOD['radius'],
            self._rngCreepPos(),
            (1, 1),
            0.0,
            1.0,
            "GOOD",
            self.width,
            self.height,
            0.0  # jitter
        )

        self.bad_creep = PuckCreep(
            (self.width,
             self.height),
            self.CREEP_BAD,
            self.screen_dim[0] * 0.75,
            self.screen_dim[1] * 0.75)

        self.creeps = pygame.sprite.Group()
        self.creeps.add(self.good_creep)
        self.creeps.add(self.bad_creep)

        self.score = 0
        self.ticks = 0
        self.lives = -1

    def step(self, dt):
        """
        Perform one step of game emulation.
        """
        dt /= 1000.0  # milliseconds -> seconds
        self.ticks += 1
        self.screen.fill(self.BG_COLOR)

        self.score += self.rewards["tick"]

        self._handle_player_events()
        self.player.update(self.dx, self.dy, dt)

        dx = self.player.pos.x - self.good_creep.pos.x
        dy = self.player.pos.y - self.good_creep.pos.y
        dist_to_good = math.sqrt(dx * dx + dy * dy)

        dx = self.player.pos.x - self.bad_creep.pos.x
        dy = self.player.pos.y - self.bad_creep.pos.y
        dist_to_bad = math.sqrt(dx * dx + dy * dy)

        # dense reward: closer to the good creep is better; being inside the
        # bad creep's outer radius adds an extra (negative) penalty
        reward = -dist_to_good
        if dist_to_bad < self.CREEP_BAD['radius_outer']:
            reward += 2.0 * \
                (dist_to_bad - self.CREEP_BAD['radius_outer']
                 ) / float(self.CREEP_BAD['radius_outer'])

        self.score += reward

        # teleport the good creep to a fresh random spot every 500 ticks
        if self.ticks % 500 == 0:
            x, y = self._rngCreepPos()
            self.good_creep.pos.x = x
            self.good_creep.pos.y = y

        # unit vector from the bad creep toward the player (it chases)
        ndx = 0.0 if dist_to_bad == 0.0 else dx / dist_to_bad
        ndy = 0.0 if dist_to_bad == 0.0 else dy / dist_to_bad

        self.bad_creep.update(ndx, ndy, dt)
        self.good_creep.update(dt)

        self.player.draw(self.screen)
        self.creeps.draw(self.screen)
if __name__ == "__main__":
    import numpy as np

    # manual smoke test: play the game interactively at 60 FPS
    pygame.init()
    game = PuckWorld(width=256, height=256)
    game.screen = pygame.display.set_mode(game.getScreenDims(), 0, 32)
    game.clock = pygame.time.Clock()
    game.rng = np.random.RandomState(24)
    game.init()

    while True:
        frame_ms = game.clock.tick_busy_loop(60)
        game.step(frame_ms)
        pygame.display.update()
| 7,591 | 25.921986 | 70 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/primitives.py | import pygame
import math
from .utils.vec2d import vec2d
class Creep(pygame.sprite.Sprite):
    """A circular sprite that wanders and bounces inside the screen bounds."""

    def __init__(self,
                 color,
                 radius,
                 pos_init,
                 dir_init,
                 speed,
                 reward,
                 TYPE,
                 SCREEN_WIDTH,
                 SCREEN_HEIGHT,
                 jitter_speed):
        pygame.sprite.Sprite.__init__(self)

        self.SCREEN_WIDTH = SCREEN_WIDTH
        self.SCREEN_HEIGHT = SCREEN_HEIGHT
        self.TYPE = TYPE
        self.jitter_speed = jitter_speed
        self.speed = speed
        self.reward = reward
        self.radius = radius
        self.pos = vec2d(pos_init)
        self.direction = vec2d(dir_init)
        self.direction.normalize()  # keep a unit heading

        surf = pygame.Surface((radius * 2, radius * 2))
        surf.fill((0, 0, 0))
        surf.set_colorkey((0, 0, 0))
        pygame.draw.circle(surf, color, (radius, radius), radius, 0)

        self.image = surf.convert()
        self.rect = self.image.get_rect()
        self.rect.center = pos_init

    def update(self, dt):
        step_x = self.direction.x * self.speed * dt
        step_y = self.direction.y * self.speed * dt
        # reflection factor; jitter_speed adds a little kick on each bounce
        bounce = -(1 + 0.5 * self.jitter_speed)

        max_x = self.SCREEN_WIDTH - self.radius
        if self.pos.x + step_x > max_x:
            self.pos.x = max_x
            self.direction.x *= bounce
        elif self.pos.x + step_x <= self.radius:
            self.pos.x = self.radius
            self.direction.x *= bounce
        else:
            self.pos.x = self.pos.x + step_x

        max_y = self.SCREEN_HEIGHT - self.radius
        if self.pos.y + step_y > max_y:
            self.pos.y = max_y
            self.direction.y *= bounce
        elif self.pos.y + step_y <= self.radius:
            self.pos.y = self.radius
            self.direction.y *= bounce
        else:
            self.pos.y = self.pos.y + step_y

        # re-normalize so the jitter kick only perturbs the heading
        self.direction.normalize()
        self.rect.center = ((self.pos.x, self.pos.y))
class Wall(pygame.sprite.Sprite):
    """A static dark rectangular obstacle."""

    def __init__(self, pos, w, h):
        pygame.sprite.Sprite.__init__(self)
        self.pos = vec2d(pos)
        self.w = w
        self.h = h

        surf = pygame.Surface([w, h])
        surf.fill((10, 10, 10))
        self.image = surf.convert()
        self.rect = self.image.get_rect()
        self.rect.center = pos

    def draw(self, screen):
        # drawn directly from pos (top-left style), not via the sprite rect
        rect = [self.pos.x, self.pos.y, self.w, self.h]
        pygame.draw.rect(screen, (10, 10, 10), rect, 0)
class Player(pygame.sprite.Sprite):
    """The agent-controlled circle with velocity decay ("friction") physics."""

    def __init__(self,
                 radius,
                 color,
                 speed,
                 pos_init,
                 SCREEN_WIDTH,
                 SCREEN_HEIGHT):
        pygame.sprite.Sprite.__init__(self)

        self.SCREEN_WIDTH = SCREEN_WIDTH
        self.SCREEN_HEIGHT = SCREEN_HEIGHT
        self.pos = vec2d(pos_init)
        self.vel = vec2d((0, 0))
        self.radius = radius

        surf = pygame.Surface([radius * 2, radius * 2])
        surf.set_colorkey((0, 0, 0))
        pygame.draw.circle(surf, color, (radius, radius), radius, 0)

        self.image = surf.convert()
        self.rect = self.image.get_rect()

    def update(self, dx, dy, dt):
        self.vel.x += dx
        self.vel.y += dy

        new_x = self.pos.x + self.vel.x * dt
        new_y = self.pos.y + self.vel.y * dt

        # clamp to the screen, killing velocity against walls;
        # otherwise apply a 2.5% per-step velocity decay
        max_x = self.SCREEN_WIDTH - self.radius * 2
        if new_x >= max_x:
            self.pos.x = max_x
            self.vel.x = 0.0
        elif new_x < 0.0:
            self.pos.x = 0.0
            self.vel.x = 0.0
        else:
            self.pos.x = new_x
            self.vel.x = self.vel.x * 0.975

        max_y = self.SCREEN_HEIGHT - self.radius * 2
        if new_y > max_y:
            self.pos.y = max_y
            self.vel.y = 0.0
        elif new_y < 0.0:
            self.pos.y = 0.0
            self.vel.y = 0.0
        else:
            self.pos.y = new_y
            self.vel.y = self.vel.y * 0.975

        self.rect.center = (self.pos.x, self.pos.y)

    def draw(self, screen):
        screen.blit(self.image, self.rect.center)
| 4,703 | 26.83432 | 64 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/pong.py | import math
import sys
import pygame
from pygame.constants import K_w, K_s
from ple.games.utils.vec2d import vec2d
from ple.games.utils import percent_round_int
#import base
from ple.games.base.pygamewrapper import PyGameWrapper
class Ball(pygame.sprite.Sprite):
    """The pong ball: handles its own movement, paddle and wall bounces."""

    def __init__(self, radius, speed, rng,
                 pos_init, SCREEN_WIDTH, SCREEN_HEIGHT):
        pygame.sprite.Sprite.__init__(self)

        self.rng = rng
        self.radius = radius
        self.speed = speed
        self.pos = vec2d(pos_init)
        # previous frame's position; used for swept collision tests
        self.pos_before = vec2d(pos_init)
        self.vel = vec2d((speed, -1.0 * speed))

        self.SCREEN_HEIGHT = SCREEN_HEIGHT
        self.SCREEN_WIDTH = SCREEN_WIDTH

        image = pygame.Surface((radius * 2, radius * 2))
        image.fill((0, 0, 0, 0))
        image.set_colorkey((0, 0, 0))

        pygame.draw.circle(
            image,
            (255, 255, 255),
            (radius, radius),
            radius,
            0
        )

        self.image = image
        self.rect = self.image.get_rect()
        self.rect.center = pos_init

    def line_intersection(self, p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y):
        """Return True if segment (p0, p1) intersects segment (p2, p3).

        Used to test whether the ball's path this frame crossed a paddle
        face, which is robust against tunneling at high ball speeds.
        """
        s1_x = p1_x - p0_x
        s1_y = p1_y - p0_y
        s2_x = p3_x - p2_x
        s2_y = p3_y - p2_y

        denom = -s2_x * s1_y + s1_x * s2_y
        if denom == 0:
            # BUGFIX: parallel/degenerate segments (e.g. dt == 0 so the ball
            # did not move this frame) previously raised ZeroDivisionError;
            # treat them as non-intersecting.
            return False

        s = (-s1_y * (p0_x - p2_x) + s1_x * (p0_y - p2_y)) / denom
        t = (s2_x * (p0_y - p2_y) - s2_y * (p0_x - p2_x)) / denom

        return (s >= 0 and s <= 1 and t >= 0 and t <= 1)

    def update(self, agentPlayer, cpuPlayer, dt):
        """Advance the ball, bouncing off paddles and the top/bottom walls."""
        self.pos.x += self.vel.x * dt
        self.pos.y += self.vel.y * dt

        is_pad_hit = False

        # agent paddle (left side): swept test against the pad face
        if self.pos.x <= agentPlayer.pos.x + agentPlayer.rect_width:
            if self.line_intersection(self.pos_before.x, self.pos_before.y, self.pos.x, self.pos.y, agentPlayer.pos.x + agentPlayer.rect_width / 2, agentPlayer.pos.y - agentPlayer.rect_height / 2, agentPlayer.pos.x + agentPlayer.rect_width / 2, agentPlayer.pos.y + agentPlayer.rect_height / 2):
                self.pos.x = max(0, self.pos.x)
                # reflect and speed up slightly; paddle motion adds spin
                self.vel.x = -1 * (self.vel.x + self.speed * 0.05)
                self.vel.y += agentPlayer.vel.y * 2.0
                self.pos.x += self.radius
                is_pad_hit = True

        # cpu paddle (right side)
        if self.pos.x >= cpuPlayer.pos.x - cpuPlayer.rect_width:
            if self.line_intersection(self.pos_before.x, self.pos_before.y, self.pos.x, self.pos.y, cpuPlayer.pos.x - cpuPlayer.rect_width / 2, cpuPlayer.pos.y - cpuPlayer.rect_height / 2, cpuPlayer.pos.x - cpuPlayer.rect_width / 2, cpuPlayer.pos.y + cpuPlayer.rect_height / 2):
                self.pos.x = min(self.SCREEN_WIDTH, self.pos.x)
                self.vel.x = -1 * (self.vel.x + self.speed * 0.05)
                self.vel.y += cpuPlayer.vel.y * 0.006
                self.pos.x -= self.radius
                is_pad_hit = True

        # Little randomness in order not to stuck in a static loop
        if is_pad_hit:
            self.vel.y += self.rng.random_sample() * 0.001 - 0.0005

        # bounce off top and bottom walls with slight damping
        if self.pos.y - self.radius <= 0:
            self.vel.y *= -0.99
            self.pos.y += 1.0

        if self.pos.y + self.radius >= self.SCREEN_HEIGHT:
            self.vel.y *= -0.99
            self.pos.y -= 1.0

        self.pos_before.x = self.pos.x
        self.pos_before.y = self.pos.y

        self.rect.center = (self.pos.x, self.pos.y)
class Player(pygame.sprite.Sprite):
    """A pong paddle, driven by the agent (update) or a simple AI (updateCpu)."""

    def __init__(self, speed, rect_width, rect_height,
                 pos_init, SCREEN_WIDTH, SCREEN_HEIGHT):
        pygame.sprite.Sprite.__init__(self)

        self.speed = speed
        self.pos = vec2d(pos_init)
        self.vel = vec2d((0, 0))

        self.rect_height = rect_height
        self.rect_width = rect_width
        self.SCREEN_HEIGHT = SCREEN_HEIGHT
        self.SCREEN_WIDTH = SCREEN_WIDTH

        surf = pygame.Surface((rect_width, rect_height))
        surf.fill((0, 0, 0, 0))
        surf.set_colorkey((0, 0, 0))
        pygame.draw.rect(
            surf, (255, 255, 255), (0, 0, rect_width, rect_height), 0)

        self.image = surf
        self.rect = self.image.get_rect()
        self.rect.center = pos_init

    def update(self, dy, dt):
        self.vel.y += dy * dt
        self.vel.y *= 0.9  # damping

        self.pos.y += self.vel.y

        # clamp to the screen, killing velocity at the edges
        half = self.rect_height / 2
        if self.pos.y - half <= 0:
            self.pos.y = half
            self.vel.y = 0.0

        if self.pos.y + half >= self.SCREEN_HEIGHT:
            self.pos.y = self.SCREEN_HEIGHT - half
            self.vel.y = 0.0

        self.rect.center = (self.pos.x, self.pos.y)

    def updateCpu(self, ball, dt):
        dy = 0.0
        if ball.vel.x >= 0 and ball.pos.x >= self.SCREEN_WIDTH / 2:
            # ball approaching on our half: chase it at full speed
            dy = self.speed if self.pos.y <= ball.pos.y else -1.0 * self.speed
        else:
            # ball receding: drift back toward the vertical centre
            dy = self.speed / 4.0
            if self.pos.y > self.SCREEN_HEIGHT / 2.0:
                dy = -1.0 * self.speed / 4.0

        # clamp before moving (matches original ordering)
        half = self.rect_height / 2
        if self.pos.y - half <= 0:
            self.pos.y = half
            self.vel.y = 0.0

        if self.pos.y + half >= self.SCREEN_HEIGHT:
            self.pos.y = self.SCREEN_HEIGHT - half
            self.vel.y = 0.0

        self.pos.y += dy * dt
        self.rect.center = (self.pos.x, self.pos.y)
class Pong(PyGameWrapper):
    """
    Loosely based on code from marti1125's `pong game`_.
    .. _pong game: https://github.com/marti1125/pong/
    Parameters
    ----------
    width : int
        Screen width.
    height : int
        Screen height, recommended to be same dimension as width.
    MAX_SCORE : int (default: 11)
        The max number of points the agent or cpu need to score to cause a terminal state.
    cpu_speed_ratio: float (default: 0.6)
        Speed of opponent (useful for curriculum learning)
    players_speed_ratio: float (default: 0.4)
        Speed of player (useful for curriculum learning)
    ball_speed_ratio: float (default: 0.75)
        Speed of ball (useful for curriculum learning)
    """
    def __init__(self, width=64, height=48, cpu_speed_ratio=0.6, players_speed_ratio = 0.4, ball_speed_ratio=0.75, MAX_SCORE=11):
        # The agent can only move its (left) paddle up or down.
        actions = {
            "up": K_w,
            "down": K_s
        }
        PyGameWrapper.__init__(self, width, height, actions=actions)
        # the %'s come from original values, wanted to keep same ratio when you
        # increase the resolution.
        self.ball_radius = percent_round_int(height, 0.03)
        self.cpu_speed_ratio = cpu_speed_ratio
        self.ball_speed_ratio = ball_speed_ratio
        self.players_speed_ratio = players_speed_ratio
        self.paddle_width = percent_round_int(width, 0.023)
        self.paddle_height = percent_round_int(height, 0.15)
        self.paddle_dist_to_wall = percent_round_int(width, 0.0625)
        self.MAX_SCORE = MAX_SCORE
        # Vertical impulse applied to the agent paddle this frame.
        self.dy = 0.0
        self.score_sum = 0.0 # need to deal with 11 on either side winning
        self.score_counts = {
            "agent": 0.0,
            "cpu": 0.0
        }
    def _handle_player_events(self):
        """Translate keyboard/agent input into self.dy for this frame."""
        self.dy = 0
        if __name__ == "__main__":
            # for debugging mode
            pygame.event.get()
            keys = pygame.key.get_pressed()
            if keys[self.actions['up']]:
                self.dy = -self.agentPlayer.speed
            elif keys[self.actions['down']]:
                self.dy = self.agentPlayer.speed
            # NOTE(review): pygame.QUIT is an event-type constant, not a key
            # code; indexing the pressed-key array with it looks wrong and
            # this quit check probably never fires. Handle QUIT via the
            # event loop (as the else branch does) -- confirm before fixing.
            if keys[pygame.QUIT]:
                pygame.quit()
                sys.exit()
            pygame.event.pump()
        else:
            # consume events from act
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
                if event.type == pygame.KEYDOWN:
                    key = event.key
                    if key == self.actions['up']:
                        self.dy = -self.agentPlayer.speed
                    if key == self.actions['down']:
                        self.dy = self.agentPlayer.speed
    def getGameState(self):
        """
        Gets a non-visual state representation of the game.
        Returns
        -------
        dict
            * player y position.
            * players velocity.
            * cpu y position.
            * ball x position.
            * ball y position.
            * ball x velocity.
            * ball y velocity.
            See code for structure.
        """
        state = {
            "player_y": self.agentPlayer.pos.y,
            "player_velocity": self.agentPlayer.vel.y,
            "cpu_y": self.cpuPlayer.pos.y,
            "ball_x": self.ball.pos.x,
            "ball_y": self.ball.pos.y,
            "ball_velocity_x": self.ball.vel.x,
            "ball_velocity_y": self.ball.vel.y
        }
        return state
    def getScore(self):
        """Return the cumulative reward accrued so far this episode."""
        return self.score_sum
    def game_over(self):
        """Return True once either side has reached MAX_SCORE points."""
        # pong used 11 as max score
        return (self.score_counts['agent'] == self.MAX_SCORE) or (
            self.score_counts['cpu'] == self.MAX_SCORE)
    def init(self):
        """(Re)create the ball, both paddles and the sprite groups; zero scores."""
        self.score_counts = {
            "agent": 0.0,
            "cpu": 0.0
        }
        self.score_sum = 0.0
        self.ball = Ball(
            self.ball_radius,
            self.ball_speed_ratio * self.height,
            self.rng,
            (self.width / 2, self.height / 2),
            self.width,
            self.height
        )
        # Agent paddle sits near the left wall, cpu near the right wall.
        self.agentPlayer = Player(
            self.players_speed_ratio * self.height,
            self.paddle_width,
            self.paddle_height,
            (self.paddle_dist_to_wall, self.height / 2),
            self.width,
            self.height)
        self.cpuPlayer = Player(
            self.cpu_speed_ratio * self.height,
            self.paddle_width,
            self.paddle_height,
            (self.width - self.paddle_dist_to_wall, self.height / 2),
            self.width,
            self.height)
        self.players_group = pygame.sprite.Group()
        self.players_group.add(self.agentPlayer)
        self.players_group.add(self.cpuPlayer)
        self.ball_group = pygame.sprite.Group()
        self.ball_group.add(self.ball)
    def reset(self):
        """Start a fresh game with the ball served in a random direction."""
        self.init()
        # after game over set random direction of ball otherwise it will always be the same
        self._reset_ball(1 if self.rng.random_sample() > 0.5 else -1)
    def _reset_ball(self, direction):
        """Re-centre the ball and serve toward `direction` (+1 right, -1 left)."""
        self.ball.pos.x = self.width / 2 # move it to the center
        # we go in the same direction that they lost in but at starting vel.
        self.ball.vel.x = self.ball.speed * direction
        self.ball.vel.y = (self.rng.random_sample() *
                           self.ball.speed) - self.ball.speed * 0.5
    def step(self, dt):
        """Advance the game by `dt` milliseconds and apply reward logic."""
        dt /= 1000.0
        self.screen.fill((0, 0, 0))
        # Refresh speeds each step so ratio changes (curriculum) take effect.
        self.agentPlayer.speed = self.players_speed_ratio * self.height
        self.cpuPlayer.speed = self.cpu_speed_ratio * self.height
        self.ball.speed = self.ball_speed_ratio * self.height
        self._handle_player_events()
        # doesnt make sense to have this, but include if needed.
        self.score_sum += self.rewards["tick"]
        self.ball.update(self.agentPlayer, self.cpuPlayer, dt)
        is_terminal_state = False
        # logic
        if self.ball.pos.x <= 0:
            # Ball passed the agent's (left) wall: point for the cpu.
            self.score_sum += self.rewards["negative"]
            self.score_counts["cpu"] += 1.0
            self._reset_ball(-1)
            is_terminal_state = True
        if self.ball.pos.x >= self.width:
            # Ball passed the cpu's (right) wall: point for the agent.
            self.score_sum += self.rewards["positive"]
            self.score_counts["agent"] += 1.0
            self._reset_ball(1)
            is_terminal_state = True
        if is_terminal_state:
            # winning
            if self.score_counts['agent'] == self.MAX_SCORE:
                self.score_sum += self.rewards["win"]
            # losing
            if self.score_counts['cpu'] == self.MAX_SCORE:
                self.score_sum += self.rewards["loss"]
        else:
            # Paddles only move on non-scoring frames.
            self.agentPlayer.update(self.dy, dt)
            self.cpuPlayer.updateCpu(self.ball, dt)
        self.players_group.draw(self.screen)
        self.ball_group.draw(self.screen)
if __name__ == "__main__":
    # Manual smoke test: run Pong in a window with a fixed RNG seed.
    import numpy as np
    pygame.init()
    game = Pong(width=256, height=200)
    game.screen = pygame.display.set_mode(game.getScreenDims(), 0, 32)
    game.clock = pygame.time.Clock()
    game.rng = np.random.RandomState(24)
    game.init()
    while True:
        # NOTE(review): unlike the Catcher demo loop, this never checks
        # game_over()/reset(), so play continues past MAX_SCORE -- confirm
        # whether that is intended for the debug window.
        dt = game.clock.tick_busy_loop(60)  # cap at 60 FPS; dt in milliseconds
        game.step(dt)
        pygame.display.update()
| 12,840 | 30.243309 | 294 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/__init__.py | try:
from ple.games.doom import Doom
except:
print("Couldn't import doom")
from ple.games.catcher import Catcher
from ple.games.flappybird import FlappyBird
from ple.games.monsterkong import MonsterKong
from ple.games.pixelcopter import Pixelcopter
from ple.games.pong import Pong
from ple.games.puckworld import PuckWorld
from ple.games.raycastmaze import RaycastMaze
from ple.games.snake import Snake
from ple.games.waterworld import WaterWorld
| 455 | 31.571429 | 45 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/catcher.py | import sys
import pygame
from .utils import percent_round_int
from ple.games import base
from pygame.constants import K_a, K_d
class Paddle(pygame.sprite.Sprite):
    """The player-controlled tray at the bottom of the Catcher screen."""

    def __init__(self, speed, width, height, SCREEN_WIDTH, SCREEN_HEIGHT):
        self.speed = speed
        self.width = width
        self.SCREEN_WIDTH = SCREEN_WIDTH
        self.vel = 0.0

        pygame.sprite.Sprite.__init__(self)

        # Paddle sprite: a white rectangle on a color-keyed surface.
        surface = pygame.Surface((width, height))
        surface.fill((0, 0, 0, 0))
        surface.set_colorkey((0, 0, 0))
        pygame.draw.rect(
            surface,
            (255, 255, 255),
            (0, 0, width, height),
            0
        )

        self.image = surface
        self.rect = self.image.get_rect()
        # Start horizontally centred, just above the bottom edge.
        self.rect.center = (
            SCREEN_WIDTH / 2 - self.width / 2,
            SCREEN_HEIGHT - height - 3)

    def update(self, dx, dt):
        """Accelerate by dx, damp the velocity, and clamp to the screen."""
        self.vel += dx
        self.vel *= 0.9  # velocity damping

        cur_x, cur_y = self.rect.center
        new_x = cur_x + self.vel

        # Stop dead at either screen edge.
        if new_x <= 0:
            self.vel = 0.0
            new_x = 0

        if new_x + self.width >= self.SCREEN_WIDTH:
            self.vel = 0.0
            new_x = self.SCREEN_WIDTH - self.width

        self.rect.center = (new_x, cur_y)

    def draw(self, screen):
        """Blit the paddle onto the given surface."""
        screen.blit(self.image, self.rect.center)
class Fruit(pygame.sprite.Sprite):
    """The falling square that the paddle must catch."""

    def __init__(self, speed, size, SCREEN_WIDTH, SCREEN_HEIGHT, rng):
        self.speed = speed
        self.size = size
        self.SCREEN_WIDTH = SCREEN_WIDTH
        self.SCREEN_HEIGHT = SCREEN_HEIGHT
        self.rng = rng

        pygame.sprite.Sprite.__init__(self)

        # Fruit sprite: a light-red square on a color-keyed surface.
        surface = pygame.Surface((size, size))
        surface.fill((0, 0, 0, 0))
        surface.set_colorkey((0, 0, 0))
        pygame.draw.rect(
            surface,
            (255, 120, 120),
            (0, 0, size, size),
            0
        )

        self.image = surface
        self.rect = self.image.get_rect()
        # Park the fruit off-screen until reset() places it.
        self.rect.center = (-30, -30)

    def update(self, dt):
        """Let the fruit fall straight down at its configured speed."""
        cur_x, cur_y = self.rect.center
        self.rect.center = (cur_x, cur_y + self.speed * dt)

    def reset(self):
        """Respawn above the screen at a random, grid-aligned position."""
        x_options = range(
            self.size * 2,
            self.SCREEN_WIDTH - self.size * 2,
            self.size)
        y_options = range(
            self.size,
            int(self.SCREEN_HEIGHT / 2),
            self.size)
        x = self.rng.choice(x_options)
        y = self.rng.choice(y_options)
        # Negative y places the fruit above the visible screen.
        self.rect.center = (x, -1 * y)

    def draw(self, screen):
        """Blit the fruit onto the given surface."""
        screen.blit(self.image, self.rect.center)
class Catcher(base.PyGameWrapper):
    """
    Based on `Eder Santana`_'s game idea.
    .. _`Eder Santana`: https://github.com/EderSantana
    Parameters
    ----------
    width : int
        Screen width.
    height : int
        Screen height, recommended to be same dimension as width.
    init_lives : int (default: 3)
        The number lives the agent has.
    """
    def __init__(self, width=64, height=64, init_lives=3):
        # The agent can only slide the paddle left or right.
        actions = {
            "left": K_a,
            "right": K_d
        }
        base.PyGameWrapper.__init__(self, width, height, actions=actions)
        # Sizes and speeds scale with resolution so behaviour stays consistent.
        self.fruit_size = percent_round_int(height, 0.06)
        self.fruit_fall_speed = 0.00095 * height
        self.player_speed = 0.021 * width
        self.paddle_width = percent_round_int(width, 0.2)
        self.paddle_height = percent_round_int(height, 0.04)
        # Horizontal impulse applied to the paddle this frame.
        self.dx = 0.0
        self.init_lives = init_lives
    def _handle_player_events(self):
        """Translate key-down events into self.dx for this frame."""
        self.dx = 0.0
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.KEYDOWN:
                key = event.key
                if key == self.actions['left']:
                    self.dx -= self.player_speed
                if key == self.actions['right']:
                    self.dx += self.player_speed
    def init(self):
        """(Re)create the paddle and fruit, and reset score and lives."""
        self.score = 0
        self.lives = self.init_lives
        self.player = Paddle(self.player_speed, self.paddle_width,
                             self.paddle_height, self.width, self.height)
        self.fruit = Fruit(self.fruit_fall_speed, self.fruit_size,
                           self.width, self.height, self.rng)
        self.fruit.reset()
    def getGameState(self):
        """
        Gets a non-visual state representation of the game.
        Returns
        -------
        dict
            * player x position.
            * players velocity.
            * fruits x position.
            * fruits y position.
            See code for structure.
        """
        state = {
            "player_x": self.player.rect.center[0],
            "player_vel": self.player.vel,
            "fruit_x": self.fruit.rect.center[0],
            "fruit_y": self.fruit.rect.center[1]
        }
        return state
    def getScore(self):
        """Return the cumulative reward accrued so far this episode."""
        return self.score
    def game_over(self):
        """Return True once all lives are lost."""
        return self.lives == 0
    def step(self, dt):
        """Advance the game by `dt` milliseconds and apply reward logic."""
        self.screen.fill((0, 0, 0))
        self._handle_player_events()
        self.score += self.rewards["tick"]
        # Fruit reached the floor: lose a life and respawn it.
        if self.fruit.rect.center[1] >= self.height:
            self.score += self.rewards["negative"]
            self.lives -= 1
            self.fruit.reset()
        # Fruit caught by the paddle.
        if pygame.sprite.collide_rect(self.player, self.fruit):
            self.score += self.rewards["positive"]
            self.fruit.reset()
        self.player.update(self.dx, dt)
        self.fruit.update(dt)
        if self.lives == 0:
            self.score += self.rewards["loss"]
        self.player.draw(self.screen)
        self.fruit.draw(self.screen)
if __name__ == "__main__":
    # Manual smoke test: play Catcher in a window with a fixed RNG seed.
    import numpy as np

    pygame.init()
    env = Catcher(width=256, height=256)
    env.rng = np.random.RandomState(24)
    env.screen = pygame.display.set_mode(env.getScreenDims(), 0, 32)
    env.clock = pygame.time.Clock()
    env.init()

    while True:
        frame_ms = env.clock.tick_busy_loop(30)  # cap at 30 FPS
        if env.game_over():
            env.reset()
        env.step(frame_ms)
        pygame.display.update()
| 6,177 | 23.613546 | 74 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/monsterkong/fireball.py | __author__ = 'Erilyth'
import pygame
import math
import os
from .onBoard import OnBoard
'''
This class defines all our fireballs.
A fireball inherits from the OnBoard class since we will use it as an inanimate object on our board.
Each fireball can check for collisions in order to decide when to turn and when they hit a player.
'''
class Fireball(OnBoard):
    def __init__(self, raw_image, position, index, speed, rng, dir):
        """Create a fireball at `position`.

        `index` uniquely identifies this fireball in the board's list,
        `rng` supplies all randomness, and `dir` (name kept for API
        compatibility even though it shadows the builtin) is the asset
        directory the sprite images are loaded from.
        """
        super(Fireball, self).__init__(raw_image, position)
        # Set the fireball direction randomly
        self.rng = rng
        # 0 == moving right, 1 == moving left (see continuousUpdate).
        self.__direction = int(math.floor(self.rng.rand() * 100)) % 2
        self.index = index
        # NOTE(review): these two lists are never read or written in this
        # class -- candidates for removal once confirmed unused elsewhere.
        self.wallsBelow = []
        self.laddersBelow = []
        self.IMAGES = {
            "fireballright": pygame.transform.scale(pygame.image.load(os.path.join(dir, 'assets/fireballright.png')), (20, 20)).convert_alpha(),
            "fireballleft": pygame.transform.scale(pygame.image.load(os.path.join(dir, 'assets/fireballleft.png')), (20, 20)).convert_alpha()
        }
        # The newly spawned fireball is not falling
        self.__fall = 0
        # The speed of a fireball is set
        self.__speed = speed
    # Update the image of a fireball
    def updateImage(self, raw_image):
        self.image = raw_image
    # Getters and Setters for some private variables
    def getSpeed(self):
        return self.__speed
    def setSpeed(self, speed):
        self.__speed = speed
    def getFall(self):
        return self.__fall
    def getDirection(self):
        return self.__direction
    # Moves the fireball in the required direction
    def continuousUpdate(self, wallGroup, ladderGroup):
        # The fireball is falling
        if self.__fall == 1:
            # We move the fireball downwards with speed of self.__speed
            self.update(self.image, "V", self.__speed)
            if self.checkCollision(wallGroup, "V"):
                # We have collided with a wall below, so the fireball can stop
                # falling
                self.__fall = 0
                # Set the direction randomly
                self.__direction = int(math.floor(self.rng.rand() * 100)) % 2
        else:
            # While we are on a ladder (with no wall below), there is a
            # 5-in-20 chance per update that the fireball starts falling.
            if self.checkCollision(ladderGroup, "V") and len(
                    self.checkCollision(wallGroup, "V")) == 0:
                randVal = int(math.floor(self.rng.rand() * 100)) % 20
                if randVal < 15:
                    self.__fall = 0
                else:
                    self.__fall = 1
            # We are at the edge of the floor so the fireball starts falling
            if len(self.checkCollision(ladderGroup, "V")) == 0 and len(
                    self.checkCollision(wallGroup, "V")) == 0:
                self.__fall = 1
            # We are moving right, so update the fireball image to the right
            if self.__direction == 0:
                self.update(self.IMAGES["fireballright"], "H", self.__speed)
                # When we hit a wall, we change direction
                if self.checkCollision(wallGroup, "H"):
                    self.__direction = 1
                    self.update(self.image, "H", -self.__speed)
            # We are moving left, so update the fireball image to the left
            else:
                self.update(self.IMAGES["fireballleft"], "H", -self.__speed)
                # When we hit a wall, we change direction
                if self.checkCollision(wallGroup, "H"):
                    self.__direction = 0
                    self.update(self.image, "H", self.__speed)
    # Move the fireball in the required direction with the required value and
    # also set the image of the fireball
    def update(self, raw_image, direction, value):
        if direction == "H":
            self.setPosition(
                (self.getPosition()[0] + value,
                 self.getPosition()[1]))
            self.image = raw_image
        if direction == "V":
            self.setPosition(
                (self.getPosition()[0],
                 self.getPosition()[1] + value))
        self.rect.center = self.getPosition()
    '''
    We check for collisions in the direction in which we are moving if the parameter direction is "H".
    The way we do this is move a little forward in the direction in which we are moving, then check for collisions then move back to the original location
    We check for collisions below the fireball if the parameter direction is "V"
    We do this by moving down a little, then check for collisions then move back up to the original location
    '''
    def checkCollision(self, colliderGroup, direction):
        if direction == "H":
            if self.__direction == 0:
                self.update(self.image, "H", self.__speed) # Right collision
            if self.__direction == 1:
                self.update(self.image, "H", -self.__speed) # Left collision
            Colliders = pygame.sprite.spritecollide(self, colliderGroup, False)
            if self.__direction == 0:
                self.update(self.image, "H", -self.__speed) # Right collision
            if self.__direction == 1:
                self.update(self.image, "H", self.__speed) # Left collision
        else:
            self.update(self.image, "V", self.__speed) # Bottom collision
            Colliders = pygame.sprite.spritecollide(self, colliderGroup, False)
            self.update(self.image, "V", -self.__speed)
        return Colliders
| 5,580 | 40.340741 | 154 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/monsterkong/board.py | __author__ = 'Batchu Vishal'
import pygame
import math
import sys
import os
from .person import Person
from .onBoard import OnBoard
from .coin import Coin
from .player import Player
from .fireball import Fireball
from .monsterPerson import MonsterPerson
class Board(object):
    '''
    This class defines our gameboard.
    A gameboard contains everything related to our game on it like our characters, walls, ladders, coins etc
    The generation of the level also happens in this class.
    '''
    def __init__(self, width, height, rewards, rng, _dir):
        """Build a level of the given size and populate all sprite groups."""
        self.__width = width
        self.__actHeight = height
        # Internal height is padded by 10 pixels beyond the visible height.
        self.__height = self.__actHeight + 10
        self.score = 0
        self.rng = rng
        self.rewards = rewards
        self.cycles = 0 # For the characters animation
        self.direction = 0
        self._dir = _dir
        self.IMAGES = {
            "still": pygame.image.load(os.path.join(_dir, 'assets/still.png')).convert_alpha(),
            "monster0": pygame.image.load(os.path.join(_dir, 'assets/monster0.png')).convert_alpha(),
            "princess": pygame.image.load(os.path.join(_dir, 'assets/princess.png')).convert_alpha(),
            "fireballright": pygame.image.load(os.path.join(_dir, 'assets/fireballright.png')).convert_alpha(),
            "coin1": pygame.image.load(os.path.join(_dir, 'assets/coin1.png')).convert_alpha(),
            "wood_block": pygame.image.load(os.path.join(_dir, 'assets/wood_block.png')).convert_alpha(),
            "ladder": pygame.image.load(os.path.join(_dir, 'assets/ladder.png')).convert_alpha()
        }
        self.white = (255, 255, 255)
        '''
        The map is essentially an array of 30x80 in which we store what each block on our map is.
        1 represents a wall, 2 for a ladder and 3 for a coin.
        '''
        self.map = []
        # These are the arrays in which we store our instances of different
        # classes
        self.Players = []
        self.Enemies = []
        self.Allies = []
        self.Coins = []
        self.Walls = []
        self.Ladders = []
        self.Fireballs = []
        self.Boards = []
        self.FireballEndpoints = []
        # Resets the above groups and initializes the game for us
        self.resetGroups()
        # Initialize the instance groups which we use to display our instances
        # on the screen
        self.fireballGroup = pygame.sprite.RenderPlain(self.Fireballs)
        self.playerGroup = pygame.sprite.RenderPlain(self.Players)
        self.enemyGroup = pygame.sprite.RenderPlain(self.Enemies)
        self.wallGroup = pygame.sprite.RenderPlain(self.Walls)
        self.ladderGroup = pygame.sprite.RenderPlain(self.Ladders)
        self.coinGroup = pygame.sprite.RenderPlain(self.Coins)
        self.allyGroup = pygame.sprite.RenderPlain(self.Allies)
        self.fireballEndpointsGroup = pygame.sprite.RenderPlain(
            self.FireballEndpoints)
    def resetGroups(self):
        """Recreate all entity lists and regenerate the level from scratch."""
        self.score = 0
        self.lives = 3
        self.map = [] # We will create the map again when we reset the game
        self.Players = [
            Player(
                self.IMAGES["still"],
                (self.__width / 2,
                 435),
                15,
                15)]
        self.Enemies = [
            MonsterPerson(
                self.IMAGES["monster0"],
                (100,
                 117),
                self.rng,
                self._dir)]
        self.Allies = [Person(self.IMAGES["princess"], (50, 48), 18, 25)]
        self.Allies[0].updateWH(self.Allies[0].image, "H", 0, 25, 25)
        self.Coins = []
        self.Walls = []
        self.Ladders = []
        self.Fireballs = []
        # Fireballs are destroyed when they reach this point near the bottom.
        self.FireballEndpoints = [OnBoard(self.IMAGES["still"], (50, 440))]
        self.initializeGame() # This initializes the game and generates our map
        self.createGroups() # This creates the instance groups
    # Checks to destroy a fireball when it reaches its terminal point
    def checkFireballDestroy(self, fireball):
        if pygame.sprite.spritecollide(
                fireball, self.fireballEndpointsGroup, False):
            # We use indices on fireballs to uniquely identify each fireball
            self.DestroyFireball(fireball.index)
    # Creates a new fireball and adds it to our fireball group
    def CreateFireball(self, location, monsterIndex):
        # Each monster may have at most 5 live fireballs.
        if len(self.Fireballs) < len(self.Enemies) * 5:
            self.Fireballs.append(
                Fireball(self.IMAGES["fireballright"], (location[0], location[1] + 15), len(self.Fireballs),
                         2 + len(self.Enemies) / 2, self.rng, self._dir))
            # Starts monster's animation
            self.Enemies[monsterIndex].setStopDuration(15)
            self.Enemies[monsterIndex].setPosition(
                (self.Enemies[monsterIndex].getPosition()[0], self.Enemies[monsterIndex].getPosition()[1] - 12))
            self.Enemies[monsterIndex].setCenter(
                self.Enemies[monsterIndex].getPosition())
            self.createGroups() # We recreate the groups so the fireball is added
    # Destroy a fireball if it has collided with a player or reached its
    # endpoint
    def DestroyFireball(self, index):
        for fireBall in range(len(self.Fireballs)):
            if self.Fireballs[fireBall].index == index:
                self.Fireballs.remove(self.Fireballs[fireBall])
                for fireBallrem in range(
                        len(self.Fireballs)): # We need to reduce the indices of all fireballs greater than this
                    if self.Fireballs[fireBallrem].index > index:
                        self.Fireballs[fireBallrem].index -= 1
                self.createGroups() # Recreate the groups so the fireball is removed
                break
    # Randomly Generate coins in the level where there is a wall below the
    # coin so the player can reach it
    def GenerateCoins(self):
        for i in range(6, len(self.map)):
            for j in range(len(self.map[i])):
                # Only place a coin on an empty cell with a wall 1-2 rows below.
                if self.map[i][j] == 0 and ((i + 1 < len(self.map) and self.map[i + 1][j] == 1) or (
                        i + 2 < len(self.map) and self.map[i + 2][j] == 1)):
                    randNumber = math.floor(self.rng.rand() * 1000)
                    if randNumber % 35 == 0 and len(
                            self.Coins) <= 25: # At max there will be 26 coins in the map
                        self.map[i][j] = 3
                        # Avoid placing two coins in adjacent columns.
                        if j - 1 >= 0 and self.map[i][j - 1] == 3:
                            self.map[i][j] = 0
                if self.map[i][j] == 3:
                    # Add the coin to our coin list
                    self.Coins.append(
                        Coin(
                            self.IMAGES["coin1"],
                            (j * 15 + 15 / 2,
                             i * 15 + 15 / 2),
                            self._dir))
        if len(
                self.Coins) <= 15: # If 15 or fewer coins were placed, we call the function again
            self.GenerateCoins()
    # Given a position and checkNo ( 1 for wall, 2 for ladder, 3 for coin) the
    # function tells us if its a valid position to place or not
    def checkMapForMatch(self, placePosition, floor, checkNo, offset):
        if floor < 1:
            return 0
        for i in range(
                0, 5): # We will get things placed atleast 5-1 blocks away from each other
            if self.map[floor * 5 - offset][placePosition + i] == checkNo:
                return 1
            if self.map[floor * 5 - offset][placePosition - i] == checkNo:
                return 1
        return 0
    # Create an empty 2D map of 30x80 size
    def makeMap(self):
        for point in range(0, int(self.__height / 15 + 1)):
            row = []
            for point2 in range(0, int(self.__width / 15)):
                row.append(0)
            self.map.append(row)
    # Add walls to our map boundaries and also the floors
    def makeWalls(self):
        for i in range(0, int(self.__height / 15)):
            self.map[i][0] = self.map[i][int(self.__width / 15 - 1)] = 1
        for i in range(2, int(self.__height / (15 * 4))):
            for j in range(0, int(self.__width / 15)):
                self.map[i * 5][j] = 1
    # Make a small chamber on the top where the princess resides
    def makePrincessChamber(self):
        for j in range(0, 4):
            self.map[j][9] = 1
        for j in range(0, 10):
            self.map[4][j] = 1
        # Ladder (value 2) leading up into the chamber.
        for j in range(0, 6):
            self.map[1 * 4 + j][7] = self.map[1 * 4 + j][8] = 2
    # Generate ladders randomly, 1 for each floor such that they are not too
    # close to each other
    def makeLadders(self):
        for i in range(2, int(self.__height / (15 * 4) - 1)):
            ladderPos = math.floor(self.rng.rand() * (self.__width / 15 - 20))
            ladderPos = int(7 + ladderPos)
            # Re-roll until the spot is far enough from the floor above's ladder.
            while self.checkMapForMatch(ladderPos, i - 1, 2, 0) == 1:
                ladderPos = math.floor(
                    self.rng.rand() * (self.__width / 15 - 20))
                ladderPos = int(7 + ladderPos)
            for k in range(0, 5):
                self.map[i * 5 + k][ladderPos] = self.map[i *
                                                          5 + k][ladderPos + 1] = 2
    # Create the holes on each floor (extreme right and extreme left)
    def makeHoles(self):
        for i in range(3, int(self.__height / (15 * 4) - 1)):
            for k in range(
                    1, 6): # Ladders wont interfere since they leave 10 blocks on either side
                if i % 2 == 0:
                    self.map[i * 5][k] = 0
                else:
                    self.map[i * 5][int(self.__width / 15 - 1 - k)] = 0
    '''
    This is called once you have finished making holes, ladders, walls etc
    You use the 2D map to add instances to the groups
    '''
    def populateMap(self):
        for x in range(len(self.map)):
            for y in range(len(self.map[x])):
                if self.map[x][y] == 1:
                    # Add a wall at that position
                    self.Walls.append(
                        OnBoard(
                            self.IMAGES["wood_block"],
                            (y * 15 + 15 / 2,
                             x * 15 + 15 / 2)))
                elif self.map[x][y] == 2:
                    # Add a ladder at that position
                    self.Ladders.append(
                        OnBoard(
                            self.IMAGES["ladder"],
                            (y * 15 + 15 / 2,
                             x * 15 + 15 / 2)))
    # Check if the player is on a ladder or not
    def ladderCheck(self, laddersCollidedBelow,
                    wallsCollidedBelow, wallsCollidedAbove):
        if laddersCollidedBelow and len(wallsCollidedBelow) == 0:
            for ladder in laddersCollidedBelow:
                if ladder.getPosition()[1] >= self.Players[0].getPosition()[1]:
                    self.Players[0].onLadder = 1
                    self.Players[0].isJumping = 0
                    # Move the player down if he collides a wall above
                    if wallsCollidedAbove:
                        self.Players[0].updateY(3)
        else:
            self.Players[0].onLadder = 0
    # Update all the fireball positions and check for collisions with player
    def fireballCheck(self):
        for fireball in self.fireballGroup:
            fireball.continuousUpdate(self.wallGroup, self.ladderGroup)
            if fireball.checkCollision(self.playerGroup, "V"):
                # Fireball hit the player: penalize, lose a life, respawn.
                self.Fireballs.remove(fireball)
                self.Players[0].setPosition((50, 440))
                self.score += self.rewards["negative"]
                self.lives += -1
                self.createGroups()
            self.checkFireballDestroy(fireball)
    # Check for coins collided and add the appropriate score
    def coinCheck(self, coinsCollected):
        for coin in coinsCollected:
            self.score += self.rewards["positive"]
            # We also remove the coin entry from our map
            self.map[int((coin.getPosition()[1] - 15 / 2) /
                         15)][int((coin.getPosition()[0] - 15 / 2) / 15)] = 0
            # Remove the coin entry from our list
            self.Coins.remove(coin)
            # Update the coin group since we modified the coin list
            self.createGroups()
    # Check if the player wins
    def checkVictory(self):
        # If you touch the princess or reach the floor with the princess you
        # win!
        if self.Players[0].checkCollision(self.allyGroup) or self.Players[
                0].getPosition()[1] < 4 * 15:
            self.score += self.rewards["win"]
            # This is just the next level so we only clear the fireballs and
            # regenerate the coins
            self.Fireballs = []
            self.Players[0].setPosition((50, 440))
            self.Coins = []
            self.GenerateCoins()
            # Add monsters
            if len(self.Enemies) == 1:
                self.Enemies.append(
                    MonsterPerson(
                        self.IMAGES["monster0"], (700, 117), self.rng, self._dir))
            elif len(self.Enemies) == 2:
                self.Enemies.append(
                    MonsterPerson(
                        self.IMAGES["monster0"], (400, 117), self.rng, self._dir))
            # Create the groups again so the enemies are effected
            self.createGroups()
    # Redraws the entire game screen for us
    def redrawScreen(self, screen, width, height):
        screen.fill((40, 20, 0)) # Fill the background (dark brown)
        # Draw all our groups on the background
        self.ladderGroup.draw(screen)
        self.playerGroup.draw(screen)
        self.coinGroup.draw(screen)
        self.wallGroup.draw(screen)
        self.fireballGroup.draw(screen)
        self.enemyGroup.draw(screen)
        self.allyGroup.draw(screen)
    # Update all the groups from their corresponding lists
    def createGroups(self):
        self.fireballGroup = pygame.sprite.RenderPlain(self.Fireballs)
        self.playerGroup = pygame.sprite.RenderPlain(self.Players)
        self.enemyGroup = pygame.sprite.RenderPlain(self.Enemies)
        self.wallGroup = pygame.sprite.RenderPlain(self.Walls)
        self.ladderGroup = pygame.sprite.RenderPlain(self.Ladders)
        self.coinGroup = pygame.sprite.RenderPlain(self.Coins)
        self.allyGroup = pygame.sprite.RenderPlain(self.Allies)
        self.fireballEndpointsGroup = pygame.sprite.RenderPlain(
            self.FireballEndpoints)
    '''
    Initialize the game by making the map, generating walls, generating princess chamber, generating ladders randomly,
    generating broken ladders randomly, generating holes, generating coins randomly, adding the ladders and walls to our lists
    and finally updating the groups.
    '''
    def initializeGame(self):
        self.makeMap()
        self.makeWalls()
        self.makePrincessChamber()
        self.makeLadders()
        self.makeHoles()
        self.GenerateCoins()
        self.populateMap()
        self.createGroups()
| 15,293 | 41.960674 | 126 | py |
PyGame-Learning-Environment | PyGame-Learning-Environment-master/ple/games/monsterkong/ladder.py | __author__ = 'Batchu Vishal'
import pygame

# Explicit relative import: the Python-2-only implicit form
# (`from onBoard import OnBoard`) fails under Python 3 and was inconsistent
# with the sibling modules (e.g. fireball.py uses `from .onBoard import ...`).
from .onBoard import OnBoard

'''
This class defines all our ladders in the game.
Currently not much is done here, but we can add features such as ladder climb sounds etc here
'''


class Ladder(OnBoard):
    """An inanimate ladder tile placed on the board.

    All position/sprite handling is inherited from OnBoard; this subclass
    only customizes how the ladder image is (re)assigned.
    """

    def __init__(self, raw_image, position):
        super(Ladder, self).__init__(raw_image, position)

    # Update the ladder image, rescaling it to the 15x15 tile size.
    def updateImage(self, raw_image):
        self.image = raw_image
        self.image = pygame.transform.scale(self.image, (15, 15))
| 518 | 23.714286 | 93 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.