repo_name
stringlengths 6
97
| path
stringlengths 3
341
| text
stringlengths 8
1.02M
|
|---|---|---|
guettli/simple21term
|
simple21/models.py
|
from django.contrib.auth.models import User
from django.db import models
from django.db.models import CheckConstraint, Q
from django.urls import reverse
from django.contrib.postgres.fields import ArrayField
class Page(models.Model):
    """A node in a tree of wiki-style pages, linked via ``parent``."""

    # RESTRICT: a page that still has children cannot be deleted.
    parent = models.ForeignKey('Page', null=True, blank=True, on_delete=models.RESTRICT)
    name = models.CharField(max_length=120, unique=True)
    text = models.TextField(default='', blank=True)

    def __str__(self):
        # Non-root pages render their full ancestor path; the root renders
        # its own name or a placeholder.
        if self.parent_id:
            names = [page.name for page in self.get_ancestors(include_self=True) if page.name]
            return ' / '.join(names)
        return self.name if self.name else '<root>'

    def get_ancestors(self, include_self=False):
        """Return the chain of pages from the root down to this page's
        parent (or down to this page itself when include_self is true)."""
        chain = []
        node = self if include_self else self.parent
        while node:
            chain.append(node)
            node = node.parent
        chain.reverse()  # collected child-to-root; callers expect root-first
        return chain

    def get_children(self):
        """Return a queryset of the direct sub-pages."""
        return Page.objects.filter(parent=self)

    @classmethod
    def get_root(cls):
        """Return the single page that has no parent."""
        return cls.objects.get(parent=None)

    def get_absolute_url(self):
        return reverse('page', kwargs=dict(id=self.id))
class SearchLog(models.Model):
    """One executed search: the query, who ran it, when, and which pages matched."""

    query = models.CharField(max_length=1024)
    datetime = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    result_count = models.PositiveIntegerField()
    page_ids = ArrayField(models.IntegerField())

    def __str__(self):
        return f'{self.query} ({self.result_count})'
class GlobalConfig(models.Model):
    """Singleton configuration row (always stored with pk == ID)."""

    # PROTECT: the anonymous user cannot be deleted while referenced here.
    anonymous_user = models.ForeignKey(User, on_delete=models.PROTECT)

    # Username of the shared "not logged in" account.
    anonymous_user_name = 'anonymous'
    # Primary key of the single configuration row.
    ID = 1

    @classmethod
    def get(cls):
        """Return the singleton config, creating it (and the anonymous user)
        on first access."""
        existing = cls.objects.filter(id=cls.ID).first()
        if existing is not None:
            return existing
        user, _created = User.objects.get_or_create(username=cls.anonymous_user_name)
        return cls.objects.create(anonymous_user=user)
|
guettli/simple21term
|
simple21/tests/test_views.py
|
# Configure Django before any project imports: these tests are runnable
# directly (outside `manage.py test`), so settings must be bootstrapped here.
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'
import django
django.setup()

import html2text
from simple21.models import GlobalConfig
from simple21.testutils import AbstractPageTest
from django.urls import reverse
from simple21 import views
from django.test import Client
from simple21.views import get_queryset
class ViewTests(AbstractPageTest):
    """End-to-end view tests; AbstractPageTest presumably provides ``self.page``
    (a fixture page) — verify against simple21.testutils."""

    def test_page_view(self):
        # The detail URL is derived from the page id.
        self.assertEqual('/page/{}/'.format(self.page.id), self.page.get_absolute_url())
        response = Client().get(self.page.get_absolute_url())
        # The page text must appear in the rendered HTML.
        assert self.page.text in str(response.content)

    def test_search_view(self):
        response = Client().get(reverse(views.search), dict(q='fun'))
        # html2text strips markup; normalize the escaped newlines to spaces.
        text = html2text.html2text(str(response.content)).replace('\\n', ' ')
        assert 'fun sub-page' in text

    def test_get_queryset(self):
        self.assertEqual('myPage', str(self.page))
        self.assertEqual(['myPage'], [page.name for page in get_queryset('fun')])

    def test__test_session_of_anonymous_user(self):
        """
        Ensure that two anonymous user have different session data, although they
        use the same user instance in the database.
        """
        from simple21.views import test_session_of_anonymous_user
        url = reverse(test_session_of_anonymous_user)
        user = GlobalConfig.get().anonymous_user
        client_one = Client()
        response = client_one.get(url, dict(me='client_one'))
        self.assertEqual(200, response.status_code)
        self.assertEqual([dict(data=dict(me='client_one'), user=user.username, id=user.id)], client_one.session['get'])
        # A second client shares the anonymous DB user but gets its own session.
        client_two = Client()
        response = client_two.get(url, dict(me='client_two'))
        self.assertEqual(200, response.status_code)
        self.assertEqual([dict(data=dict(me='client_two'), user=user.username, id=user.id)], client_two.session['get'])
        # A repeated request from the first client appends to its own session only.
        response = client_one.get(url, dict(me='client_one'))
        self.assertEqual(200, response.status_code)
        self.assertEqual([dict(data=dict(me='client_one'), user=user.username, id=user.id),
                          dict(data=dict(me='client_one'), user=user.username, id=user.id),
                          ], client_one.session['get'])
|
guettli/simple21term
|
simple21/urls.py
|
<gh_stars>0
from django.urls import path

from . import views

# URL routes: search is the landing page; the last route exists only for the
# anonymous-session test and is intentionally unnamed.
urlpatterns = [
    path('', views.search, name='search'),
    path('page/<int:id>/', views.page, name='page'),
    path('tests/test_session_of_anonymous_user', views.test_session_of_anonymous_user),
]
|
dave-malone/aws-iot-device-sdk-python
|
samples/secureTunneling/secureTunneling.py
|
<filename>samples/secureTunneling/secureTunneling.py
'''
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
'''
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import logging
import time
import argparse
import json
import subprocess
# Custom MQTT message callback
def secureTunnelNotificationCallback(client, userdata, message):
    """Handle an AWS IoT secure-tunneling notification.

    Parses the JSON payload, verifies that this device is the tunnel
    *destination*, maps the requested service name to its local port and
    starts the `localproxy` binary to serve the tunnel.
    """
    logger.debug('Received a secure tunneling notification: %s', message.payload)
    notification = json.loads(message.payload)
    logger.debug('clientAccessToken: %s', notification['clientAccessToken'])
    logger.debug('region: %s', notification['region'])
    logger.debug('clientMode: %s', notification['clientMode'])
    # Only the "destination" side of a tunnel is handled by this sample.
    if notification['clientMode'] != 'destination':
        # logger.warn() is a deprecated alias; logger.warning() is the API.
        logger.warning('clientMode %s not supported', notification['clientMode'])
        return
    logger.debug('services: %s', notification['services'])
    if len(notification['services']) > 1:
        logger.warning('%d services specified; ignoring all but the first service', len(notification['services']))
    service = notification['services'][0]
    logger.debug('initializing secure proxy for %s', service)
    if service == 'ssh':
        servicePort = 22
    elif service == 'vnc':
        servicePort = 5900
    else:
        logger.warning('service %s not supported', service)
        return
    # List form (shell=False) avoids shell injection via the token/region.
    # Blocks until the proxy exits; runs on the MQTT client's callback thread.
    subprocess.run(["localproxy",
                    "-t", notification['clientAccessToken'],
                    "-r", notification['region'],
                    "-d", "localhost:{portNumber}".format(portNumber=servicePort),
                    "-v", "5"
                    ])
# Read in command-line parameters
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--endpoint", action="store", required=True, dest="host", help="Your AWS IoT custom endpoint")
parser.add_argument("-r", "--rootCA", action="store", required=True, dest="rootCAPath", help="Root CA file path")
parser.add_argument("-c", "--cert", action="store", dest="certificatePath", help="Certificate file path")
parser.add_argument("-k", "--key", action="store", dest="privateKeyPath", help="Private key file path")
parser.add_argument("-p", "--port", action="store", dest="port", type=int, help="Port number override")
parser.add_argument("-w", "--websocket", action="store_true", dest="useWebsocket", default=False,
                    help="Use MQTT over WebSocket")
parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="basicPubSub",
                    help="Targeted client id")
parser.add_argument("-t", "--topic", action="store", dest="topic", default="$aws/things/{thingName}/tunnels/notify", help="Targeted topic")
args = parser.parse_args()
host = args.host
rootCAPath = args.rootCAPath
certificatePath = args.certificatePath
privateKeyPath = args.privateKeyPath
port = args.port
useWebsocket = args.useWebsocket
clientId = args.clientId
# The default topic is a per-thing template; substitute the client id.
topic = args.topic.format(thingName=clientId)
# Validate mutually exclusive authentication modes.
# NOTE(review): parser.error() already exits with status 2, so the exit(2)
# calls below are unreachable dead code.
if args.useWebsocket and args.certificatePath and args.privateKeyPath:
    parser.error("X.509 cert authentication and WebSocket are mutual exclusive. Please pick one.")
    exit(2)
if not args.useWebsocket and (not args.certificatePath or not args.privateKeyPath):
    parser.error("Missing credentials for authentication.")
    exit(2)
# Port defaults
if args.useWebsocket and not args.port:  # When no port override for WebSocket, default to 443
    port = 443
if not args.useWebsocket and not args.port:  # When no port override for non-WebSocket, default to 8883
    port = 8883
# Configure logging
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
# Init AWSIoTMQTTClient
myAWSIoTMQTTClient = None
if useWebsocket:
    # WebSocket mode: TLS with only the root CA (SigV4 credentials).
    myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId, useWebsocket=True)
    myAWSIoTMQTTClient.configureEndpoint(host, port)
    myAWSIoTMQTTClient.configureCredentials(rootCAPath)
else:
    # Mutual-TLS mode: root CA + device certificate and private key.
    myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId)
    myAWSIoTMQTTClient.configureEndpoint(host, port)
    myAWSIoTMQTTClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath)
# AWSIoTMQTTClient connection configuration
myAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 32, 20)
myAWSIoTMQTTClient.configureOfflinePublishQueueing(-1)  # Infinite offline Publish queueing
myAWSIoTMQTTClient.configureDrainingFrequency(2)  # Draining: 2 Hz
myAWSIoTMQTTClient.configureConnectDisconnectTimeout(10)  # 10 sec
myAWSIoTMQTTClient.configureMQTTOperationTimeout(5)  # 5 sec
# Connect and subscribe to AWS IoT listening for Secure Tunnel Notifications
myAWSIoTMQTTClient.connect()
logger.debug('Connection to AWS IoT Core initiated')
myAWSIoTMQTTClient.subscribe(topic, 1, secureTunnelNotificationCallback)
# Maintain connectivity until program is terminated
logger.debug('Subscribed to Secure Tunnel Notifications on topic %s', topic)
logger.debug('Ctrl+C To exit the program')
while True:
    # myAWSIoTMQTTClient.publish(topic, '{"message":"Not dead yet!"}', 1)
    # logger.debug('Published heartbeat message')
    time.sleep(60)
|
jgnunes/MitoHiFi
|
circularizationCheck.py
|
#!/usr/bin/python
#Version: 1.0
#Author: <NAME> - <EMAIL>
#LAMPADA - IBQM - UFRJ
'''
Copyright (c) 2014 <NAME> - LAMPADA/UFRJ
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from Bio import SeqIO, SearchIO
import argparse
from subprocess import Popen
import shlex, sys, os
def circularizationCheck(resultFile, circularSize=220, circularOffSet=40):
    '''
    Check, with blast, if there is a match between the start and the end of a sequence.

    resultFile: path to a single-record FASTA file (used as both BLAST query
    and database). circularSize: minimum alignment length accepted as a
    circularization. circularOffSet: tolerance, in bases, around the sequence
    start/end. Returns (True, start, end) on circularization, else (False, -1, -1).
    '''
    refSeq = SeqIO.read(resultFile, "fasta")
    sizeOfSeq = len(refSeq)
    try:
        command = "makeblastdb -in " + resultFile + " -dbtype nucl"  # need to formatdb refseq first
        args = shlex.split(command)
        # 'with' closes the devnull handle; the original opened it and leaked it.
        with open(os.devnull, 'wb') as devnull:
            formatDB = Popen(args, stdout=devnull)
            formatDB.wait()
    except Exception:  # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt propagate
        print('')
        print("formatDB during circularization check failed...")
        print('')
        return (False, -1, -1)
    with open("circularization_check.blast.xml", 'w') as blastResultFile:
        command = "blastn -task blastn -db " + resultFile + " -query " + resultFile + " -outfmt 5"  # call BLAST with XML output
        args = shlex.split(command)
        blastAll = Popen(args, stdout=blastResultFile)
        blastAll.wait()
    with open("circularization_check.blast.tsv", 'w') as blastResultFile:
        command = "blastn -task blastn -db " + resultFile + " -query " + resultFile + " -outfmt 6"  # call BLAST with TSV output
        args = shlex.split(command)
        blastAll = Popen(args, stdout=blastResultFile)
        blastAll.wait()
    blastparse = SearchIO.parse('circularization_check.blast.xml', 'blast-xml')  # get all queries
    '''
    Let's loop through all blast results and see if there is a circularization.
    Do it by looking at all HSPs in the parse and see if there is an alignment of the ending of the sequence
    with the start of that same sequence. It should have a considerable size, you don't want to say it circularized
    if only a couple of bases matched.
    Returns True or False, x_coordinate, y_coordinate
    x coordinate = starting point of circularization match
    y coordinate = ending point of circularization match
    '''
    for qresult in blastparse:  # in each query...
        for hsp in qresult.hsps:
            # A circularization looks like an HSP whose query side starts near
            # base 0 and whose hit side starts near the end of the sequence,
            # long enough (>= circularSize) but not the trivial full
            # self-match (< 90% of the sequence).
            if (hsp.query_range[0] >= 0 and hsp.query_range[0] <= circularOffSet) and (hsp.hit_range[0] >= sizeOfSeq - hsp.aln_span - circularOffSet and hsp.hit_range[0] <= sizeOfSeq + circularOffSet) and hsp.aln_span >= circularSize and hsp.aln_span < sizeOfSeq * 0.90:
                # Report the coordinates of whichever side carries the overlap.
                if hsp.hit_range[0] < hsp.query_range[0]:
                    return (True, hsp.hit_range[0], hsp.hit_range[1])  # it seems to have circularized, return True
                else:
                    return (True, hsp.query_range[0], hsp.query_range[1])
    # no circularization was observed in the for loop, so we exited it, just return false
    return (False, -1, -1)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Find circularization points')
    parser.add_argument('--result-file', type=str, help='Input FASTA file to be processed')
    parser.add_argument('--circular-size', type=int, default=220,
                        help='Size to consider when checking for circularization')
    parser.add_argument('--circular-offset', type=int, default=40,
                        help='Offset from start and finish to consider when looking for circularization')
    args = parser.parse_args()
    # argparse already handles -h/--help itself (and exits before reaching
    # here); the old manual check of sys.argv[1] raised IndexError whenever
    # the script was run with no arguments, so it has been removed.
    print(circularizationCheck(args.result_file, args.circular_size, args.circular_offset))
|
twgo/tshue-puntiau
|
json2csv.py
|
import csv
import json

# Convert the transcription records (tw0102pun-dict.json, produced by
# tw0102.py) into a flat CSV table with a fixed column subset.
# newline='' is required by the csv module so it controls line endings itself.
with open('tw0102pun.csv', 'wt', encoding='utf-8', newline='') as itaigicsv:
    with open('tw0102pun-dict.json', encoding='utf-8') as f:
        tsuliau = json.load(f)
    # Column order of the output CSV.
    fieldnames = [
        '檔名', '通用漢羅', '原始通用',
        '無合音通用漢羅', '連字符通用',
        '通用本調', '台羅本調',
    ]
    # extrasaction='ignore': the producer also writes keys that are not in
    # fieldnames (e.g. '漢字數字無合音通用漢羅', '臺羅口語'); the default
    # extrasaction='raise' made writerow() fail on every such record.
    itaigi = csv.DictWriter(
        itaigicsv, fieldnames=fieldnames, extrasaction='ignore'
    )
    itaigi.writeheader()
    for pit in tsuliau:
        itaigi.writerow(pit)
|
twgo/tshue-puntiau
|
pkl2json.py
|
import json
import pickle

# One-off conversion: dump the pickled table to pretty-printed JSON.
# NOTE: pickle.load can execute arbitrary code; only run on this trusted
# local file, never on downloaded data.
with open('table_2018_09_04_17_15.pkl', 'br') as pkl:
    with open('tw0102.json', 'wt') as trs:
        json.dump(
            pickle.load(pkl), trs,
            ensure_ascii=False, sort_keys=True, indent=2,
        )
|
twgo/tshue-puntiau
|
twisas_trs.py
|
<gh_stars>0
import json

from 臺灣言語工具.解析整理.拆文分析器 import 拆文分析器
from 轉本調 import 查可能本調
from 臺灣言語工具.解析整理.解析錯誤 import 解析錯誤


def main():
    # Add the citation-tone romanization ('本調臺羅') to every utterance of
    # the Kaldi corpus JSON and write the result to a new file.
    with open('twisas-HL-kaldi.json') as trs:
        tsuliau = json.load(trs)

    def 揣物件(han, lo):
        # Build a sentence object from Han characters + romanization; on a
        # parse error, fall back to a romanization-only sentence.
        try:
            return 拆文分析器.建立句物件(han, lo)
        except 解析錯誤:
            # NOTE(review): 'tsua' here is the caller's loop variable,
            # reached through the enclosing scope; printed for debugging.
            print(tsua)
            pass
        return 拆文分析器.建立句物件(lo, lo)

    for tsua in tsuliau:
        han = tsua['無合音漢字']
        lo = tsua['口語臺羅']
        句物件 = 揣物件(han, lo)
        # Replace each syllable's tone with its possible citation tone.
        for ji in 句物件.篩出字物件():
            ji.音 = 查可能本調(ji.型, ji.音)
        tsua['本調臺羅'] = 句物件.看音()
    with open('twisas-HL-kaldi-pun.json', 'w') as tong:
        json.dump(
            tsuliau, tong,
            ensure_ascii=False, sort_keys=True, indent=2
        )


main()
|
twgo/tshue-puntiau
|
tw0102.py
|
import json
import re
from 臺灣言語工具.解析整理.拆文分析器 import 拆文分析器
from 臺灣言語工具.音標系統.閩南語.通用拼音音標 import 通用拼音音標
from 轉本調 import 查可能本調
from 臺灣言語工具.解析整理.解析錯誤 import 解析錯誤
from 臺灣言語工具.音標系統.閩南語.臺灣閩南語羅馬字拼音 import 臺灣閩南語羅馬字拼音
def main():
    """Convert the raw tw0102 table (tw0102.json) into per-utterance dicts
    with citation-tone romanizations and write tw0102pun-dict.json.

    Each input row is (filename, Han/roman mixed text, romanization).
    """
    with open('tw0102.json') as trs:
        tsuliau = json.load(trs)
    # Matches parenthesized contraction ("合音") annotations, with either
    # ASCII or fullwidth parentheses; they are replaced by a placeholder.
    合音提掉 = re.compile('[((].+?[))]')
    # Runs of 2+ digits are spelled out as Han numerals.
    # Raw string fixes the invalid '\d' escape in a plain string literal.
    數字 = re.compile(r'\d{2,}')
    kiatko = []
    for tsua in tsuliau:
        han = 合音提掉.sub(' XXX ', tsua[1].replace('_', ' '))
        sooji = 數字.sub(轉漢字, han)
        lo = tsua[2].replace('_', '-')
        pun = []
        臺羅陣列 = []
        tsitpit = {
            '檔名': tsua[0],
            '通用漢羅': tsua[1],
            '原始通用': tsua[2],
            '無合音通用漢羅': han,
            '漢字數字無合音通用漢羅': sooji,
            '連字符通用': lo,
        }
        愛 = True
        臺羅口語 = []
        try:
            for ji in 揣物件(sooji, lo, tsua).篩出字物件():
                if ji.音標敢著(通用拼音音標):
                    tl = ji.轉音(通用拼音音標)
                    臺羅口語.append(tl.音)
                    臺羅 = 查可能本調(tl.型, tl.音)
                    臺羅陣列.append(臺羅)
                    pun.append(臺灣閩南語羅馬字拼音(臺羅).轉通用拼音())
                else:
                    愛 = False
        except 解析錯誤:
            愛 = False
        # Tone fields are only added for fully convertible utterances.
        if 愛:
            tsitpit['臺羅口語'] = '-'.join(臺羅口語)
            tsitpit['通用本調'] = '-'.join(pun)
            tsitpit['台羅本調'] = '-'.join(臺羅陣列)
        kiatko.append(tsitpit)
    with open('tw0102pun-dict.json', 'w') as tong:
        json.dump(
            kiatko, tong,
            ensure_ascii=False, sort_keys=True, indent=2
        )
def 揣物件(han, lo, tsua):
    """Build a sentence object from Han text *han* + romanization *lo*.

    On a parse failure the offending record *tsua* is printed for debugging
    and the exception is re-raised. (The fallback call after the bare
    'raise' in the original was unreachable and has been removed.)
    """
    try:
        return 拆文分析器.建立句物件(han, lo)
    except 解析錯誤:
        print(tsua)
        raise
# Digit -> Han numeral lookup used by 轉漢字.
數字對照 = {
    '0': '零', '1': '一', '2': '二', '3': '三', '4': '四',
    '5': '五', '6': '六', '7': '七', '8': '八', '9': '九',
}


def 轉漢字(match):
    """re.sub callback: spell every digit of the matched run as a Han numeral."""
    return ''.join(數字對照[digit] for digit in match.group(0))
# Run the conversion when this module is executed.
main()
|
semink/TCN
|
test_pytest.py
|
<gh_stars>0
# imports for examples
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

from model import TCNPredictor
from torchtest import test_suite

# One random batch shaped like the traffic data:
# inputs (batch, features, n_nodes, seq_len), targets (batch, n_nodes, horizon).
batch_size, horizon, seq_len, features, n_nodes = 1, 12, 12, 1, 325
inputs = Variable(torch.randn(batch_size, features, n_nodes, seq_len))
targets = Variable(torch.randn(batch_size, n_nodes, horizon))
batch = [inputs, targets]
# Identity adjacency keeps the test independent of any real sensor graph.
model = TCNPredictor(adjs=torch.eye(n_nodes))
loss_fn = F.mse_loss
optim = model.configure_optimizers()


def test_value_change():
    # Every trainable parameter must change after one optimization step.
    return test_suite(model, loss_fn, optim, batch, test_vars_change=True)


def test_NaN():
    # No NaNs may appear in the model outputs.
    return test_suite(model, loss_fn, optim, batch, test_nan_vals=True)


def test_inf():
    # No infinities may appear in the model outputs.
    return test_suite(model, loss_fn, optim, batch, test_inf_vals=True)
|
semink/TCN
|
model.py
|
import copy
import pytorch_lightning as pl
import torch
from torch import nn, optim
from torch.nn import functional as F
from GAT.model import GAT
from libs import utils
from TCN.model import TemporalConvNet
from libs.utils import StandardScaler
def clones(module, N):
    """Return an nn.ModuleList holding N independent deep copies of *module*."""
    copies = (copy.deepcopy(module) for _ in range(N))
    return nn.ModuleList(copies)
class Encoder(nn.Module):
    """A stack of N identical EncoderLayer blocks applied sequentially."""

    def __init__(self, in_features, thiddin_features, tnhid, tkernel_size, shid_features, alpha, adjs, dropout, seq_len,
                 spatial=True, temporal=True, N=6,):
        super(Encoder, self).__init__()
        # N deep copies of one layer configuration (see clones()).
        self.net = clones(EncoderLayer(in_features, thiddin_features, tnhid,
                                       tkernel_size, shid_features, alpha, adjs=adjs, dropout=dropout,
                                       seq_len=seq_len, spatial=spatial, temporal=temporal), N,)

    def forward(self, x):
        """Feed x through every encoder layer in order."""
        out = x
        for block in self.net:
            out = block(out)
        return out
class EncoderLayer(nn.Module):
    """One encoder block: optional spatial (GAT) and temporal (TCN)
    correlation modules, each wrapped in a residual connection."""

    def __init__(self, in_features, thiddin_features, tnhid, tkernel_size, shid_features, alpha, adjs, dropout, seq_len, spatial, temporal):
        super(EncoderLayer, self).__init__()
        self.spatial = spatial
        self.temporal = temporal
        if temporal:
            # TCN with tnhid hidden layers of thiddin_features channels,
            # mapping back to in_features so the residual add lines up.
            self.temp_corr = TemporalConvNet(in_features, [
                *([thiddin_features] * tnhid), in_features],
                kernel_size=tkernel_size, dropout=dropout,)
        if spatial:
            if len(adjs.size()) == 2:
                adjs = adjs.unsqueeze(0)  # add dimension if adjs is a matrix
            # Buffer (not Parameter): moves with .to(device) but is not trained.
            self.register_buffer("adjs", adjs)
            # One attention head per adjacency matrix.
            nheads = adjs.size(0)
            self.spatial_corr = clones(GAT(in_features, shid_features, in_features,
                                           dropout, alpha, nheads), 2)
            # self.linear1 = nn.Linear(n_nodes, n_nodes)
            # self.linear2 = nn.Linear(seq_len, seq_len)

    def forward(self, x):
        # assumes x is (batch, features, n_nodes, seq_len) — TODO confirm
        # against TCNPredictor's data module.
        # spatial correlation
        if self.spatial:
            # Move seq_len in front of (features, n_nodes) for the GAT layers.
            x = x.permute(0, 3, 1, 2)
            res = x
            for layer in self.spatial_corr:
                x = layer(x, self.adjs)
            x = res + x  # residual connection around both GAT layers
            x = x.permute(0, 2, 3, 1)  # restore the original axis order
            # # x = self.linear1(x + out)
        # temporal correlation
        if self.temporal:
            res = x
            x = self.temp_corr(x)
            x = x + res  # residual connection around the TCN
            # x = self.linear2(x)+res
        return x
class Decoder(nn.Module):
    """Maps the encoder output's last dimension to the forecast horizon
    through a two-layer MLP."""

    def __init__(self, in_features, out_features):
        super(Decoder, self).__init__()
        hidden = out_features * in_features
        self.mlp = nn.Sequential(
            nn.Linear(in_features, hidden),
            nn.ReLU(),
            nn.Linear(hidden, out_features),
        )

    def forward(self, x):
        """Apply the MLP along the last dimension of x."""
        return self.mlp(x)
class TCNPredictor(pl.LightningModule):
    """LightningModule wrapping the Encoder/Decoder TCN traffic forecaster.

    forward() takes (batch, in_features, n_nodes, seq_len) tensors and
    returns (batch, n_nodes, horizon) predictions.
    """

    def __init__(self, in_features=1, thid_features=1, nhid=2, kernel_size=3, seq_len=12, horizon=12, shid_features=16,
                 alpha=0.2, dropout=0.3, lr=1e-3, weight_decay=1e-2, scaler=StandardScaler(), adjs=None, N=6, spatial=False,
                 temporal=False, **kwargs,):
        super(TCNPredictor, self).__init__()
        self.encode = Encoder(in_features, thid_features, nhid, kernel_size,
                              shid_features, alpha, adjs=adjs, dropout=dropout, N=N, seq_len=seq_len,
                              spatial=spatial, temporal=temporal)
        self.decode = Decoder(seq_len, horizon)
        self.lr = lr
        self.weight_decay = weight_decay
        # Scaler mapping normalized predictions back to real units.
        # NOTE(review): the default StandardScaler() is a shared mutable
        # default instance; callers pass their own scaler in practice.
        self.scaler = scaler

    def forward(self, x):
        # encoder -> decoder; squeeze the singleton feature dim for the loss.
        x = self.encode(x)
        pred = self.decode(x)
        return pred.squeeze(1)

    def training_step(self, batch, idx):
        x, y = batch
        x, y = x.float(), y.float()
        pred = self.forward(x)
        # MAE computed in real (inverse-transformed) units.
        loss = F.l1_loss(self.scaler.inverse_transform(pred), y)
        self.log("Training/mae", loss, prog_bar=True)
        return loss

    def validation_step(self, batch, idx):
        x, y = batch
        x, y = x.float(), y.float()
        pred = self.forward(x)
        loss = F.l1_loss(self.scaler.inverse_transform(pred), y)
        return loss

    def validation_epoch_end(self, outputs):
        # Mean MAE over all validation batches.
        loss = torch.stack(outputs).mean()
        self.log('Validation/mae', loss, prog_bar=True, on_epoch=True)
        return loss

    def test_step(self, batch, idx):
        x, y = batch
        x, y = x.float(), y.float()
        pred = self.scaler.inverse_transform(self.forward(x))
        # Per-horizon masked metrics (dim=-2 aggregates over nodes).
        loss = {
            "mae": utils.masked_MAE(pred, y, dim=-2),
            "rmse": utils.masked_RMSE(pred, y, dim=-2),
        }
        return loss

    def test_epoch_end(self, outputs):
        loss = {"mae": 0, "rmse": 0}
        for m in loss:
            # nanmean ignores horizons masked out in individual batches.
            loss[m] = torch.cat([output[m]
                                 for output in outputs], dim=0).nanmean(dim=0)
        for h in range(len(loss["mae"])):
            # assumes 5-minute sampling: horizon step h+1 == 5*(h+1) minutes
            print(f"Horizon {h+1} ({5*(h+1)} min) - ", end="")
            print(f"MAE: {loss['mae'][h]:.2f}", end=", ")
            print(f"RMSE: {loss['rmse'][h]:.2f}")
            self.logger.experiment.add_scalar("Test/mae", loss['mae'][h], h)
            self.logger.experiment.add_scalar("Test/rmse", loss['rmse'][h], h)
        print("Aggregation - ", end="")
        print(f"MAE: {loss['mae'].mean():.2f}", end=", ")
        print(f"RMSE: {loss['rmse'].mean():.2f}")

    def configure_optimizers(self):
        return optim.Adam(self.parameters(),
                          lr=self.lr,
                          weight_decay=self.weight_decay)

    @staticmethod
    def add_model_specific_args(parent_parser):
        """Register model hyper-parameters on the given argparse parser."""
        parser = parent_parser.add_argument_group("TCNPredictor")
        parser.add_argument("--batch_size", type=int, default=64, help="")
        parser.add_argument("--horizon", type=int, default=12, help="")
        parser.add_argument("--lookback_window", type=int, default=2, help="")
        # BUG FIX: dropout is a float in [0, 1); type=int rejected values
        # such as "0.3" on the command line.
        parser.add_argument("--dropout", type=float, default=0.3, help="")
        parser.add_argument("--lr", type=float, default=1e-3, help="")
        parser.add_argument("--weight_decay",
                            type=float,
                            default=1e-2,
                            help="")
        parser.add_argument("--in_features", type=int, default=1)
        parser.add_argument("--thid_features", type=int, default=5)
        parser.add_argument("--shid_features", type=int, default=5)
        parser.add_argument('--spatial', dest='spatial',
                            action='store_true')
        parser.add_argument('--temporal', dest='temporal',
                            action='store_true')
        parser.add_argument("--nhid", type=int, default=3)
        parser.add_argument("--kernel_size", type=int, default=3)
        parser.add_argument("--N", type=int, default=3)
        return parent_parser
|
semink/TCN
|
tune.py
|
<reponame>semink/TCN
from ray import tune
from ray.tune import CLIReporter
from ray.tune.schedulers import PopulationBasedTraining
from pytorch_lightning import Trainer
from argparse import ArgumentParser
from train import train_model
from model import TCNPredictor
def tune_pbt(args):
    """Run a Population Based Training hyper-parameter search with ray tune."""
    # Fixed starting values plus per-trial sampled choices.
    config = {
        "lr": 1e-3,
        "batch_size": 8,
        "dropout": 0.3,
        "weight_decay": 0.01,
        "N": tune.choice([2, 4, 8]),
        # NOTE(review): the model registers --thid_features, not
        # "hidden_features" — verify this key actually reaches the model.
        "hidden_features": tune.choice([4, 8, 16, 32]),
        "shid_features": tune.choice([4, 8, 16, 32]),
        "nhid": tune.choice([1, 3, 5]),
        "kernel_size": tune.choice([3, 5, 7]),
    }
    # PBT perturbs these parameters every 4 training iterations.
    scheduler = PopulationBasedTraining(
        perturbation_interval=4,
        hyperparam_mutations={
            "lr": tune.loguniform(1e-4, 1e-1),
            "batch_size": [8, 16],
            "dropout": tune.uniform(0, 0.99),
            "weight_decay": tune.loguniform(1e-5, 1e-1)
        })
    reporter = CLIReporter(
        parameter_columns=[c for c in config],
        metric_columns=["loss", "training_iteration"],
        max_report_frequency=20)
    # Each trial gets 4 CPUs and 1 GPU; minimize validation loss.
    analysis = tune.run(
        tune.with_parameters(train_model, args=args, tuning=True),
        resources_per_trial={"cpu": 4, "gpu": 1},
        metric="loss",
        mode="min",
        config=config,
        num_samples=args.ray_num_samples,
        scheduler=scheduler,
        progress_reporter=reporter, resume=args.resume,
        name=f"tune_TCN_pbt{args.expid}", verbose=2)
    best_trial = analysis.get_best_trial(metric='loss', mode='min')
    best_model_checkpoint = analysis.get_best_checkpoint(
        best_trial, metric='loss')
    print("Best hyperparameters found were: ", analysis.best_config)
    print("Best checkpoint: ", best_model_checkpoint)
if __name__ == "__main__":
    parser = ArgumentParser()
    # Trainer flags (gpus, max_epochs, ...) come from pytorch-lightning.
    parser = Trainer.add_argparse_args(parser)
    # PROGRAM level args
    parser.add_argument("--conda_env", type=str, default="pytorch")
    parser.add_argument('--ray_num_samples', type=int, default=20,
                        help='number of ray tune samples')
    parser.add_argument('--expid', type=int, default=0,
                        help='ray tune experiment id')
    parser.add_argument('--resume', action='store_true',
                        help='whether to resume previous tuning process')
    # MODEL specific args
    parser = TCNPredictor.add_model_specific_args(parser)
    # DATALOADER specific args
    parser.add_argument("--seq_len", type=int, default=12)
    parser.add_argument("--num_workers", type=int, default=32)
    parser.add_argument("--weeks", type=int, default=5)
    args = parser.parse_args()
    tune_pbt(args)
|
semink/TCN
|
GAT/layers.py
|
<gh_stars>0
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphAttentionLayer(nn.Module):
    """
    Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
    """

    def __init__(self, in_features, out_features, dropout, alpha, concat=True):
        super(GraphAttentionLayer, self).__init__()
        self.dropout = dropout
        self.in_features = in_features
        self.out_features = out_features
        self.alpha = alpha    # negative slope of the LeakyReLU
        self.concat = concat  # apply ELU when this layer feeds another layer
        # Learned linear map W and attention vector a, Xavier-initialized.
        self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))
        nn.init.xavier_uniform_(self.W.data, gain=1.414)
        self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1)))
        nn.init.xavier_uniform_(self.a.data, gain=1.414)
        self.leakyrelu = nn.LeakyReLU(self.alpha)

    def forward(self, h, adj):
        # h.shape: (N, in_features), Wh.shape: (N, out_features)
        # Wh = torch.mm(h, self.W)
        # i:batch, j:horizon, k: in_features, l: num_sensors, m: out_features
        Wh = torch.einsum('ijkl, km -> ijml', h, self.W)
        e = self._prepare_attentional_mechanism_input(Wh)
        # Mask non-edges with a large negative score before the softmax.
        attention = torch.where(adj > 0, e, -9e15*torch.ones_like(e))
        attention = F.softmax(attention, dim=-1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        # NOTE(review): 'ijkk' repeats k on the first operand, which einsum
        # treats as taking the diagonal of the attention matrix — each node
        # is weighted only by its own score rather than summing over
        # neighbors. Confirm this is the intended aggregation.
        h_prime = torch.einsum('ijkk, ijlk -> ijlk', attention, Wh)
        if self.concat:
            return F.elu(h_prime)
        else:
            return h_prime

    def _prepare_attentional_mechanism_input(self, Wh):
        # Split a into two halves: scores for source vs. target nodes.
        Wh1 = torch.einsum('ijkl, km -> ijml', Wh,
                           self.a[:self.out_features, :])
        Wh2 = torch.einsum('ijkl, km -> ijml', Wh,
                           self.a[self.out_features:, :])
        # broadcast add
        e = Wh1 + Wh2.transpose(-1, -2)
        return self.leakyrelu(e)

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(
            self.in_features) + ' -> ' + str(self.out_features) + ')'
|
semink/TCN
|
train.py
|
<reponame>semink/TCN
from argparse import ArgumentParser
from pytorch_lightning import Trainer
from model import TCNPredictor
from TrafficDataset import DataModule
from ray.tune.integration.pytorch_lightning import TuneReportCheckpointCallback
from ray import tune
from pytorch_lightning.loggers import TensorBoardLogger
import pprint
import os
pp = pprint.PrettyPrinter()
def train_model(config=None, args=None, checkpoint_dir=None, tuning=False):
    """Train (and test) a TCNPredictor.

    config: optional dict of hyper-parameter overrides (from ray tune);
            each entry is copied onto *args* before use.
    checkpoint_dir: optional ray-tune checkpoint directory to resume from.
    tuning: when True, wire up the TuneReportCheckpointCallback and a
            per-trial TensorBoard logger.
    Returns the test results from trainer.test() (previously discarded).
    """
    if config:
        # Plain loop instead of a side-effecting list comprehension.
        for key in config:
            setattr(args, key, config[key])
    dict_args = vars(args)
    # pp.pprint(dict_args)
    dm = DataModule(dataset=dict_args["dataset"], batch_size=dict_args["batch_size"], seq_len=dict_args["seq_len"], horizon=dict_args["horizon"],
                    num_workers=dict_args["num_workers"], weeks=dict_args["weeks"])
    dm.prepare_data()
    dm.setup()
    model = TCNPredictor(**dict_args, scaler=dm.scaler,
                         n_nodes=dm.n_nodes, adjs=dm.adj)
    if tuning:
        # Per-trial logger and ray-tune checkpointing on every validation end.
        kwargs = dict(logger=TensorBoardLogger(save_dir=tune.get_trial_dir(), name="", version="."),
                      gpus=1,
                      progress_bar_refresh_rate=0,
                      callbacks=[
                          TuneReportCheckpointCallback(
                              metrics={"loss": "Validation/mae"},
                              filename="checkpoint",
                              on="validation_end")
                      ])
    else:
        kwargs = dict(weights_summary="top")
    if checkpoint_dir:
        kwargs["resume_from_checkpoint"] = os.path.join(
            checkpoint_dir, "checkpoint")
    trainer = Trainer.from_argparse_args(
        args, **kwargs)
    trainer.fit(model, dm)
    result = trainer.test(model, dm)
    return result
if __name__ == "__main__":
    parser = ArgumentParser()
    # Trainer flags (gpus, max_epochs, ...) come from pytorch-lightning.
    parser = Trainer.add_argparse_args(parser)
    # DATALOADER specific args
    parser.add_argument("--dataset", type=str, default='la')
    parser.add_argument("--seq_len", type=int, default=12)
    parser.add_argument("--num_workers", type=int, default=32)
    parser.add_argument("--weeks", type=int, default=5)
    parser.add_argument("--residual", dest="residual", action="store_true")
    # MODEL specific args
    parser = TCNPredictor.add_model_specific_args(parser)
    args = parser.parse_args()
    train_model(args=args)
|
semink/TCN
|
TCN/model.py
|
<reponame>semink/TCN
import torch.nn as nn
# original TCN has weight_norm but it causes infinite gradient many times..
from torch.nn.utils import weight_norm
class Chomp1d(nn.Module):
    """Trims the last `chomp_size` steps from the time axis so that padded
    causal convolutions cannot look into the future."""

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        # Nothing to trim when chomp_size == 0 (x[..., :0] would be empty).
        if self.chomp_size == 0:
            return x
        # make TCN to be a causal process
        return x[..., :-self.chomp_size].contiguous()
class ConvRowSeparate(nn.Module):
    """Placeholder module: currently the identity mapping."""

    def __init__(self):
        super(ConvRowSeparate, self).__init__()

    def forward(self, x):
        # Identity — returns its input unchanged.
        return x
class TemporalLayer(nn.Module):
    """conv -> chomp -> ReLU -> dropout: one half of a TemporalBlock."""

    def __init__(self, in_features, out_features, kernel_size, stride, dilation, padding, dropout):
        super(TemporalLayer, self).__init__()
        # (1, k) kernel: convolve along the time axis only, per node row.
        self.conv = nn.Conv2d(in_features, out_features, kernel_size=(1, kernel_size),
                              stride=stride, padding=(0, padding), padding_mode='replicate', dilation=dilation)
        self.chomp = Chomp1d(padding)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)

    def init_weights(self):
        """Small-variance normal init of the convolution weights."""
        self.conv.weight.data.normal_(0, 0.01)

    def forward(self, x):
        out = self.conv(x)
        out = self.chomp(out)
        out = self.relu(out)
        return self.dropout(out)
class TemporalBlock(nn.Module):
    """Two TemporalLayers plus a (possibly downsampled) residual connection."""

    def __init__(self, in_features, out_features, kernel_size, stride, dilation, padding, dropout=0.2):
        super(TemporalBlock, self).__init__()
        self.layer1 = TemporalLayer(
            in_features, out_features, kernel_size, stride, dilation, padding, dropout)
        self.layer2 = TemporalLayer(
            out_features, out_features, kernel_size, stride, dilation, padding, dropout)
        # 1x1 conv matches channel counts for the residual when they differ.
        if in_features != out_features:
            self.downsample = nn.Conv2d(in_features, out_features, 1)
        else:
            self.downsample = None
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        self.layer1.init_weights()
        self.layer2.init_weights()
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.01)

    def forward(self, x):
        out = self.layer2(self.layer1(x))
        res = self.downsample(x) if self.downsample is not None else x
        return self.relu(out + res)
class TemporalConvNet(nn.Module):
    """Stack of TemporalBlocks with exponentially growing dilation (2**i)."""

    def __init__(self, in_features, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvNet, self).__init__()
        blocks = []
        for i, out_channels in enumerate(num_channels):
            dilation_size = 2 ** i
            in_channels = in_features if i == 0 else num_channels[i - 1]
            # Padding keeps the sequence length; the Chomp inside each block
            # restores causality.
            blocks.append(TemporalBlock(in_channels, out_channels, kernel_size, stride=1,
                                        dilation=dilation_size,
                                        padding=(kernel_size - 1) * dilation_size, dropout=dropout))
        self.network = nn.Sequential(*blocks)

    def forward(self, x):
        return self.network(x)
|
semink/TCN
|
libs/utils.py
|
<gh_stars>0
import copy
import torch.nn as nn
import torch
import numpy as np
from scipy.spatial.distance import pdist, squareform, cdist
import pandas as pd
import os
import wget
import pickle
from pathlib import Path
from sklearn.cluster import SpectralClustering
def get_project_root() -> Path:
    """Return the repository root (two directory levels above this module)."""
    return Path(__file__).parents[1]
PROJECT_ROOT = get_project_root()
class StandardScaler:
    """Z-score normalisation with a fixed mean/std pair."""

    def __init__(self, mean=0, std=1):
        self.mean = mean
        self.std = std

    def transform(self, data):
        """Map raw values to standardised units."""
        return (data - self.mean) / self.std

    def inverse_transform(self, data):
        """Map standardised values back to raw units."""
        return data * self.std + self.mean
def cluster_spatiotemporal_signal(df, adj, K=4, psim_weight=0.5):
    """Cluster sensors by blending physical adjacency with signal correlation.

    Parameters:
        df: time x sensor DataFrame (columns are sensors).
        adj: (possibly asymmetric) physical adjacency matrix between sensors.
        K: number of spectral clusters.
        psim_weight: weight in [0, 1] for the physical similarity; the
            signal similarity gets ``1 - psim_weight``.

    Returns:
        (df_agg, labels): per-cluster mean signals and each sensor's label.
    """
    # Symmetrise the adjacency so it is a valid similarity matrix.
    physical_similarity = np.maximum(adj, adj.T)
    signal_similarity = get_signal_similarity(df, cut=0.01, sigma=0.1).values
    mix_similarity = cut_threshold(
        psim_weight * physical_similarity + (1 - psim_weight) * signal_similarity, 0)
    # 2. Spectral clustering on the precomputed similarity.
    clustering = SpectralClustering(n_clusters=K,
                                    assign_labels='discretize',
                                    random_state=0, affinity='precomputed').fit(mix_similarity)
    # Average the columns of each cluster.  ``groupby(..., axis=1)`` is
    # deprecated since pandas 2.1; transpose-groupby is the supported form
    # and matches residual() below.
    df_agg = df.T.groupby(by=clustering.labels_).mean().T
    return df_agg, clustering.labels_
def cut_threshold(df, threshold_cut=2):
    """Zero out every entry strictly below *threshold_cut* (in place) and return *df*."""
    df[df < threshold_cut] = 0
    return df
def get_signal_similarity(df, sigma=0.1, cut=0.2):
    """Gaussian-kernel similarity between sensor columns, thresholded at *cut*.

    Correlation distances are computed on the rows with no missing values.
    """
    corr_dist = squareform(pdist(df.dropna().T, 'correlation'))
    similarity = pd.DataFrame(np.exp(-corr_dist / sigma),
                              index=df.columns, columns=df.columns)
    return cut_threshold(similarity, cut)
def _exist_dataset_on_disk(dataset):
    """Return True when both the signal CSV and the adjacency pickle are cached.

    The previous check only looked for the CSV, so a partial download (CSV
    present, pickle missing) made get_traffic_data() fail when opening the
    adjacency file.
    """
    if dataset == 'la':
        files = ('METR-LA.csv', 'adj_mx_METR-LA.pkl')
    else:
        files = ('PEMS-BAY.csv', 'adj_mx_PEMS-BAY.pkl')
    return all(os.path.isfile(f'{PROJECT_ROOT}/dataset/{fn}') for fn in files)
def get_traffic_data(dataset):
    """Load a traffic dataset and its adjacency matrix, downloading if needed.

    Parameters:
        dataset: 'la' (METR-LA) or 'bay' (PEMS-BAY).

    Returns:
        (df, adj): sensor readings as a DataFrame with a DatetimeIndex
        regularised to a fixed frequency (gaps filled with 0.0), and the
        adjacency matrix from the pickled support file.

    Raises:
        ValueError: for any other dataset name.
    """
    if dataset == 'la':
        fn, adj_name = 'METR-LA.csv', 'adj_mx_METR-LA.pkl'
    elif dataset == 'bay':
        fn, adj_name = 'PEMS-BAY.csv', 'adj_mx_PEMS-BAY.pkl'
    else:
        raise ValueError("dataset name should be either 'bay' or 'la")
    # Zenodo record hosting both the CSV signals and the adjacency pickle.
    data_url = f'https://zenodo.org/record/5724362/files/{fn}'
    sup_url = f'https://zenodo.org/record/5724362/files/{adj_name}'
    if not _exist_dataset_on_disk(dataset):
        wget.download(data_url, out=f'{PROJECT_ROOT}/dataset')
        wget.download(sup_url, out=f'{PROJECT_ROOT}/dataset')
    df = pd.read_csv(f'{PROJECT_ROOT}/dataset/{fn}', index_col=0)
    df.index = pd.DatetimeIndex(df.index)
    # Most common index step == the sampling period; reindex to that
    # frequency so the series is strictly regular (missing slots -> 0.0).
    dt = pd.Timedelta(df.index.to_series().diff().mode().values[0])
    df = df.asfreq(freq=dt, fill_value=0.0)
    with open(f'{PROJECT_ROOT}/dataset/{adj_name}', 'rb') as f:
        _, _, adj = pickle.load(f, encoding='latin1')
    return df, adj
def clones(module, N):
    """Return an nn.ModuleList holding N independent deep copies of *module*."""
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))
class LayerNorm(nn.Module):
    """Construct a layernorm module (See citation for details)."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        # Learnable per-feature gain and bias.
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        """Normalise over the last dimension, then scale and shift."""
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normed = (x - mu) / (sigma + self.eps)
        return self.a_2 * normed + self.b_2
def residual(df, labels):
return df.T.groupby(by=labels).transform('mean').T - df
def masked_MAE(pred, target, dim=1):
    """Mean absolute error over *dim*, ignoring entries where target == 0.

    Zeros are treated as missing data.  BUG FIX: the original wrote NaNs into
    the caller's tensor in place; masked_fill builds a new tensor instead.
    """
    target = target.masked_fill(target == 0.0, float('nan'))
    return torch.nanmean(torch.absolute(pred - target), dim=dim)
def masked_MSE(pred, target, dim=1):
    """Mean squared error over *dim*, ignoring entries where target == 0.

    Zeros are treated as missing data.  BUG FIX: the original wrote NaNs into
    the caller's tensor in place; masked_fill builds a new tensor instead.
    """
    target = target.masked_fill(target == 0.0, float('nan'))
    return torch.nanmean((pred - target) ** 2, dim=dim)
def masked_RMSE(pred, target, dim=1):
    """Root mean squared error over *dim*, ignoring entries where target == 0.

    Zeros are treated as missing data.  BUG FIX: the original wrote NaNs into
    the caller's tensor in place; masked_fill builds a new tensor instead.
    """
    target = target.masked_fill(target == 0.0, float('nan'))
    return torch.sqrt(torch.nanmean((pred - target) ** 2, dim=dim))
def masked_MAPE(pred, target, dim=1):
    """Mean absolute percentage error (x100) over *dim*, ignoring zero targets.

    The +0.1 in the denominator guards against division by near-zero values.
    BUG FIX: the original wrote NaNs into the caller's tensor in place;
    masked_fill builds a new tensor instead.
    """
    target = target.masked_fill(target == 0.0, float('nan'))
    return torch.nanmean(torch.absolute((pred - target) / (target + 0.1)), dim=dim) * 100
# def masked_mae(preds, labels, null_val=np.nan):
# if np.isnan(null_val):
# mask = ~torch.isnan(labels)
# else:
# mask = (labels != null_val)
# mask = mask.float()
# mask /= torch.mean((mask))
# mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
# loss = torch.abs(preds - labels)
# loss = loss * mask
# loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
# return torch.mean(loss)
# def masked_mse(preds, labels, null_val=np.nan):
# if np.isnan(null_val):
# mask = ~torch.isnan(labels)
# else:
# mask = (labels != null_val)
# mask = mask.float()
# mask /= torch.mean((mask))
# mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
# loss = (preds - labels) ** 2
# loss = loss * mask
# loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
# return torch.mean(loss)
# def masked_mape(preds, labels, null_val=np.nan):
# if np.isnan(null_val):
# mask = ~torch.isnan(labels)
# else:
# mask = (labels != null_val)
# mask = mask.float()
# mask /= torch.mean((mask))
# mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
# loss = torch.abs(preds - labels) / labels
# loss = loss * mask
# loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
# return torch.mean(loss)
# def masked_rmse(preds, labels, null_val=np.nan):
# return torch.sqrt(masked_mse(preds=preds, labels=labels, null_val=null_val))
|
semink/TCN
|
TrafficDataset.py
|
from pytorch_lightning import LightningDataModule
import pandas as pd
import torch
import math
from torch.utils.data import DataLoader
from libs import utils
from libs.utils import StandardScaler
import numpy as np
import os
import pickle
class TrafficScaler:
    """Scaler that divides by a fixed traffic-speed range."""

    def __init__(self, max_traffic=80, min_traffic=0):
        self.max_traffic = max_traffic
        self.min_traffic = min_traffic

    def transform(self, data):
        """Scale raw speeds relative to the configured range."""
        span = self.max_traffic - self.min_traffic
        return data / span

    def inverse_transform(self, data):
        """Undo transform(): map scaled values back to raw speeds."""
        span = self.max_traffic - self.min_traffic
        return data * span
def timestamp_to_vec(t: pd.DatetimeIndex):
    """Encode timestamps as smooth cyclic features.

    Returns a (4, len(t)) tensor: cos/sin of the time-of-day angle followed
    by cos/sin of the day-of-week angle.
    """
    dt_sec = torch.tensor((t - t.normalize()).total_seconds())
    day_sec = pd.Timedelta(days=1).total_seconds()
    c_t = torch.cos(2 * math.pi * dt_sec / day_sec)
    s_t = torch.sin(2 * math.pi * dt_sec / day_sec)
    # BUG FIX: torch.tensor(t.dayofweek) is int64, and multiplying an int
    # tensor by a Python float promotes to the default dtype (float32),
    # while the time-of-day features above are float64 — torch.stack then
    # fails on mixed dtypes.  Cast the day index to float64 up front.
    day = torch.tensor(t.dayofweek, dtype=torch.float64)
    c_d = torch.cos(2 * math.pi * day / 7)
    s_d = torch.sin(2 * math.pi * day / 7)
    return torch.stack([c_t, s_t, c_d, s_d])
class TimeSeriesDatasetWeek(torch.utils.data.Dataset):
    """Sliding-window dataset whose samples also look back ``weeks`` weeks.

    Each item is ``(X, Y)``: X is the scaled input window of length
    ``seq_len`` (with an extra leading channel dim) and Y is the unscaled
    target window of length ``horizon``.
    """

    def __init__(self, df, dt, scaler, weeks, seq_len=1, horizon=1):
        # df: time x node DataFrame; dt: sampling period (pd.Timedelta);
        # scaler: object exposing transform(); weeks: weekly look-back depth.
        self.df = df
        self.seq_len = seq_len
        self.horizon = horizon
        self.scaler = scaler
        self.weeks = weeks
        self.dt = dt

    def __len__(self):
        # The first `weeks` worth of rows cannot start a sample because
        # their weekly history would fall before the start of the frame.
        return self.df.values.__len__() - (self.seq_len + self.horizon - 1) - int(pd.Timedelta(weeks=self.weeks) / self.dt)

    def get_weeks_data(self, index):
        """Collect the ``horizon``-length slices observed exactly 1..weeks
        weeks before this sample's prediction time."""
        pred_time = self.df.index[index + self.seq_len]
        week_idx = []
        for week in range(1, self.weeks + 1):
            # NOTE(review): ``closed=`` was removed from pd.date_range in
            # pandas 2.0 (replaced by ``inclusive='left'``) — this code
            # assumes pandas < 2.0; confirm the pinned version.
            for t in pd.date_range(pred_time - pd.Timedelta(weeks=week), pred_time - pd.Timedelta(weeks=week) + pd.Timedelta(self.dt) * self.horizon, freq=self.dt, closed="left",):
                week_idx.append(t)
        return self.df.loc[week_idx]

    def __getitem__(self, index):
        # Shift every index by `weeks` so the weekly look-back stays in range.
        offset = int(pd.Timedelta(weeks=self.weeks) / self.dt)
        X = self.scaler.transform(
            self.df.values[index + offset: index + self.seq_len + offset]).T
        # Weekly history is computed but not part of the returned tuple
        # (see the commented-out concatenation below).
        X_w = self.scaler.transform(
            self.get_weeks_data(index + offset).values).T
        Y = self.df.values[index + self.seq_len +
                           offset: index + self.seq_len + self.horizon + offset].T
        return (
            # torch.cat([torch.tensor(X), torch.tensor(X_w)], dim=1).T,
            torch.tensor(X).unsqueeze(0),
            torch.tensor(Y),
        )
class TimeSeriesDataset(torch.utils.data.Dataset):
    """Plain sliding-window dataset: scaled input window -> raw target window."""

    def __init__(self, df, dt, scaler, seq_len=1, horizon=1):
        self.df = df
        self.seq_len = seq_len
        self.horizon = horizon
        self.scaler = scaler
        self.dt = dt

    def __len__(self):
        # One sample per window start that leaves room for input + target.
        return len(self.df.values) - (self.seq_len + self.horizon - 1)

    def __getitem__(self, index):
        values = self.df.values
        x_window = values[index: index + self.seq_len]
        y_window = values[index + self.seq_len: index + self.seq_len + self.horizon]
        X = self.scaler.transform(x_window).T
        return torch.tensor(X).T, torch.tensor(y_window.T).T
class DataModule(LightningDataModule):
    """Lightning data module for the METR-LA / PEMS-BAY traffic datasets.

    Downloads the data, splits it chronologically, optionally converts it to
    weekly-mean residuals (cached on disk), and builds the DataLoaders.
    """

    def __init__(self, dataset: str = "bay", batch_size: int = 32, seq_len=24, horizon=12, num_workers=64, weeks=5, residual=False,):
        super(DataModule, self).__init__()
        self.dataset = dataset
        self.batch_size = batch_size
        self.seq_len = seq_len
        self.horizon = horizon
        self.num_workers = num_workers
        # Number of past weeks used for look-back / residual baselines.
        self.weeks = weeks
        # When True, train/evaluate on deviations from the weekly profile.
        self.residual = residual
        self.n_nodes = None

    def split_data(self, df, rule=[0.7, 0.1, 0.2], pad_train=True):
        """Chronological train/valid/test split by the fractions in *rule*.

        Valid/test frames start ``weeks`` earlier than their nominal boundary
        so weekly look-back windows stay in range.  NOTE(review): the mutable
        list default is shared across calls; harmless because it is never
        mutated.
        """
        assert (
            np.sum(rule) == 1
        ), f"sum of split rule should be 1 (currently sum={np.sum(rule):.2f})"
        train_end_idx, valid_end_idx = (
            df.index[int(rule[0] * df.shape[0])],
            df.index[int((rule[0] + rule[1]) * df.shape[0])],
        )
        train_df = df.loc[:train_end_idx]
        if self.residual:
            # Residual mode needs the padded leading weeks unconditionally.
            pad_train = True
        if pad_train:
            train_df = self.pad_empty_weeks(train_df)
        offset = pd.Timedelta(weeks=self.weeks)
        valid_df = df.loc[train_end_idx - offset: valid_end_idx]
        test_df = df.loc[valid_end_idx - offset:]
        return train_df, valid_df, test_df

    def _substract(self, df, day, mean_dfs):
        """Subtract the per-time-of-day mean profile of weekday *day* from *df*."""
        idx = df.index
        # Temporarily re-index by "%H:%M" so subtraction aligns on clock time.
        df.index = df.index.strftime("%H:%M")
        df = df - mean_dfs[day]
        df.index = idx
        return df

    def _set_mean_dfs(self, source_df):
        # Cache the weekday mean profiles for later residual computation.
        self.mean_dfs = self._get_mean_dfs(source_df)

    def _get_mean_dfs(self, source_df):
        """Per-weekday mean profiles keyed by weekday, indexed by "%H:%M".

        Zeros are treated as missing before averaging.
        """
        mean_dfs = {}
        for day, df in source_df.groupby(by=source_df.index.dayofweek):
            mean_dfs[day] = df.replace(0.0, np.nan).groupby(df.index.strftime(
                "%H:%M")).mean().interpolate()  # mean value should not have nan value
        return mean_dfs

    def calculate_residual(self, target_df):
        """Subtract, for each timestamp, the mean of the same timestamp in the
        previous ``weeks`` weeks; zeros are treated as missing and restored."""
        offsets = [pd.Timedelta(weeks=week)
                   for week in range(1, self.weeks + 1)]
        target_df = target_df.replace(0.0, np.nan)
        residual_df = target_df.loc[target_df.index[0] + offsets[-1]:].apply(
            lambda x: x - target_df.loc[[x.name - o for o in offsets]].mean(axis=0), axis=1,)
        return residual_df.replace(np.nan, 0.0)

    def prepare_data(self, custom_dataset=None):
        """Load (df, adj); stacks adj and its transpose into a (2, N, N) tensor."""
        if custom_dataset is None:
            self.df, self.adj = utils.get_traffic_data(self.dataset)
        else:
            self.df, self.adj = custom_dataset
        self.adj = torch.stack([torch.tensor(self.adj),
                                torch.tensor(self.adj.T)], dim=0)
        # Sampling period inferred from the first two timestamps.
        self.dt = self.df.index[1] - self.df.index[0]

    def get_raw_data(self):
        return self.df, self.adj

    def feed_processed_data(self, new_df):
        """Replace the raw frame with an externally processed one."""
        self.df = new_df

    def _cache_check(self, fn):
        # True when a residual cache file already exists on disk.
        return os.path.isfile(fn)

    def setup(self, refresh=False, **kwargs):
        """Split the data and fit the scaler.

        In residual mode the residual frames are cached on disk; pass
        refresh=True to rebuild them.
        """
        self.train_df, self.valid_df, self.test_df = self.split_data(
            df=self.df)
        self.n_nodes = self.train_df.shape[1]
        if self.residual:
            fn = f"dataset/cache_{self.dataset}_{self.weeks}.pkl"
            if self._cache_check(fn) and not refresh:
                self.train_df_resi, self.valid_df_resi, self.test_df_resi = pickle.load(
                    open(fn, "rb"))
            else:
                self._set_mean_dfs(source_df=self.train_df)
                self.valid_df_resi = self.calculate_residual(
                    target_df=self.valid_df)
                self.test_df_resi = self.calculate_residual(
                    target_df=self.test_df)
                self.train_df_resi = self.calculate_residual(
                    target_df=self.train_df)
                pickle.dump([self.train_df_resi, self.valid_df_resi,
                             self.test_df_resi], open(fn, "wb"),)
            # Scaler statistics ignore zeros (missing-data markers).
            self.scaler = StandardScaler(
                mean=np.nanmean(
                    self.train_df_resi.replace(0.0, np.nan).values),
                std=np.nanstd(self.train_df_resi.replace(0.0, np.nan).values),
            )
        else:
            self.scaler = StandardScaler(
                mean=np.nanmean(self.train_df.replace(0.0, np.nan).values),
                std=np.nanstd(self.train_df.replace(0.0, np.nan).values),
            )

    def pad_empty_weeks(self, df):
        """Prepend ``weeks`` weeks to *df*, filling the new (and any all-zero)
        rows with the matching weekday/time-of-day mean profile."""
        i = pd.date_range(
            df.index[0] - pd.Timedelta(weeks=self.weeks), df.index[-1], freq=self.dt
        )
        day_avg = self._get_mean_dfs(source_df=df)
        df = df.replace(np.nan, 0.0)
        df = df.reindex(index=i, fill_value=np.nan)
        # Rows that are entirely zero/NaN get the weekday mean profile.
        nan_idx = df[~df.any(axis=1)].index
        df.loc[nan_idx] = df.loc[nan_idx].apply(
            lambda x: day_avg[x.name.dayofweek].loc[x.name.strftime("%H:%M")], axis=1
        )
        return df

    def train_dataloader(self):
        # Residual mode swaps in the residual frames; scaler stays the same.
        df = self.train_df_resi if self.residual else self.train_df
        ds = TimeSeriesDatasetWeek(df, dt=self.dt, scaler=self.scaler,
                                   seq_len=self.seq_len, horizon=self.horizon, weeks=self.weeks)
        return DataLoader(ds, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)

    def val_dataloader(self):
        df = self.valid_df_resi if self.residual else self.valid_df
        ds = TimeSeriesDatasetWeek(df, dt=self.dt, scaler=self.scaler,
                                   seq_len=self.seq_len, horizon=self.horizon, weeks=self.weeks)
        return DataLoader(ds, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers)

    def test_dataloader(self):
        df = self.test_df_resi if self.residual else self.test_df
        ds = TimeSeriesDatasetWeek(df, dt=self.dt, scaler=self.scaler,
                                   seq_len=self.seq_len, horizon=self.horizon, weeks=self.weeks)
        return DataLoader(ds, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers)
|
semink/TCN
|
GAT/model.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from GAT.layers import GraphAttentionLayer
class GAT(nn.Module):
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        """Dense version of GAT.

        Args:
            nfeat: input feature size per node.
            nhid: hidden size per attention head.
            nclass: output feature size.
            dropout: dropout probability for inputs and hidden activations.
            alpha: negative slope of the LeakyReLU inside the attention.
            nheads: number of parallel attention heads.
        """
        super(GAT, self).__init__()
        self.dropout = dropout
        self.attentions = [GraphAttentionLayer(
            nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)
        self.out_att = GraphAttentionLayer(
            nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)
        # BUG FIX: nn.Conv1d() was called with no arguments, which raises a
        # TypeError as soon as nfeat != nclass.  A kernel-size-1 conv
        # projects the residual from nfeat to nclass channels.
        self.downsample = nn.Conv1d(nfeat, nclass, 1) if nfeat != nclass else None
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.01)

    def forward(self, x, adjs):
        out = F.dropout(x, self.dropout, training=self.training)
        out = torch.cat([att(out, adj)
                         for att, adj in zip(self.attentions, adjs)], dim=-2)
        out = F.dropout(out, self.dropout, training=self.training)
        # BUG FIX: the residual previously added x directly, which fails
        # whenever nfeat != nclass; route it through the 1x1 projection.
        # NOTE(review): this assumes channels sit where Conv1d expects them
        # (x shaped (batch, nfeat, N)) — confirm against the caller's layout.
        res = x if self.downsample is None else self.downsample(x)
        x = F.elu(res + self.out_att(out, adjs.sum(dim=0)))
        return x
|
fluency-in/python-grade-school
|
grade_school_test.py
|
try:
    # collections.abc since Python 3.3; the plain `collections` alias was
    # removed in Python 3.10.
    from collections.abc import Sequence
except ImportError:  # Python 2 fallback (this file still supports py2)
    from collections import Sequence
from types import GeneratorType
import unittest
from grade_school import School
class SchoolTest(unittest.TestCase):
    """Exercism-style tests for the grade_school School roster."""

    def setUp(self):
        # assertCountEqual is py3; py2 only knows assertItemsEqual.
        if not hasattr(self, 'assertCountEqual'):
            self.assertCountEqual = self.assertItemsEqual
        self.school = School("Haleakala Hippy School")

    def test_an_empty_school(self):
        # Every grade of a fresh school must be empty.
        for n in range(1, 9):
            self.assertCountEqual(set(), self.school.grade(n))

    def test_add_student(self):
        self.school.add("Aimee", 2)
        self.assertCountEqual(("Aimee",), self.school.grade(2))

    def test_add_more_students_in_same_class(self):
        self.school.add("James", 2)
        self.school.add("Blair", 2)
        self.school.add("Paul", 2)
        self.assertCountEqual(("James", "Blair", "Paul"), self.school.grade(2))

    def test_add_students_to_different_grades(self):
        self.school.add("Chelsea", 3)
        self.school.add("Logan", 7)
        self.assertCountEqual(("Chelsea",), self.school.grade(3))
        self.assertCountEqual(("Logan",), self.school.grade(7))

    def test_get_students_in_a_grade(self):
        # Students from other grades must not leak into the result.
        self.school.add("Franklin", 5)
        self.school.add("Bradley", 5)
        self.school.add("Jeff", 1)
        self.assertCountEqual(("Franklin", "Bradley"), self.school.grade(5))

    def test_get_students_in_a_non_existant_grade(self):
        self.assertCountEqual(set(), self.school.grade(1))

    def test_sort_school(self):
        # sort() must yield (grade, names) pairs ordered by grade.
        students = [
            (3, ("Kyle",)),
            (4, ("Christopher", "Jennifer",)),
            (6, ("Kareem",))
        ]
        for grade, students_in_grade in students:
            for student in students_in_grade:
                self.school.add(student, grade)
        result = self.school.sort()
        # Attempts to catch false positives
        self.assertTrue(isinstance(result, Sequence) or
                        isinstance(result, GeneratorType) or
                        callable(getattr(result, '__reversed__', False)))
        result_list = list(result.items() if hasattr(result, "items")
                           else result)
        self.assertEqual(result_list, students)
if __name__ == '__main__':
unittest.main()
|
TcheL/Road2Filter
|
IIR/bidirection.py
|
#!/usr/bin/env python3
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
fs = 1000  # sampling frequency [Hz]
fc = 6     # cut-off frequency [Hz]
t = np.arange(1000)/fs
sga = np.sin(2*np.pi*2*t)   # signal with f = 2 Hz (to keep)
sgb = np.sin(2*np.pi*10*t)  # signal with f = 10 Hz (to reject)
sgo = sga + sgb #+ (np.random.random(1000) - 0.5)
# 4th-order Butterworth low-pass.  NOTE(review): signal.butter's Wn is
# normalised to the Nyquist frequency (fs/2) by default, so w = fc/fs
# corresponds to an effective cut-off of fc/2 Hz — compare o4zpsbwlpf.py,
# which passes fc/2 to the external tool; confirm this is intended.
w = fc/fs
b, a = signal.butter(4, w, 'low')
# Manual zero-phase filtering: filter forward, reverse, filter again,
# reverse back.  This doubles the attenuation and cancels the phase lag,
# mirroring signal.filtfilt (minus its edge handling).
sgf1 = signal.lfilter(b, a, sgo)
sgf1 = sgf1[ : : -1]
sgf1 = signal.lfilter(b, a, sgf1)
sgf1 = sgf1[ : : -1]
plt.plot(t, sgo, label = 'original')
plt.plot(t, sga, label = 'f = 2')
plt.plot(t, sgf1, 'r-', linewidth = 3, label = 'bidirectional')
# Reference curves: SciPy's zero-phase filtfilt and one-pass lfilter.
sgf2 = signal.filtfilt(b, a, sgo)
sgf3 = signal.lfilter(b, a, sgo)
plt.plot(t, sgf2, label = 'filtfilt')
plt.plot(t, sgf3, 'k-', linewidth = 1.5, label = 'lfilter')
plt.legend()
plt.show()
|
TcheL/Road2Filter
|
FIR/bpFIR.py
|
#!/usr/bin/env python3
import numpy as np
def bandPassFIR(fL, fH, b = 0.08):
    """Windowed-sinc band-pass FIR kernel for the band (fL, fH).

    fL, fH and the transition width *b* are fractions of the sampling rate.
    Built as low-pass(fH) convolved with high-pass(fL), where the high-pass
    is obtained by spectral inversion.  Returns (number of taps, kernel).
    """
    N = int(np.ceil(4 / b))
    if N % 2 == 0:
        N += 1  # an odd tap count keeps the kernel symmetric
    n = np.arange(N)
    # Low-pass prototype at fH: Blackman-windowed sinc, unity DC gain.
    lowpass = np.sinc(2 * fH * (n - (N - 1) / 2)) * np.blackman(N)
    lowpass /= np.sum(lowpass)
    # High-pass at fL via spectral inversion of the matching low-pass.
    highpass = np.sinc(2 * fL * (n - (N - 1) / 2)) * np.blackman(N)
    highpass /= np.sum(highpass)
    highpass = -highpass
    highpass[(N - 1) // 2] += 1
    kernel = np.convolve(lowpass, highpass)
    return 2*N - 1, kernel
#-------------------------------------------------------------------------------
import matplotlib.pyplot as plt
fs = 1000  # sampling frequency [Hz]
# generate properly the time vector
t = np.arange(1000)/fs
sga = np.sin(2*np.pi*2*t)   # signal with f = 2 Hz
sgb = np.sin(2*np.pi*6*t)   # signal with f = 6 Hz (the one to keep)
sgc = np.sin(2*np.pi*10*t)  # signal with f = 10 Hz
sgd = sga + sgb + sgc
# Pass band 4-8 Hz (frequencies as fractions of fs), 2 Hz transition band.
N, h = bandPassFIR(fL = 4/fs, fH = 8/fs, b = 2/fs)
sgf = np.convolve(sgd, h)
plt.plot(t, sgd, label = 'original')
# Shift by half the kernel length to undo the FIR group delay.
plt.plot(t, sgf[int(N/2):1000 + int(N/2)], label = 'band-pass')
plt.plot(t, sgb, label = 'f = 6')
plt.legend()
plt.show()
|
TcheL/Road2Filter
|
IIR/o4zpsbwlpf.py
|
#!/usr/bin/env python3
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
fs = 1000  # sampling frequency [Hz]
fc = 6     # cut-off frequency [Hz]
t = np.arange(1000)/fs
sga = np.sin(2*np.pi*2*t)   # signal with f = 2 Hz (to keep)
sgb = np.sin(2*np.pi*10*t)  # signal with f = 10 Hz (to reject)
sgo = sga + sgb #+ (np.random.random(1000) - 0.5)
# Round-trip the signal through the external zero-phase Butterworth
# low-pass binary via a shell pipeline and temp files.
# NOTE(review): fc/2 presumably compensates for the tool's frequency
# normalisation so its cut-off matches the w = fc/fs setting below — confirm.
np.savetxt('original.dat', sgo)
os.system('cat original.dat | ../bin/zpsbwlpf 1000 {:.2f} > filtered.dat'.format(fc/2))
sgf1 = np.loadtxt('filtered.dat')
plt.plot(t, sgo, label = 'original')
plt.plot(t, sga, label = 'f = 2')
plt.plot(t, sgf1, 'r-', linewidth = 3, label = 'o4zpsbwlpf')
# Reference: SciPy Butterworth, zero-phase (filtfilt) and one-pass (lfilter).
w = fc/fs
b, a = signal.butter(4, w, 'low')
sgf2 = signal.filtfilt(b, a, sgo)
sgf3 = signal.lfilter(b, a, sgo)
plt.plot(t, sgf2, label = 'filtfilt')
plt.plot(t, sgf3, 'k-', linewidth = 1.5, label = 'lfilter')
plt.legend()
plt.show()
# Remove the temp files created for the external tool.
os.system('rm -f original.dat filtered.dat')
|
TcheL/Road2Filter
|
FIR/lpFIR.py
|
#!/usr/bin/env python3
import numpy as np
def lowPassFIR(fc, b = 0.08):
    """Windowed-sinc low-pass FIR kernel.

    Parameters:
        fc: cutoff frequency as a fraction of the sampling rate (in (0, 0.5)).
        b: transition band, as a fraction of the sampling rate (in (0, 0.5)).

    Returns:
        (N, h): the (odd) number of taps and the unity-DC-gain kernel.
    """
    N = int(np.ceil((4 / b)))
    if not N % 2: N += 1  # Make sure that N is odd.
    n = np.arange(N)
    # Ideal sinc response truncated to N taps...
    h = np.sinc(2 * fc * (n - (N - 1) / 2))
    # ...shaped by a Blackman window.  np.blackman implements the same
    # 0.42 - 0.5*cos + 0.08*cos formula previously spelled out by hand,
    # matching the style of bandPassFIR in FIR/bpFIR.py.
    h = h * np.blackman(N)
    # Normalize to get unity gain at DC.
    h = h / np.sum(h)
    return N, h
#-------------------------------------------------------------------------------
import matplotlib.pyplot as plt
fs = 1000  # sampling frequency [Hz]
# generate properly the time vector
t = np.arange(1000)/fs
sga = np.sin(2*np.pi*2*t)   # signal with f = 2 Hz (to keep)
sgb = np.sin(2*np.pi*10*t)  # signal with f = 10 Hz (to reject)
sgc = sga + sgb
# Low-pass at 6 Hz (as a fraction of fs), 2 Hz transition band.
N, h = lowPassFIR(fc = 6/fs, b = 2/fs)
sgf = np.convolve(sgc, h)
plt.plot(t, sgc, label = 'original')
# Shift by half the kernel length to undo the FIR group delay.
plt.plot(t, sgf[int(N/2):1000 + int(N/2)], label = 'low-pass')
plt.plot(t, sga, label = 'f = 2')
plt.legend()
plt.show()
|
PaLeroy/smac
|
smac/env/starcraft2/starcraft2multi.py
|
<gh_stars>1-10
from smac.env.starcraft2.starcraft2 import StarCraft2Env
from smac.env.starcraft2.starcraft2 import races, difficulties, Direction
from smac.env.starcraft2.starcraft2 import actions as actions_api
from operator import attrgetter
from copy import deepcopy
import numpy as np
from absl import logging
from pysc2 import maps
from pysc2 import run_configs
from pysc2.lib import protocol, run_parallel, portspicker
from s2clientprotocol import sc2api_pb2 as sc_pb
from s2clientprotocol import raw_pb2 as r_pb
from s2clientprotocol import debug_pb2 as d_pb
class StarCraft2EnvMulti(StarCraft2Env):
    def __init__(self, *args, **kwargs):
        """Two-team SC2 environment; tracks per-team rewards and win counts."""
        super().__init__(*args, **kwargs)
        # Best-case return for the second team (all kills + win bonus).
        self.max_reward_p2 = (
            self.n_agents * self.reward_death_value + self.reward_win
        )
        # One-hot record of the last action of every unit on both teams.
        self.last_action = np.zeros(
            (self.n_agents + self.n_enemies, self.n_actions))
        # When True, the corresponding team is driven by the built-in
        # heuristic instead of the supplied actions.
        self.team_1_heuristic = False
        self.team_2_heuristic = False
        self.action_error = 0
        # Cumulative per-team statistics across episodes.
        self.battles_won_team_1 = 0
        self.battles_won_team_2 = 0
        self.sum_rewards_team1 = 0
        self.sum_rewards_team2 = 0
def _launch(self):
# Multi player, based on the implement in:
# https://github.com/deepmind/pysc2/blob/master/pysc2/env/sc2_env.py
n_players = 2
self._run_config = run_configs.get(version=self.game_version)
self.parallel = run_parallel.RunParallel()
_map = maps.get(self.map_name)
interface_options = sc_pb.InterfaceOptions(raw=True, score=False)
# Reserve a whole bunch of ports
ports = portspicker.pick_unused_ports(n_players * 2)
# Actually launch the game processes.
self._sc2_proc = [self._run_config.start(
extra_ports=ports,
window_size=self.window_size,
want_rgb=False)
for _ in range(n_players)]
self._controller = [p.controller for p in self._sc2_proc]
for c in self._controller:
c.save_map(_map.path, _map.data(self._run_config))
# Create the create request.
create = sc_pb.RequestCreateGame(
local_map=sc_pb.LocalMap(
map_path=_map.path,
map_data=self._run_config.map_data(_map.path)),
realtime=False,
random_seed=self._seed)
for _ in range(n_players):
create.player_setup.add(type=sc_pb.Participant)
self._controller[0].create_game(create)
ports_copy = ports[:]
# Create the join requests.
join_resquests = []
join = sc_pb.RequestJoinGame(race=races[self._agent_race],
options=interface_options)
join.shared_port = 0 # unused
join.server_ports.game_port = ports_copy.pop(0)
join.server_ports.base_port = ports_copy.pop(0)
for _ in range(n_players - 1):
join.client_ports.add(game_port=ports_copy.pop(0),
base_port=ports_copy.pop(0))
join_resquests.append(join)
ports_copy = ports[:]
join = sc_pb.RequestJoinGame(race=races[self._bot_race],
options=interface_options)
join.shared_port = 0 # unused
join.server_ports.game_port = ports_copy.pop(0)
join.server_ports.base_port = ports_copy.pop(0)
for _ in range(n_players - 1):
join.client_ports.add(game_port=ports_copy.pop(0),
base_port=ports_copy.pop(0))
join_resquests.append(join)
self.parallel.run((c.join_game, join__) for c, join__ in
zip(self._controller, join_resquests))
game_info = self._controller[0].game_info()
map_info = game_info.start_raw
map_play_area_min = map_info.playable_area.p0
map_play_area_max = map_info.playable_area.p1
self.max_distance_x = map_play_area_max.x - map_play_area_min.x
self.max_distance_y = map_play_area_max.y - map_play_area_min.y
self.map_x = map_info.map_size.x
self.map_y = map_info.map_size.y
self.terrain_height = np.flip(
np.transpose(np.array(list(map_info.terrain_height.data))
.reshape(self.map_x, self.map_y)), 1) / 255
if map_info.pathing_grid.bits_per_pixel == 1:
vals = np.array(list(map_info.pathing_grid.data)).reshape(
self.map_x, int(self.map_y / 8))
self.pathing_grid = np.transpose(np.array([
[(b >> i) & 1 for b in row for i in range(7, -1, -1)]
for row in vals], dtype=np.bool))
else:
self.pathing_grid = np.invert(np.flip(np.transpose(np.array(
list(map_info.pathing_grid.data), dtype=np.bool).reshape(
self.map_x, self.map_y)), axis=1))
    def reset(self):
        """Reset the environment. Required after each full episode.
        Returns initial observations and states.
        """
        self._episode_steps = 0
        if self._episode_count == 0:
            # Launch StarCraft II
            self._launch()
        else:
            self._restart()
        # Information kept for counting the reward
        self.death_tracker_ally = np.zeros(self.n_agents)
        self.death_tracker_enemy = np.zeros(self.n_enemies)
        self.previous_ally_units = None
        self.previous_enemy_units = None
        self.win_counted = False
        self.defeat_counted = False
        self.sum_rewards_team1 = 0
        self.sum_rewards_team2 = 0
        self.last_action = np.zeros(
            (self.n_agents + self.n_enemies, self.n_actions))
        try:
            # Observe once per controller, then (re)build the unit lists.
            self._obs = []
            for c in self._controller:
                self._obs.append(c.observe())
            self.init_units()
        except (protocol.ProtocolError, protocol.ConnectionError):
            self.full_restart()
        if self.debug:
            logging.debug("Started Episode {}"
                          .format(self._episode_count).center(60, "*"))
        if self.log_more_stats:
            # Detailed per-unit statistics, reset every episode:
            # distance traveled by each unit, starting positions...
            self.distance_traveled_team_1 = [0 for _ in range(self.n_agents)]
            self.distance_traveled_team_2 = [0 for _ in range(self.n_enemies)]
            self.previous_team_1_pos = [[al_unit.pos.x, al_unit.pos.y] for
                                        idx, al_unit
                                        in self.agents.items()]
            self.previous_team_2_pos = [[en_unit.pos.x, en_unit.pos.y] for
                                        idx, en_unit
                                        in self.enemies.items()]
            # ...counts of each action category taken...
            self.attack_actions_team_1 = [0 for _ in range(self.n_agents)]
            self.attack_actions_team_2 = [0 for _ in range(self.n_enemies)]
            self.move_actions_team_1 = [0 for _ in range(self.n_agents)]
            self.move_actions_team_2 = [0 for _ in range(self.n_enemies)]
            self.stop_actions_team_1 = [0 for _ in range(self.n_agents)]
            self.stop_actions_team_2 = [0 for _ in range(self.n_enemies)]
            # ...whether each (unit, opponent) pair was ever within
            # shoot/sight range...
            self.once_in_shoot_range_opponent_team_1 = [
                [False for _ in range(self.n_enemies)]
                for _ in range(self.n_agents)]
            self.once_in_shoot_range_opponent_team_2 = [
                [False for _ in range(self.n_agents)]
                for _ in range(self.n_enemies)]
            self.once_in_sight_range_opponent_team_1 = [
                [False for _ in range(self.n_enemies)]
                for _ in range(self.n_agents)]
            self.once_in_sight_range_opponent_team_2 = [
                [False for _ in range(self.n_agents)]
                for _ in range(self.n_enemies)]
            # ...and move-direction counters (toward/away from opponents)
            # while inside sight/shoot range, per team.
            self.move_in_sight_range_team1 = [0 for _ in
                                              range(self.n_agents)]
            self.move_toward_in_sight_range_team1 = [
                [0 for _ in range(self.n_enemies)] for _ in
                range(self.n_agents)]
            self.move_away_in_sight_range_team1 = [
                [0 for _ in range(self.n_enemies)] for _ in
                range(self.n_agents)]
            self.move_in_shoot_range_team1 = [0 for _ in range(self.n_agents)]
            self.move_toward_in_shoot_range_team1 = [
                [0 for _ in range(self.n_enemies)] for _ in
                range(self.n_agents)]
            self.move_away_in_shoot_range_team1 = [
                [0 for _ in range(self.n_enemies)] for _ in
                range(self.n_agents)]
            self.move_in_sight_range_team2 = [0 for _ in range(self.n_enemies)]
            self.move_toward_in_sight_range_team2 = [
                [0 for _ in range(self.n_agents)] for _ in
                range(self.n_enemies)]
            self.move_away_in_sight_range_team2 = [
                [0 for _ in range(self.n_agents)] for _ in
                range(self.n_enemies)]
            self.move_in_shoot_range_team2 = [0 for _ in range(self.n_enemies)]
            self.move_toward_in_shoot_range_team2 = [
                [0 for _ in range(self.n_agents)] for _ in
                range(self.n_enemies)]
            self.move_away_in_shoot_range_team2 = [
                [0 for _ in range(self.n_agents)] for _ in
                range(self.n_enemies)]
        return self.get_obs(), self.get_state()
def _restart(self):
"""Restart the environment by killing all units on the map.
There is a trigger in the SC2Map file, which restarts the
episode when there are no units left.
"""
try:
self._kill_all_units()
for _ in range(3):
for c in self._controller:
c.step()
except (protocol.ProtocolError, protocol.ConnectionError):
self.full_restart()
def full_restart(self):
"""Full restart. Closes the SC2 process and launches a new one. """
for p in self._sc2_proc:
p.close()
try:
self._launch()
self.force_restarts += 1
except:
self.full_restart()
def setup_heuristic(self, team_1: bool, team_2: bool):
self.team_1_heuristic = team_1
self.team_2_heuristic = team_2
    def step(self, actions):
        """Apply one environment step for both teams.

        ``actions`` holds one action index per unit: the first
        ``self.n_agents`` entries control team 1 and the remaining
        ``self.n_enemies`` entries control team 2.  Returns
        ``(reward_all, terminated, info)`` where ``reward_all`` contains
        one (per-team, shared) reward entry per unit.
        """
        actions = [int(a) for a in actions]
        # Optionally override either team's actions with the scripted policy.
        if self.team_1_heuristic:
            for i in range(self.n_agents):
                actions[i] = self.get_heuristic_action(i)
        if self.team_2_heuristic:
            for i in range(self.n_enemies):
                actions[self.n_agents + i] = self.get_heuristic_action(
                    self.n_agents + i)
        if self.log_more_stats:
            # count type of actions
            # Action index convention used below: 1 = stop, 2..5 = move
            # (north/south/east/west), >5 = attack/heal a target.
            for i in range(self.n_agents):
                if actions[i] > 5:
                    self.attack_actions_team_1[i] += 1
                elif actions[i] > 1:
                    self.move_actions_team_1[i] += 1
                elif actions[i] == 1:
                    self.stop_actions_team_1[i] += 1
            for i in range(self.n_enemies):
                if actions[self.n_agents + i] > 5:
                    self.attack_actions_team_2[i] += 1
                elif actions[self.n_agents + i] > 1:
                    self.move_actions_team_2[i] += 1
                elif actions[self.n_agents + i] == 1:
                    self.stop_actions_team_2[i] += 1
            # Snapshot every unit's current position for the range/movement
            # statistics and the distance-traveled bookkeeping below.
            new_pos_team_1 = []
            new_pos_team_2 = []
            for i in range(self.n_agents):
                unit = self.get_unit_by_id(i)
                new_pos_team_1.append((unit.pos.x, unit.pos.y))
            for i in range(self.n_enemies):
                unit = self.get_unit_by_id(self.n_agents + i)
                new_pos_team_2.append((unit.pos.x, unit.pos.y))
            # Team-1 stats: for each agent, track opponents seen in
            # shoot/sight range and classify move actions as toward/away
            # from each such opponent.
            for i in range(self.n_agents):
                shoot_range = self.unit_shoot_range(i)
                sight_range = self.unit_sight_range(i)
                move_in_shoot_not_counted = True
                move_in_sight_not_counted = True
                for t_id, t_unit in self.enemies.items():
                    if t_unit.health > 0:
                        dist = self.distance(
                            new_pos_team_1[i][0], new_pos_team_1[i][1],
                            t_unit.pos.x, t_unit.pos.y
                        )
                        if dist <= shoot_range:
                            self.once_in_shoot_range_opponent_team_1[i][
                                t_id] = True
                            if 1 < actions[i] < 6:
                                # the "moved while in range" counter is
                                # incremented at most once per step
                                if move_in_shoot_not_counted:
                                    self.move_in_shoot_range_team1[i] += 1
                                    move_in_shoot_not_counted = False
                                x_diff = new_pos_team_1[i][0] - t_unit.pos.x
                                y_diff = new_pos_team_1[i][1] - t_unit.pos.y
                                if actions[i] == 2:
                                    # north
                                    if y_diff < 0:
                                        self.move_toward_in_shoot_range_team1[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_shoot_range_team1[i][
                                            t_id] += 1
                                if actions[i] == 3:
                                    # south
                                    if y_diff > 0:
                                        self.move_toward_in_shoot_range_team1[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_shoot_range_team1[i][
                                            t_id] += 1
                                if actions[i] == 4:
                                    # east
                                    if x_diff < 0:
                                        self.move_toward_in_shoot_range_team1[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_shoot_range_team1[i][
                                            t_id] += 1
                                if actions[i] == 5:
                                    # west
                                    if x_diff > 0:
                                        self.move_toward_in_shoot_range_team1[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_shoot_range_team1[i][
                                            t_id] += 1
                        elif dist <= sight_range:
                            self.once_in_sight_range_opponent_team_1[i][
                                t_id] = True
                            if 1 < actions[i] < 6:
                                if move_in_sight_not_counted:
                                    self.move_in_sight_range_team1[i] += 1
                                    move_in_sight_not_counted = False
                                x_diff = new_pos_team_1[i][0] - t_unit.pos.x
                                y_diff = new_pos_team_1[i][1] - t_unit.pos.y
                                if actions[i] == 2:
                                    # north
                                    if y_diff < 0:
                                        self.move_toward_in_sight_range_team1[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_sight_range_team1[i][
                                            t_id] += 1
                                if actions[i] == 3:
                                    # south
                                    if y_diff > 0:
                                        self.move_toward_in_sight_range_team1[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_sight_range_team1[i][
                                            t_id] += 1
                                if actions[i] == 4:
                                    # east
                                    if x_diff < 0:
                                        self.move_toward_in_sight_range_team1[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_sight_range_team1[i][
                                            t_id] += 1
                                if actions[i] == 5:
                                    # west
                                    if x_diff > 0:
                                        self.move_toward_in_sight_range_team1[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_sight_range_team1[i][
                                            t_id] += 1
            # Team-2 stats: same bookkeeping, mirrored.
            for i in range(self.n_enemies):
                shoot_range = self.unit_shoot_range(self.n_agents + i)
                sight_range = self.unit_sight_range(self.n_agents + i)
                move_in_shoot_not_counted = True
                move_in_sight_not_counted = True
                action__ = actions[self.n_agents + i]
                for t_id, t_unit in self.agents.items():
                    if t_unit.health > 0:
                        dist = self.distance(
                            new_pos_team_2[i][0], new_pos_team_2[i][1],
                            t_unit.pos.x, t_unit.pos.y
                        )
                        if dist <= shoot_range:
                            self.once_in_shoot_range_opponent_team_2[i][
                                t_id] = True
                            if 1 < action__ < 6:
                                if move_in_shoot_not_counted:
                                    self.move_in_shoot_range_team2[i] += 1
                                    move_in_shoot_not_counted = False
                                x_diff = new_pos_team_2[i][0] - t_unit.pos.x
                                y_diff = new_pos_team_2[i][1] - t_unit.pos.y
                                if action__ == 2:
                                    # north
                                    if y_diff < 0:
                                        self.move_toward_in_shoot_range_team2[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_shoot_range_team2[i][
                                            t_id] += 1
                                if action__ == 3:
                                    # south
                                    if y_diff > 0:
                                        self.move_toward_in_shoot_range_team2[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_shoot_range_team2[i][
                                            t_id] += 1
                                if action__ == 4:
                                    # east
                                    if x_diff < 0:
                                        self.move_toward_in_shoot_range_team2[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_shoot_range_team2[i][
                                            t_id] += 1
                                if action__ == 5:
                                    # west
                                    if x_diff > 0:
                                        self.move_toward_in_shoot_range_team2[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_shoot_range_team2[i][
                                            t_id] += 1
                        elif dist <= sight_range:
                            self.once_in_sight_range_opponent_team_2[i][
                                t_id] = True
                            if 1 < action__ < 6:
                                if move_in_sight_not_counted:
                                    self.move_in_sight_range_team2[i] += 1
                                    move_in_sight_not_counted = False
                                x_diff = new_pos_team_2[i][0] - t_unit.pos.x
                                y_diff = new_pos_team_2[i][1] - t_unit.pos.y
                                if action__ == 2:
                                    # north
                                    if y_diff < 0:
                                        self.move_toward_in_sight_range_team2[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_sight_range_team2[i][
                                            t_id] += 1
                                if action__ == 3:
                                    # south
                                    if y_diff > 0:
                                        self.move_toward_in_sight_range_team2[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_sight_range_team2[i][
                                            t_id] += 1
                                if action__ == 4:
                                    # east
                                    if x_diff < 0:
                                        self.move_toward_in_sight_range_team2[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_sight_range_team2[i][
                                            t_id] += 1
                                if action__ == 5:
                                    # west
                                    if x_diff > 0:
                                        self.move_toward_in_sight_range_team2[
                                            i][t_id] += 1
                                    else:
                                        self.move_away_in_sight_range_team2[i][
                                            t_id] += 1
            # Accumulate per-unit distance traveled since the last step and
            # roll the position snapshot forward.
            for i in range(self.n_agents):
                self.distance_traveled_team_1[i] += self.distance(
                    self.previous_team_1_pos[i][0],
                    self.previous_team_1_pos[i][1],
                    new_pos_team_1[i][0],
                    new_pos_team_1[i][1])
                self.previous_team_1_pos[i][0] = new_pos_team_1[i][0]
                self.previous_team_1_pos[i][1] = new_pos_team_1[i][1]
            for i in range(self.n_enemies):
                self.distance_traveled_team_2[i] += self.distance(
                    self.previous_team_2_pos[i][0],
                    self.previous_team_2_pos[i][1],
                    new_pos_team_2[i][0],
                    new_pos_team_2[i][1])
                self.previous_team_2_pos[i][0] = new_pos_team_2[i][0]
                self.previous_team_2_pos[i][1] = new_pos_team_2[i][1]
        # One-hot record of every unit's last action (used by observations).
        self.last_action = np.eye(self.n_actions)[np.array(actions)]
        # Collect individual actions
        sc_actions_team_1 = []
        sc_actions_team_2 = []
        if self.debug:
            logging.debug("Actions".center(60, "-"))
        try:
            for a_id, action in enumerate(actions):
                agent_action = self.get_agent_action(a_id, action)
                if agent_action:
                    if a_id < self.n_agents:
                        sc_actions_team_1.append(agent_action)
                    else:
                        sc_actions_team_2.append(agent_action)
        except AssertionError as err:
            # An unavailable action was requested: abort the episode and
            # report an environment error instead of crashing.
            self._episode_count += 1
            self.action_error += 1
            self.reset()
            return [0 for _ in actions], True, {"battle_won_team_1": False,
                                                "battle_won_team_2": False,
                                                "env_error": True}
        # One RequestAction per SC2 controller (one controller per team).
        req_actions_p1 = sc_pb.RequestAction(
            actions=sc_actions_team_1)
        req_actions_p2 = sc_pb.RequestAction(
            actions=sc_actions_team_2)
        req_actions_all = [req_actions_p1, req_actions_p2]
        try:
            for idx_, (controller, req_actions) \
                    in enumerate(zip(self._controller, req_actions_all)):
                controller.actions(req_actions)
            # Make step in SC2, i.e. apply actions
            if self._step_mul is not None:
                for _ in range(self._step_mul):
                    for c in self._controller:
                        c.step()
            # Observe here so that we know if the episode is over.
            for idx_, c in enumerate(self._controller):
                self._obs[idx_] = c.observe()
        except (protocol.ProtocolError, protocol.ConnectionError):
            self.full_restart()
            return [0 for _ in actions], True, {"battle_won_team_1": False,
                                                "battle_won_team_2": False,
                                                "env_error": True}
        self._total_steps += 1
        self._episode_steps += 1
        # Update units
        game_end_code = self.update_units()
        terminated = False
        reward = self.reward_battle()
        info = {"battle_won_team_1": False,
                "battle_won_team_2": False}
        if game_end_code is not None:
            # Battle is over
            terminated = True
            self.battles_game += 1
            if self.log_more_stats:
                # Final positions normalised to [-1, 1]-ish map coordinates
                # for the end-of-episode stats below.
                center_x = self.map_x / 2
                center_y = self.map_y / 2
                pos_team_1 = []
                pos_team_2 = []
                for i in range(self.n_agents):
                    unit = self.get_unit_by_id(i)
                    pos_team_1.append(((
                        unit.pos.x - center_x) / self.max_distance_x,
                                       (
                                           unit.pos.y - center_y) / self.max_distance_y))
                for i in range(self.n_enemies):
                    unit = self.get_unit_by_id(self.n_agents + i)
                    pos_team_2.append(((
                        unit.pos.x - center_x) / self.max_distance_x,
                                       (
                                           unit.pos.y - center_y) / self.max_distance_y))
            if game_end_code == 1 and not self.win_counted:
                # Team 1 won.
                self.win_counted = True
                self.battles_won_team_1 += 1
                info["battle_won_team_1"] = True
                if not self.reward_sparse:
                    reward[0] += self.reward_win
                    reward[1] += self.reward_defeat
                else:
                    reward[0] = 1
                    reward[1] = -1
                if self.log_more_stats:
                    # Records remaining health
                    for i in range(self.n_agents):
                        unit = self.get_unit_by_id(i)
                        info["win_health_team_1_agent_" + str(
                            i)] = unit.health / unit.health_max
                        info["win_position_x_team_1_agent_" + str(
                            i)] = pos_team_1[i][0]
                        info["win_position_y_team_1_agent_" + str(
                            i)] = pos_team_1[i][1]
                        info["win_distance_traveled_team_1_agent_" + str(
                            i)] = self.distance_traveled_team_1[i]
                        info["win_attack_actions_team_1_agent_" + str(
                            i)] = self.attack_actions_team_1[i]
                        info["win_move_actions_team_1_agent_" + str(
                            i)] = self.move_actions_team_1[i]
                        info["win_stop_actions_team_1_agent_" + str(
                            i)] = self.stop_actions_team_1[i]
                        info[
                            "win_once_in_shoot_range_opponent_1_team_1_agent_" + str(
                                i)] = \
                            self.once_in_shoot_range_opponent_team_1[i][0]
                        info[
                            "win_once_in_shoot_range_opponent_2_team_1_agent_" + str(
                                i)] = \
                            self.once_in_shoot_range_opponent_team_1[i][1]
                        info[
                            "win_once_in_shoot_range_opponent_3_team_1_agent_" + str(
                                i)] = \
                            self.once_in_shoot_range_opponent_team_1[i][2]
                        info[
                            "win_once_in_sight_range_opponent_1_team_1_agent_" + str(
                                i)] = \
                            self.once_in_sight_range_opponent_team_1[i][0]
                        info[
                            "win_once_in_sight_range_opponent_2_team_1_agent_" + str(
                                i)] = \
                            self.once_in_sight_range_opponent_team_1[i][1]
                        info[
                            "win_once_in_sight_range_opponent_3_team_1_agent_" + str(
                                i)] = \
                            self.once_in_sight_range_opponent_team_1[i][2]
                        info[
                            "win_move_in_sight_range_team_1_agent_" + str(i)] = \
                            self.move_in_sight_range_team1[i]
                        info[
                            "win_move_toward_in_sight_range_1_team_1_agent_" + str(
                                i)] = self.move_toward_in_sight_range_team1[i][
                            0]
                        info[
                            "win_move_toward_in_sight_range_2_team_1_agent_" + str(
                                i)] = self.move_toward_in_sight_range_team1[i][
                            1]
                        info[
                            "win_move_toward_in_sight_range_3_team_1_agent_" + str(
                                i)] = self.move_toward_in_sight_range_team1[i][
                            2]
                        info[
                            "win_move_away_in_sight_range_1_team_1_agent_" + str(
                                i)] = self.move_away_in_sight_range_team1[i][0]
                        info[
                            "win_move_away_in_sight_range_2_team_1_agent_" + str(
                                i)] = self.move_away_in_sight_range_team1[i][1]
                        info[
                            "win_move_away_in_sight_range_3_team_1_agent_" + str(
                                i)] = self.move_away_in_sight_range_team1[i][2]
                        info[
                            "win_move_in_shoot_range_team_1_agent_" + str(i)] = \
                            self.move_in_shoot_range_team1[i]
                        info[
                            "win_move_toward_in_shoot_range_1_team_1_agent_" + str(
                                i)] = self.move_toward_in_shoot_range_team1[i][
                            0]
                        info[
                            "win_move_toward_in_shoot_range_2_team_1_agent_" + str(
                                i)] = self.move_toward_in_shoot_range_team1[i][
                            1]
                        info[
                            "win_move_toward_in_shoot_range_3_team_1_agent_" + str(
                                i)] = self.move_toward_in_shoot_range_team1[i][
                            2]
                        info[
                            "win_move_away_in_shoot_range_1_team_1_agent_" + str(
                                i)] = self.move_away_in_shoot_range_team1[i][0]
                        info[
                            "win_move_away_in_shoot_range_2_team_1_agent_" + str(
                                i)] = self.move_away_in_shoot_range_team1[i][1]
                        info[
                            "win_move_away_in_shoot_range_3_team_1_agent_" + str(
                                i)] = self.move_away_in_shoot_range_team1[i][2]
                    for i in range(self.n_enemies):
                        info["loss_position_x_team_2_agent_" + str(
                            i)] = pos_team_2[i][0]
                        info["loss_position_y_team_2_agent_" + str(
                            i)] = pos_team_2[i][1]
                        info["loss_distance_traveled_team_2_agent_" + str(
                            i)] = self.distance_traveled_team_2[i]
                        info["loss_attack_actions_team_2_agent_" + str(
                            i)] = self.attack_actions_team_2[i]
                        info["loss_move_actions_team_2_agent_" + str(
                            i)] = self.move_actions_team_2[i]
                        info["loss_stop_actions_team_2_agent_" + str(
                            i)] = self.stop_actions_team_2[i]
                        info[
                            "loss_once_in_shoot_range_opponent_team_2_agent_" + str(
                                i)] = self.once_in_shoot_range_opponent_team_2[
                            i]
                        info[
                            "loss_once_in_sight_range_opponent_team_2_agent_" + str(
                                i)] = self.once_in_sight_range_opponent_team_2[
                            i]
                        info[
                            "loss_once_in_shoot_range_opponent_1_team_2_agent_" + str(
                                i)] = \
                            self.once_in_shoot_range_opponent_team_2[i][0]
                        info[
                            "loss_once_in_shoot_range_opponent_2_team_2_agent_" + str(
                                i)] = \
                            self.once_in_shoot_range_opponent_team_2[i][1]
                        info[
                            "loss_once_in_shoot_range_opponent_3_team_2_agent_" + str(
                                i)] = \
                            self.once_in_shoot_range_opponent_team_2[i][2]
                        info[
                            "loss_once_in_sight_range_opponent_1_team_2_agent_" + str(
                                i)] = \
                            self.once_in_sight_range_opponent_team_2[i][0]
                        info[
                            "loss_once_in_sight_range_opponent_2_team_2_agent_" + str(
                                i)] = \
                            self.once_in_sight_range_opponent_team_2[i][1]
                        info[
                            "loss_once_in_sight_range_opponent_3_team_2_agent_" + str(
                                i)] = \
                            self.once_in_sight_range_opponent_team_2[i][2]
                        info["loss_move_in_sight_range_team_2_agent_" + str(
                            i)] = self.move_in_sight_range_team2[i]
                        info[
                            "loss_move_toward_in_sight_range_1_team_2_agent_" + str(
                                i)] = self.move_toward_in_sight_range_team2[i][
                            0]
                        info[
                            "loss_move_toward_in_sight_range_2_team_2_agent_" + str(
                                i)] = self.move_toward_in_sight_range_team2[i][
                            1]
                        info[
                            "loss_move_toward_in_sight_range_3_team_2_agent_" + str(
                                i)] = self.move_toward_in_sight_range_team2[i][
                            2]
                        info[
                            "loss_move_away_in_sight_range_1_team_2_agent_" + str(
                                i)] = self.move_away_in_sight_range_team2[i][0]
                        info[
                            "loss_move_away_in_sight_range_2_team_2_agent_" + str(
                                i)] = self.move_away_in_sight_range_team2[i][1]
                        info[
                            "loss_move_away_in_sight_range_3_team_2_agent_" + str(
                                i)] = self.move_away_in_sight_range_team2[i][2]
                        info["loss_move_in_shoot_range_team_2_agent_" + str(
                            i)] = self.move_in_shoot_range_team2[i]
                        info[
                            "loss_move_toward_in_shoot_range_1_team_2_agent_" + str(
                                i)] = self.move_toward_in_shoot_range_team2[i][
                            0]
                        info[
                            "loss_move_toward_in_shoot_range_2_team_2_agent_" + str(
                                i)] = self.move_toward_in_shoot_range_team2[i][
                            1]
                        info[
                            "loss_move_toward_in_shoot_range_3_team_2_agent_" + str(
                                i)] = self.move_toward_in_shoot_range_team2[i][
                            2]
                        info[
                            "loss_move_away_in_shoot_range_1_team_2_agent_" + str(
                                i)] = self.move_away_in_shoot_range_team2[i][0]
                        info[
                            "loss_move_away_in_shoot_range_2_team_2_agent_" + str(
                                i)] = self.move_away_in_shoot_range_team2[i][1]
                        info[
                            "loss_move_away_in_shoot_range_3_team_2_agent_" + str(
                                i)] = self.move_away_in_shoot_range_team2[i][2]
            elif game_end_code == -1 and not self.defeat_counted:
                # Team 2 won.
                self.defeat_counted = True
                self.battles_won_team_2 += 1
                info["battle_won_team_2"] = True
                if not self.reward_sparse:
                    reward[0] += self.reward_defeat
                    reward[1] += self.reward_win
                else:
                    reward[0] = -1
                    reward[1] = 1
                if self.log_more_stats:
                    for i in range(self.n_enemies):
                        unit = self.get_unit_by_id(self.n_agents + i)
                        info["win_health_team_2_agent_" + str(
                            i)] = unit.health / unit.health_max
                        info["win_position_x_team_2_agent_" + str(
                            i)] = pos_team_2[i][0]
                        info["win_position_y_team_2_agent_" + str(
                            i)] = pos_team_2[i][1]
                        info["win_distance_traveled_team_2_agent_" + str(
                            i)] = self.distance_traveled_team_2[i]
                        info["win_attack_actions_team_2_agent_" + str(
                            i)] = self.attack_actions_team_2[i]
                        info["win_move_actions_team_2_agent_" + str(
                            i)] = self.move_actions_team_2[i]
                        info["win_stop_actions_team_2_agent_" + str(
                            i)] = self.stop_actions_team_2[i]
                        info[
                            "win_once_in_shoot_range_opponent_team_2_agent_" + str(
                                i)] = self.once_in_shoot_range_opponent_team_2[
                            i]
                        info[
                            "win_once_in_sight_range_opponent_team_2_agent_" + str(
                                i)] = self.once_in_sight_range_opponent_team_2[
                            i]
                        info[
                            "win_once_in_shoot_range_opponent_1_team_2_agent_" + str(
                                i)] = \
                            self.once_in_shoot_range_opponent_team_2[i][0]
                        info[
                            "win_once_in_shoot_range_opponent_2_team_2_agent_" + str(
                                i)] = \
                            self.once_in_shoot_range_opponent_team_2[i][1]
                        info[
                            "win_once_in_shoot_range_opponent_3_team_2_agent_" + str(
                                i)] = \
                            self.once_in_shoot_range_opponent_team_2[i][2]
                        info[
                            "win_once_in_sight_range_opponent_1_team_2_agent_" + str(
                                i)] = \
                            self.once_in_sight_range_opponent_team_2[i][0]
                        info[
                            "win_once_in_sight_range_opponent_2_team_2_agent_" + str(
                                i)] = \
                            self.once_in_sight_range_opponent_team_2[i][1]
                        info[
                            "win_once_in_sight_range_opponent_3_team_2_agent_" + str(
                                i)] = \
                            self.once_in_sight_range_opponent_team_2[i][2]
                        info[
                            "win_move_in_sight_range_team_2_agent_" + str(i)] = \
                            self.move_in_sight_range_team2[i]
                        info[
                            "win_move_toward_in_sight_range_1_team_2_agent_" + str(
                                i)] = self.move_toward_in_sight_range_team2[i][
                            0]
                        info[
                            "win_move_toward_in_sight_range_2_team_2_agent_" + str(
                                i)] = self.move_toward_in_sight_range_team2[i][
                            1]
                        info[
                            "win_move_toward_in_sight_range_3_team_2_agent_" + str(
                                i)] = self.move_toward_in_sight_range_team2[i][
                            2]
                        info[
                            "win_move_away_in_sight_range_1_team_2_agent_" + str(
                                i)] = self.move_away_in_sight_range_team2[i][0]
                        info[
                            "win_move_away_in_sight_range_2_team_2_agent_" + str(
                                i)] = self.move_away_in_sight_range_team2[i][1]
                        info[
                            "win_move_away_in_sight_range_3_team_2_agent_" + str(
                                i)] = self.move_away_in_sight_range_team2[i][2]
                        info[
                            "win_move_in_shoot_range_team_2_agent_" + str(i)] = \
                            self.move_in_shoot_range_team2[i]
                        info[
                            "win_move_toward_in_shoot_range_1_team_2_agent_" + str(
                                i)] = self.move_toward_in_shoot_range_team2[i][
                            0]
                        info[
                            "win_move_toward_in_shoot_range_2_team_2_agent_" + str(
                                i)] = self.move_toward_in_shoot_range_team2[i][
                            1]
                        info[
                            "win_move_toward_in_shoot_range_3_team_2_agent_" + str(
                                i)] = self.move_toward_in_shoot_range_team2[i][
                            2]
                        info[
                            "win_move_away_in_shoot_range_1_team_2_agent_" + str(
                                i)] = self.move_away_in_shoot_range_team2[i][0]
                        info[
                            "win_move_away_in_shoot_range_2_team_2_agent_" + str(
                                i)] = self.move_away_in_shoot_range_team2[i][1]
                        info[
                            "win_move_away_in_shoot_range_3_team_2_agent_" + str(
                                i)] = self.move_away_in_shoot_range_team2[i][2]
                    for i in range(self.n_agents):
                        info["loss_position_x_team_1_agent_" + str(
                            i)] = pos_team_1[i][0]
                        info["loss_position_y_team_1_agent_" + str(
                            i)] = pos_team_1[i][1]
                        info["loss_distance_traveled_team_1_agent_" + str(
                            i)] = self.distance_traveled_team_1[i]
                        info["loss_attack_actions_team_1_agent_" + str(
                            i)] = self.attack_actions_team_1[i]
                        info["loss_move_actions_team_1_agent_" + str(
                            i)] = self.move_actions_team_1[i]
                        info["loss_stop_actions_team_1_agent_" + str(
                            i)] = self.stop_actions_team_1[i]
                        info[
                            "loss_once_in_shoot_range_opponent_1_team_1_agent_" + str(
                                i)] = \
                            self.once_in_shoot_range_opponent_team_1[i][0]
                        info[
                            "loss_once_in_shoot_range_opponent_2_team_1_agent_" + str(
                                i)] = \
                            self.once_in_shoot_range_opponent_team_1[i][1]
                        info[
                            "loss_once_in_shoot_range_opponent_3_team_1_agent_" + str(
                                i)] = \
                            self.once_in_shoot_range_opponent_team_1[i][2]
                        info[
                            "loss_once_in_sight_range_opponent_1_team_1_agent_" + str(
                                i)] = \
                            self.once_in_sight_range_opponent_team_1[i][0]
                        info[
                            "loss_once_in_sight_range_opponent_2_team_1_agent_" + str(
                                i)] = \
                            self.once_in_sight_range_opponent_team_1[i][1]
                        info[
                            "loss_once_in_sight_range_opponent_3_team_1_agent_" + str(
                                i)] = \
                            self.once_in_sight_range_opponent_team_1[i][2]
                        info["loss_move_in_sight_range_team_1_agent_" + str(
                            i)] = self.move_in_sight_range_team1[i]
                        info[
                            "loss_move_toward_in_sight_range_1_team_1_agent_" + str(
                                i)] = self.move_toward_in_sight_range_team1[i][
                            0]
                        info[
                            "loss_move_toward_in_sight_range_2_team_1_agent_" + str(
                                i)] = self.move_toward_in_sight_range_team1[i][
                            1]
                        info[
                            "loss_move_toward_in_sight_range_3_team_1_agent_" + str(
                                i)] = self.move_toward_in_sight_range_team1[i][
                            2]
                        info[
                            "loss_move_away_in_sight_range_1_team_1_agent_" + str(
                                i)] = self.move_away_in_sight_range_team1[i][0]
                        info[
                            "loss_move_away_in_sight_range_2_team_1_agent_" + str(
                                i)] = self.move_away_in_sight_range_team1[i][1]
                        info[
                            "loss_move_away_in_sight_range_3_team_1_agent_" + str(
                                i)] = self.move_away_in_sight_range_team1[i][2]
                        info["loss_move_in_shoot_range_team_1_agent_" + str(
                            i)] = self.move_in_shoot_range_team1[i]
                        info[
                            "loss_move_toward_in_shoot_range_1_team_1_agent_" + str(
                                i)] = self.move_toward_in_shoot_range_team1[i][
                            0]
                        info[
                            "loss_move_toward_in_shoot_range_2_team_1_agent_" + str(
                                i)] = self.move_toward_in_shoot_range_team1[i][
                            1]
                        info[
                            "loss_move_toward_in_shoot_range_3_team_1_agent_" + str(
                                i)] = self.move_toward_in_shoot_range_team1[i][
                            2]
                        info[
                            "loss_move_away_in_shoot_range_1_team_1_agent_" + str(
                                i)] = self.move_away_in_shoot_range_team1[i][0]
                        info[
                            "loss_move_away_in_shoot_range_2_team_1_agent_" + str(
                                i)] = self.move_away_in_shoot_range_team1[i][1]
                        info[
                            "loss_move_away_in_shoot_range_3_team_1_agent_" + str(
                                i)] = self.move_away_in_shoot_range_team1[i][2]
        elif self._episode_steps >= self.episode_limit:
            # Episode limit reached
            terminated = True
            if self.continuing_episode:
                info["episode_limit"] = True
            self.battles_game += 1
            self.timeouts += 1
            if self.log_more_stats:
                # Draw
                center_x = self.map_x / 2
                center_y = self.map_y / 2
                pos_team_1 = []
                pos_team_2 = []
                for i in range(self.n_agents):
                    unit = self.get_unit_by_id(i)
                    pos_team_1.append(((
                        unit.pos.x - center_x) / self.max_distance_x,
                                       (
                                           unit.pos.y - center_y) / self.max_distance_y))
                for i in range(self.n_enemies):
                    unit = self.get_unit_by_id(self.n_agents + i)
                    pos_team_2.append(((
                        unit.pos.x - center_x) / self.max_distance_x,
                                       (
                                           unit.pos.y - center_y) / self.max_distance_y))
                for i in range(self.n_agents):
                    unit = self.get_unit_by_id(i)
                    info["draw_health_team_1_agent_" + str(
                        i)] = unit.health / unit.health_max
                    info["draw_position_x_team_1_agent_" + str(
                        i)] = pos_team_1[i][0]
                    info["draw_position_y_team_1_agent_" + str(
                        i)] = pos_team_1[i][1]
                    info["draw_distance_traveled_team_1_agent_" + str(
                        i)] = self.distance_traveled_team_1[i]
                    info["draw_attack_actions_team_1_agent_" + str(
                        i)] = self.attack_actions_team_1[i]
                    info["draw_move_actions_team_1_agent_" + str(
                        i)] = self.move_actions_team_1[i]
                    info["draw_stop_actions_team_1_agent_" + str(
                        i)] = self.stop_actions_team_1[i]
                    info[
                        "draw_once_in_shoot_range_opponent_1_team_1_agent_" + str(
                            i)] = self.once_in_shoot_range_opponent_team_1[i][
                        0]
                    info[
                        "draw_once_in_shoot_range_opponent_2_team_1_agent_" + str(
                            i)] = self.once_in_shoot_range_opponent_team_1[i][
                        1]
                    info[
                        "draw_once_in_shoot_range_opponent_3_team_1_agent_" + str(
                            i)] = self.once_in_shoot_range_opponent_team_1[i][
                        2]
                    info[
                        "draw_once_in_sight_range_opponent_1_team_1_agent_" + str(
                            i)] = self.once_in_sight_range_opponent_team_1[i][
                        0]
                    info[
                        "draw_once_in_sight_range_opponent_2_team_1_agent_" + str(
                            i)] = self.once_in_sight_range_opponent_team_1[i][
                        1]
                    info[
                        "draw_once_in_sight_range_opponent_3_team_1_agent_" + str(
                            i)] = self.once_in_sight_range_opponent_team_1[i][
                        2]
                    info["draw_move_in_sight_range_team_1_agent_" + str(i)] = \
                        self.move_in_sight_range_team1[i]
                    info[
                        "draw_move_toward_in_sight_range_1_team_1_agent_" + str(
                            i)] = self.move_toward_in_sight_range_team1[i][0]
                    info[
                        "draw_move_toward_in_sight_range_2_team_1_agent_" + str(
                            i)] = self.move_toward_in_sight_range_team1[i][1]
                    info[
                        "draw_move_toward_in_sight_range_3_team_1_agent_" + str(
                            i)] = self.move_toward_in_sight_range_team1[i][2]
                    info["draw_move_away_in_sight_range_1_team_1_agent_" + str(
                        i)] = self.move_away_in_sight_range_team1[i][0]
                    info["draw_move_away_in_sight_range_2_team_1_agent_" + str(
                        i)] = self.move_away_in_sight_range_team1[i][1]
                    info["draw_move_away_in_sight_range_3_team_1_agent_" + str(
                        i)] = self.move_away_in_sight_range_team1[i][2]
                    info["draw_move_in_shoot_range_team_1_agent_" + str(i)] = \
                        self.move_in_shoot_range_team1[i]
                    info[
                        "draw_move_toward_in_shoot_range_1_team_1_agent_" + str(
                            i)] = self.move_toward_in_shoot_range_team1[i][0]
                    info[
                        "draw_move_toward_in_shoot_range_2_team_1_agent_" + str(
                            i)] = self.move_toward_in_shoot_range_team1[i][1]
                    info[
                        "draw_move_toward_in_shoot_range_3_team_1_agent_" + str(
                            i)] = self.move_toward_in_shoot_range_team1[i][2]
                    info["draw_move_away_in_shoot_range_1_team_1_agent_" + str(
                        i)] = self.move_away_in_shoot_range_team1[i][0]
                    info["draw_move_away_in_shoot_range_2_team_1_agent_" + str(
                        i)] = self.move_away_in_shoot_range_team1[i][1]
                    info["draw_move_away_in_shoot_range_3_team_1_agent_" + str(
                        i)] = self.move_away_in_shoot_range_team1[i][2]
                for i in range(self.n_enemies):
                    unit = self.get_unit_by_id(self.n_agents + i)
                    info["draw_health_team_2_agent_" + str(
                        i)] = unit.health / unit.health_max
                    info["draw_position_x_team_2_agent_" + str(
                        i)] = pos_team_2[i][0]
                    info["draw_position_y_team_2_agent_" + str(
                        i)] = pos_team_2[i][1]
                    info["draw_distance_traveled_team_2_agent_" + str(
                        i)] = self.distance_traveled_team_2[i]
                    info["draw_attack_actions_team_2_agent_" + str(
                        i)] = self.attack_actions_team_2[i]
                    info["draw_move_actions_team_2_agent_" + str(
                        i)] = self.move_actions_team_2[i]
                    info["draw_stop_actions_team_2_agent_" + str(
                        i)] = self.stop_actions_team_2[i]
                    info[
                        "draw_once_in_shoot_range_opponent_1_team_2_agent_" + str(
                            i)] = self.once_in_shoot_range_opponent_team_2[i][
                        0]
                    info[
                        "draw_once_in_shoot_range_opponent_2_team_2_agent_" + str(
                            i)] = self.once_in_shoot_range_opponent_team_2[i][
                        1]
                    info[
                        "draw_once_in_shoot_range_opponent_3_team_2_agent_" + str(
                            i)] = self.once_in_shoot_range_opponent_team_2[i][
                        2]
                    info[
                        "draw_once_in_sight_range_opponent_1_team_2_agent_" + str(
                            i)] = self.once_in_sight_range_opponent_team_2[i][
                        0]
                    info[
                        "draw_once_in_sight_range_opponent_2_team_2_agent_" + str(
                            i)] = self.once_in_sight_range_opponent_team_2[i][
                        1]
                    info[
                        "draw_once_in_sight_range_opponent_3_team_2_agent_" + str(
                            i)] = self.once_in_sight_range_opponent_team_2[i][
                        2]
                    info["draw_move_in_sight_range_team_2_agent_" + str(i)] = \
                        self.move_in_sight_range_team2[i]
                    info[
                        "draw_move_toward_in_sight_range_1_team_2_agent_" + str(
                            i)] = self.move_toward_in_sight_range_team2[i][0]
                    info[
                        "draw_move_toward_in_sight_range_2_team_2_agent_" + str(
                            i)] = self.move_toward_in_sight_range_team2[i][1]
                    info[
                        "draw_move_toward_in_sight_range_3_team_2_agent_" + str(
                            i)] = self.move_toward_in_sight_range_team2[i][2]
                    info["draw_move_away_in_sight_range_1_team_2_agent_" + str(
                        i)] = self.move_away_in_sight_range_team2[i][0]
                    info["draw_move_away_in_sight_range_2_team_2_agent_" + str(
                        i)] = self.move_away_in_sight_range_team2[i][1]
                    info["draw_move_away_in_sight_range_3_team_2_agent_" + str(
                        i)] = self.move_away_in_sight_range_team2[i][2]
                    info["draw_move_in_shoot_range_team_2_agent_" + str(i)] = \
                        self.move_in_shoot_range_team2[i]
                    info[
                        "draw_move_toward_in_shoot_range_1_team_2_agent_" + str(
                            i)] = self.move_toward_in_shoot_range_team2[i][0]
                    info[
                        "draw_move_toward_in_shoot_range_2_team_2_agent_" + str(
                            i)] = self.move_toward_in_shoot_range_team2[i][1]
                    info[
                        "draw_move_toward_in_shoot_range_3_team_2_agent_" + str(
                            i)] = self.move_toward_in_shoot_range_team2[i][2]
                    info["draw_move_away_in_shoot_range_1_team_2_agent_" + str(
                        i)] = self.move_away_in_shoot_range_team2[i][0]
                    info["draw_move_away_in_shoot_range_2_team_2_agent_" + str(
                        i)] = self.move_away_in_shoot_range_team2[i][1]
                    info["draw_move_away_in_shoot_range_3_team_2_agent_" + str(
                        i)] = self.move_away_in_shoot_range_team2[i][2]
        if self.debug:
            logging.debug("Reward = {}".format(reward).center(60, '-'))
        if terminated:
            self._episode_count += 1
        # Scale rewards into a fixed range before accumulating per-team sums.
        if self.reward_scale:
            reward /= self.max_reward / self.reward_scale_rate
        self.sum_rewards_team1 += reward[0]
        self.sum_rewards_team2 += reward[1]
        if self.log_more_stats and terminated:
            # Custom win/loss/draw decided by accumulated reward comparison.
            info["battle_won_custom_team_1"] = 0
            info["battle_won_custom_team_2"] = 0
            info["battle_loss_custom_team_1"] = 0
            info["battle_loss_custom_team_2"] = 0
            info["battle_draw_custom_team_1"] = 0
            info["battle_draw_custom_team_2"] = 0
            if self.sum_rewards_team1 > self.sum_rewards_team2:
                info["battle_won_custom_team_1"] = 1
                info["battle_loss_custom_team_2"] = 1
            elif self.sum_rewards_team1 < self.sum_rewards_team2:
                info["battle_won_custom_team_2"] = 1
                info["battle_loss_custom_team_1"] = 1
            else:
                info["battle_draw_custom_team_1"] = 1
                info["battle_draw_custom_team_2"] = 1
            info["total_reward_custom_team_1"] = self.sum_rewards_team1
            info["total_reward_custom_team_2"] = self.sum_rewards_team2
        # Broadcast each team's scalar reward to every unit of that team.
        reward_all = []
        for _ in range(self.n_agents):
            reward_all.append(reward[0])
        for _ in range(self.n_enemies):
            reward_all.append(reward[1])
        return reward_all, terminated, info
def get_agent_action(self, a_id, action):
if action <= 5:
return super().get_agent_action(a_id, action)
else:
avail_actions = self.get_avail_agent_actions(a_id)
assert avail_actions[action] == 1, \
"Agent {} cannot perform action {}".format(a_id, action)
unit = self.get_unit_by_id(a_id)
tag = unit.tag
ally = a_id < self.n_agents
# attack/heal units that are in range
if ally:
target_id = action - self.n_actions_no_attack
if self.map_type == "MMM" and unit.unit_type == self.medivac_id:
target_unit = self.agents[target_id]
action_name = "heal"
else:
target_unit = self.enemies[target_id]
action_name = "attack"
else:
target_id = action - self.n_actions_no_attack
if self.map_type == "MMM" and unit.unit_type == self.medivac_id:
target_unit = self.enemies[target_id]
action_name = "heal"
else:
target_unit = self.agents[target_id]
action_name = "attack"
action_id = actions_api[action_name]
target_tag = target_unit.tag
cmd = r_pb.ActionRawUnitCommand(
ability_id=action_id,
target_unit_tag=target_tag,
unit_tags=[tag],
queue_command=False)
if self.debug:
logging.debug("Agent {} {}s unit # {}".format(
a_id, action_name, target_id))
sc_action = sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd))
return sc_action
def get_avail_actions(self):
"""Returns the available actions of all agents in a list."""
avail_actions = []
for agent_id in range(self.n_agents + self.n_enemies):
avail_agent = self.get_avail_agent_actions(agent_id)
avail_actions.append(avail_agent)
return avail_actions
def get_heuristic_action(self, a_id):
""" Returns the action (not a sc2 action)"""
agent_avail_actions = self.get_avail_agent_actions(a_id)
# "Ally"
if a_id < self.n_agents:
unit = self.get_unit_by_id(a_id)
# check if can attack, if yes, attack the closest
if unit.health > 0 \
and sum(
agent_avail_actions[
self.n_actions_no_attack: self.n_actions]) > 0:
own_x = unit.pos.x
own_y = unit.pos.y
# find the closest
distance = []
for e_id, e_unit in self.enemies.items():
if agent_avail_actions[self.n_actions_no_attack + e_id]:
distance.append(
self.distance(own_x, own_y, e_unit.pos.x,
e_unit.pos.y))
else:
distance.append(float('Inf'))
return self.n_actions_no_attack + distance.index(min(distance))
else:
# If no one in range, go to right or stop.
if agent_avail_actions[4]:
return 4
elif agent_avail_actions[1]:
return 1
else:
return 0
# "Ennemy"
else:
unit = self.get_unit_by_id(a_id)
own_x = unit.pos.x
own_y = unit.pos.y
# check if can attack, if yes, attack the closest
if unit.health > 0 \
and sum(
agent_avail_actions[
self.n_actions_no_attack: self.n_actions]) > 0:
# find the closest
distance = []
for a_id, a_unit in self.agents.items():
if agent_avail_actions[self.n_actions_no_attack + a_id]:
distance.append(
self.distance(own_x, own_y, a_unit.pos.x,
a_unit.pos.y))
else:
distance.append(float('Inf'))
return self.n_actions_no_attack + distance.index(
min(distance))
else:
# If no one in range, go to right or stop.
if agent_avail_actions[5]:
return 5
elif agent_avail_actions[1]:
return 1
else:
return 0
def reward_battle(self):
"""Reward function when self.reward_spare==False.
Returns accumulative hit/shield point damage dealt to the enemy
+ reward_death_value per enemy unit killed, and, in case
self.reward_only_positive == False, - (damage dealt to ally units
+ reward_death_value per ally unit killed) * self.reward_negative_scale
"""
if self.reward_sparse:
return 0
reward = []
delta_deaths_ally = 0 # reward for dead ally
delta_deaths_enemy = 0 # reward for dead enemy
delta_ally = 0 # reward for damage taken
delta_enemy = 0 # reward for damage dealt
neg_scale = self.reward_negative_scale
# update deaths
for al_id, al_unit in self.agents.items():
if not self.death_tracker_ally[al_id]:
# did not die so far
prev_health = (
self.previous_ally_units[al_id].health
+ self.previous_ally_units[al_id].shield
)
if al_unit.health == 0:
# just died
self.death_tracker_ally[al_id] = 1
if not self.reward_only_positive:
delta_deaths_ally -= self.reward_death_value * neg_scale
delta_deaths_enemy += self.reward_death_value
delta_ally += prev_health * neg_scale
else:
# still alive
delta_ally += neg_scale * (
prev_health - al_unit.health - al_unit.shield
)
for e_id, e_unit in self.enemies.items():
if not self.death_tracker_enemy[e_id]:
prev_health = (
self.previous_enemy_units[e_id].health
+ self.previous_enemy_units[e_id].shield
)
if e_unit.health == 0:
self.death_tracker_enemy[e_id] = 1
if not self.reward_only_positive:
delta_deaths_enemy -= self.reward_death_value * neg_scale
delta_deaths_ally += self.reward_death_value
delta_enemy += prev_health * neg_scale
else:
delta_enemy += neg_scale * (
prev_health - e_unit.health - e_unit.shield)
if self.reward_only_positive:
reward.append(
abs(delta_enemy + delta_deaths_ally)) # shield regeneration
reward.append(abs(delta_ally + delta_deaths_enemy))
else:
reward.append(delta_enemy + delta_deaths_ally - delta_ally)
reward.append(delta_ally + delta_deaths_enemy - delta_enemy)
return np.array(reward)
def get_obs_agent(self, agent_id):
    """Return the local observation vector for unit *agent_id*.

    *agent_id* indexes team-1 units in [0, n_agents) followed by team-2
    units in [n_agents, n_agents + n_enemies).  For a team-2 observer
    the "enemy" features describe team 1 and vice versa.

    Layout of the returned flat float32 vector:
    move feats | enemy feats | ally feats | own feats
    (+ optional normalised timestep).  A dead unit observes all zeros.

    NOTE: Agents should have access only to their local observations
    during decentralised execution.
    NOTE2: here, enemy_feat represent the enemies of agent_id
    and not the self.enemies
    NOTE(review): enemy_feats/ally_feats are sized with n_enemies and
    n_agents - 1 regardless of which team observes; this assumes
    symmetric teams (n_agents == n_enemies) -- confirm for asymmetric maps.
    """
    unit = self.get_unit_by_id(agent_id)
    ally_unit = agent_id < self.n_agents  # True => observer is on team 1
    # index of the observer inside its own team's unit list
    own_list_id = agent_id if ally_unit else agent_id - self.n_agents

    # per-unit feature widths: attackable/visible, distance, rel x, rel y
    nf_al = 4 + self.unit_type_bits
    nf_en = 4 + self.unit_type_bits

    if self.obs_all_health:
        nf_al += 1 + self.shield_bits_ally
        nf_en += 1 + self.shield_bits_enemy

    if self.obs_last_action:
        nf_al += self.n_actions

    nf_own = self.unit_type_bits
    if self.obs_own_health:
        if ally_unit:
            nf_own += 1 + self.shield_bits_ally
        else:
            nf_own += 1 + self.shield_bits_enemy
    if self.obs_bool_team:
        # One hot encoding of the "team id"
        nf_own += 2
    if self.obs_own_position:
        nf_own += 2

    move_feats_len = self.n_actions_move
    if self.obs_pathing_grid:
        move_feats_len += self.n_obs_pathing
    if self.obs_terrain_height:
        move_feats_len += self.n_obs_height

    move_feats = np.zeros(move_feats_len, dtype=np.float32)
    enemy_feats = np.zeros((self.n_enemies, nf_en), dtype=np.float32)
    ally_feats = np.zeros((self.n_agents - 1, nf_al), dtype=np.float32)
    own_feats = np.zeros(nf_own, dtype=np.float32)

    if unit.health > 0:  # otherwise dead, return all zeros
        x = unit.pos.x
        y = unit.pos.y
        sight_range = self.unit_sight_range(agent_id)

        # Movement features: availability of the 4 move actions (ids 2-5)
        avail_actions = self.get_avail_agent_actions(agent_id)
        for m in range(self.n_actions_move):
            move_feats[m] = avail_actions[m + 2]

        ind = self.n_actions_move

        if self.obs_pathing_grid:
            move_feats[
                ind: ind + self.n_obs_pathing
            ] = self.get_surrounding_pathing(unit)
            ind += self.n_obs_pathing

        if self.obs_terrain_height:
            move_feats[ind:] = self.get_surrounding_height(unit)

        # Global unit ids of the opposing team (en_ids) and of the
        # observer's teammates (al_ids); the observer excludes itself.
        en_ids = [
            self.n_agents + en_id for en_id in range(self.n_enemies)
            if ally_unit or (not ally_unit and en_id != own_list_id)
        ]
        al_ids = [
            al_id for al_id in range(self.n_agents)
            if not ally_unit or (ally_unit and al_id != agent_id)
        ]
        if not ally_unit:
            # swap so that "enemies" are the opposing team of the observer
            al_ids, en_ids = en_ids, al_ids

        # Enemy features
        for e_id, en_id in enumerate(en_ids):
            e_unit = self.get_unit_by_id(en_id)
            e_x = e_unit.pos.x
            e_y = e_unit.pos.y
            dist = self.distance(x, y, e_x, e_y)

            if dist < sight_range and e_unit.health > 0:
                # visible and alive
                # Sight range > shoot range
                enemy_feats[e_id, 0] \
                    = avail_actions[self.n_actions_no_attack + e_id]
                # distance
                enemy_feats[e_id, 1] = dist / sight_range
                # relative X
                enemy_feats[e_id, 2] = (e_x - x) / sight_range
                # relative Y
                enemy_feats[e_id, 3] = (e_y - y) / sight_range
                ind = 4
                if self.obs_all_health:
                    # health
                    enemy_feats[e_id, ind] \
                        = e_unit.health / e_unit.health_max
                    ind += 1
                    if self.shield_bits_enemy > 0:
                        max_shield = self.unit_max_shield(e_unit)
                        # shield
                        enemy_feats[e_id, ind] = e_unit.shield / max_shield
                        ind += 1

                if self.unit_type_bits > 0:
                    type_id = self.get_unit_type_id(e_unit, False)
                    enemy_feats[e_id, ind + type_id] = 1  # unit type
                    ind += self.unit_type_bits

        # Ally features
        for i, al_id in enumerate(al_ids):
            al_unit = self.get_unit_by_id(al_id)
            al_x = al_unit.pos.x
            al_y = al_unit.pos.y
            dist = self.distance(x, y, al_x, al_y)

            if dist < sight_range and al_unit.health > 0:
                # if visible and alive
                ally_feats[i, 0] = 1
                ally_feats[i, 1] = dist / sight_range  # distance
                ally_feats[i, 2] = (al_x - x) / sight_range  # relative X
                ally_feats[i, 3] = (al_y - y) / sight_range  # relative Y
                ind = 4
                if self.obs_all_health:
                    # health
                    ally_feats[i, ind] \
                        = al_unit.health / al_unit.health_max
                    ind += 1
                    if self.shield_bits_ally > 0:
                        # shield
                        max_shield = self.unit_max_shield(al_unit)
                        ally_feats[i, ind] = al_unit.shield / max_shield
                        ind += 1

                if self.unit_type_bits > 0:
                    type_id = self.get_unit_type_id(al_unit, True)
                    ally_feats[i, ind + type_id] = 1
                    ind += self.unit_type_bits

                if self.obs_last_action:
                    ally_feats[i, ind:] = self.last_action[al_id]

        # Own features
        ind = 0
        if self.obs_own_health:
            own_feats[ind] = unit.health / unit.health_max
            ind += 1
            if self.shield_bits_ally > 0:
                max_shield = self.unit_max_shield(unit)
                own_feats[ind] = unit.shield / max_shield
                ind += 1
        if self.unit_type_bits > 0:
            type_id = self.get_unit_type_id(unit, True)
            own_feats[ind + type_id] = 1
            ind += 1
        if self.obs_bool_team:
            # one-hot team flag: slot 0 for team 1, slot 1 for team 2
            if ally_unit:
                own_feats[ind] = 1
            else:
                own_feats[ind + 1] = 1
            ind += 2
        if self.obs_own_position:
            # position relative to the map centre, normalised
            # relative X
            own_feats[ind] = (x - (self.map_x / 2)) \
                / self.max_distance_x
            # relative Y
            own_feats[ind + 1] = (y - (self.map_y / 2)) \
                / self.max_distance_y
            ind += 2

    agent_obs = np.concatenate(
        (
            move_feats.flatten(),
            enemy_feats.flatten(),
            ally_feats.flatten(),
            own_feats.flatten(),
        )
    )

    if self.obs_timestep_number:
        agent_obs = np.append(agent_obs,
                              self._episode_steps / self.episode_limit)

    if self.debug:
        logging.debug("Obs Agent: {}".format(agent_id).center(60, "-"))
        logging.debug("Avail. actions {}".format(
            self.get_avail_agent_actions(agent_id)))
        logging.debug("Move feats {}".format(move_feats))
        logging.debug("Enemy feats {}".format(enemy_feats))
        logging.debug("Ally feats {}".format(ally_feats))
        logging.debug("Own feats {}".format(own_feats))

    return agent_obs
def get_obs(self):
    """Return the local observation of every unit, allies then enemies.

    NOTE: during decentralised execution each agent may only use its
    own entry of this list.
    """
    total_units = self.n_agents + self.n_enemies
    observations = []
    for unit_id in range(total_units):
        observations.append(self.get_obs_agent(unit_id))
    return observations
def get_state(self):
    """Return the global state as a pair of flat float32 arrays.

    Returns (state, state_enemy): the same information laid out from
    team 1's perspective (allies first) and from team 2's perspective
    (enemies first).  Per unit: health, energy-or-cooldown, relative
    x/y to the map centre, optional shield and one-hot unit type.

    NOTE: This function should not be used during decentralised
    execution.
    """
    if self.obs_instead_of_state:
        # State is simply the concatenation of all agent observations.
        obs_concat = np.concatenate(self.get_obs(), axis=0).astype(
            np.float32
        )
        return obs_concat

    # per-unit feature widths for each team
    nf_al = 4 + self.shield_bits_ally + self.unit_type_bits
    nf_en = 4 + self.shield_bits_enemy + self.unit_type_bits

    ally_state = np.zeros((self.n_agents, nf_al))
    enemy_state = np.zeros((self.n_enemies, nf_en))

    center_x = self.map_x / 2
    center_y = self.map_y / 2

    for al_id, al_unit in self.agents.items():
        if al_unit.health > 0:
            x = al_unit.pos.x
            y = al_unit.pos.y
            max_cd = self.unit_max_cooldown(al_unit)

            # health
            ally_state[al_id, 0] = (al_unit.health / al_unit.health_max)
            if (self.map_type == "MMM"
                    and al_unit.unit_type == self.medivac_id):
                # medivacs spend energy to heal instead of a weapon cooldown
                # energy
                ally_state[al_id, 1] = al_unit.energy / max_cd
            else:
                # cooldown
                ally_state[al_id, 1] = (al_unit.weapon_cooldown / max_cd)
            # relative X
            ally_state[al_id, 2] = (x - center_x) / self.max_distance_x
            # relative Y
            ally_state[al_id, 3] = (y - center_y) / self.max_distance_y

            ind = 4
            if self.shield_bits_ally > 0:
                max_shield = self.unit_max_shield(al_unit)
                # shield
                ally_state[al_id, ind] = (al_unit.shield / max_shield)
                ind += 1

            if self.unit_type_bits > 0:
                type_id = self.get_unit_type_id(al_unit, True)
                ally_state[al_id, ind + type_id] = 1

    for e_id, e_unit in self.enemies.items():
        if e_unit.health > 0:
            x = e_unit.pos.x
            y = e_unit.pos.y
            max_cd = self.unit_max_cooldown(e_unit)

            enemy_state[e_id, 0] = (e_unit.health / e_unit.health_max)
            if (self.map_type == "MMM"
                    and e_unit.unit_type == self.medivac_id):
                # energy
                enemy_state[e_id, 1] = e_unit.energy / max_cd
            else:
                # cooldown
                enemy_state[e_id, 1] = (e_unit.weapon_cooldown / max_cd)
            # relative X
            enemy_state[e_id, 2] = (x - center_x) / self.max_distance_x
            # relative Y
            enemy_state[e_id, 3] = (y - center_y) / self.max_distance_y

            ind = 4
            if self.shield_bits_enemy > 0:
                max_shield = self.unit_max_shield(e_unit)
                # shield
                enemy_state[e_id, ind] = (e_unit.shield / max_shield)
                ind += 1

            if self.unit_type_bits > 0:
                type_id = self.get_unit_type_id(e_unit, False)
                enemy_state[e_id, ind + type_id] = 1

    state = np.append(ally_state.flatten(), enemy_state.flatten())
    # team 2's view: same content with the team blocks swapped
    state_enemy = np.append(enemy_state.flatten(), ally_state.flatten())

    if self.state_last_action:
        state = np.append(state, self.last_action.flatten())
        # swap the per-team halves of last_action for team 2's view
        last_action_inverse = np.concatenate(
            (self.last_action[self.n_agents:, :],
             self.last_action[:self.n_agents, :])).flatten()
        state_enemy = np.append(state_enemy, last_action_inverse.flatten())
    if self.state_timestep_number:
        state = np.append(state,
                          self._episode_steps / self.episode_limit)
        state_enemy = np.append(state_enemy,
                                self._episode_steps / self.episode_limit)
    state = state.astype(dtype=np.float32)
    state_enemy = state_enemy.astype(dtype=np.float32)

    if self.debug:
        logging.debug("STATE".center(60, "-"))
        logging.debug("Ally state {}".format(ally_state))
        logging.debug("Enemy state {}".format(enemy_state))
        if self.state_last_action:
            logging.debug("Last actions {}".format(self.last_action))

    return state, state_enemy
def get_unit_type_id(self, unit, ally):
    """Return the scenario-local type index of *unit*.

    The raw SC2 unit_type is offset by the smallest unit type in the
    scenario so ids start at 0.  The *ally* flag is accepted for
    interface compatibility and does not affect the result.
    """
    offset = self._min_unit_type
    return unit.unit_type - offset
def get_obs_size(self):
    """Return observation sizes as (ally_obs_size, enemy_obs_size).

    Due to unit_type_bits and shield bits, a team-2 observer's vector
    can differ in size from a team-1 observer's: each observer sees
    the opposing team with that team's feature width and its own team
    (minus itself) with its team's feature width.
    """
    # per-unit feature widths (see get_obs_agent)
    nf_al = 4 + self.unit_type_bits
    nf_en = 4 + self.unit_type_bits

    if self.obs_all_health:
        nf_al += 1 + self.shield_bits_ally
        nf_en += 1 + self.shield_bits_enemy

    own_feats = self.unit_type_bits
    if self.obs_own_health:
        own_feats += 1 + self.shield_bits_ally
    if self.obs_timestep_number:
        own_feats += 1
    if self.obs_bool_team:
        own_feats += 2
    if self.obs_own_position:
        own_feats += 2

    # BUG FIX: last_action_feats was previously assigned only inside
    # "if self.obs_last_action:", but it is always read below, raising
    # NameError whenever obs_last_action is False.  Default it to 0.
    last_action_feats = self.n_actions if self.obs_last_action else 0

    move_feats = self.n_actions_move
    if self.obs_pathing_grid:
        move_feats += self.n_obs_pathing
    if self.obs_terrain_height:
        move_feats += self.n_obs_height

    enemy_feats = self.n_enemies * nf_en
    ally_feats = self.n_agents * nf_al

    # base count with a full row per unit; each return value then drops
    # the observer's own row and adds teammates' last-action features
    size_for_all = move_feats + enemy_feats + ally_feats + own_feats
    return size_for_all - nf_al + (self.n_agents - 1) * last_action_feats, \
        size_for_all - nf_en + (self.n_enemies - 1) * last_action_feats
def get_state_size(self):
    """Return the size of the flat global state vector."""
    if self.obs_instead_of_state:
        # State is the concatenation of all agent observations.
        return self.get_obs_size() * self.n_agents

    # per-unit feature widths for each team
    per_ally = 4 + self.shield_bits_ally + self.unit_type_bits
    per_enemy = 4 + self.shield_bits_enemy + self.unit_type_bits

    size = self.n_agents * per_ally + self.n_enemies * per_enemy
    if self.state_last_action:
        # one one-hot action row per unit of both teams
        size += (self.n_agents + self.n_enemies) * self.n_actions
    if self.state_timestep_number:
        size += 1
    return size
def get_avail_agent_actions(self, agent_id):
    """Return the action-availability mask for *agent_id*.

    Mask layout: index 0 = no-op (only when dead), 1 = stop,
    2-5 = move north/south/east/west, and
    n_actions_no_attack + t = attack (heal for medivacs) target t of
    the opposing team.
    """
    unit = self.get_unit_by_id(agent_id)
    if unit.health > 0:
        # cannot choose no-op when alive
        avail_actions = [0] * self.n_actions

        # stop should be allowed
        avail_actions[1] = 1

        # see if we can move
        if self.can_move(unit, Direction.NORTH):
            avail_actions[2] = 1
        if self.can_move(unit, Direction.SOUTH):
            avail_actions[3] = 1
        if self.can_move(unit, Direction.EAST):
            avail_actions[4] = 1
        if self.can_move(unit, Direction.WEST):
            avail_actions[5] = 1

        # Can attack only alive units that are alive in the shooting range
        shoot_range = self.unit_shoot_range(agent_id)

        # targets are the opposing team of the acting unit
        if agent_id < self.n_agents:
            target_items = self.enemies.items()
        else:
            target_items = self.agents.items()
        if self.map_type == "MMM" and unit.unit_type == self.medivac_id:
            # Medivacs cannot heal themselves or other flying units
            # NOTE(review): heal targets are always drawn from
            # self.agents here, even when the medivac belongs to team 2
            # (agent_id >= n_agents) -- confirm this is intended.
            target_items = [
                (t_id, t_unit)
                for (t_id, t_unit) in self.agents.items()
                if t_unit.unit_type != self.medivac_id
            ]

        for t_id, t_unit in target_items:
            if t_unit.health > 0:
                dist = self.distance(
                    unit.pos.x, unit.pos.y, t_unit.pos.x, t_unit.pos.y
                )
                if dist <= shoot_range:
                    avail_actions[t_id + self.n_actions_no_attack] = 1

        return avail_actions

    else:
        # only no-op allowed
        return [1] + [0] * (self.n_actions - 1)
def close(self):
    """Close StarCraft II by terminating every launched SC2 process."""
    procs = self._sc2_proc
    if not procs:
        return
    # _sc2_proc is either a single process handle or a list of them
    targets = procs if type(procs) is list else [procs]
    for proc in targets:
        proc.close()
def _kill_all_units(self):
    """Issue a debug command killing every living unit on the map."""
    living_tags = [u.tag for u in self.agents.values() if u.health > 0]
    living_tags += [u.tag for u in self.enemies.values() if u.health > 0]
    command = d_pb.DebugCommand(
        kill_unit=d_pb.DebugKillUnit(tag=living_tags)
    )
    # a single controller request suffices; use the first controller
    self._controller[0].debug([command])
def init_units(self):
    """Initialise self.agents / self.enemies from the SC2 observations.

    Loops until SC2 reports the expected number of units per team,
    stepping the simulation once per retry.  On the first episode it
    also accumulates max_reward / max_reward_p2 (total health + shield
    of each team) and derives the minimum unit type id.
    """
    while True:
        # Sometimes not all units have yet been created by SC2
        self.agents = {}
        self.enemies = {}

        # team 1 units come from player 1's observation
        ally_units = []
        for unit in self._obs[0].observation.raw_data.units:
            if unit.owner == 1:
                ally_units.append(unit)
                if self._episode_count == 0:
                    # team 2's max attainable reward grows with every
                    # team-1 unit it could destroy
                    self.max_reward_p2 += unit.health_max + unit.shield_max
        # sort for a deterministic unit-id assignment across episodes
        ally_units_sorted = sorted(
            ally_units,
            key=attrgetter("unit_type", "pos.x", "pos.y"),
            reverse=False,
        )

        for i in range(len(ally_units_sorted)):
            self.agents[i] = ally_units_sorted[i]
            if self.debug:
                # NOTE(review): logs len(self.agents) (the running
                # count) rather than the unit id i -- confirm intended.
                logging.debug(
                    "Unit {} is {}, x = {}, y = {}".format(
                        len(self.agents),
                        self.agents[i].unit_type,
                        self.agents[i].pos.x,
                        self.agents[i].pos.y,
                    )
                )

        # team 2 units come from player 2's observation
        enemy_units = []
        for unit in self._obs[1].observation.raw_data.units:
            if unit.owner == 2:
                enemy_units.append(unit)
                if self._episode_count == 0:
                    self.max_reward += unit.health_max + unit.shield_max
        enemy_units_sorted = sorted(
            enemy_units,
            key=attrgetter("unit_type", "pos.x", "pos.y"),
            reverse=False,
        )

        for i in range(len(enemy_units_sorted)):
            self.enemies[i] = enemy_units_sorted[i]
            if self.debug:
                logging.debug(
                    "Enemy unit {} is {}, x = {}, y = {}".format(
                        len(self.enemies),
                        self.enemies[i].unit_type,
                        self.enemies[i].pos.x,
                        self.enemies[i].pos.y,
                    )
                )

        if self._episode_count == 0:
            # derive the smallest unit type across both teams so unit
            # type ids can be normalised to start at 0
            all_agent = []
            all_agent += self.agents.values()
            all_agent += self.enemies.values()
            min_unit_type = min(
                unit.unit_type
                for unit
                in all_agent
            )
            self._init_ally_unit_types(min_unit_type)

        all_agents_created = (len(self.agents) == self.n_agents)
        all_enemies_created = (len(self.enemies) == self.n_enemies)

        if all_agents_created and all_enemies_created:  # all good
            return

        # not all units created yet: advance the sim one step and retry
        try:
            # NOTE(review): elsewhere self._controller is indexed as a
            # plain list of controllers (self._controller[0]); unpacking
            # "idx_, controller_" here would fail unless it stores
            # (index, controller) pairs -- possibly should be
            # enumerate(self._controller).  Confirm.
            for idx_, controller_ in self._controller:
                controller_.step(1)
                self._obs[idx_] = controller_.observe()
        except (protocol.ProtocolError, protocol.ConnectionError):
            self.full_restart()
            self.reset()
def update_units(self):
    """Refresh unit references after an environment step.

    Assumes self._obs is up-to-date.  Units that are no longer
    reported by SC2 are marked dead (health = 0).

    Returns:
        1 if team 1 won, -1 if it lost, 0 if both teams were wiped
        out, None while the battle continues.
    """
    # Snapshot the previous tick for reward computation.
    self.previous_ally_units = deepcopy(self.agents)
    self.previous_enemy_units = deepcopy(self.enemies)

    def _refresh(tracked, observed_units):
        # Swap stale unit protos for freshly observed ones; count alive.
        alive = 0
        for unit_id, old_unit in tracked.items():
            replacement = None
            for obs_unit in observed_units:
                if obs_unit.tag == old_unit.tag:
                    replacement = obs_unit
                    break
            if replacement is None:
                old_unit.health = 0  # no longer reported -> dead
            else:
                tracked[unit_id] = replacement
                alive += 1
        return alive

    n_ally_alive = _refresh(
        self.agents, self._obs[0].observation.raw_data.units)
    n_enemy_alive = _refresh(
        self.enemies, self._obs[1].observation.raw_data.units)

    if (n_ally_alive == 0 and n_enemy_alive > 0
            or self.only_medivac_left(ally=True)):
        return -1  # lost
    if (n_ally_alive > 0 and n_enemy_alive == 0
            or self.only_medivac_left(ally=False)):
        return 1  # won
    if n_ally_alive == 0 and n_enemy_alive == 0:
        return 0

    return None
def get_unit_by_id(self, a_id):
    """Return the unit for a global id (team 1 first, then team 2)."""
    if a_id >= self.n_agents:
        # ids past the ally range index into the enemy table
        return self.enemies[a_id - self.n_agents]
    return self.agents[a_id]
def get_env_info(self):
    """Extend the base environment info dict with the enemy count."""
    info = super().get_env_info()
    info["n_enemies"] = self.n_enemies
    return info
def get_stats(self):
    """Return cumulative match statistics as a plain dict."""
    stats = dict(
        won_team_1=self.battles_won_team_1,
        won_team_2=self.battles_won_team_2,
        battles_draw=self.timeouts,  # draws are counted as timeouts
        battles_game=self.battles_game,
        timeouts=self.timeouts,
        restarts=self.force_restarts,
        action_error=self.action_error,
    )
    return stats
|
ossf-cve-benchmark/CVE-2018-8035
|
src/main/admin/db_util.py
|
<gh_stars>0
#!/usr/bin/python
# -----------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -----------------------------------------------------------------------
#!/usr/bin/env python
import os
# common routines for ducc_post_install and db_create
def addToCp(cp, lib):
    # Append a path element to a colon-separated classpath string.
    return ':'.join([cp, lib])
def execute(CMD):
    # Echo the shell command, run it via os.system and return the exit status.
    print CMD
    return os.system(CMD)
# --------------------------------------------------------------------------------
# these next methods are used to parse a table returned from cqlsh into a
# - header
# - dictionary of values for each row
# parse the header into a list of names
def parse_header(header):
    # Split a cqlsh header line on '|' and strip each column name.
    return [piece.strip() for piece in header.split('|')]
# parse a single line into a dictionary with key from the header and value from the line
def parse_line(header, line):
parts = line.split('|')
ret = {}
for k, v in zip(header, parts):
ret[k] = v.strip()
return ret
# parse a set of lines returned from cqlsh into a header and a list of dictionaries, one per line
# header_id is a sting we use to positively identify a header line
def parse(lines, header_id):
    # Parse cqlsh table output into (header, rows): header is a list of
    # column names, rows a list of per-line dicts.  header_id is a string
    # that positively identifies the header line.
    rows = []
    header = []
    for raw in lines:
        stripped = raw.strip()
        if stripped == '':
            continue
        if '---' in stripped:
            continue  # separator row
        if 'rows)' in stripped:
            continue  # trailing "(N rows)" summary
        if header_id in stripped:
            header = parse_header(stripped)
            continue
        rows.append(parse_line(header, stripped))
    return header, rows
# given a header and a collection of lines parsed by the utilities above, print a
# mostly un-ugly listing of the table retults
def format(header, lines):
# calculate max column widths
hlens = {}
for k in header:
hlens[k] = len(k)
for line in lines:
if ( not hlens.has_key(k) ):
hlens[k] = len(line[k])
else:
hlens[k] = max(len(line[k]), hlens[k])
# create a format string from the widths
fmt = ''
for k in header:
fmt = fmt + ' %' + str(hlens[k]) + 's'
# first the header
print fmt % tuple(header)
# now the rows
for line in lines:
l = []
for k in header:
l.append(line[k])
print fmt % tuple(l)
return
# end of row parsing utilities
# --------------------------------------------------------------------------------
def stop_database(pidfile):
    # Send SIGTERM to the cassandra process whose pid is stored in pidfile.
    # NOTE(review): pidfile is interpolated into a backtick shell command;
    # assumes a trusted, metacharacter-free path.
    print "Stopping the database."
    CMD = ['kill', '-TERM', '`cat ' + pidfile + '`']
    CMD = ' '.join(CMD)
    execute(CMD)
def manual_config(DUCC_HOME, DUCC_HEAD):
    # Print instructions for editing cassandra.yaml by hand when the
    # automatic configuration cannot be applied.
    print ''
    print 'To manually configure the database edit', DUCC_HOME + '/cassandra-server/conf/casssandra.yaml'
    print 'to Insure every occurance of DUCC_HEAD is replaced with', DUCC_HEAD, 'and every occurance'
    print 'of DUCC_HOME is replaced with', DUCC_HOME + '.'
    print ''
    print 'Note that one occurance of DUCC_HEAD will be quoted: you must preserve these quotes, e.g. as "' + DUCC_HEAD + '".'
def update_cassandra_config(DUCC_HOME, DUCC_HEAD):
    # Read cassandra.yaml and change the things necessary to configure it correctly.
    # Rewrites listen_address in place via sed (keeping a .bak backup) when the
    # current value does not already contain DUCC_HEAD.
    # NOTE(review): the file handle f is never closed and the sed command embeds
    # the unquoted host names -- acceptable only for trusted admin input.
    config = DUCC_HOME + '/cassandra-server/conf/cassandra.yaml'
    f = open(config)
    lines = []
    for line in f:
        if ( line.startswith('listen_address:') ):
            line = line.strip();
            print 'Database host is configured at', line
            if ( not DUCC_HEAD in line ):
                print 'Must reconfigure listen_address to', DUCC_HEAD
                parts = line.strip().split(':')
                old = parts[1].strip()
                # swap the old address for DUCC_HEAD in place
                ch_head = "sed -i.bak s'/" + old + "/" + DUCC_HEAD + "'/ " + config
                os.system(ch_head)
def configure_database(DUCC_HOME, DUCC_HEAD, java, db_autostart=True, db_host=None, db_user=None, db_pw=None ):
    # Configure and initialise the DUCC Cassandra database.
    #
    # for cassandra:
    # in ducc_runtime/cassandra-server/conf we need to update cassandra.yaml to establish
    # the data directories and db connection addresses
    #
    # Note this is a bootstrap routine and doesn't try to use common code that may depend on
    # things being initialized correctly.
    #
    # Steps: prompt for a password (unless given / 'bypass'), optionally
    # rewrite cassandra.yaml and start the bundled server, then run the
    # Java DbCreate tool to define the schema.  Returns True on success
    # (or bypass), False otherwise.
    if ( db_pw == None ):
        # interactive fallback (Python 2 raw_input)
        db_pw = raw_input("Enter database password OR 'bypass' to bypass database support:")
        if ( db_pw == '' ):
            print "Must enter a DB password or 'bypass' to continue."
            return False

    if ( db_pw == 'bypass' ):
        print 'Database support will be bypassed'
        return True

    if(db_host == None):
        db_host = DUCC_HEAD

    # use only the first token of a (possibly multi-host) head spec
    db_host = db_host.split()[0]
    print "database host: "+str(db_host)

    if( db_autostart ):
        if ( os.path.exists(DUCC_HOME + "/state/database/data") ):
            print 'Database is already defined in', DUCC_HOME + '/database', '- but will try to rebuild.'

        update_cassandra_config(DUCC_HOME, DUCC_HEAD)

        here = os.getcwd()
        os.chdir(DUCC_HOME + "/cassandra-server")
        pidfile = DUCC_HOME + '/state/cassandra.pid'
        consfile = DUCC_HOME + '/state/cassandra.configure.console'
        print 'Starting the database.  This might take a few moments if it is the first time.'
        CMD = "bin/cassandra -p "+ pidfile + " > "+consfile+" 2>&1";
        os.system(CMD);
        print "Database is started.  Waiting for initialization";
        os.chdir(here)
    else:
        print "Database is not auto-managed.";

    # Now start the db and create the schema
    # Build the classpath for the DbCreate tool by hand (bootstrap).
    CLASSPATH = ''
    CLASSPATH = addToCp(CLASSPATH, DUCC_HOME + '/lib/cassandra/*')
    CLASSPATH = addToCp(CLASSPATH, DUCC_HOME + '/lib/guava/*')
    CLASSPATH = addToCp(CLASSPATH, DUCC_HOME + '/lib/apache-log4j/*')
    CLASSPATH = addToCp(CLASSPATH, DUCC_HOME + '/lib/uima-ducc/*')
    CLASSPATH = addToCp(CLASSPATH, DUCC_HOME + '/apache-uima/apache-activemq/lib/*')
    os.environ['CLASSPATH'] = CLASSPATH
    print os.environ['CLASSPATH']

    ret = True
    CMD = [java, '-DDUCC_HOME=' + DUCC_HOME, 'org.apache.uima.ducc.database.DbCreate', db_host, db_user, db_pw]
    CMD = ' '.join(CMD)
    if ( execute(CMD) == 0 ):
        print 'Database is initialized.'
    else:
        print 'Database schema could not be defined.'
        ret = False

    if( db_autostart ):
        # pidfile was set in the autostart branch above
        stop_database(pidfile)

    return ret
|
ossf-cve-benchmark/CVE-2018-8035
|
uima-ducc-examples/src/main/uima-ducc-vm/driver/helper.py
|
<filename>uima-ducc-examples/src/main/uima-ducc-vm/driver/helper.py
#! /usr/bin/env python
# -----------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -----------------------------------------------------------------------
import os
import random
import datetime
class Helper():
    """Random parameter generator for DUCC driver test submissions.

    Produces randomised users, scheduling classes, work-item counts,
    memory sizes, thread counts, job/reservation spec file names and
    service dependency sets, using the percentage thresholds below.
    Selection pattern used throughout: draw selector in [1, 100];
    selector < pctLo picks key 1, selector < pctLo + pctHi picks key 3,
    otherwise key 2 (so "low"/"high" each occur roughly pctLo/pctHi
    percent of the time).
    """

    # root directory under which per-user log/work dirs are created
    base = '/tmp/ducc/driver'
    ##### simulated user names, one per letter of the alphabet
    dictUser = {
        1:'albatross',  2:'bonobo',      3:'chinchilla', 4:'dodo',     5:'eagle',
        6:'frog',       7:'guppy',       8:'hummingbird', 9:'iguana',  10:'jellyfish',
       11:'kiwi',      12:'lemming',    13:'moose',     14:'nuthatch', 15:'oyster',
       16:'porcupine', 17:'quail',      18:'roadrunner', 19:'squirrel', 20:'tapir',
       21:'urchin',    22:'vicuna',     23:'walrus',    24:'xerus',    25:'yak',
       26:'zebra'
    }
    ##### scheduling class distribution
    pctLoClass = 15
    pctHiClass = 15
    dictClass = { 1:'low', 2:'normal', 3:'high' }
    ##### work-item counts for jobs that complete normally
    pctLoItemsNormal = 30
    pctHiItemsNormal = 10
    dictItemsNormal = { 1:'1', 2:'2', 3:'3'}
    ##### percentage of submissions that use an error spec
    pctError = 10
    ##### work-item counts for jobs that exercise the error path
    # NOTE(review): keys 1 and 2 both map to '4' -- possibly a typo
    # (key 2 may have been intended as '5'); confirm against the spec files.
    pctLoItemsError = 20
    pctHiItemsError = 10
    dictItemsError = { 1:'4', 2:'4', 3:'6'}
    ##### per-process memory sizes (GB)
    pctLoMemory = 15
    pctHiMemory = 15
    dictMemory = { 1:'15', 2:'30', 3:'45'}
    ##### reservation spec file name stems
    dictUnmanagedReservation = {
        1:'1',
    }
    dictManagedReservation = {
        1:'2', 2:'3',
    }
    ##### service dependency endpoints and their pre-built combinations
    dictService = {
        1:'UIMA-AS:FixedSleepAE_1:tcp://localhost:61617',
        2:'UIMA-AS:FixedSleepAE_4:tcp://localhost:61617',
        3:'CUSTOM:localhost:7175',
    }
    dictServiceSets = {
        1:dictService.get(1),
        2:dictService.get(1)+' '+dictService.get(2),
        3:dictService.get(1)+' '+dictService.get(3),
        4:dictService.get(1)+' '+dictService.get(2)+' '+dictService.get(3),
        5:dictService.get(2),
        6:dictService.get(2)+' '+dictService.get(3),
        7:dictService.get(3),
    }
    #####

    def __init__(self):
        pass

    def getUser(self):
        # Uniformly random simulated user name.
        key = random.randint(1, len(self.dictUser))
        value = self.dictUser.get(key)
        return value

    def getClass(self):
        # Weighted random scheduling class (low/normal/high).
        selector = random.randint(1,100)
        if (selector < self.pctLoClass):
            key = 1
        elif (selector < self.pctLoClass+self.pctHiClass):
            key = 3
        else:
            key = 2
        value = self.dictClass.get(key)
        return value

    def getItemsNormal(self):
        # Weighted random work-item count for a normal job.
        selector = random.randint(1,100)
        if (selector < self.pctLoItemsNormal):
            key = 1
        elif (selector < self.pctLoItemsNormal+self.pctHiItemsNormal):
            key = 3
        else:
            key = 2
        value = self.dictItemsNormal.get(key)
        return value

    def getItemsError(self):
        # Weighted random work-item count for an error-path job.
        selector = random.randint(1,100)
        if (selector < self.pctLoItemsError):
            key = 1
        elif (selector < self.pctLoItemsError+self.pctHiItemsError):
            key = 3
        else:
            key = 2
        value = self.dictItemsError.get(key)
        return value

    def getMemory(self):
        # Weighted random memory size (GB, as a string).
        selector = random.randint(1,100)
        if (selector < self.pctLoMemory):
            key = 1
        elif (selector < self.pctLoMemory+self.pctHiMemory):
            key = 3
        else:
            key = 2
        value = self.dictMemory.get(key)
        return value

    def getThreads(self):
        # Random even thread count in {2, 4, 6, 8, 10}.
        selector = random.randint(1,5)
        value = str(2*selector)
        return value

    def getJobFileName(self):
        # Pick an error spec pctError% of the time, else a normal spec.
        selector = random.randint(1,100)
        if (selector < self.pctError):
            value = self.getItemsError()+'.job'
        else:
            value = self.getItemsNormal()+'.job'
        return value

    def getUnmanagedReservationFileName(self):
        # Random unmanaged reservation spec file name.
        key = random.randint(1, len(self.dictUnmanagedReservation))
        value = self.dictUnmanagedReservation.get(key)+'.unmanaged'
        return value

    def getManagedReservationFileName(self):
        # Random managed reservation spec file name.
        key = random.randint(1, len(self.dictManagedReservation))
        value = self.dictManagedReservation.get(key)+'.managed'
        return value

    def getService(self):
        # Single random service endpoint string.
        key = random.randint(1, len(self.dictService))
        value = self.dictService.get(key)
        return value

    def getServiceSet(self):
        # Random space-separated combination of service endpoints.
        key = random.randint(1, len(self.dictServiceSets))
        value = self.dictServiceSets.get(key)
        return value

    def getLogDir(self,user,subdir):
        # Per-user log directory under base; created on first use.
        value = self.base+'/'+user+'/ducc/logs'+'/'+subdir
        if not os.path.exists(value):
            os.makedirs(value)
        return value

    def getWorkDir(self,user,subdir):
        # Per-user work directory under base; created on first use.
        value = self.base+'/'+user+'/ducc/work'+'/'+subdir
        if not os.path.exists(value):
            os.makedirs(value)
        return value

    def getHoldTimeInSecondsForManaged(self):
        # Random hold time of 30-60 minutes, in seconds.
        minMinutes = 30
        maxMinutes = 60
        minutes = random.randint(minMinutes,maxMinutes)
        seconds = minutes * 60
        value = seconds
        return value

    def getHoldTimeInSecondsForUnmanaged(self):
        # Random hold time of 12-48 hours plus 30-60 minutes, in seconds.
        minHours = 12
        maxHours = 48
        hours = random.randint(minHours,maxHours)
        minMinutes = 30
        maxMinutes = 60
        minutes = random.randint(minMinutes,maxMinutes) + (hours * 60)
        seconds = minutes * 60
        value = seconds
        return value

    def timestamp(self):
        # Current local time formatted as 'YYYY-MM-DD HH:MM:SS'.
        value = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        return value
# Smoke test: print one randomly chosen user name (Python 2 print).
if __name__ == '__main__':
    helper = Helper()
    print helper.getUser()
|
ossf-cve-benchmark/CVE-2018-8035
|
src/main/scripts/ducc_base.py
|
<reponame>ossf-cve-benchmark/CVE-2018-8035
#!/usr/bin/python
# -----------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -----------------------------------------------------------------------
import os
import sys
import string
import subprocess
import re
import zipfile
import platform
from properties import *
def find_ducc_home():
    """Derive DUCC_HOME from this script's location.

    The script lives two levels below DUCC_HOME, so DUCC_HOME is the
    grand-parent directory of this file.  No longer relies on a
    (possibly inaccurate) environment variable.
    """
    script = os.path.abspath(__file__)
    cut = script.rindex('/')
    cut = script.rindex('/', 0, cut)
    return script[:cut]
def find_localhost():
    """Return this machine's node (host) name via os.uname."""
    return os.uname().nodename
def which(file):
    """Locate *file* on $PATH; return its full path, or None if absent."""
    for directory in os.environ["PATH"].split(":"):
        candidate = directory + "/" + file
        if os.path.exists(candidate):
            return candidate
    return None
class DuccBase:
    """Common base for DUCC admin/CLI scripts (Python 2).

    On construction it derives DUCC_HOME from the script location,
    loads resources/ducc.properties, exports JAVA_HOME and sets the
    CLASSPATH used by the submit tooling.
    """

    def read_properties(self):
        # Load ducc.properties (optionally merging first) and cache the
        # webserver node and JVM path.
        # NOTE(review): merge_properties is not defined in this class;
        # presumably supplied by a subclass -- confirm.
        if ( self.do_merge ):
            self.merge_properties()

        self.ducc_properties = Properties()
        self.ducc_properties.put('ducc.home', self.DUCC_HOME)
        self.ducc_properties.put('DUCC_HOME', self.DUCC_HOME)
        self.ducc_properties.load(self.propsfile)

        self.webserver_node = self.ducc_properties.get('ducc.ws.node')
        self.jvm = self.ducc_properties.get('ducc.jvm')

    def java(self):
        # Path of the configured JVM executable.
        return self.jvm

    def java_home(self):
        # Derive JAVA_HOME from the JVM path (two levels up), with a
        # fixed location on macOS and a placeholder during first-time setup.
        if ( os.environ.has_key('DUCC_POST_INSTALL') ):
            return 'JAVA_HOME'    # avoid npe during first-time setup

        if ( self.system == 'Darwin' ):
            self.jvm_home = "/Library/Java/Home"
        else:
            ndx = self.jvm.rindex('/')
            ndx = self.jvm.rindex('/', 0, ndx)
            self.jvm_home = self.jvm[:ndx]

        return self.jvm_home

    def version(self):
        # Ask the DUCC Version class for the product version string.
        lines = self.popen(self.jvm, ' org.apache.uima.ducc.common.utils.Version')
        line = lines.readline().strip()
        return "DUCC Version", line

    # simply spawn-and-forget using Python preferred mechanism
    def spawn(self, *CMD):
        # Run CMD through the shell and wait for it; returns the child pid.
        cmd = ' '.join(CMD)
        # print '**** spawn', cmd, '****'
        ducc = subprocess.Popen(cmd, shell=True)
        pid = ducc.pid
        try:
            status = os.waitpid(pid, 0)
        except KeyboardInterrupt:
            print 'KeyboardInterrupt'
        except:
            print "Unexpected exception: ", sys.exc_info()[0]
        return pid

    def popen(self, *CMD):
        # Run CMD through the shell, merging stderr into stdout, and
        # return the child's stdout stream for the caller to read.
        cmd = ' '.join(CMD)
        #print 'POPEN:', cmd
        proc = subprocess.Popen(cmd, bufsize=0, stdout=subprocess.PIPE, shell=True, stderr=subprocess.STDOUT)
        return proc.stdout

    def format_classpath(self, cp):
        # Print a colon-separated classpath one entry per line.
        strings = cp.split(':')
        for s in strings:
            print s

    def set_classpath(self):
        # Export the submit jar as the process CLASSPATH.
        ducc_home = self.DUCC_HOME
        LIB       = ducc_home + '/lib'

        CLASSPATH = LIB + '/ducc-submit.jar'
        os.environ['CLASSPATH'] = CLASSPATH

    def mkargs(self, args):
        '''
           The cli needs to insure all args are fully quoted so the shell doesn't
           lose the proper tokenization.  This quotes everything.
        '''
        answer = []
        for a in args:
            arg = '"' + a + '"'
            answer.append(arg)
        return answer

    def __init__(self, merge=False):
        # merge: when True, read_properties first calls merge_properties().
        self.DUCC_HOME = find_ducc_home()
        self.do_merge = merge
        self.ducc_properties = None
        self.system = platform.system()
        self.jvm = None
        self.webserver_node = 'localhost'
        self.propsfile = self.DUCC_HOME + '/resources/ducc.properties'
        self.localhost = find_localhost()
        self.read_properties()

        os.environ['JAVA_HOME'] = self.java_home()

        self.set_classpath()
if __name__ == "__main__":
base = DuccBase()
|
AlpacaDB/backlight
|
src/backlight/labelizer/ternary/static_neutral.py
|
<reponame>AlpacaDB/backlight
import pandas as pd
import numpy as np
from backlight.datasource.marketdata import MarketData
from backlight.labelizer.common import LabelType, TernaryDirection
from backlight.labelizer.labelizer import Labelizer, Label
class StaticNeutralLabelizer(Labelizer):
    """Generates session-aware static ternary labels.

    For each trading session (delimited by ``session_splits``) the
    absolute mid-price move over ``lookahead`` is compared against a
    per-session neutral threshold, estimated as the ``neutral_ratio``
    quantile of moves inside [window_start, window_end); moves below
    the threshold are labelled NEUTRAL, the rest UP/DOWN by sign.

    Args:
        lookahead (str): Lookahead period (pandas offset alias)
        session_splits (list[datetime.time]): EST local time to split sessions
        neutral_ratio (float): 0 < x < 1, Percentage of NEUTRAL labels
        window_start (str): Start date for lookback window
        window_end (str): End date for lookback window
        neutral_hard_limit (float): The minimum diff to label UP/DOWN
    """

    def validate_params(self) -> None:
        """Assert that every required parameter is present."""
        assert "lookahead" in self._params
        assert "session_splits" in self._params
        assert len(self._params["session_splits"])
        assert "neutral_ratio" in self._params
        assert "window_start" in self._params
        assert "window_end" in self._params
        assert "neutral_hard_limit" in self._params

    def _calculate_static_neutral_range(self, diff_abs: pd.Series) -> pd.Series:
        """Return the per-timestamp neutral threshold for *diff_abs*.

        The quantile for each session is computed only on rows selected
        by ``mask``: inside the lookback window and outside what appear
        to be market-closure periods in New York local time (16-17h,
        Friday close, Saturday, Sunday day) -- NOTE(review): confirm the
        exact closure semantics against the data calendar.
        """
        df = pd.DataFrame(diff_abs.values, index=diff_abs.index, columns=["diff"])
        df.loc[:, "nyk_time"] = df.index.tz_convert("America/New_York")
        df.loc[:, "res"] = np.nan
        mask = (
            (df.index >= self._params["window_start"])
            & (df.index < self._params["window_end"])
            & ~((df.nyk_time.dt.hour <= 17) & (df.nyk_time.dt.dayofweek == 6))
            & ((df.nyk_time.dt.hour < 16) | (df.nyk_time.dt.hour > 17))
            & ~((df.nyk_time.dt.hour >= 16) & (df.nyk_time.dt.dayofweek == 4))
            & (df.nyk_time.dt.dayofweek != 5)
        )
        splits = sorted(self._params["session_splits"])
        # pair each split with the next one, wrapping around midnight
        shifted_splits = splits[1:] + splits[:1]
        for s, t in list(zip(splits, shifted_splits)):
            if s >= t:
                # session wraps past midnight
                scope = (df.nyk_time.dt.time >= s) | (df.nyk_time.dt.time < t)
            else:
                scope = (df.nyk_time.dt.time >= s) & (df.nyk_time.dt.time < t)
            df.loc[scope, "res"] = df.loc[(scope & mask), "diff"].quantile(
                self.neutral_ratio
            )
        # enforce the floor on the threshold
        df.loc[(df.res < self.neutral_hard_limit), "res"] = self.neutral_hard_limit
        return df.res

    def create(self, mkt: MarketData) -> pd.DataFrame:
        """Build the Label frame (label_diff, label, neutral_range) for *mkt*."""
        mid = mkt.mid.copy()
        # price `lookahead` into the future, re-aligned onto mid's index
        future_price = mid.shift(freq="-{}".format(self._params["lookahead"]))
        diff = (future_price - mid).reindex(mid.index)
        diff_abs = diff.abs()
        neutral_range = self._calculate_static_neutral_range(diff_abs)
        df = mid.to_frame("mid")
        df.loc[:, "label_diff"] = diff
        df.loc[:, "neutral_range"] = neutral_range
        df.loc[df.label_diff > 0, "label"] = TernaryDirection.UP.value
        df.loc[df.label_diff < 0, "label"] = TernaryDirection.DOWN.value
        # NEUTRAL overrides UP/DOWN for small moves
        df.loc[diff_abs < neutral_range, "label"] = TernaryDirection.NEUTRAL.value
        df = Label(df[["label_diff", "label", "neutral_range"]])
        df.label_type = LabelType.TERNARY
        return df

    @property
    def neutral_ratio(self) -> float:
        # quantile used as the NEUTRAL threshold (0 < x < 1)
        return self._params["neutral_ratio"]

    @property
    def session_splits(self) -> list:
        # EST local times delimiting trading sessions
        return self._params["session_splits"]

    @property
    def neutral_hard_limit(self) -> float:
        # minimum absolute diff required to label UP/DOWN
        return self._params["neutral_hard_limit"]
|
AlpacaDB/backlight
|
src/backlight/labelizer/ternary/dynamic_neutral.py
|
import pandas as pd
from backlight.datasource.marketdata import MarketData
from backlight.labelizer.common import LabelType, TernaryDirection
from backlight.labelizer.labelizer import Label, Labelizer
class DynamicNeutralLabelizer(Labelizer):
    """Ternary labelizer whose NEUTRAL band is a rolling quantile of the
    absolute price move, floored at ``neutral_hard_limit``.

    Params:
        lookahead (str): horizon of the labeled price move
        neutral_ratio (float): quantile used for the neutral band
        neutral_window (str): rolling window length (pandas offset string)
        neutral_hard_limit (float): minimum width of the neutral band
    """

    def validate_params(self) -> None:
        """Assert that every required parameter was supplied."""
        assert "lookahead" in self._params
        assert "neutral_ratio" in self._params
        assert "neutral_window" in self._params
        assert "neutral_hard_limit" in self._params

    def _calculate_dynamic_neutral_range(self, diff_abs: pd.Series) -> pd.Series:
        """Rolling-quantile neutral band, floored at the hard limit."""
        dnr = diff_abs.rolling(self._params["neutral_window"]).quantile(
            self.neutral_ratio
        )
        dnr[dnr < self.neutral_hard_limit] = self.neutral_hard_limit
        return dnr

    def create(self, mkt: MarketData) -> pd.DataFrame:
        """Label each timestamp UP/DOWN/NEUTRAL from the mid-price move over
        ``lookahead``, using the rolling dynamic neutral band."""
        mid = mkt.mid.copy()
        future_price = mid.shift(freq="-{}".format(self._params["lookahead"]))
        diff = (future_price - mid).reindex(mid.index)
        diff_abs = diff.abs()
        neutral_range = self._calculate_dynamic_neutral_range(diff_abs)
        df = mid.to_frame("mid")
        df.loc[:, "label_diff"] = diff
        df.loc[:, "neutral_range"] = neutral_range
        df.loc[df.label_diff > 0, "label"] = TernaryDirection.UP.value
        df.loc[df.label_diff < 0, "label"] = TernaryDirection.DOWN.value
        # NEUTRAL overrides UP/DOWN for moves inside the band.
        df.loc[diff_abs < neutral_range, "label"] = TernaryDirection.NEUTRAL.value
        df = Label(df[["label_diff", "label", "neutral_range"]])
        df.label_type = LabelType.TERNARY
        return df

    @property
    def neutral_ratio(self) -> float:
        # Fix: was annotated `-> str`; consumed as a quantile (float).
        return self._params["neutral_ratio"]

    @property
    def neutral_window(self) -> str:
        return self._params["neutral_window"]

    @property
    def neutral_hard_limit(self) -> float:
        # Fix: was annotated `-> str`; used as a numeric threshold.
        return self._params["neutral_hard_limit"]
class MarketCloseAwareDynamicNeutralLabelizer(DynamicNeutralLabelizer):
    """Dynamic-neutral variant that excludes market-closed periods
    (New York close hours and weekends) from the rolling quantile."""

    def _calculate_dynamic_neutral_range(self, diff_abs: pd.Series) -> pd.Series:
        df = pd.DataFrame(diff_abs.values, index=diff_abs.index, columns=["res"])
        df.loc[:, "nyk_time"] = df.index.tz_convert("America/New_York")
        # Convert the time-based window (e.g. "3H") into a row count;
        # assumes the index has a fixed freq -- TODO confirm callers
        # guarantee diff_abs.index.freq is set.
        freq = int(
            pd.Timedelta(self._params["neutral_window"])
            / pd.Timedelta(diff_abs.index.freq)
        )
        # Market-open mask in NY time: drop 16:00-17:59, Friday post-close,
        # Saturday, and Sunday before the evening re-open -- same convention
        # as StaticNeutralLabelizer._calculate_static_neutral_range.
        mask = (
            ~((df.nyk_time.dt.hour <= 17) & (df.nyk_time.dt.dayofweek == 6))
            & ((df.nyk_time.dt.hour < 16) | (df.nyk_time.dt.hour > 17))
            & ~((df.nyk_time.dt.hour >= 16) & (df.nyk_time.dt.dayofweek == 4))
            & (df.nyk_time.dt.dayofweek != 5)
        )
        # Rolling quantile over open-market rows only, then re-aligned to the
        # full index with forward-fill so closed periods inherit the last
        # open-market band.
        dnr = (
            df.loc[mask, "res"]
            .rolling(freq)
            .quantile(self.neutral_ratio)
            .reindex(diff_abs.index)
            .ffill()
        )
        dnr[dnr < self.neutral_hard_limit] = self.neutral_hard_limit
        return dnr
|
AlpacaDB/backlight
|
src/backlight/strategies/filter.py
|
import pandas as pd
import numpy as np
from typing import List, Type
from backlight.trades.trades import Trades, from_dataframe
from backlight.datasource.marketdata import AskBidMarketData
def limit_max_amount(trades: "Trades", max_amount: int) -> "Trades":
    """Limit trade by max amount.

    Transactions are scanned in row order; any trade whose amount would push
    the absolute running position above ``max_amount`` is dropped, together
    with all later transactions sharing its ``_id``.

    Args:
        trades: Trades
        max_amount: Max amount in absolute value
    Result:
        Trades
    """
    # Annotations quoted (PEP 484 forward references) so the module stays
    # importable for tooling without the Trades symbol resolved at def time.
    assert max_amount > 0.0
    current_amount = 0.0
    deleted_ids = set()  # set: O(1) membership test inside the loop
    for index, row in trades.iterrows():
        if row["_id"] in deleted_ids:
            # Later leg of an already-dropped trade: skip it too.
            continue
        next_amount = current_amount + row["amount"]
        if abs(next_amount) > max_amount:
            deleted_ids.add(row["_id"])
            continue
        current_amount = next_amount
    return trades[~trades["_id"].isin(deleted_ids)]
def skip_entry_by_spread(
    trades: Trades, mkt: AskBidMarketData, max_spread: float
) -> Trades:
    """Skip entry by spread.
    Args:
        trades: Trades
        mkt: Market data for ask/bid prices
        max_spread: More than the value, skip entry
    Result:
        Trades
    """
    assert max_spread >= 0.0
    assert trades.index.unique().isin(mkt.index).all()
    spread = mkt.spread
    # Drop every trade whose spread at entry time exceeds the cap.
    dropped = [
        trade_id
        for trade_id in trades.ids
        if spread.at[trades.get_trade(trade_id).index[0]] > max_spread
    ]
    return trades[~trades["_id"].isin(dropped)]
def filter_entry_by_time(trades: Trades, unit: str, container_set: tuple) -> Trades:
    """Filter trade which match conditions at least one element.
    -> e.g. for container_set = [1,2] and unit = 'hour' Trades of hour 1 or 2
    will be return.
    Args:
        trades: Trades to filter.
        container_set: The results will only contain elements of time in this set
        unit: Can be 'hour', 'minute'... The results.time will be in the set
    Returns:
        Trades.
    """
    # Sorting by _id groups each trade's entry/exit pair into adjacent rows;
    # the loop below assumes exactly two rows per trade -- TODO confirm this
    # invariant holds for all Trades produced upstream.
    sort = trades.sort_values("_id")
    # Pre-allocated output frame; filled positionally because it is faster
    # than growing a frame row by row.
    df = pd.DataFrame(
        data=np.zeros((sort.shape[0], 3)), columns=["amount", "_id", "index"]
    )
    j = 0
    for i in range(0, sort.index.size, 2):
        entry_index = sort.index[i]
        exit_index = sort.index[i + 1]
        # Keep the pair if either its entry or exit time matches the set.
        if (
            getattr(entry_index, unit) in container_set
            or getattr(exit_index, unit) in container_set
        ):
            # This code is faster than an iloc.
            df.at[j, "amount"] = sort.iat[i, 0]
            df.at[j + 1, "amount"] = sort.iat[i + 1, 0]
            df.at[j, "_id"] = sort.iat[i, 1]
            df.at[j + 1, "_id"] = sort.iat[i + 1, 1]
            df.at[j, "index"] = entry_index
            df.at[j + 1, "index"] = exit_index
            j += 2
    # Trim unused pre-allocated rows, restore chronological order.
    df = df.iloc[0:j, :].sort_values("index").set_index("index")
    df.index.name = None
    return from_dataframe(df, trades.symbol, trades.currency_unit)
def skip_entry_by_hours(trades: Trades, hours: List[int]) -> Trades:
    """Skip entry by hours.
    Args:
        trades: Trades
        hours: Hours which will be filtered out from entry.
    Result:
        Trades
    """
    # Drop trades whose entry timestamp falls in one of the excluded hours.
    dropped = [
        trade_id
        for trade_id in trades.ids
        if trades.get_trade(trade_id).index[0].hour in hours
    ]
    return trades[~trades["_id"].isin(dropped)]
|
AlpacaDB/backlight
|
src/backlight/strategies/exit.py
|
<filename>src/backlight/strategies/exit.py
import numpy as np
import pandas as pd
from typing import Callable, List, Optional, Tuple
from backlight.datasource.marketdata import MarketData
from backlight.labelizer.common import TernaryDirection
from backlight.signal.signal import Signal
from backlight.trades.trades import Transaction, Trades, concat, from_dataframe
def _concat(mkt: MarketData, sig: Optional[Signal]) -> pd.DataFrame:
    """Inner-join market data with an optional signal frame.

    Returns ``mkt`` unchanged when no signal is given.
    """
    if sig is None:
        return mkt
    assert mkt.symbol == sig.symbol
    # Assume sig is less frequent than mkt.
    assert all(idx in mkt.index for idx in sig.index)
    joined = pd.concat([mkt, sig], axis=1, join="inner")
    joined.symbol = mkt.symbol
    return joined
def _exit_transaction(
    df: pd.DataFrame,
    trade: pd.Series,
    exit_condition: Callable[[pd.DataFrame, pd.Series], pd.Series],
) -> Transaction:
    """Build the closing transaction for ``trade``: at the first index where
    ``exit_condition`` holds, or at the end of ``df`` if it never does."""
    candidates = df[exit_condition(df, trade)].index
    exit_index = df.index[-1] if candidates.empty else candidates[0]
    # Close the whole open position with an opposite-signed amount.
    return Transaction(timestamp=exit_index, amount=-trade.sum())
def _no_exit_condition(df: pd.DataFrame, trade: pd.Series) -> pd.Series:
    """Exit condition that never fires (all-False over ``df``'s index)."""
    return pd.Series(False, index=df.index)
def exit(
    mkt: MarketData,
    sig: Optional[Signal],
    entries: Trades,
    exit_condition: Callable[[pd.DataFrame, pd.Series], pd.Series],
) -> Trades:
    """Exit trade when satisfying condition.

    NOTE(review): this deliberately shadows the ``exit`` builtin; renaming
    would break importers of this module.

    Args:
        mkt: Market data
        sig: Signal data
        entries: Tuple of entry trades.
        exit_condition: The entry is closed most closest time which
            condition is `True`.
    Result:
        Trades
    """
    df = _concat(mkt, sig)

    def _exit(
        trades: Trades,
        df: pd.DataFrame,
        exit_condition: Callable[[pd.DataFrame, pd.Series], pd.Series],
    ) -> pd.Series:
        indices = []  # type: List[pd.Timestamp]
        exits = []  # type: List[Tuple[float, int]]
        for i in trades.ids:
            trade = trades.get_trade(i)
            if trade.sum() == 0:
                # Position already flat: nothing to close.
                continue
            idx = trade.index[0]
            # Only consider market data at or after the entry timestamp.
            df_exit = df[idx <= df.index]
            transaction = _exit_transaction(df_exit, trade, exit_condition)
            indices.append(transaction.timestamp)
            exits.append((transaction.amount, i))
        df = pd.DataFrame(index=indices, data=exits, columns=["amount", "_id"])
        return from_dataframe(df, trades.symbol, trades.currency_unit)

    exits = _exit(entries, df, exit_condition)
    return concat([entries, exits])
def exit_by_max_holding_time(
    mkt: MarketData,
    sig: Optional[Signal],
    entries: Trades,
    max_holding_time: pd.Timedelta,
    exit_condition: Callable[[pd.DataFrame, pd.Series], pd.Series],
) -> Trades:
    """Exit trade at max holding time or satisfying condition.
    Args:
        mkt: Market data
        sig: Signal data
        entries: Tuple of entry trades.
        max_holding_time: maximum holding time
        exit_condition: The entry is closed most closest time which
            condition is `True`.
    Result:
        Trades
    """
    df = _concat(mkt, sig)

    def _exit_by_max_holding_time(
        trades: Trades,
        df: pd.DataFrame,
        max_holding_time: pd.Timedelta,
        exit_condition: Callable[[pd.DataFrame, pd.Series], pd.Series],
    ) -> Trades:
        indices = []  # type: List[pd.Timestamp]
        exits = []  # type: List[Tuple[float, int]]
        for i in trades.ids:
            trade = trades.get_trade(i)
            if trade.sum() == 0:
                # Position already flat: nothing to close.
                continue
            idx = trade.index[0]
            # Window limited to [entry, entry + max_holding_time]; the
            # fallback in _exit_transaction (last index of the window)
            # implements the forced exit at max holding time.
            df_exit = df[(idx <= df.index) & (df.index <= idx + max_holding_time)]
            transaction = _exit_transaction(df_exit, trade, exit_condition)
            indices.append(transaction.timestamp)
            exits.append((transaction.amount, i))
        df = pd.DataFrame(index=indices, data=exits, columns=["amount", "_id"])
        return from_dataframe(df, trades.symbol, trades.currency_unit)

    exits = _exit_by_max_holding_time(entries, df, max_holding_time, exit_condition)
    return concat([entries, exits])
def exit_at_max_holding_time(
    mkt: MarketData, sig: Signal, entries: Trades, max_holding_time: pd.Timedelta
) -> Trades:
    """Exit at max holding time.

    Thin wrapper: delegates with a never-true exit condition, so every
    trade is closed exactly at the holding-time limit.

    Args:
        mkt: Market data
        sig: Signal data
        entries: Tuple of entry trades.
        max_holding_time: maximum holding time
    Result:
        Trades
    """
    return exit_by_max_holding_time(
        mkt, sig, entries, max_holding_time, _no_exit_condition
    )
def exit_at_opposite_signals(
    mkt: MarketData,
    sig: Signal,
    entries: Trades,
    max_holding_time: pd.Timedelta,
    opposite_signals_dict: dict,
) -> Trades:
    """Exit at max holding time or opposite signals.
    Args:
        mkt: Market data
        sig: Signal data
        entries: Tuple of entry trades.
        max_holding_time: maximum holding time
        opposite_signals_dict: Dictionary to define opposite signals for each signal.
    Result:
        Trades
    """

    def _exit_at_opposite_signals_condition(
        df: pd.DataFrame, opposite_signals_dict: dict
    ) -> pd.Series:
        # df starts at the entry timestamp, so row 0 is the signal at entry.
        current_signal = TernaryDirection(df["pred"][0])
        opposite_signals = opposite_signals_dict[current_signal]
        # True wherever a configured opposite signal appears after entry.
        return df["pred"].isin(opposite_signals)

    def _exit_condition(df: pd.DataFrame, trade: pd.Series) -> pd.Series:
        return _exit_at_opposite_signals_condition(df, opposite_signals_dict)

    return exit_by_max_holding_time(
        mkt, sig, entries, max_holding_time, _exit_condition
    )
def exit_by_expectation(
    mkt: MarketData, sig: Signal, entries: Trades, max_holding_time: pd.Timedelta
) -> Trades:
    """Exit at max holding time or by expectation.
    Args:
        mkt: Market data
        sig: Signal data
        entries: Tuple of entry trades.
        max_holding_time: maximum holding time
    Result:
        Trades
    """

    def _exit_by_expectation_condition(df: pd.DataFrame, trade: pd.Series) -> pd.Series:
        # df starts at the entry timestamp, so row 0 is the signal at entry.
        current_signal = TernaryDirection(df["pred"][0])
        # Expected direction: P(up) - P(down); assumes df carries
        # "up"/"neutral"/"down" probability columns -- TODO confirm upstream.
        v = np.array([1.0, 0.0, -1.0])
        expectation = np.dot(df[["up", "neutral", "down"]].values, v)
        # Sign-adjust by the entry direction: exit once expectation turns
        # against the position.
        expectation = current_signal.value * expectation
        sign = expectation < 0.0
        return pd.Series(index=df.index, data=sign)

    return exit_by_max_holding_time(
        mkt, sig, entries, max_holding_time, _exit_by_expectation_condition
    )
def exit_by_trailing_stop(
    mkt: MarketData, entries: Trades, initial_stop: float, trailing_stop: float
) -> Trades:
    """Trailing stop exit strategy.
    Given the list of entries, it simulates exits by using the trailing stop logic.
    The marketdata defines the range for simulation. In case you want to clear all
    positions at the end of the day, you have to limit the end edge of marketdata,
    and call this function for each day.
    Args:
        mkt : Market data
        entries : List of entries
        initial_stop : Initial stop in absolute price.
        trailing_stop : Trailing stop in absolute price.
    Returns:
        trades : All trades for entry and exit.
    """
    assert initial_stop >= 0.0
    assert trailing_stop >= 0.0

    def _exit_by_trailing_stop(df: pd.DataFrame, trade: pd.Series) -> pd.Series:
        prices = df.mid
        amount = trade.sum()
        entry_price = prices.iloc[0]
        # P&L per unit of position, signed by trade direction.
        pl_per_amount = np.sign(amount) * (prices - entry_price)
        is_initial_stop = pl_per_amount <= -initial_stop
        # Trailing stop arms only after profit has reached trailing_stop,
        # then fires when the drawdown from the running peak reaches it.
        historical_max_pl = pl_per_amount.cummax()
        drawdown = historical_max_pl - pl_per_amount
        is_trailing_stop = (historical_max_pl >= trailing_stop) & (
            drawdown >= trailing_stop
        )
        return is_initial_stop | is_trailing_stop

    return exit(mkt, None, entries, _exit_by_trailing_stop)
def exit_at_loss_and_gain(
    mkt: MarketData,
    sig: Optional[Signal],
    entries: Trades,
    max_holding_time: pd.Timedelta,
    loss_threshold: float,
    gain_threshold: float,
) -> Trades:
    """Exit at max holding time, stop-loss, or take-gain.

    NOTE(review): ``sig`` is accepted but ignored -- ``None`` is passed to
    exit_by_max_holding_time below. Confirm whether the signal should be
    forwarded or the parameter removed.

    Args:
        mkt: Market data
        sig: Signal data (currently unused, see note)
        entries: Tuple of entry trades.
        max_holding_time: maximum holding time
        loss_threshold: absolute per-unit loss that triggers an exit
        gain_threshold: absolute per-unit gain that triggers an exit
    Result:
        Trades
    """

    def _exit_at_loss_and_gain(df: pd.DataFrame, trade: pd.Series) -> pd.Series:
        prices = df.mid
        amount = trade.sum()
        entry_price = prices.iloc[0]
        # P&L per unit of position, signed by trade direction.
        pl_per_amount = np.sign(amount) * (prices - entry_price)
        is_stop_loss = pl_per_amount <= -loss_threshold
        is_take_gain = pl_per_amount >= gain_threshold
        return is_stop_loss | is_take_gain

    return exit_by_max_holding_time(
        mkt, None, entries, max_holding_time, _exit_at_loss_and_gain
    )
|
AlpacaDB/backlight
|
tests/strategies/test_filter.py
|
from backlight.strategies import filter as module
import pytest
import pandas as pd
import numpy as np
import backlight
import backlight.trades
from backlight.strategies.amount_based import simple_entry_and_exit
from backlight.asset.currency import Currency
# Fixture: instrument symbol shared by all market/signal fixtures below.
@pytest.fixture
def symbol():
    return "USDJPY"


# Fixture: quote currency matching the symbol above.
@pytest.fixture
def currency_unit():
    return Currency.JPY
# Fixture: 22 one-minute ternary signals (one-hot up/neutral/down columns;
# all-zero rows mean "no signal").
@pytest.fixture
def signal(symbol, currency_unit):
    periods = 22
    df = pd.DataFrame(
        index=pd.date_range(start="2018-06-06", freq="1min", periods=periods),
        data=[
            [1, 0, 0],
            [0, 0, 1],
            [0, 0, 0],
            [1, 0, 0],
            [1, 0, 0],
            [0, 0, 1],
            [0, 0, 1],
            [0, 0, 0],
            [0, 0, 0],
            [1, 0, 0],
            [1, 0, 0],
            [1, 0, 0],
            [0, 0, 1],
            [0, 0, 1],
            [0, 0, 1],
            [0, 0, 0],
            [0, 0, 0],
            [0, 0, 0],
            [1, 0, 0],
            [1, 0, 0],
            [1, 0, 0],
            [1, 0, 0],
        ],
        columns=["up", "neutral", "down"],
    )
    signal = backlight.signal.from_dataframe(df, symbol, currency_unit)
    return signal
# Fixture: monotonically increasing mid prices (0..21) on a 1-minute grid.
@pytest.fixture
def market(symbol, currency_unit):
    periods = 22
    df = pd.DataFrame(
        index=pd.date_range(start="2018-06-06", freq="1min", periods=periods),
        data=np.arange(periods)[:, None],
        columns=["mid"],
    )
    market = backlight.datasource.from_dataframe(df, symbol, currency_unit)
    return market


# Fixture: ask/bid quotes with a spread of 2*(i % 3), i.e. cycling 0, 2, 4.
@pytest.fixture
def askbid(symbol, currency_unit):
    periods = 22
    df = pd.DataFrame(
        index=pd.date_range(start="2018-06-06", freq="1min", periods=periods),
        data=[[i + i % 3, i - i % 3] for i in range(periods)],
        columns=["ask", "bid"],
    )
    market = backlight.datasource.from_dataframe(df, symbol, currency_unit)
    return market
# Fixture: trades generated from the signal fixture with a 3-minute
# maximum holding time.
@pytest.fixture
def trades(market, signal):
    max_holding_time = pd.Timedelta("3min")
    trades = simple_entry_and_exit(market, signal, max_holding_time)
    return trades
def test_limit_max_amount(market, trades):
    """Net position must never exceed +/-2; trailing comments show the
    expected running position after each row."""
    max_amount = 2.0
    limited = module.limit_max_amount(trades, max_amount)
    # `exist` marks rows expected to survive the filter.
    expected = pd.DataFrame(
        index=market.index,
        data=[
            [True, 1.0],  # 1.0
            [True, -1.0],  # 0.0
            [False, 0.0],  # 0.0
            [True, 0.0],  # 0.0
            [True, 2.0],  # 2.0
            [True, -1.0],  # 1.0
            [True, -2.0],  # -1.0
            [True, -1.0],  # -2.0
            [True, 1.0],  # -1.0
            [True, 2.0],  # 1.0
            [True, 1.0],  # 2.0
            [False, 0.0],  # 2.0
            [True, -2.0],  # 0.0
            [True, -2.0],  # -2.0
            [False, 0.0],  # -2.0
            [True, 1.0],  # -1.0
            [True, 1.0],  # 0.0
            [False, 0.0],  # 0.0
            [True, 1.0],  # 1.0
            [True, 1.0],  # 2.0
            [False, 0.0],  # 2.0
            [True, -2.0],  # 0.0
        ],
        columns=["exist", "amount"],
    )
    assert (limited.amount == expected.amount[expected.exist]).all()
def test_skip_entry_by_spread(trades, askbid):
    """Entries where the ask-bid spread exceeds 2.0 must be skipped;
    trailing comments show the expected running position."""
    spread = 2.0
    limited = module.skip_entry_by_spread(trades, askbid, spread)
    # `exist` marks rows expected to survive the filter.
    expected = pd.DataFrame(
        index=askbid.index,
        data=[
            [True, 1.0],  # 1.0
            [True, -1.0],  # 0.0
            [False, 0.0],  # 0.0
            [True, 0.0],  # 0.0
            [True, 2.0],  # 2.0
            [False, 0.0],  # 2.0
            [True, -2.0],  # 0.0
            [True, -1.0],  # -1.0
            [False, 0.0],  # -1.0
            [True, 2.0],  # 1.0
            [True, 1.0],  # 2.0
            [False, 0.0],  # 2.0
            [True, -2.0],  # 0.0
            [True, -2.0],  # -2.0
            [False, 0.0],  # 0.0
            [True, 1.0],  # -1.0
            [True, 1.0],  # -2.0
            [False, 0.0],  # 0.0
            [True, 1.0],  # 1.0
            [True, 1.0],  # 2.0
            [False, 0.0],  # 2.0
            [True, -2.0],  # 0.0
        ],
        columns=["exist", "amount"],
    )
    assert (limited.amount == expected.amount[expected.exist]).all()
def test_filter_entry_by_time(trades, symbol, currency_unit):
    """Only trade pairs whose entry or exit minute is in {1, 3, 8, 12}
    should remain.

    NOTE(review): the final assertion compares `.all()` aggregates, which is
    much weaker than an element-wise frame comparison -- it can pass for
    frames that differ. Consider pandas.testing.assert_frame_equal.
    """
    result = module.filter_entry_by_time(trades, "minute", [1, 3, 8, 12])
    df = pd.DataFrame(
        data=[
            [1.0, 0.0],
            [-1.0, 1.0],
            [-1.0, 0.0],
            [1.0, 2.0],
            [1.0, 1.0],
            [-1.0, 4.0],
            [-1.0, 2.0],
            [1.0, 4.0],
            [1.0, 6.0],
            [-1.0, 6.0],
            [-1.0, 9.0],
            [1.0, 9.0],
        ],
        index=pd.DatetimeIndex(
            [
                pd.Timestamp("2018-06-06 00:00:00"),
                pd.Timestamp("2018-06-06 00:01:00"),
                pd.Timestamp("2018-06-06 00:03:00"),
                pd.Timestamp("2018-06-06 00:03:00"),
                pd.Timestamp("2018-06-06 00:04:00"),
                pd.Timestamp("2018-06-06 00:05:00"),
                pd.Timestamp("2018-06-06 00:06:00"),
                pd.Timestamp("2018-06-06 00:08:00"),
                pd.Timestamp("2018-06-06 00:09:00"),
                pd.Timestamp("2018-06-06 00:12:00"),
                pd.Timestamp("2018-06-06 00:12:00"),
                pd.Timestamp("2018-06-06 00:15:00"),
            ]
        ),
        columns=["amount", "_id"],
    )
    expected = backlight.trades.trades.from_dataframe(df, symbol, currency_unit)
    assert (result.all() == expected.all()).all()
# Fixture: five two-leg trades on an hourly grid; each trade's entry hour is
# noted inline (entry row, then exit row one hour later).
@pytest.fixture
def hourly_trades(symbol, currency_unit):
    data = [
        1.0,  # entry at UTC 0
        -2.0,
        1.0,  # entry at UTC 2
        2.0,
        -4.0,  # entry at UTC 4
        2.0,
        1.0,  # entry at UTC 6
        0.0,
        1.0,  # entry at UTC 8
        0.0,
    ]
    index = pd.date_range(start="2018-06-06", freq="1H", periods=len(data))
    trades = []
    for i in range(0, len(data), 2):
        trade = pd.Series(index=index[i : i + 2], data=data[i : i + 2], name="amount")
        trades.append(trade)
    trades = backlight.trades.make_trades(symbol, trades, currency_unit)
    return trades
def test_skip_entry_by_hours(hourly_trades):
    """Trades entered at hours 2 and 6 must be dropped; those entered at
    0, 4 and 8 (ids 0, 2, 4) must survive."""
    hours = [2, 5, 6, 7]
    limited = module.skip_entry_by_hours(hourly_trades, hours)
    expected = pd.concat(
        [
            hourly_trades.get_trade(0),
            hourly_trades.get_trade(2),
            hourly_trades.get_trade(4),
        ],
        axis=0,
    )
    assert (limited.amount == expected).all()
|
AlpacaDB/backlight
|
src/backlight/labelizer/ternary/hybrid_neutral.py
|
import pandas as pd
from backlight.datasource.marketdata import MarketData
from backlight.labelizer.common import LabelType, TernaryDirection
from backlight.labelizer.labelizer import Label
from backlight.labelizer.ternary.static_neutral import StaticNeutralLabelizer
from backlight.labelizer.ternary.dynamic_neutral import (
MarketCloseAwareDynamicNeutralLabelizer,
)
class HybridNeutralLabelizer(
    StaticNeutralLabelizer, MarketCloseAwareDynamicNeutralLabelizer
):
    """Blend of the static and dynamic neutral bands:
    range = alpha * static + (1 - alpha) * dynamic.
    """

    def __init__(self, **kwargs: str) -> None:
        super().__init__(**kwargs)
        self.validate_params()

    def validate_params(self) -> None:
        # MRO: Hybrid -> Static -> MarketCloseAwareDynamic -> Dynamic.
        # The first call runs StaticNeutralLabelizer.validate_params; the
        # second starts lookup *after* MarketCloseAwareDynamicNeutralLabelizer
        # so DynamicNeutralLabelizer.validate_params also runs.
        super(HybridNeutralLabelizer, self).validate_params()
        super(MarketCloseAwareDynamicNeutralLabelizer, self).validate_params()
        assert "alpha" in self._params
        assert 0 <= float(self._params["alpha"]) <= 1

    def _calculate_hybrid_neutral_range(self, diff_abs: pd.Series) -> pd.Series:
        # Convex combination of the two parent bands.
        snr = self._calculate_static_neutral_range(diff_abs)
        dnr = self._calculate_dynamic_neutral_range(diff_abs)
        return self.alpha * snr + (1 - self.alpha) * dnr

    def create(self, mkt: MarketData) -> pd.DataFrame:
        """Label each timestamp UP/DOWN/NEUTRAL using the blended band."""
        mid = mkt.mid.copy()
        future_price = mid.shift(freq="-{}".format(self._params["lookahead"]))
        diff = (future_price - mid).reindex(mid.index)
        diff_abs = diff.abs()
        neutral_range = self._calculate_hybrid_neutral_range(diff_abs)
        df = mid.to_frame("mid")
        df.loc[:, "label_diff"] = diff
        df.loc[:, "neutral_range"] = neutral_range
        df.loc[df.label_diff > 0, "label"] = TernaryDirection.UP.value
        df.loc[df.label_diff < 0, "label"] = TernaryDirection.DOWN.value
        # NEUTRAL overrides UP/DOWN for moves inside the band.
        df.loc[diff_abs < neutral_range, "label"] = TernaryDirection.NEUTRAL.value
        df = Label(df[["label_diff", "label", "neutral_range"]])
        df.label_type = LabelType.TERNARY
        return df

    @property
    def alpha(self) -> float:
        # Stored as str-compatible param; coerced to float on access.
        return float(self._params["alpha"])
|
AlpacaDB/backlight
|
tests/labelizer/test_labelizer_ternary_hybrid_neutral.py
|
from backlight.labelizer.ternary.hybrid_neutral import HybridNeutralLabelizer as module
import pytest
import pandas as pd
import numpy as np
import datetime
# Fixture: 25 hourly bid/ask/mid quotes spanning one day of USDJPY-like
# prices (UTC-indexed), used as raw market data for the labelizer.
@pytest.fixture
def sample_df():
    index = pd.date_range(
        "2017-09-04 13:00:00+00:00", "2017-09-05 13:00:00+00:00", freq="1H"
    )
    return pd.DataFrame(
        index=index,
        data=np.array(
            [
                [109.68, 109.69, 109.685],
                [109.585, 109.595, 109.59],
                [109.525, 109.535, 109.53],
                [109.6, 109.61, 109.605],
                [109.695, 109.7, 109.6975],
                [109.565, 109.705, 109.635],
                [109.63, 109.685, 109.6575],
                [109.555, 109.675, 109.615],
                [109.7, 109.75, 109.725],
                [109.67, 109.72, 109.695],
                [109.66, 109.675, 109.6675],
                [109.8, 109.815, 109.8075],
                [109.565, 109.575, 109.57],
                [109.535, 109.545, 109.54],
                [109.32, 109.33, 109.325],
                [109.27, 109.275, 109.2725],
                [109.345, 109.355, 109.35],
                [109.305, 109.315, 109.31],
                [109.3, 109.31, 109.305],
                [109.445, 109.46, 109.4525],
                [109.42, 109.425, 109.4225],
                [109.385, 109.395, 109.39],
                [109.305, 109.315, 109.31],
                [109.365, 109.375, 109.37],
                [109.365, 109.375, 109.37],
            ]
        ),
        columns=["bid", "ask", "mid"],
    )
def test_create(sample_df):
    """Smoke-check hybrid labeling: net label sum is 1 and the last
    lookahead row(s) have no neutral range (no future price available)."""
    lbl_args = {
        "lookahead": "1H",
        "neutral_ratio": 0.5,
        "session_splits": [datetime.time(9), datetime.time(18)],
        "neutral_window": "3H",
        "neutral_hard_limit": 0.00,
        "window_start": "20170904 12:00:00+0000",
        "window_end": "20170905 06:00:00+0000",
        "alpha": 0.5,
    }
    lbl = module(**lbl_args).create(sample_df)
    assert lbl.label.sum() == 1
    assert lbl.neutral_range.isna().sum() == 2
|
AlpacaDB/backlight
|
src/backlight/__init__.py
|
<reponame>AlpacaDB/backlight
# Package metadata for backlight.
__author__ = "AlpacaJapan Co., Ltd."
__version__ = "1.0.9"
__release__ = "1.0.9"  # kept in sync with __version__
__license__ = "MIT"
|
jradavenport/cubehelix
|
setup.py
|
#!/usr/bin/env python
# encoding: utf-8
import os
from setuptools import setup
def read(fname):
    """Return the text of *fname*, resolved relative to this setup.py.

    Fix: the original left the file handle open until garbage collection;
    a context manager closes it deterministically.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Packaging metadata: single-module distribution (cubehelix.py), long
# description taken from the README.
setup(
    name="cubehelix",
    version="0.1.0",
    author="<NAME>",
    # author_email="",
    description="Cubehelix colormaps for matplotlib",
    long_description=read('README.md'),
    # license="BSD",
    py_modules=['cubehelix'],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Scientific/Engineering :: Visualization",
        # "License :: OSI Approved :: BSD License",
    ]
)
|
jradavenport/cubehelix
|
test.py
|
import numpy as np
import matplotlib.pyplot as plt
import cubehelix
# Visual smoke test: a 2-D Gaussian scatter rendered as a hexbin plot with
# a custom cubehelix colormap.
# set up some simple data to plot
x = np.random.randn(10000)
y = np.random.randn(10000)
# Rainbow-like cubehelix colormap (wide hue sweep, boosted saturation).
cx1 = cubehelix.cmap(startHue=240,endHue=-300,minSat=1,maxSat=2.5,minLight=.3,maxLight=.8,gamma=.9)
plt.hexbin(x,y,gridsize=50,cmap=cx1)
plt.colorbar()
#plt.savefig('rainbow.png')
|
ethan-haynes/warden
|
warden/app.py
|
#!/usr/bin/env python3
import click, socket, time
# Exception types from the socket layer that we report as a failed probe
# rather than letting them propagate.
# NOTE(review): on Python 3, socket.error is an alias of OSError, which
# already covers gaierror/herror/timeout; the extra entries are harmless.
sock_errors = (
    ValueError,
    socket.error,
    socket.gaierror,
    socket.herror,
    socket.timeout
)
def check_connection( host, port, udp, ipv6, timeout, message ):
    """Probe `host:port` over TCP or UDP and print a result dict.

    Resolves the host, picks the address matching the requested IP family,
    attempts a connect (TCP) or a send/receive round-trip (UDP), and prints
    {'host', 'port', 'up', 'ip'[, 'reason']} or {'error': ...}.

    Fixes:
      * the socket was always created with AF_INET, so every --ipv6 probe
        failed even when an IPv6 address had been matched; the family now
        comes from the matched address info.
      * connect/sendto used the loop variable `address` (the *last* resolved
        address) instead of the matched one stored in s_settings.
    """
    s_settings = dict(
        sock_type=socket.IPPROTO_UDP if udp else socket.IPPROTO_TCP,
        addr_family=None,
        sock_family=None,
        address=None,
        result=None,
        reason=None
    )
    try:
        addr_info = socket.getaddrinfo(
            host,
            port,
            proto=s_settings['sock_type']
        )
    except sock_errors as e:
        return print({ 'error': str(e) })
    # Keep the last resolved address whose family matches the request.
    for addr in addr_info:
        af, sf = addr[0], addr[1]
        address = addr[-1]
        if (ipv6 and af == socket.AF_INET6) or (not ipv6 and af == socket.AF_INET):
            s_settings['addr_family'] = af
            s_settings['sock_family'] = sf
            s_settings['address'] = address
    if s_settings['addr_family'] is None:
        return print({
            'error': f'address family for host {host} does not match provided type of { "IPv6" if ipv6 else "IPv4"}'
        })
    sock = socket.socket(
        family=s_settings['addr_family'],  # was hard-coded socket.AF_INET
        type=s_settings['sock_family'],
        proto=s_settings['sock_type']
    )
    sock.settimeout(timeout)
    try:
        if udp:
            # UDP is connectionless: consider the port up if we get any reply.
            sock.sendto(bytes(message, 'utf-8'), s_settings['address'])
            s_settings['result'] = sock.recv(1024)
        else:
            s_settings['result'] = sock.connect_ex(s_settings['address'])
    except sock_errors as e:
        s_settings['reason'] = str(e)
    finally:
        sock.close()
    out = {
        'host': host,
        'port': port,
        'up' : ( s_settings['result'] is not None if udp else s_settings['result'] == 0 ),
        'ip' : s_settings['address'][0]
    }
    if s_settings['reason'] is not None:
        out['reason'] = s_settings['reason']  # adding failure reason
    print(out)
@click.group()
@click.pass_context
@click.option('-h','--host', 'host', type=str,
    required=True, help='host string -- [HOSTNAME|IP]')
@click.option('-p', '--port', type=int, required=True,
    help='port to connect to -- [PORT]')
@click.option('-6', '--ipv6', is_flag=True,
    help='enable IPv6 support for addressing -- default is IPv4')
@click.option('-t', '--timeout', type=int, default=5,
    help='specify timeout for port scan -- default is 5 seconds')
def app( ctx, host, port, ipv6, timeout ):
    """Root command group: stores shared connection options on the click
    context so the tcp/udp subcommands can read them."""
    # ensure that ctx.obj exists and is a dict (in case `cli()` is called
    # by means other than the `if` block below
    ctx.ensure_object(dict)
    ctx.obj['host'] = host
    ctx.obj['port'] = port
    ctx.obj['ipv6'] = ipv6
    ctx.obj['timeout'] = timeout
    pass  # NOTE(review): redundant after the assignments above
@app.command()
@click.pass_context
def tcp( ctx ):
    """Probe the configured host/port with a TCP connect (no payload)."""
    check_connection(
        ctx.obj['host'],
        ctx.obj['port'],
        False,
        ctx.obj['ipv6'],
        ctx.obj['timeout'],
        None
    )
@app.command()
@click.pass_context
# NOTE(review): the help text below looks copy-pasted from --port; it
# should describe the UDP payload message (runtime string, left unchanged).
@click.option('-uM', '--udp-message', 'udp_message', type=str, required=True,
    help='port to connect to -- [PORT]')
def udp( ctx, udp_message ):
    """Probe the configured host/port over UDP using `udp_message` as the
    payload; the port counts as up if any reply is received."""
    check_connection(
        ctx.obj['host'],
        ctx.obj['port'],
        True,
        ctx.obj['ipv6'],
        ctx.obj['timeout'],
        udp_message
    )
if __name__ == '__main__':
    # BUG FIX: the click group defined above is named `app`, not `cli`;
    # calling cli(obj={}) raised NameError whenever the script ran directly.
    app(obj={})
|
ethan-haynes/warden
|
setup.py
|
<filename>setup.py
#!/usr/bin/env python3
from setuptools import setup, find_packages
setup(
name='warden',
version='0.1',
description='app for monitoring service availability',
author='<NAME>',
install_requires=[
'click'
],
packages=find_packages(),
scripts=['bin/warden']
)
|
AswinRetnakumar/Machina
|
tests/optims/distributed_adamw_test.py
|
import os
import unittest
import torch
import torch.distributed as dist
from torch.multiprocessing import Process
import torch.nn as nn
from machina.optims import DistributedAdamW
def init_processes(rank, world_size,
                   function, backend='tcp'):
    """Join the torch.distributed group as `rank` of `world_size`, then run
    `function(rank, world_size)`.

    NOTE(review): the 'tcp' backend was removed from modern PyTorch
    ('gloo' is the usual CPU backend) -- confirm against the pinned
    torch version before relying on this default.
    """
    # Rendezvous endpoint shared by all spawned worker processes.
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group(backend, rank=rank,
                            world_size=world_size)
    function(rank, world_size)
class TestDistributedAdamW(unittest.TestCase):
    """Smoke test: one optimizer step of DistributedAdamW across 4 processes."""

    def test_step(self):
        # Runs inside each spawned worker after process-group init.
        def _run(rank, world_size):
            model = nn.Linear(10, 1)
            optimizer = DistributedAdamW(
                model.parameters())
            optimizer.zero_grad()
            # Scalar output used directly as the loss for a gradient pass.
            loss = model(torch.ones(10).float())
            loss.backward()
            optimizer.step()

        # Spawn one process per rank and wait for all to finish; failures in
        # workers surface as non-zero exit codes, not assertions here.
        processes = []
        world_size = 4
        for rank in range(world_size):
            p = Process(target=init_processes,
                        args=(rank,
                              world_size,
                              _run))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
|
AswinRetnakumar/Machina
|
machina/noise/__init__.py
|
from machina.noise.base import BaseActionNoise
from machina.noise.ounoise import OUActionNoise
|
AswinRetnakumar/Machina
|
machina/pds/categorical_pd.py
|
"""
Categorical
"""
import numpy as np
import torch
from torch.distributions import Categorical, kl_divergence
from machina.pds.base import BasePd
class CategoricalPd(BasePd):
    """
    Categorical probablistic distribution.
    """

    def sample(self, params, sample_shape=torch.Size()):
        """Draw indices from the categorical distribution over params['pi']."""
        return Categorical(probs=params['pi']).sample(sample_shape)

    def llh(self, x, params):
        """Log-likelihood of `x` under the distribution."""
        return Categorical(params['pi']).log_prob(x)

    def kl_pq(self, p_params, q_params):
        """KL divergence KL(p || q) between two categorical distributions."""
        return kl_divergence(
            Categorical(p_params['pi']), Categorical(q_params['pi'])
        )

    def ent(self, params):
        """Entropy of the distribution."""
        return Categorical(params['pi']).entropy()
|
AswinRetnakumar/Machina
|
machina/noise/ounoise.py
|
<gh_stars>100-1000
"""
This is implementation of Ornstein-Uhlenbeck process.
"""
import numpy as np
import torch
from machina.utils import get_device
from machina.noise.base import BaseActionNoise
class OUActionNoise(BaseActionNoise):
    """
    noise produced by Ornstein-Uhlenbeck process.

    Temporally correlated noise commonly added to continuous actions for
    exploration; one noise dimension per action dimension.
    """

    def __init__(self, action_space, sigma=0.2, theta=.15, dt=1e-2, x0=None):
        # theta: mean-reversion rate; sigma: diffusion scale per dimension;
        # dt: integration step; x0: optional initial state (zeros if None).
        BaseActionNoise.__init__(self, action_space)
        self.mu = np.zeros(self.action_space.shape[0])
        self.theta = theta
        self.sigma = sigma * np.ones_like(self.mu)
        self.dt = dt
        self.x0 = x0
        self.reset()

    def __call__(self, device='cpu'):
        # Euler-Maruyama step of dx = theta*(mu - x)*dt + sigma*sqrt(dt)*dW.
        x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + \
            self.sigma * np.sqrt(self.dt) * \
            np.random.normal(size=self.mu.shape)
        self.x_prev = x
        return torch.tensor(x, dtype=torch.float, device=device)

    def reset(self):
        # Restart the process at x0, or at the origin when unspecified.
        if self.x0 is not None:
            self.x_prev = self.x0
        else:
            self.x_prev = np.zeros_like(self.mu, dtype=np.float32)
|
AswinRetnakumar/Machina
|
machina/envs/rew_in_ob_env.py
|
import gym
import numpy as np
class RewInObEnv(gym.Env):
    """Env wrapper that appends the previous step's (optionally normalized)
    reward to the observation vector along `dim`."""

    def __init__(self, env, dim=0, normalize=True, initial_value=0, mean=0, std=1, low=-np.inf, high=np.inf):
        self.env = env
        # Keep a handle on the innermost env across nested wrappers.
        if hasattr(env, 'original_env'):
            self.original_env = env.original_env
        else:
            self.original_env = env
        self.dim = dim
        self.normalize = normalize
        self.initial_value = initial_value  # reward slot value at reset
        self.mean = mean  # normalization offset applied to rewards
        self.std = std  # normalization scale applied to rewards
        observation_space = self.env.observation_space
        action_space = self.env.action_space
        # Widen the observation space by one reward dimension.
        low = np.concatenate(
            [observation_space.low, np.array([low])], axis=dim)
        high = np.concatenate(
            [observation_space.high, np.array([high])], axis=dim)
        self.observation_space = gym.spaces.Box(low, high, dtype=np.float32)
        self.action_space = self.env.action_space

    @property
    def horizon(self):
        # NOTE(review): checks for attribute 'horizon' but returns
        # '_horizon' -- if the inner env exposes only the former this
        # raises AttributeError. Confirm which attribute is intended.
        if hasattr(self.env, 'horizon'):
            return self.env._horizon

    def reset(self):
        """Reset the inner env and seed the reward slot with initial_value."""
        ob = self.env.reset()
        initial_rew = np.ones((1, )) * self.initial_value
        ob = np.concatenate([ob, initial_rew], axis=self.dim)
        return ob

    def step(self, action):
        """Step the inner env; the appended reward is normalized, but the
        returned reward stays raw."""
        next_ob, reward, done, info = self.env.step(action)
        _rew = reward
        if self.normalize:
            _rew = (_rew - self.mean) / self.std
        next_ob = np.concatenate([next_ob, np.array([_rew])], axis=self.dim)
        return next_ob, reward, done, info

    def render(self):
        self.env.render()

    def terminate(self):
        self.env.terminate()
|
AswinRetnakumar/Machina
|
machina/envs/__init__.py
|
from machina.envs.gym_env import GymEnv
from machina.envs.continuous2discrete_env import C2DEnv
from machina.envs.ac_in_ob_env import AcInObEnv
from machina.envs.rew_in_ob_env import RewInObEnv
from machina.envs.skill_env import SkillEnv
from machina.envs.env_utils import flatten_to_dict
|
AswinRetnakumar/Machina
|
machina/vfuncs/__init__.py
|
<filename>machina/vfuncs/__init__.py
from machina.vfuncs.state_vfuncs import BaseSVfunc, DeterministicSVfunc, NormalizedDeterministicSVfunc
from machina.vfuncs.state_action_vfuncs import BaseSAVfunc, DeterministicSAVfunc, CEMDeterministicSAVfunc
|
AswinRetnakumar/Machina
|
machina/envs/skill_env.py
|
<filename>machina/envs/skill_env.py
import gym
import numpy as np
class SkillEnv(gym.Wrapper):
    """Env wrapper that appends a one-hot skill id to every observation.

    A skill index in [0, num_skill) is sampled uniformly at each reset and
    kept fixed for the episode; used by skill-discovery algorithms (DIAYN).
    """
    def __init__(self, env, num_skill=4):
        gym.Wrapper.__init__(self, env)
        self.num_skill = num_skill  # number of distinct skills
        self.skill = 0  # current episode's skill id; resampled in reset()
        self.real_observation_space = env.observation_space  # space without the skill bits
        # Extend the Box bounds with num_skill extra dims for the one-hot code.
        low = np.hstack((env.observation_space.low, np.zeros(self.num_skill)))
        high = np.hstack((env.observation_space.high, np.ones(self.num_skill)))
        self.observation_space = gym.spaces.Box(low=low, high=high)
        self.skill_space = gym.spaces.Box(low=np.zeros(
            self.num_skill), high=np.ones(self.num_skill))
    def reset(self, **kwargs):
        # sample skill id
        self.skill = self.unwrapped.np_random.randint(0, self.num_skill)
        obs = self.env.reset(**kwargs)
        obs_skill = np.hstack((obs, np.eye(self.num_skill)[self.skill]))
        return obs_skill
    def step(self, ac):
        obs, reward, done, info = self.env.step(ac)
        # Append the same one-hot skill code that reset() chose for this episode.
        obs_skill = np.hstack((obs, np.eye(self.num_skill)[self.skill]))
        return obs_skill, reward, done, info
    @property
    def horizon(self):
        if hasattr(self.env, 'horizon'):
            # NOTE(review): reads self._horizon, which SkillEnv never sets; this
            # presumably relies on gym.Wrapper.__getattr__ delegating the lookup
            # to the inner env — confirm against the pinned gym version.
            return self._horizon
    def terminate(self):
        # NOTE(review): self.monitoring is not set on SkillEnv either; looks like
        # it is delegated to the wrapped GymEnv via gym.Wrapper.__getattr__ — verify.
        if self.monitoring:
            self.env._close()
|
AswinRetnakumar/Machina
|
machina/prepro/base.py
|
<reponame>AswinRetnakumar/Machina<filename>machina/prepro/base.py
import numpy as np
import torch
class BasePrePro(object):
    """
    Preprocess for observations.

    Maintains an exponentially-weighted running mean/variance of observations
    and can whiten (standardize and clip) incoming observations with them.

    Parameters
    ----------
    observation_space : gym.Space
    normalize_ob : bool
        If True, observations are standardized with the running statistics.
    """

    def __init__(self, observation_space, normalize_ob=True):
        self.observation_space = observation_space
        self.normalize_ob = normalize_ob
        if self.normalize_ob:
            # Running mean / running variance, one entry per observation dim.
            self.ob_rm = np.zeros(self.observation_space.shape)
            self.ob_rv = np.ones(self.observation_space.shape)
            self.alpha = 0.001  # EMA step size

    def update_ob_rms(self, ob):
        """
        Updating running mean and running variance.
        """
        keep = 1 - self.alpha
        self.ob_rm = keep * self.ob_rm + self.alpha * ob
        self.ob_rv = keep * self.ob_rv + \
            self.alpha * np.square(ob - self.ob_rm)

    def _whiten(self, ob):
        # Standardize with running stats and clip outliers to [-5, 5].
        scaled = (ob - self.ob_rm) / (np.sqrt(self.ob_rv) + 1e-8)
        return np.clip(scaled, -5, 5)

    def prepro(self, ob):
        """
        Applying preprocess to observations.
        """
        if self.normalize_ob:
            ob = self._whiten(ob)
        return ob

    def prepro_with_update(self, ob):
        """
        Applying preprocess to observations with update.
        """
        if self.normalize_ob:
            self.update_ob_rms(ob)
            ob = self._whiten(ob)
        return ob
|
AswinRetnakumar/Machina
|
machina/optims/__init__.py
|
from machina.optims.adamw import AdamW
from machina.optims.distributed_adamw import DistributedAdamW
from machina.optims.distributed_sgd import DistributedSGD
|
AswinRetnakumar/Machina
|
machina/algos/diayn.py
|
<gh_stars>100-1000
import torch
from machina import loss_functional as lf
def train(discrim, optim_discrim, on_traj, discrim_batch_size, epc_per_itr, num_skill):
    """One training phase of the DIAYN skill discriminator.

    Draws random minibatches from *on_traj* and minimizes the skill
    cross-entropy loss; returns the list of per-batch loss values.
    """
    losses = []
    for minibatch in on_traj.random_batch(discrim_batch_size, epc_per_itr):
        optim_discrim.zero_grad()
        batch_loss = lf.cross_ent_diayn(discrim, minibatch, num_skill)
        batch_loss.backward()
        optim_discrim.step()
        losses.append(batch_loss.item())
    return losses
|
AswinRetnakumar/Machina
|
machina/noise/base.py
|
<gh_stars>100-1000
class BaseActionNoise(object):
    """
    Base class of action noise.

    Subclasses implement noise processes applied to actions; ``reset`` is
    invoked at episode boundaries to reinitialize any internal state.
    """

    def __init__(self, action_space):
        # gym.Space the noise is generated for.
        self.action_space = action_space

    def reset(self):
        """Reset internal noise state; a no-op in the base class."""
        pass
|
AswinRetnakumar/Machina
|
example/run_mixed_env.py
|
<reponame>AswinRetnakumar/Machina
"""
An example of mixed environment with ppo.
"""
import argparse
import json
import os
from pprint import pprint
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import gym
import pybullet_envs
import machina as mc
from machina.pols import GaussianPol, CategoricalPol, MultiCategoricalPol
from machina.algos import ppo_clip
from machina.vfuncs import DeterministicSVfunc
from machina.envs import GymEnv, C2DEnv, AcInObEnv, RewInObEnv
from machina.traj import Traj
from machina.traj import epi_functional as ef
from machina.samplers import EpiSampler
from machina import logger
from machina.utils import measure, set_device
from simple_net import PolNet, VNet, PolNetLSTM, VNetLSTM
parser = argparse.ArgumentParser()
parser.add_argument('--log', type=str, default='garbage',
help='Directory name of log.')
parser.add_argument('--env_name', type=str,
default='Pendulum-v0', help='Name of environment.')
parser.add_argument('--record', action='store_true',
default=False, help='If True, movie is saved.')
parser.add_argument('--seed', type=int, default=256)
parser.add_argument('--max_epis', type=int,
default=1000000, help='Number of episodes to run.')
parser.add_argument('--num_parallel', type=int, default=4,
help='Number of processes to sample.')
parser.add_argument('--cuda', type=int, default=-1, help='cuda device number.')
parser.add_argument('--data_parallel', action='store_true', default=False,
help='If True, inference is done in parallel on gpus.')
parser.add_argument('--max_epis_per_iter', type=int,
default=1024, help='Number of episodes in an iteration.')
parser.add_argument('--epoch_per_iter', type=int, default=10,
help='Number of epoch in an iteration')
parser.add_argument('--rnn_batch_size', type=int, default=8,
help='Number of sequences included in batch of rnn.')
parser.add_argument('--pol_lr', type=float, default=3e-4,
help='Policy learning rate')
parser.add_argument('--vf_lr', type=float, default=3e-4,
help='Value function learning rate')
parser.add_argument('--cell_size', type=int, default=512,
help='Cell size of rnn.')
parser.add_argument('--h_size', type=int, default=512,
help='Hidden size of rnn.')
parser.add_argument('--max_grad_norm', type=float, default=0.5,
help='Value of maximum gradient norm.')
parser.add_argument('--clip_param', type=float, default=0.2,
help='Value of clipping liklihood ratio.')
parser.add_argument('--gamma', type=float, default=0.995,
help='Discount factor.')
parser.add_argument('--lam', type=float, default=1,
help='Tradeoff value of bias variance.')
args = parser.parse_args()
if not os.path.exists(args.log):
os.mkdir(args.log)
with open(os.path.join(args.log, 'args.json'), 'w') as f:
json.dump(vars(args), f)
pprint(vars(args))
if not os.path.exists(os.path.join(args.log, 'models')):
os.mkdir(os.path.join(args.log, 'models'))
np.random.seed(args.seed)
torch.manual_seed(args.seed)
device_name = 'cpu' if args.cuda < 0 else "cuda:{}".format(args.cuda)
device = torch.device(device_name)
set_device(device)
score_file = os.path.join(args.log, 'progress.csv')
logger.add_tabular_output(score_file)
env1 = GymEnv('HumanoidBulletEnv-v0')
env1.original_env.seed(args.seed)
env1 = AcInObEnv(env1)
env1 = RewInObEnv(env1)
env1 = C2DEnv(env1)
env2 = GymEnv('HumanoidFlagrunBulletEnv-v0')
env2.original_env.seed(args.seed)
env2 = AcInObEnv(env2)
env2 = RewInObEnv(env2)
env2 = C2DEnv(env2)
assert env1.observation_space == env2.observation_space
assert env1.action_space.shape == env2.action_space.shape
observation_space = env1.observation_space
action_space = env1.action_space
pol_net = PolNetLSTM(observation_space, action_space, h_size=args.h_size,
cell_size=args.cell_size)
pol = MultiCategoricalPol(observation_space, action_space, pol_net,
True, data_parallel=args.data_parallel, parallel_dim=1)
vf_net = VNetLSTM(observation_space, h_size=args.h_size,
cell_size=args.cell_size)
vf = DeterministicSVfunc(observation_space, vf_net, True,
data_parallel=args.data_parallel, parallel_dim=1)
sampler1 = EpiSampler(
env1, pol, num_parallel=args.num_parallel, seed=args.seed)
sampler2 = EpiSampler(
env2, pol, num_parallel=args.num_parallel, seed=args.seed)
optim_pol = torch.optim.Adam(pol_net.parameters(), args.pol_lr)
optim_vf = torch.optim.Adam(vf_net.parameters(), args.vf_lr)
total_epi = 0
total_step = 0
max_rew = -1e6
while args.max_epis > total_epi:
with measure('sample'):
epis1 = sampler1.sample(pol, max_epis=args.max_epis_per_iter)
epis2 = sampler2.sample(pol, max_epis=args.max_epis_per_iter)
with measure('train'):
traj1 = Traj()
traj2 = Traj()
traj1.add_epis(epis1)
traj1 = ef.compute_vs(traj1, vf)
traj1 = ef.compute_rets(traj1, args.gamma)
traj1 = ef.compute_advs(traj1, args.gamma, args.lam)
traj1 = ef.centerize_advs(traj1)
traj1 = ef.compute_h_masks(traj1)
traj1.register_epis()
traj2.add_epis(epis2)
traj2 = ef.compute_vs(traj2, vf)
traj2 = ef.compute_rets(traj2, args.gamma)
traj2 = ef.compute_advs(traj2, args.gamma, args.lam)
traj2 = ef.centerize_advs(traj2)
traj2 = ef.compute_h_masks(traj2)
traj2.register_epis()
traj1.add_traj(traj2)
if args.data_parallel:
pol.dp_run = True
vf.dp_run = True
result_dict = ppo_clip.train(traj=traj1, pol=pol, vf=vf, clip_param=args.clip_param,
optim_pol=optim_pol, optim_vf=optim_vf, epoch=args.epoch_per_iter, batch_size=args.batch_size if not args.rnn else args.rnn_batch_size, max_grad_norm=args.max_grad_norm)
if args.data_parallel:
pol.dp_run = False
vf.dp_run = False
total_epi += traj1.num_epi
step = traj1.num_step
total_step += step
rewards1 = [np.sum(epi['rews']) for epi in epis1]
rewards2 = [np.sum(epi['rews']) for epi in epis2]
mean_rew = np.mean(rewards1 + rewards2)
logger.record_tabular_misc_stat('Reward1', rewards1)
logger.record_tabular_misc_stat('Reward2', rewards2)
logger.record_results(args.log, result_dict, score_file,
total_epi, step, total_step,
rewards1 + rewards2,
plot_title='humanoid')
if mean_rew > max_rew:
torch.save(pol.state_dict(), os.path.join(
args.log, 'models', 'pol_max.pkl'))
torch.save(vf.state_dict(), os.path.join(
args.log, 'models', 'vf_max.pkl'))
torch.save(optim_pol.state_dict(), os.path.join(
args.log, 'models', 'optim_pol_max.pkl'))
torch.save(optim_vf.state_dict(), os.path.join(
args.log, 'models', 'optim_vf_max.pkl'))
max_rew = mean_rew
torch.save(pol.state_dict(), os.path.join(
args.log, 'models', 'pol_last.pkl'))
torch.save(vf.state_dict(), os.path.join(
args.log, 'models', 'vf_last.pkl'))
torch.save(optim_pol.state_dict(), os.path.join(
args.log, 'models', 'optim_pol_last.pkl'))
torch.save(optim_vf.state_dict(), os.path.join(
args.log, 'models', 'optim_vf_last.pkl'))
del traj1
del traj2
del sampler
|
AswinRetnakumar/Machina
|
machina/pds/deterministic_pd.py
|
import numpy as np
import torch
from torch.distributions import Normal, kl_divergence
from machina.pds.base import BasePd
class DeterministicPd(BasePd):
    """
    Deterministic probabilistic distribution.

    Modeled as a Normal with zero scale, so every sample equals the mean.
    """

    def sample(self, params, sample_shape=torch.Size()):
        """Draw (deterministic) samples equal to the mean."""
        loc = params['mean']
        zero_scale = torch.zeros_like(loc)
        return Normal(loc=loc, scale=zero_scale).rsample(sample_shape)

    def llh(self, x, params):
        """Log likelihood of *x*, summed over the last dimension."""
        loc = params['mean']
        dist = Normal(loc=loc, scale=torch.zeros_like(loc))
        return torch.sum(dist.log_prob(x), dim=-1)

    def kl_pq(self, p_params, q_params):
        """KL(p || q), summed over the last dimension."""
        p_loc = p_params['mean']
        q_loc = q_params['mean']
        p_dist = Normal(loc=p_loc, scale=torch.zeros_like(p_loc))
        q_dist = Normal(loc=q_loc, scale=torch.zeros_like(q_loc))
        return torch.sum(kl_divergence(p_dist, q_dist), dim=-1)

    def ent(self, params):
        """Entropy, summed over the last dimension."""
        loc = params['mean']
        return torch.sum(Normal(loc=loc, scale=torch.zeros_like(loc)).entropy(), dim=-1)
|
AswinRetnakumar/Machina
|
machina/envs/env_utils.py
|
import numpy as np
from collections import OrderedDict
def flatten_to_dict(flatten_obs, dict_space, dict_keys=None):
if dict_keys is None:
dict_keys = dict_space.spaces.keys()
obs_dict = OrderedDict()
begin_index = 0
end_index = 0
for key in dict_keys:
origin_shape = dict_space.spaces[key].shape
end_index += np.prod(origin_shape)
dim = len(flatten_obs.shape)
obs_dict[key] = flatten_obs[..., begin_index:end_index].reshape(
flatten_obs.shape[:-1] + origin_shape)
begin_index = end_index
return obs_dict
|
AswinRetnakumar/Machina
|
machina/pds/__init__.py
|
from machina.pds.base import BasePd
from machina.pds.gaussian_pd import GaussianPd
from machina.pds.deterministic_pd import DeterministicPd
from machina.pds.mixture_gaussian_pd import MixtureGaussianPd
from machina.pds.categorical_pd import CategoricalPd
|
AswinRetnakumar/Machina
|
machina/vfuncs/state_action_vfuncs/cem_state_action_vfunc.py
|
"""
Deterministic State Action Value function with Cross Entropy Method
"""
from machina.vfuncs.state_action_vfuncs.deterministic_state_action_vfunc import DeterministicSAVfunc
from machina.utils import get_device
import torch
from torch.distributions import Normal, MultivariateNormal
class CEMDeterministicSAVfunc(DeterministicSAVfunc):
    """
    Deterministic State Action Vfunction with Cross Entropy Method.
    Maximizes Q(obs, ac) over actions by iteratively sampling actions from a
    Gaussian and refitting it to the best-scoring samples.
    Parameters
    ----------
    observation_space : gym.Space
    action_space : gym.Space
    net : torch.nn.Module
    rnn : bool
    data_parallel : bool
        If True, network computation is executed in parallel.
    parallel_dim : int
        Split dimension in data parallel.
    num_sampling : int
        Number of samples sampled from Gaussian in CEM.
    num_best_sampling : int
        Number of best samples used for fitting Gaussian in CEM.
    num_iter : int
        Number of iteration of CEM.
    multivari : bool
        If True, a full-covariance Gaussian is fit; otherwise a diagonal one.
    delta : float
        Coefficient used for making covariance matrix positive definite.
    save_memory : bool
        If True, observations are optimized one at a time instead of batched.
    """
    def __init__(self, observation_space, action_space, net, rnn=False, data_parallel=False, parallel_dim=0, num_sampling=64,
                 num_best_sampling=6, num_iter=2, multivari=True, delta=1e-4, save_memory=False):
        super().__init__(observation_space, action_space,
                         net, rnn, data_parallel, parallel_dim)
        self.num_sampling = num_sampling
        self.delta = delta
        self.num_best_sampling = num_best_sampling
        self.num_iter = num_iter
        self.net = net
        self.dim_ac = self.action_space.shape[0]
        self.multivari = multivari
        self.save_memory = save_memory
        self.to(get_device())
    def max(self, obs):
        """
        Perform max and argmax of Qfunc
        Parameters
        ----------
        obs : torch.Tensor
        Returns
        -------
        max_qs : torch.Tensor
        max_acs : torch.Tensor
        """
        obs = self._check_obs_shape(obs)
        self.dim_ob = obs.shape[1]
        high = torch.tensor(self.action_space.high,
                            dtype=torch.float, device=get_device())
        low = torch.tensor(
            self.action_space.low, dtype=torch.float, device=get_device())
        # Initial proposal: actions evenly spread across the action box.
        init_samples = torch.linspace(
            0, 1, self.num_sampling, device=get_device())
        init_samples = init_samples.reshape(
            self.num_sampling, -1) * (high - low) + low  # (self.num_sampling, dim_ac)
        init_samples = self._clamp(init_samples)
        if not self.save_memory:  # batch
            # Tile each observation num_sampling times so every candidate
            # action is scored in a single forward pass.
            self.cem_batch_size = obs.shape[0]
            obs = obs.repeat((1, self.num_sampling)).reshape(
                (self.cem_batch_size * self.num_sampling, self.dim_ob))
            # concatenate[(self.num_sampling, dim_ac), ..., (self.num_sampling, self.dim_ob)], dim=0)
            init_samples = init_samples.repeat((self.cem_batch_size, 1))
            # concatenate[(self.num_sampling, dim_ac), ..., (self.num_sampling, dim_ac)], dim=0)
            max_qs, max_acs = self._cem(obs, init_samples)
        else:  # for-sentence
            # Trade throughput for memory: run CEM per observation.
            self.cem_batch_size = 1
            max_acs = []
            max_qs = []
            for ob in obs:
                ob = ob.repeat((1, self.num_sampling)).reshape(
                    (self.cem_batch_size * self.num_sampling, self.dim_ob))
                ob = self._check_obs_shape(ob)
                max_q, max_ac = self._cem(ob, init_samples)
                max_qs.append(max_q)
                max_acs.append(max_ac)
            max_qs = torch.tensor(
                max_qs, dtype=torch.float, device=obs.device)
            max_acs = torch.cat(max_acs, dim=0)
        max_acs = self._check_acs_shape(max_acs)
        return max_qs, max_acs
    def _cem(self, obs, samples):
        """
        Perform cross entropy method
        Parameters
        ----------
        obs : torch.Tensor
        samples : torch.Tensor
            shape (self.num_sampling, dim_ac)
        Returns
        -------
        max_q : torch.Tensor
        max_ac : torch.Tensor
        """
        # num_iter refits plus one final scoring pass.
        for i in range(self.num_iter+1):
            with torch.no_grad():
                qvals, _ = self.forward(obs, samples)
            if i != self.num_iter:
                qvals = qvals.reshape((self.cem_batch_size, self.num_sampling))
                _, indices = torch.sort(qvals, dim=1, descending=True)
                best_indices = indices[:, :self.num_best_sampling]
                # Offset per-row indices into the flattened (batch*sampling) axis.
                best_indices = best_indices + \
                    torch.arange(0, self.num_sampling*self.cem_batch_size,
                                 self.num_sampling, device=get_device()).reshape((self.cem_batch_size, 1))
                best_indices = best_indices.reshape(
                    (self.num_best_sampling * self.cem_batch_size,))
                # (self.num_best_sampling * self.cem_batch_size, self.dim_ac)
                best_samples = samples[best_indices, :]
                # (self.cem_batch_size, self.num_best_sampling, self.dim_ac)
                best_samples = best_samples.reshape(
                    (self.cem_batch_size, self.num_best_sampling, self.dim_ac))
                samples = self._fitting_diag(
                    best_samples) if not self.multivari else self._fitting_multivari(best_samples)
        qvals = qvals.reshape((self.cem_batch_size, self.num_sampling))
        samples = samples.reshape(
            (self.cem_batch_size, self.num_sampling, self.dim_ac))
        max_q, ind = torch.max(qvals, dim=1)
        max_ac = samples[torch.arange(
            self.cem_batch_size, device=get_device()), ind]
        return max_q, max_ac
    def _fitting_diag(self, best_samples):
        """
        Fit diagonal covariance gaussian and sampling from it
        Parameters
        ----------
        best_samples : torch.Tensor
            shape (self.cem_batch_size, self.num_best_sampling, self.dim_ac)
        Returns
        -------
        samples : torch.Tensor
        """
        mean = torch.mean(
            best_samples, dim=1)  # (self.cem_batch_size, self.dim_ac)
        # (self.cem_batch_size, self.dim_ac)
        std = torch.std(best_samples, dim=1)
        samples = Normal(loc=mean, scale=std).rsample(
            torch.Size((self.num_sampling,)))  # (self.num_best_sampling, self.cem_batch_size, self.dim_ac)
        # (self.num_best_sampling, self.cem_batch_size, self.dim_ac)
        samples = samples.transpose(1, 0)
        samples = samples.reshape((self.num_sampling * self.cem_batch_size,
                                   self.dim_ac))  # (self.num_best_sampling * self.cem_batch_size, self.dim_ac)
        # (self.num_best_sampling * self.cem_batch_size, self.dim_ac)
        samples = self._clamp(samples)
        return samples
    def _fitting_multivari(self, best_samples):
        """
        Fit multivariate gaussian and sampling from it
        Parameters
        ----------
        best_samples : torch.Tensor
            shape (self.cem_batch_size, self.num_best_sampling, self.dim_ac)
        Returns
        -------
        samples : torch.Tensor
        """
        def fitting(best_samples):
            mean = best_samples.mean(dim=0)
            fs_m = best_samples.sub(mean.expand_as(best_samples))
            # NOTE(review): the divisor is num_sampling - 1 although only
            # num_best_sampling rows are fit here; looks like it should be
            # num_best_sampling - 1 — confirm intended.
            cov_mat = fs_m.transpose(0, 1).mm(fs_m) / (self.num_sampling - 1)
            # delta * I keeps the covariance positive definite.
            cov_mat = cov_mat + self.delta * torch.eye(cov_mat.shape[0])
            pd = MultivariateNormal(mean, cov_mat)
            samples = pd.sample((self.num_sampling,))
            return samples
        samples = torch.cat([fitting(best_sample)
                             for best_sample in best_samples], dim=0)
        return samples
    def _clamp(self, samples):
        """Clamp sampled actions into the action box [low, high]."""
        low = torch.tensor(self.action_space.low,
                           dtype=torch.float, device=get_device())
        high = torch.tensor(self.action_space.high,
                            dtype=torch.float, device=get_device())
        # Normalize to [0, 1], clamp, then map back to [low, high].
        samples = (samples - low) / (high - low)
        samples = torch.clamp(samples, 0, 1) * (high - low) + low
        return samples
|
AswinRetnakumar/Machina
|
machina/envs/gym_env.py
|
<gh_stars>100-1000
# The MIT License (MIT)
#
# Copyright (c) 2016 rllab contributors
#
# rllab uses a shared copyright model: each contributor holds copyright over
# their contributions to rllab. The project versioning records all such
# contribution and copyright details.
# By contributing to the rllab repository through pull-request, comment,
# or otherwise, the contributor releases their content to the license and
# copyright terms herein.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""
This is code for gym.
This code is taken from rllab which is MIT-licensed.
"""
import gym
from machina import logger
class CappedCubicVideoSchedule(object):
    """Video schedule: record at perfect cubes (0, 1, 8, 27, ...) below 1000,
    then every 1000th episode.

    Copied from gym, since this method is frequently moved around.
    """

    def __call__(self, count):
        if count >= 1000:
            return count % 1000 == 0
        root = int(round(count ** (1. / 3)))
        return root ** 3 == count
class NoVideoSchedule(object):
    """Video schedule that never records."""

    def __call__(self, count):
        return False
class GymEnv(gym.Env):
    """Thin machina wrapper around a gym environment.

    Accepts either an env id string or an env instance, optionally wraps it
    in ``gym.wrappers.Monitor`` for video recording, and keeps a reference to
    the innermost env (``original_env``) across nested wrappers.
    """
    def __init__(self, env, record_video=False, video_schedule=None, log_dir=None,
                 force_reset=False):
        if isinstance(env, str):
            env = gym.envs.make(env)
        self.env = env
        # Preserve a handle on the innermost env across nested wrappers.
        if hasattr(env, 'original_env'):
            self.original_env = env.original_env
        else:
            self.original_env = env
        if self.env.spec is not None:
            self.env_id = env.spec.id
        else:
            self.env_id = None
        # A log_dir always triggers the Monitor wrapper; whether video is
        # actually recorded is decided per-episode by the schedule callable.
        if log_dir is None:
            self.monitoring = False
        else:
            if not record_video:
                video_schedule = NoVideoSchedule()
            else:
                if video_schedule is None:
                    video_schedule = CappedCubicVideoSchedule()
            self.env = gym.wrappers.Monitor(
                self.env, log_dir, video_callable=video_schedule, force=True)
            self.monitoring = True
        self.observation_space = env.observation_space
        logger.log("observation space: {}".format(self.observation_space))
        self.action_space = env.action_space
        logger.log("action space: {}".format(self.action_space))
        if self.env.spec is not None:
            # NOTE(review): spec.tags is a pre-gym-0.10 API; newer gym exposes
            # spec.max_episode_steps instead — confirm against the pinned version.
            self._horizon = env.spec.tags['wrapper_config.TimeLimit.max_episode_steps']
        else:
            self._horizon = None
        self._log_dir = log_dir
        self._force_reset = force_reset
    @property
    def horizon(self):
        # Episode length limit taken from the env spec (may be None).
        return self._horizon
    def reset(self):
        if self._force_reset and self.monitoring:
            # Mark the current recording as done so Monitor allows a mid-episode reset.
            from gym.wrappers.monitoring import Monitor
            assert isinstance(self.env, Monitor)
            recorder = self.env.stats_recorder
            if recorder is not None:
                recorder.done = True
        return self.env.reset()
    def step(self, action):
        next_obs, reward, done, info = self.env.step(action)
        return next_obs, reward, done, info
    def render(self):
        self.env.render()
    def terminate(self):
        # NOTE(review): Monitor._close is a private/deprecated API — verify it
        # still exists in the pinned gym version.
        if self.monitoring:
            self.env._close()
    @property
    def unwrapped(self):
        return self.env.unwrapped
|
AswinRetnakumar/Machina
|
machina/samplers/distributed_epi_sampler.py
|
<reponame>AswinRetnakumar/Machina
"""
Sampler class for multi node situation.
This sampler uses redis for the backend.
"""
import argparse
import time
import cloudpickle
import redis
from machina.samplers import EpiSampler
from machina.utils import _int, get_redis, make_redis
class DistributedEpiSampler(object):
    """
    A sampler which sample episodes.
    The master process (rank == -1) scatters the policy and sampling request
    to worker nodes through redis; each worker runs a local EpiSampler and
    gathers its episodes back to the master.
    Parameters
    ----------
    world_size : int
        Number of nodes
    rank : int
        -1 represent master node.
    env : gym.Env
    pol : Pol
    num_parallel : int
        Number of processes
    prepro : Prepro
    seed : int
    """
    def __init__(self, world_size, rank=-1, env=None, pol=None, num_parallel=8, prepro=None, seed=256):
        if rank < 0:
            assert env is not None and pol is not None
        self.world_size = world_size
        self.rank = rank
        self.r = get_redis()
        if rank < 0:
            # Master holds the canonical configuration and splits the
            # parallelism budget across the worker nodes.
            self.env = env
            self.pol = pol
            self.num_parallel = num_parallel // world_size
            self.prepro = prepro
            self.seed = seed
            self.original_num_parallel = num_parallel
        # Broadcast the configuration; workers block until they receive it.
        self.scatter_from_master('env')
        self.scatter_from_master('pol')
        self.scatter_from_master('num_parallel')
        self.scatter_from_master('prepro')
        self.scatter_from_master('seed')
        # NOTE(review): per-rank seed derivation (seed * (rank + 23000));
        # the master (rank=-1) gets factor 22999 — confirm intended.
        self.seed = self.seed * (self.rank + 23000)
        if not rank < 0:
            # Workers build their local sampler and enter the serve loop
            # (launch_sampler never returns).
            self.in_node_sampler = EpiSampler(
                self.env, self.pol, self.num_parallel, self.prepro, self.seed)
            self.launch_sampler()
    def __del__(self):
        if not self.rank < 0:
            del self.in_node_sampler
    def launch_sampler(self):
        """Worker serve loop: wait for a request, sample locally, send episodes back."""
        while True:
            self.scatter_from_master('pol')
            self.scatter_from_master('max_epis')
            self.scatter_from_master('max_steps')
            self.scatter_from_master('deterministic')
            self.epis = self.in_node_sampler.sample(
                self.pol, self.max_epis, self.max_steps, self.deterministic)
            self.gather_to_master('epis')
    def scatter_from_master(self, key):
        """Broadcast attribute *key* from the master to all workers via redis.

        Uses per-rank '<key>_trigger_<rank>' flags for handshaking: the master
        waits until every worker has acknowledged (flag reset to '0').
        """
        if self.rank < 0:
            obj = getattr(self, key)
            self.r.set(key, cloudpickle.dumps(obj))
            triggers = {key + '_trigger' +
                        "_{}".format(rank): '1' for rank in range(self.world_size)}
            self.r.mset(triggers)
            while True:
                time.sleep(0.1)
                values = self.r.mget(triggers)
                if all([_int(v) == 0 for v in values]):
                    break
        else:
            while True:
                time.sleep(0.1)
                trigger = self.r.get(key + '_trigger' +
                                     "_{}".format(self.rank))
                if _int(trigger) == 1:
                    break
            obj = cloudpickle.loads(self.r.get(key))
            setattr(self, key, obj)
            # Acknowledge receipt so the master can proceed.
            self.r.set(key + '_trigger' + "_{}".format(self.rank), '0')
    def gather_to_master(self, key):
        """
        This method assume that obj is summable to list.
        Collects each worker's '<key>_<rank>' payload on the master and
        concatenates them into a single list bound to attribute *key*.
        """
        if self.rank < 0:
            num_done = 0
            objs = []
            while True:
                time.sleep(0.1)
                # This for iteration can be faster.
                for rank in range(self.world_size):
                    trigger = self.r.get(key + '_trigger' + "_{}".format(rank))
                    if _int(trigger) == 1:
                        obj = cloudpickle.loads(
                            self.r.get(key + "_{}".format(rank)))
                        objs += obj
                        self.r.set(key + '_trigger' + "_{}".format(rank), '0')
                        num_done += 1
                if num_done == self.world_size:
                    break
            setattr(self, key, objs)
        else:
            obj = getattr(self, key)
            self.r.set(key + "_{}".format(self.rank), cloudpickle.dumps(obj))
            self.r.set(key + '_trigger' + "_{}".format(self.rank), '1')
            # Wait until the master has consumed this worker's payload.
            while True:
                time.sleep(0.1)
                if _int(self.r.get(key + '_trigger' + "_{}".format(self.rank))) == 0:
                    break
    def sample(self, pol, max_epis=None, max_steps=None, deterministic=False):
        """
        This method should be called in master node.
        Splits the episode/step budget evenly across nodes, triggers sampling
        on every worker, and returns the merged episode list.
        """
        self.pol = pol
        self.max_epis = max_epis // self.world_size if max_epis is not None else None
        self.max_steps = max_steps // self.world_size if max_steps is not None else None
        self.deterministic = deterministic
        self.scatter_from_master('pol')
        self.scatter_from_master('max_epis')
        self.scatter_from_master('max_steps')
        self.scatter_from_master('deterministic')
        self.gather_to_master('epis')
        return self.epis
if __name__ == '__main__':
    # Worker entry point: connect to the shared redis backend and construct a
    # worker-side DistributedEpiSampler, which immediately enters its serve loop.
    parser = argparse.ArgumentParser()
    parser.add_argument('--world_size', type=int)
    parser.add_argument('--rank', type=int)
    parser.add_argument('--redis_host', type=str, default='localhost')
    parser.add_argument('--redis_port', type=str, default='6379')
    args = parser.parse_args()
    make_redis(args.redis_host, args.redis_port)
    sampler = DistributedEpiSampler(
        args.world_size, args.rank)
|
AswinRetnakumar/Machina
|
example/run_trpo.py
|
"""
An example of Trust Region Policy Optimization.
"""
import argparse
import json
import os
from pprint import pprint
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import gym
import machina as mc
from machina.pols import GaussianPol, CategoricalPol, MultiCategoricalPol
from machina.algos import trpo
from machina.vfuncs import DeterministicSVfunc
from machina.envs import GymEnv, C2DEnv
from machina.traj import Traj
from machina.traj import epi_functional as ef
from machina.samplers import EpiSampler
from machina import logger
from machina.utils import measure
from simple_net import PolNet, VNet, PolNetLSTM, VNetLSTM
parser = argparse.ArgumentParser()
parser.add_argument('--log', type=str, default='garbage')
parser.add_argument('--env_name', type=str, default='Pendulum-v0')
parser.add_argument('--c2d', action='store_true', default=False)
parser.add_argument('--record', action='store_true', default=False)
parser.add_argument('--seed', type=int, default=256)
parser.add_argument('--max_epis', type=int, default=1000000)
parser.add_argument('--num_parallel', type=int, default=4)
parser.add_argument('--max_steps_per_iter', type=int, default=10000)
parser.add_argument('--epoch_per_iter', type=int, default=5)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--vf_lr', type=float, default=3e-4)
parser.add_argument('--rnn', action='store_true', default=False)
parser.add_argument('--rnn_batch_size', type=int, default=8,
help='Number of sequences included in batch of rnn.')
parser.add_argument('--gamma', type=float, default=0.995)
parser.add_argument('--lam', type=float, default=1)
args = parser.parse_args()
if not os.path.exists(args.log):
os.mkdir(args.log)
with open(os.path.join(args.log, 'args.json'), 'w') as f:
json.dump(vars(args), f)
pprint(vars(args))
if not os.path.exists(os.path.join(args.log, 'models')):
os.mkdir(os.path.join(args.log, 'models'))
np.random.seed(args.seed)
torch.manual_seed(args.seed)
score_file = os.path.join(args.log, 'progress.csv')
logger.add_tabular_output(score_file)
env = GymEnv(args.env_name, log_dir=os.path.join(
args.log, 'movie'), record_video=args.record)
env.env.seed(args.seed)
if args.c2d:
env = C2DEnv(env)
observation_space = env.observation_space
action_space = env.action_space
if args.rnn:
pol_net = PolNetLSTM(observation_space, action_space,
h_size=256, cell_size=256)
else:
pol_net = PolNet(observation_space, action_space)
if isinstance(action_space, gym.spaces.Box):
pol = GaussianPol(observation_space, action_space, pol_net, args.rnn)
elif isinstance(action_space, gym.spaces.Discrete):
pol = CategoricalPol(observation_space, action_space, pol_net, args.rnn)
elif isinstance(action_space, gym.spaces.MultiDiscrete):
pol = MultiCategoricalPol(
observation_space, action_space, pol_net, args.rnn)
else:
raise ValueError('Only Box, Discrete, and MultiDiscrete are supported')
if args.rnn:
vf_net = VNetLSTM(observation_space, h_size=256, cell_size=256)
else:
vf_net = VNet(observation_space)
vf = DeterministicSVfunc(observation_space, vf_net, args.rnn)
sampler = EpiSampler(env, pol, num_parallel=args.num_parallel, seed=args.seed)
optim_vf = torch.optim.Adam(vf_net.parameters(), args.vf_lr)
total_epi = 0
total_step = 0
max_rew = -1e6
while args.max_epis > total_epi:
with measure('sample'):
epis = sampler.sample(pol, max_steps=args.max_steps_per_iter)
with measure('train'):
traj = Traj()
traj.add_epis(epis)
traj = ef.compute_vs(traj, vf)
traj = ef.compute_rets(traj, args.gamma)
traj = ef.compute_advs(traj, args.gamma, args.lam)
traj = ef.centerize_advs(traj)
traj = ef.compute_h_masks(traj)
traj.register_epis()
result_dict = trpo.train(
traj, pol, vf, optim_vf, args.epoch_per_iter, batch_size=args.batch_size if not args.rnn else args.rnn_batch_size)
total_epi += traj.num_epi
step = traj.num_step
total_step += step
rewards = [np.sum(epi['rews']) for epi in epis]
mean_rew = np.mean(rewards)
logger.record_results(args.log, result_dict, score_file,
total_epi, step, total_step,
rewards,
plot_title=args.env_name)
if mean_rew > max_rew:
torch.save(pol.state_dict(), os.path.join(
args.log, 'models', 'pol_max.pkl'))
torch.save(vf.state_dict(), os.path.join(
args.log, 'models', 'vf_max.pkl'))
torch.save(optim_vf.state_dict(), os.path.join(
args.log, 'models', 'optim_vf_max.pkl'))
max_rew = mean_rew
torch.save(pol.state_dict(), os.path.join(
args.log, 'models', 'pol_last.pkl'))
torch.save(vf.state_dict(), os.path.join(
args.log, 'models', 'vf_last.pkl'))
torch.save(optim_vf.state_dict(), os.path.join(
args.log, 'models', 'optim_vf_last.pkl'))
del traj
del sampler
|
AswinRetnakumar/Machina
|
machina/prepro/__init__.py
|
<filename>machina/prepro/__init__.py
from machina.prepro.base import BasePrePro
|
AswinRetnakumar/Machina
|
machina/traj/__init__.py
|
<filename>machina/traj/__init__.py<gh_stars>100-1000
"""
- This package is the backbone of machina.
- :class:`Environment<machina.envs.gym_env.GymEnv>` and :py:mod:`Algorithm<machina.algos>` are separated by this package.
- Traj class
- :data:`epis` sampled by :py:mod:`sampler<machina.samplers>` are changed into the class :class:`Traj<machina.traj.traj.Traj>`.
- :class:`Traj<machina.traj.traj.Traj>` is the :py:class:`list` of :py:class:`dict`.
- Iterator
- Output is batch, a :py:class:`dict` keys of which are MDP elements such as :data:`obs`, :data:`acs`, :data:`rews`, etc.
- Methods of :py:meth:`iterate*<machina.traj.traj.Traj.iterate>` are used for On-Policy algorithms.
- Methods of :py:meth:`random*<machina.traj.traj.Traj.random_batch>` are used for Off-Policy algorithms.
"""
from machina.traj.traj import Traj
|
AswinRetnakumar/Machina
|
machina/envs/continuous2discrete_env.py
|
"""
Continuous to discrete.
"""
import gym
import numpy as np
class C2DEnv(object):
    """
    Wrapper environment for converting continuous action space to multi discrete action space.

    Parameters
    ----------
    env : gym.Env
        Wrapped env; must have a 1-D Box action space.
    n_bins : int
        Number of bins for converting continuous to discrete.
        e.g. continuous action space is 0 ~ 1 and n_bins=5,
        action space is converted to [0, 0.25, 0.5, 0.75, 1]
    """

    def __init__(self, env, n_bins=30):
        assert isinstance(env.action_space, gym.spaces.Box)
        assert len(env.action_space.shape) == 1
        self.env = env
        self.n_bins = n_bins
        # One discrete dimension (with n_bins choices) per continuous action dim.
        self.action_space = gym.spaces.MultiDiscrete(
            env.action_space.shape[0] * [n_bins])
        self.observation_space = self.env.observation_space
        # Keep a handle on the innermost env across nested wrappers.
        if hasattr(env, 'original_env'):
            self.original_env = env.original_env
        else:
            self.original_env = env

    @property
    def horizon(self):
        """Episode length limit of the wrapped env, if it defines one (else None)."""
        if hasattr(self.env, 'horizon'):
            # Use the public accessor: inner wrappers may expose ``horizon`` as
            # a property without storing a ``_horizon`` attribute, so the old
            # ``self.env._horizon`` read raised AttributeError on chained wrappers.
            return self.env.horizon

    def reset(self):
        return self.env.reset()

    def step(self, action):
        """Map each discrete bin index to its continuous value and step the env."""
        continuous_action = []
        for a, low, high in zip(action, self.env.action_space.low, self.env.action_space.high):
            continuous_action.append(np.linspace(low, high, self.n_bins)[a])
        action = np.array(continuous_action)
        next_obs, reward, done, info = self.env.step(action)
        return next_obs, reward, done, info

    def render(self):
        self.env.render()

    def terminate(self):
        self.env.terminate()
|
AswinRetnakumar/Machina
|
example/run_behavior_clone.py
|
"""
An example of Behavioral Cloning.
"""
import argparse
import json
import os
import copy
from pprint import pprint
import pickle
import numpy as np
import torch
import gym
from machina.pols import GaussianPol, CategoricalPol, MultiCategoricalPol, DeterministicActionNoisePol
from machina.algos import behavior_clone
from machina.envs import GymEnv, C2DEnv
from machina.traj import Traj
from machina.traj import epi_functional as ef
from machina.samplers import EpiSampler
from machina import logger
from machina.utils import measure, set_device
from simple_net import PolNet, PolNetLSTM, VNet, DiscrimNet
# ---------------------------------------------------------------------------
# Command line arguments.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--log', type=str, default='garbage',
                    help='Directory name of log.')
parser.add_argument('--env_name', type=str,
                    default='Pendulum-v0', help='Name of environment.')
parser.add_argument('--c2d', action='store_true',
                    default=False, help='If True, action is discretized.')
parser.add_argument('--record', action='store_true',
                    default=False, help='If True, movie is saved.')
parser.add_argument('--seed', type=int, default=256)
parser.add_argument('--max_epis', type=int,
                    default=100000000, help='Number of episodes to run.')
parser.add_argument('--num_parallel', type=int, default=4,
                    help='Number of processes to sample.')
parser.add_argument('--cuda', type=int, default=-1, help='cuda device number.')
parser.add_argument('--data_parallel', action='store_true', default=False,
                    help='If True, inference is done in parallel on gpus.')
parser.add_argument('--expert_dir', type=str, default='../data/expert_epis')
parser.add_argument('--expert_fname', type=str,
                    default='Pendulum-v0_100epis.pkl')
parser.add_argument('--max_epis_per_iter', type=int,
                    default=10, help='Number of episodes in an iteration.')
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--pol_lr', type=float, default=1e-4,
                    help='Policy learning rate.')
parser.add_argument('--h1', type=int, default=32)
parser.add_argument('--h2', type=int, default=32)
parser.add_argument('--tau', type=float, default=0.001,
                    help='Coefficient of target function.')
parser.add_argument('--gamma', type=float, default=0.99,
                    help='Discount factor.')
parser.add_argument('--lam', type=float, default=1,
                    help='Tradeoff value of bias variance.')
# BUG FIX: these are fractions but were declared type=int (with float
# defaults); passing e.g. --train_size 0.7 on the command line crashed.
parser.add_argument('--train_size', type=float, default=0.7,
                    help='Fraction of data used for training.')
parser.add_argument('--check_rate', type=float, default=0.05,
                    help='Rate of performance check per epoch.')
parser.add_argument('--epoch', type=int, default=1000)
parser.add_argument('--deterministic', action='store_true',
                    default=False, help='If True, policy is deterministic.')
args = parser.parse_args()

# ---------------------------------------------------------------------------
# Device, logging, and output directories.
# ---------------------------------------------------------------------------
device_name = 'cpu' if args.cuda < 0 else "cuda:{}".format(args.cuda)
device = torch.device(device_name)
set_device(device)

if not os.path.exists(args.log):
    os.mkdir(args.log)
with open(os.path.join(args.log, 'args.json'), 'w') as f:
    json.dump(vars(args), f)
pprint(vars(args))
if not os.path.exists(os.path.join(args.log, 'models')):
    os.mkdir(os.path.join(args.log, 'models'))

np.random.seed(args.seed)
torch.manual_seed(args.seed)

score_file = os.path.join(args.log, 'progress.csv')
logger.add_tabular_output(score_file)

# ---------------------------------------------------------------------------
# Environment.
# ---------------------------------------------------------------------------
env = GymEnv(args.env_name, log_dir=os.path.join(
    args.log, 'movie'), record_video=args.record)
env.env.seed(args.seed)
if args.c2d:
    env = C2DEnv(env)
observation_space = env.observation_space
action_space = env.action_space

# ---------------------------------------------------------------------------
# Policy: pick the distribution matching the action space.
# ---------------------------------------------------------------------------
pol_net = PolNet(observation_space, action_space)
if isinstance(action_space, gym.spaces.Box):
    pol = GaussianPol(observation_space, action_space, pol_net,
                      data_parallel=args.data_parallel)
elif isinstance(action_space, gym.spaces.Discrete):
    pol = CategoricalPol(observation_space, action_space, pol_net,
                         data_parallel=args.data_parallel)
elif isinstance(action_space, gym.spaces.MultiDiscrete):
    pol = MultiCategoricalPol(
        observation_space, action_space, pol_net, data_parallel=args.data_parallel)
else:
    raise ValueError('Only Box, Discrete, and MultiDiscrete are supported')

sampler = EpiSampler(env, pol, num_parallel=args.num_parallel, seed=args.seed)
optim_pol = torch.optim.Adam(pol_net.parameters(), args.pol_lr)

# ---------------------------------------------------------------------------
# Expert demonstrations, split into train/test trajectories.
# ---------------------------------------------------------------------------
# SECURITY NOTE: pickle.load can execute arbitrary code; only load expert
# files from a trusted source.
with open(os.path.join(args.expert_dir, args.expert_fname), 'rb') as f:
    expert_epis = pickle.load(f)
train_epis, test_epis = ef.train_test_split(
    expert_epis, train_size=args.train_size)
train_traj = Traj()
train_traj.add_epis(train_epis)
train_traj.register_epis()
test_traj = Traj()
test_traj.add_epis(test_epis)
test_traj.register_epis()
expert_rewards = [np.sum(epi['rews']) for epi in expert_epis]
expert_mean_rew = np.mean(expert_rewards)
logger.log('expert_score={}'.format(expert_mean_rew))
logger.log('num_train_epi={}'.format(train_traj.num_epi))

# ---------------------------------------------------------------------------
# Behavioral-cloning training loop.
# ---------------------------------------------------------------------------
# ROBUSTNESS: int(check_rate * epoch) could be 0 (division by zero below).
check_interval = max(int(args.check_rate * args.epoch), 1)
max_rew = -1e6
for curr_epoch in range(args.epoch):
    if args.data_parallel:
        pol.dp_run = True
    # One epoch of supervised training on the expert data.
    result_dict = behavior_clone.train(
        train_traj, pol, optim_pol,
        args.batch_size
    )
    test_result_dict = behavior_clone.test(test_traj, pol)
    if args.data_parallel:
        pol.dp_run = False
    for key in test_result_dict.keys():
        result_dict[key] = test_result_dict[key]
    # Periodically roll out the current policy and checkpoint.
    if curr_epoch % check_interval == 0 or curr_epoch == 0:
        with measure('sample'):
            paths = sampler.sample(
                pol, max_epis=args.max_epis_per_iter)
        rewards = [np.sum(path['rews']) for path in paths]
        mean_rew = np.mean(rewards)
        logger.record_results_bc(args.log, result_dict, score_file,
                                 curr_epoch, rewards,
                                 plot_title=args.env_name)
        # Keep the best-scoring snapshot separately from the latest one.
        if mean_rew > max_rew:
            torch.save(pol.state_dict(), os.path.join(
                args.log, 'models', 'pol_max.pkl'))
            torch.save(optim_pol.state_dict(), os.path.join(
                args.log, 'models', 'optim_pol_max.pkl'))
            max_rew = mean_rew
        torch.save(pol.state_dict(), os.path.join(
            args.log, 'models', 'pol_last.pkl'))
        torch.save(optim_pol.state_dict(), os.path.join(
            args.log, 'models', 'optim_pol_last.pkl'))
del sampler
|
AswinRetnakumar/Machina
|
machina/vfuncs/state_vfuncs/__init__.py
|
<reponame>AswinRetnakumar/Machina<filename>machina/vfuncs/state_vfuncs/__init__.py
from machina.vfuncs.state_vfuncs.base import BaseSVfunc
from machina.vfuncs.state_vfuncs.deterministic_state_vfunc import DeterministicSVfunc, NormalizedDeterministicSVfunc
|
AswinRetnakumar/Machina
|
machina/samplers/__init__.py
|
<gh_stars>100-1000
"""
- This package samples episodes from :class:`Environment<machina.envs.gym_env.GymEnv>` using multiprocessing.
- Inputs are :class:`Policy<machina.pols.base.BasePol>` and maximum steps or episodes.
- Output is :py:class:`ndarray` of :py:class:`dict` of :py:class:`list`.
"""
from machina.samplers.epi_sampler import EpiSampler
from machina.samplers.distributed_epi_sampler import DistributedEpiSampler
|
AswinRetnakumar/Machina
|
machina/samplers/epi_sampler.py
|
<gh_stars>100-1000
"""
Sampler class
"""
import copy
import time
import gym
import numpy as np
import torch
import torch.multiprocessing as mp
from machina.utils import cpu_mode
LARGE_NUMBER = 100000000
def one_epi(env, pol, deterministic=False, prepro=None):
    """
    Sampling an episode.

    Runs the policy in the environment until ``done``, collecting per-step
    observations, actions, rewards, done flags and the policy/env info dicts.

    Parameters
    ----------
    env : gym.Env
    pol : Pol
    deterministic : bool
        If True, policy is deterministic.
    prepro : Prepro
        Optional observation preprocessor; identity if None.

    Returns
    -------
    epi_length, epi : int, dict
    """
    # Sampling always runs on CPU regardless of the globally set device.
    with cpu_mode():
        if prepro is None:
            def prepro(x): return x
        obs = []
        acs = []
        rews = []
        dones = []
        a_is = []  # per-step policy info (distribution params, hidden states, ...)
        e_is = []  # per-step env info dicts
        o = env.reset()
        pol.reset()
        done = False
        epi_length = 0
        while not done:
            o = prepro(o)
            if not deterministic:
                ac_real, ac, a_i = pol(torch.tensor(o, dtype=torch.float))
            else:
                ac_real, ac, a_i = pol.deterministic_ac_real(
                    torch.tensor(o, dtype=torch.float))
            ac_real = ac_real.reshape(pol.action_space.shape)
            next_o, r, done, e_i = env.step(np.array(ac_real))
            obs.append(o)
            rews.append(r)
            dones.append(done)
            acs.append(ac.squeeze().detach().cpu(
            ).numpy().reshape(pol.action_space.shape))
            # Detach the policy-info tensors into plain numpy arrays so the
            # episode can cross process boundaries.
            _a_i = dict()
            for key in a_i.keys():
                if a_i[key] is None:
                    continue
                if isinstance(a_i[key], tuple):
                    # e.g. an LSTM hidden state pair (h, c)
                    _a_i[key] = tuple([h.squeeze().detach().cpu().numpy()
                                       for h in a_i[key]])
                else:
                    _a_i[key] = a_i[key].squeeze().detach(
                    ).cpu().numpy().reshape(pol.a_i_shape)
            a_i = _a_i
            a_is.append(a_i)
            e_is.append(e_i)
            epi_length += 1
            if done:
                # Redundant with the loop condition; kept as in the original.
                break
            o = next_o
        return epi_length, dict(
            obs=np.array(obs, dtype='float32'),
            acs=np.array(acs, dtype='float32'),
            rews=np.array(rews, dtype='float32'),
            dones=np.array(dones, dtype='float32'),
            a_is=dict([(key, np.array([a_i[key] for a_i in a_is], dtype='float32'))
                       for key in a_is[0].keys()]),
            e_is=dict([(key, np.array([e_i[key] for e_i in e_is], dtype='float32'))
                       for key in e_is[0].keys()])
        )
def mp_sample(pol, env, max_steps, max_epis, n_steps_global, n_epis_global, epis, exec_flag, deterministic_flag, process_id, prepro=None, seed=256):
    """
    Multiprocess sample.

    Worker loop: waits until ``exec_flag`` is raised, then samples episodes
    until max_steps or max_epis is achieved, and lowers the flag again.
    Runs forever; the parent process terminates the worker.

    Parameters
    ----------
    pol : Pol
    env : gym.Env
    max_steps : int
        maximum steps of episodes
    max_epis : int
        maximum episodes of episodes
    n_steps_global : torch.Tensor
        shared Tensor counting steps across all workers
    n_epis_global : torch.Tensor
        shared Tensor counting episodes across all workers
    epis : list
        multiprocessing's list for sharing episodes between processes.
    exec_flag : torch.Tensor
        execution flag; non-zero means "sample now", zeroed when done
    deterministic_flag : torch.Tensor
    process_id : int
        used to derive a per-worker RNG seed
    prepro : Prepro
    seed : int
    """
    # Per-worker seeding so parallel workers explore different episodes.
    np.random.seed(seed + process_id)
    torch.manual_seed(seed + process_id)
    # Avoid intra-op thread contention between worker processes.
    torch.set_num_threads(1)
    while True:
        time.sleep(0.1)
        if exec_flag > 0:
            # NOTE(review): the budget is checked before each episode, so the
            # shared totals can overshoot the limits by up to one episode
            # per worker — presumably accepted; confirm if exact budgets matter.
            while max_steps > n_steps_global and max_epis > n_epis_global:
                l, epi = one_epi(env, pol, deterministic_flag, prepro)
                n_steps_global += l
                n_epis_global += 1
                epis.append(epi)
            exec_flag.zero_()
class EpiSampler(object):
    """
    A sampler which samples episodes in parallel worker processes.

    Parameters
    ----------
    env : gym.Env
    pol : Pol
    num_parallel : int
        Number of worker processes.
    prepro : Prepro
        Optional observation preprocessor passed to the workers.
    seed : int
    """

    def __init__(self, env, pol, num_parallel=8, prepro=None, seed=256):
        self.env = env
        # Workers run a shared CPU copy of the policy; its weights are
        # refreshed from the caller's policy on every ``sample`` call.
        self.pol = copy.deepcopy(pol)
        self.pol.to('cpu')
        self.pol.share_memory()
        self.pol.eval()
        self.num_parallel = num_parallel
        # Shared counters/flags used to coordinate with the workers.
        self.n_steps_global = torch.tensor(0, dtype=torch.long).share_memory_()
        self.max_steps = torch.tensor(0, dtype=torch.long).share_memory_()
        self.n_epis_global = torch.tensor(
            0, dtype=torch.long).share_memory_()
        self.max_epis = torch.tensor(0, dtype=torch.long).share_memory_()
        self.exec_flags = [torch.tensor(
            0, dtype=torch.long).share_memory_() for _ in range(self.num_parallel)]
        self.deterministic_flag = torch.tensor(
            0, dtype=torch.uint8).share_memory_()
        self.epis = mp.Manager().list()
        self.processes = []
        for ind in range(self.num_parallel):
            p = mp.Process(target=mp_sample, args=(self.pol, env, self.max_steps, self.max_epis, self.n_steps_global,
                                                   self.n_epis_global, self.epis, self.exec_flags[ind], self.deterministic_flag, ind, prepro, seed))
            p.start()
            self.processes.append(p)

    def __del__(self):
        # Workers loop forever, so they must be terminated explicitly.
        for p in self.processes:
            p.terminate()

    def sample(self, pol, max_epis=None, max_steps=None, deterministic=False):
        """
        Switch on sampling processes.

        Parameters
        ----------
        pol : Pol
        max_epis : int or None
            maximum episodes of episodes.
            If None, this value is ignored.
        max_steps : int or None
            maximum steps of episodes
            If None, this value is ignored.
        deterministic : bool

        Returns
        -------
        epis : list of dict
            Sampled epis.

        Raises
        ------
        ValueError
            If max_steps and max_epis are both None.
        """
        # Push the caller's latest weights into the shared CPU policy.
        for sp, p in zip(self.pol.parameters(), pol.parameters()):
            sp.data.copy_(p.data.to('cpu'))
        if max_epis is None and max_steps is None:
            # BUG FIX: the previous message ("Either ... needs not to be None")
            # was self-contradictory.
            raise ValueError(
                'max_epis and max_steps cannot both be None')
        max_epis = max_epis if max_epis is not None else LARGE_NUMBER
        max_steps = max_steps if max_steps is not None else LARGE_NUMBER
        self.n_steps_global.zero_()
        self.n_epis_global.zero_()
        self.max_steps.zero_()
        self.max_steps += max_steps
        self.max_epis.zero_()
        self.max_epis += max_epis
        if deterministic:
            self.deterministic_flag.zero_()
            self.deterministic_flag += 1
        else:
            self.deterministic_flag.zero_()
        del self.epis[:]
        for exec_flag in self.exec_flags:
            exec_flag += 1
        while True:
            # BUG FIX: sleep while polling; the old loop busy-waited at 100%
            # CPU until every worker cleared its flag.
            time.sleep(0.01)
            if all([exec_flag == 0 for exec_flag in self.exec_flags]):
                return list(self.epis)
|
AswinRetnakumar/Machina
|
machina/pols/gaussian_pol.py
|
<gh_stars>0
import numpy as np
import torch
import torch.nn as nn
from machina.pols import BasePol
from machina.pds.gaussian_pd import GaussianPd
from machina.utils import get_device
class GaussianPol(BasePol):
    """
    Policy with Gaussian distribution.

    Parameters
    ----------
    observation_space : gym.Space
        observation's space
    action_space : gym.Space
        action's space
        This should be gym.spaces.Box
    net : torch.nn.Module
    rnn : bool
    normalize_ac : bool
        If True, the output of network is spreaded for action_space.
        In this situation the output of network is expected to be in -1~1.
    data_parallel : bool or str
        If True, network computation is executed in parallel.
        If data_parallel is ddp, network computation is executed in distributed parallel.
    parallel_dim : int
        Splitted dimension in data parallel.
    """

    def __init__(self, observation_space, action_space, net, rnn=False, normalize_ac=True, data_parallel=False, parallel_dim=0):
        BasePol.__init__(self, observation_space, action_space, net, rnn,
                         normalize_ac, data_parallel, parallel_dim)
        self.pd = GaussianPd()
        self.to(get_device())

    def forward(self, obs, hs=None, h_masks=None):
        """Sample an action from N(mean, exp(log_std)^2) given ``obs``.

        Returns
        -------
        ac_real : ndarray
            Action converted for the real action space.
        ac : torch.Tensor
            Raw sampled action.
        info : dict
            Distribution params (mean, log_std) and RNN hidden state ``hs``.
        """
        obs = self._check_obs_shape(obs)
        if self.rnn:
            # RNN path expects obs shaped (time, batch, ...).
            time_seq, batch_size, *_ = obs.shape
            if hs is None:
                # Lazily initialize and cache the hidden state across calls.
                if self.hs is None:
                    self.hs = self.net.init_hs(batch_size)
                if self.dp_run:
                    # Data-parallel nets need an extra leading dim on (h, c).
                    self.hs = (self.hs[0].unsqueeze(
                        0), self.hs[1].unsqueeze(0))
                hs = self.hs
            if h_masks is None:
                # No mask given: treat every step as a continuation (all zeros).
                h_masks = hs[0].new(time_seq, batch_size, 1).zero_()
            h_masks = h_masks.reshape(time_seq, batch_size, 1)
            if self.dp_run:
                mean, log_std, hs = self.dp_net(obs, hs, h_masks)
            else:
                mean, log_std, hs = self.net(obs, hs, h_masks)
            self.hs = hs
        else:
            if self.dp_run:
                mean, log_std = self.dp_net(obs)
            else:
                mean, log_std = self.net(obs)
        # log_std may be a single learned vector; broadcast it to mean's shape.
        log_std = log_std.expand_as(mean)
        ac = self.pd.sample(dict(mean=mean, log_std=log_std))
        ac_real = self.convert_ac_for_real(ac.detach().cpu().numpy())
        return ac_real, ac, dict(mean=mean, log_std=log_std, hs=hs)

    def deterministic_ac_real(self, obs, hs=None, h_masks=None):
        """
        action for deployment

        Returns the distribution mean instead of a sample; same return
        structure as ``forward``.
        """
        obs = self._check_obs_shape(obs)
        if self.rnn:
            time_seq, batch_size, *_ = obs.shape
            if hs is None:
                if self.hs is None:
                    self.hs = self.net.init_hs(batch_size)
                hs = self.hs
            if h_masks is None:
                h_masks = hs[0].new(time_seq, batch_size, 1).zero_()
            h_masks = h_masks.reshape(time_seq, batch_size, 1)
            mean, log_std, hs = self.net(obs, hs, h_masks)
            self.hs = hs
        else:
            mean, log_std = self.net(obs)
        mean_real = self.convert_ac_for_real(mean.detach().cpu().numpy())
        return mean_real, mean, dict(mean=mean, log_std=log_std, hs=hs)
|
AswinRetnakumar/Machina
|
machina/pds/multi_categorical_pd.py
|
import numpy as np
import torch
from torch.distributions import Categorical, kl_divergence
from machina.pds.base import BasePd
class MultiCategoricalPd(BasePd):
    """
    Multi Categorical probablistic distribution.

    ``params['pis']`` holds per-dimension category probabilities with the
    action dimensions stacked along axis -2; each dimension is treated as an
    independent Categorical.
    """

    def sample(self, params, sample_shape=torch.Size()):
        """Draw one categorical sample per action dimension."""
        pis = params['pis']
        draws = [Categorical(probs=pi).sample()
                 for pi in torch.chunk(pis, pis.size(-2), -2)]
        return torch.cat(draws, dim=-1)

    def llh(self, xs, params):
        """Log-likelihood of ``xs``: sum of per-dimension log-probs."""
        pis = params['pis']
        total = 0
        for x, pi in zip(torch.chunk(xs, xs.size(-1), -1),
                         torch.chunk(pis, pis.size(-2), -2)):
            total = total + Categorical(pi.squeeze(-2)).log_prob(x.squeeze(-1))
        return total

    def kl_pq(self, p_params, q_params):
        """KL(p || q): sum of per-dimension categorical KL divergences."""
        p_pis = p_params['pis']
        q_pis = q_params['pis']
        pairs = zip(torch.chunk(p_pis, p_pis.size(-2), -2),
                    torch.chunk(q_pis, q_pis.size(-2), -2))
        return sum(kl_divergence(Categorical(p_pi), Categorical(q_pi))
                   for p_pi, q_pi in pairs)

    def ent(self, params):
        """Entropy: sum of per-dimension categorical entropies."""
        pis = params['pis']
        return sum(torch.sum(Categorical(pi).entropy(), dim=-1)
                   for pi in torch.chunk(pis, pis.size(-2), -2))
|
AswinRetnakumar/Machina
|
machina/pols/mixture_gaussian_pol.py
|
import numpy as np
import torch
from torch.distributions import Categorical
from machina.pols import BasePol
from machina.pds.mixture_gaussian_pd import MixtureGaussianPd
from machina.utils import get_device
class MixtureGaussianPol(BasePol):
    """
    Policy with a mixture-of-Gaussians action distribution.

    Parameters
    ----------
    observation_space : gym.Space
    action_space : gym.Space
    net : torch.nn.Module
        Network returning (mixing weights pi, means, log stds).
    normalize_ac : bool
        If True, the output of network is spreaded for action_space.
    """

    def __init__(self, observation_space, action_space, net, normalize_ac=True):
        # BUG FIX: the previous call was
        #   BasePol.__init__(self, observation_space, action_space, normalize_ac)
        # which passed ``normalize_ac`` positionally where BasePol expects
        # ``net`` (compare GaussianPol.__init__), so the flag was silently
        # dropped. Pass net positionally and the flag by keyword.
        BasePol.__init__(self, observation_space, action_space, net,
                         normalize_ac=normalize_ac)
        self.net = net
        self.pd = MixtureGaussianPd()
        self.to(get_device())

    def forward(self, obs):
        """Sample an action; returns (numpy action, tensor action, pd params)."""
        pi, mean, log_std = self.net(obs)
        log_std = log_std.expand_as(mean)
        ac = self.pd.sample(dict(pi=pi, mean=mean, log_std=log_std))
        ac_real = self.convert_ac_for_real(ac.detach().cpu().numpy())
        return ac_real, ac, dict(pi=pi, mean=mean, log_std=log_std)

    def deterministic_ac_real(self, obs):
        """
        action for deployment

        Picks the mean of the most probable mixture component.

        NOTE(review): sibling policies (e.g. GaussianPol) return an
        ``(ac_real, ac, info)`` tuple here, and one_epi unpacks three values;
        this returns only ``mean_real``. Kept as-is to avoid breaking existing
        callers — confirm before using deterministic rollout with this policy.
        """
        pi, mean, _ = self.net(obs)
        _, i = torch.max(pi, 1)
        onehot = torch.zeros_like(mean)
        onehot = onehot.scatter_(-1, i.unsqueeze(-1), 1)
        mean_real = self.convert_ac_for_real(
            torch.sum(mean * onehot.unsqueeze(-1), 1).detach().cpu().numpy())
        return mean_real
|
AswinRetnakumar/Machina
|
machina/traj/epi_functional.py
|
<gh_stars>100-1000
"""
These are functions which is applied to episodes.
"""
import numpy as np
import copy
import torch
import torch.nn.functional as F
from machina.utils import get_device
from machina import loss_functional as lf
from machina.traj import Traj
def compute_vs(data, vf):
    """
    Computing Value Function.

    Stores ``vf``'s value estimate for every timestep under key 'vs' in
    each episode.

    Parameters
    ----------
    data : Traj or epis(dict of ndarray)
    vf : SVFunction

    Returns
    -------
    data : Traj or epi(dict of ndarray)
        Corresponding to input
    """
    if isinstance(data, Traj):
        epis = data.current_epis
    else:
        epis = data
    # Clear any RNN hidden state before evaluating whole episodes.
    vf.reset()
    with torch.no_grad():
        for epi in epis:
            if vf.rnn:
                # RNN vf expects a (time, batch=1, ...) layout.
                obs = torch.tensor(
                    epi['obs'], dtype=torch.float, device=get_device()).unsqueeze(1)
            else:
                obs = torch.tensor(
                    epi['obs'], dtype=torch.float, device=get_device())
            epi['vs'] = vf(obs)[0].detach().cpu().numpy()
    return data
def set_all_pris(data, pri):
    """
    Assign the same priority ``pri`` to every timestep of every episode.

    Parameters
    ----------
    data : Traj or epis(dict of ndarray)
    pri : torch.Tensor
        Priority value broadcast over each episode's length.

    Returns
    -------
    data : Traj or epi(dict of ndarray)
        Corresponding to input
    """
    epis = data.current_epis if isinstance(data, Traj) else data
    for epi in epis:
        epi['pris'] = pri.repeat(len(epi['obs'])).cpu().numpy()
    return data
def compute_pris(data, qf, targ_qf, pol, gamma, continuous=True, deterministic=True, rnn=False, sampling=1, alpha=0.6, epsilon=1e-6):
    """
    Compute prioritization (prioritized-replay weights) from per-step
    TD errors.

    Parameters
    ----------
    data : Traj or epis(dict of ndarray)
    qf : SAVfunction
    targ_qf : SAVfunction
    pol : Pol
    gamma : float
    continuous : bool
    deterministic : bool
    rnn : bool
    sampling : int
    alpha : float
        Priority exponent: pri = (|td| + epsilon) ** alpha.
    epsilon : float
        Small constant keeping priorities strictly positive.

    Returns
    -------
    data : Traj or epi(dict of ndarray)
        Corresponding to input

    Raises
    ------
    NotImplementedError
        If ``continuous`` is False.
    """
    if continuous:
        if isinstance(data, Traj):
            epis = data.current_epis
        else:
            epis = data
        for epi in epis:
            data_map = dict()
            keys = ['obs', 'acs', 'rews', 'next_obs', 'dones']
            for key in keys:
                data_map[key] = torch.tensor(epi[key], device=get_device())
            if rnn:
                qf.reset()
                targ_qf.reset()
                pol.reset()
                # RNN functions expect a (time, batch=1, ...) layout.
                keys = ['obs', 'acs', 'next_obs']
                for key in keys:
                    data_map[key] = data_map[key].unsqueeze(1)
            with torch.no_grad():
                bellman_loss = lf.bellman(
                    qf, targ_qf, pol, data_map, gamma, continuous, deterministic, sampling, reduction='none')
                # presumably lf.bellman returns td^2 / 2, hence sqrt(2*loss)
                # recovers |td| — TODO confirm against loss_functional.
                td_loss = torch.sqrt(bellman_loss*2)
                pris = (torch.abs(td_loss) + epsilon) ** alpha
                epi['pris'] = pris.cpu().numpy()
        return data
    else:
        raise NotImplementedError(
            "Only Q function with continuous action space is supported now.")
def compute_seq_pris(data, seq_length, eta=0.9):
    """
    Computing priorities of each sequence in episodes.

    A sequence's priority blends the max and mean of the absolute per-step
    priorities inside it: eta * max + (1 - eta) * mean.

    Parameters
    ----------
    data : Traj or epis(dict of ndarray)
        Episodes must already carry per-step 'pris'.
    seq_length : int
        Length of batch
    eta : float
        Max/mean blending coefficient.

    Returns
    -------
    data : Traj or epi(dict of ndarray)
        Corresponding to input
    """
    if isinstance(data, Traj):
        epis = data.current_epis
    else:
        epis = data
    for epi in epis:
        # One sequence per possible start index within the episode.
        n_seq = len(epi['pris']) - seq_length + 1
        abs_pris = np.abs(epi['pris'])
        seq_pris = np.array([eta * np.max(abs_pris[i:i+seq_length]) + (1 - eta) *
                             np.mean(abs_pris[i:i+seq_length]) for i in range(n_seq)], dtype='float32')
        # Zero-pad so 'seq_pris' matches the episode length.
        pad = np.zeros((seq_length - 1,), dtype='float32')
        epi['seq_pris'] = np.concatenate([seq_pris, pad])
    return data
def compute_rets(data, gamma):
    """
    Computing discounted cumulative returns.

    Parameters
    ----------
    data : Traj or epis(dict of ndarray)
    gamma : float
        Discount rate

    Returns
    -------
    data : Traj or epi(dict of ndarray)
        Corresponding to input
    """
    epis = data.current_epis if isinstance(data, Traj) else data
    for epi in epis:
        rews = epi['rews']
        rets = np.empty(len(rews), dtype=np.float32)
        running = 0
        # Walk backwards: ret[t] = rew[t] + gamma * ret[t + 1].
        for step in range(len(rews) - 1, -1, -1):
            running = rews[step] + gamma * running
            rets[step] = running
        epi['rets'] = rets
    return data
def compute_advs(data, gamma, lam):
    """
    Computing Advantage Function (GAE).

    Parameters
    ----------
    data : Traj or epis(dict of ndarray)
        Episodes must already carry value estimates under 'vs'.
    gamma : float
        Discount rate
    lam : float
        Bias-Variance trade-off parameter

    Returns
    -------
    data : Traj or epi(dict of ndarray)
        Corresponding to input
    """
    epis = data.current_epis if isinstance(data, Traj) else data
    for epi in epis:
        rews = epi['rews']
        # Append a terminal value of 0 so vs[t + 1] is defined at the end.
        vs = np.append(epi['vs'], 0)
        advs = np.empty(len(rews), dtype=np.float32)
        running = 0
        for step in range(len(rews) - 1, -1, -1):
            delta = rews[step] + gamma * vs[step + 1] - vs[step]
            running = delta + gamma * lam * running
            advs[step] = running
        epi['advs'] = advs
    return data
def compute_hs(data, func, hs_name='hs', input_acs=False):
    """
    Computing Hidden State of RNN Cell for every timestep.

    Parameters
    ----------
    data : Traj or epis(dict of ndarray)
    func :
        Any recurrent function, for example pols, vf and qf.
    hs_name : str
        Episode key under which the hidden states are stored.
    input_acs : bool
        If True, ``func`` is called with (obs, acs) instead of (obs,).

    Returns
    -------
    data : Traj or epi(dict of ndarray)
        Corresponding to input
    """
    if isinstance(data, Traj):
        epis = data.current_epis
    else:
        epis = data
    func.reset()
    with torch.no_grad():
        for epi in epis:
            obs = torch.tensor(
                epi['obs'], dtype=torch.float, device=get_device()).unsqueeze(1)
            time_seq = obs.size()[0]
            # Feed one step at a time so the info dict carries the hidden
            # state after each step.
            if input_acs:
                acs = torch.tensor(
                    epi['acs'], dtype=torch.float, device=get_device()).unsqueeze(1)
                hs_seq = [func(obs[i:i+1], acs[i:i+1])[-1]['hs']
                          for i in range(time_seq)]
            else:
                hs_seq = [func(obs[i:i+1])[-1]['hs'] for i in range(time_seq)]
            if isinstance(hs_seq[0], tuple):
                # e.g. LSTM (h, c) pairs.
                hs = np.array([[h.squeeze().detach().cpu().numpy()
                                for h in hs] for hs in hs_seq], dtype='float32')
            else:
                # BUG FIX: this branch previously read ``hs`` before assigning
                # it (``hs = np.array(hs.detach()...)``), raising
                # UnboundLocalError. Build the array from hs_seq like the
                # tuple branch does.
                hs = np.array([h.squeeze().detach().cpu().numpy()
                               for h in hs_seq], dtype='float32')
            epi[hs_name] = hs
    return data
def centerize_advs(data, eps=1e-6):
    """
    Centerizing Advantage Function: standardize advantages to zero mean and
    unit std across all episodes.

    Parameters
    ----------
    data : Traj or epis(dict of ndarray)
    eps : float
        Small value for preventing 0 division.

    Returns
    -------
    data : Traj or epi(dict of ndarray)
        Corresponding to input
    """
    epis = data.current_epis if isinstance(data, Traj) else data
    # Statistics are computed over the concatenation of all episodes.
    all_advs = np.concatenate([epi['advs'] for epi in epis])
    mean = np.mean(all_advs)
    std = np.std(all_advs)
    for epi in epis:
        epi['advs'] = (epi['advs'] - mean) / (std + eps)
    return data
def add_next_obs(data):
    """
    Adding next observations to episodes under key 'next_obs'.

    The array is the observations shifted by one step; the final entry wraps
    around to the first observation (presumably masked downstream via
    'dones' — confirm before relying on the last transition).

    Parameters
    ----------
    data : Traj or epis(dict of ndarray)

    Returns
    -------
    data : Traj or epi(dict of ndarray)
        Corresponding to input
    """
    epis = data.current_epis if isinstance(data, Traj) else data
    for epi in epis:
        seq = np.asarray(epi['obs'], dtype=np.float32)
        # [o1, ..., oN-1, o0]: shift left by one with wrap-around.
        epi['next_obs'] = np.concatenate([seq[1:], seq[:1]], axis=0)
    return data
def compute_h_masks(data):
    """
    Computing masks for hidden state.
    At the begining of an episode, it remarks 1 (0 elsewhere).

    Parameters
    ----------
    data : Traj or epis(dict of ndarray)

    Returns
    -------
    data : Traj or epi(dict of ndarray)
        Corresponding to input
    """
    epis = data.current_epis if isinstance(data, Traj) else data
    for epi in epis:
        masks = np.zeros_like(epi['rews'])
        masks[0] = 1
        epi['h_masks'] = masks
    return data
def compute_pseudo_rews(data, rew_giver, state_only=False):
    """
    Replace environment rewards with discriminator-based pseudo rewards;
    the original rewards are preserved under 'real_rews'.

    Parameters
    ----------
    data : Traj or epis(dict of ndarray)
    rew_giver :
        Discriminator returning (logits, ...) for (obs) or (obs, acs).
    state_only : bool
        If True, the discriminator receives only observations.

    Returns
    -------
    data : Traj or epi(dict of ndarray)
        Corresponding to input
    """
    if isinstance(data, Traj):
        epis = data.current_epis
    else:
        epis = data
    for epi in epis:
        obs = torch.tensor(epi['obs'], dtype=torch.float, device=get_device())
        # NOTE(review): the discriminator forward runs OUTSIDE no_grad, so a
        # graph is built for the logits — confirm this is intended.
        if state_only:
            logits, _ = rew_giver(obs)
        else:
            acs = torch.tensor(
                epi['acs'], dtype=torch.float, device=get_device())
            logits, _ = rew_giver(obs, acs)
        with torch.no_grad():
            # -log(sigmoid(-logits)) == softplus(logits): higher logit,
            # higher pseudo reward.
            rews = -F.logsigmoid(-logits).cpu().numpy()
        epi['real_rews'] = copy.deepcopy(epi['rews'])
        epi['rews'] = rews
    return data
def compute_diayn_rews(data, rew_giver):
    """
    Replace episode rewards with intrinsic rewards from ``rew_giver``.

    NOTE(review): unlike the other helpers in this module, this assumes
    ``data`` is a Traj (no plain-list branch), and the second return value
    ``info`` is unused — confirm whether a list branch is needed.

    Parameters
    ----------
    data : Traj
    rew_giver :
        Callable returning (rews, info) for a batch of observations.

    Returns
    -------
    data : Traj
    """
    epis = data.current_epis
    for epi in epis:
        obs = torch.as_tensor(
            epi['obs'], dtype=torch.float, device=get_device())
        with torch.no_grad():
            rews, info = rew_giver(obs)
        epi['rews'] = rews.cpu().numpy()
    return data
def train_test_split(epis, train_size):
    """
    Split a list of episodes into a train part and a test part.

    Parameters
    ----------
    epis : list
        Episodes to split.
    train_size : float
        Fraction (0..1) of episodes that goes into the train split.

    Returns
    -------
    train_epis, test_epis : (list, list)
        The first ``int(len(epis) * train_size)`` episodes and the
        remainder, kept in original order (no shuffling).
    """
    n_train = int(len(epis) * train_size)
    return list(epis[:n_train]), list(epis[n_train:])
def normalize_obs_and_acs(data, mean_obs=None, std_obs=None, mean_acs=None, std_acs=None, return_statistic=True, eps=1e-6):
    """
    Normalize observations, actions and next observations to zero mean and
    unit std, computing statistics from ``data`` unless they are given.

    Episodes are modified in place.

    Parameters
    ----------
    data : Traj or epis(dict of ndarray)
    mean_obs, std_obs, mean_acs, std_acs : ndarray or None
        Statistics to use; any left as None is computed from the data.
    return_statistic : bool
        If True, the statistics are returned alongside the data.
    eps : float
        Added to each std to avoid division by zero.

    Returns
    -------
    data [, mean_obs, std_obs, mean_acs, std_acs]
    """
    # NOTE(review): torch.no_grad() has no effect here — everything below is
    # numpy — presumably kept for symmetry with the other helpers.
    with torch.no_grad():
        if isinstance(data, Traj):
            epis = data.current_epis
        else:
            epis = data
        # Flatten all timesteps across episodes to compute the statistics.
        obs = []
        acs = []
        for epi in epis:
            obs.extend(epi['obs'])
            acs.extend(epi['acs'])
        obs = np.array(obs, dtype=np.float32)
        acs = np.array(acs, dtype=np.float32)
        if mean_obs is None:
            mean_obs = np.mean(obs, axis=0, keepdims=True)
        if std_obs is None:
            std_obs = np.std(
                obs, axis=0, keepdims=True) + eps
        if mean_acs is None:
            mean_acs = np.mean(acs, axis=0, keepdims=True)
        if std_acs is None:
            std_acs = np.std(
                acs, axis=0, keepdims=True) + eps
        for epi in epis:
            epi['obs'] = (epi['obs'] - mean_obs) / std_obs
            epi['acs'] = (epi['acs'] - mean_acs) / std_acs
            epi['next_obs'] = (
                epi['next_obs'] - mean_obs) / std_obs
        if return_statistic:
            return data, mean_obs, std_obs, mean_acs, std_acs
        else:
            return data
|
AswinRetnakumar/Machina
|
tests/test_env.py
|
"""
Test script for environment
"""
import unittest
import gym
import numpy as np
import torch
from torch import nn
from gym.wrappers import FlattenDictWrapper
from machina.envs import GymEnv, C2DEnv, flatten_to_dict
from simple_net import PolDictNet, VNet, QNet, VNetLSTM, PolNetDictLSTM, QNetLSTM
from machina.vfuncs import DeterministicSVfunc, DeterministicSAVfunc
from machina.pols import GaussianPol
from machina.traj import Traj
from machina.traj import epi_functional as ef
from machina.samplers import EpiSampler
from machina.algos import ppo_clip, sac, r2d2_sac
from gym.envs import register
# Register the dict-observation Pendulum variant exercised by these tests.
register(
    id='PendulumDictEnv-v0',
    entry_point='tests.env:PendulumDictEnv',
    max_episode_steps=200
)
def test_continuous2discrete():
    """Smoke test for C2DEnv: action-space conversion and one discretized step."""
    continuous_env = GymEnv('Pendulum-v0', record_video=False)
    discrete_env = C2DEnv(continuous_env, n_bins=10)
    # One discrete dimension with 10 bins for Pendulum's 1-D action space.
    assert np.all(discrete_env.action_space.nvec == np.array([10]))
    discrete_env.reset()
    # NOTE(review): two indices are passed although the action space is 1-D;
    # C2DEnv.step zips against low/high so the extra entry is ignored —
    # confirm this is intentional.
    out = discrete_env.step([3, 10])
def test_flatten2dict():
    """flatten_to_dict should exactly invert FlattenDictWrapper's flattening."""
    dict_env = gym.make('PendulumDictEnv-v0')
    dict_env = GymEnv(dict_env)
    dict_ob = dict_env.observation_space.sample()
    dict_observation_space = dict_env.observation_space
    env = FlattenDictWrapper(
        dict_env, dict_env.observation_space.spaces.keys())
    flatten_ob = env.observation(dict_ob)
    dict_keys = env.dict_keys
    recovered_dict_ob = flatten_to_dict(
        flatten_ob, dict_observation_space, dict_keys)
    # Both key order and values must round-trip.
    tf = []
    for (a_key, a_val), (b_key, b_val) in zip(dict_ob.items(), recovered_dict_ob.items()):
        tf.append(a_key == b_key)
        tf.append(all(a_val == b_val))
    assert all(tf)
class TestFlatten2DictPP0(unittest.TestCase):
    """PPO smoke tests on a flattened dict-observation env.

    NOTE(review): the class name says "PP0" (zero) — presumably a typo for
    "PPO"; kept unchanged so test discovery is unaffected.
    """

    def setUp(self):
        # Flatten the dict observation so standard (vector-input) nets work.
        dict_env = gym.make('PendulumDictEnv-v0')
        self.dict_observation_space = dict_env.observation_space
        env = FlattenDictWrapper(
            dict_env, dict_env.observation_space.spaces.keys())
        self.env = GymEnv(env)

    def test_learning(self):
        """One PPO-clip update on a feed-forward policy should run end to end."""
        pol_net = PolDictNet(self.dict_observation_space,
                             self.env.action_space, h1=32, h2=32)
        pol = GaussianPol(self.env.observation_space,
                          self.env.action_space, pol_net)
        vf_net = VNet(self.env.observation_space, h1=32, h2=32)
        vf = DeterministicSVfunc(self.env.observation_space, vf_net)
        sampler = EpiSampler(self.env, pol, num_parallel=1)
        optim_pol = torch.optim.Adam(pol_net.parameters(), 3e-4)
        optim_vf = torch.optim.Adam(vf_net.parameters(), 3e-4)
        epis = sampler.sample(pol, max_steps=32)
        traj = Traj()
        traj.add_epis(epis)
        # Standard PPO preprocessing pipeline.
        traj = ef.compute_vs(traj, vf)
        traj = ef.compute_rets(traj, 0.99)
        traj = ef.compute_advs(traj, 0.99, 0.95)
        traj = ef.centerize_advs(traj)
        traj = ef.compute_h_masks(traj)
        traj.register_epis()
        result_dict = ppo_clip.train(traj=traj, pol=pol, vf=vf, clip_param=0.2,
                                     optim_pol=optim_pol, optim_vf=optim_vf, epoch=1, batch_size=32)
        # Workers must be torn down explicitly.
        del sampler

    def test_learning_rnn(self):
        """Same as test_learning but with LSTM policy/value networks."""
        pol_net = PolNetDictLSTM(
            self.dict_observation_space, self.env.action_space, h_size=32, cell_size=32)
        pol = GaussianPol(self.env.observation_space,
                          self.env.action_space, pol_net, rnn=True)
        vf_net = VNetLSTM(self.env.observation_space, h_size=32, cell_size=32)
        vf = DeterministicSVfunc(self.env.observation_space, vf_net, rnn=True)
        sampler = EpiSampler(self.env, pol, num_parallel=1)
        optim_pol = torch.optim.Adam(pol_net.parameters(), 3e-4)
        optim_vf = torch.optim.Adam(vf_net.parameters(), 3e-4)
        epis = sampler.sample(pol, max_steps=400)
        traj = Traj()
        traj.add_epis(epis)
        traj = ef.compute_vs(traj, vf)
        traj = ef.compute_rets(traj, 0.99)
        traj = ef.compute_advs(traj, 0.99, 0.95)
        traj = ef.centerize_advs(traj)
        traj = ef.compute_h_masks(traj)
        traj.register_epis()
        result_dict = ppo_clip.train(traj=traj, pol=pol, vf=vf, clip_param=0.2,
                                     optim_pol=optim_pol, optim_vf=optim_vf, epoch=1, batch_size=2)
        del sampler
class TestFlatten2DictSAC(unittest.TestCase):
    """SAC smoke test on a flattened dict-observation env."""

    def setUp(self):
        dict_env = gym.make('PendulumDictEnv-v0')
        self.dict_observation_space = dict_env.observation_space
        env = FlattenDictWrapper(
            dict_env, dict_env.observation_space.spaces.keys())
        self.env = GymEnv(env)

    def test_learning(self):
        """One SAC update (twin Q networks + learned alpha) should run end to end."""
        pol_net = PolDictNet(self.dict_observation_space,
                             self.env.action_space, h1=32, h2=32)
        pol = GaussianPol(self.env.observation_space,
                          self.env.action_space, pol_net)
        # Twin Q functions, each with its own target network initialized to
        # the same weights.
        qf_net1 = QNet(self.env.observation_space, self.env.action_space)
        qf1 = DeterministicSAVfunc(
            self.env.observation_space, self.env.action_space, qf_net1)
        targ_qf_net1 = QNet(self.env.observation_space, self.env.action_space)
        targ_qf_net1.load_state_dict(qf_net1.state_dict())
        targ_qf1 = DeterministicSAVfunc(
            self.env.observation_space, self.env.action_space, targ_qf_net1)
        qf_net2 = QNet(self.env.observation_space, self.env.action_space)
        qf2 = DeterministicSAVfunc(
            self.env.observation_space, self.env.action_space, qf_net2)
        targ_qf_net2 = QNet(self.env.observation_space, self.env.action_space)
        targ_qf_net2.load_state_dict(qf_net2.state_dict())
        targ_qf2 = DeterministicSAVfunc(
            self.env.observation_space, self.env.action_space, targ_qf_net2)
        qfs = [qf1, qf2]
        targ_qfs = [targ_qf1, targ_qf2]
        # Entropy temperature is learned via its log.
        log_alpha = nn.Parameter(torch.zeros(()))
        sampler = EpiSampler(self.env, pol, num_parallel=1)
        optim_pol = torch.optim.Adam(pol_net.parameters(), 3e-4)
        optim_qf1 = torch.optim.Adam(qf_net1.parameters(), 3e-4)
        optim_qf2 = torch.optim.Adam(qf_net2.parameters(), 3e-4)
        optim_qfs = [optim_qf1, optim_qf2]
        optim_alpha = torch.optim.Adam([log_alpha], 3e-4)
        epis = sampler.sample(pol, max_steps=32)
        traj = Traj()
        traj.add_epis(epis)
        # Off-policy training needs next observations.
        traj = ef.add_next_obs(traj)
        traj.register_epis()
        result_dict = sac.train(
            traj,
            pol, qfs, targ_qfs, log_alpha,
            optim_pol, optim_qfs, optim_alpha,
            2, 32,
            0.01, 0.99, 2,
        )
        del sampler
class TestFlatten2DictR2D2SAC(unittest.TestCase):
    """R2D2-SAC should complete one training step on a dict env flattened to a Box env."""

    def setUp(self):
        # Wrap the dict-observation Pendulum so the agent sees a flat Box space.
        raw_env = gym.make('PendulumDictEnv-v0')
        self.dict_observation_space = raw_env.observation_space
        flat_env = FlattenDictWrapper(
            raw_env, raw_env.observation_space.spaces.keys())
        self.env = GymEnv(flat_env)

    def test_learning(self):
        ob_space = self.env.observation_space
        ac_space = self.env.action_space

        pol_net = PolNetDictLSTM(
            self.dict_observation_space, ac_space, h_size=32, cell_size=32)
        pol = GaussianPol(ob_space, ac_space, pol_net, rnn=True)

        # Two recurrent Q functions, each paired with a target initialized
        # to the same weights (creation order matches the unrolled version).
        qf_nets, qfs, targ_qfs = [], [], []
        for _ in range(2):
            q_net = QNetLSTM(ob_space, ac_space, h_size=32, cell_size=32)
            qfs.append(DeterministicSAVfunc(
                ob_space, ac_space, q_net, rnn=True))
            t_net = QNetLSTM(ob_space, ac_space, h_size=32, cell_size=32)
            t_net.load_state_dict(q_net.state_dict())
            targ_qfs.append(DeterministicSAVfunc(
                ob_space, ac_space, t_net, rnn=True))
            qf_nets.append(q_net)

        log_alpha = nn.Parameter(torch.zeros(()))

        sampler = EpiSampler(self.env, pol, num_parallel=1)

        optim_pol = torch.optim.Adam(pol_net.parameters(), 3e-4)
        optim_qfs = [torch.optim.Adam(q_net.parameters(), 3e-4)
                     for q_net in qf_nets]
        optim_alpha = torch.optim.Adam([log_alpha], 3e-4)

        epis = sampler.sample(pol, max_steps=32)
        traj = Traj()
        traj.add_epis(epis)
        traj = ef.add_next_obs(traj)
        # Initialize priorities and precompute hidden states for R2D2.
        traj = ef.set_all_pris(traj, traj.get_max_pri())
        traj = ef.compute_seq_pris(traj, 4)
        traj = ef.compute_h_masks(traj)
        for i, (qf, targ_qf) in enumerate(zip(qfs, targ_qfs)):
            traj = ef.compute_hs(
                traj, qf, hs_name='q_hs' + str(i), input_acs=True)
            traj = ef.compute_hs(
                traj, targ_qf, hs_name='targ_q_hs' + str(i), input_acs=True)
        traj.register_epis()

        r2d2_sac.train(
            traj,
            pol, qfs, targ_qfs, log_alpha,
            optim_pol, optim_qfs, optim_alpha,
            2, 32, 4, 2,
            0.01, 0.99, 2,
        )
        del sampler
if __name__ == '__main__':
    # NOTE(review): this runs a single standalone check (presumably defined
    # earlier in this file) instead of unittest.main(); confirm intended.
    test_continuous2discrete()
|
AswinRetnakumar/Machina
|
machina/__init__.py
|
<filename>machina/__init__.py
import pkg_resources
# Version is read from the installed distribution metadata ('machina-rl').
# NOTE(review): pkg_resources is deprecated in modern setuptools;
# importlib.metadata is the successor.
__version__ = pkg_resources.get_distribution('machina-rl').version
from machina import algos # NOQA
from machina import envs # NOQA
from machina import models # NOQA
from machina import noise # NOQA
from machina import optims # NOQA
from machina import pds # NOQA
from machina import pols # NOQA
from machina import prepro # NOQA
from machina import samplers # NOQA
from machina import traj # NOQA
from machina import vfuncs # NOQA
|
AswinRetnakumar/Machina
|
machina/optims/distributed_sgd.py
|
import torch
from torch.optim import SGD
import torch.distributed as dist
class DistributedSGD(SGD):
    """Distributed SGD optimizer.

    Gradients are averaged across all workers with an all-reduce before the
    plain SGD update, so every worker applies the same step.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)
        world_size (int, optional): if not given, automatically set
            dist.get_world_size()
    """

    def __init__(self, *args, **kwargs):
        # world_size is consumed here; all remaining arguments are SGD's.
        world_size = kwargs.pop('world_size', dist.get_world_size())
        super(DistributedSGD, self).__init__(
            *args, **kwargs)
        self.world_size = world_size

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        world_size = float(self.world_size)
        # Collect every existing gradient tensor in param-group order.
        grads = []
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grads.append(p.grad)
        # Sum the flattened gradients across workers, then divide by the
        # world size to obtain the mean gradient.
        flat_grads = torch.nn.utils.parameters_to_vector(grads)
        dist.all_reduce_multigpu([flat_grads])
        flat_grads /= world_size
        # Write the averaged values back into the original grad tensors.
        torch.nn.utils.vector_to_parameters(flat_grads, grads)
        loss = super(DistributedSGD, self).step(closure)
        return loss
|
AswinRetnakumar/Machina
|
machina/pols/__init__.py
|
<reponame>AswinRetnakumar/Machina
from machina.pols.base import BasePol
from machina.pols.gaussian_pol import GaussianPol
from machina.pols.mixture_gaussian_pol import MixtureGaussianPol
from machina.pols.deterministic_action_noise_pol import DeterministicActionNoisePol
from machina.pols.categorical_pol import CategoricalPol
from machina.pols.multi_categorical_pol import MultiCategoricalPol
from machina.pols.mpc_pol import MPCPol
from machina.pols.random_pol import RandomPol
from machina.pols.argmax_qf_pol import ArgmaxQfPol
|
AswinRetnakumar/Machina
|
machina/vfuncs/state_action_vfuncs/__init__.py
|
<filename>machina/vfuncs/state_action_vfuncs/__init__.py
from machina.vfuncs.state_action_vfuncs.base import BaseSAVfunc
from machina.vfuncs.state_action_vfuncs.deterministic_state_action_vfunc import DeterministicSAVfunc
from machina.vfuncs.state_action_vfuncs.cem_state_action_vfunc import CEMDeterministicSAVfunc
|
AswinRetnakumar/Machina
|
machina/models/base.py
|
import torch.nn as nn
from machina.utils import get_device
class BaseModel(nn.Module):
    """
    Base class of Model.

    Parameters
    ----------
    observation_space : gym.Space
    action_space : gym.Space
    net : torch.nn.Module
    rnn : bool
    data_parallel : bool or str
        If True, network computation is executed in parallel.
        If data_parallel is ddp, network computation is executed in distributed parallel.
    parallel_dim : int
        Splitted dimension in data parallel.
    """

    def __init__(self, observation_space, action_space, net, rnn=False, data_parallel=False, parallel_dim=0):
        nn.Module.__init__(self)
        self.observation_space = observation_space
        self.action_space = action_space
        self.net = net
        self.rnn = rnn
        # Hidden state for recurrent nets; populated lazily in forward().
        self.hs = None
        self.data_parallel = data_parallel
        if data_parallel:
            if data_parallel is True:
                self.dp_net = nn.DataParallel(self.net, dim=parallel_dim)
            elif data_parallel == 'ddp':
                self.net.to(get_device())
                self.dp_net = nn.parallel.DistributedDataParallel(
                    self.net, device_ids=[get_device()], dim=parallel_dim)
            else:
                raise ValueError(
                    'Bool and str(ddp) are allowed to be data_parallel.')
        # When True, subclasses route computation through dp_net.
        self.dp_run = False

    def __getstate__(self):
        """Return picklable state with the data-parallel wrapper removed."""
        # Local import: fixes a NameError — `copy` was never imported at
        # module level, so pickling a data-parallel model crashed here.
        import copy
        state = self.__dict__.copy()
        if 'dp_net' in state['_modules']:
            # Deep-copy the submodule dict so the live instance keeps its
            # dp_net while the pickled state drops it.
            _modules = copy.deepcopy(state['_modules'])
            del _modules['dp_net']
            state['_modules'] = _modules
        return state

    def __setstate__(self, state):
        """Restore state, discarding any stale data-parallel wrapper."""
        if 'dp_net' in state:
            state.pop('dp_net')
        self.__dict__.update(state)

    def reset(self):
        """
        reset for rnn's hidden state.
        """
        if self.rnn:
            self.hs = None

    def _check_obs_shape(self, obs):
        """
        Reshape input appropriately.

        Prepends singleton dimensions until obs has batch (and, for rnn,
        time) dimensions in front of the observation shape.
        """
        if self.rnn:
            additional_shape = 2
        else:
            additional_shape = 1
        if len(obs.shape) < additional_shape + len(self.observation_space.shape):
            for _ in range(additional_shape + len(self.observation_space.shape) - len(obs.shape)):
                obs = obs.unsqueeze(0)
        return obs

    def _check_acs_shape(self, acs):
        """
        Reshape input appropriately.

        Prepends singleton dimensions until acs has batch (and, for rnn,
        time) dimensions in front of the action shape.
        """
        if self.rnn:
            additional_shape = 2
        else:
            additional_shape = 1
        if len(acs.shape) < additional_shape + len(self.action_space.shape):
            for _ in range(additional_shape + len(self.action_space.shape) - len(acs.shape)):
                acs = acs.unsqueeze(0)
        return acs
|
AswinRetnakumar/Machina
|
machina/algos/behavior_clone.py
|
<gh_stars>100-1000
"""
This is an implementation of Behavioral Cloning
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from machina import loss_functional as lf
def update_pol(pol, optim_pol, batch):
    """Take one behavioral-cloning gradient step; return the scalar loss."""
    loss = lf.log_likelihood(pol, batch)
    optim_pol.zero_grad()
    loss.backward()
    optim_pol.step()
    return loss.detach().cpu().numpy()
def train(expert_traj, pol, optim_pol, batch_size):
    """Run one pass of behavioral cloning over the expert trajectory."""
    pol_losses = [update_pol(pol, optim_pol, batch)
                  for batch in expert_traj.iterate_once(batch_size)]
    return dict(PolLoss=pol_losses)
def test(expert_traj, pol):
    """Evaluate the policy's log-likelihood loss on the full expert batch."""
    pol.eval()
    batches = expert_traj.full_batch(epoch=1)
    for batch in batches:
        # Pure evaluation: no gradients needed.
        with torch.no_grad():
            pol_loss = lf.log_likelihood(pol, batch)
    return dict(TestPolLoss=[float(pol_loss.detach().cpu().numpy())])
|
AswinRetnakumar/Machina
|
tests/traj/test_traj.py
|
import unittest
import numpy as np
from machina.traj import Traj
from machina.envs import GymEnv
from machina.samplers import EpiSampler
from machina.pols.random_pol import RandomPol
class TestTraj(unittest.TestCase):
    """Exercises Traj's merging and batch-sampling APIs on random Pendulum data."""

    env = None
    traj = None

    @classmethod
    def setUpClass(cls):
        cls.env = GymEnv('Pendulum-v0')
        policy = RandomPol(cls.env.observation_space, cls.env.action_space)
        sampler = EpiSampler(cls.env, policy, num_parallel=1)
        episodes = sampler.sample(policy, max_steps=32)
        cls.traj = Traj()
        cls.traj.add_epis(episodes)
        cls.traj.register_epis()

    def test_add_traj(self):
        merged = Traj()
        merged.add_traj(self.traj)
        assert merged.num_epi == self.traj.num_epi
        assert merged.num_step == self.traj.num_step

    def test_random_batch_once(self):
        size = 32
        # Every combination of the optional arguments should be accepted.
        self.traj.random_batch_once(size, return_indices=False)
        self.traj.random_batch_once(size, return_indices=True)
        self.traj.random_batch_once(
            size, indices=np.arange(5), return_indices=False)
        self.traj.random_batch_once(
            size, indices=np.arange(5), return_indices=True)

    def test_random_batch(self):
        size = 32
        # Creating the iterator without consuming it must be harmless.
        self.traj.random_batch(size)
        for _ in self.traj.random_batch(size, return_indices=False):
            pass
        for batch, indices in self.traj.random_batch(size, return_indices=True):
            pass
|
AswinRetnakumar/Machina
|
machina/algos/diayn_sac.py
|
<filename>machina/algos/diayn_sac.py
"""
This is an implementation of Soft Actor Critic.
See https://arxiv.org/abs/1801.01290
"""
import torch
import torch.nn as nn
from machina import loss_functional as lf
from machina import logger
def calc_rewards(obskill, num_skill, discrim):
    """
    Compute DIAYN intrinsic rewards log q(z|s) - log p(z).

    Parameters
    ----------
    obskill : torch.Tensor
        Batch of observations with the one-hot skill vector concatenated
        as the last `num_skill` columns.
    num_skill : int
        Number of skills (width of the one-hot suffix).
    discrim : callable
        Discriminator mapping observations to (logits over skills, info dict).

    Returns
    -------
    rews : torch.Tensor
        Intrinsic reward per sample.
    info : dict
        Auxiliary outputs from the discriminator.
    """
    ob = obskill[:, :-num_skill]
    skill = obskill[:, -num_skill:]
    logit, info = discrim(ob)
    # log_softmax is numerically stabler than log(softmax(...)), which can
    # underflow to log(0) for confident logits.
    logqz = torch.sum(torch.log_softmax(logit, dim=1) * skill, dim=1)
    # p(z) is uniform over skills.
    logpz = -torch.log(torch.tensor(num_skill, dtype=torch.float))
    return logqz - logpz, info
def train(traj,
          pol, qfs, targ_qfs, log_alpha,
          optim_pol, optim_qfs, optim_alpha,
          epoch, batch_size,  # optimization hypers
          tau, gamma, sampling, discrim,
          num_skill, reparam=True
          ):
    """
    Train function for soft actor critic with DIAYN intrinsic rewards.

    Parameters
    ----------
    traj : Traj
        Off policy trajectory.
    pol : Pol
        Policy.
    qfs : list of SAVfunction
        Q functions.
    targ_qfs : list of SAVfunction
        Target Q functions.
    log_alpha : torch.Tensor
        Temperature parameter of entropy.
    optim_pol : torch.optim.Optimizer
        Optimizer for Policy.
    optim_qfs : list of torch.optim.Optimizer
        Optimizers for the Q functions.
    optim_alpha : torch.optim.Optimizer
        Optimizer for alpha.
    epoch : int
        Number of iteration.
    batch_size : int
        Number of batches.
    tau : float
        Target updating rate.
    gamma : float
        Discounting rate.
    sampling : int
        Number of sampling in calculating expectation.
    discrim : SVfunction
        Discriminator; its logits define the intrinsic reward.
    num_skill : int
        The number of skills.
    reparam : bool
        If True, the reparameterization trick is used for the policy loss.

    Returns
    -------
    result_dict : dict
        Dictionary which contains losses information.
    """
    pol_losses = []
    _qf_losses = []
    alpha_losses = []
    logger.log("Optimizing...")
    for batch in traj.random_batch(batch_size, epoch):
        # Replace the batch rewards with DIAYN intrinsic rewards; no
        # gradient flows into the discriminator here.
        with torch.no_grad():
            rews, info = calc_rewards(batch['obs'], num_skill, discrim)
            batch['rews'] = rews
        pol_loss, qf_losses, alpha_loss = lf.sac(
            pol, qfs, targ_qfs, log_alpha, batch, gamma, sampling, reparam)
        optim_pol.zero_grad()
        pol_loss.backward()
        optim_pol.step()
        for optim_qf, qf_loss in zip(optim_qfs, qf_losses):
            optim_qf.zero_grad()
            qf_loss.backward()
            optim_qf.step()
        optim_alpha.zero_grad()
        alpha_loss.backward()
        optim_alpha.step()
        # Polyak (soft) update of the target Q networks.
        for qf, targ_qf in zip(qfs, targ_qfs):
            for q, targ_q in zip(qf.parameters(), targ_qf.parameters()):
                targ_q.detach().copy_((1 - tau) * targ_q.detach() + tau * q.detach())
        pol_losses.append(pol_loss.detach().cpu().numpy())
        _qf_losses.append(
            (sum(qf_losses) / len(qf_losses)).detach().cpu().numpy())
        alpha_losses.append(alpha_loss.detach().cpu().numpy())
    logger.log("Optimization finished!")
    return dict(
        PolLoss=pol_losses,
        QfLoss=_qf_losses,
        AlphaLoss=alpha_losses
    )
|
AswinRetnakumar/Machina
|
machina/pds/base.py
|
class BasePd(object):
    """
    Base class of probabilistic distribution.

    Subclasses implement the distribution operations used by policies;
    every method here must be overridden.
    """

    def sample(self, params, sample_shape):
        """
        Draw samples of shape *sample_shape* from the distribution
        described by *params*.
        """
        raise NotImplementedError

    def llh(self, x, params):
        """
        Log likelihood of *x* under the distribution described by *params*.
        """
        raise NotImplementedError

    def kl_pq(self, p_params, q_params):
        """
        KL divergence KL(p || q) between the distributions described by
        *p_params* and *q_params*.
        """
        raise NotImplementedError

    def ent(self, params):
        """
        Entropy of the distribution described by *params*.
        """
        raise NotImplementedError
|
AswinRetnakumar/Machina
|
example/run_qtopt.py
|
<gh_stars>0
"""
An example of QT-Opt.
"""
import argparse
import copy
import json
import os
from pprint import pprint
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import gym
import machina as mc
from machina.pols import ArgmaxQfPol
from machina.noise import OUActionNoise
from machina.algos import qtopt
from machina.vfuncs import DeterministicSAVfunc, CEMDeterministicSAVfunc
from machina.envs import GymEnv
from machina.traj import Traj
from machina.traj import epi_functional as ef
from machina.samplers import EpiSampler
from machina import logger
from machina.utils import set_device, measure
from simple_net import QNet
# ---- Command-line arguments ----
parser = argparse.ArgumentParser()
parser.add_argument('--log', type=str, default='garbage',
                    help='Directory name of log.')
parser.add_argument('--env_name', type=str,
                    default='Pendulum-v0', help='Name of environment.')
parser.add_argument('--record', action='store_true',
                    default=False, help='If True, movie is saved.')
parser.add_argument('--seed', type=int, default=256)
parser.add_argument('--max_epis', type=int,
                    default=100000000, help='Number of episodes to run.')
parser.add_argument('--max_steps_off', type=int,
                    default=1000000000000, help='Number of episodes stored in off traj.')
parser.add_argument('--num_parallel', type=int, default=4,
                    help='Number of processes to sample.')
parser.add_argument('--cuda', type=int, default=-1, help='cuda device number.')
parser.add_argument('--data_parallel', action='store_true', default=False,
                    help='If True, inference is done in parallel on gpus.')
parser.add_argument('--max_steps_per_iter', type=int, default=4000,
                    help='Number of steps to use in an iteration.')
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--pol_lr', type=float, default=1e-4,
                    help='Policy learning rate.')
parser.add_argument('--qf_lr', type=float, default=1e-3,
                    help='Q function learning rate.')
parser.add_argument('--h1', type=int, default=32,
                    help='hidden size of layer1.')
parser.add_argument('--h2', type=int, default=32,
                    help='hidden size of layer2.')
parser.add_argument('--tau', type=float, default=0.0001,
                    help='Coefficient of target function.')
parser.add_argument('--gamma', type=float, default=0.9,
                    help='Discount factor.')
parser.add_argument('--lag', type=int, default=6000,
                    help='Lag of gradient steps of target function2.')
# CEM hyper-parameters used by the CEM-maximizing target Q function.
parser.add_argument('--num_iter', type=int, default=2,
                    help='Number of iteration of CEM.')
parser.add_argument('--num_sampling', type=int, default=60,
                    help='Number of samples sampled from Gaussian in CEM.')
parser.add_argument('--num_best_sampling', type=int, default=6,
                    help='Number of best samples used for fitting Gaussian in CEM.')
parser.add_argument('--multivari', action='store_true',
                    help='If true, Gaussian with diagonal covarince instead of Multivariate Gaussian matrix is used in CEM.')
parser.add_argument('--eps', type=float, default=0.2,
                    help='Probability of random action in epsilon-greedy policy.')
parser.add_argument('--loss_type', type=str,
                    choices=['mse', 'bce'], default='mse',
                    help='Choice for type of belleman loss.')
parser.add_argument('--save_memory', action='store_true',
                    help='If true, save memory while need more computation time by for-sentence.')
args = parser.parse_args()
# ---- Logging directories and argument dump ----
if not os.path.exists(args.log):
    os.mkdir(args.log)
with open(os.path.join(args.log, 'args.json'), 'w') as f:
    json.dump(vars(args), f)
pprint(vars(args))
if not os.path.exists(os.path.join(args.log, 'models')):
    os.mkdir(os.path.join(args.log, 'models'))
# ---- Reproducibility and device selection ----
np.random.seed(args.seed)
torch.manual_seed(args.seed)
device_name = 'cpu' if args.cuda < 0 else "cuda:{}".format(args.cuda)
device = torch.device(device_name)
set_device(device)
score_file = os.path.join(args.log, 'progress.csv')
logger.add_tabular_output(score_file)
# ---- Environment ----
env = GymEnv(args.env_name, log_dir=os.path.join(
    args.log, 'movie'), record_video=args.record)
env.env.seed(args.seed)
observation_space = env.observation_space
action_space = env.action_space
# ---- Networks: online Q, lagged Q and two target Qs (QT-Opt style) ----
qf_net = QNet(observation_space, action_space, args.h1, args.h2)
lagged_qf_net = QNet(observation_space, action_space, args.h1, args.h2)
lagged_qf_net.load_state_dict(qf_net.state_dict())
targ_qf1_net = QNet(observation_space, action_space, args.h1, args.h2)
targ_qf1_net.load_state_dict(qf_net.state_dict())
targ_qf2_net = QNet(observation_space, action_space, args.h1, args.h2)
targ_qf2_net.load_state_dict(lagged_qf_net.state_dict())
qf = DeterministicSAVfunc(observation_space, action_space, qf_net,
                          data_parallel=args.data_parallel)
lagged_qf = DeterministicSAVfunc(
    observation_space, action_space, lagged_qf_net, data_parallel=args.data_parallel)
# Target 1 maximizes Q over actions with CEM; the greedy policy below uses it.
targ_qf1 = CEMDeterministicSAVfunc(observation_space, action_space, targ_qf1_net, num_sampling=args.num_sampling,
                                   num_best_sampling=args.num_best_sampling, num_iter=args.num_iter,
                                   multivari=args.multivari, data_parallel=args.data_parallel, save_memory=args.save_memory)
targ_qf2 = DeterministicSAVfunc(
    observation_space, action_space, targ_qf2_net, data_parallel=args.data_parallel)
pol = ArgmaxQfPol(observation_space, action_space, targ_qf1, eps=args.eps)
sampler = EpiSampler(env, pol, num_parallel=args.num_parallel, seed=args.seed)
optim_qf = torch.optim.Adam(qf_net.parameters(), args.qf_lr)
off_traj = Traj(args.max_steps_off, traj_device='cpu')
# ---- Training-loop counters ----
total_epi = 0
total_step = 0
total_grad_step = 0
num_update_lagged = 0
max_rew = -1e6
# ---- Main training loop: sample, train, snapshot ----
while args.max_epis > total_epi:
    with measure('sample'):
        epis = sampler.sample(pol, max_steps=args.max_steps_per_iter)
    with measure('train'):
        on_traj = Traj(traj_device='cpu')
        on_traj.add_epis(epis)
        on_traj = ef.add_next_obs(on_traj)
        on_traj.register_epis()
        # Fold the freshly sampled data into the off-policy replay trajectory.
        off_traj.add_traj(on_traj)
        total_epi += on_traj.num_epi
        step = on_traj.num_step
        total_step += step
        # One gradient step per sampled environment step.
        epoch = step
        if args.data_parallel:
            qf.dp_run = True
            lagged_qf.dp_run = True
            targ_qf1.dp_run = True
            targ_qf2.dp_run = True
        result_dict = qtopt.train(
            off_traj, qf, lagged_qf, targ_qf1, targ_qf2,
            optim_qf, epoch, args.batch_size,
            args.tau, args.gamma, loss_type=args.loss_type
        )
        if args.data_parallel:
            qf.dp_run = False
            lagged_qf.dp_run = False
            targ_qf1.dp_run = False
            targ_qf2.dp_run = False
        total_grad_step += epoch
        # NOTE(review): num_update_lagged starts at 0, so this condition
        # (0 >= 0) fires on the very first iteration; confirm intended.
        if total_grad_step >= args.lag * num_update_lagged:
            logger.log('Updated lagged qf!!')
            lagged_qf_net.load_state_dict(qf_net.state_dict())
            num_update_lagged += 1
    rewards = [np.sum(epi['rews']) for epi in epis]
    mean_rew = np.mean(rewards)
    logger.record_results(args.log, result_dict, score_file,
                          total_epi, step, total_step,
                          rewards,
                          plot_title=args.env_name)
    # Snapshot the best-so-far models when mean reward improves.
    if mean_rew > max_rew:
        torch.save(pol.state_dict(), os.path.join(
            args.log, 'models', 'pol_max.pkl'))
        torch.save(qf.state_dict(), os.path.join(
            args.log, 'models', 'qf_max.pkl'))
        torch.save(targ_qf1.state_dict(), os.path.join(
            args.log, 'models', 'targ_qf1_max.pkl'))
        torch.save(targ_qf2.state_dict(), os.path.join(
            args.log, 'models', 'targ_qf2_max.pkl'))
        torch.save(optim_qf.state_dict(), os.path.join(
            args.log, 'models', 'optim_qf_max.pkl'))
        max_rew = mean_rew
    # Always keep the latest snapshot as well.
    torch.save(pol.state_dict(), os.path.join(
        args.log, 'models', 'pol_last.pkl'))
    torch.save(qf.state_dict(), os.path.join(
        args.log, 'models', 'qf_last.pkl'))
    torch.save(targ_qf1.state_dict(), os.path.join(
        args.log, 'models', 'targ_qf1_last.pkl'))
    torch.save(targ_qf2.state_dict(), os.path.join(
        args.log, 'models', 'targ_qf2_last.pkl'))
    torch.save(optim_qf.state_dict(), os.path.join(
        args.log, 'models', 'optim_qf_last.pkl'))
    del on_traj
del sampler
|
AswinRetnakumar/Machina
|
machina/algos/__init__.py
|
<gh_stars>100-1000
"""
- This package trains :class:`Policy<machina.pols.base.BasePol>`, :class:`V function<machina.vfuncs.state_action_vfuncs.base.BaseSAVfunc>`, :class:`Q function<machina.vfuncs.state_vfuncs.base.BaseSVfunc>`, etc. by using :py:mod:`loss_functional<machina.loss_functional>`.
- It is determined here which :py:mod:`loss_functional<machina.loss_functional>`, :py:meth:`iterater<machina.traj.traj.Traj.iterate>` are used.
- Also, It is determined how `Policy<machina.pols.base.BasePol>`, :class:`V function<machina.vfuncs.state_action_vfuncs.base.BaseSAVfunc>`, :class:`Q function<machina.vfuncs.state_vfuncs.base.BaseSVfunc>`, etc. are updated.
"""
from machina.algos import airl # NOQA
from machina.algos import behavior_clone # NOQA
from machina.algos import ddpg # NOQA
from machina.algos import diayn # NOQA
from machina.algos import diayn_sac # NOQA
from machina.algos import gail # NOQA
from machina.algos import mpc # NOQA
from machina.algos import on_pol_teacher_distill # NOQA
from machina.algos import ppo_clip # NOQA
from machina.algos import ppo_kl # NOQA
from machina.algos import prioritized_ddpg # NOQA
from machina.algos import qtopt # NOQA
from machina.algos import r2d2_sac # NOQA
from machina.algos import sac # NOQA
from machina.algos import svg # NOQA
from machina.algos import trpo # NOQA
from machina.algos import vpg # NOQA
|
AswinRetnakumar/Machina
|
machina/algos/ppo_clip.py
|
"""
This is an implementation of Proximal Policy Optimization
in which gradient is clipped by the size especially.
See https://arxiv.org/abs/1707.06347
"""
import torch
import torch.nn as nn
from machina import loss_functional as lf
from machina import logger
def update_pol(pol, optim_pol, batch, clip_param, ent_beta, max_grad_norm):
    """
    Take one clipped-PPO gradient step on the policy.

    Parameters
    ----------
    pol : Pol
        Policy being optimized.
    optim_pol : torch.optim.Optimizer
        Optimizer for the policy.
    batch : dict
        Batch of trajectory data.
    clip_param : float
        Clipping ratio of the surrogate objective.
    ent_beta : float
        Entropy bonus coefficient.
    max_grad_norm : float
        Gradient-norm clipping threshold.

    Returns
    -------
    ndarray
        Scalar policy loss value.
    """
    loss = lf.pg_clip(pol, batch, clip_param, ent_beta)
    optim_pol.zero_grad()
    loss.backward()
    # Clip the gradient norm before stepping to stabilize updates.
    torch.nn.utils.clip_grad_norm_(pol.parameters(), max_grad_norm)
    optim_pol.step()
    return loss.detach().cpu().numpy()
def update_vf(vf, optim_vf, batch, clip_param, clip, max_grad_norm):
    """
    Take one Monte-Carlo regression step on the V function.

    Parameters
    ----------
    vf : SVfunction
        V function being optimized.
    optim_vf : torch.optim.Optimizer
        Optimizer for the V function.
    batch : dict
        Batch of trajectory data.
    clip_param : float
        Clipping ratio of the objective.
    clip : bool
        If True, the V function is trained with the clipped objective too.
    max_grad_norm : float
        Gradient-norm clipping threshold.

    Returns
    -------
    ndarray
        Scalar V-function loss value.
    """
    loss = lf.monte_carlo(vf, batch, clip_param, clip)
    optim_vf.zero_grad()
    loss.backward()
    # Clip the gradient norm before stepping to stabilize updates.
    torch.nn.utils.clip_grad_norm_(vf.parameters(), max_grad_norm)
    optim_vf.step()
    return loss.detach().cpu().numpy()
def train(traj, pol, vf,
          optim_pol, optim_vf,
          epoch, batch_size, num_epi_per_seq=1,  # optimization hypers
          clip_param=0.2, ent_beta=1e-3,
          max_grad_norm=0.5,
          clip_vfunc=False
          ):
    """
    Train function for proximal policy optimization (clip).

    Parameters
    ----------
    traj : Traj
        On policy trajectory.
    pol : Pol
        Policy.
    vf : SVfunction
        V function.
    optim_pol : torch.optim.Optimizer
        Optimizer for Policy.
    optim_vf : torch.optim.Optimizer
        Optimizer for V function.
    epoch : int
        Number of iteration.
    batch_size : int
        Number of batches.
    num_epi_per_seq : int
        Number of episodes in one sequence for rnn.
    clip_param : float
        Clipping ratio of objective function.
    ent_beta : float
        Entropy coefficient.
    max_grad_norm : float
        Maximum gradient norm.
    clip_vfunc : bool
        If True, vfunc is also updated by clipped objective function.

    Returns
    -------
    result_dict : dict
        Dictionary which contains losses information.
    """
    pol_losses = []
    vf_losses = []
    logger.log("Optimizing...")
    # Recurrent policies iterate over episode sequences; feed-forward
    # policies iterate over flat minibatches.
    if pol.rnn:
        iterator = traj.iterate_rnn(
            batch_size=batch_size, num_epi_per_seq=num_epi_per_seq, epoch=epoch)
    else:
        iterator = traj.iterate(batch_size, epoch)
    for batch in iterator:
        pol_losses.append(update_pol(
            pol, optim_pol, batch, clip_param, ent_beta, max_grad_norm))
        vf_losses.append(update_vf(
            vf, optim_vf, batch, clip_param, clip_vfunc, max_grad_norm))
    logger.log("Optimization finished!")
    return dict(PolLoss=pol_losses, VfLoss=vf_losses)
|
AswinRetnakumar/Machina
|
example/make_expert_epis.py
|
"""
Script for making file of expert epis.
"""
import argparse
import json
import os
from pprint import pprint
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import gym
import machina as mc
from machina.pols import GaussianPol, CategoricalPol, MultiCategoricalPol, DeterministicActionNoisePol
from machina.noise import OUActionNoise
from machina.envs import GymEnv, C2DEnv
from machina.samplers import EpiSampler
from machina import logger
from machina.utils import measure, set_device
from simple_net import PolNet, VNet, PolNetLSTM, VNetLSTM
# ---- Command-line arguments ----
parser = argparse.ArgumentParser()
parser.add_argument('--pol_dir', type=str, default='../data/expert_pols',
                    help='Directory path storing file of expert policy model.')
parser.add_argument('--pol_fname', type=str, default='pol_max.pkl',
                    help='File name of expert policy model.')
parser.add_argument('--epis_dir', type=str, default='../data/expert_epis',
                    help='Directory path to store file of expert trajectory.')
parser.add_argument('--epis_fname', type=str, default='',
                    help='File name of expert trajectory.')
parser.add_argument('--env_name', type=str,
                    default='Pendulum-v0', help='Name of environment.')
parser.add_argument('--c2d', action='store_true',
                    default=False, help='If True, action is discretized.')
parser.add_argument('--record', action='store_true',
                    default=False, help='If True, movie is saved.')
parser.add_argument('--seed', type=int, default=256)
parser.add_argument('--max_epis', type=int,
                    default=100000000, help='Number of episodes to run.')
parser.add_argument('--num_parallel', type=int, default=1,
                    help='Number of processes to sample.')
parser.add_argument('--cuda', type=int, default=-1, help='cuda device number.')
parser.add_argument('--rnn', action='store_true',
                    default=False, help='If True, network is reccurent.')
parser.add_argument('--pol_h1', type=int, default=100,
                    help='Hidden size of layer1 of policy.')
parser.add_argument('--pol_h2', type=int, default=100,
                    help='Hidden size of layer2 of policy.')
parser.add_argument('--num_epis', type=int, default=100,
                    help='Number of episodes of expert trajectories.')
parser.add_argument('--ddpg', action='store_true',
                    default=False, help='If True, policy for DDPG is used.')
args = parser.parse_args()
# ---- Output directories, argument dump and reproducibility ----
if not os.path.exists(args.pol_dir):
    os.mkdir(args.pol_dir)
with open(os.path.join(args.pol_dir, 'args.json'), 'w') as f:
    json.dump(vars(args), f)
pprint(vars(args))
if not os.path.exists(os.path.join(args.epis_dir)):
    os.mkdir(args.epis_dir)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
device_name = 'cpu' if args.cuda < 0 else "cuda:{}".format(args.cuda)
device = torch.device(device_name)
set_device(device)
# ---- Environment ----
env = GymEnv(args.env_name, log_dir=os.path.join(
    args.pol_dir, 'movie'), record_video=args.record)
env.env.seed(args.seed)
if args.c2d:
    # Discretize the continuous action space.
    env = C2DEnv(env)
observation_space = env.observation_space
action_space = env.action_space
# ---- Build a policy matching the type of the saved expert model ----
if args.ddpg:
    pol_net = PolNet(observation_space, action_space, args.pol_h1,
                     args.pol_h2, deterministic=True)
    noise = OUActionNoise(action_space.shape)
    pol = DeterministicActionNoisePol(
        observation_space, action_space, pol_net, noise)
else:
    if args.rnn:
        pol_net = PolNetLSTM(observation_space, action_space,
                             h_size=256, cell_size=256)
    else:
        pol_net = PolNet(observation_space, action_space)
    if isinstance(action_space, gym.spaces.Box):
        pol = GaussianPol(observation_space, action_space, pol_net, args.rnn)
    elif isinstance(action_space, gym.spaces.Discrete):
        pol = CategoricalPol(
            observation_space, action_space, pol_net, args.rnn)
    elif isinstance(action_space, gym.spaces.MultiDiscrete):
        pol = MultiCategoricalPol(
            observation_space, action_space, pol_net, args.rnn)
    else:
        raise ValueError('Only Box, Discrete, and MultiDiscrete are supported')
sampler = EpiSampler(env, pol, num_parallel=args.num_parallel, seed=args.seed)
# ---- Load the expert weights, roll out episodes and pickle them ----
with open(os.path.join(args.pol_dir, args.pol_fname), 'rb') as f:
    pol.load_state_dict(torch.load(
        f, map_location=lambda storage, location: storage))
epis = sampler.sample(pol, max_epis=args.num_epis)
filename = args.epis_fname if len(
    args.epis_fname) != 0 else env.env.spec.id + '_{}epis.pkl'.format(len(epis))
with open(os.path.join(args.epis_dir, filename), 'wb') as f:
    pickle.dump(epis, f)
rewards = [np.sum(epi['rews']) for epi in epis]
mean_rew = np.mean(rewards)
logger.log('expert_score={}'.format(mean_rew))
del sampler
|
AswinRetnakumar/Machina
|
machina/models/deterministic_state_model.py
|
"""
Deterministic State Dynamics Model
"""
from machina.models.base import BaseModel
from machina.utils import get_device
class DeterministicSModel(BaseModel):
    """
    Deterministic version of State Dynamics Model.

    Given an observation and an action, the net outputs a single
    deterministic prediction (presumably the observation delta, given the
    name ``d_ob`` — confirm against callers).

    Parameters
    ----------
    observation_space : gym.Space
    action_space : gym.Space
    net : torch.nn.Module
    rnn : bool
    data_parallel : bool or str
        If True, network computation is executed in parallel.
        If data_parallel is ddp, network computation is executed in distributed parallel.
    parallel_dim : int
        Splitted dimension in data parallel.
    """

    def __init__(self, observation_space, action_space, net, rnn=False, data_parallel=False, parallel_dim=0):
        super().__init__(observation_space, action_space,
                         net, rnn, data_parallel, parallel_dim)
        self.to(get_device())

    def forward(self, obs, acs, hs=None, h_masks=None):
        """
        Run the dynamics net on *obs* and *acs*.

        Parameters
        ----------
        obs : torch.Tensor
        acs : torch.Tensor
        hs : RNN hidden state or None
            When None, the state stored on the model (or a fresh one) is
            used.  Only meaningful when ``self.rnn`` is True.
        h_masks : torch.Tensor or None
            Masks resetting the hidden state; zeros (no reset) by default.

        Returns
        -------
        d_ob : torch.Tensor
        info : dict
            The prediction under key ``mean``.
        """
        obs = self._check_obs_shape(obs)
        acs = self._check_acs_shape(acs)
        if self.rnn:
            time_seq, batch_size, *_ = obs.shape
            if hs is None:
                if self.hs is None:
                    # Lazily create an initial hidden state for this batch size.
                    self.hs = self.net.init_hs(batch_size)
                hs = self.hs
            if h_masks is None:
                # No masks supplied: never reset the hidden state mid-sequence.
                h_masks = hs[0].new(time_seq, batch_size, 1).zero_()
            h_masks = h_masks.reshape(time_seq, batch_size, 1)
            d_ob, hs = self.net(obs, acs, hs, h_masks)
            # Persist the updated hidden state for the next forward call.
            self.hs = hs
        else:
            d_ob = self.net(obs, acs)
        return d_ob, dict(mean=d_ob)
|
AswinRetnakumar/Machina
|
machina/algos/vpg.py
|
"""
This is an implementation of Vanilla Policy Gradient.
"""
import torch
import torch.nn as nn
from machina import loss_functional as lf
from machina import logger
def update_pol(pol, optim_pol, batch):
    """One vanilla policy-gradient step; returns the scalar loss."""
    loss = lf.pg(pol, batch)
    optim_pol.zero_grad()
    loss.backward()
    optim_pol.step()
    return loss.detach().cpu().numpy()
def update_vf(vf, optim_vf, batch):
    """One Monte-Carlo regression step on the V function; returns the loss."""
    loss = lf.monte_carlo(vf, batch)
    optim_vf.zero_grad()
    loss.backward()
    optim_vf.step()
    return loss.detach().cpu().numpy()
def train(traj, pol, vf,
          optim_pol, optim_vf,
          epoch, batch_size,  # optimization hypers
          large_batch,
          ):
    """
    Train function for vanilla policy gradient.

    Parameters
    ----------
    traj : Traj
        On policy trajectory.
    pol : Pol
        Policy.
    vf : SVfunction
        V function.
    optim_pol : torch.optim.Optimizer
        Optimizer for Policy.
    optim_vf : torch.optim.Optimizer
        Optimizer for V function.
    epoch : int
        Number of iteration.
    batch_size : int
        Number of batches.  Ignored when large_batch is True.
    large_batch : bool
        If True, batch is provided as whole trajectory.

    Returns
    -------
    result_dict : dict
        Dictionary which contains losses information.
    """
    pol_losses = []
    vf_losses = []
    logger.log("Optimizing...")
    # Both modes share the same update body; only the iterator differs
    # (previously the loop was duplicated verbatim for each branch).
    if large_batch:
        iterator = traj.full_batch(epoch)
    else:
        iterator = traj.iterate(batch_size, epoch)
    for batch in iterator:
        pol_losses.append(update_pol(pol, optim_pol, batch))
        vf_losses.append(update_vf(vf, optim_vf, batch))
    logger.log("Optimization finished!")
    return dict(PolLoss=pol_losses, VfLoss=vf_losses)
|
AswinRetnakumar/Machina
|
machina/utils.py
|
<filename>machina/utils.py<gh_stars>0
import contextlib
import redis
import torch
import torch.autograd as autograd
from machina import logger
_DEVICE = torch.device('cpu')
_REDIS = None
def make_redis(redis_host, redis_port):
    """Create a StrictRedis client for the given host/port and install it globally."""
    r = redis.StrictRedis(redis_host, redis_port)
    set_redis(r)
def set_redis(r):
    """Install ``r`` as the process-wide redis client returned by ``get_redis``."""
    global _REDIS
    _REDIS = r
def get_redis():
    """Return the process-wide redis client (None if ``set_redis`` was never called)."""
    return _REDIS
def _int(v):
try:
new_v = int(v)
except:
new_v = -1
return new_v
def set_device(device):
    """Set the process-wide torch device returned by ``get_device``."""
    global _DEVICE
    _DEVICE = device
def get_device():
    """Return the process-wide torch device (defaults to CPU)."""
    return _DEVICE
@contextlib.contextmanager
def cpu_mode():
    """Context manager that temporarily forces the global device to CPU.

    The previous device is restored even if the managed body raises.
    """
    global _DEVICE
    tmp = _DEVICE
    _DEVICE = torch.device('cpu')
    try:
        yield
    finally:
        # Without try/finally an exception inside the body would leave the
        # global device permanently stuck on CPU.
        _DEVICE = tmp
@contextlib.contextmanager
def measure(name):
    """Context manager that logs the wall-clock duration of its body as ``name``."""
    import time
    s = time.time()
    try:
        yield
    finally:
        # Log the elapsed time even when the body raises, so timing
        # information is not lost on failure paths.
        e = time.time()
        logger.log("{}: {:.4f}sec".format(name, e-s))
def detach_tensor_dict(d):
    """Return a copy of ``d`` with every tensor detached from the autograd graph.

    ``None`` values are dropped. Tuple values (e.g. LSTM hidden states stored
    as ``(h, c)``) are detached element-wise.

    Parameters
    ----------
    d : dict
        Mapping of names to tensors, tuples of tensors, or None.

    Returns
    -------
    dict
        New dict with detached values; the input dict is not modified.
    """
    _d = dict()
    # Iterate items() instead of keys() + repeated lookups.
    for key, value in d.items():
        if value is None:
            continue
        if isinstance(value, tuple):
            # Generalizes the original 2-tuple handling to any tuple length,
            # behaving identically for the (h, c) pairs used by RNN policies.
            _d[key] = tuple(v.detach() for v in value)
            continue
        _d[key] = value.detach()
    return _d
|
AswinRetnakumar/Machina
|
tests/test_cloud_pickle.py
|
<filename>tests/test_cloud_pickle.py
import unittest
import numpy as np
import cloudpickle
from machina.traj import Traj
from machina.envs import GymEnv, C2DEnv
from machina.samplers import EpiSampler
from machina.pols import RandomPol, GaussianPol, MultiCategoricalPol, CategoricalPol, DeterministicActionNoisePol, MPCPol, ArgmaxQfPol
from machina.vfuncs import DeterministicSAVfunc, DeterministicSVfunc
from machina.utils import make_redis, get_redis
from simple_net import PolNet, VNet, QNet, ModelNet
def rew_func(next_obs, acs, mean_obs=0., std_obs=1., mean_acs=0., std_acs=1.):
    """Pendulum reward model used by the MPC policy.

    De-normalizes observations/actions, then penalizes the arccos of the first
    observation column, the square of the third column (clamped to +/-8), and
    action effort. Columns presumably correspond to Pendulum-v0's
    (cos theta, sin theta, theta_dot) — TODO confirm against the env.
    """
    # Bug fix: this module never imports torch at top level, so calling this
    # function raised NameError. Import locally to keep the file's imports
    # untouched.
    import torch
    next_obs = next_obs * std_obs + mean_obs
    acs = acs * std_acs + mean_acs
    # Pendulum
    rews = -(torch.acos(next_obs[:, 0].clamp(min=-1, max=1))**2 +
             0.1*(next_obs[:, 2].clamp(min=-8, max=8)**2) + 0.001 * acs.squeeze(-1)**2)
    rews = rews.squeeze(0)
    return rews
class TestCloudPickle(unittest.TestCase):
    """Check that machina objects survive a cloudpickle round trip via redis."""

    @classmethod
    def setUpClass(cls):
        env = GymEnv('Pendulum-v0')
        cls.env = env
        # Bug fix: the original referenced ``cls.env`` and an undefined ``pol``
        # before they existed; build the random policy first and sample with it.
        random_pol = RandomPol(env.observation_space, env.action_space)
        sampler = EpiSampler(env, random_pol, num_parallel=1)
        epis = sampler.sample(random_pol, max_steps=32)
        traj = Traj()
        traj.add_epis(epis)
        traj.register_epis()
        cls.num_step = traj.num_step
        make_redis('localhost', '6379')
        cls.r = get_redis()
        # Bug fix: redis can only store bytes/str/numbers, and tearDownClass
        # loads every key with cloudpickle.loads — so env and traj must be
        # pickled like every other payload.
        cls.r.set('env', cloudpickle.dumps(env))
        cls.r.set('traj', cloudpickle.dumps(traj))
        pol_net = PolNet(env.observation_space, env.action_space)
        gpol = GaussianPol(env.observation_space, env.action_space, pol_net)
        pol_net = PolNet(env.observation_space,
                         env.action_space, deterministic=True)
        dpol = DeterministicActionNoisePol(
            env.observation_space, env.action_space, pol_net)
        model_net = ModelNet(env.observation_space, env.action_space)
        mpcpol = MPCPol(env.observation_space,
                        env.action_space, model_net, rew_func)
        q_net = QNet(env.observation_space, env.action_space)
        qfunc = DeterministicSAVfunc(
            env.observation_space, env.action_space, q_net)
        aqpol = ArgmaxQfPol(env.observation_space, env.action_space, qfunc)
        v_net = VNet(env.observation_space)
        vfunc = DeterministicSVfunc(env.observation_space, v_net)
        cls.r.set('gpol', cloudpickle.dumps(gpol))
        cls.r.set('dpol', cloudpickle.dumps(dpol))
        cls.r.set('mpcpol', cloudpickle.dumps(mpcpol))
        cls.r.set('qfunc', cloudpickle.dumps(qfunc))
        cls.r.set('aqpol', cloudpickle.dumps(aqpol))
        cls.r.set('vfunc', cloudpickle.dumps(vfunc))
        c2d = C2DEnv(env)
        pol_net = PolNet(c2d.observation_space, c2d.action_space)
        # Bug fix: the multi-categorical policy must use the discretized
        # spaces its network was built for, not the continuous env spaces.
        mcpol = MultiCategoricalPol(
            c2d.observation_space, c2d.action_space, pol_net)
        cls.r.set('mcpol', cloudpickle.dumps(mcpol))

    @classmethod
    def tearDownClass(cls):
        # Loading each payload verifies that it unpickles without error.
        for key in ('env', 'traj', 'gpol', 'dpol', 'mpcpol',
                    'qfunc', 'aqpol', 'vfunc', 'mcpol'):
            cloudpickle.loads(cls.r.get(key))
if __name__ == '__main__':
unittest.main()
|
AswinRetnakumar/Machina
|
machina/algos/prioritized_ddpg.py
|
<filename>machina/algos/prioritized_ddpg.py
"""
This is an implementation of Prioritized Experience Replay.
See https://arxiv.org/abs/1511.05952
"""
import torch
import torch.nn as nn
from machina import loss_functional as lf
from machina.traj import traj_functional as tf
from machina import logger
def train(traj,
          pol, targ_pol, qf, targ_qf,
          optim_pol, optim_qf,
          epoch, batch_size,  # optimization hypers
          tau, gamma
          ):
    """Train DDPG with prioritized experience replay.

    Samples prioritized minibatches from ``traj``, updates the Q function with
    a Bellman loss and the policy with an action-gradient loss, Polyak-averages
    both target networks with rate ``tau``, and writes updated TD priorities
    back into ``traj``. Returns a dict of per-batch loss histories.
    """
    pol_losses = []
    qf_losses = []
    logger.log("Optimizing...")
    for batch, indices in traj.prioritized_random_batch(batch_size, epoch, return_indices=True):
        # Per-sample Bellman losses are needed to derive new TD priorities.
        qf_bellman_loss = lf.bellman(
            qf, targ_qf, targ_pol, batch, gamma, reduction='none')
        # NOTE(review): sqrt(2*loss) recovers |td error| only if lf.bellman
        # returns 0.5 * td^2 per sample — confirm against loss_functional.
        td_loss = torch.sqrt(qf_bellman_loss*2)
        qf_bellman_loss = torch.mean(qf_bellman_loss)
        optim_qf.zero_grad()
        qf_bellman_loss.backward()
        optim_qf.step()
        pol_loss = lf.ag(pol, qf, batch)
        optim_pol.zero_grad()
        pol_loss.backward()
        optim_pol.step()
        # Polyak-average each network's parameters into its target.
        for p, targ_p in zip(pol.parameters(), targ_pol.parameters()):
            targ_p.detach().copy_((1 - tau) * targ_p.detach() + tau * p.detach())
        for q, targ_q in zip(qf.parameters(), targ_qf.parameters()):
            targ_q.detach().copy_((1 - tau) * targ_q.detach() + tau * q.detach())
        qf_losses.append(qf_bellman_loss.detach().cpu().numpy())
        pol_losses.append(pol_loss.detach().cpu().numpy())
        # Refresh the sampled transitions' priorities from the new TD errors.
        traj = tf.update_pris(traj, td_loss, indices)
    logger.log("Optimization finished!")
    return {'PolLoss': pol_losses, 'QfLoss': qf_losses}
|
AswinRetnakumar/Machina
|
machina/pols/random_pol.py
|
<filename>machina/pols/random_pol.py
import numpy as np
import torch
from machina.pols import BasePol
class RandomPol(BasePol):
    """
    Policy that samples actions uniformly at random from the action space.

    Parameters
    ----------
    observation_space : gym.Space
        observation's space
    action_space : gym.Space
        action's space.
        This should be gym.spaces.Box
    net : torch.nn.Module
    rnn : bool
    normalize_ac : bool
        If True, the output of network is spreaded for action_space.
        In this situation the output of network is expected to be in -1~1.
    data_parallel : bool
        If True, network computation is executed in parallel.
    parallel_dim : int
        Splitted dimension in data parallel.
    """

    def __init__(self, observation_space, action_space, net=None, rnn=False, normalize_ac=True, data_parallel=False, parallel_dim=0):
        BasePol.__init__(self, observation_space, action_space, net, rnn=rnn, normalize_ac=normalize_ac,
                         data_parallel=data_parallel, parallel_dim=parallel_dim)

    def forward(self, ob):
        # The observation is ignored; draw uniformly over the Box bounds.
        space = self.action_space
        ac_real = np.random.uniform(
            space.low, space.high, space.shape).astype(np.float32)
        ac = torch.tensor(ac_real)
        return ac_real, ac, dict(mean=torch.zeros_like(ac))
|
AswinRetnakumar/Machina
|
machina/traj/traj_functional.py
|
"""
These are functions which is applied to trajectory.
"""
import time
import cloudpickle
import torch
import torch.distributed as dist
import numpy as np
from machina import loss_functional as lf
from machina.utils import get_device, get_redis, _int
def sync(traj, master_rank=0):
    """
    Synchronize trajs. This function is used in multi node situation, and uses redis.

    The master rank publishes its pickled traj and per-rank trigger flags set
    to '1'; each worker waits for its flag, copies the master traj into its
    own, then resets its flag to '0'. The master blocks until every flag reads
    '0' again, so all ranks leave this function with the same traj contents.

    Parameters
    ----------
    traj : Traj
    master_rank : int
        master_rank's traj is scattered
    Returns
    -------
    traj : Traj
    """
    rank = traj.rank
    r = get_redis()
    if rank == master_rank:
        obj = cloudpickle.dumps(traj)
        r.set('Traj', obj)
        # One trigger key per rank; '1' means "new traj available".
        triggers = {'Traj_trigger' +
                    "_{}".format(rank): '1' for rank in range(traj.world_size)}
        # The master itself has nothing to consume; pre-clear its own flag.
        triggers["Traj_trigger_{}".format(master_rank)] = '0'
        r.mset(triggers)
        # Poll until every worker has acknowledged by resetting its flag.
        while True:
            time.sleep(0.1)
            values = r.mget(triggers)
            if all([_int(v) == 0 for v in values]):
                break
    else:
        # Wait for the master to raise this rank's trigger.
        while True:
            time.sleep(0.1)
            trigger = r.get('Traj_trigger' +
                            "_{}".format(rank))
            if _int(trigger) == 1:
                break
        obj = cloudpickle.loads(r.get('Traj'))
        traj.copy(obj)
        # Acknowledge receipt so the master can stop polling.
        r.set('Traj_trigger' + "_{}".format(rank), '0')
    return traj
def update_pris(traj, td_loss, indices, alpha=0.6, epsilon=1e-6, update_epi_pris=False, seq_length=None, eta=0.9):
    """
    Update priorities specified in indices.
    Parameters
    ----------
    traj : Traj
    td_loss : torch.Tensor
        Absolute TD errors for the sampled transitions (one per index).
    indices : torch.Tensor ot List of int
    alpha : float
        Priority exponent: pri = (|td| + epsilon) ** alpha.
    epsilon : float
        Small constant keeping priorities strictly positive.
    update_epi_pris : bool
        If True, all priorities of a episode including indices[0] are updated.
    seq_length : int
        Length of batch.
    eta : float
        Mixing weight between max and mean priority within a sequence window.
    Returns
    -------
    traj : Traj
    """
    pris = (torch.abs(td_loss) + epsilon) ** alpha
    traj.data_map['pris'][indices] = pris.detach().to(traj.traj_device())
    if update_epi_pris:
        epi_start = -1
        epi_end = -1
        seq_start = indices[0]
        # Locate the episode containing the first sampled index via the
        # cumulative episode boundary list.
        for i in range(1, len(traj._epis_index)):
            if seq_start < traj._epis_index[i]:
                epi_start = traj._epis_index[i-1]
                epi_end = traj._epis_index[i]
                break
        pris = traj.data_map['pris'][epi_start: epi_end]
        # Number of full-length windows of seq_length inside the episode.
        n_seq = len(pris) - seq_length + 1
        abs_pris = np.abs(pris.cpu().numpy())
        # Sequence priority = eta * max + (1 - eta) * mean over each window.
        seq_pris = np.array([eta * np.max(abs_pris[i:i+seq_length]) + (1 - eta) *
                             np.mean(abs_pris[i:i+seq_length]) for i in range(n_seq)], dtype='float32')
        traj.data_map['seq_pris'][epi_start:epi_start +
                                  n_seq] = torch.tensor(seq_pris, dtype=torch.float, device=get_device())
    return traj
|
AswinRetnakumar/Machina
|
machina/algos/svg.py
|
"""
This is an implementation of Stochastic Value Gradient.
See https://arxiv.org/abs/1510.09142
"""
import torch
import torch.nn as nn
from machina import loss_functional as lf
from machina import logger
def train(traj,
          pol, targ_pol, qf, targ_qf,
          optim_pol, optim_qf,
          epoch, batch_size,  # optimization hypers
          tau, gamma,  # advantage estimation
          sampling,
          ):
    """
    Train function for stochastic value gradient.
    Parameters
    ----------
    traj : Traj
        Off policy trajectory.
    pol : Pol
        Policy.
    targ_pol : Pol
        Target Policy.
    qf : SAVfunction
        Q function.
    targ_qf : SAVfunction
        Target Q function.
    optim_pol : torch.optim.Optimizer
        Optimizer for Policy.
    optim_qf : torch.optim.Optimizer
        Optimizer for Q function.
    epoch : int
        Number of iteration.
    batch_size : int
        Number of batches.
    tau : float
        Target updating rate.
    gamma : float
        Discounting rate.
    sampling : int
        Number of sampling in calculating expectation.
    Returns
    -------
    result_dict : dict
        Dictionary which contains losses information.
    """
    pol_losses = []
    qf_losses = []
    logger.log("Optimizing...")
    for batch in traj.iterate(batch_size, epoch):
        qf_bellman_loss = lf.bellman(
            qf, targ_qf, targ_pol, batch, gamma, sampling=sampling)
        optim_qf.zero_grad()
        qf_bellman_loss.backward()
        optim_qf.step()
        pol_loss = lf.ag(pol, qf, batch, sampling)
        optim_pol.zero_grad()
        pol_loss.backward()
        optim_pol.step()
        # Bug fix: zipping pol and qf parameters in a single zip() truncates
        # to the shorter parameter list, leaving some target parameters stale
        # when the two networks differ in size. Polyak-average each pair
        # independently (matching the other algos in this package).
        for p, targ_p in zip(pol.parameters(), targ_pol.parameters()):
            targ_p.detach().copy_((1 - tau) * targ_p.detach() + tau * p.detach())
        for q, targ_q in zip(qf.parameters(), targ_qf.parameters()):
            targ_q.detach().copy_((1 - tau) * targ_q.detach() + tau * q.detach())
        qf_losses.append(qf_bellman_loss.detach().cpu().numpy())
        pol_losses.append(pol_loss.detach().cpu().numpy())
    logger.log("Optimization finished!")
    return dict(PolLoss=pol_losses,
                QfLoss=qf_losses,
                )
|
AswinRetnakumar/Machina
|
setup.py
|
<gh_stars>0
#!/usr/bin/env python
import os
import pkg_resources
import sys
from setuptools import setup
from setuptools import find_packages
# Use the repository README as the long description shown on PyPI.
with open('README.md', 'r') as f:
    readme = f.read()
# Runtime dependencies; pinned versions are the minimum tested releases.
install_requires = [
    'cached_property',
    'torch>=1.0.1',
    'joblib>=0.11',
    'cloudpickle',
    'redis',
    'gym>=0.10.5',
    'numpy>=1.13.3',
    'terminaltables',
    'pandas',
]
setup(
    name='machina-rl',
    version='0.2.1',
    description='machina is a library for a deep reinforcement learning.',
    long_description=readme,
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/DeepX-inc/machina',
    license='MIT License',
    packages=find_packages(),
    zip_safe=False,
    install_requires=install_requires,
    test_suite='tests'
)
|
AswinRetnakumar/Machina
|
machina/pds/mixture_gaussian_pd.py
|
<reponame>AswinRetnakumar/Machina
import torch
from torch.distributions import OneHotCategorical
import numpy as np
from machina.pds.base import BasePd
from machina.pds.gaussian_pd import GaussianPd
class MixtureGaussianPd(BasePd):
    """Probability distribution for a mixture of diagonal Gaussians.

    Parameters dict convention: ``pi`` are mixture weights of shape
    (batch, components); ``mean`` and ``log_std`` have a component axis at
    dim 1 — presumably (batch, components, ac_dim). TODO confirm against
    callers.
    """

    def __init__(self, observation_space, action_space):
        BasePd.__init__(self, observation_space, action_space)
        # Reused for per-component log-likelihoods and pairwise KLs.
        self.gaussian_pd = GaussianPd(observation_space, action_space)

    def sample(self, params):
        """Draw one action: pick a component via one-hot categorical, then
        sample from that component's Gaussian via the reparameterized form."""
        pi, mean, log_std = params['pi'], params['mean'], params['log_std']
        pi_onehot = OneHotCategorical(pi).sample()
        # The one-hot mask zeroes out every component except the chosen one;
        # summing over the component axis selects its sample.
        ac = torch.sum((mean + torch.randn_like(mean) *
                        torch.exp(log_std)) * pi_onehot.unsqueeze(-1), 1)
        return ac

    def llh(self, x, params):
        """Log-likelihood of ``x``: log of the pi-weighted sum of the
        per-component Gaussian likelihoods."""
        pis = params['pi']
        means = params['mean']
        log_stds = params['log_std']
        llh = 0
        for i in range(pis.shape[1]):
            pi = pis[:, i]
            mean = means[:, i, :]
            log_std = log_stds[:, i, :]
            # exp(log-likelihood) converts back to a density before weighting.
            llh = llh + pi * \
                torch.exp(self.gaussian_pd.llh(
                    x, dict(mean=mean, log_std=log_std)))
        return torch.log(llh)

    def kl_pq(self, p_params, q_params):
        """Approximate KL(p || q) between two Gaussian mixtures.

        NOTE(review): this looks like the variational upper-bound approximation
        built from pairwise component KLs (the exact mixture KL has no closed
        form) — confirm against the intended reference.
        """
        p_pis = p_params['pi']
        p_means = p_params['mean']
        p_log_stds = p_params['log_std']
        q_pis = q_params['pi']
        q_means = q_params['mean']
        q_log_stds = q_params['log_std']
        kl = 0
        for i in range(p_pis.shape[1]):
            p_pi = p_pis[:, i]
            p_mean = p_means[:, i, :]
            p_log_std = p_log_stds[:, i, :]
            q_pi = q_pis[:, i]
            q_mean = q_means[:, i, :]
            q_log_std = q_log_stds[:, i, :]
            # Weighted similarity of component i of p to all components of p.
            numerator = 0
            for ii in range(p_pis.shape[1]):
                numerator = numerator + p_pis[:, ii] * torch.exp(
                    -self.gaussian_pd.kl_pq(
                        dict(mean=p_mean, log_std=p_log_std),
                        dict(mean=p_means[:, ii, :],
                             log_std=p_log_stds[:, ii, :])
                    )
                )
            # Weighted similarity of component i of p to all components of q.
            denominator = 0
            for ii in range(p_pis.shape[1]):
                denominator = denominator + p_pis[:, ii] * torch.exp(
                    -self.gaussian_pd.kl_pq(
                        dict(mean=p_mean, log_std=p_log_std),
                        dict(mean=q_means[:, ii, :],
                             log_std=q_log_stds[:, ii, :])
                    )
                )
            kl = kl + p_pi * torch.log(numerator / denominator)
        return kl
|
AswinRetnakumar/Machina
|
example/take_movie.py
|
<reponame>AswinRetnakumar/Machina
import gym
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import pickle
from pprint import pprint
import os
import json
import argparse
"""
Script for taking movie of learned policy.
"""
from simple_net import PolNet, VNet, PolNetLSTM, VNetLSTM
from machina.utils import measure, set_device
from machina import logger
from machina.samplers import EpiSampler
from machina.envs import GymEnv, C2DEnv
from machina.noise import OUActionNoise
from machina.pols import GaussianPol, CategoricalPol, MultiCategoricalPol, DeterministicActionNoisePol
import machina as mc
"""
Script for taking movie of learned policy.
"""
# Command-line options for loading a trained policy and recording rollouts.
parser = argparse.ArgumentParser()
parser.add_argument('--pol_dir', type=str, default='garbage',
                    help='Directory path storing file of optimal policy model.')
parser.add_argument('--pol_fname', type=str, default='pol_max.pkl',
                    help='File name of optimal policy model.')
parser.add_argument('--env_name', type=str,
                    default='Pendulum-v0', help='Name of environment.')
parser.add_argument('--c2d', action='store_true',
                    default=False, help='If True, action is discretized.')
parser.add_argument('--record', action='store_true',
                    default=False, help='If True, movie is saved.')
parser.add_argument('--seed', type=int, default=256)
parser.add_argument('--cuda', type=int, default=-1, help='cuda device number.')
parser.add_argument('--rnn', action='store_true',
                    default=False, help='If True, network is reccurent.')
parser.add_argument('--pol_h1', type=int, default=200,
                    help='Hidden size of layer1 of policy.')
parser.add_argument('--pol_h2', type=int, default=100,
                    help='Hidden size of layer2 of policy.')
parser.add_argument('--num_epis', type=int, default=5,
                    help='Number of episodes of expert trajectories.')
parser.add_argument('--ddpg', action='store_true',
                    default=False, help='If True, policy for DDPG is used.')
args = parser.parse_args()
if not os.path.exists(args.pol_dir):
    os.mkdir(args.pol_dir)
# Persist the invocation arguments next to the recorded movie.
with open(os.path.join(args.pol_dir, 'args.json'), 'w') as f:
    json.dump(vars(args), f)
pprint(vars(args))
np.random.seed(args.seed)
torch.manual_seed(args.seed)
device_name = 'cpu' if args.cuda < 0 else "cuda:{}".format(args.cuda)
device = torch.device(device_name)
set_device(device)
# NOTE(review): record_video is hard-coded True, so --record has no effect —
# confirm whether it should be record_video=args.record.
env = GymEnv(args.env_name, log_dir=os.path.join(
    args.pol_dir, 'optimal_movie'), record_video=True, video_schedule=lambda x: True)
env.env.seed(args.seed)
if args.c2d:
    env = C2DEnv(env)
observation_space = env.observation_space
action_space = env.action_space
if args.ddpg:
    # Bug fix: argparse defines --pol_h1/--pol_h2; args.h1/args.h2 do not
    # exist and raised AttributeError in this branch.
    pol_net = PolNet(observation_space, action_space,
                     args.pol_h1, args.pol_h2, deterministic=True)
    noise = OUActionNoise(action_space.shape)
    pol = DeterministicActionNoisePol(
        observation_space, action_space, pol_net, noise)
else:
    if args.rnn:
        pol_net = PolNetLSTM(observation_space, action_space,
                             h_size=256, cell_size=256)
    else:
        pol_net = PolNet(observation_space, action_space)
    # Pick the policy class matching the (possibly discretized) action space.
    if isinstance(action_space, gym.spaces.Box):
        pol = GaussianPol(observation_space, action_space, pol_net, args.rnn)
    elif isinstance(action_space, gym.spaces.Discrete):
        pol = CategoricalPol(
            observation_space, action_space, pol_net, args.rnn)
    elif isinstance(action_space, gym.spaces.MultiDiscrete):
        pol = MultiCategoricalPol(
            observation_space, action_space, pol_net, args.rnn)
    else:
        raise ValueError('Only Box, Discrete, and MultiDiscrete are supported')
sampler = EpiSampler(env, pol, num_parallel=1, seed=args.seed)
# Load the trained weights onto CPU regardless of where they were saved.
with open(os.path.join(args.pol_dir, 'models', args.pol_fname), 'rb') as f:
    pol.load_state_dict(torch.load(
        f, map_location=lambda storage, location: storage))
epis = sampler.sample(pol, max_epis=args.num_epis)
rewards = [np.sum(epi['rews']) for epi in epis]
mean_rew = np.mean(rewards)
logger.log('score={}'.format(mean_rew))
del sampler
|
AswinRetnakumar/Machina
|
machina/pols/mpc_pol.py
|
import numpy as np
import torch
import copy
from machina.pds import DeterministicPd
from machina.pols import BasePol
from machina.utils import get_device
class MPCPol(BasePol):
    """
    Policy with model predictive control.
    Parameters
    ----------
    observation_space : gym.Space
        observation's space
    action_space : gym.Space
        action's space.
        This should be gym.spaces.Box
    net : torch.nn.Module
        dymamics model
    rew_func : function
        rt = rew_func(st+1, at). rt, st+1 and at are torch.tensor.
    n_samples : int
        num of action samples in the model predictive control
    horizon : int
        horizon of prediction
    mean_obs : np.array
    std_obs : np.array
    mean_acs : np.array
    std_acs : np.array
    rnn : bool
    normalize_ac : bool
        If True, the output of network is spreaded for action_space.
        In this situation the output of network is expected to be in -1~1.
    data_parallel : bool
        If True, network computation is executed in parallel.
        This value must be False in this policy. MPCPol doesn't support data_parallel
    parallel_dim : int
        Splitted dimension in data parallel.
    """

    def __init__(self, observation_space, action_space, net, rew_func, n_samples=1000, horizon=20,
                 mean_obs=0., std_obs=1., mean_acs=0., std_acs=1., rnn=False,
                 normalize_ac=True, data_parallel=False, parallel_dim=0):
        BasePol.__init__(self, observation_space, action_space, net, rnn=rnn, normalize_ac=normalize_ac,
                         data_parallel=data_parallel, parallel_dim=parallel_dim)
        self.rew_func = rew_func
        self.n_samples = n_samples
        self.horizon = horizon
        self.to(get_device())
        # Normalization statistics, pre-broadcast across the sample axis.
        self.mean_obs = torch.tensor(
            mean_obs, dtype=torch.float).repeat(n_samples, 1)
        self.std_obs = torch.tensor(
            std_obs, dtype=torch.float).repeat(n_samples, 1)
        self.mean_acs = torch.tensor(
            mean_acs, dtype=torch.float).repeat(n_samples, 1)
        self.std_acs = torch.tensor(
            std_acs, dtype=torch.float).repeat(n_samples, 1)

    def reset(self):
        super(MPCPol, self).reset()

    def forward(self, ob, hs=None, h_masks=None):
        """Random-shooting MPC: sample action sequences, roll them out through
        the learned dynamics model, and return the first action of the best
        sequence."""
        # randomly sample N candidate action sequences
        sample_acs = torch.empty(self.horizon, self.n_samples, self.action_space.shape[0], dtype=torch.float).uniform_(
            self.action_space.low[0], self.action_space.high[0])
        normalized_acs = (sample_acs - self.mean_acs) / self.std_acs
        # forward simulate the action sequences to get predicted trajectories
        obs = torch.zeros((self.horizon+1, self.n_samples,
                           self.observation_space.shape[0]), dtype=torch.float)
        rews_sum = torch.zeros(
            (self.n_samples), dtype=torch.float)
        obs[0] = ob.repeat(self.n_samples, 1)
        obs[0] = (obs[0] - self.mean_obs) / self.std_obs
        if self.rnn:
            time_seq, batch_size, *_ = obs.shape
            if hs is None:
                if self.hs is None:
                    self.hs = self.net.init_hs(batch_size)
                hs = self.hs
            if h_masks is None:
                h_masks = hs[0].new(time_seq, batch_size, 1).zero_()
            h_masks = h_masks.reshape(time_seq, batch_size, 1)
        with torch.no_grad():
            for i in range(self.horizon):
                ac = normalized_acs[i]
                if self.rnn:
                    # The model predicts the normalized state delta.
                    d_ob, hs = self.net(obs[i].unsqueeze(
                        0), ac.unsqueeze(0), hs, h_masks)
                    obs[i+1] = obs[i] + d_ob
                else:
                    obs[i+1] = obs[i] + self.net(obs[i], ac)
                rews_sum += self.rew_func(obs[i+1], sample_acs[i],
                                          self.mean_obs, self.std_obs)
        # Execute only the first action of the highest-return sequence.
        best_sample_index = rews_sum.max(0)[1]
        ac = sample_acs[0][best_sample_index]
        ac_real = ac.cpu().numpy()
        if self.rnn:
            # Advance the real hidden state with the action actually taken.
            normalized_ac = normalized_acs[0][best_sample_index].repeat(
                self.n_samples, 1)
            with torch.no_grad():
                _, self.hs = self.net(obs[0].unsqueeze(
                    0), normalized_ac.unsqueeze(0), self.hs, h_masks)
        return ac_real, ac, dict(mean=ac)

    def deterministic_ac_real(self, obs):
        """
        action for deployment
        """
        # Bug fix: the original unpacked ``mean_read`` but returned
        # ``mean_real``, raising NameError on every call.
        mean_real, mean, dic = self.forward(obs)
        return mean_real, mean, dic
|
AswinRetnakumar/Machina
|
machina/pols/categorical_pol.py
|
import numpy as np
import torch
import torch.nn as nn
from machina.pols import BasePol
from machina.pds.categorical_pd import CategoricalPd
from machina.utils import get_device
class CategoricalPol(BasePol):
    """
    Policy with Categorical distribution.
    Parameters
    ----------
    observation_space : gym.Space
        observation's space
    action_space : gym.Space
        action's space
        This should be gym.spaces.Discrete
    net : torch.nn.Module
    rnn : bool
    normalize_ac : bool
        If True, the output of network is spreaded for action_space.
        In this situation the output of network is expected to be in -1~1.
    data_parallel : bool or str
        If True, network computation is executed in parallel.
        If data_parallel is ddp, network computation is executed in distributed parallel.
    parallel_dim : int
        Splitted dimension in data parallel.
    """

    def __init__(self, observation_space, action_space, net, rnn=False, normalize_ac=True, data_parallel=False, parallel_dim=0):
        BasePol.__init__(self, observation_space, action_space, net, rnn,
                         normalize_ac, data_parallel, parallel_dim)
        self.pd = CategoricalPd()
        self.to(get_device())

    def forward(self, obs, hs=None, h_masks=None):
        """Sample an action from the categorical distribution produced by the
        network; returns (numpy action, tensor action, params dict)."""
        obs = self._check_obs_shape(obs)
        if self.rnn:
            time_seq, batch_size, *_ = obs.shape
            if hs is None:
                if self.hs is None:
                    # Lazily initialize the recurrent hidden state.
                    self.hs = self.net.init_hs(batch_size)
                if self.dp_run:
                    # Data-parallel nets expect an extra leading replica dim.
                    self.hs = (self.hs[0].unsqueeze(
                        0), self.hs[1].unsqueeze(0))
                hs = self.hs
            if h_masks is None:
                # No masking by default: all timesteps keep their hidden state.
                h_masks = hs[0].new(time_seq, batch_size, 1).zero_()
            h_masks = h_masks.reshape(time_seq, batch_size, 1)
            if self.dp_run:
                pi, hs = self.dp_net(obs, hs, h_masks)
            else:
                pi, hs = self.net(obs, hs, h_masks)
            # Persist the hidden state for the next forward call.
            self.hs = hs
        else:
            if self.dp_run:
                pi = self.dp_net(obs)
            else:
                pi = self.net(obs)
        ac = self.pd.sample(dict(pi=pi))
        ac_real = self.convert_ac_for_real(ac.detach().cpu().numpy())
        return ac_real, ac, dict(pi=pi, hs=hs)

    def deterministic_ac_real(self, obs, hs=None, h_masks=None):
        """
        action for deployment

        Same as forward, but picks the argmax action instead of sampling.
        """
        obs = self._check_obs_shape(obs)
        if self.rnn:
            time_seq, batch_size, *_ = obs.shape
            if hs is None:
                if self.hs is None:
                    self.hs = self.net.init_hs(batch_size)
                hs = self.hs
            if h_masks is None:
                h_masks = hs[0].new(time_seq, batch_size, 1).zero_()
            h_masks = h_masks.reshape(time_seq, batch_size, 1)
            pi, hs = self.net(obs, hs, h_masks)
            self.hs = hs
        else:
            pi = self.net(obs)
        # Greedy action: index of the highest probability/logit.
        _, ac = torch.max(pi, dim=-1)
        ac_real = self.convert_ac_for_real(ac.detach().cpu().numpy())
        return ac_real, ac, dict(pi=pi, hs=hs)
|
AswinRetnakumar/Machina
|
machina/models/__init__.py
|
from machina.models.base import BaseModel
from machina.models.deterministic_state_model import DeterministicSModel
|
4so-fourseasons/web-cookiecutter
|
hooks/post_gen_project.py
|
<reponame>4so-fourseasons/web-cookiecutter<gh_stars>1-10
import subprocess
import os
from sys import version_info
# Use the builtin ``input`` on Python 3; fall back to ``raw_input`` on Python 2.
py3 = version_info[0] > 2
if py3:
    input_fnc = input
else:
    input_fnc = raw_input
# For each step, any answer other than an explicit "no" means yes.
init_git = input_fnc('Initiate new git repository? [YES][no]')
if init_git.lower() == 'no':
    pass
else:
    subprocess.call(['git', 'init'])
install_npm = input_fnc('Install npm dependencies? [YES][no]')
if install_npm.lower() == 'no':
    pass
else:
    subprocess.call('npm i', shell=True)
# Only offer an initial commit when a repository was actually initialized.
if init_git.lower() == 'no':
    pass
else:
    initial_commit = input_fnc('Create initial commit? [YES][no]')
    if initial_commit.lower() == 'no':
        pass
    else:
        subprocess.call(['git', 'add', './'])
        subprocess.call(['git', 'commit', '-m', '"Initial Commit"'])
        # ``{{ ... }}`` placeholders are rendered by cookiecutter before this
        # post-generation hook runs, so the real repo URL appears here.
        print('Adding remote origin {{cookiecutter.repo_url}}...')
        subprocess.call(['git', 'remote', 'add', 'origin', '{{ cookiecutter.repo_url }}'])
        print('Checking out dev branch')
        subprocess.call(['git', 'checkout', '-b', 'dev'])
|
bradshjg/nameko-cron
|
tests/test_cron.py
|
import logging
import eventlet
import freezegun
import pytest
from unittest.mock import Mock
from nameko.testing.services import get_extension
from nameko.testing.utils import wait_for_call
from nameko_cron import ConcurrencyPolicy, Cron, cron
@pytest.fixture
def tracker():
    """Mock used to count how many times a cron-decorated method fires."""
    return Mock()
@pytest.mark.parametrize("timeout,concurrency,task_time,expected_calls", [
    # the cron schedule is set to spawn a worker every second
    (5, ConcurrencyPolicy.WAIT, 0, 5),  # a short-lived worker run at 0, 1, 2, 3, 4, 5
    (5, ConcurrencyPolicy.WAIT, 2, 3),  # a long-lived worker should fire at 0, 2, 4
    (5, ConcurrencyPolicy.ALLOW, 10, 5),  # if concurrency is permitted, new workers spawn alongside existing ones
    (5, ConcurrencyPolicy.SKIP, 1.5, 3),  # skipping should run at 0, 2, and 4
    (5, ConcurrencyPolicy.WAIT, 1.5, 4),  # run at 0, 1.5, 3, 4.5 (always behind)
])
def test_cron_runs(timeout, concurrency, task_time, expected_calls, container_factory, tracker):
    """Test running the cron main loop."""
    class Service(object):
        name = "service"

        @cron('* * * * * *', concurrency=concurrency)
        def tick(self):
            tracker()
            eventlet.sleep(task_time)
    container = container_factory(Service, {})
    # Check that Cron instance is initialized correctly
    instance = get_extension(container, Cron)
    assert instance.schedule == '* * * * * *'
    assert instance.tz is None
    assert instance.concurrency == concurrency
    # Start just before midnight (tick=True lets time advance) so the
    # every-second schedule fires almost immediately after start.
    with freezegun.freeze_time('2020-11-20 23:59:59.5', tick=True):
        container.start()
        eventlet.sleep(timeout)
        container.stop()
    assert tracker.call_count == expected_calls
@pytest.mark.parametrize("timezone,expected_first_interval_hours", [
    ("America/Chicago", 22),
    ("America/New_York", 21),
])
def test_timezone_aware_cron(timezone, expected_first_interval_hours):
    """A daily schedule in a local timezone yields intervals relative to that
    zone's midnight, 24h apart thereafter."""
    with freezegun.freeze_time('2020-11-20 08:00:00'):  # 2AM America/Chicago time (i.e. this is UTC)
        cron_extension = Cron('0 0 * * *', tz=timezone)
        next_interval = cron_extension._get_next_interval()
        # First firing is at the next local midnight; subsequent ones 24h later.
        assert next(next_interval) == expected_first_interval_hours*60*60
        assert next(next_interval) == expected_first_interval_hours*60*60 + 24*60*60
def test_kill_stops_cron(container_factory, tracker):
    """Killing the container stops the cron loop from firing again."""
    class Service(object):
        name = "service"

        @cron('* * * * * *')
        def tick(self):
            tracker()
    container = container_factory(Service, {})
    container.start()
    # Wait until the first tick, then kill; no further ticks should occur.
    with wait_for_call(2.0, tracker):
        container.kill()
    eventlet.sleep(2.0)
    assert tracker.call_count == 1
def test_stop_while_sleeping(container_factory, tracker):
    """Check that waiting for the cron to fire does not block the container
    from being shut down gracefully.
    """
    class Service(object):
        name = "service"

        @cron('* * * * * *')
        def tick(self):
            tracker()  # pragma: no cover
    container = container_factory(Service, {})
    container.start()
    # raise a Timeout if the container fails to stop within 1 second
    with eventlet.Timeout(1):
        container.stop()
    assert tracker.call_count == 0, 'Cron should not have fired.'
def test_timer_error(container_factory, caplog, tracker):
    """Check that an error in the decorated method does not cause the service
    container's loop to raise an exception.
    """
    class Service(object):
        name = "service"

        @cron('* * * * * *')
        def tick(self):
            tracker()
    tracker.side_effect = ValueError('Boom!')
    container = container_factory(Service, {})
    with caplog.at_level(logging.CRITICAL):
        container.start()
        eventlet.sleep(1.0)
        # Check that the function was actually called and that the error was
        # handled gracefully.
        assert tracker.call_count == 1
        container.stop()
    # Check that no errors are thrown in the runner's thread.
    # We can't check for raised errors here as the actual
    # exception is eaten by the worker pool's handler.
    assert len(caplog.records) == 0, (
        'Expected no errors to have been '
        'raised in the worker thread.'
    )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.